<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.1 20151215//EN" "http://jats.nlm.nih.gov/publishing/1.1/JATS-journalpublishing1.dtd">
<article xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:mml="http://www.w3.org/1998/Math/MathML" xml:lang="en" article-type="research-article" dtd-version="1.1">
<front>
<journal-meta>
<journal-id journal-id-type="pmc">CMC</journal-id>
<journal-id journal-id-type="nlm-ta">CMC</journal-id>
<journal-id journal-id-type="publisher-id">CMC</journal-id>
<journal-title-group>
<journal-title>Computers, Materials &#x0026; Continua</journal-title>
</journal-title-group>
<issn pub-type="epub">1546-2226</issn>
<issn pub-type="ppub">1546-2218</issn>
<publisher>
<publisher-name>Tech Science Press</publisher-name>
<publisher-loc>USA</publisher-loc>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="publisher-id">53132</article-id>
<article-id pub-id-type="doi">10.32604/cmc.2024.053132</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Article</subject>
</subj-group>
</article-categories>
<title-group>
<article-title>Dynamic Multi-Layer Perceptron for Fetal Health Classification Using Cardiotocography Data</article-title>
<alt-title alt-title-type="left-running-head">Dynamic Multi-Layer Perceptron for Fetal Health Classification Using Cardiotocography Data</alt-title>
<alt-title alt-title-type="right-running-head">Dynamic Multi-Layer Perceptron for Fetal Health Classification Using Cardiotocography Data</alt-title>
</title-group>
<contrib-group>
<contrib id="author-1" contrib-type="author">
<name name-style="western"><surname>Sirisha</surname><given-names>Uddagiri</given-names></name><xref ref-type="aff" rid="aff-1">1</xref><xref ref-type="author-notes" rid="fn1"><sup>#</sup></xref></contrib>
<contrib id="author-2" contrib-type="author" corresp="yes">
<name name-style="western"><surname>Srinivasu</surname><given-names>Parvathaneni Naga</given-names></name><xref ref-type="aff" rid="aff-2">2</xref><xref ref-type="aff" rid="aff-3">3</xref><email>parvathanenins@gmail.com</email></contrib>
<contrib id="author-3" contrib-type="author">
<name name-style="western"><surname>Padmavathi</surname><given-names>Panguluri</given-names></name><xref ref-type="aff" rid="aff-4">4</xref></contrib>
<contrib id="author-4" contrib-type="author">
<name name-style="western"><surname>Kim</surname><given-names>Seongki</given-names></name><xref ref-type="aff" rid="aff-5">5</xref><xref ref-type="author-notes" rid="fn1"><sup>#</sup></xref></contrib>
<contrib id="author-5" contrib-type="author">
<name name-style="western"><surname>Pavate</surname><given-names>Aruna</given-names></name><xref ref-type="aff" rid="aff-6">6</xref></contrib>
<contrib id="author-6" contrib-type="author">
<name name-style="western"><surname>Shafi</surname><given-names>Jana</given-names></name><xref ref-type="aff" rid="aff-7">7</xref></contrib>
<contrib id="author-7" contrib-type="author" corresp="yes">
<name name-style="western"><surname>Ijaz</surname><given-names>Muhammad Fazal</given-names></name><xref ref-type="aff" rid="aff-8">8</xref><email>mfazal@mit.edu.au</email></contrib>
<aff id="aff-1"><label>1</label><institution>Department of Computer Science and Engineering, Prasad V Potluri Siddhartha Institute of Technology</institution>, <addr-line>Vijayawada, 520007</addr-line>,
<country>India</country></aff>
<aff id="aff-2"><label>2</label><institution>Amrita School of Computing, Amrita Vishwa Vidyapeetham</institution>, <addr-line>Amaravati, Andhra Pradesh, 522503</addr-line>, <country>India</country></aff>
<aff id="aff-3"><label>3</label><institution>Department of Tele informatics Engineering, Federal University of Cear&#x00E1;</institution>, <addr-line>Fortaleza, 60455</addr-line>-<addr-line>970</addr-line>, <country>Brazil</country></aff>
<aff id="aff-4"><label>4</label><institution>Department of Computer Science and Engineering, Koneru Lakshmaiah Education Foundation, Vaddeswaram</institution>, <addr-line>Guntur, Andhra Pradesh, 522302</addr-line>, <country>India</country></aff>
<aff id="aff-5"><label>5</label><institution>Department of Computer Engineering, Chosun University</institution>, <addr-line>Gwangju, 61452</addr-line>, <country>Republic of Korea</country></aff>
<aff id="aff-6"><label>6</label><institution>School of CSIT, Symbiosis Skills and Professional University</institution>, <addr-line>Pune, 412101</addr-line>, <country>India</country></aff>
<aff id="aff-7"><label>7</label><institution>Department of Computer Engineering and Information, College of Engineering in Wadi Alddawasir, Prince Sattam bin Abdulaziz University, Wadi Alddawasir</institution>, <addr-line>11991</addr-line>, <country>Saudi Arabia</country></aff>
<aff id="aff-8"><label>8</label><institution>School of IT and Engineering, Melbourne Institute of Technology</institution>, <addr-line>Melbourne, 3000</addr-line>, <country>Australia</country></aff>
</contrib-group>
<author-notes>
<corresp id="cor1"><label>&#x002A;</label>Corresponding Authors: Parvathaneni Naga Srinivasu. Email: <email>parvathanenins@gmail.com</email>; Muhammad Fazal Ijaz. Email: <email>mfazal@mit.edu.au</email></corresp>
<fn id="fn1"><p><sup>#</sup>These authors contributed equally</p></fn>
</author-notes>
<pub-date date-type="collection" publication-format="electronic">
<year>2024</year></pub-date>
<pub-date date-type="pub" publication-format="electronic">
<day>15</day>
<month>8</month>
<year>2024</year></pub-date>
<volume>80</volume>
<issue>2</issue>
<fpage>2301</fpage>
<lpage>2330</lpage>
<history>
<date date-type="received">
<day>25</day>
<month>4</month>
<year>2024</year>
</date>
<date date-type="accepted">
<day>02</day>
<month>7</month>
<year>2024</year>
</date>
</history>
<permissions>
<copyright-statement>&#x00A9; 2024 Sirisha et al.</copyright-statement>
<copyright-year>2024</copyright-year>
<copyright-holder>Sirisha et al.</copyright-holder>
<license xlink:href="https://creativecommons.org/licenses/by/4.0/">
<license-p>This work is licensed under a <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution 4.0 International License</ext-link>, which permits unrestricted use, distribution, and reproduction in any medium, provided the original work is properly cited.</license-p>
</license>
</permissions>
<self-uri content-type="pdf" xlink:href="TSP_CMC_53132.pdf"></self-uri>
<abstract>
<p>Fetal health care is vital in ensuring the health of pregnant women and the fetus. Regular check-ups need to be taken by the mother to determine the status of the fetus&#x2019; growth and identify any potential problems. To know the status of the fetus, doctors monitor blood reports, Ultrasounds, cardiotocography (CTG) data, etc. Still, in this research, we have considered CTG data, which provides information on heart rate and uterine contractions during pregnancy. Several researchers have proposed various methods for classifying the status of fetus growth. Manual processing of CTG data is time-consuming and unreliable. So, automated tools should be used to classify fetal health. This study proposes a novel neural network-based architecture, the Dynamic Multi-Layer Perceptron model, evaluated from a single layer to several layers to classify fetal health. Various strategies were applied, including pre-processing data using techniques like Balancing, Scaling, Normalization hyperparameter tuning, batch normalization, early stopping, etc., to enhance the model&#x2019;s performance. A comparative analysis of the proposed method is done against the traditional machine learning models to showcase its accuracy (97%). An ablation study without any pre-processing techniques is also illustrated. This study easily provides valuable interpretations for healthcare professionals in the decision-making process.</p>
</abstract>
<kwd-group kwd-group-type="author">
<kwd>Fetal health</kwd>
<kwd>cardiotocography data</kwd>
<kwd>deep learning</kwd>
<kwd>dynamic multi-layer perceptron</kwd>
<kwd>feature engineering</kwd>
</kwd-group>
<funding-group>
<award-group id="awg1">
<funding-source>National Research Foundation of Korea (NRF)</funding-source>
<award-id>NRF-2023R1A2C1005950</award-id>
</award-group>
<award-group id="awg2">
<funding-source>Prince Sattam bin Abdulaziz University</funding-source>
<award-id>PSAU/2024/R/1445</award-id>
</award-group>
</funding-group>
</article-meta>
</front>
<body>
<sec id="s1">
<label>1</label>
<title>Introduction</title>
<p>Fetal health plays an important role in a child&#x2019;s future and a healthy pregnancy. Fetal health is monitored by doctors based on the mother&#x2019;s health history, physical examinations, blood tests, screening tests, and cardiotocography, which tracks heart rate and contractions to assess risks [<xref ref-type="bibr" rid="ref-1">1</xref>,<xref ref-type="bibr" rid="ref-2">2</xref>]. Regular check-ups need to be taken by the mother to know the status of the fetus&#x2019;s growth and to identify any potential problems. The frequency of check-ups varies from person to person based on individual health factors like high-risk pregnancies and the growth rate of the fetus. A healthy pregnancy lays the foundation for the timely development of organs and reduces the risk of chronic diseases in individuals [<xref ref-type="bibr" rid="ref-3">3</xref>,<xref ref-type="bibr" rid="ref-4">4</xref>].</p>
<p>AI is used in various sectors, one of which is health care. Machine learning and deep learning algorithms can be used to analyze medical records like blood test reports, ultrasound data, CTG data, etc., to detect complications in pregnant women early. AI is also useful for categorizing pregnant women (fetal health) based on their risk factors and suggesting further plans [<xref ref-type="bibr" rid="ref-5">5</xref>&#x2013;<xref ref-type="bibr" rid="ref-7">7</xref>]. Processing CTG data manually is time-consuming and error-prone. So, AI helps analyze CTG data and provides a faster, more detailed report than human interpretation. Thus, AI plays a vital role in empowering healthcare professionals in the healthcare sector. Healthcare professionals, combining their expertise with AI-based analysis of CTG data, can deliver optimal care for pregnant women and fetuses.</p>
<p>Classification of fetal health is important to ensure accurate, effective prenatal care and early detection of potential complications. Traditional methods may sometimes be prone to errors and inconsistencies, so we need to use emerging tools such as machine and deep learning techniques. Manual processing of CTG data is time-consuming and unreliable. The available tools are not enough for fetal health classification. To enhance the diagnostic accuracy, we can use advanced neural network architectures. So, to achieve better results, we propose a Dynamic Multi-Layer Perceptron model (DMLP). We apply various pre-processing techniques, balance the dataset, scale the dataset, and extract the most important features from the CTG data of the fetus. The processed dataset is supplied as input to the traditional ML methods and the DMLP model.</p>
<p>Ensuring accurate and timely classification of fetal health is crucial for effective prenatal care, enabling early detection and intervention for potential complications. Traditional assessment methods, which often depend on subjective evaluation by healthcare professionals, can be prone to inconsistencies and errors. Machine Learning (ML) classifiers have emerged as promising tools to enhance diagnostic accuracy and objectivity in medical assessments. However, to push the boundaries of classification performance, it is essential to explore advanced neural network architectures. This study proposes the use of a Dynamic Multi-Layer Perceptron (DMLP) model for fetal health classification. By harnessing the sophisticated learning capabilities of the DMLP, we aim to improve the precision and reliability of fetal health diagnostics. Furthermore, we conduct a comprehensive comparison of the DMLP model against traditional ML classifiers to evaluate its effectiveness and identify its advantages and limitations. This research aspires to contribute significantly to the field of prenatal care by providing insights into the application of advanced neural networks, ultimately aiding in the development of more accurate and dependable diagnostic tools.</p>
<p>A summary of the main points of this study is as follows:
<list list-type="bullet">
<list-item>
<p>The study presents a DMLP neural network for fetal health prediction. The process begins with a single neuron and continues through several levels, with the concealed layers and neurons being adjusted dynamically. Early stopping, batch normalization, and dropout are some of the optimization strategies that this strategy uses to improve performance deliberately.</p></list-item>
<list-item>
<p>The study also examined classic ML methods that combined resampling, feature scaling, and feature selection; these modifications greatly enhanced the model&#x2019;s performance.</p></list-item>
<list-item>
<p>In the domain of fetal health categorization, we conducted a thorough investigation comparing the Dynamic MLP model with many classifiers, including Random Forest (RF), XGBoost (XGB), Decision Tree (DT), K-Nearest Neighborhood, and Logistic Regression (LR). To aid clinical specialists in their analysis and interpretation, we looked into the relevance of aspects related to each model.</p></list-item>
</list></p>
<p>A brief outline of the following sections, presented in chronological order, is as follows: <xref ref-type="sec" rid="s2">Section 2</xref> describes the literature review, <xref ref-type="sec" rid="s3">Section 3</xref> describes the datasets and procedures that will be utilized, and our recommended methodology. The results of the model&#x2019;s execution are described in <xref ref-type="sec" rid="s4">Section 4</xref>. In the last section, we will review the main points of our analysis and discuss potential avenues for further study.</p>
</sec>
<sec id="s2">
<label>2</label>
<title>Literature Review</title>
<p>Fetal health classification is important for the early detection of potential risk factors, which can be analyzed during regular check-ups. During pregnancy, the doctors observe fetal movements, heart rate, and growth using various approaches, such as blood tests and ultrasounds. When doctors identify any potential risk factors in pregnant women, timely care should be taken to avoid any miscarriage. AI-based analysis helps healthcare professionals make decisions easily. Researchers have proposed various machine and deep learning models to determine the status of the fetus. Manual processing of CTG data can sometimes be prone to errors and takes a lot of time. These models analyze the CTG data and assist healthcare professionals in decision-making. Alam et al. [<xref ref-type="bibr" rid="ref-8">8</xref>] used ML models to analyze CTG data for fetal health classification and achieved an accuracy of 97.51% using a random forest algorithm. Rahmayanti et al. [<xref ref-type="bibr" rid="ref-9">9</xref>], Noor et al. [<xref ref-type="bibr" rid="ref-10">10</xref>] have used seven algorithms ANN, LSTM, XGB, LGBM, RF, KNN, and SVM for fetal health classification using CTG data.</p>
<p>Akbulut et al. [<xref ref-type="bibr" rid="ref-11">11</xref>] have gathered data from 96 pregnant women through various questions and evaluations by healthcare professionals. They predicted fetal anomalies using multiple classification algorithms like DT, NN, and SVM. Abiyev et al. [<xref ref-type="bibr" rid="ref-12">12</xref>] proposed a Type2 Fuzzy Neural Network that helps in the decision-making of the fetus state in fetal health classification. Kasim [<xref ref-type="bibr" rid="ref-13">13</xref>] proposed a multi-class fetal classification using an ELM algorithm to analyze CTG data. Mandala [<xref ref-type="bibr" rid="ref-14">14</xref>] proposed LGBM for fetal health classification and achieved an accuracy of 98.31%. Shruthi et al. [<xref ref-type="bibr" rid="ref-15">15</xref>], Hoodbhoy et al. [<xref ref-type="bibr" rid="ref-16">16</xref>] focussed on regular check-ups of pregnant women; otherwise, risk factors cannot be assessed for the baby&#x2019;s fetal health. Imran Molla et al. [<xref ref-type="bibr" rid="ref-17">17</xref>], Islam et al. [<xref ref-type="bibr" rid="ref-18">18</xref>] used the CTG dataset for fetal health assessment, particularly oxygen deficiency in fetuses, using a random forest algorithm, and they achieved an accuracy of 94.8%.</p>
<p>The current classification approaches for fetal health categorization encounter several substantial obstacles that limit their precision. A major challenge is the quality and variety of the input data in fetal health monitoring. This is because the data comes from several sources, such as ultrasound pictures, heart rate monitors, and biochemical indicators, which may differ in terms of accuracy and reliability. Furthermore, these models sometimes encounter difficulties when dealing with unbalanced datasets, in which cases of certain fetal health issues are much less common than others. This may result in predictions that are biased and lack the capacity to detect uncommon but crucial disorders accurately. Moreover, current models may not sufficiently include the ever-changing and complex nature of fetal health, in which several parameters that interact with one another affect the final classification decision. To tackle these issues, it is necessary to create classification models that are robust in dealing with imbalanced datasets and can handle inconsistency during classification.</p>
<p>The proposed DMLP model is proven to be efficient in dealing with complex and high-dimensional data through an efficient feature engineering process. DMLPs have the ability to capture complex patterns and dependencies in the data by applying multiple layers of non-linear transformations.</p>
</sec>
<sec id="s3">
<label>3</label>
<title>Methods and Materials</title>
<p>This section of the manuscript presents all the required details regarding the dataset and the pre-processing methods used to improve the prediction model&#x2019;s accuracy. <xref ref-type="fig" rid="fig-1">Fig. 1</xref> shows the workflow of the proposed approach for fetal health classification.</p>
<fig id="fig-1">
<label>Figure 1</label>
<caption>
<title>Workflow of the proposed fetal health classification model</title>
</caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_53132-fig-1.tif"/>
</fig>
<sec id="s3_1">
<label>3.1</label>
<title>Data Repository Details</title>
<p>The fetal classification dataset [<xref ref-type="bibr" rid="ref-19">19</xref>] is a publicly available dataset obtained from Kaggle with 22 columns &#x0026; 2126 records extracted from cardiotocography examinations. Features such as &#x201C;uterine contractions,&#x201D; &#x201C;light decelerations,&#x201D; &#x201C;prolonged decelerations,&#x201D; &#x201C;abnormal short-term variability,&#x201D; and many more are included in the dataset. The dataset contains 3 different classes&#x2014;&#x201C;normal,&#x201D; &#x201C;suspect,&#x201D; &#x0026; &#x201C;pathological&#x201D;. The records reflect thorough assessments of various physiological parameters obtained from the cardiotocogram examinations. During prenatal therapy, it is possible to predict the fetal health state based on the assessment of these traits, which serve as the foundation for classification. The dataset&#x2019;s attributes are summarized in <xref ref-type="fig" rid="fig-2">Fig. 2</xref>, including information about the data type and possible values for each attribute.</p>
<fig id="fig-2">
<label>Figure 2</label>
<caption>
<title>Features in the fetal dataset and their corresponding datatypes</title>
</caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_53132-fig-2.tif"/>
</fig>
</sec>
<sec id="s3_2">
<label>3.2</label>
<title>Data Insights</title>
<p>A complete understanding of the distribution and uncertainty of the dataset is achieved via the study, which comprises predicting probability density and computing variances across several data points. Looking at the probability density and variances may give a better idea of the underlying probabilistic features and how they change across different data types. In <xref ref-type="fig" rid="fig-3">Fig. 3</xref>, a grid of Kernel Density Estimation (KDE) plots for each feature in the dataset depicts the expected probability density and variances across several &#x2018;fetal_health&#x2019; classes (Blue-normal, orange-suspect, green-pathological). The color-coded charts highlight the differences in feature distributions among health classes. These KDE charts are an indispensable exploratory data analysis (EDA) tool for this project, helping with feature distribution insights, data pre-processing, feature selection, and model interpretation.</p>
<fig id="fig-3">
<label>Figure 3</label>
<caption>
<title>Graph depicting attributes in the fetal health classification dataset</title>
</caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_53132-fig-3.tif"/>
</fig>
</sec>
<sec id="s3_3">
<label>3.3</label>
<title>Data Pre-Processing</title>
<p>Improving data quality, and reducing noise to improve the efficacy of the classification models are all responsibilities of data pre-processing. Important tasks in this phase include feature selection, cleaning, missing values management, scaling, categorical variable encoding &#x0026; handling data anomalies [<xref ref-type="bibr" rid="ref-20">20</xref>]. The efficacy of data pre-processing determines the success or failure of training &#x0026; evaluating models.</p>
<p>Using the seaborn package and matplotlib, <xref ref-type="fig" rid="fig-4">Fig. 4</xref> generates a heatmap to display the dataset&#x2019;s correlation matrix. By looking at the size and direction of the correlations among the divergent variables in the dataset, this visualization is a great tool for understanding their relationships.</p>
<fig id="fig-4">
<label>Figure 4</label>
<caption>
<title>Heatmap of all attributes in the fetal dataset</title>
</caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_53132-fig-4.tif"/>
</fig>
<sec id="s3_3_1">
<label>3.3.1</label>
<title>Handling Outliers</title>
<p>In data analysis, dealing with outliers as shown in <xref ref-type="fig" rid="fig-5">Fig. 5</xref> is essential for ensuring reliable statistical models. Effective handling of outliers is achieved by using techniques including trimming, capping, transformation, and imputation, which strengthen statistical studies and improve the quality of models. We can use the following equations for capping data points (p) below the lower cap/upper cap:</p>
<p>p<sup>&#x002A;</sup> &#x003D; LC (replace p with LC), p<sup>&#x002A;</sup> &#x003D; UC (replace p with UC).</p>
<fig id="fig-5">
<label>Figure 5</label>
<caption>
<title>An illustration of the outliers in the dataset</title>
</caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_53132-fig-5.tif"/>
</fig>
</sec>
<sec id="s3_3_2">
<label>3.3.2</label>
<title>Addressing Class Imbalance</title>
<p><xref ref-type="fig" rid="fig-6">Fig. 6</xref> is a bar graph showing the distribution of various classes before/after SMOTE (Synthetic Minority Oversampling Technique) was applied in the fetal classification dataset. The <italic>x</italic>-axis represents the class label, which is fetal health. There are three classes: normal (represented by the number 1), suspect (represented by the number 2), and pathological (represented by the number 3). The <italic>y</italic>-axis represents the number of data points in each class. From <xref ref-type="fig" rid="fig-6">Fig. 6</xref> we can observe the distribution of classes is imbalanced. There are significantly more data points in the class labeled &#x201C;normal&#x201D; (1655) than in the classes labeled &#x201C;suspect&#x201D; (295) and &#x201C;pathological&#x201D; (176). This imbalance can be a problem for machine learning algorithms, as they may favor the majority class and perform poorly on the minority classes. SMOTE [<xref ref-type="bibr" rid="ref-21">21</xref>] is an oversampling technique that can be used to address class imbalance. SMOTE works by creating synthetic data points for the minority class. These synthetic data points are created by interpolating between existing data points in the minority class. After SMOTE is applied, the distribution of classes would be more balanced. This can help to improve the performance of machine learning and deep learning algorithms on imbalanced datasets.</p>
<fig id="fig-6">
<label>Figure 6</label>
<caption>
<title>Dataset comparison pre and post SMOTE</title>
</caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_53132-fig-6.tif"/>
</fig>
</sec>
<sec id="s3_3_3">
<label>3.3.3</label>
<title>Feature Selection</title>
<p>One of the most important parts of machine learning is feature selection [<xref ref-type="bibr" rid="ref-22">22</xref>], which involves finding the right features to use for making predictions. Statistical tests such as the chi-square test, ANOVA, <italic>F</italic>-test, or mutual information score were applied. These tests determine how well each attribute correlates with the dependent variable. We choose the top <inline-formula id="ieqn-1"><mml:math id="mml-ieqn-1"><mml:mi>F</mml:mi></mml:math></inline-formula> features for our study based on their determined importance rankings. The value of <italic>F</italic> &#x003D; 10 in the current evaluation; based on the assigned weights, the most significant features in the decision process are prolonged decelerations, abnormal short-term variability, percentage of time with abnormal long-term variability, histogram mean, histogram mode, histogram median, accelerations, histogram variance, baseline value, and mean value of short-term variability. The training process uses the subset of characteristics shown in <xref ref-type="fig" rid="fig-7">Fig. 7</xref> to maximize the model&#x2019;s performance using the most important variables. This procedure helps to make models more efficient, simpler to compute, and easier to understand. The Chi-Square Test (<italic>&#x03A7;</italic>&#x00B2;) is given in <xref ref-type="disp-formula" rid="eqn-1">Eq. (1)</xref>.</p>
<p><disp-formula id="eqn-1"><label>(1)</label><mml:math id="mml-eqn-1" display="block"><mml:msup><mml:mi>X</mml:mi><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msup><mml:mo>=</mml:mo><mml:mo>&#x2211;</mml:mo><mml:mfrac><mml:msup><mml:mrow><mml:mo>(</mml:mo><mml:msub><mml:mi>O</mml:mi><mml:mrow><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:mo>&#x2212;</mml:mo><mml:msub><mml:mi>E</mml:mi><mml:mrow><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:mo>)</mml:mo></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msup><mml:msub><mml:mi>E</mml:mi><mml:mrow><mml:mi>j</mml:mi></mml:mrow></mml:msub></mml:mfrac></mml:math></disp-formula>where, <italic>O</italic><sub><italic>j</italic></sub>: Observed frequency for category <italic>j</italic>; <italic>E</italic><sub><italic>j</italic></sub>: Expected frequency for category <italic>j</italic>.</p>
<fig id="fig-7">
<label>Figure 7</label>
<caption>
<title>The graphs represent the feature importance in decreasing order of their significance</title>
</caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_53132-fig-7.tif"/>
</fig>
<p>To determine whether there is a statistically significant difference between the means of more than two groups, ANOVA compares their means as given in <xref ref-type="disp-formula" rid="eqn-2">Eq. (2)</xref>.
<disp-formula id="eqn-2"><label>(2)</label><mml:math id="mml-eqn-2" display="block"><mml:mi>F</mml:mi><mml:mo>=</mml:mo><mml:mo stretchy="false">(</mml:mo><mml:mi>M</mml:mi><mml:mi>S</mml:mi><mml:mi>B</mml:mi><mml:mrow><mml:mo>/</mml:mo></mml:mrow><mml:mi>M</mml:mi><mml:mi>S</mml:mi><mml:mi>W</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:math></disp-formula></p>
<p><italic>F</italic> represents <italic>F</italic>-statistic, Mean Squares Between (MSB), and Mean Squares Within (MSW).</p>
<p>The mutual dependence between two variables is illustrated in <xref ref-type="disp-formula" rid="eqn-3">Eq. (3)</xref>.
<disp-formula id="eqn-3"><label>(3)</label><mml:math id="mml-eqn-3" display="block"><mml:mi>M</mml:mi><mml:mi>I</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>A</mml:mi><mml:mo>&#x003A;</mml:mo><mml:mi>B</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mi>G</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>B</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mo>&#x2212;</mml:mo><mml:mi>G</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>B</mml:mi><mml:mrow><mml:mo>/</mml:mo></mml:mrow><mml:mi>A</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:math></disp-formula></p>
<p><italic>G</italic>(<italic>B</italic>) is the entropy of variable <italic>B</italic>, and <italic>G</italic>(<italic>B/A</italic>) is the conditional entropy of <italic>B</italic> given <italic>A</italic>.</p>
</sec>
<sec id="s3_3_4">
<label>3.3.4</label>
<title>Feature-Scaling</title>
<p>Because machine learning models use feature values as numerical inputs without comprehending their inherent importance, data scaling is an essential procedure in this field. Scaling is essential for making data interpretable and treating features fairly. Scaling data can be done in two main ways:
<list list-type="bullet">
<list-item>
<p><italic>Normalization:</italic> Features that do not have a normal (Gaussian) distribution can be handled using this method. The features are normalized when their values are adjusted to fall within a certain range and is expressed in <xref ref-type="disp-formula" rid="eqn-4">Eq. (4)</xref>.</p></list-item>
</list>
<disp-formula id="eqn-4"><label>(4)</label><mml:math id="mml-eqn-4" display="block"><mml:mi mathvariant="bold-italic">f</mml:mi><mml:msup><mml:mi>&#x00A0;</mml:mi><mml:mrow><mml:mi mathvariant="bold">&#x2032;</mml:mi></mml:mrow></mml:msup><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:mi mathvariant="bold-italic">f</mml:mi><mml:mo>&#x2212;</mml:mo><mml:msub><mml:mi mathvariant="bold-italic">f</mml:mi><mml:mrow><mml:mo mathvariant="bold" movablelimits="true" form="prefix">min</mml:mo></mml:mrow></mml:msub></mml:mrow><mml:mrow><mml:msub><mml:mi mathvariant="bold-italic">f</mml:mi><mml:mrow><mml:mo mathvariant="bold" movablelimits="true" form="prefix">max</mml:mo></mml:mrow></mml:msub><mml:mo>&#x2212;</mml:mo><mml:msub><mml:mi mathvariant="bold-italic">f</mml:mi><mml:mrow><mml:mo mathvariant="bold" movablelimits="true" form="prefix">min</mml:mo></mml:mrow></mml:msub></mml:mrow></mml:mfrac></mml:math></disp-formula>
<list list-type="bullet">
<list-item>
<p><italic>Standardization:</italic> Features with a normal distribution but values that vary greatly from one another are used for standardization. It normalizes the characteristics such that they all have a mean of zero and a standard deviation of one and is illustrated in <xref ref-type="disp-formula" rid="eqn-5">Eq. (5)</xref>.</p></list-item>
</list>
<disp-formula id="eqn-5"><label>(5)</label><mml:math id="mml-eqn-5" display="block"><mml:mi mathvariant="bold-italic">f</mml:mi><mml:msup><mml:mi>&#x00A0;</mml:mi><mml:mrow><mml:mi mathvariant="bold">&#x2032;</mml:mi></mml:mrow></mml:msup><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:mi mathvariant="bold-italic">x</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mi>&#x03BC;</mml:mi></mml:mrow><mml:mi>&#x03C3;</mml:mi></mml:mfrac></mml:math></disp-formula></p>
<p>Demonstration of dataset attributes post-application of SMOTE and scaling techniques is illustrated in <xref ref-type="fig" rid="fig-8">Fig. 8</xref>. The feature distribution determines whether the specified dataset should be normalized or standardized. For example, normalization is used when a feature&#x2019;s distribution is not Gaussian (normal). Standardization becomes necessary when characteristics have values that vary widely but otherwise follow a normal distribution. Notably, tree-based techniques, such as XGB and RF, may not necessitate scaling because they are less affected by feature scale. Normalization is useful for some algorithms, such as LR and DMLP. The dataset is normalized using the min-max scaling strategy to increase the performance of logistic regression and DMLP algorithms.</p>
<fig id="fig-8">
<label>Figure 8</label>
<caption>
<title>Demonstration of dataset attributes post-application of SMOTE and scaling techniques</title>
</caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_53132-fig-8a.tif"/>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_53132-fig-8b.tif"/>
</fig>
</sec>
</sec>
</sec>
<sec id="s4">
<label>4</label>
<title>Proposed Model</title>
<p>The current section of the manuscript presents the Dynamic Multi-Layer Perceptron with the other conventional classification techniques in classifying fetal health from the Cardiotocography Data. The conventional classification techniques include the RF, XGB, LR, KNN, and DT. All these methods are discussed alongside the proposed model.</p>
<sec id="s4_1">
<label>4.1</label>
<title>Random Forest</title>
<p>By building a network of decision trees, the ensemble learning algorithm, Random Forest is useful for fetal health classification. A voting method is used during prediction to aggregate individual tree outputs and improve overall model performance after these decision trees are trained on random subsets of the data. Bootstrapping and feature selection are made more random by the algorithm at each split, which helps to improve generalization and decrease overfitting. RF evaluates feature relevance based on the decrease in impurity achieved by each feature across all trees, whereas the Gini impurity measure directs the development of decision trees. It can detect important features for reliable predictions using this approach. Complex fetal health classification tasks are well-suited to Random Forest due to its ensemble nature and feature interpretability. First, we can write the Gini impurity for an FHD in <xref ref-type="disp-formula" rid="eqn-6">Eq. (6)</xref>.
<disp-formula id="eqn-6"><label>(6)</label><mml:math id="mml-eqn-6" display="block"><mml:mi>G</mml:mi><mml:mi>i</mml:mi><mml:mi>n</mml:mi><mml:mi>i</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>F</mml:mi><mml:mi>H</mml:mi><mml:mi>D</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mn>1</mml:mn><mml:mo>&#x2212;</mml:mo><mml:munderover><mml:mo movablelimits="false">&#x2211;</mml:mo><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>c</mml:mi></mml:mrow></mml:munderover><mml:msubsup><mml:mi>p</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:math></disp-formula></p>
<p>The notation <inline-formula id="ieqn-2"><mml:math id="mml-ieqn-2"><mml:mi>c</mml:mi></mml:math></inline-formula> in the above equation designates the number of classes, and the notation <inline-formula id="ieqn-3"><mml:math id="mml-ieqn-3"><mml:mi>p</mml:mi></mml:math></inline-formula> designates the probability of class in the FHD dataset.</p>
</sec>
<sec id="s4_2">
<label>4.2</label>
<title>XGBoost</title>
<p>The ensemble learning algorithm XGBoost is essential for fetal health prediction because it combines regularization with gradient boosting. A loss term assessing prediction accuracy and a regularization term prohibiting overfitting make up the objective function that XGB minimizes through the sequential construction of decision trees. Computing gradients &#x0026; Hessians, which stand for derivatives of the loss function, is an integral part of the optimization process. We distribute weights to trees according to their contributions, and the structure of each tree is decided by minimizing the objective function. Regularization terms control model complexity. XGB is resilient against overfitting &#x0026; suitable for varied datasets; it is particularly effective for fetal health classification due to its adaptability to complex interactions, automatic management of missing values, and feature importance score. In multiclass classification, the objective function for XGB comprises a regularization term and the aggregate of the individual loss functions for each class [<xref ref-type="bibr" rid="ref-23">23</xref>]. Class probabilities are typically calculated using the softmax algorithm. Expressing the global objective function using <xref ref-type="disp-formula" rid="eqn-7">Eq. (7)</xref> is possible.
<disp-formula id="eqn-7"><label>(7)</label><mml:math id="mml-eqn-7" display="block"><mml:mi>O</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>X</mml:mi><mml:mi>G</mml:mi><mml:mi>B</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:munderover><mml:mo movablelimits="false">&#x2211;</mml:mo><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:munderover><mml:mi>L</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:msub><mml:mrow><mml:mi mathvariant="fraktur">Y</mml:mi></mml:mrow><mml:mrow><mml:mrow><mml:mi mathvariant="fraktur">i</mml:mi></mml:mrow></mml:mrow></mml:msub><mml:mo>,</mml:mo><mml:msub><mml:mrow><mml:mover><mml:mrow><mml:mi mathvariant="fraktur">Y</mml:mi></mml:mrow><mml:mo stretchy="false">&#x005E;</mml:mo></mml:mover></mml:mrow><mml:mrow><mml:mrow><mml:mi mathvariant="fraktur">i</mml:mi></mml:mrow></mml:mrow></mml:msub><mml:mo>)</mml:mo></mml:mrow><mml:mo>+</mml:mo><mml:munderover><mml:mo movablelimits="false">&#x2211;</mml:mo><mml:mrow><mml:mi>k</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>K</mml:mi></mml:mrow></mml:munderover><mml:mi>&#x03A9;</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:msub><mml:mi>f</mml:mi><mml:mrow><mml:mi>k</mml:mi></mml:mrow></mml:msub><mml:mo>)</mml:mo></mml:mrow></mml:math></disp-formula>where, <italic>n</italic> is the total instances. <italic>K</italic> is the total classes. 
<inline-formula id="ieqn-4"><mml:math id="mml-ieqn-4"><mml:mi>L</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:msub><mml:mrow><mml:mi mathvariant="fraktur">Y</mml:mi></mml:mrow><mml:mrow><mml:mrow><mml:mi mathvariant="fraktur">i</mml:mi></mml:mrow></mml:mrow></mml:msub><mml:mo>,</mml:mo><mml:msub><mml:mrow><mml:mover><mml:mrow><mml:mi mathvariant="fraktur">Y</mml:mi></mml:mrow><mml:mo stretchy="false">&#x005E;</mml:mo></mml:mover></mml:mrow><mml:mrow><mml:mrow><mml:mi mathvariant="fraktur">i</mml:mi></mml:mrow></mml:mrow></mml:msub><mml:mo>)</mml:mo></mml:mrow></mml:math></inline-formula> is the individual loss function measuring the difference among the true class <inline-formula id="ieqn-5"><mml:math id="mml-ieqn-5"><mml:msub><mml:mrow><mml:mi mathvariant="fraktur">Y</mml:mi></mml:mrow><mml:mrow><mml:mrow><mml:mi mathvariant="fraktur">i</mml:mi></mml:mrow></mml:mrow></mml:msub></mml:math></inline-formula> and the predicted probability <inline-formula id="ieqn-6"><mml:math id="mml-ieqn-6"><mml:msub><mml:mrow><mml:mover><mml:mrow><mml:mi mathvariant="fraktur">Y</mml:mi></mml:mrow><mml:mo stretchy="false">&#x005E;</mml:mo></mml:mover></mml:mrow><mml:mrow><mml:mrow><mml:mi mathvariant="fraktur">i</mml:mi></mml:mrow></mml:mrow></mml:msub></mml:math></inline-formula> for instance <inline-formula id="ieqn-7"><mml:math id="mml-ieqn-7"><mml:mi>i</mml:mi></mml:math></inline-formula>. In multiclass classification, the cross-entropy loss is commonly used. <inline-formula id="ieqn-8"><mml:math id="mml-ieqn-8"><mml:mrow><mml:mi mathvariant="normal">&#x03A9;</mml:mi></mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:msub><mml:mi>f</mml:mi><mml:mrow><mml:mi>k</mml:mi></mml:mrow></mml:msub><mml:mo>)</mml:mo></mml:mrow></mml:math></inline-formula> is the regularization term for the <italic>k<sup>th</sup></italic> tree in the ensemble.</p>
<p>The predicted probability for class <italic>k</italic> is calculated using the softmax function <xref ref-type="disp-formula" rid="eqn-8">Eq. (8)</xref>.
<disp-formula id="eqn-8"><label>(8)</label><mml:math id="mml-eqn-8" display="block"><mml:mi>P</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi mathvariant="fraktur">Y</mml:mi></mml:mrow><mml:mo>=</mml:mo><mml:mi>k</mml:mi><mml:mo stretchy="false">)</mml:mo><mml:mo>=</mml:mo><mml:mfrac><mml:msup><mml:mi>e</mml:mi><mml:mrow><mml:mi>&#x03B7;</mml:mi><mml:mi>k</mml:mi></mml:mrow></mml:msup><mml:mrow><mml:msubsup><mml:mo movablelimits="false">&#x2211;</mml:mo><mml:mrow><mml:mi>j</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>K</mml:mi></mml:mrow></mml:msubsup><mml:msup><mml:mi>e</mml:mi><mml:mrow><mml:mi>&#x03B7;</mml:mi><mml:mi>j</mml:mi></mml:mrow></mml:msup></mml:mrow></mml:mfrac></mml:math></disp-formula></p>
<p>In the above equation, the notation <inline-formula id="ieqn-9"><mml:math id="mml-ieqn-9"><mml:mi>&#x03B7;</mml:mi><mml:mi>k</mml:mi></mml:math></inline-formula> is the output of the <inline-formula id="ieqn-10"><mml:math id="mml-ieqn-10"><mml:msup><mml:mi>k</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mi>h</mml:mi></mml:mrow></mml:msup></mml:math></inline-formula> tree.</p>
</sec>
<sec id="s4_3">
<label>4.3</label>
<title>Logistic Regression</title>
<p>A binary classification algorithm, logistic regression can be enhanced to manage jobs involving multiple classes. Regarding fetal health classification, LR models the likelihood that an instance belongs to a specific class. It is usual practice to convert the linear combination of input features into probabilities using the logistic function, often known as the sigmoid function. The softmax function allows logistic regression to be extended for multiclass classification. After calculating the probability of each class, the one with the highest probability is used to make a prediction. Solving this <xref ref-type="disp-formula" rid="eqn-9">Eq. (9)</xref> gives the class <inline-formula id="ieqn-11"><mml:math id="mml-ieqn-11"><mml:mi>k</mml:mi></mml:math></inline-formula> of the softmax function.
<disp-formula id="eqn-9"><label>(9)</label><mml:math id="mml-eqn-9" display="block"><mml:mi>P</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi mathvariant="fraktur">Y</mml:mi></mml:mrow><mml:mo>=</mml:mo><mml:mi>k</mml:mi><mml:mo stretchy="false">)</mml:mo><mml:mo>=</mml:mo><mml:mfrac><mml:msup><mml:mi>e</mml:mi><mml:mrow><mml:mrow><mml:msub><mml:mi>&#x03B2;</mml:mi><mml:mrow><mml:mn>0</mml:mn><mml:mi>k</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mo>+</mml:mo><mml:mrow><mml:msub><mml:mi>&#x03B2;</mml:mi><mml:mrow><mml:mn>1</mml:mn><mml:mi>k</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mi>X</mml:mi><mml:mn>1</mml:mn><mml:mo>+</mml:mo><mml:mrow><mml:msub><mml:mi>&#x03B2;</mml:mi><mml:mrow><mml:mn>2</mml:mn><mml:mi>k</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mi>X</mml:mi><mml:mn>2</mml:mn><mml:mo>+</mml:mo><mml:mo>&#x2026;</mml:mo><mml:mo>+</mml:mo><mml:mrow><mml:msub><mml:mi>&#x03B2;</mml:mi><mml:mrow><mml:mi>n</mml:mi><mml:mi>k</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mi>X</mml:mi><mml:mi>n</mml:mi></mml:mrow></mml:msup><mml:mrow><mml:msubsup><mml:mo 
movablelimits="false">&#x2211;</mml:mo><mml:mrow><mml:mi>j</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>K</mml:mi></mml:mrow></mml:msubsup><mml:msup><mml:mi>e</mml:mi><mml:mrow><mml:mrow><mml:msub><mml:mi>&#x03B2;</mml:mi><mml:mrow><mml:mn>0</mml:mn><mml:mi>j</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mo>+</mml:mo><mml:mrow><mml:msub><mml:mi>&#x03B2;</mml:mi><mml:mrow><mml:mn>1</mml:mn><mml:mi>j</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mi>X</mml:mi><mml:mn>1</mml:mn><mml:mo>+</mml:mo><mml:mrow><mml:msub><mml:mi>&#x03B2;</mml:mi><mml:mrow><mml:mn>2</mml:mn><mml:mi>j</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mi>X</mml:mi><mml:mn>2</mml:mn><mml:mo>+</mml:mo><mml:mo>&#x2026;</mml:mo><mml:mo>+</mml:mo><mml:mrow><mml:msub><mml:mi>&#x03B2;</mml:mi><mml:mrow><mml:mi>n</mml:mi><mml:mi>j</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mi>X</mml:mi><mml:mi>n</mml:mi></mml:mrow></mml:msup></mml:mrow></mml:mfrac></mml:math></disp-formula>where,
<list list-type="bullet">
<list-item>
<p><italic>K</italic> is the number of classes.</p></list-item>
<list-item>
<p><italic>&#x03B2;</italic><sub>0<italic>k</italic></sub>, <italic>&#x03B2;</italic><sub>1<italic>k</italic></sub>,&#x2026;, <italic>&#x03B2;</italic><sub><italic>nk</italic></sub> are the coefficients associated with class <italic>k</italic> out of the <italic>K</italic> possible classes.</p></list-item>
</list></p>
<p>Training the LR model entails adjusting the coefficients while maximizing the likelihood of the observed data. The model uses the computed probabilities to forecast the class, and these coefficients define the decision boundary.</p>
</sec>
<sec id="s4_4">
<label>4.4</label>
<title>K-Nearest Neighbors</title>
<p>One flexible and non-parametric approach useful in fetal health classification is K-Nearest Neighbors. When applied to fetal health, KNN works on the premise that instances with comparable features should have comparable class labels. To determine a data point&#x2019;s classification, the algorithm looks at the labels of its K-Nearest Neighbors, where <inline-formula id="ieqn-12"><mml:math id="mml-ieqn-12"><mml:mi>k</mml:mi></mml:math></inline-formula> is a number that the user specifies [<xref ref-type="bibr" rid="ref-24">24</xref>]. Data points are usually classified using majority voting, which assigns them to the class that appears most often among the K-Nearest Neighbors. The prediction for a new data point can be mathematically represented in <xref ref-type="disp-formula" rid="eqn-10">Eq. (10)</xref>.
<disp-formula id="eqn-10"><label>(10)</label><mml:math id="mml-eqn-10" display="block"><mml:mrow><mml:mover><mml:mrow><mml:mi mathvariant="fraktur">Y</mml:mi></mml:mrow><mml:mo stretchy="false">&#x005E;</mml:mo></mml:mover></mml:mrow><mml:mo>=</mml:mo><mml:mi>a</mml:mi><mml:mi>r</mml:mi><mml:mi>g</mml:mi><mml:mi>m</mml:mi><mml:mi>a</mml:mi><mml:mi>x</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:munderover><mml:mo movablelimits="false">&#x2211;</mml:mo><mml:mrow><mml:mi>n</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>m</mml:mi></mml:mrow></mml:munderover><mml:mi>I</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:msub><mml:mrow><mml:mi mathvariant="fraktur">Y</mml:mi></mml:mrow><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mi>k</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:math></disp-formula></p>
<p>Here, <inline-formula id="ieqn-13"><mml:math id="mml-ieqn-13"><mml:mrow><mml:mover><mml:mrow><mml:mi mathvariant="fraktur">Y</mml:mi></mml:mrow><mml:mo stretchy="false">&#x005E;</mml:mo></mml:mover></mml:mrow></mml:math></inline-formula> is the predicted class label, <inline-formula id="ieqn-14"><mml:math id="mml-ieqn-14"><mml:msub><mml:mrow><mml:mi mathvariant="fraktur">Y</mml:mi></mml:mrow><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> represents the class label of the <italic>n<sup>th</sup></italic> nearest neighbor, and <italic>I</italic> is the indicator function. The technique considers the distances among the new data point and every other data point across the training set, chooses the K-Nearest Neighbors, and subsequently majority vote to assign the class label. When the decision boundaries are complex or non-linear, KNN is a good fit for fetal health prediction since it is both simple and effective. It may, however, be performance-dependent on the distance metric and k values. Thus, fine-tuning is likely to be necessary.</p>
</sec>
<sec id="s4_5">
<label>4.5</label>
<title>Decision Tree</title>
<p>Using decision trees, which are robust and easily interpretable models, allows for the classification of fetal health in circumstances involving many classes. Using feature values as a basis, the algorithm recursively partitions the feature space into pieces. Each partition in a decision tree represents a node, and the last partitions, known as leaf nodes, serve as labels for the classes [<xref ref-type="bibr" rid="ref-25">25</xref>]. As part of the decision-making process, the feature space is partitioned at each node based on a given feature and a splitting criterion, such as Gini impurity or entropy. Maximizing the homogeneity of class labels inside each partition is the goal of the splitting criterion, which aims to produce divisions that are as pure as feasible. With each branch in the decision tree, minimizing the impurity measure identified by the notation <inline-formula id="ieqn-15"><mml:math id="mml-ieqn-15"><mml:mi>I</mml:mi><mml:mi>G</mml:mi></mml:math></inline-formula> is the mathematical objective over a node <inline-formula id="ieqn-16"><mml:math id="mml-ieqn-16"><mml:mi>m</mml:mi></mml:math></inline-formula> with <inline-formula id="ieqn-17"><mml:math id="mml-ieqn-17"><mml:mi>n</mml:mi></mml:math></inline-formula> classes is measured as shown in <xref ref-type="disp-formula" rid="eqn-11">Eq. (11)</xref>.</p>
<p><disp-formula id="eqn-11"><label>(11)</label><mml:math id="mml-eqn-11" display="block"><mml:mi>I</mml:mi><mml:mi>G</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mi>m</mml:mi><mml:mo stretchy="false">)</mml:mo><mml:mo>=</mml:mo><mml:mn>1</mml:mn><mml:mo>&#x2212;</mml:mo><mml:munderover><mml:mo movablelimits="false">&#x2211;</mml:mo><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:munderover><mml:mi>p</mml:mi><mml:msup><mml:mrow><mml:mo>(</mml:mo><mml:mfrac><mml:mi>i</mml:mi><mml:mi>m</mml:mi></mml:mfrac><mml:mo>)</mml:mo></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msup></mml:math></disp-formula>where, <inline-formula id="ieqn-18"><mml:math id="mml-ieqn-18"><mml:mi>p</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mfrac><mml:mi>i</mml:mi><mml:mi>m</mml:mi></mml:mfrac></mml:mstyle><mml:mo>)</mml:mo></mml:mrow></mml:math></inline-formula> is the proportion of samples in node m belonging to class <inline-formula id="ieqn-19"><mml:math id="mml-ieqn-19"><mml:mi>i</mml:mi></mml:math></inline-formula>. The decision tree keeps recursively splitting the feature space until it reaches a stopping condition, like a maximum depth, minimum samples per leaf, or when there is no more improvement in impurity reduction. For fetal health classification, decision trees excel due to their interpretability and ability to manage non-linear decision boundaries. Clinicians can better understand the decision-making process in this way. On the other hand, overfitting is more common in deeper trees. So, to prevent it, we might need to use regularization techniques such as pruning.</p>
</sec>
<sec id="s4_6">
<label>4.6</label>
<title>Dynamic Multi-Layer Perceptron Network</title>
<p>Predicting the fetal health status from various input features is the domain&#x2019;s specialty, and the Dynamic Multi-Layer Perceptron is the customizable neural network design used for this study. Layers that reflect relevant features obtained from diagnostic tests, layers that analyze inputs using weighted connections and activation functions, and final layers that generate predictions on fetal health issues are all part of a Multi-Layer Perceptron (MLP). The DMLP design&#x2019;s adaptability allows for customizing the total hidden layers and neurons according to the classification task&#x2019;s complexity. Upgrading the MLP model from one hidden layer to seven hidden layers in a feedforward architecture is the logical next step for fetal health classification. With three neurons per class to seven hidden layers, we intend to train and assess networks with diverse topologies. In the bottom hidden layer, there are 24 neurons; subsequent layers increase this amount. To avoid overfitting, the Early Stopping callback is used, and the model and parameters are selected based on their accuracy on the validation set. The use of dropout and batch normalization significantly enhances generalizability. Also, an F1-score can be used to monitor how well the model is doing [<xref ref-type="bibr" rid="ref-26">26</xref>]. This comprehensive strategy is implemented to improve deep learning models for accurate fetal health categorization. This comprehensive strategy is being implemented to decrease concerns about overfitting. The calculation determines the weighted total for each class <inline-formula id="ieqn-20"><mml:math id="mml-ieqn-20"><mml:mi>k</mml:mi></mml:math></inline-formula> in the output layer in <xref ref-type="disp-formula" rid="eqn-12">Eq. (12)</xref>.</p>
<p><disp-formula id="eqn-12"><label>(12)</label><mml:math id="mml-eqn-12" display="block"><mml:msub><mml:mi>z</mml:mi><mml:mrow><mml:mi>c</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:msubsup><mml:mo movablelimits="false">&#x2211;</mml:mo><mml:mrow><mml:mi>n</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>N</mml:mi></mml:mrow></mml:msubsup><mml:msub><mml:mi>w</mml:mi><mml:mrow><mml:mi>k</mml:mi><mml:mi>n</mml:mi></mml:mrow></mml:msub><mml:mspace width="thinmathspace" /><mml:mo>.</mml:mo><mml:mspace width="thinmathspace" /><mml:mi>a</mml:mi><mml:msub><mml:mi>f</mml:mi><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:msub><mml:mo>+</mml:mo><mml:msub><mml:mi>b</mml:mi><mml:mrow><mml:mi>c</mml:mi></mml:mrow></mml:msub></mml:math></disp-formula>where,
<list list-type="bullet">
<list-item>
<p><inline-formula id="ieqn-21"><mml:math id="mml-ieqn-21"><mml:msub><mml:mi>z</mml:mi><mml:mrow><mml:mi>c</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> is the weighted sum for class <italic>c</italic>.</p></list-item>
<list-item>
<p><inline-formula id="ieqn-22"><mml:math id="mml-ieqn-22"><mml:msub><mml:mi>w</mml:mi><mml:mrow><mml:mi>k</mml:mi><mml:mi>n</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> is the weight connecting <italic>n<sup>th</sup></italic> neuron in the previous layer to the <italic>k<sup>th</sup></italic> neuron in the output layer.</p></list-item>
<list-item>
<p><inline-formula id="ieqn-23"><mml:math id="mml-ieqn-23"><mml:mi>a</mml:mi><mml:msub><mml:mi>f</mml:mi><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> is the activation function of nth neuron in the previous layer.</p></list-item>
<list-item>
<p><inline-formula id="ieqn-24"><mml:math id="mml-ieqn-24"><mml:msub><mml:mi>b</mml:mi><mml:mrow><mml:mi>c</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> is the bias term for class <italic>c</italic>.</p></list-item>
</list></p>
<p>The softmax activation function is applied to the logits to obtain class probabilities, as illustrated in <xref ref-type="disp-formula" rid="eqn-13">Eq. (13)</xref>.</p>
<p><disp-formula id="eqn-13"><label>(13)</label><mml:math id="mml-eqn-13" display="block"><mml:mi>P</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mi>Y</mml:mi><mml:mo>=</mml:mo><mml:mi>k</mml:mi><mml:mo stretchy="false">)</mml:mo><mml:mo>=</mml:mo><mml:mfrac><mml:msup><mml:mi>e</mml:mi><mml:mrow><mml:mi>z</mml:mi><mml:mi>k</mml:mi></mml:mrow></mml:msup><mml:mrow><mml:msubsup><mml:mo movablelimits="false">&#x2211;</mml:mo><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>C</mml:mi></mml:mrow></mml:msubsup><mml:msup><mml:mi>e</mml:mi><mml:mrow><mml:mi>z</mml:mi><mml:mi>i</mml:mi></mml:mrow></mml:msup></mml:mrow></mml:mfrac></mml:math></disp-formula>where, <inline-formula id="ieqn-25"><mml:math id="mml-ieqn-25"><mml:mi>P</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mi>Y</mml:mi><mml:mo>=</mml:mo><mml:mi>k</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:math></inline-formula> is the probability of class <inline-formula id="ieqn-26"><mml:math id="mml-ieqn-26"><mml:mi>k</mml:mi></mml:math></inline-formula>, and <inline-formula id="ieqn-27"><mml:math id="mml-ieqn-27"><mml:mi>C</mml:mi></mml:math></inline-formula> is the total classes.</p>
<p>It is possible to represent the objective function of a MLP network, frequently utilized for supervised learning tasks, as the reduction of a cost or loss function [<xref ref-type="bibr" rid="ref-27">27</xref>]. In the case of regression tasks, the objective function <inline-formula id="ieqn-28"><mml:math id="mml-ieqn-28"><mml:mi>O</mml:mi><mml:mspace width="negativethinmathspace" /><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mi mathvariant="normal">&#x03B8;</mml:mi></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:math></inline-formula> is typically expressed as the mean squared error (MSE), while in the case of classification tasks, it is expressed as the cross-entropy in <xref ref-type="disp-formula" rid="eqn-14">Eq. (14)</xref>.</p>
<p><disp-formula id="eqn-14"><label>(14)</label><mml:math id="mml-eqn-14" display="block"><mml:mi>O</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi mathvariant="normal">&#x03B8;</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo><mml:mo>=</mml:mo><mml:mo>&#x2212;</mml:mo><mml:mfrac><mml:mn>1</mml:mn><mml:mi>n</mml:mi></mml:mfrac><mml:munderover><mml:mo movablelimits="false">&#x2211;</mml:mo><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:munderover><mml:munderover><mml:mo movablelimits="false">&#x2211;</mml:mo><mml:mrow><mml:mi>k</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>K</mml:mi></mml:mrow></mml:munderover><mml:msubsup><mml:mi>z</mml:mi><mml:mrow><mml:mi>k</mml:mi></mml:mrow><mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:mrow></mml:msubsup><mml:mi>log</mml:mi><mml:mo>&#x2061;</mml:mo><mml:mrow><mml:mo>(</mml:mo><mml:msub><mml:mi>h</mml:mi><mml:mrow><mml:mi>&#x03B8;</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo>(</mml:mo><mml:msup><mml:mi>p</mml:mi><mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:mrow></mml:msup><mml:mo>)</mml:mo></mml:mrow><mml:mi>k</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:math></disp-formula>where, <inline-formula id="ieqn-29"><mml:math id="mml-ieqn-29"><mml:mi>O</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi mathvariant="normal">&#x03B8;</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:math></inline-formula> is objective function. 
<inline-formula id="ieqn-30"><mml:math id="mml-ieqn-30"><mml:mrow><mml:mi mathvariant="normal">&#x03B8;</mml:mi></mml:mrow></mml:math></inline-formula> represents the parameters, i.e., weights and biases <inline-formula id="ieqn-31"><mml:math id="mml-ieqn-31"><mml:msub><mml:mi>h</mml:mi><mml:mrow><mml:mrow><mml:mi mathvariant="normal">&#x03B8;</mml:mi></mml:mrow></mml:mrow></mml:msub><mml:mrow><mml:mo>(</mml:mo><mml:msup><mml:mi>p</mml:mi><mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:mrow></mml:msup><mml:mo>)</mml:mo></mml:mrow></mml:math></inline-formula> is the predicted output at input <inline-formula id="ieqn-32"><mml:math id="mml-ieqn-32"><mml:msup><mml:mi>p</mml:mi><mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:mrow></mml:msup></mml:math></inline-formula> and <inline-formula id="ieqn-33"><mml:math id="mml-ieqn-33"><mml:msubsup><mml:mi>z</mml:mi><mml:mrow><mml:mi>k</mml:mi></mml:mrow><mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:mrow></mml:msubsup></mml:math></inline-formula> is the actual output at input <inline-formula id="ieqn-34"><mml:math id="mml-ieqn-34"><mml:msup><mml:mi>p</mml:mi><mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:mrow></mml:msup></mml:math></inline-formula>, <italic>n</italic> is the number of training samples in a dataset, and <italic>K</italic> is the number of classes in fetal classification.</p>
<p>The weights and biases are updated using the gradient descent update rule. The gradient for the weights <inline-formula id="ieqn-35"><mml:math id="mml-ieqn-35"><mml:mi mathvariant="normal">&#x2202;</mml:mi><mml:msub><mml:mi>w</mml:mi><mml:mrow><mml:mi>k</mml:mi><mml:mi>j</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> is given in <xref ref-type="disp-formula" rid="eqn-15">Eq. (15)</xref>.</p>
<p><disp-formula id="eqn-15"><label>(15)</label><mml:math id="mml-eqn-15" display="block"><mml:mfrac><mml:mrow><mml:mi mathvariant="normal">&#x2202;</mml:mi><mml:mi>J</mml:mi></mml:mrow><mml:mrow><mml:mi mathvariant="normal">&#x2202;</mml:mi><mml:msub><mml:mi>w</mml:mi><mml:mrow><mml:mi>k</mml:mi><mml:mi>j</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:mfrac><mml:mo>=</mml:mo><mml:mo>&#x2212;</mml:mo><mml:mfrac><mml:mn>1</mml:mn><mml:mi>c</mml:mi></mml:mfrac><mml:msubsup><mml:mo movablelimits="false">&#x2211;</mml:mo><mml:mrow><mml:mi>a</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>c</mml:mi></mml:mrow></mml:msubsup><mml:mrow><mml:mo>(</mml:mo><mml:msubsup><mml:mi>x</mml:mi><mml:mrow><mml:mi>b</mml:mi></mml:mrow><mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mi>a</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:mrow></mml:msubsup><mml:mo>.</mml:mo><mml:mrow><mml:mo>(</mml:mo><mml:msubsup><mml:mi>y</mml:mi><mml:mrow><mml:mi>a</mml:mi></mml:mrow><mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mi>k</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:mrow></mml:msubsup><mml:mo>&#x2212;</mml:mo><mml:mi>P</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:msup><mml:mi>Y</mml:mi><mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mi>a</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:mrow></mml:msup><mml:mo>=</mml:mo><mml:mi>K</mml:mi><mml:mo stretchy="false">)</mml:mo><mml:mo>)</mml:mo></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:math></disp-formula>where, <inline-formula id="ieqn-36"><mml:math id="mml-ieqn-36"><mml:msubsup><mml:mi>x</mml:mi><mml:mrow><mml:mi>b</mml:mi></mml:mrow><mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mi>a</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:mrow></mml:msubsup></mml:math></inline-formula> is the <inline-formula id="ieqn-37"><mml:math id="mml-ieqn-37"><mml:msup><mml:mi>b</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mi>h</mml:mi></mml:mrow></mml:msup></mml:math></inline-formula> feature of the <inline-formula id="ieqn-38"><mml:math 
id="mml-ieqn-38"><mml:msup><mml:mi>a</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mi>h</mml:mi></mml:mrow></mml:msup></mml:math></inline-formula> training sample.</p>
<p>The complexity of the problem and the data determine the optimal number of hidden layers and neurons to increase the performance of DMLP. The following are a few possible benefits:
<list list-type="bullet">
<list-item>
<p>Increasing the amount of hidden layers and neurons improves the model&#x2019;s capacity to interpret complex patterns and correlations in the data. This might improve performance, especially for fetal datasets with complicated structures or non-linear relationships.</p></list-item>
<list-item>
<p>The network&#x2019;s ability to extract complicated features from input data is enhanced when more neurons are in each hidden layer.</p></list-item>
<list-item>
<p>Hidden layers can be added to the neural network for deep learning designs.</p></list-item>
<list-item>
<p>There is a direct correlation between the number of model parameters and the likelihood of overfitting the training data. We delve into deeper architectures utilizing suitable regularization techniques, like dropout or weight decay.</p></list-item>
</list></p>
</sec>
</sec>
<sec id="s5">
<label>5</label>
<title>Results and Discussion</title>
<p>In this section, we will examine and compare different classifiers that are used to classify fetal health. A comprehensive evaluation of these models utilized important measures like F1-score, recall, precision, and accuracy. We also looked at how scaling and other pre-processing methods affected the efficiency of these classifiers. At the same time, we explored the complexities of deep learning with the Dynamic Multi-Layer Perceptron Network, going from one hidden layer to seven.</p>
<sec id="s5_1">
<label>5.1</label>
<title>Hyperparameters Used for Training Each Model</title>
<p>Machine learning model performance is greatly influenced by hyperparameters, which is why they are so important. The training procedure and the final model are affected by these external configurations, which are not learned from data. Reaching peak model performance, warding off problems like overfitting and underfitting, and improving generalization to novel, unknown data depend on correct hyperparameter tuning. Examples of common hyperparameters are parameters about the model&#x2019;s architecture, regularization, and learning rate. Hyperparameter tuning entails experimenting with different values to discover the optimal ones. The model parameters used in the current study are shown in <xref ref-type="table" rid="table-1">Table 1</xref>.</p>
<table-wrap id="table-1">
<label>Table 1</label>
<caption>
<title>Details of Hyperparameters that are used in classifiers</title>
</caption>
<table frame="hsides">
<colgroup>
<col align="left"/>
<col align="left"/>
<col align="left"/>
</colgroup>
<thead>
<tr>
<th>Model</th>
<th>Parameter</th>
<th>Value</th>
</tr>
</thead>
<tbody>
<tr>
<td>LR</td>
<td>CV</td>
<td>StratifiedKFold (n_splits &#x003D; 10, random_state &#x003D; 42, shuffle &#x003D; True)</td>
</tr>
<tr>
<td/>
<td>Estimator</td>
<td>LogisticRegression (max_iter &#x003D; 1000, random_state &#x003D; 42)</td>
</tr>
<tr>
<td/>
<td>n_jobs</td>
<td>&#x2212;1</td>
</tr>
<tr>
<td/>
<td>Param_grid</td>
<td>Three hyperparameters for &#x2018;C&#x2019;, &#x2018;penalty&#x2019;, and &#x2018;solver&#x2019;</td>
</tr>
<tr>
<td/>
<td>Scoring</td>
<td>&#x2018;F1_micro&#x2019;</td>
</tr>
<tr>
<td/>
<td>Verbose</td>
<td>1</td>
</tr>
<tr>
<td>DT</td>
<td>Criterion</td>
<td>Entropy</td>
</tr>
<tr>
<td/>
<td>Max_depth</td>
<td>9</td>
</tr>
<tr>
<td/>
<td>Min_samples_leaf</td>
<td>1</td>
</tr>
<tr>
<td/>
<td>Min_samples_split</td>
<td>3</td>
</tr>
<tr>
<td/>
<td>Random_state</td>
<td>42</td>
</tr>
<tr>
<td>RF</td>
<td>Criterion</td>
<td>Entropy</td>
</tr>
<tr>
<td/>
<td>Max_depth</td>
<td>9</td>
</tr>
<tr>
<td/>
<td>Max features</td>
<td>Sqrt</td>
</tr>
<tr>
<td/>
<td>Max leaf nodes</td>
<td>9</td>
</tr>
<tr>
<td/>
<td>N estimators</td>
<td>1000</td>
</tr>
<tr>
<td>XGB</td>
<td>Learning rate</td>
<td>0.1</td>
</tr>
<tr>
<td/>
<td>Number of estimators</td>
<td>100</td>
</tr>
<tr>
<td/>
<td>Max depth</td>
<td>3</td>
</tr>
<tr>
<td/>
<td>Min child weight</td>
<td>1</td>
</tr>
<tr>
<td/>
<td>Subsample</td>
<td>0.8</td>
</tr>
<tr>
<td/>
<td>Colsample by tree</td>
<td>0.8</td>
</tr>
<tr>
<td>KNN</td>
<td>Number of neighbors</td>
<td>5</td>
</tr>
<tr>
<td/>
<td>Weights</td>
<td>&#x2018;Uniform&#x2019;</td>
</tr>
<tr>
<td/>
<td>Algorithm</td>
<td>&#x2018;Auto&#x2019;</td>
</tr>
<tr>
<td/>
<td>Leaf size</td>
<td>30</td>
</tr>
<tr>
<td/>
<td>p (Power parameter)</td>
<td>2</td>
</tr>
<tr>
<td>DMLP</td>
<td>Dropout ratio</td>
<td>0.1</td>
</tr>
<tr>
<td/>
<td>Batch size</td>
<td>32</td>
</tr>
<tr>
<td/>
<td>Learning rate</td>
<td>0.01</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
<sec id="s5_2">
<label>5.2</label>
<title>Performances of Various Classification Techniques</title>
<p>Various evaluation indicators were plotted across different models and setups to provide a comprehensive perspective of model performance. As a result, the study evaluated each method for fetal health classification and drew comparisons between them. In addition, we analyzed the significance of traits to learn how they contributed to classification. By providing a comprehensive assessment of the performance and interpretability of both conventional classifications, the current study seeks to shed light on their efficacy for fetal health classification. A confusion matrix is a way to summarize a machine learning model&#x2019;s performance in fetal health categorization. It does this by comparing the model&#x2019;s predictions with the actual outcomes. The confusion matrix appears when evaluating the results of classifications across many classes, which often happens in fetal health categorization.</p>
<p>Usually, there are four entries in the confusion matrix:
<list list-type="bullet">
<list-item>
<p>First, there are cases where the model accurately predicts a positive class, such as the presence of a specific fetal health issue; these cases are called True Positives.</p></list-item>
<list-item>
<p>In cases where the model accurately predicts a negative class&#x2014;for example, the absence of a specific fetal health condition is assumed as True Negative.</p></list-item>
<list-item>
<p>The third mistake is when the model inaccurately predicts a positive class when none exists and is assumed as a False Positive.</p></list-item>
<list-item>
<p>When the model makes an inaccurate prediction of a negative class when one exists, it is assumed as False Negative.</p></list-item>
</list></p>
<p><xref ref-type="fig" rid="fig-9">Fig. 9</xref> shows how the confusion matrix may be used to obtain several metrics for evaluating the model&#x2019;s performance, including recall, accuracy, precision, and F1-score. These indicators provide insights into the model&#x2019;s accuracy in identifying fetal health issues and areas for improvement.</p>
<fig id="fig-9">
<label>Figure 9</label>
<caption>
<title>Confusion matrices generated by our classifiers. (a) XGBoost (b) random forest (c) KNN (d) decision tree (e) logistic regression</title>
</caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_53132-fig-9.tif"/>
</fig>
</sec>
<sec id="s5_3">
<label>5.3</label>
<title>Feature Importance</title>
<p>In fetal health categorization, feature importance describes how each feature or variable in a dataset affects the model&#x2019;s prediction ability. It helps to determine which features are crucial for the model to accurately classify cases of fetal health [<xref ref-type="bibr" rid="ref-28">28</xref>]. Understanding which traits are most crucial for model interpretation is essential, as it exposes which factors play a pivotal role in determining the outcome. Analyzing how much weight the model gave to each input feature is what feature importance analysis is all about in the context of fetal health categorization. During training, popular algorithms like DT, RF, and XGB make it easy to determine which features are most important, as illustrated in <xref ref-type="fig" rid="fig-10">Figs. 10</xref> and <xref ref-type="fig" rid="fig-11">11</xref>. Bar graphs that show feature dependency highlight which features most influence a model&#x2019;s predictions by displaying their importance scores. These graphs help identify key features, simplify the model, and improve performance by focusing on the most impactful data [<xref ref-type="bibr" rid="ref-29">29</xref>].</p>
<fig id="fig-10">
<label>Figure 10</label>
<caption>
<title>Importance of features as determined by our classifiers. (a) Random forest (b) XGBoost (c) decision tree (d) logistic regression (e) KNN</title>
</caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_53132-fig-10a.tif"/>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_53132-fig-10b.tif"/>
</fig><fig id="fig-11">
<label>Figure 11</label>
<caption>
<title>Importance of features as determined by our classifiers concerning different classes. (a) Random forest (b) XGBoost (c) decision tree (d) logistic regression (e) KNN</title>
</caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_53132-fig-11a.tif"/>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_53132-fig-11b.tif"/>
</fig>
<p>A feature&#x2019;s impact on the accuracy of predictions is proportional to its significance score. When making decisions, the model gives more weight to features considered more important and less weight to features considered less important. Academics and practitioners can better understand the most important elements impacting fetal health outcomes through feature-importance visualizations or analyses. Improving the categorization model&#x2019;s interpretability, directing more studies, and prioritizing characteristics for data gathering or enhancement can all be achieved with this information.</p>
</sec>
<sec id="s5_4">
<label>5.4</label>
<title>Assessment of the Performance of Various ML Classifiers Concerning SMOTE and Scaling</title>
<p>In the current study, we have used the LOOCV-based cross-validation alongside the K-Fold in validating the proposed model. A comparative analysis is also performed, encompassing four scenarios: the algorithm without any pre-processing, the algorithm with scaling, and the algorithm with the data balancing technique SMOTE. The final scenario involves applying both data balancing and scaling simultaneously. This comprehensive comparison of accuracy in <xref ref-type="table" rid="table-2">Table 2</xref> aims to provide insights into the impact of different pre-processing techniques on the algorithm&#x2019;s performance, shedding light on the effectiveness of balancing and scaling individually and in combination. Analysis of various classifiers for fetal health classification is illustrated in <xref ref-type="table" rid="table-3">Table 3</xref> and <xref ref-type="fig" rid="fig-12">Fig. 12</xref>.</p>
<table-wrap id="table-2">
<label>Table 2</label>
<caption>
<title>Performance of various machine learning classifiers concerning SMOTE and scaling on training data</title>
</caption>
<table frame="hsides">
<colgroup>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
</colgroup>
<thead>
<tr>
<th rowspan="2">Classifier</th>
<th align="center" colspan="3">Accuracy</th>
</tr>
<tr>
<th>K-Fold &#x003D; 10 </th>
<th>STK-Fold &#x003D; 10</th>
<th>CV &#x003D; 10 </th>
</tr>
</thead>
<tbody>
<tr>
<td>LR</td>
<td>0.8753</td>
<td>0.8800</td>
<td>0.8829</td>
</tr>
<tr>
<td>LR scaled</td>
<td>0.895</td>
<td>0.8947</td>
<td>0.8959</td>
</tr>
<tr>
<td>LR smote</td>
<td>0.8255</td>
<td>0.8258</td>
<td>0.8232</td>
</tr>
<tr>
<td>LR smote&#x002B;scaled</td>
<td>0.8938</td>
<td>0.8959</td>
<td>0.8941</td>
</tr>
<tr>
<td>RFC</td>
<td>0.94</td>
<td>0.9435</td>
<td>0.9347</td>
</tr>
<tr>
<td>RFC scaled</td>
<td>0.9376</td>
<td>0.9441</td>
<td>0.9347</td>
</tr>
<tr>
<td>RFC smote</td>
<td>0.9796</td>
<td>0.9786</td>
<td>0.9793</td>
</tr>
<tr>
<td>RFC smote&#x002B;scaled</td>
<td>0.9798</td>
<td>0.9791</td>
<td>0.9793</td>
</tr>
<tr>
<td>DT</td>
<td>0.9112</td>
<td>0.9118</td>
<td>0.9118</td>
</tr>
<tr>
<td>DT scaled</td>
<td>0.9112</td>
<td>0.9135</td>
<td>0.9135</td>
</tr>
<tr>
<td>DT smote</td>
<td>0.9581</td>
<td>0.9592</td>
<td>0.9592</td>
</tr>
<tr>
<td>DT smote&#x002B;scaled</td>
<td>0.9579</td>
<td>0.9597</td>
<td>0.9597</td>
</tr>
<tr>
<td>XGB</td>
<td>0.9476</td>
<td>0.9506</td>
<td>0.9424</td>
</tr>
<tr>
<td>XGB scaled</td>
<td>0.9476</td>
<td>0.9506</td>
<td>0.9424</td>
</tr>
<tr>
<td>XGB smote</td>
<td>0.9821</td>
<td>0.9806</td>
<td>0.9814</td>
</tr>
<tr>
<td>XGB smote&#x002B;scaled</td>
<td>0.9821</td>
<td>0.9806</td>
<td>0.9814</td>
</tr>
</tbody>
</table>
</table-wrap><table-wrap id="table-3">
<label>Table 3</label>
<caption>
<title>Analysis of various classifiers for fetal health classification on test data</title>
</caption>
<table frame="hsides">
<colgroup>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
</colgroup>
<thead>
<tr>
<th>Classifier</th>
<th>Accuracy</th>
<th>Precision</th>
<th>Recall</th>
<th>F1-score</th>
<th>Validation</th>
</tr>
</thead>
<tbody>
<tr>
<td>LR</td>
<td>0.84</td>
<td>0.883</td>
<td>0.84</td>
<td>0.854</td>
<td>STK-Fold</td>
</tr>
<tr>
<td>DT</td>
<td>0.932</td>
<td>0.94</td>
<td>0.932</td>
<td>0.934</td>
<td>CV &#x003D; 10</td>
</tr>
<tr>
<td>RF</td>
<td>0.869</td>
<td>0.903</td>
<td>0.869</td>
<td>0.878</td>
<td>K-Fold</td>
</tr>
<tr>
<td>KNN</td>
<td>0.904</td>
<td>0.918</td>
<td>0.904</td>
<td>0.908</td>
<td>&#x2013;</td>
</tr>
<tr>
<td>XGB</td>
<td>0.962</td>
<td>0.963</td>
<td>0.962</td>
<td>0.963</td>
<td>K-Fold</td>
</tr>
</tbody>
</table>
</table-wrap><fig id="fig-12">
<label>Figure 12</label>
<caption>
<title>Comparative analysis among different classifiers</title>
</caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_53132-fig-12a.tif"/><graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_53132-fig-12b.tif"/>
</fig>
</sec>
<sec id="s5_5">
<label>5.5</label>
<title>Analysis of DMLP Model</title>
<p>Training and validation set performance metrics are the primary emphasis of the DMLP model, which aims to analyze the connection between the model&#x2019;s capacity as indicated by its layers and neurons. The dataset has been partitioned into 80-20 for training and testing. The validation set used for fine-tuning the model is an integral part of the training sample, allotted 10% of the training size. According to graphical representations, a more noticeable disparity between training and validation measures is associated with enhanced model capacity, which may indicate overfitting. With an astounding F1-score of 91.33% on the validation set and 89.95% on the testing set, the model with five hidden layers and 51,000 parameters beats others.</p>
<p>Nevertheless, a more robust hyperparameter search is needed because worries regarding the significance of the results arise due to the lack of statistical tests and cross-validation. Future studies should utilize comprehensive cross-validation procedures and statistical testing to reliably choose models, as larger models show lower metrics, which could indicate overfitting or randomness. Neural network analysis with emphasis on layers and neurons is illustrated in <xref ref-type="table" rid="table-4">Table 4</xref>, and training accuracy, validation accuracy, training loss, and validation losses are illustrated in <xref ref-type="fig" rid="fig-13">Fig. 13</xref>.</p>
<table-wrap id="table-4">
<label>Table 4</label>
<caption>
<title>Neural network analysis with emphasis on layers and neurons</title>
</caption>
<table frame="hsides">
<colgroup>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
</colgroup>
<thead>
<tr>
<th>Number of hidden layers</th>
<th>Dataset</th>
<th>Accuracy</th>
<th>Recall</th>
<th>Precision</th>
<th>F1-score</th>
</tr>
</thead>
<tbody>
<tr>
<td rowspan="3">Hidden layers: 0</td>
<td>Train</td>
<td>0.897255</td>
<td>0.775105</td>
<td>0.810843</td>
<td>0.791288</td>
</tr>
<tr>
<td>Val</td>
<td>0.910588</td>
<td>0.812367</td>
<td>0.851881</td>
<td>0.830056</td>
</tr>
<tr>
<td>Test</td>
<td>0.877934</td>
<td>0.759616</td>
<td>0.790722</td>
<td>0.774363</td>
</tr>
<tr>
<td rowspan="3">Hidden layers: 1</td>
<td>Train</td>
<td>0.902745</td>
<td>0.797689</td>
<td>0.839475</td>
<td>0.815868</td>
</tr>
<tr>
<td>Val</td>
<td>0.917647</td>
<td>0.823905</td>
<td>0.857171</td>
<td>0.837455</td>
</tr>
<tr>
<td>Test</td>
<td>0.887324</td>
<td>0.772924</td>
<td>0.821229</td>
<td>0.794625</td>
</tr>
<tr>
<td rowspan="3">Hidden layers: 2</td>
<td>Train</td>
<td>0.949804</td>
<td>0.896137</td>
<td>0.918845</td>
<td>0.905934</td>
</tr>
<tr>
<td>Val</td>
<td>0.936471</td>
<td>0.873745</td>
<td>0.888437</td>
<td>0.880945</td>
</tr>
<tr>
<td>Test</td>
<td>0.915493</td>
<td>0.826012</td>
<td>0.888366</td>
<td>0.852180</td>
</tr>
<tr>
<td rowspan="3">Hidden layers: 3</td>
<td>Train</td>
<td>0.964706</td>
<td>0.955364</td>
<td>0.934053</td>
<td>0.943413</td>
</tr>
<tr>
<td>Val</td>
<td>0.941176</td>
<td>0.893561</td>
<td>0.890115</td>
<td>0.891819</td>
</tr>
<tr>
<td>Test</td>
<td>0.913146</td>
<td>0.864504</td>
<td>0.866390</td>
<td>0.862515</td>
</tr>
<tr>
<td rowspan="3">Hidden layers: 4</td>
<td>Train</td>
<td>0.974118</td>
<td>0.966846</td>
<td>0.953394</td>
<td>0.959190</td>
</tr>
<tr>
<td>Val</td>
<td>0.948235</td>
<td>0.909742</td>
<td>0.909742</td>
<td>0.909742</td>
</tr>
<tr>
<td>Test</td>
<td>0.920188</td>
<td>0.863642</td>
<td>0.877638</td>
<td>0.867586</td>
</tr>
<tr>
<td rowspan="3">Hidden layers: 5</td>
<td>Train</td>
<td><bold>0.988235</bold></td>
<td>0.979083</td>
<td>0.978831</td>
<td>0.978678</td>
</tr>
<tr>
<td>Val</td>
<td>0.952941</td>
<td>0.921041</td>
<td>0.906015</td>
<td>0.913383</td>
</tr>
<tr>
<td>Test</td>
<td>0.941315</td>
<td>0.895135</td>
<td>0.907351</td>
<td>0.899480</td>
</tr>
<tr>
<td rowspan="3">Hidden layers: 6</td>
<td>Train</td>
<td><bold>0.989020</bold></td>
<td>0.986870</td>
<td>0.975567</td>
<td>0.980676</td>
</tr>
<tr>
<td>Val</td>
<td>0.948235</td>
<td>0.910511</td>
<td>0.916292</td>
<td>0.912959</td>
</tr>
<tr>
<td>Test</td>
<td>0.929577</td>
<td>0.869201</td>
<td>0.871750</td>
<td>0.866832</td>
</tr>
<tr>
<td rowspan="3">Hidden layers: 7</td>
<td>Train</td>
<td><bold>0.976471</bold></td>
<td>0.963213</td>
<td>0.956494</td>
<td>0.959631</td>
</tr>
<tr>
<td>Val</td>
<td>0.945882</td>
<td>0.887827</td>
<td>0.892734</td>
<td>0.889859</td>
</tr>
<tr>
<td>Test</td>
<td>0.934272</td>
<td>0.874312</td>
<td>0.895760</td>
<td>0.882646</td>
</tr>
</tbody>
</table>
</table-wrap><fig id="fig-13">
<label>Figure 13</label>
<caption>
<title>Neural network analysis with emphasis on layers and neurons</title>
</caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_53132-fig-13a.tif"/>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_53132-fig-13b.tif"/>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_53132-fig-13c.tif"/>
</fig>
<p>As the number of hidden layers grows, there is a general trend towards improved accuracy, recall, precision, and F1-score across training, validation, and test sets. With 6 hidden layers, the model performs at its peak, with a training accuracy of 98.90%, a validation accuracy of 94.82%, and a test accuracy of 92.96%. The performance indicators appear to reach a plateau or decline at 7 hidden layers, suggesting that overfitting is possible as the model complexity increases.</p>
</sec>
<sec id="s5_6">
<label>5.6</label>
<title>Ablation Study</title>
<p>It can be observed that the DMLP model attained 97% accuracy when utilizing pre-processing techniques, whereas without pre-processing, the model attained 96% accuracy. Removing the resampling, feature scaling, and feature selection phases allowed us to better grasp the significance of data preparation in our Dynamic Multi-Layer Perceptron (DMLP) model. After removing this pre-processing, we tested the model (<xref ref-type="table" rid="table-5">Table 5</xref>) on the fetal health classification task. As a result, we can gauge how much these methods improve the model&#x2019;s predictive power and, maybe, learn which features were most crucial.</p>
<table-wrap id="table-5">
<label>Table 5</label>
<caption>
<title>Analysis of various classifiers for fetal health classification</title>
</caption>
<table frame="hsides">
<colgroup>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
</colgroup>
<thead>
<tr>
<th>Classifier</th>
<th>Accuracy</th>
<th>Precision</th>
<th>Recall</th>
<th>F1-score</th>
</tr>
</thead>
<tbody>
<tr>
<td>LR</td>
<td>0.82</td>
<td>0.87</td>
<td>0.83</td>
<td>0.84</td>
</tr>
<tr>
<td>DT</td>
<td>0.91</td>
<td>0.93</td>
<td>0.92</td>
<td>0.91</td>
</tr>
<tr>
<td>RF</td>
<td>0.84</td>
<td>0.90</td>
<td>0.84</td>
<td>0.85</td>
</tr>
<tr>
<td>KNN</td>
<td>0.89</td>
<td>0.90</td>
<td>0.89</td>
<td>0.88</td>
</tr>
<tr>
<td>XGB</td>
<td>0.93</td>
<td>0.94</td>
<td>0.94</td>
<td>0.94</td>
</tr>
<tr>
<td>DMLP</td>
<td>0.96</td>
<td>0.95</td>
<td>0.93</td>
<td>0.943</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>The practical implications associated with the current study outline the proposed model&#x2019;s limitations. We used a CTG dataset from a medical centre and a specific population. The demographic, genetic, and environmental differences between populations restrict the generalizability of our findings. The dataset size with limited processing records is one of the potential limitations. The feature scaling that is performed in the current study yields better accuracy, but at the same time, smoothing values would miss out on some of the significant data for analysis. The other limitation in the evaluation process is that the k-value in multi-fold validation is confined to 10, where the model yields a better accuracy compared to k&#x003D;5, but needs considerable computation efforts to yield the accuracy. The tradeoff between accuracy and computational efforts may be further analyzed to improve the comprehensibility of the model. The hyperparameters are considered fixed and can be fine-tuned by further evaluating the model in different settings. The feature weights and dependencies can be further evaluated using explainable models. The ablation study can be performed by discarding some of the less significant features from the dataset for better analysis of the robustness of the model.</p>
</sec>
</sec>
<sec id="s6">
<label>6</label>
<title>Conclusion</title>
<p>We present a revolutionary Dynamic Multi-Layer Perceptron neural network for fetal health categorization, which has grown from a single neuron to a complex multi-layer design. The performance of the DMLP model is deliberately improved by including optimization approaches such as early stopping, batch normalization, and dropout. A comprehensive approach is successful, as shown by the synergistic study of classic machine learning approaches with the Dynamic MLP model. The predictive framework is much more effective and dependable now that resampling, feature scaling, and feature selection are part of it, and an ablation study without these techniques is also presented. After comparing it to different ML classifiers, we found that the Dynamic MLP model is resilient with an accuracy of 97%. Clinical specialists can learn a lot by comparing models such as LR, XGB, DT, KNN, and RF to ensure the model can be used in real-world healthcare decision-making. These models facilitate data analysis and interpretation. By offering a comprehensive and efficient way to improve accuracy and interpretability, our research contributes to expanding information on fetal health classification approaches.</p>
<p>Future research could enhance the DMLP model to allow for longitudinal analysis, which would allow for continuous monitoring of fetal health during pregnancy. Improving model transparency and fostering trust among clinic professionals can be achieved by delving more into Explainable AI techniques such as LIME and SHAP.</p>
</sec>
</body>
<back>

<glossary content-type="abbreviations" id="glossary-1">
<title>Nomenclature</title>
<def-list>
<def-item>
<term><bold>Acronym</bold></term>
<def>
<p><bold>Definition</bold></p>
</def>
</def-item>
<def-item>
<term>ML/DL</term>
<def>
<p>Machine Learning/Deep Learning</p>
</def>
</def-item>
<def-item>
<term>RF</term>
<def>
<p>Random Forest</p>
</def>
</def-item>
<def-item>
<term>XGB</term>
<def>
<p>XGBoost</p>
</def>
</def-item>
<def-item>
<term>DT</term>
<def>
<p>Decision Tree</p>
</def>
</def-item>
<def-item>
<term>LR</term>
<def>
<p>Logistic Regression</p>
</def>
</def-item>
<def-item>
<term>KNN</term>
<def>
<p>K-Nearest Neighborhood</p>
</def>
</def-item>
<def-item>
<term>DMLP</term>
<def>
<p>Dynamic Multi-Layer Perceptron Model</p>
</def>
</def-item>
<def-item>
<term>CTG</term>
<def>
<p>Cardiotocography Data</p>
</def>
</def-item>
</def-list>
</glossary>
<ack><p>We thank all the authors for their research contributions.</p>
</ack>
<sec><title>Funding Statement</title>
<p>This work was supported by the National Research Foundation of Korea (NRF) grant funded by the Korea government (MSIT) (NRF-2023R1A2C1005950). Jana Shafi is supported via funding from Prince Sattam bin Abdulaziz University Project Number (PSAU/2024/R/1445).</p>
</sec>
<sec><title>Author Contributions</title>
<p>The authors confirm contribution to the paper as follows: Study conception and design: Uddagiri Sirisha, Parvathaneni Naga Srinivasu, Panguluri Padmavathi, Seongki Kim; data collection: Aruna Pavate, Jana Shafi, Muhammad Fazal Ijaz; analysis and interpretation of results: Uddagiri Sirisha, Parvathaneni Naga Srinivasu, Panguluri Padmavathi, Seongki Kim, Aruna Pavate, Jana Shafi, Muhammad Fazal Ijaz; draft manuscript preparation: Uddagiri Sirisha, Parvathaneni Naga Srinivasu, Panguluri Padmavathi, Seongki Kim; supervision: Jana Shafi, Muhammad Fazal Ijaz. All authors reviewed the results and approved the final version of the manuscript.</p>
</sec>
<sec sec-type="data-availability"><title>Availability of Data and Materials</title>
<p>Publicly available dataset is used in this study. It is available here: <ext-link ext-link-type="uri" xlink:href="https://www.kaggle.com/datasets/andrewmvd/fetal-health-classification">https://www.kaggle.com/datasets/andrewmvd/fetal-health-classification</ext-link>, accessed on 25 April 2024.</p>
</sec>
<sec sec-type="COI-statement"><title>Conflicts of Interest</title>
<p>The authors declare that they have no conflicts of interest to report regarding the present study.</p>
</sec>
<ref-list content-type="authoryear">
<title>References</title>
<ref id="ref-1"><label>[1]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>A. M.</given-names> <surname>Ponsiglione</surname></string-name>, <string-name><given-names>C.</given-names> <surname>Cosentino</surname></string-name>, <string-name><given-names>G.</given-names> <surname>Cesarelli</surname></string-name>, <string-name><given-names>F.</given-names> <surname>Amato</surname></string-name>, and <string-name><given-names>M.</given-names> <surname>Romano</surname></string-name></person-group>, &#x201C;<article-title>A comprehensive review of techniques for processing and analyzing fetal heart rate signals</article-title>,&#x201D; <source><italic>Sensors</italic></source>, vol. <volume>21</volume>, no. <issue>18</issue>, pp. <fpage>6136</fpage>, <year>2021</year>. doi: <pub-id pub-id-type="doi">10.3390/s21186136</pub-id>; <pub-id pub-id-type="pmid">34577342</pub-id></mixed-citation></ref>
<ref id="ref-2"><label>[2]</label><mixed-citation publication-type="conf-proc"><person-group person-group-type="author"><string-name><given-names>J.</given-names> <surname>Piri</surname></string-name>, <string-name><given-names>P.</given-names> <surname>Mohapatra</surname></string-name>, and <string-name><given-names>R.</given-names> <surname>Dey</surname></string-name></person-group>, &#x201C;<article-title>Fetal health status classification using moga-cd based feature selection approach</article-title>,&#x201D; in <conf-name>2020 IEEE Int. Conf. Electron., Comput. Commun. Technol. (CONECCT)</conf-name>, <publisher-loc>Bangalore, India</publisher-loc>, <year>Jul. 2020</year>, pp. <fpage>1</fpage>&#x2013;<lpage>6</lpage>. doi: <pub-id pub-id-type="doi">10.1109/CONECCT50063.2020.9198377</pub-id>.</mixed-citation></ref>
<ref id="ref-3"><label>[3]</label><mixed-citation publication-type="conf-proc"><person-group person-group-type="author"><string-name><given-names>J.</given-names> <surname>Li</surname></string-name> and <string-name><given-names>X.</given-names> <surname>Liu</surname></string-name></person-group>, &#x201C;<article-title>Fetal health classification based on machine learning</article-title>,&#x201D; in <conf-name>2021 IEEE 2nd Int. Conf. Big Data, Artif. Intell. Internet Things Eng. (ICBAIE)</conf-name>, <year>Mar. 2021</year>, pp. <fpage>899</fpage>&#x2013;<lpage>902</lpage>. doi: <pub-id pub-id-type="doi">10.1109/ICBAIE52039.2021.9389902</pub-id>.</mixed-citation></ref>
<ref id="ref-4"><label>[4]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>I. J.</given-names> <surname>Jebadurai</surname></string-name>, <string-name><given-names>G. J. L.</given-names> <surname>Paulraj</surname></string-name>, <string-name><given-names>J.</given-names> <surname>Jebadurai</surname></string-name>, and <string-name><given-names>S.</given-names> <surname>Silas</surname></string-name></person-group>, &#x201C;<article-title>Experimental analysis of filtering-based feature selection techniques for fetal health classification</article-title>,&#x201D; <source><italic>Serbian J. Electr. Eng.</italic></source>, vol. <volume>19</volume>, no. <issue>2</issue>, pp. <fpage>207</fpage>&#x2013;<lpage>224</lpage>, <year>2022</year>. doi: <pub-id pub-id-type="doi">10.2298/SJEE2202207J</pub-id>.</mixed-citation></ref>
<ref id="ref-5"><label>[5]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>A.</given-names> <surname>Mehbodniya</surname></string-name> <etal>et al.</etal></person-group>, &#x201C;<article-title>Fetal health classification from cardiotocographic data using machine learning</article-title>,&#x201D; <source><italic>Expert. Syst.</italic></source>, vol. <volume>39</volume>, no. <issue>6</issue>, pp. <fpage>e12899</fpage>, <year>2022</year>. doi: <pub-id pub-id-type="doi">10.1111/exsy.12899</pub-id>.</mixed-citation></ref>
<ref id="ref-6"><label>[6]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>S.</given-names> <surname>Das</surname></string-name>, <string-name><given-names>H.</given-names> <surname>Mukherjee</surname></string-name>, <string-name><given-names>K.</given-names> <surname>Roy</surname></string-name>, and <string-name><given-names>C. K.</given-names> <surname>Saha</surname></string-name></person-group>, &#x201C;<article-title>Fetal health classification from cardiotocograph for both stages of labor&#x2014;A soft-computing-based approach</article-title>,&#x201D; <source><italic>Diagnostics</italic></source>, vol. <volume>13</volume>, no. <issue>5</issue>, pp. <fpage>858</fpage>, <year>2023</year>. doi: <pub-id pub-id-type="doi">10.3390/diagnostics13050858</pub-id>; <pub-id pub-id-type="pmid">36900002</pub-id></mixed-citation></ref>
<ref id="ref-7"><label>[7]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>A.</given-names> <surname>Kuzu</surname></string-name> and <string-name><given-names>Y.</given-names> <surname>Santur</surname></string-name></person-group>, &#x201C;<article-title>Early diagnosis and classification of fetal health status from a fetal cardiotocography dataset using ensemble learning</article-title>,&#x201D; <source><italic>Diagnostics</italic></source>, vol. <volume>13</volume>, no. <issue>15</issue>, pp. <fpage>2471</fpage>, <year>2023</year>. doi: <pub-id pub-id-type="doi">10.3390/diagnostics13152471</pub-id>; <pub-id pub-id-type="pmid">37568833</pub-id></mixed-citation></ref>
<ref id="ref-8"><label>[8]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>M. T.</given-names> <surname>Alam</surname></string-name> <etal>et al.</etal></person-group>, &#x201C;<article-title>Comparative analysis of different efficient machine learning methods for fetal health classification</article-title>,&#x201D; <source><italic>Appl. Bionics Biomech.</italic></source>, vol. <volume>2022</volume>, no. <issue>9</issue>, pp. <fpage>1</fpage>&#x2013;<lpage>12</lpage>, <year>2022</year>. doi: <pub-id pub-id-type="doi">10.1155/2022/6321884</pub-id>; <pub-id pub-id-type="pmid">35498140</pub-id></mixed-citation></ref>
<ref id="ref-9"><label>[9]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>N.</given-names> <surname>Rahmayanti</surname></string-name>, <string-name><given-names>H.</given-names> <surname>Pradani</surname></string-name>, <string-name><given-names>M.</given-names> <surname>Pahlawan</surname></string-name>, and <string-name><given-names>R.</given-names> <surname>Vinarti</surname></string-name></person-group>, &#x201C;<article-title>Comparison of machine learning algorithms to classify fetal health using cardiotocogram data</article-title>,&#x201D; <source><italic>Procedia Comput. Sci.</italic></source>, vol. <volume>197</volume>, no. <issue>14</issue>, pp. <fpage>162</fpage>&#x2013;<lpage>171</lpage>, <year>2022</year>. doi: <pub-id pub-id-type="doi">10.1016/j.procs.2021.12.130</pub-id>.</mixed-citation></ref>
<ref id="ref-10"><label>[10]</label><mixed-citation publication-type="conf-proc"><person-group person-group-type="author"><string-name><given-names>N. F. M.</given-names> <surname>Noor</surname></string-name>, <string-name><given-names>N.</given-names> <surname>Ahmad</surname></string-name>, and <string-name><given-names>N. M.</given-names> <surname>Noor</surname></string-name></person-group>, &#x201C;<article-title>Fetal health classification using supervised learning approach</article-title>,&#x201D; in <conf-name>2021 IEEE Nat. Biomed. Eng. Conf. (NBEC)</conf-name>, <year>2021</year>, pp. <fpage>36</fpage>&#x2013;<lpage>41</lpage>. doi: <pub-id pub-id-type="doi">10.1109/NBEC51914.2021.9637660</pub-id>.</mixed-citation></ref>
<ref id="ref-11"><label>[11]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>A.</given-names> <surname>Akbulut</surname></string-name>, <string-name><given-names>E.</given-names> <surname>Ertugrul</surname></string-name>, and <string-name><given-names>V.</given-names> <surname>Topcu</surname></string-name></person-group>, &#x201C;<article-title>Fetal health status prediction based on maternal clinical history using machine learning techniques</article-title>,&#x201D; <source><italic>Comput. Methods Programs Biomed.</italic></source>, vol. <volume>163</volume>, no. <issue>6245</issue>, pp. <fpage>87</fpage>&#x2013;<lpage>100</lpage>, <year>2018</year>. doi: <pub-id pub-id-type="doi">10.1016/j.cmpb.2018.06.010</pub-id>; <pub-id pub-id-type="pmid">30119860</pub-id></mixed-citation></ref>
<ref id="ref-12"><label>[12]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>R.</given-names> <surname>Abiyev</surname></string-name>, <string-name><given-names>J. B.</given-names> <surname>Idoko</surname></string-name>, <string-name><given-names>H.</given-names> <surname>Alt&#x0131;parmak</surname></string-name>, and <string-name><given-names>M.</given-names> <surname>T&#x00FC;z&#x00FC;nkan</surname></string-name></person-group>, &#x201C;<article-title>Fetal health state detection using interval type-2 fuzzy neural networks</article-title>,&#x201D; <source><italic>Diagnostics</italic></source>, vol. <volume>13</volume>, no. <issue>10</issue>, pp. <fpage>1690</fpage>, <year>2023</year>. doi: <pub-id pub-id-type="doi">10.3390/diagnostics13101690</pub-id>; <pub-id pub-id-type="pmid">37238176</pub-id></mixed-citation></ref>
<ref id="ref-13"><label>[13]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>&#x00D6;.</given-names> <surname>Kasim</surname></string-name></person-group>, &#x201C;<article-title>Multi-classification of fetal health status using extreme learning machine</article-title>,&#x201D; <source><italic>Icontech Int. J.</italic></source>, vol. <volume>5</volume>, no. <issue>2</issue>, pp. <fpage>62</fpage>&#x2013;<lpage>70</lpage>, <year>2021</year>. doi: <pub-id pub-id-type="doi">10.46291/ICONTECHvol5iss2pp62-70</pub-id>.</mixed-citation></ref>
<ref id="ref-14"><label>[14]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>S. K.</given-names> <surname>Mandala</surname></string-name></person-group>, &#x201C;<article-title>Unveiling the unborn: advancing fetal health classification through machine learning</article-title>,&#x201D; <source>arXiv preprint arXiv:2310.00505</source>, vol. <volume>1</volume>, pp. <fpage>2121</fpage>, <year>2023</year>. doi: <pub-id pub-id-type="doi">10.36922/aih.2121</pub-id>.</mixed-citation></ref>
<ref id="ref-15"><label>[15]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>K.</given-names> <surname>Shruthi</surname></string-name> and <string-name><given-names>A. S.</given-names> <surname>Poornima</surname></string-name></person-group>, &#x201C;<article-title>A method for predicting and classifying fetus health using machine learning</article-title>,&#x201D; <source><italic>Int. J. Intell. Syst. Appl. Eng.</italic></source>, vol. <volume>11</volume>, no. <issue>2</issue>, pp. <fpage>752</fpage>&#x2013;<lpage>762</lpage>, <year>2023</year>. doi: <pub-id pub-id-type="doi">10.18201/ijisae.2023.328</pub-id>.</mixed-citation></ref>
<ref id="ref-16"><label>[16]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>Z.</given-names> <surname>Hoodbhoy</surname></string-name>, <string-name><given-names>M.</given-names> <surname>Noman</surname></string-name>, <string-name><given-names>A.</given-names> <surname>Shafique</surname></string-name>, <string-name><given-names>A.</given-names> <surname>Nasim</surname></string-name>, <string-name><given-names>D.</given-names> <surname>Chowdhury</surname></string-name>, and <string-name><given-names>B.</given-names> <surname>Hasan</surname></string-name></person-group>, &#x201C;<article-title>Use of machine learning algorithms for prediction of fetal risk using cardiotocographic data</article-title>,&#x201D; <source><italic>Int. J. Appl. Basic Med. Res.</italic></source>, vol. <volume>9</volume>, no. <issue>4</issue>, pp. <fpage>226</fpage>, <year>2019</year>. doi: <pub-id pub-id-type="doi">10.4103/ijabmr.IJABMR_370_18</pub-id>; <pub-id pub-id-type="pmid">31681548</pub-id></mixed-citation></ref>
<ref id="ref-17"><label>[17]</label><mixed-citation publication-type="conf-proc"><person-group person-group-type="author"><string-name><given-names>M. M.</given-names> <surname>Imran Molla</surname></string-name>, <string-name><given-names>J. J.</given-names> <surname>Jui</surname></string-name>, <string-name><given-names>B. S.</given-names> <surname>Bari</surname></string-name>, <string-name><given-names>M.</given-names> <surname>Rashid</surname></string-name>, and <string-name><given-names>M. J.</given-names> <surname>Hasan</surname></string-name></person-group>, &#x201C;<article-title>Cardiotocogram data classification using random forest-based machine learning algorithm</article-title>,&#x201D; in <conf-name>Proc. 11th Nat. Tech. Seminar on Unmanned Syst. Technol. 2019: NUSYS&#x2019;19</conf-name>, <publisher-name>Springer Singapore</publisher-name>, <year>2021</year>, pp. <fpage>357</fpage>&#x2013;<lpage>369</lpage>. doi: <pub-id pub-id-type="doi">10.1007/978-981-15-5281-6_25</pub-id>.</mixed-citation></ref>
<ref id="ref-18"><label>[18]</label><mixed-citation publication-type="conf-proc"><person-group person-group-type="author"><string-name><given-names>M. M.</given-names> <surname>Islam</surname></string-name>, <string-name><given-names>M.</given-names> <surname>Rokunojjaman</surname></string-name>, <string-name><given-names>A.</given-names> <surname>Amin</surname></string-name>, <string-name><given-names>M. N.</given-names> <surname>Akhtar</surname></string-name>, and <string-name><given-names>I. H.</given-names> <surname>Sarker</surname></string-name></person-group>, &#x201C;<article-title>Diagnosis and classification of fetal health based on ctg data using machine learning techniques</article-title>,&#x201D; in <conf-name>Proc. Int. Conf. Mach. Intell. Emerg. Technol.</conf-name>, <publisher-loc>Cham, Switzerland</publisher-loc>, <year>Sep. 2022</year>, pp. <fpage>3</fpage>&#x2013;<lpage>16</lpage>. doi: <pub-id pub-id-type="doi">10.1007/978-3-031-34622-4_11</pub-id>.</mixed-citation></ref>
<ref id="ref-19"><label>[19]</label><mixed-citation publication-type="other"><person-group person-group-type="author"><collab>Kaggle</collab></person-group>, &#x201C;<article-title>Fetal health Classification Dataset</article-title>,&#x201D; <year>2024</year>. <comment>Accessed: Jun. 25, 2024.
[Online]</comment>. Available: <ext-link ext-link-type="uri" xlink:href="https://www.kaggle.com/datasets/andrewmvd/fetal-health-classification">https://www.kaggle.com/datasets/andrewmvd/fetal-health-classification</ext-link></mixed-citation></ref>
<ref id="ref-20"><label>[20]</label><mixed-citation publication-type="conf-proc"><person-group person-group-type="author"><string-name><given-names>P.</given-names> <surname>Dwivedi</surname></string-name>, <string-name><given-names>A. A.</given-names> <surname>Khan</surname></string-name>, <string-name><given-names>S.</given-names> <surname>Mugde</surname></string-name>, and <string-name><given-names>G.</given-names> <surname>Sharma</surname></string-name></person-group>, &#x201C;<article-title>Diagnosing the major contributing factors in the classification of the fetal health status using cardiotocography measurements: An automl and xai approach</article-title>,&#x201D; in <conf-name>2021 The 13th Int. Conf. Electron., Comput., Artif. Intell. (ECAI)</conf-name>, <publisher-name>IEEE</publisher-name>, <year>Jul. 2021</year>, pp. <fpage>1</fpage>&#x2013;<lpage>6</lpage>. doi: <pub-id pub-id-type="doi">10.1109/ECAI52376.2021.9515070</pub-id>.</mixed-citation></ref>
<ref id="ref-21"><label>[21]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>J. H.</given-names> <surname>Joloudari</surname></string-name>, <string-name><given-names>A.</given-names> <surname>Marefat</surname></string-name>, <string-name><given-names>M. A.</given-names> <surname>Nematollahi</surname></string-name>, <string-name><given-names>S. S.</given-names> <surname>Oyelere</surname></string-name>, and <string-name><given-names>S.</given-names> <surname>Hussain</surname></string-name></person-group>, &#x201C;<article-title>Effective class-imbalance learning based on SMOTE and convolutional neural networks</article-title>,&#x201D; <source><italic>Appl. Sci.</italic></source>, vol. <volume>13</volume>, no. <issue>6</issue>, pp. <fpage>4006</fpage>, <year>2023</year>. doi: <pub-id pub-id-type="doi">10.3390/app13064006</pub-id>.</mixed-citation></ref>
<ref id="ref-22"><label>[22]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>Y.</given-names> <surname>Salini</surname></string-name>, <string-name><given-names>S. N.</given-names> <surname>Mohanty</surname></string-name>, <string-name><given-names>J. V. N.</given-names> <surname>Ramesh</surname></string-name>, <string-name><given-names>M.</given-names> <surname>Yang</surname></string-name>, and <string-name><given-names>M. M. V.</given-names> <surname>Chalapathi</surname></string-name></person-group>, &#x201C;<article-title>Cardiotocography data analysis for fetal health classification using machine learning models</article-title>,&#x201D; <source><italic>IEEE Access</italic></source>, vol. <volume>12</volume>, no. <issue>1</issue>, pp. <fpage>26005</fpage>&#x2013;<lpage>26022</lpage>, <year>2024</year>. doi: <pub-id pub-id-type="doi">10.1109/ACCESS.2024.3364755</pub-id>.</mixed-citation></ref>
<ref id="ref-23"><label>[23]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>D.</given-names> <surname>Bollegala</surname></string-name></person-group>, &#x201C;<article-title>Dynamic feature scaling for online learning of binary classifiers</article-title>,&#x201D; <source><italic>Knowl.-Based Syst.</italic></source>, vol. <volume>129</volume>, no. <issue>8</issue>, pp. <fpage>97</fpage>&#x2013;<lpage>105</lpage>, <year>2017</year>. doi: <pub-id pub-id-type="doi">10.1016/j.knosys.2017.05.010</pub-id>.</mixed-citation></ref>
<ref id="ref-24"><label>[24]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>M.</given-names> <surname>Mandal</surname></string-name>, <string-name><given-names>P. K.</given-names> <surname>Singh</surname></string-name>, <string-name><given-names>M. F.</given-names> <surname>Ijaz</surname></string-name>, <string-name><given-names>J.</given-names> <surname>Shafi</surname></string-name>, and <string-name><given-names>R.</given-names> <surname>Sarkar</surname></string-name></person-group>, &#x201C;<article-title>A tri-stage wrapper-filter feature selection framework for disease classification</article-title>,&#x201D; <source><italic>Sensors</italic></source>, vol. <volume>21</volume>, no. <issue>16</issue>, pp. <fpage>5571</fpage>, <year>2021</year>. doi: <pub-id pub-id-type="doi">10.3390/s21165571</pub-id>; <pub-id pub-id-type="pmid">34451013</pub-id></mixed-citation></ref>
<ref id="ref-25"><label>[25]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>S.</given-names> <surname>Ma</surname></string-name> and <string-name><given-names>J.</given-names> <surname>Zhai</surname></string-name></person-group>, &#x201C;<article-title>Big data decision tree for continuous-valued attributes based on unbalanced cut points</article-title>,&#x201D; <source><italic>J. Big Data</italic></source>, vol. <volume>10</volume>, no. <issue>135</issue>, pp. <fpage>1328</fpage>, <year>2023</year>. doi: <pub-id pub-id-type="doi">10.1186/s40537-023-00816-2</pub-id>.</mixed-citation></ref>
<ref id="ref-26"><label>[26]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>T. B.</given-names> <surname>Krishna</surname></string-name> and <string-name><given-names>P.</given-names> <surname>Kokil</surname></string-name></person-group>, &#x201C;<article-title>Automated classification of common maternal fetal ultrasound planes using multi-layer perceptron with deep feature integration</article-title>,&#x201D; <source><italic>Biomed. Signal Process. Control</italic></source>, vol. <volume>86</volume>, no. <issue>11</issue>, pp. <fpage>105283</fpage>, <year>2023</year>. doi: <pub-id pub-id-type="doi">10.1016/j.bspc.2023.105283</pub-id>.</mixed-citation></ref>
<ref id="ref-27"><label>[27]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>F. A.</given-names> <surname>Bader</surname></string-name></person-group>, &#x201C;<article-title>An optimized single layer perceptron-based approach for cardiotocography data classification</article-title>,&#x201D; <source><italic>Int. J. Adv. Comput. Sci. Appl.</italic></source>, vol. <volume>13</volume>, no. <issue>10</issue>, <year>2022</year>. doi: <pub-id pub-id-type="doi">10.14569/IJACSA.2022.0131076</pub-id>.</mixed-citation></ref>
<ref id="ref-28"><label>[28]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>Y.</given-names> <surname>Yin</surname></string-name> and <string-name><given-names>Y.</given-names> <surname>Bingi</surname></string-name></person-group>, &#x201C;<article-title>Using machine learning to classify human fetal health and analyze feature importance</article-title>,&#x201D; <source><italic>BioMedInformatics</italic></source>, vol. <volume>3</volume>, no. <issue>2</issue>, pp. <fpage>280</fpage>&#x2013;<lpage>298</lpage>, <year>2023</year>. doi: <pub-id pub-id-type="doi">10.3390/biomedinformatics3020019</pub-id>.</mixed-citation></ref>
<ref id="ref-29"><label>[29]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>Z.</given-names> <surname>Zhao</surname></string-name>, <string-name><given-names>Y.</given-names> <surname>Zhang</surname></string-name>, and <string-name><given-names>Y.</given-names> <surname>Deng</surname></string-name></person-group>, &#x201C;<article-title>A comprehensive feature analysis of the fetal heart rate signal for the intelligent assessment of fetal state</article-title>,&#x201D; <source><italic>J. Clin. Med.</italic></source>, vol. <volume>7</volume>, no. <issue>8</issue>, pp. <fpage>223</fpage>, <year>2018</year>. doi: <pub-id pub-id-type="doi">10.3390/jcm7080223</pub-id>; <pub-id pub-id-type="pmid">30127256</pub-id></mixed-citation></ref>
</ref-list>
</back></article>