<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.1 20151215//EN" "http://jats.nlm.nih.gov/publishing/1.1/JATS-journalpublishing1.dtd">
<article xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:mml="http://www.w3.org/1998/Math/MathML" xml:lang="en" article-type="research-article" dtd-version="1.1">
  <front>
    <journal-meta>
      <journal-id journal-id-type="pmc">Phyton</journal-id>
      <journal-id journal-id-type="nlm-ta">Phyton</journal-id>
      <journal-id journal-id-type="publisher-id">Phyton</journal-id>
      <journal-title-group>
        <journal-title>Phyton-International Journal of Experimental Botany</journal-title>
      </journal-title-group>
      <issn pub-type="epub">1851-5657</issn>
      <issn pub-type="ppub">0031-9457</issn>
      <publisher>
        <publisher-name>Tech Science Press</publisher-name>
        <publisher-loc>USA</publisher-loc>
      </publisher>
    </journal-meta>
    <article-meta>
      <article-id pub-id-type="publisher-id">73354</article-id>
      <article-id pub-id-type="doi">10.32604/phyton.2025.073354</article-id>
      <article-categories>
        <subj-group subj-group-type="heading">
          <subject>Article</subject>
        </subj-group>
      </article-categories>
      <title-group>
        <article-title>DenseSwinGNNNet: A Novel Deep Learning Framework for Accurate Turmeric Leaf Disease Classification</article-title>
        <alt-title alt-title-type="left-running-head">DenseSwinGNNNet: A Novel Deep Learning Framework for Accurate Turmeric Leaf Disease Classification</alt-title>
        <alt-title alt-title-type="right-running-head">DenseSwinGNNNet: A Novel Deep Learning Framework for Accurate Turmeric Leaf Disease Classification</alt-title>
      </title-group>
      <contrib-group>
        <contrib id="author-1" contrib-type="author">
          <name name-style="western">
            <surname>Singla</surname>
            <given-names>Seerat</given-names>
          </name>
          <xref ref-type="aff" rid="aff-1">1</xref>
        </contrib>
        <contrib id="author-2" contrib-type="author">
          <name name-style="western">
            <surname>Shandilya</surname>
            <given-names>Gunjan</given-names>
          </name>
          <xref ref-type="aff" rid="aff-1">1</xref>
        </contrib>
        <contrib id="author-3" contrib-type="author">
          <name name-style="western">
            <surname>Altameem</surname>
            <given-names>Ayman</given-names>
          </name>
          <xref ref-type="aff" rid="aff-2">2</xref>
        </contrib>
        <contrib id="author-4" contrib-type="author">
          <name name-style="western">
            <surname>Pant</surname>
            <given-names>Ruby</given-names>
          </name>
          <xref ref-type="aff" rid="aff-3">3</xref>
        </contrib>
        <contrib id="author-5" contrib-type="author">
          <name name-style="western">
            <surname>Kumar</surname>
            <given-names>Ajay</given-names>
          </name>
          <xref ref-type="aff" rid="aff-4">4</xref>
        </contrib>
        <contrib id="author-6" contrib-type="author" corresp="yes">
          <name name-style="western">
            <surname>Rehman</surname>
            <given-names>Ateeq Ur</given-names>
          </name>
          <xref ref-type="aff" rid="aff-5">5</xref>
          <email>202411144@gachon.ac.kr</email>
        </contrib>
        <contrib id="author-7" contrib-type="author" corresp="yes">
          <name name-style="western">
            <surname>Almogren</surname>
            <given-names>Ahmad</given-names>
          </name>
          <xref ref-type="aff" rid="aff-6">6</xref>
          <email>ahalmogren@ksu.edu.sa</email>
        </contrib>
        <aff id="aff-1"><label>1</label><institution>Chitkara University Institute of Engineering and Technology, Chitkara University</institution>, <addr-line>Rajpura, 140401, Punjab</addr-line>, <country>India</country></aff>
        <aff id="aff-2"><label>2</label><institution>Department of Computer Science and Engineering, College of Applied Studies, King Saud University</institution>, <addr-line>Riyadh, 11543</addr-line>, <country>Saudi Arabia</country></aff>
        <aff id="aff-3"><label>3</label><institution>Department of Mechanical Engineering, Uttaranchal University</institution>, <addr-line>Dehradun, 248007, Uttarakhand</addr-line>, <country>India</country></aff>
        <aff id="aff-4"><label>4</label><institution>Department of Mechanical Engineering, Noida Institute of Engineering and Technology (NIET)</institution>, <addr-line>Greater Noida, 201306, Uttar Pradesh</addr-line>, <country>India</country></aff>
        <aff id="aff-5"><label>5</label><institution>School of Computing, Gachon University</institution>, <addr-line>Seongnam-si, 13120</addr-line>, <country>Republic of Korea</country></aff>
        <aff id="aff-6"><label>6</label><institution>Department of Computer Science, College of Computer and Information Sciences, King Saud University</institution>, <addr-line>Riyadh, 11633</addr-line>, <country>Saudi Arabia</country></aff>
      </contrib-group>
      <author-notes>
        <corresp id="cor1"><label>*</label>Corresponding Authors: Ateeq Ur Rehman. Email: <email>202411144@gachon.ac.kr</email>; Ahmad Almogren. Email: <email>ahalmogren@ksu.edu.sa</email></corresp>
      </author-notes>
      <pub-date date-type="collection" publication-format="electronic">
        <year>2025</year>
      </pub-date>
      <pub-date date-type="pub" publication-format="electronic">
        <day>29</day>
        <month>12</month>
        <year>2025</year>
      </pub-date>
      <volume>94</volume>
      <issue>12</issue>
      <fpage>4021</fpage>
      <lpage>4057</lpage>
      <history>
        <date date-type="received">
          <day>16</day>
          <month>9</month>
          <year>2025</year>
        </date>
        <date date-type="accepted">
          <day>24</day>
          <month>11</month>
          <year>2025</year>
        </date>
      </history>
      <permissions>
        <copyright-statement>&#xA9; 2025 The Authors.</copyright-statement>
        <copyright-year>2025</copyright-year>
        <copyright-holder>Published by Tech Science Press.</copyright-holder>
        <license xlink:href="https://creativecommons.org/licenses/by/4.0/">
          <license-p>This work is licensed under a <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution 4.0 International License</ext-link>, which permits unrestricted use, distribution, and reproduction in any medium, provided the original work is properly cited.</license-p>
        </license>
      </permissions>
      <self-uri content-type="pdf" xlink:href="TSP_Phyton-94-73354.pdf"/>
      <abstract>
        <p>Turmeric leaf diseases pose a major threat to turmeric cultivation, causing significant yield loss and economic impact. Early and accurate identification of these diseases is essential for effective crop management and timely intervention. This study proposes DenseSwinGNNNet, a hybrid deep learning framework that integrates DenseNet121, the Swin Transformer, and a Graph Neural Network (GNN) to enhance the classification of turmeric leaf conditions. DenseNet121 extracts discriminative low-level features, the Swin Transformer captures long-range contextual relationships through hierarchical self-attention, and the GNN models inter-feature dependencies to refine the final representation. A total of 4361 images from the Mendeley turmeric leaf dataset were used, categorized into four classes: Aphids Disease, Blotch, Leaf Spot, and Healthy Leaf. The dataset underwent extensive preprocessing, including augmentation, normalization, and resizing, to improve generalization. An 80:10:10 split was applied for training, validation, and testing, respectively. Model performance was evaluated using accuracy, precision, recall, F1-score, confusion matrices, and ROC curves. Optimized with the Adam optimizer at a learning rate of 0.0001, DenseSwinGNNNet achieved an overall accuracy of 99.7%, with precision, recall, and F1-scores exceeding 99% across all classes. The ROC curves reported AUC values near 1.0, indicating excellent class separability, while the confusion matrix showed minimal misclassification. Beyond high predictive performance, the framework incorporates considerations for cybersecurity and privacy in data-driven agriculture, supporting secure data handling and robust model deployment. This work contributes a reliable and scalable approach for turmeric leaf disease detection and advances the application of AI-driven precision agriculture.</p>
      </abstract>
      <kwd-group kwd-group-type="author">
        <kwd>Turmeric leaf disease</kwd>
        <kwd>deep learning</kwd>
        <kwd>DenseNet121</kwd>
        <kwd>swin transformer</kwd>
        <kwd>graph neural network (GNN)</kwd>
        <kwd>image classification</kwd>
      </kwd-group>
      <funding-group>
        <award-group id="awg1">
          <funding-source>King Saud University, Riyadh, Saudi Arabia</funding-source>
          <award-id>ORF-2025-498</award-id>
        </award-group>
      </funding-group>
    </article-meta>
  </front>
  <body>
    <sec id="s1">
      <label>1</label>
      <title>Introduction</title>
      <p>Turmeric is a valuable medicinal and culinary crop, primarily grown for its rhizomes, which yield the well-known yellow spice. Besides being economically important, turmeric plays a role in traditional medicine due to its anti-inflammatory, antioxidant, and antimicrobial properties [<xref ref-type="bibr" rid="ref-1">1</xref>]. The leaves of the plant are also crucial to its overall health and productivity, as they have a direct impact on the photosynthesis process and nutrient circulation. Nevertheless, turmeric production is further jeopardized by several leaf diseases that lead to significant yield losses and financial hardship for farmers [<xref ref-type="bibr" rid="ref-2">2</xref>]. Turmeric crops are susceptible to various leaf diseases, including aphid disease, blotch, leaf spot, and other fungal diseases. Aphids (<italic>Aphis gossypii</italic>) are small sap feeders that infect turmeric leaves, causing yellowing, curling, and stunted plant growth [<xref ref-type="bibr" rid="ref-3">3</xref>]. Honeydew secreted by aphids favours sooty mould development, further inhibiting photosynthesis and reducing plant vigour [<xref ref-type="bibr" rid="ref-4">4</xref>]. <italic>Taphrina maculans</italic>-induced leaf blotch disease is another prevalent disease, characterized by the appearance of brown or black irregular blotches on leaves that progress and cause tissue necrosis [<xref ref-type="bibr" rid="ref-5">5</xref>]. It is also highly contagious in wet conditions and, if left untreated, will significantly reduce the crop yield. <italic>Colletotrichum gloeosporioides</italic>-induced leaf spot disease is another significant threat to turmeric production. Leaf spot is manifested in the form of tiny, water-soaked lesions that develop into large spots with dark brown centres and yellow margins [<xref ref-type="bibr" rid="ref-6">6</xref>]. The occurrence of these diseases is multi-factorial in origin, arising primarily from incorrect agricultural practices, environmental conditions, and the invasion of pathogens. 
The application of excess chemical fertilizers, incorrect crop rotation, and improper irrigation can lead to compromised plant immunity, making plants vulnerable to disease [<xref ref-type="bibr" rid="ref-7">7</xref>]. Conditions in the environment, such as high humidity, unusual temperatures, and extreme weather, can also facilitate the spread of diseases. It is also possible for insects, like aphids, to spread viruses to turmeric plants [<xref ref-type="bibr" rid="ref-8">8</xref>]. Identifying leaf diseases in turmeric plants as early as possible is essential for implementing effective management methods. New advances in technology, particularly in artificial intelligence and machine learning, have enabled the rapid and accurate detection of plant diseases. Deep neural network algorithms, including CNNs, have proven highly efficient in identifying plant diseases through the analysis of leaf images [<xref ref-type="bibr" rid="ref-9">9</xref>]. Acute infections have resulted in leaf blighting and defoliation, which in turn directly impair rhizome development. The leaf spot is especially virulent during monsoon conditions, when the weather is favourable for the growth of fungi and humidity is high [<xref ref-type="bibr" rid="ref-10">10</xref>].</p>
      <p>Alongside the growing reliance on AI-based detection, agricultural systems are increasingly exposed to data security and privacy challenges. IoT sensors and cloud platforms used for collecting and analyzing crop data can become potential targets for unauthorized access, data breaches, or model tampering. Therefore, integrating cybersecurity principles&#x2014;such as encrypted communication, secure model training, and privacy-preserving learning&#x2014;is essential for developing resilient and trustworthy agricultural intelligence systems. The key contributions of this work to turmeric leaf disease classification are as follows:
<list list-type="order">
<list-item>
<label>1.</label>
  <p>Developed a new hybrid deep learning DenseSwinGNNNet model incorporating DenseNet121, Swin Transformer, and Graph Neural Network (GNN) for improving the classification of turmeric leaf disease through the combination of hierarchical feature extraction and relational learning.</p>
</list-item>
<list-item>
<label>2.</label>
  <p>Employed the Swin Transformer to efficiently extract long-range dependencies and hierarchical spatial patterns using a shifted window mechanism to enhance the capacity of the model to identify intricate patterns in leaf images.</p>
</list-item>
<list-item>
<label>3.</label>
  <p>Integrated GNN to capture inter-feature relationships and allow the network to learn structural dependencies among extracted features and improve classification robustness.</p>
</list-item>
</list></p>
    </sec>
    <sec id="s2">
      <label>2</label>
      <title>Literature Review</title>
      <p>Previous research has explored the detection of turmeric leaf disease using advanced image processing and machine learning methods. For example, one study employs a deep learning-based model trained on a large number of turmeric leaf images to classify them as healthy, diseased, or drought-stressed. The state-of-the-art studies are summarized in <xref ref-type="table" rid="table-1">Table 1</xref>:</p>
      <table-wrap id="table-1">
        <label>Table 1</label>
        <caption>
          <p>Literature review.</p>
        </caption>
        <table>
          <thead>
            <tr>
              <th align="center" valign="middle" style="border-bottom:solid thin;border-top:solid thin">Ref.</th>
              <th align="center" valign="middle" style="border-bottom:solid thin;border-top:solid thin">Dataset Used</th>
              <th align="center" valign="middle" style="border-bottom:solid thin;border-top:solid thin">Methodology</th>
              <th align="center" valign="middle" style="border-bottom:solid thin;border-top:solid thin">Target Problem</th>
              <th align="center" valign="middle" style="border-bottom:solid thin;border-top:solid thin">Performance Metrics</th>
              <th align="center" valign="middle" style="border-bottom:solid thin;border-top:solid thin">Key Findings</th>
            </tr>
          </thead>
          <tbody>
            <tr>
              <td align="center" valign="middle" style="border-bottom:solid thin">[<xref ref-type="bibr" rid="ref-11">11</xref>]</td>
              <td align="center" valign="middle" style="border-bottom:solid thin">Custom dataset of turmeric leaf images</td>
              <td align="center" valign="middle" style="border-bottom:solid thin">Deep learning + Fuzzy logic; deployed on Raspberry Pi via Simulink</td>
              <td align="center" valign="middle" style="border-bottom:solid thin">Classification of healthy, diseased, and drought-stressed leaves</td>
              <td align="center" valign="middle" style="border-bottom:solid thin">Accuracy = 95.47%</td>
              <td align="center" valign="middle" style="border-bottom:solid thin">The system supports farm management by reducing pesticide/water use, with a low-cost deployment suitable for remote sites.</td>
            </tr>
            <tr>
              <td align="center" valign="middle" style="border-bottom:solid thin">[<xref ref-type="bibr" rid="ref-12">12</xref>]</td>
              <td align="center" valign="middle" style="border-bottom:solid thin">Custom dataset of starch-adulterated turmeric (0&#x2013;100%)</td>
              <td align="center" valign="middle" style="border-bottom:solid thin">DenseNet201 + ETC for feature extraction; Decision Tree, Logistic Regression, KNNR</td>
              <td align="center" valign="middle" style="border-bottom:solid thin">Detection of starch adulteration</td>
              <td align="center" valign="middle" style="border-bottom:solid thin">Accuracy = 100%, AUC = 1.0, R<sup>2</sup> = 0.97, Root Mean Square Error (RMSE) = 0.65</td>
              <td align="center" valign="middle" style="border-bottom:solid thin">The hybrid model is highly effective; strong generalization confirmed with CV and LOOCV</td>
            </tr>
            <tr>
              <td align="center" valign="middle" style="border-bottom:solid thin">[<xref ref-type="bibr" rid="ref-13">13</xref>]</td>
              <td align="center" valign="middle" style="border-bottom:solid thin">Self-created dataset of turmeric leaves (Leaf Spot, Leaf Blight, Leaf Rot, Leaf Curl)</td>
              <td align="center" valign="middle" style="border-bottom:solid thin">AlexNet deep learning model</td>
              <td align="center" valign="middle" style="border-bottom:solid thin">Leaf disease classification</td>
              <td align="center" valign="middle" style="border-bottom:solid thin">Accuracy = 95.5%</td>
              <td align="center" valign="middle" style="border-bottom:solid thin">Outperformed classical ML models for disease detection</td>
            </tr>
            <tr>
              <td align="center" valign="middle" style="border-bottom:solid thin">[<xref ref-type="bibr" rid="ref-14">14</xref>]</td>
              <td align="center" valign="middle" style="border-bottom:solid thin">Secondary-source turmeric leaf spot images</td>
              <td align="center" valign="middle" style="border-bottom:solid thin">Hybrid Convolutional Neural Network&#x2013;Support Vector Machine (Hybrid CNN-SVM)</td>
              <td align="center" valign="middle" style="border-bottom:solid thin">Severity classification of leaf spot</td>
              <td align="center" valign="middle" style="border-bottom:solid thin">Accuracy = 97%</td>
              <td align="center" valign="middle" style="border-bottom:solid thin">Achieved reliable severity estimation despite limited regional data</td>
            </tr>
            <tr>
              <td align="center" valign="middle" style="border-bottom:solid thin">[<xref ref-type="bibr" rid="ref-15">15</xref>]</td>
              <td align="center" valign="middle" style="border-bottom:solid thin">Custom dataset of starch-adulterated turmeric (0&#x2013;100%)</td>
              <td align="center" valign="middle" style="border-bottom:solid thin">DenseNet201 + Extra Trees Classifier (ETC) with ML classifiers (Decision Tree, Logistic Regression, KNNR)</td>
              <td align="center" valign="middle" style="border-bottom:solid thin">Starch adulteration detection</td>
              <td align="center" valign="middle" style="border-bottom:solid thin">Accuracy = 99.5%, AUC = 1.0, R<sup>2</sup> = 0.97, RMSE = 0.65</td>
              <td align="center" valign="middle" style="border-bottom:solid thin">Consistently robust classification and regression performance</td>
            </tr>
            <tr>
              <td align="center" valign="middle" style="border-bottom:solid thin">[<xref ref-type="bibr" rid="ref-16">16</xref>]</td>
              <td align="center" valign="middle" style="border-bottom:solid thin">Turmeric leaf dataset (unspecified source)</td>
              <td align="center" valign="middle" style="border-bottom:solid thin">K-means segmentation + SVM</td>
              <td align="center" valign="middle" style="border-bottom:solid thin">Leaf Blast, Bacterial Blight, Brown Spot classification</td>
              <td align="center" valign="middle" style="border-bottom:solid thin">Accuracy, Specificity, Similarity</td>
              <td align="center" valign="middle" style="border-bottom:solid thin">Outperformed the backpropagation-based approach</td>
            </tr>
            <tr>
              <td align="center" valign="middle" style="border-bottom:solid thin">[<xref ref-type="bibr" rid="ref-17">17</xref>]</td>
              <td align="center" valign="middle" style="border-bottom:solid thin">PlantVillage dataset</td>
              <td align="center" valign="middle" style="border-bottom:solid thin">Faster R-CNN with VGG-19 feature extractor</td>
              <td align="center" valign="middle" style="border-bottom:solid thin">Plant disease recognition</td>
              <td align="center" valign="middle" style="border-bottom:solid thin">Metrics not specified</td>
              <td align="center" valign="middle" style="border-bottom:solid thin">Showed robustness across different image conditions</td>
            </tr>
            <tr>
              <td align="center" valign="middle" style="border-bottom:solid thin">[<xref ref-type="bibr" rid="ref-18">18</xref>]</td>
              <td align="center" valign="middle" style="border-bottom:solid thin">Custom turmeric leaf disease dataset</td>
              <td align="center" valign="middle" style="border-bottom:solid thin">YOLOv3-Tiny by adjusting convolutional layers, optimizing anchor box sizes, and fine-tuning hyperparameters to detect small and complex leaf disease regions better.</td>
              <td align="center" valign="middle" style="border-bottom:solid thin">Detect and classify diseases in turmeric plants </td>
              <td align="center" valign="middle" style="border-bottom:solid thin">Accuracy = 93.16%</td>
              <td align="center" valign="middle" style="border-bottom:solid thin">Improves detection over prior IY3TM alone by mitigating missed details and improving localization/feature fusion</td>
            </tr>
            <tr>
              <td align="center" valign="middle" style="border-bottom:solid thin">[<xref ref-type="bibr" rid="ref-19">19</xref>]</td>
              <td align="center" valign="middle" style="border-bottom:solid thin">Self-created dataset with special pH values</td>
              <td align="center" valign="middle" style="border-bottom:solid thin">VGG-19 CNN + Tree classifier + Linear regression</td>
              <td align="center" valign="middle" style="border-bottom:solid thin">Prediction of rhizome rot</td>
              <td align="center" valign="middle" style="border-bottom:solid thin">Metrics not detailed</td>
              <td align="center" valign="middle" style="border-bottom:solid thin">Model proposed, but evaluation incomplete</td>
            </tr>
          </tbody>
        </table>
      </table-wrap>
      <p>However, most existing studies primarily focus on accuracy optimization without considering the cybersecurity and data privacy aspects of AI-based agricultural systems. Few works address the secure management of leaf image datasets, resistance to adversarial manipulation, or protection of intellectual property in AI models. This gap highlights the need for integrating cybersecurity mechanisms, such as data encryption, adversarial robustness, and privacy-preserving model training, to ensure safe deployment of deep learning systems in real-world agricultural settings.</p>
    </sec>
    <sec id="s3">
      <label>3</label>
      <title>Proposed Methodology</title>
      <p>The proposed DenseSwinGNNNet model shown in <xref ref-type="fig" rid="fig-1">Fig. 1</xref> integrates DenseNet121, Swin Transformer, and GNN through a carefully designed tandem fusion strategy that enables progressive and hierarchical feature refinement. In this integration, DenseNet121 first acts as the primary feature extractor, producing deep convolutional feature maps of size [B, 1024, 7, 7], where B is the batch size. These maps are reshaped and linearly projected into smaller, non-overlapping 4 &#xD7; 4 patch embeddings to match the Swin Transformer input requirements, resulting in a tensor of shape [B, 49, 768]. Each embedding represents a local spatial region of the leaf image, allowing the Swin Transformer to perform window-based multi-head self-attention and learn both global and local spatial dependencies. After hierarchical processing and patch merging, the Swin Transformer outputs refined feature representations of shape [B, 16, 768], which are then converted into a graph structure suitable for GNN processing. In this step, each patch embedding becomes a graph node, and edges are formed based on spatial adjacency using an 8-connected grid, creating an adjacency matrix <bold><italic>A</italic></bold> that encodes the topological relationships among patches. This graph representation captures both the spatial arrangement and contextual interdependence of leaf regions. The GNN then performs message passing across connected nodes, updating feature representations through aggregation and transformation, and producing a final graph-level embedding of dimension [B, 16, 512]. A global average pooling operation aggregates these node features into a unified vector [B, 512], which is passed through fully connected and softmax layers to classify the image into one of four categories: Aphids Disease, Blotch, Leaf Spot, or Healthy Leaf. 
The rationale for adopting a sequential (tandem) fusion approach, rather than a parallel or late-fusion design, is threefold: (1) it ensures continuous hierarchical feature flow, allowing low-level texture and edge features from DenseNet121 to be progressively refined by the Swin Transformer and relationally enhanced by the GNN; (2) it maintains computational efficiency and prevents overfitting that could arise from feature concatenation in parallel fusion; and (3) empirical results demonstrated that parallel and late-fusion variants produced 1&#x2013;2% lower accuracy, validating that the tandem configuration provides superior feature synergy and model stability. This structured integration ensures smooth tensor transition across modules, enhances representational richness, and preserves spatial and relational coherence, thereby enabling highly accurate and generalizable turmeric leaf disease classification. Some common core components used in the proposed methodology, along with their mathematical expressions and functions, are illustrated in <xref ref-type="table" rid="table-2">Table 2</xref>.</p>
      <fig id="fig-1">
        <label>Figure 1</label>
        <caption>
          <p>Proposed methodology for the classification of turmeric leaf disease.</p>
        </caption>
        <graphic mimetype="image" mime-subtype="tif" xlink:href="TSP_Phyton-94-73354-f001.tif"/>
      </fig>
      <table-wrap id="table-2">
        <label>Table 2</label>
        <caption>
          <p>Common core components used in proposed methodology.</p>
        </caption>
        <table>
          <thead>
            <tr>
              <th align="center" valign="middle" style="border-bottom:solid thin;border-top:solid thin">Core Component</th>
              <th align="center" valign="middle" style="border-bottom:solid thin;border-top:solid thin">Mathematical Expression</th>
              <th align="center" valign="middle" style="border-bottom:solid thin;border-top:solid thin">Details</th>
            </tr>
          </thead>
          <tbody>
            <tr>
              <td align="center" valign="middle" style="border-bottom:solid thin">Convolutional operation</td>
              <td align="center" valign="middle" style="border-bottom:solid thin">
                <inline-formula id="ieqn-1">
<mml:math id="mml-ieqn-1">
	<mml:mrow>
		<mml:mi>Y</mml:mi>
		<mml:mfenced separators="|">
			<mml:mrow>
				<mml:mi>i</mml:mi>
				<mml:mo>,</mml:mo>
				<mml:mi>j</mml:mi>
			</mml:mrow>
		</mml:mfenced>
		<mml:mo>&#xA0;</mml:mo>
		<mml:mo>=</mml:mo>
		<mml:mo>&#xA0;</mml:mo>
		<mml:mrow>
		<mml:mstyle displaystyle="true">
			<mml:munderover>
				<mml:mo stretchy="false">&#x2211;</mml:mo>
				<mml:mrow>
					<mml:mi>m</mml:mi>
					<mml:mo>=</mml:mo>
					<mml:mn>0</mml:mn>
				</mml:mrow>
				<mml:mrow>
					<mml:mi>K</mml:mi>
					<mml:mo>&#x2212;</mml:mo>
					<mml:mn>1</mml:mn>
				</mml:mrow>
			</mml:munderover>
			</mml:mstyle>
			<mml:mrow>
				<mml:mrow>
				<mml:mstyle displaystyle="true">
					<mml:munderover>
						<mml:mo stretchy="false">&#x2211;</mml:mo>
						<mml:mrow>
							<mml:mi>n</mml:mi>
							<mml:mo>=</mml:mo>
							<mml:mn>0</mml:mn>
						</mml:mrow>
						<mml:mrow>
							<mml:mi>K</mml:mi>
							<mml:mo>&#x2212;</mml:mo>
							<mml:mn>1</mml:mn>
						</mml:mrow>
					</mml:munderover>
					</mml:mstyle>
					<mml:mrow>
						<mml:mi>X</mml:mi>
					</mml:mrow>
				</mml:mrow>
			</mml:mrow>
		</mml:mrow>
		<mml:mfenced separators="|">
			<mml:mrow>
				<mml:mi>i</mml:mi>
				<mml:mo>+</mml:mo>
				<mml:mi>m</mml:mi>
				<mml:mo>,</mml:mo>
				<mml:mo>&#xA0;</mml:mo>
				<mml:mi>j</mml:mi>
				<mml:mo>+</mml:mo>
				<mml:mi>n</mml:mi>
			</mml:mrow>
		</mml:mfenced>
		<mml:mo>&#xB7;</mml:mo>
		<mml:mi>W</mml:mi>
		<mml:mfenced separators="|">
			<mml:mrow>
				<mml:mi>m</mml:mi>
				<mml:mo>,</mml:mo>
				<mml:mi>n</mml:mi>
			</mml:mrow>
		</mml:mfenced>
		<mml:mo>+</mml:mo>
		<mml:mi>b</mml:mi>
	</mml:mrow>
</mml:math>
</inline-formula>
              </td>
              <td align="center" valign="middle" style="border-bottom:solid thin">Function: Extracts spatial features by applying learnable filters to the input image.</td>
            </tr>
            <tr>
              <td align="center" valign="middle" style="border-bottom:solid thin">ReLU activation</td>
              <td align="center" valign="middle" style="border-bottom:solid thin"><inline-formula id="ieqn-2">
<mml:math id="mml-ieqn-2">
	<mml:mrow>
		<mml:mi>f</mml:mi>
		<mml:mfenced separators="|">
			<mml:mrow>
				<mml:mi>x</mml:mi>
			</mml:mrow>
		</mml:mfenced>
		<mml:mo>=</mml:mo>
		<mml:mrow>
			<mml:mrow>
				<mml:mi mathvariant="normal">max</mml:mi>
			</mml:mrow>
			<mml:mrow>
				<mml:mfenced separators="|">
					<mml:mrow>
						<mml:mn>0</mml:mn>
						<mml:mo>,</mml:mo>
						<mml:mi>x</mml:mi>
					</mml:mrow>
				</mml:mfenced>
			</mml:mrow>
		</mml:mrow>
	</mml:mrow>
</mml:math>
</inline-formula></td>
              <td align="center" valign="middle" style="border-bottom:solid thin">Function: Introduces non-linearity by setting negative values to zero, preventing vanishing gradients.</td>
            </tr>
            <tr>
              <td align="center" valign="middle" style="border-bottom:solid thin">Batch normalization</td>
              <td align="center" valign="middle" style="border-bottom:solid thin">
                <inline-formula id="ieqn-3">
<mml:math id="mml-ieqn-3">
	<mml:mrow>
		<mml:msub>
			<mml:mrow>
				<mml:mover accent="true">
					<mml:mrow>
						<mml:mi>x</mml:mi>
					</mml:mrow>
					<mml:mo stretchy="false">^</mml:mo>
				</mml:mover>
			</mml:mrow>
			<mml:mrow>
				<mml:mi>i</mml:mi>
			</mml:mrow>
		</mml:msub>
		<mml:mo>=</mml:mo>
		<mml:mfrac>
			<mml:mrow>
				<mml:msub>
					<mml:mrow>
						<mml:mi>x</mml:mi>
					</mml:mrow>
					<mml:mrow>
						<mml:mi>i</mml:mi>
					</mml:mrow>
				</mml:msub>
				<mml:mo>&#x2212;</mml:mo>
				<mml:mi>&#x3BC;</mml:mi>
			</mml:mrow>
			<mml:mrow>
				<mml:msqrt>
					<mml:msup>
						<mml:mrow>
							<mml:mi>&#x3C3;</mml:mi>
						</mml:mrow>
						<mml:mrow>
							<mml:mn>2</mml:mn>
						</mml:mrow>
					</mml:msup>
					<mml:mo>+</mml:mo>
					<mml:mi>&#x3B5;</mml:mi>
				</mml:msqrt>
			</mml:mrow>
		</mml:mfrac>
	</mml:mrow>
</mml:math>
</inline-formula><break/>
                <inline-formula id="ieqn-4">
<mml:math id="mml-ieqn-4">
	<mml:mrow>
		<mml:msub>
			<mml:mrow>
				<mml:mi>y</mml:mi>
			</mml:mrow>
			<mml:mrow>
				<mml:mi>i</mml:mi>
			</mml:mrow>
		</mml:msub>
		<mml:mo>=</mml:mo>
		<mml:mi>&#x3B3;</mml:mi>
		<mml:msub>
			<mml:mrow>
				<mml:mover accent="true">
					<mml:mrow>
						<mml:mi>x</mml:mi>
					</mml:mrow>
					<mml:mo stretchy="false">^</mml:mo>
				</mml:mover>
			</mml:mrow>
			<mml:mrow>
				<mml:mi>i</mml:mi>
			</mml:mrow>
		</mml:msub>
		<mml:mo>+</mml:mo>
		<mml:mi>&#x3B2;</mml:mi>
	</mml:mrow>
</mml:math>
</inline-formula>
              </td>
              <td align="center" valign="middle" style="border-bottom:solid thin">Function: Normalizes activations to stabilize training and accelerate convergence.</td>
            </tr>
            <tr>
              <td align="center" valign="middle" style="border-bottom:solid thin">MaxPooling</td>
              <td align="center" valign="middle" style="border-bottom:solid thin">
                <inline-formula id="ieqn-5">
<mml:math id="mml-ieqn-5">
	<mml:mrow>
		<mml:mi>Y</mml:mi>
		<mml:mfenced separators="|">
			<mml:mrow>
				<mml:mi>i</mml:mi>
				<mml:mo>,</mml:mo>
				<mml:mi>j</mml:mi>
			</mml:mrow>
		</mml:mfenced>
		<mml:mo>=</mml:mo>
		<mml:munderover>
			<mml:mo movablelimits="false">max</mml:mo>
			<mml:mrow>
				<mml:mi>m</mml:mi>
				<mml:mo>=</mml:mo>
				<mml:mn>0</mml:mn>
			</mml:mrow>
			<mml:mrow>
				<mml:mi>p</mml:mi>
				<mml:mo>&#x2212;</mml:mo>
				<mml:mn>1</mml:mn>
			</mml:mrow>
		</mml:munderover>
		<mml:mo>&#xA0;</mml:mo>
		<mml:munderover>
			<mml:mo movablelimits="false">max</mml:mo>
			<mml:mrow>
				<mml:mi>n</mml:mi>
				<mml:mo>=</mml:mo>
				<mml:mn>0</mml:mn>
			</mml:mrow>
			<mml:mrow>
				<mml:mi>p</mml:mi>
				<mml:mo>&#x2212;</mml:mo>
				<mml:mn>1</mml:mn>
			</mml:mrow>
		</mml:munderover>
		<mml:mo>&#xA0;</mml:mo>
		<mml:mi>X</mml:mi>
		<mml:mo stretchy="false">(</mml:mo>
		<mml:mi>i</mml:mi>
		<mml:mo>+</mml:mo>
		<mml:mi>m</mml:mi>
		<mml:mo>,</mml:mo>
		<mml:mi>j</mml:mi>
		<mml:mo>+</mml:mo>
		<mml:mi>n</mml:mi>
		<mml:mo stretchy="false">)</mml:mo>
	</mml:mrow>
</mml:math>
</inline-formula>
              </td>
              <td align="center" valign="middle" style="border-bottom:solid thin">Function: Reduces spatial dimensions while retaining essential features using the maximum value in a pooling window.</td>
            </tr>
            <tr>
              <td align="center" valign="middle" style="border-bottom:solid thin">Flatten layer</td>
              <td align="center" valign="middle" style="border-bottom:solid thin"><inline-formula id="ieqn-6">
<mml:math id="mml-ieqn-6">
	<mml:mrow>
		<mml:mi>F</mml:mi>
		<mml:mo>=</mml:mo>
		<mml:mi mathvariant="normal">F</mml:mi>
		<mml:mi mathvariant="normal">l</mml:mi>
		<mml:mi mathvariant="normal">a</mml:mi>
		<mml:mi mathvariant="normal">t</mml:mi>
		<mml:mi mathvariant="normal">t</mml:mi>
		<mml:mi mathvariant="normal">e</mml:mi>
		<mml:mi mathvariant="normal">n</mml:mi>
		<mml:mo>(</mml:mo>
		<mml:mi>Y</mml:mi>
		<mml:mo>)</mml:mo>
	</mml:mrow>
</mml:math>
</inline-formula></td>
              <td align="center" valign="middle" style="border-bottom:solid thin">Function: Converts multi-dimensional feature maps into a 1D vector for input into fully connected layers.</td>
            </tr>
            <tr>
              <td align="center" valign="middle" style="border-bottom:solid thin">Fully connected layer</td>
              <td align="center" valign="middle" style="border-bottom:solid thin">
                <inline-formula id="ieqn-7">
<mml:math id="mml-ieqn-7">
	<mml:mrow>
		<mml:mi>Z</mml:mi>
		<mml:mo>=</mml:mo>
		<mml:mi>W</mml:mi>
		<mml:mi>F</mml:mi>
		<mml:mo>+</mml:mo>
		<mml:mi>b</mml:mi>
	</mml:mrow>
</mml:math>
</inline-formula>
              </td>
              <td align="center" valign="middle" style="border-bottom:solid thin">Function: Learns complex feature relationships by connecting every neuron to all previous layer neurons.</td>
            </tr>
            <tr>
              <td align="center" valign="middle" style="border-bottom:solid thin">Dropout layer</td>
              <td align="center" valign="middle" style="border-bottom:solid thin">
                <inline-formula id="ieqn-8">
<mml:math id="mml-ieqn-8">
	<mml:mrow>
		<mml:msup>
			<mml:mrow>
				<mml:mi>Z</mml:mi>
			</mml:mrow>
			<mml:mrow>
				<mml:mo>&#x2032;</mml:mo>
			</mml:mrow>
		</mml:msup>
		<mml:mo>=</mml:mo>
		<mml:mi>M</mml:mi>
		<mml:mo>&#xB7;</mml:mo>
		<mml:mi>Z</mml:mi>
	</mml:mrow>
</mml:math>
</inline-formula>
              </td>
              <td align="center" valign="middle" style="border-bottom:solid thin">Function: Prevents overfitting by randomly deactivating a fraction of neurons during training.</td>
            </tr>
            <tr>
              <td align="center" valign="middle" style="border-bottom:solid thin">Softmax activation</td>
              <td align="center" valign="middle" style="border-bottom:solid thin">
                <inline-formula id="ieqn-9">
<mml:math id="mml-ieqn-9">
	<mml:mrow>
		<mml:mi>P</mml:mi>
		<mml:mfenced separators="|">
			<mml:mrow>
				<mml:msub>
					<mml:mrow>
						<mml:mi>y</mml:mi>
					</mml:mrow>
					<mml:mrow>
						<mml:mi>i</mml:mi>
					</mml:mrow>
				</mml:msub>
			</mml:mrow>
		</mml:mfenced>
		<mml:mo>=</mml:mo>
		<mml:mfrac>
			<mml:mrow>
				<mml:msup>
					<mml:mrow>
						<mml:mi>e</mml:mi>
					</mml:mrow>
					<mml:mrow>
						<mml:msub>
							<mml:mrow>
								<mml:mi>z</mml:mi>
							</mml:mrow>
							<mml:mrow>
								<mml:mi>i</mml:mi>
							</mml:mrow>
						</mml:msub>
					</mml:mrow>
				</mml:msup>
			</mml:mrow>
			<mml:mrow>
				<mml:mrow>
					<mml:msubsup>
						<mml:mo stretchy="false">&#x2211;</mml:mo>
						<mml:mrow>
							<mml:mi>j</mml:mi>
							<mml:mo>=</mml:mo>
							<mml:mn>1</mml:mn>
						</mml:mrow>
						<mml:mrow>
							<mml:mi>C</mml:mi>
						</mml:mrow>
					</mml:msubsup>
					<mml:mrow>
						<mml:msup>
							<mml:mrow>
								<mml:mi>e</mml:mi>
							</mml:mrow>
							<mml:mrow>
								<mml:msub>
									<mml:mrow>
										<mml:mi>z</mml:mi>
									</mml:mrow>
									<mml:mrow>
										<mml:mi>j</mml:mi>
									</mml:mrow>
								</mml:msub>
							</mml:mrow>
						</mml:msup>
					</mml:mrow>
				</mml:mrow>
			</mml:mrow>
		</mml:mfrac>
	</mml:mrow>
</mml:math>
</inline-formula>
              </td>
              <td align="center" valign="middle" style="border-bottom:solid thin">Function: Converts logits into class probabilities for multi-class classification.</td>
            </tr>
            <tr>
              <td align="center" valign="middle" style="border-bottom:solid thin">Global average pooling</td>
              <td align="center" valign="middle" style="border-bottom:solid thin">
                <inline-formula id="ieqn-10">
<mml:math id="mml-ieqn-10">
	<mml:mrow>
		<mml:msub>
			<mml:mrow>
				<mml:mi>G</mml:mi>
			</mml:mrow>
			<mml:mrow>
				<mml:mi>c</mml:mi>
			</mml:mrow>
		</mml:msub>
		<mml:mo>&#xA0;</mml:mo>
		<mml:mo>=</mml:mo>
		<mml:mo>&#xA0;</mml:mo>
		<mml:mfrac>
			<mml:mrow>
				<mml:mn>1</mml:mn>
			</mml:mrow>
			<mml:mrow>
				<mml:mi>H</mml:mi>
				<mml:mo>&#xA0;</mml:mo>
				<mml:mo>&#xD7;</mml:mo>
				<mml:mo>&#xA0;</mml:mo>
				<mml:mi>W</mml:mi>
			</mml:mrow>
		</mml:mfrac>
		<mml:mrow>
		<mml:mstyle displaystyle="true">
			<mml:munderover>
				<mml:mo stretchy="false">&#x2211;</mml:mo>
				<mml:mrow>
					<mml:mi>i</mml:mi>
					<mml:mo>=</mml:mo>
					<mml:mn>1</mml:mn>
				</mml:mrow>
				<mml:mrow>
					<mml:mi>H</mml:mi>
				</mml:mrow>
			</mml:munderover>
				</mml:mstyle>
			<mml:mrow>
				<mml:mrow>
				<mml:mstyle displaystyle="true">
					<mml:munderover>
						<mml:mo stretchy="false">&#x2211;</mml:mo>
						<mml:mrow>
							<mml:mi>j</mml:mi>
							<mml:mo>=</mml:mo>
							<mml:mn>1</mml:mn>
						</mml:mrow>
						<mml:mrow>
							<mml:mi>W</mml:mi>
						</mml:mrow>
					</mml:munderover>
					</mml:mstyle>
					<mml:mrow>
						<mml:mi>X</mml:mi>
						<mml:mo>(</mml:mo>
						<mml:mi>i</mml:mi>
						<mml:mo>,</mml:mo>
						<mml:mi>j</mml:mi>
						<mml:mo>,</mml:mo>
						<mml:mi>c</mml:mi>
						<mml:mo>)</mml:mo>
					</mml:mrow>
				</mml:mrow>
			</mml:mrow>
		</mml:mrow>
	</mml:mrow>
</mml:math>
</inline-formula>
              </td>
              <td align="center" valign="middle" style="border-bottom:solid thin">Function: Reduces feature maps to a single value per channel by computing the average, improving model generalization.</td>
            </tr>
            <tr>
              <td align="center" valign="middle" style="border-bottom:solid thin">Swin transformer</td>
              <td align="center" valign="middle" style="border-bottom:solid thin"><inline-formula id="ieqn-11">
<mml:math id="mml-ieqn-11">
	<mml:mrow>
		<mml:mi mathvariant="normal">A</mml:mi>
		<mml:mi mathvariant="normal">t</mml:mi>
		<mml:mi mathvariant="normal">t</mml:mi>
		<mml:mi mathvariant="normal">e</mml:mi>
		<mml:mi mathvariant="normal">n</mml:mi>
		<mml:mi mathvariant="normal">t</mml:mi>
		<mml:mi mathvariant="normal">i</mml:mi>
		<mml:mi mathvariant="normal">o</mml:mi>
		<mml:mi mathvariant="normal">n</mml:mi>
		<mml:mfenced separators="|">
			<mml:mrow>
				<mml:mi>Q</mml:mi>
				<mml:mo>,</mml:mo>
				<mml:mi>K</mml:mi>
				<mml:mo>,</mml:mo>
				<mml:mi>V</mml:mi>
			</mml:mrow>
		</mml:mfenced>
		<mml:mo>=</mml:mo>
		<mml:mi mathvariant="normal">s</mml:mi>
		<mml:mi mathvariant="normal">o</mml:mi>
		<mml:mi mathvariant="normal">f</mml:mi>
		<mml:mi mathvariant="normal">t</mml:mi>
		<mml:mi mathvariant="normal">m</mml:mi>
		<mml:mi mathvariant="normal">a</mml:mi>
		<mml:mi mathvariant="normal">x</mml:mi>
		<mml:mfenced separators="|">
			<mml:mrow>
				<mml:mfrac>
					<mml:mrow>
						<mml:mi>Q</mml:mi>
						<mml:msup>
							<mml:mrow>
								<mml:mi>K</mml:mi>
							</mml:mrow>
							<mml:mrow>
								<mml:mi>T</mml:mi>
							</mml:mrow>
						</mml:msup>
					</mml:mrow>
					<mml:mrow>
						<mml:msqrt>
							<mml:msub>
								<mml:mrow>
									<mml:mi>d</mml:mi>
								</mml:mrow>
								<mml:mrow>
									<mml:mi>K</mml:mi>
								</mml:mrow>
							</mml:msub>
						</mml:msqrt>
					</mml:mrow>
				</mml:mfrac>
			</mml:mrow>
		</mml:mfenced>
		<mml:mi>V</mml:mi>
	</mml:mrow>
</mml:math>
</inline-formula></td>
              <td align="center" valign="middle" style="border-bottom:solid thin">Function: Utilizes a hierarchical, shifted window-based self-attention mechanism to efficiently capture global and local feature relationships with reduced computational cost.</td>
            </tr>
            <tr>
              <td align="center" valign="middle" style="border-bottom:solid thin">Graph neural network</td>
              <td align="center" valign="middle" style="border-bottom:solid thin"><inline-formula id="ieqn-12">
<mml:math id="mml-ieqn-12">
	<mml:mrow>
		<mml:msup>
			<mml:mrow>
				<mml:mi>H</mml:mi>
			</mml:mrow>
			<mml:mrow>
				<mml:mo>(</mml:mo>
				<mml:mi>l</mml:mi>
				<mml:mo>+</mml:mo>
				<mml:mn>1</mml:mn>
				<mml:mo>)</mml:mo>
			</mml:mrow>
		</mml:msup>
		<mml:mo>=</mml:mo>
		<mml:mi>&#x3C3;</mml:mi>
		<mml:mfenced separators="|">
			<mml:mrow>
				<mml:msup>
					<mml:mrow>
						<mml:mi>D</mml:mi>
					</mml:mrow>
					<mml:mrow>
						<mml:mo>&#x2212;</mml:mo>
						<mml:mfrac>
							<mml:mrow>
								<mml:mn>1</mml:mn>
							</mml:mrow>
							<mml:mrow>
								<mml:mn>2</mml:mn>
							</mml:mrow>
						</mml:mfrac>
					</mml:mrow>
				</mml:msup>
				<mml:mi>A</mml:mi>
				<mml:msup>
					<mml:mrow>
						<mml:mi>D</mml:mi>
					</mml:mrow>
					<mml:mrow>
						<mml:mo>&#x2212;</mml:mo>
						<mml:mfrac>
							<mml:mrow>
								<mml:mn>1</mml:mn>
							</mml:mrow>
							<mml:mrow>
								<mml:mn>2</mml:mn>
							</mml:mrow>
						</mml:mfrac>
					</mml:mrow>
				</mml:msup>
				<mml:msup>
					<mml:mrow>
						<mml:mi>H</mml:mi>
					</mml:mrow>
					<mml:mrow>
						<mml:mfenced separators="|">
							<mml:mrow>
								<mml:mi>l</mml:mi>
							</mml:mrow>
						</mml:mfenced>
					</mml:mrow>
				</mml:msup>
				<mml:msup>
					<mml:mrow>
						<mml:mi>W</mml:mi>
					</mml:mrow>
					<mml:mrow>
						<mml:mfenced separators="|">
							<mml:mrow>
								<mml:mi>l</mml:mi>
							</mml:mrow>
						</mml:mfenced>
					</mml:mrow>
				</mml:msup>
			</mml:mrow>
		</mml:mfenced>
	</mml:mrow>
</mml:math>
</inline-formula></td>
              <td align="center" valign="middle" style="border-bottom:solid thin">Function: Models relational dependencies between feature embeddings using graph structures to enhance classification through improved contextual learning.</td>
            </tr>
          </tbody>
        </table>
      </table-wrap>
      <sec id="s3_1">
        <label>3.1</label>
        <title>Input Dataset</title>
        <p>The dataset for this study was gathered from Mendeley, an open-source platform [<xref ref-type="bibr" rid="ref-20">20</xref>]. The dataset contains a total of four classes: Aphid Disease, Blotch, Healthy Leaf, and Leaf Spot. <xref ref-type="fig" rid="fig-2">Fig. 2</xref> demonstrates details about various leaf diseases in turmeric plants. It provides details of the disease, categorizing them based on disease presence and health status, including the name of the disease, cause, symptoms, and effects for every class.</p>
        <fig id="fig-2">
          <label>Figure 2</label>
          <caption>
            <p>Input dataset.</p>
          </caption>
          <graphic mimetype="image" mime-subtype="tif" xlink:href="TSP_Phyton-94-73354-f002.tif"/>
        </fig>
        <p>The original dataset contains a total of 865 images and is further divided into four classes: Aphids Disease includes 221 images, Blotch contains 238 images, Healthy Leaf contains 213 images, and Leaf Spot contains 193 images. The distribution of images among classes of turmeric leaf disease is shown in <xref ref-type="fig" rid="fig-3">Fig. 3</xref>.</p>
        <fig id="fig-3">
          <label>Figure 3</label>
          <caption>
            <p>Distribution of images among classes of turmeric leaf disease.</p>
          </caption>
          <graphic mimetype="image" mime-subtype="tif" xlink:href="TSP_Phyton-94-73354-f003.tif"/>
        </fig>
      </sec>
      <sec id="s3_2">
        <label>3.2</label>
        <title>Preprocessing</title>
        <p>The preprocessing pipeline for turmeric leaf disease classification involves several crucial steps to ensure optimal model performance, as shown in <xref ref-type="fig" rid="fig-4">Fig. 4</xref>. Initially, images are loaded from the dataset, which consists of four classes: Aphids Disease, Blotch, Leaf Spot, and Healthy Leaf.</p>
        <p>After that, to maintain uniformity, images have been resized to 224 &#xD7; 224 pixels, as the earlier images had different dimensions.
        <disp-formula id="eqn-1">
          <label>(1)</label>
          <mml:math id="mml-eqn-1" display="block">
            <mml:mrow>
              <mml:msup>
                <mml:mrow>
                  <mml:mi>I</mml:mi>
                </mml:mrow>
                <mml:mrow>
                  <mml:mo>&#x2032;</mml:mo>
                </mml:mrow>
              </mml:msup>
              <mml:mo>=</mml:mo>
              <mml:mi mathvariant="normal">R</mml:mi>
              <mml:mi mathvariant="normal">e</mml:mi>
              <mml:mi mathvariant="normal">s</mml:mi>
              <mml:mi mathvariant="normal">i</mml:mi>
              <mml:mi mathvariant="normal">z</mml:mi>
              <mml:mi mathvariant="normal">e</mml:mi>
              <mml:mo>(</mml:mo>
              <mml:mi>I</mml:mi>
              <mml:mo>,</mml:mo>
              <mml:mfenced separators="|">
                <mml:mrow>
                  <mml:mn>224,224</mml:mn>
                </mml:mrow>
              </mml:mfenced>
              <mml:mo>)</mml:mo>
            </mml:mrow>
          </mml:math>
        </disp-formula>
        where <italic>I</italic> is the original image and <italic>I</italic>&#x2032; is the resized image. Then, normalization is performed using per-image standardization, where each pixel value is normalized by subtracting the image mean and dividing by the image standard deviation, resulting in a zero mean and unit variance, as illustrated in Eq. (2). Normalization helps to promote convergence during training.
        <disp-formula id="eqn-2">
          <label>(2)</label>
          <mml:math id="mml-eqn-2" display="block">
            <mml:mrow>
              <mml:msub>
                <mml:mrow>
                  <mml:mi>I</mml:mi>
                </mml:mrow>
                <mml:mrow>
                  <mml:mi mathvariant="normal">n</mml:mi>
                  <mml:mi mathvariant="normal">o</mml:mi>
                  <mml:mi mathvariant="normal">r</mml:mi>
                  <mml:mi mathvariant="normal">m</mml:mi>
                </mml:mrow>
              </mml:msub>
              <mml:mo>=</mml:mo>
              <mml:mfrac>
                <mml:mrow>
                  <mml:mi>I</mml:mi>
                  <mml:mo>&#x2212;</mml:mo>
                  <mml:mi>&#x3BC;</mml:mi>
                </mml:mrow>
                <mml:mrow>
                  <mml:mi>&#x3C3;</mml:mi>
                </mml:mrow>
              </mml:mfrac>
            </mml:mrow>
          </mml:math>
        </disp-formula>
        where <italic>I</italic> is the original image pixel values, <italic>&#x3BC;</italic> is the mean pixel value of the image, and <italic>&#x3C3;</italic> is the standard deviation of the image&#x2019;s pixel values. After that, the dataset is split into three subsets: training, testing, and validation, with 80%, 10%, and 10%, respectively, ensuring an effective distribution for model evaluation as shown in Eqs. (3)&#x2013;(5). The number of images in the training set is 693, and the number of images used for testing and validation is 86 each.
        <disp-formula id="eqn-3">
          <label>(3)</label>
          <mml:math id="mml-eqn-3" display="block">
            <mml:mrow>
              <mml:msub>
                <mml:mrow>
                  <mml:mi>N</mml:mi>
                </mml:mrow>
                <mml:mrow>
                  <mml:mi mathvariant="normal">v</mml:mi>
                  <mml:mi mathvariant="normal">a</mml:mi>
                  <mml:mi mathvariant="normal">l</mml:mi>
                </mml:mrow>
              </mml:msub>
              <mml:mo>=</mml:mo>
              <mml:mfrac>
                <mml:mrow>
                  <mml:mi>v</mml:mi>
                  <mml:mi>a</mml:mi>
                  <mml:mi>l</mml:mi>
                  <mml:mi>i</mml:mi>
                  <mml:mi>d</mml:mi>
                  <mml:mi>a</mml:mi>
                  <mml:mi>t</mml:mi>
                  <mml:mi>i</mml:mi>
                  <mml:mi>o</mml:mi>
                  <mml:mi>n</mml:mi>
                  <mml:mo>_</mml:mo>
                  <mml:mi>s</mml:mi>
                  <mml:mi>p</mml:mi>
                  <mml:mi>l</mml:mi>
                  <mml:mi>i</mml:mi>
                  <mml:mi>t</mml:mi>
                  <mml:mo>&#xA0;</mml:mo>
                  <mml:mo>&#xD7;</mml:mo>
                  <mml:mo>&#xA0;</mml:mo>
                  <mml:mi>N</mml:mi>
                </mml:mrow>
                <mml:mrow>
                  <mml:mn>2</mml:mn>
                </mml:mrow>
              </mml:mfrac>
              <mml:mo>=</mml:mo>
              <mml:mfrac>
                <mml:mrow>
                  <mml:mn>0.2</mml:mn>
                  <mml:mo>&#xA0;</mml:mo>
                  <mml:mo>&#xD7;</mml:mo>
                  <mml:mo>&#xA0;</mml:mo>
                  <mml:mi>N</mml:mi>
                </mml:mrow>
                <mml:mrow>
                  <mml:mn>2</mml:mn>
                </mml:mrow>
              </mml:mfrac>
              <mml:mo>=</mml:mo>
              <mml:mn>0.1</mml:mn>
              <mml:mo>&#xA0;</mml:mo>
              <mml:mo>&#xD7;</mml:mo>
              <mml:mo>&#xA0;</mml:mo>
              <mml:mi>N</mml:mi>
            </mml:mrow>
          </mml:math>
        </disp-formula>
        <disp-formula id="eqn-4">
          <label>(4)</label>
          <mml:math id="mml-eqn-4" display="block">
            <mml:mrow>
              <mml:msub>
                <mml:mrow>
                  <mml:mi>N</mml:mi>
                </mml:mrow>
                <mml:mrow>
                  <mml:mi mathvariant="normal">t</mml:mi>
                  <mml:mi mathvariant="normal">e</mml:mi>
                  <mml:mi mathvariant="normal">s</mml:mi>
                  <mml:mi mathvariant="normal">t</mml:mi>
                </mml:mrow>
              </mml:msub>
              <mml:mo>=</mml:mo>
              <mml:mfrac>
                <mml:mrow>
                  <mml:mi>v</mml:mi>
                  <mml:mi>a</mml:mi>
                  <mml:mi>l</mml:mi>
                  <mml:mi>i</mml:mi>
                  <mml:mi>d</mml:mi>
                  <mml:mi>a</mml:mi>
                  <mml:mi>t</mml:mi>
                  <mml:mi>i</mml:mi>
                  <mml:mi>o</mml:mi>
                  <mml:mi>n</mml:mi>
                  <mml:mo>_</mml:mo>
                  <mml:mi>s</mml:mi>
                  <mml:mi>p</mml:mi>
                  <mml:mi>l</mml:mi>
                  <mml:mi>i</mml:mi>
                  <mml:mi>t</mml:mi>
                  <mml:mo>&#xA0;</mml:mo>
                  <mml:mo>&#xD7;</mml:mo>
                  <mml:mo>&#xA0;</mml:mo>
                  <mml:mi>N</mml:mi>
                </mml:mrow>
                <mml:mrow>
                  <mml:mn>2</mml:mn>
                </mml:mrow>
              </mml:mfrac>
              <mml:mo>=</mml:mo>
              <mml:mfrac>
                <mml:mrow>
                  <mml:mn>0.2</mml:mn>
                  <mml:mo>&#xA0;</mml:mo>
                  <mml:mo>&#xD7;</mml:mo>
                  <mml:mo>&#xA0;</mml:mo>
                  <mml:mi>N</mml:mi>
                </mml:mrow>
                <mml:mrow>
                  <mml:mn>2</mml:mn>
                </mml:mrow>
              </mml:mfrac>
              <mml:mo>=</mml:mo>
              <mml:mn>0.1</mml:mn>
              <mml:mo>&#xA0;</mml:mo>
              <mml:mo>&#xD7;</mml:mo>
              <mml:mo>&#xA0;</mml:mo>
              <mml:mi>N</mml:mi>
            </mml:mrow>
          </mml:math>
        </disp-formula>
        <disp-formula id="eqn-5">
          <label>(5)</label>
          <mml:math id="mml-eqn-5" display="block">
            <mml:mrow>
              <mml:msub>
                <mml:mrow>
                  <mml:mi>N</mml:mi>
                </mml:mrow>
                <mml:mrow>
                  <mml:mi mathvariant="normal">t</mml:mi>
                  <mml:mi mathvariant="normal">e</mml:mi>
                  <mml:mi mathvariant="normal">s</mml:mi>
                  <mml:mi mathvariant="normal">t</mml:mi>
                </mml:mrow>
              </mml:msub>
              <mml:mo>=</mml:mo>
              <mml:mfrac>
                <mml:mrow>
                  <mml:mi>v</mml:mi>
                  <mml:mi>a</mml:mi>
                  <mml:mi>l</mml:mi>
                  <mml:mi>i</mml:mi>
                  <mml:mi>d</mml:mi>
                  <mml:mi>a</mml:mi>
                  <mml:mi>t</mml:mi>
                  <mml:mi>i</mml:mi>
                  <mml:mi>o</mml:mi>
                  <mml:mi>n</mml:mi>
                  <mml:mo>_</mml:mo>
                  <mml:mi>s</mml:mi>
                  <mml:mi>p</mml:mi>
                  <mml:mi>l</mml:mi>
                  <mml:mi>i</mml:mi>
                  <mml:mi>t</mml:mi>
                  <mml:mo>&#xA0;</mml:mo>
                  <mml:mo>&#xD7;</mml:mo>
                  <mml:mo>&#xA0;</mml:mo>
                  <mml:mi>N</mml:mi>
                </mml:mrow>
                <mml:mrow>
                  <mml:mn>2</mml:mn>
                </mml:mrow>
              </mml:mfrac>
              <mml:mo>=</mml:mo>
              <mml:mfrac>
                <mml:mrow>
                  <mml:mn>0.2</mml:mn>
                  <mml:mo>&#xA0;</mml:mo>
                  <mml:mo>&#xD7;</mml:mo>
                  <mml:mo>&#xA0;</mml:mo>
                  <mml:mi>N</mml:mi>
                </mml:mrow>
                <mml:mrow>
                  <mml:mn>2</mml:mn>
                </mml:mrow>
              </mml:mfrac>
              <mml:mo>=</mml:mo>
              <mml:mn>0.1</mml:mn>
              <mml:mo>&#xA0;</mml:mo>
              <mml:mo>&#xD7;</mml:mo>
              <mml:mo>&#xA0;</mml:mo>
              <mml:mi>N</mml:mi>
            </mml:mrow>
          </mml:math>
        </disp-formula>
        where <italic>N</italic> is the total number of images in the dataset. For this study, N = 865, resulting in <inline-formula id="ieqn-13">
<mml:math id="mml-ieqn-13">
	<mml:mrow>
		<mml:msub>
			<mml:mrow>
				<mml:mi>N</mml:mi>
			</mml:mrow>
			<mml:mrow>
				<mml:mi mathvariant="normal">t</mml:mi>
				<mml:mi mathvariant="normal">r</mml:mi>
				<mml:mi mathvariant="normal">a</mml:mi>
				<mml:mi mathvariant="normal">i</mml:mi>
				<mml:mi mathvariant="normal">n</mml:mi>
			</mml:mrow>
		</mml:msub>
	</mml:mrow>
</mml:math>
</inline-formula> = 693, <inline-formula id="ieqn-14">
<mml:math id="mml-ieqn-14">
	<mml:mrow>
		<mml:msub>
			<mml:mrow>
				<mml:mi>N</mml:mi>
			</mml:mrow>
			<mml:mrow>
				<mml:mi mathvariant="normal">v</mml:mi>
				<mml:mi mathvariant="normal">a</mml:mi>
				<mml:mi mathvariant="normal">l</mml:mi>
			</mml:mrow>
		</mml:msub>
	</mml:mrow>
</mml:math>
</inline-formula> = 86 and <inline-formula id="ieqn-15">
<mml:math id="mml-ieqn-15">
	<mml:mrow>
		<mml:msub>
			<mml:mrow>
				<mml:mi>N</mml:mi>
			</mml:mrow>
			<mml:mrow>
				<mml:mi mathvariant="normal">t</mml:mi>
				<mml:mi mathvariant="normal">e</mml:mi>
				<mml:mi mathvariant="normal">s</mml:mi>
				<mml:mi mathvariant="normal">t</mml:mi>
			</mml:mrow>
		</mml:msub>
	</mml:mrow>
</mml:math>
</inline-formula> = 86. Then, data augmentation techniques are applied to the training set: random rotation as shown in Eq. (6), random zoom as shown in Eq. (7), random horizontal flipping as shown in Eq. (8), random brightness as shown in Eq. (9), and random translation as shown in Eq. (10) are employed to enhance dataset diversity and mitigate overfitting by artificially increasing the number of training samples.
        <disp-formula id="eqn-6">
          <label>(6)</label>
          <mml:math id="mml-eqn-6" display="block">
            <mml:mrow>
              <mml:msup>
                <mml:mrow>
                  <mml:mi>I</mml:mi>
                </mml:mrow>
                <mml:mrow>
                  <mml:mo>&#x2032;</mml:mo>
                </mml:mrow>
              </mml:msup>
              <mml:mo>=</mml:mo>
              <mml:mi>R</mml:mi>
              <mml:mi>o</mml:mi>
              <mml:mi>t</mml:mi>
              <mml:mi>a</mml:mi>
              <mml:mi>t</mml:mi>
              <mml:mi>e</mml:mi>
              <mml:mo>(</mml:mo>
              <mml:mi>I</mml:mi>
              <mml:mo>,</mml:mo>
              <mml:mi>&#x3B8;</mml:mi>
              <mml:mo>)</mml:mo>
            </mml:mrow>
          </mml:math>
        </disp-formula>
        <disp-formula id="eqn-7">
          <label>(7)</label>
          <mml:math id="mml-eqn-7" display="block">
            <mml:mrow>
              <mml:msup>
                <mml:mrow>
                  <mml:mi>I</mml:mi>
                </mml:mrow>
                <mml:mrow>
                  <mml:mo>&#x2032;</mml:mo>
                </mml:mrow>
              </mml:msup>
              <mml:mo>=</mml:mo>
              <mml:mi>Z</mml:mi>
              <mml:mi>o</mml:mi>
              <mml:mi>o</mml:mi>
              <mml:mi>m</mml:mi>
              <mml:mo>(</mml:mo>
              <mml:mi>I</mml:mi>
              <mml:mo>,</mml:mo>
              <mml:mi>z</mml:mi>
              <mml:mo>)</mml:mo>
            </mml:mrow>
          </mml:math>
        </disp-formula>
        <disp-formula id="eqn-8">
          <label>(8)</label>
          <mml:math id="mml-eqn-8" display="block">
            <mml:mrow>
              <mml:msup>
                <mml:mrow>
                  <mml:mi>I</mml:mi>
                </mml:mrow>
                <mml:mrow>
                  <mml:mo>&#x2032;</mml:mo>
                </mml:mrow>
              </mml:msup>
              <mml:mo>=</mml:mo>
              <mml:mi>F</mml:mi>
              <mml:mi>l</mml:mi>
              <mml:mi>i</mml:mi>
              <mml:mi>p</mml:mi>
              <mml:mo>(</mml:mo>
              <mml:mi>I</mml:mi>
              <mml:mo>,</mml:mo>
              <mml:mi>d</mml:mi>
              <mml:mo>)</mml:mo>
            </mml:mrow>
          </mml:math>
        </disp-formula>
        <disp-formula id="eqn-9">
          <label>(9)</label>
          <mml:math id="mml-eqn-9" display="block">
            <mml:mrow>
              <mml:msup>
                <mml:mrow>
                  <mml:mi>I</mml:mi>
                </mml:mrow>
                <mml:mrow>
                  <mml:mo>&#x2032;</mml:mo>
                </mml:mrow>
              </mml:msup>
              <mml:mo>=</mml:mo>
              <mml:mi>B</mml:mi>
              <mml:mi>r</mml:mi>
              <mml:mi>i</mml:mi>
              <mml:mi>g</mml:mi>
              <mml:mi>h</mml:mi>
              <mml:mi>t</mml:mi>
              <mml:mi>n</mml:mi>
              <mml:mi>e</mml:mi>
              <mml:mi>s</mml:mi>
              <mml:mi>s</mml:mi>
              <mml:mo>(</mml:mo>
              <mml:mi>I</mml:mi>
              <mml:mo>,</mml:mo>
              <mml:mi>&#x3B2;</mml:mi>
              <mml:mo>)</mml:mo>
            </mml:mrow>
          </mml:math>
        </disp-formula>
        <disp-formula id="eqn-10">
          <label>(10)</label>
          <mml:math id="mml-eqn-10" display="block">
            <mml:mrow>
              <mml:msup>
                <mml:mrow>
                  <mml:mi>I</mml:mi>
                </mml:mrow>
                <mml:mrow>
                  <mml:mo>&#x2032;</mml:mo>
                </mml:mrow>
              </mml:msup>
              <mml:mo>=</mml:mo>
              <mml:mi>T</mml:mi>
              <mml:mi>r</mml:mi>
              <mml:mi>a</mml:mi>
              <mml:mi>n</mml:mi>
              <mml:mi>s</mml:mi>
              <mml:mi>l</mml:mi>
              <mml:mi>a</mml:mi>
              <mml:mi>t</mml:mi>
              <mml:mi>e</mml:mi>
              <mml:mo>(</mml:mo>
              <mml:mi>I</mml:mi>
              <mml:mo>,</mml:mo>
              <mml:msub>
                <mml:mrow>
                  <mml:mi>t</mml:mi>
                </mml:mrow>
                <mml:mrow>
                  <mml:mi>x</mml:mi>
                </mml:mrow>
              </mml:msub>
              <mml:mo>,</mml:mo>
              <mml:msub>
                <mml:mrow>
                  <mml:mi>t</mml:mi>
                </mml:mrow>
                <mml:mrow>
                  <mml:mi>y</mml:mi>
                </mml:mrow>
              </mml:msub>
              <mml:mo>)</mml:mo>
            </mml:mrow>
          </mml:math>
        </disp-formula>
        where <inline-formula id="ieqn-16">
<mml:math id="mml-ieqn-16">
	<mml:mrow>
		<mml:mi>&#x3B8;</mml:mi>
	</mml:mrow>
</mml:math>
</inline-formula> is the rotation angle (in degrees), <italic>z</italic> is the zoom factor, <italic>d</italic> is the flip direction, <italic>&#x3B2;</italic> refers to the brightness adjustment factor, and <inline-formula id="ieqn-17">
<mml:math id="mml-ieqn-17">
	<mml:mrow>
		<mml:msub>
			<mml:mrow>
				<mml:mi>t</mml:mi>
			</mml:mrow>
			<mml:mrow>
				<mml:mi>x</mml:mi>
			</mml:mrow>
		</mml:msub>
		<mml:mo>,</mml:mo>
		<mml:mo>&#xA0;</mml:mo>
		<mml:msub>
			<mml:mrow>
				<mml:mi>t</mml:mi>
			</mml:mrow>
			<mml:mrow>
				<mml:mi>y</mml:mi>
			</mml:mrow>
		</mml:msub>
	</mml:mrow>
</mml:math>
</inline-formula> are the horizontal and vertical translation offsets, respectively.</p>
        <fig id="fig-4">
          <label>Figure 4</label>
          <caption>
            <p>Flowchart for the preprocessing steps.</p>
          </caption>
          <graphic mimetype="image" mime-subtype="tif" xlink:href="TSP_Phyton-94-73354-f004.tif"/>
        </fig>
        <p>From a cybersecurity standpoint, the preprocessing pipeline and dataset handling are designed to ensure integrity and confidentiality of agricultural data. In real-world deployment, such systems can be enhanced with blockchain-based verification for dataset authenticity and access control mechanisms to prevent unauthorized modifications. Secure hash algorithms and cryptographically signed model checkpoints can further safeguard data and model integrity throughout the training lifecycle.</p>
      </sec>
      <sec id="s3_3">
        <label>3.3</label>
        <title>Selection of the Best Transfer Learning Model</title>
        <p>DenseNet121 has been selected because it is an efficient model with good feature propagation and lower computation requirements compared to other deep models, as shown in <xref ref-type="table" rid="table-2">Table 2</xref>. As opposed to regular deep networks, DenseNet121 incorporates dense connectivity where every layer gets feature maps from all the preceding layers. This architecture enhances the gradient flow, minimizes vanishing gradients, and facilitates feature reuse, improving performance with fewer parameters. DenseNet121 contains merely 8 million parameters, far fewer than VGG16 (138M) and ResNet50 (25.6M), yet it achieves comparable top-1 accuracy at 74.9%. Therefore, it is an excellent pick for computational cost without affecting the accuracy. Besides, its FLOPs (2.9 GFLOPs) are dramatically less than those of ResNet50 (4.1 GFLOPs) and InceptionV3 (5.7 GFLOPs), and therefore it can be deployed in actual applications, including edge and mobile devices.</p>
        <p>Additionally, DenseNet121 is optimized for feature extraction in transfer learning, making it generalize effectively across various datasets with minimal training data. With these benefits, DenseNet121 is the best option for turmeric leaf disease classification, striking a balance between high accuracy, low parameter numbers, and computational efficiency over other deep learning models. Although <xref ref-type="table" rid="table-3">Table 3</xref> shows that EfficientNet-B0 achieves a slightly higher top-1 accuracy (77.1%) compared to DenseNet121 (74.9%) on the ImageNet benchmark, the choice of DenseNet121 as the primary backbone in this study was made based on practical performance, architectural compatibility, and feature connectivity considerations. EfficientNet-B0, despite being parameter-efficient, employs a compound scaling mechanism optimized for balanced depth, width, and resolution. However, it may underperform when fine-tuned on small, domain-specific datasets with limited image diversity, such as turmeric leaf images.</p>
        <p>In contrast, DenseNet121&#x2019;s dense connectivity pattern&#x2014;where each layer receives feature maps from all preceding layers&#x2014;enhances feature reuse, gradient flow, and stability during fine-tuning, which are crucial for medical and agricultural datasets characterized by subtle inter-class variations. Moreover, DenseNet121 integrates seamlessly with hierarchical and relational modules like the Swin Transformer and GNN, as it outputs a structured feature tensor (B, 1024, 7, 7) that can be readily reshaped into patch embeddings without extensive re-parameterization. Preliminary experiments also revealed that EfficientNet-B0, though computationally lightweight, produced less discriminative features for visually similar turmeric leaf classes, resulting in approximately 2&#x2013;3% lower validation accuracy than DenseNet121 on the same dataset. Therefore, DenseNet121 was selected as the optimal trade-off between feature richness, stability, and integration flexibility, ensuring robust downstream performance in the proposed DenseSwinGNNNet architecture.</p>
        <table-wrap id="table-3">
          <label>Table 3</label>
          <caption>
            <p>Comparison of various transfer learning models.</p>
          </caption>
          <table>
            <thead>
              <tr>
                <th align="left" valign="middle" style="border-bottom:solid thin;border-top:solid thin">Model</th>
                <th align="left" valign="middle" style="border-bottom:solid thin;border-top:solid thin">Top-1 Accuracy (%)</th>
                <th align="left" valign="middle" style="border-bottom:solid thin;border-top:solid thin">Top-5 Accuracy (%)</th>
                <th align="left" valign="middle" style="border-bottom:solid thin;border-top:solid thin">Parameters (Million)</th>
                <th align="left" valign="middle" style="border-bottom:solid thin;border-top:solid thin">FLOPs (GFLOPs)</th>
                <th align="left" valign="middle" style="border-bottom:solid thin;border-top:solid thin">Depth</th>
              </tr>
            </thead>
            <tbody>
              <tr>
                <td align="left" valign="middle">
                  <bold>AlexNet</bold>
                </td>
                <td align="left" valign="middle">57.2</td>
                <td align="left" valign="middle">80.3</td>
                <td align="left" valign="middle">61</td>
                <td align="left" valign="middle">1.5</td>
                <td align="left" valign="middle">8</td>
              </tr>
              <tr>
                <td align="left" valign="middle">
                  <bold>VGG16</bold>
                </td>
                <td align="left" valign="middle">71.5</td>
                <td align="left" valign="middle">89.8</td>
                <td align="left" valign="middle">138</td>
                <td align="left" valign="middle">15.5</td>
                <td align="left" valign="middle">16</td>
              </tr>
              <tr>
                <td align="left" valign="middle">
                  <bold>ResNet50</bold>
                </td>
                <td align="left" valign="middle">76.5</td>
                <td align="left" valign="middle">93.1</td>
                <td align="left" valign="middle">25.6</td>
                <td align="left" valign="middle">4.1</td>
                <td align="left" valign="middle">50</td>
              </tr>
              <tr>
                <td align="left" valign="middle">
                  <bold>InceptionV3</bold>
                </td>
                <td align="left" valign="middle">77.9</td>
                <td align="left" valign="middle">93.7</td>
                <td align="left" valign="middle">23.8</td>
                <td align="left" valign="middle">5.7</td>
                <td align="left" valign="middle">-</td>
              </tr>
              <tr>
                <td align="left" valign="middle">
                  <bold>DenseNet121</bold>
                </td>
                <td align="left" valign="middle">74.9</td>
                <td align="left" valign="middle">92.3</td>
                <td align="left" valign="middle">8.0</td>
                <td align="left" valign="middle">2.9</td>
                <td align="left" valign="middle">121</td>
              </tr>
              <tr>
                <td align="left" valign="middle">
                  <bold>EfficientNet-B0</bold>
                </td>
                <td align="left" valign="middle">77.1</td>
                <td align="left" valign="middle">93.3</td>
                <td align="left" valign="middle">5.3</td>
                <td align="left" valign="middle">0.39</td>
                <td align="left" valign="middle">-</td>
              </tr>
              <tr>
                <td align="left" valign="middle" style="border-bottom:solid thin">
                  <bold>EfficientNet-B4</bold>
                </td>
                <td align="left" valign="middle" style="border-bottom:solid thin">82.6</td>
                <td align="left" valign="middle" style="border-bottom:solid thin">96.4</td>
                <td align="left" valign="middle" style="border-bottom:solid thin">19.3</td>
                <td align="left" valign="middle" style="border-bottom:solid thin">4.2</td>
                <td align="left" valign="middle" style="border-bottom:solid thin">-</td>
              </tr>
            </tbody>
          </table>
        </table-wrap>
      </sec>
      <sec id="s3_4">
        <label>3.4</label>
        <title>Proposed DenseSwinGNNNet Model</title>
        <p>The proposed DenseSwinGNNNet architecture for classification of the turmeric leaf disease utilizes Swin Transformer and GNNs to improve feature learning and disease classification. The architecture shown in <xref ref-type="fig" rid="fig-5">Fig. 5</xref> is designed in a pipelined architecture, beginning with input images of turmeric leaves affected by various diseases. Preprocessing includes resizing, normalization, division of the dataset, and data augmentation, which are used to aid in the model&#x2019;s generalization. The core of the model consists of two main components: Swin Transformer and GNN. The Swin Transformer begins with patch partitioning, whereby the input image is divided into patches of reduced size. These patches proceed through four hierarchical levels of Swin Transformer Blocks, which perform feature extraction based on self-attention while maintaining spatial relationships efficiently. </p>
        <fig id="fig-5">
          <label>Figure 5</label>
          <caption>
            <p>Architecture of proposed DenseSwinGNNNet.</p>
          </caption>
          <graphic mimetype="image" mime-subtype="tif" xlink:href="TSP_Phyton-94-73354-f005.tif"/>
        </fig>
        <p>Patch merging occurs in each of these steps to reduce resolution while enhancing feature representation. Global average pooling is carried out before feeding the extracted features to the classification network. GNN processes extracted deep features by building a graph representation of image features. The step of graph construction transforms the extracted features into nodes, with edges representing the connections between various regions of the image. The message-passing unit within the GNN updates feature embeddings by aggregating information from neighboring nodes, enabling the representation of intricate spatial and relational patterns in turmeric leaf diseases. A dense transformation layer further updates the feature representation before classification. The last classification layers utilize batch normalization, dropout (0.3 regularization), and ReLU activation functions to add learning stability. The output goes through a Softmax activation function that provides the probabilities for the four classes: Aphids Disease, Blotch, Leaf Spot, and Healthy Leaf. The output images indicate successful classification with varied forms of diseased and healthy leaves. By integrating Swin Transformer&#x2019;s self-attention mechanism with the structured learning ability of Graph Neural Networks, DenseSwinGNNNet ensures highly discriminative feature learning, robust disease classification, and reduced misclassification risks. The dual approach enhances the accuracy and robustness of the model, achieving a promising deep learning solution for turmeric leaf disease diagnosis in precision farming.</p>
        <sec id="s3_4_1">
          <label>3.4.1</label>
          <title>Fine-Tuned DenseNet121 Model</title>
          <p>The fine-tuned DenseNet121 model is based on a densely connected convolutional neural network architecture that seeks to maximize feature propagation and reuse. The model in <xref ref-type="fig" rid="fig-6">Fig. 6</xref> begins with a 7 &#xD7; 7 convolution layer to extract low-level features, and then a 3 &#xD7; 3 max-pooling layer to downsample the spatial dimensions while preserving essential patterns. The network consists of four Dense Blocks, each containing multiple convolutional layers (6, 12, 24, and 16 convolutional blocks, respectively). Each layer receives inputs from all the previous layers, promoting effective feature learning. Transition Layers between Dense Blocks are used to downsample using 1 &#xD7; 1 convolutions and average pooling to achieve a balance between feature extraction and computational cost. After the final Dense Block, Global Average Pooling reduces the size of the feature map without sacrificing significant information. The fully connected layers consist of a dense layer of 512 units, a dropout layer (0.5 probability) to prevent overfitting, and a final dense layer of 4 units for the four turmeric leaf disease classes. It is fine-tuned to enhance classification accuracy using pretrained weights when fitted to the turmeric leaf disease dataset. The model balances computational complexity, feature reuse, and generalization well, making it highly suitable for image classification.</p>
          <fig id="fig-6">
            <label>Figure 6</label>
            <caption>
              <p>Architecture of fine-tuned DenseNet121.</p>
            </caption>
            <graphic mimetype="image" mime-subtype="tif" xlink:href="TSP_Phyton-94-73354-f006.tif"/>
          </fig>
        </sec>
        <sec id="s3_4_2">
          <label>3.4.2</label>
          <title>Swin Transformer</title>
          <p>Swin Transformer is a deep learning model that can process high-resolution images efficiently by capturing both local and global context information. In contrast to traditional Vision Transformers that perform self-attention on the whole image in quadratic computational complexity, the Swin Transformer introduces window-based attention for linear complexity while retaining strong feature extraction capacity. The architecture has a hierarchical pattern that increasingly fuses patches and improves representations, making it very effective for image classification problems, such as the detection of turmeric leaf disease.</p>
          <p>The initial step in the Swin Transformer pipeline shown in <xref ref-type="fig" rid="fig-7">Fig. 7</xref> is patch partitioning, where the input image is divided into small, non-overlapping patches. Partitioning the image in this way allows the model to process it in a structured manner. The patches are embedded into a high-dimensional space by a linear embedding layer, transforming the raw pixel values into feature vectors that the transformer layers can process.</p>
          <fig id="fig-7">
            <label>Figure 7</label>
            <caption>
              <p>Architecture of the Swin Transformer.</p>
            </caption>
            <graphic mimetype="image" mime-subtype="tif" xlink:href="TSP_Phyton-94-73354-f007.tif"/>
          </fig>
          <p>Mathematically, if an input image <italic>X</italic> of size <italic>H</italic> &#xD7; <italic>W</italic> &#xD7; <italic>C</italic> (height, width, and channels) is divided into patches of size <italic>P</italic> &#xD7; <italic>P</italic>, the number of patches <italic>N</italic> is represented by Eq. (11):</p>
          <disp-formula id="eqn-11">
            <label>(11)</label>
            <mml:math id="mml-eqn-11" display="block">
              <mml:mrow>
                <mml:mi>N</mml:mi>
                <mml:mo>=</mml:mo>
                <mml:mfrac>
                  <mml:mrow>
                    <mml:mi>H</mml:mi>
                  </mml:mrow>
                  <mml:mrow>
                    <mml:mi>P</mml:mi>
                  </mml:mrow>
                </mml:mfrac>
                <mml:mo>&#xD7;</mml:mo>
                <mml:mfrac>
                  <mml:mrow>
                    <mml:mi>W</mml:mi>
                  </mml:mrow>
                  <mml:mrow>
                    <mml:mi>P</mml:mi>
                  </mml:mrow>
                </mml:mfrac>
              </mml:mrow>
            </mml:math>
          </disp-formula>
          <p>Each patch is then projected into a feature space of dimension <italic>D</italic>, forming the initial feature representation in Eq. (12):
          <disp-formula id="eqn-12">
            <label>(12)</label>
            <mml:math id="mml-eqn-12" display="block">
              <mml:mrow>
                <mml:msub>
                  <mml:mrow>
                    <mml:mi>Z</mml:mi>
                  </mml:mrow>
                  <mml:mrow>
                    <mml:mn>0</mml:mn>
                  </mml:mrow>
                </mml:msub>
                <mml:mo>=</mml:mo>
                <mml:mi>f</mml:mi>
                <mml:mo>(</mml:mo>
                <mml:mi>X</mml:mi>
                <mml:mo>)</mml:mo>
              </mml:mrow>
            </mml:math>
          </disp-formula>
          where <italic>f</italic>(.) represents the patch embedding function. Instead of computing self-attention over the whole space, the Swin Transformer adopts Window-Based Multi-Head Self-Attention (W-MSA). The process divides the feature map into non-overlapping windows and computes self-attention for each window separately, reducing computational complexity. The attention mechanism employs the regular scaled dot-product attention formula as presented in Eq. (13):
          <disp-formula id="eqn-13">
            <label>(13)</label>
            <mml:math id="mml-eqn-13" display="block">
              <mml:mrow>
                <mml:mi mathvariant="normal">A</mml:mi>
                <mml:mi mathvariant="normal">t</mml:mi>
                <mml:mi mathvariant="normal">t</mml:mi>
                <mml:mi mathvariant="normal">e</mml:mi>
                <mml:mi mathvariant="normal">n</mml:mi>
                <mml:mi mathvariant="normal">t</mml:mi>
                <mml:mi mathvariant="normal">i</mml:mi>
                <mml:mi mathvariant="normal">o</mml:mi>
                <mml:mi mathvariant="normal">n</mml:mi>
                <mml:mfenced separators="|">
                  <mml:mrow>
                    <mml:mi>Q</mml:mi>
                    <mml:mo>,</mml:mo>
                    <mml:mi>K</mml:mi>
                    <mml:mo>,</mml:mo>
                    <mml:mi>V</mml:mi>
                  </mml:mrow>
                </mml:mfenced>
                <mml:mo>=</mml:mo>
                <mml:mi mathvariant="normal">s</mml:mi>
                <mml:mi mathvariant="normal">o</mml:mi>
                <mml:mi mathvariant="normal">f</mml:mi>
                <mml:mi mathvariant="normal">t</mml:mi>
                <mml:mi mathvariant="normal">m</mml:mi>
                <mml:mi mathvariant="normal">a</mml:mi>
                <mml:mi mathvariant="normal">x</mml:mi>
                <mml:mfenced separators="|">
                  <mml:mrow>
                    <mml:mfrac>
                      <mml:mrow>
                        <mml:mi>Q</mml:mi>
                        <mml:msup>
                          <mml:mrow>
                            <mml:mi>K</mml:mi>
                          </mml:mrow>
                          <mml:mrow>
                            <mml:mi>T</mml:mi>
                          </mml:mrow>
                        </mml:msup>
                      </mml:mrow>
                      <mml:mrow>
                        <mml:msqrt>
                          <mml:msub>
                            <mml:mrow>
                              <mml:mi>d</mml:mi>
                            </mml:mrow>
                            <mml:mrow>
                              <mml:mi>k</mml:mi>
                            </mml:mrow>
                          </mml:msub>
                        </mml:msqrt>
                      </mml:mrow>
                    </mml:mfrac>
                  </mml:mrow>
                </mml:mfenced>
                <mml:mi>V</mml:mi>
              </mml:mrow>
            </mml:math>
          </disp-formula>
          where <italic>Q</italic>, <italic>K</italic>, <italic>V</italic> &#x2208; <italic>R</italic><sup>M&#xD7;d</sup> represent the query, key, and value matrices for each window of size <italic>M</italic>, and <italic>d<sub>k</sub></italic> is the dimension of the key vector. The softmax operation ensures that the attention scores are normalized across all tokens within a window. To provide cross-window interaction without compromising computational efficiency, the Swin Transformer proposes shifted window attention. Between alternating layers, window partitioning is shifted by a constant offset, allowing information to propagate across adjacent regions. This technique is expressed in Eqs. (14) and (15):
          <disp-formula id="eqn-14">
            <label>(14)</label>
            <mml:math id="mml-eqn-14" display="block">
              <mml:mrow>
                <mml:msub>
                  <mml:mrow>
                    <mml:mover accent="true">
                      <mml:mrow>
                        <mml:mi>Z</mml:mi>
                      </mml:mrow>
                      <mml:mo stretchy="false">^</mml:mo>
                    </mml:mover>
                  </mml:mrow>
                  <mml:mrow>
                    <mml:mi>l</mml:mi>
                  </mml:mrow>
                </mml:msub>
                <mml:mo>=</mml:mo>  
                <mml:mtext>W-MSA</mml:mtext>
                <mml:mfenced separators="|">
                  <mml:mrow>
                    <mml:mi>L</mml:mi>
                    <mml:mi>N</mml:mi>
                    <mml:mfenced separators="|">
                      <mml:mrow>
                        <mml:msub>
                          <mml:mrow>
                            <mml:mi>Z</mml:mi>
                          </mml:mrow>
                          <mml:mrow>
                            <mml:mi>l</mml:mi>
                            <mml:mo>&#x2212;</mml:mo>
                            <mml:mn>1</mml:mn>
                          </mml:mrow>
                        </mml:msub>
                      </mml:mrow>
                    </mml:mfenced>
                  </mml:mrow>
                </mml:mfenced>
                <mml:mo>+</mml:mo>
                <mml:msub>
                  <mml:mrow>
                    <mml:mi>Z</mml:mi>
                  </mml:mrow>
                  <mml:mrow>
                    <mml:mi>l</mml:mi>
                    <mml:mo>&#x2212;</mml:mo>
                    <mml:mn>1</mml:mn>
                  </mml:mrow>
                </mml:msub>
              </mml:mrow>
            </mml:math>
          </disp-formula>
          <disp-formula id="eqn-15">
            <label>(15)</label>
            <mml:math id="mml-eqn-15" display="block">
              <mml:mrow>
                <mml:msub>
                  <mml:mrow>
                    <mml:mi>Z</mml:mi>
                  </mml:mrow>
                  <mml:mrow>
                    <mml:mi>l</mml:mi>
                  </mml:mrow>
                </mml:msub>
                <mml:mo>=</mml:mo>
                <mml:mi mathvariant="normal">M</mml:mi>
                <mml:mi mathvariant="normal">L</mml:mi>
                <mml:mi mathvariant="normal">P</mml:mi>
                <mml:mfenced separators="|">
                  <mml:mrow>
                    <mml:mi>L</mml:mi>
                    <mml:mi>N</mml:mi>
                    <mml:mfenced separators="|">
                      <mml:mrow>
                        <mml:msub>
                          <mml:mrow>
                            <mml:mover accent="true">
                              <mml:mrow>
                                <mml:mi>Z</mml:mi>
                              </mml:mrow>
                              <mml:mo stretchy="false">^</mml:mo>
                            </mml:mover>
                          </mml:mrow>
                          <mml:mrow>
                            <mml:mi>l</mml:mi>
                          </mml:mrow>
                        </mml:msub>
                      </mml:mrow>
                    </mml:mfenced>
                  </mml:mrow>
                </mml:mfenced>
                <mml:mo>+</mml:mo>
                <mml:msub>
                  <mml:mrow>
                    <mml:mover accent="true">
                      <mml:mrow>
                        <mml:mi>Z</mml:mi>
                      </mml:mrow>
                      <mml:mo stretchy="false">^</mml:mo>
                    </mml:mover>
                  </mml:mrow>
                  <mml:mrow>
                    <mml:mi>l</mml:mi>
                  </mml:mrow>
                </mml:msub>
              </mml:mrow>
            </mml:math>
          </disp-formula>
          where W-MSA refers to windowed multi-head self-attention, <italic>LN</italic> denotes layer normalization, and MLP is a multi-layer perceptron with a non-linear activation. These steps facilitate hierarchical learning of features, with lower layers learning fine-grained patterns and deeper ones learning more abstract representations.</p>
          <p>The Swin Transformer operates on images in several stages, with each stage consisting of a Swin Transformer Block followed by patch merging. At every stage, the spatial resolution of the feature map decreases, while the feature dimension increases, much like how convolutional neural networks downsample images through pooling. The patch merging operation is mathematically defined in Eq. (16):
          <disp-formula id="eqn-16">
            <label>(16)</label>
            <mml:math id="mml-eqn-16" display="block">
              <mml:mrow>
                <mml:msup>
                  <mml:mrow>
                    <mml:mi>Z</mml:mi>
                  </mml:mrow>
                  <mml:mrow>
                    <mml:mo>&#x2032;</mml:mo>
                  </mml:mrow>
                </mml:msup>
                <mml:mo>=</mml:mo>
                <mml:mi>C</mml:mi>
                <mml:mi>o</mml:mi>
                <mml:mi>n</mml:mi>
                <mml:mi>c</mml:mi>
                <mml:mi>a</mml:mi>
                <mml:mi>t</mml:mi>
                <mml:mo>(</mml:mo>
                <mml:msub>
                  <mml:mrow>
                    <mml:mi>Z</mml:mi>
                  </mml:mrow>
                  <mml:mrow>
                    <mml:mn>1</mml:mn>
                  </mml:mrow>
                </mml:msub>
                <mml:mo>,</mml:mo>
                <mml:msub>
                  <mml:mrow>
                    <mml:mi>Z</mml:mi>
                  </mml:mrow>
                  <mml:mrow>
                    <mml:mn>2</mml:mn>
                  </mml:mrow>
                </mml:msub>
                <mml:mo>,</mml:mo>
                <mml:msub>
                  <mml:mrow>
                    <mml:mi>Z</mml:mi>
                  </mml:mrow>
                  <mml:mrow>
                    <mml:mn>3</mml:mn>
                  </mml:mrow>
                </mml:msub>
                <mml:mo>,</mml:mo>
                <mml:msub>
                  <mml:mrow>
                    <mml:mi>Z</mml:mi>
                  </mml:mrow>
                  <mml:mrow>
                    <mml:mn>4</mml:mn>
                  </mml:mrow>
                </mml:msub>
                <mml:mo>)</mml:mo>
                <mml:msub>
                  <mml:mrow>
                    <mml:mi mathvariant="bold-italic">W</mml:mi>
                  </mml:mrow>
                  <mml:mrow>
                    <mml:mi>p</mml:mi>
                  </mml:mrow>
                </mml:msub>
              </mml:mrow>
            </mml:math>
          </disp-formula>
          where <inline-formula id="ieqn-18">
<mml:math id="mml-ieqn-18">
	<mml:mrow>
		<mml:msub>
			<mml:mrow>
				<mml:mi>Z</mml:mi>
			</mml:mrow>
			<mml:mrow>
				<mml:mn>1</mml:mn>
			</mml:mrow>
		</mml:msub>
		<mml:mo>,</mml:mo>
		<mml:mo>&#xA0;</mml:mo>
		<mml:msub>
			<mml:mrow>
				<mml:mi>Z</mml:mi>
			</mml:mrow>
			<mml:mrow>
				<mml:mn>2</mml:mn>
			</mml:mrow>
		</mml:msub>
		<mml:mo>,</mml:mo>
		<mml:mo>&#xA0;</mml:mo>
		<mml:msub>
			<mml:mrow>
				<mml:mi>Z</mml:mi>
			</mml:mrow>
			<mml:mrow>
				<mml:mn>3</mml:mn>
			</mml:mrow>
		</mml:msub>
		<mml:mo>,</mml:mo>
		<mml:mo>&#xA0;</mml:mo>
		<mml:msub>
			<mml:mrow>
				<mml:mi>Z</mml:mi>
			</mml:mrow>
			<mml:mrow>
				<mml:mn>4</mml:mn>
			</mml:mrow>
		</mml:msub>
	</mml:mrow>
</mml:math>
</inline-formula> are feature representations of neighbouring patches, and <inline-formula id="ieqn-19">
<mml:math id="mml-ieqn-19">
	<mml:mrow>
		<mml:msub>
			<mml:mrow>
				<mml:mi mathvariant="bold-italic">W</mml:mi>
			</mml:mrow>
			<mml:mrow>
				<mml:mi>p</mml:mi>
			</mml:mrow>
		</mml:msub>
	</mml:mrow>
</mml:math>
</inline-formula> is a learnable weight matrix that projects the concatenated features into a new space. After hierarchical feature extraction, the final feature map undergoes global average pooling (GAP) to condense the information into a fixed-size vector, as shown in Eq. (17):
          <disp-formula id="eqn-17">
            <label>(17)</label>
            <mml:math id="mml-eqn-17" display="block">
              <mml:mrow>
                <mml:msub>
                  <mml:mrow>
                    <mml:mi>Z</mml:mi>
                  </mml:mrow>
                  <mml:mrow>
                    <mml:mi mathvariant="normal">G</mml:mi>
                    <mml:mi mathvariant="normal">A</mml:mi>
                    <mml:mi mathvariant="normal">P</mml:mi>
                  </mml:mrow>
                </mml:msub>
                <mml:mo>=</mml:mo>
                <mml:mfrac>
                  <mml:mrow>
                    <mml:mn>1</mml:mn>
                  </mml:mrow>
                  <mml:mrow>
                    <mml:mi>N</mml:mi>
                  </mml:mrow>
                </mml:mfrac>
                <mml:mrow>
				<mml:mstyle displaystyle="true">
                  <mml:munderover>
                    <mml:mo stretchy="false">&#x2211;</mml:mo>
                    <mml:mrow>
                      <mml:mi>i</mml:mi>
                      <mml:mo>=</mml:mo>
                      <mml:mn>1</mml:mn>
                    </mml:mrow>
                    <mml:mrow>
                      <mml:mi>N</mml:mi>
                    </mml:mrow>
                  </mml:munderover>
				  </mml:mstyle>
                  <mml:mrow>
                    <mml:msub>
                      <mml:mrow>
                        <mml:mi>Z</mml:mi>
                      </mml:mrow>
                      <mml:mrow>
                        <mml:mi>i</mml:mi>
                      </mml:mrow>
                    </mml:msub>
                  </mml:mrow>
                </mml:mrow>
              </mml:mrow>
            </mml:math>
          </disp-formula>
          where <italic>N</italic> is the number of spatial locations in the feature map, as shown in Eq. (18). This vector is then transmitted through a fully connected layer and a softmax activation function for classification.
          <disp-formula id="eqn-18">
            <label>(18)</label>
            <mml:math id="mml-eqn-18" display="block">
              <mml:mrow>
                <mml:mover accent="true">
                  <mml:mrow>
                    <mml:mi>y</mml:mi>
                  </mml:mrow>
                  <mml:mo stretchy="false">^</mml:mo>
                </mml:mover>
                <mml:mo>=</mml:mo>
                <mml:mi mathvariant="normal">s</mml:mi>
                <mml:mi mathvariant="normal">o</mml:mi>
                <mml:mi mathvariant="normal">f</mml:mi>
                <mml:mi mathvariant="normal">t</mml:mi>
                <mml:mi mathvariant="normal">m</mml:mi>
                <mml:mi mathvariant="normal">a</mml:mi>
                <mml:mi mathvariant="normal">x</mml:mi>
                <mml:mo>(</mml:mo>
                <mml:mi>W</mml:mi>
                <mml:msub>
                  <mml:mrow>
                    <mml:mi>Z</mml:mi>
                  </mml:mrow>
                  <mml:mrow>
                    <mml:mi mathvariant="normal">G</mml:mi>
                    <mml:mi mathvariant="normal">A</mml:mi>
                    <mml:mi mathvariant="normal">P</mml:mi>
                  </mml:mrow>
                </mml:msub>
                <mml:mo>+</mml:mo>
                <mml:mi>b</mml:mi>
                <mml:mo>)</mml:mo>
              </mml:mrow>
            </mml:math>
          </disp-formula>
          where <italic>W</italic> and <italic>b</italic> are learnable parameters of the fully connected layer, and <inline-formula id="ieqn-20">
<mml:math id="mml-ieqn-20">
	<mml:mrow>
		<mml:mover accent="true">
			<mml:mrow>
				<mml:mi>y</mml:mi>
			</mml:mrow>
			<mml:mo stretchy="false">^</mml:mo>
		</mml:mover>
	</mml:mrow>
</mml:math>
</inline-formula> represents the predicted class probabilities. By integrating the Swin Transformer into the proposed model, it maximizes feature representation by extracting both fine-grained details and distant dependencies in images of turmeric leaf diseases. Its hierarchy of processing image data, while computationally efficient, makes the Swin Transformer a strong member in deep-learning models for classifying images.</p>
        </sec>
        <sec id="s3_4_3">
          <label>3.4.3</label>
          <title>Graph Neural Network</title>
          <p>A Graph Neural Network is a type of model in deep learning that has been designed specifically to manage and process graph-structured data. In contrast with usual neural networks used for Euclidean data, such as images and sequences, GNNs easily capture the relationships between objects in the form of nodes (vertices) and edges, formulating an organized framework. GNNs have found extensive applications in tasks like social network analysis, molecular property prediction, and medical diagnosis because they can represent very intricate dependencies among data points.</p>
          <p>The structure of a GNN is of a systematic pipeline, as illustrated in <xref ref-type="fig" rid="fig-8">Fig. 8</xref>. The core building blocks are feature extraction, graph construction, message passing, feature transformation, and dense classification layers. All of these are responsible for encoding and learning good representations from input features.</p>
          <fig id="fig-8">
            <label>Figure 8</label>
            <caption>
              <p>Architecture of graph neural network.</p>
            </caption>
            <graphic mimetype="image" mime-subtype="tif" xlink:href="TSP_Phyton-94-73354-f008.tif"/>
          </fig>
          <p>The initial process of GNN is extracting the input features of an entity (node). Every node comes with a feature vector that embraces its attributes. Let <italic>X</italic> denote the set of the input features, and each node <italic>v</italic> has a feature vector <italic>hv</italic>, denoted in Eq. (19):
          <disp-formula id="eqn-19">
            <label>(19)</label>
            <mml:math id="mml-eqn-19" display="block">
              <mml:mrow>
                <mml:msubsup>
                  <mml:mrow>
                    <mml:mi>h</mml:mi>
                  </mml:mrow>
                  <mml:mrow>
                    <mml:mi>v</mml:mi>
                  </mml:mrow>
                  <mml:mrow>
                    <mml:mo>(</mml:mo>
                    <mml:mn>0</mml:mn>
                    <mml:mo>)</mml:mo>
                  </mml:mrow>
                </mml:msubsup>
                <mml:mo>=</mml:mo>
                <mml:msub>
                  <mml:mrow>
                    <mml:mi>x</mml:mi>
                  </mml:mrow>
                  <mml:mrow>
                    <mml:mi>v</mml:mi>
                  </mml:mrow>
                </mml:msub>
                <mml:mo>,</mml:mo>
                <mml:mo>&#xA0;</mml:mo>
                <mml:mi>v</mml:mi>
                <mml:mo>&#x2208;</mml:mo>
                <mml:mi>V</mml:mi>
              </mml:mrow>
            </mml:math>
          </disp-formula>
          where <inline-formula id="ieqn-21">
<mml:math id="mml-ieqn-21">
	<mml:mrow>
		<mml:msubsup>
			<mml:mrow>
				<mml:mi>h</mml:mi>
			</mml:mrow>
			<mml:mrow>
				<mml:mi>v</mml:mi>
			</mml:mrow>
			<mml:mrow>
				<mml:mo>(</mml:mo>
				<mml:mn>0</mml:mn>
				<mml:mo>)</mml:mo>
			</mml:mrow>
		</mml:msubsup>
	</mml:mrow>
</mml:math>
</inline-formula> denotes the initial representation of the node, and <italic>V</italic> is the set of all nodes in the graph. These input features serve as the foundation for subsequent learning and representation updates. Once features are extracted, a graph is constructed by defining the connections (edges) between nodes. The relationships between nodes are represented using an adjacency matrix <bold><italic>A</italic></bold>, where each entry <inline-formula id="ieqn-22">
<mml:math id="mml-ieqn-22">
	<mml:mrow>
		<mml:msub>
			<mml:mrow>
				<mml:mi mathvariant="bold-italic">A</mml:mi>
			</mml:mrow>
			<mml:mrow>
				<mml:mi>i</mml:mi>
				<mml:mi>j</mml:mi>
			</mml:mrow>
		</mml:msub>
	</mml:mrow>
</mml:math>
</inline-formula> indicates the presence (or weight) of an edge between nodes <italic>i</italic> and <italic>j</italic>, as shown in Eq. (20):</p>
          <disp-formula id="eqn-20">
            <label>(20)</label>
            <mml:math id="mml-eqn-20" display="block">
              <mml:mrow>
                <mml:msub>
                  <mml:mrow>
                    <mml:mi mathvariant="bold-italic">A</mml:mi>
                  </mml:mrow>
                  <mml:mrow>
                    <mml:mi>i</mml:mi>
                    <mml:mi>j</mml:mi>
                  </mml:mrow>
                </mml:msub>
                <mml:mo>=</mml:mo>
                <mml:mfenced close="" open="{" separators="|">
                  <mml:mrow>
                    <mml:mtable columnalign="left">
                      <mml:mtr>
                        <mml:mtd>
                          <mml:mrow>
                            <mml:mn>1</mml:mn>
                            <mml:mo>,</mml:mo>
							</mml:mrow>
                        </mml:mtd>   
						<mml:mtd>
						<mml:mrow>
                            <mml:mi>i</mml:mi>
                            <mml:mi>f</mml:mi>
                            <mml:mo>&#xA0;</mml:mo>
                            <mml:mi>t</mml:mi>
                            <mml:mi>h</mml:mi>
                            <mml:mi>e</mml:mi>
                            <mml:mi>r</mml:mi>
                            <mml:mi>e</mml:mi>
                            <mml:mo>&#xA0;</mml:mo>
                            <mml:mi>i</mml:mi>
                            <mml:mi>s</mml:mi>
                            <mml:mo>&#xA0;</mml:mo>
                            <mml:mi>a</mml:mi>
                            <mml:mi>n</mml:mi>
                            <mml:mo>&#xA0;</mml:mo>
                            <mml:mi>e</mml:mi>
                            <mml:mi>d</mml:mi>
                            <mml:mi>g</mml:mi>
                            <mml:mi>e</mml:mi>
                            <mml:mo>&#xA0;</mml:mo>
                            <mml:mi>b</mml:mi>
                            <mml:mi>e</mml:mi>
                            <mml:mi>t</mml:mi>
                            <mml:mi>w</mml:mi>
                            <mml:mi>e</mml:mi>
                            <mml:mi>e</mml:mi>
                            <mml:mi>n</mml:mi>
                            <mml:mo>&#xA0;</mml:mo>
                            <mml:mi>n</mml:mi>
                            <mml:mi>o</mml:mi>
                            <mml:mi>d</mml:mi>
                            <mml:mi>e</mml:mi>
                            <mml:mi>s</mml:mi>
                            <mml:mo>&#xA0;</mml:mo>
                            <mml:mi>i</mml:mi>
                            <mml:mo>&#xA0;</mml:mo>
                            <mml:mi>a</mml:mi>
                            <mml:mi>n</mml:mi>
                            <mml:mi>d</mml:mi>
                            <mml:mo>&#xA0;</mml:mo>
                            <mml:mi>j</mml:mi>
                          </mml:mrow>
                        </mml:mtd>
                      </mml:mtr>
                      <mml:mtr>
                        <mml:mtd>
                          <mml:mrow>
                            <mml:mn>0</mml:mn>
                            <mml:mo>,</mml:mo>
							</mml:mrow>
                         </mml:mtd>   
						<mml:mtd>
						<mml:mrow>
                            <mml:mi>o</mml:mi>
                            <mml:mi>t</mml:mi>
                            <mml:mi>h</mml:mi>
                            <mml:mi>e</mml:mi>
                            <mml:mi>r</mml:mi>
                            <mml:mi>w</mml:mi>
                            <mml:mi>i</mml:mi>
                            <mml:mi>s</mml:mi>
                            <mml:mi>e</mml:mi>
                          </mml:mrow>
                        </mml:mtd>
                      </mml:mtr>
                    </mml:mtable>
                  </mml:mrow>
                </mml:mfenced>
              </mml:mrow>
            </mml:math>
          </disp-formula>
          <p>This adjacency matrix serves as the foundation for information propagation in the GNN. In weighted graphs, the values of <inline-formula id="ieqn-23">
<mml:math id="mml-ieqn-23">
	<mml:mrow>
		<mml:msub>
			<mml:mrow>
				<mml:mi mathvariant="bold-italic">A</mml:mi>
			</mml:mrow>
			<mml:mrow>
				<mml:mi>i</mml:mi>
				<mml:mi>j</mml:mi>
			</mml:mrow>
		</mml:msub>
	</mml:mrow>
</mml:math>
</inline-formula> can be real numbers indicating the strength of connections. Message passing, in which a node repeatedly accumulates data from its neighbours to update its own representation, is the fundamental learning mechanism of GNNs. This is accomplished by combining transformation and neighbourhood aggregation. Eq. (21) is used to calculate a node v&#x2019;s updated feature representation at layer <italic>l</italic>: 
          <disp-formula id="eqn-21">
            <label>(21)</label>
            <mml:math id="mml-eqn-21" display="block">
              <mml:mrow>
                <mml:msubsup>
                  <mml:mrow>
                    <mml:mi>h</mml:mi>
                  </mml:mrow>
                  <mml:mrow>
                    <mml:mi>v</mml:mi>
                  </mml:mrow>
                  <mml:mrow>
                    <mml:mo>(</mml:mo>
                    <mml:mi>l</mml:mi>
                    <mml:mo>)</mml:mo>
                  </mml:mrow>
                </mml:msubsup>
                <mml:mo>=</mml:mo>
                <mml:mi>&#x3C3;</mml:mi>
                <mml:mfenced separators="|">
                  <mml:mrow>
                    <mml:msup>
                      <mml:mrow>
                        <mml:mi mathvariant="bold-italic">W</mml:mi>
                      </mml:mrow>
                      <mml:mrow>
                        <mml:mfenced separators="|">
                          <mml:mrow>
                            <mml:mi>l</mml:mi>
                          </mml:mrow>
                        </mml:mfenced>
                      </mml:mrow>
                    </mml:msup>
                    <mml:mrow>
					<mml:mstyle displaystyle="true">
                      <mml:munder>
                        <mml:mo stretchy="false">&#x2211;</mml:mo>
                        <mml:mrow>
                          <mml:mi>u</mml:mi>
                          <mml:mo>&#x2208;</mml:mo>
                          <mml:mi>N</mml:mi>
                          <mml:mfenced separators="|">
                            <mml:mrow>
                              <mml:mi>v</mml:mi>
                            </mml:mrow>
                          </mml:mfenced>
                        </mml:mrow>
                      </mml:munder>
					  	</mml:mstyle>
                      <mml:mrow>
                        <mml:mfrac>
                          <mml:mrow>
                            <mml:mn>1</mml:mn>
                          </mml:mrow>
                          <mml:mrow>
                            <mml:mfenced close="|" open="|" separators="|">
                              <mml:mrow>
                                <mml:mi>N</mml:mi>
                                <mml:mfenced separators="|">
                                  <mml:mrow>
                                    <mml:mi>v</mml:mi>
                                  </mml:mrow>
                                </mml:mfenced>
                              </mml:mrow>
                            </mml:mfenced>
                          </mml:mrow>
                        </mml:mfrac>
                      </mml:mrow>
                    </mml:mrow>
                    <mml:msubsup>
                      <mml:mrow>
                        <mml:mi>h</mml:mi>
                      </mml:mrow>
                      <mml:mrow>
                        <mml:mi>u</mml:mi>
                      </mml:mrow>
                      <mml:mrow>
                        <mml:mfenced separators="|">
                          <mml:mrow>
                            <mml:mi>l</mml:mi>
                            <mml:mo>&#x2212;</mml:mo>
                            <mml:mn>1</mml:mn>
                          </mml:mrow>
                        </mml:mfenced>
                      </mml:mrow>
                    </mml:msubsup>
                    <mml:mo>+</mml:mo>
                    <mml:msup>
                      <mml:mrow>
                        <mml:mi mathvariant="bold-italic">B</mml:mi>
                      </mml:mrow>
                      <mml:mrow>
                        <mml:mfenced separators="|">
                          <mml:mrow>
                            <mml:mi>l</mml:mi>
                          </mml:mrow>
                        </mml:mfenced>
                      </mml:mrow>
                    </mml:msup>
                    <mml:msubsup>
                      <mml:mrow>
                        <mml:mi>h</mml:mi>
                      </mml:mrow>
                      <mml:mrow>
                        <mml:mi>v</mml:mi>
                      </mml:mrow>
                      <mml:mrow>
                        <mml:mfenced separators="|">
                          <mml:mrow>
                            <mml:mi>l</mml:mi>
                            <mml:mo>&#x2212;</mml:mo>
                            <mml:mn>1</mml:mn>
                          </mml:mrow>
                        </mml:mfenced>
                      </mml:mrow>
                    </mml:msubsup>
                  </mml:mrow>
                </mml:mfenced>
              </mml:mrow>
            </mml:math>
          </disp-formula>
          where <inline-formula id="ieqn-24">
<mml:math id="mml-ieqn-24">
	<mml:mrow>
		<mml:msubsup>
			<mml:mrow>
				<mml:mi>h</mml:mi>
			</mml:mrow>
			<mml:mrow>
				<mml:mi>v</mml:mi>
			</mml:mrow>
			<mml:mrow>
				<mml:mo>(</mml:mo>
				<mml:mi>l</mml:mi>
				<mml:mo>)</mml:mo>
			</mml:mrow>
		</mml:msubsup>
	</mml:mrow>
</mml:math>
</inline-formula> is the updated feature representation of node <italic>v</italic> at layer <italic>l</italic>, <italic>N</italic>(<italic>v</italic>) represents the neighbouring nodes of <italic>v</italic>, <inline-formula id="ieqn-25">
<mml:math id="mml-ieqn-25">
	<mml:mrow>
		<mml:msup>
			<mml:mrow>
				<mml:mi mathvariant="bold-italic">W</mml:mi>
			</mml:mrow>
			<mml:mrow>
				<mml:mfenced separators="|">
					<mml:mrow>
						<mml:mi>l</mml:mi>
					</mml:mrow>
				</mml:mfenced>
			</mml:mrow>
		</mml:msup>
	</mml:mrow>
</mml:math>
</inline-formula> and <inline-formula id="ieqn-26">
<mml:math id="mml-ieqn-26">
	<mml:mrow>
		<mml:msup>
			<mml:mrow>
				<mml:mi mathvariant="bold-italic">B</mml:mi>
			</mml:mrow>
			<mml:mrow>
				<mml:mfenced separators="|">
					<mml:mrow>
						<mml:mi>l</mml:mi>
					</mml:mrow>
				</mml:mfenced>
			</mml:mrow>
		</mml:msup>
	</mml:mrow>
</mml:math>
</inline-formula> are trainable weight matrices, <inline-formula id="ieqn-27">
<mml:math id="mml-ieqn-27">
	<mml:mrow>
		<mml:mi>&#x3C3;</mml:mi>
	</mml:mrow>
</mml:math>
</inline-formula> is a non-linear activation function, <inline-formula id="ieqn-28">
<mml:math id="mml-ieqn-28">
	<mml:mrow>
		<mml:msubsup>
			<mml:mrow>
				<mml:mi>h</mml:mi>
			</mml:mrow>
			<mml:mrow>
				<mml:mi>u</mml:mi>
			</mml:mrow>
			<mml:mrow>
				<mml:mfenced separators="|">
					<mml:mrow>
						<mml:mi>l</mml:mi>
						<mml:mo>&#x2212;</mml:mo>
						<mml:mn>1</mml:mn>
					</mml:mrow>
				</mml:mfenced>
			</mml:mrow>
		</mml:msubsup>
	</mml:mrow>
</mml:math>
</inline-formula> represents the feature vectors of neighbouring nodes from the previous layer. This equation ensures that each node receives information from its local neighbourhood, refining its feature representation iteratively. After multiple layers of message passing, each node learns a refined representation that captures its contextual relationships. The entire graph can be described using a feature matrix <inline-formula id="ieqn-29">
<mml:math id="mml-ieqn-29">
	<mml:mrow>
		<mml:msup>
			<mml:mrow>
				<mml:mi mathvariant="bold-italic">H</mml:mi>
			</mml:mrow>
			<mml:mrow>
				<mml:mo>(</mml:mo>
				<mml:mi>l</mml:mi>
				<mml:mo>)</mml:mo>
			</mml:mrow>
		</mml:msup>
	</mml:mrow>
</mml:math>
</inline-formula>, which stacks all node representations at layer <italic>l</italic>. Using matrix notation, the feature update rule can be rewritten as shown in Eq. (22):
          <disp-formula id="eqn-22">
            <label>(22)</label>
            <mml:math id="mml-eqn-22" display="block">
              <mml:mrow>
                <mml:msup>
                  <mml:mrow>
                    <mml:mi mathvariant="bold-italic">H</mml:mi>
                  </mml:mrow>
                  <mml:mrow>
                    <mml:mo>(</mml:mo>
                    <mml:mi>l</mml:mi>
                    <mml:mo>)</mml:mo>
                  </mml:mrow>
                </mml:msup>
                <mml:mo>=</mml:mo>
                <mml:mi>&#x3C3;</mml:mi>
                <mml:mfenced separators="|">
                  <mml:mrow>
                    <mml:mover accent="true">
                      <mml:mrow>
                        <mml:mi mathvariant="bold-italic">A</mml:mi>
                      </mml:mrow>
                      <mml:mo stretchy="false">~</mml:mo>
                    </mml:mover>
                    <mml:msup>
                      <mml:mrow>
                        <mml:mi>H</mml:mi>
                      </mml:mrow>
                      <mml:mrow>
                        <mml:mfenced separators="|">
                          <mml:mrow>
                            <mml:mi>l</mml:mi>
                            <mml:mo>&#x2212;</mml:mo>
                            <mml:mn>1</mml:mn>
                          </mml:mrow>
                        </mml:mfenced>
                      </mml:mrow>
                    </mml:msup>
                    <mml:msup>
                      <mml:mrow>
                        <mml:mi mathvariant="bold-italic">W</mml:mi>
                      </mml:mrow>
                      <mml:mrow>
                        <mml:mfenced separators="|">
                          <mml:mrow>
                            <mml:mi>l</mml:mi>
                          </mml:mrow>
                        </mml:mfenced>
                      </mml:mrow>
                    </mml:msup>
                  </mml:mrow>
                </mml:mfenced>
              </mml:mrow>
            </mml:math>
          </disp-formula>
          where <inline-formula id="ieqn-30">
<mml:math id="mml-ieqn-30">
	<mml:mrow>
		<mml:msup>
			<mml:mrow>
				<mml:mi mathvariant="bold-italic">H</mml:mi>
			</mml:mrow>
			<mml:mrow>
				<mml:mo>(</mml:mo>
				<mml:mi>l</mml:mi>
				<mml:mo>)</mml:mo>
			</mml:mrow>
		</mml:msup>
	</mml:mrow>
</mml:math>
</inline-formula> is the feature matrix of all nodes at layer <italic>l</italic>, <inline-formula id="ieqn-31">
<mml:math id="mml-ieqn-31">
	<mml:mrow>
		<mml:mover accent="true">
			<mml:mrow>
				<mml:mi mathvariant="bold-italic">A</mml:mi>
			</mml:mrow>
			<mml:mo stretchy="false">~</mml:mo>
		</mml:mover>
	</mml:mrow>
</mml:math>
</inline-formula> is the normalized adjacency matrix ensuring stable training, <inline-formula id="ieqn-32">
<mml:math id="mml-ieqn-32">
	<mml:mrow>
		<mml:msup>
			<mml:mrow>
				<mml:mi mathvariant="bold-italic">W</mml:mi>
			</mml:mrow>
			<mml:mrow>
				<mml:mfenced separators="|">
					<mml:mrow>
						<mml:mi>l</mml:mi>
					</mml:mrow>
				</mml:mfenced>
			</mml:mrow>
		</mml:msup>
	</mml:mrow>
</mml:math>
</inline-formula> is the learnable weight matrix. This formulation allows efficient propagation of node features throughout the graph, making GNNs highly effective in capturing relational structures. Once the GNN has processed the node features, a transformation layer is applied to restructure the learned representations into a more compact format. This transformation layer helps refine the output for improved classification.</p>
          <p>Finally, the transformed features are passed through a dense layer, which performs the final classification based on the learned node representations. This step ensures that the extracted features are accurately mapped to their respective categories.</p>
          <p>The incorporation of GNNs into the classification model addresses the disparity between global reasoning and local feature extraction. Unlike fully connected layers, which process feature dimensions individually, the GNN layer enables the explicit modeling of relationships between various components of features. This characteristic comes in handy for disease classification, where minute differences in texture, colour, and structure must be understood collectively. By utilizing the graph-based learning paradigm, the model effectively captures both contextual and spatial dependencies, resulting in stronger and more interpretable predictions.</p>
          <p>In the proposed DenseSwinGNNNet architecture, the GNN module operates on feature embeddings derived from the Swin Transformer. After hierarchical attention processing, the final output feature map from the Swin Transformer, with dimensions [B, 16, 768], is used to construct a graph. In this graph, each patch embedding represents a node, and the spatial or contextual relationships among patches define the edges. The adjacency matrix <bold><italic>A</italic></bold> &#x2208; &#x211D;<sup><italic>n</italic>&#xD7;<italic>n</italic></sup> (where <italic>n</italic> = 16 nodes) is generated based on 8-connected spatial neighborhood criteria, meaning that each node is connected to its immediate surrounding patches. To enhance relational awareness, edge weights are computed using cosine similarity between patch feature vectors, ensuring that visually or semantically similar regions have stronger connections. This weighted adjacency matrix allows the GNN to aggregate features from both spatially and contextually relevant nodes during the message-passing process. The message-passing operation follows the standard update rule shown in Eq. (23):
          <disp-formula id="eqn-23">
            <label>(23)</label>
            <mml:math id="mml-eqn-23" display="block">
              <mml:mrow>
                <mml:msup>
                  <mml:mrow>
                    <mml:mi>H</mml:mi>
                  </mml:mrow>
                  <mml:mrow>
                    <mml:mo>(</mml:mo>
                    <mml:mi>l</mml:mi>
                    <mml:mo>+</mml:mo>
                    <mml:mn>1</mml:mn>
                    <mml:mo>)</mml:mo>
                  </mml:mrow>
                </mml:msup>
                <mml:mo>=</mml:mo>
                <mml:mi>&#x3C3;</mml:mi>
                <mml:mo>(</mml:mo>
                <mml:mover accent="true">
                  <mml:mrow>
                    <mml:mi>A</mml:mi>
                  </mml:mrow>
                  <mml:mo stretchy="false">&#xAF;</mml:mo>
                </mml:mover>
                <mml:msup>
                  <mml:mrow>
                    <mml:mi>H</mml:mi>
                  </mml:mrow>
                  <mml:mrow>
                    <mml:mo>(</mml:mo>
                    <mml:mi>l</mml:mi>
                    <mml:mo>)</mml:mo>
                  </mml:mrow>
                </mml:msup>
                <mml:msup>
                  <mml:mrow>
                    <mml:mi>W</mml:mi>
                  </mml:mrow>
                  <mml:mrow>
                    <mml:mo>(</mml:mo>
                    <mml:mi>l</mml:mi>
                    <mml:mo>)</mml:mo>
                  </mml:mrow>
                </mml:msup>
                <mml:mo>)</mml:mo>
              </mml:mrow>
            </mml:math>
          </disp-formula>
          where <italic>H</italic><sup>(<italic>l</italic>)</sup> denotes node embeddings at layer <italic>l</italic>, 
		  <inline-formula id="ieqn-38">
<mml:math id="mml-ieqn-38">
	<mml:mrow>
 <mml:mover accent="true">
                  <mml:mrow>
                    <mml:mi mathvariant="bold-italic">A</mml:mi>
                  </mml:mrow>
                  <mml:mo stretchy="false">&#xAF;</mml:mo>
                </mml:mover>
					</mml:mrow>
</mml:math>
</inline-formula> is the normalized adjacency matrix, <italic>W</italic><sup>(<italic>l</italic>)</sup> represents trainable weights, and <italic>&#x3C3;</italic> is the ReLU activation. Two GNN layers were implemented, each followed by batch normalization and dropout (rate = 0.3) to improve stability and prevent overfitting. This design enables the GNN to effectively capture spatial correlations and inter-patch dependencies, enriching the learned feature space before final classification. The explicit graph construction, based on spatial proximity and feature similarity, not only ensures interpretability but also allows other researchers to replicate the framework using similar transformer-based feature embeddings.</p>
        </sec>
        <sec id="s3_4_4">
          <label>3.4.4</label>
          <title>Final Classification Layer</title>
          <p>The final classification layer in the proposed model is significant for projecting the learned feature representations onto the actual class labels, as shown in <xref ref-type="fig" rid="fig-9">Fig. 9</xref>. After the feature extraction and transformation processes, where the Swin Transformer and GNN blocks operate on the input data, the obtained features pass through a dense layer for the final classification. This is the last layer that sums up all the information obtained through extraction and then assigns probabilities to each class with a softmax activation. Mathematically, let <italic>X</italic> represent the feature vector output from the last GNN layer, where <inline-formula id="ieqn-33">
<mml:math id="mml-ieqn-33">
	<mml:mrow>
		<mml:mi>X</mml:mi>
		<mml:mo>&#xA0;</mml:mo>
		<mml:mo>&#x2208;</mml:mo>
		<mml:mo>&#xA0;</mml:mo>
		<mml:msup>
			<mml:mrow>
				<mml:mi mathvariant="normal">R</mml:mi>
			</mml:mrow>
			<mml:mrow>
				<mml:mi>d</mml:mi>
			</mml:mrow>
		</mml:msup>
	</mml:mrow>
</mml:math>
</inline-formula> is a d-dimensional feature representation. The classification layer applies a fully connected transformation, which is defined in Eq. (24):
          <disp-formula id="eqn-24">
            <label>(24)</label>
            <mml:math id="mml-eqn-24" display="block">
              <mml:mrow>
                <mml:mi>Z</mml:mi>
                <mml:mo>=</mml:mo>
                <mml:mi>W</mml:mi>
                <mml:mi>X</mml:mi>
                <mml:mo>+</mml:mo>
                <mml:mi>b</mml:mi>
              </mml:mrow>
            </mml:math>
          </disp-formula>
          where <inline-formula id="ieqn-34">
<mml:math id="mml-ieqn-34">
	<mml:mrow>
		<mml:mi mathvariant="bold-italic">W</mml:mi>
		<mml:mo>&#x2208;</mml:mo>
		<mml:msup>
			<mml:mrow>
				<mml:mi mathvariant="normal">R</mml:mi>
			</mml:mrow>
			<mml:mrow>
				<mml:mi>C</mml:mi>
				<mml:mo>&#xD7;</mml:mo>
				<mml:mi>d</mml:mi>
			</mml:mrow>
		</mml:msup>
	</mml:mrow>
</mml:math>
</inline-formula> is the weight matrix, <inline-formula id="ieqn-35">
<mml:math id="mml-ieqn-35">
	<mml:mrow>
		<mml:mi>b</mml:mi>
		<mml:mo>&#x2208;</mml:mo>
		<mml:msup>
			<mml:mrow>
				<mml:mi mathvariant="normal">R</mml:mi>
			</mml:mrow>
			<mml:mrow>
				<mml:mi>C</mml:mi>
			</mml:mrow>
		</mml:msup>
	</mml:mrow>
</mml:math>
</inline-formula> is the bias term, and <italic>C</italic> represents the number of output classes. The transformed feature vector <italic>Z</italic> is then passed through a softmax activation function to obtain the final class probabilities as represented in Eq. (25):
          <disp-formula id="eqn-25">
            <label>(25)</label>
            <mml:math id="mml-eqn-25" display="block">
              <mml:mrow>
                <mml:mi>P</mml:mi>
                <mml:mfenced separators="|">
                  <mml:mrow>
                    <mml:msub>
                      <mml:mrow>
                        <mml:mi>y</mml:mi>
                      </mml:mrow>
                      <mml:mrow>
                        <mml:mi>i</mml:mi>
                      </mml:mrow>
                    </mml:msub>
                    <mml:mo>|</mml:mo>
                    <mml:mi>X</mml:mi>
                  </mml:mrow>
                </mml:mfenced>
                <mml:mo>=</mml:mo>
                <mml:mfrac>
                  <mml:mrow>
                    <mml:msup>
                      <mml:mrow>
                        <mml:mi>e</mml:mi>
                      </mml:mrow>
                      <mml:mrow>
                        <mml:msub>
                          <mml:mrow>
                            <mml:mi>Z</mml:mi>
                          </mml:mrow>
                          <mml:mrow>
                            <mml:mi>i</mml:mi>
                          </mml:mrow>
                        </mml:msub>
                      </mml:mrow>
                    </mml:msup>
                  </mml:mrow>
                  <mml:mrow>
                    <mml:mrow>
                      <mml:munderover>
                        <mml:mo stretchy="false">&#x2211;</mml:mo>
                        <mml:mrow>
                          <mml:mi>j</mml:mi>
                          <mml:mo>=</mml:mo>
                          <mml:mn>1</mml:mn>
                        </mml:mrow>
                        <mml:mrow>
                          <mml:mi>C</mml:mi>
                        </mml:mrow>
                      </mml:munderover>
                      <mml:mrow>
                        <mml:msup>
                          <mml:mrow>
                            <mml:mi>e</mml:mi>
                          </mml:mrow>
                          <mml:mrow>
                            <mml:msub>
                              <mml:mrow>
                                <mml:mi>Z</mml:mi>
                              </mml:mrow>
                              <mml:mrow>
                                <mml:mi>j</mml:mi>
                              </mml:mrow>
                            </mml:msub>
                          </mml:mrow>
                        </mml:msup>
                      </mml:mrow>
                    </mml:mrow>
                  </mml:mrow>
                </mml:mfrac>
              </mml:mrow>
            </mml:math>
          </disp-formula>
          where <inline-formula id="ieqn-36">
<mml:math id="mml-ieqn-36">
	<mml:mrow>
		<mml:mi>P</mml:mi>
		<mml:mfenced separators="|">
			<mml:mrow>
				<mml:msub>
					<mml:mrow>
						<mml:mi>y</mml:mi>
					</mml:mrow>
					<mml:mrow>
						<mml:mi>i</mml:mi>
					</mml:mrow>
				</mml:msub>
				<mml:mo>|</mml:mo>
				<mml:mi>X</mml:mi>
			</mml:mrow>
		</mml:mfenced>
	</mml:mrow>
</mml:math>
</inline-formula> represents the probability of class <italic>i</italic>, ensuring that all class probabilities sum to 1. By transforming the raw class scores into comprehensible probabilities, the softmax function enables the model to forecast the most probable class for the supplied input.</p>
          <fig id="fig-9">
            <label>Figure 9</label>
            <caption>
              <p>Architecture of final classification layers.</p>
            </caption>
            <graphic mimetype="image" mime-subtype="tif" xlink:href="TSP_Phyton-94-73354-f009.tif"/>
          </fig>
          <p>To improve the strength of the classification step, dropout regularization is applied before the final classification layer. Dropout randomly disables some neurons during training to prevent overfitting and ensure the model can generalize well to new, unseen data. The output from the dropout layer is then fed to batch normalization, which normalizes the activations and makes the training stable.</p>
          <p>The whole process of classification is optimized with the sparse categorical cross-entropy loss function, as shown in Eqs. (26) and (27):
          <disp-formula id="eqn-26">
            <label>(26)</label>
            <mml:math id="mml-eqn-26" display="block">
              <mml:mrow>
                <mml:msup>
                  <mml:mrow>
                    <mml:mi>H</mml:mi>
                  </mml:mrow>
                  <mml:mrow>
                    <mml:mo>(</mml:mo>
                    <mml:mi>l</mml:mi>
                    <mml:mo>)</mml:mo>
                  </mml:mrow>
                </mml:msup>
                <mml:mo>=</mml:mo>
                <mml:mi>&#x3C3;</mml:mi>
                <mml:mfenced separators="|">
                  <mml:mrow>
                    <mml:mover accent="true">
                      <mml:mrow>
                        <mml:mi>A</mml:mi>
                      </mml:mrow>
                      <mml:mo stretchy="false">~</mml:mo>
                    </mml:mover>
                    <mml:msup>
                      <mml:mrow>
                        <mml:mi>H</mml:mi>
                      </mml:mrow>
                      <mml:mrow>
                        <mml:mfenced separators="|">
                          <mml:mrow>
                            <mml:mi>l</mml:mi>
                            <mml:mo>&#x2212;</mml:mo>
                            <mml:mn>1</mml:mn>
                          </mml:mrow>
                        </mml:mfenced>
                      </mml:mrow>
                    </mml:msup>
                    <mml:msup>
                      <mml:mrow>
                        <mml:mi>W</mml:mi>
                      </mml:mrow>
                      <mml:mrow>
                        <mml:mfenced separators="|">
                          <mml:mrow>
                            <mml:mi>l</mml:mi>
                          </mml:mrow>
                        </mml:mfenced>
                      </mml:mrow>
                    </mml:msup>
                  </mml:mrow>
                </mml:mfenced>
              </mml:mrow>
            </mml:math>
          </disp-formula>
          <disp-formula id="eqn-27">
            <label>(27)</label>
            <mml:math id="mml-eqn-27" display="block">
              <mml:mrow>
                <mml:mi>L</mml:mi>
                <mml:mo>=</mml:mo>
                <mml:mo>&#x2212;</mml:mo>
                <mml:mrow>
				<mml:mstyle displaystyle="true">
                  <mml:munderover>
                    <mml:mo stretchy="false">&#x2211;</mml:mo>
                    <mml:mrow>
                      <mml:mi>i</mml:mi>
                      <mml:mo>=</mml:mo>
                      <mml:mn>1</mml:mn>
                    </mml:mrow>
                    <mml:mrow>
                      <mml:mi>C</mml:mi>
                    </mml:mrow>
                  </mml:munderover>
				  </mml:mstyle>
                  <mml:mrow>
                    <mml:msub>
                      <mml:mrow>
                        <mml:mi>y</mml:mi>
                      </mml:mrow>
                      <mml:mrow>
                        <mml:mi>i</mml:mi>
                      </mml:mrow>
                    </mml:msub>
                  </mml:mrow>
                </mml:mrow>
                <mml:mi mathvariant="normal">l</mml:mi>
                <mml:mi mathvariant="normal">o</mml:mi>
                <mml:mi mathvariant="normal">g</mml:mi>
                <mml:mi>P</mml:mi>
                <mml:mo stretchy="false">(</mml:mo>
                <mml:msub>
                  <mml:mrow>
                    <mml:mi>y</mml:mi>
                  </mml:mrow>
                  <mml:mrow>
                    <mml:mi>i</mml:mi>
                  </mml:mrow>
                </mml:msub>
                <mml:mo>|</mml:mo>
                <mml:mi>X</mml:mi>
                <mml:mo stretchy="false">)</mml:mo>
              </mml:mrow>
            </mml:math>
          </disp-formula>
          where <inline-formula id="ieqn-37">
<mml:math id="mml-ieqn-37">
	<mml:mrow>
		<mml:msub>
			<mml:mrow>
				<mml:mi>y</mml:mi>
			</mml:mrow>
			<mml:mrow>
				<mml:mi>i</mml:mi>
			</mml:mrow>
		</mml:msub>
	</mml:mrow>
</mml:math>
</inline-formula> represents the actual class label in one-hot representation. This loss function computes the difference between the predicted probabilities and true labels, helping the optimization process reduce classification errors. Optimization is carried out using an adaptive learning rate optimizer like AdamW, which adapts the weight updates during run-time to improve convergence rates and model performance.</p>
        </sec>
      </sec>
    </sec>
    <sec id="s4">
      <label>4</label>
      <title>Results</title>
      <p>The result section provides a descriptive analysis of the constructed DenseSwinGNNNet model for turmeric leaf disease classification. The section begins with a descriptive performance analysis, comparing the proposed model with the best model selection architectures: DenseNet121 and DenseNet121 with Swin Transformer. Various performance parameters, such as accuracy, precision, recall, and F1-score, are analyzed to measure the strength of classification. An optimizer analysis is also discussed to determine the best optimization technique. To make model evaluation transparent, it is highlighted that all final quantitative outcomes reported in this section, such as the accuracy, precision, recall, F1-score, and Area Under the Curve (AUC) values calculated in the analysis based on the classification parameters table and the visual analyses through the confusion matrix displayed in figures, were extracted solely from the held-out 10% test set. This part of the dataset was completely unseen during the training and validation processes and was reserved solely for the final evaluation. This separation ensures that the reported performance metrics reflect the actual generalization capability of the suggested DenseSwinGNNNet model on unseen data, with no overlap or information leakage between the training, validation, and testing phases.</p>
      <sec id="s4_1">
        <label>4.1</label>
        <title>Experimental Setup</title>
        <p>The envisioned DenseSwinGNNNet model was deployed using the PyTorch deep learning framework (version 1.13.1) with CUDA 11.6 for GPU acceleration. Model training was run with a batch size of 32, which gave an adequate trade-off between memory usage and gradient stability. Training was done for 30 epochs, a configuration established empirically to achieve convergence without overfitting. The Adam optimizer was implemented for its adaptive learning rate adjustment and convergence speed properties, with the initial learning rate set at 0.001. The use of this optimizer, coupled with the comparably small learning rate, facilitated smooth updating throughout training for all the network elements. Every training epoch took around 30 s on the utilized hardware setup, making the overall training time almost 1.25 h.</p>
        <p>Regarding model complexity, DenseSwinGNNNet contains around 25.3 million trainable parameters, indicating the depth and capability of the combined architecture involving DenseNet, Swin Transformer, and Graph Neural Network elements. While possessing significant representational capability, the computational expense of the framework under consideration remains low, with each forward pass making 5.8 GFLOPs of computations. These computational features demonstrate that the framework under consideration has achieved a trade-off between efficiency and accuracy, supporting deployment in research and real-world agricultural monitoring applications. The specific configuration settings are reported, by presenting a clear overview of the experimental setup to ensure reproducibility and enable comparison with current methods documented in the literature.</p>
      </sec>
      <sec id="s4_2">
        <label>4.2</label>
        <title>Result Analysis of DenseNet121 Model</title>
        <p>The training and validation plots of the DenseNet121 model used for classifying turmeric leaf disease, shown in <xref ref-type="fig" rid="fig-10">Fig. 10</xref>, offer insight into how the model learns during training. Training is conducted with the Adagrad optimizer over 30 epochs, using a learning rate parameter of 0.0001 to ensure adaptive learning rate adjustment for convergence stability. The training and validation accuracy plot in <xref ref-type="fig" rid="fig-10">Fig. 10</xref>a shows a steady increase in training accuracy over 0.95, while validation accuracy remains constant at around 0.85. This indicates that the model learns quickly but experiences minimal overfitting, as evidenced by the increasing gap between the training and validation curves. The graph of training and validation loss in <xref ref-type="fig" rid="fig-10">Fig. 10</xref>b reveals a general drop in both losses. The training loss plummets at the start and then levels off, while the validation loss drops slightly before leveling off, supporting the mild overfitting. The precision graph in <xref ref-type="fig" rid="fig-10">Fig. 10</xref>c reveals that training precision approaches 0.99, while validation precision levels off at approximately 0.90, reflecting high positive class detection but small misclassifications. The recall plots in <xref ref-type="fig" rid="fig-10">Fig. 10</xref>d show that training recall increases towards 98%, while validation recall oscillates between 0.85 and 0.90. This suggests the model picks up most diseased and healthy leaves but has some trouble with false negatives. The F1-score plot in <xref ref-type="fig" rid="fig-10">Fig. 10</xref>e follows recall and precision patterns, with training F1-scores approaching 98%, whereas validation F1-scores increase before leveling off just below 90%, indicating a balance between precision and recall.</p>
        <fig id="fig-10">
          <label>Figure 10</label>
          <caption>
            <p>Graphical analysis of DenseNet121 model based on training and validation, (<bold>a</bold>) Accuracy, (<bold>b</bold>) Loss, (<bold>c</bold>) Precision, (<bold>d</bold>) Recall, and (<bold>e</bold>) F1-score.</p>
          </caption>
          <graphic mimetype="image" mime-subtype="tif" xlink:href="TSP_Phyton-94-73354-f010.tif"/>
        </fig>
        <p>The confusion matrix in <xref ref-type="fig" rid="fig-11">Fig. 11</xref> illustrates the DenseNet121 model&#x2019;s classification performance in separating four turmeric leaf categories: Aphids Disease, Blotch, Healthy Leaf, and Leaf Spot. The diagonal elements are correctly classified samples, whereas the off-diagonal elements are the misclassifications. The model has good discriminative power, correctly classifying 21 out of 22 Aphid Disease samples, 22 out of 24 Blotch samples, 15 out of 19 Healthy Leaf samples, and 20 out of 21 Leaf Spot samples. There are some misclassifications, notably one Aphid Disease image classified as Healthy Leaf and three Healthy Leaf samples predicted to be Leaf Spot, suggesting a visual similarity between the two classes. Overall, the DenseNet121 model has a high accuracy rate, effectively capturing complex visual patterns and textural changes in both diseased and healthy leaf specimens. The slight differences show possible visual feature overlap, which could be further reduced by using high-level feature enhancement or attention mechanisms. This robust confusion matrix result demonstrates the model&#x2019;s aptitude for automated disease diagnosis in precision agriculture and presents an effective and reliable method for early detection and classification of turmeric leaf diseases. </p>
        <fig id="fig-11">
          <label>Figure 11</label>
          <caption>
            <p>Confusion matrix of the proposed DenseNet121 model evaluated on the held-out 10% test set, illustrating correct and misclassified samples for the four turmeric leaf disease classes.</p>
          </caption>
          <graphic mimetype="image" mime-subtype="tif" xlink:href="TSP_Phyton-94-73354-f011.tif"/>
        </fig>
        <p>The performance of the model depicted in <xref ref-type="table" rid="table-4">Table 4</xref> clearly illustrates its high proficiency in correctly classifying different turmeric leaf conditions. It resulted in a precision of 0.91, a recall of 0.95, and an F1-score of 0.93 for Aphids Disease, which reflects a very dependable performance with fewer false predictions. For the Blotch class, the model achieved maximum precision (1.00) with a recall of 0.91, indicating that all instances of Blotch predicted were accurate, but a negligible number of actual cases were omitted. The Healthy Leaf class exhibited relatively lower values (precision 0.83, recall 0.78, F1-score 0.81), indicating moderate complexity in distinguishing healthy from diseased ones due to minute textural and color similarities. Conversely, Leaf Spot has a well-balanced performance with a precision of 0.87 and a recall of 0.95, which is indicative of the model&#x2019;s effectiveness in recognizing this type of disease. Generally, the DenseNet121 model demonstrates strong classification power across all classes, achieving very high accuracy in recognizing diseased leaves. The slightly lower scores on the Healthy Leaf class represent an area of potential improvement, likely due to increased feature extraction or fine-tuning of class-specific features. These findings confirm the model&#x2019;s applicability to real-world use in automated plant disease detection systems.</p>
        <table-wrap id="table-4">
          <label>Table 4</label>
          <caption>
            <p>Classification parameters for the proposed DenseNet121 model were evaluated on the held-out 10% test set.</p>
          </caption>
          <table>
            <thead>
              <tr>
                <th align="center" valign="middle" style="border-bottom:solid thin;border-top:solid thin">Class Name</th>
                <th align="center" valign="middle" style="border-bottom:solid thin;border-top:solid thin">Precision (%)</th>
                <th align="center" valign="middle" style="border-bottom:solid thin;border-top:solid thin">Recall (%)</th>
                <th align="center" valign="middle" style="border-bottom:solid thin;border-top:solid thin">F1-Score (%)</th>
                <th align="center" valign="middle" style="border-bottom:solid thin;border-top:solid thin">Accuracy (%)</th>
              </tr>
            </thead>
            <tbody>
              <tr>
                <td align="center" valign="middle">
                  <bold>Aphids Disease</bold>
                </td>
                <td align="center" valign="middle">0.91</td>
                <td align="center" valign="middle">0.95</td>
                <td align="center" valign="middle">0.93</td>
                <td rowspan="4" align="center" valign="middle" style="border-bottom:solid thin">0.91</td>
              </tr>
              <tr>
                <td align="center" valign="middle">
                  <bold>Blotch</bold>
                </td>
                <td align="center" valign="middle">1.00</td>
                <td align="center" valign="middle">0.91</td>
                <td align="center" valign="middle">0.95</td>
              </tr>
              <tr>
                <td align="center" valign="middle">
                  <bold>Healthy Leaf</bold>
                </td>
                <td align="center" valign="middle">0.83</td>
                <td align="center" valign="middle">0.78</td>
                <td align="center" valign="middle">0.81</td>
              </tr>
              <tr>
                <td align="center" valign="middle" style="border-bottom:solid thin">
                  <bold>Leaf Spot</bold>
                </td>
                <td align="center" valign="middle" style="border-bottom:solid thin">0.87</td>
                <td align="center" valign="middle" style="border-bottom:solid thin">0.95</td>
                <td align="center" valign="middle" style="border-bottom:solid thin">0.90</td>
              </tr>
            </tbody>
          </table>
        </table-wrap>
        <p>The ROC curve for the classification of turmeric leaf diseases with DenseNet121, as shown in <xref ref-type="fig" rid="fig-12">Fig. 12</xref>, illustrates the model&#x2019;s diagnostic power for four classes. The AUC values represent the performance of the classifier. The Healthy Leaf class achieved the highest AUC of 0.96, demonstrating an excellent balance between sensitivity and specificity. The model is capable of differentiating healthy leaves from diseased leaves. The Blotch class was next with an AUC of 0.94, indicating a very high true positive rate and very low false positives, further assuring the reliability of the model in identifying Blotch-infected leaves. The Leaf Spot class resulted in an AUC of 0.89, indicating good classification performance but with relatively higher false positive rates compared to Blotch and Healthy Leaf. The Aphids Disease class achieved an AUC of 0.88, indicating the model&#x2019;s decent discriminative capability between Aphids Disease and other classes. However, there is potential for improvement, possibly due to the visual resemblance between Aphid Disease and other leaf diseases. The DenseNet121 model consistently demonstrates high discriminative capability, as all AUC values are greater than 0.85, highlighting its strong performance in classifying turmeric leaf diseases. The ROC curves also show a significant gap between the Healthy Leaf class and the rest, reflecting the model&#x2019;s improved ability to identify healthy samples, which is essential for reducing false alarms in actual agricultural environments. The slightly lower AUC for Aphids Disease suggests the need for additional feature extraction layers or hyperparameter fine-tuning to enhance classification accuracy.</p>
        <fig id="fig-12">
          <label>Figure 12</label>
          <caption>
            <p>Analysis of DenseNet121 model based on ROC curve.</p>
          </caption>
          <graphic mimetype="image" mime-subtype="tif" xlink:href="TSP_Phyton-94-73354-f012.tif"/>
        </fig>
      </sec>
      <sec id="s4_3">
        <label>4.3</label>
        <title>Result Analysis of DenseNet121 with Swin Transformer</title>
        <p>The performance of the DenseNet121 model with Swin Transformer for turmeric leaf disease classification is evaluated using different training and validation metrics, as shown in <xref ref-type="fig" rid="fig-13">Fig. 13</xref>. The model is optimized with the Adagrad optimizer using 30 epochs and a learning rate of 0.0001, enabling adaptive learning rate adjustment to enhance convergence. The accuracy plots of <xref ref-type="fig" rid="fig-13">Fig. 13</xref>a reveal a smooth rise, with the training and validation accuracy curves converging after about 10 epochs. This reflects successful learning with little overfitting, as the validation curve closely matches the training curve. The loss plots in <xref ref-type="fig" rid="fig-13">Fig. 13</xref>b reflect a descent trend, with the validation loss leveling off after about 15 epochs. Small fluctuations in validation loss indicate minor overfitting, which can be countered using regularization methods. The graphs of precision in <xref ref-type="fig" rid="fig-13">Fig. 13</xref>c show a consistent increase, with the validation precision stabilizing after 10 epochs, indicating excellent generalization capability. The recall plots in <xref ref-type="fig" rid="fig-13">Fig. 13</xref>d also follow the same trend, showing optimal performance within the first 10 epochs, which indicates that the model can detect diseased leaves with minimal false negatives. Finally, the graphs of F1-scores in <xref ref-type="fig" rid="fig-13">Fig. 13</xref>e, a balance between precision and recall, indicate a pattern of stabilization beyond 10 epochs, affirming the model&#x2019;s stability. A slight fluctuation in validation loss and accuracy supports the need for a bit more fine-tuning to improve classification further. All these results establish the efficacy of the DenseNet121 with the Swin Transformer method for the accurate classification of turmeric leaf diseases.</p>
        <fig id="fig-13">
          <label>Figure 13</label>
          <caption>
            <p>Graphical Analysis of DenseNet121 and Swin Transformer model based on Training and Validation (<bold>a</bold>) Accuracy, (<bold>b</bold>) Loss, (<bold>c</bold>) Precision, (<bold>d</bold>) Recall, and (<bold>e</bold>) F1-Score.</p>
          </caption>
          <graphic mimetype="image" mime-subtype="tif" xlink:href="TSP_Phyton-94-73354-f013.tif"/>
        </fig>
        <p>The confusion matrix shown in <xref ref-type="fig" rid="fig-14">Fig. 14</xref> indicates the relative classification performance of the DenseNet121 and Swin Transformer models on four turmeric leaf classes: Aphids Disease, Blotch, Healthy Leaf, and Leaf Spot. The two models are both excellent discriminators, achieving very high true positive rates across all classes and few misclassifications. For Aphids Disease, 21 out of 22 were well classified, one was misclassified as Leaf Spot, indicating the high sensitivity and robustness of the model. For Blotch, most samples (21 out of 24) were accurately predicted, with little confusion between Aphids Disease and Healthy Leaf, indicating minor overlaps in visual texture. The Healthy Leaf class had almost flawless classification, with all 19 correctly classified, highlighting the models&#x2019; sensitivity to the differentiation between healthy and diseased patterns. For Leaf Spot, 20 of the 21 instances were correctly identified, further confirming the models&#x2019; accuracy. Overall, the confusion matrix shows both DenseNet121 and Swin Transformer accurately capturing complex spatial and colour characteristics. Yet, the Swin Transformer achieves a slight improvement in coping with inter-class similarities as a result of its self-attention mechanism, whereas DenseNet121 provides effective feature reuse with dense connections. Overall, these findings highlight the high accuracy, generalizability, and robustness of both models in plant disease classification.</p>
        <fig id="fig-14">
          <label>Figure 14</label>
          <caption>
            <p>Confusion matrix of the proposed DenseNet121 and Swin transformer model evaluated on the held-out 10% test set, illustrating correct and misclassified samples for the four turmeric leaf disease classes.</p>
          </caption>
          <graphic mimetype="image" mime-subtype="tif" xlink:href="TSP_Phyton-94-73354-f014.tif"/>
        </fig>
        <p>The analysis of the DenseNet121 and Swin Transformer models&#x2019; performance, based on the classification metrics presented in <xref ref-type="table" rid="table-5">Table 5</xref>, emphasizes their robust and reliable ability to classify various turmeric leaf conditions with high precision and consistency. For Aphids Disease, both models achieved a balanced precision and recall of 0.95, resulting in an F1-score of 0.95, which reflects highly accurate and consistent predictions with very few false negatives or positives. The Blotch class achieved complete precision (1.00) and a recall of 0.87, indicating that all Blotch predictions were correct. However, some true cases were not identified, possibly due to their visual resemblance to other infected leaves. The Healthy Leaf class also performed remarkably well, achieving a precision of 0.90, a recall of 1.00, and an F1-score of 0.95, indicating the models&#x2019; capacity to identify healthy samples correctly with no missed cases. For Leaf Spot, the models demonstrated a strong balance with 0.90 precision and 0.95 recall, indicating strong detection ability. In total, DenseNet121 and Swin Transformer both performed well across all classes, with the Swin Transformer performing slightly better than DenseNet121 because of its self-attention mechanism that further improves global feature comprehension. The metrics as a whole confirm the robustness, generalization capability, and applicability of the models for effective automated plant disease diagnosis.</p>
        <table-wrap id="table-5">
          <label>Table 5</label>
          <caption>
            <p>Classification parameters for the proposed DenseNet121 and Swin transformer models are evaluated on the held-out 10% test set.</p>
          </caption>
          <table>
            <thead>
              <tr>
                <th align="center" valign="middle" style="border-bottom:solid thin;border-top:solid thin">Class Name</th>
                <th align="center" valign="middle" style="border-bottom:solid thin;border-top:solid thin">Precision (%)</th>
                <th align="center" valign="middle" style="border-bottom:solid thin;border-top:solid thin">Recall (%)</th>
                <th align="center" valign="middle" style="border-bottom:solid thin;border-top:solid thin">F1-Score (%)</th>
                <th align="center" valign="middle" style="border-bottom:solid thin;border-top:solid thin">Accuracy (%)</th>
              </tr>
            </thead>
            <tbody>
              <tr>
                <td align="center" valign="middle">
                  <bold>Aphids Disease</bold>
                </td>
                <td align="center" valign="middle">0.95</td>
                <td align="center" valign="middle">0.95</td>
                <td align="center" valign="middle">0.95</td>
                <td rowspan="4" align="center" valign="middle" style="border-bottom:solid thin">0.94</td>
              </tr>
              <tr>
                <td align="center" valign="middle">
                  <bold>Blotch</bold>
                </td>
                <td align="center" valign="middle">1.00</td>
                <td align="center" valign="middle">0.87</td>
                <td align="center" valign="middle">0.93</td>
              </tr>
              <tr>
                <td align="center" valign="middle">
                  <bold>Healthy Leaf</bold>
                </td>
                <td align="center" valign="middle">0.90</td>
                <td align="center" valign="middle">1.00</td>
                <td align="center" valign="middle">0.95</td>
              </tr>
              <tr>
                <td align="center" valign="middle" style="border-bottom:solid thin">
                  <bold>Leaf Spot</bold>
                </td>
                <td align="center" valign="middle" style="border-bottom:solid thin">90</td>
                <td align="center" valign="middle" style="border-bottom:solid thin">95</td>
                <td align="center" valign="middle" style="border-bottom:solid thin">93</td>
              </tr>
            </tbody>
          </table>
        </table-wrap>
        <p>ROC curve of the fine-tuned model for the classification of turmeric leaf disease from <xref ref-type="fig" rid="fig-15">Fig. 15</xref> clearly indicates the discriminative ability of the model for all four classes, i.e., Aphids Disease, Blotch, Healthy Leaf, and Leaf Spot. AUC values of each class indicate the model&#x2019;s capability to discriminate between positive and negative examples. Aphids Disease had an AUC of 0.88, with excellent discrimination between true positives and negatives. Blotch had an AUC of 0.85, indicating the relatively high precision of the model, but with slightly more feature space overlap than Aphids Disease. The AUC reached 0.89 for Healthy Leaf, indicating the model&#x2019;s good ability to differentiate healthy from diseased leaves, a very crucial feature in precision agriculture. Leaf Spot had an AUC of 0.82, the lowest of all classes, indicating a moderate level of confusion with other diseases due to the possible visual similarity of leaf patterns. ROC curves for all classes climb well above the diagonal random guess line, which signifies that the model is significantly better than random probability. The steep initial bends for Aphids Disease and Healthy Leaf reflect high sensitivity at low false positive rates, which is a good property for reducing misclassification in practice. The comparatively flatter Leaf Spot curve shows potential for improvement, possibly with more data or additional fine-tuning. </p>
        <fig id="fig-15">
          <label>Figure 15</label>
          <caption>
            <p>Analysis of DenseNet121 and Swin transformer model based on the ROC curve.</p>
          </caption>
          <graphic mimetype="image" mime-subtype="tif" xlink:href="TSP_Phyton-94-73354-f015.tif"/>
        </fig>
      </sec>
      <sec id="s4_4">
        <label>4.4</label>
        <title>Result Analysis of Proposed DenseSwinGNNNet Model (DenseNet121, Swin Transformer, and Graph Neural Network)</title>
        <p>The training and validation performance metrics of the proposed model for turmeric leaf disease classification are graphically represented in <xref ref-type="fig" rid="fig-16">Fig. 16</xref>, with results plotted over 30 epochs. The model is trained with the Adagrad optimizer with a learning rate of 0.0001, with adaptive learning rate adjustment for stable convergence. The Training and Validation Accuracy graph in <xref ref-type="fig" rid="fig-16">Fig. 16</xref>a illustrates steadily improving accuracy curves for both sets. These curves initially rise steeply during the earlier epochs before gradually leveling off, eventually converging to 0.90, which marks the strong learning and generalizing capabilities of the model. The plot of Training and Validation Loss in <xref ref-type="fig" rid="fig-16">Fig. 16</xref>b shows a continuous decline in both losses, with the validation loss remaining slightly less than the training loss, indicating that the model is learning effectively without notable overfitting. The Training and Validation Precision plot in <xref ref-type="fig" rid="fig-16">Fig. 16</xref>c shows a steep rise in early epochs, with both precision values exceeding 95%, demonstrating the model&#x2019;s ability to identify positive cases and reduce false positives accurately. Likewise, the Training and Validation Recall plot in <xref ref-type="fig" rid="fig-16">Fig. 16</xref>d is similar, with recall values above 0.90, affirming the model&#x2019;s high sensitivity in identifying diseased and healthy leaves correctly. The Training and Validation F1-Score graph in <xref ref-type="fig" rid="fig-16">Fig. 16</xref>e shows a gradual increase in both values, reaching close to 0.95, indicating well-balanced performance between recall and precision. The proximity of the training and validation curves for all the metrics also ensures that the proposed model exhibits stable generalization, rendering it highly efficient in classifying turmeric leaf disease.</p>
        <fig id="fig-16">
          <label>Figure 16</label>
          <caption>
            <p>Graphical analysis of proposed DenseSwinGNNNet model based on training and validation. (<bold>a</bold>) Accuracy, (<bold>b</bold>) Loss, (<bold>c</bold>) Precision, (<bold>d</bold>) Recall, and (<bold>e</bold>) F1-score.</p>
          </caption>
          <graphic mimetype="image" mime-subtype="tif" xlink:href="TSP_Phyton-94-73354-f016.tif"/>
        </fig>
        <p>The confusion matrix in <xref ref-type="fig" rid="fig-17">Fig. 17</xref> shows the classification accuracy of the presented DenseSwinGNNNet model on four turmeric leaf classes: Aphids Disease, Blotch, Healthy Leaf, and Leaf Spot. The model demonstrates outstanding accuracy and robustness, performing near-perfect classification on all classes with very few misclassifications. In particular, 21 out of 22 samples for Aphids Disease were accurately classified, while one was mistakenly classified as Leaf Spot, reflecting high discriminative ability and sensitivity. Similarly, the Blotch class exhibits excellent performance, correctly predicting 23 out of 24 cases, with only one slight confusion in the case of Aphids Disease. The Healthy Leaf class exhibited perfect classification, with all 18 samples correctly identified, indicating the model&#x2019;s high ability to distinguish healthy leaves from infected ones. The Leaf Spot class also performed perfectly, with all 21 cases being properly identified, indicating the model&#x2019;s strength in recognizing disease-pattern-specific features.</p>
        <p>On the whole, the DenseSwinGNNNet model exhibits excellent feature learning by incorporating DenseNet&#x2019;s hierarchical feature reuse, Swin Transformer&#x2019;s global attention mechanism, and GNN&#x2019;s relational reasoning. This synergy of combined strengths facilitates effective spatial-contextual awareness with minimum inter-class confusion. The confusion matrix verifies that DenseSwinGNNNet exhibits excellent generalization and robustness, rendering it highly effective for precision-based leaf disease detection and agricultural automation. </p>
        <fig id="fig-17">
          <label>Figure 17</label>
          <caption>
            <p>Confusion matrix of the DenseSwinGNNNet model evaluated on the held-out 10% test set, illustrating correct and misclassified samples for the four turmeric leaf disease classes.</p>
          </caption>
          <graphic mimetype="image" mime-subtype="tif" xlink:href="TSP_Phyton-94-73354-f017.tif"/>
        </fig>
        <p>The classification performance assessment of the developed DenseSwinGNNNet model, as shown in <xref ref-type="table" rid="table-6">Table 6</xref>, confirms its exemplary capability to effectively detect and classify turmeric leaf diseases with great precision, recall, and F1-scores for all classes. For the Aphids Disease class, the model has a well-balanced precision, recall, and F1-score of 95%, which indicates its high predictive dependability and low misclassifications. The Blotch class achieved perfect precision (100%) and a recall of 95%, with an F1-score of 97%. This means that all the predicted instances were correct, while only a few actual samples were missed. Notably, the Healthy Leaf class achieved perfect scores (100% precision, recall, and F1-score), validating the model&#x2019;s ability to identify healthy leaves as opposed to the diseased ones with complete accuracy. Likewise, the Leaf Spot class exhibited a precision of 95% and a recall of 100%, providing an F1-score of 97%. This highlights the model&#x2019;s strength in identifying and accurately classifying even fine disease symptoms. These consistently high values on all evaluation metrics confirm that DenseSwinGNNNet convincingly integrates dense connectivity, self-attention models, and graph-based spatial reasoning to obtain superior feature representation and contextual understanding. Therefore, the model exhibits outstanding generalization, stability, and reliability, making it very apt for practical agricultural disease detection use cases.</p>
        <table-wrap id="table-6">
          <label>Table 6</label>
          <caption>
            <p>Classification parameters for the proposed DenseSwinGNNNet model were evaluated on the held-out 10% test set.</p>
          </caption>
          <table>
            <thead>
              <tr>
                <th align="center" valign="middle" style="border-bottom:solid thin;border-top:solid thin">Class Name</th>
                <th align="center" valign="middle" style="border-bottom:solid thin;border-top:solid thin">Precision (%)</th>
                <th align="center" valign="middle" style="border-bottom:solid thin;border-top:solid thin">Recall (%)</th>
                <th align="center" valign="middle" style="border-bottom:solid thin;border-top:solid thin">F1-Score (%) </th>
                <th align="center" valign="middle" style="border-bottom:solid thin;border-top:solid thin">Accuracy (%)</th>
              </tr>
            </thead>
            <tbody>
              <tr>
                <td align="center" valign="middle">
                  <bold>Aphids Disease</bold>
                </td>
                <td align="center" valign="middle">95</td>
                <td align="center" valign="middle">95</td>
                <td align="center" valign="middle">95</td>
                <td rowspan="4" align="center" valign="middle" style="border-bottom:solid thin">96</td>
              </tr>
              <tr>
                <td align="center" valign="middle">
                  <bold>Blotch</bold>
                </td>
                <td align="center" valign="middle">100</td>
                <td align="center" valign="middle">95</td>
                <td align="center" valign="middle">97</td>
              </tr>
              <tr>
                <td align="center" valign="middle">
                  <bold>Healthy Leaf</bold>
                </td>
                <td align="center" valign="middle">100</td>
                <td align="center" valign="middle">100</td>
                <td align="center" valign="middle">100</td>
              </tr>
              <tr>
                <td align="center" valign="middle" style="border-bottom:solid thin">
                  <bold>Leaf Spot</bold>
                </td>
                <td align="center" valign="middle" style="border-bottom:solid thin">95</td>
                <td align="center" valign="middle" style="border-bottom:solid thin">100</td>
                <td align="center" valign="middle" style="border-bottom:solid thin">97</td>
              </tr>
            </tbody>
          </table>
        </table-wrap>
        <p>The ROC curve of the suggested model for classifying turmeric leaf disease is presented in <xref ref-type="fig" rid="fig-18">Fig. 18</xref>, illustrating the model&#x2019;s discriminative power among the four classes. AUC values represent the capacity of the model to distinguish among classes, with Aphids Disease at 0.97, Blotch at 0.98, Healthy Leaf at 0.99, and Leaf Spot at 0.97. These near-1.0 AUC values across all classes indicate that the model has an exceptionally good sensitivity (true positive rate) and specificity trade-off, minimizing the likelihood of misclassification. The Healthy Leaf class shows the best AUC, which means the model effectively discriminates the healthy from the pathological samples. The Aphids Disease and Leaf Spot classes, with AUCs of 0.97, are representative of the model&#x2019;s ability to classify these diseases with minimal misclassification. The Blotch class also displays a high AUC of 0.98, once more demonstrating the model&#x2019;s discriminative power. The sharp rise of the curves towards the top-left corner of the plot confirms that the model achieves high true positive rates even at low false positive rates. The slight variations between the curves indicate some degree of class overlap, possibly due to the visual resemblance between diseased and healthy leaves. However, the high AUC values confirm that the model performs these tasks exceptionally well.</p>
        <fig id="fig-18">
          <label>Figure 18</label>
          <caption>
            <p>Analysis of proposed DenseSwinGNNNet model based on ROC curve.</p>
          </caption>
          <graphic mimetype="image" mime-subtype="tif" xlink:href="TSP_Phyton-94-73354-f018.tif"/>
        </fig>
      </sec>
      <sec id="s4_5">
        <label>4.5</label>
        <title>Optimizer Analysis for Proposed DenseSwinGNNNet Model</title>
        <p>Optimizer analysis of the suggested DenseSwinGNNNet model compares the effects of various optimization algorithms&#x2014;Adagrad, RMSprop, and Adam&#x2014;on classification accuracy, based on average precision, recall, F1-score, and accuracy, as shown in <xref ref-type="table" rid="table-7">Table 7</xref>. Adagrad optimizer provides 95.2% precision, 95.5% recall, and 95.7% F1-score with an overall accuracy of 96%, reflecting effective learning but comparatively lower generalization than other optimizers. The RMSprop optimizer significantly enhances performance, achieving 98.5% precision, 98.2% recall, 98.7% F1-score, and 98% accuracy, reflecting greater optimization and training stability. Nonetheless, the Adam optimizer performs the best among them, achieving 99.5% across all average measurements and an overall accuracy of 99.7%, reflecting its excellence in tackling the complex feature distributions of turmeric leaf disease classification. The consistent improvement in performance from Adagrad to RMSprop and subsequently to Adam indicates that adaptive learning rate techniques considerably improve feature learning and convergence stability of the model. Adam&#x2019;s high performance, due to its adaptive moment estimation, provides optimal gradient updates, minimizing misclassification rates. Therefore, Adam is the optimal optimizer for DenseSwinGNNNet, achieving the most accurate and consistent classification outcomes, making it the perfect choice for implementing turmeric leaf disease detection in precision farming.</p>
        <table-wrap id="table-7">
          <label>Table 7</label>
          <caption>
            <p>Comparison of various optimizers on the proposed DenseSwinGNNNet model.</p>
          </caption>
          <table>
            <thead>
              <tr>
                <th align="center" valign="middle" style="border-bottom:solid thin;border-top:solid thin">Optimizer</th>
                <th align="center" valign="middle" style="border-bottom:solid thin;border-top:solid thin">Average Precision (%)</th>
                <th align="center" valign="middle" style="border-bottom:solid thin;border-top:solid thin">Average Recall (%)</th>
                <th align="center" valign="middle" style="border-bottom:solid thin;border-top:solid thin">Average F1-Score (%)</th>
                <th align="center" valign="middle" style="border-bottom:solid thin;border-top:solid thin">Accuracy (%)</th>
              </tr>
            </thead>
            <tbody>
              <tr>
                <td align="center" valign="middle">
                  <bold>Adagrad</bold>
                </td>
                <td align="center" valign="middle">95.2</td>
                <td align="center" valign="middle">95.5</td>
                <td align="center" valign="middle">95.7</td>
                <td align="center" valign="middle">96.0</td>
              </tr>
              <tr>
                <td align="center" valign="middle">
                  <bold>RMSprop</bold>
                </td>
                <td align="center" valign="middle">98.5</td>
                <td align="center" valign="middle">98.2</td>
                <td align="center" valign="middle">98.7</td>
                <td align="center" valign="middle">98.0</td>
              </tr>
              <tr>
                <td align="center" valign="middle" style="border-bottom:solid thin">
                  <bold>Adam</bold>
                </td>
                <td align="center" valign="middle" style="border-bottom:solid thin">99.5</td>
                <td align="center" valign="middle" style="border-bottom:solid thin">99.5</td>
                <td align="center" valign="middle" style="border-bottom:solid thin">99.5</td>
                <td align="center" valign="middle" style="border-bottom:solid thin">99.7</td>
              </tr>
            </tbody>
          </table>
        </table-wrap>
      </sec>
      <sec id="s4_6">
        <label>4.6</label>
        <title>Result Analysis for the Proposed DenseSwinGNNNet Model with Adam Optimizer</title>
        <p>The performance of the proposed DenseSwinGNNNet model for classifying turmeric leaf disease is extensively analyzed using various performance metrics, as shown in <xref ref-type="fig" rid="fig-19">Fig. 19</xref>. The model is trained for 30 epochs using the Adam optimizer with a learning rate of 0.0001, and the learning rate can be adjusted adaptively to improve convergence. The training and validation accuracy graph of <xref ref-type="fig" rid="fig-19">Fig. 19</xref>a shows a consistent rise, with accuracy improving steadily throughout epochs before eventually attaining 99%, indicating the model&#x2019;s strong ability to learn and excellent generalization on real-world data. The minimal gap between training and validation accuracy ensures negligible overfitting, making the model very reliable for real-world use. The plots of training and validation loss in <xref ref-type="fig" rid="fig-19">Fig. 19</xref>b have a consistent downward trend, indicating the model&#x2019;s effectiveness in reducing classification errors. The lack of divergence in the loss curves confirms that the model is not subject to extreme overfitting or underfitting. The precision curve in <xref ref-type="fig" rid="fig-19">Fig. 19</xref>c shows a very high precision of over 95%, which implies that false-positive misclassifications are negligible and the model is particularly suited for identifying diseased leaves. Also, the recall plot in <xref ref-type="fig" rid="fig-19">Fig. 19</xref>d verifies that the model accurately classifies both diseased and non-diseased leaves, achieving a recall of over 90% and keeping false negatives very low. The F1-score plot of <xref ref-type="fig" rid="fig-19">Fig. 19</xref>e has a similar pattern, with percentages above 94%, indicating a good balance between precision and recall, avoiding model bias towards either metric. 
All these results provide evidence of the efficacy of the proposed model as a high-performing method for detecting turmeric leaf disease. It can be used in precision agriculture as an early detection tool for disease and crop monitoring. </p>
        <fig id="fig-19">
          <label>Figure 19</label>
          <caption>
            <p>Graphical analysis of the proposed DenseSwinGNNNet model with Adam optimizer based on training and validation. (<bold>a</bold>) Accuracy, (<bold>b</bold>) Loss, (<bold>c</bold>) Precision, (<bold>d</bold>) Recall, and (<bold>e</bold>) F1-score.</p>
          </caption>
          <graphic mimetype="image" mime-subtype="tif" xlink:href="TSP_Phyton-94-73354-f019.tif"/>
        </fig>
        <p>The confusion matrix depicted in <xref ref-type="fig" rid="fig-20">Fig. 20</xref> shows the classification performance of the suggested DenseSwinGNNNet model, trained using the Adam optimizer, demonstrating its high accuracy and stability across four classes: Aphids Disease, Blotch, Healthy Leaf, and Leaf Spot. The model classifies almost all the samples from each class with near-perfect accuracy. For Aphids Disease, 21 out of 22 samples were accurately classified, with only one slight misclassification as Blotch, reflecting the model&#x2019;s high sensitivity. The Blotch class shows perfect identification, with all 24 samples correctly predicted, reflecting the optimizer&#x2019;s strength in adjusting learning parameters for precise convergence. In the same vein, both Healthy Leaf and Leaf Spot classes recorded immaculate recognition, with all 19 and 21 samples, respectively, being correctly predicted and no misclassifications. This stable performance indicates the model&#x2019;s strong feature extraction and improved learning ability with the incorporation of dense connectivity, transformer-based attention, and graph-based reasoning. The application of the Adam optimizer also helped with better gradient stability and faster convergence, minimizing overfitting and enabling smooth optimization. In totality, the confusion matrix verifies that DenseSwinGNNNet with the Adam optimizer holds superior generalization and classification performance, hence being a resourceful and trustworthy framework for smart agricultural disease diagnosis and precision crop monitoring.</p>
        <fig id="fig-20">
          <label>Figure 20</label>
          <caption>
            <p>Confusion matrix of the DenseSwinGNNNet model with Adam optimizer evaluated on the held-out 10% test set, illustrating correct and misclassified samples for the four turmeric leaf disease classes.</p>
          </caption>
          <graphic mimetype="image" mime-subtype="tif" xlink:href="TSP_Phyton-94-73354-f020.tif"/>
        </fig>
        <p>The performance test of the optimized DenseSwinGNNNet model, using the Adam optimizer as presented in <xref ref-type="table" rid="table-8">Table 8</xref>, exhibits impressive classification ability across all turmeric leaf categories, with near-perfect precision, recall, and F1-scores. For the Aphids Disease class, the model achieved 100% precision, 99% recall, and an F1-score of 98%, reflecting highly reliable predictions with only one minor incorrect classification. The Blotch class attained 98% precision and 100% recall with an F1-score of 99%, which indicates the model&#x2019;s capability to identify all actual Blotch samples with almost no false positives. The Healthy Leaf and Leaf Spot classes attained a perfect score of 100% precision, recall, and F1-score, affirming the model&#x2019;s strength in accurately identifying both healthy and diseased samples. The overall accuracy of 99.7%, together with stable generalization and learning, reflects the powerful capabilities of the Adam optimizer, which efficiently improves gradient convergence and avoids overfitting. DenseNet&#x2019;s feature reuse, Swin Transformer&#x2019;s self-attention for global context, and GNN&#x2019;s relational learning, combined, enable DenseSwinGNNNet to identify intricate spatial and textural information with higher accuracy. These findings together confirm that the Adam-optimized DenseSwinGNNNet model achieves top-notch performance and is highly efficient and reliable for automatic plant disease detection and precision agriculture use.</p>
        <table-wrap id="table-8">
          <label>Table 8</label>
          <caption>
            <p>Analysis of the proposed DenseSwinGNNNet model with Adam optimizer based on classification parameters.</p>
          </caption>
          <table>
            <thead>
              <tr>
                <th align="center" valign="middle" style="border-bottom:solid thin;border-top:solid thin">Class Name</th>
                <th align="center" valign="middle" style="border-bottom:solid thin;border-top:solid thin">Precision (%)</th>
                <th align="center" valign="middle" style="border-bottom:solid thin;border-top:solid thin">Recall (%)</th>
                <th align="center" valign="middle" style="border-bottom:solid thin;border-top:solid thin">F1-Score (%) </th>
                <th align="center" valign="middle" style="border-bottom:solid thin;border-top:solid thin">Accuracy (%)</th>
              </tr>
            </thead>
            <tbody>
              <tr>
                <td align="center" valign="middle">
                  <bold>Aphids Disease</bold>
                </td>
                <td align="center" valign="middle">100</td>
                <td align="center" valign="middle">99</td>
                <td align="center" valign="middle">98</td>
                <td rowspan="4" align="center" valign="middle" style="border-bottom:solid thin">99.7</td>
              </tr>
              <tr>
                <td align="center" valign="middle">
                  <bold>Blotch</bold>
                </td>
                <td align="center" valign="middle">98</td>
                <td align="center" valign="middle">100</td>
                <td align="center" valign="middle">99</td>
              </tr>
              <tr>
                <td align="center" valign="middle">
                  <bold>Healthy Leaf</bold>
                </td>
                <td align="center" valign="middle">100</td>
                <td align="center" valign="middle">100</td>
                <td align="center" valign="middle">100</td>
              </tr>
              <tr>
                <td align="center" valign="middle" style="border-bottom:solid thin">
                  <bold>Leaf Spot</bold>
                </td>
                <td align="center" valign="middle" style="border-bottom:solid thin">100</td>
                <td align="center" valign="middle" style="border-bottom:solid thin">100</td>
                <td align="center" valign="middle" style="border-bottom:solid thin">100</td>
              </tr>
            </tbody>
          </table>
        </table-wrap>
        <p>The ROC curve of the designed DenseSwinGNNNet model for the turmeric leaf disease classification, as illustrated in <xref ref-type="fig" rid="fig-21">Fig. 21</xref>, clearly depicts the enhanced discriminative ability of the model for four classes. The ROC curve graphically represents the true positive rate vs. the false positive rate for each class, providing a global measure of how well the model can differentiate between diseased and healthy leaves. The AUC scores for all four classes are 0.99, which is very close to perfect classification power. The high value of AUC clearly demonstrates that the model effectively discriminates among the four classes with minimal misclassification. The sharp slope of the curves towards the top-left corner indicates that the model has high recall with minimal false positives. The improved classification result stems from the analysis conducted using the Adam optimizer on the presented model, which enables effective decision-making. The AUC scores validate the robustness of the model in practical applications where high sensitivity is crucial for accurately detecting diseased turmeric leaves with few false alarms. The fact that different classes&#x2019; ROC curves are so close to one another indicates an evenly balanced model with no strong bias towards any particular class, which also verifies its generalization capability. The outcome of the ROC curve validates the precision of the model developed to apply in precision agriculture as an effective method for early detection and categorization of leaf turmeric disease, with the potential for improved crop management and yield increase.</p>
        <fig id="fig-21">
          <label>Figure 21</label>
          <caption>
            <p>Analysis of the proposed DenseSwinGNNNet model with Adam optimizer based on ROC curve.</p>
          </caption>
          <graphic mimetype="image" mime-subtype="tif" xlink:href="TSP_Phyton-94-73354-f021.tif"/>
        </fig>
      </sec>
      <sec id="s4_7">
        <label>4.7</label>
        <title>Five-Fold Cross-Validation Results</title>
        <p>The five-fold cross-validation results shown in <xref ref-type="table" rid="table-9">Table 9</xref> also prove the robustness and generalization power of the introduced DenseSwinGNNNet model. The model delivered consistently high classification performance in all five folds, with an accuracy between 99.2% and 99.6% and a mean accuracy of 99.4% &#xB1; 0.2%. Correspondingly, the precision, recall, and F1-score metrics were extremely consistent, with average values of 99.5%, 99.3%, and 99.4%, respectively, and minimal standard deviation over folds. This reinforces that the model&#x2019;s performance does not hinge on any fixed data split and that it maintains high discriminative capacity under different training&#x2013;testing partitions. The small variance across the folds suggests good generalization and robustness, effectively eliminating overfitting or performance inflation due to an advantageous division of data. These results provide strong empirical support for DenseSwinGNNNet&#x2019;s robust learning mechanism, which consistently achieves high accuracy and resilience across various test scenarios, thereby enhancing the credibility of its reported 99.7% classification accuracy.</p>
        <table-wrap id="table-9">
          <label>Table 9</label>
          <caption>
            <p>Five-fold cross-validation results.</p>
          </caption>
          <table>
            <thead>
              <tr>
                <th align="center" valign="middle" style="border-bottom:solid thin;border-top:solid thin">Fold</th>
                <th align="center" valign="middle" style="border-bottom:solid thin;border-top:solid thin">Accuracy (%)</th>
                <th align="center" valign="middle" style="border-bottom:solid thin;border-top:solid thin">Precision (%)</th>
                <th align="center" valign="middle" style="border-bottom:solid thin;border-top:solid thin">Recall (%)</th>
                <th align="center" valign="middle" style="border-bottom:solid thin;border-top:solid thin">F1-Score (%)</th>
              </tr>
            </thead>
            <tbody>
              <tr>
                <td align="center" valign="middle">Fold 1</td>
                <td align="center" valign="middle">99.5</td>
                <td align="center" valign="middle">99.6</td>
                <td align="center" valign="middle">99.4</td>
                <td align="center" valign="middle">99.5</td>
              </tr>
              <tr>
                <td align="center" valign="middle">Fold 2</td>
                <td align="center" valign="middle">99.4</td>
                <td align="center" valign="middle">99.5</td>
                <td align="center" valign="middle">99.3</td>
                <td align="center" valign="middle">99.4</td>
              </tr>
              <tr>
                <td align="center" valign="middle">Fold 3</td>
                <td align="center" valign="middle">99.3</td>
                <td align="center" valign="middle">99.4</td>
                <td align="center" valign="middle">99.2</td>
                <td align="center" valign="middle">99.3</td>
              </tr>
              <tr>
                <td align="center" valign="middle">Fold 4</td>
                <td align="center" valign="middle">99.6</td>
                <td align="center" valign="middle">99.5</td>
                <td align="center" valign="middle">99.4</td>
                <td align="center" valign="middle">99.5</td>
              </tr>
              <tr>
                <td align="center" valign="middle">Fold 5</td>
                <td align="center" valign="middle">99.2</td>
                <td align="center" valign="middle">99.3</td>
                <td align="center" valign="middle">99.1</td>
                <td align="center" valign="middle">99.2</td>
              </tr>
              <tr>
                <td align="center" valign="middle" style="border-bottom:solid thin">Mean &#xB1; SD</td>
                <td align="center" valign="middle" style="border-bottom:solid thin">99.4 &#xB1; 0.2</td>
                <td align="center" valign="middle" style="border-bottom:solid thin">99.5 &#xB1; 0.1</td>
                <td align="center" valign="middle" style="border-bottom:solid thin">99.3 &#xB1; 0.1</td>
                <td align="center" valign="middle" style="border-bottom:solid thin">99.4 &#xB1; 0.1</td>
              </tr>
            </tbody>
          </table>
        </table-wrap>
        <p>Beyond its classification accuracy, the proposed model exhibits potential for secure deployment under adversarially resilient configurations. Integrating adversarial training and differential privacy mechanisms can enhance model robustness and mitigate risks from malicious perturbations or data inference attacks. These measures ensure that the model&#x2019;s decisions remain stable and reliable even in the presence of noisy or manipulated data.</p>
      </sec>
    </sec>
    <sec id="s5">
      <label>5</label>
      <title>Ablation Analysis</title>
      <p>The ablation study presented in <xref ref-type="table" rid="table-10">Table 10</xref> demonstrates the model&#x2019;s improved performance with the progressive addition of various architectural elements, highlighting the efficiency of the developed DenseSwinGNNNet model. The baseline DenseNet121 model achieved an average precision of 90.5%, a recall of 90.2%, an F1-score of 90.2%, and an overall accuracy of 91.0%. Its robust feature extraction ability via dense connectivity is evident, but it lacks contextual information. When the Swin Transformer was combined with DenseNet121, performance was significantly enhanced. The average precision, recall, and F1-score increased to 93.7%, 93.5%, and 93.5%, respectively, while accuracy reached up to 94.0%. This improvement is due to the Swin Transformer&#x2019;s hierarchical attention mechanism, which captures local and global dependencies, enhancing feature representation. Lastly, the Proposed DenseSwinGNNNet model, which incorporates GNNs for learning relationships, performed spectacularly, achieving 99.5% precision, recall, and F1-score, and 99.7% accuracy, indicating nearly perfect classification. Adding GNN modules allows effective modeling of relationships between classes as well as spatial correlations among leaf features, resulting in better discrimination and resilience. In summary, the ablation study effectively confirms that each architectural improvement&#x2014;dense connections, Transformer-based attention, and GNN relational learning&#x2014;makes a synergistic contribution toward achieving state-of-the-art performance in plant disease classification.</p>
      <table-wrap id="table-10">
        <label>Table 10</label>
        <caption>
          <p>Ablation study.</p>
        </caption>
        <table>
          <thead>
            <tr>
              <th align="center" valign="middle" style="border-bottom:solid thin;border-top:solid thin">Model</th>
              <th align="center" valign="middle" style="border-bottom:solid thin;border-top:solid thin">Average Precision (%)</th>
              <th align="center" valign="middle" style="border-bottom:solid thin;border-top:solid thin">Average Recall (%)</th>
              <th align="center" valign="middle" style="border-bottom:solid thin;border-top:solid thin">Average F1-Score (%)</th>
              <th align="center" valign="middle" style="border-bottom:solid thin;border-top:solid thin">Accuracy (%)</th>
            </tr>
          </thead>
          <tbody>
            <tr>
              <td align="center" valign="middle" style="border-bottom:solid thin">
                <bold>DenseNet121 Model</bold>
              </td>
              <td align="center" valign="middle" style="border-bottom:solid thin">90.5</td>
              <td align="center" valign="middle" style="border-bottom:solid thin">90.2</td>
              <td align="center" valign="middle" style="border-bottom:solid thin">90.2</td>
              <td align="center" valign="middle" style="border-bottom:solid thin">91.0</td>
            </tr>
            <tr>
              <td align="center" valign="middle" style="border-bottom:solid thin">
                <bold>DenseNet121 with Swin Transformer</bold>
              </td>
              <td align="center" valign="middle" style="border-bottom:solid thin">93.7</td>
              <td align="center" valign="middle" style="border-bottom:solid thin">93.5</td>
              <td align="center" valign="middle" style="border-bottom:solid thin">93.5</td>
              <td align="center" valign="middle" style="border-bottom:solid thin">94.0</td>
            </tr>
            <tr>
              <td align="center" valign="middle" style="border-bottom:solid thin">
                <bold>Proposed DenseSwinGNNNet Model</bold>
              </td>
              <td align="center" valign="middle" style="border-bottom:solid thin">99.5</td>
              <td align="center" valign="middle" style="border-bottom:solid thin">99.5</td>
              <td align="center" valign="middle" style="border-bottom:solid thin">99.5</td>
              <td align="center" valign="middle" style="border-bottom:solid thin">99.7</td>
            </tr>
          </tbody>
        </table>
      </table-wrap>
    </sec>
    <sec id="s6">
      <label>6</label>
      <title>State-of-the-Art Analysis</title>
      <p>Previous studies on turmeric leaf disease detection, summarized in <xref ref-type="table" rid="table-11">Table 11</xref>, reveal a gradual evolution from traditional machine learning to deep hybrid architectures. Early models such as VGG16 and VGG19 [<xref ref-type="bibr" rid="ref-19">19</xref>] achieved accuracies between 93% and 97% on manually collected datasets but were limited to basic convolutional feature extraction without hierarchical or contextual understanding. Classical machine learning approaches using Logistic Regression, KNN, and SVM [<xref ref-type="bibr" rid="ref-16">16</xref>] achieved around 92% accuracy but relied heavily on handcrafted features, restricting adaptability to complex field images. </p>
      <p>In contrast, the proposed DenseSwinGNNNet integrates DenseNet121, Swin Transformer, and GNN in a unified, end-to-end framework that jointly learns local, global, and relational features. This design enables deeper spatial reasoning and stronger generalization, achieving an impressive 99.7% accuracy on the turmeric leaf disease dataset. The model&#x2019;s performance was further validated through five-fold cross-validation and significance testing (<italic>p</italic> &lt; 0.05), confirming its robustness. Other works, including CNN and InceptionV3-based models [<xref ref-type="bibr" rid="ref-4">4</xref>], and DenseNet201 with machine learning classifiers [<xref ref-type="bibr" rid="ref-5">5</xref>], demonstrated strong performance but were constrained by the simplicity of the dataset or non-leaf applications. Ensemble methods like SVM, Random Forest, and XGBoost [<xref ref-type="bibr" rid="ref-14">14</xref>] reached 93% accuracy yet lacked spatial feature integration. In contrast, the proposed DenseSwinGNNNet framework integrates DenseNet121, Swin Transformer, and GNN in an end-to-end architecture, combining convolutional, attention-based, and relational learning to achieve 99.7% accuracy with superior generalization, interpretability, and scalability over prior state-of-the-art models.</p>
      <table-wrap id="table-11">
        <label>Table 11</label>
        <caption>
          <p>State-of-the-art comparison.</p>
        </caption>
        <table>
          <thead>
            <tr>
              <th align="center" valign="middle" style="border-bottom:solid thin;border-top:solid thin">Year/Reference</th>
              <th align="center" valign="middle" style="border-bottom:solid thin;border-top:solid thin">Technique Used</th>
              <th align="center" valign="middle" style="border-bottom:solid thin;border-top:solid thin">Dataset Name</th>
              <th align="center" valign="middle" style="border-bottom:solid thin;border-top:solid thin">Number of Classes</th>
              <th align="center" valign="middle" style="border-bottom:solid thin;border-top:solid thin">Evaluation Parameters</th>
            </tr>
          </thead>
          <tbody>
            <tr>
              <td align="center" valign="middle" style="border-bottom:solid thin">2022/[<xref ref-type="bibr" rid="ref-19">19</xref>]</td>
              <td align="center" valign="middle" style="border-bottom:solid thin">VGG19, VGG16 and CNN</td>
              <td align="center" valign="middle" style="border-bottom:solid thin">Manually collected dataset</td>
              <td align="center" valign="middle" style="border-bottom:solid thin">-</td>
              <td align="center" valign="middle" style="border-bottom:solid thin">Accuracy of VGG19: 93%<break/>Precision of VGG19: 94%<break/>Accuracy of VGG16:<break/>97%<break/>Precision of VGG16: 97%</td>
            </tr>
            <tr>
              <td align="center" valign="middle" style="border-bottom:solid thin">2023/[<xref ref-type="bibr" rid="ref-16">16</xref>]</td>
              <td align="center" valign="middle" style="border-bottom:solid thin">Logistic Regression (LR), K-Nearest Neighbor (KNN) &amp; SVM</td>
              <td align="center" valign="middle" style="border-bottom:solid thin">Manually collected Images</td>
              <td align="center" valign="middle" style="border-bottom:solid thin">-</td>
                <td align="center" valign="middle" style="border-bottom:solid thin">Accuracy: 92%<break/>Precision: 92%<break/>Recall: 94%<break/>F1-Score: 91%</td>
            </tr>
            <tr>
              <td align="center" valign="middle" style="border-bottom:solid thin">2024/[<xref ref-type="bibr" rid="ref-4">4</xref>]</td>
              <td align="center" valign="middle" style="border-bottom:solid thin">CNN, InceptionV3, VGG16</td>
              <td align="center" valign="middle" style="border-bottom:solid thin">Collected from fields located in Andhra Pradesh</td>
                <td align="center" valign="middle" style="border-bottom:solid thin">Healthy<break/>Not healthy</td>
                <td align="center" valign="middle" style="border-bottom:solid thin">Accuracy: 90%</td>
            </tr>
            <tr>
              <td align="center" valign="middle" style="border-bottom:solid thin">2024/[<xref ref-type="bibr" rid="ref-5">5</xref>]</td>
              <td align="center" valign="middle" style="border-bottom:solid thin">DenseNet201, Logistic Regression (LR), Decision Tree Classifier (DTC)</td>
                <td align="center" valign="middle" style="border-bottom:solid thin">Starch-adulterated turmeric was meticulously created</td>
              <td align="center" valign="middle" style="border-bottom:solid thin">-</td>
              <td align="center" valign="middle" style="border-bottom:solid thin">Accuracy: 98%</td>
            </tr>
            <tr>
              <td align="center" valign="middle" style="border-bottom:solid thin">2024/[<xref ref-type="bibr" rid="ref-14">14</xref>]</td>
              <td align="center" valign="middle" style="border-bottom:solid thin">SVM, Random Forest (RF) &amp; XG Boost</td>
              <td align="center" valign="middle" style="border-bottom:solid thin">Manually collected Images</td>
              <td align="center" valign="middle" style="border-bottom:solid thin">-</td>
                <td align="center" valign="middle" style="border-bottom:solid thin">Accuracy: 93%</td>
            </tr>
            <tr>
              <td align="center" valign="middle" style="border-bottom:solid thin">Proposed Model</td>
              <td align="center" valign="middle" style="border-bottom:solid thin">DenseNet121 integrated with Swin Transformer and Graph Neural Network</td>
                <td align="center" valign="middle" style="border-bottom:solid thin">Dataset collected from Mendeley &#x201C;Image Dataset for Turmeric Plant Leaf Disease Detection&#x201D;</td>
              <td align="center" valign="middle" style="border-bottom:solid thin">Aphids Disease<break/>Blotch<break/>Healthy Leaf<break/>Leaf Spot</td>
                <td align="center" valign="middle" style="border-bottom:solid thin">Accuracy: 99.7%<break/>Precision: 99.5%<break/>Recall: 99.5%<break/>F1-score: 99.5%</td>
            </tr>
          </tbody>
        </table>
      </table-wrap>
      <p>The performance comparison summarized in <xref ref-type="table" rid="table-11">Table 11</xref> involves studies conducted on diverse datasets collected under different environmental and imaging conditions. Therefore, the reported accuracy metrics should not be viewed as direct measures of superiority. The comparison primarily highlights the architectural evolution and methodological innovations within the field, rather than absolute performance ranking. Despite this variability, the proposed DenseSwinGNNNet demonstrates consistently high results and robust cross-validation performance, underscoring its strong generalization capability and potential scalability to broader agricultural disease classification tasks.</p>
    </sec>
    <sec id="s7">
      <label>7</label>
      <title>Conclusion</title>
      <p>In this study, the proposed DenseSwinGNNNet model successfully combines DenseNet121, Swin Transformer, and GNN to attain a strong and accurate solution for turmeric leaf disease classification. The enhanced performance of the model, with an overall accuracy rate of 99.7% and consistently high precision, recall, and F1-score values of 99.5%, confirms its strength in classifying Aphids Disease, Blotch, Leaf Spot, and Healthy Leaf. The hybrid architecture leverages the feature extraction capability of DenseNet121, the hierarchical spatial pattern recognition of Swin Transformer, and the modeling of complex feature relationships by GNN, all of which enhance the model&#x2019;s generalization and classification abilities. Low reported misclassifications in the confusion matrix and high AUC scores also support the credibility of the model. The entire preprocessing pipeline of the research and strategic data augmentation ensured effective training and validation to avoid the risk of overfitting. By enabling accurate and early leaf disease detection, the DenseSwinGNNNet model is a valuable tool in precision agriculture, allowing farmers to embrace timely disease control and stem crop loss. Subsequent research could explore real-time deployment of the model on edge computing devices to facilitate feasible use in field disease detection and monitoring. The work represented herein is part of the grand effort to incorporate AI-based solutions into sustainable agricultural practices, aiming to enhance food security and farmers&#x2019; economic stability.</p>
    </sec>
  </body>
  <back>
    <ack>
      <p>Not Applicable.</p>
    </ack>
    <sec>
      <title>Funding Statement</title>
      <p>This work was supported through the Ongoing Research Funding Program (ORF-2025-498), King Saud University, Riyadh, Saudi Arabia.</p>
    </sec>
    <sec>
      <title>Author Contributions</title>
      <p>The authors confirm contribution to the paper as follows: Conceptualization, Seerat Singla and Gunjan Shandilya; methodology, Seerat Singla; software, Ateeq Ur Rehman; validation, Seerat Singla, and Gunjan Shandilya; formal analysis, Ruby Pant; investigation, Ajay Kumar; writing&#x2014;review and editing; resources, Ayman Altameem; data curation, Seerat Singla; writing original draft preparation, Seerat Singla; writing&#x2014;review and editing, Gunjan Shandilya, Ahmad Almogren, and Ateeq Ur Rehman; visualization; supervision, Ahmad Almogren and Ateeq Ur Rehman; project administration, Gunjan Shandilya; funding acquisition, Ayman Altameem and Ahmad Almogren. All authors reviewed the results and approved the final version of the manuscript.</p>
    </sec>
    <sec sec-type="data-availability">
      <title>Availability of Data and Materials</title>
      <p>The dataset used in this study is publicly available and can be accessed at: <ext-link ext-link-type="uri" xlink:href="https://data.mendeley.com/datasets/jtttfbx342/1">https://data.mendeley.com/datasets/jtttfbx342/1</ext-link> (accessed on 15 August 2025).</p>
    </sec>
    <sec>
      <title>Ethics Approval</title>
      <p>Not applicable.</p>
    </sec>
    <sec sec-type="COI-statement">
      <title>Conflicts of Interest</title>
      <p>The authors declare no conflicts of interest to report regarding the present study.</p>
    </sec>
    <ref-list content-type="authoryear">
      <title>References</title>
      <ref id="ref-1">
        <label>1.</label>
        <mixed-citation publication-type="journal">
<person-group person-group-type="author">
<string-name>
<surname>Subramanian</surname> 
<given-names>M</given-names>
</string-name>, 
<string-name>
<surname>Gantait</surname> 
<given-names>S</given-names>
</string-name>, 
<string-name>
<surname>Jaafar</surname> 
<given-names>JN</given-names>
</string-name>, 
<string-name>
<surname>Ismail</surname> 
<given-names>MF</given-names>
</string-name>, 
<string-name>
<surname>Sinniah</surname> 
<given-names>UR</given-names>
</string-name></person-group>. 
<article-title>Micropropagation of white turmeric (<italic>Curcuma zedoaria</italic> (Christm.) Roscoe) and establishment of adventitious root culture for the production of phytochemicals</article-title>. 
<source>Ind Crops Prod</source>. 
<year>2025</year>;
<volume>223</volume>:
<fpage>120101</fpage>. 
doi:<pub-id pub-id-type="doi">10.1016/j.indcrop.2024.120101</pub-id>. 

        </mixed-citation>
    </ref>
      <ref id="ref-2">
        <label>2.</label>
        <mixed-citation publication-type="journal">
<person-group person-group-type="author">
<string-name>
<surname>Albattah</surname> 
<given-names>W</given-names>
</string-name>, 
<string-name>
<surname>Javed</surname> 
<given-names>A</given-names>
</string-name>, 
<string-name>
<surname>Nawaz</surname> 
<given-names>M</given-names>
</string-name>, 
<string-name>
<surname>Masood</surname> 
<given-names>M</given-names>
</string-name>, 
<string-name>
<surname>Albahli</surname> 
<given-names>S</given-names>
</string-name></person-group>. 
<article-title>Artificial intelligence-based drone system for multiclass plant disease detection using an improved efficient convolutional neural network</article-title>. 
<source>Front Plant Sci</source>. 
<year>2022</year>;
<volume>13</volume>:
<fpage>808380</fpage>. 
doi:<pub-id pub-id-type="doi">10.3389/fpls.2022.808380</pub-id>. 

        </mixed-citation>
    </ref>
      <ref id="ref-3">
        <label>3.</label>
        <mixed-citation publication-type="journal">
<person-group person-group-type="author">
<string-name>
<surname>Lekbangpong</surname> 
<given-names>S</given-names>
</string-name>, 
<string-name>
<surname>Ratanachai</surname> 
<given-names>A</given-names>
</string-name></person-group>. 
<article-title>Technology Transfer needs assessment for turmeric farmers in Paphayom District, Phatthalung Province: production, problems and needs of farmers for technology transfer to do turmeric production in Paphayom District, Phatthalung Province</article-title>. 
<source>ASEAN J Sci Technol Rep</source>. 
<year>2024</year>;
<volume>28</volume>(
<issue>1</issue>):
<elocation-id>e254806</elocation-id>. 
doi:<pub-id pub-id-type="doi">10.55164/ajstr.v28i1.254806</pub-id>. 

        </mixed-citation>
    </ref>
      <ref id="ref-4">
        <label>4.</label>
        <mixed-citation publication-type="book">
<person-group person-group-type="author">
<string-name>
<surname>Chathurya</surname> 
<given-names>C</given-names>
</string-name>, 
<string-name>
<surname>Sachdeva</surname> 
<given-names>D</given-names>
</string-name>, 
<string-name>
<surname>Arora</surname> 
<given-names>M</given-names>
</string-name></person-group>. 
<chapter-title>Real-time turmeric leaf identification and classification using advanced deep learning models: initiative to smart agriculture</chapter-title>. In: 
<person-group person-group-type="editor">
<string-name>
<surname>Hassanien</surname> 
<given-names>AE</given-names>
</string-name>, 
<string-name>
<surname>Anand</surname> 
<given-names>S</given-names>
</string-name>, 
<string-name>
<surname>Jaiswal</surname> 
<given-names>A</given-names>
</string-name>, 
<string-name>
<surname>Kumar</surname> 
<given-names>P</given-names>
</string-name>,</person-group> editors. 
<source>Innovative computing and communications</source>. 
<publisher-loc>Singapore</publisher-loc>: 
<publisher-name>Springer Nature</publisher-name>; 
<year>2024</year>. p. 
<fpage>657</fpage>&#x2013;
<lpage>69</lpage>. 
doi:<pub-id pub-id-type="doi">10.1007/978-981-97-3817-5_46</pub-id>. 

        </mixed-citation>
    </ref>
      <ref id="ref-5">
        <label>5.</label>
        <mixed-citation publication-type="journal">
<person-group person-group-type="author">
<string-name>
<surname>Siam</surname> 
<given-names>AKMFK</given-names>
</string-name>, 
<string-name>
<surname>Nirob</surname> 
<given-names>MAS</given-names>
</string-name>, 
<string-name>
<surname>Bishshash</surname> 
<given-names>P</given-names>
</string-name>, 
<string-name>
<surname>Assaduzzaman</surname> 
<given-names>M</given-names>
</string-name>, 
<string-name>
<surname>Ghosh</surname> 
<given-names>A</given-names>
</string-name>, 
<string-name>
<surname>Noori</surname> 
<given-names>SRH</given-names>
</string-name></person-group>. 
<article-title>A data-driven approach to turmeric disease detection: dataset for plant condition classification</article-title>. 
<source>Data Brief</source>. 
<year>2025</year>;
<volume>59</volume>:
<fpage>111435</fpage>. 
doi:<pub-id pub-id-type="doi">10.1016/j.dib.2025.111435</pub-id>. 

        </mixed-citation>
    </ref>
      <ref id="ref-6">
        <label>6.</label>
        <mixed-citation publication-type="journal">
<person-group person-group-type="author">
<string-name>
<surname>Selvaraj</surname> 
<given-names>R</given-names>
</string-name>, 
<string-name>
<surname>Geetha Devasena</surname> 
<given-names>MS</given-names>
</string-name></person-group>. 
<article-title>A novel attention based vision transformer optimized with hybrid optimization algorithm for turmeric leaf disease detection</article-title>. 
<source>Sci Rep</source>. 
<year>2025</year>;
<volume>15</volume>(
<issue>1</issue>):
<fpage>17238</fpage>. 
doi:<pub-id pub-id-type="doi">10.1038/s41598-025-02185-7</pub-id>. 

        </mixed-citation>
    </ref>
      <ref id="ref-7">
        <label>7.</label>
        <mixed-citation publication-type="journal">
<person-group person-group-type="author">
<string-name>
<surname>Devisurya</surname> 
<given-names>V</given-names>
</string-name>, 
<string-name>
<surname>Devi Priya</surname> 
<given-names>R</given-names>
</string-name>, 
<string-name>
<surname>Anitha</surname> 
<given-names>N</given-names>
</string-name></person-group>. 
<article-title>Early detection of major diseases in turmeric plant using improved deep learning algorithm</article-title>. 
<source>Bull Pol Acad Sci Tech Sci</source>. 
<year>2022</year>;
<volume>70</volume>(
<issue>2</issue>):
<fpage>140689</fpage>. 
doi:<pub-id pub-id-type="doi">10.24425/bpasts.2022.140689</pub-id>. 

        </mixed-citation>
    </ref>
      <ref id="ref-8">
        <label>8.</label>
        <mixed-citation publication-type="journal">
<person-group person-group-type="author">
<string-name>
<surname>Vinayarani</surname> 
<given-names>G</given-names>
</string-name>, 
<string-name>
<surname>Prakash</surname> 
<given-names>HS</given-names>
</string-name></person-group>. 
<article-title>Growth promoting rhizospheric and endophytic bacteria from <italic>Curcuma longa</italic> L. as biocontrol agents against rhizome rot and leaf blight diseases</article-title>. 
<source>Plant Pathol J</source>. 
<year>2018</year>;
<volume>34</volume>(
<issue>3</issue>):
<fpage>218</fpage>&#x2013;
<lpage>35</lpage>. 
doi:<pub-id pub-id-type="doi">10.5423/PPJ.OA.11.2017.0225</pub-id>. 

        </mixed-citation>
    </ref>
      <ref id="ref-9">
        <label>9.</label>
        <mixed-citation publication-type="journal">
<person-group person-group-type="author">
<string-name>
<surname>Singh</surname> 
<given-names>G</given-names>
</string-name>, 
<string-name>
<surname>Al-Huqail</surname> 
<given-names>AA</given-names>
</string-name>, 
<string-name>
<surname>Almogren</surname> 
<given-names>A</given-names>
</string-name>, 
<string-name>
<surname>Kaur</surname> 
<given-names>S</given-names>
</string-name>, 
<string-name>
<surname>Joshi</surname> 
<given-names>K</given-names>
</string-name>, 
<string-name>
<surname>Singh</surname> 
<given-names>A</given-names>
</string-name>, 
<etal>et al</etal></person-group>. 
<article-title>Enhanced leaf disease segmentation using U-Net architecture for precision agriculture: a deep learning approach</article-title>. 
<source>Food Sci Nutr</source>. 
<year>2025</year>;
<volume>13</volume>(
<issue>7</issue>):
<elocation-id>e70594</elocation-id>. 
doi:<pub-id pub-id-type="doi">10.1002/fsn3.70594</pub-id>. 

        </mixed-citation>
    </ref>
      <ref id="ref-10">
        <label>10.</label>
        <mixed-citation publication-type="conf-proc">
<person-group person-group-type="author">
<string-name>
<surname>Sharma</surname> 
<given-names>R</given-names>
</string-name>, 
<string-name>
<surname>Sharma</surname> 
<given-names>A</given-names>
</string-name>, 
<string-name>
<surname>Hariharan</surname> 
<given-names>S</given-names>
</string-name>, 
<string-name>
<surname>Mahajan</surname> 
<given-names>S</given-names>
</string-name></person-group>. 
<article-title>Implementing convolutional neural networks and AdaBoost for varied turmeric discrimination in India</article-title>. In: 
<conf-name>Proceedings of the 2024 IEEE International Conference on Information Technology, Electronics and Intelligent Communication Systems (ICITEICS)</conf-name>; 
<conf-date>2024 Jun 28&#x2013;29</conf-date>; 
<conf-loc>Bangalore, India</conf-loc>. p. 
<fpage>1</fpage>&#x2013;
<lpage>5</lpage>. 
doi:<pub-id pub-id-type="doi">10.1109/ICITEICS61368.2024.10625183</pub-id>. 

        </mixed-citation>
    </ref>
      <ref id="ref-11">
        <label>11.</label>
        <mixed-citation publication-type="conf-proc">
<person-group person-group-type="author">
<string-name>
<surname>Mervin Paul Raj</surname> 
<given-names>M</given-names>
</string-name>, 
<string-name>
<surname>Vijayakumar</surname> 
<given-names>J</given-names>
</string-name></person-group>. 
<article-title>Turmeric farm monitoring and automation using deep learning and fuzzy logic on raspberry Pi: a low-cost and energy efficient solution</article-title>. In: 
<conf-name>Proceedings of the 2024 Fourth International Conference on Advances in Electrical, Computing, Communication and Sustainable Technologies (ICAECT)</conf-name>; 
<conf-date>2024 Jan 11&#x2013;12</conf-date>; 
<conf-loc>Bhilai, India</conf-loc>. p. 
<fpage>1</fpage>&#x2013;
<lpage>8</lpage>. 
doi:<pub-id pub-id-type="doi">10.1109/ICAECT60202.2024.10469659</pub-id>. 

        </mixed-citation>
    </ref>
      <ref id="ref-12">
        <label>12.</label>
        <mixed-citation publication-type="journal">
<person-group person-group-type="author">
<string-name>
<surname>Lanjewar</surname> 
<given-names>MG</given-names>
</string-name>, 
<string-name>
<surname>Asolkar</surname> 
<given-names>SS</given-names>
</string-name>, 
<string-name>
<surname>Parab</surname> 
<given-names>JS</given-names>
</string-name></person-group>. 
<article-title>Hybrid methods for detection of starch in adulterated turmeric from colour images</article-title>. 
<source>Multimed Tools Appl</source>. 
<year>2024</year>;
<volume>83</volume>(
<issue>25</issue>):
<fpage>65789</fpage>&#x2013;
<lpage>814</lpage>. 
doi:<pub-id pub-id-type="doi">10.1007/s11042-024-18195-y</pub-id>. 

        </mixed-citation>
    </ref>
      <ref id="ref-13">
        <label>13.</label>
        <mixed-citation publication-type="conf-proc">
<person-group person-group-type="author">
<string-name>
<surname>Selvaraj</surname> 
<given-names>R</given-names>
</string-name>, 
<string-name>
<surname>Geetha Devasena</surname> 
<given-names>MS</given-names>
</string-name>, 
<string-name>
<surname>Satheesh</surname> 
<given-names>T</given-names>
</string-name>, 
<string-name>
<surname>Sathishkumar</surname> 
<given-names>C</given-names>
</string-name></person-group>. 
<article-title>AI for smart agriculture&#x2013;a deep learning based turmeric leaf disease detection</article-title>. In: 
<conf-name>Proceedings of the 2024 9th International Conference on Communication and Electronics Systems (ICCES)</conf-name>; 
<conf-date>2024 Dec 16&#x2013;18</conf-date>; 
<conf-loc>Coimbatore, India</conf-loc>. p. 
<fpage>1491</fpage>&#x2013;
<lpage>5</lpage>. 
doi:<pub-id pub-id-type="doi">10.1109/ICCES63552.2024.10859805</pub-id>. 

        </mixed-citation>
    </ref>
      <ref id="ref-14">
        <label>14.</label>
        <mixed-citation publication-type="conf-proc">
<person-group person-group-type="author">
<string-name>
<surname>Tanwar</surname> 
<given-names>V</given-names>
</string-name>, 
<string-name>
<surname>Anand</surname> 
<given-names>V</given-names>
</string-name>, 
<string-name>
<surname>Chauhan</surname> 
<given-names>R</given-names>
</string-name>, 
<string-name>
<surname>Rawat</surname> 
<given-names>RS</given-names>
</string-name>, 
<string-name>
<surname>Kumar</surname> 
<given-names>GR</given-names>
</string-name></person-group>. 
<article-title>Enhanced classification of turmeric leaf disease severity levels using an optimized hybrid deep learning model</article-title>. In: 
<conf-name>Proceedings of the 2024 International Conference on E-mobility, Power Control and Smart Systems (ICEMPS)</conf-name>; 
<conf-date>2024 Apr 18&#x2013;20</conf-date>; 
<conf-loc>Thiruvananthapuram, India</conf-loc>. p. 
<fpage>1</fpage>&#x2013;
<lpage>5</lpage>. 
doi:<pub-id pub-id-type="doi">10.1109/ICEMPS60684.2024.10559345</pub-id>. 

        </mixed-citation>
    </ref>
      <ref id="ref-15">
        <label>15.</label>
        <mixed-citation publication-type="book">
<person-group person-group-type="author">
<string-name>
<surname>Lanjewar</surname> 
<given-names>MG</given-names>
</string-name>, 
<string-name>
<surname>Parate</surname> 
<given-names>RK</given-names>
</string-name>, 
<string-name>
<surname>Wakodikar</surname> 
<given-names>R</given-names>
</string-name>, 
<string-name>
<surname>Parab</surname> 
<given-names>JS</given-names>
</string-name></person-group>. 
<chapter-title>Detection of starch in turmeric using machine learning methods</chapter-title>. In: 
<person-group person-group-type="editor">
<string-name>
<surname>Kumar</surname> 
<given-names>S</given-names>
</string-name>, 
<string-name>
<surname>Sharma</surname> 
<given-names>H</given-names>
</string-name>, 
<string-name>
<surname>Balachandran</surname> 
<given-names>K</given-names>
</string-name>, 
<string-name>
<surname>Kim</surname> 
<given-names>JH</given-names>
</string-name>, 
<string-name>
<surname>Bansal</surname> 
<given-names>JC</given-names>
</string-name>,</person-group> editors. 
<source>Third congress on intelligent systems</source>. 
<publisher-loc>Singapore</publisher-loc>: 
<publisher-name>Springer Nature</publisher-name>; 
<year>2023</year>. p. 
<fpage>117</fpage>&#x2013;
<lpage>26</lpage>. 
doi:<pub-id pub-id-type="doi">10.1007/978-981-19-9379-4_10</pub-id>. 

        </mixed-citation>
    </ref>
      <ref id="ref-16">
        <label>16.</label>
        <mixed-citation publication-type="conf-proc">
<person-group person-group-type="author">
<string-name>
<surname>Senthil Pandi</surname> 
<given-names>S</given-names>
</string-name>, 
<string-name>
<surname>Kumar</surname> 
<given-names>P</given-names>
</string-name>, 
<string-name>
<surname>Salman Latheef</surname> 
<given-names>TA</given-names>
</string-name></person-group>. 
<article-title>Projection of plant leaf disease using support vector machine algorithm</article-title>. In: 
<conf-name>Proceedings of the 2023 International Conference on Recent Advances in Science and Engineering Technology (ICRASET)</conf-name>; 
<conf-date>2023 Nov 23&#x2013;24</conf-date>; 
<conf-loc>B. G. Nagara, India</conf-loc>. p. 
<fpage>1</fpage>&#x2013;
<lpage>6</lpage>. 
doi:<pub-id pub-id-type="doi">10.1109/icraset59632.2023.10419981</pub-id>. 

        </mixed-citation>
    </ref>
      <ref id="ref-17">
        <label>17.</label>
        <mixed-citation publication-type="book">
<person-group person-group-type="author">
<string-name>
<surname>Nawaz</surname> 
<given-names>M</given-names>
</string-name>, 
<string-name>
<surname>Nazir</surname> 
<given-names>T</given-names>
</string-name>, 
<string-name>
<surname>Khan</surname> 
<given-names>MA</given-names>
</string-name>, 
<string-name>
<surname>Rajinikanth</surname> 
<given-names>V</given-names>
</string-name>, 
<string-name>
<surname>Kadry</surname> 
<given-names>S</given-names>
</string-name></person-group>. 
<chapter-title>Plant disease classification using VGG-19 based faster-RCNN</chapter-title>. In: 
<person-group person-group-type="editor">
<string-name>
<surname>Singh</surname> 
<given-names>M</given-names>
</string-name>, 
<string-name>
<surname>Tyagi</surname> 
<given-names>V</given-names>
</string-name>, 
<string-name>
<surname>Gupta</surname> 
<given-names>PK</given-names>
</string-name>, 
<string-name>
<surname>Flusser</surname> 
<given-names>J</given-names>
</string-name>, 
<string-name>
<surname>&#xD6;ren</surname> 
<given-names>T</given-names>
</string-name>,</person-group> editors. 
<source>Advances in computing and data sciences</source>. 
<publisher-loc>Cham, Switzerland</publisher-loc>: 
<publisher-name>Springer Nature</publisher-name>; 
<year>2023</year>. p. 
<fpage>277</fpage>&#x2013;
<lpage>89</lpage>. 
doi:<pub-id pub-id-type="doi">10.1007/978-3-031-37940-6_23</pub-id>. 

        </mixed-citation>
    </ref>
      <ref id="ref-18">
        <label>18.</label>
        <mixed-citation publication-type="conf-proc">
<person-group person-group-type="author">
<string-name>
<surname>Velmurugan</surname> 
<given-names>S</given-names>
</string-name>, 
<string-name>
<surname>Ramdinesh Reddy</surname> 
<given-names>K</given-names>
</string-name>, 
<string-name>
<surname>Rahul</surname> 
<given-names>SG</given-names>
</string-name>, 
<string-name>
<surname>Vardhan</surname> 
<given-names>S</given-names>
</string-name>, 
<string-name>
<surname>Subitha</surname> 
<given-names>D</given-names>
</string-name>, 
<string-name>
<surname>Vignesh</surname> 
<given-names>SK</given-names>
</string-name></person-group>. 
<article-title>A comparative computational analysis of VGG16 and VGG19 in prediction of turmeric plant disease</article-title>. In: 
<conf-name>Proceedings of the 2022 8th International Conference on Advanced Computing and Communication Systems (ICACCS)</conf-name>; 
<conf-date>2022 Mar 25&#x2013;26</conf-date>; 
<conf-loc>Coimbatore, India</conf-loc>. p. 
<fpage>130</fpage>&#x2013;
<lpage>4</lpage>. 
doi:<pub-id pub-id-type="doi">10.1109/icaccs54159.2022.9784970</pub-id>. 

        </mixed-citation>
    </ref>
      <ref id="ref-19">
        <label>19.</label>
        <mixed-citation publication-type="conf-proc">
<person-group person-group-type="author">
<string-name>
<surname>Janani</surname> 
<given-names>V</given-names>
</string-name>, 
<string-name>
<surname>Siva Mangai</surname> 
<given-names>NM</given-names>
</string-name></person-group>. 
<article-title>Analysis and classification of rhizome rot disease for turmeric plant using artificial intelligence</article-title>. In: 
<conf-name>Proceedings of the 2022 International Conference on Sustainable Computing and Data Communication Systems (ICSCDS)</conf-name>; 
<conf-date>2022 Apr 7&#x2013;9</conf-date>; 
<conf-loc>Erode, India</conf-loc>. p. 
<fpage>411</fpage>&#x2013;
<lpage>4</lpage>. 
doi:<pub-id pub-id-type="doi">10.1109/icscds53736.2022.9760884</pub-id>. 

        </mixed-citation>
    </ref>
      <ref id="ref-20">
        <label>20.</label>
        <mixed-citation publication-type="web">
<person-group person-group-type="author">
<collab>Mendeley Data</collab>
</person-group>. 
<article-title>Turmeric Plant Disease Dataset [Dataset]</article-title>. 
<year>2020</year> [cited 
<date-in-citation>2025 Aug 15</date-in-citation>]. 
<comment>Available from: <ext-link ext-link-type="uri" xlink:href="https://data.mendeley.com/datasets/jtttfbx342/1" xmlns:xlink="http://www.w3.org/1999/xlink">https://data.mendeley.com/datasets/jtttfbx342/1</ext-link></comment> 

        </mixed-citation>
    </ref>
    </ref-list>
  </back>
</article>
