<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.1 20151215//EN" "http://jats.nlm.nih.gov/publishing/1.1/JATS-journalpublishing1.dtd">
<article xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:mml="http://www.w3.org/1998/Math/MathML" xml:lang="en" article-type="research-article" dtd-version="1.1">
<front>
<journal-meta>
<journal-id journal-id-type="pmc">CMC</journal-id>
<journal-id journal-id-type="nlm-ta">CMC</journal-id>
<journal-id journal-id-type="publisher-id">CMC</journal-id>
<journal-title-group>
<journal-title>Computers, Materials &#x0026; Continua</journal-title>
</journal-title-group>
<issn pub-type="epub">1546-2226</issn>
<issn pub-type="ppub">1546-2218</issn>
<publisher>
<publisher-name>Tech Science Press</publisher-name>
<publisher-loc>USA</publisher-loc>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="publisher-id">64394</article-id>
<article-id pub-id-type="doi">10.32604/cmc.2025.064394</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Article</subject>
</subj-group>
</article-categories>
<title-group>
<article-title>Enhancing Military Visual Communication in Harsh Environments Using Computer Vision Techniques</article-title>
<alt-title alt-title-type="left-running-head">Enhancing Military Visual Communication in Harsh Environments Using Computer Vision Techniques</alt-title>
<alt-title alt-title-type="right-running-head">Enhancing Military Visual Communication in Harsh Environments Using Computer Vision Techniques</alt-title>
</title-group>
<contrib-group>
<contrib id="author-1" contrib-type="author" corresp="yes">
<name name-style="western"><surname>Selvarajan</surname><given-names>Shitharth</given-names></name><xref ref-type="aff" rid="aff-1">1</xref><xref ref-type="aff" rid="aff-2">2</xref><xref ref-type="aff" rid="aff-3">3</xref><email>s.selvarajan@leedsbeckett.ac.uk</email></contrib>
<contrib id="author-2" contrib-type="author">
<name name-style="western"><surname>Manoharan</surname><given-names>Hariprasath</given-names></name><xref ref-type="aff" rid="aff-4">4</xref></contrib>
<contrib id="author-3" contrib-type="author">
<name name-style="western"><surname>Al-Shehari</surname><given-names>Taher</given-names></name><xref ref-type="aff" rid="aff-5">5</xref></contrib>
<contrib id="author-4" contrib-type="author">
<name name-style="western"><surname>Alsadhan</surname><given-names>Nasser A</given-names></name><xref ref-type="aff" rid="aff-6">6</xref></contrib>
<contrib id="author-5" contrib-type="author">
<name name-style="western"><surname>Singh</surname><given-names>Subhav</given-names></name><xref ref-type="aff" rid="aff-7">7</xref><xref ref-type="aff" rid="aff-8">8</xref></contrib>
<aff id="aff-1"><label>1</label><institution>School of Built Environment, Engineering and Computing, Leeds Beckett University</institution>, <addr-line>Leeds, LS6 3HF</addr-line>, <country>UK</country></aff>
<aff id="aff-2"><label>2</label><institution>Department of Computer Science and Engineering, Chennai Institute of Technology</institution>, <addr-line>Chennai, 600069</addr-line>, <country>India</country></aff>
<aff id="aff-3"><label>3</label><institution>Centre for Research Impact &#x0026; Outcome, Chitkara University Institute of Engineering and Technology, Chitkara University</institution>, <addr-line>Rajpura, 140401</addr-line>, <country>India</country></aff>
<aff id="aff-4"><label>4</label><institution>Department of Electronics and Communication Engineering, Panimalar Engineering College</institution>, <addr-line>Poonamallee, Chennai, 600123</addr-line>, <country>India</country></aff>
<aff id="aff-5"><label>5</label><institution>Computer Skills, Department of Self-Development Skill, Common First Year Deanship, King Saud University</institution>, <addr-line>Riyadh, 11362</addr-line>, <country>Saudi Arabia</country></aff>
<aff id="aff-6"><label>6</label><institution>Computer Science Department, College of Computer and Information Sciences, King Saud University</institution>, <addr-line>Riyadh, 12372</addr-line>, <country>Saudi Arabia</country></aff>
<aff id="aff-7"><label>7</label><institution>Chitkara Centre for Research and Development, Chitkara University</institution>, <addr-line>Rajpura, 140401</addr-line>, <country>India</country></aff>
<aff id="aff-8"><label>8</label><institution>Division of Research &#x0026; Innovation, Uttaranchal University</institution>, <addr-line>Dehradun, 248007</addr-line>, <country>India</country></aff>
</contrib-group>
<author-notes>
<corresp id="cor1"><label>&#x002A;</label>Corresponding Author: Shitharth Selvarajan. Email: <email>s.selvarajan@leedsbeckett.ac.uk</email></corresp>
</author-notes>
<pub-date date-type="collection" publication-format="electronic">
<year>2025</year>
</pub-date>
<pub-date date-type="pub" publication-format="electronic">
<day>03</day><month>07</month><year>2025</year>
</pub-date>
<volume>84</volume>
<issue>2</issue>
<fpage>3541</fpage>
<lpage>3557</lpage>
<history>
<date date-type="received">
<day>14</day>
<month>2</month>
<year>2025</year>
</date>
<date date-type="accepted">
<day>04</day>
<month>6</month>
<year>2025</year>
</date>
</history>
<permissions>
<copyright-statement>&#x00A9; 2025 The Authors.</copyright-statement>
<copyright-year>2025</copyright-year>
<copyright-holder>Published by Tech Science Press.</copyright-holder>
<license xlink:href="https://creativecommons.org/licenses/by/4.0/">
<license-p>This work is licensed under a <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution 4.0 International License</ext-link>, which permits unrestricted use, distribution, and reproduction in any medium, provided the original work is properly cited.</license-p>
</license>
</permissions>
<self-uri content-type="pdf" xlink:href="TSP_CMC_64394.pdf"></self-uri>
<abstract>
<p>This research investigates the application of digital images in military contexts by utilizing analytical equations to augment human visual capabilities. A comparable filter is used to improve the visual quality of the photographs by reducing truncations in the existing images. Furthermore, the collected images undergo processing using histogram gradients and a flexible threshold value that may be adjusted in specific situations. Thus, it is possible to reduce the occurrence of overlapping circumstances in collective picture characteristics by substituting grey-scale photos with colorized factors. The proposed method offers additional robust feature representations by imposing a limiting factor to reduce overall scattering values. This is achieved by visualizing a graphical function. Moreover, to derive valuable insights from a series of photos, both the separation and inversion processes are conducted. This involves analyzing comparison results across four different scenarios. The results of the comparative analysis show that the proposed method effectively reduces the difficulties associated with time and space to 1 s and 3%, respectively. In contrast, the existing strategy exhibits higher complexities of 3 s and 9.1%, respectively.</p>
</abstract>
<kwd-group kwd-group-type="author">
<kwd>Image enhancement</kwd>
<kwd>visual information</kwd>
<kwd>harsh environment</kwd>
<kwd>computer vision</kwd>
</kwd-group>
<funding-group>
<award-group id="awg1">
<funding-source>King Saud University</funding-source>
<award-id>ORF-2025-846</award-id>
</award-group>
</funding-group>
</article-meta>
</front>
<body>
<sec id="s1">
<label>1</label>
<title>Introduction</title>
<p>Identification of various circumstances with visualized patterns is mostly conducted via image processing techniques, particularly in cases where a distinct block division is not evident. In this scenario, it is crucial to employ a vision technology that accurately detects suitable boundaries while eliminating hazy circumstances to ensure the complete separation of blocks. Although many image processing algorithms have been used to identify objects in challenging situations, the accuracy of identification significantly decreases after scaling. Therefore, the suggested methodology utilizes computer vision algorithms to enhance the precision and security of collected images, enabling the acquisition of comprehensive shape features through classification techniques. Furthermore, utilizing an image processing tool is crucial in facilitating comprehensive surveillance and monitoring of the surrounding items. This tool enables the real-time separation of recognized objects through the implementation of a series of voice commands. Moreover, the application of computer vision algorithms enables real-time authentication, hence ensuring secure access to all blocks within each image with improved pixel quality. Nevertheless, the attributes of photographs acquired in challenging circumstances stay the same even when the images are scaled, enabling intelligent identification for both damage evaluation and defense tactics. Given the increased volume of data created in military applications, it becomes feasible to achieve comprehensive situational awareness through the use of visual patterns. In this scenario, the entire context can be effectively deployed by deploying centralized node processing units. <xref ref-type="fig" rid="fig-1">Fig. 1</xref> illustrates the block diagram of the proposed computer vision approach for detecting different objects collected in challenging situations.</p>
<fig id="fig-1">
<label>Figure 1</label>
<caption>
<title>Block diagram of computer vision measures in harsh environments</title>
</caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_64394-fig-1.tif"/>
</fig>
<p><xref ref-type="fig" rid="fig-1">Fig. 1</xref> presents the overall architecture of the proposed computer vision-based enhancement framework tailored for military applications in harsh environments. The system initiates with the image acquisition module, where real-time images affected by environmental distortions are captured. These images are passed through a visualized filtering stage, which isolates key edges and suppresses noise while preserving structural details. Next, graphical enhancement functions are applied to address truncations and illumination inconsistencies, improving clarity in low-visibility conditions. The histogram gradient module then identifies edge orientations and spatial intensity variations, aiding in precise object delineation. Following this, adaptive thresholding adjusts the local contrast and suppresses overlapping features. The final stages involve image separation and inversion mechanisms, which ensure robust segmentation of objects across multiple frequencies and lighting conditions. Together, these interconnected blocks form a cohesive pipeline for real-time, distortion-resilient visual communication.</p>
<sec id="s1_1">
<label>1.1</label>
<title>Background and Related Works</title>
<p>The analysis of vision approaches is crucial for obtaining significant insights, as their implementation necessitates real-time processing while mitigating external influences on scaled images. Therefore, in this section, a comprehensive review of relevant works is conducted using multiple objective patterns, and a modernization solution is attained by enhancing essential criteria. In addition, the key functions of image processing techniques are examined to manage intricate security-related challenges. The existence of several feature sets in the processing of colorized image sets is seen to yield the most important metric in transmission systems [<xref ref-type="bibr" rid="ref-1">1</xref>]. The application of color segment embedding enables the comprehensive identification of various effects inside underwater systems, hence facilitating the establishment of a cohesive framework with interconnected mechanisms. While it is evident that underwater systems may be identified using color scaling mechanisms in real time, the application of homomorphic and multiple filter techniques is necessary to incorporate specific traits. This integration adds complexity to the process of getting unambiguous visualization. The retrieval network facilitates the analysis of complete picture features by leveraging neural networks, which are processed at a rapid rate [<xref ref-type="bibr" rid="ref-2">2</xref>]. This process involves the conversion of low-mapped features into high-spectral characteristics. It has been found that a greater number of localized picture features are required during these conversions to divide the entire block and distribute semantic information. However, as the images are reshaped, the local details will be eliminated, necessitating the use of gradient mapping features that are not noticed in retrieval networks. 
Both of the above-mentioned processes focus on determining color variations and do not address information concerning grayscale spectrums, resulting in higher mistake rates.</p>
<p>In contrast, Ref. [<xref ref-type="bibr" rid="ref-3">3</xref>] predicts a greater quantity of data created for military applications by leveraging more comprehensive information from the entire image set. The observed outcomes in such forecasts are subject to interpretations influenced by the presence of normalization factors. If the threshold values are eliminated, the system will no longer retain the border requirements for the image collection, rendering direct sources unsuitable for military applications. Therefore, the mistake rate during identification increases due to the influence of interpretation values, and achieving precise mapping with utility features for improved decision-making is not possible. The estimation of acquired images in different contexts is conducted in [<xref ref-type="bibr" rid="ref-4">4</xref>] using distinct histogram gradients. These gradients are used to boost the current bands that are active in the resized photos. When gradients are raised, there is an increase in periodicity, resulting in compression rates that exceed the boundary limitations. This phenomenon gives rise to the occurrence of many mistakes. Furthermore, this study presents a scene discrimination mechanism that is applicable to a wide range of applications. This mechanism involves the real-time identification of important targets utilizing infrared patterns [<xref ref-type="bibr" rid="ref-5">5</xref>]. The presence of such infrared patterns is widely recognized as a fundamental approach in the early stages of picture analysis. When defining infrared patterns, it is necessary to construct two types of scenes, each with critical feature points. This creates a sophisticated system that separates each block. Furthermore, to address challenging environments, it is necessary to remove unclear images at designated target areas, requiring the use of multiple spectral scales. This approach effectively reduces the overall running time in this scenario. 
In addition to the integration procedure, the aforementioned strategic considerations can only be applied to underwater systems where it is significantly challenging to eliminate complete roughness in the entire image.</p>
<p>The use of a specialized device is employed in [<xref ref-type="bibr" rid="ref-6">6</xref>] to aid in the identification of image blocks within the near-infrared areas, where an edge detection operation is carried out. Throughout this process, the effect on the marginal sequence can be observed for all progressive values, resulting in the definition of complete sinusoidal functions in an appropriate manner. Furthermore, the extraction process in this scenario is straightforward compared to morphological separations, allowing for the integration of additional filter requirements with homomorphic features. Should advanced functionality in marginal distributions be necessary, machine vision techniques can be utilized to address uncontrollable concerns in specific applications. Various feature enhancement models are subsequently identified using deep learning algorithms, leading to improved performance visions accompanied by suitable countermeasures [<xref ref-type="bibr" rid="ref-7">7</xref>]. If any issues arise during image processing within block separation units, auxiliary networks may be employed to assist in separating different blocks after resizing the image sequence. In this system, the addition of auxiliary units will disrupt the initial image set, resulting in decreased classification accuracy due to the larger structure. In the context of computer vision measurements, it is possible to implement a retention mechanism when an image set is rejected. This method involves using image-enhancing features, such as a multiple-channel fusion procedure, in conjunction with adaptive color mechanisms [<xref ref-type="bibr" rid="ref-8">8</xref>]. By incorporating diverse color elements, it becomes possible to classify images accurately. However, in challenging circumstances, if the background does not match the appropriate color components, vision tasks are likely to fail. In addition, incorporating weighted multi-scale features can enhance the contrast of an image. 
This contrast enhancement is applied to an image collection by utilizing reliable parameters for reconstruction procedures [<xref ref-type="bibr" rid="ref-9">9</xref>]. In this scenario, all reliable factors must collectively contribute to improving visual acuity across all forms of irradiance, thereby eliminating any low-frequency components at the receiver. <xref ref-type="table" rid="table-1">Table 1</xref> presents a comparison of similar works that utilize objective functions.</p>
<table-wrap id="table-1">
<label>Table 1</label>
<caption>
<title>Existing vs. Proposed</title>
</caption>
<table>
<colgroup>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
</colgroup>
<thead>
<tr>
<th align="center" rowspan="2">References</th>
<th align="center" rowspan="2">Methods/Algorithms</th>
<th colspan="4">Objectives</th>
</tr>
<tr>
<th>A</th>
<th>B</th>
<th>C</th>
<th>D</th>
</tr>
</thead>
<tbody>
<tr>
<td>[<xref ref-type="bibr" rid="ref-10">10</xref>]</td>
<td>Dual-link distributed source coding scheme with efficient hyperspectral image transmissions</td>
<td>&#x2713;</td>
<td></td>
<td>&#x2713;</td>
<td></td>
</tr>
<tr>
<td>[<xref ref-type="bibr" rid="ref-11">11</xref>]</td>
<td>Offloading computational resources for dark channel improvements</td>
<td></td>
<td>&#x2713;</td>
<td>&#x2713;</td>
<td></td>
</tr>
<tr>
<td>[<xref ref-type="bibr" rid="ref-12">12</xref>]</td>
<td>Quantization step estimation based on JPEG coefficient histograms and spectral analysis</td>
<td></td>
<td></td>
<td>&#x2713;</td>
<td>&#x2713;</td>
</tr>
<tr>
<td>[<xref ref-type="bibr" rid="ref-13">13</xref>]</td>
<td>Contextual grounding for selecting relevant algorithmic approaches</td>
<td></td>
<td>&#x2713;</td>
<td></td>
<td>&#x2713;</td>
</tr>
<tr>
<td>[<xref ref-type="bibr" rid="ref-14">14</xref>]</td>
<td>Artificial intelligence for enhanced image features</td>
<td>&#x2713;</td>
<td></td>
<td></td>
<td>&#x2713;</td>
</tr>
<tr>
<td>[<xref ref-type="bibr" rid="ref-15">15</xref>]</td>
<td>Cross-point approach for image enhancements</td>
<td></td>
<td>&#x2713;</td>
<td></td>
<td>&#x2713;</td>
</tr>
<tr>
<td>[<xref ref-type="bibr" rid="ref-16">16</xref>]</td>
<td>Generalized class of vision enhancements with individual data sets</td>
<td>&#x2713;</td>
<td>&#x2713;</td>
<td></td>
<td></td>
</tr>
<tr>
<td>Proposed</td>
<td>Computer vision algorithms for image enhancement in harsh environments</td>
<td>&#x2713;</td>
<td>&#x2713;</td>
<td>&#x2713;</td>
<td>&#x2713;</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<fn id="table-1fn1" fn-type="other">
<p>Note: A: Filtering and enhancements; B: Truncation visualization with graphical functions; C: Visual separation and inversion; D: Restoration factors.</p>
</fn>
</table-wrap-foot>
</table-wrap>
</sec>
<sec id="s1_2">
<label>1.2</label>
<title>Research Gap and Motivation</title>
<p>Various objective patterns are noticed in different ways when visualization is processed utilizing optimized algorithms, as indicated in <xref ref-type="table" rid="table-1">Table 1</xref>. Furthermore, each traditional approach examines the processing strategy by segregating different blocks, resulting in significantly fewer circumstantial detections in this scenario. Moreover, a significant deficiency in the current system is the absence of defined filters, resulting in reduced visualization factors and hence low restoration factors. Therefore, the suggested approach must be designed to address the limitations of existing methods in the following manner.</p>

<p>RG1: Can the integration of suitable filters for picture enhancements be achieved through the utilization of control factor measurements?</p>
<p>RG2: Is it possible to reduce scattering measurements by observing graphical functions with defined restriction settings?</p>
<p>RG3: Can pictures be effectively separated using suitable restoration and inversion factors?</p>
</sec>
<sec id="s1_3">
<label>1.3</label>
<title>Major Contributions</title>
<p>To address the limitations of current methodologies, the proposed strategy introduces a robust computer vision framework that leverages histogram gradients and adaptive thresholds for visual communication in harsh military environments. The key contributions of this work are as follows:
<list list-type="bullet">
<list-item>
<p>Integration of a precision-driven filtering mechanism that enhances the visual quality of degraded military images using well-defined persuasive functions.</p></list-item>
<list-item>
<p>Development of a graphical function-based visualization model that applies constraint-aware parameters to reduce image scattering and minimize truncation effects in low-light or complex environments.</p></list-item>
<list-item>
<p>Implementation of a gradient-based restoration and inversion technique that improves image separability and enables accurate object recognition by applying adaptive inversion factors.</p></list-item>
<list-item>
<p>Comprehensive evaluation using 928 real-time military images, demonstrating significant improvements over conventional methods&#x2014;with a reduction in processing time to 1 s and enhancement error down to 3%.</p></list-item>
</list></p>
</sec>
</sec>
<sec id="s2">
<label>2</label>
<title>Proposed System Model</title>
<p>This section explores the mathematical methodology employed for picture upgrades, with the aim of effectively addressing challenging settings that require a conversion state for visual identification. Analytical representations in this particular situation assume a significant role as they facilitate the classification and enhancement of extracted images, hence enabling the recognition of comprehensive properties essential for various applications. The conversion probabilities from a low to a high state are monitored in real-time using appropriate mathematical expressions, which are also represented by similar parametric expressions.</p>
<sec id="s2_1">
<label>2.1</label>
<title>Visualized Filter</title>
<p>In order to attain a high level of precision in collected images within challenging military settings characterized by the disruption of diverse air conditions, a decomposition factor is employed to ensure the preservation of all image edges. The acquisition of detailed layers in current photos with high enhancement values is a direct consequence of the preservation process, as demonstrated by <xref ref-type="disp-formula" rid="eqn-1">Eq. (1)</xref>.
<disp-formula id="eqn-1"><label>(1)</label><mml:math id="mml-eqn-1" display="block"><mml:msub><mml:mrow><mml:mtext>VF</mml:mtext></mml:mrow><mml:mrow><mml:mrow><mml:mtext>i</mml:mtext></mml:mrow></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mo movablelimits="true" form="prefix">max</mml:mo><mml:munderover><mml:mo>&#x2211;</mml:mo><mml:mrow><mml:mrow><mml:mtext>i</mml:mtext></mml:mrow><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mrow><mml:mtext>n</mml:mtext></mml:mrow></mml:mrow></mml:munderover><mml:msub><mml:mrow><mml:mtext>i</mml:mtext></mml:mrow><mml:mrow><mml:mrow><mml:mtext>o</mml:mtext></mml:mrow></mml:mrow></mml:msub><mml:mo stretchy="false">(</mml:mo><mml:mo>.</mml:mo><mml:mo stretchy="false">)</mml:mo><mml:mo>&#x00D7;</mml:mo><mml:msub><mml:mrow><mml:mi mathvariant="normal">&#x03C9;</mml:mi></mml:mrow><mml:mrow><mml:mrow><mml:mtext>c</mml:mtext></mml:mrow></mml:mrow></mml:msub><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mtext>i</mml:mtext></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:math></disp-formula>where,</p>
<p><inline-formula id="ieqn-1"><mml:math id="mml-ieqn-1"><mml:msub><mml:mrow><mml:mtext>i</mml:mtext></mml:mrow><mml:mrow><mml:mrow><mml:mtext>o</mml:mtext></mml:mrow></mml:mrow></mml:msub><mml:mo stretchy="false">(</mml:mo><mml:mo>.</mml:mo><mml:mo stretchy="false">)</mml:mo></mml:math></inline-formula> represents current image observations</p>
<p><inline-formula id="ieqn-2"><mml:math id="mml-ieqn-2"><mml:msub><mml:mrow><mml:mi mathvariant="normal">&#x03C9;</mml:mi></mml:mrow><mml:mrow><mml:mrow><mml:mtext>c</mml:mtext></mml:mrow></mml:mrow></mml:msub><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mtext>i</mml:mtext></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:math></inline-formula> indicates control factor of images.</p>
</sec>
<sec id="s2_2">
<label>2.2</label>
<title>Truncated Visualizations</title>
<p>The existence of limited illumination in captured photos for military purposes will invariably hinder the attainment of comprehensive improvements, hence rendering the process exceedingly intricate in ascertaining precise resolutions. Hence, it is imperative to prevent low light circumstances by adjusting the appropriate parametric index, as specified in <xref ref-type="disp-formula" rid="eqn-2">Eq. (2)</xref>.
<disp-formula id="eqn-2"><label>(2)</label><mml:math id="mml-eqn-2" display="block"><mml:mi>T</mml:mi><mml:msub><mml:mi>V</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mo movablelimits="true" form="prefix">min</mml:mo><mml:munderover><mml:mo>&#x2211;</mml:mo><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:munderover><mml:msub><mml:mi>&#x03B3;</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mi>n</mml:mi></mml:mrow></mml:msub><mml:mo stretchy="false">(</mml:mo><mml:msub><mml:mi>&#x03C4;</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo>+</mml:mo><mml:msub><mml:mi>d</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo stretchy="false">)</mml:mo></mml:math></disp-formula>where,</p>
<p><inline-formula id="ieqn-3"><mml:math id="mml-ieqn-3"><mml:msub><mml:mi>&#x03B3;</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mi>n</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> denotes the presence of low light in images</p>
<p><inline-formula id="ieqn-4"><mml:math id="mml-ieqn-4"><mml:msub><mml:mi>&#x03C4;</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> indicates limit settings</p>
<p><inline-formula id="ieqn-5"><mml:math id="mml-ieqn-5"><mml:msub><mml:mi>d</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> represents detailed factor of each image.</p>
</sec>
<sec id="s2_3">
<label>2.3</label>
<title>Visualization Enhancement</title>
<p>In situations when low light conditions are present, it becomes imperative to emphasize specific qualities through the utilization of visualization functions. The proposed strategy aims to enhance the visualization factors by utilizing a two-degree value approach, resulting in the attainment of a persuaded function as illustrated in <xref ref-type="disp-formula" rid="eqn-3">Eq. (3)</xref>.
<disp-formula id="eqn-3"><label>(3)</label><mml:math id="mml-eqn-3" display="block"><mml:mi>V</mml:mi><mml:msub><mml:mi>E</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mo movablelimits="true" form="prefix">max</mml:mo><mml:munderover><mml:mo>&#x2211;</mml:mo><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:munderover><mml:msub><mml:mi>a</mml:mi><mml:mrow><mml:mi>d</mml:mi></mml:mrow></mml:msub><mml:mo stretchy="false">(</mml:mo><mml:mi>i</mml:mi><mml:mo stretchy="false">)</mml:mo><mml:msub><mml:mi>v</mml:mi><mml:mrow><mml:mi>d</mml:mi></mml:mrow></mml:msub><mml:mo stretchy="false">(</mml:mo><mml:mi>i</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:math></disp-formula>where,</p>
<p><inline-formula id="ieqn-6"><mml:math id="mml-ieqn-6"><mml:msub><mml:mi>a</mml:mi><mml:mrow><mml:mi>d</mml:mi></mml:mrow></mml:msub><mml:mo stretchy="false">(</mml:mo><mml:mi>i</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:math></inline-formula> denotes convinced functions for in-flight images</p>
<p><inline-formula id="ieqn-7"><mml:math id="mml-ieqn-7"><mml:msub><mml:mi>v</mml:mi><mml:mrow><mml:mi>d</mml:mi></mml:mrow></mml:msub><mml:mo stretchy="false">(</mml:mo><mml:mi>i</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:math></inline-formula> indicates definite functions for complete visions.</p>
</sec>
<sec id="s2_4">
<label>2.4</label>
<title>Graphical Functions</title>
<p>In order to achieve comprehensive augmentation of cloud photographs obtained during diverse military operations, it is vital to incorporate radiance with the original image spectrum. In order to ensure accurate visualizations in challenging environments, it is necessary to measure the scattering values, as specified in <xref ref-type="disp-formula" rid="eqn-4">Eq. (4)</xref>.
<disp-formula id="eqn-4"><label>(4)</label><mml:math id="mml-eqn-4" display="block"><mml:msub><mml:mi>g</mml:mi><mml:mrow><mml:mi>f</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mo movablelimits="true" form="prefix">min</mml:mo><mml:munderover><mml:mo>&#x2211;</mml:mo><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:munderover><mml:msub><mml:mi>&#x03D1;</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mi>n</mml:mi></mml:mrow></mml:msub><mml:mi>s</mml:mi><mml:msub><mml:mi>t</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub></mml:math></disp-formula>where,</p>
<p><inline-formula id="ieqn-8"><mml:math id="mml-ieqn-8"><mml:msub><mml:mi>&#x03D1;</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mi>n</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> denotes the complete intensity of images</p>
<p><inline-formula id="ieqn-9"><mml:math id="mml-ieqn-9"><mml:mi>s</mml:mi><mml:msub><mml:mi>t</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> represents scattering values.</p>
</sec>
<sec id="s2_5">
<label>2.5</label>
<title>Visual Brilliance</title>
<p>The increased level of light in contemporary photographs effectively captures all the information contained inside high-frequency representations. In wartime contexts, the presence of high frequency conditions is consistently required to effectively manage irradiations in images, hence enhancing visualization, as demonstrated by <xref ref-type="disp-formula" rid="eqn-5">Eq. (5)</xref>.
<disp-formula id="eqn-5"><label>(5)</label><mml:math id="mml-eqn-5" display="block"><mml:mi>B</mml:mi><mml:msub><mml:mi>R</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mo movablelimits="true" form="prefix">min</mml:mo><mml:munderover><mml:mo>&#x2211;</mml:mo><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:munderover><mml:msub><mml:mi>&#x03C1;</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:msub><mml:mi>r</mml:mi><mml:mrow><mml:mi>f</mml:mi></mml:mrow></mml:msub><mml:mo stretchy="false">(</mml:mo><mml:mi>i</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:math></disp-formula></p>
<p>where,</p>
<p><inline-formula id="ieqn-10"><mml:math id="mml-ieqn-10"><mml:msub><mml:mi>&#x03C1;</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> denotes irradiation measurements</p>
<p><inline-formula id="ieqn-11"><mml:math id="mml-ieqn-11"><mml:msub><mml:mi>r</mml:mi><mml:mrow><mml:mi>f</mml:mi></mml:mrow></mml:msub><mml:mo stretchy="false">(</mml:mo><mml:mi>i</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:math></inline-formula> indicates reflection ranges for different frequencies.</p>
</sec>
<sec id="s2_6">
<label>2.6</label>
<title>Image Separation</title>
<p>In military applications, it is imperative to segregate images across multiple domains due to the significant variations in collected images across distinct frequency bands. Consequently, in this step of separation, the brightness of each picture will be reduced, resulting in an exponential transformation that increases the conditionality factors, as shown in <xref ref-type="disp-formula" rid="eqn-6">Eq. (6)</xref>.
<disp-formula id="eqn-6"><label>(6)</label><mml:math id="mml-eqn-6" display="block"><mml:mi>I</mml:mi><mml:msub><mml:mi>S</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mo movablelimits="true" form="prefix">max</mml:mo><mml:munderover><mml:mo>&#x2211;</mml:mo><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:munderover><mml:mo stretchy="false">(</mml:mo><mml:mi>L</mml:mi><mml:msub><mml:mi>F</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo>,</mml:mo><mml:mi>H</mml:mi><mml:msub><mml:mi>F</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo stretchy="false">)</mml:mo><mml:msup><mml:mi>e</mml:mi><mml:mrow><mml:mrow><mml:msub><mml:mi>t</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:mrow></mml:msup></mml:math></disp-formula>where,</p>
<p><inline-formula id="ieqn-12"><mml:math id="mml-ieqn-12"><mml:mi>L</mml:mi><mml:msub><mml:mi>F</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo>,</mml:mo><mml:mi>H</mml:mi><mml:msub><mml:mi>F</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> denotes low and high-frequency image representations</p>
<p><inline-formula id="ieqn-13"><mml:math id="mml-ieqn-13"><mml:msup><mml:mi>e</mml:mi><mml:mrow><mml:mrow><mml:msub><mml:mi>t</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:mrow></mml:msup></mml:math></inline-formula> represents exponential terms for each image.</p>
</sec>
<sec id="s2_7">
<label>2.7</label>
<title>Restoration Factors</title>
<p>It is necessary to assess the extent of restoration in relation to both non-colorized and colorized image sets. In this scenario, dynamic ranges can be determined by employing scaling factors. Therefore, in this scenario, it is necessary to utilize the whole number of restored values along with their respective weighting functions, as specified in <xref ref-type="disp-formula" rid="eqn-7">Eq. (7)</xref>.
<disp-formula id="eqn-7"><label>(7)</label><mml:math id="mml-eqn-7" display="block"><mml:mi>R</mml:mi><mml:msub><mml:mi>N</mml:mi><mml:mrow><mml:mi>f</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mo movablelimits="true" form="prefix">min</mml:mo><mml:munderover><mml:mo>&#x2211;</mml:mo><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:munderover><mml:msub><mml:mi>&#x03B4;</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mi>w</mml:mi><mml:msub><mml:mi>t</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub></mml:math></disp-formula>where,</p>
<p><inline-formula id="ieqn-14"><mml:math id="mml-ieqn-14"><mml:msub><mml:mi>&#x03B4;</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> denotes restoration values</p>
<p><inline-formula id="ieqn-15"><mml:math id="mml-ieqn-15"><mml:mi>w</mml:mi><mml:msub><mml:mi>t</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> indicates separate weighting functions.</p>
</sec>
<sec id="s2_8">
<label>2.8</label>
<title>Image Inversion</title>
<p>In this scenario, it is necessary to partition the total pixel values into distinct border indications. This entails doing measurements along both the horizontal and vertical axis using inversion measures. <xref ref-type="disp-formula" rid="eqn-8">Eq. (8)</xref> indicates that double-precision values are crucial for establishing the primary use of these inversion metrics.
<disp-formula id="eqn-8"><label>(8)</label><mml:math id="mml-eqn-8" display="block"><mml:mi>i</mml:mi><mml:mi>n</mml:mi><mml:msub><mml:mi>v</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mo movablelimits="true" form="prefix">max</mml:mo><mml:munderover><mml:mo>&#x2211;</mml:mo><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:munderover><mml:msub><mml:mi>&#x03C6;</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:msub><mml:mi>&#x03BC;</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub></mml:math></disp-formula>where,</p>
<p><inline-formula id="ieqn-16"><mml:math id="mml-ieqn-16"><mml:msub><mml:mi>&#x03C6;</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> denotes half image factor in horizontal representations</p>
<p><inline-formula id="ieqn-17"><mml:math id="mml-ieqn-17"><mml:msub><mml:mi>&#x03BC;</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> indicates vertical measurement values.</p>
</sec>
<sec id="s2_9">
<label>2.9</label>
<title>Objective Functions</title>
<p>It is crucial to include a composite objective function for all the parametric observations listed above, as it allows for a multi-objective perspective with minimum and maximum values. Therefore, <xref ref-type="disp-formula" rid="eqn-9">Eqs. (9)</xref> and <xref ref-type="disp-formula" rid="eqn-10">(10)</xref> can be utilized to build distinct composite functions for altering picture weights without any predetermined alterations.
<disp-formula id="eqn-9"><label>(9)</label><mml:math id="mml-eqn-9" display="block"><mml:msub><mml:mi>f</mml:mi><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mrow><mml:mo>(</mml:mo><mml:mi>x</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mo movablelimits="true" form="prefix">min</mml:mo><mml:munderover><mml:mo>&#x2211;</mml:mo><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:munderover><mml:mi>T</mml:mi><mml:msub><mml:mi>V</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo>,</mml:mo><mml:msub><mml:mi>g</mml:mi><mml:mrow><mml:mi>f</mml:mi></mml:mrow></mml:msub><mml:mo>,</mml:mo><mml:mi>B</mml:mi><mml:msub><mml:mi>R</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo>,</mml:mo><mml:mi>R</mml:mi><mml:msub><mml:mi>N</mml:mi><mml:mrow><mml:mi>f</mml:mi></mml:mrow></mml:msub></mml:math></disp-formula>
<disp-formula id="eqn-10"><label>(10)</label><mml:math id="mml-eqn-10" display="block"><mml:msub><mml:mi>f</mml:mi><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msub><mml:mrow><mml:mo>(</mml:mo><mml:mi>x</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mo movablelimits="true" form="prefix">max</mml:mo><mml:munderover><mml:mo>&#x2211;</mml:mo><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:munderover><mml:mi>V</mml:mi><mml:msub><mml:mi>F</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo>,</mml:mo><mml:mi>V</mml:mi><mml:msub><mml:mi>E</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo>,</mml:mo><mml:mi>I</mml:mi><mml:msub><mml:mi>S</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo>,</mml:mo><mml:mi>i</mml:mi><mml:mi>n</mml:mi><mml:msub><mml:mi>v</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub></mml:math></disp-formula></p>
<p>The utilization of composite objective functions with independent set functions is employed to determine optimal outcomes through the utilization of combinational sets, as denoted by <xref ref-type="disp-formula" rid="eqn-11">Eq. (11)</xref>.
<disp-formula id="eqn-11"><label>(11)</label><mml:math id="mml-eqn-11" display="block"><mml:mi>o</mml:mi><mml:mi>b</mml:mi><mml:msub><mml:mi>j</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:msub><mml:mi>f</mml:mi><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mrow><mml:mo>(</mml:mo><mml:mi>x</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mo>+</mml:mo><mml:msub><mml:mi>f</mml:mi><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msub><mml:mrow><mml:mo>(</mml:mo><mml:mi>x</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:math></disp-formula></p>
<p>In order to enhance the accuracy measures for each image set in challenging circumstances, it is imperative to combine the objective function in <xref ref-type="disp-formula" rid="eqn-11">Eq. (11)</xref> with the computer vision algorithm. The following is a comprehensive explanation of vision algorithms.</p>
</sec>
</sec>
<sec id="s3">
<label>3</label>
<title>Computer Vision Algorithm</title>
<p>To effectively understand all specified circumstances in acquired images, it is imperative to incorporate a computer vision algorithm that enables the generation of optimized solutions. In this particular scenario, all jobs will be identified and classified in order to facilitate a comparison with the underlying database. Deep learning algorithms are currently playing a crucial part in upgrading various photographs by arranging them in a sequential manner during the interpretation process. The utilization of computer vision algorithms is crucial in numerous instances since they are employed in conjunction with suitable training patterns to attain error-free circumstances. Consequently, these algorithms offer significant advantages in various military applications. In the context of computer vision algorithms, the recognition of linked patterns in images involves the identification of items on virtual screens.</p>
<sec id="s3_1">
<label>3.1</label>
<title>Histogram Gradients</title>
<p>The utilization of a descriptor in image processing offers significant benefits in the capture of images, enabling the recognition of diverse objects and facilitating the attainment of localized solutions with suitable orientations. The significance of histograms in image processing techniques lies in their ability to identify different gradients through the utilization of diverse edge detection systems. Hence, histogram gradients are crucial in defining each image in terms of pixel variations and feature recognition techniques. The process of picture differentiation can be effectively accomplished through several methods. However, histogram gradients offer valuable insights into both edges and forms. This is achieved by partitioning each image into smaller cell representations, resulting in simplified expressions. <xref ref-type="fig" rid="fig-2">Fig. 2</xref> indicates the histogram gradients for vision enhancements.</p>
<fig id="fig-2">
<label>Figure 2</label>
<caption>
<title>Histogram gradients for vision enhancements</title>
</caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_64394-fig-2.tif"/>
</fig>
<sec id="s3_1_1">
<label>3.1.1</label>
<title>Histogram Normalization</title>
<p>An invariant matrix representation can be employed to enhance the reliability of object recognition by normalization determination. Therefore, the proposed method involves the implementation of normalization at each stage, utilizing magnitude test patterns. In this particular example, the cut version will be employed, as specified in <xref ref-type="disp-formula" rid="eqn-12">Eq. (12)</xref>.
<disp-formula id="eqn-12"><label>(12)</label><mml:math id="mml-eqn-12" display="block"><mml:mi>n</mml:mi><mml:mi>o</mml:mi><mml:mi>r</mml:mi><mml:msub><mml:mi>m</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:munderover><mml:mo>&#x2211;</mml:mo><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:munderover><mml:mfrac><mml:msub><mml:mi>L</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mi>N</mml:mi><mml:msub><mml:mi>N</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:mfrac><mml:mo>&#x00D7;</mml:mo><mml:mn>100</mml:mn></mml:math></disp-formula>where,</p>
<p><inline-formula id="ieqn-18"><mml:math id="mml-ieqn-18"><mml:msub><mml:mi>L</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> indicates limiting values</p>
<p><inline-formula id="ieqn-19"><mml:math id="mml-ieqn-19"><mml:mi>N</mml:mi><mml:msub><mml:mi>N</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> denotes non-normalized segments.</p>
</sec>
<sec id="s3_1_2">
<label>3.1.2</label>
<title>Histogram Magnitude</title>
<p>The mapping process is crucial in the use of vision for military photos captured in hard circumstances. It gives valuable information for detecting and classifying challenging images based on several separation characteristics. In this particular scenario, the magnitude of gradients exhibits a tendency to vary, as represented by <xref ref-type="disp-formula" rid="eqn-13">Eq. (13)</xref>.
<disp-formula id="eqn-13"><label>(13)</label><mml:math id="mml-eqn-13" display="block"><mml:mi>M</mml:mi><mml:msub><mml:mi>T</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:munderover><mml:mo>&#x2211;</mml:mo><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:munderover><mml:msub><mml:mi>R</mml:mi><mml:mrow><mml:mi>v</mml:mi></mml:mrow></mml:msub><mml:mo stretchy="false">(</mml:mo><mml:mi>i</mml:mi><mml:mo stretchy="false">)</mml:mo><mml:mo>&#x00D7;</mml:mo><mml:msub><mml:mi>d</mml:mi><mml:mrow><mml:mi>f</mml:mi></mml:mrow></mml:msub><mml:mo stretchy="false">(</mml:mo><mml:mi>i</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:math></disp-formula>where,</p>
<p><inline-formula id="ieqn-20"><mml:math id="mml-ieqn-20"><mml:msub><mml:mi>R</mml:mi><mml:mrow><mml:mi>v</mml:mi></mml:mrow></mml:msub><mml:mo stretchy="false">(</mml:mo><mml:mi>i</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:math></inline-formula> denotes ranging values</p>
<p><inline-formula id="ieqn-21"><mml:math id="mml-ieqn-21"><mml:msub><mml:mi>d</mml:mi><mml:mrow><mml:mi>f</mml:mi></mml:mrow></mml:msub><mml:mo stretchy="false">(</mml:mo><mml:mi>i</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:math></inline-formula> indicates defined ranges.</p>
</sec>
<sec id="s3_1_3">
<label>3.1.3</label>
<title>Vector Regularizations</title>
<p>In order to identify the existence of objects, it is necessary to establish a regularization metric. This metric will define the square of each cell, which will then be used to display the vector outputs using appropriate representations. Therefore, <xref ref-type="disp-formula" rid="eqn-14">Eq. (14)</xref> is utilized to represent the regularization pattern with an independent size block.
<disp-formula id="eqn-14"><label>(14)</label><mml:math id="mml-eqn-14" display="block"><mml:mi>R</mml:mi><mml:msub><mml:mi>S</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:munderover><mml:mo>&#x2211;</mml:mo><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:munderover><mml:mfrac><mml:msub><mml:mi>z</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:msub><mml:mi>y</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub></mml:mfrac><mml:mo>&#x00D7;</mml:mo><mml:mn>100</mml:mn></mml:math></disp-formula>where,</p>
<p><inline-formula id="ieqn-22"><mml:math id="mml-ieqn-22"><mml:msub><mml:mi>z</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula>, <inline-formula id="ieqn-23"><mml:math id="mml-ieqn-23"><mml:msub><mml:mi>y</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> represents individual block display units.</p>
</sec>
</sec>
<sec id="s3_2">
<label>3.2</label>
<title>Adaptive Thresholding</title>
<p>When photos are acquired in challenging situations with adequate illumination, it becomes crucial to prioritize the reduction of spatial fluctuations. This is because the entire image is classed using a three-dimensional spectrum. Therefore, an adaptive threshold approach is implemented to integrate the combined features of images in all local regions, thereby mitigating the influence of external factors on acquired images. While the adaptive threshold mechanism is commonly regarded as a sub-task of integrated components, it is important to note that in this particular example, it is a dynamic establishment specifically designed for images that have been divided using proper computational methods. <xref ref-type="fig" rid="fig-3">Fig. 3</xref> illustrates the adaptive thresholds for vision enhancements.</p>
<fig id="fig-3">
<label>Figure 3</label>
<caption>
<title>Adaptive thresholds for vision enhancements</title>
</caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_64394-fig-3.tif"/>
</fig>
<sec id="s3_2_1">
<label>3.2.1</label>
<title>Arithmetic Threshold</title>
<p>An image threshold is defined by utilizing magnitude values obtained from gradient vectors to establish maximum and minimum limits. Therefore, the neighborhood values that surpass the overlapping pictures are noticed in this particular scenario, which is defined in arithmetic mode as specified in <xref ref-type="disp-formula" rid="eqn-15">Eq. (15)</xref>.
<disp-formula id="eqn-15"><label>(15)</label><mml:math id="mml-eqn-15" display="block"><mml:mi>T</mml:mi><mml:msub><mml:mi>S</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:munderover><mml:mo>&#x2211;</mml:mo><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:munderover><mml:msub><mml:mrow><mml:mi mathvariant="normal">&#x0394;</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo stretchy="false">(</mml:mo><mml:mi>C</mml:mi><mml:msub><mml:mi>P</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo stretchy="false">)</mml:mo></mml:math></disp-formula>where,</p>
<p><inline-formula id="ieqn-24"><mml:math id="mml-ieqn-24"><mml:msub><mml:mrow><mml:mi mathvariant="normal">&#x0394;</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo stretchy="false">(</mml:mo><mml:mi>C</mml:mi><mml:msub><mml:mi>P</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo stretchy="false">)</mml:mo></mml:math></inline-formula> denotes the difference in the captured image set.</p>
</sec>
<sec id="s3_2_2">
<label>3.2.2</label>
<title>Linear Threshold</title>
<p>The utilization of a linear threshold mechanism is employed in computer vision to provide optimized solutions within a constant time period, with a specific focus on border scenarios. The linear threshold employed in this method calculates the integral image set at both corners, resulting in the elimination of the entire overlap for individual pixels, as described in <xref ref-type="disp-formula" rid="eqn-16">Eq. (16)</xref>.
<disp-formula id="eqn-16"><label>(16)</label><mml:math id="mml-eqn-16" display="block"><mml:mi>L</mml:mi><mml:mi>T</mml:mi><mml:msub><mml:mi>D</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:munderover><mml:mo>&#x2211;</mml:mo><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:munderover><mml:mi>c</mml:mi><mml:msub><mml:mi>n</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo stretchy="false">(</mml:mo><mml:msub><mml:mi>u</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo>&#x2212;</mml:mo><mml:msub><mml:mi>u</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo stretchy="false">)</mml:mo></mml:math></disp-formula>where,</p>
<p><inline-formula id="ieqn-25"><mml:math id="mml-ieqn-25"><mml:mi>c</mml:mi><mml:msub><mml:mi>n</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> denotes the corner image set</p>
<p><inline-formula id="ieqn-26"><mml:math id="mml-ieqn-26"><mml:msub><mml:mi>u</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula>, <inline-formula id="ieqn-27"><mml:math id="mml-ieqn-27"><mml:msub><mml:mi>u</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub></mml:math></inline-formula> represents the border overlap set.</p>
</sec>
<sec id="s3_2_3">
<label>3.2.3</label>
<title>Image Volume Maintenance</title>
<p>Given that each cell is partitioned into many blocks, it is imperative to ensure that the volume within each individual block remains at a minimum. Consequently, the overall volume of the image is determined by measuring narrow image bands in this scenario. By maintaining distinct picture sets, the accuracy rate is increased, hence preventing non-uniform cell measurements, as indicated by <xref ref-type="disp-formula" rid="eqn-17">Eq. (17)</xref>.
<disp-formula id="eqn-17"><label>(17)</label><mml:math id="mml-eqn-17" display="block"><mml:mi>V</mml:mi><mml:msub><mml:mi>M</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:munderover><mml:mo>&#x2211;</mml:mo><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:munderover><mml:mi>s</mml:mi><mml:mi>e</mml:mi><mml:msub><mml:mi>q</mml:mi><mml:mrow><mml:mi>b</mml:mi></mml:mrow></mml:msub><mml:mo stretchy="false">(</mml:mo><mml:mi>i</mml:mi><mml:mo stretchy="false">)</mml:mo><mml:msub><mml:mi>R</mml:mi><mml:mrow><mml:mi>m</mml:mi></mml:mrow></mml:msub><mml:mo stretchy="false">(</mml:mo><mml:mi>i</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:math></disp-formula>where,</p>
<p><inline-formula id="ieqn-28"><mml:math id="mml-ieqn-28"><mml:mi>s</mml:mi><mml:mi>e</mml:mi><mml:msub><mml:mi>q</mml:mi><mml:mrow><mml:mi>b</mml:mi></mml:mrow></mml:msub><mml:mo stretchy="false">(</mml:mo><mml:mi>i</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:math></inline-formula> denotes a sequence of individual blocks</p>
<p><inline-formula id="ieqn-29"><mml:math id="mml-ieqn-29"><mml:msub><mml:mi>R</mml:mi><mml:mrow><mml:mi>m</mml:mi></mml:mrow></mml:msub><mml:mo stretchy="false">(</mml:mo><mml:mi>i</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:math></inline-formula> indicates the total maintenance rate.</p>
</sec>
</sec>
</sec>
<sec id="s4">
<label>4</label>
<title>Results</title>
<p>This section presents a real-time investigation of computer vision through the examination of a sequence of 928 photographs taken under various challenging circumstances. Most of the photos considered for analysis are sourced from military applications, where background segments are recognized with enhanced accuracy. To evaluate the results of the proposed method, the collected image set is combined with gradient characteristics, ensuring non-overlapping identification. Additionally, this scenario presents a feature set that has been colorized, along with normalizations that accurately describe the limiting factors. By incorporating a limiting factor, a control unit is linked to analyze a sequence of images within a shorter time frame. This facilitates the precise identification of histograms with high and low-value measurements, thereby improving detection accuracy. After providing the gradients for all images, the threshold level for individual blocks is determined to eliminate any additional overlaps. Conversely, the proposed method involves observing the 928 images using a filter that assesses the current photos while considering specific control parameters established for border segments. As the projected model analyses the acquired photographs in challenging conditions, any borders that disrupt the entire image set are removed, resulting in a higher maintenance rate. Subsequently, any scattered observations where the image set overlaps with background features are identified using both horizontal and vertical distributions. The replacement of half of the size part in the sequence image set with computer vision features allows for the display of individual blocks with reduced restoration values. This approach achieves the original results without the requirement for any external distributions. 
Four scenarios are developed to analyze the parametric outcomes, and the significance of all scenarios is listed in <xref ref-type="table" rid="table-2">Table 2</xref>.</p>
<table-wrap id="table-2">
<label>Table 2</label>
<caption>
<title>Simulation environments</title>
</caption>
<table>
<colgroup>
<col align="center"/>
<col align="center"/>
</colgroup>
<thead>
<tr>
<th align="center">Bounds</th>
<th align="center">Requirement</th>
</tr>
</thead>
<tbody>
<tr>
<td>Operating systems</td>
<td>Windows 8 and above</td>
</tr>
<tr>
<td>Platform</td>
<td>MATLAB and Image processing (Vision) toolbox</td>
</tr>
<tr>
<td>Version (MATLAB)</td>
<td>2015 and above</td>
</tr>
<tr>
<td>Version (Image processing (Vision) toolbox)</td>
<td>3.6 and above</td>
</tr>
<tr>
<td>Applications</td>
<td>Military applications and other harsh environments</td>
</tr>
<tr>
<td>Implemented data sets</td>
<td>Captured image data set (928) arranged in sequence order with colorized background segments</td>
</tr>
</tbody>
</table>
</table-wrap>
<p><bold><italic>Discussions</italic></bold></p>
<p>The role of simulation environments is crucial in determining the outcomes of the proposed method, as it involves the real-time processing of recorded images using predetermined parameters and simulation units. The operating system will be linked to visual processing technologies, with MATLAB serving as the principal platform for processing image sequences. In this scenario, all photographs are presented uniformly, resulting in sequential arrangements. Additionally, the configuration needs are established automatically. After capturing the photos, separate processing units with minimal restoration rates are employed, leading to a total reduction of external distribution rates. Furthermore, to enhance the grey scale factors through the use of analogous color representations, horizontal blocks are designated with threshold values, limited to a factor set at 1. <xref ref-type="table" rid="table-2">Table 2</xref> indicates the simulation environments for conducting various scenarios. A comprehensive overview of enhancement techniques, ranging from spatial domain methods like contrast stretching and histogram equalization to frequency domain approaches utilizing various filtering mechanisms, is discussed [<xref ref-type="bibr" rid="ref-17">17</xref>]. To address tactical deployment scenarios, communication models inspired by the Internet of Battle Things architecture are considered [<xref ref-type="bibr" rid="ref-18">18</xref>]. Furthermore, immersive experiential learning methods using extended reality technologies are interfaced with simulation designs [<xref ref-type="bibr" rid="ref-19">19</xref>]. Additionally, Explainable AI concepts are considered to enhance transparency and trust in the decision-making components of the system [<xref ref-type="bibr" rid="ref-20">20</xref>,<xref ref-type="bibr" rid="ref-21">21</xref>]. Therefore, these classical methods serve as a baseline for understanding more advanced algorithms applied in challenging environments. 
Since the proposed method is carried out using image processing techniques, it is necessary to avoid overlapping images. Therefore, at the output, it is possible to enhance the images as indicated in <xref ref-type="fig" rid="fig-4">Fig. 4</xref>. The comparison from <xref ref-type="fig" rid="fig-4">Fig. 4</xref> with the existing image set indicates that the projected military visual communications can be achieved only if images are enhanced with high quality.</p>
<fig id="fig-4">
<label>Figure 4</label>
<caption>
<title>Comparisons of visual communications in harsh environments. <bold>(a)</bold> Existing; <bold>(b)</bold> Proposed</title>
</caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_64394-fig-4.tif"/>
</fig>
<p><bold><italic>Scenario 1: Enactment of visualized filter and consistent enhancement features</italic></bold></p>
<p>The present situation involves the observation of comprehensive enhancement features for a sequence of images subsequent to the application of appropriate filter configurations. Typically, picture enhancement merely improves the contrast of background pixels. However, the suggested method increases the contrast of each data block connected to the main blocks, allowing for uninterrupted identification of objects. By establishing control factors that are equivalent to the current images, it becomes possible to assign limiting values to all normalized segments. This allows for the possibility of reconnecting any problems that may arise with divided blocks or blocks that are unnecessarily removed by the visualized filter to the same image set in the future. Therefore, the primary benefit in this particular situation is not only in the augmentation of photos but also in the ability to detect unattached images that have been altered using specific range values. The enhancing aspects of the proposed and existing approach are illustrated in <xref ref-type="fig" rid="fig-5">Fig. 5</xref>.</p>
<fig id="fig-5">
<label>Figure 5</label>
<caption>
<title>Image functions for enhancements with convinced rate</title>
</caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_64394-fig-5.tif"/>
</fig>
<p><xref ref-type="fig" rid="fig-5">Fig. 5</xref> demonstrates that the proposed strategy may consistently improve acquired photos compared to the previous approach [<xref ref-type="bibr" rid="ref-6">6</xref>]. The inclusion of similar control elements in filters enables the manipulation of external effects in photographs, even when captured under challenging environmental conditions. In an alternative scenario, the potential for achieving consistency is also realized through the utilization of normalized values, wherein the spreading factor of the image set is expanded to encompass 360 degrees in the proposed methodology. In order to assess the results of consistent improvements, a total of 12, 36, 72, 96, and 124 photographs are reviewed. The function used in this case remains at 3, 5, 6, 8, and 10 for each batch of images. The existing method achieves consistent enhancements of 32%, 36%, 37%, 39%, and 41% for the indicated photographs. In contrast, the projected model achieves constant enhancements of 49%, 57%, 61%, 64%, and 69% for the same images. Therefore, by utilizing visualization filters, it is possible to eliminate superfluous block elements entirely in the proposed method. Additionally, if the number of photos in the collection exceeds 100, consistency may be attained.</p>
<p><bold><italic>Scenario 2: Visualizations with graphical units</italic></bold></p>
<p>In order to enhance the analytics component for all visualization units, it is important to have graphical representations available. Therefore, in this particular situation, the phenomenon of total scattering is observed in each set of images, with each set being represented by unique graphical units. A border limit is established to prevent truncations. Furthermore, the presence of low-light in the background poses a significant challenge in addressing several associated problems. Consequently, doing a thorough analysis of the specific components involved in this scenario can facilitate the elimination of low light conditions, thereby reducing the occurrence of scattered measurement values.</p>
<p>Given that photographs are captured in diverse and challenging circumstances, the occurrence of significantly elevated haze levels can lead to a severe situation where the overall intensity of the images is compromised, rendering them irreparable in subsequent instances. Therefore, in the initial state, it is necessary to identify and eliminate all scattering values, even if visualized filters are configured with certain constraints. Only by using truncated visualization can one obtain a clear view for improved object recognition. <xref ref-type="fig" rid="fig-6">Fig. 6</xref> shows the truncations for high-intensity images.</p>
<fig id="fig-6">
<label>Figure 6</label>
<caption>
<title>Truncations for high-intensity images</title>
</caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_64394-fig-6.tif"/>
</fig>
<p><bold><italic>Scenario 3: Visualization brilliance and restoration units</italic></bold></p>
<p>The enhancement procedure for gray-scale photos is challenging, necessitating the inclusion of analogous color representations. This is necessary to distinguish background images from segmented blocks and accurately identify different items. Therefore, in this particular scenario, an analysis is conducted on restoration units for all photos, focusing on the visualization brightness and the observation of reflection elements in the sequence of images. In the field of image processing, restoration refers to the procedure of eliminating noisy and corrupted formats, resulting in the creation of normal images that retain only the original blocks. The proposed method involves adding a colorized unit with a low reflection outcome to each block instead of removing noise. This enhances the accuracy of irradiation measurements, which may then be further decreased to replace poor values. Restoration units enable the identification of all images in hard settings by assigning specific weighting factors as shown in <xref ref-type="fig" rid="fig-7">Fig. 7</xref>, resulting in the formation of acceptable pixels in clear states.</p>
<fig id="fig-7">
<label>Figure 7</label>
<caption>
<title>Individual weighting functions with minimized restorations</title>
</caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_64394-fig-7.tif"/>
</fig>
<p><bold><italic>Scenario 4: Inversions and separations</italic></bold></p>
<p>In this case, we witness complete image inversions with equal distance separations, taking into account both vertical and horizontal pixel blocks. Recognizing analogous color blocks and mapping each light block to dark ones is crucial for accurately distinguishing distinct objects and achieving more precise measurements using picture inversion. When creating mapping representations of this nature, changes are made at each boundary, followed by the execution of curve step procedures.</p>
<p>By accurately separating images with appropriate pixel values, it becomes possible to accurately identify them during the inversion step, eliminating any crucial measurements. The key measurement in this situation is determined by calculating the differences between the acquired and original image sets. This allows for the monitoring of the exponential ranges of each image. The comparative simulation outcomes of the proposed and existing approach in terms of inversions and separations are presented in <xref ref-type="fig" rid="fig-8">Fig. 8</xref>.</p>
<fig id="fig-8">
<label>Figure 8</label>
<caption>
<title>Color factor in the presence of low light conditions</title>
</caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_64394-fig-8.tif"/>
</fig>
</sec>
<sec id="s5">
<label>5</label>
<title>Conclusions</title>
<p>The utilization of image enhancement techniques in visual processing holds substantial importance in contemporary identification applications, particularly in military processing units, where establishing a secure environment is imperative. Most military actions conducted along borders occur in challenging settings, where it is especially difficult to identify territorial crowds composed of similar groups. Moreover, managing climatic conditions poses a significant challenge, as the majority of the territorial population encounters either high or low-temperature climates, thereby complicating the identification procedure. Therefore, to reduce the complexity of photos taken in difficult situations, it is necessary to process them using computer vision measures for accurate image identification. The approach outlined for picture identification employs a visualized filter to establish a controlling factor, which is subject to specific limits. This control factor is implemented by dividing the entire image set into horizontal and vertical blocks. Furthermore, to enhance visual acuity, two unique criteria, namely conviction and definiteness, are employed, each possessing identical values. Consequently, the results are derived from a two-degree set of images. In the proposed model, a visualized filter is employed to minimize the total number of truncations and obtain precise information about the image set while ensuring appropriate limiting settings are applied. A sequential strategy is employed to process all collected photos, utilizing a histogram gradient and an adaptive threshold mechanism. This approach effectively manages the occurrence of overlaps at the boundaries of the images.</p>
<p>Four scenarios and two performance measures are used to verify the suggested mechanisms and procedures with equal analytical representation. These metrics are then compared with the previous technique. The comparison analysis reveals that the new method consistently achieves a 69% improvement in picture enhancement, while the conventional methodology only achieves a 41% improvement in challenging settings. In contrast, the proposed model reduces the proportion of truncations to 1%, compared to the existing model&#x2019;s 6%. As a result, the need for picture restoration is minimized to 4% and 12% for the proposed and traditional methods, respectively. Artificial intelligence systems can be utilized to automatically interpret photographs acquired in hostile environments in the future.</p>
</sec>
</body>
<back>
<ack>
<p>This work is financially supported by Ongoing Research Funding Program (ORF-2025-846), King Saud University, Riyadh, Saudi Arabia.</p>
</ack>
<sec>
<title>Funding Statement</title>
<p>This work is financially supported by Ongoing Research Funding Program (ORF-2025-846), King Saud University, Riyadh, Saudi Arabia.</p>
</sec>
<sec>
<title>Author Contributions</title>
<p>The authors confirm contribution to the paper as follows: study conception and design: Shitharth Selvarajan; Hariprasath Manoharan; data collection: Subhav Singh; Taher Al-Shehari; Nasser A Alsadhan; analysis and interpretation of results: Subhav Singh; Shitharth Selvarajan; Hariprasath Manoharan; draft manuscript preparation: Shitharth Selvarajan; Hariprasath Manoharan. All authors reviewed the results and approved the final version of the manuscript.</p>
</sec>
<sec sec-type="data-availability">
<title>Availability of Data and Materials</title>
<p>Data available on request from the authors. The data that support the findings of this study are available from the corresponding author, [Shitharth Selvarajan], upon reasonable request.</p>
</sec>
<sec>
<title>Ethics Approval</title>
<p>Not applicable.</p>
</sec>
<sec sec-type="COI-statement">
<title>Conflicts of Interest</title>
<p>The authors declare no conflicts of interest to report regarding the present study.</p>
</sec>
<ref-list content-type="authoryear">
<title>References</title>
<ref id="ref-1"><label>[1]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Ibrahim</surname> <given-names>RW</given-names></string-name>, <string-name><surname>Jalab</surname> <given-names>HA</given-names></string-name>, <string-name><surname>Karim</surname> <given-names>FK</given-names></string-name>, <string-name><surname>Alabdulkreem</surname> <given-names>E</given-names></string-name>, <string-name><surname>Ayub</surname> <given-names>MN</given-names></string-name></person-group>. <article-title>A medical image enhancement based on generalized class of fractional partial differential equations</article-title>. <source>Quant Imaging Med Surg</source>. <year>2022</year>;<volume>12</volume>(<issue>1</issue>):<fpage>172</fpage>&#x2013;<lpage>83</lpage>. doi:<pub-id pub-id-type="doi">10.21037/qims-21-15</pub-id>; <pub-id pub-id-type="pmid">34993069</pub-id></mixed-citation></ref>
<ref id="ref-2"><label>[2]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Gopalan</surname> <given-names>S</given-names></string-name>, <string-name><surname>Arathy</surname> <given-names>S</given-names></string-name></person-group>. <article-title>A new mathematical model in image enhancement problem</article-title>. <source>Procedia Comput Sci</source>. <year>2015</year>;<volume>46</volume>:<fpage>1786</fpage>&#x2013;<lpage>93</lpage>. doi:<pub-id pub-id-type="doi">10.1016/j.procs.2015.02.134</pub-id>.</mixed-citation></ref>
<ref id="ref-3"><label>[3]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>&#x017D;iguli&#x0107;</surname> <given-names>N</given-names></string-name>, <string-name><surname>Glu&#x010D;ina</surname> <given-names>M</given-names></string-name>, <string-name><surname>Lorencin</surname> <given-names>I</given-names></string-name>, <string-name><surname>Matika</surname> <given-names>D</given-names></string-name></person-group>. <article-title>Military decision-making process enhanced by image detection</article-title>. <source>Information</source>. <year>2024</year>;<volume>15</volume>(<issue>1</issue>):<fpage>11</fpage>. doi:<pub-id pub-id-type="doi">10.3390/info15010011</pub-id>.</mixed-citation></ref>
<ref id="ref-4"><label>[4]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Wang</surname> <given-names>J</given-names></string-name>, <string-name><surname>Yuan</surname> <given-names>Y</given-names></string-name>, <string-name><surname>Li</surname> <given-names>G</given-names></string-name></person-group>. <article-title>Multifeature contrast enhancement algorithm for digital media images based on the diffusion equation</article-title>. <source>Adv Math Phys</source>. <year>2022</year>;<volume>2022</volume>:<fpage>1982555</fpage>. doi:<pub-id pub-id-type="doi">10.1155/2022/1982555</pub-id>.</mixed-citation></ref>
<ref id="ref-5"><label>[5]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Yang</surname> <given-names>R</given-names></string-name>, <string-name><surname>Chen</surname> <given-names>L</given-names></string-name>, <string-name><surname>Zhang</surname> <given-names>L</given-names></string-name>, <string-name><surname>Li</surname> <given-names>Z</given-names></string-name>, <string-name><surname>Lin</surname> <given-names>Y</given-names></string-name>, <string-name><surname>Wu</surname> <given-names>Y</given-names></string-name></person-group>. <article-title>Image enhancement via special functions and its application for near infrared imaging</article-title>. <source>Glob Chall</source>. <year>2023</year>;<volume>7</volume>(<issue>7</issue>):<fpage>2200179</fpage>. doi:<pub-id pub-id-type="doi">10.1002/gch2.202200179</pub-id>; <pub-id pub-id-type="pmid">37483414</pub-id></mixed-citation></ref>
<ref id="ref-6"><label>[6]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Li</surname> <given-names>P</given-names></string-name>, <string-name><surname>Gu</surname> <given-names>X</given-names></string-name></person-group>. <article-title>An image enhancement method based on partial differential equations to improve dark channel theory</article-title>. <source>IOP Conf Ser Earth Environ Sci</source>. <year>2021</year>;<volume>769</volume>(<issue>4</issue>):<fpage>042112</fpage>. doi:<pub-id pub-id-type="doi">10.1088/1755-1315/769/4/042112</pub-id>.</mixed-citation></ref>
<ref id="ref-7"><label>[7]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Zhang</surname> <given-names>H</given-names></string-name>, <string-name><surname>Gong</surname> <given-names>L</given-names></string-name>, <string-name><surname>Li</surname> <given-names>X</given-names></string-name>, <string-name><surname>Liu</surname> <given-names>F</given-names></string-name>, <string-name><surname>Yin</surname> <given-names>J</given-names></string-name></person-group>. <article-title>An underwater imaging method of enhancement via multi-scale weighted fusion</article-title>. <source>Front Mar Sci</source>. <year>2023</year>;<volume>10</volume>:<fpage>1150593</fpage>. doi:<pub-id pub-id-type="doi">10.3389/fmars.2023.1150593</pub-id>.</mixed-citation></ref>
<ref id="ref-8"><label>[8]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>He</surname> <given-names>K</given-names></string-name>, <string-name><surname>Tao</surname> <given-names>D</given-names></string-name>, <string-name><surname>Xu</surname> <given-names>D</given-names></string-name></person-group>. <article-title>Adaptive colour restoration and detail retention for image enhancement</article-title>. <source>IET Image Process</source>. <year>2021</year>;<volume>15</volume>(<issue>14</issue>):<fpage>3685</fpage>&#x2013;<lpage>97</lpage>. doi:<pub-id pub-id-type="doi">10.1049/ipr2.12223</pub-id>.</mixed-citation></ref>
<ref id="ref-9"><label>[9]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Zhang</surname> <given-names>P</given-names></string-name></person-group>. <article-title>Image enhancement method based on deep learning</article-title>. <source>Math Probl Eng</source>. <year>2022</year>;<volume>2022</volume>(<issue>1</issue>):<fpage>6797367</fpage>. doi:<pub-id pub-id-type="doi">10.1155/2022/6797367</pub-id>.</mixed-citation></ref>
<ref id="ref-10"><label>[10]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Hagag</surname> <given-names>A</given-names></string-name>, <string-name><surname>Omara</surname> <given-names>I</given-names></string-name>, <string-name><surname>Chaib</surname> <given-names>S</given-names></string-name>, <string-name><surname>Ma</surname> <given-names>G</given-names></string-name>, <string-name><surname>El-Samie</surname> <given-names>FEA</given-names></string-name></person-group>. <article-title>Dual link distributed source coding scheme for the transmission of satellite hyperspectral imagery</article-title>. <source>J Vis Commun Image Represent</source>. <year>2021</year>;<volume>78</volume>:<fpage>103117</fpage>. doi:<pub-id pub-id-type="doi">10.1016/j.jvcir.2021.103117</pub-id>.</mixed-citation></ref>
<ref id="ref-11"><label>[11]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Jiang</surname> <given-names>Y</given-names></string-name>, <string-name><surname>Dong</surname> <given-names>L</given-names></string-name>, <string-name><surname>Liang</surname> <given-names>J</given-names></string-name></person-group>. <article-title>Image enhancement of maritime infrared targets based on scene discrimination</article-title>. <source>Sensors</source>. <year>2022</year>;<volume>22</volume>(<issue>15</issue>):<fpage>5873</fpage>. doi:<pub-id pub-id-type="doi">10.3390/s22155873</pub-id>; <pub-id pub-id-type="pmid">35957429</pub-id></mixed-citation></ref>
<ref id="ref-12"><label>[12]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Yao</surname> <given-names>H</given-names></string-name>, <string-name><surname>Wei</surname> <given-names>H</given-names></string-name>, <string-name><surname>Qiao</surname> <given-names>T</given-names></string-name>, <string-name><surname>Qin</surname> <given-names>C</given-names></string-name></person-group>. <article-title>JPEG quantization step estimation with coefficient histogram and spectrum analyses</article-title>. <source>J Vis Commun Image Represent</source>. <year>2020</year>;<volume>69</volume>(<issue>3</issue>):<fpage>102795</fpage>. doi:<pub-id pub-id-type="doi">10.1016/j.jvcir.2020.102795</pub-id>.</mixed-citation></ref>
<ref id="ref-13"><label>[13]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Gal&#x00E1;n</surname> <given-names>JJ</given-names></string-name>, <string-name><surname>Carrasco</surname> <given-names>RA</given-names></string-name>, <string-name><surname>LaTorre</surname> <given-names>A</given-names></string-name></person-group>. <article-title>Military applications of machine learning: a bibliometric perspective</article-title>. <source>Mathematics</source>. <year>2022</year>;<volume>10</volume>(<issue>9</issue>):<fpage>1397</fpage>. doi:<pub-id pub-id-type="doi">10.3390/math10091397</pub-id>.</mixed-citation></ref>
<ref id="ref-14"><label>[14]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Peng</surname> <given-names>X</given-names></string-name>, <string-name><surname>Zhang</surname> <given-names>X</given-names></string-name>, <string-name><surname>Li</surname> <given-names>Y</given-names></string-name>, <string-name><surname>Liu</surname> <given-names>B</given-names></string-name></person-group>. <article-title>Research on image feature extraction and retrieval algorithms based on convolutional neural network</article-title>. <source>J Vis Commun Image Represent</source>. <year>2020</year>;<volume>69</volume>(<issue>6</issue>):<fpage>102705</fpage>. doi:<pub-id pub-id-type="doi">10.1016/j.jvcir.2019.102705</pub-id>.</mixed-citation></ref>
<ref id="ref-15"><label>[15]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Li</surname> <given-names>C</given-names></string-name>, <string-name><surname>Anwar</surname> <given-names>S</given-names></string-name>, <string-name><surname>Hou</surname> <given-names>J</given-names></string-name>, <string-name><surname>Cong</surname> <given-names>R</given-names></string-name>, <string-name><surname>Guo</surname> <given-names>C</given-names></string-name>, <string-name><surname>Ren</surname> <given-names>W</given-names></string-name></person-group>. <article-title>Underwater image enhancement via medium transmission-guided multi-color space embedding</article-title>. <source>IEEE Trans Image Process</source>. <year>2021</year>;<volume>30</volume>:<fpage>4985</fpage>&#x2013;<lpage>5000</lpage>. doi:<pub-id pub-id-type="doi">10.1109/TIP.2021.3076367</pub-id>; <pub-id pub-id-type="pmid">33961554</pub-id></mixed-citation></ref>
<ref id="ref-16"><label>[16]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Lozano-V&#x00E1;zquez</surname> <given-names>LV</given-names></string-name>, <string-name><surname>Miura</surname> <given-names>J</given-names></string-name>, <string-name><surname>Rosales-Silva</surname> <given-names>AJ</given-names></string-name>, <string-name><surname>Luviano-Ju&#x00E1;rez</surname> <given-names>A</given-names></string-name>, <string-name><surname>M&#x00FA;jica-Vargas</surname> <given-names>D</given-names></string-name></person-group>. <article-title>Analysis of different image enhancement and feature extraction methods</article-title>. <source>Mathematics</source>. <year>2022</year>;<volume>10</volume>(<issue>14</issue>):<fpage>2407</fpage>. doi:<pub-id pub-id-type="doi">10.3390/math10142407</pub-id>.</mixed-citation></ref>
<ref id="ref-17"><label>[17]</label><mixed-citation publication-type="book"><person-group person-group-type="author"><string-name><surname>Gonzalez</surname> <given-names>RC</given-names></string-name>, <string-name><surname>Woods</surname> <given-names>RE</given-names></string-name></person-group>. <source>Digital image processing</source>. <edition>4th ed</edition>. <publisher-loc>London, UK</publisher-loc>: <publisher-name>Pearson Education</publisher-name>; <year>2018</year>.</mixed-citation></ref>
<ref id="ref-18"><label>[18]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Kufakunesu</surname> <given-names>R</given-names></string-name>, <string-name><surname>Myburgh</surname> <given-names>H</given-names></string-name>, <string-name><surname>De Freitas</surname> <given-names>A</given-names></string-name></person-group>. <article-title>The Internet of battle things: a survey on communication challenges and recent solutions</article-title>. <source>Discov Internet Things</source>. <year>2025</year>;<volume>5</volume>(<issue>1</issue>):<fpage>3</fpage>. doi:<pub-id pub-id-type="doi">10.1007/s43926-025-00093-w</pub-id>.</mixed-citation></ref>
<ref id="ref-19"><label>[19]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Garcia Estrada</surname> <given-names>J</given-names></string-name>, <string-name><surname>Prasolova-F&#x00F8;rland</surname> <given-names>E</given-names></string-name>, <string-name><surname>Kjeksrud</surname> <given-names>S</given-names></string-name>, <string-name><surname>Themelis</surname> <given-names>C</given-names></string-name>, <string-name><surname>Lindqvist</surname> <given-names>P</given-names></string-name>, <string-name><surname>Kvam</surname> <given-names>K</given-names></string-name>, <etal>et al</etal></person-group>. <article-title>Military education in extended reality (XR): learning troublesome knowledge through immersive experiential application</article-title>. <source>Vis Comput</source>. <year>2024</year>;<volume>40</volume>(<issue>10</issue>):<fpage>7249</fpage>&#x2013;<lpage>78</lpage>. doi:<pub-id pub-id-type="doi">10.1007/s00371-024-03339-w</pub-id>.</mixed-citation></ref>
<ref id="ref-20"><label>[20]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>e Oliveira</surname> <given-names>E</given-names></string-name>, <string-name><surname>Rodrigues</surname> <given-names>M</given-names></string-name>, <string-name><surname>Pereira</surname> <given-names>JP</given-names></string-name>, <string-name><surname>Lopes</surname> <given-names>AM</given-names></string-name>, <string-name><surname>Mestric</surname> <given-names>II</given-names></string-name>, <string-name><surname>Bjelogrlic</surname> <given-names>S</given-names></string-name></person-group>. <article-title>Unlabeled learning algorithms and operations: overview and future trends in defense sector</article-title>. <source>Artif Intell Rev</source>. <year>2024</year>;<volume>57</volume>(<issue>3</issue>):<fpage>66</fpage>. doi:<pub-id pub-id-type="doi">10.1007/s10462-023-10692-0</pub-id>.</mixed-citation></ref>
<ref id="ref-21"><label>[21]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Wood</surname> <given-names>NG</given-names></string-name></person-group>. <article-title>Explainable AI in the military domain</article-title>. <source>Ethics Inf Technol</source>. <year>2024</year>;<volume>26</volume>(<issue>2</issue>):<fpage>29</fpage>. doi:<pub-id pub-id-type="doi">10.1007/s10676-024-09762-w</pub-id>.</mixed-citation></ref>
</ref-list>
</back></article>