<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.1 20151215//EN" "http://jats.nlm.nih.gov/publishing/1.1/JATS-journalpublishing1.dtd">
<article xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:mml="http://www.w3.org/1998/Math/MathML" xml:lang="en" article-type="research-article" dtd-version="1.1">
<front>
<journal-meta>
<journal-id journal-id-type="pmc">CMC</journal-id>
<journal-id journal-id-type="nlm-ta">CMC</journal-id>
<journal-id journal-id-type="publisher-id">CMC</journal-id>
<journal-title-group>
<journal-title>Computers, Materials &#x0026; Continua</journal-title>
</journal-title-group>
<issn pub-type="epub">1546-2226</issn>
<issn pub-type="ppub">1546-2218</issn>
<publisher>
<publisher-name>Tech Science Press</publisher-name>
<publisher-loc>USA</publisher-loc>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="publisher-id">49611</article-id>
<article-id pub-id-type="doi">10.32604/cmc.2024.049611</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Article</subject>
</subj-group>
</article-categories>
<title-group>
<article-title>Enhancing Security and Privacy in Distributed Face Recognition Systems through Blockchain and GAN Technologies</article-title>
<alt-title alt-title-type="left-running-head">Enhancing Security and Privacy in Distributed Face Recognition Systems Through Blockchain and GAN Technologies</alt-title>
<alt-title alt-title-type="right-running-head">Enhancing Security and Privacy in Distributed Face Recognition Systems Through Blockchain and GAN Technologies</alt-title>
</title-group>
<contrib-group>
<contrib id="author-1" contrib-type="author">
<name name-style="western"><surname>Ghani</surname><given-names>Muhammad Ahmad Nawaz Ul</given-names></name><xref ref-type="aff" rid="aff-1">1</xref></contrib>
<contrib id="author-2" contrib-type="author" corresp="yes">
<name name-style="western"><surname>She</surname><given-names>Kun</given-names></name><xref ref-type="aff" rid="aff-1">1</xref><email>kun@uestc.edu.cn</email></contrib>
<contrib id="author-3" contrib-type="author">
<name name-style="western"><surname>Rauf</surname><given-names>Muhammad Arslan</given-names></name><xref ref-type="aff" rid="aff-1">1</xref></contrib>
<contrib id="author-4" contrib-type="author">
<name name-style="western"><surname>Khan</surname><given-names>Shumaila</given-names></name><xref ref-type="aff" rid="aff-2">2</xref></contrib>
<contrib id="author-5" contrib-type="author">
<name name-style="western"><surname>Khan</surname><given-names>Javed Ali</given-names></name><xref ref-type="aff" rid="aff-3">3</xref></contrib>
<contrib id="author-6" contrib-type="author">
<name name-style="western"><surname>Aldakheel</surname><given-names>Eman Abdullah</given-names></name><xref ref-type="aff" rid="aff-4">4</xref></contrib>
<contrib id="author-7" contrib-type="author">
<name name-style="western"><surname>Khafaga</surname><given-names>Doaa Sami</given-names></name><xref ref-type="aff" rid="aff-4">4</xref></contrib>
<aff id="aff-1"><label>1</label><institution>School of Information and Software Engineering, University of Electronic Science and Technology of China</institution>, <addr-line>Chengdu, 611731</addr-line>, <country>China</country></aff>
<aff id="aff-2"><label>2</label><institution>Department of Computer Science, University of Science &#x0026; Technology</institution>, <addr-line>Bannu, 28100</addr-line>, <country>Pakistan</country></aff>
<aff id="aff-3"><label>3</label><institution>Department of Computer Science, University of Hertfordshire</institution>, <addr-line>Hatfield, AL10 9AB</addr-line>, <country>UK</country></aff>
<aff id="aff-4"><label>4</label><institution>Department of Computer Sciences, College of Computer and Information Sciences, Princess Nourah bint Abdulrahman University</institution>, <addr-line>Riyadh, 11671</addr-line>, <country>Saudi Arabia</country></aff>
</contrib-group>
<author-notes>
<corresp id="cor1"><label>&#x002A;</label>Corresponding Author: Kun She. Email: <email>kun@uestc.edu.cn</email></corresp>
</author-notes>
<pub-date date-type="collection" publication-format="electronic">
<year>2024</year></pub-date>
<pub-date date-type="pub" publication-format="electronic"><day>15</day>
<month>5</month>
<year>2024</year></pub-date>
<volume>79</volume>
<issue>2</issue>
<fpage>2609</fpage>
<lpage>2623</lpage>
<history>
<date date-type="received">
<day>12</day>
<month>1</month>
<year>2024</year>
</date>
<date date-type="accepted">
<day>29</day>
<month>3</month>
<year>2024</year>
</date>
</history>
<permissions>
<copyright-statement>&#x00A9; 2024 Ghani et al.</copyright-statement>
<copyright-year>2024</copyright-year>
<copyright-holder>Ghani et al.</copyright-holder>
<license xlink:href="https://creativecommons.org/licenses/by/4.0/">
<license-p>This work is licensed under a <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution 4.0 International License</ext-link>, which permits unrestricted use, distribution, and reproduction in any medium, provided the original work is properly cited.</license-p>
</license>
</permissions>
<self-uri content-type="pdf" xlink:href="TSP_CMC_49611.pdf"></self-uri>
<abstract>
<p>The use of privacy-enhanced facial recognition has increased in response to growing concerns about data security and privacy in the digital age. This trend is spurred by rising demand for face recognition technology in a variety of industries, including access control, law enforcement, surveillance, and internet communication. However, the growing usage of face recognition technology has created serious concerns about data monitoring and user privacy preferences, especially in context-aware systems. In response to these problems, this study provides a novel framework that integrates sophisticated approaches such as Generative Adversarial Networks (GANs), Blockchain, and distributed computing to solve privacy concerns while maintaining exact face recognition. The framework&#x2019;s painstaking design and execution strive to strike a compromise between precise face recognition and protecting personal data integrity in an increasingly interconnected environment. Using cutting-edge tools like Dlib for face analysis, Ray Cluster for distributed computing, and Blockchain for decentralized identity verification, the proposed system provides scalable and secure facial analysis while protecting user privacy. The study&#x2019;s contributions include the creation of a sustainable and scalable solution for privacy-aware face recognition, the implementation of flexible privacy computing approaches based on Blockchain networks, and the demonstration of higher performance over previous methods. Specifically, the proposed StyleGAN model has an outstanding accuracy rate of 93.84% while processing high-resolution images from the CelebA-HQ dataset, beating other evaluated models such as Progressive GAN 90.27%, CycleGAN 89.80%, and MGAN 80.80%. With improvements in accuracy, speed, and privacy protection, the framework has great promise for practical use in a variety of fields that need face recognition technology. 
This study paves the way for future research in privacy-enhanced face recognition systems, emphasizing the significance of using cutting-edge technology to meet rising privacy issues in digital identity.</p>
</abstract>
<kwd-group kwd-group-type="author">
<kwd>Facial recognition</kwd>
<kwd>privacy protection</kwd>
<kwd>blockchain</kwd>
<kwd>GAN</kwd>
<kwd>distributed systems</kwd>
</kwd-group>
<funding-group>
<award-group id="awg2">
<funding-source>Princess Nourah bint Abdulrahman University, Riyadh, Saudi Arabia</funding-source>
<award-id>PNURSP2024R409</award-id>
</award-group></funding-group>
</article-meta>
</front>
<body>
<sec id="s1">
<label>1</label>
<title>Introduction</title>
<p>Machine learning (ML) has gained considerable interest in academic and industrial circles, demonstrating its versatility across various domains [<xref ref-type="bibr" rid="ref-1">1</xref>]. This interest has led to the development of a decentralized training framework, facilitating collaborative engagement of multiple participants in ML model training while ensuring the confidentiality of their individual training data. Privacy regulations such as the General Data Protection Regulation (GDPR) [<xref ref-type="bibr" rid="ref-2">2</xref>] and the California Consumer Privacy Act (CCPA) [<xref ref-type="bibr" rid="ref-3">3</xref>], enforce stringent privacy protocols for individuals&#x2019; privacy rights through secure collection, access, and storage of their user data. In response to concerns about personal data privacy, a decentralized method called federated learning [<xref ref-type="bibr" rid="ref-4">4</xref>] has been introduced. This approach enables collaborative machine learning while maintaining data privacy by allowing individuals to share only model parameters, rather than sensitive training data.</p>
<p>Furthermore, the adoption of privacy-enhanced face recognition has increased due to concerns surrounding data security and privacy, as well as the growing need for facial recognition technology [<xref ref-type="bibr" rid="ref-5">5</xref>]. By employing state-of-the-art Generative Adversarial Networks (GANs) to create synthetic faces [<xref ref-type="bibr" rid="ref-6">6</xref>], utilizing distributed computing for increased scalability, and implementing the secure framework of Blockchain for user identification, we may integrate innovative methodologies to address users&#x2019; privacy. The main goal is to achieve a harmonious equilibrium between guaranteeing precise facial recognition and protecting individuals&#x2019; data in an increasingly networked world [<xref ref-type="bibr" rid="ref-7">7</xref>]. Despite its widespread usage in fields such as access control, law enforcement, security, surveillance, internet communication, and entertainment [<xref ref-type="bibr" rid="ref-8">8</xref>], the pervasive use of face recognition technology has raised significant concerns about data monitoring and user privacy preferences [<xref ref-type="bibr" rid="ref-9">9</xref>]. The face recognition system follows a systematic structure: Initially, an image is retrieved from the database, followed by the application of the face detection process. Subsequently, features are extracted to identify the face from the feature storage database. This sequential process ensures the verification and identification, as depicted in <xref ref-type="fig" rid="fig-1">Fig. 1</xref>.</p>
<fig id="fig-1">
<label>Figure 1</label>
<caption>
<title>Generic structure of face recognition system</title>
</caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_49611-fig-1.tif"/>
</fig>
<p>The primary challenge lies in developing a decentralized, distributed system capable of managing a significant volume of secure facial data while preserving personal privacy integrity [<xref ref-type="bibr" rid="ref-10">10</xref>]. This study proposes a decentralized and distributed facial recognition system as a solution to address the privacy and security concerns arising from the rapid increase in facial data collection. The proposed system aims to achieve precise and highly secure facial analysis and recognition, along with enhanced privacy protection, by leveraging advanced technologies such as Blockchain, GANs, smart contracts, and distributed computing [<xref ref-type="bibr" rid="ref-11">11</xref>].</p>
<p>The major research contributions of this research are highlighted in bullet points:
<list list-type="bullet">
<list-item>
<p>Developed a scalable and sustainable privacy-aware facial recognition system by integrating GANs, Dlib, Ray Cluster, and Blockchain technology. This provides accurate biometrics while upholding robust user privacy.</p></list-item>
<list-item>
<p>Created a versatile privacy computing model that distributes facial data processing across dedicated nodes in a cluster configuration. Sensitive user information is isolated and securely stored via Blockchain.</p></list-item>
<list-item>
<p>Comprehensive experiments validate that the approach exceeds existing techniques in balancing high precision matching with low visibility of personal data, generation time and resilience against data leaks.</p></list-item>
</list></p>
</sec>
<sec id="s2">
<label>2</label>
<title>Related Work</title>
<p>Face identification involves recognizing an unknown face image by comparing it with a database of known faces. In this process, the most similar match is returned as the assumed identity of the subject after calculating the similarity between a specific face image and all the face images in a database as presented in <xref ref-type="fig" rid="fig-2">Fig. 2</xref>. Numerous challenges in the field of face identification have been tackled using a variety of methods. In academics, there has been specific interest in hybrid face recognition techniques. The complexities of face identification are discussed in several papers [<xref ref-type="bibr" rid="ref-12">12</xref>]. Chaabane et al. [<xref ref-type="bibr" rid="ref-13">13</xref>] utilize statistical characteristics in their innovative approach, which accelerates identification speed and achieves an impressive accuracy rate of 99.37% on a dataset comprising 400 images from 40 individuals. Vu et al. [<xref ref-type="bibr" rid="ref-14">14</xref>] introduce &#x201C;MaskTheFace,&#x201D; a method that enhances existing datasets and modifies facial recognition systems to accommodate individuals wearing masks, ensuring secure authentication without the need to rebuild datasets. Hariri [<xref ref-type="bibr" rid="ref-15">15</xref>] has engineered a robust system that integrates MobileNetV2, OpenCV, and FaceNet to address the need for facial identification during the pandemic, achieving exceptional accuracy rates of 99.65% for mask detection, 99.52% for identifying individuals wearing masks, and 99.96% for recognizing unmasked faces. Haider TH. ALRikabi et al. [<xref ref-type="bibr" rid="ref-16">16</xref>] propose a Matlab-based face identification system utilizing a Quantum Neural Network (QN) for precise recognition of facial patterns against a maintained database.</p>
<fig id="fig-2">
<label>Figure 2</label>
<caption>
<title>Traditional facial recognition systems</title>
</caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_49611-fig-2.tif"/>
</fig>
<p>Prior research addresses significant privacy concerns and facets of facial recognition technology. Feng et al. [<xref ref-type="bibr" rid="ref-17">17</xref>] devised a privacy protection system for face recognition and resolution based on edge computing to mitigate the privacy risks associated with cloud computing. Tian et al. [<xref ref-type="bibr" rid="ref-18">18</xref>] propose a comprehensive approach to address privacy and fairness issues in facial image graphs. Their study employs GAN models to generate synthetic images that maintain privacy while ensuring fairness. Zhang et al. [<xref ref-type="bibr" rid="ref-19">19</xref>] investigate the resistance of customers to innovation in face recognition payments, emphasizing the influence of user behavior moderators such as gender, platform trust, and privacy concerns on user behavior.</p>
<p>This study introduces a novel approach to facial recognition technology, integrating adversarial image synthesis and decentralized identity management. Through the utilization of GANs for creating realistic dummy faces and the implementation of tamper-resistant Blockchain transactions for secure user verification, the system endeavors to strike a balance between performance and privacy. Whereas previous research concentrates on specific aspects, this study offers a comprehensive solution covering training data expansion, access control, result verification, and storage auditing, promoting responsible and ethical facial recognition practices. The summary of the main distinctions between this study and previous research is shown in <xref ref-type="table" rid="table-1">Table 1</xref>.</p>
<table-wrap id="table-1">
<label>Table 1</label>
<caption>
<title>Comparison of proposed study with existing literature in facial recognition technology</title>
</caption>
<table frame="hsides">
<colgroup>
<col align="left"/>
<col align="left"/>
<col align="left"/>
</colgroup>
<thead>
<tr>
<th>Aspect</th>
<th>Existing literature</th>
<th>Proposed study</th>
</tr>
</thead>
<tbody>
<tr>
<td>Technology integration</td>
<td>Limited integration of Blockchain and GANs</td>
<td>Comprehensive integration of Blockchain and GANs</td>
</tr>
<tr>
<td>Privacy enhancement</td>
<td>Some studies address privacy concerns using edge computing</td>
<td>Emphasizes privacy through Blockchain transactions and GANs synthesis</td>
</tr>
<tr>
<td>Security measures</td>
<td>Limited exploration of tamper-resistant Blockchain transactions</td>
<td>Implements tamper-resistant Blockchain transactions for secure user verification</td>
</tr>
<tr>
<td>Comprehensive approach</td>
<td>Focuses on specific aspects such as affect recognition or pose-invariant face recognition</td>
<td>Offers a holistic solution including training data expansion, access control, result verification, and storage auditing</td>
</tr>
<tr>
<td>Novelty</td>
<td>Some studies propose innovative techniques like federated learning</td>
<td>Introduces a novel approach by integrating GANs and Blockchain for facial recognition</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
<sec id="s3">
<label>3</label>
<title>Proposed Framework</title>
<p>The distributed face recognition system is meticulously designed with scalability, fault tolerance, interoperability, and privacy rules as top priorities. This involves implementing advanced distributed data storage techniques to ensure efficient face data processing and retrieval. Strategic placement of numerous nodes, along with GAN-based face recognition modules, enhances system efficiency and resilience. The use of a Blockchain network reinforces privacy and security by establishing decentralized data storage and access control methods. Priority is given to data encryption methods that balance computational efficiency and encryption strength, ensuring sensitive data protection during transmission. <xref ref-type="fig" rid="fig-3">Fig. 3</xref> visually represents the proposed framework, encompassing the discussed design concepts and procedures for clarity and reference.</p>
<fig id="fig-3">
<label>Figure 3</label>
<caption>
<title>Workflow of the proposed framework for secure and scalable face recognition</title>
</caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_49611-fig-3.tif"/>
</fig>
<p>The proposed methodology ensures secure communication and data management through a series of steps. Beginning with cluster initialization using Blockchain, nodes are configured and authorized, followed by GAN Face Generator and Variation Generator modules for synthetic face creation. The Cluster Multimode Model optimizes resource usage and scalability. Faces are securely stored, encoded, and associated with identities for streamlined management. Testing assesses encoding performance, with results stored in decentralized cloud services. Ethereum Blockchain ensures transparency and integrity, capturing transactional data. The workflow concludes with the cluster&#x2019;s graceful shutdown, preserving operational effectiveness. This advanced approach integrates cutting-edge technologies for robust, scalable, and private face recognition, marking a significant breakthrough in the field.</p>
<sec id="s3_1">
<label>3.1</label>
<title>Generative Adversarial Network</title>
<p>GAN is a type of machine learning model that can generate synthetic data that resembles the existing data. The basic idea of GANs is that two neural networks, consisting of a generator and a discriminator, engage in an adversarial training process, as illustrated in <xref ref-type="fig" rid="fig-4">Fig. 4</xref>.</p>
<fig id="fig-4">
<label>Figure 4</label>
<caption>
<title>Synthetic image generation and model training process</title>
</caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_49611-fig-4.tif"/>
</fig>
<p>The discriminator is trained to distinguish between these images, and its loss (Li_discriminator) is computed by comparing its output for actual and synthetic images. The discriminator (<italic>W</italic><italic>d</italic>) weights are then updated by gradient descent. The generator&#x2019;s task is to produce false images that can deceive the discriminator; the generator loss (Li_generator) is calculated using the discriminator&#x2019;s output for synthetic images. The generator&#x2019;s (<italic>W</italic>g) weights are suitably adjusted. Periodic updates on the average discriminator and generator losses are provided by the program, which offers insightful information on the continuous training process. When the discriminator and generator losses are confirmed to have stabilized and converged, the training is considered complete.</p>
<p>Algorithm 1 offers step-by-step instructions for training the facial recognition system and emphasizes crucial mathematical ideas to enhance model performance. The pre-trained GAN model was modified to fit the system under demonstration after being trained on the same publicly accessible data. The goal of this model is to produce realistic, high-quality facial photographs. Firstly, the generator (G) and discriminator (D) networks&#x2019; primary hyper-parameters and random weights are initialized. The training loop consists of sampling and processing actual (Xi_real) and synthetic (Xi_synthetic) face images over epochs (t) while iterating over the current batch (i).</p>
<fig id="fig-12">
<graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_49611-fig-12.tif"/>
</fig>
</sec>
<sec id="s3_2">
<label>3.2</label>
<title>Model Training</title>
<p>This research emphasizes the crucial role of face recognition model training in establishing a reliable framework. The core strategy relies on state-of-the-art facial recognition skills from pre-trained deep neural networks, supported notably by the open-source Dlib toolbox. The effectiveness of facial recognition systems is linked to the quality and breadth of training data, influencing the model&#x2019;s ability to accurately identify individuals. The utilization of pre-trained deep neural networks enables accurate extraction of complex facial traits, ensuring precise recognition even in challenging scenarios. This initiative leverages recent advancements in machine learning and computer vision through the Dlib toolbox, creating a highly reliable and accurate face identification system.</p>
<p>Precise facial mapping, feature extraction, and matching are made possible by Dlib&#x2019;s powerful image analysis pipelines and model structures. <xref ref-type="fig" rid="fig-5">Fig. 5</xref> presents the results of the face generation process, which are based on GANs. This illustrates how well the GAN-based method produces realistic facial images. The model&#x2019;s output is a testament to these techniques and shows how helpful this study was in attaining accurate and efficient face recognition.</p>
<fig id="fig-5">
<label>Figure 5</label>
<caption>
<title>GAN-based synthetic face generation</title>
</caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_49611-fig-5.tif"/>
</fig>
<p>This strategy describes a methodical way to use StyleGAN, a GAN, to create synthetic faces that then add to the training dataset of face recognition systems. To make face creation easier, the StyleGAN model&#x2019;s parameters are first initialized. Synthetic faces are requested from the StyleGAN model by sampling from the latent space during testing and validation, and the resulting synthetic faces are retrieved for assessment. The real face dataset is then supplemented in the training dataset augmentation phase by integrating it with the set of synthetic faces produced by StyleGAN through the use of the set union procedure. By adding synthetic faces alongside actual faces, this augmentation procedure seeks to increase the training dataset&#x2019;s variety and resilience, which might improve the performance and generalization capacity of the face recognition model. This method helps to reduce biases and boost accuracy in practical face recognition applications by exposing the model to a wider variety of facial variants.</p>
<p>Algorithm 2 delineates the face recognition and verification methodology utilized in this research. Leveraging the robust Dlib library, discriminative face embeddings are extracted, encoding facial characteristics resilient to challenging conditions. Upon acquiring a face image, the algorithm computes its embedding vector and compares it against a database of known embeddings using distance metrics. A positive identity match is declared if the minimum distance falls below a predefined threshold, indicating sufficient similarity to a stored embedding. Conversely, a negative verification is returned for unknown individuals.</p>
<fig id="fig-13">
<graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_49611-fig-13.tif"/>
</fig>
</sec>
<sec id="s3_3">
<label>3.3</label>
<title>Integrating Blockchain to Improve System Performance</title>
<p>Blockchain technology, which was first developed for cryptocurrencies, is used in this study. Important Ethereum components enable DApp interaction via MetaMask and seamless asset management. We eliminate the need for node hosting with Infura Ethereum API endpoints, ensuring smooth Blockchain access. Kaleido, a Blockchain platform-as-a-service, uses solidity-based smart contracts to expedite the establishment and management of corporate consortium networks. These contracts govern user addresses, IPFS image encoding, registration, authentication, and dynamic updates. The foundation for data protection, accountability, and trust-based, decentralized regulation automation in facial recognition is Blockchain technology.</p>
<p>The face recognition algorithm employs facial embedding to authenticate individuals in images, calculating similarity scores between input and known faces. The method utilizes Dlib and a predetermined threshold to determine positive or negative identity verification. This process ensures accurate identification in extensive face datasets. Integration of blockchain, Google Drive, and GPU clusters facilitates secure face creation and encoding. Tasks include GAN-based synthesis, IPFS network creation, Kaleido Blockchain deployment, user identity confirmation, and resource evaluation. Secure storage involves saving IPFS hashes in Google Drive, with encoding verification through cloud services. Cloud clusters, IPFS activation, Google Drive connection, and facial recognition activities are implemented using Dlib, storing results in JSON format.</p>
</sec>
</sec>
<sec id="s4">
<label>4</label>
<title>Results and Discussion</title>
<p>Modern technologies like Blockchain, Dlib, Ray Cluster, and GANs are easily integrated into the distributed face recognition framework to maximize efficiency, strengthen security, and protect privacy. GANs are essential to this approach because they produce synthetic faces that are highly similar to their real-world counterparts, improving the quality of training and testing data. In the meanwhile, Ray Cluster enables parallelized computations, improving system scalability to effectively handle massive datasets. Dlib contributes to the dependability of the system by guaranteeing the correctness of face matching procedures.</p>
<p>Blockchain technology facilitates secure data sharing between dispersed nodes and decentralized identity verification by utilizing smart contracts. By integrating various state-of-the-art technologies in a harmonious way, the framework realizes a comprehensive, privacy-aware face recognition system that has great potential for practical application. To support this technical discussion, <xref ref-type="fig" rid="fig-6">Fig. 6</xref> provides a striking example of the benefits of GANs by demonstrating how they may produce high-quality images, highlighting their critical function in the system. <xref ref-type="fig" rid="fig-7">Fig. 7</xref> illustrates the operation of the face recognition system and offers insights into its performance by displaying names and clarity metrics next to recognized faces. <xref ref-type="fig" rid="fig-8">Fig. 8</xref> illustrates how facial landmarks evolve, offering a clear visual representation for image production and identification algorithms.</p>
<fig id="fig-6">
<label>Figure 6</label>
<caption>
<title>GAN-based performance-illustrating the superiority of GANs in generating high-quality images for enhanced face recognition systems</title>
</caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_49611-fig-6.tif"/>
</fig><fig id="fig-7">
<label>Figure 7</label>
<caption>
<title>Face recognition system performance-names, and clarity for recognized faces, providing insights into system functionality</title>
</caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_49611-fig-7.tif"/>
</fig><fig id="fig-8">
<label>Figure 8</label>
<caption>
<title>Face landmark development-depicting the clarity for image generation and recognition</title>
</caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_49611-fig-8.tif"/>
</fig>
<sec id="s4_1">
<label>4.1</label>
<title>Dataset</title>
<p>The study makes use of the CelebA-HQ<xref ref-type="fn" rid="fn1"><sup>1</sup></xref><fn id="fn1"><label>1</label><p><ext-link ext-link-type="uri" xlink:href="https://paperswithcode.com/sota/image-generation-on-celeba-hq-1024x1024">https://paperswithcode.com/sota/image-generation-on-celeba-hq-1024x1024</ext-link></p></fn> dataset, which consists of 70,000 high-quality PNG images with 1024 by 1024-pixel resolution. These images feature a range of demographic characteristics, such as age, race, and background, in addition to different facial accessories, such as sunglasses, hats, and eyeglasses. Dlib is used to automatically align and crop the images, reducing the impact of the initial biases. During training, a varied set of faces is collected in order to guarantee algorithmic inclusivity and fairness across different populations. Preprocessing methods are used to improve the caliber and variety of the training data, such as scaling, normalization, and augmentation.</p>
</sec>
<sec id="s4_2">
<label>4.2</label>
<title>Results</title>
<p>The performance evaluation description of the face-generating techniques used in the research is shown in <xref ref-type="table" rid="table-2">Table 2</xref>. For performance evaluation, StyleGAN utilizes Dlib, custom similarity loss, Adam optimization, and trained models. Similarly, Progressive GAN uses OpenCV, Adam, custom discriminator and generator loss functions, and pre-trained models. In contrast, MGAN does not depend on pre-trained models; instead, it leverages TensorFlow, custom multitask loss, and Adam optimization. The selection of framework, custom loss functions, optimizer, and pre-trained models has a significant impact on the efficiency and accuracy of face generation methods.</p>
<table-wrap id="table-2">
<label>Table 2</label>
<caption>
<title>An analysis of several methods, such as optimizer, loss functions, and pre-trained models, is included in the summary of face generation strategies</title>
</caption>
<table frame="hsides">
<colgroup>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
</colgroup>
<thead>
<tr>
<th>Models</th>
<th>Loss function</th>
<th>Optimizer</th>
<th>Pre-trained</th>
</tr>
</thead>
<tbody>
<tr>
<td>StyleGAN</td>
<td>Custom Similarity Loss</td>
<td>Adam</td>
<td>Dlib</td>
</tr>
<tr>
<td>Progressive GAN</td>
<td>Custom Discriminator &#x002B; Generator Loss</td>
<td>Adam</td>
<td>OpenCV</td>
</tr>
<tr>
<td>CycleGAN</td>
<td>Custom Adversarial Loss</td>
<td>Adam</td>
<td>TensorFlow</td>
</tr>
<tr>
<td>MGAN</td>
<td>Custom Multitask Loss</td>
<td>Adam</td>
<td>TensorFlow</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>Similarly, <xref ref-type="fig" rid="fig-9">Fig. 9</xref> shows a detailed overview of each job in the system, including its ID, job description, current state, and related function or class name. This detailed data is crucial for monitoring and optimizing task execution, allowing for the discovery of possible bottlenecks, resource-intensive procedures, and chances for efficiency improvements. This data allows for a more in-depth knowledge of resource allocation and overall system efficiency by outlining the time and memory needs of each activity.</p>
<fig id="fig-9">
<label>Figure 9</label>
<caption>
<title>Ethereum blockchain transactions execution details&#x2013;individual ID, name, Job ID, status and function or class name for system task management and performance analysis</title>
</caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_49611-fig-9.tif"/>
</fig>
<p><xref ref-type="table" rid="table-3">Table 3</xref> provides a comprehensive comparison between our proposed StyleGAN model and various other models, including Progressive GAN, CycleGAN, MGAN, and baseline models like VGG [<xref ref-type="bibr" rid="ref-20">20</xref>], CNN [<xref ref-type="bibr" rid="ref-21">21</xref>], and ResNet20 [<xref ref-type="bibr" rid="ref-22">22</xref>]. The evaluation encompasses crucial criteria such as image size, dataset size, epoch count, and accuracy. Notably, StyleGAN exhibits superior performance, particularly excelling in processing extensive 1024 &#x00D7; 1024 CelebA-HQ images. Leveraging TensorFlow and custom loss functions, StyleGAN achieves efficient processing over a dataset comprising 70,000 images across 10 epochs. This computational efficiency is attributed to the integration of Dlib and bespoke loss functions within the StyleGAN framework, as visually represented in <xref ref-type="fig" rid="fig-10">Fig. 10</xref>.</p>
<table-wrap id="table-3">
<label>Table 3</label>
<caption>
<title>A comparison was made between the presented model StyleGAN with baseline techniques such as Progressive GAN, CycleGAN, MGAN, VGG, CNN, and ResNet20</title>
</caption>
<table frame="hsides">
<colgroup>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
</colgroup>
<thead>
<tr>
<th>Method</th>
<th>Dataset</th>
<th>Image_size</th>
<th>No. of samples</th>
<th>No. of epochs</th>
<th>Accuracy (%)</th>
</tr>
</thead>
<tbody>
<tr>
<td>StyleGAN (Our)</td>
<td>CelebA-HQ</td>
<td>1024 &#x00D7; 1024</td>
<td>70,000</td>
<td>10</td>
<td>93.84</td>
</tr>
<tr>
<td>Progressive GAN</td>
<td>FFHQ</td>
<td>1024 &#x00D7; 1024</td>
<td>70,000</td>
<td>10</td>
<td>90.27</td>
</tr>
<tr>
<td>CycleGAN</td>
<td>CelebA</td>
<td>178 &#x00D7; 218</td>
<td>200,000</td>
<td>10</td>
<td>89.80</td>
</tr>
<tr>
<td>MGAN</td>
<td>CASIA WebFace</td>
<td>60 &#x00D7; 60</td>
<td>10,000</td>
<td>10</td>
<td>80.80</td>
</tr>
<tr>
<td>VGG</td>
<td>CIFAR-10</td>
<td>32 &#x00D7; 32</td>
<td>60,000</td>
<td>10</td>
<td>83.0</td>
</tr>
<tr>
<td>CNN</td>
<td>FCV Fingerprints</td>
<td>384 &#x00D7; 384</td>
<td>120,000</td>
<td>10</td>
<td>89.32</td>
</tr>
<tr>
<td>ResNet20</td>
<td>CIFAR-10</td>
<td>32 &#x00D7; 32</td>
<td>60,000</td>
<td>10</td>
<td>92.43</td>
</tr>
</tbody>
</table>
</table-wrap><fig id="fig-10">
<label>Figure 10</label>
<caption>
<title>Comparison of the proposed model with rest of discussed models including progressive GAN, Cycle GAN, MGAN, VGG, CNN, and ResNet20</title>
</caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_49611-fig-10.tif"/>
</fig>
<p>In contrast, Progressive GAN, despite requiring more epochs due to a smaller dataset, showcases remarkable generation time efficiency for large images through OpenCV and generic losses. This makes it well-suited for real-time applications leveraging OpenCV&#x2019;s edge features in embedded systems. Resource-constrained implementations may opt for lightweight models like MGAN and VGG, albeit with a trade-off in accuracy. Dlib&#x2019;s face analysis pipelines can mitigate these accuracy costs, enhancing the reliability of these models. Crucially, StyleGAN outperforms all counterparts with an exceptional accuracy rating of 93.84%. The study underscores the synergistic effects of custom losses, Dlib, and Adam optimization in producing high-fidelity face embeddings crucial for accurate identification tasks. Additionally, tests on StyleGAN derivatives consistently validate correctness and speed, affirming the inherent advantages of the framework. <xref ref-type="fig" rid="fig-11">Fig. 11</xref> visually compares the outputs of StyleGAN, Progressive GAN, CycleGAN, and WGAN, highlighting StyleGAN&#x2019;s superior performance in terms of high resolution and image integrity.</p>
<fig id="fig-11">
<label>Figure 11</label>
<caption>
<title>GAN model outputs&#x2013;a visual comparison of outputs generated by StyleGAN, progressive GAN, CycleGAN, and WGAN when processing random images, underscoring the superior performance of StyleGAN in high-resolution and image fidelity</title>
</caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_49611-fig-11.tif"/>
</fig>
<p>The observed speed-accuracy tradeoffs show that StyleGAN is suitable for high-performance areas where privacy-preserving identification is required overall. Compared with baseline attempts based on CNN/VGG architectures, StyleGAN outputs show increased perceptual quality and feature retention as seen by the lower Fr&#x00E9;chet Inception Distance (FID) and similarity losses. TensorFlow outperforms competitors like OpenCV, Dlib, and MGAN when datasets grow exponentially, underscoring StyleGAN&#x2019;s comparative advantage in the face recognition space. StyleGAN distinguishes itself as a robust and versatile method that surpasses alternative models in terms of precision, effectiveness, and confidentiality preservation. It is at the forefront of facial recognition technology because of its adaptability to a wide range of application situations and its ability to handle increasingly large datasets effectively.</p>
<p><bold>Ablation Study:</bold> The ablation study systematically evaluates key components, including Blockchain authentication, GAN-based image synthesis, Ray parallelization, and Dlib biometrics, within the privacy-enhancing face recognition framework. Disabling blockchain compromises cryptography-backed integrity, limiting synthetic facial variety impacts model robustness, eliminating distributed computing hampers efficiency, and excluding Dlib facial mappings impairs out-of-the-box recognition capabilities. This underscores the vital importance of each element, showcasing their collective role in an effective, safe, and precise face recognition system with enhanced privacy. The symbiotic integration of trustless identity, artificial biometrics, scalable infrastructure, and performant algorithms achieves a balance between accuracy, security, and privacy in ethical facial analysis.</p>
</sec>
</sec>
<sec id="s5">
<label>5</label>
<title>Conclusion</title>
<p>In the dynamic landscape of digital technology, facial recognition systems have become pervasive across various industries, giving rise to concerns about data security, privacy, and ethical implications. Addressing these challenges, our research presents an inclusive framework for privacy-centric face recognition. Leveraging advanced technologies such as distributed computing, Blockchain, and GANs, our system marks a paradigm shift. Notably, GANs generate highly realistic synthetic faces, diversifying training data while safeguarding user privacy. The incorporation of Blockchain ensures secure and immutable identity verification, mitigating data tampering risks and unauthorized access. Enhanced by distributed computing, our study demonstrates resilience and scalability, efficiently processing extensive face data while adhering to stringent privacy protocols. The research validates the framework&#x2019;s efficacy, surpassing alternative approaches in privacy preservation and performance. Our system contributes significantly to advancing face recognition technology, striking a delicate balance between privacy, scalability, and accuracy. Ultimately, the study aims to offer a secure facial recognition system.</p>
</sec>
</body>
<back>
<ack><p>The authors would like to acknowledge Princess Nourah bint Abdulrahman University Riyadh, Saudi Arabia, for paying the Article Processing Charges (APC) of this publication.</p>
</ack>
<sec><title>Funding Statement</title>
<p>This project is funded by &#x201C;Researchers Supporting Project Number (PNURSP2024R409)&#x201D;, Princess Nourah bint Abdulrahman University, Riyadh, Saudi Arabia.</p>
</sec>
<sec><title>Author Contributions</title>
<p>Conceptualization, Methodology, and Writing original draft, Muhammad Ahmad Nawaz Ul Ghani; Supervision, Kun She; Review and editing, Arslan Rauf, Shumaila Khan, Javed Ali Khan, Eman Abdullah Aldakheel and Doaa Sami Khafaga.</p>
</sec>
<sec sec-type="data-availability"><title>Availability of Data and Materials</title>
<p>Data will be made available on request.</p>
</sec>
<sec sec-type="COI-statement"><title>Conflicts of Interest</title>
<p>The authors declare that they have no conflicts of interest to report regarding the
present study.</p>
</sec>
<ref-list content-type="authoryear">
<title>References</title>
<ref id="ref-1"><label>[1]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>K.</given-names> <surname>Raju</surname></string-name>, <string-name><given-names>B. C.</given-names> <surname>Rao</surname></string-name>, <string-name><given-names>K.</given-names> <surname>Saikumar</surname></string-name>, and <string-name><given-names>N. L.</given-names> <surname>Pratap</surname></string-name></person-group>, &#x201C;<article-title>An optimal hybrid solution to local and global facial recognition through machine learning</article-title>,&#x201D; in <source>A Fusion Artif. Intell. Internet Things Emerg. Cyber Syst.</source>, vol. <volume>6</volume>, no. <issue>1</issue>, pp. <fpage>203</fpage>&#x2013;<lpage>226</lpage>, <year>2022</year>. doi: <pub-id pub-id-type="doi">10.1007/978-3-030-76653-5</pub-id>.</mixed-citation></ref>
<ref id="ref-2"><label>[2]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>S. Z. E.</given-names> <surname>Mestari</surname></string-name>, <string-name><given-names>G.</given-names> <surname>Lenzini</surname></string-name>, and <string-name><given-names>H.</given-names> <surname>Demirci</surname></string-name></person-group>, &#x201C;<article-title>Preserving data privacy in machine learning systems</article-title>,&#x201D; <source>Comput. Secur.</source>, vol. <volume>137</volume>, no. <issue>6</issue>, pp. <fpage>103605</fpage>, <year>2024</year>. doi: <pub-id pub-id-type="doi">10.1016/j.cose.2023.103605</pub-id>.</mixed-citation></ref>
<ref id="ref-3"><label>[3]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>D.</given-names> <surname>Almeida</surname></string-name>, <string-name><given-names>K.</given-names> <surname>Shmarko</surname></string-name>, and <string-name><given-names>E.</given-names> <surname>Lomas</surname></string-name></person-group>, &#x201C;<article-title>The ethics of facial recognition technologies, surveillance, and accountability in an age of artificial intelligence: A comparative analysis of US, EU, and UK regulatory frameworks</article-title>,&#x201D; <source>AI Ethics</source>, vol. <volume>2</volume>, no. <issue>3</issue>, pp. <fpage>377</fpage>&#x2013;<lpage>387</lpage>, <year>2022</year>. doi: <pub-id pub-id-type="doi">10.1007/s43681-021-00077-w</pub-id>; <pub-id pub-id-type="pmid">34790955</pub-id></mixed-citation></ref>
<ref id="ref-4"><label>[4]</label><mixed-citation publication-type="conf-proc"><person-group person-group-type="author"><string-name><given-names>E.</given-names> <surname>Farooq</surname></string-name> and <string-name><given-names>A.</given-names> <surname>Borghesi</surname></string-name></person-group>, &#x201C;<article-title>A federated learning approach for anomaly detection in high performance computing</article-title>,&#x201D; in <conf-name>Proc. IEEE ICTAI</conf-name>, <publisher-loc>Atlanta, USA</publisher-loc>, <year>2023</year>, pp. <fpage>496</fpage>&#x2013;<lpage>500</lpage>.</mixed-citation></ref>
<ref id="ref-5"><label>[5]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>M. O.</given-names> <surname>Oloyede</surname></string-name>, <string-name><given-names>G. P.</given-names> <surname>Hancke</surname></string-name>, and <string-name><given-names>H. C.</given-names> <surname>Myburgh</surname></string-name></person-group>, &#x201C;<article-title>A review on face recognition systems: Recent approaches and challenges</article-title>,&#x201D; <source>Multimed. Tools Appl.</source>, vol. <volume>79</volume>, no. <issue>37</issue>, pp. <fpage>27891</fpage>&#x2013;<lpage>27922</lpage>, <year>2020</year>. doi: <pub-id pub-id-type="doi">10.1007/s11042-020-09261-2</pub-id>.</mixed-citation></ref>
<ref id="ref-6"><label>[6]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>Z.</given-names> <surname>Qin</surname></string-name>, <string-name><given-names>Q.</given-names> <surname>Chen</surname></string-name>, <string-name><given-names>Y.</given-names> <surname>Ding</surname></string-name>, <string-name><given-names>T.</given-names> <surname>Zhuang</surname></string-name>, <string-name><given-names>Z.</given-names> <surname>Qin</surname></string-name> and <string-name><given-names>K. K. R.</given-names> <surname>Choo</surname></string-name></person-group>, &#x201C;<article-title>Segmentation mask and feature similarity loss guided GAN for object-oriented image-to-image translation</article-title>,&#x201D; <source>Inform. Process Manag.</source>, vol. <volume>59</volume>, no. <issue>3</issue>, pp. <fpage>102926</fpage>, <year>2022</year>. doi: <pub-id pub-id-type="doi">10.1016/j.ipm.2022.102926</pub-id>.</mixed-citation></ref>
<ref id="ref-7"><label>[7]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>R. A.</given-names> <surname>Waelen</surname></string-name></person-group>, &#x201C;<article-title>The struggle for recognition in the age of facial recognition technology</article-title>,&#x201D; <source>AI Ethics</source>, vol. <volume>3</volume>, no. <issue>1</issue>, pp. <fpage>215</fpage>&#x2013;<lpage>222</lpage>, <year>2023</year>. doi: <pub-id pub-id-type="doi">10.1007/s43681-022-00146-8</pub-id>.</mixed-citation></ref>
<ref id="ref-8"><label>[8]</label><mixed-citation publication-type="conf-proc"><person-group person-group-type="author"><string-name><given-names>C.</given-names> <surname>Bouras</surname></string-name> and <string-name><given-names>E.</given-names> <surname>Michos</surname></string-name></person-group>, &#x201C;<article-title>An online real-time face recognition system for police purposes</article-title>,&#x201D; in <conf-name>Proc. ICOIN</conf-name>, <publisher-loc>Jeju Island, Korea</publisher-loc>, <year>2022</year>, pp. <fpage>62</fpage>&#x2013;<lpage>67</lpage>.</mixed-citation></ref>
<ref id="ref-9"><label>[9]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>R.</given-names> <surname>Datta Rakshit</surname></string-name>, <string-name><given-names>A.</given-names> <surname>Rattani</surname></string-name>, and <string-name><given-names>D. R.</given-names> <surname>Kisku</surname></string-name></person-group>, &#x201C;<article-title>An LDOP approach for face identification under unconstrained scenarios</article-title>,&#x201D; <source>J. Exp. Theor. Artif. Intell.</source>, vol. <volume>11</volume>, no. <issue>14</issue>, pp. <fpage>1</fpage>&#x2013;<lpage>49</lpage>, <year>2023</year>. doi: <pub-id pub-id-type="doi">10.1080/0952813X.2023.2183274</pub-id>.</mixed-citation></ref>
<ref id="ref-10"><label>[10]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>Y.</given-names> <surname>Lu</surname></string-name></person-group>, &#x201C;<article-title>Implementing blockchain in information systems: A review</article-title>,&#x201D; <source>Enterp. Inf. Syst.</source>, vol. <volume>16</volume>, no. <issue>12</issue>, pp. <fpage>2008513</fpage>, <year>2022</year>.</mixed-citation></ref>
<ref id="ref-11"><label>[11]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>G.</given-names> <surname>Revathy</surname></string-name>, <string-name><given-names>K.</given-names> <surname>Bhavana Raj</surname></string-name>, <string-name><given-names>A.</given-names> <surname>Kumar</surname></string-name>, <string-name><given-names>S.</given-names> <surname>Adibatti</surname></string-name>, <string-name><given-names>P.</given-names> <surname>Dahiya</surname></string-name> and <string-name><given-names>T. M.</given-names> <surname>Latha</surname></string-name></person-group>, &#x201C;<article-title>Investigation of E-voting system using face recognition using convolutional neural network (CNN)</article-title>,&#x201D; <source>Theor. Comput. Sci.</source>, vol. <volume>925</volume>, no. <issue>2</issue>, pp. <fpage>61</fpage>&#x2013;<lpage>67</lpage>, <year>2022</year>. doi: <pub-id pub-id-type="doi">10.1016/j.tcs.2022.05.005</pub-id>.</mixed-citation></ref>
<ref id="ref-12"><label>[12]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>S.</given-names> <surname>Iqbal</surname></string-name>, <string-name><given-names>A. N.</given-names> <surname>Qureshi</surname></string-name>, <string-name><given-names>M.</given-names> <surname>Alhussein</surname></string-name>, <string-name><given-names>K.</given-names> <surname>Aurangzeb</surname></string-name>, and <string-name><given-names>M. S.</given-names> <surname>Anwar</surname></string-name></person-group>, &#x201C;<article-title>AD-CAM: Enhancing interpretability of convolutional neural networks with a lightweight framework-from black box to glass box</article-title>,&#x201D; <source>IEEE J. Biomed Health</source>, vol. <volume>28</volume>, no. <issue>1</issue>, pp. <fpage>514</fpage>&#x2013;<lpage>525</lpage>, <year>2023</year>. doi: <pub-id pub-id-type="doi">10.1109/JBHI.2023.3329231</pub-id>; <pub-id pub-id-type="pmid">37910403</pub-id></mixed-citation></ref>
<ref id="ref-13"><label>[13]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>S. B.</given-names> <surname>Chaabane</surname></string-name>, <string-name><given-names>M.</given-names> <surname>Hijji</surname></string-name>, <string-name><given-names>R.</given-names> <surname>Harrabi</surname></string-name>, and <string-name><given-names>H.</given-names> <surname>Seddik</surname></string-name></person-group>, &#x201C;<article-title>Face recognition based on statistical features and SVM classifier</article-title>,&#x201D; <source>Multimed. Tools Appl.</source>, vol. <volume>81</volume>, no. <issue>6</issue>, pp. <fpage>8767</fpage>&#x2013;<lpage>8784</lpage>, <year>2022</year>. doi: <pub-id pub-id-type="doi">10.1007/s11042-021-11816-w</pub-id>.</mixed-citation></ref>
<ref id="ref-14"><label>[14]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>H. N.</given-names> <surname>Vu</surname></string-name>, <string-name><given-names>M. H.</given-names> <surname>Nguyen</surname></string-name>, and <string-name><given-names>C.</given-names> <surname>Pham</surname></string-name></person-group>, &#x201C;<article-title>Masked face recognition with convolutional neural networks and local binary patterns</article-title>,&#x201D; <source>Appl. Intell.</source>, vol. <volume>52</volume>, no. <issue>5</issue>, pp. <fpage>5497</fpage>&#x2013;<lpage>5512</lpage>, <year>2022</year>. doi: <pub-id pub-id-type="doi">10.1007/s10489-021-02728-1</pub-id>; <pub-id pub-id-type="pmid">34764616</pub-id></mixed-citation></ref>
<ref id="ref-15"><label>[15]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>W.</given-names> <surname>Hariri</surname></string-name></person-group>, &#x201C;<article-title>Efficient masked face recognition method during the COVID-19 pandemic</article-title>,&#x201D; <source>Signal Image Video P</source>, vol. <volume>16</volume>, no. <issue>3</issue>, pp. <fpage>605</fpage>&#x2013;<lpage>612</lpage>, <year>2022</year>. doi: <pub-id pub-id-type="doi">10.1007/s11760-021-02050-w</pub-id>; <pub-id pub-id-type="pmid">34804243</pub-id></mixed-citation></ref>
<ref id="ref-16"><label>[16]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>H. T. S.</given-names> <surname>ALRikabi</surname></string-name>, <string-name><given-names>I. A.</given-names> <surname>Aljazaery</surname></string-name>, <string-name><given-names>J. S.</given-names> <surname>Qateef</surname></string-name>, <string-name><given-names>A. H. M.</given-names> <surname>Alaidi</surname></string-name>, and <string-name><given-names>M.</given-names> <surname>Roa&#x0027;a</surname></string-name></person-group>, &#x201C;<article-title>Face patterns analysis and recognition system based on quantum neural network QNN</article-title>,&#x201D; <source>Int. J. Interac. Mob. Tech.</source>, vol. <volume>16</volume>, no. <issue>8</issue>, pp. <fpage>35</fpage>&#x2013;<lpage>48</lpage>, <year>2022</year>. doi: <pub-id pub-id-type="doi">10.3991/ijim.v16i08.30107</pub-id>.</mixed-citation></ref>
<ref id="ref-17"><label>[17]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>W.</given-names> <surname>Feng</surname></string-name> <etal>et al.</etal></person-group>, &#x201C;<article-title>A privacy protection scheme for facial recognition and resolution based on edge computing</article-title>,&#x201D; <source>Secur. Commun. Netw.</source>, vol. <volume>2022</volume>, pp. <fpage>12</fpage>, <year>2022</year>. doi: <pub-id pub-id-type="doi">10.1155/2022/4095427</pub-id>.</mixed-citation></ref>
<ref id="ref-18"><label>[18]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>H.</given-names> <surname>Tian</surname></string-name>, <string-name><given-names>T.</given-names> <surname>Zhu</surname></string-name>, and <string-name><given-names>W.</given-names> <surname>Zhou</surname></string-name></person-group>, &#x201C;<article-title>Fairness and privacy preservation for facial images: GAN-based methods</article-title>,&#x201D; <source>Comput. Secur.</source>, vol. <volume>122</volume>, pp. <fpage>102902</fpage>, <year>2022</year>. doi: <pub-id pub-id-type="doi">10.1016/j.cose.2022.102902</pub-id>.</mixed-citation></ref>
<ref id="ref-19"><label>[19]</label><mixed-citation publication-type="conf-proc"><person-group person-group-type="author"><string-name><given-names>M.</given-names> <surname>Zhang</surname></string-name>, <string-name><given-names>L.</given-names> <surname>Wang</surname></string-name>, <string-name><given-names>Y.</given-names> <surname>Zou</surname></string-name>, and <string-name><given-names>W.</given-names> <surname>Yan</surname></string-name></person-group>, &#x201C;<article-title>Analysis of consumers&#x2019; innovation resistance behavior to facial recognition payment: An empirical investigation</article-title>,&#x201D; in <conf-name>Proc. WHICEB</conf-name>, <publisher-loc>Wuhan, China</publisher-loc>, <year>2022</year>, vol. <volume>01</volume>, pp. <fpage>129</fpage>&#x2013;<lpage>138</lpage>.</mixed-citation></ref>
<ref id="ref-20"><label>[20]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>Z.</given-names> <surname>Shen</surname></string-name>, <string-name><given-names>T.</given-names> <surname>Zhong</surname></string-name>, <string-name><given-names>H.</given-names> <surname>Sun</surname></string-name>, and <string-name><given-names>B.</given-names> <surname>Qi</surname></string-name></person-group>, &#x201C;<article-title>RRN: A differential private approach to preserve privacy in image classification</article-title>,&#x201D; <source>IET Image Process</source>, vol. <volume>17</volume>, no. <issue>7</issue>, pp. <fpage>2192</fpage>&#x2013;<lpage>2203</lpage>, <year>2023</year>. doi: <pub-id pub-id-type="doi">10.1049/ipr2.12784</pub-id>.</mixed-citation></ref>
<ref id="ref-21"><label>[21]</label><mixed-citation publication-type="conf-proc"><person-group person-group-type="author"><string-name><given-names>M.</given-names> <surname>Mohammadi</surname></string-name>, <string-name><given-names>F.</given-names> <surname>Sabry</surname></string-name>, <string-name><given-names>W.</given-names> <surname>Labda</surname></string-name>, and <string-name><given-names>Q.</given-names> <surname>Malluhi</surname></string-name></person-group>, &#x201C;<article-title>Privacy-preserving deep-learning models for fingerprint data using differential privacy</article-title>,&#x201D; in <conf-name>Proc. ACM IWSPA</conf-name>, <publisher-loc>Charlotte, NC, USA</publisher-loc>, <year>2023</year>, pp. <fpage>45</fpage>&#x2013;<lpage>53</lpage>.</mixed-citation></ref>
<ref id="ref-22"><label>[22]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>J. W.</given-names> <surname>Lee</surname></string-name> <etal>et al.</etal></person-group>, &#x201C;<article-title>Privacy-preserving machine learning with fully homomorphic encryption for deep neural network</article-title>,&#x201D; <source>IEEE Access</source>, vol. <volume>10</volume>, pp. <fpage>30039</fpage>&#x2013;<lpage>30054</lpage>, <year>2022</year>. doi: <pub-id pub-id-type="doi">10.1109/ACCESS.2022.3159694</pub-id>.</mixed-citation></ref>
</ref-list>
</back></article>