<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.1 20151215//EN" "http://jats.nlm.nih.gov/publishing/1.1/JATS-journalpublishing1.dtd">
<article xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" article-type="research-article" dtd-version="1.1">
<front>
<journal-meta>
<journal-id journal-id-type="pmc">CMC</journal-id>
<journal-id journal-id-type="nlm-ta">CMC</journal-id>
<journal-id journal-id-type="publisher-id">CMC</journal-id>
<journal-title-group>
<journal-title>Computers, Materials &#x0026; Continua</journal-title>
</journal-title-group>
<issn pub-type="epub">1546-2226</issn>
<issn pub-type="ppub">1546-2218</issn>
<publisher>
<publisher-name>Tech Science Press</publisher-name>
<publisher-loc>USA</publisher-loc>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="publisher-id">31445</article-id>
<article-id pub-id-type="doi">10.32604/cmc.2023.031445</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Article</subject>
</subj-group>
</article-categories>
<title-group>
<article-title>A Multi-Watermarking Algorithm for Medical Images Using Inception V3&#x00A0;and&#x00A0;DCT</article-title>
<alt-title alt-title-type="left-running-head">A Multi-Watermarking Algorithm for Medical Images Using Inception V3 and DCT</alt-title>
<alt-title alt-title-type="right-running-head">A Multi-Watermarking Algorithm for Medical Images Using Inception V3 and DCT</alt-title>
</title-group>
<contrib-group content-type="authors">
<contrib id="author-1" contrib-type="author">
<name name-style="western"><surname>Fan</surname><given-names>Yu</given-names></name><xref ref-type="aff" rid="aff-1">1</xref>
<xref ref-type="aff" rid="aff-6">6</xref></contrib>
<contrib id="author-2" contrib-type="author" corresp="yes">
<name name-style="western"><surname>Li</surname><given-names>Jingbing</given-names></name><xref ref-type="aff" rid="aff-1">1</xref>
<xref ref-type="aff" rid="aff-2">2</xref><email>jingbingli2008@hotmail.com</email></contrib>
<contrib id="author-3" contrib-type="author">
<name name-style="western"><surname>Bhatti</surname><given-names>Uzair Aslam</given-names></name><xref ref-type="aff" rid="aff-1">1</xref>
<xref ref-type="aff" rid="aff-2">2</xref></contrib>
<contrib id="author-4" contrib-type="author">
<name name-style="western"><surname>Shao</surname><given-names>Chunyan</given-names></name><xref ref-type="aff" rid="aff-1">1</xref></contrib>
<contrib id="author-5" contrib-type="author">
<name name-style="western"><surname>Gong</surname><given-names>Cheng</given-names></name><xref ref-type="aff" rid="aff-1">1</xref></contrib>
<contrib id="author-6" contrib-type="author">
<name name-style="western"><surname>Cheng</surname><given-names>Jieren</given-names></name><xref ref-type="aff" rid="aff-3">3</xref>
<xref ref-type="aff" rid="aff-5">5</xref></contrib>
<contrib id="author-7" contrib-type="author">
<name name-style="western"><surname>Chen</surname><given-names>Yenwei</given-names></name><xref ref-type="aff" rid="aff-4">4</xref></contrib>
<aff id="aff-1"><label>1</label><institution>School of Information and Communication Engineering, Hainan University</institution>, <addr-line>Haikou, 570100</addr-line>, <country>China</country></aff>
<aff id="aff-2"><label>2</label><institution>State Key Laboratory of Marine Resource Utilization in the South China Sea, Hainan University</institution>, <addr-line>Haikou, 570100</addr-line>, <country>China</country></aff>
<aff id="aff-3"><label>3</label><institution>School of Computer Science and Technology, Hainan University</institution>, <addr-line>Haikou, 570100</addr-line>, <country>China</country></aff>
<aff id="aff-4"><label>4</label><institution>Graduate School of Information Science and Engineering, Ritsumeikan University</institution>, <addr-line>Kyoto, 5258577</addr-line>, <country>Japan</country></aff>
<aff id="aff-5"><label>5</label><institution>Hainan Blockchain Technology Engineering Research Center, Hainan University</institution>, <addr-line>Haikou, 570100</addr-line>, <country>China</country></aff>
<aff id="aff-6"><label>6</label><institution>TJ-YZ School of Network Science, Haikou University of Economics</institution>, <addr-line>Haikou, 571127</addr-line>, <country>China</country></aff>
</contrib-group>
<author-notes>
<corresp id="cor1"><label>&#x002A;</label>Corresponding Author: Jingbing Li. Email: <email>jingbingli2008@hotmail.com</email></corresp>
</author-notes>
<pub-date pub-type="epub" date-type="pub" iso-8601-date="2022-08-16"><day>16</day>
<month>08</month>
<year>2022</year></pub-date>
<volume>74</volume>
<issue>1</issue>
<fpage>1279</fpage>
<lpage>1302</lpage>
<history>
<date date-type="received"><day>18</day><month>04</month><year>2022</year></date>
<date date-type="accepted"><day>12</day><month>06</month><year>2022</year></date>
</history>
<permissions>
<copyright-statement>&#x00A9; 2023 Fan et al.</copyright-statement>
<copyright-year>2023</copyright-year>
<copyright-holder>Fan et al.</copyright-holder>
<license xlink:href="https://creativecommons.org/licenses/by/4.0/">
<license-p>This work is licensed under a <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution 4.0 International License</ext-link>, which permits unrestricted use, distribution, and reproduction in any medium, provided the original work is properly cited.</license-p>
</license>
</permissions>
<self-uri content-type="pdf" xlink:href="TSP_CMC_31445.pdf"></self-uri>
<abstract>
<p>Medical images are a critical component of the diagnostic process for clinicians. Although the quality of medical photographs is essential to the accuracy of a physician&#x2019;s diagnosis, they must be encrypted due to the characteristics of digital storage and information leakage associated with medical images. Traditional watermark embedding algorithms embed the watermark information into the medical image, which reduces the quality of the medical image and affects the physicians&#x2019; judgment of patient diagnosis. In addition, watermarks in this method have weak robustness under high-intensity geometric attacks when the medical image is attacked and the watermarks are destroyed. This paper proposes a novel watermarking algorithm using the convolutional neural networks (CNN) Inception V3 and the discrete cosine transform (DCT) to address the above-mentioned problems. First, the medical image is input into the Inception V3 network, which has been structured by adjusting parameters, such as the size of the convolution kernels and the typical architecture of the convolution modules. Second, the coefficients extracted from the fully connected layer of the network are transformed by DCT to obtain the feature vector of the medical image. Finally, the watermarks are encrypted using the logistic map system and hash function, and the keys are stored by a third party. The encrypted watermarks and the original image features are combined through logical operations to realize the embedding of the zero-watermark. In the experimental section, multiple watermarking schemes using three different types of watermarks were implemented to verify the effectiveness of the three proposed algorithms. Our NC values for all the images are more than 90&#x0025; accurate, which shows the robustness of the algorithm. Extensive experimental results demonstrate the robustness of the proposed algorithm under both conventional and high-intensity geometric attacks.</p>
</abstract>
<kwd-group kwd-group-type="author">
<kwd>Inception V3</kwd>
<kwd>multi-watermarking</kwd>
<kwd>DCT</kwd>
<kwd>watermark encryption</kwd>
<kwd>robustness</kwd>
</kwd-group>
</article-meta>
</front>
<body>
<sec id="s1"><label>1</label><title>Introduction</title>
<p>The rise of technology has opened up a slew of new possibilities for digital content creation and distribution. Web publishing and digital repositories/libraries are only a few examples of the many uses of this technology. However, the most critical matter with these applications is protecting users&#x2019; data. For digital data, current copyright laws are inadequate, according to experts. Consequently, the protection and enforcement of intellectual property rights for digital media have become a significant concern. As a result, researchers are working to devise new ways to keep copies from being produced. The use of digital watermarking techniques is one attempt that has gained significant attention. Steganography and watermarking are two different techniques, but they both focus on the message&#x2019;s durability and its ability to withstand removal attacks, such as image manipulations like cropping and filtering. For several different reasons, including copy protection and control, digital watermarking involves embedding information (the &#x201C;watermark&#x201D;) into digital multimedia content so that it may later be recovered or recognized. As digital content continues to increase at an ever-increasing rate, new watermarking techniques are being developed and commercialized to address some of these difficulties.</p>
<p>Medical images are playing an important role in the field of assisted medical diagnosis and telemedicine. Fast advanced network technology provides a good transmission channel for telemedicine and medical image information sharing, solving the problem of uneven distribution of medical resources, and improving the utilization rate of medical data. At the same time, medical image information sharing also faces problems such as illegal copying and tampering [<xref ref-type="bibr" rid="ref-1">1</xref>]. As an important basis for physicians to access patient information, the security and copyright protection of medical images is particularly important for the patient. Techniques used to protect the copyright of medical images are still required to explore for the security of data.</p>
<p>Digital watermarking technology [<xref ref-type="bibr" rid="ref-2">2</xref>], an effective complement to traditional encryption techniques, enables the concealment of information and performs well on copyright protection for medical images [<xref ref-type="bibr" rid="ref-3">3</xref>]. Digital watermarking technology produces encrypted images by embedding watermarks with identification significance (such as author information, product serial number, trademark pattern, etc.) into the digital image without affecting the information value or usage of the digital image. Due to the good concealment, security and robustness, the watermark remains invariable even after various attacks, thereby determining the copyright confirmation of the digital image. As a basis for physicians&#x2019; diagnosis, medical images are more rigorous and complete. To ensure the accuracy and objectivity of the physician&#x2019;s diagnosis, the copyright information of watermark embedded medical images should be clarified even when the medical image is attacked. The above-mentioned watermark embedding algorithms are not suitable for the digital watermarking scheme for medical images. Consequently, robust watermarking algorithms for medical images are required for security against different attacks.</p>
<p>The main digital watermarking algorithms can be classified into two types, spatial domain digital watermarking and transform domain digital watermarking. The spatial domain digital watermarking algorithm alters the image pixel directly by embedding the watermark information, which is simple and easy to implement with low robustness. The transform domain digital watermarking is a reversible mathematical transformation of the image before the watermark is embedded, and the watermark information is embedded in the transformed data and then inverted when the watermark is extracted. Compared to the spatial domain watermarking, watermark energy has an even distribution in the transformed image, making the watermark information more concealed to improve the embedding strength of the watermark information greatly [<xref ref-type="bibr" rid="ref-4">4</xref>].</p>
<p>Cox&#x00A0;et&#x00A0;al.&#x00A0;[<xref ref-type="bibr" rid="ref-5">5</xref>] proposed a spread-spectrum digital watermarking algorithm that first transforms the discrete cosine transform (DCT) of the carrier image and then embeds the watermark information into the low-frequency sub-band, improving the watermark&#x2019;s resistance to compression. Kang&#x00A0;et&#x00A0;al.&#x00A0;[<xref ref-type="bibr" rid="ref-6">6</xref>] proposed a robust watermarking algorithm based on discrete wavelet transform (DWT) against geometric attacks; they introduce a distance measure between the distorted and undistorted images to determine the distortion before the image recovers by reversing the geometric distortion. The watermarking algorithm is resistant to geometric attacks such as rotation, scaling, translation, clipping, cropping, dithering attacks, and linear transformations. Cedillo-Hernandez&#x00A0;et&#x00A0;al.&#x00A0;[<xref ref-type="bibr" rid="ref-7">7</xref>] proposed a robust watermarking algorithm for medical images based on the discrete Fourier transform (DFT), which embeds the watermark into the DFT domain of medical images ensuring the quality of medical images and improving the robustness of the watermark. The above algorithms realized the embedding and extraction of the watermark, but the extraction processes of image features were complex, and the algorithms were less resistant to high-intensity geometric attacks.</p>
<p>The core of digital watermarking technology is image feature extraction; however, the method of extracting data features manually is complicated. Therefore, we can automatically learn the potential attributes of data through deep learning (DL). In recent years, the research of deep learning has become a hot spot again, and its applications are all over the fields of computer vision, natural language processing, speech recognition, and so on. Based on the research on deep learning, Cheng&#x00A0;et&#x00A0;al.&#x00A0;[<xref ref-type="bibr" rid="ref-8">8</xref>] proposed a lightweight multiscale information fusion network (MIFNet), which solved the two problems of accurate segmentation and efficient reasoning and improved the performance of semantic segmentation technology. Among the typical deep learning networks, deep learning models such as convolutional neural networks (CNN) [<xref ref-type="bibr" rid="ref-9">9</xref>], deep residual networks (DRN) [<xref ref-type="bibr" rid="ref-10">10</xref>], generative adversarial networks (GAN) [<xref ref-type="bibr" rid="ref-11">11</xref>], and U-Net [<xref ref-type="bibr" rid="ref-12">12</xref>] have achieved outstanding results in various fields. Adding attention mechanisms with deep learning approaches also increases the performance of classification and recognition [<xref ref-type="bibr" rid="ref-13">13</xref>]. Zhao&#x00A0;et&#x00A0;al.&#x00A0;[<xref ref-type="bibr" rid="ref-14">14</xref>] used cross model attention mechanism for character recognition and show the performance of the machine learning method improves with the attention mechanism. Yuan&#x00A0;et&#x00A0;al.&#x00A0;[<xref ref-type="bibr" rid="ref-15">15</xref>] applied a deep residual network (DRN) to fingerprint liveness detection (FLD) for the first time and proposed an FLD algorithm combining region of interest (ROI) extraction and DRN based on adaptive learning (ALDRN). 
The experimental results of the algorithm are better than the most advanced FLD method.</p>
<p>Convolutional neural networks (CNN) are a widely used neural network architecture. It is an effective algorithm for automatic learning and recognition of required features. It plays an important role in speech recognition, image detection, and image classification [<xref ref-type="bibr" rid="ref-16">16</xref>]. Leonid&#x00A0;et&#x00A0;al.&#x00A0;[<xref ref-type="bibr" rid="ref-17">17</xref>] realized the classification of elephant sounds through CNN by concatenating parallel convolution layers and then extracting features from different feature sets. Lee [<xref ref-type="bibr" rid="ref-18">18</xref>] used CNN to simulate the structure of the human optic nerve and completed the classification and detection of small moths through automatic learning and recognition. Sudha&#x00A0;et&#x00A0;al.&#x00A0;[<xref ref-type="bibr" rid="ref-19">19</xref>] trained deep CNN VGG-19, which extracted features from 20000 image training sets and extracted features from 5000 image test sets, and achieved automatic labeling and classification of diabetic retinopathy (DR) grades. The sensitivity and accuracy of the algorithm were 82&#x0025; and 96&#x0025;, respectively. Rajakumari&#x00A0;et&#x00A0;al.&#x00A0;[<xref ref-type="bibr" rid="ref-20">20</xref>] extracted the features and realized the detection and classification of breast cancer by introducing the reconstructed image into the CNN GoogleNet model. Zhang&#x00A0;et&#x00A0;al.&#x00A0;proposed a 3D watermarking algorithm based on wavelet-based transform with improved security [<xref ref-type="bibr" rid="ref-21">21</xref>] and used a similar approach for soft tissue processing [<xref ref-type="bibr" rid="ref-22">22</xref>].</p>
<p>As mentioned above, a deep learning network has obvious advantages in the automatic extraction and recognition of speech and image features. Therefore, in recent years, the research on digital watermarking technology is also closely combined with deep learning networks. Through the integration of the two technologies, the research in the field of digital watermarking technology has achieved creative results [<xref ref-type="bibr" rid="ref-23">23</xref>]. Jin&#x00A0;et&#x00A0;al.&#x00A0;[<xref ref-type="bibr" rid="ref-24">24</xref>] proposed a digital watermarking algorithm based on CNN, which firstly divides the original image into 8&#x2009;&#x00D7;&#x2009;8 image blocks, and then uses CNN to learn the texture properties and luminance properties of the image and adaptively determine the embedding strength of the watermark; the algorithm balances the invisibility and robustness of the watermark. Kandi&#x00A0;et&#x00A0;al.&#x00A0;[<xref ref-type="bibr" rid="ref-25">25</xref>] proposed a novel learning-based auto-encoder CNN for image watermarking that outperforms traditional image watermarking techniques in terms of imperceptibility and robustness. Fierro-Radilla&#x00A0;et&#x00A0;al.&#x00A0;[<xref ref-type="bibr" rid="ref-26">26</xref>] proposed a reinforcement learning model to ensure the robustness of watermarking. The learning process consists of three stages: watermark embedding, attack simulation and weight update, and experiments demonstrate that reinforcement learning is more competitive than supervised learning. Hayes&#x00A0;et&#x00A0;al.&#x00A0;[<xref ref-type="bibr" rid="ref-27">27</xref>] applied adversarial neural networks to digital watermarking, and the network model can determine whether an image contains watermarked information. 
Baluja&#x00A0;et&#x00A0;al.&#x00A0;[<xref ref-type="bibr" rid="ref-28">28</xref>] trained a neural network to hide a full-color image within another image of the same size, the watermarked image has a very good visual effect, and the algorithm can embed not only images of different sizes but also text and audio. Meng&#x00A0;et&#x00A0;al.&#x00A0;[<xref ref-type="bibr" rid="ref-29">29</xref>] and Liu&#x00A0;et&#x00A0;al.&#x00A0;[<xref ref-type="bibr" rid="ref-30">30</xref>] developed an irreversible watermarking scheme using wavelet transform and U-net based machine learning method. Fang&#x00A0;et&#x00A0;al.&#x00A0;[<xref ref-type="bibr" rid="ref-31">31</xref>] proposed an information hiding technique based on adversarial generative networks to effectively secure data in data sharing. Uchida&#x00A0;et&#x00A0;al.&#x00A0;[<xref ref-type="bibr" rid="ref-32">32</xref>] embedded watermarking information in the network model without affecting the performance of the network to ensure the intellectual property of the shared neural network model.</p>
<p>By considering the above studies, we can conclude that watermark algorithms performing directly on the original image cannot avoid affecting the image quality and the watermarks are unstable to geometric attacks. Aiming to address the intolerable problem of the watermark algorithm of medical images, this paper proposes an algorithm based on the combination of CNN Inception V3 and DCT to process the medical image and extract the image feature vectors. The algorithm combines the logistic map and hash functions to perform scrambled encryption of the watermarks. The &#x201C;third party&#x201D; concept is also incorporated to achieve zero embedding and blind extraction of multiple watermarks and to improve the performance of medical image watermarking techniques.</p>
<p>The main contributions of this research are:
<list list-type="simple">
<list-item><label>(1)</label><p>A CNN-based image feature extraction method is proposed, extracting the fully connected layer data (predictions) of the Inception V3 network as processing data and extracting image features through the DCT transform. The algorithm achieves algorithmic innovation by combining deep learning theory with traditional image transformation theory.</p></list-item>
<list-item><label>(2)</label><p>Three completely different images (text, graphics, and symbols) were chosen as digital watermarks to enable multiple watermarks embedding and to analyze the robustness of different types of digital watermarks.</p></list-item>
<list-item><label>(3)</label><p>The watermark is encrypted using a chaotic system and the key is stored by a third party to improve the security of the watermark.</p></list-item>
<list-item><label>(4)</label><p>The extraction of the watermark does not require the original image, enabling zero watermarking and blind extraction.</p></list-item>
</list></p>
</sec>
<sec id="s2"><label>2</label><title>The Fundamental Theory</title>
<p>The theoretical basis of this paper is the combination of the CNN Inception V3 and the traditional transform DCT to achieve the embedding and extraction of digital watermarks of the medical image through watermark encryption techniques.</p>
<sec id="s2_1"><label>2.1</label><title>Convolutional Neural Networks Inception V3</title>
<p>The Inception V3 network is the most representative CNN in the Inception Net. Inception V1 (GoogLeNet) won the 2014 ILSVRC Challenge. Its parameters were 12 times smaller than the Alex Net network, and the Top5 error rate was reduced to 6.67&#x0025;. Based on the advantages of fewer network parameters and high accuracy of Inception V1, Inception V3 continuously improves the network, which decomposes the symmetric convolution kernel into two layers of asymmetric convolution kernel in series. This change greatly accelerates the calculation speed, deepens the depth of the network, increases the nonlinearity of the network, reduces the probability of overfitting, and further improves the accuracy of network recognition. Compared with other classical CNN, Alex Net, VGG and Resnet, Inception V3 has fewer parameters and a deeper network, with a faster speed and lower recognition error rate, therefore, this paper selects Inception V3 as the experimental network.</p>
<p>Inception V3 is a pre-trained version of the network trained on more than a million images from the ImageNet database. The pre-trained network can classify images into 1000 object categories. As a result, the network has learned rich feature representations for a wide range of images.</p>
<p>The input to the Inception V3 network is an image of 299&#x2009;&#x00D7;&#x2009;299&#x2009;&#x00D7;&#x2009;3. The network contains three different types of inception modules (35&#x2009;&#x00D7;&#x2009;35/17&#x2009;&#x00D7;&#x2009;17/8&#x2009;&#x00D7;&#x2009;8) and two Grid Size Reduction modules. The inception Modules enable autonomous learning of data without manual processing. The Grid Size Reduction modules solve the problem of feature bottlenecks and computational overload and finally achieve image classification recognition by using the softmax function. The most important feature of the Inception V3 network is the splitting of a larger two-dimensional convolutional kernel into two smaller convolutional kernels, e.g., decomposing a 5&#x2009;&#x00D7;&#x2009;5 convolutional kernel into two 3&#x2009;&#x00D7;&#x2009;3 convolutional kernels (see <xref ref-type="fig" rid="fig-1">Fig. 1a</xref>); this improves the performance of the network and increases the speed of computation while reducing the cost of computation. In addition, the network decomposes symmetric convolution kernels into asymmetric convolution kernels, such as splitting the 3&#x2009;&#x00D7;&#x2009;3 convolutional kernels into 1&#x2009;&#x00D7;&#x2009;3 and 3&#x2009;&#x00D7;&#x2009;1 convolutional kernels (see <xref ref-type="fig" rid="fig-1">Fig. 1b</xref>). The decomposed convolution kernel approach saves a large number of parameters, speeding up computation while reducing overfitting [<xref ref-type="bibr" rid="ref-33">33</xref>].</p>
<fig id="fig-1"><label>Figure 1</label><caption><title>Operation of the convolution kernel. (a) Mini-network replacing the 5&#x2009;&#x00D7;&#x2009;5 convolutions. (b)&#x00A0;Decomposing symmetric convolution into asymmetric convolution</title></caption><graphic mimetype="image" mime-subtype="png" xlink:href="CMC_31445-fig-1.png"/></fig>
<p>On the other hand, to solve the problem of feature representation bottlenecks and excessive computation, two Grid Size Reduction modules were added between each of the three Inception Modules to reduce the size of the feature map by using a parallel two-branch structure (convolution and pooling) (see <xref ref-type="fig" rid="fig-2">Fig. 2</xref>).</p>
<fig id="fig-2"><label>Figure 2</label><caption><title>Grid size reduction modules</title></caption><graphic mimetype="image" mime-subtype="png" xlink:href="CMC_31445-fig-2.png"/></fig>
<p>The data used in the proposed algorithm is not the final output data of this network. Still, the fully connected layer data (predictions) is selected for processing, which achieves a high level of feature integration and the data is distinctly representative. Therefore, we choose the fully connected layer as the data source for feature extraction. The Inception V3 network structure is shown in <xref ref-type="fig" rid="fig-3">Fig. 3</xref>.</p>
<fig id="fig-3"><label>Figure 3</label><caption><title>The Inception V3 network structure</title></caption><graphic mimetype="image" mime-subtype="png" xlink:href="CMC_31445-fig-3.png"/></fig>
</sec>
<sec id="s2_2"><label>2.2</label><title>Discrete Cosine Transform</title>
<p>To strengthen the algorithm&#x2019;s resilience to attacks, we execute another DCT transform on the data extracted from the CNN [<xref ref-type="bibr" rid="ref-34">34</xref>]. This concentrates the energy in the image signal, making it easier to retrieve image features. The discrete cosine transform is a separable transform with a cosine function as its transform kernel [<xref ref-type="bibr" rid="ref-35">35</xref>]. The DCT transform can concentrate the majority of the signal&#x2019;s energy in its low-frequency components. DCT can describe the correlation between human speech signals and image signals [<xref ref-type="bibr" rid="ref-36">36</xref>]. Therefore, the DCT was chosen to extract the feature vectors of medical images in this experiment.</p>
<p>For a sequence of length N, its 1D-DCT transformation is as in <xref ref-type="disp-formula" rid="eqn-1">Eqs. (1)</xref> and <xref ref-type="disp-formula" rid="eqn-2">(2)</xref>.
<disp-formula id="eqn-1"><label>(1)</label><mml:math id="mml-eqn-1" display="block"><mml:mi>F</mml:mi><mml:mi>u</mml:mi><mml:mo>=</mml:mo><mml:mi>C</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>u</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:msubsup><mml:mrow><mml:mo>&#x2211;</mml:mo></mml:mrow><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>0</mml:mn></mml:mrow><mml:mrow><mml:mi>N</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msubsup><mml:mi>f</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mi>c</mml:mi><mml:mi>o</mml:mi><mml:mi>s</mml:mi><mml:mrow><mml:mo>[</mml:mo><mml:mfrac><mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>+</mml:mo><mml:mn>0.5</mml:mn><mml:mo>)</mml:mo></mml:mrow><mml:mi>u</mml:mi><mml:mi>&#x03C0;</mml:mi></mml:mrow><mml:mi>N</mml:mi></mml:mfrac><mml:mo>]</mml:mo></mml:mrow></mml:math></disp-formula>
<disp-formula id="eqn-2"><label>(2)</label><mml:math id="mml-eqn-2" display="block"><mml:mi>C</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>u</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mrow><mml:mo>{</mml:mo><mml:mtable columnalign="left" rowspacing="4pt" columnspacing="1em"><mml:mtr><mml:mtd><mml:msqrt><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mfrac><mml:mn>1</mml:mn><mml:mi>N</mml:mi></mml:mfrac></mml:mstyle></mml:msqrt><mml:mo>,</mml:mo><mml:mi>u</mml:mi><mml:mo>=</mml:mo><mml:mn>0</mml:mn></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:msqrt><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mfrac><mml:mn>2</mml:mn><mml:mi>N</mml:mi></mml:mfrac></mml:mstyle></mml:msqrt><mml:mo>,</mml:mo><mml:mi>u</mml:mi><mml:mo>&#x2260;</mml:mo><mml:mn>0</mml:mn></mml:mtd></mml:mtr></mml:mtable><mml:mo>;</mml:mo><mml:mi>u</mml:mi><mml:mo>=</mml:mo><mml:mn>0</mml:mn><mml:mo>,</mml:mo><mml:mn>1</mml:mn><mml:mo>,</mml:mo><mml:mo>&#x2026;</mml:mo><mml:mo>,</mml:mo><mml:mi>N</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn><mml:mo fence="true" stretchy="true" symmetric="true"></mml:mo></mml:mrow></mml:math></disp-formula></p>
</sec>
<sec id="s2_3"><label>2.3</label><title>Logistic Map</title>
<p>To improve the security of the watermark, this paper uses chaotic sequences to encrypt the watermark. The chaos is a seemingly irregular movement, referring to a random-like process that occurs in a deterministic system; with its initial values and parameters, it is possible to generate this chaotic system. The most famous type of chaotic system is Logistic Map.</p>
<p>It is a non-linear mapping given by <xref ref-type="disp-formula" rid="eqn-3">Eq. (3)</xref>.
<disp-formula id="eqn-3"><label>(3)</label><mml:math id="mml-eqn-3" display="block"><mml:msub><mml:mi>X</mml:mi><mml:mrow><mml:mi>k</mml:mi><mml:mo>+</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mi>&#x03BC;</mml:mi><mml:mo>&#x22C5;</mml:mo><mml:msub><mml:mi>X</mml:mi><mml:mrow><mml:mi>k</mml:mi></mml:mrow></mml:msub><mml:mo>&#x22C5;</mml:mo><mml:mrow><mml:mo>(</mml:mo><mml:mn>1</mml:mn><mml:mo>&#x2212;</mml:mo><mml:msub><mml:mi>X</mml:mi><mml:mrow><mml:mi>k</mml:mi></mml:mrow></mml:msub><mml:mo>)</mml:mo></mml:mrow></mml:math></disp-formula></p>
<p><inline-formula id="ieqn-1"><mml:math id="mml-ieqn-1"><mml:mi>k</mml:mi></mml:math></inline-formula> is the number of iterations, <inline-formula id="ieqn-2"><mml:math id="mml-ieqn-2"><mml:msub><mml:mi>X</mml:mi><mml:mrow><mml:mi>k</mml:mi></mml:mrow></mml:msub><mml:mo>&#x2208;</mml:mo><mml:mrow><mml:mo>(</mml:mo><mml:mn>0</mml:mn><mml:mo>,</mml:mo><mml:mn>1</mml:mn><mml:mo>)</mml:mo></mml:mrow></mml:math></inline-formula>, and the growth parameter <inline-formula id="ieqn-3"><mml:math id="mml-ieqn-3"><mml:mi>&#x03BC;</mml:mi><mml:mo>&#x2208;</mml:mo><mml:mrow><mml:mo>(</mml:mo><mml:mn>0</mml:mn><mml:mo>,</mml:mo><mml:mn>4</mml:mn><mml:mo>]</mml:mo></mml:mrow></mml:math></inline-formula>; when <inline-formula id="ieqn-4"><mml:math id="mml-ieqn-4"><mml:mn>3.5699456</mml:mn><mml:mo>&#x003C;</mml:mo><mml:mi>&#x03BC;</mml:mi><mml:mo>&#x2264;</mml:mo><mml:mn>4</mml:mn></mml:math></inline-formula>, the logistic map enters a chaotic state and the chaotic sequence can be used as an ideal key sequence. Different studies used the logistic map for encryption of information; for example, Szegedy&#x00A0;et&#x00A0;al.&#x00A0;scrambled the rows and columns of the pixels in the input image using two 1-D discrete chaotic sequences for watermark encryption [<xref ref-type="bibr" rid="ref-33">33</xref>]. Dai&#x00A0;et&#x00A0;al.&#x00A0;also used logistic mapping for generating encrypted watermark sequences for improving the security of medical images [<xref ref-type="bibr" rid="ref-34">34</xref>]. Therefore, logistic mapping is one of the key factors in securing the image. In the chaotic encryption of the medical images and the watermark, the initial values of the chaos are set to 0.135 and 0.2, respectively.</p>
</sec>
</sec>
<sec id="s3"><label>3</label><title>The Proposed Watermarking Algorithm</title>
<p>This study proposed a watermarking algorithm based on a CNN Inception V3 combined with DCT, generating a key sequence using chaotic encryption during the watermarking process. We found that in most of the relevant literature on watermarks, whether single watermark or multi watermark, the content of watermark information used is mostly text type. To test the robustness of the algorithm based on Inception V3 and DCT from multiple angles and all aspects, we use multiple watermarking schemes where the watermark information includes three types of text, graphics and symbols to achieve zero watermarking and blind extraction in the following experiments.</p>
<p>The algorithm consists of five parts: Inception V3-based feature extraction, watermark encryption, watermark embedding, watermark extraction and watermark decryption. First, the original medical images are convolved and pooled using the Inception V3 network to obtain the fully connected layer data (predictions). Then, a global discrete cosine transform is applied to the fully connected layer data and the low-frequency data is selected from the matrix obtained by DCT as the visual feature vector of the medical image. Finally, a new feature sequence based on perceptual hashing was found in the DCT domain to participate in the watermark operation. In the design of the algorithm, watermark technology is combined with chaotic encryption, cryptography and the third-party concept, which not only allows the digital watermark to resist conventional and geometric attacks but also makes the algorithm robust. Meanwhile, the use of multiple watermarks also enhances the security of medical image transmissions, protecting the privacy of patients.</p>
<sec id="s3_1"><label>3.1</label><title>Medical Image Feature Extraction</title>
<p>The flowchart of the proposed algorithm is presented in <xref ref-type="fig" rid="fig-4">Fig. 4</xref>; we start processing the images in two directions: the original medical image and the original watermarks. On the one hand, the original medical image is first resized to a 299&#x2009;&#x00D7;&#x2009;299 image <inline-formula id="ieqn-5"><mml:math id="mml-ieqn-5"><mml:mi>I</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:math></inline-formula> as the input of the Inception V3 network. After the initial medical image has been convolved and pooled by 3 Inception Modules and 2 Grid Size Reduction Modules, the fully connected layer (predictions) data <inline-formula id="ieqn-6"><mml:math id="mml-ieqn-6"><mml:mi>E</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:math></inline-formula> of the Inception V3 convolutional network is selected. Finally, a DCT is performed on the fully connected layer data, and a vector <inline-formula id="ieqn-7"><mml:math id="mml-ieqn-7"><mml:msub><mml:mi>V</mml:mi><mml:mrow><mml:mi>m</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:math></inline-formula> conforming to human visual features is found in the transform domain. 
To process the watermark images, first, a chaotic sequence <inline-formula id="ieqn-8"><mml:math id="mml-ieqn-8"><mml:mi>X</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>j</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:math></inline-formula> based on the initial value <inline-formula id="ieqn-9"><mml:math id="mml-ieqn-9"><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mn>0</mml:mn></mml:mrow></mml:msub></mml:math></inline-formula> is generated; then the chaotic sequence is binarized to generate the binary encryption matrix <inline-formula id="ieqn-10"><mml:math id="mml-ieqn-10"><mml:mi>k</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mi>n</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:math></inline-formula>; finally, the binary encryption matrix <inline-formula id="ieqn-11"><mml:math id="mml-ieqn-11"><mml:mi>k</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mi>n</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:math></inline-formula> is operated with the watermark <inline-formula id="ieqn-12"><mml:math id="mml-ieqn-12"><mml:mi>W</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:math></inline-formula> to obtain the encrypted watermark <inline-formula id="ieqn-13"><mml:math id="mml-ieqn-13"><mml:mi>B</mml:mi><mml:mi>W</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:math></inline-formula>. 
When watermarks need to be embedded, using the hash function on the encrypted watermark <inline-formula id="ieqn-14"><mml:math id="mml-ieqn-14"><mml:mi>B</mml:mi><mml:mi>W</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:math></inline-formula> and the visual feature vector <inline-formula id="ieqn-15"><mml:math id="mml-ieqn-15"><mml:msub><mml:mi>V</mml:mi><mml:mrow><mml:mi>m</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:math></inline-formula> of the image, meanwhile binary logic sequences <inline-formula id="ieqn-16"><mml:math id="mml-ieqn-16"><mml:mi>K</mml:mi><mml:mi>e</mml:mi><mml:mi>y</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:math></inline-formula> are generated. These binary logic sequences <inline-formula id="ieqn-17"><mml:math id="mml-ieqn-17"><mml:mi>K</mml:mi><mml:mi>e</mml:mi><mml:mi>y</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:math></inline-formula> can be stored in a third-party platform.</p>
<fig id="fig-4"><label>Figure 4</label><caption><title>Flowchart of the proposed algorithm</title></caption><graphic mimetype="image" mime-subtype="png" xlink:href="CMC_31445-fig-4.png"/></fig>
<p>When testing, the same method is applied. The visual feature vector <inline-formula id="ieqn-18"><mml:math id="mml-ieqn-18"><mml:msubsup><mml:mi>V</mml:mi><mml:mrow><mml:mi>m</mml:mi></mml:mrow><mml:mrow><mml:mi mathvariant="normal">&#x2032;</mml:mi></mml:mrow></mml:msubsup><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:math></inline-formula> of the tested medical image is extracted, the binary logic sequences <inline-formula id="ieqn-19"><mml:math id="mml-ieqn-19"><mml:mi>K</mml:mi><mml:mi>e</mml:mi><mml:mi>y</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:math></inline-formula> are obtained from the third party, and the extracted watermarks <inline-formula id="ieqn-20"><mml:math id="mml-ieqn-20"><mml:mi>B</mml:mi><mml:msup><mml:mi>W</mml:mi><mml:mrow><mml:mi mathvariant="normal">&#x2032;</mml:mi></mml:mrow></mml:msup><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:math></inline-formula> are obtained from the feature vector <inline-formula id="ieqn-21"><mml:math id="mml-ieqn-21"><mml:msubsup><mml:mi>V</mml:mi><mml:mrow><mml:mi>m</mml:mi></mml:mrow><mml:mrow><mml:mi mathvariant="normal">&#x2032;</mml:mi></mml:mrow></mml:msubsup><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mspace width="thickmathspace" /></mml:math></inline-formula> and the <inline-formula id="ieqn-22"><mml:math id="mml-ieqn-22"><mml:mi>K</mml:mi><mml:mi>e</mml:mi><mml:mi>y</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:math></inline-formula>. 
The chaotic sequences <inline-formula id="ieqn-23"><mml:math id="mml-ieqn-23"><mml:mi>X</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>j</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:math></inline-formula> and the encryption matrixes <inline-formula id="ieqn-24"><mml:math id="mml-ieqn-24"><mml:mi>k</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>n</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:math></inline-formula> were generated using the same initial value <inline-formula id="ieqn-25"><mml:math id="mml-ieqn-25"><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mn>0</mml:mn></mml:mrow></mml:msub></mml:math></inline-formula> as the above method. Then, the restored watermarks <inline-formula id="ieqn-26"><mml:math id="mml-ieqn-26"><mml:msup><mml:mi>W</mml:mi><mml:mrow><mml:mi mathvariant="normal">&#x2032;</mml:mi></mml:mrow></mml:msup><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:math></inline-formula> were obtained by hashing <inline-formula id="ieqn-27"><mml:math id="mml-ieqn-27"><mml:mi>k</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>n</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:math></inline-formula> and <inline-formula id="ieqn-28"><mml:math id="mml-ieqn-28"><mml:mi>B</mml:mi><mml:msup><mml:mi>W</mml:mi><mml:mrow><mml:mi mathvariant="normal">&#x2032;</mml:mi></mml:mrow></mml:msup><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:math></inline-formula>.</p>
<p>To verify whether the features extracted using the specified algorithm are valid, this experiment randomly selects a medical gray-scale image of 512&#x2009;&#x00D7;&#x2009;512 pixels, to which the Inception V3 and DCT transforms are applied, and different types of attacks are carried out on the medical image (as shown in <xref ref-type="fig" rid="fig-5">Fig. 5</xref>). In this paper, we selected 32 bits of low-frequency data and replaced data greater than or equal to 0 with 1, and the other data with 0. <xref ref-type="table" rid="table-1">Tab. 1</xref> lists the low-frequency coefficients of the medical image under different attacks. To exemplify this, we have selected the top 10 data <inline-formula id="ieqn-29"><mml:math id="mml-ieqn-29"><mml:mrow><mml:mo>(</mml:mo><mml:mi>O</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mn>1</mml:mn><mml:mo>,</mml:mo><mml:mn>1</mml:mn><mml:mo>)</mml:mo></mml:mrow><mml:mo>&#x223C;</mml:mo><mml:mi>O</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mn>1</mml:mn><mml:mo>,</mml:mo><mml:mn>10</mml:mn><mml:mo>)</mml:mo></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:math></inline-formula> in <xref ref-type="table" rid="table-1">Tab. 1</xref>. As can be seen from <xref ref-type="table" rid="table-1">Tab. 1</xref>, after Inception V3 and DCT transformation, we find that the values change significantly, but their signs remain largely unchanged; the sign sequences of all the attacked images are almost identical to those of the original images. Therefore, the experiment proves that the image features extracted by the proposed algorithm are effective.</p>
<fig id="fig-5"><label>Figure 5</label><caption><title>Different attacks on the abdomen. (a) Original image. (b) Gaussian noise (16&#x0025;). (c) JPEG compression (21&#x0025;). (d) Median filter [3&#x2009;&#x00D7;&#x2009;3] (15times). (e) Rotation (clockwise, 40&#x00B0;). (f) Scaling (&#x00D7;1.5). (g) Translation (28&#x0025;, left). (h) Translation (35&#x0025;, down). (i) Cropping (30&#x0025;, Y direction). (j) Cropping (32&#x0025;, X direction)</title></caption><graphic mimetype="image" mime-subtype="png" xlink:href="CMC_31445-fig-5.png"/></fig>
<p>We randomly selected other medical images to test the same algorithm, as shown in <xref ref-type="fig" rid="fig-6">Fig. 6</xref>, and verified the values of the normalized correlation coefficient. As shown in <xref ref-type="table" rid="table-2">Tab. 2</xref>, the NC values between different images obtained using the feature vectors selected with the above method are all less than 0.5, while the NC value of each image with itself is 1.00. These results are consistent with human visual features. Therefore, we can use the low-frequency coefficients of medical images based on the CNN Inception V3 and the traditional DCT transform as their feature vectors.</p>
<table-wrap id="table-1"><label>Table 1</label><caption><title>Changes of Inception V3 and DCT coefficients under different attacks for the medical images</title></caption>
<table frame="hsides">
<colgroup>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
</colgroup>
<thead>
<tr>
<th align="left">Image processing</th>
<th align="left">PSNR (dB)</th>
<th align="left">O (1, 1)</th>
<th align="left">O (1, 2)</th>
<th align="left">O (1, 3)</th>
<th align="left">O (1, 4)</th>
<th align="left">O (1, 5)</th>
<th align="left">O (1, 6)</th>
<th align="left">O (1, 7)</th>
<th align="left">O (1, 8)</th>
<th align="left">O (1, 9)</th>
<th align="left">O (1, 10)</th>
<th align="left">Sequence of coefficient signs</th>
<th align="left">NC</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left">Original image</td>
<td align="left">/</td>
<td align="left">&#x2212;1.23898</td>
<td align="left">&#x2212;7.91148</td>
<td align="left">&#x2212;3.74813</td>
<td align="left">1.154251</td>
<td align="left">0.646601</td>
<td align="left">&#x2212;1.94961</td>
<td align="left">&#x2212;2.77929</td>
<td align="left">1.556383</td>
<td align="left">0.144174</td>
<td align="left">1.6805359</td>
<td align="left">0001100111</td>
<td align="left">1.0</td>
</tr>
<tr>
<td align="left">Gaussian noise (2&#x0025;)</td>
<td align="left">19.12</td>
<td align="left">&#x2212;1.30063</td>
<td align="left">&#x2212;12.4179</td>
<td align="left">&#x2212;6.73767</td>
<td align="left">3.145781</td>
<td align="left">1.076835</td>
<td align="left">&#x2212;0.17014</td>
<td align="left">&#x2212;5.55097</td>
<td align="left">1.967321</td>
<td align="left">0.168225</td>
<td align="left">2.6991644</td>
<td align="left">0001100111</td>
<td align="left">1.0</td>
</tr>
<tr>
<td align="left">JPEG compression (35&#x0025;)</td>
<td align="left">33.96</td>
<td align="left">&#x2212;1.20436</td>
<td align="left">&#x2212;7.62404</td>
<td align="left">&#x2212;4.26336</td>
<td align="left">1.544986</td>
<td align="left">0.554832</td>
<td align="left">&#x2212;1.20787</td>
<td align="left">&#x2212;3.33877</td>
<td align="left">1.988583</td>
<td align="left">0.026068</td>
<td align="left">2.5266697</td>
<td align="left">0001100111</td>
<td align="left">1.0</td>
</tr>
<tr>
<td align="left">Median filter [3, 3] (35 times)</td>
<td align="left">29.02</td>
<td align="left">&#x2212;1.27199</td>
<td align="left">&#x2212;9.55575</td>
<td align="left">&#x2212;4.00191</td>
<td align="left">3.074254</td>
<td align="left">2.001256</td>
<td align="left">0.523326</td>
<td align="left">&#x2212;3.24354</td>
<td align="left">1.686682</td>
<td align="left">0.217292</td>
<td align="left">1.2284669</td>
<td align="left">0001110111</td>
<td align="left">0.75</td>
</tr>
<tr>
<td align="left">Rotation (clockwise, 40&#x00B0;)</td>
<td align="left">15.03</td>
<td align="left">&#x2212;1.28436</td>
<td align="left">&#x2212;9.82873</td>
<td align="left">&#x2212;3.75639</td>
<td align="left">1.758381</td>
<td align="left">1.936239</td>
<td align="left">&#x2212;0.61681</td>
<td align="left">&#x2212;1.29868</td>
<td align="left">0.81516</td>
<td align="left">0.364847</td>
<td align="left">0.84129214</td>
<td align="left">0001100111</td>
<td align="left">1.0</td>
</tr>
<tr>
<td align="left">Scaling (&#x00D7;8.0)</td>
<td align="left">/</td>
<td align="left">&#x2212;1.21921</td>
<td align="left">&#x2212;8.00264</td>
<td align="left">&#x2212;4.62872</td>
<td align="left">1.753362</td>
<td align="left">0.093074</td>
<td align="left">&#x2212;1.05737</td>
<td align="left">&#x2212;3.48623</td>
<td align="left">2.065893</td>
<td align="left">0.304405</td>
<td align="left">2.0755959</td>
<td align="left">0001100111</td>
<td align="left">1.0</td>
</tr>
<tr>
<td align="left">Translation (21&#x0025;, left)</td>
<td align="left">12.79</td>
<td align="left">&#x2212;1.26796</td>
<td align="left">&#x2212;6.52771</td>
<td align="left">&#x2212;4.61591</td>
<td align="left">1.846608</td>
<td align="left">0.737819</td>
<td align="left">&#x2212;0.63933</td>
<td align="left">&#x2212;2.97566</td>
<td align="left">1.335132</td>
<td align="left">0.130301</td>
<td align="left">1.5306479</td>
<td align="left">0001100111</td>
<td align="left">1.0</td>
</tr>
<tr>
<td align="left">Translation (35&#x0025;, down)</td>
<td align="left">11.87</td>
<td align="left">&#x2212;1.16118</td>
<td align="left">&#x2212;6.05354</td>
<td align="left">&#x2212;3.5574</td>
<td align="left">2.754599</td>
<td align="left">1.973892</td>
<td align="left">1.545179</td>
<td align="left">&#x2212;1.08467</td>
<td align="left">1.437165</td>
<td align="left">0.203135</td>
<td align="left">1.8267167</td>
<td align="left">0001110111</td>
<td align="left">0.83</td>
</tr>
<tr>
<td align="left">Cropping (19&#x0025;, Y direction)</td>
<td align="left">/</td>
<td align="left">&#x2212;1.29197</td>
<td align="left">&#x2212;6.14289</td>
<td align="left">&#x2212;0.89311</td>
<td align="left">1.059037</td>
<td align="left">0.040989</td>
<td align="left">&#x2212;0.62871</td>
<td align="left">&#x2212;0.59192</td>
<td align="left">1.486014</td>
<td align="left">0.332596</td>
<td align="left">0.33246225</td>
<td align="left">0001100111</td>
<td align="left">1.0</td>
</tr>
<tr>
<td align="left">Cropping (32&#x0025;, X direction)</td>
<td align="left">/</td>
<td align="left">&#x2212;1.22588</td>
<td align="left">&#x2212;10.2754</td>
<td align="left">&#x2212;4.97074</td>
<td align="left">3.529296</td>
<td align="left">1.907433</td>
<td align="left">&#x2212;0.12879</td>
<td align="left">&#x2212;3.65184</td>
<td align="left">2.703147</td>
<td align="left">0.564717</td>
<td align="left">2.5739105</td>
<td align="left">0001100111</td>
<td align="left">1.0</td>
</tr>
</tbody>
</table>
</table-wrap>
<table-wrap id="table-2"><label>Table 2</label><caption><title>Values of the correlation coefficients between different medical images (32 bit)</title></caption>
<table frame="hsides">
<colgroup>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
</colgroup>
<thead>
<tr>
<th align="left">Image</th>
<th align="left">Abdomen</th>
<th align="left">Neck</th>
<th align="left">Wrist</th>
<th align="left">Coronary artery</th>
<th align="left">Internal auditory canal</th>
<th align="left">Lumbar spine</th>
<th align="left">Foot</th>
<th align="left">Shoulder</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left">Abdomen</td>
<td align="left">1.00</td>
<td align="left">0.31</td>
<td align="left">0.24</td>
<td align="left">0.19</td>
<td align="left">0.12</td>
<td align="left">0.12</td>
<td align="left">0.37</td>
<td align="left">0.31</td>
</tr>
<tr>
<td align="left">Neck</td>
<td align="left">0.31</td>
<td align="left">1.00</td>
<td align="left">0.04</td>
<td align="left">0.13</td>
<td align="left">0.31</td>
<td align="left">0.18</td>
<td align="left">0.31</td>
<td align="left">0.13</td>
</tr>
<tr>
<td align="left">Wrist</td>
<td align="left">0.24</td>
<td align="left">0.04</td>
<td align="left">1.00</td>
<td align="left">0.32</td>
<td align="left">&#x2212;0.14</td>
<td align="left">&#x2212;0.01</td>
<td align="left">0.24</td>
<td align="left">0.19</td>
</tr>
<tr>
<td align="left">Coronary artery</td>
<td align="left">0.19</td>
<td align="left">0.13</td>
<td align="left">0.32</td>
<td align="left">1.00</td>
<td align="left">0.19</td>
<td align="left">&#x2212;0.06</td>
<td align="left">0.31</td>
<td align="left">0.13</td>
</tr>
<tr>
<td align="left">Internal auditory canal</td>
<td align="left">0.12</td>
<td align="left">0.31</td>
<td align="left">&#x2212;0.14</td>
<td align="left">0.19</td>
<td align="left">1.00</td>
<td align="left">0.12</td>
<td align="left">0.25</td>
<td align="left">0.31</td>
</tr>
<tr>
<td align="left">Lumbar spine</td>
<td align="left">0.12</td>
<td align="left">0.18</td>
<td align="left">&#x2212;0.01</td>
<td align="left">&#x2212;0.06</td>
<td align="left">0.12</td>
<td align="left">1.00</td>
<td align="left">0.37</td>
<td align="left">0.31</td>
</tr>
<tr>
<td align="left">Foot</td>
<td align="left">0.37</td>
<td align="left">0.31</td>
<td align="left">0.24</td>
<td align="left">0.31</td>
<td align="left">0.25</td>
<td align="left">0.37</td>
<td align="left">1.00</td>
<td align="left">0.44</td>
</tr>
<tr>
<td align="left">Shoulder</td>
<td align="left">0.31</td>
<td align="left">0.13</td>
<td align="left">0.19</td>
<td align="left">0.13</td>
<td align="left">0.31</td>
<td align="left">0.31</td>
<td align="left">0.44</td>
<td align="left">1.00</td>
</tr>
</tbody>
</table>
</table-wrap>
<fig id="fig-6"><label>Figure 6</label><caption><title>The tested images. (a) Abdomen. (b) Neck. (c) Wrist. (d) Coronary artery. (e) Internal auditory canal. (f) Lumbar spine. (g) Foot. (h) Shoulder</title></caption><graphic mimetype="image" mime-subtype="png" xlink:href="CMC_31445-fig-6.png"/></fig>
</sec>
<sec id="s3_2"><label>3.2</label><title>Watermarks Encryption</title>
<p>To ensure the security of the watermark, the watermark is scrambled and encrypted before it is embedded, as shown in <xref ref-type="fig" rid="fig-7">Fig. 7</xref>.</p>
<fig id="fig-7"><label>Figure 7</label><caption><title>Watermarks encryption process</title></caption><graphic mimetype="image" mime-subtype="png" xlink:href="CMC_31445-fig-7.png"/></fig>
<p>Step1: Generate a chaotic sequence <inline-formula id="ieqn-30"><mml:math id="mml-ieqn-30"><mml:mi>X</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>j</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:math></inline-formula> based on the initial value <inline-formula id="ieqn-31"><mml:math id="mml-ieqn-31"><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mn>0</mml:mn></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mn>0.2</mml:mn></mml:math></inline-formula> and <inline-formula id="ieqn-32"><mml:math id="mml-ieqn-32"><mml:mi>&#x03BC;</mml:mi><mml:mo>=</mml:mo><mml:mn>4</mml:mn></mml:math></inline-formula>. Binarize the chaotic sequence <inline-formula id="ieqn-33"><mml:math id="mml-ieqn-33"><mml:mi>X</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>j</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:math></inline-formula>: when the value of <inline-formula id="ieqn-34"><mml:math id="mml-ieqn-34"><mml:mi>X</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>j</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:math></inline-formula> is greater than 0.5, it is set to &#x201C;1&#x201D; and the rest to &#x201C;0&#x201D;, and a binary encryption matrix <inline-formula id="ieqn-35"><mml:math id="mml-ieqn-35"><mml:mi>k</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>n</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:math></inline-formula> is generated.</p>
<p>Step2: The binary encryption matrix <inline-formula id="ieqn-36"><mml:math id="mml-ieqn-36"><mml:mi>k</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>n</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:math></inline-formula> and the binary watermark <inline-formula id="ieqn-37"><mml:math id="mml-ieqn-37"><mml:mi>W</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:math></inline-formula> are operated by the hash function, such as shown in <xref ref-type="disp-formula" rid="eqn-4">Eq. (4)</xref>, then we obtain the encrypted watermarks <inline-formula id="ieqn-38"><mml:math id="mml-ieqn-38"><mml:mi>B</mml:mi><mml:mi>W</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:math></inline-formula>.
<disp-formula id="eqn-4"><label>(4)</label><mml:math id="mml-eqn-4" display="block"><mml:mi>B</mml:mi><mml:mi>W</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mi>W</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mo>&#x2295;</mml:mo><mml:mi>k</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>n</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:math></disp-formula></p>
</sec>
<sec id="s3_3"><label>3.3</label><title>Watermarks Embedding</title>
<p>Step3: Using <xref ref-type="disp-formula" rid="eqn-5">Eq. (5)</xref> to calculate the image feature matrix <inline-formula id="ieqn-39"><mml:math id="mml-ieqn-39"><mml:msub><mml:mi>V</mml:mi><mml:mrow><mml:mi>m</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:math></inline-formula> and the encrypted watermark <inline-formula id="ieqn-40"><mml:math id="mml-ieqn-40"><mml:mi>B</mml:mi><mml:mi>W</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:math></inline-formula>, the watermark can be embedded into the medical image, and the logical key <inline-formula id="ieqn-41"><mml:math id="mml-ieqn-41"><mml:mi>K</mml:mi><mml:mi>e</mml:mi><mml:mi>y</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:math></inline-formula> can be obtained at the same time.
<disp-formula id="eqn-5"><label>(5)</label><mml:math id="mml-eqn-5" display="block"><mml:mi>K</mml:mi><mml:mi>e</mml:mi><mml:mi>y</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mi>B</mml:mi><mml:mi>W</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mo>&#x2295;</mml:mo><mml:msub><mml:mi>V</mml:mi><mml:mrow><mml:mi>m</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:math></disp-formula></p>
<p>Step4: Save the logical key <inline-formula id="ieqn-42"><mml:math id="mml-ieqn-42"><mml:mi>K</mml:mi><mml:mi>e</mml:mi><mml:mi>y</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:math></inline-formula> to a third-party platform. When it is necessary to extract the watermark of the tested image, we can apply for logical key <inline-formula id="ieqn-43"><mml:math id="mml-ieqn-43"><mml:mi>K</mml:mi><mml:mi>e</mml:mi><mml:mi>y</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:math></inline-formula> from the third-party platform.</p>
<p><xref ref-type="fig" rid="fig-8">Fig. 8</xref> shows the watermark embedding process.</p>
<fig id="fig-8"><label>Figure 8</label><caption><title>Watermarks embedding process</title></caption><graphic mimetype="image" mime-subtype="png" xlink:href="CMC_31445-fig-8.png"/></fig>
</sec>
<sec id="s3_4"><label>3.4</label><title>Watermarks Extraction</title>
<p>Step5: Extraction of features <inline-formula id="ieqn-44"><mml:math id="mml-ieqn-44"><mml:mspace width="thickmathspace" /><mml:msubsup><mml:mi>V</mml:mi><mml:mrow><mml:mi>m</mml:mi></mml:mrow><mml:mrow><mml:mi mathvariant="normal">&#x2032;</mml:mi></mml:mrow></mml:msubsup><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:math></inline-formula> of the tested image using the same method as the extraction of components of the original medical image.</p>
<p>Step6: The encrypted watermark <inline-formula id="ieqn-45"><mml:math id="mml-ieqn-45"><mml:mi>B</mml:mi><mml:msup><mml:mi>W</mml:mi><mml:mrow><mml:mi mathvariant="normal">&#x2032;</mml:mi></mml:mrow></mml:msup><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:math></inline-formula> is extracted by <xref ref-type="disp-formula" rid="eqn-6">Eq. (6)</xref>.
<disp-formula id="eqn-6"><label>(6)</label><mml:math id="mml-eqn-6" display="block"><mml:mi>B</mml:mi><mml:msup><mml:mi>W</mml:mi><mml:mrow><mml:mi mathvariant="normal">&#x2032;</mml:mi></mml:mrow></mml:msup><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:msubsup><mml:mi>V</mml:mi><mml:mrow><mml:mi>m</mml:mi></mml:mrow><mml:mrow><mml:mi mathvariant="normal">&#x2032;</mml:mi></mml:mrow></mml:msubsup><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mo>&#x2295;</mml:mo><mml:mi>K</mml:mi><mml:mi>e</mml:mi><mml:mi>y</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:math></disp-formula></p>
<p>The algorithm only requires the <inline-formula id="ieqn-46"><mml:math id="mml-ieqn-46"><mml:mi>K</mml:mi><mml:mi>e</mml:mi><mml:mi>y</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:math></inline-formula> when extracting the watermark and does not require the participation of the original image. Therefore, it is a zero-watermarking extraction algorithm.</p>
<p><xref ref-type="fig" rid="fig-9">Fig. 9</xref> shows the extraction process of the watermark.</p>
<fig id="fig-9"><label>Figure 9</label><caption><title>Watermarks extraction process</title></caption><graphic mimetype="image" mime-subtype="png" xlink:href="CMC_31445-fig-9.png"/></fig>
</sec>
<sec id="s3_5"><label>3.5</label><title>Watermarks Decryption</title>
<p>Step7: Using the same method as watermark encryption, the same binary encryption matrix <inline-formula id="ieqn-47"><mml:math id="mml-ieqn-47"><mml:mi>k</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>n</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:math></inline-formula> is obtained.</p>
<p>Step8: Restore the scrambled watermark image by the inverse operation using <xref ref-type="disp-formula" rid="eqn-7">Eq. (7)</xref>. The watermark decryption process is shown in <xref ref-type="fig" rid="fig-10">Fig. 10</xref>.
<disp-formula id="eqn-7"><label>(7)</label><mml:math id="mml-eqn-7" display="block"><mml:msup><mml:mi>W</mml:mi><mml:mrow><mml:mi mathvariant="normal">&#x2032;</mml:mi></mml:mrow></mml:msup><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mi>B</mml:mi><mml:msup><mml:mi>W</mml:mi><mml:mrow><mml:mi mathvariant="normal">&#x2032;</mml:mi></mml:mrow></mml:msup><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mo>&#x2295;</mml:mo><mml:mi>k</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>n</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:math></disp-formula></p>
<fig id="fig-10"><label>Figure 10</label><caption><title>Watermarks decryption process</title></caption><graphic mimetype="image" mime-subtype="png" xlink:href="CMC_31445-fig-10.png"/></fig>
<p>Determine the watermark information and clarify the medical image ownership by calculating the correlation coefficients NC between <inline-formula id="ieqn-48"><mml:math id="mml-ieqn-48"><mml:mi>W</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:math></inline-formula> and<inline-formula id="ieqn-49"><mml:math id="mml-ieqn-49"><mml:msup><mml:mi>W</mml:mi><mml:mrow><mml:mi mathvariant="normal">&#x2032;</mml:mi></mml:mrow></mml:msup><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:math></inline-formula>.</p>
</sec>
</sec>
<sec id="s4"><label>4</label><title>Experimental Results</title>
<p>This experiment used Matlab 2019a as the test platform and selected an abdominal CT image as the study object. We chose three different types of images as watermarks (as shown in <xref ref-type="fig" rid="fig-11">Fig. 11</xref>) to verify the robustness of the algorithm from multiple perspectives. <xref ref-type="fig" rid="fig-12">Fig. 12</xref> shows the effect of the encrypted watermarks. The encrypted watermarks change greatly and are completely unrecognizable to the naked eye, which improves the security of the watermark information.</p>
<fig id="fig-11"><label>Figure 11</label><caption><title>Binary watermark. (a) Binary watermark 1-text. (b) Binary watermark 2-graphic. (c) Binary watermark 3-symbol</title></caption><graphic mimetype="image" mime-subtype="png" xlink:href="CMC_31445-fig-11.png"/></fig>
<fig id="fig-12"><label>Figure 12</label><caption><title>Encrypted watermark. (a) Encrypted watermark 1-text. (b) Encrypted watermark 2-graphic. (c) Encrypted watermark 3-symbol</title></caption><graphic mimetype="image" mime-subtype="png" xlink:href="CMC_31445-fig-12.png"/></fig>
<sec id="s4_1"><label>4.1</label><title>Evaluation Criteria</title>
<p>Calculating the NC value between the watermark extracted from the tested image and the original watermark (the NC value is between 0 and 1), we evaluate the robustness of the algorithm using NC values [<xref ref-type="bibr" rid="ref-37">37</xref>]. The NC value is calculated as shown in <xref ref-type="disp-formula" rid="eqn-8">Eq. (8)</xref>. <inline-formula id="ieqn-50"><mml:math id="mml-ieqn-50"><mml:mi>W</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:math></inline-formula> represents the original watermark, and <inline-formula id="ieqn-51"><mml:math id="mml-ieqn-51"><mml:msup><mml:mi>W</mml:mi><mml:mrow><mml:mi mathvariant="normal">&#x2032;</mml:mi></mml:mrow></mml:msup><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:math></inline-formula> is the extracted watermark. <inline-formula id="ieqn-52"><mml:math id="mml-ieqn-52"><mml:mover><mml:mrow><mml:mi>W</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:mrow><mml:mo accent="false">&#x00AF;</mml:mo></mml:mover></mml:math></inline-formula> is the mean of <inline-formula id="ieqn-53"><mml:math id="mml-ieqn-53"><mml:mi>W</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:math></inline-formula>, <inline-formula id="ieqn-54"><mml:math id="mml-ieqn-54"><mml:mover><mml:mrow><mml:msup><mml:mi>W</mml:mi><mml:mrow><mml:mi mathvariant="normal">&#x2032;</mml:mi></mml:mrow></mml:msup><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:mrow><mml:mo accent="false">&#x00AF;</mml:mo></mml:mover></mml:math></inline-formula> is the mean of <inline-formula id="ieqn-55"><mml:math id="mml-ieqn-55"><mml:msup><mml:mi>W</mml:mi><mml:mrow><mml:mi 
mathvariant="normal">&#x2032;</mml:mi></mml:mrow></mml:msup><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:math></inline-formula>. The algorithm has the best robustness when the NC value is 1. The embedded watermark can still be extracted when the NC value is greater than 0.5, so we consider the algorithm to be robust when the NC value [<xref ref-type="bibr" rid="ref-38">38</xref>] is greater than 0.5.
<disp-formula id="eqn-8"><label>(8)</label><mml:math id="mml-eqn-8" display="block"><mml:mi>N</mml:mi><mml:mi>C</mml:mi><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:munder><mml:mo>&#x2211;</mml:mo><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:munder><mml:munder><mml:mo>&#x2211;</mml:mo><mml:mrow><mml:mi>j</mml:mi></mml:mrow></mml:munder><mml:mrow><mml:mo>(</mml:mo><mml:mi>W</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mo>&#x2212;</mml:mo><mml:mover><mml:mrow><mml:mi>W</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:mrow><mml:mo accent="false">&#x00AF;</mml:mo></mml:mover><mml:mo>)</mml:mo></mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:msup><mml:mi>W</mml:mi><mml:mrow><mml:mi mathvariant="normal">&#x2032;</mml:mi></mml:mrow></mml:msup><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mo>&#x2212;</mml:mo><mml:mover><mml:mrow><mml:msup><mml:mi>W</mml:mi><mml:mrow><mml:mi mathvariant="normal">&#x2032;</mml:mi></mml:mrow></mml:msup><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:mrow><mml:mo accent="false">&#x00AF;</mml:mo></mml:mover><mml:mo>)</mml:mo></mml:mrow></mml:mrow><mml:msqrt><mml:mrow><mml:mo>(</mml:mo><mml:munder><mml:mo>&#x2211;</mml:mo><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:munder><mml:munder><mml:mo>&#x2211;</mml:mo><mml:mrow><mml:mi>j</mml:mi></mml:mrow></mml:munder><mml:msup><mml:mrow><mml:mo>(</mml:mo><mml:mi>W</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mo>&#x2212;</mml:mo><mml:mover><mml:mrow><mml:mi>W</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:mrow><mml:mo 
accent="false">&#x00AF;</mml:mo></mml:mover><mml:mo>)</mml:mo></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msup><mml:mo>)</mml:mo></mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:munder><mml:mo>&#x2211;</mml:mo><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:munder><mml:munder><mml:mo>&#x2211;</mml:mo><mml:mrow><mml:mi>j</mml:mi></mml:mrow></mml:munder><mml:msup><mml:mrow><mml:mo>(</mml:mo><mml:msup><mml:mi>W</mml:mi><mml:mrow><mml:mi mathvariant="normal">&#x2032;</mml:mi></mml:mrow></mml:msup><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mo>&#x2212;</mml:mo><mml:mover><mml:mrow><mml:msup><mml:mi>W</mml:mi><mml:mrow><mml:mi mathvariant="normal">&#x2032;</mml:mi></mml:mrow></mml:msup><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:mrow><mml:mo accent="false">&#x00AF;</mml:mo></mml:mover><mml:mo>)</mml:mo></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msup><mml:mo>)</mml:mo></mml:mrow></mml:msqrt></mml:mfrac></mml:math></disp-formula></p>
<p>The peak signal-to-noise ratio is the ratio of the maximum possible power of a representative signal and the destructive noise power that affects its representation accuracy. PSNR value indicates the degree of distortion of the image, the larger the value, the smaller the image distortion [<xref ref-type="bibr" rid="ref-38">38</xref>]. The PSNR is calculated as shown in <xref ref-type="disp-formula" rid="eqn-9">Eq. (9)</xref>. <inline-formula id="ieqn-56"><mml:math id="mml-ieqn-56"><mml:mi>I</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:math></inline-formula> and <inline-formula id="ieqn-57"><mml:math id="mml-ieqn-57"><mml:msup><mml:mi>I</mml:mi><mml:mrow><mml:mi mathvariant="normal">&#x2032;</mml:mi></mml:mrow></mml:msup><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:math></inline-formula> are the pixel values of each point of the image <italic>I</italic> and <inline-formula id="ieqn-58"><mml:math id="mml-ieqn-58"><mml:msup><mml:mi>I</mml:mi><mml:mrow><mml:mi mathvariant="normal">&#x2032;</mml:mi></mml:mrow></mml:msup></mml:math></inline-formula> respectively, <inline-formula id="ieqn-59"><mml:math id="mml-ieqn-59"><mml:mi>M</mml:mi><mml:mi>A</mml:mi><mml:msub><mml:mi>X</mml:mi><mml:mrow><mml:mi>I</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> is the maximum possible pixel value of the image, if the pixel value of each point is represented by B-bit binary, then <inline-formula id="ieqn-60"><mml:math id="mml-ieqn-60"><mml:mi>M</mml:mi><mml:mi>A</mml:mi><mml:msub><mml:mi>X</mml:mi><mml:mrow><mml:mi>I</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:msup><mml:mn>2</mml:mn><mml:mrow><mml:mi>B</mml:mi></mml:mrow></mml:msup><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn></mml:math></inline-formula>. 
To facilitate the operation, the image is usually taken as a square, that is, <inline-formula id="ieqn-61"><mml:math id="mml-ieqn-61"><mml:mi>M</mml:mi><mml:mo>=</mml:mo><mml:mi>N</mml:mi></mml:math></inline-formula> [<xref ref-type="bibr" rid="ref-39">39</xref>].
<disp-formula id="eqn-9"><label>(9)</label><mml:math id="mml-eqn-9" display="block"><mml:mi>P</mml:mi><mml:mi>S</mml:mi><mml:mi>N</mml:mi><mml:mi>R</mml:mi><mml:mo>=</mml:mo><mml:mn>10</mml:mn><mml:mi>lg</mml:mi><mml:mo>&#x2061;</mml:mo><mml:mfrac><mml:mrow><mml:mi>M</mml:mi><mml:mi>N</mml:mi><mml:mo>&#x22C5;</mml:mo><mml:mi>M</mml:mi><mml:mi>A</mml:mi><mml:msubsup><mml:mrow><mml:mi>X</mml:mi></mml:mrow><mml:mrow><mml:mi>I</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:mrow><mml:mrow><mml:munder><mml:mo>&#x2211;</mml:mo><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:munder><mml:munder><mml:mo>&#x2211;</mml:mo><mml:mrow><mml:mi>j</mml:mi></mml:mrow></mml:munder><mml:msup><mml:mrow><mml:mo>(</mml:mo><mml:mi>I</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mo>&#x2212;</mml:mo><mml:msup><mml:mi>I</mml:mi><mml:mrow><mml:mi mathvariant="normal">&#x2032;</mml:mi></mml:mrow></mml:msup><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mo>)</mml:mo></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msup></mml:mrow></mml:mfrac></mml:math></disp-formula></p>
<p><xref ref-type="fig" rid="fig-13">Fig. 13</xref> shows that the medical images were not changed when the medical images were not attacked; the NC value of the watermark information extracted from the original image is 1. The following conventional and geometric attacks are used to verify the robustness of the algorithm.</p>
<fig id="fig-13"><label>Figure 13</label><caption><title>The extracted watermarks without attack. (a) Extracted watermark 1. (b) Extracted watermark 2. (c) Extracted watermark 3</title></caption><graphic mimetype="image" mime-subtype="png" xlink:href="CMC_31445-fig-13.png"/></fig>
</sec>
<sec id="s4_2"><label>4.2</label><title>Conventional Attacks</title>
<sec id="s4_2_1"><label>4.2.1</label><title>Gaussian Noise Attacks</title>
<p>As shown in <xref ref-type="fig" rid="fig-14">Fig. 14</xref> and <xref ref-type="table" rid="table-3">Tab. 3</xref>, we added different levels of Gaussian noise to the watermarked images, and the NC values for the three extracted watermarks were 0.66, 0.66 and 0.72 when the Gaussian intensity was 20&#x0025;. Across all the data, the NC values for the two types of watermarks (text and graphic) were lower than that of the symbolic watermark. When the Gaussian noise intensity reaches 30&#x0025;, the watermark information can still be extracted well.</p>
<fig id="fig-14"><label>Figure 14</label><caption><title>Under Gaussian noise attacks. (a) Gaussian noise level of 16&#x0025;. (b) The extracted watermark 1 with a Gaussian noise level of 16&#x0025;. (c) The extracted watermark 2 with a Gaussian noise level of 16&#x0025;. (d) The extracted watermark 3 with a Gaussian noise level of 16&#x0025;</title></caption><graphic mimetype="image" mime-subtype="png" xlink:href="CMC_31445-fig-14.png"/></fig>
<table-wrap id="table-3"><label>Table 3</label><caption><title>PSNR and NC values after Gaussian noise attacks</title></caption>
<table frame="hsides">
<colgroup>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
</colgroup>
<thead>
<tr>
<th align="left">Gaussian noise (&#x0025;)</th>
<th align="left">2</th>
<th align="left">4</th>
<th align="left">8</th>
<th align="left">16</th>
<th align="left">20</th>
<th align="left">30</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left">PSNR (dB)</td>
<td align="left">19.12</td>
<td align="left">16.28</td>
<td align="left">13.51</td>
<td align="left">10.83</td>
<td align="left">10.08</td>
<td align="left">8.81</td>
</tr>
<tr>
<td align="left">NC1</td>
<td align="left">0.90</td>
<td align="left">0.82</td>
<td align="left">0.69</td>
<td align="left">0.76</td>
<td align="left">0.66</td>
<td align="left">0.53</td>
</tr>
<tr>
<td align="left">NC2</td>
<td align="left">0.92</td>
<td align="left">0.79</td>
<td align="left">0.64</td>
<td align="left">0.71</td>
<td align="left">0.66</td>
<td align="left">0.60</td>
</tr>
<tr>
<td align="left">NC3</td>
<td align="left">0.92</td>
<td align="left">0.79</td>
<td align="left">0.70</td>
<td align="left">0.65</td>
<td align="left">0.72</td>
<td align="left">0.66</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
<sec id="s4_2_2"><label>4.2.2</label><title>JPEG Attacks</title>
<p>JPEG compression, as an international standard, is widely used in image compression processing, and JPEG attacks are often used in the watermarking field. As shown in <xref ref-type="fig" rid="fig-15">Fig. 15</xref> and <xref ref-type="table" rid="table-4">Tab. 4</xref>, the NC values of the three watermarks extracted are 0.72, 0.77 and 0.77 when the compression quality reaches 35&#x0025;, with the graphic and symbol watermarks having better extraction quality than the text watermark.</p>
<fig id="fig-15"><label>Figure 15</label><caption><title>Under JPEG attacks. (a) Compression quality set to 30&#x0025;. (b) The extracted watermark 1 under compression quality set to 30&#x0025;. (c) The extracted watermark 2 under compression quality set to 30&#x0025;. (d) The extracted watermark 3 under compression quality set to 30&#x0025;</title></caption><graphic mimetype="image" mime-subtype="png" xlink:href="CMC_31445-fig-15.png"/></fig>
<table-wrap id="table-4"><label>Table 4</label><caption><title>PSNR and NC values after JPEG attacks</title></caption>
<table frame="hsides">
<colgroup>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
</colgroup>
<thead>
<tr>
<th align="left">Compression quality (&#x0025;)</th>
<th align="left">1</th>
<th align="left">7</th>
<th align="left">14</th>
<th align="left">21</th>
<th align="left">28</th>
<th align="left">35</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left">PSNR (dB)</td>
<td align="left">24.88</td>
<td align="left">28.62</td>
<td align="left">31.00</td>
<td align="left">32.37</td>
<td align="left">33.27</td>
<td align="left">33.96</td>
</tr>
<tr>
<td align="left">NC1</td>
<td align="left">0.41</td>
<td align="left">0.40</td>
<td align="left">0.60</td>
<td align="left">0.60</td>
<td align="left">0.66</td>
<td align="left">0.72</td>
</tr>
<tr>
<td align="left">NC2</td>
<td align="left">0.48</td>
<td align="left">0.47</td>
<td align="left">0.63</td>
<td align="left">0.64</td>
<td align="left">0.70</td>
<td align="left">0.77</td>
</tr>
<tr>
<td align="left">NC3</td>
<td align="left">0.49</td>
<td align="left">0.48</td>
<td align="left">0.65</td>
<td align="left">0.64</td>
<td align="left">0.70</td>
<td align="left">0.77</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
<sec id="s4_2_3"><label>4.2.3</label><title>Median Filter Attacks</title>
<p>We applied the [3&#x2009;&#x00D7;&#x2009;3] and [5&#x2009;&#x00D7;&#x2009;5] median filtering attacks to the watermarked image, the NC values of the watermark information were greater than 0.66 when the median filtering parameter was [3&#x2009;&#x00D7;&#x2009;3] and the number of filtering repetitions was 35, and the data are shown in <xref ref-type="fig" rid="fig-16">Fig. 16</xref> and <xref ref-type="table" rid="table-5">Tab. 5</xref>. The NC values of the three types of watermarked information-text, graphic and symbol-increase sequentially under the median filtering attack.</p>
<fig id="fig-16"><label>Figure 16</label><caption><title>Under Median Filter attacks. (a) Median Filter [3&#x2009;&#x00D7;&#x2009;3] with 35 repetitions. (b) The extracted watermark 1 under median filter [3&#x2009;&#x00D7;&#x2009;3] with 35 repetitions. (c) The extracted watermark 2 under median filter [3&#x2009;&#x00D7;&#x2009;3] with 35 repetitions. (d) The extracted watermark 3 under median filter [3&#x2009;&#x00D7;&#x2009;3] with 35 repetitions</title></caption><graphic mimetype="image" mime-subtype="png" xlink:href="CMC_31445-fig-16.png"/></fig>
<table-wrap id="table-5"><label>Table 5</label><caption><title>PSNR and NC values after Median filter attacks</title></caption>
<table frame="hsides">
<colgroup>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
</colgroup>
<thead>
<tr>
<th align="left">Median filter</th>
<th align="center" colspan="3">[3&#x2009;&#x00D7;&#x2009;3]</th>
<th align="center" colspan="3">[5&#x2009;&#x00D7;&#x2009;5]</th>
</tr>
<tr>
<td align="left">Repeat times</td>
<td align="left">3</td>
<td align="left">15</td>
<td align="left">35</td>
<td align="left">3</td>
<td align="left">15</td>
<td align="left">35</td>
</tr>
</thead>
<tbody>
<tr>
<td align="left">PSNR (dB)</td>
<td align="left">31.26</td>
<td align="left">29.55</td>
<td align="left">29.02</td>
<td align="left">26.67</td>
<td align="left">24.32</td>
<td align="left">23.47</td>
</tr>
<tr>
<td align="left">NC1</td>
<td align="left">0.63</td>
<td align="left">0.60</td>
<td align="left">0.66</td>
<td align="left">0.51</td>
<td align="left">0.54</td>
<td align="left">0.54</td>
</tr>
<tr>
<td align="left">NC2</td>
<td align="left">0.66</td>
<td align="left">0.63</td>
<td align="left">0.70</td>
<td align="left">0.54</td>
<td align="left">0.58</td>
<td align="left">0.63</td>
</tr>
<tr>
<td align="left">NC3</td>
<td align="left">0.64</td>
<td align="left">0.64</td>
<td align="left">0.71</td>
<td align="left">0.52</td>
<td align="left">0.60</td>
<td align="left">0.64</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
</sec>
<sec id="s4_3"><label>4.3</label><title>Geometrical Attacks</title>
<sec id="s4_3_1"><label>4.3.1</label><title>Rotation Attacks</title>
<p>The images with watermark information are rotated in different directions and at different angles, and it can be found in <xref ref-type="fig" rid="fig-17">Fig. 17</xref> and <xref ref-type="table" rid="table-6">Tab. 6</xref> (Negative is counterclockwise, positive is clockwise) that the algorithm is more robust against rotation attacks. When the image is rotated 40&#x00B0; clockwise, the watermark NC values exceed 0.90, and when rotated 80&#x00B0; counterclockwise, the watermark NC values are approximately 0.70. The robustness of the three types of watermarks is generally consistent.</p>
<fig id="fig-17"><label>Figure 17</label><caption><title>Under rotation attacks. (a) Rotation (clockwise) 40&#x00B0;. (b) The extracted watermark 1 under rotation (clockwise) 40&#x00B0;. (c) The extracted watermark 2 under rotation (clockwise) 40&#x00B0;. (d) The extracted watermark 3 under rotation (clockwise) 40&#x00B0;</title></caption><graphic mimetype="image" mime-subtype="png" xlink:href="CMC_31445-fig-17.png"/></fig>
<table-wrap id="table-6"><label>Table 6</label><caption><title>PSNR and NC values after rotation attacks</title></caption>
<table frame="hsides">
<colgroup>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
</colgroup>
<thead>
<tr>
<th align="left">Rotation</th>
<th align="left">&#x2212;80&#x00B0;</th>
<th align="left">&#x2212;40&#x00B0;</th>
<th align="left">&#x2212;16&#x00B0;</th>
<th align="left">&#x2212;8&#x00B0;</th>
<th align="left">8&#x00B0;</th>
<th align="left">16&#x00B0;</th>
<th align="left">40&#x00B0;</th>
<th align="left">80&#x00B0;</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left">PSNR (dB)</td>
<td align="left">13.80</td>
<td align="left">15.03</td>
<td align="left">16.22</td>
<td align="left">17.89</td>
<td align="left">17.89</td>
<td align="left">16.21</td>
<td align="left">15.03</td>
<td align="left">13.81</td>
</tr>
<tr>
<td align="left">NC1</td>
<td align="left">0.69</td>
<td align="left">0.72</td>
<td align="left">0.60</td>
<td align="left">0.57</td>
<td align="left">0.71</td>
<td align="left">0.64</td>
<td align="left">0.90</td>
<td align="left">0.82</td>
</tr>
<tr>
<td align="left">NC2</td>
<td align="left">0.70</td>
<td align="left">0.78</td>
<td align="left">0.66</td>
<td align="left">0.63</td>
<td align="left">0.77</td>
<td align="left">0.69</td>
<td align="left">0.92</td>
<td align="left">0.88</td>
</tr>
<tr>
<td align="left">NC3</td>
<td align="left">0.70</td>
<td align="left">0.78</td>
<td align="left">0.65</td>
<td align="left">0.62</td>
<td align="left">0.78</td>
<td align="left">0.69</td>
<td align="left">0.92</td>
<td align="left">0.84</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
<sec id="s4_3_2"><label>4.3.2</label><title>Scaling Attacks</title>
<p>The image was attacked using different scaling factors; when the scaling factor was as small as 0.5, the NC value of the extracted watermarks was 0.83 on average, and the watermarks could be identified. When the scaling factor reached 1.2, the NC values of the extracted watermarks were above 0.90. <xref ref-type="fig" rid="fig-18">Fig. 18</xref> and <xref ref-type="table" rid="table-7">Tab. 7</xref> show these data and part of the attack images.</p>
<fig id="fig-18"><label>Figure 18</label><caption><title>Under scaling attacks. (a) Scaling factor 1.2. (b) The extracted watermark 1 under scaling factor 1.2. (c) The extracted watermark 2 under scaling factor 1.2. (d) The extracted watermark 3 under scaling factor 1.2</title></caption><graphic mimetype="image" mime-subtype="png" xlink:href="CMC_31445-fig-18.png"/></fig>
<table-wrap id="table-7"><label>Table 7</label><caption><title>PSNR and NC values after scaling attacks</title></caption>
<table frame="hsides">
<colgroup>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
</colgroup>
<thead>
<tr>
<th align="left">Scaling factor</th>
<th align="left">0.5</th>
<th align="left">0.8</th>
<th align="left">1.2</th>
<th align="left">2.4</th>
<th align="left">4.0</th>
<th align="left">8.0</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left">NC1</td>
<td align="left">0.81</td>
<td align="left">0.81</td>
<td align="left">0.91</td>
<td align="left">0.81</td>
<td align="left">0.81</td>
<td align="left">0.81</td>
</tr>
<tr>
<td align="left">NC2</td>
<td align="left">0.84</td>
<td align="left">0.87</td>
<td align="left">0.93</td>
<td align="left">0.87</td>
<td align="left">0.87</td>
<td align="left">0.87</td>
</tr>
<tr>
<td align="left">NC3</td>
<td align="left">0.85</td>
<td align="left">0.84</td>
<td align="left">0.92</td>
<td align="left">0.84</td>
<td align="left">0.84</td>
<td align="left">0.84</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
<sec id="s4_3_3"><label>4.3.3</label><title>Translation Attacks</title>
<p><xref ref-type="fig" rid="fig-19">Fig. 19</xref> and <xref ref-type="table" rid="table-8">Tab. 8</xref> show the experimental data of the algorithm against translation attacks. The NC value of the watermarked image is greater than 0.91 when the image is shifted 21&#x0025; horizontally to the left; the NC value is greater than 0.70 when the image is shifted 21&#x0025; to the right or up; the robustness of the graphic and symbol watermarks outperforms that of the text watermark in all three directions of the translation attack. The NC values of the watermarks are between 0.52 and 0.57 when the image is shifted vertically downwards by 21&#x0025;. In addition, we found that when the image moves down 35&#x0025;, the NC value of the watermark image is better than at 14&#x0025;, 21&#x0025; and 28&#x0025;. We speculate that the main reason for this is that more invalid features in the image are removed. The watermark can be extracted accurately from the images after the above attacks, so the watermarking algorithm has strong resistance to translation attacks.</p>
<fig id="fig-19"><label>Figure 19</label><caption><title>Under-up translation attacks. (a) up distance 21&#x0025;. (b) The extracted watermark 1 under up distance 21&#x0025;. (c) The extracted watermark 2 under up distance 21&#x0025;. (d) The extracted watermark 3 under up distance of 21&#x0025;</title></caption><graphic mimetype="image" mime-subtype="png" xlink:href="CMC_31445-fig-19.png"/></fig>
<table-wrap id="table-8"><label>Table 8</label><caption><title>PSNR and NC values after translation attacks</title></caption>
<table frame="hsides">
<colgroup>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
</colgroup>
<thead>
<tr>
<th align="center" colspan="2">Distance (&#x0025;)</th>
<th align="left">7</th>
<th align="left">14</th>
<th align="left">21</th>
<th align="left">28</th>
<th align="left">35</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left">Left</td>
<td align="left">PSNR (dB)</td>
<td align="left">14.80</td>
<td align="left">13.38</td>
<td align="left">12.79</td>
<td align="left">12.49</td>
<td align="left">12.41</td>
</tr>
<tr>
<td/>
<td align="left">NC1</td>
<td align="left">0.76</td>
<td align="left">0.82</td>
<td align="left">0.91</td>
<td align="left">0.82</td>
<td align="left">0.76</td>
</tr>
<tr>
<td/>
<td align="left">NC2</td>
<td align="left">0.79</td>
<td align="left">0.85</td>
<td align="left">0.93</td>
<td align="left">0.85</td>
<td align="left">0.79</td>
</tr>
<tr>
<td/>
<td align="left">NC3</td>
<td align="left">0.78</td>
<td align="left">0.85</td>
<td align="left">0.92</td>
<td align="left">0.85</td>
<td align="left">0.79</td>
</tr>
<tr>
<td align="left">Right</td>
<td align="left">PSNR (dB)</td>
<td align="left">14.85</td>
<td align="left">13.44</td>
<td align="left">12.88</td>
<td align="left">12.48</td>
<td align="left">12.31</td>
</tr>
<tr>
<td/>
<td align="left">NC1</td>
<td align="left">0.66</td>
<td align="left">0.57</td>
<td align="left">0.70</td>
<td align="left">0.72</td>
<td align="left">0.63</td>
</tr>
<tr>
<td/>
<td align="left">NC2</td>
<td align="left">0.71</td>
<td align="left">0.59</td>
<td align="left">0.77</td>
<td align="left">0.78</td>
<td align="left">0.70</td>
</tr>
<tr>
<td/>
<td align="left">NC3</td>
<td align="left">0.69</td>
<td align="left">0.57</td>
<td align="left">0.77</td>
<td align="left">0.77</td>
<td align="left">0.70</td>
</tr>
<tr>
<td align="left">Up</td>
<td align="left">PSNR (dB)</td>
<td align="left">14.99</td>
<td align="left">13.30</td>
<td align="left">12.28</td>
<td align="left">11.68</td>
<td align="left">11.35</td>
</tr>
<tr>
<td/>
<td align="left">NC1</td>
<td align="left">0.69</td>
<td align="left">0.67</td>
<td align="left">0.72</td>
<td align="left">0.62</td>
<td align="left">0.47</td>
</tr>
<tr>
<td/>
<td align="left">NC2</td>
<td align="left">0.72</td>
<td align="left">0.71</td>
<td align="left">0.76</td>
<td align="left">0.66</td>
<td align="left">0.50</td>
</tr>
<tr>
<td/>
<td align="left">NC3</td>
<td align="left">0.71</td>
<td align="left">0.71</td>
<td align="left">0.77</td>
<td align="left">0.66</td>
<td align="left">0.52</td>
</tr>
<tr>
<td align="left">Down</td>
<td align="left">PSNR (dB)</td>
<td align="left">15.08</td>
<td align="left">13.39</td>
<td align="left">12.38</td>
<td align="left">11.92</td>
<td align="left">11.87</td>
</tr>
<tr>
<td/>
<td align="left">NC1</td>
<td align="left">0.83</td>
<td align="left">0.68</td>
<td align="left">0.52</td>
<td align="left">0.49</td>
<td align="left">0.77</td>
</tr>
<tr>
<td/>
<td align="left">NC2</td>
<td align="left">0.86</td>
<td align="left">0.71</td>
<td align="left">0.57</td>
<td align="left">0.58</td>
<td align="left">0.79</td>
</tr>
<tr>
<td/>
<td align="left">NC3</td>
<td align="left">0.85</td>
<td align="left">0.69</td>
<td align="left">0.55</td>
<td align="left">0.56</td>
<td align="left">0.78</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
<sec id="s4_3_4"><label>4.3.4</label><title>Cropping Attacks</title>
<p>To verify the cropping attack resistance of the algorithm, we apply cropping attacks to the images from both horizontal and vertical directions, with cropping attack strength from 8&#x0025; to 50&#x0025;. When cropping 32&#x0025; of the medical image along the X-axis, the NC values of the extracted watermarks are greater than 0.82 and the watermarks are very clear. When cropping 19&#x0025; of the medical image along the Y-axis, the NC values are between 0.75 and 0.78, at which point the NC value of the graphic watermark is higher than those of the other two types of watermarks. The data of the cropping attacks are shown in <xref ref-type="fig" rid="fig-20">Fig. 20</xref> and <xref ref-type="table" rid="table-9">Tab. 9</xref>.</p>
<fig id="fig-20"><label>Figure 20</label><caption><title>Under cropping attacks. (a) Cropping 32&#x0025;, X direction. (b) The extracted watermark 1 under 32&#x0025; cropping. (c) The extracted watermark 2 under 32&#x0025; cropping. (d) The extracted watermark 3 under 32&#x0025; cropping</title></caption><graphic mimetype="image" mime-subtype="png" xlink:href="CMC_31445-fig-20.png"/></fig>
<table-wrap id="table-9"><label>Table 9</label><caption><title>PSNR and NC values after cropping attacks</title></caption>
<table frame="hsides">
<colgroup>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
</colgroup>
<thead>
<tr>
<th align="center" colspan="2">Cropping (&#x0025;)</th>
<th align="left">8</th>
<th align="left">10</th>
<th align="left">15</th>
<th align="left">19</th>
<th align="left">32</th>
<th align="left">35</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" rowspan="3">Y direction</td>
<td align="left">NC1</td>
<td align="left">0.81</td>
<td align="left">0.80</td>
<td align="left">0.74</td>
<td align="left">0.75</td>
<td align="left">0.59</td>
<td align="left">0.63</td>
</tr>
<tr>
<td align="left">NC2</td>
<td align="left">0.85</td>
<td align="left">0.84</td>
<td align="left">0.78</td>
<td align="left">0.78</td>
<td align="left">0.63</td>
<td align="left">0.66</td>
</tr>
<tr>
<td align="left">NC3</td>
<td align="left">0.84</td>
<td align="left">0.84</td>
<td align="left">0.77</td>
<td align="left">0.77</td>
<td align="left">0.63</td>
<td align="left">0.64</td>
</tr>
<tr>
<td align="center" colspan="2">Cropping (&#x0025;)</td>
<td align="left">5</td>
<td align="left">8</td>
<td align="left">10</td>
<td align="left">19</td>
<td align="left">25</td>
<td align="left">32</td>
</tr>
<tr>
<td align="left" rowspan="3">X direction</td>
<td align="left">NC1</td>
<td align="left">0.69</td>
<td align="left">0.82</td>
<td align="left">0.76</td>
<td align="left">0.65</td>
<td align="left">0.67</td>
<td align="left">0.82</td>
</tr>
<tr>
<td align="left">NC2</td>
<td align="left">0.72</td>
<td align="left">0.85</td>
<td align="left">0.78</td>
<td align="left">0.71</td>
<td align="left">0.71</td>
<td align="left">0.85</td>
</tr>
<tr>
<td align="left">NC3</td>
<td align="left">0.71</td>
<td align="left">0.84</td>
<td align="left">0.77</td>
<td align="left">0.70</td>
<td align="left">0.70</td>
<td align="left">0.85</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
</sec>
<sec id="s4_4"><label>4.4</label><title>Comparison with Other Algorithm</title>
<p>The proposed algorithm combines a CNN with DCT to design a set of medical image digital watermarking research schemes. To further verify the advantages and disadvantages of the algorithm, this paper selects a traditional algorithm for comparison. <xref ref-type="table" rid="table-10">Tab. 10</xref> lists the comparison results of the proposed algorithm with SIFT-DCT [<xref ref-type="bibr" rid="ref-40">40</xref>]. It can be seen that the performance of the algorithm proposed in this paper is better than the SIFT-DCT algorithm in all high-intensity geometric attacks. In conventional attacks, the performance of the proposed algorithm is better than SIFT-DCT in Gaussian noise attack but weaker than SIFT-DCT in JPEG compression attack. Compared with the three different watermark types selected in the experiment, the robustness of the graphic watermark is the best, the symbol watermark is the second, and the robustness of the text watermark is the weakest. Experiments show that the algorithm based on a CNN and DCT has good performance.</p>
<table-wrap id="table-10"><label>Table 10</label><caption><title>Comparison of the SIFT-DCT algorithm</title></caption>
<table frame="hsides">
<colgroup>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
</colgroup>
<thead>
<tr>
<th align="left" rowspan="2">Attacks</th>
<th align="center" rowspan="2">Parameter</th>
<th align="left">SIFT-DCT</th>
<th align="center" colspan="3">Proposed algorithm</th>
</tr>
<tr>
<th align="left">NC</th>
<th align="left">NC1 (text)</th>
<th align="left">NC2 (graphic)</th>
<th align="left">NC3 (symbol)</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left">Gaussian noise</td>
<td align="left">15&#x0025;</td>
<td align="left">0.60</td>
<td align="left">0.60</td>
<td align="left"><bold>0.71</bold></td>
<td align="left"><bold>0.65</bold></td>
</tr>
<tr>
<td align="left">JPEG compression</td>
<td align="left">30&#x0025;</td>
<td align="left">0.90</td>
<td align="left">0.68</td>
<td align="left">0.71</td>
<td align="left">0.71</td>
</tr>
<tr>
<td align="left" rowspan="2">Rotation (clockwise)</td>
<td align="left">8&#x00B0;</td>
<td align="left">0.51</td>
<td align="left"><bold>0.71</bold></td>
<td align="left"><bold>0.77</bold></td>
<td align="left"><bold>0.78</bold></td>
</tr>
<tr>
<td align="left">10&#x00B0;</td>
<td align="left">0.60</td>
<td align="left"><bold>0.81</bold></td>
<td align="left"><bold>0.85</bold></td>
<td align="left"><bold>0.84</bold></td>
</tr>
<tr>
<td align="left" rowspan="2">Scaling</td>
<td align="left">&#x00D7;1.5</td>
<td align="left">0.79</td>
<td align="left"><bold>0.81</bold></td>
<td align="left"><bold>0.85</bold></td>
<td align="left"><bold>0.84</bold></td>
</tr>
<tr>
<td align="left">&#x00D7;3.0</td>
<td align="left">0.65</td>
<td align="left"><bold>0.81</bold></td>
<td align="left"><bold>0.85</bold></td>
<td align="left"><bold>0.84</bold></td>
</tr>
<tr>
<td align="left" rowspan="2">Cropping (X-direction)</td>
<td align="left">25&#x0025;</td>
<td align="left">0.72</td>
<td align="left">0.67</td>
<td align="left">0.71</td>
<td align="left">0.70</td>
</tr>
<tr>
<td align="left">30&#x0025;</td>
<td align="left">0.66</td>
<td align="left"><bold>0.76</bold></td>
<td align="left"><bold>0.78</bold></td>
<td align="left"><bold>0.77</bold></td>
</tr>
</tbody>
</table>
</table-wrap>
<p>In the future, we will use LSTM and graph-based methods to improve the encryption and decryption process [<xref ref-type="bibr" rid="ref-41">41</xref>]. Hybrid approaches for secure watermarking have proven to be a better way to increase robustness and security against geometric attacks [<xref ref-type="bibr" rid="ref-42">42</xref>,<xref ref-type="bibr" rid="ref-43">43</xref>].</p>
</sec>
</sec>
<sec id="s5"><label>5</label><title>Conclusions</title>
<p>This article presents a multi-watermarking technique based on Inception V3 and DCT that combines deep learning and classical transforms. A CNN is first used to automatically extract the fully connected layer coefficients (predictions) of the medical image. Then, the DCT transform is applied to extract features. To protect the security of the watermarks, the watermarks are scrambled and encrypted using the logistic map system. This algorithm uses a hash function to implement a zero-watermarking technique by storing the key through a third platform, achieving blind extraction of the watermarks during watermark extraction. To verify the feasibility of the algorithm and promote its general use, three different types of watermarks (text, image, symbol) were selected for this experiment. The experimental data shows that the algorithm is more robust to conventional attacks and performs better than traditional watermarking algorithms against geometric attacks. Therefore, the algorithm is important for the medical field, where image quality is very demanding.</p>
</sec>
</body>
<back>
<ack>
<p>This work was supported in part by Key Research Project of Hainan Province under Grant ZDYF2021SHFZ093, the Natural Science Foundation of China under Grants 62063004 and 62162022, the Hainan Provincial Natural Science Foundation of China under Grants 2019RC018, 521QN206 and 619QN249, the Major Scientific Project of Zhejiang Lab 2020ND8AD01, and the Scientific Research Foundation for Hainan University (No. KYQD(ZR)-21013).</p>
</ack>
<fn-group>
<fn fn-type="other"><p><bold>Funding Statement:</bold> The authors received no specific funding for this study.</p></fn>
<fn fn-type="conflict"><p><bold>Conflicts of Interest:</bold> The authors declare that they have no conflicts of interest to report regarding the present study.</p></fn>
</fn-group>
<ref-list content-type="authoryear">
<title>References</title>
<ref id="ref-1"><label>[1]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>J.</given-names> <surname>Hu</surname></string-name>, <string-name><given-names>H. H.</given-names> <surname>Chen</surname></string-name> and <string-name><given-names>T. W.</given-names> <surname>Hou</surname></string-name></person-group>, &#x201C;<article-title>A hybrid public key infrastructure solution (HPKI) for HIPAA privacy/security regulations</article-title>,&#x201D; <source>Computer Standards &#x0026; Interfaces</source>, vol. <volume>32</volume>, no. <issue>5&#x2013;6</issue>, pp. <fpage>274</fpage>&#x2013;<lpage>280</lpage>, <year>2010</year>.</mixed-citation></ref>
<ref id="ref-2"><label>[2]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>U. A.</given-names> <surname>Bhatti</surname></string-name>, <string-name><given-names>L.</given-names> <surname>Yuan</surname></string-name>, <string-name><given-names>Z.</given-names> <surname>Yu</surname></string-name>, <string-name><given-names>J. B.</given-names> <surname>Li</surname></string-name> and <string-name><given-names>K.</given-names> <surname>Zhang</surname></string-name></person-group>, &#x201C;<article-title>Hybrid watermarking algorithm using clifford algebra with arnold scrambling and chaotic encryption</article-title>,&#x201D; <source>IEEE Access</source>, vol. <volume>8</volume>, pp. <fpage>76386</fpage>&#x2013;<lpage>76398</lpage>, <year>2020</year>.</mixed-citation></ref>
<ref id="ref-3"><label>[3]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>C. K.</given-names> <surname>Tan</surname></string-name>, <string-name><given-names>J. C.</given-names> <surname>Ng</surname></string-name>, <string-name><given-names>X.</given-names> <surname>Xu</surname></string-name>, <string-name><given-names>C. L.</given-names> <surname>Poh</surname></string-name>, <string-name><given-names>Y. L.</given-names> <surname>Guan</surname></string-name> <etal>et al.,</etal></person-group> &#x201C;<article-title>Security protection of DICOM medical images using dual-layer reversible watermarking with tamper detection capability</article-title>,&#x201D; <source>Journal of Digital Imaging</source>, vol. <volume>24</volume>, no. <issue>3</issue>, pp. <fpage>528</fpage>&#x2013;<lpage>540</lpage>, <year>2011</year>.</mixed-citation></ref>
<ref id="ref-4"><label>[4]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>M. J.</given-names> <surname>Sahraee</surname></string-name> and <string-name><given-names>S.</given-names> <surname>Ghofrani</surname></string-name></person-group>, &#x201C;<article-title>A robust blind watermarking method using quantization of distance between wavelet coefficients</article-title>,&#x201D; <source>Signal Image &#x0026; Video Processing</source>, vol. <volume>7</volume>, no. <issue>4</issue>, pp. <fpage>799</fpage>&#x2013;<lpage>807</lpage>, <year>2013</year>.</mixed-citation></ref>
<ref id="ref-5"><label>[5]</label><mixed-citation publication-type="conf-proc"><person-group person-group-type="author"><string-name><given-names>I. J.</given-names> <surname>Cox</surname></string-name>, <string-name><given-names>J.</given-names> <surname>Kilian</surname></string-name>, <string-name><given-names>T.</given-names> <surname>Leighton</surname></string-name> and <string-name><given-names>T.</given-names> <surname>Shamoon</surname></string-name></person-group>, &#x201C;<article-title>Secure spread spectrum watermarking for images, audio and video</article-title>,&#x201D; in <conf-name>Proc. of 3rd IEEE Int. Conf. on Image Processing</conf-name>, Lausanne, Switzerland, vol. <volume>3</volume>, pp. <fpage>243</fpage>&#x2013;<lpage>246</lpage>, <year>2002</year>.</mixed-citation></ref>
<ref id="ref-6"><label>[6]</label><mixed-citation publication-type="conf-proc"><person-group person-group-type="author"><string-name><given-names>X.</given-names> <surname>Kang</surname></string-name>, <string-name><given-names>J.</given-names> <surname>Huang</surname></string-name> and <string-name><given-names>Y. Q.</given-names> <surname>Shi</surname></string-name></person-group>, &#x201C;<article-title>An image watermarking algorithm robust to geometric distortion</article-title>,&#x201D; in <conf-name>Int. Workshop on Digital Watermarking</conf-name>, Seoul, Korea, pp. <fpage>212</fpage>&#x2013;<lpage>223</lpage>, <year>2002</year>.</mixed-citation></ref>
<ref id="ref-7"><label>[7]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>M.</given-names> <surname>Cedillo-Hernandez</surname></string-name>, <string-name><given-names>F.</given-names> <surname>Garcia-Ugalde</surname></string-name>, <string-name><given-names>M.</given-names> <surname>Nakano-Miyatake</surname></string-name> and <string-name><given-names>H.</given-names> <surname>Perez-Meana</surname></string-name></person-group>, &#x201C;<article-title>Robust watermarking method in DFT domain for effective management of medical imaging</article-title>,&#x201D; <source>Signal, Image and Video Processing</source>, vol. <volume>9</volume>, no. <issue>5</issue>, pp. <fpage>1163</fpage>&#x2013;<lpage>1178</lpage>, <year>2015</year>.</mixed-citation></ref>
<ref id="ref-8"><label>[8]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>J.</given-names> <surname>Cheng</surname></string-name>, <string-name><given-names>X.</given-names> <surname>Peng</surname></string-name>, <string-name><given-names>X.</given-names> <surname>Tang</surname></string-name>, <string-name><given-names>W.</given-names> <surname>Tu</surname></string-name> and <string-name><given-names>W.</given-names> <surname>Xu</surname></string-name></person-group>, &#x201C;<article-title>MIFNet: A lightweight multiscale information fusion network</article-title>,&#x201D; <source>International Journal of Intelligent Systems</source>, pp. 1&#x2013;26, <year>2021</year>. <uri xlink:href="https://doi.org/10.1002/int.22804">https://doi.org/10.1002/int.22804</uri>.</mixed-citation></ref>
<ref id="ref-9"><label>[9]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>N.</given-names> <surname>Loris</surname></string-name>, <string-name><given-names>G.</given-names> <surname>Stefano</surname></string-name> and <string-name><given-names>B.</given-names> <surname>Sheryl</surname></string-name></person-group>, &#x201C;<article-title>Ensemble of convolutional neural networks for bioimage classification</article-title>,&#x201D; <source>Applied Computing &#x0026; Informatics</source>, vol. <volume>17</volume>, no. <issue>1</issue>, pp. <fpage>19</fpage>&#x2013;<lpage>35</lpage>, <year>2018</year>.</mixed-citation></ref>
<ref id="ref-10"><label>[10]</label><mixed-citation publication-type="conf-proc"><person-group person-group-type="author"><string-name><given-names>X.</given-names> <surname>Li</surname></string-name>, <string-name><given-names>L.</given-names> <surname>Ding</surname></string-name>, <string-name><given-names>W.</given-names> <surname>Li</surname></string-name> and <string-name><given-names>C.</given-names> <surname>Fang</surname></string-name></person-group>, &#x201C;<article-title>FPGA accelerates deep residual learning for image recognition</article-title>,&#x201D; in <conf-name>2017 IEEE 2nd Information Technology, Networking, Electronic and Automation Control Conf.</conf-name>, Chengdu, China, pp. <fpage>837</fpage>&#x2013;<lpage>840</lpage>, <year>2017</year>.</mixed-citation></ref>
<ref id="ref-11"><label>[11]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>J.</given-names> <surname>Cheng</surname></string-name>, <string-name><given-names>Y.</given-names> <surname>Yang</surname></string-name>, <string-name><given-names>X.</given-names> <surname>Tang</surname></string-name>, <string-name><given-names>N.</given-names> <surname>Xiong</surname></string-name>, <string-name><given-names>Y.</given-names> <surname>Zhang</surname></string-name> <etal>et al.,</etal></person-group> &#x201C;<article-title>Generative adversarial networks: A literature review</article-title>,&#x201D; <source>KSII Transactions on Internet and Information Systems</source>, vol. <volume>14</volume>, no. <issue>12</issue>, pp. <fpage>4625</fpage>&#x2013;<lpage>4647</lpage>, <year>2020</year>.</mixed-citation></ref>
<ref id="ref-12"><label>[12]</label><mixed-citation publication-type="conf-proc"><person-group person-group-type="author"><string-name><given-names>O.</given-names> <surname>Ronneberger</surname></string-name>, <string-name><given-names>P.</given-names> <surname>Fischer</surname></string-name> and <string-name><given-names>T.</given-names> <surname>Brox</surname></string-name></person-group>, &#x201C;<article-title>U-Net: Convolutional networks for biomedical image segmentation</article-title>,&#x201D; in <conf-name>Int. Conf. on Medical Image Computing and Computer-Assisted Intervention</conf-name>, Istanbul, Turkey, pp. <fpage>234</fpage>&#x2013;<lpage>241</lpage>, <year>2015</year>.</mixed-citation></ref>
<ref id="ref-13"><label>[13]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>S.</given-names> <surname>Zhao</surname></string-name>, <string-name><given-names>M.</given-names> <surname>Hu</surname></string-name>, <string-name><given-names>Z.</given-names> <surname>Cai</surname></string-name>, <string-name><given-names>Z.</given-names> <surname>Zhang</surname></string-name>, <string-name><given-names>T.</given-names> <surname>Zhou</surname></string-name> <etal>et al.,</etal></person-group> &#x201C;<article-title>Enhancing Chinese character representation with lattice-aligned attention</article-title>,&#x201D; <source>IEEE Transactions on Neural Networks and Learning Systems</source>, pp. 1&#x2013;10, <year>2021</year>. <uri xlink:href="https://doi.org/10.1109/TNNLS.2021.3114378">https://doi.org/10.1109/TNNLS.2021.3114378</uri>.</mixed-citation></ref>
<ref id="ref-14"><label>[14]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>S.</given-names> <surname>Zhao</surname></string-name>, <string-name><given-names>M.</given-names> <surname>Hu</surname></string-name>, <string-name><given-names>Z.</given-names> <surname>Cai</surname></string-name> and <string-name><given-names>F.</given-names> <surname>Liu</surname></string-name></person-group>, &#x201C;<article-title>Dynamic modeling cross-modal interactions in two-phase prediction for entity-relation extraction</article-title>,&#x201D; <source>IEEE Transactions on Neural Networks and Learning Systems</source>, pp. 1&#x2013;10, <year>2021</year>. <uri xlink:href="https://doi.org/10.1109/TNNLS.2021.3104971">https://doi.org/10.1109/TNNLS.2021.3104971</uri>.</mixed-citation></ref>
<ref id="ref-15"><label>[15]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>C.</given-names> <surname>Yuan</surname></string-name>, <string-name><given-names>Z.</given-names> <surname>Xia</surname></string-name>, <string-name><given-names>X.</given-names> <surname>Sun</surname></string-name> and <string-name><given-names>Q. J.</given-names> <surname>Wu</surname></string-name></person-group>, &#x201C;<article-title>Deep residual network with adaptive learning framework for fingerprint liveness detection</article-title>,&#x201D; <source>IEEE Transactions on Cognitive and Developmental Systems</source>, vol. <volume>12</volume>, no. <issue>3</issue>, pp. <fpage>461</fpage>&#x2013;<lpage>473</lpage>, <year>2019</year>.</mixed-citation></ref>
<ref id="ref-16"><label>[16]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>J.</given-names> <surname>Cheng</surname></string-name>, <string-name><given-names>J.</given-names> <surname>Liu</surname></string-name>, <string-name><given-names>X.</given-names> <surname>Xu</surname></string-name>, <string-name><given-names>D.</given-names> <surname>Xia</surname></string-name>, <string-name><given-names>L.</given-names> <surname>Liu</surname></string-name> <etal>et al.,</etal></person-group> &#x201C;<article-title>A review of Chinese named entity recognition</article-title>,&#x201D; <source>KSII Transactions on Internet and Information Systems</source>, vol. <volume>15</volume>, no. <issue>6</issue>, pp. <fpage>2012</fpage>&#x2013;<lpage>2030</lpage>, <year>2021</year>.</mixed-citation></ref>
<ref id="ref-17"><label>[17]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>T. T.</given-names> <surname>Leonid</surname></string-name> and <string-name><given-names>R.</given-names> <surname>Jayaparvathy</surname></string-name></person-group>, &#x201C;<article-title>Classification of elephant sounds using parallel convolutional neural network</article-title>,&#x201D; <source>Intelligent Automation &#x0026; Soft Computing</source>, vol. <volume>32</volume>, no. <issue>3</issue>, pp. <fpage>1415</fpage>&#x2013;<lpage>1426</lpage>, <year>2022</year>.</mixed-citation></ref>
<ref id="ref-18"><label>[18]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>S.</given-names> <surname>Lee</surname></string-name></person-group>, &#x201C;<article-title>A study on classification and detection of small moths using cnn model</article-title>,&#x201D; <source>Computers, Materials &#x0026; Continua</source>, vol. <volume>71</volume>, no. <issue>1</issue>, pp. <fpage>1987</fpage>&#x2013;<lpage>1998</lpage>, <year>2022</year>.</mixed-citation></ref>
<ref id="ref-19"><label>[19]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>V.</given-names> <surname>Sudha</surname></string-name> and <string-name><given-names>T. R.</given-names> <surname>Ganeshbabu</surname></string-name></person-group>, &#x201C;<article-title>A convolutional neural network classifier vgg-19 architecture for lesion detection and grading in diabetic retinopathy based on deep learning</article-title>,&#x201D; <source>Computers, Materials &#x0026; Continua</source>, vol. <volume>66</volume>, no. <issue>1</issue>, pp. <fpage>827</fpage>&#x2013;<lpage>842</lpage>, <year>2021</year>.</mixed-citation></ref>
<ref id="ref-20"><label>[20]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>R.</given-names> <surname>Rajakumari</surname></string-name> and <string-name><given-names>L.</given-names> <surname>Kalaivani</surname></string-name></person-group>, &#x201C;<article-title>Breast cancer detection and classification using deep cnn techniques</article-title>,&#x201D; <source>Intelligent Automation &#x0026; Soft Computing</source>, vol. <volume>32</volume>, no. <issue>2</issue>, pp. <fpage>1089</fpage>&#x2013;<lpage>1107</lpage>, <year>2022</year>.</mixed-citation></ref>
<ref id="ref-21"><label>[21]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>X.</given-names> <surname>Zhang</surname></string-name>, <string-name><given-names>W.</given-names> <surname>Zhang</surname></string-name>, <string-name><given-names>W.</given-names> <surname>Sun</surname></string-name>, <string-name><given-names>X.</given-names> <surname>Sun</surname></string-name> and <string-name><given-names>S. K.</given-names> <surname>Jha</surname></string-name></person-group>, &#x201C;<article-title>A robust 3-D medical watermarking based on wavelet transform for data protection</article-title>,&#x201D; <source>Computer Systems Science and Engineering</source>, vol. <volume>41</volume>, no. <issue>3</issue>, pp. <fpage>1043</fpage>&#x2013;<lpage>1056</lpage>, <year>2022</year>.</mixed-citation></ref>
<ref id="ref-22"><label>[22]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>X. R.</given-names> <surname>Zhang</surname></string-name>, <string-name><given-names>X.</given-names> <surname>Sun</surname></string-name>, <string-name><given-names>W.</given-names> <surname>Sun</surname></string-name>, <string-name><given-names>T.</given-names> <surname>Xu</surname></string-name> and <string-name><given-names>P. P.</given-names> <surname>Wang</surname></string-name></person-group>, &#x201C;<article-title>Deformation expression of soft tissue based on BP neural network</article-title>,&#x201D; <source>Intelligent Automation &#x0026; Soft Computing</source>, vol. <volume>32</volume>, no. <issue>2</issue>, pp. <fpage>1041</fpage>&#x2013;<lpage>1053</lpage>, <year>2022</year>.</mixed-citation></ref>
<ref id="ref-23"><label>[23]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>B.</given-names> <surname>Isac</surname></string-name> and <string-name><given-names>V.</given-names> <surname>Santhi</surname></string-name></person-group>, &#x201C;<article-title>A study on digital image and video watermarking schemes using neural networks</article-title>,&#x201D; <source>International Journal of Computer Applications</source>, vol. <volume>12</volume>, no. <issue>9</issue>, pp. <fpage>1</fpage>&#x2013;<lpage>6</lpage>, <year>2011</year>.</mixed-citation></ref>
<ref id="ref-24"><label>[24]</label><mixed-citation publication-type="conf-proc"><person-group person-group-type="author"><string-name><given-names>C.</given-names> <surname>Jin</surname></string-name> and <string-name><given-names>S.</given-names> <surname>Wang</surname></string-name></person-group>, &#x201C;<article-title>Applications of a neural network to estimate watermark embedding strength</article-title>,&#x201D; in <conf-name>Eighth Int. Workshop on Image Analysis for Multimedia Interactive Services</conf-name>, Santorini, Greece, pp. <fpage>68</fpage>&#x2013;<lpage>68</lpage>, <year>2007</year>.</mixed-citation></ref>
<ref id="ref-25"><label>[25]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>H.</given-names> <surname>Kandi</surname></string-name> and <string-name><given-names>D.</given-names> <surname>Mishra</surname></string-name></person-group>, &#x201C;<article-title>Exploring the learning capabilities of convolutional neural networks for robust image watermarking</article-title>,&#x201D; <source>Computers &#x0026; Security</source>, vol. <volume>65</volume>, pp. <fpage>247</fpage>&#x2013;<lpage>268</lpage>, <year>2017</year>.</mixed-citation></ref>
<ref id="ref-26"><label>[26]</label><mixed-citation publication-type="conf-proc"><person-group person-group-type="author"><string-name><given-names>A.</given-names> <surname>Fierro-Radilla</surname></string-name>, <string-name><given-names>M.</given-names> <surname>Nakano-Miyatake</surname></string-name>, <string-name><given-names>M.</given-names> <surname>Cedillo-Hernandez</surname></string-name>, <string-name><given-names>L.</given-names> <surname>Cleofas-Sanchez</surname></string-name> and <string-name><given-names>H.</given-names> <surname>Perez-Meana</surname></string-name></person-group>, &#x201C;<article-title>A robust image zero-watermarking using convolutional neural networks</article-title>,&#x201D; in <conf-name>7th Int. Workshop on Biometrics and Forensics (IWBF) IEEE</conf-name>, Sassari, Italy, pp. <fpage>1</fpage>&#x2013;<lpage>5</lpage>, <year>2019</year>.</mixed-citation></ref>
<ref id="ref-27"><label>[27]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>J.</given-names> <surname>Hayes</surname></string-name> and <string-name><given-names>G.</given-names> <surname>Danezis</surname></string-name></person-group>, &#x201C;<article-title>Generating steganographic images via adversarial training</article-title>,&#x201D; <source>Advances in Neural Information Processing Systems</source>, vol. 30, <year>2017</year>.</mixed-citation></ref>
<ref id="ref-28"><label>[28]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>S.</given-names> <surname>Baluja</surname></string-name> and <collab>Google</collab></person-group>, &#x201C;<article-title>Hiding images in plain sight: Deep steganography</article-title>,&#x201D; <source>Advances in Neural Information Processing Systems</source>, vol. <volume>30</volume>, pp. <fpage>2066</fpage>&#x2013;<lpage>2076</lpage>, <year>2017</year>.</mixed-citation></ref>
<ref id="ref-29"><label>[29]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>L.</given-names> <surname>Meng</surname></string-name>, <string-name><given-names>L.</given-names> <surname>Liu</surname></string-name>, <string-name><given-names>G.</given-names> <surname>Tian</surname></string-name> and <string-name><given-names>X.</given-names> <surname>Wang</surname></string-name></person-group>, &#x201C;<article-title>An adaptive reversible watermarking in IWT domain</article-title>,&#x201D; <source>Multimedia Tools and Applications</source>, vol. <volume>80</volume>, no. <issue>1</issue>, pp. <fpage>711</fpage>&#x2013;<lpage>735</lpage>, <year>2021</year>.</mixed-citation></ref>
<ref id="ref-30"><label>[30]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>L.</given-names> <surname>Liu</surname></string-name>, <string-name><given-names>L.</given-names> <surname>Meng</surname></string-name>, <string-name><given-names>Y.</given-names> <surname>Peng</surname></string-name> and <string-name><given-names>X.</given-names> <surname>Wang</surname></string-name></person-group>, &#x201C;<article-title>A data hiding scheme based on U-Net and wavelet transform</article-title>,&#x201D; <source>Knowledge-Based Systems</source>, vol. <volume>223</volume>, pp. <fpage>107022</fpage>, <year>2021</year>.</mixed-citation></ref>
<ref id="ref-31"><label>[31]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>Y.</given-names> <surname>Fang</surname></string-name>, <string-name><given-names>J.</given-names> <surname>Liu</surname></string-name>, <string-name><given-names>J.</given-names> <surname>Li</surname></string-name>, <string-name><given-names>J.</given-names> <surname>Cheng</surname></string-name>, <string-name><given-names>J.</given-names> <surname>Hu</surname></string-name> <etal>et al.,</etal></person-group> &#x201C;<article-title>Robust zero-watermarking algorithm for medical images based on SIFT and Bandelet-DCT</article-title>,&#x201D; <source>Multimedia Tools and Applications</source>, vol. <volume>81</volume>, no. <issue>12</issue>, pp. <fpage>16863</fpage>&#x2013;<lpage>16879</lpage>, <year>2022</year>.</mixed-citation></ref>
<ref id="ref-32"><label>[32]</label><mixed-citation publication-type="conf-proc"><person-group person-group-type="author"><string-name><given-names>Y.</given-names> <surname>Uchida</surname></string-name>, <string-name><given-names>Y.</given-names> <surname>Nagai</surname></string-name>, <string-name><given-names>S.</given-names> <surname>Sakazawa</surname></string-name> and <string-name><given-names>S.</given-names> <surname>Satoh</surname></string-name></person-group>, &#x201C;<article-title>Embedding watermarks into deep neural networks</article-title>,&#x201D; in <conf-name>Proc. of the 2017 ACM on Int. Conf. on Multimedia Retrieval</conf-name>, Guangzhou, China, pp. <fpage>269</fpage>&#x2013;<lpage>277</lpage>, <year>2017</year>.</mixed-citation></ref>
<ref id="ref-33"><label>[33]</label><mixed-citation publication-type="conf-proc"><person-group person-group-type="author"><string-name><given-names>C.</given-names> <surname>Szegedy</surname></string-name>, <string-name><given-names>V.</given-names> <surname>Vanhoucke</surname></string-name>, <string-name><given-names>S.</given-names> <surname>Ioffe</surname></string-name>, <string-name><given-names>J.</given-names> <surname>Shlens</surname></string-name> and <string-name><given-names>Z.</given-names> <surname>Wojna</surname></string-name></person-group>, &#x201C;<article-title>Rethinking the inception architecture for computer vision</article-title>,&#x201D; in <conf-name>Proc. of the IEEE Conf. on Computer Vision and Pattern Recognition</conf-name>, Las Vegas, USA, pp. <fpage>2818</fpage>&#x2013;<lpage>2826</lpage>, <year>2016</year>.</mixed-citation></ref>
<ref id="ref-34"><label>[34]</label><mixed-citation publication-type="conf-proc"><person-group person-group-type="author"><string-name><given-names>Q.</given-names> <surname>Dai</surname></string-name>, <string-name><given-names>J.</given-names> <surname>Li</surname></string-name>, <string-name><given-names>U. A.</given-names> <surname>Bhatti</surname></string-name>, <string-name><given-names>J.</given-names> <surname>Cheng</surname></string-name> and <string-name><given-names>X.</given-names> <surname>Bai</surname></string-name></person-group>, &#x201C;<article-title>An automatic identification algorithm for encrypted anti-counterfeiting tag based on DWT-DCT and Chen&#x2019;s Chaos</article-title>,&#x201D; in <conf-name>Int. Conf. on Artificial Intelligence and Security</conf-name>, New York, USA, pp. <fpage>596</fpage>&#x2013;<lpage>608</lpage>, <year>2019</year>.</mixed-citation></ref>
<ref id="ref-35"><label>[35]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>C.</given-names> <surname>Zeng</surname></string-name>, <string-name><given-names>J.</given-names> <surname>Liu</surname></string-name>, <string-name><given-names>J.</given-names> <surname>Li</surname></string-name>, <string-name><given-names>J.</given-names> <surname>Cheng</surname></string-name>, <string-name><given-names>J.</given-names> <surname>Zhou</surname></string-name> <etal>et al.,</etal></person-group> &#x201C;<article-title>Multi-watermarking algorithm for medical image based on KAZE-DCT</article-title>,&#x201D; <source>Journal of Ambient Intelligence and Humanized Computing</source>, pp. <fpage>1</fpage>&#x2013;<lpage>9</lpage>, <year>2022</year>.</mixed-citation></ref>
<ref id="ref-36"><label>[36]</label><mixed-citation publication-type="conf-proc"><person-group person-group-type="author"><string-name><given-names>K. A.</given-names> <surname>Al-Afandy</surname></string-name>, <string-name><given-names>O. S.</given-names> <surname>Faragallah</surname></string-name>, <string-name><given-names>E. S. M.</given-names> <surname>El-Rabaie</surname></string-name>, <string-name><given-names>F. E.</given-names> <surname>Abd El-Samie</surname></string-name> and <string-name><given-names>A.</given-names> <surname>Elmhalawy</surname></string-name></person-group>, &#x201C;<article-title>A hybrid scheme for robust color image watermarking using DSWT in DCT domain</article-title>,&#x201D; in <conf-name>2016 4th IEEE Int. Colloquium on Information Science and Technology (CiSt)</conf-name>, Tangier, Morocco, pp. <fpage>444</fpage>&#x2013;<lpage>449</lpage>, <year>2016</year>.</mixed-citation></ref>
<ref id="ref-37"><label>[37]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>Z.</given-names> <surname>Zeeshan</surname></string-name>, <string-name><given-names>U. A.</given-names> <surname>Bhatti</surname></string-name>, <string-name><given-names>W. H.</given-names> <surname>Memon</surname></string-name>, <string-name><given-names>S. N.</given-names> <surname>Ali</surname></string-name>, <string-name><given-names>S. A.</given-names> <surname>Nizamani</surname></string-name> <etal>et al.,</etal></person-group> &#x201C;<article-title>Feature-based multi-criteria recommendation system using a weighted approach with ranking correlation</article-title>,&#x201D; <source>Intelligent Data Analysis</source>, vol. <volume>25</volume>, no. <issue>4</issue>, pp. <fpage>1013</fpage>&#x2013;<lpage>1029</lpage>, <year>2021</year>.</mixed-citation></ref>
<ref id="ref-38"><label>[38]</label><mixed-citation publication-type="conf-proc"><person-group person-group-type="author"><string-name><given-names>K. A.</given-names> <surname>Al-Afandy</surname></string-name>, <string-name><given-names>O. S.</given-names> <surname>Faragallah</surname></string-name>, <string-name><given-names>E. S. M.</given-names> <surname>EL-Rabaie</surname></string-name>, <string-name><given-names>F. E.</given-names> <surname>Abd El-Samie</surname></string-name> and <string-name><given-names>A.</given-names> <surname>ELmhalawy</surname></string-name></person-group>, &#x201C;<article-title>Efficient color image watermarking using homomorphic based SVD in DWT domain</article-title>,&#x201D; in <conf-name>2016 Fourth Int. Japan-Egypt Conf. on Electronics, Communications and Computers (JEC-ECC)</conf-name>, Cairo, Egypt, pp. <fpage>43</fpage>&#x2013;<lpage>47</lpage>, <year>2016</year>.</mixed-citation></ref>
<ref id="ref-39"><label>[39]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>T.</given-names> <surname>Li</surname></string-name>, <string-name><given-names>J.</given-names> <surname>Li</surname></string-name>, <string-name><given-names>J.</given-names> <surname>Liu</surname></string-name>, <string-name><given-names>M.</given-names> <surname>Huang</surname></string-name>, <string-name><given-names>Y. W.</given-names> <surname>Chen</surname></string-name> <etal>et al.,</etal></person-group> &#x201C;<article-title>Robust watermarking algorithm for medical images based on log-polar transform</article-title>,&#x201D; <source>EURASIP Journal on Wireless Communications and Networking</source>, vol. <volume>1</volume>, pp. <fpage>1</fpage>&#x2013;<lpage>11</lpage>, <year>2022</year>.</mixed-citation></ref>
<ref id="ref-40"><label>[40]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>J.</given-names> <surname>Liu</surname></string-name>, <string-name><given-names>J.</given-names> <surname>Li</surname></string-name>, <string-name><given-names>Y.</given-names> <surname>Chen</surname></string-name>, <string-name><given-names>X.</given-names> <surname>Zou</surname></string-name> and <string-name><given-names>U. A.</given-names> <surname>Bhatti</surname></string-name></person-group>, &#x201C;<article-title>A robust zero-watermarking based on SIFT-DCT for medical images in the encrypted domain</article-title>,&#x201D; <source>Computers. Materials &#x0026; Continua</source>, vol. <volume>61</volume>, no. <issue>1</issue>, pp. <fpage>363</fpage>&#x2013;<lpage>378</lpage>, <year>2019</year>.</mixed-citation></ref>
<ref id="ref-41"><label>[41]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>X.</given-names> <surname>Wu</surname></string-name>, <string-name><given-names>J.</given-names> <surname>Li</surname></string-name>, <string-name><given-names>U. A.</given-names> <surname>Bhatti</surname></string-name> and <string-name><given-names>Y. W.</given-names> <surname>Chen</surname></string-name></person-group>, &#x201C;<article-title>Logistic map and contourlet-based robust zero watermark for medical images</article-title>,&#x201D; <source>Innovation in Medicine and Healthcare Systems, and Multimedia</source>, vol. 145, pp. <fpage>115</fpage>&#x2013;<lpage>123</lpage>, <year>2019</year>.</mixed-citation></ref>
<ref id="ref-42"><label>[42]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>C.</given-names> <surname>Gong</surname></string-name>, <string-name><given-names>J.</given-names> <surname>Li</surname></string-name>, <string-name><given-names>U. A.</given-names> <surname>Bhatti</surname></string-name>, <string-name><given-names>M.</given-names> <surname>Gong</surname></string-name>, <string-name><given-names>J.</given-names> <surname>Ma</surname></string-name> <etal>et al.,</etal></person-group> &#x201C;<article-title>Robust and secure zero-watermarking algorithm for medical images based on Harris-SURF-DCT and Chaotic Map</article-title>,&#x201D; <source>Security and Communication Networks</source>, vol. <volume>2021</volume>, <year>2021</year>. <uri xlink:href="https://doi.org/10.1155/2021/3084153"> https://doi.org/10.1155/2021/3084153</uri>.</mixed-citation></ref>
<ref id="ref-43"><label>[43]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>U. A.</given-names> <surname>Bhatti</surname></string-name>, <string-name><given-names>Z.</given-names> <surname>Yu</surname></string-name>, <string-name><given-names>J.</given-names> <surname>Chanussot</surname></string-name>, <string-name><given-names>Z.</given-names> <surname>Zeeshan</surname></string-name>, <string-name><given-names>L.</given-names> <surname>Yuan</surname></string-name> <etal>et al.,</etal></person-group> &#x201C;<article-title>Local similarity-based spatial-spectral fusion hyperspectral image classification with deep CNN and gabor filtering</article-title>,&#x201D; <source>IEEE Transactions on Geoscience and Remote Sensing</source>, vol. <volume>60</volume>, pp. 1&#x2013;15, <year>2021</year>.</mixed-citation></ref>
</ref-list>
</back>
</article>