<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.1 20151215//EN" "http://jats.nlm.nih.gov/publishing/1.1/JATS-journalpublishing1.dtd">
<article xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:mml="http://www.w3.org/1998/Math/MathML" xml:lang="en" article-type="research-article" dtd-version="1.1">
<front>
<journal-meta>
<journal-id journal-id-type="pmc">CMC</journal-id>
<journal-id journal-id-type="nlm-ta">CMC</journal-id>
<journal-id journal-id-type="publisher-id">CMC</journal-id>
<journal-title-group>
<journal-title>Computers, Materials &#x0026; Continua</journal-title>
</journal-title-group>
<issn pub-type="epub">1546-2226</issn>
<issn pub-type="ppub">1546-2218</issn>
<publisher>
<publisher-name>Tech Science Press</publisher-name>
<publisher-loc>USA</publisher-loc>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="publisher-id">36317</article-id>
<article-id pub-id-type="doi">10.32604/cmc.2023.036317</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Article</subject>
</subj-group>
</article-categories>
<title-group>
<article-title>Robust Multi-Watermarking Algorithm for Medical Images Based on GoogLeNet and Henon Map</article-title>
<alt-title alt-title-type="left-running-head">Robust Multi-Watermarking Algorithm for Medical Images Based on GoogLeNet and Henon Map</alt-title>
<alt-title alt-title-type="right-running-head">Robust Multi-Watermarking Algorithm for Medical Images Based on GoogLeNet and Henon Map</alt-title>
</title-group>
<contrib-group>
<contrib id="author-1" contrib-type="author">
<name name-style="western"><surname>Zhang</surname><given-names>Wenxing</given-names>
</name><xref ref-type="aff" rid="aff-1">1</xref></contrib>
<contrib id="author-2" contrib-type="author" corresp="yes">
<name name-style="western"><surname>Li</surname><given-names>Jingbing</given-names>
</name><xref ref-type="aff" rid="aff-1">1</xref>
<xref ref-type="aff" rid="aff-2">2</xref><email>jingbingli2008@hotmail.com</email></contrib>
<contrib id="author-3" contrib-type="author">
<name name-style="western"><surname>Bhatti</surname><given-names>Uzair Aslam</given-names>
</name><xref ref-type="aff" rid="aff-1">1</xref>
<xref ref-type="aff" rid="aff-2">2</xref></contrib>
<contrib id="author-4" contrib-type="author">
<name name-style="western"><surname>Liu</surname><given-names>Jing</given-names>
</name><xref ref-type="aff" rid="aff-3">3</xref></contrib>
<contrib id="author-5" contrib-type="author">
<name name-style="western"><surname>Zheng</surname><given-names>Junhua</given-names>
</name><xref ref-type="aff" rid="aff-1">1</xref></contrib>
<contrib id="author-6" contrib-type="author">
<name name-style="western"><surname>Chen</surname><given-names>Yen-Wei</given-names>
</name><xref ref-type="aff" rid="aff-4">4</xref></contrib>
<aff id="aff-1"><label>1</label><institution>School of Information and Communication Engineering, Hainan University</institution>, <addr-line>Haikou, 570228</addr-line>, <country>China</country></aff>
<aff id="aff-2"><label>2</label><institution>State Key Laboratory of Marine Resource Utilization in the South China Sea, Hainan University</institution>, <addr-line>Haikou, 570228</addr-line>, <country>China</country></aff>
<aff id="aff-3"><label>3</label><institution>Research Center for Healthcare Data Science, Zhejiang Lab</institution>, <addr-line>Hangzhou, 311100</addr-line>, <country>China</country></aff>
<aff id="aff-4"><label>4</label><institution>Graduate School of Information Science and Engineering, Ritsumeikan University</institution>, <addr-line>Kusatsu, 525-8577</addr-line>, <country>Japan</country></aff>
</contrib-group>
<author-notes>
<corresp id="cor1"><label>&#x002A;</label>Corresponding Author: Jingbing Li. Email: <email>jingbingli2008@hotmail.com</email></corresp>
</author-notes>
<pub-date date-type="collection" publication-format="electronic"><year>2023</year></pub-date>
<pub-date date-type="pub" publication-format="electronic"><day>24</day><month>1</month><year>2023</year></pub-date>
<volume>75</volume>
<issue>1</issue>
<fpage>565</fpage>
<lpage>586</lpage>
<history>
<date date-type="received"><day>25</day><month>9</month><year>2022</year></date>
<date date-type="accepted"><day>19</day><month>11</month><year>2022</year></date>
</history>
<permissions>
<copyright-statement>&#x00A9; 2023 Zhang et al.</copyright-statement>
<copyright-year>2023</copyright-year>
<copyright-holder>Zhang et al.</copyright-holder>
<license xlink:href="https://creativecommons.org/licenses/by/4.0/">
<license-p>This work is licensed under a <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution 4.0 International License</ext-link>, which permits unrestricted use, distribution, and reproduction in any medium, provided the original work is properly cited.</license-p>
</license>
</permissions>
<self-uri content-type="pdf" xlink:href="TSP_CMC_36317.pdf"></self-uri>
<abstract><p>The field of medical images has been rapidly evolving since the advent of the digital medical information era. However, medical data is susceptible to leaks and hacks during transmission. This paper proposed a robust multi-watermarking algorithm for medical images based on GoogLeNet transfer learning to protect the privacy of patient data during transmission and storage, as well as to increase the resistance to geometric attacks and the capacity of embedded watermarks of watermarking algorithms. First, a pre-trained GoogLeNet network is used in this paper, based on which the parameters of several previous layers of the network are fixed and the network is fine-tuned for the constructed medical dataset, so that the pre-trained network can further learn the deep convolutional features in the medical dataset, and then the trained network is used to extract the stable feature vectors of medical images. Then, a two-dimensional Henon chaos encryption technique, which is more sensitive to initial values, is used to encrypt multiple different types of watermarked private information. Finally, the feature vector of the image is logically operated with the encrypted multiple watermark information, and the obtained key is stored in a third party, thus achieving zero watermark embedding and blind extraction. The experimental results confirm the robustness of the algorithm from the perspective of multiple types of watermarks, while also demonstrating the successful embedding of multiple watermarks for medical images, and show that the algorithm is more resistant to geometric attacks than some conventional watermarking algorithms.</p>
</abstract>
<kwd-group kwd-group-type="author">
<kwd>Zero watermarks</kwd>
<kwd>GoogLeNet</kwd>
<kwd>medical image</kwd>
<kwd>Henon map</kwd>
<kwd>feature vector</kwd>
</kwd-group>
</article-meta>
</front>
<body>
<sec id="s1">
<label>1</label><title>Introduction</title>
<p>With the comprehensive improvement of network communication technology and medical intelligence, digital technology has been integrated into the medical system, and more and more clinical medical images, including computed tomography (CT) images and magnetic resonance imaging (MRI) images, have begun to be stored and shared through the Internet and cloud platforms; at the same time, a large amount of medical information from traditional paper medical records has been replaced by electronic medical records, and electronic medical records (EMR) [<xref ref-type="bibr" rid="ref-1">1</xref>,<xref ref-type="bibr" rid="ref-2">2</xref>] is a digital medical record, which enables a large amount of patient information to be transmitted and stored more easily. It is the arrival of its emerging technology that allows physicians to process pathological information more efficiently and patients to have a better medical experience, largely improving the quality of our entire healthcare system. However, although telemedicine diagnostic platforms provide patients with more real-time and convenient medical services, medical big data information can face malicious attacks such as illegal theft and leakage during the actual network transmission [<xref ref-type="bibr" rid="ref-3">3</xref>&#x2013;<xref ref-type="bibr" rid="ref-5">5</xref>]. Therefore, how to bring into play the convenience of network transmission and at the same time, effectively enhance the security of medical information systems has become an important issue that needs to be addressed. Digital watermarking of medical images [<xref ref-type="bibr" rid="ref-6">6</xref>] is an effective solution, which ensures the security of its transmission on the Internet by hiding patients&#x2019; personal information in medical carrier images.</p>
<p>Digital watermarking [<xref ref-type="bibr" rid="ref-7">7</xref>&#x2013;<xref ref-type="bibr" rid="ref-9">9</xref>] is a popular and very effective information-hiding technology nowadays [<xref ref-type="bibr" rid="ref-10">10</xref>,<xref ref-type="bibr" rid="ref-11">11</xref>], and its essential characteristics are security, robustness, imperceptibility, data capacity, etc. Its main method is to embed secret information in the carrier image to protect the copyright of digital products and prove the real reliability of the products. Therefore, applying digital watermarking technology to the medical system and selecting patient information or diagnostic reports as watermarks for embedding can solve security problems such as illegal tampering, stealing and copying. At present, there are two main categories according to the hiding position of the digital watermark: spatial domain [<xref ref-type="bibr" rid="ref-12">12</xref>] and transform domain [<xref ref-type="bibr" rid="ref-13">13</xref>,<xref ref-type="bibr" rid="ref-14">14</xref>]. The spatial domain is to embed the watermark directly by changing the pixel value of the host image, while the transform domain is to embed the digital watermark into a certain transform domain of the host image by transformation. There are mainly classical algorithms such as LSB (Least Significant Bit) and patchwork in the spatial domain, however, after embedding the watermark information into the spatial domain, although the embedded information is guaranteed to be invisible, the robustness of the algorithm is poor and the watermark information is easily corrupted by geometric attacks because the pixel values on the carrier image are changed. Then, in the transform domain watermarking, it is mainly based on discrete cosine transform (DCT), discrete wavelet transform (DWT), and discrete Fourier transform (DFT), etc., and the embedded watermark is achieved by modifying the transformed coefficients. 
Alotaibi&#x00A0;et&#x00A0;al.&#x00A0;[<xref ref-type="bibr" rid="ref-15">15</xref>] proposed a text image watermarking algorithm combining integer wavelet transform (IWT) and discrete cosine transform (DCT). The algorithm first decomposes the text image with IWT to obtain the low-frequency subband LL, then performs the DCT transform on the low-frequency subband, and finally embeds the watermark into the low and medium-frequency DCT coefficients; the experimental results have good imperceptibility and good robustness under conventional attacks, but show poor performance against geometric attacks such as rotation. Cedillo-Hernandez&#x00A0;et&#x00A0;al.&#x00A0;[<xref ref-type="bibr" rid="ref-16">16</xref>] proposed a watermarking algorithm based on the DFT domain. The algorithm first performs the DFT transform on medical images and then embeds the watermark into the IF amplitude of the DFT to achieve the embedding of the watermark, which has better robustness against a few geometric attacks. Assini&#x00A0;et&#x00A0;al.&#x00A0;[<xref ref-type="bibr" rid="ref-17">17</xref>] proposed a watermarking scheme that combines three transforms: DWT, DCT, and singular value decomposition (SVD). In the paper, firstly, the medical carrier image and the watermarked image are separately subjected to the three-level DWT transform, and the obtained high-frequency subbands are separately subjected to the DCT transform, then the coefficients of the DCT transform are subjected to the singular value decomposition using SVD stability, and finally, the singular values of the medical image and the watermark are summed up by the scale factor to achieve the embedding of the watermark, and the algorithm has good robustness against conventional attacks. 
Compared with the spatial domain, the transform domain-based watermarking scheme will have better robustness, but the embedding and extracting watermark information operation of such algorithms above is complicated, and the embedding capacity of the watermark is also low, which still needs to be improved in geometric attacks.</p>
<p>Considering the special nature of medical images, medical images such as CT and MRI, which we study, play an important role in acquiring pathological information and diagnosing conditions, as an important carrier of medical information storage. Therefore, the visual quality requirements for medical images are very strict and often no alterations are allowed. Meanwhile, in practical applications, it is found that the watermark embedding capacity of medical images is contradictory to the imperceptibility of the watermark. Then, based on the general transform domain watermarking algorithm, the proposed and applied zero watermarking technology [<xref ref-type="bibr" rid="ref-18">18</xref>,<xref ref-type="bibr" rid="ref-19">19</xref>] effectively solves this problem, and the medical zero watermarking technology can embed the watermark information without changing the visual effect of the original medical image. The main principle is to combine the feature vector of medical carrier image with the watermark information to obtain the logical key, and extract and restore the watermark by the retained key, thus ensuring the visual quality of the medical image, and there is no limitation of embedding capacity, which has high research value. Sinha&#x00A0;et&#x00A0;al.&#x00A0;[<xref ref-type="bibr" rid="ref-20">20</xref>] used the zero watermarking technique to authenticate medical images, obtained the low-frequency subbands of medical images by wavelet decomposition, then extracted the feature values of the low-frequency subbands using SVD. Finally, the watermark and the feature values were logically operated to achieve watermark embedding. This method can be used in the field of telemedicine to effectively protect images from illegal use, but its research lacks the detection of attack experiments. 
Liu&#x00A0;et&#x00A0;al.&#x00A0;[<xref ref-type="bibr" rid="ref-21">21</xref>] proposed a zero-watermarking algorithm based on dual-tree complex wavelet transform (DTCWT) and DCT. Firstly, DTCWT-DCT combined transformation is performed on the medical carrier image, and the low-frequency coefficient matrix is extracted. Then, the feature vector is obtained by quantization and coding. Finally, the feature vector of the image and the watermark information are used for XOR operation to embed the watermark. This algorithm has strong robustness to conventional attacks, but weak robustness to geometric attacks such as panning. The comparison of the above existing algorithms is shown in <xref ref-type="table" rid="table-1">Table 1</xref>.</p>
<table-wrap id="table-1">
<label>Table 1</label>
<caption><title>Comparison of some existing algorithms</title>
</caption>
<table frame="hsides">
<colgroup>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
</colgroup>
<thead>
<tr>
<th>Types</th>
<th>Algorithms</th>
<th>Advantages</th>
<th>Disadvantages</th>
</tr>
<tr>
<td>Spatial domain</td>
<td>LSB, Patchwork</td>
<td>Convenient and fast</td>
<td>Weak robustness</td>
</tr>
</thead>
<tbody>
<tr>
<td>Transform domain</td>
<td>IWT-DCT [<xref ref-type="bibr" rid="ref-15">15</xref>]<break/>DFT [<xref ref-type="bibr" rid="ref-16">16</xref>]<break/>DWT-DCT-SVD [<xref ref-type="bibr" rid="ref-17">17</xref>]</td>
<td>Good robustness under conventional attacks</td>
<td>Complex scheme operation<break/>Limited capacity<break/>Weak robustness against geometric attacks</td>
</tr>
<tr>
<td/>
<td>DWT-SVD [<xref ref-type="bibr" rid="ref-20">20</xref>]<break/>DTCWT-DCT [<xref ref-type="bibr" rid="ref-21">21</xref>]</td>
<td>Zero watermark<break/>No capacity limitation<break/>Good robustness under conventional attacks</td>
<td>Weak robustness against geometric attacks</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>With the development of deep learning, it has become a research hotspot for scholars in computer vision, natural language processing, speech recognition, and other fields, especially Convolutional Neural Networks (CNN) is currently very widely used, and more stable feature information can be extracted by convolutional neural networks [<xref ref-type="bibr" rid="ref-22">22</xref>,<xref ref-type="bibr" rid="ref-23">23</xref>]. Therefore, in response to the weakness of traditional algorithms in resisting geometric attacks, some research scholars have combined deep learning techniques with watermarking technology in recent years to improve the robustness of watermarking algorithms, which has essential research significance for medical privacy protection. Fierro-Radilla&#x00A0;et&#x00A0;al.&#x00A0;[<xref ref-type="bibr" rid="ref-24">24</xref>] used a convolutional neural network to generate stable features of images and combined the feature information with watermark by XOR operation, to realize zero watermark embedding. The results are robust to some geometric attacks, but the types of geometric test attacks are less. Han&#x00A0;et&#x00A0;al.&#x00A0;[<xref ref-type="bibr" rid="ref-25">25</xref>] proposed a zero watermarking scheme for medical images based on the convolutional neural network VGG19, which extracts the depth feature map of medical images by a pre-trained VGG19 network and performs DFT transform on the feature map; then selects the low-frequency coefficients of Fourier transform, converts the low-frequency coefficients into a binary hash sequence, and finally correlate the watermark information with the hash sequence to achieve watermark embedding. In terms of resistance to geometric attacks, the algorithm has improved its robustness compared to conventional algorithms. 
However, there are few research solutions combining neural networks with zero watermarking techniques, while most of the existing watermarking algorithms show less than optimal robustness in the face of major geometric attacks such as rotation, translation, scaling and shearing.</p>
<p>Synthesizing the above research progress, the resistance to geometric attacks is still a research difficulty. Therefore, this paper proposes a robust multi-watermarking algorithm for medical images based on the GoogLeNet neural network, which selects a pre-trained GoogLeNet network and trains a medical image feature extraction network using the medical image dataset for transfer learning of the GoogLeNet network. Firstly, the trained network is used to extract the feature vector of the carrier image. Then, several different types of watermark information are chaotically encrypted by Henon mapping. Finally, the zero watermarking technique is used to combine the feature vectors with the encrypted multiple watermark information to generate the key and complete the multiple watermark embedding and blind extraction. The proposed algorithm can effectively resist many different attacks and is especially robust against geometric attacks.</p>
<p>The main contribution of the present research is:</p>
<list list-type="simple">
<list-item><label>(1)</label><p>Combining neural networks with zero watermarking techniques to extract more stable image feature vectors, which are more robust than conventional watermarking algorithms in terms of resistance to geometric attacks.</p></list-item>
<list-item><label>(2)</label><p>Watermark encryption adopts a two-dimensional chaotic Henon mapping system that is more sensitive to initial values, with higher security performance than one-dimensional chaotic mapping systems such as Logistic mapping, which improves the security of watermark information as well as its concealment.</p></list-item>
<list-item><label>(3)</label><p>Combined with zero watermarking technology, it does not change the original characteristics of medical image carriers, which better solves the contradiction between watermark embedding capacity and imperceptibility of medical images.</p></list-item>
<list-item><label>(4)</label><p>Selecting several different types of watermark information (symbols, graphics, and text) to verify the algorithm&#x2019;s robustness from multiple perspectives, completing multiple watermark embedding and blind extraction, and improving the embedding capacity of the algorithm.</p></list-item>
</list>
<p>The organization of this paper consists of five sections: Introduction, The Fundamental Theory, The Proposed Watermarking Algorithm, Experiment and Analysis, and Conclusions. In the remaining part of the organization, the second section focuses on the basic theory of the proposed algorithm, including the GoogLeNet network and Henon Map. The third section presents the detailed process of implementing the algorithm. The fourth section presents the experiments and analysis, mainly to verify the robustness of the algorithm through various attacks and compare it with other algorithms. The fifth section mainly summarizes the ideas and advantages of the algorithm, as well as the future research focus directions.</p>
</sec>
<sec id="s2">
<label>2</label><title>The Fundamental Theory</title>
<sec id="s2_1">
<label>2.1</label><title>GoogLeNet Neural Network Model</title>
<p>With the continuous development of convolutional neural network (CNN) models, general convolutional neural networks will improve their performance by extending the depth and width of the network model; however, deeper or wider networks will face many problems such as too many parameters prone to overfitting, gradient dispersion, and increased computational complexity. Therefore, the proposed GoogLeNet network [<xref ref-type="bibr" rid="ref-26">26</xref>] can effectively alleviate the problems of traditional networks, which is a convolutional neural network model architecture for multi-scale image information extraction proposed by a team at Google Inc. and won the championship at the ImageNet Large-Scale Visual Recognition Challenge (ILSVRC) in 2014.</p>
<p>The GoogLeNet convolutional neural network has 22 layers of depth, as shown in <xref ref-type="fig" rid="fig-1">Fig. 1</xref>, and has nine Inception modules at the core of its architecture. Although its network depth reaches 22 layers, its parametric size is much smaller than that of AlexNet and VGG networks, and its performance is much superior. Meanwhile, to avoid the gradient dispersion problem brought about by increasing the network depth, two auxiliary classifiers are added to the GoogLeNet network structure for back-propagating gradients in the training phase, which can effectively avoid the gradient disappearance problem. Then in the testing phase, only the final Softmax3 is used to output the results, and the two auxiliary classifiers will be removed.</p>
<fig id="fig-1">
<label>Figure 1</label>
<caption><title>GoogLeNet convolutional neural network structure</title>
</caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_36317-fig-1.tif"/>
</fig>
<p>Of course, the proposed Inception module structure is an essential innovation in the GoogLeNet network. The main principle of the Inception module is to use multiple different convolutional kernels to extract feature information of different image sizes and fuse this feature information as the output. The Inception module can obtain better image features than a single convolutional kernel size. This structure uses a locally optimal sparse structure instead of the original convolutional neural network&#x2019;s fully connected approach to minimize redundancy and increase the width and depth of the network structure, and the performance is improved. As shown in <xref ref-type="fig" rid="fig-2">Fig. 2</xref>, it is a net-within-a-network structure, and it can be seen from the structure diagram that 1 &#x00D7; 1 convolutional layers are added before the 3 &#x00D7; 3 convolutional layer and the 5 &#x00D7; 5 convolutional layer, and after the 3 &#x00D7; 3 maximum pooling layer, respectively, where 1 &#x00D7; 1 convolution is mainly used for dimensionality reduction, which reduces the number of parameters and thus the complexity of computation. Therefore, when memory or computational resources are limited, GoogLeNet network will play a great advantage.</p>
<fig id="fig-2">
<label>Figure 2</label>
<caption><title>Inception module structure</title>
</caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_36317-fig-2.tif"/>
</fig>
</sec>
<sec id="s2_2">
<label>2.2</label><title>Henon Map</title>
<p>Henon map [<xref ref-type="bibr" rid="ref-27">27</xref>] is a two-input two-dimensional nonlinear discrete chaotic mapping system that is very sensitive to chaotic initial values. Since the system is controlled by two variables simultaneously, then its security performance will be better than that of one-dimensional chaotic techniques such as Logistic mapping, so it is suitable for generating pseudo-random sequences in image encryption. The iterative equation of its mapping is as in <xref ref-type="disp-formula" rid="eqn-1">Eq. (1)</xref>.</p>
<p><disp-formula id="eqn-1"><label>(1)</label><mml:math id="mml-eqn-1" display="block"><mml:mrow><mml:mo>{</mml:mo><mml:mtable columnalign="left" rowspacing="4pt" columnspacing="1em"><mml:mtr><mml:mtd><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mrow><mml:mi mathvariant="italic">n</mml:mi></mml:mrow><mml:mo>+</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mn>1</mml:mn><mml:mo>&#x2212;</mml:mo><mml:mi>a</mml:mi><mml:msubsup><mml:mi>x</mml:mi><mml:mrow><mml:mi>n</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup><mml:mo>+</mml:mo><mml:msub><mml:mi>y</mml:mi><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:msub></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:msub><mml:mi>y</mml:mi><mml:mrow><mml:mrow><mml:mi mathvariant="italic">n</mml:mi></mml:mrow><mml:mo>+</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mi>b</mml:mi><mml:msub><mml:mrow><mml:mi mathvariant="italic">x</mml:mi></mml:mrow><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:msub></mml:mtd></mml:mtr></mml:mtable><mml:mo fence="true" stretchy="true" symmetric="true"></mml:mo></mml:mrow></mml:math></disp-formula>where <italic>x</italic> and <italic>y</italic> are the two variables of the system and <italic>n</italic> is the number of iterations, the study shows that when the parameters take the values <inline-formula id="ieqn-1"><mml:math id="mml-ieqn-1"><mml:mrow><mml:mi mathvariant="italic">a</mml:mi></mml:mrow><mml:mo>=</mml:mo><mml:mn>1.4</mml:mn></mml:math></inline-formula> and <inline-formula id="ieqn-2"><mml:math id="mml-ieqn-2"><mml:mrow><mml:mi mathvariant="italic">b</mml:mi></mml:mrow><mml:mo>=</mml:mo><mml:mn>0.3</mml:mn></mml:math></inline-formula>, the system is in a chaotic state, generating both the <italic>x</italic> and <italic>y</italic> chaotic variable sequences; <xref ref-type="fig" rid="fig-3">Fig. 3</xref> shows the generated chaotic attractor, which consists of two irregular curves. <xref ref-type="fig" rid="fig-4">Fig. 
4</xref> shows the bifurcation diagram of the <inline-formula id="ieqn-3"><mml:math id="mml-ieqn-3"><mml:msub><mml:mrow><mml:mi mathvariant="italic">x</mml:mi></mml:mrow><mml:mrow><mml:mrow><mml:mi mathvariant="italic">n</mml:mi></mml:mrow></mml:mrow></mml:msub></mml:math></inline-formula> component of the Henon system with the variation of the parameter <italic>a</italic>, where the initial values <inline-formula id="ieqn-4"><mml:math id="mml-ieqn-4"><mml:msub><mml:mrow><mml:mi mathvariant="italic">x</mml:mi></mml:mrow><mml:mrow><mml:mrow><mml:mtext>o</mml:mtext></mml:mrow></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mn>0</mml:mn></mml:math></inline-formula>, <inline-formula id="ieqn-5"><mml:math id="mml-ieqn-5"><mml:msub><mml:mrow><mml:mi mathvariant="italic">y</mml:mi></mml:mrow><mml:mrow><mml:mrow><mml:mtext>o</mml:mtext></mml:mrow></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mn>0</mml:mn></mml:math></inline-formula>, <inline-formula id="ieqn-6"><mml:math id="mml-ieqn-6"><mml:mrow><mml:mi mathvariant="italic">b</mml:mi></mml:mrow><mml:mo>=</mml:mo><mml:mn>0.3</mml:mn></mml:math></inline-formula>, and <inline-formula id="ieqn-7"><mml:math id="mml-ieqn-7"><mml:mi>a</mml:mi><mml:mo>&#x2208;</mml:mo><mml:mrow><mml:mo>[</mml:mo><mml:mn>0</mml:mn><mml:mo>,</mml:mo><mml:mn>1.4</mml:mn><mml:mo>]</mml:mo></mml:mrow></mml:math></inline-formula> are taken, and it can be seen that the more the value of <italic>a</italic> is taken close to 1.4, the higher the complexity of the chaotic system. Therefore, in cryptographic applications, the parameters are usually taken to be <inline-formula id="ieqn-8"><mml:math id="mml-ieqn-8"><mml:mrow><mml:mi mathvariant="italic">a</mml:mi></mml:mrow><mml:mo>=</mml:mo><mml:mn>1.4</mml:mn></mml:math></inline-formula> and <inline-formula id="ieqn-9"><mml:math id="mml-ieqn-9"><mml:mrow><mml:mi mathvariant="italic">b</mml:mi></mml:mrow><mml:mo>=</mml:mo><mml:mn>0.3</mml:mn></mml:math></inline-formula>.</p>
<fig id="fig-3">
<label>Figure 3</label>
<caption><title>The chaotic attractor of the Henon map</title>
</caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_36317-fig-3.tif"/>
</fig><fig id="fig-4">
<label>Figure 4</label>
<caption><title>The bifurcation diagram of Henon map</title>
</caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_36317-fig-4.tif"/>
</fig>
</sec>
</sec>
<sec id="s3">
<label>3</label><title>The Proposed Watermarking Algorithm</title>
<sec id="s3_1">
<label>3.1</label><title>Transfer Learning for GoogLeNet Neural Network</title>
<p>At present, deep learning has been widely used in the field of medical image processing, however, due to the particular characteristics of medical images, it is difficult for us to obtain a large amount of medical image data for training, and the annotation cost is high, and there are few free and publicly available medical datasets. Then it becomes a difficult task to obtain a network model for deep feature extraction by training a large number of medical images. Therefore, combining it with transfer learning can be a good solution to the current problem. Transfer learning [<xref ref-type="bibr" rid="ref-28">28</xref>] is a machine learning method that refers to a pre-trained model being reused in another task, which can effectively circumvent the shortcoming of insufficient training data and improve the performance of neural networks.</p>
<p>Then in the zero watermarking algorithms, extracting stable features of medical images is a very important part. The method in this paper is to select the pre-trained GoogLeNet network on the ImageNet database, which has been trained with more than one million natural images and has a good feature extraction effect; on this basis, the network is fine-tuned for the medical dataset we constructed and transfer learning is performed to train the medical image feature extraction network.</p>
<sec id="s3_1_1">
<label>3.1.1</label><title>Building the Dataset</title>
<p>Our dataset images were obtained from two open-source medical databases, The National Institutes of Health Clinical Center and MedPix from The National Library of Medicine. In constructing the dataset, we selected three types of medical images from open-source data, brain, spine, and abdomen, as the original sample images, with a total of 390 images, and some of the dataset images are shown in <xref ref-type="fig" rid="fig-5">Fig. 5</xref>. 80% of these images were used for training, 10% for validation, and 10% for testing. Of course, to increase the diversity of training samples and enhance the robustness of the training model, we utilized data augmentation by performing various attacks on the original training sample images, including Gaussian noise, JPEG compression, median filtering, rotation, panning, cropping, and scaling. After processing, the training samples reached 17160. In addition, since the input size of the GoogLeNet network is 224 &#x00D7; 224 &#x00D7; 3, the sample images of the dataset were all resized to 224 &#x00D7; 224 &#x00D7; 3 pixels.</p>
<fig id="fig-5">
<label>Figure 5</label>
<caption><title>Partial images of the dataset. (a) Brain. (b) Spine. (c) Abdomen</title>
</caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_36317-fig-5.tif"/>
</fig>
<p>In this time, we use a 32-bit image binary feature vector as the label of the dataset, where the 32-bit feature vector just matches the size of the embedded watermark. The steps of feature vector acquisition: (1) DCT is performed on medical images; (2) the values of the 4 &#x00D7; 8 region in the upper left corner of the DCT coefficient matrix are extracted and, combined with the perceptual hash [<xref ref-type="bibr" rid="ref-29">29</xref>], quantization coded: positive coefficient values or zero are represented by &#x201C;1&#x201D; and negative coefficient values by &#x201C;0&#x201D;, generating a 32-bit binary feature vector. Meanwhile, both the original training image and the validation image are subjected to the same steps, and their respective 32-bit feature vectors are used as the labels of the images.</p>
</sec>
<sec id="s3_1_2">
<label>3.1.2</label><title>Training the GoogLeNet Network</title>
<p>This experiment utilizes the neural network toolbox of Matlab 2019a with an NVIDIA GeForce GTX 1050Ti hardware graphics card. We also fine-tuned the network by changing the fully connected layer of the GoogLeNet network from the initial 1000 outputs to 32 outputs to match the zero watermarking systems, and by deleting the Softmax and Classification Output layers of the network and adding a Regression Output layer after the fully connected layer, so that the modified network can complete the regression task.</p>
<p>During the training process, we freeze the first 110 layers of the pre-trained network, so that the parameters of the frozen layers are not updated and only the later layers are trained with the pre-trained parameters to improve the training efficiency and reduce the hardware requirements of the training process. The training process is accelerated by Graphics Processing Units (GPU), and the whole training process takes only about 16 minutes; the specific hyperparameter settings are shown in <xref ref-type="table" rid="table-2">Table 2</xref>. The final trained network outputs 32 feature values after processing the input image, which can be subsequently matched with the watermarking system.</p>
<table-wrap id="table-2">
<label>Table 2</label>
<caption><title>Hyperparameters of the network</title>
</caption>
<table frame="hsides">
<colgroup>
<col align="left"/>
<col align="left"/>
</colgroup>
<thead>
<tr>
<th>Types</th>
<th>Parameters</th>
</tr>
</thead>
<tbody>
<tr>
<td>Optimizer</td>
<td>Stochastic gradient descent with momentum optimizer (SGDM)</td>
</tr>
<tr>
<td>Loss function</td>
<td>Mean square error function</td>
</tr>
<tr>
<td>Mini-Batch size</td>
<td>32</td>
</tr>
<tr>
<td>Learning rate</td>
<td>0.001</td>
</tr>
<tr>
<td>Number of epochs</td>
<td>5</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
</sec>
<sec id="s3_2">
<label>3.2</label><title>Feature Extraction of Medical Images</title>
<p>Since the algorithm in this paper uses the zero watermarking technique, the main method of this technique is to associate the image features with the watermark. Then extracting a stable image feature vector can guarantee the robustness of the watermarking algorithm. At this time, we utilize the trained GoogLeNet network to extract the feature information, which has the architecture of multi-scale image information extraction and can be well combined with the zero watermarking technique to obtain stable feature vectors. <xref ref-type="fig" rid="fig-6">Fig. 6</xref> shows the process of feature extraction with the following steps:</p>
<fig id="fig-6">
<label>Figure 6</label>
<caption><title>The process of image feature extraction</title>
</caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_36317-fig-6.tif"/>
</fig>
<p>Step 1: Using the original medical image I(i, j) as the input to the trained GoogLeNet network, with the input images all resized to 224 &#x00D7; 224 &#x00D7; 3.</p>
<p>Step 2: Since we previously used the 32-bit image binary feature vector as the label of the dataset, the original 1000 parameter values of the fully connected layer (FC) are adjusted to 32, so the medical image is processed by the trained GoogLeNet network, and the fully connected layer FC outputs 32 feature values D(j).</p>
<p>Step 3: Combined with the perceptual hash, the 32 values D(j) are quantized and encoded, and those greater than or equal to 0.5 are judged as &#x201C;1&#x201D; and those less than 0.5 are judged as &#x201C;0&#x201D; to generate a binary feature sequence, which is used as the feature vector V(j) of the medical image.</p>
</sec>
<sec id="s3_3">
<label>3.3</label><title>Watermark Encryption</title>
<p>This time, Henon mapping is used to perform chaotic encryption of watermark, which is a two-dimensional discrete chaotic system with higher security than one-dimensional chaotic systems such as Logistic mapping. <xref ref-type="fig" rid="fig-7">Fig. 7</xref> is the flow chart of the chaotic encryption algorithm with multiple watermarks.</p>
<fig id="fig-7">
<label>Figure 7</label>
<caption><title>The watermark encryption process</title>
</caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_36317-fig-7.tif"/>
</fig>
<p>The steps of the watermark encryption principle are:</p>
<p>Step 1: We first generate the chaotic sequence XY(j) given the initial values of the system <inline-formula id="ieqn-10"><mml:math id="mml-ieqn-10"><mml:msub><mml:mrow><mml:mi mathvariant="italic">x</mml:mi></mml:mrow><mml:mrow><mml:mn>0</mml:mn></mml:mrow></mml:msub></mml:math></inline-formula> and&#x00A0;<inline-formula id="ieqn-11"><mml:math id="mml-ieqn-11"><mml:msub><mml:mrow><mml:mi mathvariant="italic">y</mml:mi></mml:mrow><mml:mrow><mml:mn>0</mml:mn></mml:mrow></mml:msub></mml:math></inline-formula>.</p>
<p>Step 2: By quantization coding the chaotic sequence XY(j), the value greater than or equal to 0 is judged as &#x201C;1&#x201D; and the opposite is judged as &#x201C;0&#x201D;, thus converting the chaotic sequence XY(j) into a binary encrypted sequence C(j).</p>
<p>Step 3: Three different types of binary watermark <inline-formula id="ieqn-12"><mml:math id="mml-ieqn-12"><mml:msub><mml:mrow><mml:mi mathvariant="italic">W</mml:mi></mml:mrow><mml:mrow><mml:mrow><mml:mi mathvariant="italic">k</mml:mi></mml:mrow></mml:mrow></mml:msub><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi mathvariant="italic">i</mml:mi></mml:mrow><mml:mo>,</mml:mo><mml:mtext>&#xA0;</mml:mtext><mml:mrow><mml:mi mathvariant="italic">j</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:math></inline-formula> were selected, including text, symbols and graphics, with a pixel size of 32 &#x00D7; 32. Finally, XOR operation is performed on each line of the binary watermarking image <inline-formula id="ieqn-13"><mml:math id="mml-ieqn-13"><mml:msub><mml:mrow><mml:mi mathvariant="italic">W</mml:mi></mml:mrow><mml:mrow><mml:mrow><mml:mi mathvariant="italic">k</mml:mi></mml:mrow></mml:mrow></mml:msub><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi mathvariant="italic">i</mml:mi></mml:mrow><mml:mo>,</mml:mo><mml:mtext>&#xA0;</mml:mtext><mml:mrow><mml:mi mathvariant="italic">j</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:math></inline-formula> and binary encryption sequence C(j) to obtain chaotic encrypted multi-watermark <inline-formula id="ieqn-14"><mml:math id="mml-ieqn-14"><mml:msub><mml:mrow><mml:mi mathvariant="italic">B</mml:mi><mml:mi mathvariant="italic">W</mml:mi></mml:mrow><mml:mrow><mml:mrow><mml:mi mathvariant="italic">k</mml:mi></mml:mrow></mml:mrow></mml:msub><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi mathvariant="italic">i</mml:mi></mml:mrow><mml:mo>,</mml:mo><mml:mtext>&#xA0;</mml:mtext><mml:mrow><mml:mi mathvariant="italic">j</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:math></inline-formula>, as shown in <xref ref-type="disp-formula" rid="eqn-2">Eq. (2)</xref>, where &#x201C;&#x2295;&#x201D; is an XOR operator. 
The operation rule is: when the two values are different, the XOR result is 1; conversely, the result is 0.
<disp-formula id="eqn-2"><label>(2)</label><mml:math id="mml-eqn-2" display="block"><mml:mi>B</mml:mi><mml:msub><mml:mi>W</mml:mi><mml:mrow><mml:mi>k</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mi>C</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>j</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mo>&#x2295;</mml:mo><mml:msub><mml:mi>W</mml:mi><mml:mrow><mml:mi>k</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:math></disp-formula></p>
</sec>
<sec id="s3_4">
<label>3.4</label><title>Watermark Embedding</title>
<p>A digital watermarking system mainly has two important processes: watermark embedding and watermark extraction. This section introduces the watermark embedding part; the watermark embedding process is shown in <xref ref-type="fig" rid="fig-8">Fig. 8</xref>. The specific implementation steps are as follows:</p>
<fig id="fig-8">
<label>Figure 8</label>
<caption><title>The watermark embedding process</title>
</caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_36317-fig-8.tif"/>
</fig>
<p>Step 1: The feature vector V(j) of the original medical image I(i, j) is extracted through the trained GoogLeNet network, and the detailed steps of obtaining the feature vector can refer to the chapter: feature extraction of medical images.</p>
<p>Step 2: Before watermark embedding, Henon chaos encryption is performed on different types of multi-watermark information to improve information security, and chaos encrypted multi-watermark <inline-formula id="ieqn-15"><mml:math id="mml-ieqn-15"><mml:msub><mml:mrow><mml:mi mathvariant="italic">B</mml:mi><mml:mi mathvariant="italic">W</mml:mi></mml:mrow><mml:mrow><mml:mrow><mml:mi mathvariant="italic">k</mml:mi></mml:mrow></mml:mrow></mml:msub><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi mathvariant="italic">i</mml:mi></mml:mrow><mml:mo>,</mml:mo><mml:mtext>&#xA0;</mml:mtext><mml:mrow><mml:mi mathvariant="italic">j</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:math></inline-formula> is obtained.</p>
<p>Step 3: The feature vector V(j) and the encrypted watermark <inline-formula id="ieqn-16"><mml:math id="mml-ieqn-16"><mml:msub><mml:mrow><mml:mi mathvariant="italic">B</mml:mi><mml:mi mathvariant="italic">W</mml:mi></mml:mrow><mml:mrow><mml:mrow><mml:mi mathvariant="italic">k</mml:mi></mml:mrow></mml:mrow></mml:msub><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi mathvariant="italic">i</mml:mi></mml:mrow><mml:mo>,</mml:mo><mml:mtext>&#xA0;</mml:mtext><mml:mrow><mml:mi mathvariant="italic">j</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:math></inline-formula> are XORed to generate the logical key <inline-formula id="ieqn-17"><mml:math id="mml-ieqn-17"><mml:msub><mml:mrow><mml:mi mathvariant="italic">K</mml:mi><mml:mi mathvariant="italic">e</mml:mi><mml:mi mathvariant="italic">y</mml:mi></mml:mrow><mml:mrow><mml:mrow><mml:mi mathvariant="italic">k</mml:mi></mml:mrow></mml:mrow></mml:msub><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi mathvariant="italic">i</mml:mi></mml:mrow><mml:mo>,</mml:mo><mml:mtext>&#xA0;</mml:mtext><mml:mrow><mml:mi mathvariant="italic">j</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:math></inline-formula>, which is saved with a third party to realize the embedding of multiple watermarks, as shown in <xref ref-type="disp-formula" rid="eqn-3">Eq. (3)</xref>.
<disp-formula id="eqn-3"><label>(3)</label><mml:math id="mml-eqn-3" display="block"><mml:mi>K</mml:mi><mml:mi>e</mml:mi><mml:msub><mml:mi>y</mml:mi><mml:mrow><mml:mi>k</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mi>V</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>j</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mo>&#x2295;</mml:mo><mml:mi>B</mml:mi><mml:msub><mml:mi>W</mml:mi><mml:mrow><mml:mi>k</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:math></disp-formula></p>
</sec>
<sec id="s3_5">
<label>3.5</label><title>The Extraction and Decryption of Watermark</title>
<p>This section includes the extraction and decryption of the watermark, the process of which is shown in <xref ref-type="fig" rid="fig-9">Fig. 9</xref>, with the following implementation steps:</p>
<fig id="fig-9">
<label>Figure 9</label>
<caption><title>The process of watermark extraction and decryption</title>
</caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_36317-fig-9.tif"/>
</fig>
<p>Step 1: Firstly, extract the feature vector <inline-formula id="ieqn-18"><mml:math id="mml-ieqn-18"><mml:msup><mml:mrow><mml:mtext>V</mml:mtext></mml:mrow><mml:mrow><mml:msup><mml:mi></mml:mi><mml:mo>&#x2032;</mml:mo></mml:msup></mml:mrow></mml:msup><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mtext>j</mml:mtext></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:math></inline-formula> of the medical image <inline-formula id="ieqn-19"><mml:math id="mml-ieqn-19"><mml:msup><mml:mrow><mml:mtext>I</mml:mtext></mml:mrow><mml:mrow><mml:msup><mml:mi></mml:mi><mml:mo>&#x2032;</mml:mo></mml:msup></mml:mrow></mml:msup><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mtext>i</mml:mtext></mml:mrow><mml:mo>,</mml:mo><mml:mrow><mml:mtext>j</mml:mtext></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:math></inline-formula> to be tested using the same method as watermark embedding.</p>
<p>Step 2: The feature vector <inline-formula id="ieqn-20"><mml:math id="mml-ieqn-20"><mml:msup><mml:mrow><mml:mtext>V</mml:mtext></mml:mrow><mml:mrow><mml:msup><mml:mi></mml:mi><mml:mo>&#x2032;</mml:mo></mml:msup></mml:mrow></mml:msup><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mtext>j</mml:mtext></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:math></inline-formula> and key <inline-formula id="ieqn-21"><mml:math id="mml-ieqn-21"><mml:msub><mml:mrow><mml:mi mathvariant="italic">K</mml:mi><mml:mi mathvariant="italic">e</mml:mi><mml:mi mathvariant="italic">y</mml:mi></mml:mrow><mml:mrow><mml:mrow><mml:mi mathvariant="italic">k</mml:mi></mml:mrow></mml:mrow></mml:msub><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi mathvariant="italic">i</mml:mi></mml:mrow><mml:mo>,</mml:mo><mml:mtext>&#xA0;</mml:mtext><mml:mrow><mml:mi mathvariant="italic">j</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:math></inline-formula> are XORed, and then the encrypted multi-watermark information <inline-formula id="ieqn-22"><mml:math id="mml-ieqn-22"><mml:mi>B</mml:mi><mml:msubsup><mml:mi>W</mml:mi><mml:mrow><mml:mi>k</mml:mi></mml:mrow><mml:mrow><mml:msup><mml:mi></mml:mi><mml:mo>&#x2032;</mml:mo></mml:msup></mml:mrow></mml:msubsup><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:math></inline-formula> is extracted, as shown in <xref ref-type="disp-formula" rid="eqn-4">Eq. (4)</xref>.
<disp-formula id="eqn-4"><label>(4)</label><mml:math id="mml-eqn-4" display="block"><mml:mi>B</mml:mi><mml:msubsup><mml:mi>W</mml:mi><mml:mrow><mml:mi>k</mml:mi></mml:mrow><mml:mrow><mml:msup><mml:mi></mml:mi><mml:mo>&#x2032;</mml:mo></mml:msup></mml:mrow></mml:msubsup><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mi>K</mml:mi><mml:mi>e</mml:mi><mml:msub><mml:mi>y</mml:mi><mml:mrow><mml:mrow><mml:mi mathvariant="italic">k</mml:mi></mml:mrow></mml:mrow></mml:msub><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mo>&#x2295;</mml:mo><mml:msup><mml:mi>V</mml:mi><mml:mrow><mml:msup><mml:mi></mml:mi><mml:mo>&#x2032;</mml:mo></mml:msup></mml:mrow></mml:msup><mml:mrow><mml:mo>(</mml:mo><mml:mi>j</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:math></disp-formula></p>
<p>Step 3: Watermark decryption process: the extracted encrypted watermark <inline-formula id="ieqn-23"><mml:math id="mml-ieqn-23"><mml:mi>B</mml:mi><mml:msubsup><mml:mi>W</mml:mi><mml:mrow><mml:mi>k</mml:mi></mml:mrow><mml:mrow><mml:msup><mml:mi></mml:mi><mml:mo>&#x2032;</mml:mo></mml:msup></mml:mrow></mml:msubsup><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:math></inline-formula> and the binary encryption sequence C(j) are performed XOR operation to restore the watermark information <inline-formula id="ieqn-24"><mml:math id="mml-ieqn-24"><mml:msubsup><mml:mi>W</mml:mi><mml:mrow><mml:mi>k</mml:mi></mml:mrow><mml:mrow><mml:msup><mml:mi></mml:mi><mml:mo>&#x2032;</mml:mo></mml:msup></mml:mrow></mml:msubsup><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:math></inline-formula>, as shown in <xref ref-type="disp-formula" rid="eqn-5">Eq. (5)</xref>.
<disp-formula id="eqn-5"><label>(5)</label><mml:math id="mml-eqn-5" display="block"><mml:msubsup><mml:mi>W</mml:mi><mml:mrow><mml:mi>k</mml:mi></mml:mrow><mml:mrow><mml:msup><mml:mi></mml:mi><mml:mo>&#x2032;</mml:mo></mml:msup></mml:mrow></mml:msubsup><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mi>B</mml:mi><mml:msubsup><mml:mi>W</mml:mi><mml:mrow><mml:mi>k</mml:mi></mml:mrow><mml:mrow><mml:msup><mml:mi></mml:mi><mml:mo>&#x2032;</mml:mo></mml:msup></mml:mrow></mml:msubsup><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mo>&#x2295;</mml:mo><mml:mi>C</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>j</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:math></disp-formula></p>
</sec>
</sec>
<sec id="s4">
<label>4</label><title>Experiment and Analysis</title>
<p>This experimental system uses MATLAB 2019a as a platform, and the main research process is to test the watermarking algorithm by conventional and geometric attacks. In the experiments, we randomly selected a brain slice image from the test set as the original medical image for testing, and also selected three 32 &#x00D7; 32 pixel binary watermarked maps including text, graphics and symbols, which can verify the robustness of the algorithm from several aspects. In addition, to further improve the security, we use the Henon chaos encryption technique to encrypt the original watermark information, as shown in <xref ref-type="fig" rid="fig-10">Fig. 10</xref>.</p>
<fig id="fig-10">
<label>Figure 10</label>
<caption><title>Medical images and watermarks. (a) Original medical image. (b) Original binary watermark 1. (c) Original binary watermark 2. (d) Original binary watermark 3. (e) Encrypted watermark 1. (f) Encrypted watermark 2. (g) Encrypted watermark 3</title>
</caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_36317-fig-10.tif"/>
</fig>
<sec id="s4_1">
<label>4.1</label><title>Performance Indicators</title>
<p>In our experiments, we used two performance metrics: peak signal-to-noise ratio (PSNR) and normalized correlation coefficient (NC). Among them, PSNR can measure the distortion degree of medical images containing watermarks, and the smaller the PSNR value, the greater the distortion degree of the image. In <xref ref-type="disp-formula" rid="eqn-6">Eq. (6)</xref>, I(i, j) denotes the grayscale value of each pixel point in the original image; <inline-formula id="ieqn-25"><mml:math id="mml-ieqn-25"><mml:msup><mml:mrow><mml:mtext>I</mml:mtext></mml:mrow><mml:mrow><mml:msup><mml:mi></mml:mi><mml:mo>&#x2032;</mml:mo></mml:msup></mml:mrow></mml:msup><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mtext>i</mml:mtext></mml:mrow><mml:mo>,</mml:mo><mml:mrow><mml:mtext>j</mml:mtext></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:math></inline-formula> denotes the grayscale value of each pixel point in the embedded watermarked image; <italic>M</italic> and <italic>N</italic> denote the pixel values of the rows and columns of the medical image.</p>
<p><disp-formula id="eqn-6"><label>(6)</label><mml:math id="mml-eqn-6" display="block"><mml:mi>P</mml:mi><mml:mi>S</mml:mi><mml:mi>N</mml:mi><mml:mi>R</mml:mi><mml:mo>=</mml:mo><mml:mn>10</mml:mn><mml:mi>lg</mml:mi><mml:mo>&#x2061;</mml:mo><mml:mrow><mml:mo>[</mml:mo><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mfrac><mml:mrow><mml:mi>M</mml:mi><mml:mi>N</mml:mi><mml:munder><mml:mo form="prefix">max</mml:mo><mml:mrow><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi></mml:mrow></mml:munder><mml:msup><mml:mrow><mml:mo>(</mml:mo><mml:mi>I</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mo>)</mml:mo></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msup></mml:mrow><mml:mrow><mml:munder><mml:mo movablelimits="false">&#x2211;</mml:mo><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:munder><mml:munder><mml:mo movablelimits="false">&#x2211;</mml:mo><mml:mrow><mml:mi>j</mml:mi></mml:mrow></mml:munder><mml:msup><mml:mrow><mml:mo>(</mml:mo><mml:mi>I</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mo>&#x2212;</mml:mo><mml:msup><mml:mi>I</mml:mi><mml:mrow><mml:msup><mml:mi></mml:mi><mml:mo>&#x2032;</mml:mo></mml:msup></mml:mrow></mml:msup><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mo>)</mml:mo></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msup></mml:mrow></mml:mfrac></mml:mstyle><mml:mo>]</mml:mo></mml:mrow></mml:math></disp-formula></p>
<p>Meanwhile, to compare the correlation between the original watermark and the extracted watermark, we use the normalized correlation coefficient NC. In <xref ref-type="disp-formula" rid="eqn-7">Eq. (7)</xref>, <inline-formula id="ieqn-26"><mml:math id="mml-ieqn-26"><mml:mrow><mml:mtext>W</mml:mtext></mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mtext>i</mml:mtext></mml:mrow><mml:mo>,</mml:mo><mml:mrow><mml:mtext>j</mml:mtext></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:math></inline-formula> is the original watermark and <inline-formula id="ieqn-27"><mml:math id="mml-ieqn-27"><mml:msup><mml:mrow><mml:mtext>W</mml:mtext></mml:mrow><mml:mrow><mml:msup><mml:mi></mml:mi><mml:mo>&#x2032;</mml:mo></mml:msup></mml:mrow></mml:msup><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mtext>i</mml:mtext></mml:mrow><mml:mo>,</mml:mo><mml:mrow><mml:mtext>j</mml:mtext></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:math></inline-formula> is the extracted watermark. In the experiment, when the NC value is not less than 0.5, the algorithm can effectively extract the watermark information; when the NC value is closer to 1, it means that the correlation between the two is higher and the algorithm is more robust against attacks.</p>
<p><disp-formula id="eqn-7"><label>(7)</label><mml:math id="mml-eqn-7" display="block"><mml:mi>N</mml:mi><mml:mi>C</mml:mi><mml:mo>=</mml:mo><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mfrac><mml:mrow><mml:munder><mml:mo movablelimits="false">&#x2211;</mml:mo><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:munder><mml:munder><mml:mo movablelimits="false">&#x2211;</mml:mo><mml:mrow><mml:mi>j</mml:mi></mml:mrow></mml:munder><mml:mi>W</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:msup><mml:mi>W</mml:mi><mml:mrow><mml:msup><mml:mi></mml:mi><mml:mo>&#x2032;</mml:mo></mml:msup></mml:mrow></mml:msup><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:munder><mml:mo movablelimits="false">&#x2211;</mml:mo><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:munder><mml:munder><mml:mo movablelimits="false">&#x2211;</mml:mo><mml:mrow><mml:mi>j</mml:mi></mml:mrow></mml:munder><mml:msup><mml:mi>W</mml:mi><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msup><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:mrow></mml:mfrac></mml:mstyle></mml:math></disp-formula></p>
</sec>
<sec id="s4_2">
<label>4.2</label><title>The Reliability of the Algorithm</title>
<p>To prove the reliability of the algorithm, it must be ensured that the extracted feature vectors are representative. We can use the normalized correlation coefficient NC to measure the similarity between different image feature vectors: when the NC value is lower than 0.5, it means that the similarity of different image feature vectors is low and the extracted feature vectors are well representative; on the contrary, it means that the feature vectors are not very representative. This time, we selected 4 different medical images each from inside and outside the test set for testing; <xref ref-type="fig" rid="fig-11">Fig. 11</xref> shows the 8 medical images tested, and <xref ref-type="table" rid="table-3">Table 3</xref> shows the NC values between them.</p>
<fig id="fig-11"><label>Figure 11</label><caption><title>Different medical images. (a) Brain. (b) Abdomen. (c) Spine 1. (d) Spine 2. (e) Breast. (f) Hand. (g) Chest. (h) Lung</title></caption><graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_36317-fig-11.tif"/></fig><table-wrap id="table-3"><label>Table 3</label>
<caption><title>NC value between different images</title>
</caption>
<table frame="hsides">
<colgroup>
<col/>
<col/>
<col/>
<col/>
<col/>
<col/>
<col/>
<col/>
<col/>
</colgroup>
<thead>
<tr>
<th>Image</th>
<th>Brain</th>
<th>Abdomen</th>
<th>Spine 1</th>
<th>Spine 2</th>
<th>Breast</th>
<th>Hand</th>
<th>Chest</th>
<th>Lung</th>
</tr>
</thead>
<tbody>
<tr>
<td>Brain</td>
<td>1.00</td>
<td>0.06</td>
<td>&#x2212;0.12</td>
<td>&#x2212;0.06</td>
<td>0.43</td>
<td>&#x2212;0.02</td>
<td>0.08</td>
<td>0.31</td>
</tr>
<tr>
<td>Abdomen</td>
<td>0.06</td>
<td>1.00</td>
<td>0.06</td>
<td>0.25</td>
<td>&#x2212;0.13</td>
<td>0.33</td>
<td>0.00</td>
<td>0.25</td>
</tr>
<tr>
<td>Spine 1</td>
<td>&#x2212;0.12</td>
<td>0.06</td>
<td>1.00</td>
<td>0.06</td>
<td>0.20</td>
<td>0.28</td>
<td>0.18</td>
<td>0.20</td>
</tr>
<tr>
<td>Spine 2</td>
<td>&#x2212;0.06</td>
<td>0.25</td>
<td>0.06</td>
<td>1.00</td>
<td>&#x2212;0.25</td>
<td>0.33</td>
<td>0.39</td>
<td>0.00</td>
</tr>
<tr>
<td>Breast</td>
<td>0.43</td>
<td>&#x2212;0.13</td>
<td>0.20</td>
<td>&#x2212;0.25</td>
<td>1.00</td>
<td>&#x2212;0.11</td>
<td>&#x2212;0.23</td>
<td>0.37</td>
</tr>
<tr>
<td>Hand</td>
<td>&#x2212;0.02</td>
<td>0.33</td>
<td>0.28</td>
<td>0.33</td>
<td>&#x2212;0.11</td>
<td>1.00</td>
<td>0.02</td>
<td>0.29</td>
</tr>
<tr>
<td>Chest</td>
<td>0.08</td>
<td>0.00</td>
<td>0.18</td>
<td>0.39</td>
<td>&#x2212;0.23</td>
<td>0.02</td>
<td>1.00</td>
<td>0.16</td>
</tr>
<tr>
<td>Lung</td>
<td>0.31</td>
<td>0.25</td>
<td>0.20</td>
<td>0.00</td>
<td>0.37</td>
<td>0.29</td>
<td>0.16</td>
<td>1.00</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>The data in <xref ref-type="table" rid="table-3">Table 3</xref> shows that the absolute values of NC values between different images are less than 0.5, and the NC values between the same images are all 1. Therefore, the medical image feature vectors extracted by the trained network are representative and can distinguish between different images, indicating that the algorithm is reliable.</p>

</sec>
<sec id="s4_3">
<label>4.3</label><title>The Result of the Attack Experiment</title>
<p>In this simulation experiment, we tested conventional and geometric attacks of different intensities, respectively, and selected three types of watermark information. The original watermark is shown in <xref ref-type="fig" rid="fig-10">Fig. 10</xref> above. Meanwhile, in the experimental results, &#x201C;NC1&#x201D; represents the NC value of original watermark 1 after an attack, &#x201C;NC2&#x201D; represents the NC value of original watermark 2 after an attack, and &#x201C;NC3&#x201D; represents the NC value of original watermark 3 after an attack.</p>
<sec id="s4_3_1">
<label>4.3.1</label><title>Conventional Attacks</title>
<p>In the conventional attacks, we tested three attacks of Gaussian noise, JPEG compression, and median filtering, respectively. As can be seen from <xref ref-type="table" rid="table-4">Table 4</xref>, when the JPEG compression strength reaches 5%, the NC values of the three extracted watermarks are more than 0.9; meanwhile, two strengths of the median filter attack were tested, and the NC values are greater than 0.8; in addition, when the Gaussian noise attack reaches 14%, the NC values are 0.59, 0.58 and 0.58, respectively, and the watermark information can still be effectively recovered. <xref ref-type="fig" rid="fig-12">Fig. 12</xref> shows some experimental effects. Therefore, the proposed algorithm can effectively resist conventional attacks, especially in JPEG compression attacks with strong robustness.</p>
<table-wrap id="table-4">
<label>Table 4</label>
<caption><title>PSNR and NC values under conventional attacks</title>
</caption>
<table frame="hsides">
<colgroup>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
</colgroup>
<thead>
<tr>
<th>Attacks</th>
<th>Intensity</th>
<th>PSNR (dB)</th>
<th>NC1</th>
<th>NC2</th>
<th>NC3</th>
</tr>
</thead>
<tbody>
<tr>
<td>Gaussian noise (%)</td>
<td>2</td>
<td>19.02</td>
<td>0.91</td>
<td>0.94</td>
<td>0.94</td>
</tr>
<tr>
<td/>
<td>5</td>
<td>15.27</td>
<td>0.86</td>
<td>0.87</td>
<td>0.88</td>
</tr>
<tr>
<td/>
<td>10</td>
<td>12.56</td>
<td>0.78</td>
<td>0.84</td>
<td>0.83</td>
</tr>
<tr>
<td/>
<td>14</td>
<td>11.30</td>
<td>0.59</td>
<td>0.58</td>
<td>0.58</td>
</tr>
<tr>
<td>JPEG compression (%)</td>
<td>5</td>
<td>24.47</td>
<td>0.91</td>
<td>0.94</td>
<td>0.94</td>
</tr>
<tr>
<td/>
<td>10</td>
<td>29.46</td>
<td>0.91</td>
<td>0.94</td>
<td>0.94</td>
</tr>
<tr>
<td/>
<td>20</td>
<td>31.77</td>
<td>1.00</td>
<td>1.00</td>
<td>1.00</td>
</tr>
<tr>
<td/>
<td>30</td>
<td>33.27</td>
<td>1.00</td>
<td>1.00</td>
<td>1.00</td>
</tr>
<tr>
<td>Median filter (5 times)</td>
<td>[3, 3]</td>
<td>29.53</td>
<td>0.91</td>
<td>0.93</td>
<td>0.93</td>
</tr>
<tr>
<td/>
<td>[7, 7]</td>
<td>24.06</td>
<td>0.81</td>
<td>0.87</td>
<td>0.87</td>
</tr>
</tbody>
</table>
</table-wrap><fig id="fig-12">
<label>Figure 12</label>
<caption><title>Under conventional attacks. (a) Gaussian noise 5%. (e) JPEG compression 10%. (i) Median filter [7,7] (5 times). (b&#x2013;d) are all three watermarks extracted with Gaussian noise at 5%. (f&#x2013;h) are all three watermarks extracted with JPEG compression at 10%. (j&#x2013;l) are all three watermarks extracted with median filter [7,7] (5 times)</title>
</caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_36317-fig-12.tif"/>
</fig>
</sec>
<sec id="s4_3_2">
<label>4.3.2</label><title>Geometric Attacks</title>
<p>Since resistance to geometric attacks is an important part of the current need to be addressed, we tested multiple groups of attacks, including rotation, scaling, translation, and shear attacks, respectively, in this time. From <xref ref-type="table" rid="table-5">Table 5</xref>, we can see that the algorithm can effectively extract watermarks under five different geometric attacks, and their NC values are all greater than 0.5. Among them, when the rotation attack reaches 45 degrees, the NC values of all three watermarks exceed 0.8; meanwhile, when the Y-axis shear strength reaches 45%, the NC values of their extracted watermarks are 0.72, 0.72 and 0.74, respectively, and the watermark information can be effectively recovered. Therefore, the algorithm can well solve the problem of resistance to geometric attacks, especially in rotation, scaling, and translation with good robustness. <xref ref-type="fig" rid="fig-13">Fig. 13</xref> shows some experimental effects.</p>
<table-wrap id="table-5"><label>Table 5</label>
<caption><title>PSNR and NC values under geometric attacks</title>
</caption>
<table frame="hsides">
<colgroup>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
</colgroup>
<thead>
<tr>
<th>Attacks</th>
<th>Intensity</th>
<th>PSNR (dB)</th>
<th>NC1</th>
<th>NC2</th>
<th>NC3</th>
</tr>
</thead>
<tbody>
<tr>
<td>Rotation (clockwise) (&#x00B0;)</td>
<td>5</td>
<td>18.44</td>
<td>1.00</td>
<td>1.00</td>
<td>1.00</td>
</tr>
<tr>
<td/>
<td>10</td>
<td>16.30</td>
<td>1.00</td>
<td>1.00</td>
<td>1.00</td>
</tr>
<tr>
<td/>
<td>20</td>
<td>15.81</td>
<td>0.91</td>
<td>0.93</td>
<td>0.92</td>
</tr>
<tr>
<td/>
<td>40</td>
<td>14.75</td>
<td>0.87</td>
<td>0.84</td>
<td>0.86</td>
</tr>
<tr>
<td/>
<td>45</td>
<td>14.73</td>
<td>0.87</td>
<td>0.84</td>
<td>0.86</td>
</tr>
<tr>
<td>Scaling factor</td>
<td>0.2</td>
<td>&#x2014;</td>
<td>0.91</td>
<td>0.94</td>
<td>0.94</td>
</tr>
<tr>
<td/>
<td>0.5</td>
<td>&#x2014;</td>
<td>1.00</td>
<td>1.00</td>
<td>1.00</td>
</tr>
<tr>
<td/>
<td>2.0</td>
<td>&#x2014;</td>
<td>1.00</td>
<td>1.00</td>
<td>1.00</td>
</tr>
<tr>
<td/>
<td>3.0</td>
<td>&#x2014;</td>
<td>1.00</td>
<td>1.00</td>
<td>1.00</td>
</tr>
<tr>
<td>Left translation (%)</td>
<td>5</td>
<td>14.86</td>
<td>1.00</td>
<td>1.00</td>
<td>1.00</td>
</tr>
<tr>
<td/>
<td>15</td>
<td>14.02</td>
<td>1.00</td>
<td>1.00</td>
<td>1.00</td>
</tr>
<tr>
<td/>
<td>30</td>
<td>13.14</td>
<td>0.95</td>
<td>0.93</td>
<td>0.94</td>
</tr>
<tr>
<td/>
<td>40</td>
<td>12.77</td>
<td>0.86</td>
<td>0.87</td>
<td>0.88</td>
</tr>
<tr>
<td>Cropping ratio (%)&#x00A0;(Y&#x00A0;direction)</td>
<td>10</td>
<td>&#x2014;</td>
<td>1.00</td>
<td>1.00</td>
<td>1.00</td>
</tr>
<tr>
<td/>
<td>27</td>
<td>&#x2014;</td>
<td>1.00</td>
<td>1.00</td>
<td>1.00</td>
</tr>
<tr>
<td/>
<td>40</td>
<td>&#x2014;</td>
<td>0.76</td>
<td>0.81</td>
<td>0.80</td>
</tr>
<tr>
<td/>
<td>45</td>
<td>&#x2014;</td>
<td>0.72</td>
<td>0.72</td>
<td>0.74</td>
</tr>
<tr>
<td>Cropping ratio (%)&#x00A0;(X&#x00A0;direction)</td>
<td>5</td>
<td>&#x2014;</td>
<td>1.00</td>
<td>1.00</td>
<td>1.00</td>
</tr>
<tr>
<td/>
<td>15</td>
<td>&#x2014;</td>
<td>1.00</td>
<td>1.00</td>
<td>1.00</td>
</tr>
<tr>
<td/>
<td>30</td>
<td>&#x2014;</td>
<td>0.95</td>
<td>0.93</td>
<td>0.94</td>
</tr>
<tr>
<td/>
<td>40</td>
<td>&#x2014;</td>
<td>0.72</td>
<td>0.72</td>
<td>0.72</td>
</tr>
</tbody>
</table>
</table-wrap><fig id="fig-13">
<label>Figure 13</label>
<caption><title>Under geometric attacks. (a) Clockwise rotation (45&#x00B0;). (e) Left translation 30%. (i) Cropping ratio 40% (Y direction). (m) cropping ratio 30% (X direction). (b&#x2013;d) are the three watermarks extracted under clockwise rotation (45&#x00B0;). (f&#x2013;h) are the three watermarks extracted under left translation 30%. (j&#x2013;l) are the three watermarks extracted under a cropping ratio 40% (Y direction). (n&#x2013;p) are the three watermarks extracted under a cropping ratio 30% (X direction)</title>
</caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_36317-fig-13.tif"/>
</fig>
</sec>
</sec>
<sec id="s4_4">
<label>4.4</label><title>Algorithms Comparison</title>
<p>To better verify the robustness of the proposed algorithm, we compare it with several existing classical watermarking algorithms. In the experimental process, we selected the same size of brain map and text &#x201C;HN&#x201D; watermark information for testing to maintain uniformity. Then, we compared it with the existing algorithms KAZE-DCT [<xref ref-type="bibr" rid="ref-30">30</xref>], Zernike-DCT [<xref ref-type="bibr" rid="ref-31">31</xref>], Inception V3-DCT [<xref ref-type="bibr" rid="ref-32">32</xref>], and DWT-DCT. As can be seen from both <xref ref-type="table" rid="table-6">Table 6</xref> and <xref ref-type="fig" rid="fig-14">Fig. 14</xref>, the performance of the proposed algorithm is similar to that of the other four algorithms under conventional attacks; under geometric attacks, although the algorithm performs slightly worse than the DWT-DCT algorithm in downshift attacks, it significantly outperforms the existing algorithms in most geometric attacks; especially in comparison with the algorithm using pre-trained Inception V3, the proposed watermarking scheme based on GoogLeNet transfer learning has superior performance. Thus, the proposed algorithm is generally better than the existing algorithms and has good robustness under both conventional and geometric attacks.</p>
<table-wrap id="table-6">
<label>Table 6</label>
<caption><title>Comparison between different algorithms</title>
</caption>
<table frame="hsides">
<colgroup>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
</colgroup>
<thead>
<tr>
<th>Attacks</th>
<th>Intensity</th>
<th>Ref. [<xref ref-type="bibr" rid="ref-30">30</xref>]<break/>(NC1)</th>
<th>Ref. [<xref ref-type="bibr" rid="ref-31">31</xref>]<break/>(NC2)</th>
<th>Ref. [<xref ref-type="bibr" rid="ref-32">32</xref>]<break/>(NC3)</th>
<th>DWT-DCT<break/>(NC4)</th>
<th>Proposed<break/>(NC5)</th>
</tr>
</thead>
<tbody>
<tr>
<td>Gaussian noise (%)</td>
<td>2</td>
<td>0.63</td>
<td>0.94</td>
<td>0.76</td>
<td>0.95</td>
<td>0.91</td>
</tr>
<tr>
<td/>
<td>5</td>
<td>0.53</td>
<td>0.72</td>
<td>0.63</td>
<td>0.95</td>
<td>0.86</td>
</tr>
<tr>
<td/>
<td>10</td>
<td>0.41</td>
<td>0.67</td>
<td>0.59</td>
<td>0.82</td>
<td>0.78</td>
</tr>
<tr>
<td>JPEG compression (%)</td>
<td>5</td>
<td>0.69</td>
<td>0.93</td>
<td>0.77</td>
<td>0.96</td>
<td>0.91</td>
</tr>
<tr>
<td/>
<td>10</td>
<td>0.80</td>
<td>0.72</td>
<td>0.82</td>
<td>1.00</td>
<td>0.91</td>
</tr>
<tr>
<td/>
<td>15</td>
<td>0.62</td>
<td>1.00</td>
<td>0.80</td>
<td>1.00</td>
<td><bold>1.00</bold></td>
</tr>
<tr>
<td>Rotation (clockwise) (&#x00B0;)</td>
<td>3</td>
<td>0.80</td>
<td>0.91</td>
<td>0.91</td>
<td>0.88</td>
<td><bold>1.00</bold></td>
</tr>
<tr>
<td/>
<td>15</td>
<td>0.46</td>
<td>0.50</td>
<td>0.67</td>
<td>0.62</td>
<td><bold>0.91</bold></td>
</tr>
<tr>
<td/>
<td>35</td>
<td>0.66</td>
<td>0.45</td>
<td>0.58</td>
<td>0.29</td>
<td><bold>0.78</bold></td>
</tr>
<tr>
<td>Scaling factor</td>
<td>0.2</td>
<td>0.23</td>
<td>0.21</td>
<td>0.67</td>
<td>0.92</td>
<td>0.91</td>
</tr>
<tr>
<td/>
<td>0.6</td>
<td>0.54</td>
<td>0.87</td>
<td>1.00</td>
<td>0.92</td>
<td><bold>1.00</bold></td>
</tr>
<tr>
<td/>
<td>1.2</td>
<td>0.79</td>
<td>0.91</td>
<td>1.00</td>
<td>0.96</td>
<td><bold>1.00</bold></td>
</tr>
<tr>
<td>Left translation (%)</td>
<td>1</td>
<td>1.00</td>
<td>0.84</td>
<td>0.94</td>
<td>0.95</td>
<td><bold>1.00</bold></td>
</tr>
<tr>
<td/>
<td>5</td>
<td>1.00</td>
<td>0.33</td>
<td>0.91</td>
<td>0.42</td>
<td><bold>1.00</bold></td>
</tr>
<tr>
<td/>
<td>8</td>
<td>1.00</td>
<td>0.24</td>
<td>0.87</td>
<td>0.27</td>
<td><bold>1.00</bold></td>
</tr>
<tr>
<td>Downward</td>
<td>3</td>
<td>1.00</td>
<td>1.00</td>
<td>0.91</td>
<td>0.93</td>
<td>0.81</td>
</tr>
<tr>
<td>translation(%)</td>
<td>10</td>
<td>0.62</td>
<td>0.76</td>
<td>0.76</td>
<td>0.80</td>
<td>0.78</td>
</tr>
<tr>
<td>Cropping ratio (%)</td>
<td>8</td>
<td>0.88</td>
<td>0.91</td>
<td>0.91</td>
<td>0.87</td>
<td><bold>1.00</bold></td>
</tr>
<tr>
<td>(Y direction)</td>
<td>22</td>
<td>0.68</td>
<td>0.57</td>
<td>0.59</td>
<td>0.78</td>
<td><bold>1.00</bold></td>
</tr>
<tr>
<td/>
<td>40</td>
<td>0.54</td>
<td>0.45</td>
<td>0.81</td>
<td>0.54</td>
<td>0.76</td>
</tr>
</tbody>
</table>
</table-wrap><fig id="fig-14">
<label>Figure 14</label>
<caption><title>Comparison of the NC values of the proposed algorithm with the three existing algorithms under different attacks. (a) Gaussian noise attack. (b) JPEG compression attack. (c) Rotation attack (clockwise). (d) Scaling attack. (e) Left translation attack. (f) Cropping attack (Y direction)</title>
</caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_36317-fig-14.tif"/>
</fig>
</sec>
</sec>
<sec id="s5">
<label>5</label><title>Conclusions</title>
<p>In this paper, we propose a robust multi-watermarking algorithm for medical images based on GoogLeNet and Henon Map, which combines neural networks, chaotic encryption, and zero-watermarking techniques. First, transfer learning is performed on the GoogLeNet network with the constructed medical training sample set, and then the trained network is used to extract the feature vectors of medical images; meanwhile, to further improve the security of watermark information, we choose a two-dimensional chaotic Henon Map system with higher security performance to perform chaotic encryption on the watermark; furthermore, for watermark embedding and extraction, three different types of watermark information are embedded and extracted simultaneously, which can be used to test the reliability of the algorithm from multiple perspectives. Finally, the obtained feature vectors are combined with the encrypted watermark, thus completing the zero watermark embedding and blind extraction. The experimental results show that the algorithm has good robustness against both conventional and geometric attacks, especially geometric attacks, and also achieves the embedding and extraction of multiple watermark information, so the algorithm has good applicability. Of course, the algorithm still has a lot of room for improvement, and future work will focus on optimizing our feature extraction network to improve the performance of the algorithm continuously.</p>
</sec>
</body>
<back>
<ack>
<p>This work was supported in part by the Natural Science Foundation of China under Grants 62063004, the Key Research Project of Hainan Province under Grant&#x00A0;ZDYF2021SHFZ093, the Hainan Provincial Natural Science Foundation of China under Grants 2019RC018 and 619QN246, and the postdoctoral research from Zhejiang Province under Grant ZJ2021028.</p>
</ack>
<sec><title>Funding Statement</title>
<p>This work was supported in part by the <funding-source>Natural Science Foundation of China</funding-source> under Grants <award-id>62063004</award-id>, the <funding-source>Key Research Project of Hainan Province</funding-source> under Grant&#x00A0;<award-id>ZDYF2021SHFZ093</award-id>, the <funding-source>Hainan Provincial Natural Science Foundation of China</funding-source> under Grants <award-id>2019RC018</award-id> and <award-id>619QN246</award-id>, and the <funding-source>postdoctoral research from Zhejiang Province</funding-source> under Grant <award-id>ZJ2021028</award-id>.</p>
</sec>
<sec sec-type="COI-statement"><title>Conflicts of Interest</title>
<p>The authors declare that they have no conflicts of interest to report regarding the present study.</p>
</sec>
<ref-list content-type="authoryear"><title>References</title>
<ref id="ref-1"><label>[1]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>O.</given-names> <surname>Ayaad</surname></string-name>, <string-name><given-names>A.</given-names> <surname>Alloubani</surname></string-name>, <string-name><given-names>E. A.</given-names> <surname>ALhajaa</surname></string-name>, <string-name><given-names>M.</given-names> <surname>Farhan</surname></string-name>, <string-name><given-names>S.</given-names> <surname>Abuseif</surname></string-name> <etal>et al.</etal></person-group><italic>,</italic> &#x201C;<article-title>The role of electronic medical records in improving the quality of health care services: Comparative study</article-title>,&#x201D; <source>International Journal of Medical Informatics</source>, vol. <volume>127</volume>, no. <issue>1</issue>, pp. <fpage>63</fpage>&#x2013;<lpage>67</lpage>, <year>2019</year>.</mixed-citation></ref>
<ref id="ref-2"><label>[2]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>U. A.</given-names> <surname>Bhatti</surname></string-name>, <string-name><given-names>M. X.</given-names> <surname>Huang</surname></string-name>, <string-name><given-names>D.</given-names> <surname>Wu</surname></string-name>, <string-name><given-names>Y.</given-names> <surname>Zhang</surname></string-name>, <string-name><given-names>A.</given-names> <surname>Mehmood</surname></string-name> <etal>et al.</etal></person-group><italic>,</italic> &#x201C;<article-title>Recommendation system using feature extraction and pattern recognition in clinical care systems</article-title>,&#x201D; <source>Enterprise Information Systems</source>, vol. <volume>13</volume>, no. <issue>3</issue>, pp. <fpage>329</fpage>&#x2013;<lpage>351</lpage>, <year>2019</year>.</mixed-citation></ref>
<ref id="ref-3"><label>[3]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>M. J.</given-names> <surname>Vidya</surname></string-name> and <string-name><given-names>K. V.</given-names> <surname>Padmaja</surname></string-name></person-group>, &#x201C;<article-title>Enhancing security of electronic patient record using watermarking technique</article-title>,&#x201D; <source>Materials Today: Proceedings</source>, vol. <volume>5</volume>, no. <issue>4</issue>, pp. <fpage>10660</fpage>&#x2013;<lpage>10664</lpage>, <year>2018</year>.</mixed-citation></ref>
<ref id="ref-4"><label>[4]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>T. F.</given-names> <surname>Li</surname></string-name>, <string-name><given-names>J. B.</given-names> <surname>Li</surname></string-name>, <string-name><given-names>J.</given-names> <surname>Liu</surname></string-name>, <string-name><given-names>M. X.</given-names> <surname>Huang</surname></string-name>, <string-name><given-names>Y. W.</given-names> <surname>Chen</surname></string-name> <etal>et al.</etal></person-group><italic>,</italic> &#x201C;<article-title>Robust watermarking algorithm for medical images based on log-polar transform</article-title>,&#x201D; <source>EURASIP Journal on Wireless Communications and Networking</source>, vol. <volume>2022</volume>, no. <issue>1</issue>, pp. <fpage>1</fpage>&#x2013;<lpage>11</lpage>, <year>2022</year>.</mixed-citation></ref>
<ref id="ref-5"><label>[5]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>A.</given-names> <surname>Anand</surname></string-name> and <string-name><given-names>A. K.</given-names> <surname>Singh</surname></string-name></person-group>, &#x201C;<article-title>An improved DWT-SVD domain watermarking for medical information security</article-title>,&#x201D; <source>Computer Communications</source>, vol. <volume>152</volume>, no. <issue>3</issue>, pp. <fpage>72</fpage>&#x2013;<lpage>80</lpage>, <year>2020</year>.</mixed-citation></ref>
<ref id="ref-6"><label>[6]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>A. F.</given-names> <surname>Qasim</surname></string-name>, <string-name><given-names>F.</given-names> <surname>Meziane</surname></string-name> and <string-name><given-names>R.</given-names> <surname>Aspin</surname></string-name></person-group>, &#x201C;<article-title>Digital watermarking: Applicability for developing trust in medical imaging workflows state of the art review</article-title>,&#x201D; <source>Computer Science Review</source>, vol. <volume>27</volume>, pp. <fpage>45</fpage>&#x2013;<lpage>60</lpage>, <year>2018</year>.</mixed-citation></ref>
<ref id="ref-7"><label>[7]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>H.</given-names> <surname>Tao</surname></string-name>, <string-name><given-names>L.</given-names> <surname>Chongmin</surname></string-name>, <string-name><given-names>J. M.</given-names> <surname>Zain</surname></string-name> and <string-name><given-names>A. N.</given-names> <surname>Abdalla</surname></string-name></person-group>, &#x201C;<article-title>Robust image watermarking theories and techniques: A review</article-title>,&#x201D; <source>Journal of Applied Research and Technology</source>, vol. <volume>12</volume>, no. <issue>1</issue>, pp. <fpage>122</fpage>&#x2013;<lpage>138</lpage>, <year>2014</year>.</mixed-citation></ref>
<ref id="ref-8"><label>[8]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>M.</given-names> <surname>Begum</surname></string-name> and <string-name><given-names>M. S.</given-names> <surname>Uddin</surname></string-name></person-group>, &#x201C;<article-title>Digital image watermarking techniques: A review</article-title>,&#x201D; <source>Information</source>, vol. <volume>11</volume>, no. <issue>2</issue>, pp. <fpage>110</fpage>, <year>2020</year>.</mixed-citation></ref>
<ref id="ref-9"><label>[9]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>F.</given-names> <surname>Regazzoni</surname></string-name>, <string-name><given-names>P.</given-names> <surname>Palmieri</surname></string-name>, <string-name><given-names>F.</given-names> <surname>Smailbegovic</surname></string-name>, <string-name><given-names>R.</given-names> <surname>Cammarota</surname></string-name> and <string-name><given-names>I.</given-names> <surname>Polian</surname></string-name></person-group>, &#x201C;<article-title>Protecting artificial intelligence IPs: A survey of watermarking and fingerprinting for machine learning</article-title>,&#x201D; <source>CAAI Transactions on Intelligence Technology</source>, vol. <volume>6</volume>, no. <issue>2</issue>, pp. <fpage>180</fpage>&#x2013;<lpage>191</lpage>, <year>2021</year>.</mixed-citation></ref>
<ref id="ref-10"><label>[10]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>R. H.</given-names> <surname>Meng</surname></string-name>, <string-name><given-names>Q.</given-names> <surname>Cui</surname></string-name> and <string-name><given-names>C. S.</given-names> <surname>Yuan</surname></string-name></person-group>, &#x201C;<article-title>A survey of image information hiding algorithms based on deep learning</article-title>,&#x201D; <source>Computer Modeling in Engineering &#x0026; Sciences</source>, vol. <volume>117</volume>, no. <issue>2</issue>, pp. <fpage>425</fpage>&#x2013;<lpage>454</lpage>, <year>2018</year>.</mixed-citation></ref>
<ref id="ref-11"><label>[11]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>V. L.</given-names> <surname>Cu</surname></string-name>, <string-name><given-names>T.</given-names> <surname>Nguyen</surname></string-name>, <string-name><given-names>J. C.</given-names> <surname>Burie</surname></string-name> and <string-name><given-names>J. M.</given-names> <surname>Ogier</surname></string-name></person-group>, &#x201C;<article-title>A robust watermarking approach for security issue of binary documents using fully convolutional networks</article-title>,&#x201D; <source>International Journal on Document Analysis and Recognition</source>, vol. <volume>23</volume>, no. <issue>3</issue>, pp. <fpage>219</fpage>&#x2013;<lpage>239</lpage>, <year>2020</year>.</mixed-citation></ref>
<ref id="ref-12"><label>[12]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>J.</given-names> <surname>Abraham</surname></string-name> and <string-name><given-names>V.</given-names> <surname>Paul</surname></string-name></person-group>, &#x201C;<article-title>An imperceptible spatial domain color image watermarking scheme</article-title>,&#x201D; <source>Journal of King Saud University-Computer and Information Sciences</source>, vol. <volume>31</volume>, no. <issue>1</issue>, pp. <fpage>125</fpage>&#x2013;<lpage>133</lpage>, <year>2019</year>.</mixed-citation></ref>
<ref id="ref-13"><label>[13]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>A.</given-names> <surname>Tiwari</surname></string-name> and <string-name><given-names>M.</given-names> <surname>Sharma</surname></string-name></person-group>, &#x201C;<article-title>A survey of transform domain based semifragile watermarking schemes for image authentication</article-title>,&#x201D; <source>Journal of the Institution of Engineers</source>, vol. <volume>93</volume>, no. <issue>3</issue>, pp. <fpage>185</fpage>&#x2013;<lpage>191</lpage>, <year>2012</year>.</mixed-citation></ref>
<ref id="ref-14"><label>[14]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>Z. H.</given-names> <surname>Yuan</surname></string-name>, <string-name><given-names>Q. T.</given-names> <surname>Su</surname></string-name>, <string-name><given-names>D. C.</given-names> <surname>Liu</surname></string-name> and <string-name><given-names>X. T.</given-names> <surname>Zhang</surname></string-name></person-group>, &#x201C;<article-title>A blind image watermarking scheme combining spatial domain and frequency domain</article-title>,&#x201D; <source>The Visual Computer</source>, vol. <volume>37</volume>, no. <issue>7</issue>, pp. <fpage>1867</fpage>&#x2013;<lpage>1881</lpage>, <year>2021</year>.</mixed-citation></ref>
<ref id="ref-15"><label>[15]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>R. A.</given-names> <surname>Alotaibi</surname></string-name> and <string-name><given-names>L. A.</given-names> <surname>Elrefaei</surname></string-name></person-group>, &#x201C;<article-title>Text-image watermarking based on integer wavelet transform (IWT) and discrete cosine transform (DCT)</article-title>,&#x201D; <source>Applied Computing and Informatics</source>, vol. <volume>15</volume>, no. <issue>2</issue>, pp. <fpage>191</fpage>&#x2013;<lpage>202</lpage>, <year>2019</year>.</mixed-citation></ref>
<ref id="ref-16"><label>[16]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>M.</given-names> <surname>Cedillo-Hernandez</surname></string-name>, <string-name><given-names>F.</given-names> <surname>Garcia-Ugalde</surname></string-name>, <string-name><given-names>M.</given-names> <surname>Nakano-Miyatake</surname></string-name> and <string-name><given-names>H.</given-names> <surname>Perez-Meana</surname></string-name></person-group>, &#x201C;<article-title>Robust watermarking method in DFT domain for effective management of medical imaging</article-title>,&#x201D; <source>Signal, Image and Video Processing</source>, vol. <volume>9</volume>, no. <issue>5</issue>, pp. <fpage>1163</fpage>&#x2013;<lpage>1178</lpage>, <year>2015</year>.</mixed-citation></ref>
<ref id="ref-17"><label>[17]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>I.</given-names> <surname>Assini</surname></string-name>, <string-name><given-names>A.</given-names> <surname>Badri</surname></string-name>, <string-name><given-names>K.</given-names> <surname>Safi</surname></string-name>, <string-name><given-names>A.</given-names> <surname>Sahel</surname></string-name> and <string-name><given-names>A.</given-names> <surname>Baghdad</surname></string-name></person-group>, &#x201C;<article-title>A robust hybrid watermarking technique for securing medical image</article-title>,&#x201D; <source>International Journal of Intelligent Engineering and Systems</source>, vol. <volume>11</volume>, no. <issue>3</issue>, pp. <fpage>169</fpage>&#x2013;<lpage>176</lpage>, <year>2018</year>.</mixed-citation></ref>
<ref id="ref-18"><label>[18]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>K.</given-names> <surname>Hu</surname></string-name>, <string-name><given-names>X. C.</given-names> <surname>Wang</surname></string-name>, <string-name><given-names>J. P.</given-names> <surname>Hu</surname></string-name>, <string-name><given-names>H. F.</given-names> <surname>Wang</surname></string-name> and <string-name><given-names>H.</given-names> <surname>Qin</surname></string-name></person-group>, &#x201C;<article-title>A novel robust zero-watermarking algorithm for medical images</article-title>,&#x201D; <source>The Visual Computer</source>, vol. <volume>37</volume>, no. <issue>9</issue>, pp. <fpage>2841</fpage>&#x2013;<lpage>2853</lpage>, <year>2021</year>.</mixed-citation></ref>
<ref id="ref-19"><label>[19]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>B. W.</given-names> <surname>Wang</surname></string-name>, <string-name><given-names>J. W.</given-names> <surname>Shi</surname></string-name>, <string-name><given-names>W. S.</given-names> <surname>Wang</surname></string-name> and <string-name><given-names>P.</given-names> <surname>Zhao</surname></string-name></person-group>, &#x201C;<article-title>Image copyright protection based on blockchain and zero-watermark</article-title>,&#x201D; <source>IEEE Transactions on Network Science and Engineering</source>, vol. <volume>9</volume>, no. <issue>1</issue>, pp. <fpage>2188</fpage>&#x2013;<lpage>2199</lpage>, <year>2022</year>.</mixed-citation></ref>
<ref id="ref-20"><label>[20]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>S.</given-names> <surname>Sinha</surname></string-name>, <string-name><given-names>A.</given-names> <surname>Singh</surname></string-name>, <string-name><given-names>R.</given-names> <surname>Gupta</surname></string-name> and <string-name><given-names>S.</given-names> <surname>Singh</surname></string-name></person-group>, &#x201C;<article-title>Authentication and tamper detection in tele-medicine using zero watermarking</article-title>,&#x201D; <source>Procedia Computer Science</source>, vol. <volume>132</volume>, no. <issue>4</issue>, pp. <fpage>557</fpage>&#x2013;<lpage>562</lpage>, <year>2018</year>.</mixed-citation></ref>
<ref id="ref-21"><label>[21]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>J.</given-names> <surname>Liu</surname></string-name>, <string-name><given-names>J. B.</given-names> <surname>Li</surname></string-name>, <string-name><given-names>K.</given-names> <surname>Zhang</surname></string-name>, <string-name><given-names>U. A.</given-names> <surname>Bhatti</surname></string-name> and <string-name><given-names>Y.</given-names> <surname>Ai</surname></string-name></person-group>, &#x201C;<article-title>Zero-watermarking algorithm for medical images based on dual-tree complex wavelet transform and discrete cosine transform</article-title>,&#x201D; <source>Journal of Medical Imaging and Health Informatics</source>, vol. <volume>9</volume>, no. <issue>1</issue>, pp. <fpage>188</fpage>&#x2013;<lpage>194</lpage>, <year>2019</year>.</mixed-citation></ref>
<ref id="ref-22"><label>[22]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>L. X.</given-names> <surname>Cao</surname></string-name>, <string-name><given-names>Y. C.</given-names> <surname>Liang</surname></string-name>, <string-name><given-names>W.</given-names> <surname>Lv</surname></string-name>, <string-name><given-names>K.</given-names> <surname>Park</surname></string-name>, <string-name><given-names>Y.</given-names> <surname>Miura</surname></string-name> <etal>et al.</etal></person-group><italic>,</italic> &#x201C;<article-title>Relating brain structure images to personality characteristics using 3D convolution neural network</article-title>,&#x201D; <source>CAAI Transactions on Intelligence Technology</source>, vol. <volume>6</volume>, no. <issue>3</issue>, pp. <fpage>338</fpage>&#x2013;<lpage>346</lpage>, <year>2021</year>.</mixed-citation></ref>
<ref id="ref-23"><label>[23]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>S. K.</given-names> <surname>Jafarbigloo</surname></string-name> and <string-name><given-names>H.</given-names> <surname>Danyali</surname></string-name></person-group>, &#x201C;<article-title>Nuclear atypia grading in breast cancer histopathological images based on CNN feature extraction and LSTM classification</article-title>,&#x201D; <source>CAAI Transactions on Intelligence Technology</source>, vol. <volume>6</volume>, no. <issue>4</issue>, pp. <fpage>426</fpage>&#x2013;<lpage>439</lpage>, <year>2021</year>.</mixed-citation></ref>
<ref id="ref-24"><label>[24]</label><mixed-citation publication-type="conf-proc"><person-group person-group-type="author"><string-name><given-names>A.</given-names> <surname>Fierro-Radilla</surname></string-name>, <string-name><given-names>M.</given-names> <surname>Nakano-Miyatake</surname></string-name>, <string-name><given-names>M.</given-names> <surname>Cedillo-Hernandez</surname></string-name>, <string-name><given-names>L.</given-names> <surname>Cleofas-Sanchez</surname></string-name> and <string-name><given-names>H.</given-names> <surname>Perez-Meana</surname></string-name></person-group>, &#x201C;<article-title>A robust image zero-watermarking using convolutional neural networks</article-title>,&#x201D; in <conf-name>2019 7th Int. Workshop on Biometrics and Forensics</conf-name>, <publisher-loc>Cancun, Mexico</publisher-loc>, pp. <fpage>1</fpage>&#x2013;<lpage>5</lpage>, <year>2019</year>. </mixed-citation></ref>
<ref id="ref-25"><label>[25]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>B. R.</given-names> <surname>Han</surname></string-name>, <string-name><given-names>J. L.</given-names> <surname>Du</surname></string-name>, <string-name><given-names>Y. Y.</given-names> <surname>Jia</surname></string-name> and <string-name><given-names>H. Z.</given-names> <surname>Zhu</surname></string-name></person-group>, &#x201C;<article-title>Zero-watermarking algorithm for medical image based on VGG19 deep convolution neural network</article-title>,&#x201D; <source>Journal of Healthcare Engineering</source>, vol. <volume>2021</volume>, no. <issue>18</issue>, pp. <fpage>1</fpage>&#x2013;<lpage>12</lpage>, <year>2021</year>.</mixed-citation></ref>
<ref id="ref-26"><label>[26]</label><mixed-citation publication-type="conf-proc"><person-group person-group-type="author"><string-name><given-names>C.</given-names> <surname>Szegedy</surname></string-name>, <string-name><given-names>W.</given-names> <surname>Liu</surname></string-name>, <string-name><given-names>Y. Q.</given-names> <surname>Jia</surname></string-name>, <string-name><given-names>P.</given-names> <surname>Sermanet</surname></string-name> and <string-name><given-names>S.</given-names> <surname>Reed</surname></string-name></person-group>, &#x201C;<article-title>Going deeper with convolutions</article-title>,&#x201D; in <conf-name>Proc. of the IEEE Conf. on Computer Vision and Pattern Recognition</conf-name>, <publisher-loc>Boston, USA</publisher-loc>, pp. <fpage>1</fpage>&#x2013;<lpage>9</lpage>, <year>2015</year>. </mixed-citation></ref>
<ref id="ref-27"><label>[27]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>M.</given-names> <surname>H&#x00E9;non</surname></string-name></person-group>, &#x201C;<article-title>A two-dimensional mapping with a strange attractor</article-title>,&#x201D; <source>Communications in Mathematical Physics</source>, vol. <volume>50</volume>, no. <issue>1</issue>, pp. <fpage>94</fpage>&#x2013;<lpage>102</lpage>, <year>1976</year>.</mixed-citation></ref>
<ref id="ref-28"><label>[28]</label><mixed-citation publication-type="conf-proc"><person-group person-group-type="author"><string-name><given-names>S.</given-names> <surname>Kumar</surname></string-name>, <string-name><given-names>P.</given-names> <surname>Singh</surname></string-name> and <string-name><given-names>M.</given-names> <surname>Ranjan</surname></string-name></person-group>, &#x201C;<article-title>A review on deep learning based pneumonia detection systems</article-title>,&#x201D; in <conf-name>Proc. ICAIS</conf-name>, <publisher-loc>Coimbatore, India</publisher-loc>, pp. <fpage>289</fpage>&#x2013;<lpage>296</lpage>, <year>2021</year>. </mixed-citation></ref>
<ref id="ref-29"><label>[29]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>K.</given-names> <surname>Ton</surname></string-name>, <string-name><given-names>H.</given-names> <surname>Jaap</surname></string-name> and <string-name><given-names>O.</given-names> <surname>Job</surname></string-name></person-group>, &#x201C;<article-title>Issues with digital watermarking and perceptual hashing</article-title>,&#x201D; <source>Multimedia Systems and Applications IV</source>, vol. <volume>4518</volume>, pp. <fpage>189</fpage>&#x2013;<lpage>197</lpage>, <year>2001</year>.</mixed-citation></ref>
<ref id="ref-30"><label>[30]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>C.</given-names> <surname>Zeng</surname></string-name>, <string-name><given-names>J.</given-names> <surname>Liu</surname></string-name>, <string-name><given-names>J. B.</given-names> <surname>Li</surname></string-name>, <string-name><given-names>J. R.</given-names> <surname>Cheng</surname></string-name>, <string-name><given-names>J. J.</given-names> <surname>Zhou</surname></string-name> <etal>et al.</etal></person-group><italic>,</italic> &#x201C;<article-title>Multi-watermarking algorithm for medical image based on KAZE-DCT</article-title>,&#x201D; <source>Journal of Ambient Intelligence and Humanized Computing</source>, vol. <volume>32</volume>, no. <issue>9</issue>, pp. <fpage>1</fpage>&#x2013;<lpage>9</lpage>, <year>2022</year>.</mixed-citation></ref>
<ref id="ref-31"><label>[31]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>C. S.</given-names> <surname>Yang</surname></string-name>, <string-name><given-names>J. B.</given-names> <surname>Li</surname></string-name>, <string-name><given-names>U. A.</given-names> <surname>Bhatti</surname></string-name>, <string-name><given-names>J.</given-names> <surname>Liu</surname></string-name>, <string-name><given-names>J. X.</given-names> <surname>Ma</surname></string-name> <etal>et al.</etal></person-group><italic>,</italic> &#x201C;<article-title>Robust zero watermarking algorithm for medical images based on Zernike-DCT</article-title>,&#x201D; <source>Security and Communication Networks</source>, vol. <volume>2021</volume>, no. <issue>5</issue>, pp. <fpage>1</fpage>&#x2013;<lpage>8</lpage>, <year>2021</year>.</mixed-citation></ref>
<ref id="ref-32"><label>[32]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>Y.</given-names> <surname>Fan</surname></string-name>, <string-name><given-names>J. B.</given-names> <surname>Li</surname></string-name>, <string-name><given-names>U. A.</given-names> <surname>Bhatti</surname></string-name>, <string-name><given-names>C. Y.</given-names> <surname>Shao</surname></string-name>, <string-name><given-names>C.</given-names> <surname>Gong</surname></string-name> <etal>et al.</etal></person-group><italic>,</italic> &#x201C;<article-title>A multi-watermarking algorithm for medical images using inception v3 and dct</article-title>,&#x201D; <source>Computers, Materials &#x0026; Continua</source>, vol. <volume>74</volume>, no. <issue>1</issue>, pp. <fpage>1279</fpage>&#x2013;<lpage>1302</lpage>, <year>2023</year>.</mixed-citation></ref>
</ref-list>
</back>
</article>