<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.1 20151215//EN" "http://jats.nlm.nih.gov/publishing/1.1/JATS-journalpublishing1.dtd">
<article xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" article-type="research-article" dtd-version="1.1">
<front>
<journal-meta>
<journal-id journal-id-type="pmc">CSSE</journal-id>
<journal-id journal-id-type="nlm-ta">CSSE</journal-id>
<journal-id journal-id-type="publisher-id">CSSE</journal-id>
<journal-title-group>
<journal-title>Computer Systems Science &#x0026; Engineering</journal-title>
</journal-title-group><issn pub-type="ppub">0267-6192</issn>
<publisher>
<publisher-name>Tech Science Press</publisher-name>
<publisher-loc>USA</publisher-loc>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="publisher-id">14234</article-id>
<article-id pub-id-type="doi">10.32604/csse.2021.014234</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Article</subject>
</subj-group>
</article-categories>
<title-group>
<article-title>A Quantum Spatial Graph Convolutional Network for Text Classification</article-title><alt-title alt-title-type="left-running-head">A Quantum Spatial Graph Convolutional Network for Text Classification</alt-title><alt-title alt-title-type="right-running-head">A Quantum Spatial Graph Convolutional Network for Text Classification</alt-title>
</title-group>
<contrib-group content-type="authors">
<contrib id="author-1" contrib-type="author">
<name name-style="western">
<surname>Shah</surname>
<given-names>Syed Mustajar Ahmad</given-names>
</name>
<xref ref-type="aff" rid="aff-1">1</xref>
</contrib>
<contrib id="author-2" contrib-type="author" corresp="yes">
<name name-style="western">
<surname>Ge</surname>
<given-names>Hongwei</given-names>
</name>
<xref ref-type="aff" rid="aff-1">1</xref>
<email>hwge@dlut.edu.cn</email>
</contrib>
<contrib id="author-3" contrib-type="author">
<name name-style="western">
<surname>Haider</surname>
<given-names>Sami Ahmed</given-names>
</name>
<xref ref-type="aff" rid="aff-2">2</xref>
</contrib>
<contrib id="author-4" contrib-type="author">
<name name-style="western">
<surname>Irshad</surname>
<given-names>Muhammad</given-names>
</name>
<xref ref-type="aff" rid="aff-3">3</xref>
</contrib>
<contrib id="author-5" contrib-type="author">
<name name-style="western">
<surname>Noman</surname>
<given-names>Sohail M.</given-names>
</name>
<xref ref-type="aff" rid="aff-4">4</xref>
</contrib>
<contrib id="author-6" contrib-type="author">
<name name-style="western">
<surname>Arshad</surname>
<given-names>Jehangir</given-names>
</name>
<xref ref-type="aff" rid="aff-5">5</xref>
</contrib>
<contrib id="author-7" contrib-type="author">
<name name-style="western">
<surname>Ahmad</surname>
<given-names>Asfandeyar</given-names>
</name>
<xref ref-type="aff" rid="aff-6">6</xref>
</contrib>
<contrib id="author-8" contrib-type="author">
<name name-style="western">
<surname>Younas</surname>
<given-names>Talha</given-names>
</name>
<xref ref-type="aff" rid="aff-7">7</xref>
</contrib>
<aff id="aff-1">
<label>1</label><institution>College of Computer Science and Technology, Dalian University of Technology</institution>, <addr-line>Dalian, 116024</addr-line>, <country>China</country></aff>
<aff id="aff-2">
<label>2</label><institution>Department of Computing, University of Worcester</institution>, <country>UK</country></aff>
<aff id="aff-3">
<label>3</label><institution>Department of Information Engineering, Chang&#x2019;an University</institution>, <addr-line>Xi&#x2019;an, 710054</addr-line>, <country>China</country></aff>
<aff id="aff-4">
<label>4</label><institution>Department of Cell Biology and Genetics, Shantou University Medical College</institution>, <addr-line>Shantou, 515041</addr-line>, <country>China</country></aff>
<aff id="aff-5">
<label>5</label><institution>Electrical and Computer Engineering Department, COMSATS University Islamabad</institution>, <addr-line>Lahore Campus</addr-line>, <country>Pakistan</country></aff>
<aff id="aff-6">
<label>6</label><institution>School of Information &#x0026; Communication Engineering Dalian University of Technology</institution>, <addr-line>Dalian, 116024</addr-line>, <country>China</country></aff>
<aff id="aff-7">
<label>7</label><institution>Electrical and Computer Engineering Department, COMSATS University Islamabad</institution>, <addr-line>Sahiwal Campus</addr-line>, <country>Pakistan</country></aff>
</contrib-group><author-notes><corresp id="cor1">&#x002A;Corresponding Author: Hongwei Ge. Email: 
<email>hwge@dlut.edu.cn</email></corresp></author-notes>
<pub-date pub-type="epub" date-type="pub" iso-8601-date="2020-11-23">
<day>23</day>
<month>11</month>
<year iso-8601-date="2021">2021</year>
</pub-date>
<volume>36</volume>
<issue>2</issue>
<fpage>369</fpage>
<lpage>382</lpage>
<history>
<date date-type="received">
<day>08</day>
<month>9</month>
<year iso-8601-date="2020">2020</year>
</date>
<date date-type="accepted">
<day>08</day>
<month>11</month>
<year iso-8601-date="2020">2020</year>
</date>
</history>
<permissions>
<copyright-statement>&#x00A9; 2021 Shah et al.</copyright-statement>
<copyright-year>2021</copyright-year>
<copyright-holder>Shah et al.</copyright-holder>
<license xlink:href="https://creativecommons.org/licenses/by/4.0/">
<license-p>This work is licensed under a <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution 4.0 International License</ext-link>, which permits unrestricted use, distribution, and reproduction in any medium, provided the original work is properly cited.</license-p>
</license>
</permissions>
<self-uri content-type="pdf" xlink:href="TSP_CSSE_14234.pdf"></self-uri>
<abstract>
<p>The data generated from non-Euclidean domains and its graphical representation (with complex-relationship object interdependence) applications has observed an exponential growth. The sophistication of graph data has posed consequential obstacles to the existing machine learning algorithms. In this study, we have considered a revamped version of a semi-supervised learning algorithm for graph-structured data to address the issue of expanding deep learning approaches to represent the graph data. Additionally, the quantum information theory has been applied through Graph Neural Networks (GNNs) to generate Riemannian metrics in closed-form of several graph layers. In further, to pre-process the adjacency matrix of graphs, a new formulation is established to incorporate high order proximities. The proposed scheme has shown outstanding improvements to overcome the deficiencies in Graph Convolutional Network (GCN), particularly, the information loss and imprecise information representation with acceptable computational overhead. Moreover, the proposed Quantum Graph Convolutional Network (QGCN) has significantly strengthened the GCN on semi-supervised node classification tasks. In parallel, it expands the generalization process with a significant difference by making small random perturbations <inline-formula id="ieqn-1"><alternatives><inline-graphic xlink:href="ieqn-1.png"/><tex-math id="tex-ieqn-1"><![CDATA[$\Delta G$]]></tex-math><mml:math id="mml-ieqn-1"><mml:mi mathvariant="normal">&#x0394;</mml:mi><mml:mi>G</mml:mi></mml:math>
</alternatives></inline-formula> of the graph during the training process. The evaluation results are provided on three benchmark datasets, including Citeseer, Cora, and PubMed, that distinctly delineate the superiority of the proposed model in terms of computational accuracy against state-of-the-art GCN and three other methods based on the same algorithms in the existing literature.</p>
</abstract>
<kwd-group kwd-group-type="author">
<kwd>Text classification</kwd>
<kwd>deep learning</kwd>
<kwd>graph convolutional networks</kwd>
<kwd>semi-supervised learning</kwd>
<kwd>GPUs</kwd>
<kwd>performance improvements</kwd>
</kwd-group>
</article-meta>
</front>
<body>
<sec id="s1">
<label>1</label>
<title>Introduction</title>
<p>Text classification (TC) is an imperative process of Natural Language Processing (NLP) to categorize the unstructured textual data into a structured format. At present, TC has numerous applications in various domains, such as news filtering [<xref ref-type="bibr" rid="ref-1">1</xref>], spam detection [<xref ref-type="bibr" rid="ref-2">2</xref>&#x2013;<xref ref-type="bibr" rid="ref-4">4</xref>], opinion mining [<xref ref-type="bibr" rid="ref-5">5</xref>&#x2013;<xref ref-type="bibr" rid="ref-8">8</xref>], document organization, and retrieval [<xref ref-type="bibr" rid="ref-9">9</xref>,<xref ref-type="bibr" rid="ref-10">10</xref>]. TC is a widely studied subject in the field of information science and has been important since the beginning of digital text documents. The TC process can be divided into two phases i.e., preprocessing and classification [<xref ref-type="bibr" rid="ref-11">11</xref>]. The preprocessing transforms the text data into valuable features, as this step is important before the classification; in the next phase, classification has been applied to predict a classifier. A model has been built on the grounds of labeled training data and this model will predict the label of previously unlabeled test data. Feature selection and text representation are the two substantive steps in every TC task [<xref ref-type="bibr" rid="ref-12">12</xref>,<xref ref-type="bibr" rid="ref-13">13</xref>]. Conventionally, two prominent techniques are used to represent text data, i.e., bag-of-words (BoW) [<xref ref-type="bibr" rid="ref-14">14</xref>&#x2013;<xref ref-type="bibr" rid="ref-17">17</xref>] and n-grams [<xref ref-type="bibr" rid="ref-18">18</xref>,<xref ref-type="bibr" rid="ref-19">19</xref>]. In the earlier mentioned technique, a text document has been presented as a set of words with their corresponding frequencies in a document, while in the latter one, the sequence of data elements is found in a more extensive chain of data elements. 
In the context of the text, an n-gram is the sequence of <italic>n</italic> words or <italic>n</italic> characters, where <italic>n</italic> is an integer greater than zero. In the frame of reference for language comparison, an n-gram is a sequence of characters.</p>
<p>The recent progress in the domain of deep learning has offered promising avenues for researchers. Numerous Deep-Learning based methods emerged for TC. The existing reports indicated that the Deep Neural Networks (DNNs) exceptionally outperform with less demand for engineered features. DNNs can mainly be classified into two categories based on hierarchical (e.g., Convolutional Neural Networks (CNN&#x2019;s) [<xref ref-type="bibr" rid="ref-20">20</xref>&#x2013;<xref ref-type="bibr" rid="ref-25">25</xref>]) and sequential (e.g., Recurrent neural networks (RNNs)) structures. The RNNs are further divided into more categories, such as Long short-term memory (LSTM) [<xref ref-type="bibr" rid="ref-26">26</xref>&#x2013;<xref ref-type="bibr" rid="ref-28">28</xref>] and Gated Recurrent Unit (GRU) [<xref ref-type="bibr" rid="ref-29">29</xref>] based on gating mechanisms.</p>
<p>The evidence from respective cohort studies indicated that various text data retain the complex relationship, which can assist in improving classification accuracy<inline-formula id="ieqn-2"><alternatives><inline-graphic xlink:href="ieqn-2.png"/><tex-math id="tex-ieqn-2"><![CDATA[$-$]]></tex-math><mml:math id="mml-ieqn-2"><mml:mo>&#x2212;</mml:mo></mml:math>
</alternatives></inline-formula>for instance, a citation network where articles are linked via citationship. Hence, for better classification results, some graph-based algorithms can efficiently be employed to enjoy the perk of the link&#x2019;s information [<xref ref-type="bibr" rid="ref-18">18</xref>,<xref ref-type="bibr" rid="ref-30">30</xref>]. One of the pioneering works was undertaken in 2013 by Bruna et al. [<xref ref-type="bibr" rid="ref-31">31</xref>], proposing a model that utilizes spectral graph theory to develop a variant of graph convolution. Starting thereby, a broad spectrum has been observed in the development and extension of GNNs [<xref ref-type="bibr" rid="ref-20">20</xref>&#x2013;<xref ref-type="bibr" rid="ref-22">22</xref>]. The research discussed in this article contributes to the prior experimentation done by Kipf et al. [<xref ref-type="bibr" rid="ref-32">32</xref>]. The main contributions of this study are as follows:<list list-type="bullet"><list-item>
<p>The proposed method overcomes the shortcomings of previous work, with acceptable computational overhead.</p></list-item><list-item>
<p>The proposed method can significantly strengthen the semi-supervised node classification task and expand the generalization process with significant differences.</p></list-item><list-item>
<p>The QGCN scaffold the quantum information theory with GCN to generate Riemannian metrics to preprocess the adjacency matrix of graphs.</p></list-item></list></p>
<p>This paper is structured into five sections: The first section gives a brief introduction to TC. The second section examines the earlier endeavors towards TC. In the third section, a new methodology is proposed with schematic illustration, and the processing of the proposed model has been explained with the empirical demonstration. The fourth section presents the results obtained using the proposed QGCN, and their comparison with previous models such as GCN, GCNT, Fisher GCN, and Fisher GCNT. Lastly, the main findings and conclusions are summarized.</p>
</sec>
<sec id="s2">
<label>2</label>
<title>Literature Review</title>
<sec id="s2_1">
<label>2.1</label>
<title>Text Classification</title>
<p>The TC problem has been extensively addressed and implemented in various NLP domains, including databases, information collection, and data mining. In a TC problem: the analysis contains training data <inline-formula id="ieqn-3"><alternatives><inline-graphic xlink:href="ieqn-3.png"/><tex-math id="tex-ieqn-3"><![CDATA[$D = \left\{ {{D_1} \ldots \ldots {D_n}} \right\},$]]></tex-math><mml:math id="mml-ieqn-3"><mml:mi>D</mml:mi><mml:mo>&#x003D;</mml:mo><mml:mrow><mml:mo>{</mml:mo><mml:mrow><mml:mrow><mml:msub><mml:mi>D</mml:mi><mml:mn>1</mml:mn></mml:msub></mml:mrow><mml:mo>&#x2026;</mml:mo><mml:mo>&#x2026;</mml:mo><mml:mrow><mml:msub><mml:mi>D</mml:mi><mml:mi>n</mml:mi></mml:msub></mml:mrow></mml:mrow><mml:mo>}</mml:mo></mml:mrow><mml:mo>,</mml:mo></mml:math>
</alternatives></inline-formula> pre-labeled with a class <inline-formula id="ieqn-4"><alternatives><inline-graphic xlink:href="ieqn-4.png"/><tex-math id="tex-ieqn-4"><![CDATA[$C = \left\{ {{C_1},{C_2} \ldots {C_n}} \right\}$]]></tex-math><mml:math id="mml-ieqn-4"><mml:mi>C</mml:mi><mml:mo>&#x003D;</mml:mo><mml:mrow><mml:mo>{</mml:mo><mml:mrow><mml:mrow><mml:msub><mml:mi>C</mml:mi><mml:mn>1</mml:mn></mml:msub></mml:mrow><mml:mo>,</mml:mo><mml:mrow><mml:msub><mml:mi>C</mml:mi><mml:mn>2</mml:mn></mml:msub></mml:mrow><mml:mo>&#x2026;</mml:mo><mml:mrow><mml:msub><mml:mi>C</mml:mi><mml:mi>n</mml:mi></mml:msub></mml:mrow></mml:mrow><mml:mo>}</mml:mo></mml:mrow></mml:math>
</alternatives></inline-formula>. The task now is to train a classification model that can predict the appropriate class for an unknown entity <inline-formula id="ieqn-5"><alternatives><inline-graphic xlink:href="ieqn-5.png"/><tex-math id="tex-ieqn-5"><![CDATA[$L$]]></tex-math><mml:math id="mml-ieqn-5"><mml:mi>L</mml:mi></mml:math>
</alternatives></inline-formula>, which can assign one or more class labels for <inline-formula id="ieqn-6"><alternatives><inline-graphic xlink:href="ieqn-6.png"/><tex-math id="tex-ieqn-6"><![CDATA[$C$]]></tex-math><mml:math id="mml-ieqn-6"><mml:mi>C</mml:mi></mml:math>
</alternatives></inline-formula> [<xref ref-type="bibr" rid="ref-30">30</xref>,<xref ref-type="bibr" rid="ref-33">33</xref>]. The ultimate objective of TC is to train a classifier for predefined categories and enable it to predict the unknown entities [<xref ref-type="bibr" rid="ref-34">34</xref>]. Formally, we can define TC as a process of sorting documents/texts into single/multiple classes, as specified by their genre [<xref ref-type="bibr" rid="ref-35">35</xref>]. TC can mainly be categorized into two broad categories; single-label and multilabel documents. A single-label document relates to one class, whereas a multilabel document can have multiple classes.</p>
</sec>
<sec id="s2_2">
<label>2.2</label>
<title>Graph Convolutional Neural Networks</title>
<p>The existing studies under the umbrella of GNNs are derived from the seminal work of Gori et al. [<xref ref-type="bibr" rid="ref-36">36</xref>], further expounded by Micheli [<xref ref-type="bibr" rid="ref-37">37</xref>] and Scarselli et al. [<xref ref-type="bibr" rid="ref-38">38</xref>]. Numerous initial studies have utilized recurrent neural architectures to investigate the representation of a node of a target by disseminating neighborhood information. CNN&#x2019;s in the domain of computer vision were the inspiration behind the GCNs. In the past few years, various new techniques emerged to address graph phenomena. Spectral graph theory, as stated by Bruna et al. [<xref ref-type="bibr" rid="ref-31">31</xref>], presented the first prominent research on GCNs.</p>
<p>In the graph, Graph embedding [<xref ref-type="bibr" rid="ref-39">39</xref>,<xref ref-type="bibr" rid="ref-40">40</xref>] is used to capture structural similarities. Deep walk [<xref ref-type="bibr" rid="ref-39">39</xref>] makes the most of simulated localized walks in the vicinity of the node, which are then transmitted to the neural network for language modeling to build the node context. Node2Vec [<xref ref-type="bibr" rid="ref-41">41</xref>] interplays breadth and depth-first sampling techniques to combine and investigate the various neighborhood types.</p>
<p>Monti et al. [<xref ref-type="bibr" rid="ref-40">40</xref>] spread the concept of coordinates of space via learning a series of Gaussian function parameters corresponding to a certain distance towards the embedding of the node. Graph attention networks (GAT) can learn these weights through a mechanism of self-attention. Jumping knowledge networks (JKNets) [<xref ref-type="bibr" rid="ref-42">42</xref>] often concentrate on the node locality concept. JK-Nets experiments show that the notion of subgraph neighborhood varies depending on the graph topology, e.g., random walks advance at nonuniform variance in various graphs. Hence, <italic>JK</italic> &#x2212; <italic>Nets</italic> aggregates across different neighborhoods and takes into account multiple node localities.</p>
</sec>
<sec id="s2_3">
<label>2.3</label>
<title>Quantum Fisher Information Matrix</title>
<p>The Quantum information matrix (QIM) is a central aspect in theoretical quantum computation, since quantum Cram&#x00E9;r-Rao bounds are quite effective when it comes to estimating quantum parameters. After its explosive growth began, quantum technology is no longer merely theoretical but a practical technology, and has now reached every area of modern life and science. A technique based on quantum mechanics is referred to as a quantum technique and is intended to create new techniques or enhance current technology in conjunction with quantum resources/phenomena/effects.</p>
<p>Across both theoretical and practical fields, a range of features of quantum technology demonstrated a prodigious impact in leading the next industrial revolution, such as quantum computation, quantum communication, quantum metrology, and quantum cryptography. In the near term, quantum metrology has been proven to be an evolving source of practical implications among such techniques.</p>
<p>Quantum metrology is the analysis of high-resolution measurements of physical parameters with high sensitivity, employing quantum theories to understand physical structures, in particular using quantum entanglement and quantum compression. This area is expected to emerge with more precise measuring technology than the conventional measuring technology.</p>
<p>Both quantum metrology and the Cram&#x00E9;r-Rao bound use the estimation of quantum parameters, and it is evident that both methods are well-researched mathematical means of estimating the quantum parameter. In the quantum Cram&#x00E9;r-Rao bound, the quantum Fisher information and the quantum Fisher information matrix (QFIM) are the principal elements for one-parameter and multiparameter estimates [<xref ref-type="bibr" rid="ref-43">43</xref>].</p>
<p>In addition to quantum metrology, QFIM also interacts with other aspects of quantum physics, such as quantum phase and entanglement witnesses [<xref ref-type="bibr" rid="ref-44">44</xref>,<xref ref-type="bibr" rid="ref-45">45</xref>].</p>
<p>The QFIM also links to other main quantities, such as the quantum geometric tensor [<xref ref-type="bibr" rid="ref-46">46</xref>]. Therefore, a separate evaluation for QFIM in quantum mechanics should be rendered because of the main value of quantum parameter estimation. QFIM calculation techniques have evolved rapidly in various scenarios and models in recent years. However, these methods are not structurally outlined by the community.</p>
</sec>
<sec id="s2_4">
<label>2.4</label>
<title>Formulation of QFIM</title>
<p>Let the vector variable <inline-formula id="ieqn-7"><alternatives><inline-graphic xlink:href="ieqn-7.png"/><tex-math id="tex-ieqn-7"><![CDATA[$\vec x = {\left( {{x_0},{x_1}, \ldots ,{x_a}, \ldots } \right)^T}$]]></tex-math><mml:math id="mml-ieqn-7"><mml:mrow><mml:mover><mml:mi>x</mml:mi><mml:mo stretchy="false">&#x2192;</mml:mo></mml:mover></mml:mrow><mml:mo>&#x003D;</mml:mo><mml:mrow><mml:msup><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mrow><mml:msub><mml:mi>x</mml:mi><mml:mn>0</mml:mn></mml:msub></mml:mrow><mml:mo>,</mml:mo><mml:mrow><mml:msub><mml:mi>x</mml:mi><mml:mn>1</mml:mn></mml:msub></mml:mrow><mml:mo>,</mml:mo><mml:mo>&#x2026;</mml:mo><mml:mo>,</mml:mo><mml:mrow><mml:msub><mml:mi>x</mml:mi><mml:mi>a</mml:mi></mml:msub></mml:mrow><mml:mo>,</mml:mo><mml:mo>&#x2026;</mml:mo></mml:mrow><mml:mo>)</mml:mo></mml:mrow><mml:mi>T</mml:mi></mml:msup></mml:mrow></mml:math>
</alternatives></inline-formula> with <inline-formula id="ieqn-8"><alternatives><inline-graphic xlink:href="ieqn-8.png"/><tex-math id="tex-ieqn-8"><![CDATA[${x_a}$]]></tex-math><mml:math id="mml-ieqn-8"><mml:mrow><mml:msub><mml:mi>x</mml:mi><mml:mi>a</mml:mi></mml:msub></mml:mrow></mml:math>
</alternatives></inline-formula> the <inline-formula id="ieqn-9"><alternatives><inline-graphic xlink:href="ieqn-9.png"/><tex-math id="tex-ieqn-9"><![CDATA[${a^{th}}$]]></tex-math><mml:math id="mml-ieqn-9"><mml:mrow><mml:msup><mml:mi>a</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mi>h</mml:mi></mml:mrow></mml:msup></mml:mrow></mml:math>
</alternatives></inline-formula> component. <inline-formula id="ieqn-10"><alternatives><inline-graphic xlink:href="ieqn-10.png"/><tex-math id="tex-ieqn-10"><![CDATA[$\vec x$]]></tex-math><mml:math id="mml-ieqn-10"><mml:mrow><mml:mover><mml:mi>x</mml:mi><mml:mo stretchy="false">&#x2192;</mml:mo></mml:mover></mml:mrow></mml:math>
</alternatives></inline-formula> is encoded in the density matrix <inline-formula id="ieqn-11"><alternatives><inline-graphic xlink:href="ieqn-11.png"/><tex-math id="tex-ieqn-11"><![CDATA[$\rho \; = \; \rho \left( {\vec x} \right).\;$]]></tex-math><mml:math id="mml-ieqn-11"><mml:mi>&#x03C1;</mml:mi><mml:mspace width="thickmathspace"></mml:mspace><mml:mo>&#x003D;</mml:mo><mml:mspace width="thickmathspace"></mml:mspace><mml:mi>&#x03C1;</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mrow><mml:mover><mml:mi>x</mml:mi><mml:mo stretchy="false">&#x2192;</mml:mo></mml:mover></mml:mrow></mml:mrow><mml:mo>)</mml:mo></mml:mrow><mml:mo>.</mml:mo><mml:mspace width="thickmathspace"></mml:mspace></mml:math>
</alternatives></inline-formula> QFIM is represented as <inline-formula id="ieqn-12"><alternatives><inline-graphic xlink:href="ieqn-12.png"/><tex-math id="tex-ieqn-12"><![CDATA[${\rm {\cal F}}$]]></tex-math><mml:math id="mml-ieqn-12"><mml:mrow><mml:mrow><mml:mi mathvariant="script">F</mml:mi></mml:mrow></mml:mrow></mml:math>
</alternatives></inline-formula>, and entry of <inline-formula id="ieqn-13"><alternatives><inline-graphic xlink:href="ieqn-13.png"/><tex-math id="tex-ieqn-13"><![CDATA[${\rm {\cal F}}$]]></tex-math><mml:math id="mml-ieqn-13"><mml:mrow><mml:mrow><mml:mi mathvariant="script">F</mml:mi></mml:mrow></mml:mrow></mml:math>
</alternatives></inline-formula> implies as [<xref ref-type="bibr" rid="ref-44">44</xref>&#x2013;<xref ref-type="bibr" rid="ref-46">46</xref>]</p>
<p><disp-formula id="eqn-1">
<label>(1)</label><alternatives>
<graphic mimetype="image" mime-subtype="png" xlink:href="eqn-1.png"/><tex-math id="tex-eqn-1"><![CDATA[$${{\rm {\cal F}}_{ab}}: = \displaystyle{1 \over 2}{T_{ \Gamma }}\left( {\rho \left\{ {{L_a},{L_b}} \right\}} \right),$$]]></tex-math><mml:math id="mml-eqn-1"><mml:mrow><mml:msub><mml:mrow><mml:mrow><mml:mi mathvariant="script">F</mml:mi></mml:mrow></mml:mrow><mml:mrow><mml:mi>a</mml:mi><mml:mi>b</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mo>:&#x003D;</mml:mo><mml:mstyle scriptlevel="0" displaystyle="true"><mml:mrow><mml:mfrac><mml:mn>1</mml:mn><mml:mn>2</mml:mn></mml:mfrac></mml:mrow><mml:mrow><mml:msub><mml:mi>T</mml:mi><mml:mrow><mml:mi mathvariant="normal">&#x0393;</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mi>&#x03C1;</mml:mi><mml:mrow><mml:mo>{</mml:mo><mml:mrow><mml:mrow><mml:msub><mml:mi>L</mml:mi><mml:mi>a</mml:mi></mml:msub></mml:mrow><mml:mo>,</mml:mo><mml:mrow><mml:msub><mml:mi>L</mml:mi><mml:mi>b</mml:mi></mml:msub></mml:mrow></mml:mrow><mml:mo>}</mml:mo></mml:mrow></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:mstyle></mml:math>
</alternatives></disp-formula></p>
<p>where <inline-formula id="ieqn-14"><alternatives><inline-graphic xlink:href="ieqn-14.png"/><tex-math id="tex-ieqn-14"><![CDATA[$\left\{^\prime ,^\prime\right\}$]]></tex-math><mml:math id="mml-ieqn-14"><mml:mrow><mml:mo>{</mml:mo><mml:msup><mml:mi></mml:mi><mml:mi mathvariant="normal">&#x2032;</mml:mi></mml:msup><mml:msup><mml:mo>,</mml:mo><mml:mi mathvariant="normal">&#x2032;</mml:mi></mml:msup><mml:mo>}</mml:mo></mml:mrow></mml:math>
</alternatives></inline-formula> symbolizes the anti-commutation and <inline-formula id="ieqn-15"><alternatives><inline-graphic xlink:href="ieqn-15.png"/><tex-math id="tex-ieqn-15"><![CDATA[${L_a}\left( {{L_b}} \right)$]]></tex-math><mml:math id="mml-ieqn-15"><mml:mrow><mml:msub><mml:mi>L</mml:mi><mml:mi>a</mml:mi></mml:msub></mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mrow><mml:msub><mml:mi>L</mml:mi><mml:mi>b</mml:mi></mml:msub></mml:mrow></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:math>
</alternatives></inline-formula> is the symmetric logarithmic derivative (SLD) for the parameter <inline-formula id="ieqn-16"><alternatives><inline-graphic xlink:href="ieqn-16.png"/><tex-math id="tex-ieqn-16"><![CDATA[${x_a}\left( {{x_b}} \right)$]]></tex-math><mml:math id="mml-ieqn-16"><mml:mrow><mml:msub><mml:mi>x</mml:mi><mml:mi>a</mml:mi></mml:msub></mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mrow><mml:msub><mml:mi>x</mml:mi><mml:mi>b</mml:mi></mml:msub></mml:mrow></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:math>
</alternatives></inline-formula>, described by the equation <inline-formula id="ieqn-17"><alternatives><inline-graphic xlink:href="ieqn-17.png"/><tex-math id="tex-ieqn-17"><![CDATA[$\displaystyle{\partial \over {\partial {x_a}}}\left( {\displaystyle{\partial \over {\partial t}}} \right)$]]></tex-math><mml:math id="mml-ieqn-17"><mml:mstyle scriptlevel="0" displaystyle="true"><mml:mrow><mml:mfrac><mml:mi mathvariant="normal">&#x2202;</mml:mi><mml:mrow><mml:mi mathvariant="normal">&#x2202;</mml:mi><mml:mrow><mml:msub><mml:mi>x</mml:mi><mml:mi>a</mml:mi></mml:msub></mml:mrow></mml:mrow></mml:mfrac></mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mstyle scriptlevel="0" displaystyle="true"><mml:mrow><mml:mfrac><mml:mi mathvariant="normal">&#x2202;</mml:mi><mml:mrow><mml:mi mathvariant="normal">&#x2202;</mml:mi><mml:mi>t</mml:mi></mml:mrow></mml:mfrac></mml:mrow></mml:mstyle></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:mstyle></mml:math>
</alternatives></inline-formula>.</p>
<p><disp-formula id="eqn-2">
<label>(2)</label><alternatives>
<graphic mimetype="image" mime-subtype="png" xlink:href="eqn-2.png"/><tex-math id="tex-eqn-2"><![CDATA[$${\partial _{a\rho }} = \displaystyle{1 \over 2}\left( {\rho {L_a} + {L_{a\rho }}} \right)$$]]></tex-math><mml:math id="mml-eqn-2" display="block"><mml:mrow><mml:msub><mml:mi mathvariant="normal">&#x2202;</mml:mi><mml:mrow><mml:mi>a</mml:mi><mml:mi>&#x03C1;</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mo>&#x003D;</mml:mo><mml:mstyle scriptlevel="0" displaystyle="true"><mml:mrow><mml:mfrac><mml:mn>1</mml:mn><mml:mn>2</mml:mn></mml:mfrac></mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mi>&#x03C1;</mml:mi><mml:mrow><mml:msub><mml:mi>L</mml:mi><mml:mi>a</mml:mi></mml:msub></mml:mrow><mml:mo>&#x002B;</mml:mo><mml:mrow><mml:msub><mml:mi>L</mml:mi><mml:mrow><mml:mi>a</mml:mi><mml:mi>&#x03C1;</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:mstyle></mml:math>
</alternatives></disp-formula></p>
<p>The symmetric logarithmic derivative (SLD) is a Hermitian operator with the average value <inline-formula id="ieqn-18"><alternatives><inline-graphic xlink:href="ieqn-18.png"/><tex-math id="tex-ieqn-18"><![CDATA[${T_\Gamma }\left( {\rho {L_a}} \right)$]]></tex-math><mml:math id="mml-ieqn-18"><mml:mrow><mml:msub><mml:mi>T</mml:mi><mml:mi mathvariant="normal">&#x0393;</mml:mi></mml:msub></mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mi>&#x03C1;</mml:mi><mml:mrow><mml:msub><mml:mi>L</mml:mi><mml:mi>a</mml:mi></mml:msub></mml:mrow></mml:mrow><mml:mo>)</mml:mo></mml:mrow><mml:mo>.</mml:mo></mml:math>
</alternatives></inline-formula> by using the above equation, <inline-formula id="ieqn-19"><alternatives><inline-graphic xlink:href="ieqn-19.png"/><tex-math id="tex-ieqn-19"><![CDATA[${{\rm {\cal F}}_{ab}}$]]></tex-math><mml:math id="mml-ieqn-19"><mml:mrow><mml:msub><mml:mrow><mml:mrow><mml:mi mathvariant="script">F</mml:mi></mml:mrow></mml:mrow><mml:mrow><mml:mi>a</mml:mi><mml:mi>b</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:math>
</alternatives></inline-formula> can also be explained by Fujiwara et al. [<xref ref-type="bibr" rid="ref-45">45</xref>].</p>
<p><disp-formula id="eqn-3">
<label>(3)</label><alternatives>
<graphic mimetype="image" mime-subtype="png" xlink:href="eqn-3.png"/><tex-math id="tex-eqn-3"><![CDATA[$${{\rm {\cal F}}_{ab}} = {T_{ \Gamma }}\left( {{L_b}\partial a\rho } \right) = - {T_{ \Gamma }}\left( {\rho {\partial _a}{L_b}} \right)$$]]></tex-math><mml:math id="mml-eqn-3" display="block"><mml:mrow><mml:msub><mml:mrow><mml:mrow><mml:mi mathvariant="script">F</mml:mi></mml:mrow></mml:mrow><mml:mrow><mml:mi>a</mml:mi><mml:mi>b</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mo>&#x003D;</mml:mo><mml:mrow><mml:msub><mml:mi>T</mml:mi><mml:mrow><mml:mi mathvariant="normal">&#x0393;</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mrow><mml:msub><mml:mi>L</mml:mi><mml:mi>b</mml:mi></mml:msub></mml:mrow><mml:mi mathvariant="normal">&#x2202;</mml:mi><mml:mi>a</mml:mi><mml:mi>&#x03C1;</mml:mi></mml:mrow><mml:mo>)</mml:mo></mml:mrow><mml:mo>&#x003D;</mml:mo><mml:mo>&#x2212;</mml:mo><mml:mrow><mml:msub><mml:mi>T</mml:mi><mml:mrow><mml:mi mathvariant="normal">&#x0393;</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mi>&#x03C1;</mml:mi><mml:mrow><mml:msub><mml:mi mathvariant="normal">&#x2202;</mml:mi><mml:mi>a</mml:mi></mml:msub></mml:mrow><mml:mrow><mml:msub><mml:mi>L</mml:mi><mml:mi>b</mml:mi></mml:msub></mml:mrow></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:math>
</alternatives></disp-formula></p>
<p>Based on <xref ref-type="disp-formula" rid="eqn-1">Eq. (1)</xref>, the diagonal entry of the QFIM is:</p>
<p><disp-formula id="eqn-4">
<label>(4)</label><alternatives>
<graphic mimetype="image" mime-subtype="png" xlink:href="eqn-4.png"/><tex-math id="tex-eqn-4"><![CDATA[$${{\rm {\cal F}}_{ab}} = {T_{ \Gamma }}\rho L_a^2$$]]></tex-math><mml:math id="mml-eqn-4" display="block"><mml:mrow><mml:msub><mml:mrow><mml:mrow><mml:mi mathvariant="script">F</mml:mi></mml:mrow></mml:mrow><mml:mrow><mml:mi>a</mml:mi><mml:mi>b</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mo>&#x003D;</mml:mo><mml:mrow><mml:msub><mml:mi>T</mml:mi><mml:mrow><mml:mi mathvariant="normal">&#x0393;</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mi>&#x03C1;</mml:mi><mml:msubsup><mml:mi>L</mml:mi><mml:mi>a</mml:mi><mml:mn>2</mml:mn></mml:msubsup></mml:math>
</alternatives></disp-formula></p>
<p>That is precisely the parameter of <inline-formula id="ieqn-20"><alternatives><inline-graphic xlink:href="ieqn-20.png"/><tex-math id="tex-ieqn-20"><![CDATA[${x_a}$]]></tex-math><mml:math id="mml-ieqn-20"><mml:mrow><mml:msub><mml:mi>x</mml:mi><mml:mi>a</mml:mi></mml:msub></mml:mrow></mml:math>
</alternatives></inline-formula> for QFI. The Fisher Information Matrix (FIM) concept was derived from traditional statistics. For the distribution of probability <inline-formula id="ieqn-21"><alternatives><inline-graphic xlink:href="ieqn-21.png"/><tex-math id="tex-ieqn-21"><![CDATA[$p\left( {y{\rm \mid }\vec x} \right)$]]></tex-math><mml:math id="mml-ieqn-21"><mml:mi>p</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mi>y</mml:mi><mml:mrow><mml:mo>&#x2223;</mml:mo></mml:mrow><mml:mrow><mml:mover><mml:mi>x</mml:mi><mml:mo stretchy="false">&#x2192;</mml:mo></mml:mover></mml:mrow></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:math>
</alternatives></inline-formula> where <inline-formula id="ieqn-22"><alternatives><inline-graphic xlink:href="ieqn-22.png"/><tex-math id="tex-ieqn-22"><![CDATA[$p\left( {y{\rm \mid }\vec x} \right)$]]></tex-math><mml:math id="mml-ieqn-22"><mml:mi>p</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mi>y</mml:mi><mml:mrow><mml:mo>&#x2223;</mml:mo></mml:mrow><mml:mrow><mml:mover><mml:mi>x</mml:mi><mml:mo stretchy="false">&#x2192;</mml:mo></mml:mover></mml:mrow></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:math>
</alternatives></inline-formula> is the conditional probability for the outcome <inline-formula id="ieqn-23"><alternatives><inline-graphic xlink:href="ieqn-23.png"/><tex-math id="tex-ieqn-23"><![CDATA[$y$]]></tex-math><mml:math id="mml-ieqn-23"><mml:mi>y</mml:mi></mml:math>
</alternatives></inline-formula>, concerning the variable <italic>x</italic>, an entry of the FIM is presented as</p>
<p><disp-formula id="eqn-5">
<label>(5)</label><alternatives>
<graphic mimetype="image" mime-subtype="png" xlink:href="eqn-5.png"/><tex-math id="tex-eqn-5"><![CDATA[$${{\rm {\frak T}}_{ab}}\!\!: \;= \;\smallint \displaystyle{{\left[ {{\partial _a}p\left( {y{\rm \mid }\vec x} \right)} \right]\left[ {{\partial _b}p\left( {y{\rm \mid }\vec x} \right)} \right]} \over {p\left( {y{\rm \mid }\vec x} \right)}}dy$$]]></tex-math><mml:math id="mml-eqn-5" display="block"><mml:mrow><mml:msub><mml:mrow><mml:mrow><mml:mi mathvariant="fraktur">T</mml:mi></mml:mrow></mml:mrow><mml:mrow><mml:mi>a</mml:mi><mml:mi>b</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mo>:&#x003D;</mml:mo><mml:mo largeop="false">&#x222B;</mml:mo><mml:mstyle scriptlevel="0" displaystyle="true"><mml:mrow><mml:mfrac><mml:mrow><mml:mrow><mml:mo>[</mml:mo><mml:mrow><mml:mrow><mml:msub><mml:mi mathvariant="normal">&#x2202;</mml:mi><mml:mi>a</mml:mi></mml:msub></mml:mrow><mml:mi>p</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mi>y</mml:mi><mml:mrow><mml:mo>&#x2223;</mml:mo></mml:mrow><mml:mrow><mml:mover><mml:mi>x</mml:mi><mml:mo stretchy="false">&#x2192;</mml:mo></mml:mover></mml:mrow></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:mrow><mml:mo>]</mml:mo></mml:mrow><mml:mrow><mml:mo>[</mml:mo><mml:mrow><mml:mrow><mml:msub><mml:mi mathvariant="normal">&#x2202;</mml:mi><mml:mi>b</mml:mi></mml:msub></mml:mrow><mml:mi>p</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mi>y</mml:mi><mml:mrow><mml:mo>&#x2223;</mml:mo></mml:mrow><mml:mrow><mml:mover><mml:mi>x</mml:mi><mml:mo stretchy="false">&#x2192;</mml:mo></mml:mover></mml:mrow></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:mrow><mml:mo>]</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mi>p</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mi>y</mml:mi><mml:mrow><mml:mo>&#x2223;</mml:mo></mml:mrow><mml:mrow><mml:mover><mml:mi>x</mml:mi><mml:mo 
stretchy="false">&#x2192;</mml:mo></mml:mover></mml:mrow></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:mrow></mml:mfrac></mml:mrow><mml:mi>d</mml:mi><mml:mi>y</mml:mi></mml:mstyle></mml:math>
</alternatives></disp-formula></p>
<p>For discrete outcome results:</p>
<p><disp-formula id="eqn-6">
<label>(6)</label><alternatives>
<graphic mimetype="image" mime-subtype="png" xlink:href="eqn-6.png"/><tex-math id="tex-eqn-6"><![CDATA[$${\mathfrak{T}_{ab}}\!\!: \;=\; \sum_{{y}} {{{\left[ {{\partial _a}p\left( {y|\mathop x\limits^ \to } \right)} \right]\left[ {{\partial _b}p\left( {y|\mathop x\limits^ \to } \right)} \right]} \over {p\left( {y|\mathop x\limits^ \to } \right)}}}$$]]></tex-math><mml:math id="mml-eqn-6" display="block"><mml:mrow><mml:msub><mml:mrow><mml:mi mathvariant="fraktur">T</mml:mi></mml:mrow><mml:mrow><mml:mi>a</mml:mi><mml:mi>b</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mo>:&#x003D;</mml:mo><mml:munder><mml:mo movablelimits="false">&#x2211;</mml:mo><mml:mrow><mml:mrow><mml:mi mathvariant="normal">y</mml:mi></mml:mrow></mml:mrow></mml:munder><mml:mrow><mml:mrow><mml:mfrac><mml:mrow><mml:mrow><mml:mo>[</mml:mo><mml:mrow><mml:mrow><mml:msub><mml:mi mathvariant="normal">&#x2202;</mml:mi><mml:mi>a</mml:mi></mml:msub></mml:mrow><mml:mi>p</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mi>y</mml:mi><mml:mrow><mml:mo stretchy="false">|</mml:mo></mml:mrow><mml:mover><mml:mrow><mml:mi>x</mml:mi></mml:mrow><mml:mo stretchy="false">&#x2192;</mml:mo></mml:mover></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:mrow><mml:mo>]</mml:mo></mml:mrow><mml:mrow><mml:mo>[</mml:mo><mml:mrow><mml:mrow><mml:msub><mml:mi mathvariant="normal">&#x2202;</mml:mi><mml:mi>b</mml:mi></mml:msub></mml:mrow><mml:mi>p</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mi>y</mml:mi><mml:mrow><mml:mo stretchy="false">|</mml:mo></mml:mrow><mml:mover><mml:mrow><mml:mi>x</mml:mi></mml:mrow><mml:mo stretchy="false">&#x2192;</mml:mo></mml:mover></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:mrow><mml:mo>]</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mi>p</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mi>y</mml:mi><mml:mrow><mml:mo stretchy="false">|</mml:mo></mml:mrow><mml:mover><mml:mrow><mml:mi>x</mml:mi></mml:mrow><mml:mo 
stretchy="false">&#x2192;</mml:mo></mml:mover></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:mrow></mml:mfrac></mml:mrow></mml:mrow></mml:math>
</alternatives></disp-formula></p>
<p>With the development of quantum metrology [<xref ref-type="bibr" rid="ref-47">47</xref>], the classical probability distribution related FIM is usually denoted as the Classical Fisher Information Matrix (CFIM), and its diagonal components are called the Classical Fisher Information (CFI). There is no doubt that, in the quantum mechanical situation, the measurement method adopted would influence the probability distribution, and hence the CFIM. This fact indicates the CFIM is a function of the measurement. However, the QFI is not obtained until the optimized measurements are made [<xref ref-type="bibr" rid="ref-5">5</xref>], i.e., <inline-formula id="ieqn-24"><alternatives><inline-graphic xlink:href="ieqn-24.png"/><tex-math id="tex-ieqn-24"><![CDATA[${\cal F}_{aa}={\mathop {max _{\{\Pi _{y}\}}}}\,{\frak T}_{aa}\left(\rho ,\Pi_{y}\right),$]]></tex-math><mml:math id="mml-ieqn-24"><mml:msub><mml:mrow><mml:mi mathvariant="script">F</mml:mi></mml:mrow><mml:mrow><mml:mi>a</mml:mi><mml:mi>a</mml:mi></mml:mrow></mml:msub><mml:mo>&#x003D;</mml:mo><mml:mi>m</mml:mi><mml:mi>a</mml:mi><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mrow><mml:mo>{</mml:mo><mml:msub><mml:mi>&#x03A0;</mml:mi><mml:mrow><mml:mi>y</mml:mi></mml:mrow></mml:msub><mml:mo>}</mml:mo></mml:mrow></mml:mrow></mml:msub><mml:mspace width="thinmathspace"></mml:mspace><mml:msub><mml:mrow><mml:mi mathvariant="fraktur">T</mml:mi></mml:mrow><mml:mrow><mml:mi>a</mml:mi><mml:mi>a</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo>(</mml:mo><mml:mi>&#x03C1;</mml:mi><mml:mo>,</mml:mo><mml:msub><mml:mi>&#x03A0;</mml:mi><mml:mrow><mml:mi>y</mml:mi></mml:mrow></mml:msub><mml:mo>)</mml:mo></mml:mrow><mml:mo>,</mml:mo></mml:math>
</alternatives></inline-formula> where <inline-formula id="ieqn-25"><alternatives><inline-graphic xlink:href="ieqn-25.png"/><tex-math id="tex-ieqn-25"><![CDATA[${\Pi _y}$]]></tex-math><mml:math id="mml-ieqn-25"><mml:mrow><mml:msub><mml:mi mathvariant="normal">&#x03A0;</mml:mi><mml:mi>y</mml:mi></mml:msub></mml:mrow></mml:math>
</alternatives></inline-formula> represents a positive-operator valued measure (POVM); in general, the available methods fall short in estimating the QFIM. The SLD-based QFIM is one of the numerous quantum variants of the CFIM. The best-suited ones are those containing logarithmic derivatives on either the left or right side.</p>
<p>The SLD-based QFIM is not the only CFIM quantum version. Another well-used ones are centered on logarithmic derivatives from the right and left [<xref ref-type="bibr" rid="ref-48">48</xref>], defined by <inline-formula id="ieqn-26"><alternatives><inline-graphic xlink:href="ieqn-26.png"/><tex-math id="tex-ieqn-26"><![CDATA[${\partial _a}\rho = \rho {R_a}{\rm \; and\; }{\partial _a}\rho = R_a^t\rho$]]></tex-math><mml:math id="mml-ieqn-26"><mml:mrow><mml:msub><mml:mi mathvariant="normal">&#x2202;</mml:mi><mml:mi>a</mml:mi></mml:msub></mml:mrow><mml:mi>&#x03C1;</mml:mi><mml:mo>&#x003D;</mml:mo><mml:mi>&#x03C1;</mml:mi><mml:mrow><mml:msub><mml:mi>R</mml:mi><mml:mi>a</mml:mi></mml:msub></mml:mrow><mml:mrow><mml:mspace width="thickmathspace"></mml:mspace><mml:mi mathvariant="normal">a</mml:mi><mml:mi mathvariant="normal">n</mml:mi><mml:mi mathvariant="normal">d</mml:mi><mml:mspace width="thickmathspace"></mml:mspace></mml:mrow><mml:mrow><mml:msub><mml:mi mathvariant="normal">&#x2202;</mml:mi><mml:mi>a</mml:mi></mml:msub></mml:mrow><mml:mi>&#x03C1;</mml:mi><mml:mo>&#x003D;</mml:mo><mml:msubsup><mml:mi>R</mml:mi><mml:mi>a</mml:mi><mml:mi>t</mml:mi></mml:msubsup><mml:mi>&#x03C1;</mml:mi></mml:math>
</alternatives></inline-formula>, with the corresponding QFIM <inline-formula id="ieqn-27"><alternatives><inline-graphic xlink:href="ieqn-27.png"/><tex-math id="tex-ieqn-27"><![CDATA[${{\rm {\cal F}}_{ab}} = {T_{\Gamma }}\left( {\rho {R_a}R_a^t} \right)$]]></tex-math><mml:math id="mml-ieqn-27"><mml:mrow><mml:msub><mml:mrow><mml:mrow><mml:mi mathvariant="script">F</mml:mi></mml:mrow></mml:mrow><mml:mrow><mml:mi>a</mml:mi><mml:mi>b</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mo>&#x003D;</mml:mo><mml:mrow><mml:msub><mml:mi>T</mml:mi><mml:mrow><mml:mi mathvariant="normal">&#x0393;</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mi>&#x03C1;</mml:mi><mml:mrow><mml:msub><mml:mi>R</mml:mi><mml:mi>a</mml:mi></mml:msub></mml:mrow><mml:msubsup><mml:mi>R</mml:mi><mml:mi>a</mml:mi><mml:mi>t</mml:mi></mml:msubsup></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:math>
</alternatives></inline-formula>. In contrast to the SLD-based quantum variant, this QFIM is a complex Hermitian matrix. QFIMs of all the types fall under a family of Riemannian monotone metrics, as stated by Petz et al. [<xref ref-type="bibr" rid="ref-49">49</xref>] in <inline-formula id="ieqn-28"><alternatives><inline-graphic xlink:href="ieqn-28.png"/><tex-math id="tex-ieqn-28"><![CDATA[$1996$]]></tex-math><mml:math id="mml-ieqn-28"><mml:mn>1996</mml:mn></mml:math>
</alternatives></inline-formula>. Generally speaking, all QFIMs lead to Cram&#x00E9;r-Rao bound version but with different achievability. For example, D-invariant, which is based on the right algorithmic derivative approaches the goal of bound. Unlike SLD-based, which are real symmetrical, the QFIM is complex and Hermitian oriented on the left and right logarithmic derivatives. All QFIM versions are from a family of monotonous Riemannian metrics generated by Petz [<xref ref-type="bibr" rid="ref-49">49</xref>]. Both QFIMs can provide Cram&#x00E9;r-Raobound quantum versions but have different achievements. For example, only the one based on the right logarithmic derivative generates an attainable limit for D-invariant models [<xref ref-type="bibr" rid="ref-50">50</xref>]. For pure states, Fujiwara et al. [<xref ref-type="bibr" rid="ref-45">45</xref>] employed the SLD to a group via <inline-formula id="ieqn-29"><alternatives><inline-graphic xlink:href="ieqn-29.png"/><tex-math id="tex-ieqn-29"><![CDATA[${\partial _a}\rho = \displaystyle{1 \over 2}\left( {\rho {L_a} + L_a^{\rm t}\rho } \right)$]]></tex-math><mml:math id="mml-ieqn-29"><mml:mrow><mml:msub><mml:mi mathvariant="normal">&#x2202;</mml:mi><mml:mi>a</mml:mi></mml:msub></mml:mrow><mml:mi>&#x03C1;</mml:mi><mml:mo>&#x003D;</mml:mo><mml:mstyle scriptlevel="0" displaystyle="true"><mml:mrow><mml:mfrac><mml:mn>1</mml:mn><mml:mn>2</mml:mn></mml:mfrac></mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mi>&#x03C1;</mml:mi><mml:mrow><mml:msub><mml:mi>L</mml:mi><mml:mi>a</mml:mi></mml:msub></mml:mrow><mml:mo>&#x002B;</mml:mo><mml:msubsup><mml:mi>L</mml:mi><mml:mi>a</mml:mi><mml:mrow><mml:mi mathvariant="normal">t</mml:mi></mml:mrow></mml:msubsup><mml:mi>&#x03C1;</mml:mi></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:mstyle></mml:math>
</alternatives></inline-formula>; whereas <inline-formula id="ieqn-30"><alternatives><inline-graphic xlink:href="ieqn-30.png"/><tex-math id="tex-ieqn-30"><![CDATA[${L_a}$]]></tex-math><mml:math id="mml-ieqn-30"><mml:mrow><mml:msub><mml:mi>L</mml:mi><mml:mi>a</mml:mi></mml:msub></mml:mrow></mml:math>
</alternatives></inline-formula> is conditionally Hermitian, and it will roll back to the SLD.</p>
<p>A practical example of an antisymmetric logarithmic derivative is <inline-formula id="ieqn-31"><alternatives><inline-graphic xlink:href="ieqn-31.png"/><tex-math id="tex-ieqn-31"><![CDATA[$L_a^{\rm t} = - {L_a}$]]></tex-math><mml:math id="mml-ieqn-31"><mml:msubsup><mml:mi>L</mml:mi><mml:mi>a</mml:mi><mml:mrow><mml:mi mathvariant="normal">t</mml:mi></mml:mrow></mml:msubsup><mml:mo>&#x003D;</mml:mo><mml:mo>&#x2212;</mml:mo><mml:mrow><mml:msub><mml:mi>L</mml:mi><mml:mi>a</mml:mi></mml:msub></mml:mrow></mml:math>
</alternatives></inline-formula>.</p>
<p>The arbitrary size input graph <inline-formula id="ieqn-32"><alternatives><inline-graphic xlink:href="ieqn-32.png"/><tex-math id="tex-ieqn-32"><![CDATA[${G_j}\left( {{V_j};{E_j}} \right)$]]></tex-math><mml:math id="mml-ieqn-32"><mml:mrow><mml:msub><mml:mi>G</mml:mi><mml:mi>j</mml:mi></mml:msub></mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mrow><mml:msub><mml:mi>V</mml:mi><mml:mi>j</mml:mi></mml:msub></mml:mrow><mml:mo>;</mml:mo><mml:mrow><mml:msub><mml:mi>E</mml:mi><mml:mi>j</mml:mi></mml:msub></mml:mrow></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:math>
</alternatives></inline-formula> in <inline-formula id="ieqn-33"><alternatives><inline-graphic xlink:href="ieqn-33.png"/><tex-math id="tex-ieqn-33"><![CDATA[$G$]]></tex-math><mml:math id="mml-ieqn-33"><mml:mi>G</mml:mi></mml:math>
</alternatives></inline-formula> is first compared with the prototype graph <inline-formula id="ieqn-34"><alternatives><inline-graphic xlink:href="ieqn-34.png"/><tex-math id="tex-ieqn-34"><![CDATA[${G_k}\left( {{V_k};{E_k}} \right)$]]></tex-math><mml:math id="mml-ieqn-34"><mml:mrow><mml:msub><mml:mi>G</mml:mi><mml:mi>k</mml:mi></mml:msub></mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mrow><mml:msub><mml:mi>V</mml:mi><mml:mi>k</mml:mi></mml:msub></mml:mrow><mml:mo>;</mml:mo><mml:mrow><mml:msub><mml:mi>E</mml:mi><mml:mi>k</mml:mi></mml:msub></mml:mrow></mml:mrow><mml:mo>)</mml:mo></mml:mrow><mml:mo>.</mml:mo></mml:math>
</alternatives></inline-formula> Then <inline-formula id="ieqn-35"><alternatives><inline-graphic xlink:href="ieqn-35.png"/><tex-math id="tex-ieqn-35"><![CDATA[${G_j}$]]></tex-math><mml:math id="mml-ieqn-35"><mml:mrow><mml:msub><mml:mi>G</mml:mi><mml:mi>j</mml:mi></mml:msub></mml:mrow></mml:math>
</alternatives></inline-formula> is encoded into a fixed, aligned vertex grid structure, where the vertex orders are preceded by the <inline-formula id="ieqn-36"><alternatives><inline-graphic xlink:href="ieqn-36.png"/><tex-math id="tex-ieqn-36"><![CDATA[${G_k}$]]></tex-math><mml:math id="mml-ieqn-36"><mml:mrow><mml:msub><mml:mi>G</mml:mi><mml:mi>k</mml:mi></mml:msub></mml:mrow></mml:math>
</alternatives></inline-formula> structure. The grid structure of <inline-formula id="ieqn-37"><alternatives><inline-graphic xlink:href="ieqn-37.png"/><tex-math id="tex-ieqn-37"><![CDATA[${G_j}$]]></tex-math><mml:math id="mml-ieqn-37"><mml:mrow><mml:msub><mml:mi>G</mml:mi><mml:mi>j</mml:mi></mml:msub></mml:mrow></mml:math>
</alternatives></inline-formula> is carried through multiple quantum GCN layers to collect multi-scale vertex features, where information on the vertex is propagated among selected vertices after the average mixing matrix. While the GCN layers maintain the original vertex structure orders, the concatenated vertex features form a new grid structure for <inline-formula id="ieqn-38"><alternatives><inline-graphic xlink:href="ieqn-38.png"/><tex-math id="tex-ieqn-38"><![CDATA[${G_j}$]]></tex-math><mml:math id="mml-ieqn-38"><mml:mrow><mml:msub><mml:mi>G</mml:mi><mml:mi>j</mml:mi></mml:msub></mml:mrow></mml:math>
</alternatives></inline-formula> through the graph convolution layers. This grid structure is then moved to a standard CNN layer to learn a classification method. Note that vertex features are displayed in various colors.</p>
</sec>
</sec>
<sec id="s3">
<label>3</label>
<title>Methodology</title>
<sec id="s3_1">
<label>3.1</label>
<title>Quantum GCN Architecture</title>
<p>The proposed model pipeline is depicted in <xref ref-type="fig" rid="fig-1">Fig. 1</xref>. Two major modules make up the QGCN architecture. The first module is a data handler that collects input data from different sources and preprocesses the data to be passed on to the Quantum GCN module. The QGCN module takes the input graphs and assigns them to the corresponding class labels. Cora, Citeseer, and PubMed are preprocessed weighted datasets that we acquired from Yang et al. [<xref ref-type="bibr" rid="ref-51">51</xref>]. Graph data is the graph metadata of the vertex and the edge, and attributes of the vertex typically contain attributes within each vertex. Then, vector attributes were embedded at the graph vertices. Instead of categorical and text variables, we preprocess all graph data and vertex attributes to translate the data to real values, then transfer the attributed weighted graph dataset to the QGCN module. The QGCN module receives the weighted attributed graph data set as input and learns the labeled graph data and distribution of the vertex attributes to classify the labels of the graphs not seen. We transfer the associated weighted graph dataset to the module Quantum GCN. The Quantum GCN module receives the weighted attributed graph dataset as input, along with the prelabeled graph details and the distribution of the vertex attributes, to identify the labels of the graphs that are unknown. The Quantum GCN module contains a series of convolution layers followed by a pooling layer based on the specified depth of the proposed neural network followed by a prediction layer. Based on the classification problem, the prediction layer predicts the probability of inputs belonging to the corresponding class labels.</p>
<fig id="fig-1">
<label>Figure 1</label>
<caption>
<title>Graphical structure of the proposed model QGCN</title>
</caption>
<graphic mimetype="image" mime-subtype="png" xlink:href="fig-1.png"/>
</fig>
</sec>
<sec id="s3_2">
<label>3.2</label>
<title>Problem Formulation</title>
<p>As we know, a graph is directly handled by the multi-layered neural network; in other words, the induction of node vectors is positioned as per neighboring properties. Consider a graph <inline-formula id="ieqn-39"><alternatives><inline-graphic xlink:href="ieqn-39.png"/><tex-math id="tex-ieqn-39"><![CDATA[$G\; = \; \left( {V,E} \right)$]]></tex-math><mml:math id="mml-ieqn-39"><mml:mi>G</mml:mi><mml:mspace width="thickmathspace"></mml:mspace><mml:mo>&#x003D;</mml:mo><mml:mspace width="thickmathspace"></mml:mspace><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mi>V</mml:mi><mml:mo>,</mml:mo><mml:mi>E</mml:mi></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:math>
</alternatives></inline-formula>, where the set of edges is <inline-formula id="ieqn-40"><alternatives><inline-graphic xlink:href="ieqn-40.png"/><tex-math id="tex-ieqn-40"><![CDATA[$E$]]></tex-math><mml:math id="mml-ieqn-40"><mml:mi>E</mml:mi></mml:math>
</alternatives></inline-formula>, and the set of nodes is <inline-formula id="ieqn-41"><alternatives><inline-graphic xlink:href="ieqn-41.png"/><tex-math id="tex-ieqn-41"><![CDATA[$V\left( {\left| V \right|\; = \; n} \right)$]]></tex-math><mml:math id="mml-ieqn-41"><mml:mi>V</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mrow><mml:mo>|</mml:mo><mml:mi>V</mml:mi><mml:mo>|</mml:mo></mml:mrow><mml:mspace width="thickmathspace"></mml:mspace><mml:mo>&#x003D;</mml:mo><mml:mspace width="thickmathspace"></mml:mspace><mml:mi>n</mml:mi></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:math>
</alternatives></inline-formula>. Every node is assumed to be connected to itself, i.e., <inline-formula id="ieqn-42"><alternatives><inline-graphic xlink:href="ieqn-42.png"/><tex-math id="tex-ieqn-42"><![CDATA[$\left( {v,v} \right)\; \epsilon \; E$]]></tex-math><mml:math id="mml-ieqn-42"><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mi>v</mml:mi><mml:mo>,</mml:mo><mml:mi>v</mml:mi></mml:mrow><mml:mo>)</mml:mo></mml:mrow><mml:mspace width="thickmathspace"></mml:mspace><mml:mi>&#x03B5;</mml:mi><mml:mspace width="thickmathspace"></mml:mspace><mml:mi>E</mml:mi></mml:math>
</alternatives></inline-formula> for any <inline-formula id="ieqn-43"><alternatives><inline-graphic xlink:href="ieqn-43.png"/><tex-math id="tex-ieqn-43"><![CDATA[$v$]]></tex-math><mml:math id="mml-ieqn-43"><mml:mi>v</mml:mi></mml:math>
</alternatives></inline-formula>. Let <inline-formula id="ieqn-44"><alternatives><inline-graphic xlink:href="ieqn-44.png"/><tex-math id="tex-ieqn-44"><![CDATA[$X\epsilon {R^{n \times m}}$]]></tex-math><mml:math id="mml-ieqn-44"><mml:mi>X</mml:mi><mml:mi>&#x03B5;</mml:mi><mml:mrow><mml:msup><mml:mi>R</mml:mi><mml:mrow><mml:mi>n</mml:mi><mml:mo>&#x00D7;</mml:mo><mml:mi>m</mml:mi></mml:mrow></mml:msup></mml:mrow></mml:math>
</alternatives></inline-formula> be a matrix consisting of all <inline-formula id="ieqn-45"><alternatives><inline-graphic xlink:href="ieqn-45.png"/><tex-math id="tex-ieqn-45"><![CDATA[$n$]]></tex-math><mml:math id="mml-ieqn-45"><mml:mi>n</mml:mi></mml:math>
</alternatives></inline-formula> nodes with their features, where <inline-formula id="ieqn-46"><alternatives><inline-graphic xlink:href="ieqn-46.png"/><tex-math id="tex-ieqn-46"><![CDATA[$m$]]></tex-math><mml:math id="mml-ieqn-46"><mml:mi>m</mml:mi></mml:math>
</alternatives></inline-formula> is the dimension of the feature vectors, and each row <inline-formula id="ieqn-47"><alternatives><inline-graphic xlink:href="ieqn-47.png"/><tex-math id="tex-ieqn-47"><![CDATA[${x_v}\epsilon {R^m}$]]></tex-math><mml:math id="mml-ieqn-47"><mml:mrow><mml:msub><mml:mi>x</mml:mi><mml:mi>v</mml:mi></mml:msub></mml:mrow><mml:mi>&#x03B5;</mml:mi><mml:mrow><mml:msup><mml:mi>R</mml:mi><mml:mi>m</mml:mi></mml:msup></mml:mrow></mml:math>
</alternatives></inline-formula> is the feature vector for <inline-formula id="ieqn-48"><alternatives><inline-graphic xlink:href="ieqn-48.png"/><tex-math id="tex-ieqn-48"><![CDATA[$v$]]></tex-math><mml:math id="mml-ieqn-48"><mml:mi>v</mml:mi></mml:math>
</alternatives></inline-formula>. We introduce an adjacency matrix <inline-formula id="ieqn-49"><alternatives><inline-graphic xlink:href="ieqn-49.png"/><tex-math id="tex-ieqn-49"><![CDATA[$A$]]></tex-math><mml:math id="mml-ieqn-49"><mml:mi>A</mml:mi></mml:math>
</alternatives></inline-formula> of <inline-formula id="ieqn-50"><alternatives><inline-graphic xlink:href="ieqn-50.png"/><tex-math id="tex-ieqn-50"><![CDATA[$G$]]></tex-math><mml:math id="mml-ieqn-50"><mml:mi>G</mml:mi></mml:math>
</alternatives></inline-formula> and its degree matrix <inline-formula id="ieqn-51"><alternatives><inline-graphic xlink:href="ieqn-51.png"/><tex-math id="tex-ieqn-51"><![CDATA[$D$]]></tex-math><mml:math id="mml-ieqn-51"><mml:mi>D</mml:mi></mml:math>
</alternatives></inline-formula>, where <inline-formula id="ieqn-52"><alternatives><inline-graphic xlink:href="ieqn-52.png"/><tex-math id="tex-ieqn-52"><![CDATA[${D_{ii}} = \sum\limits_j {A_{ij}}$]]></tex-math><mml:math id="mml-ieqn-52"><mml:msub><mml:mi>D</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo>&#x003D;</mml:mo><mml:mstyle displaystyle="true"><mml:munder><mml:mo>&#x2211;</mml:mo><mml:mrow><mml:mi>j</mml:mi></mml:mrow></mml:munder></mml:mstyle><mml:mspace width="thinmathspace"></mml:mspace><mml:msub><mml:mi>A</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mi>j</mml:mi></mml:mrow></mml:msub></mml:math>
</alternatives></inline-formula>. Because of self-loops, the <inline-formula id="ieqn-53"><alternatives><inline-graphic xlink:href="ieqn-53.png"/><tex-math id="tex-ieqn-53"><![CDATA[$A$]]></tex-math><mml:math id="mml-ieqn-53"><mml:mi>A</mml:mi></mml:math>
</alternatives></inline-formula> diagonal elements are set to <inline-formula id="ieqn-54"><alternatives><inline-graphic xlink:href="ieqn-54.png"/><tex-math id="tex-ieqn-54"><![CDATA[$1$]]></tex-math><mml:math id="mml-ieqn-54"><mml:mn>1</mml:mn></mml:math>
</alternatives></inline-formula>. With only one convolution layer, a GCN can collect information solely from its immediate neighbors; if several GCN layers are stacked, information from larger neighborhoods is integrated. For a one-layer GCN, the new k-dimensional node feature matrix <inline-formula id="ieqn-55"><alternatives><inline-graphic xlink:href="ieqn-55.png"/><tex-math id="tex-ieqn-55"><![CDATA[${L^{\left( 1 \right)}}\epsilon {\mathbb R}{^{n \times k}}$]]></tex-math><mml:math id="mml-ieqn-55"><mml:mrow><mml:msup><mml:mi>L</mml:mi><mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mn>1</mml:mn><mml:mo>)</mml:mo></mml:mrow></mml:mrow></mml:msup></mml:mrow><mml:mi>&#x03B5;</mml:mi><mml:mrow><mml:mi mathvariant="double-struck">R</mml:mi></mml:mrow><mml:mrow><mml:msup><mml:mi></mml:mi><mml:mrow><mml:mi>n</mml:mi><mml:mo>&#x00D7;</mml:mo><mml:mi>k</mml:mi></mml:mrow></mml:msup></mml:mrow></mml:math>
</alternatives></inline-formula> is computed as:</p>
<p><disp-formula id="eqn-7">
<label>(7)</label><alternatives>
<graphic mimetype="image" mime-subtype="png" xlink:href="eqn-7.png"/><tex-math id="tex-eqn-7"><![CDATA[$${L^{\left( 1 \right)}} = \rho \left( {\tilde AX{W_0}} \right)$$]]></tex-math><mml:math id="mml-eqn-7" display="block"><mml:mrow><mml:msup><mml:mi>L</mml:mi><mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mn>1</mml:mn><mml:mo>)</mml:mo></mml:mrow></mml:mrow></mml:msup></mml:mrow><mml:mo>&#x003D;</mml:mo><mml:mi>&#x03C1;</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mrow><mml:mover><mml:mi>A</mml:mi><mml:mo stretchy="false">&#x007E;</mml:mo></mml:mover></mml:mrow><mml:mi>X</mml:mi><mml:mrow><mml:msub><mml:mi>W</mml:mi><mml:mn>0</mml:mn></mml:msub></mml:mrow></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:math>
</alternatives></disp-formula></p>
<p>where <inline-formula id="ieqn-56"><alternatives><inline-graphic xlink:href="ieqn-56.png"/><tex-math id="tex-ieqn-56"><![CDATA[$\tilde A = {D^{-{1 \over 2}}}A{D^{-{1 \over 2}}}\;$]]></tex-math><mml:math id="mml-ieqn-56"><mml:mrow><mml:mover><mml:mi>A</mml:mi><mml:mo stretchy="false">&#x007E;</mml:mo></mml:mover></mml:mrow><mml:mo>&#x003D;</mml:mo><mml:mrow><mml:msup><mml:mi>D</mml:mi><mml:mrow><mml:mo>&#x2212;</mml:mo><mml:mstyle scriptlevel="0" displaystyle="true"><mml:mrow><mml:mfrac><mml:mn>1</mml:mn><mml:mn>2</mml:mn></mml:mfrac></mml:mrow></mml:mstyle></mml:mrow></mml:msup></mml:mrow><mml:mi>A</mml:mi><mml:mrow><mml:msup><mml:mi>D</mml:mi><mml:mrow><mml:mo>&#x2212;</mml:mo><mml:mstyle scriptlevel="0" displaystyle="true"><mml:mrow><mml:mfrac><mml:mn>1</mml:mn><mml:mn>2</mml:mn></mml:mfrac></mml:mrow></mml:mstyle></mml:mrow></mml:msup></mml:mrow><mml:mspace width="thickmathspace"></mml:mspace></mml:math>
</alternatives></inline-formula> is the normalized symmetric adjacency matrix and <inline-formula id="ieqn-57"><alternatives><inline-graphic xlink:href="ieqn-57.png"/><tex-math id="tex-ieqn-57"><![CDATA[${W_0}\epsilon {R^{m \times k}}$]]></tex-math><mml:math id="mml-ieqn-57"><mml:mrow><mml:msub><mml:mi>W</mml:mi><mml:mn>0</mml:mn></mml:msub></mml:mrow><mml:mi>&#x03B5;</mml:mi><mml:mrow><mml:msup><mml:mi>R</mml:mi><mml:mrow><mml:mi>m</mml:mi><mml:mo>&#x00D7;</mml:mo><mml:mi>k</mml:mi></mml:mrow></mml:msup></mml:mrow></mml:math>
</alternatives></inline-formula> is a weight matrix. <inline-formula id="ieqn-58"><alternatives><inline-graphic xlink:href="ieqn-58.png"/><tex-math id="tex-ieqn-58"><![CDATA[$P$]]></tex-math><mml:math id="mml-ieqn-58"><mml:mi>P</mml:mi></mml:math>
</alternatives></inline-formula> is an activation function, e.g., a <inline-formula id="ieqn-59"><alternatives><inline-graphic xlink:href="ieqn-59.png"/><tex-math id="tex-ieqn-59"><![CDATA[$ReLU\; \rho \left( x \right) = \; max\left( {0,x} \right)$]]></tex-math><mml:math id="mml-ieqn-59"><mml:mi>R</mml:mi><mml:mi>e</mml:mi><mml:mi>L</mml:mi><mml:mi>U</mml:mi><mml:mspace width="thickmathspace"></mml:mspace><mml:mi>&#x03C1;</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>x</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mo>&#x003D;</mml:mo><mml:mspace width="thickmathspace"></mml:mspace><mml:mi>m</mml:mi><mml:mi>a</mml:mi><mml:mi>x</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mn>0</mml:mn><mml:mo>,</mml:mo><mml:mi>x</mml:mi></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:math>
</alternatives></inline-formula>. Stacking multiple GCN layers assists in incorporating higher-order neighborhood information:</p>
<p><disp-formula id="eqn-8">
<label>(8)</label><alternatives>
<graphic mimetype="image" mime-subtype="png" xlink:href="eqn-8.png"/><tex-math id="tex-eqn-8"><![CDATA[$${L^{\left( {j + 1} \right)}} = \rho \left( {\tilde A{L^{\left( j \right)}}{W_j}} \right)$$]]></tex-math><mml:math id="mml-eqn-8" display="block"><mml:mrow><mml:msup><mml:mi>L</mml:mi><mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mi>j</mml:mi><mml:mo>&#x002B;</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:mrow></mml:msup></mml:mrow><mml:mo>&#x003D;</mml:mo><mml:mi>&#x03C1;</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mrow><mml:mover><mml:mi>A</mml:mi><mml:mo stretchy="false">&#x007E;</mml:mo></mml:mover></mml:mrow><mml:mrow><mml:msup><mml:mi>L</mml:mi><mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mi>j</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:mrow></mml:msup></mml:mrow><mml:mrow><mml:msub><mml:mi>W</mml:mi><mml:mi>j</mml:mi></mml:msub></mml:mrow></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:math>
</alternatives></disp-formula></p>
<p>where <inline-formula id="ieqn-60"><alternatives><inline-graphic xlink:href="ieqn-60.png"/><tex-math id="tex-ieqn-60"><![CDATA[$j$]]></tex-math><mml:math id="mml-ieqn-60"><mml:mi>j</mml:mi></mml:math>
</alternatives></inline-formula> denotes the layer number and <inline-formula id="ieqn-61"><alternatives><inline-graphic xlink:href="ieqn-61.png"/><tex-math id="tex-ieqn-61"><![CDATA[${L^{\left( 0 \right)}} = X$]]></tex-math><mml:math id="mml-ieqn-61"><mml:mrow><mml:msup><mml:mi>L</mml:mi><mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mn>0</mml:mn><mml:mo>)</mml:mo></mml:mrow></mml:mrow></mml:msup></mml:mrow><mml:mo>&#x003D;</mml:mo><mml:mi>X</mml:mi></mml:math>
</alternatives></inline-formula>.</p>
<p>The objective of learning [<xref ref-type="bibr" rid="ref-48">48</xref>] is to make predictions that are robust to graph perturbations by solving the following minimax optimization problem.</p>
<p><disp-formula id="eqn-9">
<label>(9)</label><alternatives>
<graphic mimetype="image" mime-subtype="png" xlink:href="eqn-9.png"/><tex-math id="tex-eqn-9"><![CDATA[$$\mathop {min}\limits_w \,\mathop {max }\limits_\varphi - {1 \over {NM}}\sum\nolimits_{i = 1}^N {\sum\nolimits_{j = 1}^N {\log } } p\left( {{Y_i}\mid {X_i},A\left( {\phi \left( {\varphi ,{\epsilon _j}} \right)} \right),W} \right)$$]]></tex-math><mml:math id="mml-eqn-9" display="block"><mml:munder><mml:mrow><mml:mi>m</mml:mi><mml:mi>i</mml:mi><mml:mi>n</mml:mi></mml:mrow><mml:mi>w</mml:mi></mml:munder><mml:mspace width="thinmathspace"></mml:mspace><mml:munder><mml:mrow><mml:mi>m</mml:mi><mml:mi>a</mml:mi><mml:mi>x</mml:mi></mml:mrow><mml:mi>&#x03C6;</mml:mi></mml:munder><mml:mo>&#x2212;</mml:mo><mml:mrow><mml:mfrac><mml:mn>1</mml:mn><mml:mrow><mml:mi>N</mml:mi><mml:mi>M</mml:mi></mml:mrow></mml:mfrac></mml:mrow><mml:msubsup><mml:mo movablelimits="false">&#x2211;</mml:mo><mml:mrow><mml:mi>i</mml:mi><mml:mo>&#x003D;</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mi>N</mml:mi></mml:msubsup><mml:mrow><mml:msubsup><mml:mo movablelimits="false">&#x2211;</mml:mo><mml:mrow><mml:mi>j</mml:mi><mml:mo>&#x003D;</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mi>N</mml:mi></mml:msubsup><mml:mrow><mml:mi>log</mml:mi></mml:mrow></mml:mrow><mml:mi>p</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mrow><mml:msub><mml:mi>Y</mml:mi><mml:mi>i</mml:mi></mml:msub></mml:mrow><mml:mo>&#x2223;</mml:mo><mml:mrow><mml:msub><mml:mi>X</mml:mi><mml:mi>i</mml:mi></mml:msub></mml:mrow><mml:mo>,</mml:mo><mml:mi>A</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mi>&#x03D5;</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mi>&#x03C6;</mml:mi><mml:mo>,</mml:mo><mml:mrow><mml:msub><mml:mi>&#x03B5;</mml:mi><mml:mi>j</mml:mi></mml:msub></mml:mrow></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:mrow><mml:mo>)</mml:mo></mml:mrow><mml:mo>,</mml:mo><mml:mi>W</mml:mi></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:math>
</alternatives></disp-formula></p>
<p>Here, <inline-formula id="ieqn-62"><alternatives><inline-graphic xlink:href="ieqn-62.png"/><tex-math id="tex-ieqn-62"><![CDATA[$M$]]></tex-math><mml:math id="mml-ieqn-62"><mml:mi>M</mml:mi></mml:math>
</alternatives></inline-formula> represents the total number of perturbations (e.g., <inline-formula id="ieqn-63"><alternatives><inline-graphic xlink:href="ieqn-63.png"/><tex-math id="tex-ieqn-63"><![CDATA[$M\; = \; 5$]]></tex-math><mml:math id="mml-ieqn-63"><mml:mi>M</mml:mi><mml:mspace width="thickmathspace"></mml:mspace><mml:mo>&#x003D;</mml:mo><mml:mspace width="thickmathspace"></mml:mspace><mml:mn>5</mml:mn></mml:math>
</alternatives></inline-formula>), which is similar to the GAN training procedure. <xref ref-type="disp-formula" rid="eqn-9">Eq. (9)</xref> alternately updates <inline-formula id="ieqn-64"><alternatives><inline-graphic xlink:href="ieqn-64.png"/><tex-math id="tex-ieqn-64"><![CDATA[$\varphi$]]></tex-math><mml:math id="mml-ieqn-64"><mml:mi>&#x03C6;</mml:mi></mml:math>
</alternatives></inline-formula> along &#x2207;<inline-formula id="ieqn-65"><alternatives><inline-graphic xlink:href="ieqn-65.png"/><tex-math id="tex-ieqn-65"><![CDATA[$\varphi$]]></tex-math><mml:math id="mml-ieqn-65"><mml:mi mathvariant="normal">&#x2207;</mml:mi><mml:mi>&#x03C6;</mml:mi></mml:math>
</alternatives></inline-formula>, the gradient to <inline-formula id="ieqn-66"><alternatives><inline-graphic xlink:href="ieqn-66.png"/><tex-math id="tex-ieqn-66"><![CDATA[$\varphi$]]></tex-math><mml:math id="mml-ieqn-66"><mml:mi>&#x03C6;</mml:mi></mml:math>
</alternatives></inline-formula>. Moreover, the optimization problem can be solved by alternately updating <inline-formula id="ieqn-67"><alternatives><inline-graphic xlink:href="ieqn-67.png"/><tex-math id="tex-ieqn-67"><![CDATA[$W$]]></tex-math><mml:math id="mml-ieqn-67"><mml:mi>W</mml:mi></mml:math>
</alternatives></inline-formula> w.r.t. &#x2212;&#x2207;<inline-formula id="ieqn-68"><alternatives><inline-graphic xlink:href="ieqn-68.png"/><tex-math id="tex-ieqn-68"><![CDATA[$W$]]></tex-math><mml:math id="mml-ieqn-68"><mml:mo>&#x2212;</mml:mo><mml:mi mathvariant="normal">&#x2207;</mml:mi><mml:mi>W</mml:mi></mml:math>
</alternatives></inline-formula>. Before proceeding to compute the density matrix and to avoid the numerical instability caused by multiple multiplications, <inline-formula id="ieqn-69"><alternatives><inline-graphic xlink:href="ieqn-69.png"/><tex-math id="tex-ieqn-69"><![CDATA[$\rho \left( A \right)\;$]]></tex-math><mml:math id="mml-ieqn-69"><mml:mi>&#x03C1;</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>A</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mspace width="thickmathspace"></mml:mspace></mml:math>
</alternatives></inline-formula> matrix should be normalized first. The correction term, as in <xref ref-type="disp-formula" rid="eqn-10">Eq. (10)</xref>, is computed to avoid a sparse <inline-formula id="ieqn-70"><alternatives><inline-graphic xlink:href="ieqn-70.png"/><tex-math id="tex-ieqn-70"><![CDATA[$\rho \left( A \right)$]]></tex-math><mml:math id="mml-ieqn-70"><mml:mi>&#x03C1;</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>A</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:math>
</alternatives></inline-formula> term. Moreover, <inline-formula id="ieqn-71"><alternatives><inline-graphic xlink:href="ieqn-71.png"/><tex-math id="tex-ieqn-71"><![CDATA[$W$]]></tex-math><mml:math id="mml-ieqn-71"><mml:mi>W</mml:mi></mml:math>
</alternatives></inline-formula> contains most of the free parameters. When compared with the GCN, the eigen-decomposition of the sparse matrix needs to be solved only once, before training; the eigenvectors multiply the computational cost of training by a factor of <inline-formula id="ieqn-72"><alternatives><inline-graphic xlink:href="ieqn-72.png"/><tex-math id="tex-ieqn-72"><![CDATA[$M$]]></tex-math><mml:math id="mml-ieqn-72"><mml:mi>M</mml:mi></mml:math>
</alternatives></inline-formula>.</p>
<p><disp-formula id="eqn-10">
<label>(10)</label><alternatives>
<graphic mimetype="image" mime-subtype="png" xlink:href="eqn-10.png"/><tex-math id="tex-eqn-10"><![CDATA[$$\left[ {{{\bar U}_{n \times k}}{\rm diag}\left( {\displaystyle{{{\rm exp}\left( {\bar \theta + \phi } \right)} \over {{1^{\rm \top }}{\rm exp}\left( {\bar \theta + \phi } \right)}} - \bar \lambda } \right)\bar U_{k \times n}^T} \right]{X_{m \times D}}$$]]></tex-math><mml:math id="mml-eqn-10" display="block"><mml:mrow><mml:mo>[</mml:mo><mml:mrow><mml:mrow><mml:msub><mml:mrow><mml:mrow><mml:mover><mml:mi>U</mml:mi><mml:mo stretchy="false">&#x00AF;</mml:mo></mml:mover></mml:mrow></mml:mrow><mml:mrow><mml:mi>n</mml:mi><mml:mo>&#x00D7;</mml:mo><mml:mi>k</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mrow><mml:mi mathvariant="normal">d</mml:mi><mml:mi mathvariant="normal">i</mml:mi><mml:mi mathvariant="normal">a</mml:mi><mml:mi mathvariant="normal">g</mml:mi></mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mstyle scriptlevel="0" displaystyle="true"><mml:mrow><mml:mfrac><mml:mrow><mml:mrow><mml:mi mathvariant="normal">e</mml:mi><mml:mi mathvariant="normal">x</mml:mi><mml:mi mathvariant="normal">p</mml:mi></mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mrow><mml:mover><mml:mi>&#x03B8;</mml:mi><mml:mo stretchy="false">&#x00AF;</mml:mo></mml:mover></mml:mrow><mml:mo>&#x002B;</mml:mo><mml:mi>&#x03D5;</mml:mi></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mrow><mml:msup><mml:mn>1</mml:mn><mml:mrow><mml:mi mathvariant="normal">&#x22A4;</mml:mi></mml:mrow></mml:msup></mml:mrow><mml:mrow><mml:mi mathvariant="normal">e</mml:mi><mml:mi mathvariant="normal">x</mml:mi><mml:mi mathvariant="normal">p</mml:mi></mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mrow><mml:mover><mml:mi>&#x03B8;</mml:mi><mml:mo 
stretchy="false">&#x00AF;</mml:mo></mml:mover></mml:mrow><mml:mo>&#x002B;</mml:mo><mml:mi>&#x03D5;</mml:mi></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:mrow></mml:mfrac></mml:mrow><mml:mo>&#x2212;</mml:mo><mml:mrow><mml:mover><mml:mi>&#x03BB;</mml:mi><mml:mo stretchy="false">&#x00AF;</mml:mo></mml:mover></mml:mrow></mml:mstyle></mml:mrow><mml:mo>)</mml:mo></mml:mrow><mml:msubsup><mml:mrow><mml:mover><mml:mi>U</mml:mi><mml:mo stretchy="false">&#x00AF;</mml:mo></mml:mover></mml:mrow><mml:mrow><mml:mi>k</mml:mi><mml:mo>&#x00D7;</mml:mo><mml:mi>n</mml:mi></mml:mrow><mml:mi>T</mml:mi></mml:msubsup></mml:mrow><mml:mo>]</mml:mo></mml:mrow><mml:mrow><mml:msub><mml:mi>X</mml:mi><mml:mrow><mml:mi>m</mml:mi><mml:mo>&#x00D7;</mml:mo><mml:mi>D</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:math>
</alternatives></disp-formula></p>
<p>That can be resolved in <inline-formula id="ieqn-73"><alternatives><inline-graphic xlink:href="ieqn-73.png"/><tex-math id="tex-ieqn-73"><![CDATA[$O\left( {{k_n}D} \right)$]]></tex-math><mml:math id="mml-ieqn-73"><mml:mi>O</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mrow><mml:msub><mml:mi>k</mml:mi><mml:mi>n</mml:mi></mml:msub></mml:mrow><mml:mi>D</mml:mi></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:math>
</alternatives></inline-formula> time effectively. This computation cost can be overlooked if <inline-formula id="ieqn-74"><alternatives><inline-graphic xlink:href="ieqn-74.png"/><tex-math id="tex-ieqn-74"><![CDATA[$k$]]></tex-math><mml:math id="mml-ieqn-74"><mml:mi>k</mml:mi></mml:math>
</alternatives></inline-formula> is minimal (without increasing the complexity overall), <inline-formula id="ieqn-75"><alternatives><inline-graphic xlink:href="ieqn-75.png"/><tex-math id="tex-ieqn-75"><![CDATA[${A_X}$]]></tex-math><mml:math id="mml-ieqn-75"><mml:mrow><mml:msub><mml:mi>A</mml:mi><mml:mi>X</mml:mi></mml:msub></mml:mrow></mml:math>
</alternatives></inline-formula> computation has the complexity of <inline-formula id="ieqn-76"><alternatives><inline-graphic xlink:href="ieqn-76.png"/><tex-math id="tex-ieqn-76"><![CDATA[$O\left( {md} \right)$]]></tex-math><mml:math id="mml-ieqn-76"><mml:mi>O</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mi>m</mml:mi><mml:mi>d</mml:mi></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:math>
</alternatives></inline-formula>. Where the number of links is <inline-formula id="ieqn-77"><alternatives><inline-graphic xlink:href="ieqn-77.png"/><tex-math id="tex-ieqn-77"><![CDATA[$m$]]></tex-math><mml:math id="mml-ieqn-77"><mml:mi>m</mml:mi></mml:math>
</alternatives></inline-formula>. In short, under similar parameters and complexity, QGCN is slower than GCN. Based on a randomly perturbed graph, the QGCN can be understood as running multiple GCNs in parallel, which leads to the implementation of our perturbed QGCN.</p>
</sec>
<sec id="s3_3">
<label>3.3</label>
<title>Calculating Quantum GCN</title>
<p>The main purpose of the proposed research is to present a node-based graph convolution layer to extract multi-scale vertex features and to spread each vertex's information progressively towards its nearby vertices and to the vertex itself. This typically requires the transmission of information between each vertex and its surrounding vertices. To extract richer vertex features from the proposed graph convolutional layer, we proposed QIM in terms of the GCN. The design of our proposed model has two sequential stages: firstly, the structure and input layer of the grid; secondly, the graph convolution layer in the QIM. In particular, the grid structure and input layer maps the arbitrary size graphs to grid structures in a fixed size, along with the aligned vertex grid structures and corresponding vertex matrices of the adjacent matrix, and integrates the matrix structures in the current QGCN model. By propagating vertex information in terms of a QFIM between the aligned grid vertices, the quantum information matrix graph convolution layer further extracts the multi-scale vertex features. Because the extracted vertex features from the graph convolution layer retain the input grid structure properties, the extracted vertex characteristics can be read, and the graph class can be predicted.</p>
<p>Mapping graphs of different sizes to a fixed-size grid structure yields aligned vertices and the related adjacency matrix of the corresponding fixed size. Suppose <inline-formula id="ieqn-78"><alternatives><inline-graphic xlink:href="ieqn-78.png"/><tex-math id="tex-ieqn-78"><![CDATA[$G = \left\{ {{G_1};{G_2} \ldots {G_n}} \right\}\; \epsilon \; G$]]></tex-math><mml:math id="mml-ieqn-78"><mml:mi>G</mml:mi><mml:mo>&#x003D;</mml:mo><mml:mrow><mml:mo>{</mml:mo><mml:mrow><mml:mrow><mml:msub><mml:mi>G</mml:mi><mml:mn>1</mml:mn></mml:msub></mml:mrow><mml:mo>;</mml:mo><mml:mrow><mml:msub><mml:mi>G</mml:mi><mml:mn>2</mml:mn></mml:msub></mml:mrow><mml:mo>&#x2026;</mml:mo><mml:mrow><mml:msub><mml:mi>G</mml:mi><mml:mi>n</mml:mi></mml:msub></mml:mrow></mml:mrow><mml:mo>}</mml:mo></mml:mrow><mml:mspace width="thickmathspace"></mml:mspace><mml:mi>&#x03B5;</mml:mi><mml:mspace width="thickmathspace"></mml:mspace><mml:mi>G</mml:mi></mml:math>
</alternatives></inline-formula> represents a set of graphs. <inline-formula id="ieqn-79"><alternatives><inline-graphic xlink:href="ieqn-79.png"/><tex-math id="tex-ieqn-79"><![CDATA[${G_p}\left( {{V_p};{E_p};{A_p}} \right)\epsilon G$]]></tex-math><mml:math id="mml-ieqn-79"><mml:mrow><mml:msub><mml:mi>G</mml:mi><mml:mi>p</mml:mi></mml:msub></mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mrow><mml:msub><mml:mi>V</mml:mi><mml:mi>p</mml:mi></mml:msub></mml:mrow><mml:mo>;</mml:mo><mml:mrow><mml:msub><mml:mi>E</mml:mi><mml:mi>p</mml:mi></mml:msub></mml:mrow><mml:mo>;</mml:mo><mml:mrow><mml:msub><mml:mi>A</mml:mi><mml:mi>p</mml:mi></mml:msub></mml:mrow></mml:mrow><mml:mo>)</mml:mo></mml:mrow><mml:mi>&#x03B5;</mml:mi><mml:mi>G</mml:mi></mml:math>
</alternatives></inline-formula> is a sample graph from <inline-formula id="ieqn-80"><alternatives><inline-graphic xlink:href="ieqn-80.png"/><tex-math id="tex-ieqn-80"><![CDATA[$G$]]></tex-math><mml:math id="mml-ieqn-80"><mml:mi>G</mml:mi></mml:math>
</alternatives></inline-formula>, with <inline-formula id="ieqn-81"><alternatives><inline-graphic xlink:href="ieqn-81.png"/><tex-math id="tex-ieqn-81"><![CDATA[${V_p}$]]></tex-math><mml:math id="mml-ieqn-81"><mml:mrow><mml:msub><mml:mi>V</mml:mi><mml:mi>p</mml:mi></mml:msub></mml:mrow></mml:math>
</alternatives></inline-formula> representing the vertex set, <inline-formula id="ieqn-82"><alternatives><inline-graphic xlink:href="ieqn-82.png"/><tex-math id="tex-ieqn-82"><![CDATA[${E_p}$]]></tex-math><mml:math id="mml-ieqn-82"><mml:mrow><mml:msub><mml:mi>E</mml:mi><mml:mi>p</mml:mi></mml:msub></mml:mrow></mml:math>
</alternatives></inline-formula> representing the edge set, and <inline-formula id="ieqn-83"><alternatives><inline-graphic xlink:href="ieqn-83.png"/><tex-math id="tex-ieqn-83"><![CDATA[${A_p}$]]></tex-math><mml:math id="mml-ieqn-83"><mml:mrow><mml:msub><mml:mi>A</mml:mi><mml:mi>p</mml:mi></mml:msub></mml:mrow></mml:math>
</alternatives></inline-formula> representing the vertex adjacency matrix. Suppose each vertex <inline-formula id="ieqn-84"><alternatives><inline-graphic xlink:href="ieqn-84.png"/><tex-math id="tex-ieqn-84"><![CDATA[${v_p}\epsilon {V_p}$]]></tex-math><mml:math id="mml-ieqn-84"><mml:mrow><mml:msub><mml:mi>v</mml:mi><mml:mi>p</mml:mi></mml:msub></mml:mrow><mml:mi>&#x03B5;</mml:mi><mml:mrow><mml:msub><mml:mi>V</mml:mi><mml:mi>p</mml:mi></mml:msub></mml:mrow></mml:math>
</alternatives></inline-formula> is represented as a c-dimensional feature vector, the feature information of all <inline-formula id="ieqn-85"><alternatives><inline-graphic xlink:href="ieqn-85.png"/><tex-math id="tex-ieqn-85"><![CDATA[$n$]]></tex-math><mml:math id="mml-ieqn-85"><mml:mi>n</mml:mi></mml:math>
</alternatives></inline-formula> vertices in <inline-formula id="ieqn-86"><alternatives><inline-graphic xlink:href="ieqn-86.png"/><tex-math id="tex-ieqn-86"><![CDATA[${V_p}$]]></tex-math><mml:math id="mml-ieqn-86"><mml:mrow><mml:msub><mml:mi>V</mml:mi><mml:mi>p</mml:mi></mml:msub></mml:mrow></mml:math>
</alternatives></inline-formula> can be denoted by a <inline-formula id="ieqn-87"><alternatives><inline-graphic xlink:href="ieqn-87.png"/><tex-math id="tex-ieqn-87"><![CDATA[$n \times c$]]></tex-math><mml:math id="mml-ieqn-87"><mml:mi>n</mml:mi><mml:mo>&#x00D7;</mml:mo><mml:mi>c</mml:mi></mml:math>
</alternatives></inline-formula> matrix <inline-formula id="ieqn-88"><alternatives><inline-graphic xlink:href="ieqn-88.png"/><tex-math id="tex-ieqn-88"><![CDATA[${X_p},{\rm \; i.e.},{X_p}\epsilon {R^{n \times c}}$]]></tex-math><mml:math id="mml-ieqn-88"><mml:mrow><mml:msub><mml:mi>X</mml:mi><mml:mi>p</mml:mi></mml:msub></mml:mrow><mml:mo>,</mml:mo><mml:mrow><mml:mspace width="thickmathspace"></mml:mspace><mml:mi mathvariant="normal">i</mml:mi><mml:mo>.</mml:mo><mml:mi mathvariant="normal">e</mml:mi><mml:mo>.</mml:mo></mml:mrow><mml:mo>,</mml:mo><mml:mrow><mml:msub><mml:mi>X</mml:mi><mml:mi>p</mml:mi></mml:msub></mml:mrow><mml:mi>&#x03B5;</mml:mi><mml:mrow><mml:msup><mml:mi>R</mml:mi><mml:mrow><mml:mi>n</mml:mi><mml:mo>&#x00D7;</mml:mo><mml:mi>c</mml:mi></mml:mrow></mml:msup></mml:mrow></mml:math>
</alternatives></inline-formula>. It should be noted that the row of <inline-formula id="ieqn-89"><alternatives><inline-graphic xlink:href="ieqn-89.png"/><tex-math id="tex-ieqn-89"><![CDATA[${X_p}$]]></tex-math><mml:math id="mml-ieqn-89"><mml:mrow><mml:msub><mml:mi>X</mml:mi><mml:mi>p</mml:mi></mml:msub></mml:mrow></mml:math>
</alternatives></inline-formula> follows the same vertex order of <inline-formula id="ieqn-90"><alternatives><inline-graphic xlink:href="ieqn-90.png"/><tex-math id="tex-ieqn-90"><![CDATA[${A_p}$]]></tex-math><mml:math id="mml-ieqn-90"><mml:mrow><mml:msub><mml:mi>A</mml:mi><mml:mi>p</mml:mi></mml:msub></mml:mrow></mml:math>
</alternatives></inline-formula>. If the graphs in <inline-formula id="ieqn-91"><alternatives><inline-graphic xlink:href="ieqn-91.png"/><tex-math id="tex-ieqn-91"><![CDATA[$G$]]></tex-math><mml:math id="mml-ieqn-91"><mml:mi>G</mml:mi></mml:math>
</alternatives></inline-formula> are vertex attributed graphs, <inline-formula id="ieqn-92"><alternatives><inline-graphic xlink:href="ieqn-92.png"/><tex-math id="tex-ieqn-92"><![CDATA[${X_p}$]]></tex-math><mml:math id="mml-ieqn-92"><mml:mrow><mml:msub><mml:mi>X</mml:mi><mml:mi>p</mml:mi></mml:msub></mml:mrow></mml:math>
</alternatives></inline-formula> can be the one-hot encoding matrix of the vertex labels.</p>
<p>Sun et al. [<xref ref-type="bibr" rid="ref-52">52</xref>] stated that the canonical parameterization could be derived in a closed-form with the following equation:</p>
<p><disp-formula id="eqn-11">
<label>(11)</label><alternatives>
<graphic mimetype="image" mime-subtype="png" xlink:href="eqn-11.png"/><tex-math id="tex-eqn-11"><![CDATA[$$d{s^2} = {1 \over 2}\sum\nolimits_{j = 1}^n \sum\nolimits_{k = 1}^n {{{{\left( {u_j^Td\rho {u_k}} \right)}^2}} \over {{\lambda _j} + {\lambda _k}}}$$]]></tex-math><mml:math id="mml-eqn-11" display="block"><mml:mi>d</mml:mi><mml:msup><mml:mi>s</mml:mi><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msup><mml:mo>&#x003D;</mml:mo><mml:mfrac><mml:mn>1</mml:mn><mml:mn>2</mml:mn></mml:mfrac><mml:mstyle displaystyle="true"><mml:msubsup><mml:mo>&#x2211;</mml:mo><mml:mrow><mml:mi>j</mml:mi><mml:mo>&#x003D;</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:msubsup></mml:mstyle><mml:mspace width="thinmathspace"></mml:mspace><mml:mstyle displaystyle="true"><mml:msubsup><mml:mo>&#x2211;</mml:mo><mml:mrow><mml:mi>k</mml:mi><mml:mo>&#x003D;</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:msubsup></mml:mstyle><mml:mspace width="thinmathspace"></mml:mspace><mml:mfrac><mml:msup><mml:mrow><mml:mo>(</mml:mo><mml:msubsup><mml:mi>u</mml:mi><mml:mrow><mml:mi>j</mml:mi></mml:mrow><mml:mrow><mml:mi>T</mml:mi></mml:mrow></mml:msubsup><mml:mi>d</mml:mi><mml:mi>&#x03C1;</mml:mi><mml:msub><mml:mi>u</mml:mi><mml:mrow><mml:mi>k</mml:mi></mml:mrow></mml:msub><mml:mo>)</mml:mo></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msup><mml:mrow><mml:msub><mml:mi>&#x03BB;</mml:mi><mml:mrow><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:mo>&#x002B;</mml:mo><mml:msub><mml:mi>&#x03BB;</mml:mi><mml:mrow><mml:mi>k</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:mfrac></mml:math>
</alternatives></disp-formula></p>
<p>However,</p>
<p><disp-formula id="eqn-12">
<label>(12)</label><alternatives>
<graphic mimetype="image" mime-subtype="png" xlink:href="eqn-12.png"/><tex-math id="tex-eqn-12"><![CDATA[$$ds^2=\rho G$$]]></tex-math><mml:math id="mml-eqn-12"><mml:mi>d</mml:mi><mml:msup><mml:mi>s</mml:mi><mml:mn>2</mml:mn></mml:msup><mml:mo>&#x003D;</mml:mo><mml:mi>&#x03C1;</mml:mi><mml:mi>G</mml:mi></mml:math>
</alternatives></disp-formula></p>
<p>A constant scaling of the edge weights results in <italic>ds</italic><sup>2</sup> &#x003D; 0 because the density matrix does not vary; hence, either <inline-formula id="ieqn-93"><alternatives><inline-graphic xlink:href="ieqn-93.png"/><tex-math id="tex-ieqn-93"><![CDATA[$\rho$]]></tex-math><mml:math id="mml-ieqn-93"><mml:mi>&#x03C1;</mml:mi></mml:math>
</alternatives></inline-formula> or <inline-formula id="ieqn-94"><alternatives><inline-graphic xlink:href="ieqn-94.png"/><tex-math id="tex-ieqn-94"><![CDATA[$G$]]></tex-math><mml:math id="mml-ieqn-94"><mml:mi>G</mml:mi></mml:math>
</alternatives></inline-formula> must be zero; since <inline-formula id="ieqn-95"><alternatives><inline-graphic xlink:href="ieqn-95.png"/><tex-math id="tex-ieqn-95"><![CDATA[$\rho$]]></tex-math><mml:math id="mml-ieqn-95"><mml:mi>&#x03C1;</mml:mi></mml:math>
</alternatives></inline-formula> is the density, it can never be zero; hence, <inline-formula id="ieqn-96"><alternatives><inline-graphic xlink:href="ieqn-96.png"/><tex-math id="tex-ieqn-96"><![CDATA[$G\; = \; 0$]]></tex-math><mml:math id="mml-ieqn-96"><mml:mi>G</mml:mi><mml:mspace width="thickmathspace"></mml:mspace><mml:mo>&#x003D;</mml:mo><mml:mspace width="thickmathspace"></mml:mspace><mml:mn>0</mml:mn></mml:math>
</alternatives></inline-formula> &#x21D2; <italic>&#x03C1;</italic> &#x2260; 0, <italic>G</italic> &#x003D; 0</p>
<p>Now the density matrix can be defined as</p>
<p><disp-formula id="eqn-13">
<label>(13)</label><alternatives>
<graphic mimetype="image" mime-subtype="png" xlink:href="eqn-13.png"/><tex-math id="tex-eqn-13"><![CDATA[$$\rho = \sum\nolimits_{l = 1}^n {\lambda _i}{u_i}u_i^T$$]]></tex-math><mml:math id="mml-eqn-13" display="block"><mml:mi>&#x03C1;</mml:mi><mml:mo>&#x003D;</mml:mo><mml:mstyle displaystyle="true"><mml:msubsup><mml:mo>&#x2211;</mml:mo><mml:mrow><mml:mi>l</mml:mi><mml:mo>&#x003D;</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:msubsup></mml:mstyle><mml:mspace width="thinmathspace"></mml:mspace><mml:msub><mml:mi>&#x03BB;</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:msub><mml:mi>u</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:msubsup><mml:mi>u</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow><mml:mrow><mml:mi>T</mml:mi></mml:mrow></mml:msubsup></mml:math>
</alternatives></disp-formula></p>
<p>Multiplying the above equation by <inline-formula id="ieqn-97"><alternatives><inline-graphic xlink:href="ieqn-97.png"/><tex-math id="tex-ieqn-97"><![CDATA[$\rho$]]></tex-math><mml:math id="mml-ieqn-97"><mml:mi>&#x03C1;</mml:mi></mml:math>
</alternatives></inline-formula>, we can achieve</p>
<p><disp-formula id="eqn-14">
<label>(14)</label><alternatives>
<graphic mimetype="image" mime-subtype="png" xlink:href="eqn-14.png"/><tex-math id="tex-eqn-14"><![CDATA[$${\rho ^2} = \sum\nolimits_{l = 1}^n {\lambda _i}\rho {u_i}u_i^T$$]]></tex-math><mml:math id="mml-eqn-14" display="block"><mml:msup><mml:mi>&#x03C1;</mml:mi><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msup><mml:mo>&#x003D;</mml:mo><mml:mstyle displaystyle="true"><mml:msubsup><mml:mo>&#x2211;</mml:mo><mml:mrow><mml:mi>l</mml:mi><mml:mo>&#x003D;</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:msubsup></mml:mstyle><mml:mspace width="thinmathspace"></mml:mspace><mml:msub><mml:mi>&#x03BB;</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mi>&#x03C1;</mml:mi><mml:msub><mml:mi>u</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:msubsup><mml:mi>u</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow><mml:mrow><mml:mi>T</mml:mi></mml:mrow></mml:msubsup></mml:math>
</alternatives></disp-formula></p>
<p>By taking the derivative, we get</p>
<p><disp-formula id="eqn-15">
<label>(15)</label><alternatives>
<graphic mimetype="image" mime-subtype="png" xlink:href="eqn-15.png"/><tex-math id="tex-eqn-15"><![CDATA[$$\eqalign{ 2\rho d\rho = \sum\nolimits_{l = 1}^n {\lambda _i}\rho {u_i}u_i^T \cr  \Rightarrow {{2\rho d\rho } \over {{\lambda _i}}} = \sum\nolimits_{l = 1}^n {u_i}d\rho u_i^T \cr}$$]]></tex-math><mml:math id="mml-eqn-15" display="block"><mml:mtable columnspacing="1em" rowspacing="4pt" columnalign="center center"><mml:mtr><mml:mtd></mml:mtd><mml:mtd><mml:mn>2</mml:mn><mml:mi>&#x03C1;</mml:mi><mml:mi>d</mml:mi><mml:mi>&#x03C1;</mml:mi><mml:mo>&#x003D;</mml:mo><mml:mstyle displaystyle="true"><mml:msubsup><mml:mo>&#x2211;</mml:mo><mml:mrow><mml:mi>l</mml:mi><mml:mo>&#x003D;</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:msubsup></mml:mstyle><mml:mspace width="thinmathspace"></mml:mspace><mml:msub><mml:mi>&#x03BB;</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mi>&#x03C1;</mml:mi><mml:msub><mml:mi>u</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:msubsup><mml:mi>u</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow><mml:mrow><mml:mi>T</mml:mi></mml:mrow></mml:msubsup></mml:mtd></mml:mtr><mml:mtr><mml:mtd></mml:mtd><mml:mtd><mml:mo stretchy="false">&#x21D2;</mml:mo><mml:mfrac><mml:mrow><mml:mn>2</mml:mn><mml:mi>&#x03C1;</mml:mi><mml:mi>d</mml:mi><mml:mi>&#x03C1;</mml:mi></mml:mrow><mml:msub><mml:mi>&#x03BB;</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub></mml:mfrac><mml:mo>&#x003D;</mml:mo><mml:mstyle displaystyle="true"><mml:msubsup><mml:mo>&#x2211;</mml:mo><mml:mrow><mml:mi>l</mml:mi><mml:mo>&#x003D;</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:msubsup></mml:mstyle><mml:mspace 
width="thinmathspace"></mml:mspace><mml:msub><mml:mi>u</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mi>d</mml:mi><mml:mi>&#x03C1;</mml:mi><mml:msubsup><mml:mi>u</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow><mml:mrow><mml:mi>T</mml:mi></mml:mrow></mml:msubsup></mml:mtd></mml:mtr></mml:mtable></mml:math>
</alternatives></disp-formula></p>
<p>Or it can also be written as</p>
<p><disp-formula id="eqn-16">
<label>(16)</label><alternatives>
<graphic mimetype="image" mime-subtype="png" xlink:href="eqn-16.png"/><tex-math id="tex-eqn-16"><![CDATA[$$\sum\nolimits_{l = 1}^n {\left( {u_j^Td\rho {u_k}} \right)^2} = {\left( {{{2\rho d\rho } \over {{\lambda _i}}}} \right)^2} = \beta$$]]></tex-math><mml:math id="mml-eqn-16" display="block"><mml:mstyle displaystyle="true"><mml:msubsup><mml:mo>&#x2211;</mml:mo><mml:mrow><mml:mi>l</mml:mi><mml:mo>&#x003D;</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:msubsup></mml:mstyle><mml:mspace width="thinmathspace"></mml:mspace><mml:msup><mml:mrow><mml:mo>(</mml:mo><mml:msubsup><mml:mi>u</mml:mi><mml:mrow><mml:mi>j</mml:mi></mml:mrow><mml:mrow><mml:mi>T</mml:mi></mml:mrow></mml:msubsup><mml:mi>d</mml:mi><mml:mi>&#x03C1;</mml:mi><mml:msub><mml:mi>u</mml:mi><mml:mrow><mml:mi>k</mml:mi></mml:mrow></mml:msub><mml:mo>)</mml:mo></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msup><mml:mo>&#x003D;</mml:mo><mml:msup><mml:mrow><mml:mo>(</mml:mo><mml:mfrac><mml:mrow><mml:mn>2</mml:mn><mml:mi>&#x03C1;</mml:mi><mml:mi>d</mml:mi><mml:mi>&#x03C1;</mml:mi></mml:mrow><mml:msub><mml:mi>&#x03BB;</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub></mml:mfrac><mml:mo>)</mml:mo></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msup><mml:mo>&#x003D;</mml:mo><mml:mi>&#x03B2;</mml:mi></mml:math>
</alternatives></disp-formula></p>
<p>where</p>
<p><disp-formula id="eqn-17">
<label>(17)</label><alternatives>
<graphic mimetype="image" mime-subtype="png" xlink:href="eqn-17.png"/><tex-math id="tex-eqn-17"><![CDATA[$$\beta = {\left( {\displaystyle{{2\rho d\rho } \over {{\lambda _i}}}} \right)^2}$$]]></tex-math><mml:math id="mml-eqn-17" display="block"><mml:mi>&#x03B2;</mml:mi><mml:mo>&#x003D;</mml:mo><mml:mrow><mml:msup><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mstyle scriptlevel="0" displaystyle="true"><mml:mrow><mml:mfrac><mml:mrow><mml:mn>2</mml:mn><mml:mi>&#x03C1;</mml:mi><mml:mi>d</mml:mi><mml:mi>&#x03C1;</mml:mi></mml:mrow><mml:mrow><mml:mrow><mml:msub><mml:mi>&#x03BB;</mml:mi><mml:mi>i</mml:mi></mml:msub></mml:mrow></mml:mrow></mml:mfrac></mml:mrow></mml:mstyle></mml:mrow><mml:mo>)</mml:mo></mml:mrow><mml:mn>2</mml:mn></mml:msup></mml:mrow></mml:math>
</alternatives></disp-formula></p>
<p>Since the result of <inline-formula id="ieqn-98"><alternatives><inline-graphic xlink:href="ieqn-98.png"/><tex-math id="tex-ieqn-98"><![CDATA[$\beta$]]></tex-math><mml:math id="mml-ieqn-98"><mml:mi>&#x03B2;</mml:mi></mml:math>
</alternatives></inline-formula> is a numerical value, we can choose it for our convenience such that</p>
<p><disp-formula id="eqn-18">
<label>(18)</label><alternatives>
<graphic mimetype="image" mime-subtype="png" xlink:href="eqn-18.png"/><tex-math id="tex-eqn-18"><![CDATA[$${\rm ln}\left( {{y_d}{z_d}} \right) = \beta$$]]></tex-math><mml:math id="mml-eqn-18" display="block"><mml:mrow><mml:mi mathvariant="normal">l</mml:mi><mml:mi mathvariant="normal">n</mml:mi></mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mrow><mml:msub><mml:mi>y</mml:mi><mml:mi>d</mml:mi></mml:msub></mml:mrow><mml:mrow><mml:msub><mml:mi>z</mml:mi><mml:mi>d</mml:mi></mml:msub></mml:mrow></mml:mrow><mml:mo>)</mml:mo></mml:mrow><mml:mo>&#x003D;</mml:mo><mml:mi>&#x03B2;</mml:mi></mml:math>
</alternatives></disp-formula></p>
<p>Substituting the value of <inline-formula id="ieqn-99"><alternatives><inline-graphic xlink:href="ieqn-99.png"/><tex-math id="tex-ieqn-99"><![CDATA[$d{s^2}$]]></tex-math><mml:math id="mml-ieqn-99"><mml:mi>d</mml:mi><mml:mrow><mml:msup><mml:mi>s</mml:mi><mml:mn>2</mml:mn></mml:msup></mml:mrow></mml:math>
</alternatives></inline-formula> from <xref ref-type="disp-formula" rid="eqn-12">Eq. (12)</xref> and the value of <inline-formula id="ieqn-100"><alternatives><inline-graphic xlink:href="ieqn-100.png"/><tex-math id="tex-ieqn-100"><![CDATA[$\beta$]]></tex-math><mml:math id="mml-ieqn-100"><mml:mi>&#x03B2;</mml:mi></mml:math>
</alternatives></inline-formula> from <xref ref-type="disp-formula" rid="eqn-18">Eq. (18)</xref> in to <xref ref-type="disp-formula" rid="eqn-11">Eq. (11)</xref> and replacing the indices; such that <italic>j</italic> &#x003D; <italic>l,k</italic> &#x003D; <italic>m,&#x03BB;</italic><sub><italic>j</italic></sub> &#x003D; &#x2206;<sub><italic>l</italic></sub><italic>,&#x03BB;</italic><sub><italic>k</italic></sub> &#x003D; &#x2206;<sub><italic>m</italic></sub>.</p>
<p>We can get our QGCN formulation as below.</p>
<p><disp-formula id="eqn-19">
<label>(19)</label><alternatives>
<graphic mimetype="image" mime-subtype="png" xlink:href="eqn-19.png"/><tex-math id="tex-eqn-19"><![CDATA[$$QG = {1 \over 2}\sum\nolimits_{l = 1}^n {\sum\nolimits_{m = 1}^n {{{{y_d}{z_d}} \over {{\Delta _l} + {\Delta _m}}}} }$$]]></tex-math><mml:math id="mml-eqn-19" display="block"><mml:mi>Q</mml:mi><mml:mi>G</mml:mi><mml:mo>&#x003D;</mml:mo><mml:mrow><mml:mfrac><mml:mn>1</mml:mn><mml:mn>2</mml:mn></mml:mfrac></mml:mrow><mml:msubsup><mml:mo movablelimits="false">&#x2211;</mml:mo><mml:mrow><mml:mi>l</mml:mi><mml:mo>&#x003D;</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mi>n</mml:mi></mml:msubsup><mml:mrow><mml:msubsup><mml:mo movablelimits="false">&#x2211;</mml:mo><mml:mrow><mml:mi>m</mml:mi><mml:mo>&#x003D;</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mi>n</mml:mi></mml:msubsup><mml:mrow><mml:mrow><mml:mfrac><mml:mrow><mml:mrow><mml:msub><mml:mi>y</mml:mi><mml:mi>d</mml:mi></mml:msub></mml:mrow><mml:mrow><mml:msub><mml:mi>z</mml:mi><mml:mi>d</mml:mi></mml:msub></mml:mrow></mml:mrow><mml:mrow><mml:mrow><mml:msub><mml:mi mathvariant="normal">&#x0394;</mml:mi><mml:mi>l</mml:mi></mml:msub></mml:mrow><mml:mo>&#x002B;</mml:mo><mml:mrow><mml:msub><mml:mi mathvariant="normal">&#x0394;</mml:mi><mml:mi>m</mml:mi></mml:msub></mml:mrow></mml:mrow></mml:mfrac></mml:mrow></mml:mrow></mml:mrow></mml:math>
</alternatives></disp-formula></p>
<p>where <inline-formula id="ieqn-101"><alternatives><inline-graphic xlink:href="ieqn-101.png"/><tex-math id="tex-ieqn-101"><![CDATA[${y_d}$]]></tex-math><mml:math id="mml-ieqn-101"><mml:mrow><mml:msub><mml:mi>y</mml:mi><mml:mi>d</mml:mi></mml:msub></mml:mrow></mml:math>
</alternatives></inline-formula> represents the set of documents with labels, and <inline-formula id="ieqn-102"><alternatives><inline-graphic xlink:href="ieqn-102.png"/><tex-math id="tex-ieqn-102"><![CDATA[$n$]]></tex-math><mml:math id="mml-ieqn-102"><mml:mi>n</mml:mi></mml:math>
</alternatives></inline-formula> is the dimension of the output features, and it is equal to the number of classes, <inline-formula id="ieqn-103"><alternatives><inline-graphic xlink:href="ieqn-103.png"/><tex-math id="tex-ieqn-103"><![CDATA[$y$]]></tex-math><mml:math id="mml-ieqn-103"><mml:mi>y</mml:mi></mml:math>
</alternatives></inline-formula> represents the label indicator matrix. To evaluate the performance of our algorithm, we utilized gradient descent for training the weight parameters <inline-formula id="ieqn-104"><alternatives><inline-graphic xlink:href="ieqn-104.png"/><tex-math id="tex-ieqn-104"><![CDATA[${\Delta _i}\;$]]></tex-math><mml:math id="mml-ieqn-104"><mml:mrow><mml:msub><mml:mi mathvariant="normal">&#x0394;</mml:mi><mml:mi>i</mml:mi></mml:msub></mml:mrow><mml:mspace width="thickmathspace"></mml:mspace></mml:math>
</alternatives></inline-formula> and <inline-formula id="ieqn-105"><alternatives><inline-graphic xlink:href="ieqn-105.png"/><tex-math id="tex-ieqn-105"><![CDATA[${\Delta _m}$]]></tex-math><mml:math id="mml-ieqn-105"><mml:mrow><mml:msub><mml:mi mathvariant="normal">&#x0394;</mml:mi><mml:mi>m</mml:mi></mml:msub></mml:mrow></mml:math>
</alternatives></inline-formula>. Gradient descent is an optimization algorithm that finds the parameter values (coefficients) of a function <inline-formula id="ieqn-106"><alternatives><inline-graphic xlink:href="ieqn-106.png"/><tex-math id="tex-ieqn-106"><![CDATA[$\left( f \right)$]]></tex-math><mml:math id="mml-ieqn-106"><mml:mrow><mml:mo>(</mml:mo><mml:mi>f</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:math>
</alternatives></inline-formula> that minimizes a cost function (cost).</p>
</sec>
</sec>
<sec id="s4">
<label>4</label>
<title>Results and Discussion</title>
<p>This section evaluates the proposed QGCN model for better classification, even with limited labeled data to determine its performance. Three standard datasets, Cora, Citeseer, and PubMed, are used to assess the efficiency of the proposed QGCN model. The statistics of the different benchmark datasets are shown in <xref ref-type="table" rid="table-1">Tab. 1</xref> [<xref ref-type="bibr" rid="ref-32">32</xref>]. The Citeseer, PubMed, and Cora datasets with citation network types are also presented. It can be seen that the nodes for Citeseer, PubMed, and Cora are 3327, 19717 and 2708, respectively. The total number of classes is 6, 3 and 7, in Citeseer, PubMed, and Cora, respectively. The values of features are 3703, 500, 1433 for Citeseer, PubMed, and Cora, respectively. Finally, the label rate for Citeseer, PubMed, and Cora is 0.036, 0.003, and 0.052, respectively. Dataset variance has also been taken into account, as it can cause the performance to deviate from the expected results.</p>
<table-wrap id="table-1">
<label>Table 1</label>
<caption>
<title>Summary statistics of datasets. As reported in Kipf et al. [<xref ref-type="bibr" rid="ref-32">32</xref>]</title>
</caption>
<table>
<colgroup>
<col/>
<col/>
<col/>
<col/>
<col/>
<col/>
<col/>
</colgroup>
<thead>
<tr>
<th>Dataset</th>
<th>Type</th>
<th>Nodes</th>
<th>Edges</th>
<th>Classes</th>
<th>Features</th>
<th>Label rate</th>
</tr>
</thead>
<tbody>
<tr>
<td><italic>Citeseer</italic></td>
<td>Citation Network</td>
<td>3,327</td>
<td>4,732</td>
<td>6</td>
<td>3,703</td>
<td>0.036</td>
</tr>
<tr>
<td><italic>PubMed</italic></td>
<td>Citation Network</td>
<td>19,717</td>
<td>44,338</td>
<td>3</td>
<td>500</td>
<td>0.003</td>
</tr>
<tr>
<td><italic>Cora</italic></td>
<td>Citation Network</td>
<td>2,708</td>
<td>5,429</td>
<td>7</td>
<td>1,433</td>
<td>0.052</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>We tested the efficiency of the proposed QGCN model against four deep learning methods for graphs, including GCN, GCNT, Fisher GCN, and Fisher GCNT [<xref ref-type="bibr" rid="ref-52">52</xref>]. The experiment was conducted on a Core i7 PC with 8 GB RAM and Windows 10. We used Python as the coding language and TensorFlow for high-performance numerical computation.</p>
<p>An experimental analysis of semi-supervised classification tasks of the transductive node has been conducted. As recently suggested in Monti et al. [<xref ref-type="bibr" rid="ref-40">40</xref>], random training: validation: testing datasets are used, which is based on the same ratios as the Planetoid split as indicated in the &#x201C;Train: Valid: Test&#x201D; column of <xref ref-type="table" rid="table-1">Tab. 1</xref>.</p>
<p>We primarily equate GCN that is a state-of-the-art on such datasets and utilized GCNT as a baseline. It was established that random walking similarities would lead to better learning of GNNs [<xref ref-type="bibr" rid="ref-49">49</xref>]. On semi-supervised node classification tasks, preprocessing of the adjacency matrix A can enhance the overall performance of GCN. This procedure is focused on DeepWalk similarities [<xref ref-type="bibr" rid="ref-39">39</xref>].</p>
<p>The GCN codes [<xref ref-type="bibr" rid="ref-32">32</xref>] are adapted to equate the four approaches precisely the same configuration and only differ in matrix <inline-formula id="ieqn-107"><alternatives><inline-graphic xlink:href="ieqn-107.png"/><tex-math id="tex-ieqn-107"><![CDATA[$A$]]></tex-math><mml:math id="mml-ieqn-107"><mml:mi>A</mml:mi></mml:math>
</alternatives></inline-formula> used for the measurement of the graph convolution. As a result of improved perturbation and pre-processing with an enhanced process of generalization, QGCN significantly improves over GCN, GCNT, Fisher GCN, and Fisher GCNT. Additionally, the QGCN provides the best performance with both techniques. This variability has been observed due to the various splits of the training, validation and testing datasets [<xref ref-type="bibr" rid="ref-40">40</xref>]; hence, the scores vary with the split. We have seen a clear improvement of the introduced approaches over the baselines in repeated experiments.</p>
<p><xref ref-type="table" rid="table-2">Tab. 2</xref> validates the proposed QGCN model that significantly outperforms state-of-the-art deep learning methods for graph classifications on the Cora, Citeseer, and PubMed datasets. Here, we have also provided a detailed comparison of the proposed method and its competitors in <xref ref-type="fig" rid="fig-2">Figs. 2</xref>, <xref ref-type="fig" rid="fig-3">3</xref> and <xref ref-type="fig" rid="fig-4">4</xref>.</p>
<table-wrap id="table-2">
<label>Table 2</label>
<caption>
<title>Results comparison with State-of-the-art GCN and its other extensions</title>
</caption>
<table>
<colgroup>
<col/>
<col/>
<col/>
<col/>
<col/>
<col/>
<col/>
</colgroup>
<thead>
<tr><th rowspan="2">Methods</th><th colspan="2">Cora</th><th colspan="2">Citeseer</th><th colspan="2">PubMed</th>
</tr>
<tr>
<th>Acc</th>
<th>Loss</th>
<th>Acc</th>
<th>Loss</th>
<th>Acc</th>
<th>Loss</th>
</tr>
</thead>
<tbody>
<tr>
<td>GCN</td>
<td>80.82</td>
<td>1.072037265</td>
<td>69.515</td>
<td>1.37340863</td>
<td>78.69</td>
<td>0.73688188</td>
</tr>
<tr>
<td>GCNT</td>
<td>81.855</td>
<td>1.046698965</td>
<td>70.445</td>
<td>1.33260797</td>
<td>78.345</td>
<td>0.697023411</td>
</tr>
<tr>
<td>Fisher GCN</td>
<td>80.67</td>
<td>1.03020443</td>
<td>69.775</td>
<td>1.36068933</td>
<td>78.865</td>
<td>0.729672466</td>
</tr>
<tr>
<td>Fisher GCNT</td>
<td>81.405</td>
<td>1.031641146</td>
<td>70.385</td>
<td>1.31951523</td>
<td>79.065</td>
<td>0.690237115</td>
</tr>
<tr>
<td>Quantum GCN</td>
<td>82.045</td>
<td>1.06086768</td>
<td>70.975</td>
<td>1.36238394</td>
<td>79.45</td>
<td>0.726651127</td>
</tr>
</tbody>
</table>
</table-wrap>
<fig id="fig-2">
<label>Figure 2</label>
<caption>
<title>Results on cora dataset</title>
</caption>
<graphic mimetype="image" mime-subtype="png" xlink:href="fig-2.png"/>
</fig>
<fig id="fig-3">
<label>Figure 3</label>
<caption>
<title>Results on citeseer dataset</title>
</caption>
<graphic mimetype="image" mime-subtype="png" xlink:href="fig-3.png"/>
</fig>
<fig id="fig-4">
<label>Figure 4</label>
<caption>
<title>Results on PubMed dataset</title>
</caption>
<graphic mimetype="image" mime-subtype="png" xlink:href="fig-4.png"/>
</fig>
<p>The Epoch versus accuracy results on the Cora dataset are shown in <xref ref-type="fig" rid="fig-2">Fig. 2</xref>. QGCN outperformed the given methods, as shown in <xref ref-type="table" rid="table-2">Tab. 2</xref>. The proposed method shows the best result on the Cora dataset, which is 82.045 as compared to others.</p>
<p>The experimental results on Citeseer are shown in <xref ref-type="fig" rid="fig-3">Fig. 3</xref>. For comparison, we tested all five methods, namely GCN, GCNT, Fisher GCN, Fisher GCNT, and Quantum GCN, on the Citeseer dataset. The GCN showed low performance as compared to Quantum GCN, GCNT, Fisher GCN, and Fisher GCNT. In <xref ref-type="table" rid="table-2">Tab. 2</xref>, it is clear that the proposed QGCN showed higher accuracy as compared to the other methods, which is 70.975.</p>
<p>The change of accuracy over Epochs on the PubMed dataset is shown in <xref ref-type="fig" rid="fig-4">Fig. 4</xref>. The comparison between GCN, GCNT, Fisher GCN, Fisher GCNT, and Quantum GCN for the PubMed dataset is obtained from the presented <xref ref-type="table" rid="table-2">Tab. 2</xref>. The GCNT showed low performance, whereas the proposed method again outperformed the others and showed the highest accuracy value, which is 79.45.</p>
</sec>
<sec id="s5">
<label>5</label>
<title>Conclusion</title>
<p>This paper proposes a new method, &#x201C;Quantum GCN&#x201D;, based on Quantum Information theory and the GCN model for TC. In this proposed work, graph structures are modified in the GCN framework to reveal major improvements in text classification tasks. The overall generalization of the GCN is improved by introducing a new formulation <xref ref-type="disp-formula" rid="eqn-19">Eq. (19)</xref>, which is derived from quantum information theory. The proposed approach facilitates pre-processing of the GCN graph adjacency matrix to integrate the proximities of a high order. The experimental results are compared with state-of-the-art graph-based methods on three benchmark datasets, namely, Cora, Citeseer, and PubMed (<xref ref-type="table" rid="table-2">Tab. 2</xref>). The proposed method achieves an accuracy of 82.045 on the Cora dataset, 70.975 on Citeseer, and 79.45 on PubMed, which shows its superiority over its competitors.</p>
</sec>
</body>
<back><fn-group>
<fn fn-type="other">
<p>Future work: Our perturbation is efficient enough to add high-order polynomial filters. It would be more beneficial if alternative perturbations based on other distances, e.g., matrix Bregman divergence, could be explored. At the same time, the improved accuracy would broaden the application of the GCN in emerging machine learning processing. Future work will comprise comparing various perturbations in the GCN configuration.</p>
</fn>
<fn fn-type="other">
<p><bold>Funding Statement:</bold> This work was supported by the National Key Research and Development Program of China (2018YFB1600600), the National Natural Science Foundation of China under (61976034, U1808206), and the Dalian Science and Technology Innovation Fund (2019J12GX035).</p>
</fn>
<fn fn-type="conflict">
<p><bold>Conflicts of Interest:</bold> The authors declare that they have no conflicts of interest to report regarding the present study.</p>
</fn>
</fn-group>
<ref-list content-type="authoryear">
<title>References</title>
<ref id="ref-1">
<label>[1]</label><mixed-citation publication-type="conf-proc">
<person-group person-group-type="author"><string-name>
<given-names>L.</given-names> 
<surname> Douglas Baker</surname></string-name> and <string-name>
<given-names>A. K.</given-names> 
<surname>McCallum</surname></string-name>
</person-group>, &#x201C;
<article-title>Distributional clustering of words for text classification</article-title>,&#x201D; in <conf-name>Proc. of the 21st Annual Int. ACM SIGIR Conf. on Research and Development in Information Retrieval</conf-name>, <conf-loc>Melbourne, Australia</conf-loc>, pp. 
<fpage>96</fpage>&#x2013;
<lpage>103</lpage>, 
<year iso-8601-date="1998">1998</year>. </mixed-citation>
</ref>
<ref id="ref-2">
<label>[2]</label><mixed-citation publication-type="book">
<person-group person-group-type="author"><string-name>
<given-names>A.</given-names> 
<surname>Bhowmick</surname></string-name> and <string-name>
<given-names>S. M.</given-names> 
<surname>Hazarika</surname></string-name>
</person-group>, &#x201C;<chapter-title>E-mail spam filtering: A review of techniques and trends</chapter-title>,&#x201D; In: 
<source>Advances in Electronics, Communication and Computing</source>, 
<publisher-name>Springer</publisher-name>, <publisher-loc>Singapore</publisher-loc>, 
<fpage>583</fpage>&#x2013;
<lpage>590</lpage>, 
<year iso-8601-date="2018">2018</year>.</mixed-citation>
</ref>
<ref id="ref-3">
<label>[3]</label><mixed-citation publication-type="conf-proc">
<person-group person-group-type="author"><string-name>
<given-names>W.</given-names> 
<surname>Cohen</surname></string-name>, <string-name>
<given-names>V.</given-names> 
<surname>Carvalho</surname></string-name> and <string-name>
<given-names>T.</given-names> 
<surname>Mitchell</surname></string-name>
</person-group>, &#x201C;
<article-title>Learning to classify email into speech acts</article-title>,&#x201D; in <conf-name>Proc. of the 2004 Conf. on Empirical Methods in Natural Language Processing</conf-name>, <conf-loc>Barcelona, Spain</conf-loc>, pp. 
<fpage>309</fpage>&#x2013;
<lpage>316</lpage>, 
<year iso-8601-date="2004">2004</year>. </mixed-citation>
</ref>
<ref id="ref-4">
<label>[4]</label><mixed-citation publication-type="journal">
<person-group person-group-type="author"><string-name>
<given-names>D. D.</given-names> 
<surname>Lewis</surname></string-name> and <string-name>
<given-names>K. A.</given-names> 
<surname>Knowles</surname></string-name>
</person-group>, &#x201C;
<article-title>Threading electronic mail: A preliminary study</article-title>,&#x201D; 
<source>Information Processing &#x0026; Management</source>, vol. 
<volume>33</volume>, no. 
<issue>2</issue>, pp. 
<fpage>209</fpage>&#x2013;
<lpage>217</lpage>, 
<year iso-8601-date="1997">1997</year>.</mixed-citation>
</ref>
<ref id="ref-5">
<label>[5]</label><mixed-citation publication-type="journal">
<person-group person-group-type="author"><string-name>
<given-names>H.</given-names> 
<surname>Chen</surname></string-name> and <string-name>
<given-names>D.</given-names> 
<surname>Zimbra</surname></string-name>
</person-group>, &#x201C;
<article-title>AI and opinion mining</article-title>,&#x201D; 
<source>IEEE Intelligent Systems</source>, vol. 
<volume>25</volume>, no. 
<issue>3</issue>, pp. 
<fpage>74</fpage>&#x2013;
<lpage>80</lpage>, 
<year iso-8601-date="2010">2010</year>.</mixed-citation>
</ref>
<ref id="ref-6">
<label>[6]</label><mixed-citation publication-type="journal">
<person-group person-group-type="author"><string-name>
<given-names>M. S.</given-names> 
<surname>Hajmohammadi</surname></string-name>, <string-name>
<given-names>R.</given-names> 
<surname>Ibrahim</surname></string-name> and <string-name>
<given-names>Z. A.</given-names> 
<surname>Othman</surname></string-name>
</person-group>, &#x201C;
<article-title>Opinion mining and sentiment analysis: A survey</article-title>,&#x201D; 
<source>International Journal of Computers &#x0026; Technology</source>, vol. 
<volume>2</volume>, no. 
<issue>3</issue>, pp. 
<fpage>171</fpage>&#x2013;
<lpage>178</lpage>, 
<year iso-8601-date="2012">2012</year>.</mixed-citation>
</ref>
<ref id="ref-7">
<label>[7]</label><mixed-citation publication-type="book">
<person-group person-group-type="author"><string-name>
<given-names>R. K.</given-names> 
<surname>Bakshi</surname></string-name>, <string-name>
<given-names>N.</given-names> 
<surname>Kaur</surname></string-name>, <string-name>
<given-names>R.</given-names> 
<surname>Kaur</surname></string-name> and <string-name>
<given-names>G.</given-names> 
<surname>Kaur</surname></string-name>
</person-group>, &#x201C;<chapter-title>Opinion mining and sentiment analysis</chapter-title>,&#x201D; In 
<source>2016 3rd Int. Conf. on Computing for Sustainable Global Development (INDIACom)</source>. 
<publisher-loc>New Delhi</publisher-loc>: 
<publisher-name>IEEE</publisher-name>, pp. 
<fpage>452</fpage>&#x2013;
<lpage>455</lpage>, 
<year iso-8601-date="2016">2016</year>.</mixed-citation>
</ref>
<ref id="ref-8">
<label>[8]</label><mixed-citation publication-type="journal">
<person-group person-group-type="author"><string-name>
<given-names>M.</given-names> 
<surname>Kang</surname></string-name>, <string-name>
<given-names>J.</given-names> 
<surname>Ahn</surname></string-name> and <string-name>
<given-names>K.</given-names> 
<surname>Lee</surname></string-name>
</person-group>, &#x201C;
<article-title>Opinion mining using ensemble text hidden Markov models for text classification</article-title>,&#x201D; 
<source>Expert Systems with Applications</source>, vol. 
<volume>94</volume>, pp. 
<fpage>218</fpage>&#x2013;
<lpage>227</lpage>, 
<year iso-8601-date="2018">2018</year>.</mixed-citation>
</ref>
<ref id="ref-9">
<label>[9]</label><mixed-citation publication-type="journal">
<person-group person-group-type="author"><string-name>
<given-names>S.</given-names> 
<surname>Chakrabarti</surname></string-name>, <string-name>
<given-names>B.</given-names> 
<surname>Dom</surname></string-name>, <string-name>
<given-names>R.</given-names> 
<surname>Agrawal</surname></string-name> and <string-name>
<given-names>P.</given-names> 
<surname>Raghavan</surname></string-name>
</person-group>, &#x201C;
<article-title>Using taxonomy, discriminants, and signatures for navigating in text databases</article-title>,&#x201D; 
<source>In Int. Conf. on Very Large Data Bases</source>, vol. 
<volume>97</volume>, pp. 
<fpage>446</fpage>&#x2013;
<lpage>455</lpage>, 
<year iso-8601-date="1997">1997</year>.</mixed-citation>
</ref>
<ref id="ref-10">
<label>[10]</label><mixed-citation publication-type="journal">
<person-group person-group-type="author"><string-name>
<given-names>R. A.</given-names> 
<surname>Naqvi</surname></string-name>, <string-name>
<given-names>M. A.</given-names> 
<surname>Khan</surname></string-name>, <string-name>
<given-names>N.</given-names> 
<surname>Malik</surname></string-name>, <string-name>
<given-names>S.</given-names> 
<surname>Saqib</surname></string-name>, <string-name>
<given-names>T.</given-names> 
<surname>Alyas</surname></string-name> <etal>et al.</etal>
</person-group><italic>,</italic> &#x201C;
<article-title>Roman urdu news headline classification empowered with machine learning</article-title>,&#x201D; 
<source>Computers, Materials &#x0026; Continua</source>, vol. 
<volume>65</volume>, no. 
<issue>2</issue>, pp. 
<fpage>1221</fpage>&#x2013;
<lpage>1236</lpage>, 
<year iso-8601-date="2020">2020</year>.</mixed-citation>
</ref>
<ref id="ref-11">
<label>[11]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>S.</given-names> <surname>Akshay</surname></string-name>, <string-name><given-names>K.</given-names> <surname>Nayana</surname></string-name> and <string-name><given-names>S.</given-names> <surname>Karthika</surname></string-name></person-group>, &#x201C;<article-title>A survey on classification and clustering algorithms for uncompressed and compressed text</article-title>,&#x201D; <source>International Journal of Applied Engineering Research</source>, vol. <volume>10</volume>, no. <issue>10</issue>, pp. <fpage>27355</fpage>&#x2013;<lpage>27373</lpage>, <year>2015</year>.</mixed-citation>
</ref>
<ref id="ref-12">
<label>[12]</label><mixed-citation publication-type="journal">
<person-group person-group-type="author"><string-name>
<given-names>A.</given-names> 
<surname>Hotho</surname></string-name>, <string-name>
<given-names>A.</given-names> 
<surname>N&#x00FC;rnberger</surname></string-name> and <string-name>
<given-names>G.</given-names> 
<surname>Paa&#x00DF;</surname></string-name>
</person-group>, &#x201C;
<article-title>A brief survey of text mining</article-title>,&#x201D; 
<source>Journal for Language Technology and Computational Linguistics</source>, vol. 
<volume>20</volume>, pp. 
<fpage>19</fpage>&#x2013;
<lpage>62</lpage>, 
<year iso-8601-date="2005">2005</year>.</mixed-citation>
</ref>
<ref id="ref-13">
<label>[13]</label><mixed-citation publication-type="journal">
<person-group person-group-type="author"><string-name>
<given-names>A.</given-names> 
<surname>Onan</surname></string-name> and <string-name>
<given-names>S.</given-names> 
<surname>Korukoglu</surname></string-name>
</person-group>, &#x201C;
<article-title>A feature selection model based on genetic rank aggregation for text sentiment classification</article-title>,&#x201D; 
<source>Journal of Information Science</source>, vol. 
<volume>43</volume>, no. 
<issue>1</issue>, pp. 
<fpage>25</fpage>&#x2013;
<lpage>38</lpage>, 
<year iso-8601-date="2016">2016</year>.</mixed-citation>
</ref>
<ref id="ref-14">
<label>[14]</label><mixed-citation publication-type="conf-proc">
<person-group person-group-type="author"><string-name>
<given-names>A.</given-names> 
<surname>Dasgupta</surname></string-name>, <string-name>
<given-names>P.</given-names> 
<surname>Drineas</surname></string-name>, <string-name>
<given-names>B.</given-names> 
<surname>Harb</surname></string-name>, <string-name>
<given-names>V.</given-names> 
<surname>Josifovski</surname></string-name> and <string-name>
<given-names>M. W.</given-names> 
<surname>Mahoney</surname></string-name>
</person-group>, &#x201C;
<article-title>Feature selection methods for text classification</article-title>,&#x201D; in <conf-name>Proc. of the 13th ACM SIGKDD Int. Conf. on Knowledge Discovery and Data Mining</conf-name>, San Jose, California, USA, pp. 
<fpage>230</fpage>&#x2013;
<lpage>239</lpage>, 
<year iso-8601-date="2007">2007</year>. </mixed-citation>
</ref>
<ref id="ref-15">
<label>[15]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>R. E.</given-names> <surname>Fan</surname></string-name>, <string-name><given-names>K. W.</given-names> <surname>Chang</surname></string-name>, <string-name><given-names>C. J.</given-names> <surname>Hsieh</surname></string-name>, <string-name><given-names>X. R.</given-names> <surname>Wang</surname></string-name> and <string-name><given-names>C. J.</given-names> <surname>Lin</surname></string-name></person-group>, &#x201C;<article-title>Liblinear: A library for large linear classification</article-title>,&#x201D; <source>Journal of Machine Learning Research</source>, vol. <volume>9</volume>, no. <issue>61</issue>, pp. <fpage>1871</fpage>&#x2013;<lpage>1874</lpage>, <year>2008</year>.</mixed-citation>
</ref>
<ref id="ref-16">
<label>[16]</label><mixed-citation publication-type="book">
<person-group person-group-type="author"><string-name>
<given-names>T.</given-names> 
<surname>Joachims</surname></string-name>
</person-group>, &#x201C;<chapter-title>Text categorization with support vector machines: Learning with many relevant features</chapter-title>, &#x201D; In 
<source>European Conf. on Machine Learning</source>, <publisher-loc>Chemnitz, Germany</publisher-loc>, pp. 
<fpage>137</fpage>&#x2013;
<lpage>142</lpage>, 
<year iso-8601-date="1998">1998</year>.</mixed-citation>
</ref>
<ref id="ref-17">
<label>[17]</label><mixed-citation publication-type="conf-proc">
<person-group person-group-type="author"><string-name>
<given-names>A.</given-names> 
<surname>Joulin</surname></string-name>, <string-name>
<given-names>E.</given-names> 
<surname>Grave</surname></string-name>, <string-name>
<given-names>P.</given-names> 
<surname>Bojanowski</surname></string-name> and <string-name>
<given-names>T.</given-names> 
<surname>Mikolov</surname></string-name>
</person-group>, &#x201C;
<article-title>Bag of tricks for efficient text classification</article-title>,&#x201D; in <conf-name>Proc. European Chapter of the Association for Computational Linguistics</conf-name>, <conf-loc>Valencia, Spain</conf-loc>, 
<year iso-8601-date="2016">2016</year>. </mixed-citation>
</ref>
<ref id="ref-18">
<label>[18]</label><mixed-citation publication-type="journal">
<person-group person-group-type="author"><string-name>
<given-names>F. B.</given-names> 
<surname>Silva</surname></string-name>, <string-name>
<given-names>R. O.</given-names> 
<surname>Werneck</surname></string-name>, <string-name>
<given-names>S.</given-names> 
<surname>Goldenstein</surname></string-name>, <string-name>
<given-names>S.</given-names> 
<surname>Tabbone</surname></string-name> and <string-name>
<given-names>R. D.</given-names> 
<surname>Torres</surname></string-name>
</person-group>, &#x201C;
<article-title>Graph-based bag-of-words for classification</article-title>,&#x201D; 
<source>Pattern Recognition</source>, vol. 
<volume>74</volume>, pp. 
<fpage>266</fpage>&#x2013;
<lpage>285</lpage>, 
<year iso-8601-date="2018">2018</year>.</mixed-citation>
</ref>
<ref id="ref-19">
<label>[19]</label><mixed-citation publication-type="journal">
<person-group person-group-type="author"><string-name>
<given-names>Y.</given-names> 
<surname>Luo</surname></string-name>, <string-name>
<given-names>A. R.</given-names> 
<surname>Sohani</surname></string-name>, <string-name>
<given-names>E. P.</given-names> 
<surname>Hochberg</surname></string-name> and <string-name>
<given-names>P.</given-names> 
<surname>Szolovits</surname></string-name>
</person-group>, &#x201C;
<article-title>Automatic lymphoma classification with sentence subgraph mining from pathology reports</article-title>,&#x201D; 
<source>Journal of the American Medical Informatics Association</source>, vol. 
<volume>21</volume>, no. 
<issue>5</issue>, pp. 
<fpage>824</fpage>&#x2013;
<lpage>832</lpage>, 
<year iso-8601-date="2014">2014</year>.</mixed-citation>
</ref>
<ref id="ref-20">
<label>[20]</label><mixed-citation publication-type="conf-proc">
<person-group person-group-type="author"><string-name>
<given-names>S.</given-names> 
<surname>Lai</surname></string-name>, <string-name>
<given-names>L.</given-names> 
<surname>Xu</surname></string-name>, <string-name>
<given-names>K.</given-names> 
<surname>Liu</surname></string-name> and <string-name>
<given-names>J.</given-names> 
<surname>Zhao</surname></string-name>
</person-group>, &#x201C;
<article-title>Recurrent convolutional neural networks for text classification</article-title>,&#x201D; in <conf-name>Twenty-ninth AAAI Conf. on Artificial Intelligence</conf-name>, <conf-loc>Berlin, Heidelberg</conf-loc>, pp. 
<fpage>233</fpage>&#x2013;
<lpage>240</lpage>, 
<year iso-8601-date="2015">2015</year>. </mixed-citation>
</ref>
<ref id="ref-21">
<label>[21]</label><mixed-citation publication-type="journal">
<person-group person-group-type="author"><string-name>
<given-names>M.</given-names> 
<surname>Jaderberg</surname></string-name>, <string-name>
<given-names>K.</given-names> 
<surname>Simonyan</surname></string-name>, <string-name>
<given-names>A.</given-names> 
<surname>Vedaldi</surname></string-name> and <string-name>
<given-names>A.</given-names> 
<surname>Zisserman</surname></string-name>
</person-group>, &#x201C;
<article-title>Reading text in the wild with convolutional neural networks</article-title>,&#x201D; 
<source>International Journal of Computer Vision</source>, vol. 
<volume>116</volume>, no. 
<issue>1</issue>, pp. 
<fpage>1</fpage>&#x2013;
<lpage>20</lpage>, 
<year iso-8601-date="2016">2016</year>.</mixed-citation>
</ref>
<ref id="ref-22">
<label>[22]</label><mixed-citation publication-type="conf-proc">
<person-group person-group-type="author"><string-name>
<given-names>D.</given-names> 
<surname>Scherer</surname></string-name>, <string-name>
<given-names>A.</given-names> 
<surname>M&#x00FC;ller</surname></string-name> and <string-name>
<given-names>S.</given-names> 
<surname>Behnke</surname></string-name>
</person-group>, &#x201C;
<article-title>Evaluation of pooling operations in convolutional architectures for object recognition</article-title>,&#x201D; in <conf-name>Int. Conf. on Artificial Neural Networks</conf-name>, <conf-loc>Thessaloniki, Greece</conf-loc>, pp. 
<fpage>92</fpage>&#x2013;
<lpage>101</lpage>, 
<year iso-8601-date="2010">2010</year>. </mixed-citation>
</ref>
<ref id="ref-23">
<label>[23]</label><mixed-citation publication-type="journal">
<person-group person-group-type="author"><string-name>
<given-names>I.</given-names> 
<surname>Muhammad</surname></string-name>, <string-name>
<given-names>W.</given-names> 
<surname>Liu</surname></string-name>, <string-name>
<given-names>A.</given-names> 
<surname>Jahangir</surname></string-name>, <string-name>
<given-names>S. M.</given-names> 
<surname>Nouman</surname></string-name> and <string-name>
<given-names>M.</given-names> 
<surname>Aparna</surname></string-name>
</person-group>, &#x201C;
<article-title>A novel localization technique using luminous flux</article-title>,&#x201D; 
<source>Applied Sciences</source>, vol. 
<volume>9</volume>, no. 
<issue>23</issue>, pp. 
<fpage>5027</fpage>, 
<year iso-8601-date="2019">2019</year>.</mixed-citation>
</ref>
<ref id="ref-24">
<label>[24]</label><mixed-citation publication-type="journal">
<person-group person-group-type="author"><string-name>
<given-names>M. N.</given-names> 
<surname>Sohail</surname></string-name>, <string-name>
<given-names>R.</given-names> 
<surname>Jiadong</surname></string-name>, <string-name>
<given-names>M. M.</given-names> 
<surname>Uba</surname></string-name>, <string-name>
<given-names>M.</given-names> 
<surname>Irshad</surname></string-name>, <string-name>
<given-names>W.</given-names> 
<surname>Iqbal</surname></string-name> <etal>et al.</etal>
</person-group><italic>,</italic> &#x201C;
<article-title>A hybrid forecast cost benefit classification of diabetes mellitus prevalence based on epidemiological study on real-life patient&#x2019;s data</article-title>,&#x201D; 
<source>Scientific Reports</source>, vol. 
<volume>9</volume>, no. 
<issue>1</issue>, pp. 
<fpage>16</fpage>, 
<year iso-8601-date="2019">2019</year>.</mixed-citation>
</ref>
<ref id="ref-25">
<label>[25]</label><mixed-citation publication-type="conf-proc">
<person-group person-group-type="author"><string-name>
<given-names>Y.</given-names> 
<surname>Kim</surname></string-name>
</person-group>, &#x201C;
<article-title>Convolutional neural networks for sentence classification</article-title>,&#x201D; in <conf-name>Proc. of the 2014 Conf. on Empirical Methods in Natural Language Processing (EMNLP)</conf-name>, <conf-loc>Doha, Qatar</conf-loc>, pp. 
<fpage>1746</fpage>&#x2013;
<lpage>1751</lpage>, 
<year iso-8601-date="2014">2014</year>. </mixed-citation>
</ref>
<ref id="ref-26">
<label>[26]</label><mixed-citation publication-type="conf-proc">
<person-group person-group-type="author"><string-name>
<given-names>I.</given-names> 
<surname>Sutskever</surname></string-name>, <string-name>
<given-names>J.</given-names> 
<surname>Martens</surname></string-name> and <string-name>
<given-names>G. E.</given-names> 
<surname>Hinton</surname></string-name>
</person-group>, &#x201C;
<article-title>Generating text with recurrent neural networks</article-title>,&#x201D; in <conf-name>Int. Conf. on Machine Learning (ICML)</conf-name>, <conf-loc>Bellevue, WA, USA</conf-loc>, 
<year iso-8601-date="2011">2011</year>. </mixed-citation>
</ref>
<ref id="ref-27">
<label>[27]</label><mixed-citation publication-type="journal">
<person-group person-group-type="author"><string-name>
<given-names>A.</given-names> 
<surname>Graves</surname></string-name> and <string-name>
<given-names>J.</given-names> 
<surname>Schmidhuber</surname></string-name>
</person-group>, &#x201C;
<article-title>Framewise phoneme classification with bidirectional long short term memory and other neural network architectures</article-title>,&#x201D; 
<source>Neural Networks</source>, vol. 
<volume>18</volume>, no. 
<issue>5&#x2013;6</issue>, pp. 
<fpage>602</fpage>&#x2013;
<lpage>610</lpage>, 
<year iso-8601-date="2005">2005</year>.</mixed-citation>
</ref>
<ref id="ref-28">
<label>[28]</label><mixed-citation publication-type="conf-proc">
<person-group person-group-type="author"><string-name>
<given-names>P.</given-names> 
<surname>Liu</surname></string-name>, <string-name>
<given-names>X.</given-names> 
<surname>Qiu</surname></string-name> and <string-name>
<given-names>X.</given-names> 
<surname>Huang</surname></string-name>
</person-group>, &#x201C;
<article-title>Recurrent neural network for text classification with multi-task learning</article-title>,&#x201D; in <conf-name>Proc. of Int. Joint Conf. on Artificial Intelligence</conf-name>, <conf-loc>New York City, NY, USA</conf-loc>, 
<year iso-8601-date="2016">2016</year>. </mixed-citation>
</ref>
<ref id="ref-29">
<label>[29]</label><mixed-citation publication-type="conf-proc">
<person-group person-group-type="author"><string-name>
<given-names>M. E.</given-names> 
<surname>Peters</surname></string-name>, <string-name>
<given-names>M.</given-names> 
<surname>Neumann</surname></string-name>, <string-name>
<given-names>M.</given-names> 
<surname>Iyyer</surname></string-name>, <string-name>
<given-names>M.</given-names> 
<surname>Gardner</surname></string-name>, <string-name>
<given-names>C.</given-names> 
<surname>Clark</surname></string-name> <etal>et al.</etal>
</person-group><italic>,</italic> &#x201C;
<article-title>Deep contextualized word representations</article-title>,&#x201D; in <conf-name>Proc. of North American Chapter of the Association for Computational Linguistics</conf-name>, <conf-loc>New Orleans, Louisiana</conf-loc>, 
<year iso-8601-date="2018">2018</year>. </mixed-citation>
</ref>
<ref id="ref-30">
<label>[30]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>J.</given-names> <surname>Zhang</surname></string-name>, <string-name><given-names>X.</given-names> <surname>Shi</surname></string-name>, <string-name><given-names>S.</given-names> <surname>Zhao</surname></string-name> and <string-name><given-names>I.</given-names> <surname>King</surname></string-name></person-group>, &#x201C;<article-title>Star-gcn: Stacked and reconstructed graph convolutional networks for recommender systems</article-title>,&#x201D; in <source>Proc. of the twenty-Eighth Int. Joint Conf. on Artificial Intelligence</source>, pp. <fpage>4264</fpage>&#x2013;<lpage>4270</lpage>, <year>2019</year>.</mixed-citation>
</ref>
<ref id="ref-31">
<label>[31]</label><mixed-citation publication-type="conf-proc">
<person-group person-group-type="author"><string-name>
<given-names>J.</given-names> 
<surname>Bruna</surname></string-name>, <string-name>
<given-names>W.</given-names> 
<surname>Zaremba</surname></string-name>, <string-name>
<given-names>A.</given-names> 
<surname>Szlam</surname></string-name> and <string-name>
<given-names>Y.</given-names> 
<surname>LeCun</surname></string-name>
</person-group>, &#x201C;<chapter-title>Spectral networks and locally connected networks on graphs</chapter-title>,&#x201D; in <conf-name>Int. Conf. on Learning Representations (ICLR)</conf-name>, <conf-loc>Banff, AB, Canada</conf-loc>, <year iso-8601-date="2014">2014</year>.</mixed-citation>
</ref>
<ref id="ref-32">
<label>[32]</label><mixed-citation publication-type="conf-proc">
<person-group person-group-type="author"><string-name>
<given-names>T. N.</given-names> 
<surname>Kipf</surname></string-name> and <string-name>
<given-names>M.</given-names> 
<surname>Welling</surname></string-name>
</person-group>, &#x201C;<chapter-title>Semi-supervised classification with graph convolutional networks</chapter-title>,&#x201D; 
<conf-name>Int. Conf. on Learning Representations (ICLR)</conf-name>, <conf-loc>Palais des Congr&#x00E8;s Neptune, Toulon, France</conf-loc>, 
<year iso-8601-date="2017">2017</year>. </mixed-citation>
</ref>
<ref id="ref-33">
<label>[33]</label><mixed-citation publication-type="journal">
<person-group person-group-type="author"><string-name>
<given-names>K.</given-names> 
<surname>Kowsari</surname></string-name>, <string-name>
<given-names>K.</given-names> 
<surname>Jafari Meimandi</surname></string-name>, <string-name>
<given-names>M.</given-names> 
<surname>Heidarysafa</surname></string-name>, <string-name>
<given-names>S.</given-names> 
<surname>Mendu</surname></string-name>, <string-name>
<given-names>L.</given-names> 
<surname>Barnes</surname></string-name> <etal>et al.</etal>
</person-group><italic>,</italic> &#x201C;
<article-title>Text classification algorithms: A survey</article-title>,&#x201D; 
<source>Information</source>, vol. 
<volume>10</volume>, no. 
<issue>4</issue>, pp. 
<fpage>150</fpage>, 
<year iso-8601-date="2019">2019</year>.</mixed-citation>
</ref>
<ref id="ref-34">
<label>[34]</label><mixed-citation publication-type="conf-proc">
<person-group person-group-type="author"><string-name>
<given-names>F. P.</given-names> 
<surname>Shah</surname></string-name> and <string-name>
<given-names>V.</given-names> 
<surname>Patel</surname></string-name>
</person-group>, &#x201C;<chapter-title>A review on feature selection and feature extraction for text classification</chapter-title>,&#x201D; 
<conf-name>Int. Conf. on Wireless Communications, Signal Processing and Networking (WiSPNET)</conf-name>, <conf-loc>Chennai, India</conf-loc>, pp. 
<fpage>2264</fpage>&#x2013;
<lpage>2268</lpage>, 
<year iso-8601-date="2016">2016</year>. </mixed-citation>
</ref>
<ref id="ref-35">
<label>[35]</label><mixed-citation publication-type="journal">
<person-group person-group-type="author"><string-name>
<given-names>F.</given-names> 
<surname>Joseph</surname></string-name> and <string-name>
<given-names>N.</given-names> 
<surname>Ramakrishnan</surname></string-name>
</person-group>, &#x201C;
<article-title>Text categorization using improved k nearest neighbor algorithm</article-title>,&#x201D; 
<source>International Journal of Engineering Trends and Technology</source>, vol. 
<volume>4</volume>, pp. 
<fpage>65</fpage>&#x2013;
<lpage>68</lpage>, 
<year iso-8601-date="2015">2015</year>.</mixed-citation>
</ref>
<ref id="ref-36">
<label>[36]</label><mixed-citation publication-type="journal">
<person-group person-group-type="author"><string-name>
<given-names>M.</given-names> 
<surname>Gori</surname></string-name>, <string-name>
<given-names>G.</given-names> 
<surname>Monfardini</surname></string-name> and <string-name>
<given-names>F.</given-names> 
<surname>Scarselli</surname></string-name>
</person-group>, &#x201C;
<article-title>A new model for learning in graph domains</article-title>,&#x201D; 
<source>In Proc. of IEEE Int. Joint Conf. on Neural Networks</source>, vol. 
<volume>2</volume>, pp. 
<fpage>729</fpage>&#x2013;
<lpage>734</lpage>, 
<year iso-8601-date="2005">2005</year>.</mixed-citation>
</ref>
<ref id="ref-37">
<label>[37]</label><mixed-citation publication-type="journal">
<person-group person-group-type="author"><string-name>
<given-names>A.</given-names> 
<surname>Micheli</surname></string-name>
</person-group>, &#x201C;
<article-title>Neural network for graphs: A contextual constructive approach</article-title>,&#x201D; 
<source>IEEE Transactions on Neural Networks</source>, vol. 
<volume>20</volume>, no. 
<issue>3</issue>, pp. 
<fpage>498</fpage>&#x2013;
<lpage>511</lpage>, 
<year iso-8601-date="2009">2009</year>.</mixed-citation>
</ref>
<ref id="ref-38">
<label>[38]</label><mixed-citation publication-type="journal">
<person-group person-group-type="author"><string-name>
<given-names>F.</given-names> 
<surname>Scarselli</surname></string-name>, <string-name>
<given-names>M.</given-names> 
<surname>Gori</surname></string-name>, <string-name>
<given-names>A. C.</given-names> 
<surname>Tsoi</surname></string-name>, <string-name>
<given-names>M.</given-names> 
<surname>Hagenbuchner</surname></string-name> and <string-name>
<given-names>G.</given-names> 
<surname>Monfardini</surname></string-name>
</person-group>, &#x201C;
<article-title>The graph neural network model</article-title>,&#x201D; 
<source>IEEE Transactions on Neural Networks</source>, vol. 
<volume>20</volume>, no. 
<issue>1</issue>, pp. 
<fpage>61</fpage>&#x2013;
<lpage>80</lpage>, 
<year iso-8601-date="2009">2009</year>.</mixed-citation>
</ref>
<ref id="ref-39">
<label>[39]</label><mixed-citation publication-type="conf-proc">
<person-group person-group-type="author"><string-name>
<given-names>B.</given-names> 
<surname>Perozzi</surname></string-name>, <string-name>
<given-names>R.</given-names> 
<surname>Al-Rfou</surname></string-name> and <string-name>
<given-names>S.</given-names> 
<surname>Skiena</surname></string-name>
</person-group>, &#x201C;
<article-title>Deepwalk: Online learning of social representations</article-title>,&#x201D; in <conf-name>Proc. of the 20th ACM SIGKDD Int. Conf. on Knowledge Discovery and Data Mining</conf-name>, <conf-loc>New York, USA</conf-loc>, pp. 
<fpage>701</fpage>&#x2013;
<lpage>710</lpage>, 
<year iso-8601-date="2014">2014</year>. </mixed-citation>
</ref>
<ref id="ref-40">
<label>[40]</label><mixed-citation publication-type="conf-proc">
<person-group person-group-type="author"><string-name>
<given-names>F.</given-names> 
<surname>Monti</surname></string-name>, <string-name>
<given-names>D.</given-names> 
<surname>Boscaini</surname></string-name>, <string-name>
<given-names>J.</given-names> 
<surname>Masci</surname></string-name>, <string-name>
<given-names>E.</given-names> 
<surname>Rodola</surname></string-name>, <string-name>
<given-names>J.</given-names> 
<surname>Svoboda</surname></string-name> <etal>et al.</etal>
</person-group><italic>,</italic> &#x201C;
<article-title>Geometric deep learning on graphs and manifolds using mixture model cnns</article-title>,&#x201D; in <conf-name>Proc. of the IEEE Conf. on Computer Vision and Pattern Recognition</conf-name>, <conf-loc>Honolulu, HI, USA</conf-loc>, pp. 
<fpage>5115</fpage>&#x2013;
<lpage>5124</lpage>, 
<year iso-8601-date="2017">2017</year>. </mixed-citation>
</ref>
<ref id="ref-41">
<label>[41]</label><mixed-citation publication-type="conf-proc">
<person-group person-group-type="author"><string-name>
<given-names>A.</given-names> 
<surname>Grover</surname></string-name> and <string-name>
<given-names>J.</given-names> 
<surname>Leskovec</surname></string-name>
</person-group>, &#x201C;
<article-title>node2vec: Scalable feature learning for networks</article-title>,&#x201D; in <conf-name>Proc. of the 22nd ACM SIGKDD Int. Conf. on Knowledge Discovery and Data Mining</conf-name>, <conf-loc>San Francisco, California</conf-loc>, pp. 
<fpage>855</fpage>&#x2013;
<lpage>864</lpage>, 
<year iso-8601-date="2016">2016</year>. </mixed-citation>
</ref>
<ref id="ref-42">
<label>[42]</label><mixed-citation publication-type="conf-proc">
<person-group person-group-type="author"><string-name>
<given-names>K.</given-names> 
<surname>Xu</surname></string-name>, <string-name>
<given-names>C.</given-names> 
<surname>Li</surname></string-name>, <string-name>
<given-names>Y.</given-names> 
<surname>Tian</surname></string-name>, <string-name>
<given-names>T.</given-names> 
<surname>Sonobe</surname></string-name>, <string-name>
<given-names>K.</given-names> 
<surname>Kawarabayashi</surname></string-name> <etal>et al.</etal>
</person-group><italic>,</italic> &#x201C;<chapter-title>Representation learning on graphs with jumping knowledge networks</chapter-title>,&#x201D; 
<conf-name>Int. Conf. on Machine Learning (ICML)</conf-name>, <conf-loc>Stockholm, Sweden</conf-loc>, pp. 
<fpage>5453</fpage>&#x2013;
<lpage>5462</lpage>, 
<year iso-8601-date="2018">2018</year>.</mixed-citation>
</ref>
<ref id="ref-43">
<label>[43]</label><mixed-citation publication-type="book">
<person-group person-group-type="author"><string-name>
<given-names>C. W.</given-names> 
<surname>Helstrom</surname></string-name>
</person-group>, 
<source>Quantum detection and estimation theory</source>, vol. 
<volume>3</volume>. 
<publisher-loc>New York</publisher-loc>: 
<publisher-name>Academic press</publisher-name>, 
<year iso-8601-date="1976">1976</year>.</mixed-citation>
</ref>
<ref id="ref-44">
<label>[44]</label><mixed-citation publication-type="journal">
<person-group person-group-type="author"><string-name>
<given-names>P.</given-names> 
<surname>Hyllus</surname></string-name>, <string-name>
<given-names>W.</given-names> 
<surname>Laskowski</surname></string-name>, <string-name>
<given-names>R.</given-names> 
<surname>Krischek</surname></string-name>, <string-name>
<given-names>C.</given-names> 
<surname>Schwemmer</surname></string-name> and <string-name>
<given-names>W.</given-names> 
<surname>Wieczorek</surname></string-name>
</person-group>, &#x201C;
<article-title>Fisher information and multiparticle entanglement</article-title>,&#x201D; 
<source>Physical Review A</source>, vol. 
<volume>85</volume>, no. 
<issue>2</issue>, pp. 
<fpage>1</fpage>, 
<year iso-8601-date="2012">2012</year>.</mixed-citation>
</ref>
<ref id="ref-45">
<label>[45]</label><mixed-citation publication-type="journal">
<person-group person-group-type="author"><string-name>
<given-names>A.</given-names> 
<surname>Fujiwara</surname></string-name> and <string-name>
<given-names>H.</given-names> 
<surname>Nagaoka</surname></string-name>
</person-group>, &#x201C;
<article-title>Quantum fisher metric and estimation for pure state models</article-title>,&#x201D; 
<source>Physics Letters A</source>, vol. 
<volume>201</volume>, no. 
<issue>2&#x2013;3</issue>, pp. 
<fpage>119</fpage>&#x2013;
<lpage>124</lpage>, 
<year iso-8601-date="1995">1995</year>.</mixed-citation>
</ref>
<ref id="ref-46">
<label>[46]</label><mixed-citation publication-type="journal">
<person-group person-group-type="author"><string-name>
<given-names>L. C.</given-names> 
<surname>Venuti</surname></string-name> and <string-name>
<given-names>P.</given-names> 
<surname>Zanardi</surname></string-name>
</person-group>, &#x201C;
<article-title>Quantum critical scaling of the geometric tensors</article-title>,&#x201D; 
<source>Physical Review Letters</source>, vol. 
<volume>99</volume>, no. 
<issue>9</issue>, pp. 
<fpage>095701</fpage>, 
<year iso-8601-date="2007">2007</year>.</mixed-citation>
</ref>
<ref id="ref-47">
<label>[47]</label><mixed-citation publication-type="journal">
<person-group person-group-type="author"><string-name>
<given-names>G.</given-names> 
<surname>T&#x00F3;th</surname></string-name>
</person-group>, &#x201C;
<article-title>Multipartite entanglement and high-precision metrology</article-title>,&#x201D; 
<source>Physical Review A</source>, vol. 
<volume>85</volume>, no. 
<issue>2</issue>, pp. 
<fpage>195</fpage>, 
<year iso-8601-date="2012">2012</year>.</mixed-citation>
</ref>
<ref id="ref-48">
<label>[48]</label><mixed-citation publication-type="journal">
<person-group person-group-type="author"><string-name>
<given-names>H.</given-names> 
<surname>Yuen</surname></string-name> and <string-name>
<given-names>M.</given-names> 
<surname>Lax</surname></string-name>
</person-group>, &#x201C;
<article-title>Multiple-parameter quantum estimation and measurement of nonselfadjoint observables</article-title>,&#x201D; 
<source>IEEE Transactions on Information Theory</source>, vol. 
<volume>19</volume>, no. 
<issue>6</issue>, pp. 
<fpage>740</fpage>&#x2013;
<lpage>750</lpage>, 
<year iso-8601-date="1973">1973</year>.</mixed-citation>
</ref>
<ref id="ref-49">
<label>[49]</label><mixed-citation publication-type="journal">
<person-group person-group-type="author"><string-name>
<given-names>D.</given-names> 
<surname>Petz</surname></string-name>
</person-group>, &#x201C;
<article-title>Monotone metrics on matrix spaces</article-title>,&#x201D; 
<source>Linear Algebra and its Applications</source>, vol. 
<volume>244</volume>, pp. 
<fpage>81</fpage>&#x2013;
<lpage>96</lpage>, 
<year iso-8601-date="1996">1996</year>.</mixed-citation>
</ref>
<ref id="ref-50">
<label>[50]</label><mixed-citation publication-type="journal">
<person-group person-group-type="author"><string-name>
<given-names>J.</given-names> 
<surname>Suzuki</surname></string-name>
</person-group>, &#x201C;
<article-title>Explicit formula for the Holevo bound for two-parameter qubit-state estimation problem</article-title>,&#x201D; 
<source>Journal of Mathematical Physics</source>, vol. 
<volume>57</volume>, no. 
<issue>4</issue>, pp. 
<fpage>042201</fpage>, 
<year iso-8601-date="2016">2016</year>.</mixed-citation>
</ref>
<ref id="ref-51">
<label>[51]</label><mixed-citation publication-type="conf-proc">
<person-group person-group-type="author"><string-name>
<given-names>Z.</given-names> 
<surname>Yang</surname></string-name>, <string-name>
<given-names>W.</given-names> 
<surname>Cohen</surname></string-name> and <string-name>
<given-names>R.</given-names> 
<surname>Salakhudinov</surname></string-name>
</person-group>, &#x201C;
<article-title>Revisiting semi-supervised learning with graph embeddings</article-title>,&#x201D; in <conf-name>Int. Conf. on Machine Learning</conf-name>, <conf-loc>New York City, NY, USA</conf-loc>, pp. 
<fpage>40</fpage>&#x2013;
<lpage>48</lpage>, 
<year iso-8601-date="2016">2016</year>. </mixed-citation>
</ref>
<ref id="ref-52">
<label>[52]</label><mixed-citation publication-type="conf-proc">
<person-group person-group-type="author"><string-name>
<given-names>K.</given-names> 
<surname>Sun</surname></string-name>, <string-name>
<given-names>P.</given-names> 
<surname>Koniusz</surname></string-name> and <string-name>
<given-names>Z.</given-names> 
<surname>Wang</surname></string-name>
</person-group>, &#x201C;
<article-title>Fisher-bures adversary graph convolutional networks</article-title>,&#x201D; in <conf-name>Proc. of The 35th Uncertainty in Artificial Intelligence Conf.</conf-name>, 
<publisher-name>PMLR</publisher-name>, <conf-loc>Tel Aviv, Israel</conf-loc>, pp. 
<fpage>465</fpage>&#x2013;
<lpage>475</lpage>, 
<year iso-8601-date="2019">2019</year>. </mixed-citation>
</ref>
</ref-list>
</back>
</article>