Example #1
    def test_12_merge_graph(self):
        # 3 nodes, 2 edges; edge rows are [src, dst, edge features...]
        Xa = np.array([[1.0, 2.0], [6.3, 1.0], [4.3, -2.0]])
        Ea = np.array([[0, 1, 1, 0.5], [1, 2, 0, 0.2]])

        # 2 nodes, 1 edge
        Xb = np.array([[6.3, 1.0], [1.3, -2.0]])
        Eb = np.array([[0, 1, 1, 0.5]])

        gA = GCNDataset('GA')
        gA.X = Xa
        gA.E = Ea

        gB = GCNDataset('GB')
        gB.X = Xb
        gB.E = Eb

        print('Graph A')
        print(gA.X, gA.E.shape)

        print('Graph B')
        print(gB.X, gB.E.shape)

        gc = GCNDataset.merge_graph(gA, gB)

        print(gc.X)
        print(gc.E)

        # TODO: test on Y too
        # merged graph: 3 + 2 nodes and 2 + 1 edges
        self.assertEqual(5, gc.X.shape[0])
        self.assertEqual(3, gc.E.shape[0])
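A minimal sketch, not part of the original test, of what a merge like this is typically expected to produce, assuming merge_graph stacks node features and offsets the second graph's edge endpoints by the first graph's node count (consistent with the 5-node / 3-edge assertions above):

import numpy as np

def merge_graph_sketch(Xa, Ea, Xb, Eb):
    # merged node features: stack the two node feature matrices
    X = np.vstack([Xa, Xb])
    # edge rows are [src, dst, edge features...]: shift the second graph's
    # node indices by the first graph's node count, then stack the edges
    Eb_shifted = Eb.copy()
    Eb_shifted[:, :2] += Xa.shape[0]
    E = np.vstack([Ea, Eb_shifted])
    return X, E  # for the arrays above: X is (5, 2), E is (3, 4)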
Example #2
    def convert_X_to_GCNDataset(self, X):
        """
        Same code as convert_lX_lY_to_GCNDataset above, dedicated to the predict mode (no need for Y)
        """
        graph_id = 0

        nf = X[0]
        edge = X[1]
        ef = X[2]
        nb_node = nf.shape[0]

        graph = GCNDataset(str(graph_id))
        graph.X = nf
        graph.Y = -np.ones(
            (nb_node, len(self.labelBinarizer.classes_)), dtype='i')

        # print(edger)
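        # build a sparse adjacency matrix from the [src, dst] columns of the edge array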
        A1 = sp.coo_matrix((np.ones(edge.shape[0]), (edge[:, 0], edge[:, 1])),
                           shape=(nb_node, nb_node))
        # A2 = sp.coo_matrix((np.ones(edger.shape[0]), (edger[:, 0], edger[:, 1])), shape=(nb_node, nb_node))
        graph.A = A1  # + A2

        # JL: unused??   edge_normalizer = Normalizer()
        # Normalize EA

        E0 = np.hstack([edge, ef])  # check order
        # E1 = np.hstack([edger, efr])  # check order

        graph.E = E0
        #graph.compute_NA()
        graph.compute_NodeEdgeMat()

        return graph
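For reference, the layout of the X argument as inferred from the indexing above (X[0], X[1], X[2]); the concrete values and the 'model' instance below are hypothetical:

import numpy as np

nf = np.array([[1.0, 0.5], [0.0, 1.0]])  # node features, one row per node
edge = np.array([[0, 1]])                # edges as integer [src, dst] node indices
ef = np.array([[0.3]])                   # edge features, one row per edge
X = [nf, edge, ef]
# graph = model.convert_X_to_GCNDataset(X)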
Example #3
    def convert_lX_lY_to_GCNDataset(self,
                                    lX,
                                    lY,
                                    training=False,
                                    test=False,
                                    predict=False):
        gcn_list = []
        graph_id = 0

        # This has state information here --> move that to DU_Model_ECN ...
        lys = []
        for _, ly in zip(lX, lY):
            lys.extend(list(ly))
        #print (lys)

        if training:
            self.labelBinarizer.fit(lys)

        for lx, ly in zip(lX, lY):
            nf = lx[0]
            edge = lx[1]
            ef = lx[2]
            nb_node = nf.shape[0]

            graph = GCNDataset(str(graph_id))
            graph.X = nf
            if training or test:
                graph.Y = self.labelBinarizer.transform(ly)

            elif predict:
                graph.Y = -np.ones(
                    (nb_node, len(self.labelBinarizer.classes_)), dtype='i')
            else:
                raise Exception(
                    'Invalid usage: one of training, test, predict should be True')
            # We are making the adjacency matrix here

            # print(edger)
            A1 = sp.coo_matrix(
                (np.ones(edge.shape[0]), (edge[:, 0], edge[:, 1])),
                shape=(nb_node, nb_node))
            # A2 = sp.coo_matrix((np.ones(edger.shape[0]), (edger[:, 0], edger[:, 1])), shape=(nb_node, nb_node))
            graph.A = A1  # + A2

            # JL: unused??   edge_normalizer = Normalizer()
            # Normalize EA

            E0 = np.hstack([edge, ef])  # check order
            # E1 = np.hstack([edger, efr])  # check order

            graph.E = E0
            #graph.compute_NA()
            graph.compute_NodeEdgeMat()

            gcn_list.append(graph)
            graph_id += 1

        return gcn_list
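A hypothetical call sequence for the three flags (lX_trn, lY_trn, lX_tst, lY_tst and 'model' are placeholders): training=True fits self.labelBinarizer on the labels, test=True reuses the already fitted binarizer, and predict=True fills Y with -1 placeholders.

# each element of lX is a [node_features, edges, edge_features] triple,
# each element of lY the matching per-node label array
gcn_trn = model.convert_lX_lY_to_GCNDataset(lX_trn, lY_trn, training=True)
gcn_tst = model.convert_lX_lY_to_GCNDataset(lX_tst, lY_tst, test=True)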
Example #4
def get_graph_test():
    # For graph attention net
    X = np.array([[1.0, 0.5], [0.5, 0.5], [0.0, 1.0]], dtype='float32')
    E = np.array([[0, 1, 1.0], [1, 0, 1.0], [2, 1, 1.0], [1, 2, 1.0]],
                 dtype='float32')
    Y = np.array([[1, 0], [0, 1], [0, 1]], dtype='int32')

    gcn = GCNDataset('UT_test_1')
    gcn.X = X
    gcn.E = E
    gcn.Y = Y
    gcn.compute_NodeEdgeMat()
    return gcn
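A quick sanity check of this fixture; the shapes follow directly from the arrays above:

gcn = get_graph_test()
assert gcn.X.shape == (3, 2)  # 3 nodes, 2 node features
assert gcn.E.shape == (4, 3)  # 4 directed edges: [src, dst, edge feature]
assert gcn.Y.shape == (3, 2)  # one-hot labels over 2 classes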
Example #5
    def test_logit_convolve(self):
        # 3 nodes a, b, c with edges a<->b and c->b (node b has in-degree 2)
        X = np.array([[1.0, 2.0], [6.3, 1.0], [4.3, -2.0]])
        Y = np.array([[1, 0], [0, 1.0], [1.0, 0.0]])
        E = np.array([
            [0, 1, 1.0, 1, 0],  #edge a->b
            [1, 0, 1.0, 0, 1],  #edge b->a
            [2, 1, 1.0, 0.0, 1.0]
        ])

        nb_node = 3
        gA = GCNDataset('GLogitConvolve')
        gA.X = X
        gA.Y = Y
        gA.E = E
        gA.A = sp.coo_matrix((np.ones(E.shape[0]), (E[:, 0], E[:, 1])),
                             shape=(nb_node, nb_node))

        gA.compute_NodeEdgeMat()
        gA.compute_NA()

        # Test in_degree / out_degree
        print(gA.in_degree, gA.out_degree)
        self.assertAlmostEqual(2, gA.in_degree[1])
        print(gA.NA_indegree)

        self.assertAlmostEqual(0.5, gA.NA_indegree[1, 0])
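        # node b (index 1) has in-degree 2, so NA_indegree weights each of its
        # incoming edges by 1/2, i.e. the adjacency appears normalized by in-degree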
        #self.assertAlmostEqual(2, gA.indegree[1])
        # now assuming P(Y|a)=[1,0], P(Y|b)=[0.5,0.5] and P(Y|c)=[0.8,0.2]
        pY = np.array([[1, 0], [0.5, 0.5], [0.8, 0.2]])
        # Node b has two incoming edges
        # Yt=[0 1;1 0]

        Yt = np.array([[0.0, 1.0], [1.0, 0.0]])

        pY_Yt = tf.matmul(pY, Yt, transpose_b=True)

        Yt_sum = EdgeConvNet.logitconvolve_fixed(pY, Yt, gA.NA_indegree)

        init = tf.global_variables_initializer()
        with tf.Session() as session:
            session.run(init)
            Ytt = session.run(pY_Yt)
            print(Ytt)
            Res = session.run(Yt_sum)
            print(Res)