Example #1
    def chebyshev5(self, x, L, Fout, K):
        N, M, Fin = x.get_shape()
        N, M, Fin = int(N), int(M), int(Fin)
        # Rescale Laplacian and store as a TF sparse tensor. Copy to not modify the shared L.
        L = scipy.sparse.csr_matrix(L)
        L = graph.rescale_L(L, lmax=2)
        L = L.tocoo()
        indices = np.column_stack((L.row, L.col))
        L = tf.SparseTensor(indices, L.data, L.shape)
        L = tf.sparse_reorder(L)
        # Transform to Chebyshev basis
        x0 = tf.transpose(x, perm=[1, 2, 0])  # M x Fin x N
        x0 = tf.reshape(x0, [M, Fin * N])  # M x Fin*N
        x = tf.expand_dims(x0, 0)  # 1 x M x Fin*N

        def concat(x, x_):
            x_ = tf.expand_dims(x_, 0)  # 1 x M x Fin*N
            return tf.concat([x, x_], axis=0)  # K x M x Fin*N

        if K > 1:
            x1 = tf.sparse_tensor_dense_matmul(L, x0)
            x = concat(x, x1)
        for k in range(2, K):
            x2 = 2 * tf.sparse_tensor_dense_matmul(L, x1) - x0  # M x Fin*N
            x = concat(x, x2)
            x0, x1 = x1, x2
        x = tf.reshape(x, [K, M, Fin, N])  # K x M x Fin x N
        x = tf.transpose(x, perm=[3, 1, 2, 0])  # N x M x Fin x K
        x = tf.reshape(x, [N * M, Fin * K])  # N*M x Fin*K
        # Filter: Fin*Fout filters of order K, i.e. one filterbank per feature pair.
        W = self._weight_variable([Fin * K, Fout], regularization=False)
        x = tf.matmul(x, W)  # N*M x Fout
        return tf.reshape(x, [N, M, Fout])  # N x M x Fout
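All of these examples begin by rescaling the graph Laplacian with graph.rescale_L so that its spectrum lies in the Chebyshev domain [-1, 1]. The helper itself is not shown on this page; a minimal sketch, assuming the usual convention L_hat = (2 / lmax) * L - I:

import scipy.sparse

def rescale_L(L, lmax=2):
    # Sketch: map the eigenvalues of L from [0, lmax] onto [-1, 1].
    M, M = L.shape
    I = scipy.sparse.identity(M, format='csr', dtype=L.dtype)
    return L * (2.0 / lmax) - I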
Example #2
    def chebyshev2(self, x, L, Fout, K):
        """
		Filtering with Chebyshev interpolation
		Implementation: numpy.

		Data: x of size N x M x F
			N: number of signals
			M: number of vertices
			F: number of features per signal per vertex
		"""
        N, M, Fin = x.get_shape()
        N, M, Fin = int(N), int(M), int(Fin)
        # Rescale Laplacian. Copy to not modify the shared L.
        L = scipy.sparse.csr_matrix(L)
        L = graph.rescale_L(L, lmax=2)
        # Transform to Chebyshev basis
        x = tf.transpose(x, perm=[1, 2, 0])  # M x Fin x N
        x = tf.reshape(x, [M, Fin * N])  # M x Fin*N

        def chebyshev(x):
            return graph.chebyshev(L, x, K)

        x = tf.py_func(chebyshev, [x], [tf.float32])[0]  # K x M x Fin*N
        x = tf.reshape(x, [K, M, Fin, N])  # K x M x Fin x N
        x = tf.transpose(x, perm=[3, 1, 2, 0])  # N x M x Fin x K
        x = tf.reshape(x, [N * M, Fin * K])  # N*M x Fin*K
        # Filter: Fin*Fout filters of order K, i.e. one filterbank per feature pair.
        W = self._weight_variable([Fin * K, Fout], regularization=False)
        x = tf.matmul(x, W)  # N*M x Fout
        return tf.reshape(x, [N, M, Fout])  # N x M x Fout
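The heavy lifting here is delegated through tf.py_func to graph.chebyshev, which is not shown. A NumPy sketch of what it presumably computes (the stacked basis T_0 x, ..., T_{K-1} x via the three-term recurrence):

import numpy as np

def chebyshev(L, X, K):
    # Sketch: return the K x M x N stack [T_0(L) X, ..., T_{K-1}(L) X].
    M, N = X.shape
    Xt = np.empty((K, M, N), L.dtype)
    Xt[0, ...] = X
    if K > 1:
        Xt[1, ...] = L.dot(X)
    for k in range(2, K):
        Xt[k, ...] = 2 * L.dot(Xt[k - 1, ...]) - Xt[k - 2, ...]
    return Xt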
Example #3

    def chebyshev5(self, x, L, Fout, K):
        N, M, Fin = x.get_shape()
        N, M, Fin = int(N), int(M), int(Fin)
        # Rescale Laplacian and store as a TF sparse tensor. Copy to not modify the shared L.
        L = scipy.sparse.csr_matrix(L)
        L = graph.rescale_L(L, lmax=2)
        L = L.tocoo()
        indices = np.column_stack((L.row, L.col))
        L = tf.SparseTensor(indices, L.data, L.shape)
        L = tf.sparse_reorder(L)
        # Transform to Chebyshev basis
        x0 = tf.transpose(x, perm=[1, 2, 0])  # M x Fin x N
        x0 = tf.reshape(x0, [M, Fin * N])  # M x Fin*N
        x = tf.expand_dims(x0, 0)  # 1 x M x Fin*N

        def concat(x, x_):
            x_ = tf.expand_dims(x_, 0)  # 1 x M x Fin*N
            return tf.concat([x, x_], axis=0)  # K x M x Fin*N

        if K > 1:
            x1 = tf.sparse_tensor_dense_matmul(L, x0)
            x = concat(x, x1)
        for k in range(2, K):
            x2 = 2 * tf.sparse_tensor_dense_matmul(L, x1) - x0  # M x Fin*N
            x = concat(x, x2)
            x0, x1 = x1, x2
        x = tf.reshape(x, [K, M, Fin, N])  # K x M x Fin x N
        x = tf.transpose(x, perm=[3, 1, 2, 0])  # N x M x Fin x K
        x = tf.reshape(x, [N * M, Fin * K])  # N*M x Fin*K
        # Filter: Fin*Fout filters of order K, i.e. one filterbank per feature pair.
        W = self._weight_variable([Fin * K, Fout], regularization=False)
        x = tf.matmul(x, W)  # N*M x Fout
        return tf.reshape(x, [N, M, Fout])  # N x M x Fout
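For reference, every chebyshev5 variant in this collection implements the K-term Chebyshev spectral filter

    y = \sum_{k=0}^{K-1} T_k(\hat{L}) \, x \, \theta_k,    \hat{L} = \frac{2}{\lambda_{\max}} L - I,

with the basis built by the recurrence T_0(\hat{L})x = x, T_1(\hat{L})x = \hat{L}x, and T_k(\hat{L})x = 2\hat{L} T_{k-1}(\hat{L})x - T_{k-2}(\hat{L})x. Reshaping to N*M x Fin*K and multiplying once by W applies all Fin*K -> Fout filter taps in a single matmul.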
Example #4
def chebyshev2(x, L, Fout, K, name='cheby2'):
    with tf.variable_scope(name):

        _, M, Fin = x.get_shape()
        M, Fin = int(M), int(Fin)
        L = scipy.sparse.csr_matrix(L)
        L = graph.rescale_L(L, lmax=2)

        x = tf.transpose(x, perm=[1, 2, 0])  # M x Fin x N
        x = tf.reshape(x, [M, -1])  # M x Fin*N (batch size N may be unknown)

        def chebyshev(x):
            return graph.chebyshev(L, x, K)

        x = tf.py_func(chebyshev, [x], [tf.float32])[0]  # K x M x Fin*N
        x = tf.reshape(x, [K, M, Fin, -1])  # K x M x Fin x N
        x = tf.transpose(x, perm=[3, 1, 2, 0])  # N x M x Fin x K
        x = tf.reshape(x, [-1, Fin * K])  # N*M x Fin*K

        W = _weight_variable([Fin * K, Fout], regularization=True)
        x = tf.matmul(x, W)  # N*M x Fout

        return tf.reshape(x, [-1, M, Fout])  # N x M x Fout
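Because every reshape here uses -1 for the batch dimension, this free-function variant accepts inputs whose batch size is unknown at graph-construction time. A hedged usage sketch (the names M, Fin, and lap are assumptions, not part of the snippet):

x_ph = tf.placeholder(tf.float32, [None, M, Fin])  # batch size left dynamic
y = chebyshev2(x_ph, lap, Fout=32, K=5)            # -> [None, M, 32]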
Example #5
    def chebyshev_p(self, x, L, Fout, K):
        # Fout: num of output features
        # N: number of signal, batch size
        # V: number of vertices, graph size
        # Fin: number of features per signal
        N, V, Fin = x.get_shape()
        V, Fin = int(V), int(Fin)
        L = scipy.sparse.csr_matrix(L)
        # convert to a list of chebyshev matrix
        base_L = graph.rescale_L(L, lmax=2)
        coef_list = chebyshev_list(K)
        chebyshev_Ls = []
        for coef in coef_list:
            L_k = 0
            for i in range(len(coef)):
                L_k += coef[i] * (base_L**i)
            chebyshev_Ls.append(L_k)

        # convert to sparseTensor
        def convert2Sparse(L):
            L = L.tocoo()
            indices = np.column_stack((L.row, L.col))
            L = tf.SparseTensor(indices, L.data, L.shape)
            return tf.sparse_reorder(L)

        chebyshev_Ls = [convert2Sparse(L_k) for L_k in chebyshev_Ls]

        # chebyshev filtering
        # N x V x Fin -> V x Fin x N -> V x Fin*N
        x = tf.transpose(x, perm=[1, 2, 0])
        x = tf.reshape(x, [V, -1])

        x_filtered = []
        for T in chebyshev_Ls:
            # T: V x V, x: V x Fin*N, output: V x Fin*N
            x_filtered.append(tf.sparse_tensor_dense_matmul(T, x))


        # K x V x Fin*N -> K x V x Fin x N -> N x V x Fin x K
        x = tf.stack(x_filtered)
        x = tf.reshape(x, [K, V, Fin, -1])
        x = tf.transpose(x, perm=[3, 1, 2, 0])

        x = tf.reshape(x, [-1, Fin * K])
        W = self._weight_variable([Fin * K, Fout], regularization=False)
        x = tf.matmul(x, W)  # N*V x Fout
        x = tf.nn.relu(x)
        return tf.reshape(x, [-1, V, Fout])
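chebyshev_list is not defined in the snippet. From the way its output is used, it presumably returns the monomial coefficients of T_0, ..., T_{K-1}, so that each entry of chebyshev_Ls equals T_k(base_L). A plausible sketch, derived from the recurrence T_k(x) = 2x T_{k-1}(x) - T_{k-2}(x):

def chebyshev_list(K):
    # Sketch: coefficient lists [c_0, c_1, ...] with T_k(x) = sum_i c_i x^i.
    coefs = [[1], [0, 1]][:K]
    for k in range(2, K):
        prev, prev2 = coefs[k - 1], coefs[k - 2]
        c = [0] + [2 * a for a in prev]   # 2x * T_{k-1}
        for i, b in enumerate(prev2):     # minus T_{k-2}
            c[i] -= b
        coefs.append(c)
    return coefs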
Example #6
def cheby_conv(x, L, lmax, feat_out, K, W):
    '''
    x : [batch_size, N_node, feat_in] - input of each time step
    nSample : number of samples = batch_size
    nNode : number of nodes in the graph
    feat_in : number of input features
    feat_out : number of output features
    L : graph Laplacian
    lmax : largest eigenvalue of L (used to rescale L)
    K : kernel size (number of Chebyshev coefficients)
    W : cheby_conv weight, [K * feat_in, feat_out]
    '''
    nSample, nNode, feat_in = x.get_shape()
    nSample, nNode, feat_in = int(nSample), int(nNode), int(feat_in)
    L = graph.rescale_L(L, lmax)  # rescale Laplacian eigenvalues to [-1, 1]
    L = L.tocoo()

    indices = np.column_stack((L.row, L.col))
    L = tf.SparseTensor(indices, L.data, L.shape)
    L = tf.sparse_reorder(L)

    x0 = tf.transpose(x, perm=[1, 2, 0])  # [nNode, feat_in, nSample]
    x = tf.expand_dims(x0, 0)  # [1, nNode, feat_in, nSample]

    def concat(x, x_):
        x_ = tf.expand_dims(x_, 0)
        return tf.concat([x, x_], axis=0)

    # Densify L so the recurrence can run via einsum over the 3-D tensors.
    L_dense = tf.sparse_tensor_to_dense(L)
    if K > 1:
        x1 = tf.einsum('ij,jkn->ikn', L_dense, x0)
        x = concat(x, x1)

    for k in range(2, K):
        x2 = 2 * tf.einsum('ij,jkn->ikn', L_dense, x1) - x0
        x = concat(x, x2)
        x0, x1 = x1, x2

    x = tf.reshape(x, [K, nNode, feat_in, nSample])
    x = tf.transpose(x, perm=[3, 1, 2, 0])
    x = tf.reshape(x, [nSample * nNode, feat_in * K])

    x = tf.matmul(x, W)  # no bias term
    out = tf.reshape(x, [nSample, nNode, feat_out])
    return out
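Unlike the other variants, cheby_conv takes its weight matrix as an argument. A hedged usage sketch (batch_size, nNode, feat_in, feat_out, K, and the scipy Laplacian lap are assumptions; the batch size must be static because the function calls int() on it):

W = tf.get_variable('cheby_w', [K * feat_in, feat_out],
                    initializer=tf.glorot_uniform_initializer())
x_ph = tf.placeholder(tf.float32, [batch_size, nNode, feat_in])
y = cheby_conv(x_ph, lap, lmax=2, feat_out=feat_out, K=K, W=W)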
Example #7
    def __init__(self, L, F, K):
        super(cgcnn2_5, self).__init__()
        L = graph.rescale_L(L, lmax=2)  # Graph Laplacian, M x M
        L = L.tocoo()
        data = L.data
        indices = np.empty((L.nnz, 2), dtype=np.int64)  # SparseTensor indices must be int64
        indices[:, 0] = L.row
        indices[:, 1] = L.col
        L = tf.SparseTensor(indices, data, L.shape)
        self.L = tf.sparse_reorder(L)
        self.F = F  # Number of filters
        self.K = K  # Polynomial order, i.e. filter size (number of hops)
Example #8
    def chebyshev5(self, x, L, Fout, K):
        """
        Filtering with Chebyshev interpolation
        Implementation: numpy.
        
        Data: x of size N x F x M
            N: number of signals
            F: number of features per signal per vertex
            M: number of vertices
        """

        N, Fin, M = x.shape
        N, Fin, M = int(N), int(Fin), int(M)
        # Rescale Laplacian and store as a TF sparse tensor. Copy to not modify the shared L.
        L = scipy.sparse.csr_matrix(L)
        L = graph.rescale_L(L, lmax=2)
        L = L.tocoo()
        indices = np.column_stack((L.row, L.col))
        idx = torch.LongTensor(indices.T)
        vals = torch.tensor(L.data, dtype=torch.float32)
        L = torch.sparse.FloatTensor(idx, vals, torch.Size(L.shape))
        L = L.coalesce()  # order the indices; analogous to tf.sparse_reorder
        L = L.cuda()

        # Transform to Chebyshev basis
        x0 = x.permute(2, 1, 0)  # M x Fin x N

        x0 = x0.contiguous().view(M, -1)  # M x Fin*N
        x = x0.unsqueeze(0)  # 1 x M x Fin*N

        def concat(x, x_):
            x_ = x_.unsqueeze(0)  # 1 x M x Fin*N
            return torch.cat([x, x_], 0)  # K x M x Fin*N

        if K > 1:
            x1 = torch.sparse.mm(L, x0)
            x = concat(x, x1)
        for k in range(2, K):
            x2 = 2 * torch.sparse.mm(L, x1) - x0  # M x Fin*N
            x = concat(x, x2)
            x0, x1 = x1, x2
        x = x.view([K, M, Fin, N])  # K x M x Fin x N
        x = x.permute(3, 1, 2, 0)  # N x M x Fin x K
        x = x.contiguous().view([N * M, Fin * K])  # N*M x Fin*K
        # Filter: Fin*Fout filters of order K, i.e. one filterbank per feature pair.

        x = torch.mm(x, self.weight)  # N*M x Fout
        x = x.view(N, M, Fout).permute(0, 2, 1).contiguous()  # N x Fout x M
        x = x + self.bias
        return x  # N x Fout x M
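On current PyTorch the legacy torch.sparse.FloatTensor constructor is superseded by torch.sparse_coo_tensor; a minimal, self-contained sketch of the same Laplacian conversion (the function name is an assumption):

import numpy as np
import scipy.sparse
import torch

def scipy_to_torch_sparse(L, device='cuda'):
    # Sketch: scipy CSR -> coalesced torch COO tensor on the given device.
    L = L.tocoo()
    idx = torch.from_numpy(np.vstack((L.row, L.col))).long()
    vals = torch.from_numpy(L.data.astype(np.float32))
    return torch.sparse_coo_tensor(idx, vals, L.shape, device=device).coalesce()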
Example #9
    def chebyshev5(self, x, L, Fout, K):
        N, M, Fin, B = x.get_shape()
        N, M, Fin, B = int(N), int(M), int(Fin), int(B)
        # Rescale Laplacian and store as a TF sparse tensor. Copy to not modify
        # the shared L.

        L = scipy.sparse.csr_matrix(L)
        L = graph.rescale_L(L, lmax=2)
        L = L.tocoo()
        indices = np.column_stack((L.row, L.col))
        L = tf.SparseTensor(indices, L.data.astype(np.float32), L.shape)
        L = tf.sparse_reorder(L)
        # Transform to Chebyshev basis
        list_tensor = []
        for i in range(B):
            hist_i = x[:, :, :, i]
            hist_i = tf.reshape(hist_i, (N, M, Fin))
            x0 = tf.transpose(hist_i, perm=[1, 2, 0])  # M x Fin x N
            x0 = tf.reshape(x0, [M, Fin * N])  # M x Fin*N
            hist_i = tf.expand_dims(x0, 0)  # 1 x M x Fin*N

            def concat(x, x_):
                x_ = tf.expand_dims(x_, 0)  # 1 x M x Fin*N
                return tf.concat([x, x_], axis=0)  # K x M x Fin*N

            # x_k = 2 * L_hat * x_{k-1} - x_{k-2};  x_0 = x,  x_1 = L_hat * x
            if K > 1:
                x1 = tf.sparse_tensor_dense_matmul(L, x0)
                hist_i = concat(hist_i, x1)

            for k in range(2, K):
                x2 = 2 * tf.sparse_tensor_dense_matmul(L, x1) - x0  # M x Fin*N
                hist_i = concat(hist_i, x2)
                x0, x1 = x1, x2

            hist_i = tf.reshape(hist_i, [K, M, Fin, N])  # K x M x Fin x N
            hist_i = tf.transpose(hist_i, perm=[3, 1, 2, 0])  # N x M x Fin x K
            hist_i = tf.reshape(hist_i, [N * M, Fin * K])  # N*M x Fin*K
            # Filter: Fin*Fout filters of order K, i.e. one filterbank per feature
            # pair.
            W = self._weight_variable(
                [Fin * K, Fout], index=i, regularization=True)
            hist_i = tf.matmul(hist_i, W)  # N*M x Fout
            hist_i = tf.reshape(hist_i, [N, M, Fout])

            hist_i = tf.expand_dims(hist_i, -1)
            list_tensor.append(hist_i)
        gconvoluted = tf.concat(list_tensor, axis=-1)

        return gconvoluted  # N x M x Fout x B
Example #10
    def chebyshev(self, x, L, Fout, K):
        # Fout: num of output features
        # N: number of signal, batch size
        # V: number of vertices, graph size
        # Fin: number of features per signal
        N, V, Fin = x.get_shape()
        V, Fin = int(V), int(Fin)
        L = scipy.sparse.csr_matrix(L)
        # Rescale Laplacian eigenvalues to [-1, 1]
        L = graph.rescale_L(L, lmax=2)

        # convert to sparseTensor
        def convert2Sparse(L):
            L = L.tocoo()
            indices = np.column_stack((L.row, L.col))
            L = tf.SparseTensor(indices, L.data, L.shape)
            return tf.sparse_reorder(L)

        L = convert2Sparse(L)

        # chebyshev filtering
        # N x V x Fin -> V x Fin x N -> V x Fin*N
        x = tf.transpose(x, perm=[1, 2, 0])
        x = tf.reshape(x, [V, -1])
        x0 = x
        x = tf.expand_dims(x, 0)

        def concat(x, x_):
            x_ = tf.expand_dims(x_, 0)  # 1 x V x Fin*N
            return tf.concat([x, x_], axis=0)  # K x V x Fin*N

        if K > 1:
            x1 = tf.sparse_tensor_dense_matmul(L, x0)
            x = concat(x, x1)

        for k in range(2, K):
            x2 = 2 * tf.sparse_tensor_dense_matmul(L, x1) - x0
            x = concat(x, x2)
            x0, x1 = x1, x2

        # K x V x Fin*N -> K x V x Fin x N -> N x V x Fin x K -> N*V x Fin*K
        x = tf.reshape(x, [K, V, Fin, -1])
        x = tf.transpose(x, perm=[3, 1, 2, 0])
        x = tf.reshape(x, [-1, Fin * K])

        W = self._weight_variable([Fin * K, Fout], regularization=False)
        x = tf.matmul(x, W)  # N*V x Fout
        return tf.reshape(x, [-1, V, Fout])
Example #11
def chebyshev5(x, L, Fout, K, name='cheby'):
    with tf.variable_scope(name):
        list_size = tf.shape(x)  # dynamic shape: the batch dimension may be None statically
        N = list_size[0]
        _, M, Fin = x.get_shape()
        M, Fin = int(M), int(Fin)
        # Rescale Laplacian and store as sparse Tensor

        L = scipy.sparse.csr_matrix(L)
        L = graph.rescale_L(L, lmax=2)
        L = L.tocoo()
        indices = np.column_stack((L.row, L.col))
        L = tf.SparseTensor(indices, L.data, L.shape)
        L = tf.sparse_reorder(L)

        x0 = tf.transpose(x, perm=[1, 2, 0])  # M x Fin x N
        x0 = tf.reshape(x0, [M, Fin * N])  # M x Fin*N
        x = tf.expand_dims(x0, 0)  # 1 x M x Fin*N

        def concat(x, x_):
            x_ = tf.expand_dims(x_, 0)  # 1 x M x Fin*N
            return tf.concat([x, x_], axis=0)  # K x M x Fin*N

        if K > 1:
            x1 = tf.sparse_tensor_dense_matmul(L, x0)
            x = concat(x, x1)
        for k in range(2, K):
            x2 = 2 * tf.sparse_tensor_dense_matmul(L, x1) - x0  # M x Fin*N
            x = concat(x, x2)
            x0, x1 = x1, x2
        x = tf.reshape(x, [K, M, Fin, N])  # K x M x Fin x N
        x = tf.transpose(x, perm=[3, 1, 2, 0])  # N x M x Fin x K
        x = tf.reshape(x, [N * M, Fin * K])  # N*M x Fin*K
        W = _weight_variable([Fin * K, Fout], regularization=True)
        x = tf.matmul(x, W)  # N*M x Fout
        return tf.reshape(x, [N, M, Fout])  # N x M x Fout
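Note that N here is a scalar Tensor rather than a Python int, yet it appears inside the Python lists passed to tf.reshape. TF 1.x auto-packs a mixed list of ints and scalar Tensors into a 1-D shape tensor, so the final reshape is equivalent to the explicit form:

shape = tf.stack([N * M, Fin * K])  # N is a scalar int32 Tensor
x = tf.reshape(x, shape)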
Example #12
    def __init__(self, L, F, K):
        super(cgcnn2_3, self).__init__()
        L = graph.rescale_L(L, lmax=2)  # Graph Laplacian, M x M
        self.L = L.toarray()  # store the Laplacian densely
        self.F = F  # Number of filters
        self.K = K  # Polynomial order, i.e. filter size (number of hops)
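With the Laplacian kept dense (self.L is an M x M ndarray), the Chebyshev basis can be built with plain matrix products. A minimal NumPy sketch of the recurrence such a class would run in its forward pass (the function name and layout are assumptions):

import numpy as np

def cheb_basis_dense(L, x, K):
    # Sketch: L is dense M x M, x is M x Fin*N; returns K x M x Fin*N.
    xs = [x]
    if K > 1:
        xs.append(L @ x)
    for _ in range(2, K):
        xs.append(2 * (L @ xs[-1]) - xs[-2])
    return np.stack(xs)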