Example No. 1
def forward(self, input, adj, deg):
    support = torch.mm(input, self.weight)   # X W
    if adj is not None:
        output = torch.spmm(adj, support)    # A (X W)
    else:
        output = support
    if self.bias is not None:
        return output + self.bias
    else:
        return output
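Most of the layers in this collection follow the same two-step GCN pattern: a dense feature transform with torch.mm followed by sparse neighborhood aggregation with torch.spmm. A minimal self-contained sketch of that pattern (the 3-node graph, feature sizes, and tensor names below are made up for illustration):

import torch

# Made-up toy graph: 3 nodes with edges 0->1, 1->2, 2->0.
edge_index = torch.tensor([[0, 1, 2], [1, 2, 0]])
values = torch.ones(edge_index.shape[1])
adj = torch.sparse_coo_tensor(edge_index, values, (3, 3))

x = torch.randn(3, 4)           # node features
weight = torch.randn(4, 2)      # layer weight

support = torch.mm(x, weight)   # dense transform: X W
out = torch.spmm(adj, support)  # sparse aggregation: A (X W)
print(out.shape)                # torch.Size([3, 2])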
Example No. 2
def forward(self, input, adjs):
    adj_mat = []
    node_num = adjs.shape[1]
    for i, adj in enumerate(adjs):
        if not self.featureless:
            adj_mat.append(torch.spmm(adj, input))
        else:
            adj_mat.append(adj)
    adj_mat = torch.cat(adj_mat, dim=1)
    if self.num_basis > 0:
        # Basis decomposition: each relation weight is a linear combination
        # (via self.coef) of self.num_basis shared basis matrices.
        weight = torch.matmul(
            self.coef,
            torch.reshape(self.basis, (self.num_basis, self.in_features,
                                       self.out_features)).permute(1, 0, 2))
        weight = torch.reshape(
            weight, (self.in_features * self.support, self.out_features))
        output = torch.spmm(adj_mat, weight)
    else:
        output = torch.spmm(adj_mat, self.basis)
    if self.featureless:
        temp = torch.ones(node_num)
        temp_drop = F.dropout(temp, self.dropout)
        output = temp_drop.reshape(-1, 1) * output
    return output + self.bias
Example No. 3
def sgc_precompute(features, adj, degree, alpha):
    t = perf_counter()
    ori_features = features
    # Clone so the in-place accumulation below does not corrupt the tensor
    # that ori_features still aliases.
    emb = features.clone()
    for i in range(degree):
        features = (1 - alpha) * torch.spmm(adj, features)
        emb += features
    emb /= degree
    emb = emb + alpha * ori_features
    precompute_time = perf_counter() - t
    return emb, precompute_time
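A hedged sketch of how this precompute step might be invoked, assuming `from time import perf_counter` is in scope for sgc_precompute; the identity adjacency here is a made-up stand-in for a properly normalized sparse adjacency:

import torch
from time import perf_counter

n = 5
adj = torch.eye(n).to_sparse()   # placeholder for a normalized adjacency
features = torch.randn(n, 16)

emb, t = sgc_precompute(features, adj, degree=2, alpha=0.1)
print(emb.shape, t)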
Example No. 4
def forward(self, input, adj):
    # adj is extracted from the graph structure
    support = torch.mm(input, self.weight)
    I_n = sp.eye(adj.shape[0])
    I_n = sparse_mx_to_torch_sparse_tensor(I_n).cuda()
    # Smoothed propagation operator: (I + smooth * A) / (1 + smooth).
    output = torch.spmm((I_n + self.smooth * adj) / (1 + self.smooth),
                        support)
    if self.bias is not None:
        return output + self.bias
    else:
        return output
Example No. 5
def __init__(self, data, nhid, dropout, K=2):
    super(GFNN, self).__init__()
    nfeat, nclass = data.num_features, data.num_classes
    self.fc1 = nn.Linear(nfeat, nhid)
    self.fc2 = nn.Linear(nhid, nclass)
    self.dropout = dropout
    self.prelu = nn.PReLU()
    # Precompute the K-hop propagated features once at construction time.
    processed_x = data.features.clone()
    for _ in range(K):
        processed_x = torch.spmm(data.norm_adj, processed_x)
    self.processed_x = processed_x
Example No. 6
def forward(self, input, adj):
    print('Input', type(input), input.shape)
    print('Adj', type(adj), adj.shape)
    # Aggregate first, then transform: (A X) W.
    adj_dot_features = torch.spmm(adj, input)
    output = torch.mm(adj_dot_features, self.weight)
    # Equivalent ordering: support = torch.mm(input, self.weight)
    #                      output = torch.spmm(adj, support)
    if self.bias is not None:
        return output + self.bias
    else:
        return output
Example No. 7
def forward(self, x, adj, dropout=0):
    x = F.dropout(x, dropout)
    x = self.ll(x)
    value = self.ll_att(x)
    value = F.leaky_relu(value)
    # Soft-clamp the attention logits around 20 so exp() cannot overflow.
    value = 20 - F.leaky_relu(20 - value)
    value = torch.exp(value)

    # Normalize by the sparse row sums of the attention weights.
    dividefactor = torch.spmm(adj, value)
    x = x * value
    x = torch.spmm(adj, x)
    x = x / dividefactor

    if self.activation is not None:
        x = self.activation(x)
    return x
Example No. 8
def poolwT(self, x, L):
    Mp = L.shape[0]
    N, M, Fin = x.shape
    # torch.spmm only handles 2-D operands, so fold the batch and feature
    # dimensions into one before applying the sparse transform matrix L.
    x = x.permute(1, 2, 0).contiguous()  # M x Fin x N
    x = x.view(M, Fin * N)               # M x Fin*N

    x = torch.spmm(L, x)                 # Mp x Fin*N
    x = x.view(Mp, Fin, N)               # Mp x Fin x N
    x = x.permute(2, 0, 1).contiguous()  # N x Mp x Fin
    return x
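torch.spmm only multiplies a 2-D sparse matrix by a 2-D dense matrix, which is why poolwT folds the batch and channel dimensions before the product and unfolds them afterwards. A small sketch of the same fold/unfold trick with made-up sizes:

import torch

N, M, Fin, Mp = 2, 6, 3, 4              # made-up batch and graph sizes
L = torch.rand(Mp, M).to_sparse()       # stand-in sparse transform matrix
x = torch.randn(N, M, Fin)

# Fold batch and channel dims into one, multiply, then unfold (as in poolwT).
y = torch.spmm(L, x.permute(1, 2, 0).reshape(M, Fin * N))
y = y.view(Mp, Fin, N).permute(2, 0, 1)
print(y.shape)                          # torch.Size([2, 4, 3])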
Example No. 9
def norm(x, adj_sp):
    # adj_sp is already a sparse tensor
    deg = torch.sparse.sum(adj_sp, 1)
    deg_inv = deg.pow(-1).to_dense()

    # Mean aggregation: sum neighbor features, then scale each row by 1/deg.
    x = torch.spmm(adj_sp, x)
    x = x.t() * deg_inv
    return x.t()
Example No. 10
def forward(self, adj, features):
    l = list()
    for i in range(self.radius + 1):
        l.append(features[i])

    for i in range(2 * self.radius - 1, -1, -1):
        if i == 2 * self.radius - 1:
            if adj[i].shape != (1, 1):
                x = self.fc1[(i + 1) // 2](l[i // 2 + 1]) + \
                    torch.spmm(adj[i], self.fc1[(i + 1) // 2 + 1](l[i // 2 + 1]))
            else:
                x = self.fc1[(i + 1) // 2](l[i // 2 + 1])
        elif i % 2 == 0:
            x = self.fc1[i // 2](l[i // 2]) + torch.spmm(adj[i], self.fc2[i + i // 2](x))
        else:
            if adj[i].shape != (1, 1):
                x = self.fc2[i + (i - 1) // 2](x) + \
                    torch.spmm(adj[i], self.fc2[i + (i - 1) // 2 + 1](x))

        x = self.dropout(x)

    return x
Example No. 11
def forward(self, input, adj):
    assert self.in_features == input.shape[1]
    x = self.phi(input)
    output = torch.spmm(adj, x)

    if self.agg == 'sum':
        x = (input + output)
    elif self.agg == 'cat':
        x = torch.cat([input, output], dim=1)
    x = self.encoder(x)
    return x
Example No. 12
def forward(self, seq, adj, sparse=False):
    seq = self.fc(seq)
    if sparse:
        seq = torch.unsqueeze(torch.spmm(adj, torch.squeeze(seq, 0)), 0)
    else:
        seq = torch.bmm(adj, seq)

    if self.isBias:
        seq += self.bias

    return self.act(seq)
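The sparse branch above relies on the same 2-D restriction noted earlier: the size-1 batch dimension is squeezed away before torch.spmm and restored afterwards. A minimal sketch with made-up shapes:

import torch

adj = torch.eye(4).to_sparse()   # made-up 4-node adjacency
seq = torch.randn(1, 4, 8)       # batch dimension of size 1

# Drop the batch dim for the 2-D sparse product, then restore it.
out = torch.unsqueeze(torch.spmm(adj, torch.squeeze(seq, 0)), 0)
print(out.shape)                 # torch.Size([1, 4, 8])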
Example No. 13
def spmatmul(den, sp):
    """
    den: Dense tensor of shape batch_size x in_chan x #V
    sp : Sparse tensor of shape newlen x #V
    """
    batch_size, in_chan, nv = list(den.size())
    new_len = sp.size()[0]
    den = den.permute(2, 1, 0).contiguous().view(nv, -1)
    res = torch.spmm(sp, den).view(new_len, in_chan,
                                   batch_size).contiguous().permute(2, 1, 0)
    return res
Example No. 14
def forward(self, feature):
    """
    Making a forward pass.
    :param feature: Input feature matrix.
    :return filtered_features: Output features.
    """
    filtered_features = torch.spmm(feature, self.weight_matrix)
    filtered_features = filtered_features + self.bias
    return filtered_features
Example No. 15
def forward(self, x, edge_index, edge_weight=None):
    edge_index, _ = remove_self_loops(edge_index)
    edge_weight = torch.ones(edge_index.shape[1]).to(
        x.device) if edge_weight is None else edge_weight
    adj = torch.sparse_coo_tensor(edge_index, edge_weight,
                                  (x.shape[0], x.shape[0]))
    adj = adj.to(x.device)
    # GIN update: (1 + eps) * h_v plus the sum of neighbor features.
    out = (1 + self.eps) * x + torch.spmm(adj, x)
    if self.apply_func is not None:
        out = self.apply_func(out)
    return out
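The line `(1 + self.eps) * x + torch.spmm(adj, x)` is the GIN aggregation rule: the node's own features scaled by (1 + eps) plus the sum of its neighbors' features. A toy sketch of the same arithmetic (graph, eps, and sizes are made up):

import torch

edge_index = torch.tensor([[0, 1], [1, 0]])  # one undirected edge
adj = torch.sparse_coo_tensor(edge_index, torch.ones(2), (2, 2))
x = torch.randn(2, 8)
eps = 0.1

out = (1 + eps) * x + torch.spmm(adj, x)     # (1 + eps) * h_v + neighbor sum
print(out.shape)                             # torch.Size([2, 8])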
Example No. 16
def forward(self, H, A):
    W = self.weight
    b = self.bias

    HW = torch.mm(H, W)
    # AHW = SparseMM.apply(A, HW)
    AHW = torch.spmm(A, HW)
    if self.bias is not None:
        return AHW + b
    else:
        return AHW
Example No. 17
def forward(self, input, adj=1.0):
    input = to_dense(input)
    support = self.linear(input)
    if isinstance(adj, (float, int)):
        output = support * adj
    else:
        if self.norm == 'symmetric':
            adj = adj_norm(adj, True)
        elif self.norm == 'asymmetric':
            adj = adj_norm(adj, False)
        output = torch.spmm(adj, support)
    return output
Example No. 18
def forward(self, input, edge_index, edge_attr=None):
    if edge_attr is None:
        edge_attr = torch.ones(edge_index.shape[1]).float().to(input.device)
    adj = torch.sparse_coo_tensor(
        edge_index,
        edge_attr,
        (input.shape[0], input.shape[0]),
    ).to(input.device)
    support = self.W(input)
    output = torch.spmm(adj, support)
    return output
Example No. 19
def forward(self, seq, adj, sparse=False):
    seq_fts = self.fc(seq)
    if sparse:
        out = torch.unsqueeze(torch.spmm(adj, torch.squeeze(seq_fts, 0)), 0)
    else:
        out = torch.bmm(adj, seq_fts)
    if self.bias is not None:
        out += self.bias

    return self.act(out)
Example No. 20
def forward(self, input, adj):
    # X W: (300, 300) x (300, 16) -> (300, 16)
    support = torch.mm(input, self.weight)
    # A (X W): (300, 300) x (300, 16) -> (300, 16)
    output = torch.spmm(adj, support)
    if self.bias is not None:
        return output + self.bias
    else:
        return output
Example No. 21
def sgc_precompute(features, adj, adj_dist, degree, concat, L, K, idx_train,
                   idx_val, idx_test):
    t = perf_counter()
    mem = [features]
    if K:
        local_features = torch.spmm(adj, features)
        all_features = [[], [], []]

        all_features[0].append(local_features[idx_train])
        all_features[1].append(local_features[idx_val])
        all_features[2].append(local_features[idx_test])

        zz = 0
        for i in range(0, K - L):
            low_feat = torch.spmm(adj_dist[i], features)
            # Check the number of neighbors (hard-coded split sizes).
            train = low_feat[:120]
            val = low_feat[120:620]
            test = low_feat[-1000:]
            gt1 = torch.sum(train.gt(0))
            te1 = torch.sum(test.gt(0))
            val = torch.sum(val.gt(0))
            print('partition: train | test | val:', gt1 / 120, te1 / 1000,
                  val / 500)
            all_features[0].append(low_feat[idx_train])
            all_features[1].append(low_feat[idx_val])
            all_features[2].append(low_feat[idx_test])
            zz += low_feat
    else:
        all_features = torch.spmm(adj, features)

    precompute_time = perf_counter() - t
    return all_features, precompute_time
Example No. 22
def forward(self, x_in, adj):
    x = self.mlp1(x_in)
    out = torch.spmm(adj, x)
    out = self.mlp2(out)

    # GRU-style gated update.
    z = torch.sigmoid(self.fc1_update(out) + self.fc2_update(x))
    r = torch.sigmoid(self.fc1_reset(out) + self.fc2_reset(x))
    out = torch.tanh(self.fc1(out) + self.fc2(r * x))
    # Compute the final output feature vector for the node.
    out = (1 - z) * x + z * out
    return out
Example No. 23
def next_layer(self, h, padded_neighbor_list=None, Adj_block=None):
    if self.neighbor_pooling_type == "max":
        # If max pooling
        pooled = self.maxpool(h, padded_neighbor_list)
    else:
        # If sum or average pooling
        pooled = torch.spmm(Adj_block, h)
        if self.neighbor_pooling_type == "average":
            # If average pooling, divide by the node degrees, themselves
            # computed with spmm against a column of ones.
            degree = torch.spmm(
                Adj_block,
                torch.ones((Adj_block.shape[0], 1)).to(self.device))
            pooled = pooled / degree

    # Representation of neighboring and center nodes
    pooled_rep = self.mlp(pooled)
    h = self.batch_norm(pooled_rep)

    # Non-linearity
    h = F.relu(h)
    return h
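The average-pooling branch obtains node degrees by multiplying the sparse adjacency with a column of ones, another common torch.spmm idiom. A short sketch on a made-up 3-node graph:

import torch

# Made-up 3-node graph: node 0 has two neighbors, nodes 1 and 2 have one each.
edge_index = torch.tensor([[0, 0, 1, 2], [1, 2, 0, 0]])
adj = torch.sparse_coo_tensor(edge_index, torch.ones(4), (3, 3))

degree = torch.spmm(adj, torch.ones((3, 1)))  # row sums computed with spmm
print(degree.squeeze())                       # tensor([2., 1., 1.])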
Example No. 24
def forward(self, input, adj):
    support = torch.mm(input, self.weight)
    # Note the constant offset added after aggregation in this variant.
    output = torch.spmm(adj, support) + 1
    print(output)
    if self.bias is not None:
        return output + self.bias
    else:
        return output
Example No. 25
def forward(self, input, adj):
    U, s, Vt = self.svd()

    # Multiply by a rank-r approximation of the weight matrix.
    support = torch.mm(input, U[:, :self.r])
    support = support * s[:self.r]
    support = torch.mm(support, Vt[:self.r, :])
    output = torch.spmm(adj, support)

    if self.bias is not None:
        output = output + self.bias

    return self.sigma(output)
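The forward pass above multiplies by a rank-r approximation of the weight, rebuilt from its SVD factors. A hedged sketch of that reconstruction using torch.linalg.svd (the matrix sizes and rank are made up, and the layer's own svd() method is assumed to return factors in the same U, s, Vt form):

import torch

W = torch.randn(16, 8)
r = 4
U, S, Vh = torch.linalg.svd(W, full_matrices=False)

x = torch.randn(32, 16)
support = torch.mm(x, U[:, :r]) * S[:r]  # project onto the top-r components
support = torch.mm(support, Vh[:r, :])   # x @ (U_r diag(S_r) Vh_r)
print(support.shape)                     # torch.Size([32, 8])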
Example No. 26
def b_forward(self, inputs, adjs):
    outputs = []
    for i in range(inputs.size(0)):
        input = inputs[i]
        adj = adjs[i] if adjs.dim() == 3 else adjs
        support = torch.mm(input, self.weight)
        output = torch.spmm(adj, support)
        if self.bias is not None:
            outputs.append(output.unsqueeze(0) + self.bias)
        else:
            outputs.append(output.unsqueeze(0))
    return torch.cat(outputs, dim=0)
Example No. 27
def forward(self, input, edge_index):
    adj = torch.sparse_coo_tensor(
        edge_index,
        torch.ones(edge_index.shape[1]).float(),
        (input.shape[0], input.shape[0]),
    ).cuda()
    support = torch.mm(input, self.weight)
    output = torch.spmm(adj, support)
    if self.bias is not None:
        return output + self.bias
    else:
        return output
Example No. 28
def forward(self, x, adj):
    propagations = [x]
    for _ in range(self.K):
        x = torch.spmm(adj, x)
        propagations.append(x)

    h = torch.stack(propagations, dim=1)
    retrain_score = self.w(h)
    retrain_score = self.activation(retrain_score).permute(0, 2, 1).contiguous()
    out = (retrain_score @ h).squeeze(1)
    return out
Example No. 29
def next_layer(self, h, layer, padded_neighbors_list=None, Adj_block=None):
    if self.neighboor_pooling_type == "max":
        pooled = self.maxpool(h, padded_neighbors_list)
    else:
        # Sum pooling; for average pooling, divide by the node degrees.
        pooled = torch.spmm(Adj_block, h)
        if self.neighboor_pooling_type == "average":
            degree = torch.spmm(
                Adj_block,
                torch.ones((Adj_block.shape[0], 1)).to(self.device))
            pooled = pooled / degree

    pooled_rep = self.mlps[layer](pooled)
    h = self.batch_norms[layer](pooled_rep)
    h = F.relu(h)
    return h
Example No. 30
def inverse(self, output):
    """ bijective or injective block inverse """
    x2, y1, adj = output
    hidden = self.linear.forward(x2)
    hidden = F.dropout(hidden, self.dropout, training=self.training)
    if adj.is_sparse:
        support = torch.spmm(adj, hidden)
    else:
        support = torch.mm(adj, hidden)
    Fx2 = self.act(support)
    x1 = Fx2 + y1
    return x1, x2, adj