Example #1
    def normalize_adj(A: torch.Tensor, eps: float = 1e-9) -> torch.Tensor:
        """ Normalize adjacency matrix for LPA:
        A = D^(-1/2) * A * D^(-1/2)
        A = softmax(A)

        :param A: sparse adjacency matrix
        :param eps: small value guarding against zero-degree nodes
        :return: symmetrically normalized sparse adjacency matrix
        """
        # Degree vector, then D^(-1/2) as a sparse diagonal matrix.
        D = torch.sparse.sum(A, dim=0)
        D = torch.sqrt(1.0 / (D.to_dense() + eps))
        D = torch.diag(D).to_sparse()
        # nz_indices = torch.nonzero(D, as_tuple=False)
        # D = torch.sparse.FloatTensor(nz_indices.T, D, A.shape)
        # D^(-1/2) * A * D^(-1/2)
        A = torch.sparse.mm(D, A.to_dense()).to_sparse()
        A = torch.sparse.mm(A, D.to_dense()).to_sparse()
        # A = self.softmax(A)

        return A
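A minimal usage sketch for the normalization above, assuming it is callable as a standalone function (in the original it sits inside a class) and that `import torch` is in scope; the toy adjacency matrix is made up for illustration.

    import torch

    # Toy 3-node undirected graph with self-loops, stored as a sparse COO tensor.
    A = torch.tensor([[1.0, 1.0, 0.0],
                      [1.0, 1.0, 1.0],
                      [0.0, 1.0, 1.0]]).to_sparse()

    A_norm = normalize_adj(A)        # D^(-1/2) * A * D^(-1/2)
    print(A_norm.to_dense())         # entries scaled by 1/sqrt(deg_i * deg_j)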
    def normalize_adj(adj: t.Tensor, eps: float = 1E-9) -> t.Tensor:
        """ Normalize adjacency matrix for LPA:
        A = D^(-1/2) * A * D^(-1/2)
        A = softmax(A)

        :param adj: sparse adjacency matrix
        :param eps: small value guarding against zero-degree nodes
        :return: symmetrically normalized sparse adjacency matrix
        """
        # Same normalization as above, written against `import torch as t`.
        D = t.sparse.sum(adj, dim=0)
        D = t.sqrt(1.0 / (D.to_dense() + eps))
        D = t.diag(D).to_sparse()
        # nz_indices = t.nonzero(D, as_tuple=False)
        # D = t.sparse.FloatTensor(nz_indices.T, D, adj.shape)
        adj = t.sparse.mm(D, adj.to_dense()).to_sparse()
        adj = t.sparse.mm(adj, D.to_dense()).to_sparse()
        # adj = self.softmax(adj)

        return adj
    def forward(self, adj: t.Tensor, Y: t.Tensor) -> t.Tensor:
        """ One label-propagation step: Y_hat = A * Y

        :param adj: sparse adjacency matrix
        :param Y: label matrix (one-hot rows for labelled nodes)
        :return: propagated labels Y_hat
        """
        adj = self.normalize_adj(adj)
        Y_hat = t.sparse.mm(adj, Y)
        # Y_hat[self.labelled_mask] = Y[self.labelled_mask]
        return Y_hat
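A rough sketch of one label-propagation step along the lines of the forward above, assuming `normalize_adj` is callable as a plain function; the toy graph, the one-hot label matrix, and the final re-clamping line (mirroring the commented-out `labelled_mask` idea) are assumptions.

    import torch as t

    A = t.tensor([[1.0, 1.0, 0.0],
                  [1.0, 1.0, 1.0],
                  [0.0, 1.0, 1.0]]).to_sparse()
    Y = t.tensor([[1.0, 0.0],      # node 0: class 0
                  [0.0, 1.0],      # node 1: class 1
                  [0.0, 0.0]])     # node 2: unlabelled
    labelled_mask = t.tensor([True, True, False])

    adj = normalize_adj(A)                    # D^(-1/2) * A * D^(-1/2)
    Y_hat = t.sparse.mm(adj, Y)               # Y_hat = A * Y
    Y_hat[labelled_mask] = Y[labelled_mask]   # clamp known labels (assumed step)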
    def forward(self, A, X, targeted_drop=0.1):
        # Anomaly detection is useful while debugging but slows training down.
        torch.autograd.set_detect_anomaly(True)
        # Targeted-dropout mask over the adjacency, combined element-wise with
        # the module's S matrix and then with A itself.
        D = self.get_targeted_dropout(shape_A=A.shape, targeted_drop=targeted_drop)
        D_prime = torch.mul(self.S, D)
        A_prime = torch.mul(D_prime, A)
        # GCN-style propagation: A' * X * W (+ bias); A_prime is assumed dense here.
        support = torch.mm(X, self.weight)
        X = torch.mm(A_prime, support)

        if self.bias is not None:
            return X + self.bias
        else:
            return X
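A rough functional sketch of what this forward pass computes, with plain tensors standing in for the module state; `S`, `W`, `b`, and the mask `D` returned by `get_targeted_dropout` are all stand-in assumptions with made-up shapes.

    import torch

    n, f_in, f_out = 4, 8, 16
    A = (torch.rand(n, n) > 0.5).float()    # toy dense adjacency
    X = torch.randn(n, f_in)                # node features
    S = torch.sigmoid(torch.randn(n, n))    # stand-in for self.S
    W = torch.randn(f_in, f_out)            # stand-in for self.weight
    b = torch.zeros(f_out)                  # stand-in for self.bias
    D = (torch.rand(n, n) > 0.1).float()    # stand-in for get_targeted_dropout(...)

    D_prime = S * D                         # scores masked by targeted dropout
    A_prime = D_prime * A                   # drop/re-weight edges element-wise
    out = A_prime @ (X @ W) + b             # propagate the transformed features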
    def feed_forward(self, batch_i, x):
        # Cache the input and pre-activation for sample `batch_i`; both are
        # reused in the backward pass (see Example #6). `np` is assumed to be
        # `import numpy as np` at module level.
        self.xs[batch_i] = x
        z = np.dot(self.weights, x) + self.bias
        self.zs[batch_i] = z
        return self.activation.act(z)
Example #6
    def g(self, i, layer, w_delta):
        # Backpropagation through one layer for sample `i`: the bias gradient
        # uses the activation derivative at the cached pre-activation, the
        # weight gradient uses the cached layer input, and both are accumulated
        # across the batch.
        db = w_delta * layer.activation.prime(layer.zs[i])
        self.g_b[layer.name] = self.g_b[layer.name] + db if layer.name in self.g_b else db
        dw = np.dot(db, layer.xs[i].transpose())
        self.g_w[layer.name] = self.g_w[layer.name] + dw if layer.name in self.g_w else dw
        # Error propagated to the previous layer.
        return np.dot(layer.weights.transpose(), db)
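For orientation, a self-contained NumPy sketch of the same chain-rule step: given a cached input `x` and pre-activation `z` (as stored by `feed_forward`), the incoming error `w_delta` is turned into bias and weight gradients and a delta for the previous layer. The toy sizes and the sigmoid activation are assumptions.

    import numpy as np

    def sigmoid(z):
        return 1.0 / (1.0 + np.exp(-z))

    def sigmoid_prime(z):
        s = sigmoid(z)
        return s * (1.0 - s)

    # Cached values for one sample of the batch (column vectors).
    x = np.random.randn(4, 1)               # layer input, as stored in layer.xs[i]
    weights = np.random.randn(3, 4)
    bias = np.random.randn(3, 1)
    z = np.dot(weights, x) + bias           # pre-activation, as stored in layer.zs[i]

    w_delta = np.random.randn(3, 1)         # error arriving from the next layer
    db = w_delta * sigmoid_prime(z)         # gradient w.r.t. the bias
    dw = np.dot(db, x.transpose())          # gradient w.r.t. the weights (3x4)
    prev_delta = np.dot(weights.transpose(), db)  # error for the previous layer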