Example #1
    def rand_prop(self, x, edge_index, edge_weight):
        edge_weight = symmetric_normalization(x.shape[0], edge_index, edge_weight)
        x = self.dropNode(x)

        y = x
        for i in range(self.order):
            x = spmm(edge_index, edge_weight, x).detach_()
            y.add_(x)
        return y.div_(self.order + 1.0).detach_()
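Note: these snippets call spmm in two conventions, spmm(edge_index, edge_weight, x) with explicit COO indices and per-edge values, and spmm(graph, x) with a graph object that carries its own edge weights. A minimal plain-torch sketch of what the triplet form is assumed to compute (sparse adjacency times dense features):

import torch

# Sketch only: assume spmm(edge_index, edge_weight, x) computes A @ x, where A is
# the sparse N x N adjacency defined by COO indices edge_index and values edge_weight.
edge_index = torch.tensor([[0, 1, 2], [1, 2, 0]])   # COO indices, shape [2, E]
edge_weight = torch.ones(edge_index.shape[1])       # one value per edge
x = torch.randn(3, 4)                               # node features, N x d
adj = torch.sparse_coo_tensor(edge_index, edge_weight, (3, 3))
out = torch.sparse.mm(adj, x)                       # dense-torch equivalent of spmm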
Example #2
 def forward(self, x, edge_index, edge_attr=None):
     support = torch.mm(x, self.weight)
     if edge_attr is None:
         edge_attr = torch.ones(edge_index.shape[1]).to(x.device)
     out = spmm(edge_index, edge_attr, support)
     if self.bias is not None:
         return out + self.bias
     else:
         return out
Example #3
 def forward(self, x, edge_index, edge_attr, init_x):
     """Symmetric normalization"""
     hidden = spmm(edge_index, edge_attr, x)
     hidden = (1 - self.alpha) * hidden + self.alpha * init_x
     h = self.beta * torch.matmul(hidden,
                                  self.weight) + (1 - self.beta) * hidden
     if self.residual:
         h = h + x
     return h
Example #4
 def forward(self, graph, x):
     # edge_index, _ = remove_self_loops()
     # edge_weight = torch.ones(edge_index.shape[1]).to(x.device) if edge_weight is None else edge_weight
     # adj = torch.sparse_coo_tensor(edge_index, edge_weight, (x.shape[0], x.shape[0]))
     # adj = adj.to(x.device)
     # out = (1 + self.eps) * x + torch.spmm(adj, x)
     out = (1 + self.eps) * x + spmm(graph, x)
     if self.apply_func is not None:
         out = self.apply_func(out)
     return out
Example #5
 def _calculate_A_hat(self, x, adj):
     device = x.device
     adj_values = torch.ones(adj.shape[1]).to(device)
     adj, adj_values = add_remaining_self_loops(adj, adj_values, 1,
                                                x.shape[0])
     deg = spmm(adj, adj_values,
                torch.ones(x.shape[0], 1).to(device)).squeeze()
     deg_sqrt = deg.pow(-1 / 2)
     adj_values = deg_sqrt[adj[1]] * adj_values * deg_sqrt[adj[0]]
     return adj, adj_values
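The helper above (the same pattern appears in Examples #9, #12, #15, #17, #25 and #30) builds the symmetrically normalized adjacency D^-1/2 (A + I) D^-1/2: add self-loops, obtain node degrees by multiplying the adjacency with a column of ones, then rescale every edge value by deg^-1/2 of both endpoints. A self-contained plain-torch sketch of the same computation, with torch.sparse.mm standing in for spmm:

import torch

# Two nodes connected by an edge, with self-loops already included.
adj = torch.tensor([[0, 1, 0, 1], [1, 0, 0, 1]])        # COO indices, shape [2, E]
adj_values = torch.ones(adj.shape[1])
A = torch.sparse_coo_tensor(adj, adj_values, (2, 2))
deg = torch.sparse.mm(A, torch.ones(2, 1)).squeeze()    # row sums = node degrees
deg_inv_sqrt = deg.pow(-0.5)
norm_values = deg_inv_sqrt[adj[1]] * adj_values * deg_inv_sqrt[adj[0]]  # D^-1/2 (A + I) D^-1/2 values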
Example #6
def outcome_correlation(g, labels, alpha, nprop, post_step, alpha_term=True):
    result = labels.clone()
    for _ in range(nprop):
        result = alpha * spmm(g, result)
        if alpha_term:
            result += (1 - alpha) * labels
        else:
            result += labels
        result = post_step(result)
    return result
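This is Correct-and-Smooth style label propagation: each iteration mixes the propagated values with the original labels and applies a caller-supplied post_step such as a clamp. A dense illustration of the same loop, assuming spmm(g, result) multiplies by a normalized adjacency:

import torch

A_hat = torch.tensor([[0.0, 1.0], [1.0, 0.0]])           # toy normalized adjacency
labels = torch.tensor([[1.0, 0.0], [0.0, 1.0]])
alpha = 0.9
result = labels.clone()
for _ in range(50):
    result = alpha * (A_hat @ result) + (1 - alpha) * labels
    result = result.clamp(0, 1)                           # a typical post_step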
Example #7
def bingge_norm_adj(adj, adj_values, num_nodes):
    adj, adj_values = add_self_loops(adj, adj_values, 1, num_nodes)
    deg = spmm(adj, adj_values,
               torch.ones(num_nodes, 1).to(adj.device)).squeeze()
    deg_sqrt = deg.pow(-1 / 2)
    adj_values = deg_sqrt[adj[1]] * adj_values * deg_sqrt[adj[0]]
    row, col = adj[0], adj[1]
    mask = row != col
    adj_values[row[mask]] += 1
    return adj, adj_values
Example #8
 def forward(self, input, edge_index, edge_attr=None):
     if edge_attr is None:
         edge_attr = torch.ones(edge_index.shape[1]).float().to(
             input.device)
     support = torch.mm(input, self.weight)
     output = spmm(edge_index, edge_attr, support)
     if self.bias is not None:
         return output + self.bias
     else:
         return output
Example #9
 def _calculate_A_hat(self, x, edge_index):
     device = x.device
     edge_attr = torch.ones(edge_index.shape[1]).to(device)
     edge_index, edge_attr = add_remaining_self_loops(
         edge_index, edge_attr, 1, x.shape[0])
     deg = spmm(edge_index, edge_attr,
                torch.ones(x.shape[0], 1).to(device)).squeeze()
     deg_sqrt = deg.pow(-1 / 2)
     edge_attr = deg_sqrt[edge_index[1]] * edge_attr * deg_sqrt[
         edge_index[0]]
     return edge_index, edge_attr
Example #10
 def forward(self, input, edge_index):
     h = torch.mm(input, self.weight)
     # Self-attention on the nodes - Shared attention mechanism
     # edge_h: 2*D x E
     edge_h = torch.cat((h[edge_index[0, :], :], h[edge_index[1, :], :]), dim=1).t()
     # do softmax for each row, this need index of each row, and for each row do softmax over it
     alpha = self.leakyrelu(self.a.mm(edge_h).squeeze())  # E
     n = len(input)
     alpha = softmax(alpha, edge_index[0], n)
     output = spmm(edge_index, self.dropout(alpha), h)  # h_prime: N x out
     # output = spmm(edge, self.dropout(alpha), n, n, self.dropout(h)) # h_prime: N x out
     return output
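Here per-edge attention scores are normalized with a segment softmax over edges sharing the same row index (the softmax(alpha, edge_index[0], n) call) and then reused as edge weights in spmm. A plain-torch sketch of such a segment softmax, under the assumption that the helper behaves this way (edge_softmax is just an illustrative name):

import torch

def edge_softmax(scores, row, num_nodes):
    # Normalize edge scores so they sum to 1 over all edges sharing the same row index.
    scores = scores - scores.max()                        # global shift for numerical stability
    exp = scores.exp()
    denom = torch.zeros(num_nodes).index_add_(0, row, exp)
    return exp / denom[row]

scores = torch.randn(5)
row = torch.tensor([0, 0, 1, 2, 2])
weights = edge_softmax(scores, row, num_nodes=3)          # sums to 1 within each row group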
Example #11
    def _preprocessing(self, x, edge_index):
        num_nodes = x.shape[0]

        op_embedding = []
        op_embedding.append(x)

        # Randomly drop a fraction of edges before propagation
        edge_index, _ = dropout_adj(edge_index, drop_rate=self.dropedge_rate)
        row, col = edge_index

        if self.undirected:
            edge_index = to_undirected(edge_index, num_nodes)
            row, col = edge_index

        # adj matrix
        edge_index, edge_attr = get_adj(row,
                                        col,
                                        asymm_norm=self.asymm_norm,
                                        set_diag=self.set_diag,
                                        remove_diag=self.remove_diag)

        nx = x
        for _ in range(self.num_propagations):
            nx = spmm(edge_index, edge_attr, nx)
            op_embedding.append(nx)

        # transpose adj matrix
        edge_index, edge_attr = get_adj(col,
                                        row,
                                        asymm_norm=self.asymm_norm,
                                        set_diag=self.set_diag,
                                        remove_diag=self.remove_diag)

        nx = x
        for _ in range(self.num_propagations):
            nx = spmm(edge_index, edge_attr, nx)
            op_embedding.append(nx)

        return torch.cat(op_embedding, dim=1)
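The preprocessing concatenates multi-hop propagations of the features along the feature dimension, once with the adjacency and once with its transpose. A dense sketch of the stacking, assuming spmm multiplies by a normalized adjacency A_hat:

import torch

A_hat = torch.rand(5, 5)                 # stand-in for a normalized adjacency
x = torch.randn(5, 3)
op_embedding = [x]
nx = x
for _ in range(2):                       # num_propagations = 2
    nx = A_hat @ nx
    op_embedding.append(nx)              # collects x, A_hat x, A_hat^2 x
features = torch.cat(op_embedding, dim=1)   # shape [5, 9]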
Example #12
    def forward(self, x, edge_index):
        device = x.device
        edge_attr = torch.ones(edge_index.shape[1]).to(device)
        edge_index, edge_attr = add_remaining_self_loops(
            edge_index, edge_attr, 1, x.shape[0])
        deg = spmm(edge_index, edge_attr,
                   torch.ones(x.shape[0], 1).to(device)).squeeze()
        deg_sqrt = deg.pow(-1 / 2)
        edge_attr = deg_sqrt[edge_index[1]] * edge_attr * deg_sqrt[
            edge_index[0]]

        x = self.sgc1(x, edge_index, edge_attr)
        return x
Example #13
    def forward(self, graph, x):
        support = torch.mm(x, self.weight)
        output = spmm(graph, support)

        # Self-loop
        output = output + torch.mm(
            x, self.self_weight) if self.self_weight is not None else output

        output = output + self.bias if self.bias is not None else output
        # BN
        output = self.bn(output) if self.bn is not None else output
        # Res
        return self.sigma(output) + x if self.res else self.sigma(output)
Example #14
 def forward(self, local_preds, batch):
     """
     Propagation with no dropout applied.

     :param local_preds: hidden input features
     :param batch: sparse graph/adjacency used by spmm for propagation
     """
     preds = local_preds
     for _ in range(self.k):
         new_features = spmm(batch, preds)
         preds = (1 - self.alpha) * new_features + self.alpha * local_preds
     final_preds = preds
     return final_preds
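The loop is APPNP-style propagation, preds <- (1 - alpha) * A_hat @ preds + alpha * local_preds; iterated to convergence it approaches the personalized-PageRank closed form alpha * (I - (1 - alpha) * A_hat)^-1 @ local_preds, which the ppnp branch of Example #29 computes with an explicit inverse. A dense check of the two, assuming spmm(batch, preds) multiplies by a normalized adjacency:

import torch

A_hat = torch.tensor([[0.0, 1.0], [1.0, 0.0]])
local_preds = torch.randn(2, 3)
alpha = 0.1
preds = local_preds
for _ in range(200):
    preds = (1 - alpha) * (A_hat @ preds) + alpha * local_preds
closed_form = alpha * torch.inverse(torch.eye(2) - (1 - alpha) * A_hat) @ local_preds
# preds and closed_form should agree closely after enough iterations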
Example #15
    def forward(self, x, adj):
        device = x.device
        adj_values = torch.ones(adj.shape[1]).to(device)  # one weight per edge
        adj, adj_values = add_remaining_self_loops(adj, adj_values, 1, x.shape[0])
        deg = spmm(adj, adj_values,
                   torch.ones(x.shape[0], 1).to(device)).squeeze()  # spmm([2, E], [E], [N, 1]) -> node degrees
        deg_sqrt = deg.pow(-1 / 2)
        adj_values = deg_sqrt[adj[1]] * adj_values * deg_sqrt[adj[0]]

        x = self.sgc1(x, adj, adj_values)

        return F.log_softmax(x, dim=-1)
Example #16
    def forward(self, graph, x):
        h = torch.matmul(x, self.W).view(-1, self.nhead, self.out_features)
        h[torch.isnan(h)] = 0.0

        row, col = graph.edge_index
        # Self-attention on the nodes - Shared attention mechanism
        h_l = (self.a_l * h).sum(dim=-1)[row]
        h_r = (self.a_r * h).sum(dim=-1)[col]
        edge_attention = self.leakyrelu(h_l + h_r)
        # edge_attention: E * H
        edge_attention = mul_edge_softmax(graph, edge_attention)
        edge_attention = self.dropout(edge_attention)

        if check_mh_spmm() and next(self.parameters()).device.type != "cpu":
            if self.nhead > 1:
                h_prime = mh_spmm(graph, edge_attention, h)
                out = h_prime.view(h_prime.shape[0], -1)
            else:
                edge_weight = edge_attention.view(-1)
                with graph.local_graph():
                    graph.edge_weight = edge_weight
                    out = spmm(graph, h.squeeze(1))
        else:
            with graph.local_graph():
                h_prime = []
                h = h.permute(1, 0, 2).contiguous()
                for i in range(self.nhead):
                    edge_weight = edge_attention[:, i]
                    graph.edge_weight = edge_weight
                    hidden = h[i]
                    assert not torch.isnan(hidden).any()
                    h_prime.append(spmm(graph, hidden))
            out = torch.cat(h_prime, dim=1)

        if self.residual:
            res = self.residual(x)
            out += res
        return out
Example #17
 def forward(self, x, adj):
     device = x.device
     adj_values = torch.ones(adj.shape[1]).to(device)
     adj, adj_values = add_remaining_self_loops(adj, adj_values, 1, x.shape[0])
     deg = spmm(adj, adj_values, torch.ones(x.shape[0], 1).to(device)).squeeze()
     deg_sqrt = deg.pow(-1 / 2)
     adj_values = deg_sqrt[adj[1]] * adj_values * deg_sqrt[adj[0]]
     x = F.dropout(x, self.dropout, training=self.training)
     x = F.relu(self.gc1(x, adj, adj_values))
     x = F.dropout(x, self.dropout, training=self.training)
     x = F.relu(self.gc2(x, adj, adj_values))
     x = F.dropout(x, self.dropout, training=self.training)
     x = self.gc3(x, adj, adj_values)
     return x
Example #18
 def inference(self, x, adj):
     x = x.to(self._device)
     origin_device = adj.device
     adj = adj.to(self._device)
     xs = [x]
     for i in range(self.num_layers):
         x = spmm(adj, x)
         x = self.lins[i](x)
         if i != self.num_layers - 1:
             x = self.norms[i](x)
             x = x.relu_()
         xs.append(x)
     adj = adj.to(origin_device)
     return x, xs
Example #19
    def forward(self, graph, x):
        support = self.linear(x)
        out = spmm(graph, support, actnn=True)

        if self.norm is not None:
            out = self.norm(out)
        if self.act is not None:
            out = self.act(out)

        if self.residual is not None:
            out = out + self.residual(x)
        if self.dropout is not None:
            out = self.dropout(out)
        return out
Example #20
    def forward(self, graph, x):
        support = torch.mm(x, self.weight)
        out = spmm(graph, support)
        if self.bias is not None:
            out = out + self.bias
        if self.norm is not None:
            out = self.norm(out)
        if self.act is not None:
            out = self.act(out)

        if self.residual is not None:
            out = out + self.residual(x)
        if self.dropout is not None:
            out = self.dropout(out)
        return out
Example #21
    def forward(self, graph, x):
        edge_index = graph.edge_index
        N, dim = x.shape

        # nl_adj_mat_ind, nl_adj_mat_val = add_self_loops(edge_index, num_nodes=N)[0], edge_attr.squeeze()
        nl_adj_mat_ind = add_remaining_self_loops(edge_index, num_nodes=N)[0]
        nl_adj_mat_ind = torch.stack(nl_adj_mat_ind)
        nl_adj_mat_val = torch.ones(nl_adj_mat_ind.shape[1]).to(x.device)

        for _ in range(self.nhop - 1):
            nl_adj_mat_ind, nl_adj_mat_val = spspmm(nl_adj_mat_ind,
                                                    nl_adj_mat_val,
                                                    nl_adj_mat_ind,
                                                    nl_adj_mat_val, N, N, N,
                                                    True)

        result = []
        for i in range(self.subheads):
            h = torch.mm(x, self.weight[i])

            adj_mat_ind, adj_mat_val = nl_adj_mat_ind, nl_adj_mat_val
            h = F.dropout(h, p=self.dropout, training=self.training)

            adj_mat_ind, adj_mat_val = self.attention(h, adj_mat_ind,
                                                      adj_mat_val)
            # laplacian matrix normalization
            adj_mat_val = self.normalization(adj_mat_ind, adj_mat_val, N)

            val_h = h

            with graph.local_graph():
                graph.edge_index = adj_mat_ind
                graph.edge_weight = adj_mat_val
                for _ in range(i + 1):
                    val_h = spmm(graph, val_h)
                    # val_h = spmm(adj_mat_ind, F.dropout(adj_mat_val, p=self.node_dropout, training=self.training), N, N, val_h)

                # val_h = val_h / norm
                val_h[val_h != val_h] = 0
                val_h = val_h + self.bias[i]
                val_h = self.adaptive_enc[i](val_h)
                val_h = self.activation(val_h)
                val_h = F.dropout(val_h,
                                  p=self.dropout,
                                  training=self.training)
                result.append(val_h)
        h_res = torch.cat(result, dim=1)
        return h_res
Example #22
    def bdd_forward(self, x, edge_index, edge_type):
        _x = x.view(-1, self.num_bases, self.block_in_feats)

        edge_weight = torch.ones(edge_type.shape).to(x.device)
        edge_weight = row_normalization(x.shape[0], edge_index, edge_weight)

        h_list = []
        for edge_t in range(self.num_edge_types):
            _weight = self.weight[edge_t].view(self.num_bases, self.block_in_feats, self.block_out_feats)
            edge_mask = (edge_type == edge_t)
            _edge_index_t = edge_index.t()[edge_mask].t()
            h_t = torch.einsum("abc,bcd->abd", _x, _weight).reshape(-1, self.out_feats)
            h_t = spmm(_edge_index_t, edge_weight[edge_mask], h_t)
            h_list.append(h_t)

        return h_list
Example #23
    def basis_forward(self, x, edge_index, edge_type):
        if self.num_bases < self.num_edge_types:
            weight = torch.matmul(self.alpha, self.weight.view(self.num_bases, -1))
            weight = weight.view(self.num_edge_types, self.in_feats, self.out_feats)
        else:
            weight = self.weight
        edge_weight = torch.ones(edge_type.shape).to(x.device)
        edge_weight = row_normalization(x.shape[0], edge_index, edge_weight)

        h = torch.matmul(x, weight)  # (N, d1) by (r, d1, d2) -> (r, N, d2)

        h_list = []
        for edge_t in range(self.num_edge_types):
            edge_mask = (edge_type == edge_t)
            _edge_index_t = edge_index.t()[edge_mask].t()
            temp = spmm(_edge_index_t, edge_weight[edge_mask], h[edge_t])
            h_list.append(temp)
        return h_list
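Basis decomposition shares num_bases weight matrices across all edge types; each relation's weight is a learned combination W_r = sum_b alpha[r, b] * V_b, which is what the matmul/view pair above computes. A standalone sketch of that combination:

import torch

num_bases, num_edge_types, d_in, d_out = 2, 4, 8, 16
V = torch.randn(num_bases, d_in, d_out)                  # shared basis matrices
alpha = torch.randn(num_edge_types, num_bases)           # per-relation coefficients
W = torch.matmul(alpha, V.view(num_bases, -1)).view(num_edge_types, d_in, d_out)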
Example #24
    def inference_batch(self, x, test_loader):
        device = self._device
        xs = [x]
        for i in range(self.num_layers):
            tmp_x = []
            for target_id, full_id, full_adj in test_loader:
                full_adj = full_adj.to(device)
                agg_x = spmm(full_adj,
                             x[full_id].to(device))[:target_id.shape[0]]
                agg_x = self.lins[i](agg_x)

                if i != self.num_layers - 1:
                    agg_x = self.norms[i](agg_x)
                    agg_x = agg_x.relu_()
                tmp_x.append(agg_x.cpu())
            x = torch.cat(tmp_x, dim=0)
            xs.append(x)
        return x, xs
Example #25
    def forward(self):
        x, edge_index, edge_attr = self.data.x, self.data.edge_index, self.data.edge_attr
        device = x.device

        adj, adj_values = add_remaining_self_loops(edge_index, edge_attr, 1,
                                                   x.shape[0])
        deg = spmm(adj, adj_values,
                   torch.ones(x.shape[0], 1).to(device)).squeeze()
        deg_sqrt = deg.pow(-1 / 2)
        adj_values = deg_sqrt[adj[1]] * adj_values * deg_sqrt[adj[0]]

        x = F.dropout(x, self.dropout, training=self.training)
        x = F.relu(self.gc1(x, adj, adj_values))

        x = F.dropout(x, self.dropout, training=self.training)
        x = self.gc2(x, adj, adj_values)

        return F.log_softmax(x, dim=-1)
Example #26
def average_neighbor_features(graph, feats, nhop, norm="sym", style="all"):
    results = []
    if norm == "sym":
        graph.sym_norm()
    elif norm == "row":
        graph.row_norm()
    else:
        raise NotImplementedError

    x = feats
    results.append(x)
    for i in range(nhop):
        x = spmm(graph, x)
        if style == "all":
            results.append(x)
    if style != "all":
        results = x
    return results
Example #27
    def predict(self, x, edge_index, batch_size, norm_func):
        device = next(self.fc.parameters()).device
        num_nodes = x.shape[0]
        pred_logits = []
        with torch.no_grad():
            for i in range(0, num_nodes, batch_size):
                batch_x = x[i : i + batch_size].to(device)
                batch_logits = self.fc(batch_x)
                pred_logits.append(batch_logits.cpu())
        pred_logits = torch.cat(pred_logits, dim=0)

        edge_weight = norm_func(num_nodes, edge_index)
        edge_weight = edge_weight * (1 - self.alpha)

        predictions = pred_logits
        for _ in range(self.nprop):
            predictions = spmm(edge_index, edge_weight, predictions) + self.alpha * pred_logits
        return predictions
Example #28
    def forward(self, x, adj):
        layer_outputs = []
        if self.sparse:
            device = x.device
            adj_values = torch.ones(adj.shape[1]).to(device)
            adj, adj_values = add_remaining_self_loops(adj, adj_values, 1,
                                                       x.shape[0])
            deg = spmm(adj, adj_values,
                       torch.ones(x.shape[0], 1).to(device)).squeeze()
            deg_sqrt = deg.pow(-1 / 2)
            adj_values = deg_sqrt[adj[1]] * adj_values * deg_sqrt[adj[0]]

            # deg_ = 1 / deg
            # adj_values = adj_values * deg_[adj[1]]

            for i in range(self.n_layers):

                x = F.dropout(x, self.dropout, training=self.training)
                x = F.relu(getattr(self, 'gc{}'.format(i))(x, adj, adj_values))
                x = F.dropout(x, self.dropout, training=self.training)
                layer_outputs.append(x)

            h = torch.stack(layer_outputs, dim=0)
            h = torch.max(h, dim=0)[0]

        # x = F.relu(x)
        # x = torch.sigmoid(x)
        # return x
        # h2 = x

        else:
            graph = self.preprocessing(
                x, adj, x.device) if not self.graph else self.graph

            for i in range(self.n_layers):
                dropout = getattr(self, 'dropout{}'.format(i))
                gconv = getattr(self, 'gconv{}'.format(i))
                x = dropout(F.relu(gconv(graph, x)))
                layer_outputs.append(x)

            h = torch.stack(layer_outputs, dim=0)
            h = torch.max(h, dim=0)[0]

        return F.log_softmax(self.last_linear(h), dim=-1)
Example #29
    def forward(self, x, adj):
        def get_ready_format(input, edge_index, edge_attr=None):
            if edge_attr is None:
                edge_attr = torch.ones(edge_index.shape[1]).float().to(
                    input.device)
            adj = torch.sparse_coo_tensor(
                edge_index,
                edge_attr,
                (input.shape[0], input.shape[0]),
            ).to(input.device)
            return adj

        if self.use_cache:
            flag = str(adj.shape[1])
            if flag not in self.cache:
                edge_index, edge_attr = self._calculate_A_hat(x, adj)
                self.cache[flag] = (edge_index, edge_attr)
            else:
                edge_index, edge_attr = self.cache[flag]
        else:
            edge_index, edge_attr = self._calculate_A_hat(x, adj)
        # get prediction
        x = F.dropout(x, p=self.dropout, training=self.training)
        local_preds = self.nn.forward(x)

        # apply personalized pagerank
        if self.propagation == "ppnp":
            if self.vals is None:
                self.vals = self.alpha * torch.inverse(
                    torch.eye(x.shape[0]).to(x.device) - (1 - self.alpha) *
                    get_ready_format(x, edge_index, edge_attr))
            final_preds = F.dropout(self.vals, p=self.dropout, training=self.training) @ local_preds
        else:  # appnp
            preds = local_preds
            edge_attr = F.dropout(edge_attr,
                                  p=self.dropout,
                                  training=self.training)
            for _ in range(self.niter):
                new_features = spmm(edge_index, edge_attr, preds)
                preds = (1 -
                         self.alpha) * new_features + self.alpha * local_preds
            final_preds = preds
        return final_preds
Example #30
    def forward(self, x, adj):
        device = x.device
        adj_values = torch.ones(adj.shape[1]).to(device)
        adj, adj_values = add_remaining_self_loops(adj, adj_values, 1, x.shape[0])
        deg = spmm(adj, adj_values, torch.ones(x.shape[0], 1).to(device)).squeeze()
        deg_sqrt = deg.pow(-1/2)
        adj_values = deg_sqrt[adj[1]] * adj_values * deg_sqrt[adj[0]]

        x = F.dropout(x, self.dropout, training=self.training)
        x = F.relu(self.gc1(x, adj, adj_values))
        # h1 = x
        x = F.dropout(x, self.dropout, training=self.training)
        x = self.gc2(x, adj, adj_values)

        # x = F.relu(x)
        # x = torch.sigmoid(x)
        # return x
        # h2 = x
        return F.log_softmax(x, dim=-1)