Example #1
    def forward(self, x, edge_index, edge_attr=None):
        if self.edge_attr is None:
            # First call: cache the normalized edge weights (either the weights
            # provided by the caller or weights computed from scratch).
            if edge_attr is not None:
                self.edge_attr = edge_attr
            else:
                edge_index, edge_weight = add_remaining_self_loops(
                    edge_index=edge_index,
                    edge_weight=torch.ones(edge_index.shape[1]).to(x.device),
                    fill_value=1,
                    num_nodes=x.shape[0])
                self.edge_attr = symmetric_normalization(
                    num_nodes=x.shape[0],
                    edge_index=edge_index,
                    edge_weight=edge_weight,
                )
        else:
            # Subsequent calls: re-add self loops so edge_index matches the
            # cached edge weights.
            edge_index, _ = add_remaining_self_loops(
                edge_index=edge_index,
                edge_weight=torch.ones(edge_index.shape[1]).to(x.device),
                fill_value=1,
                num_nodes=x.shape[0])

        # Initial feature transform; init_h is reused by every layer as an
        # initial-residual connection.
        init_h = F.dropout(x, p=self.dropout, training=self.training)
        init_h = F.relu(self.fc_layers[0](init_h))

        h = init_h

        for layer in self.layers:
            h = F.dropout(h, p=self.dropout, training=self.training)
            h = layer(h, edge_index, self.edge_attr, init_h)
            h = self.activation(h)
        h = F.dropout(h, p=self.dropout, training=self.training)
        out = self.fc_layers[1](h)
        return out
Example #2
    def forward(self, x, edge_index, edge_attr=None):
        if self.edge_index is None:
            # Compute and cache the symmetrically normalized edge weights once.
            num_nodes = torch.max(edge_index) + 1
            self.edge_index = edge_index
            self.edge_attr = symmetric_normalization(num_nodes, edge_index)
        h = spmm(self.edge_index, self.edge_attr, x)
        return self.weight(h)
Example #3
    def forward(self, x, edge_index, edge_attr=None):
        # Cache the normalized graph, keyed by the number of edges.
        _attr = str(edge_index.shape[1])
        if _attr not in self.cache:
            edge_index, edge_attr = add_remaining_self_loops(
                edge_index=edge_index,
                edge_weight=torch.ones(edge_index.shape[1]).to(x.device),
                fill_value=1,
                num_nodes=x.shape[0],
            )
            edge_attr = symmetric_normalization(x.shape[0], edge_index,
                                                edge_attr)

            self.cache[_attr] = (edge_index, edge_attr)
        edge_index, edge_attr = self.cache[_attr]

        init_h = F.dropout(x, p=self.dropout, training=self.training)
        init_h = F.relu(self.fc_layers[0](init_h))

        h = init_h

        for layer in self.layers:
            h = F.dropout(h, p=self.dropout, training=self.training)
            h = layer(h, edge_index, edge_attr, init_h)
            h = self.activation(h)
        h = F.dropout(h, p=self.dropout, training=self.training)
        out = self.fc_layers[1](h)
        return out
Example #4
    def rand_prop(self, x, edge_index, edge_weight):
        edge_weight = symmetric_normalization(x.shape[0], edge_index, edge_weight)
        x = self.dropNode(x)

        # Random propagation: average the features propagated over
        # orders 0..self.order of the normalized adjacency.
        y = x
        for i in range(self.order):
            x = spmm(edge_index, edge_weight, x).detach_()
            y.add_(x)
        return y.div_(self.order + 1.0).detach_()
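
Example #4 calls a self.dropNode helper that is not shown in the snippet. A minimal, hypothetical sketch of such a DropNode step is given below, assuming it zeroes whole node-feature rows at random during training and rescales the survivors; the standalone name drop_node and the drop_rate argument are illustrative, not the library's own API.

import torch

def drop_node(x, drop_rate, training=True):
    # Zero entire node rows with probability drop_rate and rescale the rest,
    # so the expected feature magnitude is unchanged; no-op at inference.
    if not training or drop_rate == 0.0:
        return x
    keep_prob = 1.0 - drop_rate
    mask = torch.bernoulli(torch.full((x.shape[0], 1), keep_prob, device=x.device))
    return x * mask / keep_prob
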
Example #5
    def get_embeddings(self, x, edge_index):
        edge_index, edge_attr = add_remaining_self_loops(edge_index,
                                                         num_nodes=x.shape[0])
        edge_attr = symmetric_normalization(x.shape[0], edge_index, edge_attr)

        h = x
        for i in range(self.num_layers - 1):
            h = F.dropout(h, self.dropout, training=self.training)
            h = self.layers[i](h, edge_index, edge_attr)
        return h
Example #6
    def forward(self, x, edge_index):
        edge_index, edge_attr = add_remaining_self_loops(edge_index,
                                                         num_nodes=x.shape[0])
        edge_attr = symmetric_normalization(x.shape[0], edge_index, edge_attr)

        x = F.dropout(x, self.dropout, training=self.training)
        x = F.relu(self.gc1(x, edge_index, edge_attr))
        x = F.dropout(x, self.dropout, training=self.training)
        x = self.gc2(x, edge_index, edge_attr)
        return x
Example #7
    def forward(
        self,
        x: torch.Tensor,
        edge_index: torch.Tensor,
        edge_weight: Optional[torch.Tensor] = None,
    ):
        num_nodes = x.shape[0]
        edge_index, edge_weight = add_remaining_self_loops(edge_index, edge_weight)
        # Normalize the (possibly caller-provided) edge weights instead of
        # discarding them.
        edge_weight = symmetric_normalization(num_nodes, edge_index, edge_weight)
        return self.encoder(x, edge_index, edge_weight)
Example #8
    def forward(self, x, edge_index):
        edge_index, edge_attr = add_remaining_self_loops(edge_index,
                                                         num_nodes=x.shape[0])
        edge_attr = symmetric_normalization(x.shape[0], edge_index, edge_attr)
        h = x
        for i in range(self.num_layers):
            h = self.layers[i](h, edge_index, edge_attr)
            if i != self.num_layers - 1:
                h = F.relu(h)
                h = F.dropout(h, self.dropout, training=self.training)
        return h
Example #9
    def forward(self, x, edge_index):
        x = self.ses[0](x)
        edge_index, edge_weight = add_remaining_self_loops(edge_index)
        edge_weight = symmetric_normalization(x.shape[0], edge_index,
                                              edge_weight)
        for se, conv in zip(self.ses[1:], self.convs[:-1]):
            x = F.relu(conv(x, edge_index, edge_weight))
            x = se(x)
            x = F.dropout(x, p=self.dropout, training=self.training)
        x = self.convs[-1](x, edge_index, edge_weight)
        return x
Example #10
    def forward(self, x, edge_index):
        flag = str(edge_index.shape[1])
        if flag not in self.cache:
            edge_attr = torch.ones(edge_index.shape[1]).to(x.device)
            edge_index, edge_attr = add_remaining_self_loops(
                edge_index, edge_attr, 1, x.shape[0])
            edge_attr = symmetric_normalization(x.shape[0], edge_index,
                                                edge_attr)
            self.cache[flag] = (edge_index, edge_attr)
        edge_index, edge_attr = self.cache[flag]

        x = self.nn(x, edge_index, edge_attr)
        return x
Example #11
    def normalize_adj(self, norm="sym"):
        if self.__normed__:
            return
        if self.weight is None or self.weight.shape[0] != self.edge_index.shape[1]:
            self.weight = torch.ones(self.num_edges, device=self.device)

        edge_index = torch.stack([self.row, self.col])
        if norm == "sym":
            self.weight = symmetric_normalization(self.num_nodes, edge_index, self.weight)
        elif norm == "row":
            self.weight = row_normalization(self.num_nodes, edge_index, self.weight)
        else:
            raise NotImplementedError
        self.__normed__ = norm
Example #12
def get_adj(row, col, asymm_norm=False, set_diag=True, remove_diag=False):
    edge_index = torch.stack([row, col])
    edge_attr = torch.ones(edge_index.shape[1]).to(edge_index.device)
    if set_diag:
        edge_index, edge_attr = add_remaining_self_loops(edge_index, edge_attr)
    elif remove_diag:
        edge_index, _ = remove_self_loops(edge_index)

    num_nodes = int(torch.max(edge_index)) + 1
    # Row normalization (D^-1 A) is the asymmetric variant; otherwise apply
    # the symmetric D^-1/2 A D^-1/2 normalization.
    if asymm_norm:
        edge_attr = row_normalization(num_nodes, edge_index, edge_attr)
    else:
        edge_attr = symmetric_normalization(num_nodes, edge_index, edge_attr)
    return edge_index, edge_attr
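
Because get_adj in Example #12 is a standalone helper rather than a method, a short usage sketch follows. The row/col tensors are made up purely for illustration, and the normalization helpers are assumed to be importable from the same module as in the example.

import torch

# Toy edge list over three nodes.
row = torch.tensor([0, 1, 2, 2])
col = torch.tensor([1, 2, 0, 1])
# With these defaults, self loops are added and the returned edge weights
# are normalized.
edge_index, edge_attr = get_adj(row, col, asymm_norm=False, set_diag=True)
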
Example #13
    def forward(self, x, edge_index):
        edge_index, edge_attr = add_remaining_self_loops(edge_index)
        edge_attr = symmetric_normalization(x.shape[0], edge_index, edge_attr)
        adj_values = edge_attr

        x = F.dropout(x, self.dropout, training=self.training)
        x = F.relu(self.gc1(x, edge_index, adj_values))

        x = F.dropout(x, self.dropout, training=self.training)
        x = self.gc2(x, edge_index, adj_values)

        return F.log_softmax(x, dim=-1)
Example #14
    def forward(self, x, edge_index, edge_weight=None):
        x = self.normalize_x(x)
        if edge_weight is None:
            edge_weight = torch.ones(edge_index.shape[1], dtype=torch.float32).to(x.device)
        edge_weight = symmetric_normalization(x.shape[0], edge_index, edge_weight)
        x = self.rand_prop(x, edge_index, edge_weight)
        if self.use_bn:
            x = self.bn1(x)
        x = F.dropout(x, self.input_droprate, training=self.training)
        x = F.relu(self.layer1(x))
        if self.use_bn:
            x = self.bn2(x)
        x = F.dropout(x, self.hidden_droprate, training=self.training)
        x = self.layer2(x)
        return x
Example #15
    def forward(self, x, edge_index, edge_attr=None):
        num_nodes = x.shape[0]
        if self.cache is None:
            self.cache = dict()
        if "edge_weight" not in self.cache:
            edge_index, edge_weight = add_remaining_self_loops(
                edge_index, edge_attr)
            edge_weight = symmetric_normalization(x.shape[0], edge_index,
                                                  edge_weight)
            self.cache["edge_index"] = edge_index
            self.cache["edge_weight"] = edge_weight
        edge_index, edge_weight = self.cache["edge_index"].to(
            x.device), self.cache["edge_weight"].to(x.device)
        # Build a sparse adjacency matrix from the normalized edge weights.
        adj = torch.sparse_coo_tensor(edge_index, edge_weight,
                                      (num_nodes, num_nodes))

        # Corruption for contrastive training: shuffle the node features to
        # produce negative samples.
        idx = np.random.permutation(num_nodes)
        shuf_fts = x[idx, :]

        logits = self._forward(x, shuf_fts, adj, True, None)
        return logits
Example #16
    def preprocess(self, x, edge_index, edge_attr=None):
        num_nodes = x.shape[0]

        edge_index, edge_weight = add_remaining_self_loops(
            edge_index, edge_attr)

        adj = edge_index.cpu().numpy()
        edge_weight = symmetric_normalization(x.shape[0], edge_index,
                                              edge_weight)
        adj = sp.coo_matrix((edge_weight.cpu().numpy(), (adj[0], adj[1])),
                            shape=(num_nodes, num_nodes)).todense()

        # Build a second, diffused view of the graph via personalized PageRank.
        g = nx.Graph()
        g.add_nodes_from(list(range(num_nodes)))
        g.add_edges_from(edge_index.cpu().numpy().transpose())
        diff = compute_ppr(g, 0.2)

        if self.dataset_name == "citeseer":
            # Sparsify the diffusion matrix: pick the threshold whose number of
            # retained entries per node is closest to the graph's average
            # degree, then min-max scale the result.
            epsilons = [1e-5, 1e-4, 1e-3, 1e-2]
            avg_degree = np.sum(adj) / adj.shape[0]
            epsilon = epsilons[np.argmin([
                abs(avg_degree -
                    np.argwhere(diff >= e).shape[0] / diff.shape[0])
                for e in epsilons
            ])]

            diff[diff < epsilon] = 0.0
            scaler = MinMaxScaler()
            scaler.fit(diff)
            diff = scaler.transform(diff)

        if self.cache is None:
            self.cache = dict()
        self.cache["diff"] = diff
        self.cache["adj"] = adj
        self.device = next(self.gcn1.parameters()).device
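
Every snippet above relies on the symmetric_normalization helper, and Examples #11 and #12 additionally on row_normalization. The library's own implementations are not shown here; as rough, illustrative sketches only (not the library's actual code), functions with the signatures used in these examples would typically rescale each edge weight by its endpoints' degrees, computing D^-1/2 A D^-1/2 and D^-1 A respectively.

import torch

def symmetric_normalization(num_nodes, edge_index, edge_weight=None):
    # Illustrative sketch: rescale w_ij to w_ij / sqrt(d_i * d_j).
    if edge_weight is None:
        edge_weight = torch.ones(edge_index.shape[1], device=edge_index.device)
    row, col = edge_index[0], edge_index[1]
    deg = torch.zeros(num_nodes, device=edge_index.device)
    deg.scatter_add_(0, row, edge_weight)
    deg_inv_sqrt = deg.pow(-0.5)
    deg_inv_sqrt[torch.isinf(deg_inv_sqrt)] = 0.0
    return deg_inv_sqrt[row] * edge_weight * deg_inv_sqrt[col]

def row_normalization(num_nodes, edge_index, edge_weight=None):
    # Illustrative sketch: rescale w_ij to w_ij / d_i (random-walk normalization).
    if edge_weight is None:
        edge_weight = torch.ones(edge_index.shape[1], device=edge_index.device)
    row = edge_index[0]
    deg = torch.zeros(num_nodes, device=edge_index.device)
    deg.scatter_add_(0, row, edge_weight)
    deg_inv = deg.pow(-1)
    deg_inv[torch.isinf(deg_inv)] = 0.0
    return deg_inv[row] * edge_weight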