Example #1
    def forward(self, x, edge_index, edge_attr=None):
        if self.edge_attr is None:
            if edge_attr is not None:
                self.edge_attr = edge_attr
            else:
                edge_index, edge_weight = add_remaining_self_loops(
                    edge_index=edge_index,
                    edge_weight=torch.ones(edge_index.shape[1]).to(x.device),
                    fill_value=1,
                    num_nodes=x.shape[0])
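                # cache D^{-1/2}(A+I)D^{-1/2} so the normalization runs only on the first forward pass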
                self.edge_attr = symmetric_normalization(
                    num_nodes=x.shape[0],
                    edge_index=edge_index,
                    edge_weight=edge_weight,
                )
        else:
            edge_index, _ = add_remaining_self_loops(
                edge_index=edge_index,
                edge_weight=torch.ones(edge_index.shape[1]).to(x.device),
                fill_value=1,
                num_nodes=x.shape[0])

        init_h = F.dropout(x, p=self.dropout, training=self.training)
        init_h = F.relu(self.fc_layers[0](init_h))

        h = init_h

        for layer in self.layers:
            h = F.dropout(h, p=self.dropout, training=self.training)
            h = layer(h, edge_index, self.edge_attr, init_h)
            h = self.activation(h)
        h = F.dropout(h, p=self.dropout, training=self.training)
        out = self.fc_layers[1](h)
        return out
Example #2
    def fit(self, model, dataset):
        self.data = dataset[0]
        self.data.edge_index, _ = add_remaining_self_loops(self.data.edge_index)
        if hasattr(self.data, "edge_index_train"):
            self.data.edge_index_train, _ = add_remaining_self_loops(self.data.edge_index_train)
        self.evaluator = dataset.get_evaluator()
        self.loss_fn = dataset.get_loss_fn()

        self.train_loader = NeighborSampler(
            data=self.data,
            mask=self.data.train_mask,
            sizes=self.sample_size,
            batch_size=self.batch_size,
            num_workers=self.num_workers,
            shuffle=True,
        )
        self.test_loader = NeighborSampler(
            data=self.data, mask=None, sizes=[-1], batch_size=self.batch_size, shuffle=False
        )
        self.model = model.to(self.device)
        self.model.set_data_device(self.device)
        self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.lr, weight_decay=self.weight_decay)
        best_model = self.train()
        self.model = best_model
        acc, loss = self._test_step()
        return dict(Acc=acc["test"], ValAcc=acc["val"])
Example #3
    def forward(self,
                x: torch.Tensor,
                edge_index: torch.Tensor,
                edge_attr: Optional[torch.Tensor] = None) -> torch.Tensor:
        if self.cache_edge_attr is None:
            edge_index, _ = add_remaining_self_loops(edge_index)
            if self.improved:
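                # "improved" variant: append a second set of self-loops so diagonal entries count twice before normalization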
                self_loop = torch.stack([torch.arange(0, x.shape[0])] * 2,
                                        dim=0).to(x.device)
                edge_index = torch.cat([edge_index, self_loop], dim=1)

            edge_attr = row_normalization(x.shape[0], edge_index)
            self.cache_edge_attr = edge_attr
            self.cache_edge_index = edge_index
        else:
            edge_index = self.cache_edge_index
            edge_attr = self.cache_edge_attr

        if self.training and self.adj_dropout > 0:
            edge_index, edge_attr = dropout_adj(edge_index, edge_attr,
                                                self.adj_dropout)

        x = F.dropout(x, p=self.n_dropout, training=self.training)
        h = self.in_gcn(x, edge_index, edge_attr)
        h = self.act(h)
        h_list = self.unet(h, edge_index, edge_attr)

        h = h_list[-1]
        h = F.dropout(h, p=self.n_dropout, training=self.training)
        return self.out_gcn(h, edge_index, edge_attr)
Example #4
    def forward(self, x, edge_index, edge_attr):
        N, dim = x.shape
        # x = self.dropout(x)

        # adj_mat_ind, adj_mat_val = add_self_loops(edge_index, num_nodes=N)[0], edge_attr.squeeze()
        adj_mat_ind = add_remaining_self_loops(edge_index, num_nodes=N)[0]
        adj_mat_val = torch.ones(adj_mat_ind.shape[1]).to(x.device)

        h = torch.mm(x, self.weight)
        h = F.dropout(h, p=self.dropout, training=self.training)
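        # expand the receptive field: square the adjacency (sparse-sparse matmul) nhop - 1 times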
        for _ in range(self.nhop - 1):
            adj_mat_ind, adj_mat_val = spspmm(adj_mat_ind, adj_mat_val,
                                              adj_mat_ind, adj_mat_val, N, N,
                                              N, True)

        adj_mat_ind, adj_mat_val = self.attention(h, adj_mat_ind, adj_mat_val)

        # MATRIX_MUL
        # laplacian matrix normalization
        adj_mat_val = self.normalization(adj_mat_ind, adj_mat_val, N)

        val_h = h
        # N, dim = val_h.shape

        # MATRIX_MUL
        # val_h = spmm(adj_mat_ind, F.dropout(adj_mat_val, p=self.node_dropout, training=self.training), N, N, val_h)
        val_h = spmm(adj_mat_ind, adj_mat_val, N, N, val_h)

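        # val_h != val_h is True only for NaN entries; zero them out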
        val_h[val_h != val_h] = 0
        val_h = val_h + self.bias
        val_h = self.adaptive_enc(val_h)
        val_h = F.dropout(val_h, p=self.dropout, training=self.training)
        # val_h = self.activation(val_h)
        return val_h
Example #5
    def forward(self, x, edge_index, edge_attr=None):
        _attr = str(edge_index.shape[1])
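        # cache the normalized adjacency, keyed by edge count as a cheap proxy for "the same graph"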
        if _attr not in self.cache:
            edge_index, edge_attr = add_remaining_self_loops(
                edge_index=edge_index,
                edge_weight=torch.ones(edge_index.shape[1]).to(x.device),
                fill_value=1,
                num_nodes=x.shape[0],
            )
            edge_attr = symmetric_normalization(x.shape[0], edge_index,
                                                edge_attr)

            self.cache[_attr] = (edge_index, edge_attr)
        edge_index, edge_attr = self.cache[_attr]

        init_h = F.dropout(x, p=self.dropout, training=self.training)
        init_h = F.relu(self.fc_layers[0](init_h))

        h = init_h

        for layer in self.layers:
            h = F.dropout(h, p=self.dropout, training=self.training)
            h = layer(h, edge_index, edge_attr, init_h)
            h = self.activation(h)
        h = F.dropout(h, p=self.dropout, training=self.training)
        out = self.fc_layers[1](h)
        return out
Example #6
 def add_remaining_self_loops(self):
     edge_index = torch.stack([self.row, self.col])
     edge_index, self.weight = add_remaining_self_loops(
         edge_index, num_nodes=self.num_nodes)
     self.row, self.col = edge_index
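     # `indicator` is not defined in this excerpt; presumably a flag on the enclosing class that triggers CSR conversion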
     if indicator is True:
         self._to_csr()
Example #7
def node_degree_as_feature(data):
    r"""
    Set each node feature as one-hot encoding of degree
    :param data: a list of class Data
    :return: a list of class Data
    """
    max_degree = 0
    degrees = []
    for graph in data:
        edge_index = graph.edge_index
        edge_weight = torch.ones((edge_index.size(1), ),
                                 device=edge_index.device)
        fill_value = 1
        num_nodes = graph.num_nodes
        edge_index, edge_weight = add_remaining_self_loops(
            edge_index, edge_weight, fill_value, num_nodes)
        row, col = edge_index
        deg = torch.zeros(num_nodes).to(edge_index.device).scatter_add_(
            0, row, edge_weight).long()
        degrees.append(deg.cpu() - 1)
        max_degree = max(torch.max(deg), max_degree)
    max_degree = int(max_degree)
    for i in range(len(data)):
        one_hot = torch.zeros(data[i].num_nodes,
                              max_degree).scatter_(1, degrees[i].unsqueeze(1),
                                                   1)
        data[i].x = one_hot.to(data[i].y.device)
    return data
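
A minimal usage sketch for `node_degree_as_feature` above (the toy path graph and the PyG-style `Data` import are illustrative assumptions, not part of the original snippet):

    import torch
    from torch_geometric.data import Data  # assumption: any Data-like object with edge_index, num_nodes and y works

    # hypothetical 3-node path graph, edges stored in both directions
    graph = Data(edge_index=torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]]),
                 y=torch.zeros(3, dtype=torch.long))

    data = node_degree_as_feature([graph])
    print(data[0].x)  # per node: one-hot of (degree including self-loop) - 1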
Example #8
 def forward(self, x, edge_index):
     edge_index, _ = add_remaining_self_loops(edge_index)
     x = F.dropout(x, self.dropout, training=self.training)
     x = torch.cat([att(x, edge_index) for att in self.attentions], dim=1)
     x = F.dropout(x, self.dropout, training=self.training)
     x = F.elu(self.out_att(x, edge_index))
     return F.log_softmax(x, dim=1)
Example #9
def aug_norm_adj(adj, adj_values, num_nodes):
    adj, adj_values = add_remaining_self_loops(adj, adj_values, 1, num_nodes)
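    # node degrees of A+I, computed as (A+I) · 1 with a sparse matmul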
    deg = spmm(adj, adj_values,
               torch.ones(num_nodes, 1).to(adj.device)).squeeze()
    deg_sqrt = deg.pow(-1 / 2)
    adj_values = deg_sqrt[adj[1]] * adj_values * deg_sqrt[adj[0]]
    return adj, adj_values
Example #10
 def forward(self, x, edge_index):
     edge_index, _ = add_remaining_self_loops(edge_index)
     for mixhop in self.mixhops:
         x = F.relu(mixhop(x, edge_index))
         x = F.dropout(x, p=self.dropout, training=self.training)
     x = self.fc(x)
     return F.log_softmax(x, dim=1)
Example #11
 def add_remaining_self_loops(self):
     if self.attr is not None and len(self.attr.shape) == 1:
         edge_index, weight_attr = add_remaining_self_loops(
             (self.row, self.col), edge_weight=self.attr, fill_value=0, num_nodes=self.num_nodes
         )
         self.row, self.col = edge_index
         self.attr = weight_attr
         self.weight = torch.ones_like(self.row).float()
     else:
         edge_index, self.weight = add_remaining_self_loops(
             (self.row, self.col), fill_value=1, num_nodes=self.num_nodes
         )
         self.row, self.col = edge_index
         self.attr = None
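     # convert COO to CSR: row_ptr holds per-row offsets, and reindex reorders the edge arrays to CSR order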
     self.row_ptr, reindex = coo2csr_index(self.row, self.col, num_nodes=self.num_nodes)
     self.row = self.row[reindex]
     self.col = self.col[reindex]
Example #12
 def _calculate_A_hat(self, x, adj):
     device = x.device
     adj_values = torch.ones(adj.shape[1]).to(device)
     adj, adj_values = add_remaining_self_loops(adj, adj_values, 1, x.shape[0])
     deg = spmm(adj, adj_values, torch.ones(x.shape[0], 1).to(device)).squeeze()
     deg_sqrt = deg.pow(-1 / 2)
     adj_values = deg_sqrt[adj[1]] * adj_values * deg_sqrt[adj[0]]
     return adj, adj_values
Example #13
    def forward(self, x, edge_index):
        edge_index, _ = add_remaining_self_loops(edge_index)

        x = F.dropout(x, p=self.dropout, training=self.training)
        x = F.elu(self.att1(x, edge_index))
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = F.elu(self.att2(x, edge_index))
        return F.normalize(x, p=2, dim=1)
Example #14
 def _calculate_A_hat(self, x, edge_index):
     device = x.device
     edge_attr = torch.ones(edge_index.shape[1]).to(device)
     edge_index, edge_attr = add_remaining_self_loops(edge_index, edge_attr, 1, x.shape[0])
     deg = spmm(edge_index, edge_attr, torch.ones(x.shape[0], 1).to(device)).squeeze()
     deg_sqrt = deg.pow(-1 / 2)
     edge_attr = deg_sqrt[edge_index[1]] * edge_attr * deg_sqrt[edge_index[0]]
     return edge_index, edge_attr
Example #15
    def forward(self, x, edge_index):
        edge_index, _ = add_remaining_self_loops(edge_index)

        for i, layer in enumerate(self.attentions):
            x = F.dropout(x, p=self.dropout, training=self.training)
            x = layer(x, edge_index)
            if i != self.num_layers - 1:
                x = F.elu(x)
        return x
Example #16
    def get_embeddings(self, x, edge_index):
        edge_index, edge_attr = add_remaining_self_loops(edge_index,
                                                         num_nodes=x.shape[0])
        edge_attr = symmetric_normalization(x.shape[0], edge_index, edge_attr)

        h = x
        for i in range(self.num_layers - 1):
            h = F.dropout(h, self.dropout, training=self.training)
            h = self.layers[i](h, edge_index, edge_attr)
        return h
Example #17
    def forward(self, x, edge_index):
        edge_index, edge_attr = add_remaining_self_loops(edge_index,
                                                         num_nodes=x.shape[0])
        edge_attr = symmetric_normalization(x.shape[0], edge_index, edge_attr)

        x = F.dropout(x, self.dropout, training=self.training)
        x = F.relu(self.gc1(x, edge_index, edge_attr))
        x = F.dropout(x, self.dropout, training=self.training)
        x = self.gc2(x, edge_index, edge_attr)
        return x
Example #18
 def add_remaining_self_loops(self):
     edge_index, self.weight = add_remaining_self_loops(
         (self.row, self.col), num_nodes=self.num_nodes)
     self.row, self.col = edge_index
     self.row_ptr, reindex = coo2csr_index(self.row,
                                           self.col,
                                           num_nodes=self.num_nodes)
     self.row = self.row[reindex]
     self.col = self.col[reindex]
     self.attr = None
Example #19
 def forward(
     self,
     x: torch.Tensor,
     edge_index: torch.Tensor,
     edge_weight: Optional[torch.Tensor] = None,
 ):
     num_nodes = x.shape[0]
     edge_index, edge_weight = add_remaining_self_loops(edge_index, edge_weight)
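     # note: the returned edge_weight is dropped on the next line; symmetric_normalization presumably falls back to unit weights when none are given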
     edge_weight = symmetric_normalization(num_nodes, edge_index)
     return self.encoder(x, edge_index, edge_weight)
Example #20
 def forward(self, x, edge_index):
     x = self.ses[0](x)
     edge_index, edge_weight = add_remaining_self_loops(edge_index)
     edge_weight = symmetric_normalization(x.shape[0], edge_index,
                                           edge_weight)
     for se, conv in zip(self.ses[1:], self.convs[:-1]):
         x = F.relu(conv(x, edge_index, edge_weight))
         x = se(x)
         x = F.dropout(x, p=self.dropout, training=self.training)
     x = self.convs[-1](x, edge_index, edge_weight)
     return x
Example #21
 def forward(self, x, edge_index):
     edge_index, edge_attr = add_remaining_self_loops(edge_index,
                                                      num_nodes=x.shape[0])
     edge_attr = symmetric_normalization(x.shape[0], edge_index, edge_attr)
     h = x
     for i in range(self.num_layers):
         h = self.layers[i](h, edge_index, edge_attr)
         if i != self.num_layers - 1:
             h = F.relu(h)
             h = F.dropout(h, self.dropout, training=self.training)
     return h
Example #22
    def fit(self, model, data):
        data = data[0]
        self.model = model.to(self.device)
        self.data = data
        self.test_gpu_volume()
        self.subgraph_loader = NeighborSampler(
            data.edge_index,
            sizes=[-1],
            batch_size=self.batch_size,
            shuffle=False,
            num_workers=10,
        )

        self.optimizer = torch.optim.Adam(self.model.parameters(),
                                          lr=self.lr,
                                          weight_decay=self.weight_decay)
        self.edge_index, _ = add_remaining_self_loops(
            data.edge_index,
            torch.ones(data.edge_index.shape[1]).to(data.x.device), 1,
            data.x.shape[0])
        self.train_index = torch.where(data.train_mask)[0].tolist()

        epoch_iter = tqdm(range(self.max_epoch))
        patience = 0
        best_score = 0
        best_loss = np.inf
        max_score = 0
        min_loss = np.inf
        for epoch in epoch_iter:
            self._train_step()
            if epoch % 5 == 0:
                val_acc, val_loss = self._test_step(split="val")
                epoch_iter.set_description(
                    f"Epoch: {epoch:03d}, Val: {val_acc:.4f}")
                if val_loss <= min_loss or val_acc >= max_score:
                    if val_acc >= best_score:  # SAINT loss is not accurate
                        best_loss = val_loss
                        best_score = val_acc
                        best_model = copy.deepcopy(self.model)
                    min_loss = np.min((min_loss, val_loss))
                    max_score = np.max((max_score, val_acc))
                    patience = 0
                else:
                    patience += 1
                    if patience == self.patience:
                        self.model = best_model
                        epoch_iter.close()
                        break
        test_acc, _ = self._test_step(split="test")
        val_acc, _ = self._test_step(split="val")
        print(f"Test accuracy = {test_acc}")
        return dict(Acc=test_acc, ValAcc=val_acc)
Example #23
    def forward(self, x, edge_index):
        flag = str(edge_index.shape[1])
        if flag not in self.cache:
            edge_attr = torch.ones(edge_index.shape[1]).to(x.device)
            edge_index, edge_attr = add_remaining_self_loops(
                edge_index, edge_attr, 1, x.shape[0])
            edge_attr = symmetric_normalization(x.shape[0], edge_index,
                                                edge_attr)
            self.cache[flag] = (edge_index, edge_attr)
        edge_index, edge_attr = self.cache[flag]

        x = self.nn(x, edge_index, edge_attr)
        return x
Example #24
    def forward(self, x, adj):
        device = x.device
        adj_values = torch.ones(adj.shape[1]).to(device)  # unit weight for every edge
        adj, adj_values = add_remaining_self_loops(adj, adj_values, 1, x.shape[0])
        deg = spmm(adj, adj_values,
                   torch.ones(x.shape[0], 1).to(device)).squeeze()  # node degrees of A+I
        deg_sqrt = deg.pow(-1 / 2)
        adj_values = deg_sqrt[adj[1]] * adj_values * deg_sqrt[adj[0]]

        x = self.sgc1(x, adj, adj_values)

        return F.log_softmax(x, dim=-1)
Example #25
    def forward(self, x, edge_index):
        device = x.device
        edge_attr = torch.ones(edge_index.shape[1]).to(device)
        edge_index, edge_attr = add_remaining_self_loops(
            edge_index, edge_attr, 1, x.shape[0])
        deg = spmm(edge_index, edge_attr,
                   torch.ones(x.shape[0], 1).to(device)).squeeze()
        deg_sqrt = deg.pow(-1 / 2)
        edge_attr = deg_sqrt[edge_index[1]] * edge_attr * deg_sqrt[
            edge_index[0]]

        x = self.sgc1(x, edge_index, edge_attr)
        return x
Example #26
    def forward(self, x, edge_index):
        edge_index, edge_attr = add_remaining_self_loops(edge_index)
        edge_attr = symmetric_normalization(x.shape[0], edge_index, edge_attr)

        x = F.dropout(x, self.dropout, training=self.training)
        x = F.relu(self.gc1(x, edge_index, edge_attr))

        x = F.dropout(x, self.dropout, training=self.training)
        x = self.gc2(x, edge_index, edge_attr)

        return F.log_softmax(x, dim=-1)
Example #27
 def forward(self, x, adj):
     device = x.device
     adj_values = torch.ones(adj.shape[1]).to(device)
     adj, adj_values = add_remaining_self_loops(adj, adj_values, 1, x.shape[0])
     deg = spmm(adj, adj_values, torch.ones(x.shape[0], 1).to(device)).squeeze()
     deg_sqrt = deg.pow(-1 / 2)
     adj_values = deg_sqrt[adj[1]] * adj_values * deg_sqrt[adj[0]]
     x = F.dropout(x, self.dropout, training=self.training)
     x = F.relu(self.gc1(x, adj, adj_values))
     x = F.dropout(x, self.dropout, training=self.training)
     x = F.relu(self.gc2(x, adj, adj_values))
     x = F.dropout(x, self.dropout, training=self.training)
     x = self.gc3(x, adj, adj_values)
     return x
Example #28
def get_adj(row, col, asymm_norm=False, set_diag=True, remove_diag=False):
    edge_index = torch.stack([row, col])
    edge_attr = torch.ones(edge_index.shape[1]).to(edge_index.device)
    if set_diag:
        edge_index, edge_attr = add_remaining_self_loops(edge_index, edge_attr)
    elif remove_diag:
        edge_index, _ = remove_self_loops(edge_index)

    num_nodes = int(torch.max(edge_index)) + 1
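    # note: as written, asymm_norm=False applies row normalization (D^-1 (A+I), the asymmetric one), so the flag name appears inverted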
    if not asymm_norm:
        edge_attr = row_normalization(num_nodes, edge_index, edge_attr)
    else:
        edge_attr = symmetric_normalization(num_nodes, edge_index, edge_attr)
    return edge_index, edge_attr
Example #29
    def forward(self, graph, x):
        edge_index = graph.edge_index
        N, dim = x.shape

        # nl_adj_mat_ind, nl_adj_mat_val = add_self_loops(edge_index, num_nodes=N)[0], edge_attr.squeeze()
        nl_adj_mat_ind = add_remaining_self_loops(edge_index, num_nodes=N)[0]
        nl_adj_mat_ind = torch.stack(nl_adj_mat_ind)
        nl_adj_mat_val = torch.ones(nl_adj_mat_ind.shape[1]).to(x.device)

        for _ in range(self.nhop - 1):
            nl_adj_mat_ind, nl_adj_mat_val = spspmm(nl_adj_mat_ind,
                                                    nl_adj_mat_val,
                                                    nl_adj_mat_ind,
                                                    nl_adj_mat_val, N, N, N,
                                                    True)

        result = []
        for i in range(self.subheads):
            h = torch.mm(x, self.weight[i])

            adj_mat_ind, adj_mat_val = nl_adj_mat_ind, nl_adj_mat_val
            h = F.dropout(h, p=self.dropout, training=self.training)

            adj_mat_ind, adj_mat_val = self.attention(h, adj_mat_ind,
                                                      adj_mat_val)
            # laplacian matrix normalization
            adj_mat_val = self.normalization(adj_mat_ind, adj_mat_val, N)

            val_h = h

            with graph.local_graph():
                graph.edge_index = adj_mat_ind
                graph.edge_weight = adj_mat_val
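                # subhead i propagates i + 1 times, mixing the (i + 1)-hop neighborhood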
                for _ in range(i + 1):
                    val_h = spmm(graph, val_h)
                    # val_h = spmm(adj_mat_ind, F.dropout(adj_mat_val, p=self.node_dropout, training=self.training), N, N, val_h)

                # val_h = val_h / norm
                val_h[val_h != val_h] = 0
                val_h = val_h + self.bias[i]
                val_h = self.adaptive_enc[i](val_h)
                val_h = self.activation(val_h)
                val_h = F.dropout(val_h,
                                  p=self.dropout,
                                  training=self.training)
                result.append(val_h)
        h_res = torch.cat(result, dim=1)
        return h_res
Example #30
    def forward(self):
        x, edge_index, edge_attr = self.data.x, self.data.edge_index, self.data.edge_attr
        device = x.device

        adj, adj_values = add_remaining_self_loops(edge_index, edge_attr, 1,
                                                   x.shape[0])
        deg = spmm(adj, adj_values,
                   torch.ones(x.shape[0], 1).to(device)).squeeze()
        deg_sqrt = deg.pow(-1 / 2)
        adj_values = deg_sqrt[adj[1]] * adj_values * deg_sqrt[adj[0]]

        x = F.dropout(x, self.dropout, training=self.training)
        x = F.relu(self.gc1(x, adj, adj_values))

        x = F.dropout(x, self.dropout, training=self.training)
        x = self.gc2(x, adj, adj_values)

        return F.log_softmax(x, dim=-1)