Example #1
    def _preprocessing(self, graph, x, drop_edge=False):
        device = x.device
        # Run the preprocessing on CPU; the result is moved back to the
        # original device at the end.
        graph.to("cpu")
        x = x.to("cpu")

        graph.eval()

        op_embedding = [x]

        edge_index = graph.edge_index
        if self.undirected:
            edge_index = to_undirected(edge_index)

        if drop_edge:
            edge_index, _ = dropout_adj(edge_index,
                                        drop_rate=self.dropedge_rate)

        graph = get_adj(graph, remove_diag=self.remove_diag)

        for norm in self.adj_norm:
            with graph.local_graph():
                graph.edge_index = edge_index
                graph.normalize(norm)
                if self.diffusion == "ppr":
                    results = multi_hop_ppr_diffusion(graph, graph.x,
                                                      self.num_propagations)
                else:
                    results = multi_hop_sgc(graph, graph.x,
                                            self.num_propagations)
                op_embedding.extend(results)

        graph.to(device)
        return torch.cat(op_embedding, dim=1).to(device)
Example #2
    def _preprocessing(self, graph, x):
        op_embedding = []
        op_embedding.append(x)

        edge_index = graph.edge_index

        # Randomly drop a fraction of the edges
        edge_index, _ = dropout_adj(edge_index, drop_rate=self.dropedge_rate)

        # if self.undirected:
        #     edge_index = to_undirected(edge_index, num_nodes)

        graph = get_adj(graph,
                        asymm_norm=self.asymm_norm,
                        set_diag=self.set_diag,
                        remove_diag=self.remove_diag)

        with graph.local_graph():
            graph.edge_index = edge_index
            for _ in range(self.num_propagations):
                x = spmm(graph, x)
                op_embedding.append(x)

        return torch.cat(op_embedding, dim=1)
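
The loop above is the SIGN-style preprocessing recipe: propagate the features K times and keep every intermediate hop, so the concatenated output holds [x, Ax, ..., A^K x]. A minimal sketch of that pattern, reusing the spmm(graph, x) call from the example; the helper name and the cogdl.utils import path are assumptions:

    import torch
    from cogdl.utils import spmm  # import path is an assumption

    def multi_hop_concat(graph, x, num_propagations):
        # Hypothetical helper mirroring the loop above: collect x and each
        # propagated version, then concatenate along the feature dimension.
        hops = [x]
        for _ in range(num_propagations):
            x = spmm(graph, x)  # one sparse-matrix propagation step
            hops.append(x)
        return torch.cat(hops, dim=1)  # shape: [N, (K + 1) * d]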
Example #3
    def forward(self, graph: Graph) -> torch.Tensor:
        x = graph.x
        if self.improved and not hasattr(graph, "unet_improved"):
            row, col = graph.edge_index
            row = torch.cat(
                [row, torch.arange(0, x.shape[0], device=x.device)], dim=0)
            col = torch.cat(
                [col, torch.arange(0, x.shape[0], device=x.device)], dim=0)
            graph.edge_index = (row, col)
            graph["unet_improved"] = True
        graph.row_norm()

        with graph.local_graph():
            if self.training and self.adj_dropout > 0:
                graph.edge_index, graph.edge_weight = dropout_adj(
                    graph.edge_index, graph.edge_weight, self.adj_dropout)

            x = F.dropout(x, p=self.n_dropout, training=self.training)
            h = self.in_gcn(graph, x)
            h = self.act(h)
            h_list = self.unet(graph, h)

            h = h_list[-1]
            h = F.dropout(h, p=self.n_dropout, training=self.training)
            return self.out_gcn(graph, h)
Example #4
    def forward(self,
                x: torch.Tensor,
                edge_index: torch.Tensor,
                edge_attr: Optional[torch.Tensor] = None) -> torch.Tensor:
        if self.cache_edge_attr is None:
            edge_index, _ = add_remaining_self_loops(edge_index)
            if self.improved:
                self_loop = torch.stack([torch.arange(0, x.shape[0])] * 2,
                                        dim=0).to(x.device)
                edge_index = torch.cat([edge_index, self_loop], dim=1)

            edge_attr = row_normalization(x.shape[0], edge_index)
            self.cache_edge_attr = edge_attr
            self.cache_edge_index = edge_index
        else:
            edge_index = self.cache_edge_index
            edge_attr = self.cache_edge_attr

        if self.training and self.adj_dropout > 0:
            edge_index, edge_attr = dropout_adj(edge_index, edge_attr,
                                                self.adj_dropout)

        x = F.dropout(x, p=self.n_dropout, training=self.training)
        h = self.in_gcn(x, edge_index, edge_attr)
        h = self.act(h)
        h_list = self.unet(h, edge_index, edge_attr)

        h = h_list[-1]
        h = F.dropout(h, p=self.n_dropout, training=self.training)
        return self.out_gcn(h, edge_index, edge_attr)
Example #5
File: grace_mw.py  Project: rpatil524/cogdl
    def prop(
        self,
        graph: Graph,
        x: torch.Tensor,
        drop_feature_rate: float = 0.0,
        drop_edge_rate: float = 0.0,
    ):
        x = dropout_features(x, drop_feature_rate)
        with graph.local_graph():
            graph.edge_index, graph.edge_weight = dropout_adj(
                graph.edge_index, graph.edge_weight, drop_edge_rate)
            return self.model.forward(graph, x)
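
Several of the examples (#1, #3, #5, #6, #8) share one idiom: mutate the adjacency only inside graph.local_graph(), so the perturbed edges never leak into later forward passes. A minimal sketch, assuming cogdl.data.Graph and a cogdl.utils import path for dropout_adj (both paths are assumptions) and a toy 4-node cycle:

    import torch
    from cogdl.data import Graph         # import path is an assumption
    from cogdl.utils import dropout_adj  # import path is an assumption

    graph = Graph(x=torch.randn(4, 8),
                  edge_index=torch.tensor([[0, 1, 2, 3],
                                           [1, 2, 3, 0]]))

    with graph.local_graph():
        # Drop roughly half of the edges for this pass only.
        graph.edge_index, graph.edge_weight = dropout_adj(
            graph.edge_index, graph.edge_weight, 0.5)
        # ... run a model on the perturbed graph here ...
    # On exit the original edges are restored, which is the behavior
    # the examples above rely on.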
Example #6
    def forward(self, graph: Graph) -> torch.Tensor:
        x = graph.x
        if self.improved and not hasattr(graph, "unet_improved"):
            self_loop = torch.stack([torch.arange(0, x.shape[0])] * 2, dim=0).to(x.device)
            graph.edge_index = torch.cat([graph.edge_index, self_loop], dim=1)
            graph["unet_improved"] = True
        graph.row_norm()

        with graph.local_graph():
            if self.training and self.adj_dropout > 0:
                graph.edge_index, graph.edge_weight = dropout_adj(
                    graph.edge_index, graph.edge_weight, self.adj_dropout)

            x = F.dropout(x, p=self.n_dropout, training=self.training)
            h = self.in_gcn(graph, x)
            h = self.act(h)
            h_list = self.unet(graph, h)

            h = h_list[-1]
            h = F.dropout(h, p=self.n_dropout, training=self.training)
            return self.out_gcn(graph, h)
Example #7
File: sign.py  Project: yuchen7/cogdl
    def _preprocessing(self, x, edge_index):
        num_nodes = x.shape[0]

        op_embedding = []
        op_embedding.append(x)

        # Randomly drop a fraction of the edges
        edge_index, _ = dropout_adj(edge_index, drop_rate=self.dropedge_rate)
        row, col = edge_index

        if self.undirected:
            edge_index = to_undirected(edge_index, num_nodes)
            row, col = edge_index

        # adj matrix
        edge_index, edge_attr = get_adj(row,
                                        col,
                                        asymm_norm=self.asymm_norm,
                                        set_diag=self.set_diag,
                                        remove_diag=self.remove_diag)

        nx = x
        for _ in range(self.num_propagations):
            nx = spmm(edge_index, edge_attr, nx)
            op_embedding.append(nx)

        # transpose adj matrix
        edge_index, edge_attr = get_adj(col,
                                        row,
                                        asymm_norm=self.asymm_norm,
                                        set_diag=self.set_diag,
                                        remove_diag=self.remove_diag)

        nx = x
        for _ in range(self.num_propagations):
            nx = spmm(edge_index, edge_attr, nx)
            op_embedding.append(nx)

        return torch.cat(op_embedding, dim=1)
Example #8
File: revgcn.py  Project: rpatil524/cogdl
    def forward(self, graph):
        graph.requires_grad = False
        edge_index, edge_weight = dropout_adj(
            graph.edge_index, drop_rate=self.drop_edge_rate, renorm=None, training=self.training
        )
        h = graph.x
        h = F.dropout(h, self.dropout, training=self.training)

        with graph.local_graph():
            graph.edge_index = edge_index
            graph.sym_norm()
            assert (graph.degrees() > 0).all()
            h = self.layers[0](graph, h)

            mask = shared_dropout(h, self.dropout)
            for i in range(1, len(self.layers) - 1):
                h = self.layers[i](graph, h, mask)
            h = self.norm(h)
            h = self.act(h)
            h = F.dropout(h, p=self.dropout, training=self.training)
            h = self.layers[-1](graph, h)

        return h
Example #9
    def transform_data(self):
        self.graph.edge_index, _ = dropout_adj(edge_index=self.edge_index,
                                               drop_rate=self.dropedge_rate)
        return self.graph.to(self.device)
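
Across all nine snippets, dropout_adj returns a pair: the thinned edge_index plus a second value (edge weights) that several callers simply discard. A minimal standalone call using the keyword form from Examples #1, #2, #7, #8, and #9, under the same import assumption as above:

    import torch
    from cogdl.utils import dropout_adj  # import path is an assumption

    edge_index = torch.tensor([[0, 1, 2],
                               [1, 2, 0]])
    # drop_rate is the fraction of edges expected to be removed.
    new_edge_index, _ = dropout_adj(edge_index, drop_rate=0.5)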