Example #1
0
    def forward(self, graph: Graph) -> torch.Tensor:
        """Run the Graph-UNet forward pass and return node-level outputs.

        When ``self.improved`` is set, one self-loop per node is appended to
        the edge index exactly once (guarded by the ``unet_improved`` flag on
        the graph). Edge weights are then row-normalized, and the pipeline
        dropout -> input GCN -> activation -> UNet -> dropout -> output GCN
        runs inside a temporary graph view (restored on exit).

        Args:
            graph: Input graph carrying node features in ``graph.x``.

        Returns:
            torch.Tensor: Output of the final GCN layer.
        """
        x = graph.x
        # Append self-loops only on the first call; the marker attribute
        # prevents duplicating them on subsequent forward passes.
        if self.improved and not hasattr(graph, "unet_improved"):
            src, dst = graph.edge_index
            loops = torch.arange(0, x.shape[0], device=x.device)
            graph.edge_index = (
                torch.cat([src, loops], dim=0),
                torch.cat([dst, loops], dim=0),
            )
            graph["unet_improved"] = True
        graph.row_norm()

        with graph.local_graph():
            # Edge dropout is applied only while training.
            if self.training and self.adj_dropout > 0:
                graph.edge_index, graph.edge_weight = dropout_adj(
                    graph.edge_index, graph.edge_weight, self.adj_dropout)

            h = F.dropout(x, p=self.n_dropout, training=self.training)
            h = self.act(self.in_gcn(graph, h))
            out = self.unet(graph, h)[-1]
            out = F.dropout(out, p=self.n_dropout, training=self.training)
            return self.out_gcn(graph, out)
Example #2
0
    def forward(self, graph: Graph) -> torch.Tensor:
        """Run the Graph-UNet forward pass and return node-level outputs.

        When ``self.improved`` is set, one self-loop per node is appended to
        ``graph.edge_index`` exactly once (guarded by the ``unet_improved``
        flag on the graph). Edge weights are then row-normalized, and the
        pipeline dropout -> input GCN -> activation -> UNet -> dropout ->
        output GCN runs inside a temporary graph view (restored on exit).

        Args:
            graph: Input graph carrying node features in ``graph.x``.

        Returns:
            torch.Tensor: Output of the final GCN layer.
        """
        x = graph.x
        if self.improved and not hasattr(graph, "unet_improved"):
            # Build the self-loop indices directly on the input's device
            # instead of allocating on CPU and copying with `.to(x.device)`.
            self_loop = torch.stack(
                [torch.arange(0, x.shape[0], device=x.device)] * 2, dim=0)
            graph.edge_index = torch.cat([graph.edge_index, self_loop], dim=1)
            # Marker prevents re-adding self-loops on subsequent calls.
            graph["unet_improved"] = True
        graph.row_norm()

        with graph.local_graph():
            # Edge dropout is applied only while training.
            if self.training and self.adj_dropout > 0:
                graph.edge_index, graph.edge_weight = dropout_adj(
                    graph.edge_index, graph.edge_weight, self.adj_dropout)

            x = F.dropout(x, p=self.n_dropout, training=self.training)
            h = self.in_gcn(graph, x)
            h = self.act(h)
            h_list = self.unet(graph, h)

            h = h_list[-1]
            h = F.dropout(h, p=self.n_dropout, training=self.training)
            return self.out_gcn(graph, h)