Code Example #1
    def forward(self, data):
        x, edge_index = data.x, data.edge_index

        dropedge_index = edge_index

        if self.edge_dropout:
            # DropEdge: randomly remove a fraction of edges while training;
            # force_undirected=True drops both directions of an edge together
            dropedge_index, _ = dropout_adj(edge_index,
                                            p=self.edge_dropout,
                                            force_undirected=True,
                                            training=self.training)

        for layer in self.conv_layers:
            x = layer(x, dropedge_index)

            # optionally resample the dropped edge set after every layer
            if self.layer_wise_dropedge and self.edge_dropout:
                dropedge_index, _ = dropout_adj(edge_index,
                                                p=self.edge_dropout,
                                                force_undirected=True,
                                                training=self.training)

        return x
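
All of the examples on this page revolve around torch_geometric.utils.dropout_adj, which randomly removes edges during training (DropEdge). For reference, here is a minimal self-contained sketch of the call pattern above; the toy graph is an assumption for illustration only, and note that later PyTorch Geometric releases deprecate dropout_adj in favor of dropout_edge:

import torch
from torch_geometric.utils import dropout_adj

# toy undirected 3-node path graph (illustrative only)
edge_index = torch.tensor([[0, 1, 1, 2],
                           [1, 0, 2, 1]])

# drop each edge with probability p while training; force_undirected=True
# removes both directions of an edge together, and training=False is a no-op
dropped_index, _ = dropout_adj(edge_index, p=0.5,
                               force_undirected=True, training=True)
print(dropped_index)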
Code Example #2
File: gnns.py Project: joneswong/AutoGraph
    def forward(self, data):
        if self.edge_droprate != 0.0:
            x = data.x
            # drop edges (and their weights) with probability edge_droprate
            edge_index, edge_weight = dropout_adj(data.edge_index,
                                                  data.edge_weight,
                                                  self.edge_droprate)
        else:
            x, edge_index, edge_weight = data.x, data.edge_index, data.edge_weight
        for conv in self.convs:
            x = x if self.fea_norm_layer is None else self.fea_norm_layer(x)
            x = F.dropout(x, p=self.droprate, training=self.training)
            x = F.elu(conv(x, edge_index, edge_weight))
        x = self.appnp(x)
        # return F.log_softmax(x, dim=-1)
        # focal loss is used, so return the logits and apply log_softmax inside GNNAlgo
        return x
Code Example #3
File: gnns.py Project: joneswong/AutoGraph
    def forward(self, data):
        if self.edge_droprate != 0.0:
            x = data.x
            # drop edges (and their weights) with probability edge_droprate
            edge_index, edge_weight = dropout_adj(data.edge_index,
                                                  data.edge_weight,
                                                  self.edge_droprate)
        else:
            x, edge_index, edge_weight = data.x, data.edge_index, data.edge_weight
        x = F.relu(self.first_lin(x))
        x = F.dropout(x, p=self.dropout_rate, training=self.training)
        for conv in self.convs:
            x = F.relu(conv(x, edge_index, edge_weight=edge_weight))
        x = F.dropout(x, p=self.dropout_rate, training=self.training)
        x = self.lin2(x)
        # return F.log_softmax(x, dim=-1)
        # focal loss is used, so return the logits and apply log_softmax inside GNNAlgo
        return x
Code Example #4
    def forward(self, local_preds: torch.FloatTensor, edge_index):
        sz = local_preds.size(0)
        steps = torch.ones(sz, device=local_preds.device)
        sum_h = torch.zeros(sz, device=local_preds.device)
        continue_mask = torch.ones(sz, dtype=torch.bool,
                                   device=local_preds.device)
        x = torch.zeros_like(local_preds)

        prop = self.dropout(local_preds)
        for _ in range(self.niter):

            old_prop = prop
            continue_fmask = continue_mask.float()

            # resample the dropped edges and renormalize them at every step
            drop_edge_index, _ = dropout_adj(edge_index,
                                             training=self.training)
            drop_edge_index, drop_norm = GCNConv.norm(drop_edge_index, sz)

            prop = self.propagate(drop_edge_index, x=prop, norm=drop_norm)

            # per-node halting probability (adaptive computation time)
            h = torch.sigmoid(self.halt(prop)).t().squeeze()
            prob_mask = (((sum_h + h) < 0.99) & continue_mask).squeeze()
            prob_fmask = prob_mask.float()

            steps = steps + prob_fmask
            sum_h = sum_h + prob_fmask * h

            final_iter = steps <= self.niter

            condition = prob_mask & final_iter
            p = torch.where(condition, sum_h, 1 - sum_h)

            # only nodes that are still propagating receive the update
            to_update = self.dropout(continue_fmask)[:, None]
            x = x + (prop * p[:, None] + old_prop *
                     (1 - p)[:, None]) * to_update

            continue_mask = continue_mask & prob_mask

            # stop early once every node has halted
            if (~continue_mask).all():
                break

        x = x / steps[:, None]

        return x, (steps - 1), (1 - sum_h)
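
The loop above is an adaptive computation-time style halting scheme: each node accumulates a learned halting probability h into sum_h until the sum crosses 0.99, steps counts the propagation rounds the node actually took, and 1 - sum_h is the remainder. A stripped-down sketch of just that bookkeeping, with random values standing in for sigmoid(self.halt(prop)) (the sizes and the 0.5 scale are assumptions):

import torch

sz, niter = 5, 10
steps = torch.ones(sz)
sum_h = torch.zeros(sz)
continue_mask = torch.ones(sz, dtype=torch.bool)

for _ in range(niter):
    h = torch.rand(sz) * 0.5                # stand-in for sigmoid(halt(prop))
    prob_mask = ((sum_h + h) < 0.99) & continue_mask
    steps = steps + prob_mask.float()       # rounds taken per node
    sum_h = sum_h + prob_mask.float() * h   # accumulated halting probability
    continue_mask = continue_mask & prob_mask
    if (~continue_mask).all():              # every node has halted
        break

print(steps - 1, 1 - sum_h)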
Code Example #5
File: gnns.py Project: joneswong/AutoGraph
    def forward(self, data):
        if self.edge_droprate != 0.0:
            x = data.x
            # drop edges (and their weights) with probability edge_droprate
            edge_index, edge_weight = dropout_adj(data.edge_index,
                                                  data.edge_weight,
                                                  self.edge_droprate)
        else:
            x, edge_index, edge_weight = data.x, data.edge_index, data.edge_weight
        for i, conv in enumerate(self.convs):
            # todo (daoyuan) add layer_norm
            x = x if self.fea_norm_layer is None else self.fea_norm_layer(x)
            x = F.dropout(x, p=self.droprate, training=self.training)
            if i == len(self.convs) - 1:
                # no nonlinearity after the last convolution
                x = conv(x, edge_index, edge_weight)
            else:
                x = F.elu(conv(x, edge_index, edge_weight))
        # return F.log_softmax(x, dim=-1)
        # focal loss is used, so return the logits and apply log_softmax inside GNNAlgo
        return x
Code Example #6
File: model.py Project: jsicheng/gatmc
    def forward(self, x, edge_index, edge_type, edge_norm, data):
        # g = to_networkx(data)
        # if self.accum == 'stack':
        #     # TODO: if stack also refers to input x:
        #     # num_nodes = int(x.shape[0] / self.num_relations)
        #     # assert num_nodes == g.number_of_nodes()
        #     # TODO: else:
        #     assert x.shape[0] == g.number_of_nodes()
        # timer = Timer()
        # print('start', timer.get_time())
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        # cache keyed by the input edge_index shape: edges are dropped and the
        # per-relation metadata is computed once, then reused on later calls
        cache_key = tuple(edge_index.shape)
        if hash(cache_key) not in self.edge_obj_cache:
            # drop edges together with their relation types
            edge_index, edge_type = dropout_adj(edge_index,
                                                edge_type.view(-1, 1),
                                                self.drop_prob)
            edge_index = edge_index.to(device)
            edge_type = edge_type.view(-1).to(device)
            self.init_g(x.shape[0], edge_index, edge_type)
            relation2edge_dict = self.separate_edges(
                edge_index, edge_type)  # TODO: separate edges correctly
            avg_ratings, num_ratings, ones_vec = self.get_node_metadata()
            self.edge_obj_cache[hash(cache_key)] = \
                edge_index, edge_type, relation2edge_dict, \
                avg_ratings, num_ratings, ones_vec
        else:
            edge_index, edge_type, relation2edge_dict, avg_ratings, num_ratings, ones_vec = \
                self.edge_obj_cache[hash(cache_key)]
        # print('ei', timer.get_time())
        x = self.get_x_init(avg_ratings, num_ratings, ones_vec)
        # print('x_init', timer.get_time())

        features = self.gnn(
            x, edge_index,
            relation2edge_dict)  # TODO: set up dimensions properly
        # print('gnn', timer.get_time())
        # features = self.rgc_layer(x, edge_index, edge_type, edge_norm)
        u_features, i_features = self.separate_features(features)
        u_features, i_features = self.dense_layer(u_features, i_features)
        # print('sep+dense', timer.get_time())

        return u_features, i_features
Code Example #7
    def forward(self, data):
        x, edge_index = data.x, data.edge_index

        dropedge_index = edge_index

        if self.edge_dropout:
            # same DropEdge scheme as in Code Example #1, applied once up front
            dropedge_index, _ = dropout_adj(edge_index,
                                            p=self.edge_dropout,
                                            force_undirected=True,
                                            training=self.training)

        outputs = []

        # keep every layer's output and fuse them with a final convolution
        # over the concatenation (jumping-knowledge style dense connectivity)
        for layer in self.conv_layers:
            x = layer(x, dropedge_index)
            outputs.append(x)

        x = torch.cat(outputs, dim=1)
        x = self.last_conv(x, dropedge_index)

        return x
Code Example #8
File: gcn_algo.py Project: joneswong/AutoGraph
    def forward(self, data):
        # feature selection: self._fe is a column-slice spec over data.x,
        # e.g. ":" (all columns), ":128" (first 128), or "128:" (from 128 on)
        if self._fe == ":":
            datax = data.x
        elif self._fe.startswith(":"):
            datax = data.x[:, :int(self._fe[1:])]
        else:
            datax = data.x[:, int(self._fe[:-1]):]

        if self.edge_droprate != 0.0:
            x = datax
            edge_index, edge_weight = dropout_adj(data.edge_index,
                                                  data.edge_weight,
                                                  self.edge_droprate)
        else:
            x, edge_index, edge_weight = datax, data.edge_index, data.edge_weight

        deep_x, wide_x, attention = None, None, None
        if self.deep:
            if self.res_type == 0.0:
                x = F.relu(self.first_lin(x))
                x = F.dropout(x,
                              p=self.hidden_droprate,
                              training=self.training)
                for conv in self.convs:
                    x = F.relu(conv(x, edge_index, edge_weight=edge_weight))
                x = F.dropout(x,
                              p=self.hidden_droprate,
                              training=self.training)
                x = self.lin2(x)
            else:
                x = F.relu(self.first_lin(x))
                x = F.dropout(x,
                              p=self.hidden_droprate,
                              training=self.training)
                x_list = [] if self.directed else [x]
                for conv in self.convs:
                    x = F.relu(conv(x, edge_index, edge_weight=edge_weight))
                    # res_type 3.0: residual from the earliest stored representation
                    if self.res_type == 3.0 and len(x_list) != 0:
                        x = x + x_list[0]
                    x_list.append(x)
                # res_type 1.0: add the earliest representation; 2.0: sum all of them
                if self.res_type == 1.0:
                    x = x + x_list[0]
                elif self.res_type == 2.0:
                    x = torch.sum(torch.stack(x_list, 0), 0)
                x = F.dropout(x,
                              p=self.hidden_droprate,
                              training=self.training)
                x = self.lin2(x)
            deep_x = x

        if self.wide:
            wide_x = self.wide_layer(datax)
            if self.deep:
                # per-node softmax weights for fusing the deep and wide heads
                attention = torch.unsqueeze(
                    F.softmax(self.attention_layer(datax), dim=-1), dim=1)

        if self.deep and self.wide:
            return torch.sum(torch.stack([deep_x, wide_x], dim=2) * attention,
                             dim=-1)
        elif self.deep and not self.wide:
            return deep_x
        elif not self.deep and self.wide:
            return wide_x
        else:
            return None
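
The deep/wide fusion at the end stacks the two sets of logits along a new trailing dimension and weights them with the per-node attention. A shape-level sketch (all sizes are assumptions; num_classes stands in for whatever self.lin2 and self.wide_layer emit):

import torch
import torch.nn.functional as F

num_nodes, num_classes = 4, 3
deep_x = torch.randn(num_nodes, num_classes)
wide_x = torch.randn(num_nodes, num_classes)

att_logits = torch.randn(num_nodes, 2)          # stand-in for attention_layer(datax)
attention = torch.unsqueeze(F.softmax(att_logits, dim=-1), dim=1)  # [N, 1, 2]

stacked = torch.stack([deep_x, wide_x], dim=2)  # [N, num_classes, 2]
fused = (stacked * attention).sum(dim=-1)       # [N, num_classes]
print(fused.shape)                              # torch.Size([4, 3])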
Code Example #9
File: gcn_algo.py Project: joneswong/AutoGraph
    def forward(self, data):
        is_real_weighted_graph = data["real_weight_edge"]
        # norm_type = "right" if data["directed"] else "both"
        # an alternative directed-GCN normalization used in R-GCN; it did not
        # yield improvements on feedback dataset 3
        # for conv in self.convs:
        #     conv._norm = norm_type

        # feature selection: self._fe is a column-slice spec over data.x,
        # e.g. ":" (all columns), ":128" (first 128), or "128:" (from 128 on)
        if self._fe == ":":
            datax = data.x
        elif self._fe.startswith(":"):
            datax = data.x[:, :int(self._fe[1:])]
        else:
            datax = data.x[:, int(self._fe[:-1]):]

        if self.edge_droprate != 0.0:
            x = datax
            edge_index, edge_weight = dropout_adj(data.edge_index,
                                                  data.edge_weight,
                                                  self.edge_droprate)
            # rebuild the DGL graph from scratch with the surviving edges
            self.g.clear()
            self.g.add_nodes(data.num_nodes)
            self.g.add_edges(edge_index[0], edge_index[1])
            if self.self_loop:
                # add self-loops to avoid an invalid normalizer
                self.g.add_edges(self.g.nodes(), self.g.nodes())
            if is_real_weighted_graph:
                if self.self_loop:
                    # self-loop edges get weight 1.0
                    self.g.edata["weight"] = torch.cat(
                        (edge_weight,
                         torch.ones(self.g.number_of_nodes(),
                                    device=datax.device)))
                else:
                    # edge_weight is already a tensor, so cast and move it
                    # instead of copy-constructing a new one
                    self.g.edata["weight"] = edge_weight.to(
                        dtype=torch.float32, device=datax.device)
        else:
            x, edge_index, edge_weight = datax, data.edge_index, data.edge_weight
            if self.g.number_of_nodes() == 0:  # first forward: build the graph once
                self.g.add_nodes(data.num_nodes)
                self.g.add_edges(edge_index[0], edge_index[1])
                if self.self_loop:
                    # add self-loops to avoid an invalid normalizer
                    self.g.add_edges(self.g.nodes(), self.g.nodes())
                if is_real_weighted_graph:
                    if self.self_loop:
                        self.g.edata["weight"] = torch.cat(
                            (edge_weight,
                             torch.ones(self.g.number_of_nodes(),
                                        device=datax.device)))
                    else:
                        self.g.edata["weight"] = edge_weight.to(
                            dtype=torch.float32, device=datax.device)

        deep_x, wide_x, attention = None, None, None
        if self.deep:
            if self.res_type == 0.0:
                x = F.relu(self.first_lin(x))
                x = F.dropout(x,
                              p=self.hidden_droprate,
                              training=self.training)
                for conv in self.convs:
                    x = F.relu(
                        conv(self.g, x,
                             real_weighted_g=is_real_weighted_graph))
                x = F.dropout(x,
                              p=self.hidden_droprate,
                              training=self.training)
                x = self.lin2(x)
            else:
                x = F.relu(self.first_lin(x))
                x = F.dropout(x,
                              p=self.hidden_droprate,
                              training=self.training)
                x_list = [] if self.directed else [x]
                for conv in self.convs:
                    x = F.relu(
                        conv(self.g, x,
                             real_weighted_g=is_real_weighted_graph))
                    # res_type 3.0: residual from the earliest stored representation
                    if self.res_type == 3.0 and len(x_list) != 0:
                        x = x + x_list[0]
                    x_list.append(x)
                # res_type 1.0: add the earliest representation; 2.0: sum all of them
                if self.res_type == 1.0:
                    x = x + x_list[0]
                elif self.res_type == 2.0:
                    x = torch.sum(torch.stack(x_list, 0), 0)
                x = F.dropout(x,
                              p=self.hidden_droprate,
                              training=self.training)
                x = self.lin2(x)
            deep_x = x

        if self.wide:
            wide_x = self.wide_layer(datax)
            if self.deep:
                attention = torch.unsqueeze(
                    F.softmax(self.attention_layer(datax), dim=-1), dim=1)

        if self.deep and self.wide:
            return torch.sum(torch.stack([deep_x, wide_x], dim=2) * attention,
                             dim=-1)
        elif self.deep and not self.wide:
            return deep_x
        elif not self.deep and self.wide:
            return wide_x
        else:
            return None