Example #1
    def forward(self, data, flag):
        x, edge_index, edge_orig = data.x, data.edge_index, data.edge_index_orig
        if flag == 'Training':
            edge_index1, _ = dropout_adj(edge_orig, p=self.p)
            edge_index2, _ = dropout_adj(edge_orig, p=self.p)
        else:
            edge_index1, _ = dropout_adj(edge_orig, p=0)
            edge_index2, _ = dropout_adj(edge_orig, p=0)

        x = self.linear1(x)
        x = self.drop1(x)
        x = F.leaky_relu(x)

        z = x[edge_index[0, :], :]
        y = x[edge_index[1, :], :]

        #        x = torch.add(z,y)/2
        #        x = torch.cat((y,z), 1)
        #        x = F.cosine_similarity(z,y)
        #        x = x[:,None]

        x = torch.cat((y, z), 1)

        #        x = self.linear1(x)
        #        x = self.drop1(x)
        #        x = F.relu(x)

        x2 = self.linear2(x)
        x = x2
        return x, x2
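All of these snippets call dropout_adj from torch_geometric.utils, which randomly removes columns of edge_index and returns a new (edge_index, edge_attr) pair. A minimal, self-contained sketch of the call, not taken from any of the projects above:

import torch
from torch_geometric.utils import dropout_adj

edge_index = torch.tensor([[0, 1, 1, 2],
                           [1, 0, 2, 1]])
# Drop each edge with probability 0.3; with training=False the input is returned unchanged.
edge_index, _ = dropout_adj(edge_index, p=0.3, training=True)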
Example #2
    def forward(self, x, edge_index):
        x_in = x
        edge_index, _ = add_remaining_self_loops(edge_index)

        if self.norm == 'dropedge':
            edge_index, _ = dropout_adj(edge_index,
                                        force_undirected=True,
                                        training=self.training)

        row, col = edge_index
        deg = degree(row)
        deg_inv_sqrt = deg.pow(-0.5)
        norm = deg_inv_sqrt[row] * deg_inv_sqrt[col]

        # x = self.linear(x)

        if self.norm == 'neighbornorm':
            x_j = self.normlayer(x, edge_index)
        else:
            x_j = x[col]

        x_j = norm.view(-1, 1) * x_j
        out = scatter_add(src=x_j, index=row, dim=0, dim_size=x.size(0))

        out = self.linear(out)

        if self.activation:
            out = F.relu(out)

        if self.norm in ('batchnorm', 'layernorm', 'pairnorm', 'nodenorm'):
            out = self.normlayer(out)

        if self.residual:
            out = x_in + out

        if self.dropout:
            out = F.dropout(out, p=0.5, training=self.training)

        return out
Example #3
def train(model: Model, x, edge_index):
    model.train()
    optimizer.zero_grad()
    edge_index_1 = dropout_adj(edge_index, p=drop_edge_rate_1)[0]
    edge_index_2 = dropout_adj(edge_index, p=drop_edge_rate_2)[0]
    x_1 = drop_feature(x, drop_feature_rate_1)
    x_2 = drop_feature(x, drop_feature_rate_2)
    z1 = model(x_1, edge_index_1)
    z2 = model(x_2, edge_index_2)

    loss = model.loss(z1, z2, batch_size=0)
    loss.backward()
    optimizer.step()

    return loss.item()
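drop_feature is not defined in this snippet. A minimal sketch of a compatible helper, assuming it masks whole feature columns with the given probability (the project's own implementation may differ):

import torch

def drop_feature(x, drop_prob):
    # Zero out each feature column independently with probability drop_prob.
    mask = torch.rand(x.size(1), device=x.device) < drop_prob
    x = x.clone()
    x[:, mask] = 0
    return x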
Example #4
    def forward(self, data):
        x, batch = data.x, data.batch
        edge_index = knn_graph(x, 100, batch)
        edge_index, _ = dropout_adj(edge_index, p=0.3)
        batch = data.batch

        x = F.leaky_relu(self.conv1(x, edge_index))
        x1 = torch.cat([gap(x, batch), gmp(x, batch)], dim=1)

        x = F.leaky_relu(self.conv2(x, edge_index))
        x2 = torch.cat([gap(x, batch), gmp(x, batch)], dim=1)

        x = F.leaky_relu(self.conv3(x, edge_index))
        x3 = torch.cat([gap(x, batch), gmp(x, batch)], dim=1)

        x = torch.cat([x1, x2, x3], dim=1)

        x = self.batchnorm1(x)

        x = F.leaky_relu(self.linear1(x))

        x = self.drop(x)
        x = F.leaky_relu(self.linear2(x))
        x = F.leaky_relu(self.linear3(x))
        x = F.leaky_relu(self.linear4(x))
        x = F.leaky_relu(self.linear5(x))

        x = self.out(x)
        if self.classification:
            x = torch.sigmoid(x)
        x = x.view(-1)

        return x
Example #5
    def forward(self, data):
        start = time.time()
        x, edge_index, edge_type, batch = data.x, data.edge_index, data.edge_type, data.batch
        if self.adj_dropout > 0:
            edge_index, edge_type = dropout_adj(
                edge_index,
                edge_type,
                p=self.adj_dropout,
                force_undirected=self.force_undirected,
                num_nodes=len(x),
                training=self.training)
        concat_states = []
        for conv in self.convs:
            x = torch.tanh(conv(x, edge_index, edge_type))
            concat_states.append(x)
        concat_states = torch.cat(concat_states, 1)

        users = data.x[:, 0] == 1
        items = data.x[:, 1] == 1
        x = torch.cat([concat_states[users], concat_states[items]], 1)
        if self.side_features:
            x = torch.cat([x, data.u_feature, data.v_feature], 1)

        x = F.relu(self.lin1(x))
        x = F.dropout(x, p=0.5, training=self.training)
        x = self.lin2(x)
        if self.regression:
            return x[:, 0] * self.multiply_by
        else:
            return F.log_softmax(x, dim=-1)
Example #6
    def forward(self, batch, propagate_messages=True):
        batch.edge_attr[torch.isnan(batch.edge_attr)] = 0
        batch.x[torch.isnan(batch.x)] = 0

        #randomly drop edges
        batch.edge_index, batch.edge_attr = dropout_adj(
            batch.edge_index, edge_attr=batch.edge_attr, p=self.edge_dropout)

        #initial dropout and embedding
        batch.x = self.embedding[0](batch.x.float())
        for mod in self.embedding[1:]:
            batch.x = batch.x + mod(batch.x.float())

        #positional encoding
        if self.positional_encoding and propagate_messages:
            batch.x = self.posencoder(batch.x, batch.batch)

        batch.propagate_messages = propagate_messages
        #graph convolutions
        batch = self.gconv(batch)

        #fully connected linear layers
        x = self.lin(batch.x)

        return x
Example #7
    def _preprocessing(self, x, edge_index):
        num_nodes = x.shape[0]

        op_embedding = []
        op_embedding.append(x)

        # Randomly drop a fraction of edges before propagation
        edge_index, _ = dropout_adj(edge_index, p=self.dropedge_rate, num_nodes=num_nodes)
        row, col = edge_index

        if self.undirected:
            edge_index = to_undirected(edge_index, num_nodes)
            row, col = edge_index

        # adj matrix
        adj = get_adj(
            row, col, num_nodes, asymm_norm=self.asymm_norm, set_diag=self.set_diag, remove_diag=self.remove_diag
        )

        nx = x
        for _ in range(self.num_propagations):
            nx = adj @ nx
            op_embedding.append(nx)

        # transpose adj matrix
        adj = get_adj(
            col, row, num_nodes, asymm_norm=self.asymm_norm, set_diag=self.set_diag, remove_diag=self.remove_diag
        )

        nx = x
        for _ in range(self.num_propagations):
            nx = adj @ nx
            op_embedding.append(nx)

        return torch.cat(op_embedding, dim=1)
Example #8
 def forward(self, data):
     x, edge_index, edge_type, batch = data.x, data.edge_index, data.edge_type, data.batch
     if self.adj_dropout > 0:
         edge_index, edge_type = dropout_adj(
             edge_index,
             edge_type,
             p=self.adj_dropout,
             force_undirected=self.force_undirected,
             num_nodes=len(x),
             training=self.training)
     concat_states = []
     for conv in self.convs:
         x = torch.tanh(conv(x, edge_index, edge_type))
         concat_states.append(x)
     concat_states = torch.cat(concat_states, 1)
     x = global_sort_pool(concat_states, batch,
                          self.k)  # batch * (k*hidden)
     x = x.unsqueeze(1)  # batch * 1 * (k*hidden)
     x = F.relu(self.conv1d_params1(x))
     x = self.maxpool1d(x)
     x = F.relu(self.conv1d_params2(x))
     x = x.view(len(x), -1)  # flatten
     x = F.relu(self.lin1(x))
     x = F.dropout(x, p=0.5, training=self.training)
     x = self.lin2(x)
     if self.regression:
         return x[:, 0]
     else:
         return F.log_softmax(x, dim=-1)
Example #9
    def forward(self, data):
        x, batch = data.x, data.batch
        edge_index = knn_graph(x, 100, batch)                               #?
        edge_index, _ = dropout_adj(edge_index, p=0.3)                      #?
        batch = data.batch

        
        y = data.x
        y = self.point1(y, edge_index)  # dim = n_intermediate
        pointlist = [y]
        for f in range(self.point_depth - 1):
            y = self.pointfkt[f](y, edge_index)
            pointlist.append(y)

        y = torch.cat(pointlist, dim=1)  # dim = n_intermediate * point_depth
        y = torch.cat([gap(y, batch), gmp(y, batch)], dim=1)

        x = self.batchnorm1(y)
        for g in range(self.lin_depth):
            x = F.leaky_relu(self.linearfkt[g](x))
            # g = 1, 4, 7, ... and at least two more layers follow
            if (g - 1) % 3 == 0 and self.lin_depth - 1 > g:
                x = self.drop[g](x)


        x = self.out(x)
        if self.classification:
            x = torch.sigmoid(x)
        x = x.view(-1)

        return x
Example #10
    def forward(self, data):
        x, edge_index, edge_weight = data.x, data.edge_index, data.edge_weight
        if self.hyperparameters['dropedge_rate'] is not None:
            edge_index, edge_weight = dropout_adj(
                edge_index, edge_weight, p=self.hyperparameters['dropedge_rate'],
                force_undirected=False, num_nodes=None, training=self.training)

        if self.hyperparameters['use_linear']:
            x = F.relu(self.input_lin(x))
        else:
            x = F.relu(self.conv1(x, edge_index, edge_weight))
            if self.hyperparameters['num_layers'] == 1:
                return x
        x = F.dropout(x,
                      p=self.hyperparameters['dropout_rate'],
                      training=self.training)
        for conv in self.convs:
            x = F.relu(conv(x, edge_index, edge_weight=edge_weight))
            x = F.dropout(x,
                          p=self.hyperparameters['dropout_rate'],
                          training=self.training)
        if self.hyperparameters['use_linear']:
            x = self.output_lin(x)
        else:
            x = self.conv2(x, edge_index, edge_weight)
        return x
Example #11
    def forward(self, data):
        x, batch = data.x, data.batch
        edge_index = knn_graph(x, 100, batch)  #?
        edge_index, _ = dropout_adj(edge_index, p=0.3)  #?
        batch = data.batch

        x = F.leaky_relu(self.conv1(x, edge_index))
        x1 = torch.cat([gap(x, batch), gmp(x, batch)], dim=1)
        convlist = [x1]

        for f in range(self.conv_depth - 1):
            x = F.leaky_relu(self.convfkt[f](x, edge_index))
            xi = torch.cat([gap(x, batch), gmp(x, batch)], dim=1)
            convlist.append(xi)

        x = torch.cat(convlist, dim=1)

        x = self.batchnorm1(x)
        for g in range(self.lin_depth):
            x = F.leaky_relu(self.linearfkt[g](x))
            # g = 1, 4, 7, ... and at least two more layers follow
            if (g - 1) % 3 == 0 and self.lin_depth - 1 > g:
                x = self.drop[g](x)

        x = self.out(x)
        if self.classification:
            x = torch.sigmoid(x)
        x = x.view(-1)

        return x
Example #12
def test_dropout_adj():
    edge_index = torch.tensor([[0, 1, 1, 2, 2, 3], [1, 0, 2, 1, 3, 2]])
    edge_attr = torch.Tensor([1, 2, 3, 4, 5, 6])

    out = dropout_adj(edge_index, edge_attr, training=False)
    assert edge_index.tolist() == out[0].tolist()
    assert edge_attr.tolist() == out[1].tolist()

    torch.manual_seed(5)
    out = dropout_adj(edge_index, edge_attr)
    assert out[0].tolist() == [[1, 3], [0, 2]]
    assert out[1].tolist() == [2, 6]

    torch.manual_seed(5)
    out = dropout_adj(edge_index, edge_attr, force_undirected=True)
    assert out[0].tolist() == [[1, 2], [2, 1]]
    assert out[1].tolist() == [3, 3]
Example #13
    def forward(self, x, edge_index):
        edge_index, _ = dropout_adj(
            edge_index, p=0.2, force_undirected=True, num_nodes=x.shape[0], training=self.training
        )
        x = F.dropout(x, p=self.dropout, training=self.training)

        x = self.unet(x, edge_index)
        return x
Example #14
    def forward(self, data):
        edge_index, _ = dropout_adj(
            data.edge_index, p=self.initial_dropout_adj, force_undirected=True,
            num_nodes=data.num_nodes, training=self.training)
        x = F.dropout(data.x, p=self.initial_dropout_nodes, training=self.training)

        x = self.unet(x, edge_index)
        return F.log_softmax(x, dim=1)
Example #15
def main():
    parser = argparse.ArgumentParser(description='OGB (Node2Vec)')
    parser.add_argument('--device', type=int, default=0)
    parser.add_argument('--task', type=str, default='ogbn')
    parser.add_argument('--dataset', type=str, default='arxiv')
    parser.add_argument('--embedding_dim', type=int, default=128)
    parser.add_argument('--walk_length', type=int, default=80)
    parser.add_argument('--context_size', type=int, default=20)
    parser.add_argument('--walks_per_node', type=int, default=10)
    parser.add_argument('--batch_size', type=int, default=256)
    parser.add_argument('--lr', type=float, default=0.01)
    parser.add_argument('--epochs', type=int, default=5)
    parser.add_argument('--log_steps', type=int, default=1)
    parser.add_argument('--dropedge_rate', type=float, default=0.4)
    parser.add_argument('--dump_adj_only', dest="dump_adj_only", action="store_true", help="dump adj matrix for proX")
    parser.set_defaults(dump_adj_only=False)
    args = parser.parse_args()

    device = f'cuda:{args.device}' if torch.cuda.is_available() else 'cpu'
    device = torch.device(device)

    dataset = create_dataset(name=f'{args.task}-{args.dataset}')
    data = dataset[0]
    if args.dataset == 'arxiv':
        data.edge_index = to_undirected(data.edge_index, data.num_nodes)
    elif args.dataset == 'papers100M':
        data.edge_index, _ = dropout_adj(data.edge_index, p=args.dropedge_rate, num_nodes=data.num_nodes)
        data.edge_index = to_undirected(data.edge_index, data.num_nodes)

    if args.dump_adj_only:
        adj = to_scipy_sparse_matrix(data.edge_index)
        sp.save_npz(f'data/{args.dataset}-adj.npz', adj)
        return

    model = Node2Vec(data.edge_index, args.embedding_dim, args.walk_length,
                     args.context_size, args.walks_per_node,
                     sparse=True).to(device)

    loader = model.loader(batch_size=args.batch_size, shuffle=True,
                          num_workers=4)
    optimizer = torch.optim.SparseAdam(model.parameters(), lr=args.lr)

    model.train()
    for epoch in range(1, args.epochs + 1):
        for i, (pos_rw, neg_rw) in enumerate(loader):
            optimizer.zero_grad()
            loss = model.loss(pos_rw.to(device), neg_rw.to(device))
            loss.backward()
            optimizer.step()

            if (i + 1) % args.log_steps == 0:
                print(f'Epoch: {epoch:02d}, Step: {i+1:03d}/{len(loader)}, '
                      f'Loss: {loss:.4f}')

            if (i + 1) % 100 == 0:  # Save model every 100 steps.
                save_embedding(model, args.embedding_dim, args.dataset, args.context_size)
        save_embedding(model, args.embedding_dim, args.dataset, args.context_size)
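create_dataset and save_embedding are project-specific helpers that are not shown here. A rough sketch of what save_embedding could look like, inferred only from the call signature above (the real helper may use a different filename or format):

import torch

def save_embedding(model, embedding_dim, dataset, context_size):
    # Persist the learned Node2Vec embedding matrix to disk.
    torch.save(model.embedding.weight.data.cpu(),
               f'embedding_{dataset}_{embedding_dim}_{context_size}.pt')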
Example #16
 def forward(self, x, edge_index, edge_attr):
     # x: [N, node_channels]
     # edge_index: [2, E]
     # edge_attr: [E, edge_channels]
     # calling propagate function consequently call message and update
     edge_index, edge_attr = dropout_adj(edge_index,
                                         edge_attr=edge_attr,
                                         p=self.drop_prob)
     return self.propagate(edge_index, x=x, edge_attr=edge_attr)
Example #17
    def forward(self, data, mask):
        #  x0, edge_index0, edge_weight0 = data.x, data.edge_index, data.edge_attr


        edge_index0, _ = dropout_adj(
            data.edge_index, p=self.initial_dropout_adj, force_undirected=True,
            num_nodes=data.num_nodes, training=self.training)
        x0 = F.dropout(data.x, p=self.initial_dropout_nodes, training=self.training)

        # level 0 conv  
        x0_ = self.gcn0_in(x0, edge_index0)

        # pooled 1 
        s1 = F.relu(self.conv_pool1(x0_, edge_index0))
        x1, adj1, l1, e1 = dense_diff_pool(x0_, data.adj, s1, mask)
        x1 = torch.squeeze(x1)
        
        # get edge index level 1
        adj1_sparse_tuple = dense_to_sparse(torch.squeeze(adj1))
        edge_index1 = adj1_sparse_tuple[0]
        edge_weight1 = adj1_sparse_tuple[1]
                
        # level 1 conv
        x1_ = self.gcn1_in(x1, edge_index1, edge_weight1)
        
        # pooled 2 
        s2 = self.conv_pool2(x1_, edge_index1, edge_weight1)
        s2 = F.relu(s2)
        x2, adj2, l2, e2 = dense_diff_pool(x1_, adj1, s2)
        x2 = torch.squeeze(x2)
        
        # get edge index level 2
        adj2_sparse_tuple = dense_to_sparse(torch.squeeze(adj2))
        edge_index2 = adj2_sparse_tuple[0]
        edge_weight2 = adj2_sparse_tuple[1]
        
        # level 2 conv
        x2_out = self.gcn2_in(x2, edge_index2, edge_weight2)
        x2_out_up = torch.matmul(s2, x2_out) # unpool level 2
        
        # output level 1
        x1_out = self.gcn1_out(torch.cat((x1_, x2_out_up), 1), edge_index1, edge_weight1)
        x1_out_up = torch.matmul(s1, x1_out) # unpool level 1
        
        # output level 0 
        x0_out = self.gcn0_out(torch.cat((x0_, x1_out_up), 1), edge_index0)
    
        edge_loss = l1 + e1 + l2 + e2
        
        edges = {'e1' :{'e': edge_index1, 'w': edge_weight1},
                 'e2' :{'e': edge_index2, 'w': edge_weight2}}

        output_dict = {'prediction': F.log_softmax(x0_out, dim=1), 's01': s1,
                       'edge_loss': edge_loss, 'adj1': adj1, 'edges': edges}

        return output_dict
Example #18
    def _feature_masking(self, data):
        feat_mask1 = torch.FloatTensor(data.x.shape[1]).uniform_() > self.p_f1
        feat_mask2 = torch.FloatTensor(data.x.shape[1]).uniform_() > self.p_f2
        x1, x2 = data.x.clone(), data.x.clone()
        x1, x2 = x1 * feat_mask1, x2 * feat_mask2

        edge_index1, edge_attr1 = dropout_adj(data.edge_index,
                                              data.edge_attr,
                                              p=self.p_e1)
        edge_index2, edge_attr2 = dropout_adj(data.edge_index,
                                              data.edge_attr,
                                              p=self.p_e2)

        new_data1, new_data2 = data.clone(), data.clone()
        new_data1.x, new_data2.x = x1, x2
        new_data1.edge_index, new_data2.edge_index = edge_index1, edge_index2
        new_data1.edge_attr, new_data2.edge_attr = edge_attr1, edge_attr2

        return new_data1, new_data2
Example #19
    def forward(self, x, edge_index):
        edge_index, _ = dropout_adj(edge_index,
                                    p=0.2,
                                    force_undirected=True,
                                    training=self.training)
        x = F.dropout(x, p=0.8, training=self.training)

        #x = self.unet(x, edge_index)
        x = F.normalize(self.unet(x, edge_index), eps=1e-3)
        return x
Example #20
    def forward(self, data):
        x, edge_index, batch = data.x, data.edge_index, data.batch
        next_possible_nodes, headers = data.next_possible_nodes, data.headers

        bs = torch.unique(batch).size(0)

        ### SETUP TASK LEARNING ###
        x, edge_index, task1_true, task2_true, mask, masked_nodes_prim = prepare_task_learning(
            x, next_possible_nodes, headers, edge_index, bs)
        if self.training:
            self.count_nodes(masked_nodes_prim)
        ###########################

        # positional encoding
        x = x.view(bs, 23, -1).permute(0, 2, 1)
        x = self.pe(x)
        x = x.permute(0, 2, 1)
        x = x.reshape(bs * 23, -1)

        # apply classic convolution
        x = x.reshape(bs, 23, -1)
        x = self.classic_conv(x)
        x = x.reshape(bs * 23, -1)
        x = self.classic_conv_bn(x)

        # dropout adjacency matrix
        edge_index, _ = dropout_adj(edge_index,
                                    p=0.5,
                                    force_undirected=True,
                                    num_nodes=data.num_nodes,
                                    training=self.training)

        x = self.ll1(x)

        # att block 1
        x = self.att_block1(x, edge_index)

        x = self.slc4(x, self.fff)

        x = self.gp(x[mask], batch[mask])

        # apply classic convolution
        x = x.unsqueeze(1)
        x = self.classic_conv2(x)
        x = x.mean(axis=1)
        x = self.classic_conv_bn2(x)

        task1_pred = self.fc_task1(x)  # random masked node task
        task2_pred = self.fc_task2(x)  # random node in next graph task

        if self.graph_embedding_function is not None:
            self.graph_embedding_function(x, 0)

        return x, task1_true, task1_pred, task2_true, task2_pred
Example #21
    def forward(self):
        #No dropout happening here anymore, the dropout probabilities are both 0
        edge_index, _ = dropout_adj(data.edge_index,
                                    p=0,
                                    force_undirected=True,
                                    num_nodes=data.num_nodes,
                                    training=self.training)
        x = F.dropout(data.x, p=0, training=self.training)

        x = self.unet(x, edge_index)
        return F.log_softmax(x, dim=1)
Example #22
    def forward(self, data, flag):
        x, edge_index, edge_orig = data.x, data.edge_index, data.edge_index_orig
        if flag == 'Training':
            edge_index1, _ = dropout_adj(edge_orig, p=self.p)
            edge_index2, _ = dropout_adj(edge_orig, p=self.p)
        else:
            edge_index1, _ = dropout_adj(edge_orig, p=0)
            edge_index2, _ = dropout_adj(edge_orig, p=0)

        x = self.conv1(x, edge_index1)
        x = self.drop1(x)
        x = F.leaky_relu(x)

        z = x[edge_index[0, :], :]
        y = x[edge_index[1, :], :]

        x = torch.cat((y, z), 1)

        x2 = self.linear2(x)
        x = x2
        return x, x2
Example #23
File: unet.py Project: jkx19/cogdl
    def forward(self, graph):
        x = graph.x
        edge_index = torch.stack(graph.edge_index)
        edge_index, _ = dropout_adj(edge_index,
                                    p=0.2,
                                    force_undirected=True,
                                    num_nodes=x.shape[0],
                                    training=self.training)
        x = F.dropout(x, p=self.dropout, training=self.training)

        x = self.unet(x, edge_index)
        return x
Example #24
 def forward(self, data):
     x, edge_index, edge_weight = data.x, data.edge_index, data.edge_weight
     if self.hyperparameters['dropedge_rate'] is not None:
         edge_index, edge_weight = dropout_adj(
             edge_index, edge_weight, p=self.hyperparameters['dropedge_rate'],
             force_undirected=False, num_nodes=None, training=self.training)
     x = self.in_lin(x)
     x = F.dropout(x,
                   p=self.hyperparameters['dropout_rate'],
                   training=self.training)
     x = self.incep_conv(x, edge_index, edge_weight)
     x = self.out_lin(x)
     return x
Example #25
    def global_graph_encoding(self, X_tid):
        node_init_feat = self.user_tweet_embedding.weight
        node_init_feat = self.dropout(node_init_feat)
        edge_index = self.graph.edge_index.cuda()
        edge_weight = self.graph.edge_weight.cuda()

        edge_index, edge_weight = utils.dropout_adj(edge_index, edge_weight, training=self.training)

        node_rep1 = self.gnn1(node_init_feat, edge_index, edge_weight)
        node_rep1 = self.dropout(node_rep1)

        graph_output = self.gnn2(node_rep1, edge_index, edge_weight)
        return graph_output[X_tid]
Example #26
    def drop_edge(idx: int):
        global drop_weights

        if param['drop_scheme'] == 'uniform':
            return dropout_adj(data.edge_index,
                               p=param[f'drop_edge_rate_{idx}'])[0]
        elif param['drop_scheme'] in ['degree', 'evc', 'pr']:
            return drop_edge_weighted(data.edge_index,
                                      drop_weights,
                                      p=param[f'drop_edge_rate_{idx}'],
                                      threshold=0.7)
        else:
            raise Exception(f'undefined drop scheme: {param["drop_scheme"]}')
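drop_edge_weighted and drop_weights are defined elsewhere in the project. For orientation, a minimal sketch of a weighted edge-dropping helper, assuming drop_weights holds one relative drop weight per edge (an illustration, not the project's actual code):

import torch

def drop_edge_weighted(edge_index, edge_weights, p, threshold=1.0):
    # Scale weights so the mean drop probability is p, cap at `threshold`,
    # then keep each edge independently with probability 1 - drop probability.
    probs = (edge_weights / edge_weights.mean() * p).clamp(max=threshold)
    keep_mask = torch.bernoulli(1.0 - probs).to(torch.bool)
    return edge_index[:, keep_mask]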
Example #27
    def forward(self, x):

        edge_attr, edge_index = self.atrr(x)
        edge_attr = edge_attr.cuda()
        edge_index = edge_index.cuda()
        edge_index, edge_attr = dropout_adj(edge_index, edge_attr)
        x = self.conv1(x, edge_index, edge_weight=edge_attr)
        x = self.bn1(x)
        x = self.conv2(x, edge_index, edge_weight=edge_attr)
        x = self.bn2(x)
        x = x.view(x.size(0), -1)
        x = self.layer5(x)
        return x
Example #28
    def forward(self, data):
        x = data.x  # [N, C]
        edge_index = data.edge_index
        edge_index, _ = dropout_adj(edge_index,
                                    p=0.2,
                                    force_undirected=True,
                                    num_nodes=data.num_nodes,
                                    training=self.training)
        x = F.dropout(data.x, p=0.92, training=self.training)

        x = self.unet(x, edge_index)
        out = F.log_softmax(x, dim=1)  # [N, out_c]
        return out
Example #29
 def encode(self, x, edge_index, edge_attr, hour, week):
     edge_index, edge_attr = dropout_adj(edge_index,
                                         edge_attr,
                                         p=self.adj_drop,
                                         num_nodes=len(x),
                                         training=self.training)
     for conv in self.GNNs:
         x = F.relu(conv(x, edge_index, edge_weight=edge_attr))
         x = self.dropout_layer(x)
     z = x.view(-1, self.n_nodes * self.hidden_list[-1])
     z = self.concat_time(z, hour, week)
     z = self.dropout_layer(z)
     z = self.fc(z)  # (batch_size , node_dim )
     return z
Example #30
 def forward(self, data):
     edge_index, _ = dropout_adj(data.edge_index,
                                 p=0.1,
                                 force_undirected=True,
                                 num_nodes=data.num_nodes,
                                 training=self.training)
     x = data.pos  #F.dropout(data.pos, p=0.1, training=self.training)
     x = self.unet(x, edge_index)
     x = self.lin1(x)
     x = self.lin2(F.relu(x))
     x = self.lin3(x)
     if self.cls:
         return F.log_softmax(x, dim=1)
     else:
         return x