Code Example #1
def train(epoch):
    t = time.time()
    model.train()
    output = model(features, adj)
    loss_train = F.nll_loss(output[idx_train], labels[idx_train])
    acc_train, confusion_matrix = accuracy(output[idx_train],
                                           labels[idx_train])
    optimizer.zero_grad()
    loss_train.backward()
    optimizer.step()

    print('###### Epoch: {:04d} ######'.format(epoch + 1))
    print('Training: Loss={:.4f} Accuracy={:.4f}'.format(
        loss_train.item(), acc_train.item()))
    #print('Training: Confusion Matrix(tn,fp,fn,tp)=', confusion_matrix)

    if not args.fastmode:
        # Evaluate validation set performance separately,
        # deactivates dropout during validation run.
        model.eval()
        output = model(features, adj)

    loss_val = F.nll_loss(output[idx_val], labels[idx_val])
    acc_val, confusion_matrix = accuracy(output[idx_val], labels[idx_val])
    print('Validation: Loss={:.4f} Accuracy={:.4f}'.format(
        loss_val.item(), acc_val.item()))
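The `accuracy` helper these examples call is not shown on this page. A minimal sketch of a version that also returns a binary confusion matrix, matching how Example #1 unpacks it (the `(tn, fp, fn, tp)` ordering follows the commented-out print above; the body is an assumption, not the original helper):

def accuracy(output, labels):
    # minimal sketch, assuming `output` holds per-class (log-)scores of shape (N, C)
    preds = output.max(1)[1].type_as(labels)
    acc = preds.eq(labels).double().sum() / len(labels)
    # binary confusion matrix (tn, fp, fn, tp); only meaningful for two classes
    tn = ((preds == 0) & (labels == 0)).sum().item()
    fp = ((preds == 1) & (labels == 0)).sum().item()
    fn = ((preds == 0) & (labels == 1)).sum().item()
    tp = ((preds == 1) & (labels == 1)).sum().item()
    return acc, (tn, fp, fn, tp)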
Code Example #2
def train(epoch):
    t = time.time()

    X = features

    model.train()
    optimizer.zero_grad()

    output = torch.log_softmax(model(X), dim=-1)

    loss_train = F.nll_loss(output[idx_train], labels[idx_train])

    acc_train = accuracy(output[idx_train], labels[idx_train])

    loss_train.backward()

    optimizer.step()

    if not args.fastmode:
        model.eval()
        output = model(X)
        output = torch.log_softmax(output, dim=-1)

    loss_val = F.nll_loss(output[idx_val], labels[idx_val])
    acc_val = accuracy(output[idx_val], labels[idx_val])

    if epoch % 100 == 0:
        print('Epoch: {:04d}'.format(epoch + 1),
              'loss_train: {:.4f}'.format(loss_train.item()),
              'acc_train: {:.4f}'.format(acc_train.item()),
              'loss_val: {:.4f}'.format(loss_val.item()),
              'acc_val: {:.4f}'.format(acc_val.item()),
              'time: {:.4f}s'.format(time.time() - t))

    return loss_val.item(), acc_val.item()
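Variants like Example #2 that return `(loss_val, acc_val)` are usually driven by an outer loop with early stopping. A hypothetical driver (the `patience` value and epoch budget are assumptions, not from the source project):

# hypothetical driver for the train(epoch) functions that return (loss_val, acc_val)
best_loss, bad_epochs, patience = float('inf'), 0, 100
for epoch in range(1000):
    loss_val, acc_val = train(epoch)
    if loss_val < best_loss:
        best_loss, bad_epochs = loss_val, 0
    else:
        bad_epochs += 1
    if bad_epochs > patience:
        print('Early stopping at epoch', epoch + 1)
        break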
Code Example #3
def train_GCN(epoch):
    t = time.time()

    # print(features[idx_train].shape, interaction_train.unsqueeze(dim=1).shape)
    # X = torch.cat((features[idx_train], interaction_train), 1)
    X = features
    X[idx_test] = 0  # note: zeroes the test-node features in place
    model_GCN.train()
    optimizer.zero_grad()
    # apply log_softmax so the training loss matches the validation pass below
    output = torch.log_softmax(model_GCN(X, A), dim=-1)
    loss_train = F.nll_loss(output[idx_train], labels[idx_train])

    acc_train = accuracy(output[idx_train], labels[idx_train])

    loss_train.backward()

    optimizer.step()

    model_GCN.eval()
    output = torch.log_softmax(model_GCN(features, A), dim=-1)
    loss_val = F.nll_loss(output[idx_val], labels[idx_val])
    acc_val = accuracy(output[idx_val], labels[idx_val])
    if epoch % 100 == 0:
        print('Epoch: {:04d}'.format(epoch + 1),
              'loss_train: {:.4f}'.format(loss_train.item()),
              'acc_train: {:.4f}'.format(acc_train.item()),
              'loss_val: {:.4f}'.format(loss_val.item()),
              'acc_val: {:.4f}'.format(acc_val.item()),
              'time: {:.4f}s'.format(time.time() - t))

    return loss_val.item(), acc_val.item()
Code Example #4
File: train.py Project: linhlpv/GCNs_demo
def train(epoch):
    t = time.time()
    model.train()
    optimizer.zero_grad()
    output = model(features, adj)
    loss_train = F.nll_loss(output[idx_train], labels[idx_train])
    acc_train = accuracy(output[idx_train], labels[idx_train])
    loss_train.backward()
    optimizer.step()

    if not args.fastmode:
        # Evaluate on the validation set;
        # dropout is disabled here.
        model.eval()
        output = model(features, adj)

    loss_val = F.nll_loss(output[idx_val], labels[idx_val])
    acc_val = accuracy(output[idx_val], labels[idx_val])

    # Print the results for each epoch
    print('Epoch: {:04d}'.format(epoch + 1),
          'loss_train: {:.4f}'.format(loss_train.item()),
          'acc_train: {:.4f}'.format(acc_train.item()),
          'loss_val: {:.4f}'.format(loss_val.item()),
          'acc_val: {:.4f}'.format(acc_val.item()),
          'time: {:.4f}s'.format(time.time() - t))
Code Example #5
 def train(self):
     optimizer = optim.Adam(self.model.parameters(),
                            lr=config.learning.lr,
                            weight_decay=config.learning.weight_decay)
     max_acc = 0
     for epoch in range(self.epochs):
         self.model.train()
         optimizer.zero_grad()
         output = self.model(self.features, self.adj)
         loss_train = F.nll_loss(output[self.idx_train],
                                 self.all_labels[self.idx_train])
         # acc_train = accuracy(output[self.idx_train], self.all_labels[self.idx_train])
         loss_train.backward()
         optimizer.step()
         self.model.eval()
         output = self.model(self.features, self.adj)
         #acc_val = accuracy(output[self.idx_val], self.all_labels_init[self.idx_val])
         #if acc_val.item() >= max_acc:
         #    max_acc = acc_val.item()
         acc_test = accuracy(output[self.idx_test],
                             self.all_labels_init[self.idx_test])
         if acc_test.item() > max_acc:
             max_acc = acc_test.item()
             if config.learning.save_model:
                 torch.save(self.model.state_dict(), config.paths.models)
             self.best_epoch = epoch
             preds = output
             beliefs = preds.max(1)[1].type_as(self.all_labels)
             #torch.save(self.model.state_dict(),
             #               "../Stats/models_graph/loss/model{}_methodmix_{}_ration_{}_unkn.h5".format(
             #                   config.method_decomposition_embedding, val, config.num_nearest_neighbours))
     return beliefs.numpy() + 1, acc_test  # beliefs from the best epoch; labels shifted to start at 1
Code Example #6
File: train.py Project: ma8sa/Graph_motion_seg
def train(epoch):
    t = time.time()
    model.train()
    optimizer.zero_grad()
    output = model(features, adj)
    loss_train = F.nll_loss(output[idx_train], labels[idx_train])
    acc_train = accuracy(output[idx_train], labels[idx_train])
    loss_train.backward()

    # debugging: inspect the first parameter's gradient, pausing until Enter is pressed
    print(list(model.parameters())[0].grad)
    input()
    optimizer.step()

    if not args.fastmode:
        # Evaluate validation set performance separately,
        # deactivates dropout during validation run.
        model.eval()
        output = model(features, adj)

    loss_val = F.nll_loss(output[idx_val], labels[idx_val])
    # debugging: show the predicted classes and count predictions of class 2
    a = torch.argmax(output[idx_val], dim=1)
    print(a)
    print(len(a[a == 2]))
    input()
    acc_val = accuracy(output[idx_val], labels[idx_val])
    print('Epoch: {:04d}'.format(epoch + 1),
          'loss_train: {:.4f}'.format(loss_train.item()),
          'acc_train: {:.4f}'.format(acc_train.item()),
          'loss_val: {:.4f}'.format(loss_val.item()),
          'acc_val: {:.4f}'.format(acc_val.item()),
          'time: {:.4f}s'.format(time.time() - t))
Code Example #7
def train(epoch):
    t = time.time()
    # This sets the module in training mode. Some layers (e.g., dropout and
    # batchnorm) behave differently in train and eval (test) mode.
    model.train()
    optimizer.zero_grad()
    # Same as model.forward(features, adj), i.e., a forward pass; hence
    # returns the final embedding.
    output = model(features, adj)
    # Negative log-likelihood loss, (input: torch.Tensor, target: torch.Tensor).
    # NOTE: it is enough that the shape of output is (-1, C: number of classes),
    # no need to specify C.
    loss_train = F.nll_loss(output[idx_train], labels[idx_train])
    # (I think) the max over the class dimension gives the predicted class,
    # from which accuracy is computed.
    acc_train = accuracy(output[idx_train], labels[idx_train])
    # Performs backpropagation; make sure gradients are zeroed beforehand.
    loss_train.backward()
    optimizer.step()  # performs an optimisation step

    if not args.fastmode:
        # Evaluate validation set performance separately,
        # deactivates dropout during validation run.
        model.eval()
        output = model(features, adj)

    loss_val = F.nll_loss(output[idx_val], labels[idx_val])
    acc_val = accuracy(output[idx_val], labels[idx_val])
    accval_lst.append(acc_val.item())
    acctrn_lst.append(acc_train.item())
    print('Epoch: {:04d}'.format(epoch + 1),
          'loss_train: {:.4f}'.format(loss_train.item()),
          'acc_train: {:.4f}'.format(acc_train.item()),
          'loss_val: {:.4f}'.format(loss_val.item()),
          'acc_val: {:.4f}'.format(acc_val.item()),
          'time: {:.4f}s'.format(time.time() - t))
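The comments in Example #7 note that `model.train()` and `model.eval()` change how some layers behave. A self-contained check (hypothetical, not from any project above) showing dropout firing only in training mode:

import torch
import torch.nn as nn

drop = nn.Dropout(p=0.5)
x = torch.ones(8)

drop.train()
print(drop(x))  # about half the entries zeroed, survivors scaled by 1 / (1 - p)

drop.eval()
print(drop(x))  # identity in eval mode: a tensor of ones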
Code Example #8
def train(epoch):
    t = time.time()
    model.train()
    optimizer.zero_grad()
    output = model(features, adj)
    loss_train = F.nll_loss(output[idx_train], labels[idx_train])
    acc_train = accuracy(output[idx_train], labels[idx_train])
    loss_train.backward()
    optimizer.step()

    if not args.fastmode:
        # Evaluate validation set performance separately,
        # deactivates dropout during validation run.
        model.eval()
        output = model(features, adj)

    loss_val = F.nll_loss(output[idx_val], labels[idx_val])
    acc_val = accuracy(output[idx_val], labels[idx_val])
    print(
        "Epoch: {:04d}".format(epoch + 1),
        "loss_train: {:.4f}".format(loss_train.item()),
        "acc_train: {:.4f}".format(acc_train.item()),
        "loss_val: {:.4f}".format(loss_val.item()),
        "acc_val: {:.4f}".format(acc_val.item()),
        "time: {:.4f}s".format(time.time() - t),
    )
Code Example #9
def train(epoch):
    t = time.time()
    model.train()
    optimizer.zero_grad()
    output = model(features, adj)
    loss_train = F.nll_loss(
        output[idx_train],
        labels[idx_train])  # - args.regularization_factor * regularizer
    acc_train = accuracy(output[idx_train], labels_train)
    if args.amp:
        with amp.scale_loss(loss_train, optimizer) as scaled_loss:
            scaled_loss.backward()
        optimizer.step()
    else:
        loss_train.backward()
        optimizer.step()
    model.eval()
    output = model(features, adj)
    # loss_val = F.nll_loss(output[idx_val], labels_val)
    acc_val = accuracy(output[idx_val], labels_val)
    if args.debug:
        print(
            'E%04d' % (epoch + 1),
            'loss_train: %4.2e, acc_train: %6.2f%%, best_val: %5.2f%%, best_test: %5.2f%%'
            % (loss_train.item(), 100 * acc_train.item(), best_val,
               100 * best_test),
            # 'loss_val: {:.2e}'.format(loss_val.item()),
            # 'acc_val: {:.2f}%'.format(100 * acc_val.item()),
            # 'time: {:.1e}'.format(time.time() - t),
            end=" ")
    return 100 * acc_train.item(), loss_train.item(), 100 * acc_val.item()
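Example #9's `args.amp` branch relies on NVIDIA Apex mixed precision, which only works if the model and optimizer were wrapped beforehand. A sketch of the assumed one-time setup:

# assumed one-time setup for the args.amp branch (NVIDIA Apex)
from apex import amp
model, optimizer = amp.initialize(model, optimizer, opt_level='O1')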
Code Example #10
def train(epoch):
    t = time.time()
    model.train()
    optimizer.zero_grad()

    output = model(features_train, adj_train)
    print("Labels shape")
    print(labels.shape)
    print("Output shape")
    print(output.shape)
    semisupervised = np.arange(output.shape[0]/4)
    # loss_train = F.nll_loss(output[idx_train], labels[idx_train])
    loss_train = F.nll_loss(output[semisupervised], labels_train[semisupervised])
    # acc_train = accuracy(output[idx_train], labels[idx_train])
    acc_train = accuracy(output, labels_train)
    loss_train.backward()
    optimizer.step()

    if not args.fastmode:
        # Evaluate validation set performance separately,
        # deactivates dropout during validation run.
        model.eval()
        output = model(features_val, adj_val)

    # loss_val = F.nll_loss(output[idx_val], labels[idx_val])
    loss_val = F.nll_loss(output, labels_val)
    # acc_val = accuracy(output[idx_val], labels[idx_val])
    acc_val = accuracy(output, labels_val)
    print('Epoch: {:04d}'.format(epoch+1),
          'loss_train: {:.4f}'.format(loss_train.item()),
          'acc_train: {:.4f}'.format(acc_train.item()),
          'loss_val: {:.4f}'.format(loss_val.item()),
          'acc_val: {:.4f}'.format(acc_val.item()),
          'time: {:.4f}s'.format(time.time() - t))
Code Example #11
def train(epoch):
    t = time.time()
    model.train()
    optimizer.zero_grad()
    output = model(features, adj)
    # Classification loss: cross-entropy.
    # Since log_softmax was already applied when computing output, the loss used here
    # is NLLLoss; without the log operation, CrossEntropyLoss would be used instead.
    loss_train = F.nll_loss(output[idx_train], labels[idx_train])
    acc_train = accuracy(output[idx_train], labels[idx_train])
    loss_train.backward()
    optimizer.step()

    if not args.fastmode:
        # Evaluate validation set performance separately,
        # deactivates dropout during validation run.
        model.eval()
        output = model(features, adj)

    loss_val = F.nll_loss(output[idx_val], labels[idx_val])
    acc_val = accuracy(output[idx_val], labels[idx_val])
    print('Epoch: {:04d}'.format(epoch + 1),
          'loss_train: {:.4f}'.format(loss_train.item()),
          'acc_train: {:.4f}'.format(acc_train.item()),
          'loss_val: {:.4f}'.format(loss_val.item()),
          'acc_val: {:.4f}'.format(acc_val.item()),
          'time: {:.4f}s'.format(time.time() - t))
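The translated comment in Example #11 claims that `log_softmax` followed by `NLLLoss` equals `CrossEntropyLoss` on raw logits. A quick check of that claim:

import torch
import torch.nn.functional as F

logits = torch.randn(4, 3)           # 4 nodes, 3 classes
target = torch.tensor([0, 2, 1, 2])

loss_a = F.nll_loss(F.log_softmax(logits, dim=-1), target)
loss_b = F.cross_entropy(logits, target)
print(torch.allclose(loss_a, loss_b))  # True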
Code Example #12
def train(model, features, edge_index, edge_weight, labels, epoch, idx_train,
          idx_val):
    t = time.time()
    model.train()
    optimizer.zero_grad()
    output = model(features, edge_index, edge_weight)
    loss_train = F.cross_entropy(output[idx_train], labels[idx_train])
    acc_train = accuracy(output[idx_train], labels[idx_train])
    loss_train.backward()
    optimizer.step()

    # if not args.fastmode:
    #     # Evaluate validation set performance separately,
    #     # deactivates dropout during validation run.
    #     model.eval()
    #     output = model(features, adj)

    loss_val = F.cross_entropy(output[idx_val], labels[idx_val])
    acc_val = accuracy(output[idx_val], labels[idx_val])
    print('Epoch: {:04d}'.format(epoch + 1),
          'loss_train: {:.4f}'.format(loss_train.item()),
          'acc_train: {:.4f}'.format(acc_train.item()),
          'loss_val: {:.4f}'.format(loss_val.item()),
          'acc_val: {:.4f}'.format(acc_val.item()),
          'time: {:.4f}s'.format(time.time() - t))
Code Example #13
def train(epoch):
    t = time.time()
    '''Switch the model to training mode and zero the optimizer gradients'''
    model.train()
    optimizer.zero_grad()
    '''Compute the output for all nodes'''
    output = model(features, adj)
    '''The loss is computed only on training-set nodes, i.e., optimization uses training data only'''
    loss_train = F.nll_loss(output[idx_train], labels[idx_train])
    # compute accuracy
    acc_train = accuracy(output[idx_train], labels[idx_train])
    # backpropagation
    loss_train.backward()
    # optimizer step
    optimizer.step()
    '''fastmode?'''
    if not args.fastmode:
        # Evaluate validation set performance separately,
        # deactivates dropout during validation run.
        model.eval()
        output = model(features, adj)
    '''Validation-set loss and accuracy'''
    loss_val = F.nll_loss(output[idx_val], labels[idx_val])
    acc_val = accuracy(output[idx_val], labels[idx_val])
    '''Print training and validation loss and accuracy'''
    print('Epoch: {:04d}'.format(epoch + 1),
          'loss_train: {:.4f}'.format(loss_train.item()),
          'acc_train: {:.4f}'.format(acc_train.item()),
          'loss_val: {:.4f}'.format(loss_val.item()),
          'acc_val: {:.4f}'.format(acc_val.item()),
          'time: {:.4f}s'.format(time.time() - t))
Code Example #14
File: train_edge.py Project: zbn123/grand_dropedge
def train(epoch, cold_start=False):
    model.train()

    t = time.time()
    #A_1, A_2 = A,A
    A_1, A_2 = feature_generator_(adj_, A, features, args.order, edges)
    x = features
    optimizer.zero_grad()
    outputs = model(x, x, A_1, A_2)

    A2 = Edge_Generator(A * 0.5, args.order, args.alpha)
    #print(np.sum((adj_).todense()))
    #A = adj2A(A)
    #if args.cuda:
    #    A = A.cuda()
    if cold_start:
        loss_train = theloss_cold(outputs, idx_train)  #labels_2
    else:
        loss_train = theloss(outputs, idx_train)
    # acc_train = accuracy(output[idx_train], labels[idx_train])
    loss_train.backward()
    optimizer.step()

    if not args.fastmode:
        # Evaluate validation set performance separately,
        # deactivates dropout during validation run.
        model.eval()
        outputs = model(x, x, A2, A2)

    if cold_start:
        loss_val = theloss_cold(outputs, idx_val)
    else:
        loss_val = theloss(outputs, idx_val)

    # prob = torch.cat([outputs[-1], outputs[-2]], 1)
    prob = (torch.exp(outputs[-1]) + torch.exp(outputs[-2])) / 2.0

    # y_true = labels.cpu().data.numpy()
    # y_pred = (prob.max(1)[1]%(labels.max().item() + 1)).type_as(labels).cpu().data.numpy()

    # acc_train = f1_score(y_true[idx_train], y_pred[idx_train], average='micro')
    # acc_val = f1_score(y_true[idx_val], y_pred[idx_val], average='micro')
    acc_train = accuracy(prob[idx_train], labels[idx_train])
    acc_val = accuracy(prob[idx_val], labels[idx_val])

    if (epoch + 1) % 1 == 0:
        # print('num of training', len(idx_train_fake))
        print('{:04d}'.format(epoch + 1),
              'loss_train: {:.4f}'.format(loss_train.item()),
              'acc_train: {:.5f}'.format(acc_train.item()),
              'loss_val: {:.4f}'.format(loss_val.item()),
              'acc_val: {:.5f}'.format(acc_val.item()),
              'time: {:.4f}s'.format(time.time() - t))

    return loss_val.item(), acc_val.item()
Code Example #15
def train_pmle(epoch):
    t = time.time()
    
    # print(features[idx_train].shape, interaction_train.unsqueeze(dim=1).shape)
    # X = torch.cat((features[idx_train], interaction_train), 1)
    X = features
    model_pmle.train()
    optimizer_pmle.zero_grad()
    # draw K random propagations of the features
    X_list = []
    K = args.sample
    for k in range(K):
        X_list.append(rand_prop(X, training=True))

    output_list = []
    for k in range(K):
        output_list.append(torch.log_softmax(
            model_pmle(model.forward_last(X_list[k][idx_train]).data,
                       interaction_train), dim=-1))

    # average the NLL loss over the K sampled outputs
    loss_train = 0.
    for k in range(K):
        loss_train += F.nll_loss(output_list[k], labels[idx_train])
    loss_train = loss_train / K

    # output = torch.log_softmax(model_pmle(X, interaction_train), dim=-1)
    # loss_train = F.nll_loss(output, labels[idx_train])

    acc_train = accuracy(output_list[0], labels[idx_train])
    loss_train.backward()
    optimizer_pmle.step()

    # if not args.fastmode:
    #     X = rand_prop(X, training=False)
    #     output = model_pmle(X[idx_train], interaction_train)
    #     output = torch.log_softmax(output, dim=-1)

    model_pmle.eval()
    output_val = torch.log_softmax(model_pmle(new_features[idx_val], interaction_val), dim=-1) 
    loss_val = F.nll_loss(output_val, labels[idx_val]) 
    acc_val = accuracy(output_val, labels[idx_val])

    print('Epoch: {:04d}'.format(epoch+1),
          'loss_train: {:.4f}'.format(loss_train.item()),
          'acc_train: {:.4f}'.format(acc_train.item()),
          'loss_val: {:.4f}'.format(loss_val.item()),
          'acc_val: {:.4f}'.format(acc_val.item()),
          'time: {:.4f}s'.format(time.time() - t))

    return loss_val.item(), acc_val.item()
Code Example #16
def train(epoch):
    t = time.time()
    
    X = features
    
    model.train()
    optimizer.zero_grad()
    X_list = []
    K = args.sample
    for k in range(K):
        X_list.append(rand_prop(X, training=True))

    output_list = []
    for k in range(K):
        output_list.append(torch.log_softmax(model(X_list[k]), dim=-1))

    
    # supervised loss: average the NLL over the K sampled outputs
    loss_train = 0.
    for k in range(K):
        loss_train += F.nll_loss(output_list[k][idx_train], labels[idx_train])
    loss_train = loss_train / K

    # loss_train = F.nll_loss(output_1[idx_train], labels[idx_train]) + F.nll_loss(output_1[idx_train], labels[idx_train])
    # loss_js = js_loss(output_1[idx_unlabel], output_2[idx_unlabel])
    # loss_en = entropy_loss(output_1[idx_unlabel]) + entropy_loss(output_2[idx_unlabel])

    # consistency regularization across the K outputs
    loss_consis = consis_loss(output_list)
    loss_train = loss_train + loss_consis
    acc_train = accuracy(output_list[0][idx_train], labels[idx_train])
    loss_train.backward()
    optimizer.step()

    # note: `output` is only defined in this branch, so this variant
    # requires fastmode to be off
    if not args.fastmode:
        model.eval()
        X = rand_prop(X, training=False)
        output = model(X)
        output = torch.log_softmax(output, dim=-1)
        
    loss_val = F.nll_loss(output[idx_val], labels[idx_val]) 
    acc_val = accuracy(output[idx_val], labels[idx_val])
    print('Epoch: {:04d}'.format(epoch+1),
          'loss_train: {:.4f}'.format(loss_train.item()),
          'acc_train: {:.4f}'.format(acc_train.item()),
          'loss_val: {:.4f}'.format(loss_val.item()),
          'acc_val: {:.4f}'.format(acc_val.item()),
          'time: {:.4f}s'.format(time.time() - t))
    return loss_val.item(), acc_val.item()
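`consis_loss` in Example #16 is defined elsewhere in the project; the pattern matches GRAND-style consistency regularization. A plausible sketch, assuming temperature sharpening of the averaged distribution and an L2 penalty against it (the temperature and weighting are assumptions, not the project's actual code):

def consis_loss(logps, temp=0.5):
    # sketch of GRAND-style consistency regularization (assumed details)
    ps = [torch.exp(p) for p in logps]      # log-probabilities -> probabilities
    avg_p = sum(ps) / len(ps)               # average over the K samples
    # sharpen the averaged distribution with temperature `temp`
    sharp_p = (avg_p ** (1. / temp) /
               torch.sum(avg_p ** (1. / temp), dim=1, keepdim=True)).detach()
    loss = 0.
    for p in ps:
        loss += torch.mean((p - sharp_p).pow(2).sum(1))
    return loss / len(ps)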
Code Example #17
def test(features, adj, labels, idx_test, model):
    model.eval()
    output = model(features, adj)
    loss_test = F.nll_loss(output[idx_test], labels[idx_test])
    acc_test = accuracy(output[idx_test], labels[idx_test])
    print("Test set results:", "loss= {:.4f}".format(loss_test.item()),
          "accuracy= {:.4f}".format(acc_test.item()))
Code Example #18
def test(output):
    model.eval()
    loss_test = F.nll_loss(output[idx_test], labels[idx_test])
    acc_test = accuracy(output[idx_test], labels[idx_test])
    print("Test set results:",
          "loss= {:.4f}".format(loss_test.item()),
          "accuracy= {:.4f}".format(acc_test.item()))
Code Example #19
def test():  #similar to train, but in eval mode and on test data.
    model.eval()
    output = model(features, adj)
    loss_test = F.nll_loss(output[idx_test], labels[idx_test])
    acc_test = accuracy(output[idx_test], labels[idx_test])
    print("Test set results:", "loss= {:.4f}".format(loss_test.item()),
          "accuracy= {:.4f}".format(acc_test.item()))
Code Example #20
def validate(epoch, loader, model):

    model.eval()
    total_loss = 0.0
    total_acc = 0.0
    count = 0
    val_len = len(loader)

    t = time.time()

    for i, (adj, features, labels) in enumerate(loader):
        # cuda()
        features = features.cuda()
        adj = adj.cuda()
        labels = labels.cuda()
        # model()
        output = model(features, adj)
        labels = labels.view(-1)
        # loss()
        val_train = F.nll_loss(output, labels)
        acc_val = accuracy(output, labels)

        count += 1
        total_loss += val_train
        total_acc += acc_val

        #if i%40 == 0:
        #print("# {} val loss : {}, acc val:{}".format(i,val_train,acc_val))
        #print("# {}/{} loss : {} , AVG acc : {}, time: {} , ETA:  {}".format(i,val_len,float(total_loss)/count,total_acc/count,time.time()-t, (val_len-i)/40.0 * (time.time()-t) ))
        #t = time.time()

    return total_loss / count, total_acc / count
Code Example #21
def test_pmle():
    print(model_pmle.beta.weight.data)

    ## Gibbs sampler
    # A is the whole adjacency graph now
    num_classes = labels.max().item() + 1
    model_pmle.eval()
    output = model_pmle(new_features[idx_test], interaction_test)
    # output_probs = torch.log_softmax(output, dim=-1)
    # test_labels = output_probs.max(1)[1].type_as(labels)
    if args.use_gibbs:
        # initialize the test labels randomly
        test_labels = torch.LongTensor(len_test, 1).random_(0, num_classes)
        test_labels_onehot = torch.FloatTensor(len_test, num_classes)
        test_labels_onehot.zero_()
        test_labels_onehot.scatter_(1, test_labels, 1)
        for it in range(num_iter):
            # choose a random node
            i = np.random.randint(0, len_test)
            # re-sample from the conditional distribution:
            # 1. calculate logits for each class
            logits = (model_pmle.beta.weight.data *
                      torch.matmul(A[idx_test[i]][idx_test], test_labels_onehot) +
                      output[i])
            # 2. apply softmax and draw a one-hot sample
            probs = F.softmax(logits, dim=0)
            test_labels_onehot[i] = torch.distributions.OneHotCategorical(probs).sample()
        output = test_labels_onehot
    else:
        output = torch.log_softmax(output, dim=-1)

    loss_test = F.nll_loss(output, labels[idx_test])
    acc_test = accuracy(output, labels[idx_test])
    print("Test set results:",
          "loss= {:.4f}".format(loss_test.item()),
          "accuracy= {:.4f}".format(acc_test.item()))
Code Example #22
File: train_edge.py Project: zbn123/grand_dropedge
def test():
    model.eval()
    #x = Feature_Generator(A, features, args.order)
    #x = Feature_Generator(A, features, args.order)
    x = features
    A2 = Edge_Generator(A * 0.5, args.order, args.alpha)
    #A = adj2A(A)
    #if args.cuda:
    #    A = A.cuda()
    outputs = model(x, x, A2, A2)

    loss_test = theloss(outputs, idx_test)

    prob = (torch.exp(outputs[-1]) + torch.exp(outputs[-2])) / 2.0
    # prob = torch.cat([outputs[-1], outputs[-2]], 1)

    # y_true = labels.cpu().data.numpy()
    # y_pred = (prob.max(1)[1]%(labels.max().item() + 1)).type_as(labels).cpu().data.numpy()
    # print(y_pred[:10])
    # assert False
    # acc_test = f1_score(y_true[idx_test], y_pred[idx_test], average='micro')
    # acc_val = f1_score(y_true[idx_val], y_pred[idx_val], average='micro')

    acc_test = accuracy(prob[idx_test], labels[idx_test])
    y_true = labels[idx_test].cpu().data.numpy()
    y_pred = prob[idx_test].max(1)[1].type_as(
        labels[idx_test]).cpu().data.numpy()
    f1_test = f1_score(y_true, y_pred, average='micro')
    print("Model Test set results:",
          "test_loss= {:.4f}".format(loss_test.item()),
          "accuracy= {:.5f}".format(acc_test.item()),
          "f1_score= {:.5f}".format(f1_test))
Code Example #23
def validate(epoch, loader, model):

    model.eval()
    total_loss = 0.0
    total_acc = 0.0
    count = 0
    val_len = len(loader)

    for i in range(val_len):
        # cuda()
        adj, features, labels = loader[i]
        features = features.cuda()
        adj = adj.cuda()
        labels = labels.cuda()
        # model()
        output = model(features, adj)
        # loss()
        val_train = F.nll_loss(output, labels)
        acc_val = accuracy(output, labels)

        if i % 40 == 0:
            print("# {} val loss : {}, acc val:{}".format(i, val_train, acc_val))

        count += 1
        total_loss += val_train
        total_acc += acc_val

    return total_loss / count, total_acc / count
Code Example #24
def test_GCN():
    model_GCN.eval()
    output = model_GCN(features, A)
    loss_test = F.nll_loss(output[idx_test], labels[idx_test])
    acc_test = accuracy(output[idx_test], labels[idx_test])
    print("Test set results:", "loss= {:.4f}".format(loss_test.item()),
          "accuracy= {:.4f}".format(acc_test.item()))
Code Example #25
    def test():
        model.eval()
        output = model(features, adj)
        # loss_test = F.nll_loss(output[test_idx], labels[test_idx])
        acc_test = accuracy(output[test_idx], labels[test_idx])

        return acc_test
Code Example #26
def perform_test():
    model.eval()
    output = model(features, adj)
    loss_test = loss(output[idx_test], labels[idx_test])
    acc_test = accuracy(output[idx_test], labels[idx_test])
    f1_score(output[idx_test], labels[idx_test])
    print("Test set results:", "loss= {:.4f}".format(loss_test.item()),
          "accuracy= {:.4f}".format(acc_test.item()))
Code Example #27
 def _train():
     model.train()
     optimizer.zero_grad()
     output = model(features, adj)
     loss_train = F.nll_loss(output[train_idx], labels[train_idx])
     acc_train = accuracy(output[train_idx], labels[train_idx])
     loss_train.backward()
     optimizer.step()
Code Example #28
def test():
    model.eval()  # switch the model to evaluation mode
    output = model(features, adj)
    loss_test = F.nll_loss(output[idx_test], labels[idx_test])
    acc_test = accuracy(output[idx_test], labels[idx_test])
    print("Test set results:", "loss= {:.4f}".format(loss_test.item()),
          "accuracy= {:.4f}".format(acc_test.item()))
    return output  # return output for visualization
Code Example #29
def test():
    model.eval()
    X = features
    output = model(X)
    output = torch.log_softmax(output, dim=-1)
    loss_test = F.nll_loss(output[idx_test], labels[idx_test])
    acc_test = accuracy(output[idx_test], labels[idx_test])
    print("Test set results:", "loss= {:.4f}".format(loss_test.item()),
          "accuracy= {:.4f}".format(acc_test.item()))
Code Example #30
def test_pmle():
    print(model_pmle.beta.weight.data)
    model_pmle.eval()
    output = model_pmle(new_features[idx_test], interaction_test)
    loss_test = F.nll_loss(output, labels[idx_test])
    acc_test = accuracy(output, labels[idx_test])
    print("Test set results:",
          "loss= {:.4f}".format(loss_test.item()),
          "accuracy= {:.4f}".format(acc_test.item()))