Example 1
import torch
import torch.nn.functional as F
from torch.optim import Adam
from sklearn.cluster import KMeans

# `GAT`, `utils`, `eva`, `args`, and `device` are assumed to come from the
# surrounding project (model definition, helpers, parsed CLI args, device).

def pretrain(dataset):
    model = GAT(
        num_features=args.input_dim,
        hidden_size=args.hidden_size,
        embedding_size=args.embedding_size,
        alpha=args.alpha,
    ).to(device)
    print(model)
    optimizer = Adam(model.parameters(),
                     lr=args.lr,
                     weight_decay=args.weight_decay)

    # data process
    dataset = utils.data_preprocessing(dataset)
    adj = dataset.adj.to(device)
    adj_label = dataset.adj_label.to(device)
    M = utils.get_M(adj).to(device)

    # data and label
    x = torch.Tensor(dataset.x).to(device)
    y = dataset.y.cpu().numpy()

    for epoch in range(args.max_epoch):
        model.train()
        A_pred, z = model(x, adj, M)
        loss = F.binary_cross_entropy(A_pred.view(-1), adj_label.view(-1))
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # evaluate the current embedding with k-means (eval mode, no gradients)
        model.eval()
        with torch.no_grad():
            _, z = model(x, adj, M)
            kmeans = KMeans(n_clusters=args.n_clusters,
                            n_init=20).fit(z.data.cpu().numpy())
            acc, nmi, ari, f1 = eva(y, kmeans.labels_, epoch)
        if epoch % 5 == 0:
            torch.save(model.state_dict(),
                       f"./pretrain/predaegc_{args.name}_{epoch}.pkl")
Example 2
optimizer = optim.Adam(model.parameters(),
                       lr=args.lr,
                       weight_decay=args.weight_decay)

# Variable is a no-op in PyTorch >= 0.4 and is kept only for backward
# compatibility with older code.
features, adj, labels = Variable(features), Variable(adj), Variable(labels)

# train
start_time = time.time()
loss_values = []
patience_counter = 0
best = args.epochs + 1
best_epoch = 0

for epoch in range(args.epochs):
    loss_values.append(train(epoch, model, optimizer, features, adj, idx_train, idx_val))

    # checkpoint every epoch; stale files are pruned below
    torch.save(model.state_dict(), '{}.pkl'.format(epoch))

    if loss_values[-1] < best:
        best = loss_values[-1]
        best_epoch = epoch
        patience_counter = 0
    else:
        patience_counter += 1

    # early stopping once the loss has not improved for `patience` epochs
    if patience_counter == args.patience:
        break

    # delete checkpoints older than the current best epoch
    files = glob.glob('*.pkl')
    for file in files:
        epoch_nb = int(file.split('.')[0])
        if epoch_nb < best_epoch:
            os.remove(file)
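Once the loop exits, either from early stopping or after args.epochs, the usual companion step is to reload the weights from the best epoch; a short sketch, assuming the '{epoch}.pkl' naming used above:

print("Loading epoch {}".format(best_epoch))
model.load_state_dict(torch.load('{}.pkl'.format(best_epoch)))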
Example 3
def compute_test():
    model.eval()
    output = model(features, adj)
    loss_test = F.nll_loss(output[idx_test], labels[idx_test])
    acc_test = accuracy(output[idx_test], labels[idx_test])
    print("Test set results:",
          "loss= {:.4f}".format(loss_test.item()),
          "accuracy= {:.4f}".format(acc_test.item()))


# Train model
t_total = time.time()
loss_values = []
bad_counter = 0
best = args.epochs + 1
best_epoch = 0
save_name = '.' + str(args.fastGAT) + '.pkl'
for epoch in range(args.epochs):
    loss_values.append(train(epoch))

    torch.save(model.state_dict(), ('{}' + save_name).format(epoch))
    if loss_values[-1] < best:
        best = loss_values[-1]
        best_epoch = epoch
        bad_counter = 0
    else:
        bad_counter += 1

    if bad_counter == args.patience:
        break

    # delete checkpoints older than the current best epoch
    files = glob.glob('*' + save_name)
    for file in files:
        epoch_nb = int(file.split('.')[0])
        if epoch_nb < best_epoch:
            os.remove(file)
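The accuracy helper called in the test function above is not shown; a minimal sketch consistent with its use on per-class output scores (the implementation is an assumption):

def accuracy(output, labels):
    # output: [N, C] class scores or log-probabilities; labels: [N] class ids
    preds = output.max(1)[1].type_as(labels)
    correct = preds.eq(labels).double().sum()
    return correct / len(labels)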
Example 4
import math
import random
import time

import numpy as np
import torch
import torch.nn.functional as F
import visdom
from torch.optim.lr_scheduler import StepLR

# `GAT`, `removeIsolated`, and `collectGraph_train` are project-level
# imports (model definition and data helpers).

def train(args):
    ## load training data
    print("loading training data ......")
    node_num, class_num = removeIsolated(args.suffix)
    label, feature_map, adj_lists = collectGraph_train(node_num, class_num,
                                                       args.feat_dim,
                                                       args.num_sample,
                                                       args.suffix)
    label = torch.LongTensor(label)
    feature_map = torch.FloatTensor(feature_map)

    model = GAT(args.feat_dim, args.embed_dim, class_num, args.alpha,
                args.dropout, args.nheads, args.use_cuda)

    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=args.learning_rate,
                                 weight_decay=args.weight_decay)
    scheduler = StepLR(optimizer,
                       step_size=args.step_size,
                       gamma=args.learning_rate_decay)

    ## train
    np.random.seed(2)
    random.seed(2)
    rand_indices = np.random.permutation(node_num)
    train_nodes = rand_indices[:args.train_num]
    val_nodes = rand_indices[args.train_num:]

    if args.use_cuda:
        model.cuda()
        label = label.cuda()
        feature_map = feature_map.cuda()

    epoch_num = args.epoch_num
    batch_size = args.batch_size
    iter_num = int(math.ceil(args.train_num / float(batch_size)))
    check_loss = []
    val_accuracy = []
    check_step = args.check_step
    train_loss = 0.0
    iter_cnt = 0
    for e in range(epoch_num):
        model.train()

        random.shuffle(train_nodes)
        for batch in range(iter_num):
            batch_nodes = train_nodes[batch * batch_size:(batch + 1) * batch_size]
            batch_label = label[batch_nodes].squeeze()
            batch_neighbors = [adj_lists[node] for node in batch_nodes]
            _, logit = model(feature_map, batch_nodes, batch_neighbors)
            loss = F.nll_loss(logit, batch_label)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            iter_cnt += 1
            train_loss += loss.cpu().item()
            if iter_cnt % check_step == 0:
                check_loss.append(train_loss / check_step)
                print(time.strftime('%Y-%m-%d %H:%M:%S'),
                      "epoch: {}, iter: {}, loss: {:.4f}".format(
                          e, iter_cnt, train_loss / check_step))
                train_loss = 0.0

        # step the LR scheduler once per epoch, after the optimizer updates
        # (the ordering required by PyTorch >= 1.1)
        scheduler.step()

        ## validation
        model.eval()

        group = int(math.ceil(len(val_nodes) / float(batch_size)))
        val_cnt = 0
        for batch in range(group):
            batch_nodes = val_nodes[batch * batch_size:(batch + 1) * batch_size]
            batch_label = label[batch_nodes].squeeze()
            batch_neighbors = [adj_lists[node] for node in batch_nodes]
            _, logit = model(feature_map, batch_nodes, batch_neighbors)
            batch_predict = np.argmax(logit.cpu().detach().numpy(), axis=1)
            val_cnt += np.sum(batch_predict == batch_label.cpu().numpy())
        val_accuracy.append(val_cnt / float(len(val_nodes)))
        print(time.strftime('%Y-%m-%d %H:%M:%S'),
              "Epoch: {}, Validation Accuracy: {:.4f}".format(
                  e, val_cnt / float(len(val_nodes))))
        print("******" * 10)

    checkpoint_path = 'checkpoint/checkpoint_{}.pth'.format(
        time.strftime('%Y%m%d%H%M'))
    torch.save(
        {
            'train_num': args.train_num,
            'epoch_num': args.epoch_num,
            'batch_size': args.batch_size,
            'learning_rate': args.learning_rate,
            'embed_dim': args.embed_dim,
            'num_sample': args.num_sample,
            'graph_state_dict': model.state_dict(),
            'optimizer': optimizer.state_dict(),
        }, checkpoint_path)

    # plot the loss and validation-accuracy curves in visdom
    vis = visdom.Visdom(env='GraphAttention', port=8099)
    vis.line(X=np.arange(1, len(check_loss) + 1) * check_step,
             Y=np.array(check_loss),
             opts=dict(title=time.strftime('%Y-%m-%d %H:%M:%S'),
                       xlabel='itr.',
                       ylabel='loss'))
    vis.line(X=np.arange(1, len(val_accuracy) + 1),
             Y=np.array(val_accuracy),
             opts=dict(title=time.strftime('%Y-%m-%d %H:%M:%S'),
                       xlabel='epoch',
                       ylabel='accuracy'))

    return checkpoint_path, class_num
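A short usage sketch for the returned values, assuming the same parsed args namespace used above; the constructor call mirrors the one inside train() and is not taken from the original file:

checkpoint_path, class_num = train(args)
checkpoint = torch.load(checkpoint_path)
model = GAT(args.feat_dim, args.embed_dim, class_num, args.alpha,
            args.dropout, args.nheads, args.use_cuda)
model.load_state_dict(checkpoint['graph_state_dict'])
model.eval()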