# Example 1
    return loss


# Train model
# NOTE(review): this fragment is not self-contained -- args, model, optimizer,
# features, adj, edges_head, edges_tail, tqdm, TensorDataset and DataLoader
# are all defined elsewhere, and the loop body is cut off below.
t_total = time.time()  # wall-clock start for the whole training run
loss_values = []       # collected losses (not populated in the visible part)
bad_counter = 0        # epochs since last improvement (early-stopping counter)
best = args.epochs + 1 # best loss so far; initialised above any reachable value
best_epoch = 0         # epoch index at which `best` was achieved

batchsize = 4
# Pair up edge endpoints so each batch yields aligned (head, tail) node ids.
tensor_dataset = TensorDataset(edges_head, edges_tail)
data_loader = DataLoader(tensor_dataset, batch_size=batchsize, shuffle=True)

for epoch in range(args.epochs):
    model.train()

    for batch in tqdm(data_loader):
        head, tail = tuple(t for t in batch)  # index tensors for this batch
        t = time.time()
        model.train()  # NOTE(review): redundant -- already set once per epoch above
        optimizer.zero_grad()
        # Assumes features/adj are indexable by node id and that elementwise
        # addition of the selected rows is the intended pair encoding -- TODO confirm.
        output = model(features[head] + features[tail], adj[head] + adj[tail])
        head_emb = output[0]
        tail_emb = output[1]

        # Negative sampling: random (head, tail) index pairs.
        # NOTE(review): 7127 looks like a hard-coded node count and (4, 2)
        # mirrors (batchsize, 2) -- confirm against the dataset; these will
        # break silently if batchsize or the graph changes.
        neg_idx = torch.randint(0, 7127, (4, 2))
        neg_head_emb = output[neg_idx[:, 0]]
        neg_tail_emb = output[neg_idx[:, 1]]

        # loss_train = pred_loss(head_emb, tail_emb)
# Example 2
    def __init__(self,graph,sparse = False,epochs = 200,learning_rate = 0.005,
                 weight_decay = 5e-4,hidden = 8,nb_heads = 8,drop_out = 0.6,
                alpha = 0.2 ,patience = 100,train = 1500,val = 2000,test = 3100):
        """Build a GAT (or sparse SpGAT) model for ``graph`` and immediately
        run the full train / validate / test cycle in the constructor.

        Args:
            graph: input graph consumed by ``self.load_data()`` (format
                defined there).
            sparse: if True, instantiate the sparse ``SpGAT`` variant,
                otherwise the dense ``GAT``.
            epochs: maximum number of training epochs.
            learning_rate: Adam learning rate.
            weight_decay: Adam L2 weight decay.
            hidden: hidden units per attention head.
            nb_heads: number of attention heads.
            drop_out: dropout probability.
            alpha: LeakyReLU negative slope used by the attention layers.
            patience: early-stopping patience, in epochs without a new best
                validation loss.
            train, val, test: split parameters forwarded via attributes to
                ``load_data()`` -- presumably index cut-offs; verify against
                that method.

        Side effects: prints per-epoch metrics, writes one ``<epoch>.pkl``
        checkpoint per epoch into the current working directory, deletes
        stale checkpoints, and finally prints test-set loss/accuracy.
        """
        self.graph = graph
        self.sparse  = sparse
        self.epochs = epochs
        self.learning_rate = learning_rate
        self.weight_decay = weight_decay
        self.hidden = hidden
        self.nb_heads  = nb_heads
        self.drop_out = drop_out
        self.alpha = alpha
        self.patience = patience
        self.train = train
        self.val = val
        self.test = test

        # load_data() is defined elsewhere on this class; besides returning the
        # split indices it evidently also sets self.features, self.adj and
        # self.labels, which are read below.
        idx_train,idx_val , idx_test = self.load_data()

        # Seed all RNGs for reproducibility.
        # NOTE(review): `random_seed` is not defined in this method, so it must
        # be a module-level name; also, seeding happens AFTER load_data(), so
        # any randomness inside load_data() is not covered -- confirm intended.
        random.seed(random_seed)
        np.random.seed(random_seed)
        torch.manual_seed(random_seed)

        # Choose the sparse or dense attention implementation; both share the
        # same constructor signature. Class count is inferred from the labels.
        if self.sparse:
            model = SpGAT(nfeat=self.features.shape[1],
                          nhid=self.hidden,
                          nclass=int(self.labels.max()) + 1,
                          dropout=self.drop_out,
                          nheads=self.nb_heads,
                          alpha=self.alpha)
        else:
            model = GAT(nfeat=self.features.shape[1],
                        nhid=self.hidden,
                        nclass=int(self.labels.max()) + 1,
                        dropout=self.drop_out,
                        nheads=self.nb_heads,
                        alpha=self.alpha)

        optimizer = optim.Adam(model.parameters(),
                               lr=self.learning_rate,
                               weight_decay=self.weight_decay)

        # Use the GPU (currently disabled).
        # device = torch.device("cuda:0")
        # torch.cuda.empty_cache()
        # model.to(device)
        # self.features = self.features.to(device)
        # self.adj = self.adj.to(device)
        # self.labels = self.labels.to(device)
        # idx_train = idx_train.to(device)
        # idx_val = idx_val.to(device)
        # idx_test = idx_test.to(device)

        # NOTE(review): Variable is a no-op wrapper since PyTorch 0.4; kept
        # here as-is, but plain tensors would behave identically.
        features, adj, labels = Variable(self.features), Variable(self.adj), Variable(self.labels)

        # Training loop with early stopping on validation loss.
        t_total = time.time()
        loss_values = []           # validation loss per epoch
        bad_counter = 0            # epochs since the last improvement
        best = self.epochs + 1     # best val loss so far; init above any real loss
        best_epoch = 0             # epoch index of the best checkpoint
        for epoch in range(self.epochs):

            t = time.time()
            model.train()
            optimizer.zero_grad()
            output = model(features, adj)
            # Model output is assumed to be log-probabilities (nll_loss).
            loss_train = F.nll_loss(output[idx_train], labels[idx_train])
            acc_train = accuracy(output[idx_train], labels[idx_train])
            loss_train.backward()
            optimizer.step()

            # Validation pass.
            # NOTE(review): no torch.no_grad() here, so this forward pass
            # still builds an (unused) autograd graph -- harmless but wasteful.
            model.eval()
            output = model(features, adj)

            loss_val = F.nll_loss(output[idx_val], labels[idx_val])
            acc_val = accuracy(output[idx_val], labels[idx_val])

            print('Epoch: {:04d}'.format(epoch + 1),
                  'loss_train: {:.4f}'.format(loss_train.data),
                  'acc_train: {:.4f}'.format(acc_train.data),
                  'loss_val: {:.4f}'.format(loss_val.data),
                  'acc_val: {:.4f}'.format(acc_val.data),
                  'time: {:.4f}s'.format(time.time() - t))
            loss_values.append(loss_val.data)
            # Checkpoint every epoch into the CWD; stale ones are pruned below.
            torch.save(model.state_dict(), '{}.pkl'.format(epoch))
            if loss_values[-1] < best:
                best = loss_values[-1]
                best_epoch = epoch
                bad_counter = 0
            else:
                bad_counter += 1

            # Early stop once val loss has not improved for `patience` epochs.
            if bad_counter == self.patience:
                break

            # Prune checkpoints older than the best epoch.
            # NOTE(review): checkpoints with epoch > best_epoch are never
            # removed here, and any unrelated '*.pkl' in the CWD would be
            # picked up by this glob -- confirm that is acceptable.
            files = glob.glob('*.pkl')
            for file in files:
                epoch_nb = int(file.split('.')[0])
                if epoch_nb < best_epoch:
                    os.remove(file)

        print("Optimization Finished!")
        print("Total time elapsed: {:.4f}s".format(time.time() - t_total))
        print('Loading {}th epoch'.format(best_epoch))
        # Restore the best checkpoint before the final test evaluation.
        model.load_state_dict(torch.load('{}.pkl'.format(best_epoch)))

        # Final test-set evaluation (again without torch.no_grad()).
        model.eval()
        output = model(features, adj)
        loss_test = F.nll_loss(output[idx_test], labels[idx_test])
        acc_test = accuracy(output[idx_test], labels[idx_test])
        print("Test set results:",
              "loss= {:.4f}".format(loss_test.data),
              "accuracy= {:.4f}".format(acc_test.data))