Example #1
def train_gat(args):
    # Creating the gat model here.
    ####################################

    print("Defining model")

    print(
        "\nModel type -> GAT layer with {} heads used, Initial Embeddings training"
        .format(args.nheads_GAT[0]))
    model_gat = SpKBGATModified(entity_embeddings, relation_embeddings,
                                args.entity_out_dim, args.entity_out_dim,
                                args.drop_GAT, args.alpha, args.nheads_GAT,
                                args.use_simple_layer)
    wandb.watch(model_gat, log="all")

    if CUDA:
        model_gat.cuda()

    optimizer = torch.optim.Adam(model_gat.parameters(),
                                 lr=args.lr,
                                 weight_decay=args.weight_decay_gat)

    scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                step_size=500,
                                                gamma=0.5,
                                                last_epoch=-1)

    gat_loss_func = nn.MarginRankingLoss(margin=args.margin)

    if args.use_2hop:
        current_batch_2hop_indices = Corpus_.get_batch_nhop_neighbors_all(
            args, Corpus_.unique_entities_train, node_neighbors_2hop)
        current_batch_2hop_indices = Variable(
            torch.LongTensor(current_batch_2hop_indices))
        if CUDA:
            current_batch_2hop_indices = current_batch_2hop_indices.cuda()
    else:
        current_batch_2hop_indices = None

    epoch_losses = []  # losses of all epochs
    print("Number of epochs {}".format(args.epochs_gat))

    for epoch in range(args.epochs_gat):
        print("\nepoch-> ", epoch)
        random.shuffle(Corpus_.train_triples)
        Corpus_.train_indices = np.array(list(Corpus_.train_triples)).astype(
            np.int32)

        model_gat.train()  # getting in training mode
        start_time = time.time()
        epoch_loss = []

        if len(Corpus_.train_indices) % args.batch_size_gat == 0:
            num_iters_per_epoch = len(
                Corpus_.train_indices) // args.batch_size_gat
        else:
            num_iters_per_epoch = (len(Corpus_.train_indices) //
                                   args.batch_size_gat) + 1

        for iters in range(num_iters_per_epoch):
            start_time_iter = time.time()
            train_indices, train_values = Corpus_.get_iteration_batch(iters)

            if CUDA:
                train_indices = Variable(
                    torch.LongTensor(train_indices)).cuda()
                train_values = Variable(torch.FloatTensor(train_values)).cuda()

            else:
                train_indices = Variable(torch.LongTensor(train_indices))
                train_values = Variable(torch.FloatTensor(train_values))

            # forward pass
            entity_embed, relation_embed = model_gat(
                Corpus_, Corpus_.train_adj_matrix, train_indices,
                current_batch_2hop_indices)

            optimizer.zero_grad()

            loss = batch_gat_loss(gat_loss_func, train_indices, entity_embed,
                                  relation_embed)

            loss.backward()
            # Clip gradients right after backward so the norm constraint is
            # applied on every update step.
            torch.nn.utils.clip_grad_norm_(model_gat.parameters(), 0.1)
            optimizer.step()

            epoch_loss.append(loss.data.item())

            end_time_iter = time.time()

            print(
                "Iteration-> {0}  , Iteration_time-> {1:.4f} , Iteration_loss {2:.4f}"
                .format(iters, end_time_iter - start_time_iter,
                        loss.data.item()))

        scheduler.step()
        print("Epoch {} , average loss {} , epoch_time {}".format(
            epoch,
            sum(epoch_loss) / len(epoch_loss),
            time.time() - start_time))
        epoch_losses.append(sum(epoch_loss) / len(epoch_loss))
        wandb.log({'epoch_loss': epoch_losses[-1]})
        if (epoch + 1) % 200 == 0 or (epoch + 1) == args.epochs_gat:
            save_model(model_gat, args.data, epoch, args.output_folder,
                       args.use_2hop)
        if (epoch + 1) == args.epochs_gat:
            save_final(model_gat, 'encoder', wandb.run.dir, args.use_2hop)
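
All five examples delegate the training objective to batch_gat_loss, which these projects define elsewhere. For orientation, here is a minimal sketch of what the KBGAT reference implementation computes there, assuming a [positives | negatives] batch layout and a TransE-style L1 score; the valid_invalid_ratio parameter stands in for args.valid_invalid_ratio_gat and is an assumption, not code from these projects:

import torch

def batch_gat_loss_sketch(gat_loss_func, train_indices, entity_embed,
                          relation_embed, valid_invalid_ratio=2):
    # Batch layout assumption: the first chunk holds positive triples,
    # followed by valid_invalid_ratio negatives per positive.
    num_pos = train_indices.shape[0] // (valid_invalid_ratio + 1)
    pos = train_indices[:num_pos].repeat(valid_invalid_ratio, 1)
    neg = train_indices[num_pos:]

    def score(triples):
        # TransE-style distance ||h + r - t||_1; lower is better.
        h = entity_embed[triples[:, 0]]
        r = relation_embed[triples[:, 1]]
        t = entity_embed[triples[:, 2]]
        return torch.norm(h + r - t, p=1, dim=1)

    # With y = -1, MarginRankingLoss(margin=m) penalizes batches where the
    # positive distance is not at least m below the negative distance.
    y = -torch.ones(pos.shape[0], device=pos.device)
    return gat_loss_func(score(pos), score(neg), y)
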
Example #2
def train_gat(args, word_embed_matrix, word_vocab, char_vocab):

    # Creating the gat model here.
    ####################################

    print("Defining model")

    print(
        "\nModel type -> GAT layer with {} heads used, Initial Embeddings training"
        .format(args.nheads_GAT[0]))
    global initial_entity_emb_params
    global entity_embeddings

    model_gat = SpKBGATModified(entity_embeddings, relation_embeddings,
                                args.entity_out_dim, args.entity_out_dim,
                                args.drop_GAT, args.alpha, args.nheads_GAT,
                                initial_entity_emb_params)
    if os.path.exists('{0}trained_{1}.pth'.format(args.output_folder, 0)):
        if CUDA:
            model_gat.load_state_dict(
                torch.load(
                    os.path.join(args.output_folder,
                                 'trained_{}.pth'.format(0))))
        else:
            model_gat.load_state_dict(
                torch.load(os.path.join(args.output_folder,
                                        'trained_{}.pth'.format(0)),
                           map_location=torch.device('cpu')))

    display_every = 100

    if CUDA:
        model_gat.cuda()

    # optimizer = torch.optim.Adam(
    #     model_gat.parameters(), lr=args.lr, weight_decay=args.weight_decay_gat)
    optimizer = torch.optim.SGD(model_gat.parameters(), lr=args.lr)

    scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                step_size=500,
                                                gamma=0.5,
                                                last_epoch=-1)

    gat_loss_func = nn.MarginRankingLoss(margin=args.margin)

    epoch_losses = []  # losses of all epochs
    print("Number of epochs {}".format(args.epochs_gat))

    for epoch in range(args.epochs_gat):
        print("\nepoch-> ", epoch)
        random.shuffle(Corpus_.train_triples)
        Corpus_.train_indices = np.array(list(Corpus_.train_triples)).astype(
            np.int32)

        model_gat.train()  # getting in training mode
        start_time = time.time()
        epoch_loss = []

        num_iters_per_epoch = int(
            np.ceil(len(Corpus_.unique_entities_train) / args.batch_size_gat))
        random_indices = np.random.permutation(
            len(Corpus_.unique_entities_train))
        random_unique_entities_train = [
            Corpus_.unique_entities_train[idx] for idx in random_indices
        ]

        start_idx = end_idx = 0
        reduced_batch_size = None
        for iters in tqdm(range(num_iters_per_epoch)):
            start_time_iter = time.time()

            start_idx = end_idx
            if reduced_batch_size:
                end_idx = min(len(random_unique_entities_train),
                              start_idx + reduced_batch_size)
            else:
                end_idx = min(len(random_unique_entities_train),
                              start_idx + args.entities_per_batch)

            batch_train_adj_matrix, current_batch_entities_set = Corpus_.get_batch_adj_data(
                iters,
                start_idx=start_idx,
                end_idx=end_idx,
                unique_entities_train=random_unique_entities_train)
            if batch_train_adj_matrix[0][0].shape[0] == 0:
                continue

            train_indices, train_values = Corpus_.get_iteration_triples_batch(
                list(current_batch_entities_set['source']))

            if args.use_2hop:
                current_batch_2hop_indices = Corpus_.get_batch_nhop_neighbors_all(
                    args, list(current_batch_entities_set['source']),
                    node_neighbors_2hop)
                if current_batch_2hop_indices.shape[0] == 0:
                    # current_batch_2hop_indices = np.empty((1,4))
                    current_batch_2hop_indices = torch.tensor([],
                                                              dtype=torch.long)
            else:
                current_batch_2hop_indices = torch.tensor([], dtype=torch.long)

            if CUDA:
                train_indices = Variable(
                    torch.LongTensor(train_indices)).cuda()
                train_values = Variable(torch.FloatTensor(train_values)).cuda()
                if args.use_2hop:
                    current_batch_2hop_indices = Variable(
                        torch.LongTensor(current_batch_2hop_indices)).cuda()
            else:
                train_indices = Variable(torch.LongTensor(train_indices))
                train_values = Variable(torch.FloatTensor(train_values))
                if args.use_2hop:
                    current_batch_2hop_indices = Variable(
                        torch.LongTensor(current_batch_2hop_indices))

            entity_embed, relation_embed, mask = model_gat(
                Corpus_,
                torch.tensor(list(current_batch_entities_set['source'])),
                batch_train_adj_matrix, current_batch_2hop_indices)

            optimizer.zero_grad()

            loss = batch_gat_loss(gat_loss_func, train_indices, entity_embed,
                                  relation_embed)

            loss.backward()
            optimizer.step()
            epoch_loss.append(loss.data.item())

            end_time_iter = time.time()
            print(
                "Iteration-> {0}/{1}  , Iteration_time-> {2:.4f} , Iteration_loss {3:.4f}"
                .format(iters, num_iters_per_epoch,
                        end_time_iter - start_time_iter, loss.data.item()))

            if iters % display_every == 0:
                num_samples = 1000
                random_ent_indices = np.random.randint(0,
                                                       entity_embed.shape[0],
                                                       num_samples)
                sampled_entities = np.array(
                    entity_embed[random_ent_indices].detach().cpu())
                # Cosine similarity of each sampled embedding to the mean
                # embedding; values near 1 for all samples indicate collapse.
                mean_vector = np.mean(sampled_entities, axis=0)
                norm_entities = np.sqrt(
                    np.sum(np.square(sampled_entities), axis=-1))
                norm_mean = np.sqrt(np.sum(np.square(mean_vector)))
                den = norm_mean * norm_entities
                num = np.dot(sampled_entities, mean_vector)
                cosine_dist = num / den

                mean_cosine_dist = np.mean(cosine_dist)
                median_cosine_dist = np.median(cosine_dist)
                min_norm = np.min(norm_entities)
                max_norm = np.max(norm_entities)

                print('mean_cosine_dist: ', mean_cosine_dist)
                print('median_cosine_dist: ', median_cosine_dist)
                print('min_norm: ', min_norm)
                print('max_norm: ', max_norm)

        scheduler.step()
        print("Epoch {} , average loss {} , epoch_time {}".format(
            epoch,
            sum(epoch_loss) / len(epoch_loss),
            time.time() - start_time))
        epoch_losses.append(sum(epoch_loss) / len(epoch_loss))

        save_model(model_gat, args.data, 0, args.output_folder)
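
The display_every block above samples entity embeddings and measures how tightly they cluster around their mean, a quick check for embedding collapse. A self-contained sketch of the same diagnostic, runnable on random data (the function name and seed handling are illustrative):

import numpy as np

def embedding_collapse_stats(embeddings, num_samples=1000, seed=0):
    # Cosine similarity of sampled embeddings to their mean vector;
    # a mean/median near 1.0 suggests collapse onto a single direction.
    rng = np.random.default_rng(seed)
    idx = rng.integers(0, embeddings.shape[0], num_samples)
    sample = embeddings[idx]                      # (num_samples, dim)
    mean_vec = sample.mean(axis=0)                # (dim,)
    norms = np.linalg.norm(sample, axis=-1)       # (num_samples,)
    cos = sample @ mean_vec / (norms * np.linalg.norm(mean_vec))
    return {
        "mean_cosine": float(np.mean(cos)),
        "median_cosine": float(np.median(cos)),
        "min_norm": float(norms.min()),
        "max_norm": float(norms.max()),
    }

# Random vectors are far from collapsed, so mean_cosine stays near 0:
print(embedding_collapse_stats(np.random.randn(5000, 100)))
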
Example #3
File: main.py  Project: gecongcong/TransGAT
def train_gat(args):
    # Creating the gat model here.
    ####################################
    print("Defining model")
    print("\nModel type -> GAT layer with {} heads used , Initital Embeddings training".format(args.nheads_GAT[0]))


    # The model_gat initialization was modified here (it now also takes
    # nhop_embeddings and nhop_array).
    model_gat = SpKBGATModified(entity_embeddings, relation_embeddings, nhop_embeddings, nhop_array, args.entity_out_dim, args.entity_out_dim,
                                args.drop_GAT, args.alpha, args.nheads_GAT)
    if CUDA:
        model_gat = model_gat.cuda()


    optimizer = torch.optim.Adam(model_gat.parameters(), lr=args.lr, weight_decay=args.weight_decay_gat)

    scheduler = torch.optim.lr_scheduler.StepLR(
        optimizer, step_size=500, gamma=0.5, last_epoch=-1)

    gat_loss_func = nn.MarginRankingLoss(margin=args.margin)

    # train_loader = torch.utils.data.DataLoader(
    #     current_batch_2hop_indices,  sampler=train_sampler, **kwargs)
    epoch_losses = []  # losses of all epochs
    print("Number of epochs {}".format(args.epochs_gat))

    """
        Add by cc
        Add early stopping with patience
        if patience times in a row score_fn gives a result lower than the best result,
        than the training will be stopped
        """
    counter = 0
    best_score = None
    for epoch in range(args.epochs_gat):
        print("\nepoch-> ", epoch)
        random.shuffle(Corpus_.train_triples)
        Corpus_.train_indices = np.array(
            list(Corpus_.train_triples)).astype(np.int32)

        model_gat.train()  # getting in training mode
        start_time = time.time()
        epoch_loss = []


        if len(Corpus_.train_indices) % args.batch_size_gat == 0:
            num_iters_per_epoch = len(Corpus_.train_indices) // args.batch_size_gat
        else:
            num_iters_per_epoch = (len(Corpus_.train_indices) // args.batch_size_gat) + 1

        for iters in range(num_iters_per_epoch):
            start_time_iter = time.time()
            train_indices, train_values = Corpus_.get_iteration_batch(iters)
            iter_unique_entities = get_unique_entity(train_indices)
            start_2hop_time = time.time()
            current_batch_2hop_indices = Corpus_.get_batch_nhop_neighbors_all(args, iter_unique_entities,
                                                                              node_neighbors_2hop)
            end_2hop_time = time.time()
            print("2-hop batch time: {:.4f}".format(end_2hop_time - start_2hop_time))
            # train_values is unused in this variant, so only the indices are
            # moved to the device.
            if CUDA:
                train_indices = Variable(torch.LongTensor(train_indices)).cuda()
                current_batch_2hop_indices = Variable(
                    torch.LongTensor(current_batch_2hop_indices)).cuda()
            else:
                train_indices = Variable(torch.LongTensor(train_indices))
                current_batch_2hop_indices = Variable(
                    torch.LongTensor(current_batch_2hop_indices))

            # forward pass
            entity_embed, relation_embed = model_gat(
                Corpus_, Corpus_.train_adj_matrix, train_indices, current_batch_2hop_indices)

            optimizer.zero_grad()

            loss = batch_gat_loss(
                gat_loss_func, train_indices, entity_embed, relation_embed)

            loss.backward()
            optimizer.step()

            epoch_loss.append(loss.data.item())

            end_time_iter = time.time()

            print("Iteration-> {0}  , Iteration_time-> {1:.4f} , Iteration_loss {2:.4f}".format(
                iters, end_time_iter - start_time_iter, loss.data.item()))

        scheduler.step()
        avg_loss = sum(epoch_loss) / len(epoch_loss)
        print("Epoch {} , average loss {} , epoch_time {}".format(
            epoch, avg_loss, time.time() - start_time))
        epoch_losses.append(avg_loss)

        """
        early stopping
        """
        if best_score is None:
            best_score = avg_loss
        elif avg_loss > best_score:
            counter += 1
            if counter >= args.patience_gat:
                break
        else:
            best_score = avg_loss
            counter = 0

        # plot avg_loss
        writer.add_scalar('WN18RR_testing [add (h,t)->r]: GAT average loss--epoch', avg_loss, epoch)
        save_model(model_gat, args.data, epoch, args.output_folder)
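
The patience bookkeeping in example #3 is a standard pattern and is easy to reuse; a minimal sketch of the same logic as a small helper class (the class and attribute names are illustrative, not from the TransGAT project):

class EarlyStopping:
    # Stop when the monitored loss has not improved for `patience`
    # consecutive epochs.
    def __init__(self, patience=5):
        self.patience = patience
        self.best = None
        self.counter = 0

    def step(self, loss):
        # Returns True when training should stop.
        if self.best is None or loss < self.best:
            self.best = loss
            self.counter = 0
        else:
            self.counter += 1
        return self.counter >= self.patience

# Usage inside the epoch loop:
#     stopper = EarlyStopping(patience=args.patience_gat)
#     if stopper.step(avg_loss):
#         break
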
Example #4
def train_gat(args):
    # Creating the gat model here.
    ####################################

    print("Defining model")

    print(
        "\nModel type -> GAT layer with {} heads used, Initial Embeddings training"
        .format(args.nheads_GAT[0]))
    model_gat = SpKBGATModified(entity_embeddings, relation_embeddings,
                                args.entity_out_dim, args.entity_out_dim,
                                args.drop_GAT, args.alpha, args.nheads_GAT)

    if CUDA:
        model_gat.cuda()

    optimizer = torch.optim.Adam(model_gat.parameters(),
                                 lr=args.lr,
                                 weight_decay=args.weight_decay_gat)

    scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                step_size=500,
                                                gamma=0.5,
                                                last_epoch=-1)

    gat_loss_func = nn.MarginRankingLoss(margin=args.margin)

    current_batch_2hop_indices = torch.tensor([], dtype=torch.long)
    if args.use_2hop:
        current_batch_2hop_indices = Corpus_.get_batch_nhop_neighbors_all(
            args, Corpus_.unique_entities_train, node_neighbors_2hop)

    if CUDA:
        current_batch_2hop_indices = Variable(
            torch.LongTensor(current_batch_2hop_indices)).cuda()
    else:
        current_batch_2hop_indices = Variable(
            torch.LongTensor(current_batch_2hop_indices))

    epoch_losses = []  # losses of all epochs
    print("Number of epochs {}".format(args.epochs_gat))

    for epoch in range(args.epochs_gat):
        random.shuffle(Corpus_.train_triples)
        Corpus_.train_indices = np.array(list(Corpus_.train_triples)).astype(
            np.int32)

        model_gat.train()  # getting in training mode
        start_time = time.time()
        epoch_loss = []

        if len(Corpus_.train_indices) % args.batch_size_gat == 0:
            num_iters_per_epoch = len(
                Corpus_.train_indices) // args.batch_size_gat
        else:
            num_iters_per_epoch = (len(Corpus_.train_indices) //
                                   args.batch_size_gat) + 1

        for iters in range(num_iters_per_epoch):
            train_indices, train_values = Corpus_.get_iteration_batch(iters)

            if CUDA:
                train_indices = Variable(
                    torch.LongTensor(train_indices)).cuda()
                train_values = Variable(torch.FloatTensor(train_values)).cuda()

            else:
                train_indices = Variable(torch.LongTensor(train_indices))
                train_values = Variable(torch.FloatTensor(train_values))

            # forward pass
            entity_embed, relation_embed = model_gat(
                Corpus_, Corpus_.train_adj_matrix, train_indices,
                current_batch_2hop_indices)

            optimizer.zero_grad()

            loss = batch_gat_loss(gat_loss_func, train_indices, entity_embed,
                                  relation_embed)

            loss.backward()
            optimizer.step()

            epoch_loss.append(loss.data.item())

        scheduler.step()
        if epoch % 100 == 0:
            print("Epoch {} , average loss {} , epoch_time {}".format(
                epoch,
                sum(epoch_loss) / len(epoch_loss),
                time.time() - start_time))
        epoch_losses.append(sum(epoch_loss) / len(epoch_loss))
        summary.add_scalar('loss/gat_loss',
                           sum(epoch_loss) / len(epoch_loss), epoch)

        if (epoch + 1) % 200 == 0:
            save_model(model_gat, args.data, epoch, args.output_folder)

    now = time.localtime()
    loss_file = (args.output_folder + "train_gat_epoch_losses_{}-{}-{}.txt").format(
        now.tm_year, now.tm_mon, now.tm_mday)
    with open(loss_file, 'w') as f:
        for epoch_avg in epoch_losses:
            f.write(str(epoch_avg))
            f.write('\n')
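
Every example pairs its optimizer with StepLR(step_size=500, gamma=0.5) and calls scheduler.step() once per epoch, so the learning rate halves every 500 epochs. A minimal sketch that verifies the schedule in isolation:

import torch

param = torch.nn.Parameter(torch.zeros(1))
optimizer = torch.optim.Adam([param], lr=1e-3)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=500, gamma=0.5)

for epoch in range(1500):
    optimizer.step()          # stands in for the forward/backward pass
    scheduler.step()
    if (epoch + 1) % 500 == 0:
        print(epoch + 1, scheduler.get_last_lr())
# 500  [0.0005]
# 1000 [0.00025]
# 1500 [0.000125]
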
Example #5
def train_gat(args):

    # Creating the gat model here.
    ####################################

    print("Defining model")

    print(
        "\nModel type -> GAT layer with {} heads used, Initial Embeddings training".format(args.nheads_GAT[0]))
    if args.tanh:
        model_gat = SpKBGATModified(entity_embeddings, relation_embeddings, args.entity_out_dim, args.entity_out_dim,
                                    args.drop_GAT, args.alpha, args.nheads_GAT, 'tanh')
    else:
        model_gat = SpKBGATModified(entity_embeddings, relation_embeddings, args.entity_out_dim, args.entity_out_dim,
                                    args.drop_GAT, args.alpha, args.nheads_GAT, 'leakyrelu')

    model_gat = nn.DataParallel(model_gat)


    if CUDA:
        model_gat.cuda()

    if args.load_gat is not None and args.epochs_gat == 0:
        model_gat.load_state_dict(torch.load(
            '{0}gat/trained_{1}.pth'.format(args.output_folder, args.epochs_gat - 1)))
    elif args.load_gat is not None and args.epochs_gat > 0:
        model_gat.load_state_dict(torch.load(args.load_gat))

    optimizer = torch.optim.Adam(
        model_gat.parameters(), lr=args.lr, weight_decay=args.weight_decay_gat)

    scheduler = torch.optim.lr_scheduler.StepLR(
        optimizer, step_size=500, gamma=0.5, last_epoch=-1)

    gat_loss_func = nn.MarginRankingLoss(margin=args.margin)

    current_batch_2hop_indices = torch.tensor([], dtype=torch.long)
    if args.use_2hop:
        current_batch_2hop_indices = Corpus_.get_batch_nhop_neighbors_all(args,
                                                                          Corpus_.unique_entities_train, node_neighbors_2hop)

    if CUDA:
        current_batch_2hop_indices = Variable(
            torch.LongTensor(current_batch_2hop_indices)).cuda()
    else:
        current_batch_2hop_indices = Variable(
            torch.LongTensor(current_batch_2hop_indices))

    epoch_losses = []   # losses of all epochs
    print("Number of epochs {}".format(args.epochs_gat))

    for epoch in range(args.epochs_gat):
        print("\nepoch-> ", epoch)
        random.shuffle(Corpus_.train_triples)
        Corpus_.train_indices = np.array(
            list(Corpus_.train_triples)).astype(np.int32)

        model_gat.train()  # getting in training mode
        start_time = time.time()
        epoch_loss = []

        if len(Corpus_.train_indices) % args.batch_size_gat == 0:
            num_iters_per_epoch = len(
                Corpus_.train_indices) // args.batch_size_gat
        else:
            num_iters_per_epoch = (
                len(Corpus_.train_indices) // args.batch_size_gat) + 1
        for iters in range(num_iters_per_epoch):
            start_time_iter = time.time()
            train_indices, train_values = Corpus_.get_iteration_batch(iters)

            if CUDA:
                train_indices = Variable(
                    torch.LongTensor(train_indices)).cuda()
                train_values = Variable(torch.FloatTensor(train_values)).cuda()

            else:
                train_indices = Variable(torch.LongTensor(train_indices))
                train_values = Variable(torch.FloatTensor(train_values))

            # forward pass
            entity_embed, relation_embed = model_gat(
                Corpus_, Corpus_.train_adj_matrix, train_indices, current_batch_2hop_indices)
            # print('Forward pass', entity_embed.shape, relation_embed.shape)
            optimizer.zero_grad()

            loss = batch_gat_loss(
                gat_loss_func, train_indices, entity_embed, relation_embed)

            loss.backward()
            optimizer.step()

            epoch_loss.append(loss.data.item())

            end_time_iter = time.time()

            line = "Iteration-> {0}  , Iteration_time-> {1:.4f} , Iteration_loss {2:.4f}".format(
                iters, end_time_iter - start_time_iter, loss.data.item())
            print(line)
            write_to_file(args, line)

        scheduler.step()
        line = "Epoch {} , average loss {} , epoch_time {}".format(
            epoch, sum(epoch_loss) / len(epoch_loss), time.time() - start_time)
        print(line)
        write_to_file(args, line)

        epoch_losses.append(sum(epoch_loss) / len(epoch_loss))

        save_model(model_gat, args.data, epoch,
                   args.output_folder + 'gat/')
    if CUDA:
        final_entity_embeddings = model_gat.module.final_entity_embeddings.cpu().detach().numpy()
        final_relation_embeddings = model_gat.module.final_relation_embeddings.cpu().detach().numpy()

    else:
        final_entity_embeddings = model_gat.module.final_entity_embeddings.detach().numpy()
        final_relation_embeddings = model_gat.module.final_relation_embeddings.detach().numpy()
    save_mbeddings(args, final_entity_embeddings, final_relation_embeddings)
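
Example #5 is the only variant that wraps the model in nn.DataParallel, which is why it reads the final embeddings through model_gat.module. The same indirection affects checkpoints: under DataParallel every state_dict key gains a "module." prefix, so saving model.module.state_dict() keeps checkpoints loadable into an unwrapped model. A minimal sketch with a stand-in module (the filename is illustrative):

import torch
import torch.nn as nn

model = nn.DataParallel(nn.Linear(4, 2))

# Keys are prefixed with "module." under DataParallel:
print(list(model.state_dict())[0])            # module.weight

# Save the unwrapped module so the checkpoint also loads into a plain model:
torch.save(model.module.state_dict(), 'trained_0.pth')

plain = nn.Linear(4, 2)
plain.load_state_dict(torch.load('trained_0.pth'))
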