Example #1
def evaluate_conv(args):

    # Creating convolution model here.
    ####################################

    print("Defining model")
    model_gat = SpKBGATModified(entity_embeddings, relation_embeddings,
                                args.entity_out_dim, args.entity_out_dim,
                                args.drop_GAT, args.alpha, args.nheads_GAT)
    # print("Only Conv model trained")
    # model_conv = SpKBGATConvOnly(entity_embeddings, relation_embeddings, args.entity_out_dim, args.entity_out_dim,
    #                              args.drop_GAT, args.drop_conv, args.alpha, args.alpha_conv,
    #                              args.nheads_GAT, args.out_channels)

    if CUDA:
        # model_conv.cuda()
        model_gat.cuda()

    model_gat.load_state_dict(
        torch.load('{}/trained_{}.pth'.format(args.output_folder,
                                              args.epochs_gat - 1)))
    # model_conv.final_entity_embeddings = model_gat.final_entity_embeddings
    # model_conv.final_relation_embeddings = model_gat.final_relation_embeddings

    # model_conv.eval()
    model_gat.eval()  # disable dropout for evaluation
    with torch.no_grad():
        Corpus_.get_validation_pred(args, model_gat, unique_entities)
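# Note: CUDA is assumed to be a module-level flag set at import time, e.g.
# CUDA = torch.cuda.is_available(); Corpus_, entity_embeddings,
# relation_embeddings and unique_entities are likewise globals built during
# data loading.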
Example #2
def evaluate_conv(args, unique_entities_train, unique_entities_test):

    global initial_entity_emb_params
    global entity_embeddings

    model_gat = SpKBGATModified(entity_embeddings, relation_embeddings,
                                args.entity_out_dim, args.entity_out_dim,
                                args.drop_GAT, args.alpha, args.nheads_GAT,
                                initial_entity_emb_params)
    model_gat.load_state_dict(
        torch.load('{0}trained_{1}.pth'.format(args.output_folder, 0)))

    model_entity_embedding = EntityEmbedding(
        initial_entity_emb_params['entity_embed_dim_in'],
        initial_entity_emb_params['hidden_dim_entity'],
        initial_entity_emb_params['num_encoder_layers_entity'],
        initial_entity_emb_params['is_bidirectional'],
        initial_entity_emb_params['drop_out_rate'],
        initial_entity_emb_params['hidden_dim_entity'],
        initial_entity_emb_params['entity_embed_dim_out'],
        initial_entity_emb_params['entity_conv_filter_size'],
        initial_entity_emb_params['word_vocab'],
        initial_entity_emb_params['word_embed_dim'],
        initial_entity_emb_params['char_embed_dim'],
        initial_entity_emb_params['word_embed_matrix'],
        initial_entity_emb_params['char_feature_size'],
        initial_entity_emb_params['conv_filter_size'],
        initial_entity_emb_params['max_word_len_entity'],
        initial_entity_emb_params['char_vocab'])
    model_entity_embedding.load_state_dict(
        torch.load('{0}{1}/trained_{2}.pth'.format(args.output_folder,
                                                   'entity_embeddings', 0)))

    model_conv = SpKBGATConvOnly(entity_embeddings, relation_embeddings,
                                 args.entity_out_dim, args.entity_out_dim,
                                 args.drop_GAT, args.drop_conv, args.alpha,
                                 args.alpha_conv, args.nheads_GAT,
                                 args.out_channels)
    model_conv.load_state_dict(
        torch.load('{0}conv/trained_{1}.pth'.format(args.output_folder, 0)))

    if CUDA:
        model_conv.cuda()
        model_gat.cuda()
        model_entity_embedding.cuda()
    model_conv.eval()
    model_gat.eval()
    model_entity_embedding.eval()
    with torch.no_grad():
        Corpus_.get_validation_cnfmat(args,
                                      model_gat,
                                      model_entity_embedding,
                                      model_conv,
                                      unique_entities_train,
                                      unique_entities_test,
                                      reuse=False,
                                      gat_only=False)
Example #3
def save_entity_relation_final_embeddings():
    model_gat = SpKBGATModified(entity_embeddings, relation_embeddings, args.entity_out_dim, args.entity_out_dim,
                                args.drop_GAT, args.alpha, args.nheads_GAT, initial_entity_emb_params)
    model_gat.load_state_dict(torch.load(
        '{0}trained_{1}.pth'.format(args.output_folder, 0)))

    final_entity_embeddings = model_gat.final_entity_embeddings
    final_relation_embeddings = model_gat.final_relation_embeddings

    save_embed(final_entity_embeddings,
               os.path.join(args.output_folder, 'final_entity_embeddings.json'))
    save_embed(final_relation_embeddings,
               os.path.join(args.output_folder, 'final_relation_embeddings.json'))

    # np.save appends '.npy' to the given name, so use an .npy path here
    # rather than the misleading '.json' one.
    np.save(os.path.join(args.output_folder, 'W_ent2rel.npy'),
            np.array(model_gat.W_ent2rel.cpu().detach()))
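# A minimal sketch of the save_embed helper used above (an assumption; the
# real helper lives elsewhere in the repository): dump an embedding tensor to
# JSON as a nested list.
import json

def save_embed(embeddings, path):
    # Detach from the autograd graph and move to CPU before serializing.
    with open(path, 'w') as f:
        json.dump(embeddings.cpu().detach().numpy().tolist(), f)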
Example #4
def train_conv(args):
    # Creating convolution model here.
    ####################################

    print("Defining model")
    model_gat = SpKBGATModified(entity_embeddings, relation_embeddings,
                                args.entity_out_dim, args.entity_out_dim,
                                args.drop_GAT, args.alpha, args.nheads_GAT,
                                args.use_simple_layer)
    print("Only Conv model trained")
    model_conv = SpKBGATConvOnly(entity_embeddings, relation_embeddings,
                                 args.entity_out_dim, args.entity_out_dim,
                                 args.drop_GAT, args.drop_conv, args.alpha,
                                 args.alpha_conv, args.nheads_GAT,
                                 args.out_channels)

    wandb.watch(model_conv, log="all")

    if CUDA:
        model_conv.cuda()
        model_gat.cuda()
    if args.use_2hop:
        model_gat.load_state_dict(
            torch.load('{}/trained_{}_paths.pth'.format(
                args.output_folder, args.epochs_gat - 1)))
    else:
        model_gat.load_state_dict(
            torch.load('{}/trained_{}.pth'.format(args.output_folder,
                                                  args.epochs_gat - 1)))
    model_conv.final_entity_embeddings = model_gat.final_entity_embeddings
    model_conv.final_relation_embeddings = model_gat.final_relation_embeddings

    Corpus_.batch_size = args.batch_size_conv
    Corpus_.invalid_valid_ratio = int(args.valid_invalid_ratio_conv)

    optimizer = torch.optim.Adam(model_conv.parameters(),
                                 lr=args.lr,
                                 weight_decay=args.weight_decay_conv)

    scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                step_size=25,
                                                gamma=0.5,
                                                last_epoch=-1)

    margin_loss = torch.nn.SoftMarginLoss()

    epoch_losses = []  # losses of all epochs
    print("Number of epochs {}".format(args.epochs_conv))

    for epoch in range(args.epochs_conv):
        print("\nepoch-> ", epoch)
        random.shuffle(Corpus_.train_triples)
        Corpus_.train_indices = np.array(list(Corpus_.train_triples)).astype(
            np.int32)

        model_conv.train()  # getting in training mode
        start_time = time.time()
        epoch_loss = []

        if len(Corpus_.train_indices) % args.batch_size_conv == 0:
            num_iters_per_epoch = len(
                Corpus_.train_indices) // args.batch_size_conv
        else:
            num_iters_per_epoch = (len(Corpus_.train_indices) //
                                   args.batch_size_conv) + 1
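        # (The if/else above is plain ceiling division: the final partial
        # batch gets its own iteration.)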

        for iters in range(num_iters_per_epoch):
            start_time_iter = time.time()
            train_indices, train_values = Corpus_.get_iteration_batch(iters)

            if CUDA:
                train_indices = Variable(
                    torch.LongTensor(train_indices)).cuda()
                train_values = Variable(torch.FloatTensor(train_values)).cuda()

            else:
                train_indices = Variable(torch.LongTensor(train_indices))
                train_values = Variable(torch.FloatTensor(train_values))

            preds = model_conv(Corpus_, Corpus_.train_adj_matrix,
                               train_indices)

            optimizer.zero_grad()

            loss = margin_loss(preds.view(-1), train_values.view(-1))

            loss.backward()
            # Clip gradients after each backward pass; the deprecated
            # one-shot clip_grad_norm call that used to sit before the
            # training loop had no effect there.
            torch.nn.utils.clip_grad_norm_(model_conv.parameters(), 0.5)
            optimizer.step()

            epoch_loss.append(loss.data.item())

            end_time_iter = time.time()

            print(
                "Iteration-> {0}  , Iteration_time-> {1:.4f} , Iteration_loss {2:.4f}"
                .format(iters, end_time_iter - start_time_iter,
                        loss.data.item()))

        scheduler.step()
        print("Epoch {} , average loss {} , epoch_time {}".format(
            epoch,
            sum(epoch_loss) / len(epoch_loss),
            time.time() - start_time))
        epoch_losses.append(sum(epoch_loss) / len(epoch_loss))
        wandb.log({'epoch_loss': epoch_losses[-1]})
        if (epoch + 1) % 50 == 0 or (epoch + 1) == args.epochs_conv:
            save_model(model_conv, args.data, epoch,
                       args.output_folder + "conv/", args.use_2hop)
        if (epoch + 1) == args.epochs_conv:
            save_final(model_conv, 'decoder', wandb.run.dir, args.use_2hop)
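# A minimal sketch of the save_model helper these examples call (an
# assumption; the repository's own helper may differ): write the state dict
# as trained_{epoch}.pth, with a _paths suffix for the 2-hop variant.
import os
import torch

def save_model(model, data_name, epoch, folder, use_2hop=False):
    # data_name (the dataset identifier) is unused in this sketch.
    os.makedirs(folder, exist_ok=True)
    suffix = '_paths' if use_2hop else ''
    torch.save(model.state_dict(),
               os.path.join(folder, 'trained_{}{}.pth'.format(epoch, suffix)))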
Example #5
def train_conv(args):

    # Creating convolution model here.
    ####################################

    global initial_entity_emb_params

    print("Defining model")
    model_gat = SpKBGATModified(entity_embeddings, relation_embeddings,
                                args.entity_out_dim, args.entity_out_dim,
                                args.drop_GAT, args.alpha, args.nheads_GAT,
                                initial_entity_emb_params)
    print("Only Conv model trained")
    model_conv = SpKBGATConvOnly(entity_embeddings, relation_embeddings,
                                 args.entity_out_dim, args.entity_out_dim,
                                 args.drop_GAT, args.drop_conv, args.alpha,
                                 args.alpha_conv, args.nheads_GAT,
                                 args.out_channels)

    if CUDA:
        model_conv.cuda()
        model_gat.cuda()

    # model_gat.load_state_dict(torch.load(
    #     '{}/trained_{}.pth'.format(args.output_folder, args.epochs_gat - 1)))
    model_gat.load_state_dict(
        torch.load(os.path.join(args.output_folder,
                                'trained_{}.pth'.format(0))))
    # if os.path.exists('{0}trained_{1}.pth'.format(args.output_folder + "conv/", 0)):
    #   model_conv.load_state_dict(torch.load(
    #       os.path.join(args.output_folder + "conv/",'trained_{}.pth'.format(0))))
    # else:
    #   model_conv.final_entity_embeddings = model_gat.final_entity_embeddings
    #   model_conv.final_relation_embeddings = model_gat.final_relation_embeddings

    # import pdb; pdb.set_trace()
    model_conv.final_entity_embeddings = model_gat.final_entity_embeddings
    model_conv.final_relation_embeddings = model_gat.final_relation_embeddings

    model_conv.final_entity_embeddings.requires_grad = False
    model_conv.final_relation_embeddings.requires_grad = False

    Corpus_.batch_size = args.batch_size_conv
    Corpus_.invalid_valid_ratio = int(args.valid_invalid_ratio_conv)

    optimizer = torch.optim.Adam(model_conv.parameters(),
                                 lr=args.lr,
                                 weight_decay=args.weight_decay_conv)

    scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                step_size=25,
                                                gamma=0.5,
                                                last_epoch=-1)

    margin_loss = torch.nn.SoftMarginLoss()
    bce_loss = torch.nn.functional.binary_cross_entropy_with_logits

    epoch_losses = []  # losses of all epochs
    print("Number of epochs {}".format(args.epochs_conv))

    for epoch in range(args.epochs_conv):
        print("\nepoch-> ", epoch)

        random.shuffle(Corpus_.train_triples)

        Corpus_.train_indices = np.array(list(Corpus_.train_triples)).astype(
            np.int32)

        model_conv.train()  # getting in training mode
        start_time = time.time()
        epoch_loss = []

        if len(Corpus_.train_indices) % args.batch_size_conv == 0:
            num_iters_per_epoch = len(
                Corpus_.train_indices) // args.batch_size_conv
        else:
            num_iters_per_epoch = (len(Corpus_.train_indices) //
                                   args.batch_size_conv) + 1

        for iters in tqdm(range(num_iters_per_epoch)):
            # print(model_conv.final_entity_embeddings[0][:10],model_conv.final_entity_embeddings[50][:10],model_conv.final_entity_embeddings[100][:10])
            # print(model_conv.final_relation_embeddings[0][:10],model_conv.final_relation_embeddings[50][:10],model_conv.final_relation_embeddings[100][:10])
            start_time_iter = time.time()
            # train_indices, train_values = Corpus_.get_iteration_batch(iters)
            # NOTE: every iteration draws batch 0, so the model repeatedly
            # trains on the same batch; presumably a debugging choice.
            train_indices, train_values = Corpus_.get_iteration_batch(0)
            # print(train_indices.tolist())
            # print(train_indices[:3],train_indices[64:67],train_indices[128:131],train_indices[192:198])
            # print(train_values[:3],train_values[64:67],train_values[128:131],train_values[192:198])

            # # import pdb; pdb.set_trace()
            # sampled_entities = np.concatenate((np.array(model_conv.final_entity_embeddings[train_indices[:,0]].detach().cpu()),np.array(model_conv.final_entity_embeddings[train_indices[:,2]].detach().cpu())),axis=0)
            # mean_vector = np.mean(sampled_entities,axis=-1)
            # norm_entities = np.sqrt(np.sum(np.square(sampled_entities),axis=-1))
            # norm_mean = np.sqrt(np.sum(np.square(sampled_entities),axis=-1))
            # den = norm_mean*norm_entities
            # num = np.dot(mean_vector,norm_entities.transpose())
            # cosine_dist = num/den
            # mean_cosine_dist = np.mean(cosine_dist)
            # median_cosine_dist = np.median(cosine_dist)
            # min_norm = np.min(norm_entities)
            # max_norm = np.max(norm_entities)
            # print('mean_cosine_dist: ',mean_cosine_dist)
            # print('median_cosine_dist: ',median_cosine_dist)
            # print('min_norm: ',min_norm)
            # print('max_norm: ',max_norm)

            if CUDA:
                train_indices = Variable(
                    torch.LongTensor(train_indices)).cuda()
                train_values = Variable(torch.FloatTensor(train_values)).cuda()

            else:
                train_indices = Variable(torch.LongTensor(train_indices))
                train_values = Variable(torch.FloatTensor(train_values))

            preds = model_conv(Corpus_, Corpus_.train_adj_matrix,
                               train_indices)

            optimizer.zero_grad()

            # valid_preds = preds[:args.batch_size_conv]
            # valid_preds = valid_preds.repeat(int(args.valid_invalid_ratio_conv)*2, 1)
            # valid_values = torch.ones(valid_preds.shape)
            # if CUDA:
            #   valid_values = valid_values.cuda()
            # preds = torch.cat((preds,valid_preds),dim=0)
            # train_values = torch.cat((train_values,valid_values),dim=0)

            # loss = margin_loss(preds.view(-1), train_values.view(-1))
            train_values = train_values.view(-1)
            train_values = (train_values + 1) / 2
            train_values = train_values.float()
            preds = preds.view(-1)
            # import pdb; pdb.set_trace()
            # print(preds)
            # print(train_values)
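            # Each positive triple is paired with valid_invalid_ratio_conv * 2
            # corrupted triples (head and tail corruptions), so negatives are
            # down-weighted to balance the binary cross-entropy.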
            weights = train_values + (1 - train_values) / (
                args.valid_invalid_ratio_conv * 2)
            loss = bce_loss(preds, train_values, weight=weights)

            loss.backward()
            optimizer.step()

            epoch_loss.append(loss.data.item())

            end_time_iter = time.time()

            print(
                "Iteration-> {0}  , Iteration_time-> {1:.4f} , Iteration_loss {2:.4f}"
                .format(iters, end_time_iter - start_time_iter,
                        loss.data.item()))
            # break
        scheduler.step()
        print("Epoch {} , average loss {} , epoch_time {}".format(
            epoch,
            sum(epoch_loss) / len(epoch_loss),
            time.time() - start_time))
        epoch_losses.append(sum(epoch_loss) / len(epoch_loss))

        # save_model(model_conv, args.data, epoch,
        #            args.output_folder + "conv/")
        save_model(model_conv, args.data, 0, args.output_folder + "conv/")
Example #6
def train_entity_embeddings(args, word_embed_matrix, word_vocab, char_vocab):

    global initial_entity_emb_params
    global entity_embeddings

    model_gat = SpKBGATModified(entity_embeddings, relation_embeddings,
                                args.entity_out_dim, args.entity_out_dim,
                                args.drop_GAT, args.alpha, args.nheads_GAT,
                                initial_entity_emb_params)
    print("Only Entity embedding model")

    model_entity_embedding = EntityEmbedding(
        initial_entity_emb_params['entity_embed_dim_in'],
        initial_entity_emb_params['hidden_dim_entity'],
        initial_entity_emb_params['num_encoder_layers_entity'],
        initial_entity_emb_params['is_bidirectional'],
        initial_entity_emb_params['drop_out_rate'],
        initial_entity_emb_params['hidden_dim_entity'],
        initial_entity_emb_params['entity_embed_dim_out'],
        initial_entity_emb_params['entity_conv_filter_size'],
        initial_entity_emb_params['word_vocab'],
        initial_entity_emb_params['word_embed_dim'],
        initial_entity_emb_params['char_embed_dim'],
        initial_entity_emb_params['word_embed_matrix'],
        initial_entity_emb_params['char_feature_size'],
        initial_entity_emb_params['conv_filter_size'],
        initial_entity_emb_params['max_word_len_entity'],
        initial_entity_emb_params['char_vocab'])
    if CUDA:
        model_gat.cuda()
        model_entity_embedding.cuda()

    if CUDA:
        model_gat.load_state_dict(
            torch.load(
                os.path.join(args.output_folder, 'trained_{}.pth'.format(0))))
    else:
        model_gat.load_state_dict(
            torch.load(os.path.join(args.output_folder,
                                    'trained_{}.pth'.format(0)),
                       map_location=torch.device('cpu')))
    entity_embed_gat = model_gat.entity_embeddings

    # Optimize the entity-embedding encoder that is actually being trained
    # here (optimizing model_gat.parameters() left the encoder untouched).
    optimizer = torch.optim.Adam(model_entity_embedding.parameters(),
                                 lr=args.lr,
                                 weight_decay=args.weight_decay_gat)

    scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                step_size=500,
                                                gamma=0.5,
                                                last_epoch=-1)

    epoch_losses = []  # losses of all epochs
    print("Number of epochs {}".format(args.epochs_ent_emb))

    for epoch in range(args.epochs_ent_emb):
        print("\nepoch-> ", epoch)
        random.shuffle(Corpus_.train_triples)
        Corpus_.train_indices = np.array(list(Corpus_.train_triples)).astype(
            np.int32)

        model_entity_embedding.train()  # getting in training mode
        start_time = time.time()
        epoch_loss = []

        num_emb_iters_per_epoch = int(
            np.ceil(
                len(Corpus_.unique_entities_train) / args.entities_per_batch))
        random_indices = np.random.permutation(
            len(Corpus_.unique_entities_train))
        random_unique_entities_train = [
            Corpus_.unique_entities_train[idx] for idx in random_indices
        ]
        # for iters in tqdm(range(num_emb_iters_per_epoch)):

        start_idx = end_idx = previous_end_idx = 0
        reduced_batch_size = None
        pbar = tqdm(total=num_emb_iters_per_epoch)
        iters = 0
        while True:
            if end_idx >= len(random_unique_entities_train):
                break
            start_time_iter = time.time()

            try:

                iters += 1
                start_idx = end_idx
                if reduced_batch_size:
                    end_idx = min(len(random_unique_entities_train),
                                  start_idx + reduced_batch_size)
                else:
                    end_idx = min(len(random_unique_entities_train),
                                  start_idx + args.entities_per_batch)

                current_batch_entities = random_unique_entities_train[
                    start_idx:end_idx]

                train_indices, train_values = Corpus_.get_iteration_entities_batch(
                    current_batch_entities,
                    args.valid_invalid_ratio_entity_embed)

                batch_entities_ctx_data = Corpus_.get_batch_entities_ctx_data(
                    train_indices[:len(current_batch_entities)],
                    args.conv_filter_size,
                    args.max_word_len,
                    triple=False)

                ctx_words = torch.from_numpy(
                    batch_entities_ctx_data['ctx_words_list'].astype('long'))
                ctx_char_seq = torch.from_numpy(
                    batch_entities_ctx_data['ctx_char_seq'].astype('long'))
                ctx_mask = torch.from_numpy(
                    batch_entities_ctx_data['ctx_mask'].astype('bool'))

                if CUDA:
                    train_indices = Variable(
                        torch.LongTensor(train_indices)).cuda()
                    train_values = Variable(
                        torch.FloatTensor(train_values)).cuda()
                    ctx_words = Variable(ctx_words).cuda()
                    ctx_char_seq = Variable(ctx_char_seq).cuda()
                    ctx_mask = Variable(ctx_mask).cuda()
                else:
                    train_indices = Variable(torch.LongTensor(train_indices))
                    train_values = Variable(torch.FloatTensor(train_values))
                    ctx_words = Variable(ctx_words)
                    ctx_char_seq = Variable(ctx_char_seq)
                    ctx_mask = Variable(ctx_mask)

                entity_embeddings_batch = model_entity_embedding(
                    ctx_words, ctx_char_seq, ctx_mask)

                optimizer.zero_grad()

                loss = batch_entity_embedding_loss(train_indices, train_values,
                                                   entity_embed_gat,
                                                   entity_embeddings_batch)

                loss.backward()
                optimizer.step()
                epoch_loss.append(loss.data.item())

                reduced_batch_size = None
                previous_end_idx = end_idx
                pbar.update(1)
                end_time_iter = time.time()
                print(
                    "Iteration-> {0}/{1}  , Iteration_time-> {2:.4f} , Iteration_loss {3:.4f}"
                    .format(iters, num_emb_iters_per_epoch,
                            end_time_iter - start_time_iter, loss.data.item()))

            except RuntimeError as e:
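                # Placeholder for OOM recovery: reduced_batch_size is wired
                # up for retrying with a smaller batch, but this handler
                # currently just re-raises.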
                raise e

        scheduler.step()
        print("Epoch {} , average loss {} , epoch_time {}".format(
            epoch,
            sum(epoch_loss) / len(epoch_loss),
            time.time() - start_time))
        epoch_losses.append(sum(epoch_loss) / len(epoch_loss))

        save_model(model_entity_embedding, args.data, 0,
                   args.output_folder + 'entity_embeddings/')
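# batch_entity_embedding_loss is defined elsewhere; one plausible form, given
# purely as an assumption about its intent (pull the context-encoder output
# toward the frozen GAT embedding of each batch entity):
import torch.nn.functional as F

def batch_entity_embedding_loss(train_indices, train_values, entity_embed_gat,
                                entity_embeddings_batch):
    # Assumed layout: the first len(entity_embeddings_batch) rows of
    # train_indices carry the ids of the entities encoded in this batch;
    # train_values is kept only for signature parity.
    entity_ids = train_indices[:entity_embeddings_batch.shape[0], 0]
    target = entity_embed_gat[entity_ids].detach()
    return F.mse_loss(entity_embeddings_batch, target)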
Example #7
def train_gat(args, word_embed_matrix, word_vocab, char_vocab):

    # Creating the gat model here.
    ####################################

    print("Defining model")

    print(
        "\nModel type -> GAT layer with {} heads used , Initial Embeddings training"
        .format(args.nheads_GAT[0]))
    global initial_entity_emb_params
    global entity_embeddings

    model_gat = SpKBGATModified(entity_embeddings, relation_embeddings,
                                args.entity_out_dim, args.entity_out_dim,
                                args.drop_GAT, args.alpha, args.nheads_GAT,
                                initial_entity_emb_params)
    if os.path.exists('{0}trained_{1}.pth'.format(args.output_folder, 0)):
        if CUDA:
            model_gat.load_state_dict(
                torch.load(
                    os.path.join(args.output_folder,
                                 'trained_{}.pth'.format(0))))
        else:
            model_gat.load_state_dict(
                torch.load(os.path.join(args.output_folder,
                                        'trained_{}.pth'.format(0)),
                           map_location=torch.device('cpu')))

    display_every = 100

    if CUDA:
        model_gat.cuda()

    # optimizer = torch.optim.Adam(
    #     model_gat.parameters(), lr=args.lr, weight_decay=args.weight_decay_gat)
    optimizer = torch.optim.SGD(model_gat.parameters(), lr=args.lr)

    scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                step_size=500,
                                                gamma=0.5,
                                                last_epoch=-1)

    gat_loss_func = nn.MarginRankingLoss(margin=args.margin)

    epoch_losses = []  # losses of all epochs
    print("Number of epochs {}".format(args.epochs_gat))

    for epoch in range(args.epochs_gat):
        print("\nepoch-> ", epoch)
        random.shuffle(Corpus_.train_triples)
        Corpus_.train_indices = np.array(list(Corpus_.train_triples)).astype(
            np.int32)

        model_gat.train()  # getting in training mode
        start_time = time.time()
        epoch_loss = []

        num_iters_per_epoch = int(
            np.ceil(len(Corpus_.unique_entities_train) / args.batch_size_gat))
        random_indices = np.random.permutation(
            len(Corpus_.unique_entities_train))
        random_unique_entities_train = [
            Corpus_.unique_entities_train[idx] for idx in random_indices
        ]

        start_idx = end_idx = previous_end_idx = 0
        reduced_batch_size = None
        iters = 0
        for iters in tqdm(range(num_iters_per_epoch)):
            start_time_iter = time.time()

            start_idx = end_idx
            if reduced_batch_size:
                end_idx = min(len(random_unique_entities_train),
                              start_idx + reduced_batch_size)
            else:
                end_idx = min(len(random_unique_entities_train),
                              start_idx + args.entities_per_batch)

            batch_train_adj_matrix, current_batch_entities_set = Corpus_.get_batch_adj_data(
                iters,
                start_idx=start_idx,
                end_idx=end_idx,
                unique_entities_train=random_unique_entities_train)
            if batch_train_adj_matrix[0][0].shape[0] == 0:
                continue

            train_indices, train_values = Corpus_.get_iteration_triples_batch(
                list(current_batch_entities_set['source']))

            if args.use_2hop:
                current_batch_2hop_indices = Corpus_.get_batch_nhop_neighbors_all(
                    args, list(current_batch_entities_set['source']),
                    node_neighbors_2hop)
                if current_batch_2hop_indices.shape[0] == 0:
                    # current_batch_2hop_indices = np.empty((1,4))
                    current_batch_2hop_indices = torch.tensor([],
                                                              dtype=torch.long)
            else:
                current_batch_2hop_indices = torch.tensor([], dtype=torch.long)

            if CUDA:
                train_indices = Variable(
                    torch.LongTensor(train_indices)).cuda()
                train_values = Variable(torch.FloatTensor(train_values)).cuda()
                if args.use_2hop:
                    current_batch_2hop_indices = Variable(
                        torch.LongTensor(current_batch_2hop_indices)).cuda()
            else:
                train_indices = Variable(torch.LongTensor(train_indices))
                train_values = Variable(torch.FloatTensor(train_values))
                if args.use_2hop:
                    current_batch_2hop_indices = Variable(
                        torch.LongTensor(current_batch_2hop_indices))

            entity_embed, relation_embed, mask = model_gat(
                Corpus_,
                torch.tensor(list(current_batch_entities_set['source'])),
                batch_train_adj_matrix, current_batch_2hop_indices)

            optimizer.zero_grad()

            loss = batch_gat_loss(gat_loss_func, train_indices, entity_embed,
                                  relation_embed)

            loss.backward()
            optimizer.step()
            epoch_loss.append(loss.data.item())

            end_time_iter = time.time()
            print(
                "Iteration-> {0}/{1}  , Iteration_time-> {2:.4f} , Iteration_loss {3:.4f}"
                .format(iters, num_iters_per_epoch,
                        end_time_iter - start_time_iter, loss.data.item()))

            if iters % display_every == 0:
                num_samples = 1000
                random_ent_indices = np.random.randint(0,
                                                       entity_embed.shape[0],
                                                       num_samples)
                sampled_entities = np.array(
                    entity_embed[random_ent_indices].detach().cpu())
                # Cosine similarity of each sampled entity to the mean
                # embedding (the original mixed up the axes and norms here).
                mean_vector = np.mean(sampled_entities, axis=0)
                norm_entities = np.sqrt(
                    np.sum(np.square(sampled_entities), axis=-1))
                norm_mean = np.sqrt(np.sum(np.square(mean_vector)))
                den = norm_mean * norm_entities
                num = np.dot(sampled_entities, mean_vector)
                cosine_dist = num / den

                mean_cosine_dist = np.mean(cosine_dist)
                median_cosine_dist = np.median(cosine_dist)
                min_norm = np.min(norm_entities)
                max_norm = np.max(norm_entities)

                print('mean_cosine_dist: ', mean_cosine_dist)
                print('median_cosine_dist: ', median_cosine_dist)
                print('min_norm: ', min_norm)
                print('max_norm: ', max_norm)

        scheduler.step()
        print("Epoch {} , average loss {} , epoch_time {}".format(
            epoch,
            sum(epoch_loss) / len(epoch_loss),
            time.time() - start_time))
        epoch_losses.append(sum(epoch_loss) / len(epoch_loss))

        save_model(model_gat, args.data, 0, args.output_folder)
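# A sketch of batch_gat_loss following the reference KBGAT implementation (an
# assumption here; the batch layout and the invalid/valid ratio come from the
# corpus sampler): a TransE-style margin ranking loss over L1 energies.
import torch

def batch_gat_loss(gat_loss_func, train_indices, entity_embed, relation_embed,
                   invalid_valid_ratio=2):
    # The sampler is assumed to emit positives first, then
    # invalid_valid_ratio corrupted triples per positive.
    len_pos = train_indices.shape[0] // (invalid_valid_ratio + 1)
    pos = train_indices[:len_pos].repeat(invalid_valid_ratio, 1)
    neg = train_indices[len_pos:]

    def l1_energy(triples):
        # ||h + r - t||_1: lower means a more plausible triple.
        x = (entity_embed[triples[:, 0]] + relation_embed[triples[:, 1]]
             - entity_embed[triples[:, 2]])
        return torch.norm(x, p=1, dim=1)

    # With target y = -1, MarginRankingLoss pushes the positive energy below
    # the negative energy by at least the configured margin.
    y = -torch.ones(neg.shape[0], device=entity_embed.device)
    return gat_loss_func(l1_energy(pos), l1_energy(neg), y)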
Example #8
def train_conv(args):

    # Creating convolution model here.
    ####################################

    print("Defining model")
    model_gat = SpKBGATModified(
        initial_entity_emb=entity_embeddings,
        initial_relation_emb=relation_embeddings,
        entity_out_dim=args.entity_out_dim,
        relation_out_dim=args.entity_out_dim,
        drop_GAT=args.drop_GAT,
        alpha=args.alpha,
        nheads_GAT=args.nheads_GAT,
    )
    model_conv = SpKBGATConvOnly(
        initial_entity_emb=entity_embeddings,
        initial_relation_emb=relation_embeddings,
        entity_out_dim=args.entity_out_dim,
        relation_out_dim=args.entity_out_dim,
        drop_GAT=args.drop_GAT,
        drop_conv=args.drop_conv,
        alpha=args.alpha,
        alpha_conv=args.alpha_conv,
        nheads_GAT=args.nheads_GAT,
        conv_out_channels=args.out_channels,
    )
    if CUDA:
        model_conv.cuda()
        model_gat.cuda()
    print("Only Conv model trained")

    model_gat.load_state_dict(
        torch.load("{}/trained_{}.pth".format(args.output_folder,
                                              args.epochs_gat - 1)),
        strict=False,
    )
    model_conv.final_entity_embeddings = model_gat.final_entity_embeddings
    model_conv.final_relation_embeddings = model_gat.final_relation_embeddings

    Corpus_.batch_size = args.batch_size_conv
    Corpus_.invalid_valid_ratio = int(args.valid_invalid_ratio_conv)

    optimizer = torch.optim.Adam(model_conv.parameters(),
                                 lr=args.lr,
                                 weight_decay=args.weight_decay_conv)

    scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                step_size=25,
                                                gamma=0.5,
                                                last_epoch=-1)

    margin_loss = torch.nn.SoftMarginLoss()

    epoch_losses = []  # losses of all epochs
    print("Number of epochs {}".format(args.epochs_conv))

    for epoch in range(args.epochs_conv):
        print("\nepoch-> ", epoch)
        random.shuffle(Corpus_.train_triples)
        Corpus_.train_indices = np.array(list(Corpus_.train_triples)).astype(
            np.int32)

        model_conv.train()  # getting in training mode
        start_time = time.time()
        epoch_loss = []

        if len(Corpus_.train_indices) % args.batch_size_conv == 0:
            num_iters_per_epoch = len(
                Corpus_.train_indices) // args.batch_size_conv
        else:
            num_iters_per_epoch = (len(Corpus_.train_indices) //
                                   args.batch_size_conv) + 1

        for iters in range(num_iters_per_epoch):
            start_time_iter = time.time()

            train_indices, train_values = Corpus_.get_iteration_batch(iters)
            train_indices = Variable(torch.LongTensor(train_indices))
            train_values = Variable(torch.FloatTensor(train_values))
            if CUDA:
                train_indices = train_indices.cuda()
                train_values = train_values.cuda()
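            # Note: torch.autograd.Variable has been a no-op wrapper since
            # PyTorch 0.4; an equivalent modern form would be
            #   train_indices = torch.as_tensor(train_indices, dtype=torch.long,
            #                                   device='cuda' if CUDA else 'cpu')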

            pred = model_conv(Corpus_, Corpus_.train_adj_matrix, train_indices)

            optimizer.zero_grad()

            loss = margin_loss(pred.view(-1), train_values.view(-1))

            loss.backward()
            optimizer.step()

            epoch_loss.append(loss.data.item())

            end_time_iter = time.time()

            print(
                "Iteration-> {0}  , Iteration_time-> {1:.4f} , Iteration_loss {2:.4f}"
                .format(iters, end_time_iter - start_time_iter,
                        loss.data.item()))

        scheduler.step()
        print("Epoch {} , average loss {} , epoch_time {}".format(
            epoch,
            sum(epoch_loss) / len(epoch_loss),
            time.time() - start_time))
        epoch_losses.append(sum(epoch_loss) / len(epoch_loss))

    # Index the checkpoint by the conv epochs actually run (the original used
    # args.epochs_gat - 1 here, a likely copy-paste slip).
    save_model(model_conv, args.data, args.epochs_conv - 1,
               args.output_folder + "conv/")
Example #9
def train_conv(args):
    # Creating convolution model here.
    ####################################

    print("Defining model")
    model_gat = SpKBGATModified(entity_embeddings, relation_embeddings,
                                args.entity_out_dim, args.entity_out_dim,
                                args.drop_GAT, args.alpha, args.nheads_GAT)
    print("Only Conv model trained")
    model_conv = SpKBGATConvOnly(entity_embeddings, relation_embeddings,
                                 args.entity_out_dim, args.entity_out_dim,
                                 args.drop_GAT, args.drop_conv, args.alpha,
                                 args.alpha_conv, args.nheads_GAT,
                                 args.out_channels)

    if CUDA:
        model_conv.cuda()
        model_gat.cuda()

    model_gat.load_state_dict(
        torch.load('{}/trained_{}.pth'.format(args.output_folder,
                                              args.epochs_gat - 1)))
    model_conv.final_entity_embeddings = model_gat.final_entity_embeddings
    model_conv.final_relation_embeddings = model_gat.final_relation_embeddings

    Corpus_.batch_size = args.batch_size_conv
    Corpus_.invalid_valid_ratio = int(args.valid_invalid_ratio_conv)

    optimizer = torch.optim.Adam(model_conv.parameters(),
                                 lr=args.lr,
                                 weight_decay=args.weight_decay_conv)

    scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                step_size=25,
                                                gamma=0.5,
                                                last_epoch=-1)

    margin_loss = torch.nn.SoftMarginLoss()

    epoch_losses = []  # losses of all epochs
    print("Number of epochs {}".format(args.epochs_conv))

    for epoch in range(args.epochs_conv):
        random.shuffle(Corpus_.train_triples)
        Corpus_.train_indices = np.array(list(Corpus_.train_triples)).astype(
            np.int32)

        model_conv.train()  # getting in training mode
        start_time = time.time()
        epoch_loss = []

        if len(Corpus_.train_indices) % args.batch_size_conv == 0:
            num_iters_per_epoch = len(
                Corpus_.train_indices) // args.batch_size_conv
        else:
            num_iters_per_epoch = (len(Corpus_.train_indices) //
                                   args.batch_size_conv) + 1

        for iters in range(num_iters_per_epoch):
            start_time_iter = time.time()
            train_indices, train_values = Corpus_.get_iteration_batch(iters)

            if CUDA:
                train_indices = Variable(
                    torch.LongTensor(train_indices)).cuda()
                train_values = Variable(torch.FloatTensor(train_values)).cuda()

            else:
                train_indices = Variable(torch.LongTensor(train_indices))
                train_values = Variable(torch.FloatTensor(train_values))

            preds = model_conv(Corpus_, Corpus_.train_adj_matrix,
                               train_indices)

            optimizer.zero_grad()

            loss = margin_loss(preds.view(-1), train_values.view(-1))

            loss.backward()
            optimizer.step()

            epoch_loss.append(loss.data.item())

            end_time_iter = time.time()

            if iters % 500 == 0:
                print(
                    "Iteration-> {0}  , Iteration_time-> {1:.4f} , Iteration_loss {2:.4f}"
                    .format(iters, end_time_iter - start_time_iter,
                            loss.data.item()))

            summary.add_scalar('loss/conv_loss_iter', loss.data.item(),
                               iters + epoch * num_iters_per_epoch)
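            # (`summary` above is assumed to be a TensorBoard SummaryWriter,
            # e.g. torch.utils.tensorboard.SummaryWriter, created at module
            # scope.)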

        scheduler.step()

        if epoch % 10 == 0:
            print("Epoch {} , average loss {} , epoch_time {}".format(
                epoch,
                sum(epoch_loss) / len(epoch_loss),
                time.time() - start_time))
        epoch_losses.append(sum(epoch_loss) / len(epoch_loss))
        summary.add_scalar('loss/conv_loss_epoch',
                           sum(epoch_loss) / len(epoch_loss), epoch)

        if (epoch + 1) % 10 == 0:
            save_model(model_conv, args.data, epoch,
                       args.output_folder + "conv/")

    now = time.localtime()
    loss_path = (args.output_folder +
                 "train_conv_epoch_losses_{}-{}-{}.txt").format(
                     now.tm_year, now.tm_mon, now.tm_mday)
    with open(loss_path, 'w') as f:
        for i in epoch_losses:
            f.write(str(i))
            f.write('\n')
Example #10
def train_conv(args):
    # Creating convolution model here.
    ####################################
    print("Defining model")
    model_gat = SpKBGATModified(entity_embeddings, relation_embeddings, nhop_embeddings, nhop_array, args.entity_out_dim, args.entity_out_dim,
                                args.drop_GAT, args.alpha, args.nheads_GAT)
    print("Only Conv model trained")
    # model_conv = SpKBGATConvOnly(entity_embeddings, relation_embeddings, args.entity_out_dim, args.entity_out_dim,
    #                              args.drop_GAT, args.drop_conv, args.alpha, args.alpha_conv,
    #                              args.nheads_GAT, args.out_channels)
    model_conv = SpKBGATConvOnly(entity_embeddings, relation_embeddings, nhop_embeddings, args.entity_out_dim,
                                 args.entity_out_dim,
                                 args.drop_GAT, args.drop_conv, args.alpha, args.alpha_conv,
                                 args.nheads_GAT, args.out_channels)
    if CUDA:
        model_conv.cuda()
        model_gat.cuda()

    model_gat.load_state_dict(torch.load(
        '{}trained.pth'.format(args.output_folder)))
    model_conv.final_entity_embeddings = model_gat.final_entity_embeddings
    model_conv.final_relation_embeddings = model_gat.final_relation_embeddings_new

    Corpus_.batch_size = args.batch_size_conv
    Corpus_.invalid_valid_ratio = int(args.valid_invalid_ratio_conv)

    optimizer = torch.optim.Adam(model_conv.parameters(), lr=args.lr, weight_decay=args.weight_decay_conv)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=25, gamma=0.5, last_epoch=-1)

    margin_loss = torch.nn.SoftMarginLoss()

    epoch_losses = []  # losses of all epochs
    print("Number of epochs {}".format(args.epochs_conv))

    """
            Add by cc
            Add early stopping with patience
            if patience times in a row score_fn gives a result lower than the best result,
            than the training will be stopped
            """
    counter = 0
    best_score = None
    for epoch in range(args.epochs_conv):
        print("\nepoch-> ", epoch)
        random.shuffle(Corpus_.train_triples)
        Corpus_.train_indices = np.array(
            list(Corpus_.train_triples)).astype(np.int32)

        model_conv.train()  # getting in training mode
        start_time = time.time()
        epoch_loss = []

        if len(Corpus_.train_indices) % args.batch_size_conv == 0:
            num_iters_per_epoch = len(Corpus_.train_indices) // args.batch_size_conv
        else:
            num_iters_per_epoch = (len(Corpus_.train_indices) // args.batch_size_conv) + 1

        for iters in range(num_iters_per_epoch):
            start_time_iter = time.time()
            train_indices, train_values = Corpus_.get_iteration_batch(iters)

            if CUDA:
                train_indices = Variable(torch.LongTensor(train_indices)).cuda()
                train_values = Variable(torch.FloatTensor(train_values)).cuda()

            else:
                train_indices = Variable(torch.LongTensor(train_indices))
                train_values = Variable(torch.FloatTensor(train_values))

            preds = model_conv(
                Corpus_, Corpus_.train_adj_matrix, train_indices)

            optimizer.zero_grad()

            loss = margin_loss(preds.view(-1), train_values.view(-1))

            loss.backward()
            optimizer.step()

            epoch_loss.append(loss.data.item())

            end_time_iter = time.time()

            print("Iteration-> {0}  , Iteration_time-> {1:.4f} , Iteration_loss {2:.4f}".format(
                iters, end_time_iter - start_time_iter, loss.data.item()))
            writer.add_scalar('WN18RR_add_conv_epoch [add (h,t)->r]: ConvKB per iteration loss--iter', loss.data.item(), iters)
        scheduler.step()
        avg_loss = sum(epoch_loss) / len(epoch_loss)
        print("Epoch {} , average loss {} , epoch_time {}".format(
            epoch, avg_loss, time.time() - start_time))
        epoch_losses.append(avg_loss)

        """
                early stopping
                """
        if best_score is None:
            best_score = 99
        elif avg_loss > best_score:
            counter += 1
            if counter >= args.patience_conv:
                break
        else:
            best_score = avg_loss
            counter = 0

        writer.add_scalar('WN18RR_add_conv_epoch [add (h,t)->r]: ConvKB average loss--epoch', sum(epoch_loss) / len(epoch_loss), epoch)
        save_model(model_conv, args.data, epoch, args.output_folder + "conv/")
Example #11
    for edge in nhop_indices:
        nhop_set.add(str(edge[1]) + "_" + str(edge[2]))
    nhop_embedding_dic = {}
    nhop_array = []
    nhop_num = 0
    for nhop_edge in nhop_set:
        nhop_embedding_dic[nhop_edge] = nhop_num
        nhop_array.append(nhop_edge.split('_'))
        nhop_num += 1
    nhop_embeddings = torch.cuda.FloatTensor(
        np.random.randn(nhop_num, args.embedding_size))
    nhop_embeddings = nn.Parameter(nhop_embeddings)
    model_gat = SpKBGATModified(entity_embeddings, relation_embeddings,
                                args.entity_out_dim, args.entity_out_dim,
                                args.drop_GAT, args.alpha, args.nheads_GAT)
    model_gat.load_state_dict(
        torch.load('{}trained.pth'.format(args.output_folder)))

    # In-place writes to an nn.Parameter must bypass autograd, otherwise
    # PyTorch raises "a leaf Variable that requires grad is being used in an
    # in-place operation".
    with torch.no_grad():
        for i in range(len(nhop_array)):
            nhop_embeddings[i] = model_gat.final_relation_embeddings[int(
                nhop_array[i][0])] + model_gat.final_relation_embeddings[int(
                    nhop_array[i][1])]

    nhop_dict = {  # renamed from `dir`, which shadowed the builtin
        'array': nhop_array,
        'embeddings': nhop_embeddings,
        'relation_embeddings': model_gat.final_relation_embeddings
    }

    file_name = args.dataset_name + "_2hop.pickle"
    file_path = args.data + "/" + file_name
    print(file_path)
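    # The write itself is not shown; presumably the dictionary is pickled
    # along these lines (an assumption):
    import pickle
    with open(file_path, 'wb') as handle:
        pickle.dump(nhop_dict, handle, protocol=pickle.HIGHEST_PROTOCOL)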
Example #12
def train_conv(args):

    # Creating convolution model here.
    ####################################

    # for e in entity_embeddings:
    #     exx.append(e)

    print("Defining model")
    model_gat = SpKBGATModified(entity_embeddings, relation_embeddings,
                                args.entity_out_dim, args.entity_out_dim,
                                args.drop_GAT, args.alpha, args.nheads_GAT)
    print("Only Conv model trained")
    model_conv = SpKBGATConvOnly(entity_embeddings, relation_embeddings,
                                 args.entity_out_dim, args.entity_out_dim,
                                 args.drop_GAT, args.drop_conv, args.alpha,
                                 args.alpha_conv, args.nheads_GAT,
                                 args.out_channels)

    if CUDA:
        model_conv.cuda()
        model_gat.cuda()

    model_gat.load_state_dict(
        torch.load('{}/trained_{}.pth'.format(args.output_folder,
                                              args.epochs_gat - 1)))

    # ------------------------- Fuse e_type with model_gat.final_entity_embeddings (1: compress; 2: concatenate)
    # 1. Compress (unfinished, because the vectors in entity2vec_type_200 are not all 200-dimensional):
    # model_gat.final_entity_embeddings = torch.add(20 * model_gat.final_entity_embeddings, 0.8 * e_type)
    # 2. Tensor concatenation: Tensor: concat([tensor1, tensor2], dim, name='concat')
    # model_gat.final_entity_embeddings = torch.cat([model_gat.final_entity_embeddings, e_type], 1)
    #==================================================================================================
    #
    #
    for e in model_gat.final_entity_embeddings:
        # global n
        # n+=1
        e_ = e.cpu().detach().numpy()
        exx.append(e_)

    model_conv.final_entity_embeddings = model_gat.final_entity_embeddings
    model_conv.final_relation_embeddings = model_gat.final_relation_embeddings

    # -------------------------- Keep the post-GAT entity embeddings --------------------------
    # global sss
    # sss = str(model_gat.final_entity_embeddings.size())
    # print(sss)
    # global min_emb
    # global temp
    # min_emb = model_gat.final_entity_embeddings
    # temp = model_gat.final_entity_embeddings
    # print(temp.size())
    # print(min_emb.size())
    # min_emb = min_emb.cpu().detach().numpy()
    #======================================================================================================
    #
    Corpus_.batch_size = args.batch_size_conv
    Corpus_.invalid_valid_ratio = int(args.valid_invalid_ratio_conv)

    optimizer = torch.optim.Adam(model_conv.parameters(),
                                 lr=args.lr,
                                 weight_decay=args.weight_decay_conv)

    scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                step_size=25,
                                                gamma=0.5,
                                                last_epoch=-1)

    margin_loss = torch.nn.SoftMarginLoss()

    epoch_losses = []  # losses of all epochs
    print("Number of epochs {}".format(args.epochs_conv))

    loss_convKB = []
    for epoch in range(args.epochs_conv):
        print("\nepoch-> ", epoch)
        random.shuffle(Corpus_.train_triples)
        Corpus_.train_indices = np.array(list(Corpus_.train_triples)).astype(
            np.int32)

        model_conv.train()  # getting in training mode
        start_time = time.time()
        epoch_loss = []

        if len(Corpus_.train_indices) % args.batch_size_conv == 0:
            num_iters_per_epoch = len(
                Corpus_.train_indices) // args.batch_size_conv
        else:
            num_iters_per_epoch = (len(Corpus_.train_indices) //
                                   args.batch_size_conv) + 1

        for iters in range(num_iters_per_epoch):
            start_time_iter = time.time()
            train_indices, train_values = Corpus_.get_iteration_batch(iters)

            if CUDA:
                train_indices = Variable(
                    torch.LongTensor(train_indices)).cuda()
                train_values = Variable(torch.FloatTensor(train_values)).cuda()

            else:
                train_indices = Variable(torch.LongTensor(train_indices))
                train_values = Variable(torch.FloatTensor(train_values))

            preds = model_conv(Corpus_, Corpus_.train_adj_matrix,
                               train_indices)

            optimizer.zero_grad()

            loss = margin_loss(preds.view(-1), train_values.view(-1))

            loss.backward()
            optimizer.step()

            epoch_loss.append(loss.data.item())

            end_time_iter = time.time()

            print(
                "Iteration-> {0}  , Iteration_time-> {1:.4f} , Iteration_loss {2:.4f}"
                .format(iters, end_time_iter - start_time_iter,
                        loss.data.item()))

        scheduler.step()
        print("Epoch {} , average loss {} , epoch_time {}".format(
            epoch,
            sum(epoch_loss) / len(epoch_loss),
            time.time() - start_time))
        epoch_losses.append(sum(epoch_loss) / len(epoch_loss))
        loss_convKB.append(sum(epoch_loss) / len(epoch_loss))
        save_model(model_conv, args.data, epoch, args.output_folder + "conv/")

    with open("data/FB15k-237/loss_result/convKB_loss_mlp(2,2).txt",
              "a") as f4:
        for sub_re in loss_convKB:
            f4.write(str(sub_re))
            f4.write('\n')
Example #13
def train_conv(args):

    # Creating convolution model here.
    ####################################

    print("Defining model")
    if args.tanh:
        model_gat = SpKBGATModified(entity_embeddings, relation_embeddings, args.entity_out_dim, args.entity_out_dim,
                                    args.drop_GAT, args.alpha, args.nheads_GAT, 'tanh')
    else:
        model_gat = SpKBGATModified(entity_embeddings, relation_embeddings, args.entity_out_dim, args.entity_out_dim,
                                    args.drop_GAT, args.alpha, args.nheads_GAT, 'leakyrelu')
    print("Only Conv model trained")
    model_conv = SpKBGATConvOnly(entity_embeddings, relation_embeddings, args.entity_out_dim, args.entity_out_dim,
                                 args.drop_GAT, args.drop_conv, args.alpha, args.alpha_conv,
                                 args.nheads_GAT, args.out_channels)

    model_gat = nn.DataParallel(model_gat)
    model_conv = nn.DataParallel(model_conv)

    if CUDA:
        model_conv.cuda()
        model_gat.cuda()
    # Load GAT weights: use the freshly trained checkpoint unless a
    # pretrained one was supplied and no further GAT training was run.
    if args.load_gat is None or args.epochs_gat > 0:
        model_gat.load_state_dict(torch.load(
            '{0}gat/trained_{1}.pth'.format(args.output_folder, args.epochs_gat - 1)))
    else:
        model_gat.load_state_dict(torch.load(args.load_gat))

    if isinstance(model_conv, nn.DataParallel):
        if args.load_conv is None:
            model_conv.module.final_entity_embeddings = model_gat.module.final_entity_embeddings
            model_conv.module.final_relation_embeddings = model_gat.module.final_relation_embeddings
        else:
            model_conv.load_state_dict(torch.load(args.load_conv))
    else:
        model_conv.final_entity_embeddings = model_gat.final_entity_embeddings
        model_conv.final_relation_embeddings = model_gat.final_relation_embeddings

    Corpus_.batch_size = args.batch_size_conv
    Corpus_.invalid_valid_ratio = int(args.valid_invalid_ratio_conv)

    optimizer = torch.optim.Adam(
        model_conv.parameters(), lr=args.lr, weight_decay=args.weight_decay_conv)

    scheduler = torch.optim.lr_scheduler.StepLR(
        optimizer, step_size=25, gamma=0.5, last_epoch=-1)

    margin_loss = torch.nn.SoftMarginLoss()

    epoch_losses = []   # losses of all epochs
    print("Number of epochs {}".format(args.epochs_conv))

    for epoch in range(args.epochs_conv):
        print("\nepoch-> ", epoch)
        random.shuffle(Corpus_.train_triples)
        Corpus_.train_indices = np.array(
            list(Corpus_.train_triples)).astype(np.int32)

        model_conv.train()  # getting in training mode
        start_time = time.time()
        epoch_loss = []

        if len(Corpus_.train_indices) % args.batch_size_conv == 0:
            num_iters_per_epoch = len(
                Corpus_.train_indices) // args.batch_size_conv
        else:
            num_iters_per_epoch = (
                len(Corpus_.train_indices) // args.batch_size_conv) + 1

        for iters in range(num_iters_per_epoch):
            start_time_iter = time.time()
            train_indices, train_values = Corpus_.get_iteration_batch(iters)

            if CUDA:
                train_indices = Variable(
                    torch.LongTensor(train_indices)).cuda()
                train_values = Variable(torch.FloatTensor(train_values)).cuda()

            else:
                train_indices = Variable(torch.LongTensor(train_indices))
                train_values = Variable(torch.FloatTensor(train_values))

            preds = model_conv(
                Corpus_, Corpus_.train_adj_matrix, train_indices)

            optimizer.zero_grad()

            loss = margin_loss(preds.view(-1), train_values.view(-1))

            loss.backward()
            for param in model_conv.parameters():
                param.grad.data.clamp_(-1, 1)
            optimizer.step()

            epoch_loss.append(loss.data.item())

            end_time_iter = time.time()
            line = "Iteration-> {0}  , Iteration_time-> {1:.4f} , Iteration_loss {2:.4f}".format(
                iters, end_time_iter - start_time_iter, loss.data.item())
            print(line)
            write_to_file(args, line)

        scheduler.step()
        line = "Epoch {} , average loss {} , epoch_time {}".format(
            epoch, sum(epoch_loss) / len(epoch_loss), time.time() - start_time)
        epoch_losses.append(sum(epoch_loss) / len(epoch_loss))
        print(line)
        write_to_file(args, line)

        save_model(model_conv, args.data, epoch,
                   args.output_folder + "conv/")
Example #14
def train_gat(args):

    # Creating the gat model here.
    ####################################

    print("Defining model")

    print(
        "\nModel type -> GAT layer with {} heads used , Initial Embeddings training".format(args.nheads_GAT[0]))
    # Select the attention non-linearity from the command-line flag.
    activation = 'tanh' if args.tanh else 'leakyrelu'
    model_gat = SpKBGATModified(entity_embeddings, relation_embeddings, args.entity_out_dim, args.entity_out_dim,
                                args.drop_GAT, args.alpha, args.nheads_GAT, activation)

    model_gat = nn.DataParallel(model_gat)
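    # nn.DataParallel wraps the network, so learned attributes such as the
    # final embeddings are reached through model_gat.module after training.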


    if CUDA:
        model_gat.cuda()

    # Resume GAT training from an explicit checkpoint whenever one is given.
    if args.load_gat is not None:
        model_gat.load_state_dict(torch.load(args.load_gat))

    optimizer = torch.optim.Adam(
        model_gat.parameters(), lr=args.lr, weight_decay=args.weight_decay_gat)

    scheduler = torch.optim.lr_scheduler.StepLR(
        optimizer, step_size=500, gamma=0.5, last_epoch=-1)

    gat_loss_func = nn.MarginRankingLoss(margin=args.margin)
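    # MarginRankingLoss(x1, x2, y) = max(0, -y * (x1 - x2) + margin); it is
    # applied to triple scores inside batch_gat_loss below.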

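    # Pre-gather the 2-hop neighbourhood triples once when args.use_2hop is
    # set; otherwise an empty index tensor is passed to the model.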
    current_batch_2hop_indices = torch.tensor([], dtype=torch.long)
    if(args.use_2hop):
        current_batch_2hop_indices = Corpus_.get_batch_nhop_neighbors_all(args,
                                                                          Corpus_.unique_entities_train, node_neighbors_2hop)

    if CUDA:
        current_batch_2hop_indices = Variable(
            torch.LongTensor(current_batch_2hop_indices)).cuda()
    else:
        current_batch_2hop_indices = Variable(
            torch.LongTensor(current_batch_2hop_indices))

    epoch_losses = []   # losses of all epochs
    print("Number of epochs {}".format(args.epochs_gat))

    for epoch in range(args.epochs_gat):
        print("\nepoch-> ", epoch)
        random.shuffle(Corpus_.train_triples)
        Corpus_.train_indices = np.array(
            list(Corpus_.train_triples)).astype(np.int32)

        model_gat.train()  # switch the model to training mode
        start_time = time.time()
        epoch_loss = []

        if len(Corpus_.train_indices) % args.batch_size_gat == 0:
            num_iters_per_epoch = len(
                Corpus_.train_indices) // args.batch_size_gat
        else:
            num_iters_per_epoch = (
                len(Corpus_.train_indices) // args.batch_size_gat) + 1
        for iters in range(num_iters_per_epoch):
            start_time_iter = time.time()
            train_indices, train_values = Corpus_.get_iteration_batch(iters)

            if CUDA:
                train_indices = Variable(
                    torch.LongTensor(train_indices)).cuda()
                train_values = Variable(torch.FloatTensor(train_values)).cuda()

            else:
                train_indices = Variable(torch.LongTensor(train_indices))
                train_values = Variable(torch.FloatTensor(train_values))

            # forward pass
            entity_embed, relation_embed = model_gat(
                Corpus_, Corpus_.train_adj_matrix, train_indices, current_batch_2hop_indices)
            # print('Forward pass', entity_embed.shape, relation_embed.shape)
            optimizer.zero_grad()

            loss = batch_gat_loss(
                gat_loss_func, train_indices, entity_embed, relation_embed)

            loss.backward()
            optimizer.step()

            epoch_loss.append(loss.data.item())

            end_time_iter = time.time()

            line = "Iteration-> {0}  , Iteration_time-> {1:.4f} , Iteration_loss {2:.4f}".format(
                iters, end_time_iter - start_time_iter, loss.data.item())
            print(line)
            write_to_file(args, line)

        scheduler.step()
        line = "Epoch {} , average loss {} , epoch_time {}".format(
            epoch, sum(epoch_loss) / len(epoch_loss), time.time() - start_time)
        print(line)
        write_to_file(args, line)

        epoch_losses.append(sum(epoch_loss) / len(epoch_loss))

        save_model(model_gat, args.data, epoch,
                   args.output_folder + 'gat/')
    if CUDA:
        final_entity_embeddings = model_gat.module.final_entity_embeddings.cpu().detach().numpy()
        final_relation_embeddings = model_gat.module.final_relation_embeddings.cpu().detach().numpy()

    else:
        final_entity_embeddings = model_gat.module.final_entity_embeddings.detach().numpy()
        final_relation_embeddings = model_gat.module.final_relation_embeddings.detach().numpy()
    save_embeddings(args, final_entity_embeddings, final_relation_embeddings)
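
batch_gat_loss is likewise not defined in this listing. A plausible sketch, assuming a TransE-style L1 score ||h + r - t|| and a batch whose first half holds valid triples and whose second half holds their corrupted counterparts (both are assumptions about the surrounding code):

import torch

def batch_gat_loss(gat_loss_func, train_indices, entity_embed, relation_embed):
    # Assumed 1:1 split between positive and negative triples in the batch.
    len_pos = train_indices.shape[0] // 2
    pos, neg = train_indices[:len_pos], train_indices[len_pos:]

    # TransE-style distance: smaller means a more plausible (head, rel, tail).
    pos_norm = torch.norm(entity_embed[pos[:, 0]] + relation_embed[pos[:, 1]]
                          - entity_embed[pos[:, 2]], p=1, dim=1)
    neg_norm = torch.norm(entity_embed[neg[:, 0]] + relation_embed[neg[:, 1]]
                          - entity_embed[neg[:, 2]], p=1, dim=1)

    # y = +1 asks MarginRankingLoss to rank neg_norm above pos_norm by `margin`.
    y = torch.ones(len_pos, device=pos_norm.device)
    return gat_loss_func(neg_norm, pos_norm, y)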
Example #15
0
def train_conv(args):

    # Creating convolution model here.
    ####################################

    print("Defining model")
    model_gat = SpKBGATModified(entity_embeddings, relation_embeddings,
                                args.entity_out_dim, args.entity_out_dim,
                                args.drop_GAT, args.alpha, args.nheads_GAT)
    print("Only Conv model trained")
    model_conv = SpKBGATConvOnly(entity_embeddings, relation_embeddings,
                                 args.entity_out_dim, args.entity_out_dim,
                                 args.drop_GAT, args.drop_conv, args.alpha,
                                 args.alpha_conv, args.nheads_GAT,
                                 args.out_channels)

    if CUDA:
        model_conv.cuda()
        model_gat.cuda()

    model_gat.load_state_dict(
        torch.load('{}/trained_{}.pth'.format(args.output_folder,
                                              args.epochs_gat - 1)))
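    # ConvKB is trained on top of the GAT output: copy the final GAT
    # embeddings into the conv model before optimizing it.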

    model_conv.final_entity_embeddings = model_gat.final_entity_embeddings
    model_conv.final_relation_embeddings = model_gat.final_relation_embeddings

    Corpus_.batch_size = args.batch_size_conv
    Corpus_.invalid_valid_ratio = int(args.valid_invalid_ratio_conv)

    optimizer = torch.optim.Adam(model_conv.parameters(),
                                 lr=args.lr,
                                 weight_decay=args.weight_decay_conv)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                step_size=25,
                                                gamma=0.5,
                                                last_epoch=-1)
    margin_loss = torch.nn.SoftMarginLoss()

    epoch_losses = []  # losses of all epochs
    print("Number of epochs {}".format(args.epochs_conv))

    for epoch in range(args.epochs_conv):
        print("\nepoch-> ", epoch)
        random.shuffle(Corpus_.train_triples)
        Corpus_.train_indices = np.array(list(Corpus_.train_triples)).astype(
            np.int32)

        model_conv.train()  # switch the model to training mode
        start_time = time.time()
        epoch_loss = []

        if len(Corpus_.train_indices) % args.batch_size_conv == 0:
            num_iters_per_epoch = len(
                Corpus_.train_indices) // args.batch_size_conv
        else:
            num_iters_per_epoch = (len(Corpus_.train_indices) //
                                   args.batch_size_conv) + 1

        for iters in range(num_iters_per_epoch):
            start_time_iter = time.time()
            train_indices, train_values = Corpus_.get_iteration_batch(iters)

            if CUDA:
                train_indices = Variable(
                    torch.LongTensor(train_indices)).cuda()
                train_values = Variable(torch.FloatTensor(train_values)).cuda()
            else:
                train_indices = Variable(torch.LongTensor(train_indices))
                train_values = Variable(torch.FloatTensor(train_values))

            preds = model_conv(Corpus_, Corpus_.train_adj_matrix,
                               train_indices)
            optimizer.zero_grad()

            loss = margin_loss(preds.view(-1), train_values.view(-1))

            loss.backward()
            optimizer.step()

            epoch_loss.append(loss.data.item())

            end_time_iter = time.time()

            print("Iteration-> {0}  , Iteration_time-> {1:.4f} , Iteration_loss {2:.4f}".format(
                iters, end_time_iter - start_time_iter, loss.data.item()))

        scheduler.step()
        print("Epoch {} , average loss {} , epoch_time {}".format(
            epoch,
            sum(epoch_loss) / len(epoch_loss),
            time.time() - start_time))
        epoch_losses.append(sum(epoch_loss) / len(epoch_loss))

        save_model(model_conv, args.data, epoch, args.output_folder + "conv/")
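
save_model is also external to this listing. Since the load calls above expect checkpoints named trained_<epoch>.pth inside the given folder, a minimal sketch might be (the body is an assumption; data_name is accepted only for parity with the call sites):

import os
import torch

def save_model(model, data_name, epoch, folder):
    # Checkpoint the model so it can later be reloaded as trained_<epoch>.pth.
    os.makedirs(folder, exist_ok=True)
    torch.save(model.state_dict(),
               os.path.join(folder, 'trained_{}.pth'.format(epoch)))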