Code Example #1
def train_conv(args):
    # Creating convolution model here.
    ####################################

    print("Defining model")
    model_gat = SpKBGATModified(entity_embeddings, relation_embeddings,
                                args.entity_out_dim, args.entity_out_dim,
                                args.drop_GAT, args.alpha, args.nheads_GAT,
                                args.use_simple_layer)
    print("Only Conv model trained")
    model_conv = SpKBGATConvOnly(entity_embeddings, relation_embeddings,
                                 args.entity_out_dim, args.entity_out_dim,
                                 args.drop_GAT, args.drop_conv, args.alpha,
                                 args.alpha_conv, args.nheads_GAT,
                                 args.out_channels)

    wandb.watch(model_conv, log="all")

    if CUDA:
        model_conv.cuda()
        model_gat.cuda()
    if args.use_2hop:
        model_gat.load_state_dict(
            torch.load('{}/trained_{}_paths.pth'.format(
                args.output_folder, args.epochs_gat - 1)))
    else:
        model_gat.load_state_dict(
            torch.load('{}/trained_{}.pth'.format(args.output_folder,
                                                  args.epochs_gat - 1)))
    model_conv.final_entity_embeddings = model_gat.final_entity_embeddings
    model_conv.final_relation_embeddings = model_gat.final_relation_embeddings

    Corpus_.batch_size = args.batch_size_conv
    Corpus_.invalid_valid_ratio = int(args.valid_invalid_ratio_conv)

    optimizer = torch.optim.Adam(model_conv.parameters(),
                                 lr=args.lr,
                                 weight_decay=args.weight_decay_conv)

    scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                step_size=25,
                                                gamma=0.5,
                                                last_epoch=-1)

    margin_loss = torch.nn.SoftMarginLoss()

    epoch_losses = []  # losses of all epochs
    print("Number of epochs {}".format(args.epochs_conv))

    for epoch in range(args.epochs_conv):
        print("\nepoch-> ", epoch)
        random.shuffle(Corpus_.train_triples)
        Corpus_.train_indices = np.array(list(Corpus_.train_triples)).astype(
            np.int32)

        model_conv.train()  # getting in training mode
        start_time = time.time()
        epoch_loss = []

        if len(Corpus_.train_indices) % args.batch_size_conv == 0:
            num_iters_per_epoch = len(
                Corpus_.train_indices) // args.batch_size_conv
        else:
            num_iters_per_epoch = (len(Corpus_.train_indices) //
                                   args.batch_size_conv) + 1

        for iters in range(num_iters_per_epoch):
            start_time_iter = time.time()
            train_indices, train_values = Corpus_.get_iteration_batch(iters)

            if CUDA:
                train_indices = Variable(
                    torch.LongTensor(train_indices)).cuda()
                train_values = Variable(torch.FloatTensor(train_values)).cuda()

            else:
                train_indices = Variable(torch.LongTensor(train_indices))
                train_values = Variable(torch.FloatTensor(train_values))

            preds = model_conv(Corpus_, Corpus_.train_adj_matrix,
                               train_indices)

            optimizer.zero_grad()

            loss = margin_loss(preds.view(-1), train_values.view(-1))

            loss.backward()
            # Clip the gradient norm between backward() and step(), where it
            # actually affects the update (clip_grad_norm_ is the in-place,
            # non-deprecated API; calling it before any backward pass is a no-op).
            torch.nn.utils.clip_grad_norm_(model_conv.parameters(), 0.5)
            optimizer.step()

            epoch_loss.append(loss.data.item())

            end_time_iter = time.time()

            print(
                "Iteration-> {0}  , Iteration_time-> {1:.4f} , Iteration_loss {2:.4f}"
                .format(iters, end_time_iter - start_time_iter,
                        loss.data.item()))

        scheduler.step()
        print("Epoch {} , average loss {} , epoch_time {}".format(
            epoch,
            sum(epoch_loss) / len(epoch_loss),
            time.time() - start_time))
        epoch_losses.append(sum(epoch_loss) / len(epoch_loss))
        wandb.log({'epoch_loss': epoch_losses[-1]})
        if (epoch + 1) % 50 == 0 or (epoch + 1) == args.epochs_conv:
            save_model(model_conv, args.data, epoch,
                       args.output_folder + "conv/", args.use_2hop)
        if (epoch + 1) == args.epochs_conv:
            save_final(model_conv, 'decoder', wandb.run.dir, args.use_2hop)
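A quick aside on the loss used throughout these examples: SoftMarginLoss expects targets in {+1, -1}, so get_iteration_batch is assumed to label valid triples +1 and corrupted ones -1. A minimal, self-contained sketch of the convention (the scores are made up):

import torch

scores = torch.tensor([2.0, -1.5])    # hypothetical decoder scores: one valid, one corrupted triple
targets = torch.tensor([1.0, -1.0])   # +1 = valid triple, -1 = corrupted triple
loss = torch.nn.SoftMarginLoss()(scores, targets)
print(loss.item())  # mean of log(1 + exp(-target * score)) over the batch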
Code Example #2
File: train_convkb.py  Project: gzn00417/gcn_conkb
def train_conv(args):

    # Creating convolution model here.
    ####################################

    print("Defining model")
    model_gat = SpKBGATModified(
        initial_entity_emb=entity_embeddings,
        initial_relation_emb=relation_embeddings,
        entity_out_dim=args.entity_out_dim,
        relation_out_dim=args.entity_out_dim,
        drop_GAT=args.drop_GAT,
        alpha=args.alpha,
        nheads_GAT=args.nheads_GAT,
    )
    model_conv = SpKBGATConvOnly(
        initial_entity_emb=entity_embeddings,
        initial_relation_emb=relation_embeddings,
        entity_out_dim=args.entity_out_dim,
        relation_out_dim=args.entity_out_dim,
        drop_GAT=args.drop_GAT,
        drop_conv=args.drop_conv,
        alpha=args.alpha,
        alpha_conv=args.alpha_conv,
        nheads_GAT=args.nheads_GAT,
        conv_out_channels=args.out_channels,
    )
    if CUDA:
        model_conv.cuda()
        model_gat.cuda()
    print("Only Conv model trained")

    model_gat.load_state_dict(
        torch.load("{}/trained_{}.pth".format(args.output_folder,
                                              args.epochs_gat - 1)),
        strict=False,
    )
    model_conv.final_entity_embeddings = model_gat.final_entity_embeddings
    model_conv.final_relation_embeddings = model_gat.final_relation_embeddings

    Corpus_.batch_size = args.batch_size_conv
    Corpus_.invalid_valid_ratio = int(args.valid_invalid_ratio_conv)

    optimizer = torch.optim.Adam(model_conv.parameters(),
                                 lr=args.lr,
                                 weight_decay=args.weight_decay_conv)

    scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                step_size=25,
                                                gamma=0.5,
                                                last_epoch=-1)

    margin_loss = torch.nn.SoftMarginLoss()

    epoch_losses = []  # losses of all epochs
    print("Number of epochs {}".format(args.epochs_conv))

    for epoch in range(args.epochs_conv):
        print("\nepoch-> ", epoch)
        random.shuffle(Corpus_.train_triples)
        Corpus_.train_indices = np.array(list(Corpus_.train_triples)).astype(
            np.int32)

        model_conv.train()  # getting in training mode
        start_time = time.time()
        epoch_loss = []

        if len(Corpus_.train_indices) % args.batch_size_conv == 0:
            num_iters_per_epoch = len(
                Corpus_.train_indices) // args.batch_size_conv
        else:
            num_iters_per_epoch = (len(Corpus_.train_indices) //
                                   args.batch_size_conv) + 1

        for iters in range(num_iters_per_epoch):
            start_time_iter = time.time()

            train_indices, train_values = Corpus_.get_iteration_batch(iters)
            train_indices = Variable(torch.LongTensor(train_indices))
            train_values = Variable(torch.FloatTensor(train_values))
            if CUDA:
                train_indices = train_indices.cuda()
                train_values = train_values.cuda()

            pred = model_conv(Corpus_, Corpus_.train_adj_matrix, train_indices)

            optimizer.zero_grad()

            loss = margin_loss(pred.view(-1), train_values.view(-1))

            loss.backward()
            optimizer.step()

            epoch_loss.append(loss.data.item())

            end_time_iter = time.time()

            print(
                "Iteration-> {0}  , Iteration_time-> {1:.4f} , Iteration_loss {2:.4f}"
                .format(iters, end_time_iter - start_time_iter,
                        loss.data.item()))

        scheduler.step()
        print("Epoch {} , average loss {} , epoch_time {}".format(
            epoch,
            sum(epoch_loss) / len(epoch_loss),
            time.time() - start_time))
        epoch_losses.append(sum(epoch_loss) / len(epoch_loss))

    # The conv checkpoint is saved under the GAT epoch index (args.epochs_gat - 1),
    # matching the naming of the GAT checkpoint loaded above.
    save_model(model_conv, args.data, args.epochs_gat - 1,
               args.output_folder + "conv/")
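Unlike the other examples, this variant passes strict=False when loading the GAT checkpoint, so key mismatches between checkpoint and model are tolerated instead of raising. A toy illustration (a plain nn.Linear, not the repo's model):

import torch
import torch.nn as nn

net = nn.Linear(4, 2)
ckpt = {"weight": torch.zeros(2, 4)}              # checkpoint missing the "bias" key
result = net.load_state_dict(ckpt, strict=False)  # would raise a RuntimeError with strict=True
print(result.missing_keys)                        # ['bias']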
Code Example #3
def train_conv(args):

    # Creating convolution model here.
    ####################################

    global initial_entity_emb_params

    print("Defining model")
    model_gat = SpKBGATModified(entity_embeddings, relation_embeddings,
                                args.entity_out_dim, args.entity_out_dim,
                                args.drop_GAT, args.alpha, args.nheads_GAT,
                                initial_entity_emb_params)
    print("Only Conv model trained")
    model_conv = SpKBGATConvOnly(entity_embeddings, relation_embeddings,
                                 args.entity_out_dim, args.entity_out_dim,
                                 args.drop_GAT, args.drop_conv, args.alpha,
                                 args.alpha_conv, args.nheads_GAT,
                                 args.out_channels)

    if CUDA:
        model_conv.cuda()
        model_gat.cuda()

    # model_gat.load_state_dict(torch.load(
    #     '{}/trained_{}.pth'.format(args.output_folder, args.epochs_gat - 1)))
    model_gat.load_state_dict(
        torch.load(os.path.join(args.output_folder,
                                'trained_{}.pth'.format(0))))
    # if os.path.exists('{0}trained_{1}.pth'.format(args.output_folder + "conv/", 0)):
    #   model_conv.load_state_dict(torch.load(
    #       os.path.join(args.output_folder + "conv/",'trained_{}.pth'.format(0))))
    # else:
    #   model_conv.final_entity_embeddings = model_gat.final_entity_embeddings
    #   model_conv.final_relation_embeddings = model_gat.final_relation_embeddings

    # import pdb; pdb.set_trace()
    model_conv.final_entity_embeddings = model_gat.final_entity_embeddings
    model_conv.final_relation_embeddings = model_gat.final_relation_embeddings

    model_conv.final_entity_embeddings.requires_grad = False
    model_conv.final_relation_embeddings.requires_grad = False

    Corpus_.batch_size = args.batch_size_conv
    Corpus_.invalid_valid_ratio = int(args.valid_invalid_ratio_conv)

    optimizer = torch.optim.Adam(model_conv.parameters(),
                                 lr=args.lr,
                                 weight_decay=args.weight_decay_conv)

    scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                step_size=25,
                                                gamma=0.5,
                                                last_epoch=-1)

    margin_loss = torch.nn.SoftMarginLoss()
    bce_loss = torch.nn.functional.binary_cross_entropy_with_logits

    epoch_losses = []  # losses of all epochs
    print("Number of epochs {}".format(args.epochs_conv))

    for epoch in range(args.epochs_conv):
        print("\nepoch-> ", epoch)

        random.shuffle(Corpus_.train_triples)

        Corpus_.train_indices = np.array(list(Corpus_.train_triples)).astype(
            np.int32)

        model_conv.train()  # getting in training mode
        start_time = time.time()
        epoch_loss = []

        if len(Corpus_.train_indices) % args.batch_size_conv == 0:
            num_iters_per_epoch = len(
                Corpus_.train_indices) // args.batch_size_conv
        else:
            num_iters_per_epoch = (len(Corpus_.train_indices) //
                                   args.batch_size_conv) + 1

        for iters in tqdm(range(num_iters_per_epoch)):
            # print(model_conv.final_entity_embeddings[0][:10],model_conv.final_entity_embeddings[50][:10],model_conv.final_entity_embeddings[100][:10])
            # print(model_conv.final_relation_embeddings[0][:10],model_conv.final_relation_embeddings[50][:10],model_conv.final_relation_embeddings[100][:10])
            start_time_iter = time.time()
            # train_indices, train_values = Corpus_.get_iteration_batch(iters)
            # NOTE: this variant always fetches batch 0; restore the call above
            # (with `iters`) to iterate over the full training set.
            train_indices, train_values = Corpus_.get_iteration_batch(0)
            # print(train_indices.tolist())
            # print(train_indices[:3],train_indices[64:67],train_indices[128:131],train_indices[192:198])
            # print(train_values[:3],train_values[64:67],train_values[128:131],train_values[192:198])

            # # import pdb; pdb.set_trace()
            # sampled_entities = np.concatenate((np.array(model_conv.final_entity_embeddings[train_indices[:,0]].detach().cpu()),np.array(model_conv.final_entity_embeddings[train_indices[:,2]].detach().cpu())),axis=0)
            # mean_vector = np.mean(sampled_entities,axis=-1)
            # norm_entities = np.sqrt(np.sum(np.square(sampled_entities),axis=-1))
            # norm_mean = np.sqrt(np.sum(np.square(sampled_entities),axis=-1))
            # den = norm_mean*norm_entities
            # num = np.dot(mean_vector,norm_entities.transpose())
            # cosine_dist = num/den
            # mean_cosine_dist = np.mean(cosine_dist)
            # median_cosine_dist = np.median(cosine_dist)
            # min_norm = np.min(norm_entities)
            # max_norm = np.max(norm_entities)
            # print('mean_cosine_dist: ',mean_cosine_dist)
            # print('median_cosine_dist: ',median_cosine_dist)
            # print('min_norm: ',min_norm)
            # print('max_norm: ',max_norm)

            if CUDA:
                train_indices = Variable(
                    torch.LongTensor(train_indices)).cuda()
                train_values = Variable(torch.FloatTensor(train_values)).cuda()

            else:
                train_indices = Variable(torch.LongTensor(train_indices))
                train_values = Variable(torch.FloatTensor(train_values))

            preds = model_conv(Corpus_, Corpus_.train_adj_matrix,
                               train_indices)

            optimizer.zero_grad()

            # valid_preds = preds[:args.batch_size_conv]
            # valid_preds = valid_preds.repeat(int(args.valid_invalid_ratio_conv)*2, 1)
            # valid_values = torch.ones(valid_preds.shape)
            # if CUDA:
            #   valid_values = valid_values.cuda()
            # preds = torch.cat((preds,valid_preds),dim=0)
            # train_values = torch.cat((train_values,valid_values),dim=0)

            # loss = margin_loss(preds.view(-1), train_values.view(-1))
            train_values = train_values.view(-1)
            train_values = (train_values + 1) / 2
            train_values = train_values.float()
            preds = preds.view(-1)
            # import pdb; pdb.set_trace()
            # Debug dumps, disabled: printing full tensors every iteration floods stdout.
            # print(preds)
            # print(train_values)
            weights = train_values + (1 - train_values) * 1 / (
                args.valid_invalid_ratio_conv * 2)
            loss = bce_loss(preds, train_values, weight=weights)  # train_values is already float

            loss.backward()
            optimizer.step()

            epoch_loss.append(loss.data.item())

            end_time_iter = time.time()

            print(
                "Iteration-> {0}  , Iteration_time-> {1:.4f} , Iteration_loss {2:.4f}"
                .format(iters, end_time_iter - start_time_iter,
                        loss.data.item()))
            # break
        scheduler.step()
        print("Epoch {} , average loss {} , epoch_time {}".format(
            epoch,
            sum(epoch_loss) / len(epoch_loss),
            time.time() - start_time))
        epoch_losses.append(sum(epoch_loss) / len(epoch_loss))

        # save_model(model_conv, args.data, epoch,
        #            args.output_folder + "conv/")
        save_model(model_conv, args.data, 0, args.output_folder + "conv/")
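The loss in this variant replaces SoftMarginLoss with weighted binary cross-entropy: targets in {-1, +1} are mapped to {0, 1}, and negative samples are downweighted by 1 / (valid_invalid_ratio_conv * 2). A self-contained sketch with a made-up ratio:

import torch

ratio = 2                                          # stands in for args.valid_invalid_ratio_conv
y = torch.tensor([1.0, -1.0, -1.0, -1.0, -1.0])    # one valid triple, four corrupted
y01 = (y + 1) / 2                                  # map {-1, +1} -> {0, 1}
weights = y01 + (1 - y01) / (ratio * 2)            # each negative weighted 1 / (ratio * 2)
logits = torch.tensor([1.2, -0.3, 0.1, -2.0, 0.5])
loss = torch.nn.functional.binary_cross_entropy_with_logits(logits, y01, weight=weights)
print(loss.item())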
Code Example #4
def train_conv(args):
    # Creating convolution model here.
    ####################################

    print("Defining model")
    model_gat = SpKBGATModified(entity_embeddings, relation_embeddings,
                                args.entity_out_dim, args.entity_out_dim,
                                args.drop_GAT, args.alpha, args.nheads_GAT)
    print("Only Conv model trained")
    model_conv = SpKBGATConvOnly(entity_embeddings, relation_embeddings,
                                 args.entity_out_dim, args.entity_out_dim,
                                 args.drop_GAT, args.drop_conv, args.alpha,
                                 args.alpha_conv, args.nheads_GAT,
                                 args.out_channels)

    if CUDA:
        model_conv.cuda()
        model_gat.cuda()

    model_gat.load_state_dict(
        torch.load('{}/trained_{}.pth'.format(args.output_folder,
                                              args.epochs_gat - 1)))
    model_conv.final_entity_embeddings = model_gat.final_entity_embeddings
    model_conv.final_relation_embeddings = model_gat.final_relation_embeddings

    Corpus_.batch_size = args.batch_size_conv
    Corpus_.invalid_valid_ratio = int(args.valid_invalid_ratio_conv)

    optimizer = torch.optim.Adam(model_conv.parameters(),
                                 lr=args.lr,
                                 weight_decay=args.weight_decay_conv)

    scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                step_size=25,
                                                gamma=0.5,
                                                last_epoch=-1)

    margin_loss = torch.nn.SoftMarginLoss()

    epoch_losses = []  # losses of all epochs
    print("Number of epochs {}".format(args.epochs_conv))

    for epoch in range(args.epochs_conv):
        random.shuffle(Corpus_.train_triples)
        Corpus_.train_indices = np.array(list(Corpus_.train_triples)).astype(
            np.int32)

        model_conv.train()  # getting in training mode
        start_time = time.time()
        epoch_loss = []

        if len(Corpus_.train_indices) % args.batch_size_conv == 0:
            num_iters_per_epoch = len(
                Corpus_.train_indices) // args.batch_size_conv
        else:
            num_iters_per_epoch = (len(Corpus_.train_indices) //
                                   args.batch_size_conv) + 1

        for iters in range(num_iters_per_epoch):
            start_time_iter = time.time()
            train_indices, train_values = Corpus_.get_iteration_batch(iters)

            if CUDA:
                train_indices = Variable(
                    torch.LongTensor(train_indices)).cuda()
                train_values = Variable(torch.FloatTensor(train_values)).cuda()

            else:
                train_indices = Variable(torch.LongTensor(train_indices))
                train_values = Variable(torch.FloatTensor(train_values))

            preds = model_conv(Corpus_, Corpus_.train_adj_matrix,
                               train_indices)

            optimizer.zero_grad()

            loss = margin_loss(preds.view(-1), train_values.view(-1))

            loss.backward()
            optimizer.step()

            epoch_loss.append(loss.data.item())

            end_time_iter = time.time()

            if iters % 500 == 0:
                print(
                    "Iteration-> {0}  , Iteration_time-> {1:.4f} , Iteration_loss {2:.4f}"
                    .format(iters, end_time_iter - start_time_iter,
                            loss.data.item()))

            summary.add_scalar('loss/conv_loss_iter', loss.data.item(),
                               iters + epoch * num_iters_per_epoch)

        scheduler.step()

        if epoch % 10 == 0:
            print("Epoch {} , average loss {} , epoch_time {}".format(
                epoch,
                sum(epoch_loss) / len(epoch_loss),
                time.time() - start_time))
        epoch_losses.append(sum(epoch_loss) / len(epoch_loss))
        summary.add_scalar('loss/conv_loss_epoch',
                           sum(epoch_loss) / len(epoch_loss), epoch)

        if (epoch + 1) % 10 == 0:
            save_model(model_conv, args.data, epoch,
                       args.output_folder + "conv/")

    # Write the per-epoch losses to a dated log file.
    now = time.localtime()
    with open(
            (args.output_folder + "train_conv_epoch_losses_{}-{}-{}.txt").format(
                now.tm_year, now.tm_mon, now.tm_mday), 'w') as f:
        for i in epoch_losses:
            f.write(str(i))
            f.write('\n')
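All of these examples share the same StepLR schedule, which halves the learning rate every 25 epochs. A quick check of the decay, assuming an initial lr of 1e-3:

import torch

param = torch.nn.Parameter(torch.zeros(1))
opt = torch.optim.Adam([param], lr=1e-3)
sched = torch.optim.lr_scheduler.StepLR(opt, step_size=25, gamma=0.5)
for _ in range(60):
    opt.step()    # optimizer first, then scheduler, as in the loops above
    sched.step()
print(opt.param_groups[0]["lr"])  # 1e-3 * 0.5 ** 2 = 2.5e-4 after 60 epochs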
Code Example #5
File: main.py  Project: gecongcong/TransGAT
def train_conv(args):
    # Creating convolution model here.
    ####################################
    print("Defining model")
    model_gat = SpKBGATModified(entity_embeddings, relation_embeddings, nhop_embeddings, nhop_array, args.entity_out_dim, args.entity_out_dim,
                                args.drop_GAT, args.alpha, args.nheads_GAT)
    print("Only Conv model trained")
    # Variant without the nhop embeddings:
    # model_conv = SpKBGATConvOnly(entity_embeddings, relation_embeddings, args.entity_out_dim, args.entity_out_dim,
    #                              args.drop_GAT, args.drop_conv, args.alpha, args.alpha_conv,
    #                              args.nheads_GAT, args.out_channels)
    model_conv = SpKBGATConvOnly(entity_embeddings, relation_embeddings, nhop_embeddings, args.entity_out_dim,
                                 args.entity_out_dim,
                                 args.drop_GAT, args.drop_conv, args.alpha, args.alpha_conv,
                                 args.nheads_GAT, args.out_channels)
    if CUDA:
        model_conv.cuda()
        model_gat.cuda()

    model_gat.load_state_dict(torch.load(
        '{}trained.pth'.format(args.output_folder)))
    model_conv.final_entity_embeddings = model_gat.final_entity_embeddings
    model_conv.final_relation_embeddings = model_gat.final_relation_embeddings_new

    Corpus_.batch_size = args.batch_size_conv
    Corpus_.invalid_valid_ratio = int(args.valid_invalid_ratio_conv)

    optimizer = torch.optim.Adam(model_conv.parameters(), lr=args.lr, weight_decay=args.weight_decay_conv)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=25, gamma=0.5, last_epoch=-1)

    margin_loss = torch.nn.SoftMarginLoss()

    epoch_losses = []  # losses of all epochs
    print("Number of epochs {}".format(args.epochs_conv))

    """
            Add by cc
            Add early stopping with patience
            if patience times in a row score_fn gives a result lower than the best result,
            than the training will be stopped
            """
    counter = 0
    best_score = None
    for epoch in range(args.epochs_conv):
        print("\nepoch-> ", epoch)
        random.shuffle(Corpus_.train_triples)
        Corpus_.train_indices = np.array(
            list(Corpus_.train_triples)).astype(np.int32)

        model_conv.train()  # getting in training mode
        start_time = time.time()
        epoch_loss = []

        if len(Corpus_.train_indices) % args.batch_size_conv == 0:
            num_iters_per_epoch = len(Corpus_.train_indices) // args.batch_size_conv
        else:
            num_iters_per_epoch = (len(Corpus_.train_indices) // args.batch_size_conv) + 1

        for iters in range(num_iters_per_epoch):
            start_time_iter = time.time()
            train_indices, train_values = Corpus_.get_iteration_batch(iters)

            if CUDA:
                # Keep indices and values on the same CUDA device.
                train_indices = Variable(torch.LongTensor(train_indices)).cuda()
                train_values = Variable(torch.FloatTensor(train_values)).cuda()

            else:
                train_indices = Variable(torch.LongTensor(train_indices))
                train_values = Variable(torch.FloatTensor(train_values))

            preds = model_conv(
                Corpus_, Corpus_.train_adj_matrix, train_indices)

            optimizer.zero_grad()

            loss = margin_loss(preds.view(-1), train_values.view(-1))

            loss.backward()
            optimizer.step()

            epoch_loss.append(loss.data.item())

            end_time_iter = time.time()

            print("Iteration-> {0}  , Iteration_time-> {1:.4f} , Iteration_loss {2:.4f}".format(
                iters, end_time_iter - start_time_iter, loss.data.item()))
            writer.add_scalar('WN18RR_add_conv_epoch [add (h,t)->r]: ConvKB per iteration loss--iter', loss.data.item(), iters)
        scheduler.step()
        avg_loss = sum(epoch_loss) / len(epoch_loss)
        print("Epoch {} , average loss {} , epoch_time {}".format(
            epoch, avg_loss, time.time() - start_time))
        epoch_losses.append(avg_loss)

        """
                early stopping
                """
        if best_score is None:
            best_score = 99
        elif avg_loss > best_score:
            counter += 1
            if counter >= args.patience_conv:
                break
        else:
            best_score = avg_loss
            counter = 0

        writer.add_scalar('WN18RR_add_conv_epoch [add (h,t)->r]: ConvKB average loss--epoch', sum(epoch_loss) / len(epoch_loss), epoch)
        save_model(model_conv, args.data, epoch, args.output_folder + "conv/")
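The patience logic added in this example can be factored into a small helper; a minimal sketch (should_stop is an illustrative name, not part of the repo):

def should_stop(avg_loss, best_score, counter, patience):
    """Return (stop, best_score, counter) for loss-based early stopping."""
    if best_score is None or avg_loss < best_score:
        return False, avg_loss, 0             # improved: record best, reset patience counter
    counter += 1                              # no improvement this epoch
    return counter >= patience, best_score, counter

# Usage at the end of each epoch, e.g.:
# stop, best_score, counter = should_stop(avg_loss, best_score, counter, args.patience_conv)
# if stop: break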
Code Example #6
def train_conv(args):

    # Creating convolution model here.
    ####################################

    # for e in entity_embeddings:
    #     exx.append(e)

    print("Defining model")
    model_gat = SpKBGATModified(entity_embeddings, relation_embeddings,
                                args.entity_out_dim, args.entity_out_dim,
                                args.drop_GAT, args.alpha, args.nheads_GAT)
    print("Only Conv model trained")
    model_conv = SpKBGATConvOnly(entity_embeddings, relation_embeddings,
                                 args.entity_out_dim, args.entity_out_dim,
                                 args.drop_GAT, args.drop_conv, args.alpha,
                                 args.alpha_conv, args.nheads_GAT,
                                 args.out_channels)

    if CUDA:
        model_conv.cuda()
        model_gat.cuda()

    model_gat.load_state_dict(
        torch.load('{}/trained_{}.pth'.format(args.output_folder,
                                              args.epochs_gat - 1)))

    # ------------------- Fuse e_type with model_gat.final_entity_embeddings (1: weighted sum; 2: concatenation)
    # 1. Weighted sum (unfinished, since not all rows of entity2vec_type_200 are 200-dimensional):
    # model_gat.final_entity_embeddings = torch.add(20 * model_gat.final_entity_embeddings, 0.8 * e_type)
    # 2. Tensor concatenation: concat([tensor1, tensor2], dim):
    # model_gat.final_entity_embeddings = torch.cat([model_gat.final_entity_embeddings, e_type], 1)
    # ==================================================================================================

    for e in model_gat.final_entity_embeddings:
        # global n
        # n+=1
        e_ = e.cpu().detach().numpy()
        exx.append(e_)

    model_conv.final_entity_embeddings = model_gat.final_entity_embeddings
    model_conv.final_relation_embeddings = model_gat.final_relation_embeddings

    # -------------------------- Keep the post-GAT entity vectors --------------------------------------
    # global sss
    # sss=str(model_gat.final_entity_embeddings.size())
    # print(sss)
    # global min_emb
    # global temp
    # min_emb=model_gat.final_entity_embeddings
    # temp=model_gat.final_entity_embeddings
    # print(temp.size())
    # print(min_emb.size())
    # min_emb=min_emb.cpu().detach().numpy()
    #======================================================================================================
    #
    Corpus_.batch_size = args.batch_size_conv
    Corpus_.invalid_valid_ratio = int(args.valid_invalid_ratio_conv)

    optimizer = torch.optim.Adam(model_conv.parameters(),
                                 lr=args.lr,
                                 weight_decay=args.weight_decay_conv)

    scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                step_size=25,
                                                gamma=0.5,
                                                last_epoch=-1)

    margin_loss = torch.nn.SoftMarginLoss()

    epoch_losses = []  # losses of all epochs
    print("Number of epochs {}".format(args.epochs_conv))

    loss_convKB = []
    for epoch in range(args.epochs_conv):
        print("\nepoch-> ", epoch)
        random.shuffle(Corpus_.train_triples)
        Corpus_.train_indices = np.array(list(Corpus_.train_triples)).astype(
            np.int32)

        model_conv.train()  # getting in training mode
        start_time = time.time()
        epoch_loss = []

        if len(Corpus_.train_indices) % args.batch_size_conv == 0:
            num_iters_per_epoch = len(
                Corpus_.train_indices) // args.batch_size_conv
        else:
            num_iters_per_epoch = (len(Corpus_.train_indices) //
                                   args.batch_size_conv) + 1

        for iters in range(num_iters_per_epoch):
            start_time_iter = time.time()
            train_indices, train_values = Corpus_.get_iteration_batch(iters)

            if CUDA:
                train_indices = Variable(
                    torch.LongTensor(train_indices)).cuda()
                train_values = Variable(torch.FloatTensor(train_values)).cuda()

            else:
                train_indices = Variable(torch.LongTensor(train_indices))
                train_values = Variable(torch.FloatTensor(train_values))

            preds = model_conv(Corpus_, Corpus_.train_adj_matrix,
                               train_indices)

            optimizer.zero_grad()

            loss = margin_loss(preds.view(-1), train_values.view(-1))

            loss.backward()
            optimizer.step()

            epoch_loss.append(loss.data.item())

            end_time_iter = time.time()

            print(
                "Iteration-> {0}  , Iteration_time-> {1:.4f} , Iteration_loss {2:.4f}"
                .format(iters, end_time_iter - start_time_iter,
                        loss.data.item()))

        scheduler.step()
        print("Epoch {} , average loss {} , epoch_time {}".format(
            epoch,
            sum(epoch_loss) / len(epoch_loss),
            time.time() - start_time))
        epoch_losses.append(sum(epoch_loss) / len(epoch_loss))
        loss_convKB.append(sum(epoch_loss) / len(epoch_loss))
        save_model(model_conv, args.data, epoch, args.output_folder + "conv/")

    with open("data/FB15k-237/loss_result/convKB_loss_mlp(2,2).txt",
              "a") as f4:
        for sub_re in loss_convKB:
            f4.write(str(sub_re))
            f4.write('\n')
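The commented-out fusion ideas near the top of this example (weighted sum vs. concatenation of a type embedding with the GAT entity embeddings) look like this in isolation; e_type and the sizes here are hypothetical:

import torch

emb = torch.randn(5, 200)                     # post-GAT entity embeddings
e_type = torch.randn(5, 200)                  # hypothetical per-entity type embeddings
fused_sum = 20 * emb + 0.8 * e_type           # option 1: weighted sum, shape (5, 200)
fused_cat = torch.cat([emb, e_type], dim=1)   # option 2: concatenation, shape (5, 400)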
Code Example #7
def train_conv(args):

    # Creating convolution model here.
    ####################################

    print("Defining model")
    if args.tanh:
        model_gat = SpKBGATModified(entity_embeddings, relation_embeddings, args.entity_out_dim, args.entity_out_dim,
                                    args.drop_GAT, args.alpha, args.nheads_GAT, 'tanh')
    else:
        model_gat = SpKBGATModified(entity_embeddings, relation_embeddings, args.entity_out_dim, args.entity_out_dim,
                                    args.drop_GAT, args.alpha, args.nheads_GAT, 'leakyrelu')
    print("Only Conv model trained")
    model_conv = SpKBGATConvOnly(entity_embeddings, relation_embeddings, args.entity_out_dim, args.entity_out_dim,
                                 args.drop_GAT, args.drop_conv, args.alpha, args.alpha_conv,
                                 args.nheads_GAT, args.out_channels)

    model_gat = nn.DataParallel(model_gat)
    model_conv = nn.DataParallel(model_conv)

    if CUDA:
        model_conv.cuda()
        model_gat.cuda()
    # Load GAT weights: prefer the checkpoint trained in this run, otherwise a pretrained one.
    if (args.load_gat is None) or (args.epochs_gat > 0):
        model_gat.load_state_dict(torch.load(
            '{0}gat/trained_{1}.pth'.format(args.output_folder, args.epochs_gat - 1)))
    else:
        model_gat.load_state_dict(torch.load(args.load_gat))

    if isinstance(model_conv, nn.DataParallel):
        if args.load_conv is None:
            model_conv.module.final_entity_embeddings = model_gat.module.final_entity_embeddings
            model_conv.module.final_relation_embeddings = model_gat.module.final_relation_embeddings
        else:
            model_conv.load_state_dict(torch.load(args.load_conv))
    else:
        model_conv.final_entity_embeddings = model_gat.final_entity_embeddings
        model_conv.final_relation_embeddings = model_gat.final_relation_embeddings

    Corpus_.batch_size = args.batch_size_conv
    Corpus_.invalid_valid_ratio = int(args.valid_invalid_ratio_conv)

    optimizer = torch.optim.Adam(
        model_conv.parameters(), lr=args.lr, weight_decay=args.weight_decay_conv)

    scheduler = torch.optim.lr_scheduler.StepLR(
        optimizer, step_size=25, gamma=0.5, last_epoch=-1)

    margin_loss = torch.nn.SoftMarginLoss()

    epoch_losses = []   # losses of all epochs
    print("Number of epochs {}".format(args.epochs_conv))

    for epoch in range(args.epochs_conv):
        print("\nepoch-> ", epoch)
        random.shuffle(Corpus_.train_triples)
        Corpus_.train_indices = np.array(
            list(Corpus_.train_triples)).astype(np.int32)

        model_conv.train()  # getting in training mode
        start_time = time.time()
        epoch_loss = []

        if len(Corpus_.train_indices) % args.batch_size_conv == 0:
            num_iters_per_epoch = len(
                Corpus_.train_indices) // args.batch_size_conv
        else:
            num_iters_per_epoch = (
                len(Corpus_.train_indices) // args.batch_size_conv) + 1

        for iters in range(num_iters_per_epoch):
            start_time_iter = time.time()
            train_indices, train_values = Corpus_.get_iteration_batch(iters)

            if CUDA:
                train_indices = Variable(
                    torch.LongTensor(train_indices)).cuda()
                train_values = Variable(torch.FloatTensor(train_values)).cuda()

            else:
                train_indices = Variable(torch.LongTensor(train_indices))
                train_values = Variable(torch.FloatTensor(train_values))

            preds = model_conv(
                Corpus_, Corpus_.train_adj_matrix, train_indices)

            optimizer.zero_grad()

            loss = margin_loss(preds.view(-1), train_values.view(-1))

            loss.backward()
            for param in model_conv.parameters():
                if param.grad is not None:  # parameters without gradients are skipped
                    param.grad.data.clamp_(-1, 1)
            optimizer.step()

            epoch_loss.append(loss.data.item())

            end_time_iter = time.time()
            line = "Iteration-> {0}  , Iteration_time-> {1:.4f} , Iteration_loss {2:.4f}".format(
                iters, end_time_iter - start_time_iter, loss.data.item())
            print(line)
            write_to_file(args, line)

        scheduler.step()
        line = "Epoch {} , average loss {} , epoch_time {}".format(
            epoch, sum(epoch_loss) / len(epoch_loss), time.time() - start_time)
        epoch_losses.append(sum(epoch_loss) / len(epoch_loss))
        print(line)
        write_to_file(args, line)

        save_model(model_conv, args.data, epoch,
                   args.output_folder + "conv/")
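This is the only variant that wraps the models in nn.DataParallel, which is why it reaches through .module to copy the embeddings. A toy illustration of the indirection:

import torch.nn as nn

net = nn.DataParallel(nn.Linear(4, 2))
print(hasattr(net, "weight"))   # False: attributes live on the wrapped module
print(net.module.weight.shape)  # torch.Size([2, 4])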
Code Example #8
def train_conv(args):

    # Creating convolution model here.
    ####################################

    print("Defining model")
    model_gat = SpKBGATModified(entity_embeddings, relation_embeddings,
                                args.entity_out_dim, args.entity_out_dim,
                                args.drop_GAT, args.alpha, args.nheads_GAT)
    print("Only Conv model trained")
    model_conv = SpKBGATConvOnly(entity_embeddings, relation_embeddings,
                                 args.entity_out_dim, args.entity_out_dim,
                                 args.drop_GAT, args.drop_conv, args.alpha,
                                 args.alpha_conv, args.nheads_GAT,
                                 args.out_channels)

    if CUDA:
        model_conv.cuda()
        model_gat.cuda()

    model_gat.load_state_dict(
        torch.load('{}/trained_{}.pth'.format(args.output_folder,
                                              args.epochs_gat - 1)))

    # open('/scratche/home/shikhar/real_rel_gcn/cloned_repos/ConvKB/data/FB15k-237/entity2vec.txt', 'w').write('\n'.join(['\t'.join([str(y) for y in x]) for x in ent]))
    # open('/scratche/home/shikhar/real_rel_gcn/cloned_repos/ConvKB/data/FB15k-237/relation2vec.txt', 'w').write('\n'.join(['\t'.join([str(y) for y in x]) for x in ent]))

    # open('/scratche/home/shikhar/real_rel_gcn/cloned_repos/ConvKB/data/WN18RR/entity2vec.txt', 'w').write('\n'.join(['\t'.join([str(y) for y in x]) for x in model_gat.final_entity_embeddings.cpu().detach().numpy()]))
    # open('/scratche/home/shikhar/real_rel_gcn/cloned_repos/ConvKB/data/WN18RR/relation2vec.txt', 'w').write('\n'.join(['\t'.join([str(y) for y in x]) for x in model_gat.final_relation_embeddings.cpu().detach().numpy()]))
    model_conv.final_entity_embeddings = model_gat.final_entity_embeddings
    model_conv.final_relation_embeddings = model_gat.final_relation_embeddings

    Corpus_.batch_size = args.batch_size_conv
    Corpus_.invalid_valid_ratio = int(args.valid_invalid_ratio_conv)

    optimizer = torch.optim.Adam(model_conv.parameters(),
                                 lr=args.lr,
                                 weight_decay=args.weight_decay_conv)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                step_size=25,
                                                gamma=0.5,
                                                last_epoch=-1)
    margin_loss = torch.nn.SoftMarginLoss()

    epoch_losses = []  # losses of all epochs
    print("Number of epochs {}".format(args.epochs_conv))

    for epoch in range(args.epochs_conv):
        print("\nepoch-> ", epoch)
        random.shuffle(Corpus_.train_triples)
        Corpus_.train_indices = np.array(list(Corpus_.train_triples)).astype(
            np.int32)

        model_conv.train()  # getting in training mode
        start_time = time.time()
        epoch_loss = []

        if len(Corpus_.train_indices) % args.batch_size_conv == 0:
            num_iters_per_epoch = len(
                Corpus_.train_indices) // args.batch_size_conv
        else:
            num_iters_per_epoch = (len(Corpus_.train_indices) //
                                   args.batch_size_conv) + 1

        for iters in range(num_iters_per_epoch):
            start_time_iter = time.time()
            train_indices, train_values = Corpus_.get_iteration_batch(iters)

            if CUDA:
                train_indices = Variable(
                    torch.LongTensor(train_indices)).cuda()
                train_values = Variable(torch.FloatTensor(train_values)).cuda()
            else:
                train_indices = Variable(torch.LongTensor(train_indices))
                train_values = Variable(torch.FloatTensor(train_values))

            preds = model_conv(Corpus_, Corpus_.train_adj_matrix,
                               train_indices)
            optimizer.zero_grad()

            loss = margin_loss(preds.view(-1), train_values.view(-1))

            loss.backward()
            optimizer.step()

            epoch_loss.append(loss.data.item())

            end_time_iter = time.time()


            # print("Iteration-> {0}  , Iteration_time-> {1:.4f} , Iteration_loss {2:.4f}".format(
            #     iters, end_time_iter - start_time_iter, loss.data.item()))

        # scheduler.step()
        print("Epoch {} , average loss {} , epoch_time {}".format(
            epoch,
            sum(epoch_loss) / len(epoch_loss),
            time.time() - start_time))
        epoch_losses.append(sum(epoch_loss) / len(epoch_loss))

        save_model(model_conv, args.data, epoch, args.output_folder + "conv/")
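Finally, the if/else batch-count computation repeated in every example is just ceiling division; a self-contained equivalent:

import math

def iters_per_epoch(num_examples: int, batch_size: int) -> int:
    # Same result as the if/else blocks above: ceil(num_examples / batch_size).
    return math.ceil(num_examples / batch_size)

assert iters_per_epoch(100, 32) == 4
assert iters_per_epoch(96, 32) == 3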