コード例 #1
0
def main(args):
    """Train an Info-HCVAE model on SQuAD, evaluating BLEU/EM/F1 after
    epoch 10 and checkpointing the best-F1 and best-BLEU models.

    Args:
        args: parsed command-line namespace; mutated to record the current
            CUDA device in ``args.device``.
    """
    tokenizer = BertTokenizer.from_pretrained(args.bert_model)
    train_loader, _, _ = get_squad_data_loader(
        tokenizer, args.train_dir, shuffle=True, args=args)
    eval_data = get_squad_data_loader(
        tokenizer, args.dev_dir, shuffle=False, args=args)

    args.device = torch.cuda.current_device()

    trainer = VAETrainer(args)

    # Persistent tqdm status lines pinned below the epoch/iteration bars.
    rec_bar = tqdm(total=0, bar_format='{desc}', position=2)
    kl_bar = tqdm(total=0, bar_format='{desc}', position=3)
    eval_bar = tqdm(total=0, bar_format='{desc}', position=5)
    best_bar = tqdm(total=0, bar_format='{desc}', position=6)

    print("MODEL DIR: " + args.model_dir)

    best_bleu = best_em = best_f1 = 0.0
    for epoch in trange(int(args.epochs), desc="Epoch", position=0):
        for batch in tqdm(train_loader, desc="Train iter", leave=False, position=1):
            c_ids, q_ids, a_ids, start_positions, end_positions = \
                batch_to_device(batch, args.device)
            trainer.train(c_ids, q_ids, a_ids, start_positions, end_positions)

            # Live readouts of the reconstruction and KL/info losses.
            rec_bar.set_description_str(
                'Q REC : {:06.4f} A REC : {:06.4f}'.format(
                    float(trainer.loss_q_rec), float(trainer.loss_a_rec)))
            kl_bar.set_description_str(
                'ZQ KL : {:06.4f} ZA KL : {:06.4f} INFO : {:06.4f}'.format(
                    float(trainer.loss_zq_kl), float(trainer.loss_za_kl),
                    float(trainer.loss_info)))

        # Evaluation only starts after epoch 10 (epochs 0-10 are skipped).
        if epoch > 10:
            metric_dict, bleu, _ = eval_vae(epoch, args, trainer, eval_data)
            f1 = metric_dict["f1"]
            em = metric_dict["exact_match"]
            bleu = bleu * 100
            eval_bar.set_description_str(
                '{}-th Epochs BLEU : {:02.2f} EM : {:02.2f} F1 : {:02.2f}'.format(
                    epoch, bleu, em, f1))

            # Track the best metrics; EM alone does not trigger a save.
            if em > best_em:
                best_em = em
            if f1 > best_f1:
                best_f1 = f1
                trainer.save(os.path.join(args.model_dir, "best_f1_model.pt"))
            if bleu > best_bleu:
                best_bleu = bleu
                trainer.save(os.path.join(args.model_dir, "best_bleu_model.pt"))

            best_bar.set_description_str(
                'BEST BLEU : {:02.2f} EM : {:02.2f} F1 : {:02.2f}'.format(
                    best_bleu, best_em, best_f1))
コード例 #2
0
def main(args):
    """Train a question-generation model on SQuAD, logging losses and BLEU
    to TensorBoard and saving the best-BLEU checkpoint.

    Args:
        args: parsed command-line namespace; mutated to record the current
            CUDA device in ``args.device``.
    """
    tokenizer = BertTokenizer.from_pretrained(args.bert_model)
    train_loader, _, _ = get_squad_data_loader(tokenizer, args.train_dir,
                                         shuffle=True, args=args)
    eval_data = get_squad_data_loader(tokenizer, args.dev_dir,
                                      shuffle=False, args=args)

    args.device = torch.cuda.current_device()

    trainer = Trainer(args)

    # One TensorBoard run directory per host so concurrent machines
    # writing into the same model_dir do not clobber each other's logs.
    log_dir = os.path.join(args.model_dir, socket.gethostname())
    writer = SummaryWriter(log_dir=log_dir)

    # Persistent tqdm status lines pinned below the epoch/iteration bars.
    loss_log = tqdm(total=0, bar_format='{desc}', position=2)
    eval_log = tqdm(total=0, bar_format='{desc}', position=4)
    best_eval_log = tqdm(total=0, bar_format='{desc}', position=5)

    print("MODEL DIR: " + args.model_dir)

    niter = 0  # global training-step counter for TensorBoard scalars
    best_bleu = 0.0
    for epoch in trange(int(args.epochs), desc="Epoch", position=0):
        for batch in tqdm(train_loader, desc="Train iter", leave=False, position=1):
            c_ids, q_ids, a_ids = batch_to_device(batch, args.device)
            trainer.train(c_ids, q_ids, a_ids)
            niter += 1
            writer.add_scalars('data/loss_group',
                               {'loss_q_rec': trainer.loss_q_rec},
                               niter)
            # Renamed from `str` to avoid shadowing the builtin.
            desc = 'Q REC : {:06.4f}'.format(float(trainer.loss_q_rec))
            loss_log.set_description_str(desc)

        # End-of-epoch evaluation; eval_vae returns a BLEU fraction,
        # scaled here to a percentage for display/logging.
        bleu = eval_vae(epoch, args, trainer, eval_data)
        bleu = bleu * 100
        desc = '{}-th Epochs BLEU : {:02.2f}'.format(epoch, bleu)
        eval_log.set_description_str(desc)
        writer.add_scalars('data/performance',
                           {'bleu': bleu}, epoch)

        if bleu > best_bleu:
            best_bleu = bleu
            trainer.save(os.path.join(args.model_dir, "best_bleu_model.pt"))

        best_eval_log.set_description_str(
            'BEST BLEU : {:02.2f}'.format(best_bleu))
コード例 #3
0
def main(args):
    """Train the VAE QA-pair generator on SQuAD.

    Per epoch: trains on all batches, logs losses and BLEU/EM/F1 to
    TensorBoard, checkpoints the best-F1 and best-BLEU models, prints
    sample generations every 100 eval examples, and writes posterior/prior
    latent vectors as a TensorBoard embedding with the generated QA text
    as metadata.

    Args:
        args: parsed command-line namespace; mutated to record the current
            CUDA device in ``args.device``.
    """
    tokenizer = BertTokenizer.from_pretrained(args.bert_model)
    train_loader, _, _ = get_squad_data_loader(tokenizer,
                                               args.train_dir,
                                               shuffle=True,
                                               args=args)
    eval_data = get_squad_data_loader(tokenizer,
                                      args.dev_dir,
                                      shuffle=False,
                                      args=args)

    args.device = torch.cuda.current_device()

    trainer = VAETrainer(args)

    # One TensorBoard run directory per host to keep concurrent runs apart.
    log_dir = os.path.join(args.model_dir, socket.gethostname())
    writer = SummaryWriter(log_dir=log_dir)

    # Persistent tqdm status lines pinned below the epoch/iteration bars.
    loss_log = tqdm(total=0, bar_format='{desc}', position=2)
    eval_log = tqdm(total=0, bar_format='{desc}', position=4)
    best_eval_log = tqdm(total=0, bar_format='{desc}', position=5)

    print("MODEL DIR: " + args.model_dir)

    niter = 0  # global training-step counter for TensorBoard scalars
    best_bleu, best_em, best_f1 = 0.0, 0.0, 0.0
    for epoch in trange(int(args.epochs), desc="Epoch", position=0):
        for batch in tqdm(train_loader,
                          desc="Train iter",
                          leave=False,
                          position=1):
            c_ids, q_ids, a_ids, start_positions, end_positions \
            = batch_to_device(batch, args.device)
            trainer.train(c_ids, q_ids, a_ids, start_positions, end_positions)
            niter += 1
            writer.add_scalars(
                'data/loss_group', {
                    'loss_q_rec': trainer.loss_q_rec,
                    'loss_a_rec': trainer.loss_a_rec,
                    'loss_kl': trainer.loss_kl,
                    'loss_info': trainer.loss_info
                }, niter)
            # Renamed from `str` to avoid shadowing the builtin.
            desc = 'Q REC : {:06.4f} A REC : {:06.4f} KL : {:06.4f} INFO : {:06.4f}'
            desc = desc.format(float(trainer.loss_q_rec),
                               float(trainer.loss_a_rec), float(trainer.loss_kl),
                               float(trainer.loss_info))
            loss_log.set_description_str(desc)

        metric_dict, bleu, all_results \
        = eval_vae(epoch, args, trainer, eval_data)
        f1 = metric_dict["f1"]
        em = metric_dict["exact_match"]
        bleu = bleu * 100  # eval_vae returns a fraction; display as percent
        desc = '{}-th Epochs BLEU : {:02.2f} EM : {:02.2f} F1 : {:02.2f}'
        desc = desc.format(epoch, bleu, em, f1)
        eval_log.set_description_str(desc)
        writer.add_scalars('data/performance', {
            'bleu': bleu,
            'em': em,
            'f1': f1
        }, epoch)

        # Track best metrics; EM alone does not trigger a checkpoint save.
        if em > best_em:
            best_em = em
        if f1 > best_f1:
            best_f1 = f1
            trainer.save(os.path.join(args.model_dir, "best_f1_model.pt"))
        if bleu > best_bleu:
            best_bleu = bleu
            trainer.save(os.path.join(args.model_dir, "best_bleu_model.pt"))

        desc = 'BEST BLEU : {:02.2f} EM : {:02.2f} F1 : {:02.2f}'
        desc = desc.format(best_bleu, best_em, best_f1)
        best_eval_log.set_description_str(desc)

        # Build the embedding matrix: for each eval example append the
        # posterior latent vector then the prior latent vector, each paired
        # with a human-readable metadata label.
        mat = []
        metadata = []
        for j in range(len(all_results)):
            mat.append(all_results[j].posterior_z_prob.view(-1))
            label = "[{}] [Pos] Real Q: {} Real A: {} Pos Q: {} Pos A: {}"
            label = label.format(j, all_results[j].real_question,
                                 all_results[j].real_answer,
                                 all_results[j].posterior_question,
                                 all_results[j].posterior_answer)

            if j % 100 == 0:
                print('###################### real questions\n')
                print(all_results[j].real_question)
                print(all_results[j].real_answer)
                # BUGFIX: this header previously duplicated the "prior"
                # banner below although these lines print POSTERIOR outputs.
                print('###################### generated posterior questions\n')
                print(all_results[j].posterior_question)
                print(all_results[j].posterior_answer)

                print('###################### generated prior questions\n')
                print(all_results[j].prior_question)
                print(all_results[j].prior_answer)

            metadata.append(label)

            mat.append(all_results[j].prior_z_prob.view(-1))
            label = "[{}] [Pri] Pri Q: {} Pri A: {}"
            label = label.format(j, all_results[j].prior_question,
                                 all_results[j].prior_answer)
            metadata.append(label)
        mat = torch.stack(mat, dim=0)
        writer.add_embedding(mat=mat, metadata=metadata, global_step=epoch)
コード例 #4
0
ファイル: main.py プロジェクト: Carlos-UR/Info-HCVAE
def main(args):
    """Resume-aware Info-HCVAE training loop with MLflow tracking.

    When ``args.load_checkpoint`` is set, restores the trainer state and the
    best metrics recorded so far, then skips every epoch the checkpoint has
    already completed. Each trained epoch is evaluated; best-F1, best-BLEU
    and a rolling checkpoint are saved, and results are reported to MLflow.

    Args:
        args: parsed command-line namespace; mutated to record the current
            CUDA device in ``args.device``.
    """
    tokenizer = BertTokenizer.from_pretrained(args.bert_model)
    train_loader, _, _ = get_squad_data_loader(tokenizer,
                                               args.train_dir,
                                               shuffle=True,
                                               args=args)
    eval_data = get_squad_data_loader(tokenizer,
                                      args.dev_dir,
                                      shuffle=False,
                                      args=args)

    args.device = torch.cuda.current_device()

    trainer = VAETrainer(args)

    # Persistent tqdm status lines pinned below the epoch/iteration bars.
    loss_log1 = tqdm(total=0, bar_format='{desc}', position=2)
    loss_log2 = tqdm(total=0, bar_format='{desc}', position=3)
    eval_log = tqdm(total=0, bar_format='{desc}', position=5)
    best_eval_log = tqdm(total=0, bar_format='{desc}', position=6)

    # Restore a previous run, or start from scratch.
    if args.load_checkpoint:
        # NOTE(review): `loadd` looks like a typo for `load` — confirm
        # against the VAETrainer API before renaming (callers may rely on it).
        epochs = trainer.loadd(args.model_dir)
        best_f1, best_bleu, best_em = VAETrainer.load_measures(args.model_dir)
        print(
            f"The current best measures are: F1  = {best_f1}, BLEU = {best_bleu} and EM = {best_em}."
        )
    else:
        epochs = -1  # nothing completed yet, so no epoch gets skipped
        best_bleu, best_em, best_f1 = 0.0, 0.0, 0.0

    print("MODEL DIR: " + args.model_dir)
    mlflow_logger = init_mlflow(args, f"{args.model_dir}/mlruns")
    for epoch in trange(int(args.epochs), desc="Epoch", position=0):
        # Skip epochs already covered by the loaded checkpoint.
        if epoch <= epochs:
            print(f"jumping epoch {epoch}...")
            continue

        for batch in tqdm(train_loader,
                          desc="Train iter",
                          leave=False,
                          position=1):
            c_ids, q_ids, a_ids, start_positions, end_positions \
            = batch_to_device(batch, args.device)
            trainer.train(c_ids, q_ids, a_ids, start_positions,
                          end_positions)

            str1 = 'Q REC : {:06.4f} A REC : {:06.4f}'
            str2 = 'ZQ KL : {:06.4f} ZA KL : {:06.4f} INFO : {:06.4f}'
            str1 = str1.format(float(trainer.loss_q_rec),
                               float(trainer.loss_a_rec))
            str2 = str2.format(float(trainer.loss_zq_kl),
                               float(trainer.loss_za_kl),
                               float(trainer.loss_info))
            loss_log1.set_description_str(str1)
            loss_log2.set_description_str(str2)

        # Evaluate every trained epoch. (A redundant `if epoch >= 0:` guard
        # was removed: trange always yields epoch >= 0.)
        f1, em, bleu, _str = eval_measures(epoch, args, trainer,
                                           eval_data)
        eval_log.set_description_str(_str)
        result = {"epoch": epoch, "em": em, "f1": f1, "bleu": bleu}
        mlflow_logger.on_result(result)

        # Track best metrics; EM alone does not trigger a checkpoint save.
        if em > best_em:
            best_em = em
        if f1 > best_f1:
            best_f1 = f1
            trainer.save(
                os.path.join(args.model_dir, "best_f1_model.pt"),
                epoch, f1, bleu, em)
        if bleu > best_bleu:
            best_bleu = bleu
            trainer.save(
                os.path.join(args.model_dir, "best_bleu_model.pt"),
                epoch, f1, bleu, em)

        # Rolling checkpoint for resume, mirrored to MLflow.
        trainer.save(os.path.join(args.model_dir, "checkpoint.pt"),
                     epoch, f1, bleu, em)
        mlflow_logger.on_checkpoint(
            f"{args.model_dir}/mlruns/checkpoint")
        _str = 'BEST BLEU : {:02.2f} EM : {:02.2f} F1 : {:02.2f}'
        _str = _str.format(best_bleu, best_em, best_f1)
        best_eval_log.set_description_str(_str)