# Example 1 — File: run.py (Project: yyht/openie6)
def predict(hparams,
            checkpoint_callback,
            meta_data_vocab,
            train_dataloader,
            val_dataloader,
            test_dataloader,
            all_sentences,
            mapping=None,
            conj_word_mapping=None):
    """Run inference using a single pretrained conj/oie checkpoint.

    Loads the hyper-parameters stored inside the checkpoint, overrides them
    with any options explicitly passed on the command line, builds the model
    and runs ``trainer.test`` over ``test_dataloader``.

    Args:
        hparams: argparse-style namespace of run options; ``hparams.task``
            ('conj' or 'oie') selects which pretrained checkpoint path
            (``conj_model`` / ``oie_model``) to load.
        checkpoint_callback: unused here; kept for signature parity with the
            sibling train/test/resume entry points.
        meta_data_vocab: vocabulary object forwarded to ``Model``.
        train_dataloader: unused here (signature parity).
        val_dataloader: unused here (signature parity).
        test_dataloader: dataloader consumed by ``trainer.test``.
        all_sentences: raw input sentences, attached to the model so the test
            step can emit predictions for them.
        mapping: optional mapping installed on ``model._metric``.
        conj_word_mapping: optional conjunction-word mapping for the metric.

    Returns:
        The ``Model`` instance after testing has completed.
    """
    # Select which pretrained checkpoint to load based on the task.
    if hparams.task == 'conj':
        hparams.checkpoint = hparams.conj_model
    if hparams.task == 'oie':
        hparams.checkpoint = hparams.oie_model

    checkpoint_paths = get_checkpoint_path(hparams)
    assert len(checkpoint_paths) == 1
    checkpoint_path = checkpoint_paths[0]
    # Checkpoints saved on GPU need an explicit CPU mapping on GPU-less hosts.
    if has_cuda:
        loaded_hparams_dict = torch.load(checkpoint_path)['hparams']
    else:
        loaded_hparams_dict = torch.load(
            checkpoint_path, map_location=torch.device('cpu'))['hparams']
    # Command-line arguments given for this run take precedence over the
    # hyper-parameters that were stored inside the checkpoint.
    current_hparams_dict = vars(hparams)
    loaded_hparams_dict = data.override_args(loaded_hparams_dict,
                                             current_hparams_dict,
                                             sys.argv[1:])
    loaded_hparams = data.convert_to_namespace(loaded_hparams_dict)
    model = Model(loaded_hparams, meta_data_vocab)

    # PEP 8: compare against None with `is not`, not `!=`.
    if mapping is not None:
        model._metric.mapping = mapping
    if conj_word_mapping is not None:
        model._metric.conj_word_mapping = conj_word_mapping

    logger = None
    trainer = Trainer(gpus=hparams.gpus,
                      logger=logger,
                      resume_from_checkpoint=checkpoint_path)
    start_time = time.time()
    model.all_sentences = all_sentences
    trainer.test(model, test_dataloaders=test_dataloader)
    end_time = time.time()
    print(f'Total Time taken = {end_time-start_time} s')

    return model
# Example 2 — File: run.py (Project: yyht/openie6)
def test(hparams,
         checkpoint_callback,
         meta_data_vocab,
         train_dataloader,
         val_dataloader,
         test_dataloader,
         all_sentences,
         mapping=None,
         conj_word_mapping=None):
    """Evaluate the model on every checkpoint found for this run.

    When not in a 'train' mode, the hyper-parameters stored in the first
    checkpoint are loaded and overridden with explicit command-line args;
    otherwise the current ``hparams`` are used as-is. Each checkpoint is then
    evaluated with ``trainer.test`` and its result appended to
    ``<save>/logs/test.txt``.

    Args:
        hparams: argparse-style namespace of run options.
        checkpoint_callback: unused here; kept for signature parity with the
            sibling train/predict/resume entry points.
        meta_data_vocab: vocabulary object forwarded to ``Model``.
        train_dataloader: unused here (signature parity).
        val_dataloader: unused here (signature parity).
        test_dataloader: dataloader consumed by ``trainer.test``.
        all_sentences: unused here (signature parity).
        mapping: optional mapping installed on ``model._metric``.
        conj_word_mapping: optional conjunction-word mapping for the metric.
    """
    checkpoint_paths = get_checkpoint_path(hparams)
    # PEP 8: `'train' not in x` reads better than `not 'train' in x`.
    if 'train' not in hparams.mode:
        # Checkpoints saved on GPU need a CPU mapping on GPU-less hosts.
        if has_cuda:
            loaded_hparams_dict = torch.load(checkpoint_paths[0])['hparams']
        else:
            loaded_hparams_dict = torch.load(
                checkpoint_paths[0],
                map_location=torch.device('cpu'))['hparams']
        # Explicit command-line args override the stored hyper-parameters.
        current_hparams_dict = vars(hparams)
        loaded_hparams_dict = data.override_args(loaded_hparams_dict,
                                                 current_hparams_dict,
                                                 sys.argv[1:])
        loaded_hparams = data.convert_to_namespace(loaded_hparams_dict)
    else:
        loaded_hparams = hparams

    model = Model(loaded_hparams, meta_data_vocab)
    # PEP 8: compare against None with `is not`, not `!=`.
    if mapping is not None:
        model._metric.mapping = mapping
    if conj_word_mapping is not None:
        model._metric.conj_word_mapping = conj_word_mapping

    logger = get_logger('test', hparams)
    # Context manager guarantees the results file is closed even if a
    # checkpoint evaluation raises.
    with open(hparams.save + '/logs/test.txt', 'w') as test_f:
        for checkpoint_path in checkpoint_paths:
            trainer = Trainer(logger=logger,
                              gpus=hparams.gpus,
                              resume_from_checkpoint=checkpoint_path)
            trainer.test(model, test_dataloaders=test_dataloader)
            result = model.results
            test_f.write(f'{checkpoint_path}\t{result}\n')
            # Flush so partial results survive a crash mid-evaluation.
            test_f.flush()
    # NOTE(review): 'test.part' is presumably written by get_logger('test', ...)
    # (the resume path follows the same .part -> final rename pattern) — confirm
    # the logger actually creates this file, or the move will raise.
    shutil.move(hparams.save + '/logs/test.part',
                hparams.save + '/logs/test')
# Example 3 — File: run.py (Project: yyht/openie6)
def resume(hparams, checkpoint_callback, meta_data_vocab, train_dataloader,
           val_dataloader, test_dataloader, all_sentences):
    """Resume training from the single checkpoint found for this run.

    The hyper-parameters stored in the checkpoint are reloaded, overridden by
    any options explicitly given on the command line, and training continues
    via ``trainer.fit`` until ``hparams.epochs``. On completion the partial
    log file is renamed to its final name.
    """
    ckpt_paths = get_checkpoint_path(hparams)
    assert len(ckpt_paths) == 1
    ckpt_path = ckpt_paths[0]

    # On GPU-less hosts, remap CUDA-saved tensors to CPU; with a GPU the
    # default mapping (None) is used, matching a plain torch.load call.
    device_map = None if has_cuda else torch.device('cpu')
    ckpt_hparams_dict = torch.load(ckpt_path, map_location=device_map)['hparams']

    # Explicit command-line arguments win over the stored hyper-parameters.
    ckpt_hparams_dict = data.override_args(ckpt_hparams_dict,
                                           vars(hparams),
                                           sys.argv[1:])
    resumed_hparams = data.convert_to_namespace(ckpt_hparams_dict)

    model = Model(resumed_hparams, meta_data_vocab)

    logger = get_logger('resume', hparams)
    trainer = Trainer(show_progress_bar=True,
                      num_sanity_val_steps=5,
                      gpus=hparams.gpus,
                      logger=logger,
                      checkpoint_callback=checkpoint_callback,
                      min_epochs=hparams.epochs,
                      max_epochs=hparams.epochs,
                      resume_from_checkpoint=ckpt_path,
                      accumulate_grad_batches=int(
                          hparams.accumulate_grad_batches),
                      gradient_clip_val=hparams.gradient_clip_val,
                      num_tpu_cores=hparams.num_tpu_cores,
                      use_tpu=hparams.use_tpu,
                      train_percent_check=hparams.train_percent_check,
                      track_grad_norm=hparams.track_grad_norm,
                      val_check_interval=hparams.val_check_interval)
    trainer.fit(model,
                train_dataloader=train_dataloader,
                val_dataloaders=val_dataloader)

    # Promote the in-progress log file to its final name.
    shutil.move(hparams.save + f'/logs/resume.part',
                hparams.save + f'/logs/resume')