Example #1
def run_evaluator_on_latest_model(dataset, config='lstm'):
    config = configurations[config](dataset)
    latest_model = get_latest_model(
        os.path.join(config['training']['basepath'],
                     config['training']['exp_dirname']))
    evaluator = Evaluator(dataset, latest_model, _type=dataset.trainer_type)
    _ = evaluator.evaluate(dataset.test_data, save_results=True)
    return evaluator
def run_evaluator_on_latest_model(dataset, config="lstm"):
    print("EVALUATING LATEST MODEL")

    config = configurations[config](dataset)
    latest_model = get_latest_model(
        os.path.join(config["training"]["basepath"], config["training"]["exp_dirname"])
    )
    evaluator = Evaluator(dataset, latest_model, _type=dataset.trainer_type)
    _ = evaluator.evaluate(dataset.test_data, save_results=True)
    return evaluator
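These helpers are normally driven by a small entry script. A minimal sketch of how the evaluator above might be invoked; the dataset constructor is a placeholder for illustration and is not part of the listing:

# Hypothetical driver; load_sst_dataset() stands in for however the dataset
# object is built in the surrounding project. It only needs to expose
# test_data, trainer_type and the other attributes used above.
dataset = load_sst_dataset()
evaluator = run_evaluator_on_latest_model(dataset, config="lstm")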
Example #3
def generate_graphs_on_latest_model(dataset, config='lstm'):
    config = configurations[config](dataset)
    latest_model = get_latest_model(
        os.path.join(config['training']['basepath'],
                     config['training']['exp_dirname']))
    evaluator = Evaluator(dataset, latest_model, _type=dataset.trainer_type)
    _ = evaluator.evaluate(dataset.test_data, save_results=False)
    generate_graphs(dataset,
                    config['training']['exp_dirname'],
                    evaluator.model,
                    test_data=dataset.test_data)
Example #4
def train_dataset(dataset, args, config='lstm'):
    config = generate_config(dataset, args, config)
    trainer = Trainer(dataset, args, config=config)
    # go ahead and save the experiment values (save_model=False: weights are not written yet)
    dirname = trainer.model.save_values(save_model=False)
    print("DIRECTORY:", dirname)
    if args.adversarial:
        trainer.train_adversarial(dataset.train_data, dataset.test_data, args)
    else:
        trainer.train_standard(dataset.train_data, dataset.test_data, args,
                               save_on_metric=dataset.save_on_metric)
    print('####################################')
    print("TEST RESULTS FROM BEST MODEL")
    evaluator = Evaluator(dataset, trainer.model.dirname, args)
    _ = evaluator.evaluate(dataset.test_data, save_results=True)
    return trainer, evaluator
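The args object above only needs an adversarial attribute as far as this listing shows; anything else about the real CLI is an assumption. A hedged sketch of a matching argparse setup:

import argparse

# Only --adversarial is visible in the example above; any other flags the
# real CLI defines are not reproduced here.
parser = argparse.ArgumentParser()
parser.add_argument("--adversarial", action="store_true",
                    help="use train_adversarial instead of train_standard")
args = parser.parse_args([])  # [] -> standard (non-adversarial) training

# `dataset` built as in the other examples
trainer, evaluator = train_dataset(dataset, args, config="lstm")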
Example #5
def train_dataset(dataset, config='lstm'):
    try:
        config = configurations[config](dataset)
        trainer = Trainer(dataset, config=config, _type=dataset.trainer_type)
        trainer.train(dataset.train_data,
                      dataset.dev_data,
                      n_iters=8,
                      save_on_metric=dataset.save_on_metric)
        evaluator = Evaluator(dataset,
                              trainer.model.dirname,
                              _type=dataset.trainer_type)
        _ = evaluator.evaluate(dataset.test_data, save_results=True)
        return trainer, evaluator
    except Exception:
        # swallow training failures so batch runs over several datasets can continue
        return None
def generate_graphs_on_latest_model(dataset, config="lstm"):
    print("GENERATING GRAPHS FOR EXPERIMENT ON LATEST MODEL")

    config = configurations[config](dataset)
    latest_model = get_latest_model(
        os.path.join(config["training"]["basepath"], config["training"]["exp_dirname"])
    )
    evaluator = Evaluator(dataset, latest_model, _type=dataset.trainer_type)
    _ = evaluator.evaluate(dataset.test_data, save_results=False)
    generate_graphs(
        dataset,
        config["training"]["exp_dirname"],
        evaluator.model,
        test_data=dataset.test_data,
    )
Example #7
def train_dataset(dataset, config='lstm'):

    config = configurations[config](dataset)
    trainer = Trainer(dataset, config=config, _type=dataset.trainer_type)
    if hasattr(dataset, 'n_iter'):
        n_iters = dataset.n_iter
    else:
        n_iters = 8

    trainer.train(dataset.train_data,
                  dataset.dev_data,
                  n_iters=n_iters,
                  save_on_metric=dataset.save_on_metric)
    evaluator = Evaluator(dataset,
                          trainer.model.dirname,
                          _type=dataset.trainer_type)
    _ = evaluator.evaluate(dataset.test_data, save_results=True)
    return trainer, evaluator
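Because of the hasattr(dataset, 'n_iter') check above, the number of training iterations can be overridden per dataset without changing the function. A small sketch; the default of 8 comes from the code, the value 25 is arbitrary:

# Override the default of 8 training iterations for this dataset object.
dataset.n_iter = 25
trainer, evaluator = train_dataset(dataset, config="lstm")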
Example #8
def train_dataset_and_get_lime_explanations(dataset, encoders, num_iters=15):
    for e in encoders:
        config = configurations[e](dataset)
        trainer = Trainer(dataset, config=config, _type=dataset.trainer_type)
        trainer.train(dataset.train_data,
                      dataset.dev_data,
                      n_iters=num_iters,
                      save_on_metric=dataset.save_on_metric)

        evaluator = Evaluator(dataset,
                              trainer.model.dirname,
                              _type=dataset.trainer_type)
        predictions, attentions = evaluator.evaluate(dataset.test_data,
                                                     save_results=True)

        lime_explanations = trainer.model.get_lime_explanations(
            dataset.test_data.X)
        # NOTE: returns inside the loop, so only the first encoder in `encoders` is used
        return predictions, attentions, lime_explanations
Example #9
def train_dataset_and_get_gradient(dataset, encoders, num_iters=15):
    for e in encoders:
        config = configurations[e](dataset)
        trainer = Trainer(dataset, config=config, _type=dataset.trainer_type)
        trainer.train(dataset.train_data,
                      dataset.dev_data,
                      n_iters=num_iters,
                      save_on_metric=dataset.save_on_metric)

        evaluator = Evaluator(dataset,
                              trainer.model.dirname,
                              _type=dataset.trainer_type)
        predictions, attentions = evaluator.evaluate(dataset.test_data,
                                                     save_results=True)
        grads = evaluator.gradient_experiment_get_grads(dataset.test_data)
        from Trainers.PlottingBC import process_grads
        process_grads(grads)
        # NOTE: returns inside the loop, so only the first encoder in `encoders` is used
        return predictions, attentions, grads
def train_dataset(dataset, config="lstm"):
    print("STARTING TRAINING")

    config = configurations[config](dataset)
    trainer = Trainer(dataset, config=config, _type=dataset.trainer_type)
    if hasattr(dataset, "n_iter"):
        n_iters = dataset.n_iter
    else:
        n_iters = 8

    trainer.train(
        dataset.train_data,
        dataset.dev_data,
        n_iters=n_iters,
        save_on_metric=dataset.save_on_metric,
    )
    evaluator = Evaluator(dataset, trainer.model.dirname, _type=dataset.trainer_type)
    _ = evaluator.evaluate(dataset.test_data, save_results=True)
    return trainer, evaluator
Example #11
def train_dataset_and_get_atn_map(dataset, encoders, num_iters=15):
    for e in encoders:
        config = configurations[e](dataset)
        trainer = Trainer(dataset, config=config, _type=dataset.trainer_type)
        trainer.train(dataset.train_data,
                      dataset.dev_data,
                      n_iters=num_iters,
                      save_on_metric=dataset.save_on_metric)
        train_losses = trainer.model.train_losses

        evaluator = Evaluator(dataset,
                              trainer.model.dirname,
                              _type=dataset.trainer_type)
        predictions, attentions = evaluator.evaluate(dataset.test_data,
                                                     save_results=True)

        # evaluator = Evaluator(dataset, trainer.model.last_epch_dirname,
        #                       _type=dataset.trainer_type)
        predictions_lst_epch, attentions_lst_epch = evaluator.evaluate(
            dataset.test_data)
        # NOTE: returns inside the loop, so only the first encoder in `encoders` is used
        return (predictions, attentions, predictions_lst_epch,
                attentions_lst_epch, train_losses)
Example #12
def eval_swa_model(dataset, top_lvl_models_dir):
    dirs = [
        d for d in os.listdir(top_lvl_models_dir)
        if 'enc.th' in os.listdir(os.path.join(top_lvl_models_dir, d))
    ]
    Model = BC.Model
    swa = Model.init_from_config(os.path.join(top_lvl_models_dir, dirs[0]))
    swa.dirname = os.path.join(top_lvl_models_dir, dirs[0])
    i = 1
    for new_model_dir in dirs[1:]:
        new_model = BC.Model.init_from_config(
            os.path.join(top_lvl_models_dir, new_model_dir))
        adding_params(swa.encoder, new_model.encoder, 1.0 / (i + 1))
        adding_params(swa.decoder, new_model.decoder, 1.0 / (i + 1))
        i += 1
    # divide_all_params(swa.encoder, len(dirs))
    # divide_all_params(swa.decoder, len(dirs))
    evaluator = Evaluator(dataset,
                          os.path.join(top_lvl_models_dir, dirs[0]),
                          _type=dataset.trainer_type)
    evaluator.model = swa
    _ = evaluator.evaluate(dataset.test_data, save_results=True)
    return evaluator
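adding_params itself is not shown in the listing. For weight averaging of this kind it is typically a running mean over matching parameters, so that alpha = 1 / (i + 1) on the i-th checkpoint yields an equal-weight average over all checkpoints seen so far. A hedged sketch of such a helper, offered as an assumption rather than the repository's actual implementation:

import torch

def adding_params(swa_module, new_module, alpha):
    # Running average: swa <- (1 - alpha) * swa + alpha * new.
    with torch.no_grad():
        for p_swa, p_new in zip(swa_module.parameters(), new_module.parameters()):
            p_swa.mul_(1.0 - alpha).add_(alpha * p_new)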
Example #13
def train_dataset_and_temp_scale(dataset, encoders):
    for e in encoders:
        config = configurations[e](dataset)
        trainer = Trainer(dataset, config=config, _type=dataset.trainer_type)
        trainer.train(dataset.train_data,
                      dataset.dev_data,
                      n_iters=8,
                      save_on_metric=dataset.save_on_metric)
        evaluator = Evaluator(dataset,
                              trainer.model.dirname,
                              _type=dataset.trainer_type)

        print("Temperature-scaling..")
        orig_model = evaluator.model
        dev_batch = BatchHolder(dataset.dev_data.X)
        valid_dataset = TensorDataset(
            dev_batch.seq, dev_batch.lengths, dev_batch.masks,
            torch.from_numpy(np.array(dataset.dev_data.y)))
        valid_loader = DataLoader(valid_dataset, batch_size=1)

        scaled_model = ModelWithTemperature(orig_model)
        scaled_model.set_temperature(valid_loader)
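ModelWithTemperature.set_temperature here follows the standard temperature-scaling recipe: a single scalar T is fit on held-out data by minimizing negative log-likelihood, and logits are divided by T at prediction time. A minimal sketch of that idea, assuming the wrapped model returns logits; it mirrors the common open-source formulation, not necessarily the exact class used above (which consumes a DataLoader rather than raw logits):

import torch
import torch.nn as nn

class SimpleTemperatureScaler(nn.Module):
    """Wraps a model that returns logits and rescales them by a learned T."""

    def __init__(self, model):
        super().__init__()
        self.model = model
        self.temperature = nn.Parameter(torch.ones(1) * 1.5)

    def forward(self, *inputs):
        return self.model(*inputs) / self.temperature

    def fit_temperature(self, val_logits, val_labels):
        # Optimize T alone by minimizing NLL on validation logits.
        nll = nn.CrossEntropyLoss()
        optimizer = torch.optim.LBFGS([self.temperature], lr=0.01, max_iter=50)

        def closure():
            optimizer.zero_grad()
            loss = nll(val_logits / self.temperature, val_labels)
            loss.backward()
            return loss

        optimizer.step(closure)
        return self.temperature.item()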
Example #14
def run_evaluator_on_specific_model(dataset, model_path, config='lstm'):
    config = configurations[config](dataset)
    evaluator = Evaluator(dataset, model_path, _type=dataset.trainer_type)
    evaluator.model.temperature = config['training']['temperature']
    _ = evaluator.evaluate(dataset.test_data, save_results=True)
    return evaluator
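A hedged usage sketch for this last example; the path is a placeholder for an experiment directory written by one of the train_dataset variants above:

# Point model_path at a saved experiment directory (placeholder path below).
evaluator = run_evaluator_on_specific_model(
    dataset, model_path="outputs/<dataset>/lstm/<run_dir>", config="lstm")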