Code Example #1
def run_evaluator_on_latest_model(dataset, config):
    # `os` is assumed to be imported; `configurations_qa`, `get_latest_model`,
    # and `Evaluator` come from the surrounding project.
    # Resolve the named configuration, then locate the most recently
    # saved model under the experiment directory.
    config = configurations_qa[config](dataset)
    latest_model = get_latest_model(
        os.path.join(config["training"]["basepath"],
                     config["training"]["exp_dirname"]))
    evaluator = Evaluator(dataset, latest_model)
    # Evaluate on the test split, persisting results to disk.
    _ = evaluator.evaluate(dataset.test_data, save_results=True)
    return evaluator
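
For context, a hypothetical call: the `dataset` object and the "lstm" configuration key are assumptions, standing in for whatever the surrounding project provides.

# Hypothetical usage; "lstm" is assumed to be a key in configurations_qa.
evaluator = run_evaluator_on_latest_model(dataset, "lstm")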
Code Example #2
def generate_graphs_on_latest_model(dataset, config):
    # Resolve the named configuration and locate the most recently
    # saved model under the experiment directory.
    config = configurations_qa[config](dataset)
    latest_model = get_latest_model(
        os.path.join(config["training"]["basepath"],
                     config["training"]["exp_dirname"]))
    if latest_model is not None:
        evaluator = Evaluator(dataset, latest_model)
        _ = evaluator.evaluate(dataset.test_data, save_results=True,
                               is_embds=False)
        print('outside eval')  # debug print
        generate_graphs(dataset, config["training"]["exp_dirname"],
                        evaluator.model, test_data=dataset.test_data)
Code Example #3
def train_dataset_and_get_atn_map(dataset, encoders, num_iters=15):
    # Train one model per encoder configuration and collect the test-set
    # scores and attention maps for each, keyed by encoder name.
    results = {}
    for e in encoders:
        config = configurations_qa[e](dataset)
        trainer = Trainer(dataset, config=config, _type=dataset.trainer_type)
        trainer.train(dataset.train_data,
                      dataset.dev_data,
                      n_iters=num_iters,
                      save_on_metric=dataset.save_on_metric)
        # Get train losses as well?

        evaluator = Evaluator(dataset, trainer.model.dirname)
        _, attentions, scores = evaluator.evaluate(dataset.test_data,
                                                   save_results=True)
        results[e] = (scores, attentions)
    return results
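
A hypothetical invocation of the above, assuming `configurations_qa` contains encoder keys such as "lstm" and "average":

# Hypothetical usage; the encoder keys are assumptions.
results = train_dataset_and_get_atn_map(dataset, ["lstm", "average"], num_iters=15)
scores, attentions = results["lstm"]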
Code Example #4
def train_dataset(dataset, config):
    try:
        config = configurations_qa[config](dataset)
        # Fall back to 25 iterations when the dataset does not specify n_iters.
        n_iters = dataset.n_iters if hasattr(dataset, "n_iters") else 25
        trainer = Trainer(dataset, config=config, _type=dataset.trainer_type)
        trainer.train(dataset.train_data,
                      dataset.dev_data,
                      n_iters=n_iters,
                      save_on_metric=dataset.save_on_metric)
        evaluator = Evaluator(dataset, trainer.model.dirname)
        _ = evaluator.evaluate(dataset.test_data, save_results=True)
        return trainer, evaluator
    except Exception as e:
        # Report the failure and return None so a caller can skip this run.
        print(e)
        return None
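
A hypothetical driver loop over several configurations, assuming the same encoder keys as above; failed runs return None and are skipped:

# Hypothetical usage; the encoder keys are assumptions.
for key in ["lstm", "average"]:
    result = train_dataset(dataset, key)
    if result is not None:
        trainer, evaluator = result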
Code Example #5
def generate_graphs_on_latest_model(dataset, config):
    try:
        config = configurations_qa[config](dataset)
        latest_model = get_latest_model(
            os.path.join(config['training']['basepath'],
                         config['training']['exp_dirname']))
        if latest_model is not None:
            evaluator = Evaluator(dataset, latest_model)
            _ = evaluator.evaluate(dataset.test_data, save_results=True)
            generate_graphs(dataset,
                            config['training']['exp_dirname'],
                            evaluator.model,
                            test_data=dataset.test_data)
    except Exception as e:
        # Catch Exception rather than using a bare `except:`, so
        # KeyboardInterrupt still propagates; report instead of failing silently.
        print(e)
        return
Code Example #6
def generate_graphs_on_latest_model(dataset, config):
    print("GENERATING GRAPHS FOR EXPERIMENT ON LATEST MODEL")

    config = configurations_qa[config](dataset)
    latest_model = get_latest_model(
        os.path.join(config["training"]["basepath"],
                     config["training"]["exp_dirname"]))
    if latest_model is not None:
        evaluator = Evaluator(dataset, latest_model)
        _ = evaluator.evaluate(dataset.test_data,
                               save_results=True,
                               is_embds=False)
        print("outside eval")
        generate_graphs(
            dataset,
            config["training"]["exp_dirname"],
            evaluator.model,
            test_data=dataset.test_data,
        )
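
Taken together, these helpers suggest a train-then-visualize workflow. A minimal end-to-end sketch, assuming a hypothetical `load_dataset` loader and the "lstm" configuration key:

# Hypothetical end-to-end flow; `load_dataset` and "lstm" are assumptions.
dataset = load_dataset("squad")                   # load a QA dataset
train_dataset(dataset, "lstm")                    # train and evaluate, saving results
generate_graphs_on_latest_model(dataset, "lstm")  # plot graphs for the saved model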