Code example #1
def validate_entry_point(args):
    agent, train_env, val_envs = train.train_setup(args, args.batch_size)
    agent.load(args.model_prefix)

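    # Evaluate the loaded agent on each validation environment using greedy (argmax) decoding.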
    for env_name, (val_env, evaluator) in sorted(val_envs.items()):
        agent.env = val_env
        # teacher_results = agent.test(
        #     use_dropout=False, feedback='teacher', allow_cheat=True,
        #     beam_size=1)
        # teacher_score_summary, _ = evaluator.score_results(teacher_results)
        # for metric,val in teacher_score_summary.items():
        #     print("{} {}\t{}".format(env_name, metric, val))

        results = agent.test(use_dropout=False,
                             feedback='argmax',
                             beam_size=args.beam_size)
        score_summary, _ = evaluator.score_results(results)

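        # Optionally dump the predicted trajectories for this split to a JSON file.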
        if args.eval_file:
            eval_file = "{}_{}.json".format(args.eval_file, env_name)
            eval_results = []
            for instr_id, result in results.items():
                eval_results.append({
                    'instr_id': instr_id,
                    'trajectory': result['trajectory']
                })
            with open(eval_file, 'w') as f:
                utils.pretty_json_dump(eval_results, f)

        for metric, val in sorted(score_summary.items()):
            print("{} {}\t{}".format(env_name, metric, val))
Code example #2
File: rational_speaker.py  Project: YzyLmc/AC-GG_0.2
def validate_entry_point(args):
    follower, follower_train_env, follower_val_envs = \
        train.train_setup(args, args.batch_size)
    load_args = {}
    if args.no_cuda:
        load_args['map_location'] = 'cpu'
    follower.load(args.follower_prefix, **load_args)

    speaker, speaker_train_env, speaker_val_envs = \
        train_speaker.train_setup(args)
    speaker.load(args.speaker_prefix, **load_args)

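    # Score the rational speaker on each speaker validation split and report the weight with the best BLEU.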
    for env_name, (val_env, evaluator) in sorted(speaker_val_envs.items()):
        if args.output_file:
            output_file = "{}_{}.json".format(args.output_file, env_name)
        else:
            output_file = None
        scores_by_weight, _ = run_rational_speaker(
            val_env,
            evaluator,
            speaker,
            follower,
            args.beam_size,
            include_gold=args.include_gold,
            output_file=output_file)
        pprint.pprint(scores_by_weight)
        weight, score_summary = max(scores_by_weight.items(),
                                    key=lambda pair: pair[1]['bleu'])
        print("max success_rate with weight: {}".format(weight))
        for metric, val in score_summary.items():
            print("{} {}\t{}".format(env_name, metric, val))
Code example #3
File: rational_follower.py  Project: YzyLmc/AC-GG_0.2
def validate_entry_point(args):
    follower, follower_train_env, follower_val_envs = train.train_setup(
        args, args.batch_size)
    load_args = {}
    if args.no_cuda:
        load_args['map_location'] = 'cpu'
    follower.load(args.follower_prefix, **load_args)

    speaker, speaker_train_env, speaker_val_envs = \
        train_speaker.train_setup(args)
    speaker.load(args.speaker_prefix, **load_args)

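    # Evaluate the rational follower on each follower validation split, sweeping speaker weights.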
    for env_name, (val_env, evaluator) in sorted(follower_val_envs.items()):
        if args.output_file:
            output_file = "{}_{}.json".format(args.output_file, env_name)
        else:
            output_file = None
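        # The %.2f left in this template is presumably filled with the speaker weight inside run_rational_follower.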
        if args.eval_file:
            eval_file = "{}_{}_speaker_weight_%.2f.json".format(
                args.eval_file, env_name)
        else:
            eval_file = None
        accuracies_by_weight, index_counts_by_weight = run_rational_follower(
            val_env,
            evaluator,
            follower,
            speaker,
            args.beam_size,
            include_gold=args.include_gold,
            output_file=output_file,
            eval_file=eval_file,
            compute_oracle=args.compute_oracle,
            mask_undo=args.mask_undo,
            state_factored_search=args.state_factored_search,
            state_first_n_ws_key=args.state_first_n_ws_key,
            physical_traversal=args.physical_traversal,
        )
        pprint.pprint(accuracies_by_weight)
        pprint.pprint(
            {w: sorted(d.items())
             for w, d in index_counts_by_weight.items()})
        weight, score_summary = max(accuracies_by_weight.items(),
                                    key=lambda pair: pair[1]['success_rate'])
        print("max success_rate with weight: {}".format(weight))
        for metric, val in score_summary.items():
            print("{} {}\t{}".format(env_name, metric, val))
Code example #4
def main(lr=1e-4, gamma=0.5, hidden_size=100, num_epochs=20):
    loaders = load_small_mnist_data()
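    # Flattened size of a single input sample for each split.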
    dataset_sizes = {
        g: np.prod(dl.dataset[0][0].shape)
        for g, dl in loaders.items()
    }
    print(dataset_sizes)

    input_size = dataset_sizes["train"]

    model = SmallNetwork(input_size, hidden_size=hidden_size, num_classes=10)
    criterion, optimizer, scheduler = train_setup(model, lr=lr, gamma=gamma)

    final_model, history = fancy_train_model(model,
                                             loaders,
                                             criterion,
                                             optimizer,
                                             scheduler,
                                             num_epochs=num_epochs)
    print(history)
    return final_model
Code example #5
File: run_search.py  Project: hyzcn/FAST
def setup_agent_envs(args):
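    # Only include the 'train' split when the job is 'train' or 'cache'; otherwise pass no training splits.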
    if args.job == 'train' or args.job == 'cache':
        train_splits = ['train']
    else:
        train_splits = []
    return train.train_setup(args, train_splits)
Code example #6
    return dl_train, dl_test


if __name__ == "__main__":
    m = 50
    dl_train, dl_test = make_mapped_wavelet_dataloaders(
        mnist_train.data,
        mnist_train.targets,
        mnist_test.data,
        mnist_test.targets,
        m=m,
    )

    num_classes = np.unique(mnist_train.targets).size
    model = SimpleNet(m, num_classes)
    criterion, optimizer, lr_scheduler = train_setup(model, lr=1e-5)
    train_fn = create_train_fn(model, criterion, optimizer)
    eval_fn = create_eval_fn(model, criterion)

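    # History lists for training and test loss/accuracy.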
    train_loss = []
    train_acc = []
    test_loss = []
    test_acc = []

    for epoch in range(100):
        # Collect per-batch loss/accuracy for this epoch; the accumulators are
        # reset once per epoch (resetting them inside the batch loop would keep
        # only the last batch's values).
        tmp_train_loss = []
        tmp_train_acc = []
        for batch in dl_train:
            l, a = train_fn(batch)
            tmp_train_loss.append(l)
            tmp_train_acc.append(a)