Example #1
def main(opts):
    # print the parsed options for reference
    print(opts, flush=True)

    # set manual_seed and build vocab
    setup(opts, opts.seed)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    print(f"Usando {device} :)")

    # create a batch training environment that will also preprocess text
    vocab = read_vocab(opts.train_vocab)
    tok = Tokenizer(opts.remove_punctuation == 1, opts.reversed == 1, vocab=vocab, encoding_length=opts.max_cap_length)

    # create language instruction encoder
    encoder_kwargs = {
        'opts': opts,
        'vocab_size': len(vocab),
        'embedding_size': opts.word_embedding_size,
        'hidden_size': opts.rnn_hidden_size,
        'padding_idx': padding_idx,  # padding_idx is expected to be defined at module level (index of the <PAD> token)
        'dropout_ratio': opts.rnn_dropout,
        'bidirectional': opts.bidirectional == 1,
        'num_layers': opts.rnn_num_layers
    }
    print('Using {} as encoder ...'.format(opts.lang_embed))
    if 'lstm' in opts.lang_embed:
        encoder = EncoderRNN(**encoder_kwargs)
    else:
        raise ValueError('Unknown {} language embedding'.format(opts.lang_embed))
    print(encoder)

    # create policy model
    policy_model_kwargs = {
        'opts':opts,
        'img_fc_dim': opts.img_fc_dim,
        'img_fc_use_batchnorm': opts.img_fc_use_batchnorm == 1,
        'img_dropout': opts.img_dropout,
        'img_feat_input_dim': opts.img_feat_input_dim,
        'rnn_hidden_size': opts.rnn_hidden_size,
        'rnn_dropout': opts.rnn_dropout,
        'max_len': opts.max_cap_length,
        'max_navigable': opts.max_navigable
    }

    if opts.arch == 'regretful':
        model = Regretful(**policy_model_kwargs)
    elif opts.arch == 'self-monitoring':
        model = SelfMonitoring(**policy_model_kwargs)
    elif opts.arch == 'speaker-baseline':
        model = SpeakerFollowerBaseline(**policy_model_kwargs)
    else:
        raise ValueError('Unknown {} model for seq2seq agent'.format(opts.arch))
    print(model)

    encoder = encoder.to(device)
    model = model.to(device)

    params = list(encoder.parameters()) + list(model.parameters())
    optimizer = torch.optim.Adam(params, lr=opts.learning_rate)

    # optionally resume from a checkpoint
    if opts.resume:
        model, encoder, optimizer, best_success_rate = resume_training(opts, model, encoder, optimizer)

    # if a secondary exp name is specified, append it; this is useful when resuming from a previously saved
    # experiment and saving to another one, e.g., pre-training on synthetic data and fine-tuning on real data
    if opts.exp_name_secondary:
        opts.exp_name += opts.exp_name_secondary

    feature, img_spec = load_features(opts.img_feat_dir, opts.blind)

    if opts.test_submission:
        assert opts.resume, 'The model was not resumed before running for submission.'
        test_env = ('test', (R2RPanoBatch(opts, feature, img_spec, batch_size=opts.batch_size,
                                 splits=['test'], tokenizer=tok), Evaluation(['test'], opts)))
        agent_kwargs = {
            'opts': opts,
            'env': test_env[1][0],
            'results_path': "",
            'encoder': encoder,
            'model': model,
            'feedback': opts.feedback
        }
        agent = PanoSeq2SeqAgent(**agent_kwargs)
        # setup trainer
        trainer = PanoSeq2SeqTrainer(opts, agent, optimizer)
        epoch = opts.start_epoch - 1
        trainer.eval(epoch, test_env)
        return

    # set up R2R environments; with data augmentation enabled, training starts on the synthetic split
    if not opts.train_data_augmentation:
        train_env = R2RPanoBatch(opts, feature, img_spec, batch_size=opts.batch_size, seed=opts.seed,
                                 splits=['train'], tokenizer=tok)
    else:
        train_env = R2RPanoBatch(opts, feature, img_spec, batch_size=opts.batch_size, seed=opts.seed,
                                 splits=['synthetic'], tokenizer=tok)

    val_craft_splits = ['craft_seen', 'craft_unseen']
    val_splits = ['val_seen', 'val_unseen']
    if opts.craft_eval:
        val_splits += val_craft_splits
    val_envs = {split: (R2RPanoBatch(opts, feature, img_spec, batch_size=opts.batch_size,
                                     splits=[split], tokenizer=tok), Evaluation([split], opts))
                for split in val_splits}
    # create agent
    agent_kwargs = {
        'opts': opts,
        'env': train_env,
        'results_path': "",
        'encoder': encoder,
        'model': model,
        'feedback': opts.feedback
    }
    agent = PanoSeq2SeqAgent(**agent_kwargs)

    # setup trainer
    trainer = PanoSeq2SeqTrainer(opts, agent, optimizer, opts.train_iters_epoch)

    if opts.eval_only:
        success_rate = []
        for val_env in val_envs.items():
            success_rate.append(trainer.eval(opts.start_epoch - 1, val_env, tb_logger=None))
        return

    # set up tensorboard logger
    tb_logger = set_tb_logger(opts.log_dir, opts.exp_name, opts.resume)
    sys.stdout.flush()
    best_success_rate = best_success_rate if opts.resume else 0.0
    for epoch in range(opts.start_epoch, opts.max_num_epochs + 1):
        trainer.train(epoch, train_env, tb_logger)

        if epoch % opts.eval_every_epochs == 0:
            success_rate = []
            for val_env in val_envs.items():
                success_rate.append(trainer.eval(epoch, val_env, tb_logger))

            success_rate_compare = success_rate[1]  # success rate on val_unseen (second entry in val_envs)

            if is_experiment():
                # remember best val_unseen success rate and save checkpoint
                is_best = success_rate_compare >= best_success_rate
                best_success_rate = max(success_rate_compare, best_success_rate)
                print("--> Highest val_unseen success rate: {}".format(best_success_rate))
                sys.stdout.flush()

                # save a checkpoint; is_best marks whether it is the best so far
                save_checkpoint({
                    'opts': opts,
                    'epoch': epoch + 1,
                    'state_dict': model.state_dict(),
                    'encoder_state_dict': encoder.state_dict(),
                    'best_success_rate': best_success_rate,
                    'optimizer': optimizer.state_dict(),
                    'max_episode_len': opts.max_episode_len,
                }, is_best, checkpoint_dir=opts.checkpoint_dir, name=opts.exp_name)

        # after the data-augmentation phase, switch from the synthetic split back to the real 'train' split
        if opts.train_data_augmentation and epoch == opts.epochs_data_augmentation:
            train_env = R2RPanoBatch(opts, feature, img_spec, batch_size=opts.batch_size, seed=opts.seed,
                                     splits=['train'], tokenizer=tok)

    print("--> Finished training")
Example #2
def main(opts):

    # set manual_seed and build vocab
    setup(opts, opts.seed)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # create a batch training environment that will also preprocess text
    vocab = read_vocab(opts.train_vocab)
    tok = Tokenizer(
        opts.remove_punctuation == 1,
        opts.reversed == 1,
        vocab=vocab,
        encoding_length=opts.max_cap_length,
    )

    # create language instruction encoder
    encoder_kwargs = {
        "opts": opts,
        "vocab_size": len(vocab),
        "embedding_size": opts.word_embedding_size,
        "hidden_size": opts.rnn_hidden_size,
        "padding_idx": padding_idx,
        "dropout_ratio": opts.rnn_dropout,
        "bidirectional": opts.bidirectional == 1,
        "num_layers": opts.rnn_num_layers,
    }
    print("Using {} as encoder ...".format(opts.lang_embed))
    if "lstm" in opts.lang_embed:
        encoder = EncoderRNN(**encoder_kwargs)
    else:
        raise ValueError("Unknown {} language embedding".format(
            opts.lang_embed))
    print(encoder)

    # create policy model
    policy_model_kwargs = {
        "opts": opts,
        "img_fc_dim": opts.img_fc_dim,
        "img_fc_use_batchnorm": opts.img_fc_use_batchnorm == 1,
        "img_dropout": opts.img_dropout,
        "img_feat_input_dim": opts.img_feat_input_dim,
        "rnn_hidden_size": opts.rnn_hidden_size,
        "rnn_dropout": opts.rnn_dropout,
        "max_len": opts.max_cap_length,
        "max_navigable": opts.max_navigable,
    }

    if opts.arch == "self-monitoring":
        model = SelfMonitoring(**policy_model_kwargs)
    elif opts.arch == "speaker-baseline":
        model = SpeakerFollowerBaseline(**policy_model_kwargs)
    else:
        raise ValueError("Unknown {} model for seq2seq agent".format(
            opts.arch))
    print(model)

    encoder = encoder.to(device)
    model = model.to(device)

    params = list(encoder.parameters()) + list(model.parameters())
    optimizer = torch.optim.Adam(params, lr=opts.learning_rate)

    # optionally resume from a checkpoint
    if opts.resume:
        model, encoder, optimizer, best_success_rate = resume_training(
            opts, model, encoder, optimizer)

    # if a secondary exp name is specified, append it; this is useful when resuming from a previously saved
    # experiment and saving to another one, e.g., pre-training on synthetic data and fine-tuning on real data
    if opts.exp_name_secondary:
        opts.exp_name += opts.exp_name_secondary

    feature, img_spec = load_features(opts.img_feat_dir)

    if opts.test_submission:
        assert (opts.resume
                ), "The model was not resumed before running for submission."
        test_env = (
            "test",
            (
                R2RPanoBatch(
                    opts,
                    feature,
                    img_spec,
                    batch_size=opts.batch_size,
                    splits=["test"],
                    tokenizer=tok,
                ),
                Evaluation(["test"]),
            ),
        )
        agent_kwargs = {
            "opts": opts,
            "env": test_env[1][0],
            "results_path": "",
            "encoder": encoder,
            "model": model,
            "feedback": opts.feedback,
        }
        agent = PanoSeq2SeqAgent(**agent_kwargs)
        # setup trainer
        trainer = PanoSeq2SeqTrainer(opts, agent, optimizer)
        epoch = opts.start_epoch - 1
        trainer.eval(epoch, test_env)
        return

    # set up R2R environments; with data augmentation enabled, training starts on the synthetic split
    if not opts.train_data_augmentation:
        train_env = R2RPanoBatch(
            opts,
            feature,
            img_spec,
            batch_size=opts.batch_size,
            seed=opts.seed,
            splits=["train"],
            tokenizer=tok,
        )
    else:
        train_env = R2RPanoBatch(
            opts,
            feature,
            img_spec,
            batch_size=opts.batch_size,
            seed=opts.seed,
            splits=["synthetic"],
            tokenizer=tok,
        )

    val_envs = {
        split: (
            R2RPanoBatch(
                opts,
                feature,
                img_spec,
                batch_size=opts.batch_size,
                splits=[split],
                tokenizer=tok,
            ),
            Evaluation([split]),
        )
        for split in ["val_seen", "val_unseen"]
    }

    # create agent
    agent_kwargs = {
        "opts": opts,
        "env": train_env,
        "results_path": "",
        "encoder": encoder,
        "model": model,
        "feedback": opts.feedback,
    }
    agent = PanoSeq2SeqAgent(**agent_kwargs)

    # setup trainer
    trainer = PanoSeq2SeqTrainer(opts, agent, optimizer,
                                 opts.train_iters_epoch)

    if opts.eval_beam or opts.eval_only:
        success_rate = []
        for val_env in val_envs.items():
            success_rate.append(
                trainer.eval(opts.start_epoch - 1, val_env, tb_logger=None))
        return

    # set up tensorboard logger
    tb_logger = set_tb_logger(opts.log_dir, opts.exp_name, opts.resume)

    best_success_rate = best_success_rate if opts.resume else 0.0

    for epoch in range(opts.start_epoch, opts.max_num_epochs + 1):
        trainer.train(epoch, train_env, tb_logger)

        if epoch % opts.eval_every_epochs == 0:
            success_rate = []
            for val_env in val_envs.items():
                success_rate.append(trainer.eval(epoch, val_env, tb_logger))

            success_rate_compare = success_rate[1]  # success rate on val_unseen (second entry in val_envs)

            if is_experiment():
                # remember best val_unseen success rate and save checkpoint
                is_best = success_rate_compare >= best_success_rate
                best_success_rate = max(success_rate_compare,
                                        best_success_rate)
                print("--> Highest val_unseen success rate: {}".format(
                    best_success_rate))

                # save a checkpoint; is_best marks whether it is the best so far
                save_checkpoint(
                    {
                        "opts": opts,
                        "epoch": epoch + 1,
                        "state_dict": model.state_dict(),
                        "encoder_state_dict": encoder.state_dict(),
                        "best_success_rate": best_success_rate,
                        "optimizer": optimizer.state_dict(),
                        "max_episode_len": opts.max_episode_len,
                    },
                    is_best,
                    checkpoint_dir=opts.checkpoint_dir,
                    name=opts.exp_name,
                )

        # after the data-augmentation phase, switch from the synthetic split back to the real 'train' split
        if (opts.train_data_augmentation
                and epoch == opts.epochs_data_augmentation):
            train_env = R2RPanoBatch(
                opts,
                feature,
                img_spec,
                batch_size=opts.batch_size,
                seed=opts.seed,
                splits=["train"],
                tokenizer=tok,
            )

    print("--> Finished training")