Example #1

This example loads a trained speaker snapshot and uses it to generate synthetic instructions for unannotated R2R paths, saving the result as augmented training data.
import json
import os

import tqdm

# The remaining names (args, setup, Tokenizer, read_vocab, read_img_features,
# utils, SemiBatch, Seq2SeqAgent, Speaker, TRAIN_VOCAB, CANDIDATE_FEATURES,
# features, log_dir) are assumed to come from the surrounding R2R training
# script.


def create_augment_data():
    setup()

    # Create a batch training environment that will also preprocess text
    vocab = read_vocab(TRAIN_VOCAB)
    tok = Tokenizer(vocab=vocab, encoding_length=args.maxInput)

    # Load features
    feat_dict = read_img_features(features)
    candidate_dict = utils.read_candidates(CANDIDATE_FEATURES)

    # The datasets to be augmented
    print("Start to augment the data")
    aug_envs = []
    # Other splits can be enabled the same way, e.g. an R2RBatch over
    # 'train' / 'val_seen' / 'val_unseen', or a SemiBatch built from
    # 'tasks/R2R/data/all_paths_46_removetrain.json' over 'train' + 'val_seen'.
    aug_envs.append(
        SemiBatch(False,
                  'tasks/R2R/data/all_paths_46_removevalunseen.json',
                  "unseen",
                  feat_dict,
                  candidate_dict,
                  batch_size=args.batchSize,
                  splits=['val_unseen'],
                  tokenizer=tok))
    aug_envs.append(
        SemiBatch(False,
                  'tasks/R2R/data/all_paths_46_removetest.json',
                  "test",
                  feat_dict,
                  candidate_dict,
                  batch_size=args.batchSize,
                  splits=['test'],
                  tokenizer=tok))

    for snapshot in os.listdir(os.path.join(log_dir, 'state_dict')):
        # Only process the selected snapshot; otherwise augmentation data
        # would be generated for every snapshot under state_dict/.
        if snapshot != "best_val_unseen_bleu":
            continue

        # Create the speaker (a listener agent is required by its constructor)
        listner = Seq2SeqAgent(aug_envs[0], "", tok, args.maxAction)
        speaker = Speaker(aug_envs[0], listner, tok)

        # Load the snapshot weights into the speaker
        load_iter = speaker.load(os.path.join(log_dir, 'state_dict', snapshot))
        print("Loaded from iter %d" % load_iter)

        # Augment the env from aug_envs
        for aug_env in aug_envs:
            speaker.env = aug_env

            # Generate an instruction for every path in this split
            path2inst = speaker.get_insts(beam=args.beam, wrapper=tqdm.tqdm)
            data = []
            for datum in aug_env.fake_data:
                datum = datum.copy()
                path_id = datum['path_id']
                if path_id in path2inst:
                    datum['instructions'] = [
                        tok.decode_sentence(path2inst[path_id])
                    ]
                    datum.pop('instr_encoding')  # Remove redundant keys
                    datum.pop('instr_id')
                    data.append(datum)

            print("Totally, %d data has been generated for snapshot %s." %
                  (len(data), snapshot))
            print("Average Length %0.4f" % utils.average_length(path2inst))
            print(datum)  # Print a Sample

            # Save the augmented data as JSON
            os.makedirs(os.path.join(log_dir, 'aug_data'), exist_ok=True)
            beam_tag = "_beam" if args.beam else ""
            out_file = os.path.join(
                log_dir, 'aug_data',
                '%s_%s%s.json' % (snapshot, aug_env.name, beam_tag))
            with open(out_file, 'w') as f:
                json.dump(data, f, sort_keys=True, indent=4,
                          separators=(',', ': '))
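
The function above only runs inside the full R2R training script, so the quickest sanity check is to inspect the JSON it writes. A minimal sketch, assuming a log_dir of 'snap/speaker' and the 'unseen' environment produced by the snapshot selected above (both names are illustrative guesses, not taken from the source):

import json
import os

# Hypothetical locations; adjust to whatever the run above actually used.
log_dir = 'snap/speaker'
aug_file = os.path.join(log_dir, 'aug_data',
                        'best_val_unseen_bleu_unseen.json')

with open(aug_file) as f:
    aug_data = json.load(f)

# Each datum keeps the original path fields plus one generated instruction
# (see the 'instructions' assignment in create_augment_data above).
print("%d augmented paths" % len(aug_data))
print(aug_data[0]['path_id'], aug_data[0]['instructions'][0])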
Example #2

This example runs a trained speaker over each validation environment and records per-path internal and external BLEU scores against the ground-truth instructions.
from collections import defaultdict

import matplotlib.pyplot as plt
import numpy as np

# speaker, tok, and val_envs are assumed to come from the surrounding
# evaluation script.
results = {}

for env_name, (env, evaluator) in val_envs.items():
    print("............ Evaluating %s ............." % env_name)
    speaker.env = env
    path2inst, loss, word_accu, sent_accu = speaker.valid()

    r = defaultdict(dict)
    for path_id in path2inst.keys():
        internal_bleu = evaluator.compute_internal_bleu_score(path_id)
        external_bleu = evaluator.bleu_score({path_id: path2inst[path_id]})[0]
        p = {
            "inference": tok.decode_sentence(path2inst[path_id]),
            "gt": evaluator.gt[str(path_id)]['instructions'],
            "internal_bleu": internal_bleu,
            "external_bleu": external_bleu
        }
        r[path_id] = p
    results[env_name] = r

for env_name in ["val_unseen", "val_seen"]:
    mean_internal_bleu = np.mean(
        [v['internal_bleu'] for v in results[env_name].values()])
    mean_external_bleu = np.mean(
        [v['external_bleu'] for v in results[env_name].values()])

    # The original snippet is truncated inside this call; it is closed
    # minimally here so the code parses.
    plt.hist([v['external_bleu'] for v in results[env_name].values()],
             np.linspace(0, 1, 50))
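
The source breaks off mid-call, so for completeness here is a hedged sketch of how such a per-split BLEU histogram is typically finished; the overlaid legend, axis labels, and output filename are assumptions, not recovered from the original:

for env_name in ["val_unseen", "val_seen"]:
    scores = [v['external_bleu'] for v in results[env_name].values()]
    # Overlay both splits on shared 50-bin axes over [0, 1].
    plt.hist(scores, np.linspace(0, 1, 50), alpha=0.5, label=env_name)
    print("%s: mean external BLEU %.4f over %d paths"
          % (env_name, float(np.mean(scores)), len(scores)))

plt.xlabel("external BLEU")
plt.ylabel("count")
plt.legend()
plt.savefig("bleu_hist.png")  # hypothetical output path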