def test_batch(model, test_data, vocab, inv_vocab, modelfile_to_load):
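    """Decode the test set in mini-batches (beam size 1) and evaluate with COCO metrics.

    Captions are written as tab-separated id/caption lines to out_test_filename,
    dumped as JSON to eval_test_filename and scored with eval_coco. Relies on the
    module-level args, params, xp, out_test_filename and eval_test_filename.
    """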

    print('Testing (beam size = 1)...')
    print('Writing output to file: {}'.format(out_test_filename))
    serializers.load_hdf5(modelfile_to_load, model)
    batch_test = \
        utils_seq2seq.gen_batch_test(test_data, args.feature, params.batch_size_val, vocab, xp)
    caption_out = []
    output_file = open(out_test_filename, mode='w')
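    # Decode each mini-batch ('test-on-train' forward mode) and collect
    # (image_id, caption) pairs.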
    for vid_batch_test, caption_batch_test, id_batch_test in batch_test:
        # The last batch can be smaller than params.batch_size_val, so use
        # the actual size of the batch returned by the generator.
        current_batch_size = len(vid_batch_test)
        output_test = forward(model, params, vocab, inv_vocab, vid_batch_test,
                              caption_batch_test, 'test-on-train',
                              current_batch_size)
        for ii in range(current_batch_size):
            caption_out.append({
                'image_id': id_batch_test[ii],
                'caption': output_test[ii]
            })
            print('%s %s' % (id_batch_test[ii], output_test[ii]))
            output_file.write(id_batch_test[ii] + '\t' + output_test[ii] +
                              '\n')
    output_file.close()
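    # Dump the collected captions as JSON and run the COCO caption evaluation.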
    with open(eval_test_filename, mode='w') as f:
        json.dump(caption_out, f)
    eval_coco.eval_coco(args.cocotest, eval_test_filename)


def launch_eval_coco(args):
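    """Resolve which run to evaluate and launch the COCO evaluation.

    The run is identified from --run_name and/or --config, defaults referenced
    by the config are loaded, and command-line overrides (fold, samples,
    num_workers) are applied before calling eval_coco(config).
    """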
    # --- Init: fills mode-specific default command-line arguments
    if args.fold is None:
        fold = {"test"}  # Default value for eval_coco
    else:
        fold = set(args.fold)
    assert len(fold) == 1, \
        "Argument fold when evaluating with COCO should be a single fold"

    # --- Find which run and which config file to evaluate the run with
    if args.run_name is None and args.config is None:
        print_utils.print_error(
            "ERROR: At least one of --run_name or --config has to be specified."
        )
        sys.exit()
    elif args.run_name is None and args.config is not None:
        # Load config
        config = run_utils.load_config(args.config)
        # Verify it has a run_name specified
        if "run_name" not in config:
            print_utils.print_error(
                "ERROR: run_name was not found in the provided config file; you can specify it with --run_name"
            )
            sys.exit()
        run_name = config["run_name"]
    elif args.run_name is not None and args.config is None:
        # Load run_name's config
        run_dirpath = frame_field_learning.local_utils.get_run_dirpath(
            args.runs_dirpath, args.run_name)
        config = run_utils.load_config(config_dirpath=run_dirpath)
        run_name = args.run_name
    else:
        # Load specified config and use specified run_name
        config = run_utils.load_config(args.config)
        run_name = args.run_name

    # --- Load default params that the config references via a relative path to another JSON file
    config = run_utils.load_defaults_in_config(
        config, filepath_key="defaults_filepath")

    # --- Override parameters in the config with command-line arguments
    config["eval_params"]["run_name"] = run_name
    if args.samples is not None:
        config["samples"] = args.samples
    config["fold"] = list(fold)

    # Set up num_workers per process:
    if config["num_workers"] is None:
        config["num_workers"] = torch.multiprocessing.cpu_count()

    eval_coco(config)


def test(model, test_data, vocab, inv_vocab, modelfile_to_load, params):
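    """Decode the test set with beam search and evaluate with COCO metrics.

    Each video is decoded individually (batch size 1) using params.beam_size
    and written as a tab-separated id/caption line to out_test_filename, which
    is then converted and scored with eval_coco. Relies on the module-level
    args, xp, out_test_filename and eval_test_filename.
    """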

    print('Testing ...')
    print('Beam size: {}'.format(params.beam_size))
    print('Writing output to file: {}'.format(out_test_filename))
    serializers.load_hdf5(modelfile_to_load, model)
    batch_test = utils_seq2seq.gen_batch_test(test_data, args.feature, 1,
                                              vocab, xp)
    output_file = open(out_test_filename, mode='w')
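    # Beam-search decode one video at a time and write one tab-separated
    # id/caption line per video.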
    for vid_batch, caption_batch, id_batch in batch_test:
        output = predict(model,
                         params,
                         vocab,
                         inv_vocab,
                         vid_batch,
                         batch_size=1,
                         beam_size=params.beam_size)
        print('%s %s' % (id_batch[0], output))
        output_file.write(id_batch[0] + '\t' + output + '\n')
    output_file.close()
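    # Convert the raw output to the COCO evaluation format and score it.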
    utils_coco.convert(out_test_filename, eval_test_filename)
    eval_coco.eval_coco(args.cocotest, eval_test_filename)


def get_current_score(model, vocab, inv_vocab, val_data, source,
                      batch_size_val, eval_filename):
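    """Decode val_data and return its COCO evaluation score.

    Captions are collected as (image_id, caption) pairs, dumped as JSON to
    eval_filename and scored against the module-level coco_ref_filename.
    """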
    batch_test = utils_seq2seq.gen_batch_test(val_data, source, batch_size_val,
                                              vocab, xp)
    caption_out = []
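    # Decode each validation batch and collect (image_id, caption) pairs.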
    for vid_batch_test, caption_batch_test, id_batch_test in batch_test:
        # The last batch can be smaller than batch_size_val, so use the
        # actual size of the batch returned by the generator.
        current_batch_size = len(vid_batch_test)
        output_test = forward(model, params, vocab, inv_vocab, vid_batch_test,
                              caption_batch_test, 'test-on-train',
                              current_batch_size)
        for ii in range(current_batch_size):
            caption_out.append({
                'image_id': id_batch_test[ii],
                'caption': output_test[ii]
            })

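    # Dump the captions as JSON and score them against the COCO reference file.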
    with open(eval_filename, mode='w') as f:
        json.dump(caption_out, f)
    score = eval_coco.eval_coco(coco_ref_filename, eval_filename)
    return score