Example #1
File: run_eval.py Project: viswanathgs/r2c
def run_eval(model,
             model_path,
             ds,
             batch_size,
             num_gpus,
             label_key='label',
             probs_key='label_probs'):
    LOG.info('Loading model state from {}'.format(model_path))
    restore_model_state(model, model_path)

    ds_loader = VCRLoader.from_dataset(
        ds,
        batch_size=(batch_size // num_gpus),
        num_gpus=num_gpus,
        num_workers=(4 * num_gpus),
    )

    def _to_gpu(td):
        # With multiple GPUs, DataParallel scatters the batch itself, so leave it on CPU.
        if num_gpus > 1:
            return td
        for k in td:
            if k != 'metadata':
                # `async` is a reserved keyword in Python 3.7+; PyTorch renamed the
                # argument to `non_blocking`.
                if isinstance(td[k], dict):
                    td[k] = {k2: v.cuda(non_blocking=True) for k2, v in td[k].items()}
                else:
                    td[k] = td[k].cuda(non_blocking=True)
        return td
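
A minimal usage sketch for run_eval (not part of the original example). The config path, the checkpoint path, and the assumption that the first split returned by VCR.eval_splits is the answer task are all hypothetical; the other names come from the snippets on this page.

params = Params.from_file('models/multiatt/default.json')  # hypothetical config path
val_ds = VCR.eval_splits(
    embs_to_load=params['dataset_reader'].get('embs', 'bert_da'),
    only_use_relevant_dets=params['dataset_reader'].get(
        'only_use_relevant_dets', True))[0]  # assumed: index 0 is the answer split
model = Model.from_params(vocab=val_ds.vocab, params=params['model'])
run_eval(model,
         model_path='saves/flagship_answer/best.th',  # hypothetical checkpoint path
         ds=val_ds,
         batch_size=96,
         num_gpus=1)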
Example #2
File: train.py Project: ihaeyong/r2c
def _to_gpu(td):
    # Function header reconstructed; the snippet was cut off mid-function
    # (cf. _to_gpu in Example #1).
    # With multiple GPUs, DataParallel scatters the batch itself, so leave it on CPU.
    if NUM_GPUS > 1:
        return td
    for k in td:
        if isinstance(td[k], dict):
            td[k] = {k2: v.cuda(non_blocking=True) for k2, v in td[k].items()}
        else:
            td[k] = td[k].cuda(non_blocking=True)
    return td


num_workers = (4 * NUM_GPUS if NUM_CPUS == 32 else 2 * NUM_GPUS) - 1
print(f"Using {num_workers} workers out of {NUM_CPUS} possible", flush=True)
loader_params = {
    'batch_size': 96 // NUM_GPUS,
    'num_gpus': NUM_GPUS,
    'num_workers': num_workers
}
train_loader = VCRLoader.from_dataset(train, **loader_params)
val_loader = VCRLoader.from_dataset(val, **loader_params)
test_loader = VCRLoader.from_dataset(test, **loader_params)

ARGS_RESET_EVERY = 100
print("Loading {} for {}".format(params['model'].get('type', 'WTF?'),
                                 'rationales' if args.rationale else 'answer'),
      flush=True)
model = Model.from_params(vocab=train.vocab, params=params['model'])
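# Freeze the detector backbone: BatchNorm layers stop updating running statistics,
# and gradients are disabled for all backbone parameters.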
for submodule in model.detector.backbone.modules():
    if isinstance(submodule, BatchNorm2d):
        submodule.track_running_stats = False
    for p in submodule.parameters():
        p.requires_grad = False

model = DataParallel(model).cuda() if NUM_GPUS > 1 else model.cuda()
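
For context, a hedged sketch of the training loop that would typically follow this setup. The optimizer choice, the learning rate, and the 'loss' key in the model's output dict are assumptions, not taken from the original file.

optimizer = torch.optim.Adam(
    [p for p in model.parameters() if p.requires_grad], lr=2e-4)  # assumed optimizer/lr
model.train()
for batch in train_loader:
    batch = _to_gpu(batch)        # no-op when NUM_GPUS > 1; DataParallel scatters the batch
    output = model(**batch)       # assumed: the model returns a dict containing 'loss'
    loss = output['loss'].mean()  # .mean() in case DataParallel returns one loss per GPU
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()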
Example #3
loader_params = {
    # Opening of the dict reconstructed; the batch size mirrors Example #2.
    'batch_size': 96 // NUM_GPUS,
    'num_gpus': NUM_GPUS,
    'num_workers': num_workers
}

vcr_modes = VCR.eval_splits(
    embs_to_load=params['dataset_reader'].get('embs', 'bert_da'),
    only_use_relevant_dets=params['dataset_reader'].get(
        'only_use_relevant_dets', True))
probs_grp = []
ids_grp = []
for (vcr_dataset,
     mode_long) in zip(vcr_modes,
                       ['answer'] + [f'rationale_{i}' for i in range(4)]):
    mode = mode_long.split('_')[0]

    test_loader = VCRLoader.from_dataset(vcr_dataset, **loader_params)

    # Load the params again because allennlp will delete them... ugh.
    params = Params.from_file(args.params)
    print("Loading {} for {}".format(params['model'].get('type', 'WTF?'),
                                     mode),
          flush=True)
    model = Model.from_params(vocab=vcr_dataset.vocab, params=params['model'])
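    # Keep BatchNorm layers in the detector backbone from updating their running statistics.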
    for submodule in model.detector.backbone.modules():
        if isinstance(submodule, BatchNorm2d):
            submodule.track_running_stats = False

    model_state = torch.load(getattr(args, f'{mode}_ckpt'),
                             map_location=device_mapping(-1))
    model.load_state_dict(model_state)
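
A hedged sketch of how each mode's predictions might then be accumulated into probs_grp and ids_grp. The output key 'label_probs', the 'annot_id' field in the metadata, moving the model to the GPU here, and reusing the _to_gpu helper from Example #2 are all assumptions.

import numpy as np

model = model.cuda()  # assumed: single-GPU evaluation
model.eval()
probs, ids = [], []
with torch.no_grad():
    for batch in test_loader:
        batch = _to_gpu(batch)                       # helper from Example #2, assumed available here
        output = model(**batch)
        probs.append(output['label_probs'].detach().cpu().numpy())  # assumed output key
        ids += [m['annot_id'] for m in batch['metadata']]            # assumed id field
probs_grp.append(np.concatenate(probs, 0))
ids_grp.append(ids)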