Example #1
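import os
from collections import OrderedDict

import tqdm

# The classes and helpers below come from the project itself; the module paths here are
# assumptions for illustration, not confirmed against the original repository:
# from options.flow_regression_options import TestFlowRegressionOptions
# from models.flow_regression_model import FlowRegressionModel
# from data.data_loader import CreateDataLoader
# from util.visualizer import Visualizer
# from util.loss_buffer import LossBuffer
# from util import io  # project io helper providing load_json, not the stdlib io module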
parser = TestFlowRegressionOptions()
opt = parser.parse(display=False)
parser.save()
print('load training options.')
train_opt = io.load_json(os.path.join('checkpoints', opt.id, 'train_opt.json'))
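# keep the test-time values of the keys below; every other option is overridden
# by the saved training options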
preserved_opt = {'gpu_ids', 'is_train', 'batch_size', 'which_epoch', 'debug'}
for k, v in train_opt.items():
    if k in opt and (k not in preserved_opt):
        setattr(opt, k, v)
# create model
model = FlowRegressionModel()
model.initialize(opt)
# create data loader
val_loader = CreateDataLoader(opt, split='test')
# create visualizer
visualizer = Visualizer(opt)
# test
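# LossBuffer gathers the per-batch error dicts; get_errors() is assumed to average them over the run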
loss_buffer = LossBuffer(size=len(val_loader))
model.output = {}
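# switch to evaluation mode so BatchNorm/Dropout layers behave deterministically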
model.eval()

for i, data in enumerate(tqdm.tqdm(val_loader, desc='Test')):
    model.set_input(data)
    model.test(compute_loss=True)
    loss_buffer.add(model.get_current_errors())

errors = loss_buffer.get_errors()
info = OrderedDict([('model_id', opt.id), ('epoch', opt.which_epoch)])
log_str = visualizer.log(info, errors, log_in_file=False)
print(log_str)
Example #2

    # Fragment of the training loop body: the outer `for epoch in ...` loop and the setup of
    # opt, model, train_loader, val_loader, visualizer and total_steps are not shown here.
    for i, data in enumerate(tqdm.tqdm(train_loader, desc='Train')):
        total_steps += 1
        model.set_input(data)
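        # trigger a gradient check every opt.check_grad_freq steps (0 disables it)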
        model.optimize_parameters(
            check_grad=(opt.check_grad_freq > 0 and total_steps %
                        opt.check_grad_freq == 0))

        if total_steps % opt.display_freq == 0:
            train_error = model.get_current_errors()
            info = OrderedDict([
                ('id', opt.id),
                ('iter', total_steps),
                ('epoch', epoch),
                ('lr', model.optimizers[0].param_groups[0]['lr']),
            ])
            tqdm.tqdm.write(visualizer.log(info, train_error))

    if epoch % opt.test_epoch_freq == 0:
        # model.get_current_errors()  # clear stale training error info before validation
        model.output = {}
        loss_buffer = LossBuffer(size=len(val_loader))
        for i, data in enumerate(tqdm.tqdm(val_loader, desc='Test')):
            model.set_input(data)
            model.test(compute_loss=True)
            loss_buffer.add(model.get_current_errors())
        test_error = loss_buffer.get_errors()
        info = OrderedDict([
            ('time', time.ctime()),
            ('id', opt.id),
            ('epoch', epoch),
        ])
Example #3
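# Fragment of a test/evaluation routine. Besides the project objects bound earlier
# (opt, model, val_loader, loss_buffer, visualizer), it relies on:
#   import os, time
#   import cv2
#   import numpy as np
#   import tqdm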
    if opt.save_output:
        output_dir = os.path.join(model.save_dir, opt.output_dir)
        io.mkdir_if_missing(output_dir)

    total_time = 0
    for i, data in enumerate(tqdm.tqdm(val_loader, desc='Test')):
        tic = time.time()
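        # put the model and its sub-networks (netG, netF) into eval mode before inference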
        model.eval()
        model.netG.eval()
        model.netF.eval()
        model.set_input(data)
        model.test()
        toc = time.time()
        total_time += (toc - tic)
        loss_buffer.add(model.get_current_errors())
        # save output
        if opt.save_output:
            id_list = model.input['id']
            images = model.output['img_out'].cpu().numpy().transpose(0, 2, 3, 1)
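            # map network outputs from [-1, 1] to uint8 pixel values in [0, 255]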
            images = ((images + 1.0) * 127.5).clip(0, 255).astype(np.uint8)
            for (sid1, sid2), img in zip(id_list, images):
                img = img[..., [2, 1, 0]]  # convert to cv2 format
                cv2.imwrite(os.path.join(output_dir, '%s_%s.jpg' % (sid1, sid2)), img)

    test_error = loss_buffer.get_errors()
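    # average seconds per image, assuming every batch holds exactly opt.batch_size images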
    test_error['sec_per_image'] = total_time / (opt.batch_size * len(val_loader))
    info = OrderedDict([('model_id', opt.id), ('epoch', opt.which_epoch)])
    log_str = visualizer.log(info, test_error, log_in_file=False)
    print(log_str)