Example #1
def test_glean():

    model_cfg = dict(type='GLEAN',
                     generator=dict(type='GLEANStyleGANv2',
                                    in_size=16,
                                    out_size=64,
                                    style_channels=512),
                     discriminator=dict(type='StyleGAN2Discriminator',
                                        in_size=64),
                     pixel_loss=dict(type='L1Loss',
                                     loss_weight=1.0,
                                     reduction='mean'),
                     gan_loss=dict(type='GANLoss',
                                   gan_type='vanilla',
                                   real_label_val=1.0,
                                   fake_label_val=0,
                                   loss_weight=5e-3))

    train_cfg = None
    test_cfg = mmcv.Config(dict(metrics=['PSNR'], crop_border=0))

    # build restorer
    restorer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)

    # prepare data
    inputs = torch.rand(1, 3, 16, 16)
    targets = torch.rand(1, 3, 64, 64)
    data_batch = {'lq': inputs, 'gt': targets}

    restorer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
    meta = [{'lq_path': ''}]

    # test forward_test (cpu)
    with pytest.raises(ValueError):  # iteration must be None or a number
        with torch.no_grad():
            restorer(**data_batch,
                     test_mode=True,
                     save_image=True,
                     meta=meta,
                     iteration='1')
    with pytest.raises(AssertionError):  # test with metric but gt is None
        with torch.no_grad():
            data_batch.pop('gt')
            restorer(**data_batch, test_mode=True)

    # test forward_test (gpu)
    if torch.cuda.is_available():
        data_batch = {'lq': inputs.cuda(), 'gt': targets.cuda()}
        restorer = restorer.cuda()
        with pytest.raises(ValueError):  # iteration must be None or a number
            with torch.no_grad():
                restorer(**data_batch,
                         test_mode=True,
                         save_image=True,
                         meta=meta,
                         iteration='1')
        with pytest.raises(AssertionError):  # test with metric but gt is None
            with torch.no_grad():
                data_batch.pop('gt')
                restorer(**data_batch, test_mode=True)
Example #2
def test_indexnet():
    model_cfg, _, test_cfg = _get_model_cfg(
        'indexnet/indexnet_mobv2_1x16_78k_comp1k.py')
    model_cfg['pretrained'] = None

    # test indexnet inference
    with torch.no_grad():
        indexnet = build_model(model_cfg, train_cfg=None, test_cfg=test_cfg)
        indexnet.eval()
        input_test = _demo_input_test((64, 64))
        output_test = indexnet(**input_test, test_mode=True)
        assert isinstance(output_test['pred_alpha'], np.ndarray)
        assert output_test['pred_alpha'].shape == (64, 64)
        assert_dict_keys_equal(output_test['eval_result'],
                               ['SAD', 'MSE', 'GRAD', 'CONN'])

        # test inference with gpu
        if torch.cuda.is_available():
            indexnet = build_model(
                model_cfg, train_cfg=None, test_cfg=test_cfg).cuda()
            indexnet.eval()
            input_test = _demo_input_test((64, 64), cuda=True)
            output_test = indexnet(**input_test, test_mode=True)
            assert isinstance(output_test['pred_alpha'], np.ndarray)
            assert output_test['pred_alpha'].shape == (64, 64)
            assert_dict_keys_equal(output_test['eval_result'],
                                   ['SAD', 'MSE', 'GRAD', 'CONN'])

    # test forward train, though training is not guaranteed at present
    model_cfg.loss_alpha = None
    model_cfg.loss_comp = dict(type='L1CompositionLoss')
    indexnet = build_model(
        model_cfg,
        train_cfg=mmcv.ConfigDict(train_backbone=True),
        test_cfg=test_cfg)
    input_train = _demo_input_train((64, 64), batch_size=2)
    output_train = indexnet(**input_train)
    assert output_train['num_samples'] == 2
    assert_dict_keys_equal(output_train['losses'], ['loss_comp'])

    if torch.cuda.is_available():
        model_cfg.loss_alpha = dict(type='L1Loss')
        model_cfg.loss_comp = None
        indexnet = build_model(
            model_cfg,
            train_cfg=mmcv.ConfigDict(train_backbone=True),
            test_cfg=test_cfg).cuda()
        input_train = _demo_input_train((64, 64), batch_size=2, cuda=True)
        output_train = indexnet(**input_train)
        assert output_train['num_samples'] == 2
        assert_dict_keys_equal(output_train['losses'], ['loss_alpha'])

    # test forward_dummy
    indexnet.cpu().eval()
    inputs = torch.ones((1, 4, 32, 32))
    indexnet.forward_dummy(inputs)
Example #3
def test_gl_inpaintor():
    cfg = Config.fromfile(
        Path(__file__).parent.joinpath('data/inpaintor_config/gl_test.py'))

    gl = build_model(cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)

    assert gl.__class__.__name__ == 'GLInpaintor'

    if torch.cuda.is_available():
        gt_img = torch.randn(1, 3, 256, 256)
        mask = torch.zeros_like(gt_img)[:, 0:1, ...]
        mask[..., 100:210, 100:210] = 1.
        masked_img = gt_img * (1. - mask)
        mask_bbox = torch.tensor([[100, 100, 110, 110]])
        gl.cuda()
        data_batch = dict(
            gt_img=gt_img.cuda(),
            mask=mask.cuda(),
            masked_img=masked_img.cuda(),
            mask_bbox=mask_bbox.cuda())

        optim_g = torch.optim.SGD(gl.generator.parameters(), lr=0.1)
        optim_d = torch.optim.SGD(gl.disc.parameters(), lr=0.1)
        optim_dict = dict(generator=optim_g, disc=optim_d)

        for i in range(5):
            outputs = gl.train_step(data_batch, optim_dict)

            if i <= 2:
                assert 'loss_l1_hole' in outputs['log_vars']
                assert 'fake_loss' not in outputs['log_vars']
                assert 'real_loss' not in outputs['log_vars']
                assert 'loss_g_fake' not in outputs['log_vars']
            elif i == 3:
                assert 'loss_l1_hole' not in outputs['log_vars']
                assert 'fake_loss' in outputs['log_vars']
                assert 'real_loss' in outputs['log_vars']
                assert 'loss_g_fake' not in outputs['log_vars']
            else:
                assert 'loss_l1_hole' in outputs['log_vars']
                assert 'fake_loss' in outputs['log_vars']
                assert 'real_loss' in outputs['log_vars']
                assert 'loss_g_fake' in outputs['log_vars']

        gl_dirty = build_model(
            cfg.model_dirty, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)
        gl_dirty.cuda()
        res, loss = gl_dirty.generator_loss(gt_img, gt_img, gt_img, data_batch)
        assert len(loss) == 0
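The iteration-dependent assertions above reflect a three-phase schedule: a generator-only warm-up in which only the L1 hole loss is optimized, a short discriminator-only phase with only the real/fake losses, and finally joint training with all losses. A minimal sketch of that phase logic, assuming a warm-up of 3 iterations and a discriminator-only phase of 1 iteration as implied by the assertions (the function and parameter names are illustrative, not taken from the config used here):

def training_phase(cur_iter, iter_tc=3, iter_td=1):
    # generator-only warm-up: only the reconstruction (hole) loss is active
    if cur_iter < iter_tc:
        return 'generator_only'
    # discriminator-only phase: only the real/fake adversarial losses are active
    if cur_iter < iter_tc + iter_td:
        return 'discriminator_only'
    # afterwards generator and discriminator losses are optimized jointly
    return 'joint'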
Example #4
    def __init__(self, trt_file: str, cfg: Any, device_id: int):
        super().__init__()
        base_model = build_model(
            cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
        if isinstance(base_model, BasicRestorer):
            WrapperClass = TensorRTRestorer
        self.wrapper = WrapperClass(base_model, trt_file, device_id)
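Note that the fragment above only assigns the wrapper class for a BasicRestorer; any other model type would reach the final line with an unbound name. A minimal sketch of the same dispatch with an explicit fallback, reusing the BasicRestorer and TensorRTRestorer names from the fragment (the helper name and error message are illustrative, not part of the original source):

def _select_trt_wrapper(base_model):
    # only restorers are supported in this sketch; anything else fails loudly
    if isinstance(base_model, BasicRestorer):
        return TensorRTRestorer
    raise NotImplementedError(
        f'TensorRT wrapping is not supported for {type(base_model).__name__}')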
Example #5
def main():

    args = parse_args()

    if len(args.shape) == 1:
        input_shape = (3, args.shape[0], args.shape[0])
    elif len(args.shape) == 2:
        input_shape = (3, ) + tuple(args.shape)
    elif len(args.shape) == 3:
        input_shape = tuple(args.shape)
    else:
        raise ValueError('invalid input shape')

    cfg = Config.fromfile(args.config)
    model = build_model(cfg.model,
                        train_cfg=cfg.train_cfg,
                        test_cfg=cfg.test_cfg).cuda()
    model.eval()

    if hasattr(model, 'forward_dummy'):
        model.forward = model.forward_dummy
    else:
        raise NotImplementedError(
            'FLOPs counter is currently not supported '
            f'with {model.__class__.__name__}')

    flops, params = get_model_complexity_info(model, input_shape)
    split_line = '=' * 30
    print(f'{split_line}\nInput shape: {input_shape}\n'
          f'Flops: {flops}\nParams: {params}\n{split_line}')
    print('!!!Please be cautious if you use the results in papers. '
          'You may need to check if all ops are supported and verify that the '
          'flops computation is correct.')
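The script above relies on the model exposing forward_dummy so that get_model_complexity_info can drive a single tensor through it. A minimal sketch of the same complexity query on a toy module, assuming get_model_complexity_info is imported from mmcv.cnn (the toy network below is purely illustrative):

import torch.nn as nn
from mmcv.cnn import get_model_complexity_info

# a toy convolutional module, used only to illustrate the call
toy = nn.Sequential(
    nn.Conv2d(3, 8, 3, padding=1),
    nn.ReLU(inplace=True),
    nn.Conv2d(8, 3, 3, padding=1))

# by default per-layer statistics are printed and human-readable strings returned
flops, params = get_model_complexity_info(toy, (3, 64, 64))
print(f'Flops: {flops}\nParams: {params}')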
Example #6
def init_model(config, checkpoint=None, device='cuda:0'):
    """Initialize a model from config file.

    Args:
        config (str or :obj:`mmcv.Config`): Config file path or the config
            object.
        checkpoint (str, optional): Checkpoint path. If left as None, the model
            will not load any weights.
        device (str): The device the model will be deployed on.
            Default: 'cuda:0'.

    Returns:
        nn.Module: The constructed model.
    """
    if isinstance(config, str):
        config = mmcv.Config.fromfile(config)
    elif not isinstance(config, mmcv.Config):
        raise TypeError('config must be a filename or Config object, '
                        f'but got {type(config)}')
    config.model.pretrained = None
    config.test_cfg.metrics = None
    model = build_model(config.model, test_cfg=config.test_cfg)
    if checkpoint is not None:
        checkpoint = load_checkpoint(model, checkpoint)

    model.cfg = config  # save the config in the model for convenience
    model.to(device)
    model.eval()
    return model
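A minimal usage sketch for init_model; the config path and checkpoint below are placeholders chosen only to illustrate the call signature, not files referenced elsewhere in these examples:

# hypothetical paths, for illustration only
model = init_model(
    'configs/restorers/esrgan/esrgan_x4c64b23g32_g1_400k_div2k.py',
    checkpoint='work_dirs/esrgan/latest.pth',
    device='cuda:0')
# the returned module is already in eval mode and keeps its config in model.cfg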
Example #7
def test_gca():
    model_cfg, train_cfg, test_cfg = _get_model_cfg(
        'gca/gca_r34_4x10_200k_comp1k.py')
    model_cfg['pretrained'] = None

    # test model forward in train mode
    model = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
    inputs = _demo_input_train((64, 64), batch_size=2)
    inputs['trimap'] = inputs['trimap'].expand_as(inputs['merged'])
    inputs['meta'][0]['to_onehot'] = True
    outputs = model(inputs['merged'], inputs['trimap'], inputs['meta'],
                    inputs['alpha'])
    assert outputs['num_samples'] == 2
    assert_dict_keys_equal(outputs['losses'], ['loss'])

    if torch.cuda.is_available():
        model = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
        model.cuda()
        inputs = _demo_input_train((64, 64), batch_size=2, cuda=True)
        inputs['trimap'] = inputs['trimap'].expand_as(inputs['merged'])
        inputs['meta'][0]['to_onehot'] = True
        outputs = model(inputs['merged'], inputs['trimap'], inputs['meta'],
                        inputs['alpha'])
        assert outputs['num_samples'] == 2
        assert_dict_keys_equal(outputs['losses'], ['loss'])

    # test model forward in test mode
    with torch.no_grad():
        model_cfg.backbone.encoder.in_channels = 4
        model = build_model(model_cfg, train_cfg=None, test_cfg=test_cfg)
        inputs = _demo_input_test((64, 64))
        outputs = model(**inputs, test_mode=True)
        assert_dict_keys_equal(outputs['eval_result'],
                               ['SAD', 'MSE', 'GRAD', 'CONN'])

        if torch.cuda.is_available():
            model = build_model(model_cfg, train_cfg=None, test_cfg=test_cfg)
            model.cuda()
            inputs = _demo_input_test((64, 64), cuda=True)
            outputs = model(**inputs, test_mode=True)
            assert_dict_keys_equal(outputs['eval_result'],
                                   ['SAD', 'MSE', 'GRAD', 'CONN'])

    # test forward_dummy
    model.cpu().eval()
    inputs = torch.ones((1, 4, 32, 32))
    model.forward_dummy(inputs)
Example #8
def check_params(cfg):

    model = build_model(cfg.model).cuda()
    #     print(model)
    num_param = sum(p.numel() for p in model.parameters())
    if num_param > 1821085:
        raise Exception('model parameters exceed limit')
    else:
        print(
            'there are a total of {} parameters in the model'.format(num_param))
Example #9
def main():

    print('settings:\n', args)
    # annotate training data for the first time
    if args.annotate:
        train_annotation()

    # change config
    config_path = 'configs/restorers/srresnet_srgan/msrresnet_x4c64b16_g1_1000k_div2k.py'
    cfg = change_config(config_path)
    check_params(cfg)

    # Initialize distributed training (only needed once); comment this out if already run
    os.environ['RANK'] = '0'
    os.environ['WORLD_SIZE'] = '1'
    os.environ['MASTER_ADDR'] = '127.0.0.1'
    os.environ['MASTER_PORT'] = '29500'  #'50297'
    init_dist('pytorch', **cfg.dist_params)

    # Build dataset
    datasets = [build_dataset(cfg.data.train)]

    # Build the SRResNet model
    model = build_model(cfg.model,
                        train_cfg=cfg.train_cfg,
                        test_cfg=cfg.test_cfg)

    # Create work_dir
    mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))

    # Meta information
    meta = dict()
    # if cfg.get('exp_name', None) is None:
    #     cfg['exp_name'] = osp.splitext(osp.basename(cfg.work_dir))[0]
    meta['exp_name'] = '_'.join([
        'bs' + str(args.bs), 'iter' + str(args.iter),
        'block' + str(args.num_blocks), args.loss
    ])
    meta['mmedit Version'] = mmedit.__version__
    meta['seed'] = 0
    meta['start_time'] = datetime.now().strftime("%d/%m/%Y %H:%M:%S")

    # Train the model
    train_model(model,
                datasets,
                cfg,
                distributed=True,
                validate=True,
                meta=meta)
Example #10
    def __init__(self, onnx_file, cfg, device_id):
        super(ONNXRuntimeEditing, self).__init__()
        ort_custom_op_path = ''
        try:
            from mmcv.ops import get_onnxruntime_op_path
            ort_custom_op_path = get_onnxruntime_op_path()
        except (ImportError, ModuleNotFoundError):
            warnings.warn('If input model has custom op from mmcv, '
                          'you may have to build mmcv with ONNXRuntime '
                          'from source.')
        session_options = ort.SessionOptions()
        # register custom op for onnxruntime
        if osp.exists(ort_custom_op_path):
            session_options.register_custom_ops_library(ort_custom_op_path)
        sess = ort.InferenceSession(onnx_file, session_options)
        providers = ['CPUExecutionProvider']
        options = [{}]
        is_cuda_available = ort.get_device() == 'GPU'
        if is_cuda_available:
            providers.insert(0, 'CUDAExecutionProvider')
            options.insert(0, {'device_id': device_id})

        sess.set_providers(providers, options)

        self.sess = sess
        self.device_id = device_id
        self.io_binding = sess.io_binding()
        self.output_names = [_.name for _ in sess.get_outputs()]

        base_model = build_model(cfg.model,
                                 train_cfg=None,
                                 test_cfg=cfg.test_cfg)

        if isinstance(base_model, BaseMattor):
            WrapperClass = ONNXRuntimeMattor
        elif isinstance(base_model, BasicRestorer):
            WrapperClass = ONNXRuntimeRestorer
        self.wrapper = WrapperClass(self.sess, self.io_binding,
                                    self.output_names, base_model)
Example #11
def test_liif():

    model_cfg = dict(type='LIIF',
                     generator=dict(type='LIIFEDSR',
                                    encoder=dict(type='EDSR',
                                                 in_channels=3,
                                                 out_channels=3,
                                                 mid_channels=64,
                                                 num_blocks=16),
                                    imnet=dict(
                                        type='MLPRefiner',
                                        in_dim=64,
                                        out_dim=3,
                                        hidden_list=[256, 256, 256, 256]),
                                    local_ensemble=True,
                                    feat_unfold=True,
                                    cell_decode=True,
                                    eval_bsize=30000),
                     rgb_mean=(0.4488, 0.4371, 0.4040),
                     rgb_std=(1., 1., 1.),
                     pixel_loss=dict(type='L1Loss',
                                     loss_weight=1.0,
                                     reduction='mean'))

    scale_max = 4
    train_cfg = None
    test_cfg = Config(dict(metrics=['PSNR', 'SSIM'], crop_border=scale_max))

    # build restorer
    restorer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)

    # test attributes
    assert restorer.__class__.__name__ == 'LIIF'

    # prepare data
    inputs = torch.rand(1, 3, 22, 11)
    targets = torch.rand(1, 128 * 64, 3)
    coord = torch.rand(1, 128 * 64, 2)
    cell = torch.rand(1, 128 * 64, 2)
    data_batch = {'lq': inputs, 'gt': targets, 'coord': coord, 'cell': cell}

    # prepare optimizer
    optim_cfg = dict(type='Adam', lr=1e-4, betas=(0.9, 0.999))
    optimizer = obj_from_dict(optim_cfg, torch.optim,
                              dict(params=restorer.parameters()))

    # test train_step and forward_test (cpu)
    outputs = restorer.train_step(data_batch, optimizer)
    assert isinstance(outputs, dict)
    assert isinstance(outputs['log_vars'], dict)
    assert isinstance(outputs['log_vars']['loss_pix'], float)
    assert outputs['num_samples'] == 1
    assert outputs['results']['lq'].shape == data_batch['lq'].shape
    assert outputs['results']['gt'].shape == data_batch['gt'].shape
    assert torch.is_tensor(outputs['results']['output'])
    assert outputs['results']['output'].size() == (1, 128 * 64, 3)

    # test train_step and forward_test (gpu)
    if torch.cuda.is_available():
        restorer = restorer.cuda()
        data_batch = {
            'lq': inputs.cuda(),
            'gt': targets.cuda(),
            'coord': coord.cuda(),
            'cell': cell.cuda()
        }

        # train_step
        optimizer = obj_from_dict(optim_cfg, torch.optim,
                                  dict(params=restorer.parameters()))
        outputs = restorer.train_step(data_batch, optimizer)
        assert isinstance(outputs, dict)
        assert isinstance(outputs['log_vars'], dict)
        assert isinstance(outputs['log_vars']['loss_pix'], float)
        assert outputs['num_samples'] == 1
        assert outputs['results']['lq'].shape == data_batch['lq'].shape
        assert outputs['results']['gt'].shape == data_batch['gt'].shape
        assert torch.is_tensor(outputs['results']['output'])
        assert outputs['results']['output'].size() == (1, 128 * 64, 3)

        # val_step
        result = restorer.val_step(data_batch, meta=[{'gt_path': ''}])
        assert isinstance(result, dict)
        assert isinstance(result['eval_result'], dict)
        assert result['eval_result'].keys() == set({'PSNR', 'SSIM'})
        assert isinstance(result['eval_result']['PSNR'], np.float64)
        assert isinstance(result['eval_result']['SSIM'], np.float64)
Example #12
def test_mattor_wrapper():
    try:
        import onnxruntime as ort
        from mmedit.core.export.wrappers import (ONNXRuntimeEditing,
                                                 ONNXRuntimeMattor)
    except ImportError:
        pytest.skip('ONNXRuntime is not available.')
    onnx_path = 'tmp.onnx'
    train_cfg = None
    test_cfg = dict(refine=False, metrics=['SAD', 'MSE', 'GRAD', 'CONN'])
    cfg = dict(
        model=dict(
            type='DIM',
            backbone=dict(
                type='SimpleEncoderDecoder',
                encoder=dict(type='VGG16', in_channels=4),
                decoder=dict(type='PlainDecoder')),
            pretrained='open-mmlab://mmedit/vgg16',
            loss_alpha=dict(type='CharbonnierLoss', loss_weight=0.5),
            loss_comp=dict(type='CharbonnierCompLoss', loss_weight=0.5)),
        train_cfg=train_cfg,
        test_cfg=test_cfg)
    cfg = mmcv.Config(cfg)

    pytorch_model = build_model(
        cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)

    img_shape = (32, 32)
    merged = torch.rand(1, 3, img_shape[1], img_shape[0])
    trimap = torch.rand(1, 1, img_shape[1], img_shape[0])
    data_batch = {'merged': merged, 'trimap': trimap}
    inputs = torch.cat([merged, trimap], dim=1)

    pytorch_model.forward = pytorch_model.forward_dummy
    with torch.no_grad():
        torch.onnx.export(
            pytorch_model,
            inputs,
            onnx_path,
            input_names=['input'],
            output_names=['output'],
            export_params=True,
            keep_initializers_as_inputs=False,
            verbose=False,
            opset_version=11)

    wrap_model = ONNXRuntimeEditing(onnx_path, cfg, 0)
    os.remove(onnx_path)
    assert isinstance(wrap_model.wrapper, ONNXRuntimeMattor)

    if ort.get_device() == 'GPU':
        merged = merged.cuda()
        trimap = trimap.cuda()
        data_batch = {'merged': merged, 'trimap': trimap}

    ori_alpha = np.random.random(img_shape).astype(np.float32)
    ori_trimap = np.random.randint(256, size=img_shape).astype(np.float32)
    data_batch['meta'] = [
        dict(
            ori_alpha=ori_alpha,
            ori_trimap=ori_trimap,
            merged_ori_shape=img_shape)
    ]

    with torch.no_grad():
        outputs = wrap_model(**data_batch, test_mode=True)

    assert isinstance(outputs, dict)
    assert 'pred_alpha' in outputs
    pred_alpha = outputs['pred_alpha']
    assert isinstance(pred_alpha, np.ndarray)
    assert pred_alpha.shape == img_shape
Example #13
def test_pconv_inpaintor(init_weights):
    cfg = Config.fromfile(
        'tests/data/inpaintor_config/pconv_inpaintor_test.py')

    if torch.cuda.is_available():
        pconv_inpaintor = build_model(cfg.model,
                                      train_cfg=cfg.train_cfg,
                                      test_cfg=cfg.test_cfg)
        assert pconv_inpaintor.__class__.__name__ == 'PConvInpaintor'
        pconv_inpaintor.cuda()
        gt_img = torch.randn((1, 3, 256, 256)).cuda()
        mask = torch.zeros_like(gt_img)
        mask[..., 50:160, 100:210] = 1.
        masked_img = gt_img * (1. - mask)
        data_batch = dict(gt_img=gt_img, mask=mask, masked_img=masked_img)
        optim_g = torch.optim.SGD(pconv_inpaintor.generator.parameters(),
                                  lr=0.1)
        optim_dict = dict(generator=optim_g)

        outputs = pconv_inpaintor.train_step(data_batch, optim_dict)
        assert outputs['results']['fake_res'].shape == (1, 3, 256, 256)
        assert outputs['results']['final_mask'].shape == (1, 3, 256, 256)
        assert 'loss_l1_hole' in outputs['log_vars']
        assert 'loss_l1_valid' in outputs['log_vars']
        assert 'loss_tv' in outputs['log_vars']

        # test forward dummy
        res = pconv_inpaintor.forward_dummy(
            torch.cat([masked_img, mask], dim=1))
        assert res.shape == (1, 3, 256, 256)

        # test forward test w/o save image
        outputs = pconv_inpaintor.forward_test(masked_img[0:1],
                                               mask[0:1],
                                               gt_img=gt_img[0:1, ...])
        assert 'eval_result' in outputs
        assert outputs['eval_result']['l1'] > 0
        assert outputs['eval_result']['psnr'] > 0
        assert outputs['eval_result']['ssim'] > 0

        # test forward test w/o eval metrics
        pconv_inpaintor.test_cfg = dict()
        pconv_inpaintor.eval_with_metrics = False
        outputs = pconv_inpaintor.forward_test(masked_img[0:1], mask[0:1])
        for key in ['fake_res', 'fake_img']:
            assert outputs[key].size() == (1, 3, 256, 256)

        # test forward test w/ save image
        with tempfile.TemporaryDirectory() as tmpdir:
            outputs = pconv_inpaintor.forward_test(
                masked_img[0:1],
                mask[0:1],
                save_image=True,
                save_path=tmpdir,
                iteration=4396,
                meta=[dict(gt_img_path='igccc.png')])

            assert os.path.exists(os.path.join(tmpdir, 'igccc_4396.png'))

        # test forward test w/ save image w/ gt_img
        with tempfile.TemporaryDirectory() as tmpdir:
            outputs = pconv_inpaintor.forward_test(
                masked_img[0:1],
                mask[0:1],
                save_image=True,
                save_path=tmpdir,
                meta=[dict(gt_img_path='igccc.png')],
                gt_img=gt_img[0:1, ...])

            assert os.path.exists(os.path.join(tmpdir, 'igccc.png'))

            with pytest.raises(AssertionError):
                outputs = pconv_inpaintor.forward_test(masked_img[0:1],
                                                       mask[0:1],
                                                       save_image=True,
                                                       save_path=tmpdir,
                                                       iteration=4396,
                                                       gt_img=gt_img[0:1, ...])
            with pytest.raises(AssertionError):
                outputs = pconv_inpaintor.forward_test(
                    masked_img[0:1],
                    mask[0:1],
                    save_image=True,
                    save_path=None,
                    iteration=4396,
                    meta=[dict(gt_img_path='igccc.png')],
                    gt_img=gt_img[0:1, ...])

    # reset mock to clear some memory usage
    init_weights.reset_mock()
Example #14
def test_cain():
    model_cfg = dict(
        type='CAIN',
        generator=dict(type='CAINNet'),
        pixel_loss=dict(type='L1Loss', loss_weight=1.0, reduction='mean'))

    train_cfg = None
    test_cfg = None

    # build restorer
    restorer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)

    # test attributes
    assert restorer.__class__.__name__ == 'CAIN'
    assert isinstance(restorer.generator, CAINNet)
    assert isinstance(restorer.pixel_loss, L1Loss)

    # prepare data
    inputs = torch.rand(1, 2, 3, 128, 128)
    target = torch.rand(1, 3, 128, 128)
    data_batch = {'inputs': inputs, 'target': target, 'meta': [{'key': '001'}]}

    # prepare optimizer
    optim_cfg = dict(type='Adam', lr=2e-4, betas=(0.9, 0.999))
    optimizer = {
        'generator':
        obj_from_dict(optim_cfg, torch.optim,
                      dict(params=restorer.parameters()))
    }

    # test forward_test
    with torch.no_grad():
        outputs = restorer.forward_test(**data_batch)
    assert torch.equal(outputs['inputs'], data_batch['inputs'])
    assert torch.is_tensor(outputs['output'])
    assert outputs['output'].size() == (1, 3, 128, 128)

    # test train_step
    outputs = restorer.train_step(data_batch, optimizer)
    assert isinstance(outputs, dict)
    assert isinstance(outputs['log_vars'], dict)
    assert isinstance(outputs['log_vars']['loss_pix'], float)
    assert outputs['num_samples'] == 1
    assert torch.equal(outputs['results']['inputs'], data_batch['inputs'])
    assert torch.equal(outputs['results']['target'], data_batch['target'])
    assert torch.is_tensor(outputs['results']['output'])
    assert outputs['results']['output'].size() == (1, 3, 128, 128)

    # test train_step and forward_test (gpu)
    if torch.cuda.is_available():
        restorer = restorer.cuda()
        optimizer['generator'] = obj_from_dict(
            optim_cfg, torch.optim, dict(params=restorer.parameters()))
        data_batch = {
            'inputs': inputs.cuda(),
            'target': target.cuda(),
            'meta': [{
                'key': '001'
            }]
        }

        # forward_test
        with torch.no_grad():
            outputs = restorer.forward_test(**data_batch)
        assert torch.equal(outputs['inputs'], data_batch['inputs'].cpu())
        assert torch.is_tensor(outputs['output'])
        assert outputs['output'].size() == (1, 3, 128, 128)

        # train_step
        outputs = restorer.train_step(data_batch, optimizer)
        assert isinstance(outputs, dict)
        assert isinstance(outputs['log_vars'], dict)
        assert isinstance(outputs['log_vars']['loss_pix'], float)
        assert outputs['num_samples'] == 1
        assert torch.equal(outputs['results']['inputs'],
                           data_batch['inputs'].cpu())
        assert torch.equal(outputs['results']['target'],
                           data_batch['target'].cpu())
        assert torch.is_tensor(outputs['results']['output'])
        assert outputs['results']['output'].size() == (1, 3, 128, 128)

    # test with metric and save image
    test_cfg = dict(metrics=('PSNR', 'SSIM'), crop_border=0)
    test_cfg = mmcv.Config(test_cfg)

    data_batch = {
        'inputs': inputs,
        'target': target,
        'meta': [{
            'key': 'fake_path/fake_name'
        }]
    }

    restorer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)

    with pytest.raises(AssertionError):
        # evaluation with metrics must have target images
        restorer(inputs=inputs, test_mode=True)

    with tempfile.TemporaryDirectory() as tmpdir:
        outputs = restorer(
            **data_batch,
            test_mode=True,
            save_image=True,
            save_path=tmpdir,
            iteration=None)
        assert isinstance(outputs, dict)
        assert isinstance(outputs['eval_result'], dict)
        assert isinstance(outputs['eval_result']['PSNR'], float)
        assert isinstance(outputs['eval_result']['SSIM'], float)

        outputs = restorer(
            **data_batch,
            test_mode=True,
            save_image=True,
            save_path=tmpdir,
            iteration=100)
        assert isinstance(outputs, dict)
        assert isinstance(outputs['eval_result'], dict)
        assert isinstance(outputs['eval_result']['PSNR'], float)
        assert isinstance(outputs['eval_result']['SSIM'], float)
Example #15
def test_restorer_wrapper():
    try:
        import onnxruntime as ort
        from mmedit.core.export.wrappers import (ONNXRuntimeEditing,
                                                 ONNXRuntimeRestorer)
    except ImportError:
        pytest.skip('ONNXRuntime is not available.')

    onnx_path = 'tmp.onnx'
    scale = 4
    train_cfg = None
    test_cfg = None
    cfg = dict(
        model=dict(
            type='BasicRestorer',
            generator=dict(
                type='SRCNN',
                channels=(3, 4, 2, 3),
                kernel_sizes=(9, 1, 5),
                upscale_factor=scale),
            pixel_loss=dict(type='L1Loss', loss_weight=1.0, reduction='mean')),
        train_cfg=train_cfg,
        test_cfg=test_cfg)
    cfg = mmcv.Config(cfg)

    pytorch_model = build_model(
        cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)

    # prepare data
    inputs = torch.rand(1, 3, 2, 2)
    targets = torch.rand(1, 3, 8, 8)
    data_batch = {'lq': inputs, 'gt': targets}

    pytorch_model.forward = pytorch_model.forward_dummy
    with torch.no_grad():
        torch.onnx.export(
            pytorch_model,
            inputs,
            onnx_path,
            input_names=['input'],
            output_names=['output'],
            export_params=True,
            keep_initializers_as_inputs=False,
            verbose=False,
            opset_version=11)

    wrap_model = ONNXRuntimeEditing(onnx_path, cfg, 0)
    os.remove(onnx_path)
    assert isinstance(wrap_model.wrapper, ONNXRuntimeRestorer)

    if ort.get_device() == 'GPU':
        data_batch = {'lq': inputs.cuda(), 'gt': targets.cuda()}

    with torch.no_grad():
        outputs = wrap_model(**data_batch, test_mode=True)

    assert isinstance(outputs, dict)
    assert 'output' in outputs
    output = outputs['output']
    assert isinstance(output, torch.Tensor)
    assert output.shape == targets.shape
Example #16
def main():
    args = parse_args()

    checkpoint_list = os.listdir(args.checkpoint_dir)

    print(checkpoint_list)

    for checkpoint in checkpoint_list:
        if '.pth' in checkpoint:

            cfg = mmcv.Config.fromfile(args.config)
            # set cudnn_benchmark
            if cfg.get('cudnn_benchmark', False):
                torch.backends.cudnn.benchmark = True
            cfg.model.pretrained = None

            # init distributed env first, since logger depends on the dist info.
            if args.launcher == 'none':
                distributed = False
            else:
                distributed = True
                init_dist(args.launcher, **cfg.dist_params)

            rank, _ = get_dist_info()

            # set random seeds
            if args.seed is not None:
                if rank == 0:
                    print('set random seed to', args.seed)
                set_random_seed(args.seed, deterministic=args.deterministic)

            # build the dataloader
            # TODO: support multiple images per gpu (only minor changes are needed)
            dataset = build_dataset(cfg.data.test)
            data_loader = build_dataloader(dataset,
                                           samples_per_gpu=1,
                                           workers_per_gpu=cfg.data.get(
                                               'val_workers_per_gpu',
                                               cfg.data.workers_per_gpu),
                                           dist=distributed,
                                           shuffle=False)

            # build the model and load checkpoint
            model = build_model(cfg.model,
                                train_cfg=None,
                                test_cfg=cfg.test_cfg)

            args.save_image = args.save_path is not None

            # distributed test
            find_unused_parameters = cfg.get('find_unused_parameters', False)
            model = DistributedDataParallelWrapper(
                model,
                device_ids=[torch.cuda.current_device()],
                broadcast_buffers=False,
                find_unused_parameters=find_unused_parameters)

            device_id = torch.cuda.current_device()

            _ = load_checkpoint(
                model,
                os.path.join(args.checkpoint_dir, checkpoint),
                map_location=lambda storage, loc: storage.cuda(device_id))

            outputs = multi_gpu_test(model,
                                     data_loader,
                                     args.tmpdir,
                                     args.gpu_collect,
                                     save_path=args.save_path,
                                     save_image=args.save_image)

            if rank == 0:
                # print metrics
                stats = dataset.evaluate(outputs)
                with open(
                        os.path.join(args.checkpoint_dir,
                                     'eval_result_new.txt'),
                        'a') as write_file:
                    for stat in stats:
                        print('{}: Eval-{}: {}'.format(checkpoint, stat,
                                                       stats[stat]))
                        write_file.write('{}: Eval-{}: {} '.format(
                            checkpoint, stat, stats[stat]))
                    write_file.write('\n')
                # save result pickle
                if args.out:
                    print('writing results to {}'.format(args.out))
                    mmcv.dump(outputs, args.out)
Example #17
def test_basic_interpolator():
    model_cfg = dict(type='BasicInterpolator',
                     generator=dict(type='InterpolateExample'),
                     pixel_loss=dict(type='L1Loss',
                                     loss_weight=1.0,
                                     reduction='mean'))

    train_cfg = None
    test_cfg = None

    # build restorer
    restorer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)

    # test attributes
    assert restorer.__class__.__name__ == 'BasicInterpolator'
    assert isinstance(restorer.generator, InterpolateExample)
    assert isinstance(restorer.pixel_loss, L1Loss)

    # prepare data
    inputs = torch.rand(1, 2, 3, 20, 20)
    target = torch.rand(1, 3, 20, 20)
    data_batch = {'inputs': inputs, 'target': target}

    # prepare optimizer
    optim_cfg = dict(type='Adam', lr=2e-4, betas=(0.9, 0.999))
    optimizer = {
        'generator':
        obj_from_dict(optim_cfg, torch.optim,
                      dict(params=restorer.parameters()))
    }

    # test forward train
    outputs = restorer(**data_batch, test_mode=False)
    assert isinstance(outputs, dict)
    assert isinstance(outputs['losses'], dict)
    assert isinstance(outputs['losses']['loss_pix'], torch.FloatTensor)
    assert outputs['num_samples'] == 1
    assert torch.equal(outputs['results']['inputs'], data_batch['inputs'])
    assert torch.equal(outputs['results']['target'], data_batch['target'])
    assert torch.is_tensor(outputs['results']['output'])
    assert outputs['results']['output'].size() == (1, 3, 20, 20)

    # test forward_test
    with torch.no_grad():
        restorer.val_step(data_batch)
        outputs = restorer(**data_batch, test_mode=True)
    assert torch.equal(outputs['inputs'], data_batch['inputs'])
    assert torch.is_tensor(outputs['output'])
    assert outputs['output'].size() == (1, 3, 20, 20)

    # test forward_dummy
    with torch.no_grad():
        output = restorer.forward_dummy(data_batch['inputs'])
    assert torch.is_tensor(output)
    assert output.size() == (1, 3, 20, 20)

    # test train_step
    outputs = restorer.train_step(data_batch, optimizer)
    assert isinstance(outputs, dict)
    assert isinstance(outputs['log_vars'], dict)
    assert isinstance(outputs['log_vars']['loss_pix'], float)
    assert outputs['num_samples'] == 1
    assert torch.equal(outputs['results']['inputs'], data_batch['inputs'])
    assert torch.equal(outputs['results']['target'], data_batch['target'])
    assert torch.is_tensor(outputs['results']['output'])
    assert outputs['results']['output'].size() == (1, 3, 20, 20)

    # test train_step and forward_test (gpu)
    if torch.cuda.is_available():
        restorer = restorer.cuda()
        optimizer['generator'] = obj_from_dict(
            optim_cfg, torch.optim, dict(params=restorer.parameters()))
        data_batch = {'inputs': inputs.cuda(), 'target': target.cuda()}

        # test forward train
        outputs = restorer(**data_batch, test_mode=False)
        assert isinstance(outputs, dict)
        assert isinstance(outputs['losses'], dict)
        assert isinstance(outputs['losses']['loss_pix'],
                          torch.cuda.FloatTensor)
        assert outputs['num_samples'] == 1
        assert torch.equal(outputs['results']['inputs'],
                           data_batch['inputs'].cpu())
        assert torch.equal(outputs['results']['target'],
                           data_batch['target'].cpu())
        assert torch.is_tensor(outputs['results']['output'])
        assert outputs['results']['output'].size() == (1, 3, 20, 20)

        # forward_test
        with torch.no_grad():
            restorer.val_step(data_batch)
            outputs = restorer(**data_batch, test_mode=True)
        assert torch.equal(outputs['inputs'], data_batch['inputs'].cpu())
        assert torch.is_tensor(outputs['output'])
        assert outputs['output'].size() == (1, 3, 20, 20)

        # train_step
        outputs = restorer.train_step(data_batch, optimizer)
        assert isinstance(outputs, dict)
        assert isinstance(outputs['log_vars'], dict)
        assert isinstance(outputs['log_vars']['loss_pix'], float)
        assert outputs['num_samples'] == 1
        assert torch.equal(outputs['results']['inputs'],
                           data_batch['inputs'].cpu())
        assert torch.equal(outputs['results']['target'],
                           data_batch['target'].cpu())
        assert torch.is_tensor(outputs['results']['output'])
        assert outputs['results']['output'].size() == (1, 3, 20, 20)

    # test with metric and save image
    test_cfg = dict(metrics=('PSNR', 'SSIM'), crop_border=0)
    test_cfg = mmcv.Config(test_cfg)

    data_batch = {
        'inputs': inputs,
        'target': target,
        'meta': [{
            'key': '000001/0000',
            'target_path': 'fake_path/fake_name.png'
        }]
    }

    restorer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)

    with pytest.raises(AssertionError):
        # evaluation with metrics must have target images
        restorer(inputs=inputs, test_mode=True)

    with tempfile.TemporaryDirectory() as tmpdir:
        outputs = restorer(**data_batch,
                           test_mode=True,
                           save_image=True,
                           save_path=tmpdir,
                           iteration=None)
        assert isinstance(outputs, dict)
        assert isinstance(outputs['eval_result'], dict)
        assert isinstance(outputs['eval_result']['PSNR'], float)
        assert isinstance(outputs['eval_result']['SSIM'], float)

        outputs = restorer(**data_batch,
                           test_mode=True,
                           save_image=True,
                           save_path=tmpdir,
                           iteration=100)
        assert isinstance(outputs, dict)
        assert isinstance(outputs['eval_result'], dict)
        assert isinstance(outputs['eval_result']['PSNR'], float)
        assert isinstance(outputs['eval_result']['SSIM'], float)

        outputs = restorer(
            inputs=inputs,
            target=target,
            meta=[{
                'key':
                '000001/0000',
                'inputs_path':
                ['fake_path/fake_name.png', 'fake_path/fake_name.png']
            }],
            test_mode=True,
            save_image=True,
            save_path=tmpdir,
            iteration=100)
        assert isinstance(outputs, dict)
        assert isinstance(outputs['eval_result'], dict)
        assert isinstance(outputs['eval_result']['PSNR'], float)
        assert isinstance(outputs['eval_result']['SSIM'], float)

        with pytest.raises(ValueError):
            # iteration should be number or None
            restorer(**data_batch,
                     test_mode=True,
                     save_image=True,
                     save_path=tmpdir,
                     iteration='100')

        # test forward_test when the output is 5-dimensional
        model_cfg = dict(type='BasicInterpolator',
                         generator=dict(type='InterpolateExample2'),
                         pixel_loss=dict(type='L1Loss',
                                         loss_weight=1.0,
                                         reduction='mean'))
        train_cfg = None
        test_cfg = None
        restorer = build_model(model_cfg,
                               train_cfg=train_cfg,
                               test_cfg=test_cfg)
        outputs = restorer(
            inputs=inputs,
            target=target.unsqueeze(1),
            meta=[{
                'key':
                '000001/0000',
                'inputs_path':
                ['fake_path/fake_name.png', 'fake_path/fake_name.png']
            }],
            test_mode=True,
            save_image=True,
            save_path=tmpdir,
            iteration=100)
        outputs = restorer(
            inputs=inputs,
            target=target.unsqueeze(1),
            meta=[{
                'key':
                '000001/0000',
                'inputs_path':
                ['fake_path/fake_name.png', 'fake_path/fake_name.png']
            }],
            test_mode=True,
            save_image=True,
            save_path=tmpdir,
            iteration=None)

        with pytest.raises(ValueError):
            # iteration should be number or None
            restorer(**data_batch,
                     test_mode=True,
                     save_image=True,
                     save_path=tmpdir,
                     iteration='100')

    # test merge_frames
    input_tensors = torch.rand(2, 2, 3, 256, 256)
    output_tensors = torch.rand(2, 1, 3, 256, 256)
    result = restorer.merge_frames(input_tensors, output_tensors)
    assert isinstance(result, list)
    assert len(result) == 5
    assert result[0].shape == (256, 256, 3)

    # test split_frames
    tensors = torch.rand(1, 10, 3, 256, 256)
    result = restorer.split_frames(tensors)
    assert isinstance(result, torch.Tensor)
    assert result.shape == (9, 2, 3, 256, 256)

    # test evaluate 5d output
    test_cfg = dict(metrics=('PSNR', 'SSIM'), crop_border=0)
    test_cfg = mmcv.Config(test_cfg)
    restorer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
    output = torch.rand(1, 2, 3, 256, 256)
    target = torch.rand(1, 2, 3, 256, 256)
    restorer.evaluate(output, target)
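The merge_frames and split_frames assertions above imply a simple interleaving scheme: a sequence of n frames is split into n - 1 overlapping consecutive pairs, and merging interleaves each input pair with its interpolated frame. A minimal sketch of the split arithmetic, with a hypothetical helper name chosen only for illustration:

import torch

def split_into_pairs(frames):
    # (1, n, c, h, w) -> (n - 1, 2, c, h, w): consecutive overlapping frame pairs
    return torch.stack(
        [frames[0, i:i + 2] for i in range(frames.shape[1] - 1)])

pairs = split_into_pairs(torch.rand(1, 10, 3, 4, 4))
assert pairs.shape == (9, 2, 3, 4, 4)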
Example #18
def test_pix2pix():

    model_cfg = dict(type='Pix2Pix',
                     generator=dict(type='UnetGenerator',
                                    in_channels=3,
                                    out_channels=3,
                                    num_down=8,
                                    base_channels=64,
                                    norm_cfg=dict(type='BN'),
                                    use_dropout=True,
                                    init_cfg=dict(type='normal', gain=0.02)),
                     discriminator=dict(type='PatchDiscriminator',
                                        in_channels=6,
                                        base_channels=64,
                                        num_conv=3,
                                        norm_cfg=dict(type='BN'),
                                        init_cfg=dict(type='normal',
                                                      gain=0.02)),
                     gan_loss=dict(type='GANLoss',
                                   gan_type='vanilla',
                                   real_label_val=1.0,
                                   fake_label_val=0,
                                   loss_weight=1.0),
                     pixel_loss=dict(type='L1Loss',
                                     loss_weight=100.0,
                                     reduction='mean'))

    train_cfg = None
    test_cfg = None

    # build synthesizer
    synthesizer = build_model(model_cfg,
                              train_cfg=train_cfg,
                              test_cfg=test_cfg)

    # test checking gan loss cannot be None
    with pytest.raises(AssertionError):
        bad_model_cfg = copy.deepcopy(model_cfg)
        bad_model_cfg['gan_loss'] = None
        _ = build_model(bad_model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)

    # test attributes
    assert synthesizer.__class__.__name__ == 'Pix2Pix'
    assert isinstance(synthesizer.generator, UnetGenerator)
    assert isinstance(synthesizer.discriminator, PatchDiscriminator)
    assert isinstance(synthesizer.gan_loss, GANLoss)
    assert isinstance(synthesizer.pixel_loss, L1Loss)
    assert synthesizer.train_cfg is None
    assert synthesizer.test_cfg is None

    # prepare data
    inputs = torch.rand(1, 3, 256, 256)
    targets = torch.rand(1, 3, 256, 256)
    data_batch = {'img_a': inputs, 'img_b': targets}
    img_meta = {}
    img_meta['img_a_path'] = 'img_a_path'
    img_meta['img_b_path'] = 'img_b_path'
    data_batch['meta'] = [img_meta]

    # prepare optimizer
    optim_cfg = dict(type='Adam', lr=2e-4, betas=(0.5, 0.999))
    optimizer = {
        'generator':
        obj_from_dict(
            optim_cfg, torch.optim,
            dict(params=getattr(synthesizer, 'generator').parameters())),
        'discriminator':
        obj_from_dict(
            optim_cfg, torch.optim,
            dict(params=getattr(synthesizer, 'discriminator').parameters()))
    }

    # test forward_dummy
    with torch.no_grad():
        output = synthesizer.forward_dummy(data_batch['img_a'])
    assert torch.is_tensor(output)
    assert output.size() == (1, 3, 256, 256)

    # test forward_test
    with torch.no_grad():
        outputs = synthesizer(inputs, targets, [img_meta], test_mode=True)
    assert torch.equal(outputs['real_a'], data_batch['img_a'])
    assert torch.equal(outputs['real_b'], data_batch['img_b'])
    assert torch.is_tensor(outputs['fake_b'])
    assert outputs['fake_b'].size() == (1, 3, 256, 256)

    # val_step
    with torch.no_grad():
        outputs = synthesizer.val_step(data_batch)
    assert torch.equal(outputs['real_a'], data_batch['img_a'])
    assert torch.equal(outputs['real_b'], data_batch['img_b'])
    assert torch.is_tensor(outputs['fake_b'])
    assert outputs['fake_b'].size() == (1, 3, 256, 256)

    # test forward_train
    outputs = synthesizer(inputs, targets, [img_meta], test_mode=False)
    assert torch.equal(outputs['real_a'], data_batch['img_a'])
    assert torch.equal(outputs['real_b'], data_batch['img_b'])
    assert torch.is_tensor(outputs['fake_b'])
    assert outputs['fake_b'].size() == (1, 3, 256, 256)

    # test train_step
    outputs = synthesizer.train_step(data_batch, optimizer)
    assert isinstance(outputs, dict)
    assert isinstance(outputs['log_vars'], dict)
    assert isinstance(outputs['results'], dict)
    for v in [
            'loss_gan_d_fake', 'loss_gan_d_real', 'loss_gan_g', 'loss_pixel'
    ]:
        assert isinstance(outputs['log_vars'][v], float)
    assert outputs['num_samples'] == 1
    assert torch.equal(outputs['results']['real_a'], data_batch['img_a'])
    assert torch.equal(outputs['results']['real_b'], data_batch['img_b'])
    assert torch.is_tensor(outputs['results']['fake_b'])
    assert outputs['results']['fake_b'].size() == (1, 3, 256, 256)

    # test train_step and forward_test (gpu)
    if torch.cuda.is_available():
        synthesizer = synthesizer.cuda()
        optimizer = {
            'generator':
            obj_from_dict(
                optim_cfg, torch.optim,
                dict(params=getattr(synthesizer, 'generator').parameters())),
            'discriminator':
            obj_from_dict(
                optim_cfg, torch.optim,
                dict(
                    params=getattr(synthesizer, 'discriminator').parameters()))
        }
        data_batch_cuda = copy.deepcopy(data_batch)
        data_batch_cuda['img_a'] = inputs.cuda()
        data_batch_cuda['img_b'] = targets.cuda()
        data_batch_cuda['meta'] = [DC(img_meta, cpu_only=True).data]

        # forward_test
        with torch.no_grad():
            outputs = synthesizer(data_batch_cuda['img_a'],
                                  data_batch_cuda['img_b'],
                                  data_batch_cuda['meta'],
                                  test_mode=True)
        assert torch.equal(outputs['real_a'], data_batch_cuda['img_a'].cpu())
        assert torch.equal(outputs['real_b'], data_batch_cuda['img_b'].cpu())
        assert torch.is_tensor(outputs['fake_b'])
        assert outputs['fake_b'].size() == (1, 3, 256, 256)

        # val_step
        with torch.no_grad():
            outputs = synthesizer.val_step(data_batch_cuda)
        assert torch.equal(outputs['real_a'], data_batch_cuda['img_a'].cpu())
        assert torch.equal(outputs['real_b'], data_batch_cuda['img_b'].cpu())
        assert torch.is_tensor(outputs['fake_b'])
        assert outputs['fake_b'].size() == (1, 3, 256, 256)

        # test forward_train
        outputs = synthesizer(data_batch_cuda['img_a'],
                              data_batch_cuda['img_b'],
                              data_batch_cuda['meta'],
                              test_mode=False)
        assert torch.equal(outputs['real_a'], data_batch_cuda['img_a'])
        assert torch.equal(outputs['real_b'], data_batch_cuda['img_b'])
        assert torch.is_tensor(outputs['fake_b'])
        assert outputs['fake_b'].size() == (1, 3, 256, 256)

        # train_step
        outputs = synthesizer.train_step(data_batch_cuda, optimizer)
        assert isinstance(outputs, dict)
        assert isinstance(outputs['log_vars'], dict)
        assert isinstance(outputs['results'], dict)
        for v in [
                'loss_gan_d_fake', 'loss_gan_d_real', 'loss_gan_g',
                'loss_pixel'
        ]:
            assert isinstance(outputs['log_vars'][v], float)
        assert outputs['num_samples'] == 1
        assert torch.equal(outputs['results']['real_a'],
                           data_batch_cuda['img_a'].cpu())
        assert torch.equal(outputs['results']['real_b'],
                           data_batch_cuda['img_b'].cpu())
        assert torch.is_tensor(outputs['results']['fake_b'])
        assert outputs['results']['fake_b'].size() == (1, 3, 256, 256)

    # test disc_steps and disc_init_steps
    data_batch['img_a'] = inputs.cpu()
    data_batch['img_b'] = targets.cpu()
    train_cfg = dict(disc_steps=2, disc_init_steps=2)
    synthesizer = build_model(model_cfg,
                              train_cfg=train_cfg,
                              test_cfg=test_cfg)
    optimizer = {
        'generator':
        obj_from_dict(
            optim_cfg, torch.optim,
            dict(params=getattr(synthesizer, 'generator').parameters())),
        'discriminator':
        obj_from_dict(
            optim_cfg, torch.optim,
            dict(params=getattr(synthesizer, 'discriminator').parameters()))
    }

    # iter 0, 1
    for i in range(2):
        assert synthesizer.step_counter == i
        outputs = synthesizer.train_step(data_batch, optimizer)
        assert isinstance(outputs, dict)
        assert isinstance(outputs['log_vars'], dict)
        assert isinstance(outputs['results'], dict)
        assert outputs['log_vars'].get('loss_gan_g') is None
        assert outputs['log_vars'].get('loss_pixel') is None
        for v in ['loss_gan_d_fake', 'loss_gan_d_real']:
            assert isinstance(outputs['log_vars'][v], float)
        assert outputs['num_samples'] == 1
        assert torch.equal(outputs['results']['real_a'], data_batch['img_a'])
        assert torch.equal(outputs['results']['real_b'], data_batch['img_b'])
        assert torch.is_tensor(outputs['results']['fake_b'])
        assert outputs['results']['fake_b'].size() == (1, 3, 256, 256)
        assert synthesizer.step_counter == i + 1

    # iter 2, 3, 4, 5
    for i in range(2, 6):
        assert synthesizer.step_counter == i
        outputs = synthesizer.train_step(data_batch, optimizer)
        assert isinstance(outputs, dict)
        assert isinstance(outputs['log_vars'], dict)
        assert isinstance(outputs['results'], dict)
        log_check_list = [
            'loss_gan_d_fake', 'loss_gan_d_real', 'loss_gan_g', 'loss_pixel'
        ]
        if i % 2 == 1:
            assert outputs['log_vars'].get('loss_gan_g') is None
            assert outputs['log_vars'].get('loss_pixel') is None
            log_check_list.remove('loss_gan_g')
            log_check_list.remove('loss_pixel')
        for v in log_check_list:
            assert isinstance(outputs['log_vars'][v], float)
        assert outputs['num_samples'] == 1
        assert torch.equal(outputs['results']['real_a'], data_batch['img_a'])
        assert torch.equal(outputs['results']['real_b'], data_batch['img_b'])
        assert torch.is_tensor(outputs['results']['fake_b'])
        assert outputs['results']['fake_b'].size() == (1, 3, 256, 256)
        assert synthesizer.step_counter == i + 1

    # test without pixel loss
    model_cfg_ = copy.deepcopy(model_cfg)
    model_cfg_.pop('pixel_loss')
    synthesizer = build_model(model_cfg_, train_cfg=None, test_cfg=None)
    optimizer = {
        'generator':
        obj_from_dict(
            optim_cfg, torch.optim,
            dict(params=getattr(synthesizer, 'generator').parameters())),
        'discriminator':
        obj_from_dict(
            optim_cfg, torch.optim,
            dict(params=getattr(synthesizer, 'discriminator').parameters()))
    }
    data_batch['img_a'] = inputs.cpu()
    data_batch['img_b'] = targets.cpu()
    outputs = synthesizer.train_step(data_batch, optimizer)
    assert isinstance(outputs, dict)
    assert isinstance(outputs['log_vars'], dict)
    assert isinstance(outputs['results'], dict)
    assert outputs['log_vars'].get('loss_pixel') is None
    for v in ['loss_gan_d_fake', 'loss_gan_d_real', 'loss_gan_g']:
        assert isinstance(outputs['log_vars'][v], float)
    assert outputs['num_samples'] == 1
    assert torch.equal(outputs['results']['real_a'], data_batch['img_a'])
    assert torch.equal(outputs['results']['real_b'], data_batch['img_b'])
    assert torch.is_tensor(outputs['results']['fake_b'])
    assert outputs['results']['fake_b'].size() == (1, 3, 256, 256)

    # test b2a translation
    data_batch['img_a'] = inputs.cpu()
    data_batch['img_b'] = targets.cpu()
    train_cfg = dict(direction='b2a')
    synthesizer = build_model(model_cfg,
                              train_cfg=train_cfg,
                              test_cfg=test_cfg)
    optimizer = {
        'generator':
        obj_from_dict(
            optim_cfg, torch.optim,
            dict(params=getattr(synthesizer, 'generator').parameters())),
        'discriminator':
        obj_from_dict(
            optim_cfg, torch.optim,
            dict(params=getattr(synthesizer, 'discriminator').parameters()))
    }
    assert synthesizer.step_counter == 0
    outputs = synthesizer.train_step(data_batch, optimizer)
    assert isinstance(outputs, dict)
    assert isinstance(outputs['log_vars'], dict)
    assert isinstance(outputs['results'], dict)
    for v in [
            'loss_gan_d_fake', 'loss_gan_d_real', 'loss_gan_g', 'loss_pixel'
    ]:
        assert isinstance(outputs['log_vars'][v], float)
    assert outputs['num_samples'] == 1
    assert torch.equal(outputs['results']['real_a'], data_batch['img_b'])
    assert torch.equal(outputs['results']['real_b'], data_batch['img_a'])
    assert torch.is_tensor(outputs['results']['fake_b'])
    assert outputs['results']['fake_b'].size() == (1, 3, 256, 256)
    assert synthesizer.step_counter == 1

    # test save image
    # show input
    train_cfg = None
    test_cfg = dict(show_input=True)
    synthesizer = build_model(model_cfg,
                              train_cfg=train_cfg,
                              test_cfg=test_cfg)
    with patch.object(mmcv, 'imwrite', return_value=True):
        # save_path must not be None; expect an AssertionError
        with pytest.raises(AssertionError):
            with torch.no_grad():
                _ = synthesizer(inputs,
                                targets, [img_meta],
                                test_mode=True,
                                save_image=True)
        # iteration is None
        with torch.no_grad():
            outputs = synthesizer(inputs,
                                  targets, [img_meta],
                                  test_mode=True,
                                  save_image=True,
                                  save_path='save_path')
        assert torch.equal(outputs['real_a'], data_batch['img_a'])
        assert torch.equal(outputs['real_b'], data_batch['img_b'])
        assert torch.is_tensor(outputs['fake_b'])
        assert outputs['fake_b'].size() == (1, 3, 256, 256)
        assert outputs['saved_flag']
        # iteration is not None
        with torch.no_grad():
            outputs = synthesizer(inputs,
                                  targets, [img_meta],
                                  test_mode=True,
                                  save_image=True,
                                  save_path='save_path',
                                  iteration=1000)
        assert torch.equal(outputs['real_a'], data_batch['img_a'])
        assert torch.equal(outputs['real_b'], data_batch['img_b'])
        assert torch.is_tensor(outputs['fake_b'])
        assert outputs['fake_b'].size() == (1, 3, 256, 256)
        assert outputs['saved_flag']

    # not show input
    train_cfg = None
    test_cfg = dict(show_input=False)
    synthesizer = build_model(model_cfg,
                              train_cfg=train_cfg,
                              test_cfg=test_cfg)
    with patch.object(mmcv, 'imwrite', return_value=True):
        # save_path must not be None; expect an AssertionError
        with pytest.raises(AssertionError):
            with torch.no_grad():
                _ = synthesizer(inputs,
                                targets, [img_meta],
                                test_mode=True,
                                save_image=True)
        # iteration is None
        with torch.no_grad():
            outputs = synthesizer(inputs,
                                  targets, [img_meta],
                                  test_mode=True,
                                  save_image=True,
                                  save_path='save_path')
        assert torch.equal(outputs['real_a'], data_batch['img_a'])
        assert torch.equal(outputs['real_b'], data_batch['img_b'])
        assert torch.is_tensor(outputs['fake_b'])
        assert outputs['fake_b'].size() == (1, 3, 256, 256)
        assert outputs['saved_flag']
        # iteration is not None
        with torch.no_grad():
            outputs = synthesizer(inputs,
                                  targets, [img_meta],
                                  test_mode=True,
                                  save_image=True,
                                  save_path='save_path',
                                  iteration=1000)
        assert torch.equal(outputs['real_a'], data_batch['img_a'])
        assert torch.equal(outputs['real_b'], data_batch['img_b'])
        assert torch.is_tensor(outputs['fake_b'])
        assert outputs['fake_b'].size() == (1, 3, 256, 256)
        assert outputs['saved_flag']
Example #19
def main():
    args = parse_args()

    cfg = Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # update configs according to CLI args
    if args.work_dir is not None:
        cfg.work_dir = args.work_dir
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    cfg.gpus = args.gpus

    if args.autoscale_lr:
        # apply the linear scaling rule (https://arxiv.org/abs/1706.02677)
        cfg.optimizer['lr'] = cfg.optimizer['lr'] * cfg.gpus / 8

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # create work_dir
    mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
    # init the logger before other steps
    timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    log_file = osp.join(cfg.work_dir, f'{timestamp}.log')
    logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)

    # log env info
    env_info_dict = collect_env.collect_env()
    env_info = '\n'.join([f'{k}: {v}' for k, v in env_info_dict.items()])
    dash_line = '-' * 60 + '\n'
    logger.info('Environment info:\n' + dash_line + env_info + '\n' +
                dash_line)

    # log some basic info
    logger.info('Distributed training: {}'.format(distributed))
    logger.info('mmedit Version: {}'.format(__version__))
    logger.info('Config:\n{}'.format(cfg.text))

    # set random seeds
    if args.seed is not None:
        logger.info('Set random seed to {}, deterministic: {}'.format(
            args.seed, args.deterministic))
        set_random_seed(args.seed, deterministic=args.deterministic)
    cfg.seed = args.seed

    model = build_model(cfg.model,
                        train_cfg=cfg.train_cfg,
                        test_cfg=cfg.test_cfg)

    datasets = [build_dataset(cfg.data.train)]
    if len(cfg.workflow) == 2:
        val_dataset = copy.deepcopy(cfg.data.val)
        val_dataset.pipeline = cfg.data.train.pipeline
        datasets.append(build_dataset(val_dataset))
    if cfg.checkpoint_config is not None:
        # save version, config file content and class names in
        # checkpoints as meta data
        cfg.checkpoint_config.meta = dict(
            mmedit_version=__version__,
            config=cfg.text,
        )

    # meta information
    meta = dict()
    if cfg.get('exp_name', None) is None:
        cfg['exp_name'] = osp.splitext(osp.basename(cfg.work_dir))[0]
    meta['exp_name'] = cfg.exp_name
    meta['mmedit Version'] = __version__
    meta['seed'] = args.seed
    meta['env_info'] = env_info

    # add an attribute for visualization convenience
    train_model(model,
                datasets,
                cfg,
                distributed=distributed,
                validate=(not args.no_validate),
                timestamp=timestamp,
                meta=meta)
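
The listing omits parse_args(); below is a minimal argparse sketch inferred only from the attributes that main() reads above. The exact flag names, defaults and launcher choices are assumptions, not necessarily the script's actual interface.

import argparse


def parse_args():
    # Hypothetical sketch: every option mirrors an attribute accessed in main() above.
    parser = argparse.ArgumentParser(description='Train an editing model')
    parser.add_argument('config', help='path to the training config file')
    parser.add_argument('--work-dir', default=None, help='directory for logs and checkpoints')
    parser.add_argument('--resume-from', default=None, help='checkpoint to resume from')
    parser.add_argument('--gpus', type=int, default=1, help='number of GPUs to use')
    parser.add_argument('--autoscale-lr', action='store_true',
                        help='scale the learning rate with the number of GPUs')
    parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm', 'mpi'],
                        default='none', help='job launcher')
    parser.add_argument('--seed', type=int, default=None, help='random seed')
    parser.add_argument('--deterministic', action='store_true',
                        help='use deterministic CUDNN operations')
    parser.add_argument('--no-validate', action='store_true',
                        help='skip validation during training')
    return parser.parse_args()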
Example #20
def test_basicvsr_model():

    model_cfg = dict(
        type='BasicVSR',
        generator=dict(type='BasicVSRNet',
                       mid_channels=64,
                       num_blocks=30,
                       spynet_pretrained=None),
        pixel_loss=dict(type='MSELoss', loss_weight=1.0, reduction='sum'),
    )

    train_cfg = dict(fix_iter=1)
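    # fix_iter=1: BasicVSR freezes the pre-trained SPyNet flow estimator for the first
    # training iteration, which is why the first train_step below runs without updating SPyNet.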
    train_cfg = mmcv.Config(train_cfg)
    test_cfg = None

    # build restorer
    restorer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)

    # test attributes
    assert restorer.__class__.__name__ == 'BasicVSR'
    assert isinstance(restorer.generator, BasicVSRNet)
    assert isinstance(restorer.pixel_loss, MSELoss)

    # prepare data
    inputs = torch.rand(1, 5, 3, 64, 64)
    targets = torch.rand(1, 5, 3, 256, 256)

    if torch.cuda.is_available():
        inputs = inputs.cuda()
        targets = targets.cuda()
        restorer = restorer.cuda()

    # prepare data and optimizer
    data_batch = {'lq': inputs, 'gt': targets}
    optim_cfg = dict(type='Adam', lr=2e-4, betas=(0.9, 0.999))
    optimizer = {
        'generator':
        obj_from_dict(optim_cfg, torch.optim,
                      dict(params=getattr(restorer, 'generator').parameters()))
    }
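    # obj_from_dict instantiates torch.optim.Adam(params=..., lr=2e-4, betas=(0.9, 0.999))
    # from optim_cfg, attaching only the generator's parameters.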

    # train_step (without updating SPyNet)
    outputs = restorer.train_step(data_batch, optimizer)
    assert isinstance(outputs, dict)
    assert isinstance(outputs['log_vars'], dict)
    assert isinstance(outputs['log_vars']['loss_pix'], float)
    assert outputs['num_samples'] == 1
    assert torch.equal(outputs['results']['lq'], data_batch['lq'].cpu())
    assert torch.equal(outputs['results']['gt'], data_batch['gt'].cpu())
    assert torch.is_tensor(outputs['results']['output'])
    assert outputs['results']['output'].size() == (1, 5, 3, 256, 256)

    # train with spynet updated
    outputs = restorer.train_step(data_batch, optimizer)
    assert isinstance(outputs, dict)
    assert isinstance(outputs['log_vars'], dict)
    assert isinstance(outputs['log_vars']['loss_pix'], float)
    assert outputs['num_samples'] == 1
    assert torch.equal(outputs['results']['lq'], data_batch['lq'].cpu())
    assert torch.equal(outputs['results']['gt'], data_batch['gt'].cpu())
    assert torch.is_tensor(outputs['results']['output'])
    assert outputs['results']['output'].size() == (1, 5, 3, 256, 256)

    # test forward_dummy
    with torch.no_grad():
        output = restorer.forward_dummy(data_batch['lq'])
    assert torch.is_tensor(output)
    assert output.size() == (1, 5, 3, 256, 256)

    # forward_test
    with torch.no_grad():
        outputs = restorer(**data_batch, test_mode=True)
    assert torch.equal(outputs['lq'], data_batch['lq'].cpu())
    assert torch.equal(outputs['gt'], data_batch['gt'].cpu())
    assert torch.is_tensor(outputs['output'])
    assert outputs['output'].size() == (1, 5, 3, 256, 256)

    with torch.no_grad():
        outputs = restorer(inputs, test_mode=True)
    assert torch.equal(outputs['lq'], data_batch['lq'].cpu())
    assert torch.is_tensor(outputs['output'])
    assert outputs['output'].size() == (1, 5, 3, 256, 256)

    # test with metric and save image
    train_cfg = mmcv.ConfigDict(fix_iter=1)
    test_cfg = dict(metrics=('PSNR', 'SSIM'), crop_border=0)
    test_cfg = mmcv.Config(test_cfg)

    data_batch = {
        'lq': inputs,
        'gt': targets,
        'meta': [{
            'gt_path': 'fake_path/fake_name.png',
            'key': '000'
        }]
    }

    restorer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)

    if torch.cuda.is_available():
        restorer = restorer.cuda()

    with pytest.raises(AssertionError):
        # evaluation with metrics must have gt images
        restorer(lq=inputs, test_mode=True)

    with tempfile.TemporaryDirectory() as tmpdir:
        outputs = restorer(**data_batch,
                           test_mode=True,
                           save_image=True,
                           save_path=tmpdir,
                           iteration=None)
        assert isinstance(outputs, dict)
        assert isinstance(outputs['eval_result'], dict)
        assert isinstance(outputs['eval_result']['PSNR'], float)
        assert isinstance(outputs['eval_result']['SSIM'], float)

        outputs = restorer(**data_batch,
                           test_mode=True,
                           save_image=True,
                           save_path=tmpdir,
                           iteration=100)
        assert isinstance(outputs, dict)
        assert isinstance(outputs['eval_result'], dict)
        assert isinstance(outputs['eval_result']['PSNR'], float)
        assert isinstance(outputs['eval_result']['SSIM'], float)

        with pytest.raises(ValueError):
            # iteration should be number or None
            restorer(**data_batch,
                     test_mode=True,
                     save_image=True,
                     save_path=tmpdir,
                     iteration='100')
Example #21
def test_real_esrgan():

    model_cfg = dict(
        type='RealESRGAN',
        generator=dict(
            type='MSRResNet',
            in_channels=3,
            out_channels=3,
            mid_channels=4,
            num_blocks=1,
            upscale_factor=4),
        discriminator=dict(type='ModifiedVGG', in_channels=3, mid_channels=2),
        pixel_loss=dict(type='L1Loss', loss_weight=1.0, reduction='mean'),
        gan_loss=dict(
            type='GANLoss',
            gan_type='vanilla',
            loss_weight=1e-1,
            real_label_val=1.0,
            fake_label_val=0),
        is_use_sharpened_gt_in_pixel=True,
        is_use_sharpened_gt_in_percep=True,
        is_use_sharpened_gt_in_gan=True,
        is_use_ema=True,
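        # The is_use_sharpened_gt_* flags make the pixel / perceptual / GAN losses use the
        # sharpened ground truth ('gt_unsharp' in the data batch below) instead of 'gt';
        # is_use_ema keeps an exponential-moving-average copy of the generator for testing.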
    )

    train_cfg = None
    test_cfg = None

    # build restorer
    restorer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)

    # test attributes
    assert restorer.__class__.__name__ == 'RealESRGAN'
    assert isinstance(restorer.generator, MSRResNet)
    assert isinstance(restorer.discriminator, ModifiedVGG)
    assert isinstance(restorer.pixel_loss, L1Loss)
    assert isinstance(restorer.gan_loss, GANLoss)

    # prepare data
    inputs = torch.rand(1, 3, 32, 32)
    targets = torch.rand(1, 3, 128, 128)
    data_batch = {'lq': inputs, 'gt': targets, 'gt_unsharp': targets}

    # prepare optimizer
    optim_cfg = dict(type='Adam', lr=2e-4, betas=(0.9, 0.999))
    optimizer = {
        'generator':
        obj_from_dict(optim_cfg, torch.optim,
                      dict(
                          params=getattr(restorer, 'generator').parameters())),
        'discriminator':
        obj_from_dict(
            optim_cfg, torch.optim,
            dict(params=getattr(restorer, 'discriminator').parameters()))
    }

    # GAN-based models do not support forward training; expect a ValueError
    with pytest.raises(ValueError):
        restorer(**data_batch, test_mode=False)

    # test forward_test
    data_batch.pop('gt_unsharp')
    with torch.no_grad():
        outputs = restorer(**data_batch, test_mode=True)
    assert torch.equal(outputs['lq'], data_batch['lq'])
    assert torch.is_tensor(outputs['output'])
    assert outputs['output'].size() == (1, 3, 128, 128)

    # test forward_dummy
    with torch.no_grad():
        output = restorer.forward_dummy(data_batch['lq'])
    assert torch.is_tensor(output)
    assert output.size() == (1, 3, 128, 128)

    # val_step
    with torch.no_grad():
        outputs = restorer.val_step(data_batch)
    data_batch['gt_unsharp'] = targets
    assert torch.equal(outputs['lq'], data_batch['lq'])
    assert torch.is_tensor(outputs['output'])
    assert outputs['output'].size() == (1, 3, 128, 128)

    # test train_step
    with patch.object(
            restorer,
            'perceptual_loss',
            return_value=(torch.tensor(1.0), torch.tensor(2.0))):
        outputs = restorer.train_step(data_batch, optimizer)
        assert isinstance(outputs, dict)
        assert isinstance(outputs['log_vars'], dict)
        for v in [
                'loss_perceptual', 'loss_gan', 'loss_d_real', 'loss_d_fake',
                'loss_pix'
        ]:
            assert isinstance(outputs['log_vars'][v], float)
        assert outputs['num_samples'] == 1
        assert torch.equal(outputs['results']['lq'], data_batch['lq'])
        assert torch.equal(outputs['results']['gt'], data_batch['gt'])
        assert torch.is_tensor(outputs['results']['output'])
        assert outputs['results']['output'].size() == (1, 3, 128, 128)

    # test train_step and forward_test (gpu)
    if torch.cuda.is_available():
        restorer = restorer.cuda()
        optimizer = {
            'generator':
            obj_from_dict(
                optim_cfg, torch.optim,
                dict(params=getattr(restorer, 'generator').parameters())),
            'discriminator':
            obj_from_dict(
                optim_cfg, torch.optim,
                dict(params=getattr(restorer, 'discriminator').parameters()))
        }
        data_batch = {
            'lq': inputs.cuda(),
            'gt': targets.cuda(),
            'gt_unsharp': targets.cuda()
        }

        # forward_test
        data_batch.pop('gt_unsharp')
        with torch.no_grad():
            outputs = restorer(**data_batch, test_mode=True)
        assert torch.equal(outputs['lq'], data_batch['lq'].cpu())
        assert torch.is_tensor(outputs['output'])
        assert outputs['output'].size() == (1, 3, 128, 128)

        # val_step
        with torch.no_grad():
            outputs = restorer.val_step(data_batch)
        data_batch['gt_unsharp'] = targets.cuda()
        assert torch.equal(outputs['lq'], data_batch['lq'].cpu())
        assert torch.is_tensor(outputs['output'])
        assert outputs['output'].size() == (1, 3, 128, 128)

        # train_step
        with patch.object(
                restorer,
                'perceptual_loss',
                return_value=(torch.tensor(1.0).cuda(),
                              torch.tensor(2.0).cuda())):
            outputs = restorer.train_step(data_batch, optimizer)
            assert isinstance(outputs, dict)
            assert isinstance(outputs['log_vars'], dict)
            for v in [
                    'loss_perceptual', 'loss_gan', 'loss_d_real',
                    'loss_d_fake', 'loss_pix'
            ]:
                assert isinstance(outputs['log_vars'][v], float)
            assert outputs['num_samples'] == 1
            assert torch.equal(outputs['results']['lq'],
                               data_batch['lq'].cpu())
            assert torch.equal(outputs['results']['gt'],
                               data_batch['gt'].cpu())
            assert torch.is_tensor(outputs['results']['output'])
            assert outputs['results']['output'].size() == (1, 3, 128, 128)

    # test disc_steps and disc_init_steps and start_iter
    data_batch = {
        'lq': inputs.cpu(),
        'gt': targets.cpu(),
        'gt_unsharp': targets.cpu()
    }
    train_cfg = dict(disc_steps=2, disc_init_steps=2, start_iter=0)
    restorer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
    with patch.object(
            restorer,
            'perceptual_loss',
            return_value=(torch.tensor(1.0), torch.tensor(2.0))):
        outputs = restorer.train_step(data_batch, optimizer)
        assert isinstance(outputs, dict)
        assert isinstance(outputs['log_vars'], dict)
        for v in ['loss_d_real', 'loss_d_fake']:
            assert isinstance(outputs['log_vars'][v], float)
        assert outputs['num_samples'] == 1
        assert torch.equal(outputs['results']['lq'], data_batch['lq'])
        assert torch.equal(outputs['results']['gt'], data_batch['gt'])
        assert torch.is_tensor(outputs['results']['output'])
        assert outputs['results']['output'].size() == (1, 3, 128, 128)

    # test no discriminator (testing mode)
    model_cfg_ = model_cfg.copy()
    model_cfg_.pop('discriminator')
    restorer = build_model(model_cfg_, train_cfg=train_cfg, test_cfg=test_cfg)
    data_batch.pop('gt_unsharp')
    with torch.no_grad():
        outputs = restorer(**data_batch, test_mode=True)
    data_batch['gt_unsharp'] = targets.cpu()
    assert torch.equal(outputs['lq'], data_batch['lq'])
    assert torch.is_tensor(outputs['output'])
    assert outputs['output'].size() == (1, 3, 128, 128)

    # test without pixel loss and perceptual loss
    model_cfg_ = model_cfg.copy()
    model_cfg_.pop('pixel_loss')
    restorer = build_model(model_cfg_, train_cfg=None, test_cfg=None)

    outputs = restorer.train_step(data_batch, optimizer)
    assert isinstance(outputs, dict)
    assert isinstance(outputs['log_vars'], dict)
    for v in ['loss_gan', 'loss_d_real', 'loss_d_fake']:
        assert isinstance(outputs['log_vars'][v], float)
    assert outputs['num_samples'] == 1
    assert torch.equal(outputs['results']['lq'], data_batch['lq'])
    assert torch.equal(outputs['results']['gt'], data_batch['gt'])
    assert torch.is_tensor(outputs['results']['output'])
    assert outputs['results']['output'].size() == (1, 3, 128, 128)

    # test train_step w/o loss_percep
    restorer = build_model(model_cfg, train_cfg=None, test_cfg=None)
    with patch.object(
            restorer, 'perceptual_loss',
            return_value=(None, torch.tensor(2.0))):
        outputs = restorer.train_step(data_batch, optimizer)
        assert isinstance(outputs, dict)
        assert isinstance(outputs['log_vars'], dict)
        for v in [
                'loss_style', 'loss_gan', 'loss_d_real', 'loss_d_fake',
                'loss_pix'
        ]:
            assert isinstance(outputs['log_vars'][v], float)
        assert outputs['num_samples'] == 1
        assert torch.equal(outputs['results']['lq'], data_batch['lq'])
        assert torch.equal(outputs['results']['gt'], data_batch['gt'])
        assert torch.is_tensor(outputs['results']['output'])
        assert outputs['results']['output'].size() == (1, 3, 128, 128)

    # test train_step w/o loss_style
    restorer = build_model(model_cfg, train_cfg=None, test_cfg=None)
    with patch.object(
            restorer, 'perceptual_loss',
            return_value=(torch.tensor(2.0), None)):
        outputs = restorer.train_step(data_batch, optimizer)
        assert isinstance(outputs, dict)
        assert isinstance(outputs['log_vars'], dict)
        for v in [
                'loss_perceptual', 'loss_gan', 'loss_d_real', 'loss_d_fake',
                'loss_pix'
        ]:
            assert isinstance(outputs['log_vars'][v], float)
        assert outputs['num_samples'] == 1
        assert torch.equal(outputs['results']['lq'], data_batch['lq'])
        assert torch.equal(outputs['results']['gt'], data_batch['gt'])
        assert torch.is_tensor(outputs['results']['output'])
        assert outputs['results']['output'].size() == (1, 3, 128, 128)
Example #22
    if model_type == 'mattor' and args.trimap_path is None:
        raise ValueError('Please set `--trimap-path` to convert mattor model.')

    assert args.opset_version == 11, 'MMEditing only supports opset 11 now'

    config = mmcv.Config.fromfile(args.config)
    config.model.pretrained = None
    # ONNX does not support spectral norm
    if model_type == 'mattor':
        if hasattr(config.model.backbone.encoder, 'with_spectral_norm'):
            config.model.backbone.encoder.with_spectral_norm = False
            config.model.backbone.decoder.with_spectral_norm = False
        config.test_cfg.metrics = None

    # build the model
    model = build_model(config.model, test_cfg=config.test_cfg)
    checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')

    # remove alpha from test_pipeline
    if model_type == 'mattor':
        keys_to_remove = ['alpha', 'ori_alpha']
    elif model_type == 'restorer':
        keys_to_remove = ['gt', 'gt_path']
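    # model_type is assumed to be restricted to 'mattor' or 'restorer' by the (omitted)
    # argument parser; otherwise keys_to_remove would be undefined at this point.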
    for key in keys_to_remove:
        for pipeline in list(config.test_pipeline):
            if 'key' in pipeline and key == pipeline['key']:
                config.test_pipeline.remove(pipeline)
            if 'keys' in pipeline and key in pipeline['keys']:
                pipeline['keys'].remove(key)
                if len(pipeline['keys']) == 0:
                    config.test_pipeline.remove(pipeline)
Example #23
def test_tdan_model():

    model_cfg = dict(
        type='TDAN',
        generator=dict(type='TDANNet',
                       in_channels=3,
                       mid_channels=64,
                       out_channels=3,
                       num_blocks_before_align=5,
                       num_blocks_after_align=10),
        pixel_loss=dict(type='MSELoss', loss_weight=1.0, reduction='sum'),
        lq_pixel_loss=dict(type='MSELoss', loss_weight=1.0, reduction='sum'),
    )

    train_cfg = None
    test_cfg = None

    # build restorer
    restorer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)

    # test attributes
    assert restorer.__class__.__name__ == 'TDAN'
    assert isinstance(restorer.generator, TDANNet)
    assert isinstance(restorer.pixel_loss, MSELoss)

    # prepare data
    inputs = torch.rand(1, 5, 3, 8, 8)
    targets = torch.rand(1, 3, 32, 32)

    # test train_step and forward_test (gpu)
    if torch.cuda.is_available():
        restorer = restorer.cuda()
        data_batch = {'lq': inputs.cuda(), 'gt': targets.cuda()}

        # prepare optimizer
        optim_cfg = dict(type='Adam', lr=2e-4, betas=(0.9, 0.999))
        optimizer = {
            'generator':
            obj_from_dict(
                optim_cfg, torch.optim,
                dict(params=getattr(restorer, 'generator').parameters()))
        }

        # train_step
        outputs = restorer.train_step(data_batch, optimizer)
        assert isinstance(outputs, dict)
        assert isinstance(outputs['log_vars'], dict)
        assert isinstance(outputs['log_vars']['loss_pix'], float)
        assert outputs['num_samples'] == 1
        assert torch.equal(outputs['results']['lq'], data_batch['lq'].cpu())
        assert torch.equal(outputs['results']['gt'], data_batch['gt'].cpu())
        assert torch.is_tensor(outputs['results']['output'])
        assert outputs['results']['output'].size() == (1, 3, 32, 32)

        # test forward_dummy
        with torch.no_grad():
            output = restorer.forward_dummy(data_batch['lq'])
        assert isinstance(output, tuple)
        assert torch.is_tensor(output[0])
        assert output[0].size() == (1, 3, 32, 32)
        assert torch.is_tensor(output[1])
        assert output[1].size() == (1, 5, 3, 8, 8)

        # forward_test
        with torch.no_grad():
            outputs = restorer(**data_batch, test_mode=True)
        assert torch.equal(outputs['lq'], data_batch['lq'].cpu())
        assert torch.equal(outputs['gt'], data_batch['gt'].cpu())
        assert torch.is_tensor(outputs['output'])
        assert outputs['output'].size() == (1, 3, 32, 32)

        with torch.no_grad():
            outputs = restorer(inputs.cuda(), test_mode=True)
        assert torch.equal(outputs['lq'], data_batch['lq'].cpu())
        assert torch.is_tensor(outputs['output'])
        assert outputs['output'].size() == (1, 3, 32, 32)

    # test with metric and save image
    if torch.cuda.is_available():
        train_cfg = mmcv.ConfigDict(tsa_iter=1)
        test_cfg = dict(metrics=('PSNR', 'SSIM'), crop_border=0)
        test_cfg = mmcv.Config(test_cfg)

        data_batch = {
            'lq': inputs.cuda(),
            'gt': targets.cuda(),
            'meta': [{
                'gt_path': 'fake_path/fake_name.png',
                'key': '000/00000000'
            }]
        }

        restorer = build_model(model_cfg,
                               train_cfg=train_cfg,
                               test_cfg=test_cfg).cuda()

        with pytest.raises(AssertionError):
            # evaluation with metrics must have gt images
            restorer(lq=inputs.cuda(), test_mode=True)

        with tempfile.TemporaryDirectory() as tmpdir:
            outputs = restorer(**data_batch,
                               test_mode=True,
                               save_image=True,
                               save_path=tmpdir,
                               iteration=None)
            assert isinstance(outputs, dict)
            assert isinstance(outputs['eval_result'], dict)
            assert isinstance(outputs['eval_result']['PSNR'], float)
            assert isinstance(outputs['eval_result']['SSIM'], float)

            outputs = restorer(**data_batch,
                               test_mode=True,
                               save_image=True,
                               save_path=tmpdir,
                               iteration=100)
            assert isinstance(outputs, dict)
            assert isinstance(outputs['eval_result'], dict)
            assert isinstance(outputs['eval_result']['PSNR'], float)
            assert isinstance(outputs['eval_result']['SSIM'], float)

            with pytest.raises(ValueError):
                # iteration should be number or None
                restorer(**data_batch,
                         test_mode=True,
                         save_image=True,
                         save_path=tmpdir,
                         iteration='100')
Example #24
def test_basic_restorer():
    model_cfg = dict(type='BasicRestorer',
                     generator=dict(type='MSRResNet',
                                    in_channels=3,
                                    out_channels=3,
                                    mid_channels=4,
                                    num_blocks=1,
                                    upscale_factor=4),
                     pixel_loss=dict(type='L1Loss',
                                     loss_weight=1.0,
                                     reduction='mean'))

    train_cfg = None
    test_cfg = None

    # build restorer
    restorer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)

    # test attributes
    assert restorer.__class__.__name__ == 'BasicRestorer'
    assert isinstance(restorer.generator, MSRResNet)
    assert isinstance(restorer.pixel_loss, L1Loss)

    # prepare data
    inputs = torch.rand(1, 3, 2, 2)
    targets = torch.rand(1, 3, 8, 8)
    data_batch = {'lq': inputs, 'gt': targets}

    # prepare optimizer
    optim_cfg = dict(type='Adam', lr=2e-4, betas=(0.9, 0.999))
    optimizer = {
        'generator':
        obj_from_dict(optim_cfg, torch.optim,
                      dict(params=restorer.parameters()))
    }

    # test forward train
    outputs = restorer(**data_batch, test_mode=False)
    assert isinstance(outputs, dict)
    assert isinstance(outputs['losses'], dict)
    assert isinstance(outputs['losses']['loss_pix'], torch.FloatTensor)
    assert outputs['num_samples'] == 1
    assert torch.equal(outputs['results']['lq'], data_batch['lq'])
    assert torch.equal(outputs['results']['gt'], data_batch['gt'])
    assert torch.is_tensor(outputs['results']['output'])
    assert outputs['results']['output'].size() == (1, 3, 8, 8)

    # test forward_test
    with torch.no_grad():
        outputs = restorer(**data_batch, test_mode=True)
    assert torch.equal(outputs['lq'], data_batch['lq'])
    assert torch.is_tensor(outputs['output'])
    assert outputs['output'].size() == (1, 3, 8, 8)

    # test forward_dummy
    with torch.no_grad():
        output = restorer.forward_dummy(data_batch['lq'])
    assert torch.is_tensor(output)
    assert output.size() == (1, 3, 8, 8)

    # test train_step
    outputs = restorer.train_step(data_batch, optimizer)
    assert isinstance(outputs, dict)
    assert isinstance(outputs['log_vars'], dict)
    assert isinstance(outputs['log_vars']['loss_pix'], float)
    assert outputs['num_samples'] == 1
    assert torch.equal(outputs['results']['lq'], data_batch['lq'])
    assert torch.equal(outputs['results']['gt'], data_batch['gt'])
    assert torch.is_tensor(outputs['results']['output'])
    assert outputs['results']['output'].size() == (1, 3, 8, 8)

    # test train_step and forward_test (gpu)
    if torch.cuda.is_available():
        restorer = restorer.cuda()
        optimizer['generator'] = obj_from_dict(
            optim_cfg, torch.optim, dict(params=restorer.parameters()))
        data_batch = {'lq': inputs.cuda(), 'gt': targets.cuda()}

        # test forward train
        outputs = restorer(**data_batch, test_mode=False)
        assert isinstance(outputs, dict)
        assert isinstance(outputs['losses'], dict)
        assert isinstance(outputs['losses']['loss_pix'],
                          torch.cuda.FloatTensor)
        assert outputs['num_samples'] == 1
        assert torch.equal(outputs['results']['lq'], data_batch['lq'].cpu())
        assert torch.equal(outputs['results']['gt'], data_batch['gt'].cpu())
        assert torch.is_tensor(outputs['results']['output'])
        assert outputs['results']['output'].size() == (1, 3, 8, 8)

        # forward_test
        with torch.no_grad():
            outputs = restorer(**data_batch, test_mode=True)
        assert torch.equal(outputs['lq'], data_batch['lq'].cpu())
        assert torch.is_tensor(outputs['output'])
        assert outputs['output'].size() == (1, 3, 8, 8)

        # train_step
        outputs = restorer.train_step(data_batch, optimizer)
        assert isinstance(outputs, dict)
        assert isinstance(outputs['log_vars'], dict)
        assert isinstance(outputs['log_vars']['loss_pix'], float)
        assert outputs['num_samples'] == 1
        assert torch.equal(outputs['results']['lq'], data_batch['lq'].cpu())
        assert torch.equal(outputs['results']['gt'], data_batch['gt'].cpu())
        assert torch.is_tensor(outputs['results']['output'])
        assert outputs['results']['output'].size() == (1, 3, 8, 8)

    # test with metric and save image
    test_cfg = dict(metrics=('PSNR', 'SSIM'), crop_border=0)
    test_cfg = mmcv.Config(test_cfg)

    data_batch = {
        'lq': inputs,
        'gt': targets,
        'meta': [{
            'lq_path': 'fake_path/fake_name.png'
        }]
    }

    restorer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)

    with pytest.raises(AssertionError):
        # evaluation with metrics must have gt images
        restorer(lq=inputs, test_mode=True)

    with tempfile.TemporaryDirectory() as tmpdir:
        outputs = restorer(**data_batch,
                           test_mode=True,
                           save_image=True,
                           save_path=tmpdir,
                           iteration=None)
        assert isinstance(outputs, dict)
        assert isinstance(outputs['eval_result'], dict)
        assert isinstance(outputs['eval_result']['PSNR'], float)
        assert isinstance(outputs['eval_result']['SSIM'], float)

        outputs = restorer(**data_batch,
                           test_mode=True,
                           save_image=True,
                           save_path=tmpdir,
                           iteration=100)
        assert isinstance(outputs, dict)
        assert isinstance(outputs['eval_result'], dict)
        assert isinstance(outputs['eval_result']['PSNR'], float)
        assert isinstance(outputs['eval_result']['SSIM'], float)

        with pytest.raises(ValueError):
            # iteration should be number or None
            restorer(**data_batch,
                     test_mode=True,
                     save_image=True,
                     save_path=tmpdir,
                     iteration='100')
Example #25
def test_real_basicvsr():

    model_cfg = dict(
        type='RealBasicVSR',
        generator=dict(type='RealBasicVSRNet'),
        discriminator=dict(type='UNetDiscriminatorWithSpectralNorm',
                           in_channels=3,
                           mid_channels=64,
                           skip_connection=True),
        pixel_loss=dict(type='L1Loss', loss_weight=1.0, reduction='mean'),
        cleaning_loss=dict(type='L1Loss', loss_weight=1.0, reduction='mean'),
        gan_loss=dict(type='GANLoss',
                      gan_type='vanilla',
                      loss_weight=1e-1,
                      real_label_val=1.0,
                      fake_label_val=0),
        is_use_sharpened_gt_in_pixel=True,
        is_use_sharpened_gt_in_percep=True,
        is_use_sharpened_gt_in_gan=True,
        is_use_ema=True,
    )

    train_cfg = None
    test_cfg = None

    # build restorer
    restorer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)

    # test attributes
    assert restorer.__class__.__name__ == 'RealBasicVSR'
    assert isinstance(restorer.generator, RealBasicVSRNet)
    assert isinstance(restorer.discriminator,
                      UNetDiscriminatorWithSpectralNorm)
    assert isinstance(restorer.pixel_loss, L1Loss)
    assert isinstance(restorer.gan_loss, GANLoss)

    # prepare data
    inputs = torch.rand(1, 5, 3, 64, 64)
    targets = torch.rand(1, 5, 3, 256, 256)
    data_batch = {'lq': inputs, 'gt': targets, 'gt_unsharp': targets}

    # prepare optimizer
    optim_cfg = dict(type='Adam', lr=2e-4, betas=(0.9, 0.999))
    optimizer = {
        'generator':
        obj_from_dict(
            optim_cfg, torch.optim,
            dict(params=getattr(restorer, 'generator').parameters())),
        'discriminator':
        obj_from_dict(
            optim_cfg, torch.optim,
            dict(params=getattr(restorer, 'discriminator').parameters()))
    }

    # GAN-based models do not support forward training; expect a ValueError
    with pytest.raises(ValueError):
        restorer(**data_batch, test_mode=False)

    # test train_step
    with patch.object(restorer,
                      'perceptual_loss',
                      return_value=(torch.tensor(1.0), torch.tensor(2.0))):
        outputs = restorer.train_step(data_batch, optimizer)
        assert isinstance(outputs, dict)
        assert isinstance(outputs['log_vars'], dict)
        for v in [
                'loss_perceptual', 'loss_gan', 'loss_d_real', 'loss_d_fake',
                'loss_pix', 'loss_clean'
        ]:
            assert isinstance(outputs['log_vars'][v], float)
        assert outputs['num_samples'] == 1
        assert torch.equal(outputs['results']['lq'], data_batch['lq'])
        assert torch.equal(outputs['results']['gt'], data_batch['gt'])
        assert torch.is_tensor(outputs['results']['output'])
        assert outputs['results']['output'].size() == (5, 3, 256, 256)

    # test train_step (gpu)
    if torch.cuda.is_available():
        restorer = restorer.cuda()
        optimizer = {
            'generator':
            obj_from_dict(
                optim_cfg, torch.optim,
                dict(params=getattr(restorer, 'generator').parameters())),
            'discriminator':
            obj_from_dict(
                optim_cfg, torch.optim,
                dict(params=getattr(restorer, 'discriminator').parameters()))
        }
        data_batch = {
            'lq': inputs.cuda(),
            'gt': targets.cuda(),
            'gt_unsharp': targets.cuda()
        }

        # train_step
        with patch.object(restorer,
                          'perceptual_loss',
                          return_value=(torch.tensor(1.0).cuda(),
                                        torch.tensor(2.0).cuda())):
            outputs = restorer.train_step(data_batch, optimizer)
            assert isinstance(outputs, dict)
            assert isinstance(outputs['log_vars'], dict)
            for v in [
                    'loss_perceptual', 'loss_gan', 'loss_d_real',
                    'loss_d_fake', 'loss_pix', 'loss_clean'
            ]:
                assert isinstance(outputs['log_vars'][v], float)
            assert outputs['num_samples'] == 1
            assert torch.equal(outputs['results']['lq'],
                               data_batch['lq'].cpu())
            assert torch.equal(outputs['results']['gt'],
                               data_batch['gt'].cpu())
            assert torch.is_tensor(outputs['results']['output'])
            assert outputs['results']['output'].size() == (5, 3, 256, 256)

    # test disc_steps and disc_init_steps and start_iter
    data_batch = {
        'lq': inputs.cpu(),
        'gt': targets.cpu(),
        'gt_unsharp': targets.cpu()
    }
    train_cfg = dict(disc_steps=2, disc_init_steps=2, start_iter=0)
    restorer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
    with patch.object(restorer,
                      'perceptual_loss',
                      return_value=(torch.tensor(1.0), torch.tensor(2.0))):
        outputs = restorer.train_step(data_batch, optimizer)
        assert isinstance(outputs, dict)
        assert isinstance(outputs['log_vars'], dict)
        for v in ['loss_d_real', 'loss_d_fake']:
            assert isinstance(outputs['log_vars'][v], float)
        assert outputs['num_samples'] == 1
        assert torch.equal(outputs['results']['lq'], data_batch['lq'])
        assert torch.equal(outputs['results']['gt'], data_batch['gt'])
        assert torch.is_tensor(outputs['results']['output'])
        assert outputs['results']['output'].size() == (5, 3, 256, 256)

    # test without pixel loss and perceptual loss
    model_cfg_ = model_cfg.copy()
    model_cfg_.pop('pixel_loss')
    restorer = build_model(model_cfg_, train_cfg=None, test_cfg=None)

    outputs = restorer.train_step(data_batch, optimizer)
    assert isinstance(outputs, dict)
    assert isinstance(outputs['log_vars'], dict)
    for v in ['loss_gan', 'loss_d_real', 'loss_d_fake']:
        assert isinstance(outputs['log_vars'][v], float)
    assert outputs['num_samples'] == 1
    assert torch.equal(outputs['results']['lq'], data_batch['lq'])
    assert torch.equal(outputs['results']['gt'], data_batch['gt'])
    assert torch.is_tensor(outputs['results']['output'])
    assert outputs['results']['output'].size() == (5, 3, 256, 256)

    # test train_step w/o loss_percep
    restorer = build_model(model_cfg, train_cfg=None, test_cfg=None)
    with patch.object(restorer,
                      'perceptual_loss',
                      return_value=(None, torch.tensor(2.0))):
        outputs = restorer.train_step(data_batch, optimizer)
        assert isinstance(outputs, dict)
        assert isinstance(outputs['log_vars'], dict)
        for v in [
                'loss_style', 'loss_gan', 'loss_d_real', 'loss_d_fake',
                'loss_pix', 'loss_clean'
        ]:
            assert isinstance(outputs['log_vars'][v], float)
        assert outputs['num_samples'] == 1
        assert torch.equal(outputs['results']['lq'], data_batch['lq'])
        assert torch.equal(outputs['results']['gt'], data_batch['gt'])
        assert torch.is_tensor(outputs['results']['output'])
        assert outputs['results']['output'].size() == (5, 3, 256, 256)

    # test train_step w/o loss_style
    restorer = build_model(model_cfg, train_cfg=None, test_cfg=None)
    with patch.object(restorer,
                      'perceptual_loss',
                      return_value=(torch.tensor(2.0), None)):
        outputs = restorer.train_step(data_batch, optimizer)
        assert isinstance(outputs, dict)
        assert isinstance(outputs['log_vars'], dict)
        for v in [
                'loss_perceptual', 'loss_gan', 'loss_d_real', 'loss_d_fake',
                'loss_pix', 'loss_clean'
        ]:
            assert isinstance(outputs['log_vars'][v], float)
        assert outputs['num_samples'] == 1
        assert torch.equal(outputs['results']['lq'], data_batch['lq'])
        assert torch.equal(outputs['results']['gt'], data_batch['gt'])
        assert torch.is_tensor(outputs['results']['output'])
        assert outputs['results']['output'].size() == (5, 3, 256, 256)
Example #26
def test_dim():
    model_cfg, train_cfg, test_cfg = _get_model_cfg(
        'dim/dim_stage3_v16_pln_1x1_1000k_comp1k.py')
    model_cfg['pretrained'] = None

    # 1. test dim model with refiner
    train_cfg.train_refiner = True
    test_cfg.refine = True

    # test model forward in train mode
    model = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
    input_train = _demo_input_train((64, 64))
    output_train = model(**input_train)
    assert output_train['num_samples'] == 1
    assert_dict_keys_equal(output_train['losses'],
                           ['loss_alpha', 'loss_comp', 'loss_refine'])

    # test model forward in train mode with gpu
    if torch.cuda.is_available():
        model = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
        model.cuda()
        input_train = _demo_input_train((64, 64), cuda=True)
        output_train = model(**input_train)
        assert output_train['num_samples'] == 1
        assert_dict_keys_equal(output_train['losses'],
                               ['loss_alpha', 'loss_comp', 'loss_refine'])

    # test model forward in test mode
    with torch.no_grad():
        model = build_model(model_cfg, train_cfg=None, test_cfg=test_cfg)
        input_test = _demo_input_test((64, 64))
        output_test = model(**input_test, test_mode=True)
        assert isinstance(output_test['pred_alpha'], np.ndarray)
        assert_dict_keys_equal(output_test['eval_result'],
                               ['SAD', 'MSE', 'GRAD', 'CONN'])

        # test model forward in test mode with gpu
        if torch.cuda.is_available():
            model = build_model(model_cfg, train_cfg=None, test_cfg=test_cfg)
            model.cuda()
            input_test = _demo_input_test((64, 64), cuda=True)
            output_test = model(**input_test, test_mode=True)
            assert isinstance(output_test['pred_alpha'], np.ndarray)
            assert_dict_keys_equal(output_test['eval_result'],
                                   ['SAD', 'MSE', 'GRAD', 'CONN'])

    # 2. test dim model without refiner
    model_cfg['refiner'] = None
    test_cfg['metrics'] = None

    # test model forward in train mode
    model = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
    input_train = _demo_input_train((64, 64))
    output_train = model(**input_train)
    assert output_train['num_samples'] == 1
    assert_dict_keys_equal(output_train['losses'], ['loss_alpha', 'loss_comp'])

    # test model forward in train mode with gpu
    if torch.cuda.is_available():
        model = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
        model.cuda()
        input_train = _demo_input_train((64, 64), cuda=True)
        output_train = model(**input_train)
        assert output_train['num_samples'] == 1
        assert_dict_keys_equal(output_train['losses'],
                               ['loss_alpha', 'loss_comp'])

    # test model forward in test mode
    with torch.no_grad():
        model = build_model(model_cfg, train_cfg=None, test_cfg=test_cfg)
        input_test = _demo_input_test((64, 64))
        output_test = model(**input_test, test_mode=True)
        assert isinstance(output_test['pred_alpha'], np.ndarray)
        assert output_test['eval_result'] is None

        # check test with gpu
        if torch.cuda.is_available():
            model = build_model(model_cfg, train_cfg=None, test_cfg=test_cfg)
            model.cuda()
            input_test = _demo_input_test((64, 64), cuda=True)
            output_test = model(**input_test, test_mode=True)
            assert isinstance(output_test['pred_alpha'], np.ndarray)
            assert output_test['eval_result'] is None

    # test forward_dummy
    model.cpu().eval()
    inputs = torch.ones((1, 4, 32, 32))
    model.forward_dummy(inputs)
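
test_dim relies on helpers that this listing omits: _get_model_cfg, _demo_input_train, _demo_input_test and assert_dict_keys_equal. The demo-input helpers presumably build random merged/trimap (and alpha) tensors of the requested spatial size; a rough sketch of the two simplest helpers follows, with the config directory treated as an assumption.

import os.path as osp

import mmcv


def assert_dict_keys_equal(dictionary, target_keys):
    # Check that the dict exposes exactly the expected keys (order-insensitive).
    assert set(dictionary.keys()) == set(target_keys)


def _get_model_cfg(fname):
    # Assumed layout: matting configs live under 'configs/mattors/' relative to the repo root.
    config = mmcv.Config.fromfile(osp.join('configs/mattors', fname))
    return config.model, config.train_cfg, config.test_cfg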
Example #27
def test_ttsr():
    model_cfg = dict(type='TTSR',
                     generator=dict(type='TTSRNet',
                                    in_channels=3,
                                    out_channels=3,
                                    mid_channels=64,
                                    num_blocks=(16, 16, 8, 4)),
                     extractor=dict(type='LTE'),
                     transformer=dict(type='SearchTransformer'),
                     discriminator=dict(type='TTSRDiscriminator', in_size=64),
                     pixel_loss=dict(type='L1Loss',
                                     loss_weight=1.0,
                                     reduction='mean'),
                     perceptual_loss=dict(type='PerceptualLoss',
                                          layer_weights={'29': 1.0},
                                          vgg_type='vgg19',
                                          perceptual_weight=1e-2,
                                          style_weight=0.001,
                                          criterion='mse'),
                     transferal_perceptual_loss=dict(
                         type='TransferalPerceptualLoss',
                         loss_weight=1e-2,
                         use_attention=False,
                         criterion='mse'),
                     gan_loss=dict(type='GANLoss',
                                   gan_type='vanilla',
                                   loss_weight=1e-3,
                                   real_label_val=1.0,
                                   fake_label_val=0))

    scale = 4
    train_cfg = None
    test_cfg = Config(dict(metrics=['PSNR', 'SSIM'], crop_border=scale))

    # build restorer
    restorer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)

    # test attributes
    assert restorer.__class__.__name__ == 'TTSR'

    # prepare data
    inputs = torch.rand(1, 3, 16, 16)
    targets = torch.rand(1, 3, 64, 64)
    ref = torch.rand(1, 3, 64, 64)
    data_batch = {
        'lq': inputs,
        'gt': targets,
        'ref': ref,
        'lq_up': ref,
        'ref_downup': ref
    }

    # prepare optimizer
    optim_cfg_g = dict(type='Adam', lr=1e-4, betas=(0.9, 0.999))
    optim_cfg_d = dict(type='Adam', lr=1e-4, betas=(0.9, 0.999))
    optimizer = dict(
        generator=obj_from_dict(optim_cfg_g, torch.optim,
                                dict(params=restorer.parameters())),
        discriminator=obj_from_dict(optim_cfg_d, torch.optim,
                                    dict(params=restorer.parameters())))

    # test train_step and forward_test (cpu)
    outputs = restorer.train_step(data_batch, optimizer)
    assert isinstance(outputs, dict)
    assert isinstance(outputs['log_vars'], dict)
    assert isinstance(outputs['log_vars']['loss_pix'], float)
    assert outputs['num_samples'] == 1
    assert outputs['results']['lq'].shape == data_batch['lq'].shape
    assert outputs['results']['gt'].shape == data_batch['gt'].shape
    assert torch.is_tensor(outputs['results']['output'])
    assert outputs['results']['output'].size() == (1, 3, 64, 64)

    # test train_step and forward_test (gpu)
    if torch.cuda.is_available():
        restorer = restorer.cuda()
        data_batch = {
            'lq': inputs.cuda(),
            'gt': targets.cuda(),
            'ref': ref.cuda(),
            'lq_up': ref.cuda(),
            'ref_downup': ref.cuda()
        }

        # train_step
        optimizer = dict(
            generator=obj_from_dict(optim_cfg_g, torch.optim,
                                    dict(params=restorer.parameters())),
            discriminator=obj_from_dict(optim_cfg_d, torch.optim,
                                        dict(params=restorer.parameters())))
        outputs = restorer.train_step(data_batch, optimizer)
        assert isinstance(outputs, dict)
        assert isinstance(outputs['log_vars'], dict)
        assert isinstance(outputs['log_vars']['loss_pix'], float)
        assert outputs['num_samples'] == 1
        assert outputs['results']['lq'].shape == data_batch['lq'].shape
        assert outputs['results']['gt'].shape == data_batch['gt'].shape
        assert torch.is_tensor(outputs['results']['output'])
        assert outputs['results']['output'].size() == (1, 3, 64, 64)

        # val_step
        result = restorer.val_step(data_batch, meta=[{'gt_path': ''}])
        assert isinstance(result, dict)
        assert isinstance(result['eval_result'], dict)
        assert result['eval_result'].keys() == set({'PSNR', 'SSIM'})
        assert isinstance(result['eval_result']['PSNR'], np.float64)
        assert isinstance(result['eval_result']['SSIM'], np.float64)
Example #28
def test_one_stage_inpaintor():
    cfg = Config.fromfile('tests/data/inpaintor_config/one_stage_gl.py')

    # mock perceptual loss for test speed
    cfg.model.loss_composed_percep = None
    inpaintor = build_model(cfg.model,
                            train_cfg=cfg.train_cfg,
                            test_cfg=cfg.test_cfg)

    # modify attributes for mocking
    inpaintor.with_composed_percep_loss = True
    inpaintor.loss_percep = None

    # test attributes
    assert inpaintor.__class__.__name__ == 'OneStageInpaintor'
    assert isinstance(inpaintor.generator, GLEncoderDecoder)
    assert inpaintor.with_l1_hole_loss
    assert inpaintor.with_l1_valid_loss
    assert inpaintor.with_tv_loss
    assert inpaintor.with_composed_percep_loss
    assert inpaintor.with_out_percep_loss
    assert inpaintor.with_gan
    assert inpaintor.with_gp_loss
    assert inpaintor.with_disc_shift_loss
    assert inpaintor.is_train
    assert inpaintor.train_cfg['disc_step'] == 1
    assert inpaintor.disc_step_count == 0

    with patch.object(inpaintor,
                      'loss_percep',
                      return_value=(torch.tensor(1.0), None)):

        input_x = torch.randn(1, 3, 256, 256)
        with pytest.raises(NotImplementedError):
            inpaintor.forward_train(input_x)

        if torch.cuda.is_available():
            gt_img = torch.randn(1, 3, 256, 256).cuda()
            mask = torch.zeros_like(gt_img)[:, 0:1, ...]
            mask[..., 20:100, 100:120] = 1.
            masked_img = gt_img * (1. - mask)
            inpaintor.cuda()
            data_batch = dict(gt_img=gt_img, mask=mask, masked_img=masked_img)
            output = inpaintor.forward_test(**data_batch)
            assert 'eval_result' in output

            output = inpaintor.val_step(data_batch)
            assert 'eval_result' in output

            optim_g = torch.optim.SGD(inpaintor.generator.parameters(), lr=0.1)
            optim_d = torch.optim.SGD(inpaintor.disc.parameters(), lr=0.1)
            optim_dict = dict(generator=optim_g, disc=optim_d)

            outputs = inpaintor.train_step(data_batch, optim_dict)
            assert outputs['num_samples'] == 1
            results = outputs['results']
            assert results['fake_res'].shape == (1, 3, 256, 256)
            assert 'loss_l1_hole' in outputs['log_vars']
            assert 'loss_l1_valid' in outputs['log_vars']
            assert 'loss_composed_percep' in outputs['log_vars']
            assert 'loss_composed_style' not in outputs['log_vars']
            assert 'loss_out_percep' in outputs['log_vars']
            assert 'loss_out_style' not in outputs['log_vars']
            assert 'loss_tv' in outputs['log_vars']
            assert 'fake_loss' in outputs['log_vars']
            assert 'real_loss' in outputs['log_vars']
            assert 'loss_g_fake' in outputs['log_vars']

            # test forward dummy
            res = inpaintor.forward_dummy(torch.cat([masked_img, mask], dim=1))
            assert res.shape == (1, 3, 256, 256)

            # test forward test w/o save image
            outputs = inpaintor.forward_test(masked_img[0:1],
                                             mask[0:1],
                                             gt_img=gt_img[0:1, ...])
            assert 'eval_result' in outputs
            assert outputs['eval_result']['l1'] > 0
            assert outputs['eval_result']['psnr'] > 0
            assert outputs['eval_result']['ssim'] > 0

            # test forward test w/o eval metrics
            inpaintor.test_cfg = dict()
            inpaintor.eval_with_metrics = False
            outputs = inpaintor.forward_test(masked_img[0:1], mask[0:1])
            for key in ['fake_res', 'fake_img']:
                assert outputs[key].size() == (1, 3, 256, 256)

            # test forward test w/ save image
            with tempfile.TemporaryDirectory() as tmpdir:
                outputs = inpaintor.forward_test(
                    masked_img[0:1],
                    mask[0:1],
                    save_image=True,
                    save_path=tmpdir,
                    iteration=4396,
                    meta=[dict(gt_img_path='igccc.png')])

                assert os.path.exists(os.path.join(tmpdir, 'igccc_4396.png'))

            # test forward test w/ save image w/ gt_img
            with tempfile.TemporaryDirectory() as tmpdir:
                outputs = inpaintor.forward_test(
                    masked_img[0:1],
                    mask[0:1],
                    save_image=True,
                    save_path=tmpdir,
                    meta=[dict(gt_img_path='igccc.png')],
                    gt_img=gt_img[0:1, ...])

                assert os.path.exists(os.path.join(tmpdir, 'igccc.png'))

                with pytest.raises(AssertionError):
                    outputs = inpaintor.forward_test(masked_img[0:1],
                                                     mask[0:1],
                                                     save_image=True,
                                                     save_path=tmpdir,
                                                     iteration=4396,
                                                     gt_img=gt_img[0:1, ...])
                with pytest.raises(AssertionError):
                    outputs = inpaintor.forward_test(
                        masked_img[0:1],
                        mask[0:1],
                        save_image=True,
                        save_path=None,
                        iteration=4396,
                        meta=[dict(gt_img_path='igccc.png')],
                        gt_img=gt_img[0:1, ...])

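            # with disc_step=2 the first train_step only updates the discriminator,
            # so no generator (l1 hole) loss is logged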
            cfg_ = copy.deepcopy(cfg)
            cfg_.train_cfg.disc_step = 2
            inpaintor = build_model(cfg_.model,
                                    train_cfg=cfg_.train_cfg,
                                    test_cfg=cfg_.test_cfg)
            inpaintor.cuda()
            assert inpaintor.train_cfg.disc_step == 2
            outputs = inpaintor.train_step(data_batch, optim_dict)
            assert 'loss_l1_hole' not in outputs['log_vars']
Example #29
def main():
    args = parse_args()

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    rank, _ = get_dist_info()

    # set random seeds
    if args.seed is not None:
        if rank == 0:
            print('set random seed to', args.seed)
        set_random_seed(args.seed, deterministic=args.deterministic)

    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = build_dataset(cfg.data.test)

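    # dataloader settings: fields copied from cfg.data, then fixed test-time
    # defaults, then cfg.data.test_dataloader overrides (later entries win)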
    loader_cfg = {
        **dict((k, cfg.data[k]) for k in ['workers_per_gpu'] if k in cfg.data),
        **dict(samples_per_gpu=1,
               drop_last=False,
               shuffle=False,
               dist=distributed),
        **cfg.data.get('test_dataloader', {})
    }

    data_loader = build_dataloader(dataset, **loader_cfg)

    # build the model and load checkpoint
    model = build_model(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)

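    # result images are saved only when a save path is given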
    args.save_image = args.save_path is not None
    empty_cache = cfg.get('empty_cache', False)
    if not distributed:
        _ = load_checkpoint(model, args.checkpoint, map_location='cpu')
        model = MMDataParallel(model, device_ids=[0])
        outputs = single_gpu_test(model,
                                  data_loader,
                                  save_path=args.save_path,
                                  save_image=args.save_image)
    else:
        find_unused_parameters = cfg.get('find_unused_parameters', False)
        model = DistributedDataParallelWrapper(
            model,
            device_ids=[torch.cuda.current_device()],
            broadcast_buffers=False,
            find_unused_parameters=find_unused_parameters)

        device_id = torch.cuda.current_device()
        _ = load_checkpoint(
            model,
            args.checkpoint,
            map_location=lambda storage, loc: storage.cuda(device_id))
        outputs = multi_gpu_test(model,
                                 data_loader,
                                 args.tmpdir,
                                 args.gpu_collect,
                                 save_path=args.save_path,
                                 save_image=args.save_image,
                                 empty_cache=empty_cache)

    if rank == 0:
        print('')
        # print metrics
        stats = dataset.evaluate(outputs)
        for stat in stats:
            print('Eval-{}: {}'.format(stat, stats[stat]))

        # save result pickle
        if args.out:
            print('writing results to {}'.format(args.out))
            mmcv.dump(outputs, args.out)
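
The main() function above depends on a parse_args() helper that is not included in this example. As a hypothetical sketch only, the argparse definition below is inferred from the attributes main() actually reads (config, checkpoint, launcher, seed, deterministic, save_path, out, tmpdir, gpu_collect); the real script may name or default these flags differently.

import argparse


def parse_args():
    # Hypothetical sketch: flags inferred from the attributes read in main() above.
    parser = argparse.ArgumentParser(description='Test (and eval) a model')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('checkpoint', help='checkpoint file')
    parser.add_argument('--out', default=None, help='file to save the result pickle')
    parser.add_argument('--save-path', default=None,
                        help='directory to store result images (enables saving)')
    parser.add_argument('--seed', type=int, default=None, help='random seed')
    parser.add_argument('--deterministic', action='store_true',
                        help='use deterministic cuDNN algorithms')
    parser.add_argument('--tmpdir', default=None,
                        help='tmp dir for collecting results in distributed testing')
    parser.add_argument('--gpu-collect', action='store_true',
                        help='collect results on GPU in distributed testing')
    parser.add_argument('--launcher', default='none',
                        choices=['none', 'pytorch', 'slurm', 'mpi'],
                        help='job launcher')
    return parser.parse_args()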
Example #30
def test_edvr_model():

    model_cfg = dict(
        type='EDVR',
        generator=dict(
            type='EDVRNet',
            in_channels=3,
            out_channels=3,
            mid_channels=8,
            num_frames=5,
            deform_groups=2,
            num_blocks_extraction=1,
            num_blocks_reconstruction=1,
            center_frame_idx=2,
            with_tsa=False),
        pixel_loss=dict(type='L1Loss', loss_weight=1.0, reduction='sum'),
    )

    train_cfg = None
    test_cfg = None

    # build restorer
    restorer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)

    # test attributes
    assert restorer.__class__.__name__ == 'EDVR'
    assert isinstance(restorer.generator, EDVRNet)
    assert isinstance(restorer.pixel_loss, L1Loss)

    # prepare data
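    # 5 low-resolution 8x8 frames in, one restored 32x32 center frame out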
    inputs = torch.rand(1, 5, 3, 8, 8)
    targets = torch.rand(1, 3, 32, 32)

    # test train_step and forward_test (gpu)
    if torch.cuda.is_available():
        restorer = restorer.cuda()
        data_batch = {'lq': inputs.cuda(), 'gt': targets.cuda()}

        # prepare optimizer
        optim_cfg = dict(type='Adam', lr=2e-4, betas=(0.9, 0.999))
        optimizer = {
            'generator':
            obj_from_dict(
                optim_cfg, torch.optim,
                dict(params=getattr(restorer, 'generator').parameters()))
        }

        # train_step
        outputs = restorer.train_step(data_batch, optimizer)
        assert isinstance(outputs, dict)
        assert isinstance(outputs['log_vars'], dict)
        assert isinstance(outputs['log_vars']['loss_pix'], float)
        assert outputs['num_samples'] == 1
        assert torch.equal(outputs['results']['lq'], data_batch['lq'].cpu())
        assert torch.equal(outputs['results']['gt'], data_batch['gt'].cpu())
        assert torch.is_tensor(outputs['results']['output'])
        assert outputs['results']['output'].size() == (1, 3, 32, 32)

        # with TSA
        model_cfg['generator']['with_tsa'] = True

        with pytest.raises(KeyError):
            # In TSA mode, train_cfg must contain "tsa_iter"
            train_cfg = dict(other_content='xxx')
            restorer = build_model(
                model_cfg, train_cfg=train_cfg, test_cfg=test_cfg).cuda()
            outputs = restorer.train_step(data_batch, optimizer)

            train_cfg = None
            restorer = build_model(
                model_cfg, train_cfg=train_cfg, test_cfg=test_cfg).cuda()
            outputs = restorer.train_step(data_batch, optimizer)

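        # a valid train_cfg with tsa_iter lets the TSA-mode model be built and trained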
        train_cfg = mmcv.ConfigDict(tsa_iter=1)
        restorer = build_model(
            model_cfg, train_cfg=train_cfg, test_cfg=test_cfg).cuda()
        optimizer = {
            'generator':
            obj_from_dict(
                optim_cfg, torch.optim,
                dict(params=getattr(restorer, 'generator').parameters()))
        }
        # train without updating tsa module
        outputs = restorer.train_step(data_batch, optimizer)
        # train with updating tsa module
        outputs = restorer.train_step(data_batch, optimizer)
        assert isinstance(outputs, dict)
        assert isinstance(outputs['log_vars'], dict)
        assert isinstance(outputs['log_vars']['loss_pix'], float)
        assert outputs['num_samples'] == 1
        assert torch.equal(outputs['results']['lq'], data_batch['lq'].cpu())
        assert torch.equal(outputs['results']['gt'], data_batch['gt'].cpu())
        assert torch.is_tensor(outputs['results']['output'])
        assert outputs['results']['output'].size() == (1, 3, 32, 32)

        # test forward_dummy
        with torch.no_grad():
            output = restorer.forward_dummy(data_batch['lq'])
        assert torch.is_tensor(output)
        assert output.size() == (1, 3, 32, 32)

        # forward_test
        with torch.no_grad():
            outputs = restorer(**data_batch, test_mode=True)
        assert torch.equal(outputs['lq'], data_batch['lq'].cpu())
        assert torch.equal(outputs['gt'], data_batch['gt'].cpu())
        assert torch.is_tensor(outputs['output'])
        assert outputs['output'].size() == (1, 3, 32, 32)

        with torch.no_grad():
            outputs = restorer(inputs.cuda(), test_mode=True)
        assert torch.equal(outputs['lq'], data_batch['lq'].cpu())
        assert torch.is_tensor(outputs['output'])
        assert outputs['output'].size() == (1, 3, 32, 32)

    # test with metric and save image
    if torch.cuda.is_available():
        train_cfg = mmcv.ConfigDict(tsa_iter=1)
        test_cfg = dict(metrics=('PSNR', 'SSIM'), crop_border=0)
        test_cfg = mmcv.Config(test_cfg)

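        # meta provides the key / gt path used to name saved result images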
        data_batch = {
            'lq': inputs.cuda(),
            'gt': targets.cuda(),
            'meta': [{
                'gt_path': 'fake_path/fake_name.png',
                'key': '000/00000000'
            }]
        }

        restorer = build_model(
            model_cfg, train_cfg=train_cfg, test_cfg=test_cfg).cuda()

        with pytest.raises(AssertionError):
            # evaluation with metrics must have gt images
            restorer(lq=inputs.cuda(), test_mode=True)

        with tempfile.TemporaryDirectory() as tmpdir:
            outputs = restorer(
                **data_batch,
                test_mode=True,
                save_image=True,
                save_path=tmpdir,
                iteration=None)
            assert isinstance(outputs, dict)
            assert isinstance(outputs['eval_result'], dict)
            assert isinstance(outputs['eval_result']['PSNR'], float)
            assert isinstance(outputs['eval_result']['SSIM'], float)

            outputs = restorer(
                **data_batch,
                test_mode=True,
                save_image=True,
                save_path=tmpdir,
                iteration=100)
            assert isinstance(outputs, dict)
            assert isinstance(outputs['eval_result'], dict)
            assert isinstance(outputs['eval_result']['PSNR'], float)
            assert isinstance(outputs['eval_result']['SSIM'], float)

            with pytest.raises(ValueError):
                # iteration should be number or None
                restorer(
                    **data_batch,
                    test_mode=True,
                    save_image=True,
                    save_path=tmpdir,
                    iteration='100')