# Shared imports assumed by the examples below. ExampleDataset, ExampleModel,
# _build_demo_runner and multi_gpu_test are fixtures defined in the original
# test modules; EvalIterHook, EvalHook and DistEvalHook come from the
# downstream codebases these tests target.
import logging
import os.path as osp
import shutil
import sys
import tempfile
import warnings
from unittest.mock import MagicMock, call, patch

import pytest
import torch
import torch.nn as nn
from torch.utils.data import DataLoader

import mmcv.runner
from mmcv.runner import obj_from_dict

Example #1
def test_eval_hook():
    # a list of DataLoaders is not a DataLoader, so construction must raise
    with pytest.raises(TypeError):
        test_dataset = ExampleModel()
        data_loader = [
            DataLoader(test_dataset,
                       batch_size=1,
                       sampler=None,
                       num_workers=0,
                       shuffle=False)
        ]
        EvalIterHook(data_loader)

    test_dataset = ExampleDataset()
    test_dataset.evaluate = MagicMock(return_value=dict(test='success'))
    loader = DataLoader(test_dataset, batch_size=1)
    model = ExampleModel()
    data_loader = DataLoader(test_dataset,
                             batch_size=1,
                             sampler=None,
                             num_workers=0,
                             shuffle=False)
    eval_hook = EvalIterHook(data_loader)
    optim_cfg = dict(type='Adam', lr=2e-4, betas=(0.9, 0.999))
    optimizer = obj_from_dict(optim_cfg, torch.optim,
                              dict(params=model.parameters()))
    with tempfile.TemporaryDirectory() as tmpdir:
        runner = mmcv.runner.IterBasedRunner(model=model,
                                             optimizer=optimizer,
                                             work_dir=tmpdir,
                                             logger=logging.getLogger())
        runner.register_hook(eval_hook)
        runner.run([loader], [('train', 1)], 1)
        test_dataset.evaluate.assert_called_with([torch.tensor([1])],
                                                 logger=runner.logger)
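The assertions in these eval-hook tests lean on small fixtures defined next to
the original tests. A minimal sketch of what they might look like; the bodies
below are hypothetical, only the names and the evaluate(results, logger=...)
signature are taken from the tests themselves:

class ExampleDataset(torch.utils.data.Dataset):
    # Hypothetical stand-in: a single sample plus the evaluate() the hook
    # asserts on.
    def __getitem__(self, idx):
        return dict(imgs=torch.tensor([1.0]))

    def __len__(self):
        return 1

    def evaluate(self, results, logger=None):
        return dict(test='success')


class ExampleModel(torch.nn.Module):
    # Hypothetical stand-in exposing the train_step() an mmcv runner calls.
    def __init__(self):
        super().__init__()
        self.linear = torch.nn.Linear(1, 1)

    def forward(self, imgs, **kwargs):
        return self.linear(imgs)

    def train_step(self, data_batch, optimizer, **kwargs):
        return dict(log_vars=dict(loss=0.0), num_samples=1)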
Example #2
def test_wandb_hook():
    try:
        import torch
        import torch.nn as nn
        from torch.utils.data import DataLoader
    except ImportError:
        warnings.warn('Skipping test_wandb_hook in the absence of torch')
        return

    import mmcv.runner
    sys.modules['wandb'] = MagicMock()  # so the hook's `import wandb` is mocked
    hook = mmcv.runner.hooks.WandbLoggerHook()
    loader = DataLoader(torch.ones((5, 5)))

    model = nn.Linear(1, 1)
    runner = mmcv.runner.Runner(
        model=model,
        batch_processor=lambda model, x, **kwargs: {
            'log_vars': {
                'accuracy': 0.98
            },
            'num_samples': 5
        })
    runner.register_hook(hook)
    runner.run([loader, loader], [('train', 1), ('val', 1)], 1)
    hook.wandb.init.assert_called()
    hook.wandb.log.assert_called_with({'accuracy/val': 0.98}, step=5)
    hook.wandb.join.assert_called()
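A pattern used throughout these examples is worth spelling out: installing a
MagicMock under sys.modules before the hook is constructed makes the hook's
own deferred import resolve to the mock, so its calls can be asserted
afterwards. A self-contained illustration ('fakelib' is a made-up module
name):

import sys
from unittest.mock import MagicMock

sys.modules['fakelib'] = MagicMock()

import fakelib  # resolves to the MagicMock installed above

fakelib.init(project='demo')
fakelib.init.assert_called_with(project='demo')  # passes: the mock recorded it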
Example #3
def test_pavi_hook():
    try:
        import torch
        import torch.nn as nn
        from torch.utils.data import DataLoader
    except ImportError:
        warnings.warn('Skipping test_pavi_hook in the absence of torch')
        return
    sys.modules['pavi'] = MagicMock()

    model = nn.Linear(1, 1)
    loader = DataLoader(torch.ones((5, 5)))
    work_dir = osp.join(osp.dirname(osp.abspath(__file__)), 'data')
    runner = mmcv.runner.Runner(model=model,
                                work_dir=work_dir,
                                batch_processor=lambda model, x, **kwargs: {
                                    'log_vars': {
                                        'loss': 2.333
                                    },
                                    'num_samples': 5
                                })

    hook = mmcv.runner.hooks.PaviLoggerHook(add_graph=False,
                                            add_last_ckpt=True)
    runner.register_hook(hook)
    runner.run([loader, loader], [('train', 1), ('val', 1)], 1)

    assert hasattr(hook, 'writer')
    hook.writer.add_scalars.assert_called_with('val', {'loss': 2.333}, 5)
    hook.writer.add_snapshot_file.assert_called_with(
        tag='data',
        snapshot_file_path=osp.join(work_dir, 'latest.pth'),
        iteration=5)
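Because add_last_ckpt=True, the hook is expected to upload the final
checkpoint (latest.pth under work_dir) once the run finishes, which is what
the add_snapshot_file assertion checks; iteration=5 matches the five training
samples processed in the single epoch, and tag='data' matches the basename of
the work_dir used here.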
Example #4
def test_momentum_runner_hook():
    """
    xdoctest -m tests/test_hooks.py test_momentum_runner_hook
    """
    sys.modules['pavi'] = MagicMock()
    loader = DataLoader(torch.ones((10, 2)))
    runner = _build_demo_runner()

    # add momentum scheduler
    hook = mmcv.runner.hooks.momentum_updater.CyclicMomentumUpdaterHook(
        by_epoch=False,
        target_ratio=(0.85 / 0.95, 1),
        cyclic_times=1,
        step_ratio_up=0.4)
    runner.register_hook(hook)

    # add LR scheduler
    hook = mmcv.runner.hooks.lr_updater.CyclicLrUpdaterHook(
        by_epoch=False,
        target_ratio=(10, 1),
        cyclic_times=1,
        step_ratio_up=0.4)
    runner.register_hook(hook)
    runner.register_hook(mmcv.runner.hooks.IterTimerHook())

    # add pavi hook
    hook = mmcv.runner.hooks.PaviLoggerHook(interval=1,
                                            add_graph=False,
                                            add_last_ckpt=True)
    runner.register_hook(hook)
    runner.run([loader], [('train', 1)], 1)
    shutil.rmtree(runner.work_dir)

    # TODO: use a more elegant way to check values
    assert hasattr(hook, 'writer')
    calls = [
        call('train', {
            'learning_rate': 0.01999999999999999,
            'momentum': 0.95
        }, 0),
        call('train', {
            'learning_rate': 0.2,
            'momentum': 0.85
        }, 4),
        call('train', {
            'learning_rate': 0.155,
            'momentum': 0.875
        }, 6),
    ]
    hook.writer.add_scalars.assert_has_calls(calls, any_order=True)
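The asserted values can be reproduced by hand. With a base LR of 0.02
(apparently what _build_demo_runner configures, judging by the assertions),
target_ratio=(10, 1) and step_ratio_up=0.4 over 10 iterations, the LR ramps up
to 0.2 during the first 4 iterations, then anneals back to 0.02. A quick check
of the iteration-6 value, assuming the cyclic updater anneals with a cosine:

import math

base_lr, peak_lr = 0.02, 0.02 * 10   # target_ratio[0] = 10
up_iters, total_iters = 4, 10        # step_ratio_up = 0.4 of one cycle

# iteration 6 sits 2 steps into the 6-step down phase
progress = (6 - up_iters) / (total_iters - up_iters)
lr = base_lr + (peak_lr - base_lr) * (1 + math.cos(math.pi * progress)) / 2
print(lr)  # ~0.155, the value asserted above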
Example #5
def test_wandb_hook():
    sys.modules['wandb'] = MagicMock()
    runner = _build_demo_runner()
    hook = mmcv.runner.hooks.WandbLoggerHook()
    loader = DataLoader(torch.ones((5, 2)))

    runner.register_hook(hook)
    runner.run([loader, loader], [('train', 1), ('val', 1)], 1)
    hook.wandb.init.assert_called_with()
    hook.wandb.log.assert_called_with(
        {
            'learning_rate': 0.02,
            'momentum': 0.95
        }, step=5)
    hook.wandb.join.assert_called_with()
Example #6
def test_cosine_runner_hook():
    """
    xdoctest -m tests/test_hooks.py test_cosine_runner_hook
    """
    sys.modules['pavi'] = MagicMock()
    loader = DataLoader(torch.ones((10, 2)))
    runner = _build_demo_runner()

    # add momentum scheduler
    hook = mmcv.runner.hooks.momentum_updater \
        .CosineAnnealingMomentumUpdaterHook(
            min_momentum_ratio=0.99 / 0.95,
            by_epoch=False,
            warmup_iters=2,
            warmup_ratio=0.9 / 0.95)
    runner.register_hook(hook)

    # add LR scheduler
    hook = mmcv.runner.hooks.lr_updater.CosineAnnealingLrUpdaterHook(
        by_epoch=False, min_lr_ratio=0, warmup_iters=2, warmup_ratio=0.9)
    runner.register_hook(hook)
    runner.register_hook(mmcv.runner.hooks.IterTimerHook())

    # add pavi hook
    hook = mmcv.runner.hooks.PaviLoggerHook(interval=1,
                                            add_graph=False,
                                            add_last_ckpt=True)
    runner.register_hook(hook)
    runner.run([loader], [('train', 1)], 1)
    shutil.rmtree(runner.work_dir)

    # TODO: use a more elegant way to check values
    assert hasattr(hook, 'writer')
    calls = [
        call('train', {
            'learning_rate': 0.02,
            'momentum': 0.95
        }, 0),
        call('train', {
            'learning_rate': 0.01,
            'momentum': 0.97
        }, 5),
        call('train', {
            'learning_rate': 0.0004894348370484647,
            'momentum': 0.9890211303259032
        }, 9)
    ]
    hook.writer.add_scalars.assert_has_calls(calls, any_order=True)
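Here too the expected numbers follow from the standard cosine annealing
schedule, lr(t) = min_lr + (base_lr - min_lr) * (1 + cos(pi * t / T)) / 2,
with base_lr = 0.02, min_lr = 0 and T = 10 (the momentum analogue uses
min_momentum = 0.95 * min_momentum_ratio = 0.99). Checking the iteration-9
pair asserted above:

import math

base_lr, min_lr, T = 0.02, 0.0, 10
lr_9 = min_lr + (base_lr - min_lr) * (1 + math.cos(math.pi * 9 / T)) / 2
print(lr_9)  # 0.0004894348370484647, as asserted

base_m, min_m = 0.95, 0.99  # min_momentum_ratio = 0.99 / 0.95
m_9 = min_m + (base_m - min_m) * (1 + math.cos(math.pi * 9 / T)) / 2
print(m_9)  # 0.9890211303259032, as asserted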
Example #7
def test_pavi_hook():
    sys.modules['pavi'] = MagicMock()

    loader = DataLoader(torch.ones((5, 2)))
    runner = _build_demo_runner()
    hook = mmcv.runner.hooks.PaviLoggerHook(add_graph=False,
                                            add_last_ckpt=True)
    runner.register_hook(hook)
    runner.run([loader, loader], [('train', 1), ('val', 1)], 1)

    assert hasattr(hook, 'writer')
    hook.writer.add_scalars.assert_called_with('val', {
        'learning_rate': 0.02,
        'momentum': 0.95
    }, 5)
    hook.writer.add_snapshot_file.assert_called_with(
        tag='data',
        snapshot_file_path=osp.join(runner.work_dir, 'latest.pth'),
        iteration=5)
Example #8
# parametrized so the body below can toggle patching mmcv.engine.multi_gpu_test
@pytest.mark.parametrize('use_mmcv_hook', [False, True])
def test_dist_eval_hook_epoch(use_mmcv_hook):
    # a list of DataLoaders is not a DataLoader, so construction must raise
    with pytest.raises(TypeError):
        test_dataset = ExampleModel()
        data_loader = [
            DataLoader(test_dataset,
                       batch_size=1,
                       sampler=None,
                       num_workers=0,
                       shuffle=False)
        ]
        DistEvalHook(data_loader)

    test_dataset = ExampleDataset()
    test_dataset.evaluate = MagicMock(return_value=dict(test='success'))
    loader = DataLoader(test_dataset, batch_size=1)
    model = ExampleModel()
    data_loader = DataLoader(test_dataset,
                             batch_size=1,
                             sampler=None,
                             num_workers=0,
                             shuffle=False)
    optim_cfg = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005)
    optimizer = obj_from_dict(optim_cfg, torch.optim,
                              dict(params=model.parameters()))

    # test DistEvalHook
    with tempfile.TemporaryDirectory() as tmpdir:
        if use_mmcv_hook:
            p = patch('mmcv.engine.multi_gpu_test', multi_gpu_test)
            p.start()
        eval_hook = DistEvalHook(data_loader, by_epoch=True, interval=2)
        runner = mmcv.runner.EpochBasedRunner(model=model,
                                              optimizer=optimizer,
                                              work_dir=tmpdir,
                                              logger=logging.getLogger(),
                                              max_epochs=2)
        runner.register_hook(eval_hook)
        runner.run([loader], [('train', 1)])
        test_dataset.evaluate.assert_called_with([torch.tensor([1])],
                                                 logger=runner.logger)
        if use_mmcv_hook:
            p.stop()
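When use_mmcv_hook is true, patch() swaps mmcv.engine.multi_gpu_test for the
local multi_gpu_test stub for the duration of the test, so the distributed
evaluation path can run without an actual multi-GPU launch; p.stop() restores
the original function afterwards.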
Example #9
@pytest.mark.parametrize('log_model', [True, False])
def test_mlflow_hook(log_model):
    sys.modules['mlflow'] = MagicMock()
    sys.modules['mlflow.pytorch'] = MagicMock()

    runner = _build_demo_runner()
    loader = DataLoader(torch.ones((5, 2)))

    hook = mmcv.runner.hooks.MlflowLoggerHook(exp_name='test',
                                              log_model=log_model)
    runner.register_hook(hook)
    runner.run([loader, loader], [('train', 1), ('val', 1)], 1)

    hook.mlflow.set_experiment.assert_called_with('test')
    hook.mlflow.log_metrics.assert_called_with(
        {
            'learning_rate': 0.02,
            'momentum': 0.95
        }, step=5)
    if log_model:
        hook.mlflow_pytorch.log_model.assert_called_with(
            runner.model, 'models')
    else:
        assert not hook.mlflow_pytorch.log_model.called
Example #10
def test_iter_eval_hook():
    # a list of DataLoaders is not a DataLoader, so construction must raise
    with pytest.raises(TypeError):
        test_dataset = ExampleModel()
        data_loader = [
            DataLoader(test_dataset,
                       batch_size=1,
                       sampler=None,
                       num_workers=0,
                       shuffle=False)
        ]
        EvalHook(data_loader)

    test_dataset = ExampleDataset()
    test_dataset.pre_eval = MagicMock(return_value=[torch.tensor([1])])
    test_dataset.evaluate = MagicMock(return_value=dict(test='success'))
    loader = DataLoader(test_dataset, batch_size=1)
    model = ExampleModel()
    data_loader = DataLoader(test_dataset,
                             batch_size=1,
                             sampler=None,
                             num_workers=0,
                             shuffle=False)
    optim_cfg = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005)
    optimizer = obj_from_dict(optim_cfg, torch.optim,
                              dict(params=model.parameters()))

    # test EvalHook
    with tempfile.TemporaryDirectory() as tmpdir:
        eval_hook = EvalHook(data_loader, by_epoch=False, efficient_test=True)
        runner = mmcv.runner.IterBasedRunner(model=model,
                                             optimizer=optimizer,
                                             work_dir=tmpdir,
                                             logger=logging.getLogger())
        runner.register_hook(eval_hook)
        runner.run([loader], [('train', 1)], 1)
        test_dataset.evaluate.assert_called_with([torch.tensor([1])],
                                                 logger=runner.logger)
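Here pre_eval is mocked to return [torch.tensor([1])], so the final assertion
verifies that the hook hands exactly the collected per-batch results to
dataset.evaluate along with the runner's logger. With by_epoch=False the hook
evaluates on an iteration basis, and efficient_test=True (an option of this
downstream EvalHook) presumably trades disk for memory when collecting
results.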