Example 1
def test_evaluator_dict(path):
    device = 'cpu'
    progress_bar = False
    model = MyModel()
    ppe.to(model, device)
    model_with_loss = MyModelWithLossFn(model)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    data = torch.utils.data.DataLoader([(torch.rand(20, ), torch.rand(10, ))
                                        for i in range(10)])
    extensions = _make_extensions()

    evaluator1 = engine.create_evaluator(model_with_loss,
                                         device=device,
                                         progress_bar=progress_bar)
    evaluator2 = engine.create_evaluator(model,
                                         device=device,
                                         progress_bar=progress_bar)

    trainer = engine.create_trainer(
        model_with_loss,
        optimizer,
        20,
        device=device,
        evaluator={
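            # A dict maps names to evaluators; each value is an Evaluator or
            # an (Evaluator, trigger) tuple.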
            '1': evaluator1,  # run every epoch -> called 20 times.
            '2': (evaluator2, (5, 'iteration')),  # run every 5 iterations -> called 40 times.
        },
        extensions=extensions,
        out_dir=path)
    path = 'pytorch_pfn_extras.training._evaluator.Evaluator.run'
    with mock.patch(path) as patched:
        trainer.run(data, data)
        assert patched.call_count == 20 + 40
Example 2
def test_trainer_with_code_block(device, progress_bar, path):
    model = MyModel()
    model_with_loss = MyModelWithLossDictOutput(model)
    ppe.to(model_with_loss, device)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    data = torch.utils.data.DataLoader([{
        'x': torch.rand(20, ),
        't': torch.rand(10, )
    } for i in range(10)])
    extensions = _make_extensions()

    evaluator = engine.create_evaluator(model_with_loss,
                                        device=device,
                                        progress_bar=progress_bar,
                                        logic=ppe.handler.CodeBlockLogic())

    trainer = engine.create_trainer(model_with_loss,
                                    optimizer,
                                    20,
                                    device=device,
                                    evaluator=evaluator,
                                    extensions=extensions,
                                    out_dir=path,
                                    logic=ppe.handler.CodeBlockLogic())
    trainer.run(data, data)
Example 3
def test_evaluator_trigger(evaluator_trigger, path):
    device = 'cpu'
    progress_bar = False
    model = MyModel()
    ppe.to(model, device)
    model_with_loss = MyModelWithLossFn(model)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    data = torch.utils.data.DataLoader([(torch.rand(20, ), torch.rand(10, ))
                                        for i in range(10)])
    extensions = _make_extensions()

    evaluator = engine.create_evaluator(model_with_loss,
                                        device=device,
                                        progress_bar=progress_bar)

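    # evaluator_trigger is parametrized as (expected Evaluator.run call count, trigger).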
    trainer = engine.create_trainer(model_with_loss,
                                    optimizer,
                                    20,
                                    device=device,
                                    evaluator=(evaluator,
                                               evaluator_trigger[1]),
                                    extensions=extensions,
                                    out_dir=path)
    path = 'pytorch_pfn_extras.training._evaluator._Evaluator.run'
    with mock.patch(path) as patched:
        trainer.run(data, data)
        assert patched.call_count == evaluator_trigger[0]
Example 4
def test_evaluator_async(accuracy):
    device = 'async-cpu'
    model = AsyncModel(accuracy)
    data = torch.utils.data.DataLoader([{
        'x': torch.rand(20),
        't': torch.rand(1)
    } for i in range(1000)],
                                       batch_size=10)

    options = {'eval_report_keys': ['accuracy'], 'async': True}
    # Register the custom DeferRuntime to handle the 'async-cpu' device
    ppe.runtime.runtime_registry.register(device, DeferRuntime)

    ppe.to(model, device)
    evaluator = engine.create_evaluator(
        model,
        device=device,
        options=options,
        metrics=[ppe.training.metrics.AccuracyMetric('t', 'y')])

    reporter = ppe.reporting.Reporter()
    observation = {}
    with reporter.scope(observation):
        evaluator.run(data)
    assert pytest.approx(observation['val/accuracy']) == accuracy
    assert model._pending_called
Example 5
def test_trainer_profile():
    device = 'cpu'
    model = MyModel()
    model_with_loss = MyModelWithLossDictOutput(model)
    ppe.to(model_with_loss, device)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    data = torch.utils.data.DataLoader([{
        'x': torch.rand(20, ),
        't': torch.rand(10, )
    } for i in range(10)])
    extensions = _make_extensions()

    evaluator = engine.create_evaluator(model_with_loss, device=device)

    trace_handler = mock.Mock()
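    # With batch size 1, one epoch is len(data) == 10 iterations, so a single
    # schedule cycle (1 warmup + 9 active steps) covers an epoch and
    # on_trace_ready fires once per epoch.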
    warmup = 1
    active = len(data) - warmup
    profile = torch.profiler.profile(
        activities=[torch.profiler.ProfilerActivity.CPU],
        on_trace_ready=trace_handler,
        schedule=torch.profiler.schedule(wait=0, warmup=warmup, active=active),
    )
    trainer = engine.create_trainer(
        model_with_loss,
        optimizer,
        20,
        device=device,
        evaluator=evaluator,
        extensions=extensions,
        profile=profile,
    )
    trainer.run(data, data)
    assert trace_handler.call_count == 20  # n_epochs
Example 6
def test_trainer_with_code_block_with_multiple_optimizers(
        device, progress_bar, path):
    if not torch.cuda.is_available() and device == 'cuda':
        pytest.skip()
    model = MyModel()
    model_with_loss = MyModelWithLossDictOutput(model)
    ppe.to(model_with_loss, device)
    optimizer0 = torch.optim.SGD(model.parameters(), lr=0.1)
    optimizer1 = torch.optim.Adam(model.parameters(), lr=0.1)
    data = torch.utils.data.DataLoader([{
        'x': torch.rand(20, ),
        't': torch.rand(10, )
    } for i in range(10)])
    extensions = _make_extensions()

    evaluator = engine.create_evaluator(model_with_loss,
                                        device=device,
                                        progress_bar=progress_bar,
                                        logic=ppe.handler.CodeBlockLogic())

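    # Multiple optimizers are passed as a name -> optimizer dict.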
    trainer = engine.create_trainer(model_with_loss, {
        "0": optimizer0,
        "1": optimizer1
    },
                                    20,
                                    device=device,
                                    evaluator=evaluator,
                                    extensions=extensions,
                                    out_dir=path,
                                    logic=ppe.handler.CodeBlockLogic())
    trainer.run(data, data)
Example 7
def test_trainer_dict_input(device, progress_bar, path):
    if not torch.cuda.is_available() and device == 'cuda':
        pytest.skip()
    model = MyModel()
    ppe.to(model, device)
    model_with_loss = MyModelWithLossDictOutput(model)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    data = torch.utils.data.DataLoader([{
        'x': torch.rand(20, ),
        't': torch.rand(10, )
    } for i in range(10)])
    extensions = _make_extensions()

    evaluator = engine.create_evaluator(model_with_loss,
                                        device=device,
                                        progress_bar=progress_bar)

    trainer = engine.create_trainer(model_with_loss,
                                    optimizer,
                                    20,
                                    device=device,
                                    evaluator=evaluator,
                                    extensions=extensions,
                                    out_dir=path)
    trainer.run(data, data)
Example 8
def test_train_with_evaluator(device, progress_bar, path):
    if not torch.cuda.is_available() and device == 'cuda':
        pytest.skip()
    model = MyModel()
    ppe.to(model, device)
    model_with_loss = MyModelWithLossFn(model)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    data = torch.utils.data.DataLoader([(torch.rand(20, ), torch.rand(10, ))
                                        for i in range(10)])
    extensions = _make_extensions()

    evaluator = engine.create_evaluator(model_with_loss,
                                        device=device,
                                        progress_bar=progress_bar)

    trainer = engine.create_trainer(model_with_loss,
                                    optimizer,
                                    20,
                                    device=device,
                                    evaluator=evaluator,
                                    extensions=extensions,
                                    out_dir=path)
    mpath = 'pytorch_pfn_extras.training._evaluator.Evaluator.run'
    with mock.patch(mpath) as patched:
        trainer.run(data, data)
        assert patched.call_count == 20
Example 9
def test_evaluator_with_metric(device, accuracy):
    model = MyModel(accuracy)
    data = torch.utils.data.DataLoader([{
        'x': torch.rand(20),
        't': torch.rand(1)
    } for i in range(10)],
                                       batch_size=10)

    evaluator = engine.create_evaluator(
        model,
        device=device,
        metrics=[ppe.training.metrics.AccuracyMetric('t', 'y')],
        options={'eval_report_keys': ['accuracy']})
    evaluator.handler.eval_setup(evaluator, data)
    reporter = ppe.reporting.Reporter()
    observation = {}
    with reporter.scope(observation):
        evaluator.run(data)
    assert pytest.approx(observation['val/accuracy']) == accuracy
Example 10
def test_trainer_namedtuple_input(device, progress_bar, path):
    model = MyModel()
    ppe.to(model, device)
    model_with_loss = ModelNamedTupleIO(model)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    data = torch.utils.data.DataLoader(
        [Input(torch.rand(20, ), torch.rand(10, ), str(i)) for i in range(10)])
    extensions = _make_extensions()

    evaluator = engine.create_evaluator(model_with_loss,
                                        device=device,
                                        progress_bar=progress_bar)

    trainer = engine.create_trainer(model_with_loss,
                                    optimizer,
                                    20,
                                    device=device,
                                    evaluator=evaluator,
                                    extensions=extensions,
                                    out_dir=path)
    trainer.run(data, data)
Example 11
    def add_dump(self, name, dir):
        with open(f'{dir}/summary') as f:
            summary = json.loads(f.read())

        class DummyModel(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.param = torch.nn.Parameter(torch.tensor(1.0))

            def forward(self, *args, **kwargs):
                return ()

        model = DummyModel()
        ppe.to(model, 'cpu')

        engine = None
        args = []
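        # Rebuild an engine with the same shape as the dumped run: a bare
        # evaluator, a plain trainer, or a trainer wrapping the evaluator.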
        if summary['evaluator']:
            engine = _engine_module.create_evaluator(model)
            args = [[None] * summary['eval_len']]
        if summary['trainer']:
            engine = _engine_module.create_trainer(
                {'main': model},
                {'main': torch.optim.SGD(model.parameters(), lr=0.01)},
                summary['max_epochs'],
                evaluator=engine,
            )
            args = [[None] * summary['train_len']] + args

        engine.handler = _ComparableHandler(
            engine.handler, name, self._compare_dump, self._trigger, dir=dir)

        child_evaluator = getattr(engine, 'evaluator', None)
        if child_evaluator is not None:
            # For trainer with evaluator
            child_evaluator.handler = engine.handler

        self._engines[name] = engine, args, {}
Example 12
def test_evaluator_async(accuracy):
    device = 'cpu'
    model = AsyncModel(accuracy)
    data = torch.utils.data.DataLoader([{
        'x': torch.rand(20),
        't': torch.rand(1)
    } for i in range(1000)],
                                       batch_size=10)

    options = {'eval_report_keys': ['accuracy']}

    ppe.to(model, device)
    evaluator = engine.create_evaluator(
        model,
        device=device,
        options=options,
        metrics=[ppe.training.metrics.AccuracyMetric('t', 'y')])

    reporter = ppe.reporting.Reporter()
    observation = {}
    with reporter.scope(observation):
        evaluator.run(data)
    assert pytest.approx(observation['val/accuracy']) == accuracy