Code Example #1
def test_trainer_invalid_options(path):
    device = 'cpu'
    model = MyModel()
    ppe.to(model, device)
    model_with_loss = MyModelWithLossFn(model)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    extensions = _make_extensions()
    options = {'UNKNOWN_OPTIONS': True}
    with pytest.raises(ValueError, match="UNKNOWN_OPTIONS"):
        engine.create_trainer(
            model_with_loss, optimizer, 20,
            device=device, extensions=extensions,
            out_dir=path,
            options=options,
        )
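
These snippets are taken from the pytorch-pfn-extras test suite and rely on helpers defined elsewhere in the surrounding test module (device, progress_bar, and path are pytest parameters or fixtures supplied by that module). The sketch below shows minimal, assumed definitions of the shared imports, MyModel, MyModelWithLossFn, and _make_extensions so the examples can be read on their own; the exact definitions in the test suite may differ.

# Minimal sketch of the shared test helpers (assumed, not the library's
# verbatim definitions).
from unittest import mock

import pytest
import torch

import pytorch_pfn_extras as ppe
from pytorch_pfn_extras import engine
from pytorch_pfn_extras.training import extensions as training_extensions


class MyModel(torch.nn.Module):
    # Small two-layer network matching the (20,) -> (10,) data used in the
    # examples.
    def __init__(self):
        super().__init__()
        self.l1 = torch.nn.Linear(20, 15)
        self.l2 = torch.nn.Linear(15, 10)

    def forward(self, x):
        return self.l2(self.l1(x))


class MyModelWithLossFn(torch.nn.Module):
    # Wraps the model, reports a loss value, and returns it so the trainer
    # can backpropagate.
    def __init__(self, model):
        super().__init__()
        self.model = model

    def forward(self, x, t):
        y = self.model(x)
        prefix = 'train' if self.training else 'val'
        loss = torch.nn.functional.l1_loss(y, t)
        ppe.reporting.report({prefix + '/loss': loss})
        return loss


def _make_extensions():
    # A representative set of reporting extensions; the real helper may
    # register different ones.
    return [
        training_extensions.LogReport(trigger=(10, 'iteration')),
        training_extensions.ProgressBar(update_interval=2),
        training_extensions.PrintReport(
            ['epoch', 'iteration', 'train/loss', 'val/loss']),
    ]
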
Code Example #2
def test_trainer_with_code_block(device, progress_bar, path):
    model = MyModel()
    model_with_loss = MyModelWithLossDictOutput(model)
    ppe.to(model_with_loss, device)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    data = torch.utils.data.DataLoader([{
        'x': torch.rand(20, ),
        't': torch.rand(10, )
    } for i in range(10)])
    extensions = _make_extensions()

    evaluator = engine.create_evaluator(model_with_loss,
                                        device=device,
                                        progress_bar=progress_bar,
                                        logic=ppe.handler.CodeBlockLogic())

    trainer = engine.create_trainer(model_with_loss,
                                    optimizer,
                                    20,
                                    device=device,
                                    evaluator=evaluator,
                                    extensions=extensions,
                                    out_dir=path,
                                    logic=ppe.handler.CodeBlockLogic())
    trainer.run(data, data)
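
Code Example #2 and several later examples feed dict batches with keys 'x' and 't' and wrap the model in MyModelWithLossDictOutput. A minimal, assumed sketch of that wrapper is shown below; the actual test helper may report or return additional entries.

class MyModelWithLossDictOutput(torch.nn.Module):
    # Assumed variant of MyModelWithLossFn: takes the keys of the dict batch
    # as keyword arguments and returns a dict of outputs.
    def __init__(self, model):
        super().__init__()
        self.model = model

    def forward(self, x, t):
        y = self.model(x)
        prefix = 'train' if self.training else 'val'
        loss = torch.nn.functional.l1_loss(y, t)
        ppe.reporting.report({prefix + '/loss': loss})
        return {'y': y, 'loss': loss}
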
Code Example #3
def test_trainer_defer_wrong_order(path):
    class WrongOrderHandler(ppe.handler.Handler):
        def _complete_train_step(self, trainer, outs, block, sn, sm, rt):
            p_iter = self.pending_iters[sn][0]
            if p_iter.idx < 10:
                super()._complete_train_step(trainer, p_iter.deferred, block,
                                             sn, sm, rt)
            else:
                # Complete an arbitrary future iteration (90) instead of the
                # pending one so the trainer detects the out-of-order finish.
                p_iter.cback(90, None, is_deferred=block)

    device = 'cpu'
    model = MyModel()
    model_with_loss = MyModelWithLossAsync(model)
    ppe.to(model_with_loss, device)
    # Register the handler
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    data = torch.utils.data.DataLoader([(torch.rand(20, ), torch.rand(10, ))
                                        for i in range(100)])

    trainer = engine.create_trainer(model_with_loss,
                                    optimizer,
                                    2,
                                    device=device,
                                    handler_class=WrongOrderHandler,
                                    out_dir=path)
    with pytest.raises(RuntimeError, match="Completed a not expected"):
        trainer.run(data)
Code Example #4
def test_trainer_defer(path):
    class Extension:
        def __init__(self, is_async):
            self.name = 'Dummy'
            self.trigger = (1, 'iteration')
            self.called = 0
            self.is_async = is_async

        def __call__(self, manager):
            self.called += 1

    device = 'cpu'
    model = MyModel()
    model_with_loss = MyModelWithLossAsync(model)
    ppe.to(model_with_loss, device)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    data = torch.utils.data.DataLoader([(torch.rand(20, ), torch.rand(10, ))
                                        for i in range(100)])

    extensions = [Extension(True), Extension(False)]

    trainer = engine.create_trainer(model_with_loss,
                                    optimizer,
                                    2,
                                    device=device,
                                    extensions=extensions,
                                    out_dir=path)
    trainer.run(data)
    assert trainer.manager.iteration == 200
    assert trainer.manager.execution == 200
    assert extensions[0].called == 200
    assert extensions[1].called == 200
Code Example #5
def test_evaluator_trigger(evaluator_trigger, path):
    device = 'cpu'
    progress_bar = False
    model = MyModel()
    ppe.to(model, device)
    model_with_loss = MyModelWithLossFn(model)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    data = torch.utils.data.DataLoader([(torch.rand(20, ), torch.rand(10, ))
                                        for i in range(10)])
    extensions = _make_extensions()

    evaluator = engine.create_evaluator(model_with_loss,
                                        device=device,
                                        progress_bar=progress_bar)

    trainer = engine.create_trainer(model_with_loss,
                                    optimizer,
                                    20,
                                    device=device,
                                    evaluator=(evaluator,
                                               evaluator_trigger[1]),
                                    extensions=extensions,
                                    out_dir=path)
    mpath = 'pytorch_pfn_extras.training._evaluator.Evaluator.run'
    with mock.patch(mpath) as patched:
        trainer.run(data, data)
        assert patched.call_count == evaluator_trigger[0]
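
The evaluator_trigger argument in Code Example #5 pairs the expected number of Evaluator.run calls with the trigger handed to create_trainer. A hypothetical parametrization that is consistent with the data above (10 iterations per epoch, 20 epochs, 200 iterations in total) could look like this; the concrete values used by the test suite are an assumption here.

@pytest.mark.parametrize('evaluator_trigger', [
    # (expected Evaluator.run call count, trigger)
    (20, (1, 'epoch')),      # run once per epoch over 20 epochs
    (40, (5, 'iteration')),  # run every 5 iterations over 200 iterations
])
def test_evaluator_trigger(evaluator_trigger, path):
    ...
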
Code Example #6
def test_trainer_profile():
    device = 'cpu'
    model = MyModel()
    model_with_loss = MyModelWithLossDictOutput(model)
    ppe.to(model_with_loss, device)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    data = torch.utils.data.DataLoader([{
        'x': torch.rand(20, ),
        't': torch.rand(10, )
    } for i in range(10)])
    extensions = _make_extensions()

    evaluator = engine.create_evaluator(model_with_loss, device=device)

    trace_handler = mock.Mock()
    warmup = 1
    active = len(data) - warmup
    profile = torch.profiler.profile(
        activities=[torch.profiler.ProfilerActivity.CPU],
        on_trace_ready=trace_handler,
        schedule=torch.profiler.schedule(wait=0, warmup=warmup, active=active),
    )
    trainer = engine.create_trainer(
        model_with_loss,
        optimizer,
        20,
        device=device,
        evaluator=evaluator,
        extensions=extensions,
        profile=profile,
    )
    trainer.run(data, data)
    assert trace_handler.call_count == 20  # n_epochs
Code Example #7
def test_trainer_with_code_block_with_multiple_optimizers(
        device, progress_bar, path):
    if not torch.cuda.is_available() and device == 'cuda':
        pytest.skip()
    model = MyModel()
    model_with_loss = MyModelWithLossDictOutput(model)
    ppe.to(model_with_loss, device)
    optimizer0 = torch.optim.SGD(model.parameters(), lr=0.1)
    optimizer1 = torch.optim.Adam(model.parameters(), lr=0.1)
    data = torch.utils.data.DataLoader([{
        'x': torch.rand(20, ),
        't': torch.rand(10, )
    } for i in range(10)])
    extensions = _make_extensions()

    evaluator = engine.create_evaluator(model_with_loss,
                                        device=device,
                                        progress_bar=progress_bar,
                                        logic=ppe.handler.CodeBlockLogic())

    trainer = engine.create_trainer(model_with_loss, {
        "0": optimizer0,
        "1": optimizer1
    },
                                    20,
                                    device=device,
                                    evaluator=evaluator,
                                    extensions=extensions,
                                    out_dir=path,
                                    logic=ppe.handler.CodeBlockLogic())
    trainer.run(data, data)
Code Example #8
def test_trainer_dict_input(device, progress_bar, path):
    if not torch.cuda.is_available() and device == 'cuda':
        pytest.skip()
    model = MyModel()
    ppe.to(model, device)
    model_with_loss = MyModelWithLossDictOutput(model)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    data = torch.utils.data.DataLoader([{
        'x': torch.rand(20, ),
        't': torch.rand(10, )
    } for i in range(10)])
    extensions = _make_extensions()

    evaluator = engine.create_evaluator(model_with_loss,
                                        device=device,
                                        progress_bar=progress_bar)

    trainer = engine.create_trainer(model_with_loss,
                                    optimizer,
                                    20,
                                    device=device,
                                    evaluator=evaluator,
                                    extensions=extensions,
                                    out_dir=path)
    trainer.run(data, data)
Code Example #9
def test_train_with_evaluator(device, progress_bar, path):
    if not torch.cuda.is_available() and device == 'cuda':
        pytest.skip()
    model = MyModel()
    ppe.to(model, device)
    model_with_loss = MyModelWithLossFn(model)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    data = torch.utils.data.DataLoader([(torch.rand(20, ), torch.rand(10, ))
                                        for i in range(10)])
    extensions = _make_extensions()

    evaluator = engine.create_evaluator(model_with_loss,
                                        device=device,
                                        progress_bar=progress_bar)

    trainer = engine.create_trainer(model_with_loss,
                                    optimizer,
                                    20,
                                    device=device,
                                    evaluator=evaluator,
                                    extensions=extensions,
                                    out_dir=path)
    mpath = 'pytorch_pfn_extras.training._evaluator.Evaluator.run'
    with mock.patch(mpath) as patched:
        trainer.run(data, data)
        assert patched.call_count == 20
Code Example #10
    def _get_trainer(self, epochs, out_dir):
        model = MyModel()
        ppe.to(model, 'cpu')
        model_with_loss = MyModelWithLossFn(model)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
        extensions = _make_extensions()
        trainer = engine.create_trainer(
            model_with_loss, optimizer, epochs,
            device='cpu', extensions=extensions,
            out_dir=out_dir,
        )
        return trainer
Code Example #11
def test_trainer_no_to(path):
    model = MyModel()
    model_with_loss = MyModelWithLossFn(model)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    data = torch.utils.data.DataLoader(
        [(torch.rand(20,), torch.rand(10,)) for i in range(10)])
    extensions = _make_extensions()

    trainer = engine.create_trainer(
        model_with_loss, optimizer, 20,
        device='cpu', extensions=extensions,
        out_dir=path,
    )
    with pytest.raises(RuntimeError, match="ppe.to"):
        trainer.run(data, data)
Code Example #12
def test_trainer(device, path):
    if not torch.cuda.is_available() and device == 'cuda':
        pytest.skip()
    model = MyModel()
    ppe.to(model, device)
    model_with_loss = MyModelWithLossFn(model)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    data = torch.utils.data.DataLoader(
        [(torch.rand(20,), torch.rand(10,)) for i in range(10)])
    extensions = _make_extensions()

    trainer = engine.create_trainer(
        model_with_loss, optimizer, 20,
        device=device, extensions=extensions,
        out_dir=path,
    )
    trainer.run(data)
Code Example #13
    def get_result_from_trainer():
        model = MyModel()
        ppe.to(model, device)
        model_with_loss = MyModelWithLossFn(model)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
        extensions = _make_extensions()

        trainer = engine.create_trainer(
            model_with_loss, optimizer, 20,
            device=device, extensions=extensions,
            out_dir=path
        )
        trainer.run(train_data)

        model.eval()
        with torch.no_grad():
            return [model(x.to(device)) for x, in data]
Code Example #14
def test_trainer_namedtuple_input(device, progress_bar, path):
    model = MyModel()
    ppe.to(model, device)
    model_with_loss = ModelNamedTupleIO(model)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    data = torch.utils.data.DataLoader(
        [Input(torch.rand(20, ), torch.rand(10, ), str(i)) for i in range(10)])
    extensions = _make_extensions()

    evaluator = engine.create_evaluator(model_with_loss,
                                        device=device,
                                        progress_bar=progress_bar)

    trainer = engine.create_trainer(model_with_loss,
                                    optimizer,
                                    20,
                                    device=device,
                                    evaluator=evaluator,
                                    extensions=extensions,
                                    out_dir=path)
    trainer.run(data, data)
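
Code Example #14 builds its batches from a namedtuple called Input and wraps the model in ModelNamedTupleIO. The sketch below shows assumed definitions that match how they are used above; the field names and the returned type are guesses.

import collections

# Third field carries a string id and is ignored by the model (assumption).
Input = collections.namedtuple('Input', ['x', 't', 'v'])
Output = collections.namedtuple('Output', ['y', 'loss'])


class ModelNamedTupleIO(torch.nn.Module):
    # Consumes an Input namedtuple and returns an Output namedtuple.
    def __init__(self, model):
        super().__init__()
        self.model = model

    def forward(self, input):
        y = self.model(input.x)
        prefix = 'train' if self.training else 'val'
        loss = torch.nn.functional.l1_loss(y, input.t)
        ppe.reporting.report({prefix + '/loss': loss})
        return Output(y, loss)
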
Code Example #15
File: comparer.py (Project: shinh/pytorch-pfn-extras)
    def add_dump(self, name, dir):
        with open(f'{dir}/summary') as f:
            summary = json.loads(f.read())

        class DummyModel(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.param = torch.nn.Parameter(torch.tensor(1.0))

            def forward(self, *args, **kwargs):
                return ()

        model = DummyModel()
        ppe.to(model, 'cpu')

        engine = None
        args = []
        if summary['evaluator']:
            engine = _engine_module.create_evaluator(model)
            args = [[None] * summary['eval_len']]
        if summary['trainer']:
            engine = _engine_module.create_trainer(
                {'main': model},
                {'main': torch.optim.SGD(model.parameters(), lr=0.01)},
                summary['max_epochs'],
                evaluator=engine,
            )
            args = [[None] * summary['train_len']] + args

        engine.handler = _ComparableHandler(
            engine.handler, name, self._compare_dump, self._trigger, dir=dir)

        child_evaluator = getattr(engine, 'evaluator', None)
        if child_evaluator is not None:
            # For trainer with evaluator
            child_evaluator.handler = engine.handler

        self._engines[name] = engine, args, {}
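
Code Example #15 (the add_dump method of the comparer) reconstructs a dummy trainer or evaluator from a previously dumped run so it can be compared against live engines. It reads a JSON file named summary from the dump directory; the sketch below shows the shape that file must have, with keys inferred from the code above and purely illustrative values.

# Assumed shape of '<dir>/summary' as read by add_dump (illustrative values).
summary = {
    'trainer': True,      # a trainer was dumped
    'evaluator': True,    # an evaluator was dumped alongside it
    'max_epochs': 20,     # epochs to replay on the dummy trainer
    'train_len': 100,     # iterations per training epoch
    'eval_len': 10,       # iterations per evaluation run
}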