Example 1
def system_check():
    """Run one forward/backward pass on the GPU to verify the training stack works."""
    import traceback

    try:
        import torch
        from olympus.models import Model
        from olympus.optimizers import Optimizer

        # Dummy batch and a small ResNet-18 model, both moved to the GPU
        batch = torch.randn((32, 3, 64, 64)).cuda()
        model = Model('resnet18', input_size=(3, 64, 64),
                      output_size=(10, )).cuda()

        model.init()

        optimizer = Optimizer('sgd', params=model.parameters())

        optimizer.init(**optimizer.defaults)

        # One optimization step on a dummy sum-of-outputs loss
        optimizer.zero_grad()
        loss = model(batch).sum()

        optimizer.backward(loss)
        optimizer.step()

        return True
    except Exception:
        # error() is assumed to be a logging helper available in the enclosing module
        error(traceback.format_exc())
        return False
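
A minimal sketch of how system_check() might be invoked, following the if __name__ == '__main__' pattern used in Example 4; the entry point and the printed messages are illustrative assumptions rather than part of the original example.

if __name__ == '__main__':
    # Hypothetical entry point: report whether a full GPU forward/backward pass succeeds.
    ok = system_check()
    print('system check passed' if ok else 'system check failed')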
Example 2
def test_optimizer_factory_init():
    # Build the optimizer from its name, then initialize it with SGD's default hyper-parameters
    model: Model = new_model()
    optim = Optimizer('sgd', params=model.parameters())

    assert optim.get_space() == SGD.get_space()
    optim.init(**SGD.defaults())
Example 3
def test_build_optimizer(optimizer):
    # The optimizer argument arrives as a name string and is re-bound to the factory-built instance below
    model = new_model()

    optimizer = Optimizer(
        optimizer,
        params=model.parameters()
    )

    optimizer.init(**optimizer.defaults)
    optimizer_work(optimizer, model)
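
A hedged sketch of what a helper in the spirit of optimizer_work could do, reusing the forward/backward/step pattern from Example 1; the helper name is taken from the test above, but its body, the batch shape, and the loss here are assumptions.

def optimizer_work(optimizer, model):
    # Run a single optimization step on a dummy batch to check the optimizer is usable.
    import torch

    batch = torch.randn((4, 3, 64, 64))  # assumed shape; must match the model's input_size
    optimizer.zero_grad()
    loss = model(batch).sum()

    optimizer.backward(loss)
    optimizer.step()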
Example 4
    @staticmethod
    def get_space():
        return {'gamma': 'loguniform(0.97, 1)'}


if __name__ == '__main__':
    model = Model(
        'logreg',
        input_size=(290,),
        output_size=(10,)
    )

    optimizer = Optimizer('sgd', params=model.parameters())

    # If you use a hyperparameter optimizer, it will generate these values for you
    optimizer.init(lr=1e-4, momentum=0.02, weight_decay=1e-3)

    schedule = LRSchedule(schedule=MyExponentialLR)
    schedule.init(optimizer=optimizer, gamma=0.97)

    optimizer.zero_grad()

    # Dummy batch: 10 samples with 290 features each, matching input_size
    batch = torch.randn((10, 290))
    output = model(batch)
    loss = output.sum()
    loss.backward()

    optimizer.step()

    print(optimizer.param_groups[0]['lr'])

    # Advance the schedule by one epoch; the learning rate is decayed by gamma
    schedule.epoch(1)
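
A small continuation sketch, assuming the same schedule and optimizer objects as above, that keeps stepping the schedule to watch the learning rate decay; the loop, and the assumption that epoch() takes an absolute epoch index, are inferred from the single call above and are not part of the original example.

# Hypothetical continuation: step the schedule a few more epochs and print the decayed lr.
for epoch in range(2, 5):
    schedule.epoch(epoch)
    print(epoch, optimizer.param_groups[0]['lr'])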
Example 5
def setup():
    # Build a small logistic-regression model and an SGD optimizer initialized with its defaults
    model = Model('logreg', input_size=(28, ), output_size=(10, ))

    optimizer = Optimizer('sgd', params=model.parameters())
    optimizer.init(**optimizer.defaults)
    return model, optimizer
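
A minimal usage sketch of the objects returned by setup(), reusing the zero_grad/forward/backward/step pattern from Examples 1 and 4; the batch size and the sum-of-outputs loss are assumptions made for illustration.

import torch

model, optimizer = setup()

# Dummy batch of 8 samples with 28 features, matching input_size=(28, ); the batch size is arbitrary
batch = torch.randn((8, 28))

optimizer.zero_grad()
loss = model(batch).sum()
loss.backward()
optimizer.step()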