Example 1
def system_check():
    import traceback

    try:
        import torch
        from olympus.models import Model
        from olympus.optimizers import Optimizer

        batch = torch.randn((32, 3, 64, 64)).cuda()
        model = Model('resnet18', input_size=(3, 64, 64),
                      output_size=(10, )).cuda()

        model.init()

        optimizer = Optimizer('sgd', params=model.parameters())

        optimizer.init(**optimizer.defaults)

        optimizer.zero_grad()
        loss = model(batch).sum()

        optimizer.backward(loss)
        optimizer.step()

        return True
    except Exception:
        # `error` is assumed to be olympus' logging helper (olympus.utils);
        # the original snippet used it without an import.
        from olympus.utils import error
        error(traceback.format_exc())
        return False
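If useful, the check can be run directly; the entry-point guard below is an illustration added here, not part of the original snippet.

if __name__ == '__main__':
    # system_check() returns True only if torch, the olympus wrappers
    # and a CUDA device all work end to end.
    print('system check passed' if system_check() else 'system check failed')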
Example 2
from olympus.datasets import Dataset, SplitDataset, DataLoader  # import missing from the original snippet (olympus.datasets assumed)
from olympus.models import Model
from olympus.observers import ObserverList, ProgressView, Speed
from olympus.optimizers import Optimizer, LRSchedule
from olympus.utils import fetch_device, option

epochs = 2
device = fetch_device()
base = option('base_path', '/tmp/olympus')

# Model
model = Model('resnet18', input_size=(1, 28, 28), output_size=(10, ))

# Optimizer
optimizer = Optimizer('sgd',
                      params=model.parameters(),
                      weight_decay=0.001,
                      lr=1e-5,
                      momentum=1e-5)

# Schedule
lr_schedule = LRSchedule('exponential', optimizer=optimizer, gamma=0.99)

# Dataset
data = Dataset('fake_mnist', path=f'{base}/data')

# Dataset splits
splits = SplitDataset(data, split_method='original')

# Dataloader
loader = DataLoader(splits, sampler_seed=1, batch_size=32)

# event handler
Example 3
import torch
from torch.optim.lr_scheduler import ExponentialLR

from olympus.models import Model
from olympus.optimizers import Optimizer, LRSchedule


class MyExponentialLR(ExponentialLR):
    """Custom exponential LR schedule used below with LRSchedule(schedule=...).

    The original snippet starts mid-class, so the imports and the class
    header here are a best-guess reconstruction, built on PyTorch's
    ExponentialLR (constructor: optimizer, gamma, last_epoch=-1).
    """

    def get_lr(self):
        # Closed-form exponential decay: lr_t = base_lr * gamma ** last_epoch
        return [base_lr * self.gamma ** self.last_epoch
                for base_lr in self.base_lrs]

    @staticmethod
    def get_space():
        return {'gamma': 'loguniform(0.97, 1)'}


if __name__ == '__main__':
    model = Model(
        'logreg',
        input_size=(290,),
        output_size=(10,)
    )

    optimizer = Optimizer('sgd', params=model.parameters())

    # If you use a hyperparameter optimizer, it will generate these values for you
    optimizer.init(lr=1e-4, momentum=0.02, weight_decay=1e-3)

    schedule = LRSchedule(schedule=MyExponentialLR)
    schedule.init(optimizer=optimizer, gamma=0.97)

    optimizer.zero_grad()

    batch = torch.randn((10, 290))
    output = model(batch)
    loss = output.sum()
    loss.backward()

    optimizer.step()
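The `get_space` hook is what a hyperparameter optimizer would query to know how to sample `gamma`; it can also be inspected directly:

# Inspect the search space declared by the custom schedule.
space = MyExponentialLR.get_space()
print(space)    # {'gamma': 'loguniform(0.97, 1)'}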
Example 4
# Imports missing from the original snippet (same modules as the examples above)
from olympus.models import Model
from olympus.optimizers import Optimizer


def setup():
    model = Model('logreg', input_size=(28, ), output_size=(10, ))

    optimizer = Optimizer('sgd', params=model.parameters())
    optimizer.init(**optimizer.defaults)
    return model, optimizer
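A hedged sketch of how `setup()` could be exercised, mirroring the single optimization step from Example 3; the synthetic batch below is an illustration, not part of the original snippet.

import torch

model, optimizer = setup()

# One synthetic step: random batch matching input_size=(28,), sum as a dummy loss.
batch = torch.randn((16, 28))
loss = model(batch).sum()

optimizer.zero_grad()
loss.backward()
optimizer.step()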