예제 #1
0
def test_checkpoint_bad_setup():
    """Saving a checkpoint before any trial was registered must raise."""
    checkpointer = CheckPointer(storage=StateStorage(folder='/tmp'), time_buffer=0)

    mock_task = TaskMock()
    mock_task.state = 2

    # No on_new_trial() call was made, so the checkpointer has no trial
    # context to save under — this is a setup error.
    with pytest.raises(BadCheckpoint):
        checkpointer.on_end_epoch(mock_task, 1, {})

    # The failed save must leave the task untouched.
    assert mock_task.state == 2
예제 #2
0
def test_checkpoint_none():
    """A checkpoint is restored only for an exactly-matching parameter set."""
    checkpointer = CheckPointer(storage=StateStorage(folder='/tmp'), time_buffer=0)

    # Register a trial with parameters (a=2, b=2) and persist its state.
    trained = TaskMock()
    trained.state = 5
    checkpointer.on_new_trial(trained, 0, {'a': 2, 'b': 2}, None)
    checkpointer.on_end_epoch(trained, 1, {})

    # Same parameters: the saved state (5) is loaded into the new task.
    resumed = TaskMock()
    checkpointer.on_new_trial(resumed, 0, {'a': 2, 'b': 2}, None)
    assert resumed.state == 5

    # Different parameters: nothing is restored, default state remains.
    fresh = TaskMock()
    checkpointer.on_new_trial(fresh, 0, {'a': 2, 'b': 4}, None)
    assert fresh.state == 1
예제 #3
0
    def __init__(self,
                 detector,
                 optimizer,
                 lr_scheduler,
                 dataloader,
                 criterion=None,
                 device=None,
                 storage=None):
        """Build an object-detection task.

        Args:
            detector: model producing detections.
            optimizer: optimizer driving training.
            lr_scheduler: learning-rate scheduler.
            dataloader: training data source.
            criterion: optional loss function.
            device: optional device forwarded to the base task.
            storage: optional state backend; enables checkpointing when set.
        """
        super(ObjectDetection, self).__init__(device=device)

        # Epoch bookkeeping used when resuming from a checkpoint.
        self._first_epoch = 0
        self.current_epoch = 0

        self.detector = detector
        self.optimizer = optimizer
        self.lr_scheduler = lr_scheduler
        self.dataloader = dataloader
        self.criterion = criterion
        self.storage = storage

        # Register metrics; ProgressView is fed the Speed metric instance.
        throughput = Speed()
        for metric in (ElapsedRealTime().every(batch=1),
                       SampleCount().every(batch=1, epoch=1),
                       throughput,
                       ProgressView(throughput),
                       OnlineLoss()):
            self.metrics.append(metric)

        # Checkpointing is only active when a storage backend was supplied.
        if storage:
            self.metrics.append(CheckPointer(storage=storage))
예제 #4
0
    def __init__(self, classifier, optimizer, lr_scheduler, dataloader, criterion=None, device=None,
                 storage=None, preprocessor=None, metrics=None):
        """Build a classification task.

        Args:
            classifier: model producing class scores.
            optimizer: optimizer driving training.
            lr_scheduler: learning-rate scheduler.
            dataloader: training data source.
            criterion: optional loss; defaults to cross-entropy.
            device: optional device forwarded to the base task.
            storage: optional state backend; enables checkpointing when set.
            preprocessor: optional input preprocessor; a default
                ``Preprocessor()`` is used when not supplied.
            metrics: optional extra metrics registered before the
                progress view and the checkpointer.
        """
        super(Classification, self).__init__(device=device)
        criterion = select(criterion, CrossEntropyLoss())

        self._first_epoch = 0
        self.current_epoch = 0
        self.classifier = classifier
        self.optimizer = optimizer
        self.lr_scheduler = lr_scheduler
        self.dataloader = dataloader
        self.criterion = criterion
        # Fix: keep a reference to the storage backend, for consistency with
        # the other tasks (ObjectDetection, PPO) which expose `self.storage`.
        self.storage = storage
        # Fix: use the caller-supplied preprocessor directly instead of
        # constructing a throwaway Preprocessor() that was previously
        # overwritten at the very end of __init__.
        self.preprocessor = preprocessor if preprocessor is not None else Preprocessor()

        self.metrics.append(ElapsedRealTime().every(batch=1))
        self.metrics.append(SampleCount().every(batch=1, epoch=1))
        self.metrics.append(OnlineTrainAccuracy())
        self.metrics.append(Speed())

        # All metrics must be before ProgressView and CheckPointer
        if metrics:
            for metric in metrics:
                self.metrics.append(metric)

        self.metrics.append(ProgressView(self.metrics.get('Speed')))

        if storage:
            self.metrics.append(CheckPointer(storage=storage))

        self.hyper_parameters = {}
예제 #5
0
def test_checkpoint_params():
    """A uid passed inside the parameter dict keys the checkpoint."""
    checkpointer = CheckPointer(storage=StateStorage(folder='/tmp'), time_buffer=0)

    # Save state 4 under parameters that carry an explicit uid entry.
    trained = TaskMock()
    trained.state = 4
    checkpointer.on_new_trial(trained, 0, {'a': 1, 'b': 1, 'uid': '1235'}, None)
    checkpointer.on_end_epoch(trained, 1, {})

    # The same parameters (same uid) restore the saved state.
    resumed = TaskMock()
    checkpointer.on_new_trial(resumed, 0, {'a': 1, 'b': 1, 'uid': '1235'}, None)
    assert resumed.state == 4
예제 #6
0
def test_checkpoint_argument():
    """A uid passed as the explicit argument keys the checkpoint."""
    checkpointer = CheckPointer(storage=StateStorage(folder='/tmp'), time_buffer=0)

    # Save state 3 under an explicit uid argument (not in the param dict).
    trained = TaskMock()
    trained.state = 3
    checkpointer.on_new_trial(trained, 0, {'a': 1, 'b': 3}, uid='1236')
    checkpointer.on_end_epoch(trained, 1, {})

    # Re-registering with the same uid restores the saved state.
    resumed = TaskMock()
    checkpointer.on_new_trial(resumed, 0, {'a': 1, 'b': 3}, uid='1236')
    assert resumed.state == 3
예제 #7
0
    def __init__(self,
                 model: AbstractActorCritic,
                 dataloader,
                 optimizer,
                 lr_scheduler,
                 device,
                 ppo_epoch=5,
                 ppo_batch_size=32,
                 ppo_clip_param=10,
                 ppo_max_grad_norm=1000,
                 criterion=None,
                 storage=None,
                 logger=None):
        """Build a PPO reinforcement-learning task.

        Args:
            model: actor-critic network.
            dataloader: source of rollout data.
            optimizer: optimizer driving training.
            lr_scheduler: learning-rate scheduler.
            device: device forwarded to the base task.
            ppo_epoch: optimization epochs per rollout.
            ppo_batch_size: minibatch size for PPO updates.
            ppo_clip_param: PPO clipping parameter.
            ppo_max_grad_norm: gradient-norm clipping threshold.
            criterion: optional loss reduction; defaults to summing.
            storage: optional state backend; enables checkpointing when set.
            logger: unused here; kept for interface compatibility.
        """
        super(PPO, self).__init__(device=device)

        # Default criterion reduces the loss tensor by summation.
        self.criterion: Module = criterion if criterion is not None else (lambda x: x.sum())

        self.actor_critic = model
        self.lr_scheduler = lr_scheduler
        self.optimizer: Optimizer = optimizer
        self.gamma: float = 0.99
        # Smallest positive float32, used to avoid division by zero.
        self.eps = np.finfo(np.float32).eps.item()
        self.action_sampler: Callable[[], Distribution] = Categorical
        self.tensor_shape = None
        self.frame_count: int = 0
        self.dataloader = dataloader
        self.storage = storage
        self._first_epoch = 0
        self.current_epoch = 0

        # PPO-specific hyper-parameters.
        self.ppo_epoch = ppo_epoch
        self.ppo_batch_size = ppo_batch_size
        self.ppo_clip_param = ppo_clip_param
        self.ppo_max_grad_norm = ppo_max_grad_norm

        # Register metrics; ProgressView is fed the Speed metric instance.
        self.metrics.append(NamedMetric(name='loss'))
        self.metrics.append(ElapsedRealTime())
        self.metrics.append(Speed())
        self.metrics.append(ProgressView(self.metrics.get('Speed')))

        # Checkpointing is only active when a storage backend was supplied.
        if storage:
            self.metrics.append(CheckPointer(storage=storage))

        self.hyper_parameters = {}
        self.batch_size = None
예제 #8
0
def test_checkpoint_rng():
    """Restoring a checkpoint also restores every RNG state.

    Draws from the CUDA, Python, NumPy and Torch generators before and
    after restoring; the post-restore draws must replay the originals.
    """
    def draw_all():
        # One sample per RNG source; CUDA is skipped when unavailable.
        if torch.cuda.is_available():
            cuda_sample = torch.cuda.FloatTensor(1).normal_()
        else:
            cuda_sample = 0

        py_sample = random.random()
        np_sample = numpy.random.uniform()
        torch_sample = torch.rand(1)
        return cuda_sample, py_sample, np_sample, torch_sample

    checkpointer = CheckPointer(storage=StateStorage(folder='/tmp'), time_buffer=0)

    trained = TaskMock()
    trained.state = 5
    checkpointer.on_new_trial(trained, 0, {'a': 2, 'b': 2}, None)
    # Persist a checkpoint — this captures the RNG states as of now.
    checkpointer.on_end_epoch(trained, 1, {})

    a, b, c, d = draw_all()

    # Without a restore, consecutive draws differ.
    a2, b2, c2, d2 = draw_all()

    if torch.cuda.is_available():
        assert a != a2
    assert b != b2
    assert c != c2
    assert d != d2

    # Restoring the checkpoint rewinds every generator.
    resumed = TaskMock()
    checkpointer.on_new_trial(resumed, 0, {'a': 2, 'b': 2}, None)
    assert resumed.state == 5

    # The first post-restore draws replay the first post-save draws.
    a2, b2, c2, d2 = draw_all()

    if torch.cuda.is_available():
        assert a == a2
    assert b == b2
    assert c == c2
    assert d == d2
예제 #9
0
def test_checkpoint_best():
    """keep_best retains the checkpoint with the lowest observed loss."""
    state_storage = StateStorage(folder='/tmp/chk')
    checkpointer = CheckPointer(storage=state_storage, time_buffer=0,
                                keep_best='loss')

    task = TaskMock(loss=10000)
    task.loss = 10000
    checkpointer.on_new_trial(task, 0, {'a': 2, 'b': 2}, None)
    # Save the initial checkpoint.
    checkpointer.on_end_epoch(task, 1, {})

    def best_loss():
        # Load the best checkpoint into a fresh task and report its loss.
        probe = TaskMock()
        checkpointer.load_best(probe)
        return probe.loss

    assert best_loss() == 10000

    # Each step: set the loss, save at the given epoch, check which
    # checkpoint "best" points to afterwards.
    steps = [
        (2, 1000, 1000),  # improvement: best moves to 1000
        (3, 1001, 1000),  # loss increased: best stays at 1000
        (3, 999, 999),    # improvement again
        (3, 998, 998),    # and again
    ]
    for epoch, loss, expected_best in steps:
        task.loss = loss
        checkpointer.on_end_epoch(task, epoch, {})
        assert best_loss() == expected_best
예제 #10
0
def main():
    """Train the finance baseline end to end; return its validation loss."""
    from sspace.space import compute_identity

    args = arguments()

    # Fixed universe of 30 tickers.
    tickers = [
        'MO', 'AEP', 'BA', 'BMY', 'CPB', 'CAT', 'CVX', 'KO', 'CL', 'COP',
        'ED', 'CVS', 'DHI', 'DHR', 'DRI', 'DE', 'D', 'DTE', 'ETN', 'EBAY',
        'F', 'BEN', 'HSY', 'HBAN', 'IBM', 'K', 'GIS', 'MSI', 'NSC', 'TXN',
    ]
    start, end = '2000-01-01', '2019-05-10'

    device = fetch_device()
    task = finance_baseline(tickers, start, end, args.optimizer,
                            args.batch_size, device, args.window)

    # Deterministic 16-character experiment id derived from the settings,
    # so reruns with identical settings share logs and checkpoints.
    lr = 1e-8
    uid = compute_identity(
        dict(tickers=tickers,
             start=start,
             end=end,
             window=args.window,
             lr=lr,
             epochs=args.epochs), 16)

    # Optional remote metric logging.
    if args.uri is not None:
        metric_sink = metric_logger(args.uri, args.database,
                                    f'{DEFAULT_EXP_NAME}_{uid}')
        task.metrics.append(metric_sink)

    # Optional checkpointing, keeping the best model by validation loss.
    # NOTE(review): args.storage only gates this branch; the folder comes
    # from the 'state.storage' option — confirm that is intentional.
    if args.storage is not None:
        state_storage = StateStorage(
            folder=option('state.storage', '/home/setepenre/zshare/tmp'))
        task.metrics.append(
            CheckPointer(storage=state_storage,
                         time_buffer=5,
                         keep_best='validation_loss',
                         save_init=True))

    # Reuse the optimizer defaults, overriding only the learning rate.
    optimizer = task.optimizer.defaults
    optimizer['lr'] = lr

    task.init(optimizer=optimizer, uid=uid)
    task.fit(args.epochs)

    stats = task.metrics.value()
    print(stats)
    return float(stats['validation_loss'])