Example #1
0
def test_workout_basic():
    """A single fit() pass over the data should advance the epoch counter to 1."""
    net = get_model()
    workout = Workout(net, F.mse_loss, torch.optim.Adam(net.parameters()))

    dataset = get_data(100)
    # Use the same dataset for training and validation.
    workout.fit(dataset, dataset)
    assert workout.epoch == 1
Example #2
0
def test_predict():
    """predict() should return one prediction per input row."""
    net = get_model()
    workout = Workout(net, F.mse_loss, torch.optim.Adam(net.parameters()))

    inputs = torch.randn(16, 10)
    predictions = workout.predict(inputs)
    assert len(predictions) == 16
Example #3
0
 def __call__(self, workout: Workout, mode: Mode):
     """Forward every tracked metric that currently has a value to the writer.

     Metrics that are absent or ``None`` are skipped; values that cannot be
     converted to ``float`` are logged and ignored rather than raised.
     """
     for metric in self.metrics:
         if not workout.has_metric(metric):
             continue
         value = workout.get_metric(metric)
         if value is None:
             continue
         name = self.prefix + metric
         try:
             self.writer.add_scalar(name, float(value), workout.step)
         except ValueError:
             # Non-numeric metric value; skip it rather than abort logging.
             logging.warning("ignoring metric %s", name)
Example #4
0
    def __call__(self, workout: Workout, mode: Mode):
        """Early-stopping check: track the best value of ``self.metric`` and
        stop the workout once it no longer improves.

        Bug fix: the original used bitwise ``&``, which binds tighter than the
        comparison operators, so ``self.minimize & value < self.value`` was
        evaluated as ``(self.minimize & value) < self.value`` — a TypeError for
        float metrics and wrong logic otherwise. Use logical ``and`` instead.
        """
        # Only evaluate stopping criteria after an evaluation phase.
        if mode != mode.EVAL:
            return

        value = workout.get_metric(self.metric)
        if value is None:
            # Metric was not recorded this cycle; nothing to compare against.
            return

        if self.minimize and value < self.value:
            self.value = value
        elif not self.minimize and value > self.value:
            self.value = value
        else:
            # No improvement in the monitored direction: request a stop.
            workout.stop()
Example #5
0
    def __call__(self, workout: Workout, mode: Mode):
        """Print a one-line summary of the tracked metrics after evaluation."""
        # Restart the wall-clock timer whenever a new epoch begins.
        if workout.epoch > self.epoch:
            self.start_time = time.time()
            self.epoch = workout.epoch

        # Training phases produce no output; only report after evaluation.
        if mode == mode.TRAIN:
            return

        line = "[{:3}:{:6}]".format(workout.epoch, workout.step)
        line += "".join(
            self._format(name, workout.get_metric(name))
            for name in self.metrics
            if workout.has_metric(name)
        )
        line += " - time: {:.1f}s".format(time.time() - self.start_time)
        print(line)
Example #6
0
 def __call__(self, workout: Workout, mode: Mode):
     """Advance the learning-rate scheduler once per evaluation phase."""
     if mode != Mode.EVAL:
         return
     if self.include_metric is None:
         self.scheduler.step()
     else:
         # Metric-driven schedulers (e.g. ReduceLROnPlateau) take the
         # monitored value as an argument to step().
         self.scheduler.step(workout.get_metric(self.include_metric))
Example #7
0
def test_paramhistogram():
    """ParamHistogram should record histograms on the writer during EVAL.

    Bug fix: the original ended with ``assert writer.add_histogram.is_called()``.
    ``is_called`` is not part of the Mock API — accessing it auto-creates a
    child Mock, and calling that returns another (truthy) Mock, so the assert
    could never fail. Use the real ``assert_called()`` check instead.
    """
    writer = Mock()
    loss = Mock()
    callback = ParamHistogram(writer, include_gradient=False)
    model = resnet18()
    workout = Workout(model, loss, callbacks=[callback])
    callback(workout, Mode.EVAL)
    writer.add_histogram.assert_called()
Example #8
0
    def __call__(self, workout: Workout, mode: Mode):
        """Update the progress meter with the current epoch, step and metrics."""
        # A new epoch means the previous epoch's meter must be closed first.
        if workout.epoch > self.epoch:
            self._close_meter(workout)

        description = "[{:3}:{:6}]".format(workout.epoch, workout.step)
        for name in self.metrics:
            if workout.has_metric(name):
                description += self._format(name, workout.get_metric(name))

        meter = self._get_meter(workout)
        meter.update(1)
        if mode == mode.EVAL:
            # Evaluation ends the cycle: force a refresh and close the meter.
            meter.set_description(description)
            self._close_meter(workout)
        else:
            meter.set_description(description, refresh=False)
Example #9
0
def test_workout():
    """The epoch counter should accumulate across successive fit() calls."""
    net = get_model()
    optimizer = torch.optim.Adam(net.parameters())
    workout = Workout(net, F.mse_loss, optimizer)

    train_data = get_data(100)
    workout.fit(train_data)
    assert workout.epoch == 1

    workout.fit(train_data, epochs=10)
    assert workout.epoch == 11

    # Running validation alone must not advance the epoch counter.
    valid_data = get_data(100)
    for batch in valid_data:
        workout.validate(*batch)
    assert workout.epoch == 11

    workout.fit(train_data, valid_data, epochs=5)
    assert workout.epoch == 16
Example #10
0
def test_workout_metrics():
    """Custom metrics passed to Workout should be tracked and retrievable."""
    net = get_model()
    optimizer = torch.optim.Adam(net.parameters())

    def constant_metric(*_):
        # Always 1.0 regardless of predictions/targets.
        return 1.0

    workout = Workout(net, F.mse_loss, optimizer, my_metric=constant_metric)
    workout.fit(get_data(100), epochs=10)

    assert workout.has_metric('my_metric')
    assert not workout.has_metric('my_metric2')
    assert len(workout.get_metrics()) == 3
    assert workout.get_metric("my_metric") == 1.0
Example #11
0
def test_lr_scheduler():
    """LRScheduler callbacks should drive the optimizer's learning rate down."""
    net = get_model()
    loss_fn = F.mse_loss
    optimizer = torch.optim.Adam(net.parameters(), lr=1e-02)
    workout = Workout(net, loss_fn, optimizer)

    # StepLR: lr * gamma every step_size epochs -> 1e-2 * 0.1**3 after 30.
    step_lr = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.1)
    data = get_data(100)
    workout.fit(data, epochs=30, callbacks=[LRScheduler(step_lr)])
    assert workout.optim.param_groups[0]['lr'] == 1e-05

    # ReduceLROnPlateau monitors the validation loss instead of the epoch count.
    net = get_model()
    val_data = get_data(25)
    optimizer = torch.optim.Adam(net.parameters(), lr=1e-02)
    workout = Workout(net, loss_fn, optimizer)
    plateau = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer)
    workout.fit(data, val_data, epochs=100,
                callbacks=[LRScheduler(plateau, "val_loss")])
    assert workout.optim.param_groups[0]['lr'] < 1e-02
Example #12
0
def get_workout():
    """Build a Workout backed entirely by mocks, configured for 10 batches."""
    mocked = Workout(Mock(), Mock(), optim=Mock(), mover=Mock())
    mocked.batches = 10
    return mocked
Example #13
0
 def __call__(self, workout: Workout, mode: Mode):
     """Persist the workout to ``self.filename`` after each evaluation phase."""
     if mode != mode.EVAL:
         return
     workout.save(self.filename)
Example #14
0
 def __call__(self, workout: Workout, mode: Mode):
     """Record the current learning rate of every optimizer parameter group
     in the workout history.

     Bug fix: ``idx`` produced by ``enumerate`` is an int, so the original
     ``self.prefix + idx`` raised ``TypeError: can only concatenate str``;
     convert the index to a string before concatenating.
     """
     for idx, group in enumerate(workout.optim.param_groups):
         name = self.prefix + str(idx)
         workout.update_history(name, group["lr"])
Example #15
0
def test_workout_state():
    """state_dict and save/load round-trips should work and report filenames."""
    net = get_model()
    workout = Workout(net, F.mse_loss)

    # Round-trip the state through a freshly constructed Workout.
    snapshot = workout.state_dict()
    workout = Workout(net, F.mse_loss)
    workout.load_state_dict(snapshot)

    # Explicit filename: save() and load() should echo the name back.
    path = "./tmp_file.dat"
    assert workout.save(path) == path
    assert workout.load(path) == path
    os.remove(path)

    # Default filename: load() should find the file save() just created.
    saved = workout.save()
    loaded = workout.load()
    os.remove(saved)
    assert saved == loaded
    # Clean up the two directory levels the default save() created.
    parent = os.path.dirname(saved)
    os.rmdir(parent)
    os.rmdir(os.path.dirname(parent))