def test_quadratic_model():
    """Fitting a quadratic model over epochs 0-5 yields symmetric metrics.

    The fit window runs five epochs, so every expected list — including
    'epochs' — must contain five entries.
    """
    m = QuadraticModel()
    w = SimpleWrapper(m)
    h = w.fit({'initial_epoch': 0, 'epochs': 5})
    compare_dicts(
        h,
        {
            # Fixed: the window [0, 5) produces five epochs; the original
            # expected range(0, 3), which disagreed with the five-entry
            # metric lists below.
            'epochs': list(range(0, 5)),
            'acc': [4, 1, 0, 1, 4],
            'loss': [-4, -1, 0, -1, -4],
            'val_acc': [4.5, 1.5, 0.5, 1.5, 4.5],
            'val_loss': [-4.5, -1.5, -0.5, -1.5, -4.5]
        })
def test_copy_history(keras_model):
    """Copying a member snapshots its history; later steps do not leak back."""
    x_train, y_train = range(10), range(10)
    x_val, y_val = range(10), range(10)

    original = Member(
        SimpleWrapper(keras_model), 1,
        step_args={
            'epochs_per_step': 2,
            'fit_args': {'x': x_train, 'y': y_train, 'batch_size': 42}
        },
        eval_args={'x': x_val, 'y': y_val})

    original.step()
    original.step()
    assert len(original.observations()['observations']) == 2

    # The copy starts with the same history...
    clone = copy(original)
    assert len(clone.observations()['observations']) == 2

    # ...but stepping the copy must not mutate the original's history.
    clone.step()
    clone.step()
    assert len(original.observations()['observations']) == 2
    assert len(clone.observations()['observations']) == 4
def test_member_ready(keras_model):
    """This member should be ready after the sixth and twelfth epochs."""
    from pybt.policy.ready import ReadyAfter
    from pybt.policy.done import StopAfter

    member = Member(
        SimpleWrapper(keras_model), 1,
        stopping_criteria=StopAfter(epochs=13),
        ready_strategy=ReadyAfter(epochs=6),
        step_args={
            'epochs_per_step': 3,
            'fit_args': {'x': {}, 'y': {}}
        },
        eval_args={})

    # Each step runs three epochs, so readiness fires at epochs 6 and 12.
    assert not member.ready()
    member.step()                  # 3 epochs total
    assert not member.ready()
    member.step()                  # 6 epochs total
    assert member.ready()
    member.step()                  # 9 epochs total
    assert not member.ready()
    member.step()                  # 12 epochs total
    assert member.ready()
    member.step()                  # 15 epochs total
    assert not member.ready()
def test_fit(keras_model):
    """fit() reports metrics for the requested epoch window [4, 7)."""
    expected = {
        'acc': [-76, -96, -28],
        'val_loss': [4.75, 5.75, 6.75],
        'loss': [4.5, 5.5, 6.5],
        'val_acc': [4.25, 5.25, 6.25],
        'epochs': [4, 5, 6]
    }

    wrapper = SimpleWrapper(keras_model)
    obs = wrapper.fit(fit_args={'initial_epoch': 4, 'epochs': 7})

    # Echo both sides to make a compare_dicts failure easy to diagnose.
    print('result:\n{}'.format(obs))
    print('should be:\n{}'.format(expected))
    compare_dicts(obs, expected)
def test_linear_small_steps_when_ready():
    """Test that we can train multi-epoch steps."""
    stop_after_epochs = 30
    ready_after_epochs = 6
    epochs_per_step = 3

    trainer = Trainer(
        SimpleWrapper(LinearModel()),
        step_args={
            'epochs_per_step': epochs_per_step,
            'fit_args': {'x': {}, 'y': {}}
        },
        eval_args={'x': {}, 'y': {}},
        stopping_criteria=StopAfter(stop_after_epochs),
        ready_strategy=ReadyAfter(ready_after_epochs))

    model, score = trainer.train()

    assert score == 30
    assert model.evaluate({}) == (-30, 30)
    # One observation per step, plus one more.
    assert len(trainer.observations()) == (stop_after_epochs // epochs_per_step) + 1
def test_quadratic_unit_steps():
    """Test that we can train unit-sized steps."""
    import random

    # Pin the RNG so any exploit/explore randomness is deterministic.
    random.seed(0)

    stop_after_epochs = 10
    ready_after_epochs = 1
    epochs_per_step = 1

    trainer = Trainer(
        SimpleWrapper(QuadraticModel()),
        step_args={
            'epochs_per_step': epochs_per_step,
            'fit_args': {'x': {}, 'y': {}}
        },
        eval_args={'x': {}, 'y': {}},
        stopping_criteria=StopAfter(stop_after_epochs),
        ready_strategy=ReadyAfter(ready_after_epochs))

    model, score = trainer.train()

    assert floor(score) == 54
    # NOTE(review): loss/acc are unpacked but never asserted on — confirm
    # whether checks on the evaluation result were intended here.
    loss, acc = model.evaluate({})
def test_multiple_train_calls():
    """A second train() call after the stop criteria is met changes nothing."""
    stop_after_epochs = 30
    ready_after_epochs = 1
    epochs_per_step = 3

    trainer = Trainer(
        SimpleWrapper(LinearModel()),
        step_args={
            'epochs_per_step': epochs_per_step,
            'fit_args': {'x': {}, 'y': {}}
        },
        eval_args={'x': {}, 'y': {}},
        stopping_criteria=StopAfter(stop_after_epochs),
        ready_strategy=ReadyAfter(ready_after_epochs))

    model, score = trainer.train()
    assert score == 30
    assert model.evaluate({}) == (-30, 30)
    assert len(trainer.observations()) == (stop_after_epochs // epochs_per_step) + 1

    # Nothing should happen because the done criteria has already been met.
    m2, s2 = trainer.train()
    assert s2 == 30
def test_member_done(keras_model):
    """A member is done once the stopping criteria's epoch budget is spent."""
    from pybt.policy.done import StopAfter

    member = Member(
        SimpleWrapper(keras_model), 1,
        step_args={
            'epochs_per_step': 2,
            'fit_args': {'x': {}, 'y': {}}
        },
        eval_args={},
        stopping_criteria=StopAfter(epochs=4))

    assert not member.done()
    member.step()                  # 2 epochs total
    assert not member.done()
    member.step()                  # 4 epochs total — budget reached
    assert member.done()
def test_step_history(keras_model):
    """Each step() appends exactly one observation to the member's history."""
    images, labels = range(10), range(10)
    member = Member(
        SimpleWrapper(keras_model), 1,
        step_args={
            'epochs_per_step': 2,
            'fit_args': {'x': images, 'y': labels}
        },
        eval_args={'x': images, 'y': labels})

    def history_len():
        # Length of the recorded observation list.
        return len(member.observations()['observations'])

    assert history_len() == 0
    member.step()
    assert history_len() == 1
    member.step()
    assert history_len() == 2
def test_truncation(keras_model):
    """After 5 steps, we should swap the current member with one in the top 25%."""
    from pybt.policy.exploit import Truncation
    from pybt.policy.ready import ReadyAfter
    from pybt.policy.done import StopAfter

    # NOTE(review): the docstring says "top 25%" but the strategy uses
    # upper=.4/lower=.4 — confirm which is intended.
    trainer = Trainer(
        model=SimpleWrapper(keras_model),
        stopping_criteria=StopAfter(5),
        ready_strategy=ReadyAfter(5),
        exploit_strategy=Truncation(upper=.4, lower=.4),
        step_args={
            'epochs_per_step': 1,
            'fit_args': {'x': {}, 'y': {}}
        })

    _, score = trainer.train()
    assert score in [84., 90.]
def test_linear_state_based_metrics():
    """Metrics continue from the model's internal state, not the epoch range."""
    model = LinearModel()
    wrapper = SimpleWrapper(model)
    assert wrapper != model

    # History should reflect the change in metrics over a window specified
    # by the epoch range, but continuing from where the model state was as
    # of the last call to fit().
    assert wrapper.eval({}) == (0., 0.)

    history = wrapper.fit({'initial_epoch': 0, 'epochs': 3})
    compare_dicts(
        history,
        {
            'epochs': list(range(0, 3)),
            'acc': [1, 2, 3],
            'loss': [-1, -2, -3],
            'val_acc': [1.5, 2.5, 3.5],
            'val_loss': [-1.5, -2.5, -3.5]
        })

    # Run for a few more epochs.
    history = wrapper.fit({'initial_epoch': 3, 'epochs': 5})
    compare_dicts(
        history,
        {
            'epochs': list(range(3, 5)),
            'acc': [4, 5],
            'loss': [-4, -5],
            'val_acc': [4.5, 5.5],
            'val_loss': [-4.5, -5.5]
        })

    # And now run for a few more, but reset the epoch range -- the key here
    # is that the metrics should continue relative to the previous state,
    # not the epoch range.
    history = wrapper.fit({'initial_epoch': 1, 'epochs': 3})
    compare_dicts(
        history,
        {
            'epochs': list(range(1, 4)),
            'acc': [6, 7, 8],
            'loss': [-6, -7, -8],
            'val_acc': [6.5, 7.5, 8.5],
            'val_loss': [-6.5, -7.5, -8.5]
        })
def test_linear_copy():
    """A copied wrapper trains independently of the original."""
    from copy import copy

    original = SimpleWrapper(LinearModel())
    clone = copy(original)
    assert original != clone
    assert original._model != clone._model
    assert original.eval({}) == clone.eval({})

    # Move original model ahead 3 epochs.
    history = original.fit({'initial_epoch': 0, 'epochs': 3})
    compare_dicts(
        history,
        {
            'epochs': list(range(0, 3)),
            'acc': [1, 2, 3],
            'loss': [-1, -2, -3],
            'val_acc': [1.5, 2.5, 3.5],
            'val_loss': [-1.5, -2.5, -3.5]
        })
    assert original.eval({}) == (-3, 3)
    assert original.eval({}) != clone.eval({})

    # Move copy ahead 2 epochs.
    history = clone.fit({'initial_epoch': 1, 'epochs': 3})
    compare_dicts(
        history,
        {
            'epochs': list(range(1, 3)),
            'acc': [1, 2],
            'loss': [-1, -2],
            'val_acc': [1.5, 2.5],
            'val_loss': [-1.5, -2.5]
        })
    assert clone.eval({}) == (-2, 2)
    assert original.eval({}) != clone.eval({})

    # Move original ahead 1 epoch.
    history = original.fit({'initial_epoch': 3, 'epochs': 4})
    compare_dicts(
        history,
        {
            'epochs': list(range(3, 4)),
            'acc': [4],
            'loss': [-4],
            'val_acc': [4.5],
            'val_loss': [-4.5]
        })
    assert original.eval({}) == (-4, 4)
    assert original.eval({}) != clone.eval({})

    # Move copy ahead 2 epochs; the two models should now agree.
    history = clone.fit({'initial_epoch': 3, 'epochs': 5})
    compare_dicts(
        history,
        {
            'epochs': list(range(3, 5)),
            'acc': [3, 4],
            'loss': [-3, -4],
            'val_acc': [3.5, 4.5],
            'val_loss': [-3.5, -4.5]
        })
    assert clone.eval({}) == (-4, 4)
    assert original.eval({}) == clone.eval({})