import os

import numpy as np
import pytest

# The import paths below follow the multiml package layout used elsewhere in
# this file (test_keras_conv2d imports Conv2DTask from multiml.task.keras);
# the exact module locations are assumptions.
from multiml import logger
from multiml.saver import Saver
from multiml.storegate import StoreGate
from multiml.task.keras import EnsembleTask, MLPTask


def test_mypytorchtask():
    logger.set_level(logger.DEBUG)

    saver = Saver()
    task = MyPytorchTask(saver=saver,
                         model=MyPytorchModel,
                         optimizer='SGD',
                         optimizer_args=dict(lr=0.1),
                         loss='CrossEntropyLoss',
                         metrics=['acc', 'lrs'])
    task.set_hps({'num_epochs': 5})
    task.execute()
    task.finalize()

    # the hyperparameter set via set_hps() must be persisted by the saver
    assert saver.load_ml(key='mypytorch')['_num_epochs'] == 5
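# ---------------------------------------------------------------------------
# MyPytorchTask and MyPytorchModel are fixtures normally provided elsewhere in
# the test suite (e.g. conftest.py). The model below is a minimal,
# hypothetical stand-in so the test above reads self-contained; MyPytorchTask
# is assumed to be a multiml PyTorch task subclass that dumps its state under
# the key 'mypytorch'. The real fixtures in the repository may differ.
# ---------------------------------------------------------------------------
import torch.nn as nn


class MyPytorchModel(nn.Module):
    """Toy two-feature, two-class model (sketch, not the real fixture)."""
    def __init__(self):
        super().__init__()
        self._layers = nn.Sequential(
            nn.Linear(2, 4),
            nn.ReLU(),
            nn.Linear(4, 2),
        )

    def forward(self, inputs):
        return self._layers(inputs)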
def test_saver():
    saver = Saver(mode='shelve')
    assert saver._serial_id == 0

    saver.open()
    assert saver._state == 'open'

    saver['key0'] = 'value0'
    assert saver.keys('shelve') == ['key0']
    assert saver['key0'] == 'value0'
    assert saver._shelve['key0'] == 'value0'

    # change shelve to dict
    saver.set_mode('dict')
    saver['key1'] = 'value1'
    assert saver.keys('dict') == ['key1']
    assert saver['key1'] == 'value1'
    assert saver._dict['key1'] == 'value1'
    assert 'key0' not in saver._dict
    assert 'key1' not in saver._shelve

    # save dict objects to shelve
    saver.save()
    assert set(saver.keys()) == set(['key0', 'key1'])
    assert 'key0' not in saver._dict
    assert 'key1' not in saver._dict
    assert 'key0' in saver._shelve
    assert 'key1' in saver._shelve

    # move data from shelve to dict
    saver.to_memory('key0')
    assert 'key0' not in saver._shelve
    assert 'key0' in saver._dict

    # move data from dict to shelve
    saver.to_storage('key0')
    assert 'key0' in saver._shelve
    assert 'key0' not in saver._dict

    saver.set_mode('shelve')
    saver.dump_ml(key='ml0', param0='param0', param1='param1')
    assert saver.load_ml(key='ml0')['param0'] == 'param0'

    saver.close()
    assert saver._state == 'close'
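# ---------------------------------------------------------------------------
# build_storegate() is a fixture normally provided elsewhere in the test
# suite. The sketch below is a hypothetical stand-in: the variable names
# ('var0', 'var1', 'label') match what the Keras tests expect, but the
# backend, data_id, and shapes are assumptions (e.g. test_keras_conv2d
# reshapes its input to (1, 3, 3, 1), so the real fixture may store
# image-like data for 'var0').
# ---------------------------------------------------------------------------
def build_storegate():
    storegate = StoreGate(backend='numpy', data_id='test_data')
    for phase in ('train', 'valid', 'test'):
        data = np.random.normal(size=(10, 2))
        label = np.random.binomial(n=1, p=0.5, size=(10,))
        storegate.add_data(var_names=['var0', 'var1'], data=data, phase=phase)
        storegate.add_data(var_names='label', data=label, phase=phase)
    storegate.compile()
    return storegate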
def test_keras_ensemble():
    logger.set_level(logger.DEBUG)
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'

    saver = Saver()
    storegate = build_storegate()

    args_mlptask = {
        # BaseTask
        'saver': saver,
        'storegate': storegate,
        # KerasBaseTask
        'optimizer': 'adam',
        'num_epochs': 2,
        'max_patience': 1,
        'loss': 'mse',
        'run_eagerly': True,
        # MLPTask
        'activation': 'relu',
        'activation_last': 'sigmoid',
    }

    subtask0 = MLPTask(name='subtask0',
                       input_var_names=['var0', 'var1'],
                       output_var_names=['output0'],
                       true_var_names=['label'],
                       layers=[4, 1],
                       **args_mlptask)
    subtask1 = MLPTask(name='subtask1',
                       input_var_names=['var0', 'var1'],
                       output_var_names=['output0'],
                       true_var_names=['label'],
                       layers=[4, 1],
                       **args_mlptask)
    subtask0.execute()
    subtask1.execute()

    task = EnsembleTask(
        subtasks=[subtask0, subtask1],
        dropout_rate=None,
        individual_loss=True,
        individual_loss_weights=1.0,
        saver=saver,
        storegate=storegate,
        save_weights=True,
        # do_training=False,
        phases=['train', 'valid', 'test'],
    )

    assert task.get_inputs()[0].shape[1] == len(['var0', 'var1'])
    assert task.input_var_names == ['var0', 'var1']
    assert task.output_var_names == ['output0']

    task.execute()
    task.finalize()

    task2 = EnsembleTask(
        name='EnsembleTask2',
        subtasks=[subtask0, subtask1],
        dropout_rate=None,
        individual_loss=True,
        individual_loss_weights=1.0,
        saver=saver,
        storegate=storegate,
        phases=['test'],
        load_weights=saver.load_ml(task.get_unique_id())['model_path'],
    )
    task2.execute()
    task2.finalize()

    y_pred = task.predict(phase='test')
    y_pred_load = task2.predict(phase='test')
    assert np.array_equal(y_pred, y_pred_load)
def test_keras_mlp():
    logger.set_level(logger.DEBUG)
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'

    storegate = build_storegate()
    saver = Saver()

    args_task = {
        # BaseTask
        'saver': saver,
        'storegate': storegate,
        # MLBaseTask
        'phases': None,
        'save_weights': True,
        # KerasBaseTask
        'input_var_names': ['var0', 'var1'],
        'output_var_names': 'output0',
        'true_var_names': 'label',
        'optimizer': 'adam',
        'num_epochs': 2,
        'max_patience': 1,
        'loss': 'binary_crossentropy',
        'run_eagerly': True,
        # MLPTask
        'layers': [8, 1],
        'activation': 'relu',
        'activation_last': 'sigmoid',
        'batch_norm': True,
    }

    task = MLPTask(**args_task)
    assert task._layers == [8, 1]

    task.set_hps({
        'layers': [4, 1],
        'input_shapes': None,
        'output_var_names': 'output0',
    })
    assert task._layers == [4, 1]
    assert task._input_shapes == [len(['var0', 'var1'])]
    assert task.get_inputs()[0].shape[1] == len(['var0', 'var1'])

    task.execute()
    task.finalize()

    # Validate model save/load
    args_task['phases'] = ['test']
    args_task['save_weights'] = False
    args_task['load_weights'] = saver.load_ml(task._name)['model_path']
    args_task['layers'] = [4, 1]
    task2 = MLPTask(**args_task)

    # Calling predict() before the model is built must fail
    with pytest.raises(ValueError):
        task2.predict(phase='test')

    task2.execute()
    task2.finalize()

    y_pred = task.predict(phase='test')
    y_pred_load = task2.predict(phase='test')
    # assert np.sum(np.square(y_pred - y_pred_load)) < 1e-10
    assert np.array_equal(y_pred, y_pred_load)
def test_keras_conv2d():
    logger.set_level(logger.DEBUG)
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'

    storegate = build_storegate()
    saver = Saver()

    args_task = {
        # BaseTask
        'saver': saver,
        'storegate': storegate,
        # MLBaseTask
        'phases': None,
        'save_weights': True,
        # KerasBaseTask
        'input_var_names': ['var0'],
        'output_var_names': ['output0'],
        'optimizer': 'adam',
        'num_epochs': 2,
        'max_patience': 1,
        'loss': 'binary_crossentropy',
        'run_eagerly': True,
        # MLPTask
        'true_var_names': ['label'],
        'layers': [4, 1],
        'input_shapes': (1, 3, 3, 1),
        'activation': 'relu',
        'activation_last': 'sigmoid',
        # Conv2DTask
        'conv2d_layers': [
            ('conv2d', {'filters': 4, 'kernel_size': (2, 2)}),
            ('maxpooling2d', {'pool_size': (2, 2)}),
            ('upsampling2d', {'size': (2, 2)}),
        ],
    }

    from multiml.task.keras import Conv2DTask
    task = Conv2DTask(**args_task)
    task.execute()
    task.finalize()

    # Validate model save/load
    args_task['phases'] = ['test']
    args_task['save_weights'] = False
    args_task['load_weights'] = saver.load_ml(task._name)['model_path']
    task2 = Conv2DTask(**args_task)
    task2.execute()
    task2.finalize()

    y_pred = task.predict(phase='test')
    y_pred_load = task2.predict(phase='test')
    assert np.array_equal(y_pred, y_pred_load)
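# Convenience entry point so the module can also be run as a plain script;
# pytest ignores this block when collecting tests.
if __name__ == '__main__':
    test_mypytorchtask()
    test_saver()
    test_keras_ensemble()
    test_keras_mlp()
    test_keras_conv2d()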