def test_experiment_fit_gen(self, get_model, get_loss_metric, get_custom_l, get_callback_fix):
    """Fit an Experiment from generators, both against a generator validation
    source and against in-memory validation data.

    Bug fixed: the original rebound ``val`` to the validation generator and
    then re-tested ``val == 1`` afterwards — which could never be true once
    rebound — so the second generator triple was never closed (leak).
    A boolean flag now remembers the branch. Also removed the unused
    ``model_name`` local.
    """
    new_session()
    model, metrics, cust_objects = prepare_model(
        get_model(get_custom_l), get_loss_metric, get_custom_l)
    _, data_val_use = make_data(train_samples, test_samples)
    expe = Experiment(model)

    for val in [1, data_val_use]:
        gen, data, data_stream = make_gen(batch_size)
        # Remember whether validation comes from a generator BEFORE rebinding.
        use_gen_val = (val == 1)
        if use_gen_val:
            val, data_2, data_stream_2 = make_gen(batch_size)
        expe.fit_gen([gen], [val], nb_epoch=2,
                     model=model,
                     metrics=metrics,
                     custom_objects=cust_objects,
                     samples_per_epoch=64,
                     nb_val_samples=128,
                     verbose=2,
                     overwrite=True,
                     callbacks=get_callback_fix)
        close_gens(gen, data, data_stream)
        if use_gen_val:
            close_gens(val, data_2, data_stream_2)

    if K.backend() == 'tensorflow':
        K.clear_session()
    print(self)
def test_fit_gen_async(self):
    """Run an asynchronous hyper-parameter search fit from generator data,
    print its summary, then release every generator resource."""
    train_gen, train_data, train_stream = make_gen(batch_size)
    val_gen, val_data, val_stream = make_gen(batch_size)

    search = HParamsSearch(make_experiments(), metric='loss', op=np.min)
    search.fit_gen_async([train_gen], [val_gen],
                         nb_epoch=2,
                         verbose=2,
                         nb_val_samples=128,
                         samples_per_epoch=64,
                         overwrite=True)
    search.summary(verbose=True, metrics={'val_loss': np.min})

    # Close both the training and validation generator triples.
    close_gens(train_gen, train_data, train_stream)
    close_gens(val_gen, val_data, val_stream)
    print(self)
def test_utils():
    """Smoke-test small helper utilities: function-name lookup, backend
    switching, dataset generator plumbing, and the sliding-window helper."""
    assert get_function_name("bob") == "bob"
    assert switch_backend('sklearn') is not None

    gen, data, data_stream = make_gen(batch_size)
    open_dataset_gen(data_stream)
    gen.close()
    data.close(None)
    data_stream.close()

    # Exercise the windowing helper over a range of window widths.
    for width in range(1, 20):
        utls.window(list(range(width * 2)), width)
def test_experiment_fit_gen_async(self, get_model, get_loss_metric, get_custom_l):
    """Fit an Experiment asynchronously from generators for each validation
    flavor (none, generator, in-memory data) and check the recorded metrics.

    Bug fixed: the original rebound ``val`` to the validation generator and
    then re-tested ``val == 1`` after the fit — never true once rebound — so
    the second generator triple leaked. A boolean flag now records the branch.
    """
    new_session()
    model, metrics, cust_objects = prepare_model(
        get_model(get_custom_l), get_loss_metric, get_custom_l)
    _, data_val_use = make_data(train_samples, test_samples)
    expe = Experiment(model)
    expected_value = 2

    for val in [None, 1, data_val_use]:
        gen, data, data_stream = make_gen(batch_size)
        # Remember whether validation comes from a generator BEFORE rebinding.
        use_gen_val = (val == 1)
        if use_gen_val:
            val, data_2, data_stream_2 = make_gen(batch_size)
        _, thread = expe.fit_gen_async([gen], [val], nb_epoch=2,
                                       model=model,
                                       metrics=metrics,
                                       custom_objects=cust_objects,
                                       samples_per_epoch=64,
                                       nb_val_samples=128,
                                       verbose=2,
                                       overwrite=True)
        thread.join()

        # Each non-iteration metric should hold one entry per epoch.
        for k in expe.full_res['metrics']:
            if 'iter' not in k:
                assert len(expe.full_res['metrics'][k]) == expected_value

        close_gens(gen, data, data_stream)
        if use_gen_val:
            close_gens(val, data_2, data_stream_2)

    if K.backend() == 'tensorflow':
        K.clear_session()
    print(self)
def test_experiment_fit_gen_async2(self, get_model, get_loss_metric, get_custom_l):
    """Fit an Experiment asynchronously from generators for each validation
    flavor (none, generator, in-memory data) and check the recorded metrics.

    Bugs fixed: (1) this function duplicated the name of the previous test,
    silently shadowing it so only one of the two ever ran — renamed with a
    ``2`` suffix so pytest collects both; (2) ``val`` was rebound to the
    validation generator and then re-tested with ``val == 1`` afterwards —
    never true once rebound — leaking the second generator triple.
    """
    new_session()
    model, metrics, cust_objects = prepare_model(
        get_model(get_custom_l), get_loss_metric, get_custom_l)
    _, data_val_use = make_data(train_samples, test_samples)
    expe = Experiment(model)
    expected_value = 2

    for val in [None, 1, data_val_use]:
        gen, data, data_stream = make_gen(batch_size)
        # Remember whether validation comes from a generator BEFORE rebinding.
        use_gen_val = (val == 1)
        if use_gen_val:
            val, data_2, data_stream_2 = make_gen(batch_size)
        _, thread = expe.fit_gen_async([gen], [val], nb_epoch=2,
                                       model=model,
                                       metrics=metrics,
                                       custom_objects=cust_objects,
                                       samples_per_epoch=64,
                                       nb_val_samples=128,
                                       verbose=2,
                                       overwrite=True)
        thread.join()

        # Each non-iteration metric should hold one entry per epoch.
        for k in expe.full_res['metrics']:
            if 'iter' not in k:
                assert len(expe.full_res['metrics'][k]) == expected_value

        close_gens(gen, data, data_stream)
        if use_gen_val:
            close_gens(val, data_2, data_stream_2)

    if K.backend() == 'tensorflow':
        K.clear_session()
    print(self)
def get_generators(request):
    """Build train and validation generator triples for a parametrized size
    combination.

    The four original branches were identical except for the two sample
    counts, so they are collapsed into a lookup table. An unrecognized
    ``request.param`` previously fell through every branch and raised a
    confusing ``NameError`` on the return statement; it now raises an
    explicit ``ValueError``.

    Returns:
        ``(gen_t, data_t, d_stream_t, gen, data, d_stream, (nb_train, nb_val))``
        — the training triple, the validation triple, and the sizes used.
    """
    sizes = {
        'one to many': (4, 48),
        'many to one': (48, 4),
        'val 1': (4, 4),
        'many to many': (48, 48),
    }
    try:
        nb_train, nb_val = sizes[request.param]
    except KeyError:
        raise ValueError('unknown generator param: %r' % (request.param,))

    gen_t, data_t, d_stream_t = make_gen(batch_size, nb_train)
    gen, data, d_stream = make_gen(batch_size, nb_val)
    return gen_t, data_t, d_stream_t, gen, data, d_stream, (nb_train, nb_val)