def test_experiment_fit_gen(self, get_model, get_loss_metric,
                            get_custom_l, get_callback_fix):
    new_session()
    model, metrics, cust_objects = prepare_model(get_model(get_custom_l),
                                                 get_loss_metric,
                                                 get_custom_l)
    model_name = model.__class__.__name__
    _, data_val_use = make_data(train_samples, test_samples)
    expe = Experiment(model)

    # validation data is either a generator (sentinel 1) or an in-memory array
    for val in [1, data_val_use]:
        gen, data, data_stream = make_gen(batch_size)
        use_gen_val = val == 1
        if use_gen_val:
            # replace the sentinel with a fresh validation generator
            val, data_2, data_stream_2 = make_gen(batch_size)

        expe.fit_gen([gen], [val], nb_epoch=2,
                     model=model,
                     metrics=metrics,
                     custom_objects=cust_objects,
                     samples_per_epoch=64,
                     nb_val_samples=128,
                     verbose=2, overwrite=True,
                     callbacks=get_callback_fix)

        close_gens(gen, data, data_stream)
        if use_gen_val:
            # close the extra validation generator built for this iteration
            close_gens(val, data_2, data_stream_2)

    if K.backend() == 'tensorflow':
        K.clear_session()

    print(self)

def test_fit_gen_async(self):
    gen, data, data_stream = make_gen(batch_size)
    val, data_2, data_stream_2 = make_gen(batch_size)

    experiments = make_experiments()
    param_search = HParamsSearch(experiments, metric='loss', op=np.min)
    param_search.fit_gen_async([gen], [val], nb_epoch=2,
                               verbose=2,
                               nb_val_samples=128,
                               samples_per_epoch=64,
                               overwrite=True)
    param_search.summary(verbose=True, metrics={'val_loss': np.min})

    close_gens(gen, data, data_stream)
    close_gens(val, data_2, data_stream_2)
    print(self)

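# ---------------------------------------------------------------------------
# Every test in this module builds its generators through the `make_gen`
# helper, which returns a (generator, data handle, data stream) triple, and
# releases them with `close_gens`. Those helpers live elsewhere in the test
# suite; the sketch below is only a hypothetical, self-contained stand-in
# illustrating the contract the tests depend on. The in-memory arrays, the
# shapes, and the iterator used in place of a real data stream are assumptions
# made for illustration, not the suite's actual implementation.


def _make_gen_sketch(batch_size, n_samples=128, n_features=10):
    # toy data handle standing in for whatever backend the real helper opens
    data = (np.random.rand(n_samples, n_features),
            np.random.randint(0, 2, size=(n_samples, 1)))
    # stand-in for the stream object returned by the real helper
    data_stream = iter(range(0, n_samples, batch_size))

    def _gen():
        # Keras-style generator: loop over the data indefinitely
        while True:
            for start in range(0, n_samples, batch_size):
                sl = slice(start, start + batch_size)
                yield data[0][sl], data[1][sl]

    return _gen(), data, data_stream


def _close_gens_sketch(gen, data, data_stream):
    # release the generator and, when the stream supports it, the stream too
    gen.close()
    if hasattr(data_stream, 'close'):
        data_stream.close()
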
def test_experiment_generator_setups(self, get_generators):
    gen_t, data_t, d_stream_t, gen, data, d_stream, nb = get_generators
    nb_train, nb_val = nb

    test_model = model()
    test_model.compile(loss='binary_crossentropy', optimizer='rmsprop')

    expe = Experiment(test_model)
    expe.fit_gen([gen_t], [gen], nb_epoch=2,
                 samples_per_epoch=nb_train,
                 nb_val_samples=nb_val,
                 verbose=2, overwrite=True)

    close_gens(gen_t, data_t, d_stream_t)
    close_gens(gen, data, d_stream)

    if K.backend() == 'tensorflow':
        K.clear_session()

    print(self)

def test_experiment_fit_gen_async_nogenval(self, get_model_data_expe):
    '''
    Main case: gen on train, data on val
    Subcases:
        10 chunks on train
        1 chunk on train
    '''
    data, data_val, is_classif, model, metric, expe = get_model_data_expe

    for Nchunks_gen, expected_value in szip([True, False], [10, 1]):
        gen_train, data_train, data_stream_train = make_gen(
            Nchunks_gen, is_classif, train=True)

        for data_val_loc in [None, data_val]:
            _, thread = expe.fit_gen_async([gen_train], [data_val_loc],
                                           model=model,
                                           overwrite=True,
                                           metrics=metric)
            thread.join()

            assert len(expe.full_res['metrics']['score']) == expected_value
            assert len(
                expe.full_res['metrics']['val_score']) == expected_value

            if data_val_loc is not None:
                assert None not in expe.full_res['metrics']['val_score']
            else:
                assert np.all([np.isnan(v)
                               for v in expe.full_res['metrics']['val_score']])

            assert expe.data_id is not None
            assert expe.mod_id is not None
            assert expe.params_dump is not None
            assert expe

        close_gens(gen_train, data_train, data_stream_train)

    print(self)

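# ---------------------------------------------------------------------------
# The asynchronous tests all follow the same calling convention:
# `_, thread = expe.fit_gen_async(...)` launches the fit in the background and
# `thread.join()` blocks until it finishes, after which `expe.full_res` can be
# inspected. The snippet below is a minimal, generic illustration of that
# convention using only the standard library; `_fit_async_sketch` is a
# hypothetical helper, not the library's implementation of `fit_gen_async`.

import threading


def _fit_async_sketch(fit_fn, *args, **kwargs):
    # run a blocking fit function in a background thread and return a results
    # placeholder together with the thread handle the caller can join on
    results = {}

    def _target():
        results['history'] = fit_fn(*args, **kwargs)

    thread = threading.Thread(target=_target)
    thread.start()
    return results, thread


# usage mirroring the tests: join the thread before asserting on the results
_res, _thread = _fit_async_sketch(lambda: sum(range(1000)))  # toy "fit"
_thread.join()
assert _res['history'] == 499500
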
def test_experiment_fit_gen_async(self, get_model, get_loss_metric,
                                  get_custom_l):
    new_session()
    model, metrics, cust_objects = prepare_model(get_model(get_custom_l),
                                                 get_loss_metric,
                                                 get_custom_l)
    _, data_val_use = make_data(train_samples, test_samples)
    expe = Experiment(model)

    expected_value = 2
    # validation data is absent, a generator (sentinel 1), or an array
    for val in [None, 1, data_val_use]:
        gen, data, data_stream = make_gen(batch_size)
        use_gen_val = val == 1
        if use_gen_val:
            # replace the sentinel with a fresh validation generator
            val, data_2, data_stream_2 = make_gen(batch_size)

        _, thread = expe.fit_gen_async([gen], [val], nb_epoch=2,
                                       model=model,
                                       metrics=metrics,
                                       custom_objects=cust_objects,
                                       samples_per_epoch=64,
                                       nb_val_samples=128,
                                       verbose=2, overwrite=True)
        thread.join()

        # every non-iteration metric should hold one entry per epoch
        for k in expe.full_res['metrics']:
            if 'iter' not in k:
                assert len(expe.full_res['metrics'][k]) == expected_value

        close_gens(gen, data, data_stream)
        if use_gen_val:
            close_gens(val, data_2, data_stream_2)

    if K.backend() == 'tensorflow':
        K.clear_session()

    print(self)

def test_experiment_fit_gen_async_withgenval(self, get_model_data_expe):
    '''
    Main case: gen on train, gen on val
    Subcases:
        10 chunks on train / 10 chunks on val
        10 chunks on train / 1 chunk on val
        1 chunk on train / 10 chunks on val
    '''
    data, data_val, is_classif, model, metric, expe = get_model_data_expe

    for Nchunks_gen, Nchunks_val in szip([True, True, False],
                                         [True, False, True]):
        gen_train, data_train, data_stream_train = make_gen(
            Nchunks_gen, is_classif, train=True)
        gen_test, data_test, data_stream_test = make_gen(
            Nchunks_val, is_classif, train=False)

        _, thread = expe.fit_gen_async(
            [gen_train], [gen_test], overwrite=True, metrics=metric)
        thread.join()

        expected_value_gen = 10
        if not Nchunks_gen:
            expected_value_gen = 1

        assert len(expe.full_res['metrics']['score']) == expected_value_gen
        assert len(expe.full_res['metrics']['val_score']) == 10

        assert expe.data_id is not None
        assert expe.mod_id is not None
        assert expe.params_dump is not None
        assert expe

        close_gens(gen_train, data_train, data_stream_train)
        # also release the validation generator built for this iteration
        close_gens(gen_test, data_test, data_stream_test)

    print(self)