def test_build_all_models_for_3_random_models_whose_type_should_be_kerasSequential(
        self):
    """build_all_models() in partial mode should yield keras Sequential models.

    Spot-checks two entries of the returned model list.
    """
    mock_object = model_developement.Model_Development(
        input_data=np.random.rand(3, 100, 5), running_mode="partial")
    mock_model_list = mock_object.build_all_models()
    # assertIsInstance replaces the exact type() equality check: it gives a
    # clearer failure message and tolerates keras returning a Sequential
    # subclass.
    self.assertIsInstance(mock_model_list[2], tf.keras.Sequential)
    self.assertIsInstance(mock_model_list[3], tf.keras.Sequential)
def test_create_models_dict_for_2_given_models(self):
    """create_models_dict should contain one entry per explicitly given model."""
    dev = model_developement.Model_Development(
        input_data=np.random.rand(3, 100, 5), running_mode="partial")
    given_models = ["model1", "model2"]
    models_dict = dev.create_models_dict(given_models)
    self.assertEqual(len(models_dict), 2)
def test_create_report(self):
    """The report columns should be 'models', 'epochs' plus every results key."""
    dev = model_developement.Model_Development(
        input_data=np.random.rand(3, 100, 5), running_mode="partial")
    results = {"loss": [10], "result2": [20]}
    report = dev.create_report(model_id=1, results=results)
    self.assertListEqual(list(report.columns),
                         ["models", "epochs", "loss", "result2"])
def test_filter_valid_combinations_for_absence_of_invalid_combination_based_on_the_conditions(
        self):
    """A randomly sampled surviving combination must not violate the filter rule.

    Invalid = no extra conv layer but a conv3 length above the conv1 minimum.
    """
    mock_object = model_developement.Model_Development(
        input_data=np.random.rand(10, 2))
    mock_comb_dict = mock_object.filter_valid_combinations()
    # random.choice is the idiomatic stdlib call for sampling one element,
    # replacing the manual randint(0, len-1) index construction.
    random_dict = random.choice(mock_comb_dict)
    self.assertFalse(
        random_dict["extra_conv_layer"] is False
        and random_dict["conv3_length"] >
        parameters.Hyper_Params().conv1_length.min())
def test_generate_all_combinations_for_the_first_and_last_value(self):
    """First/last combination values must match the hyper-parameter grid edges."""
    dev = model_developement.Model_Development(
        input_data=np.random.rand(10, 2))
    combinations = dev.generate_all_combinations()
    hyper_param_values = list(parameters.Hyper_Params().__dict__.values())
    self.assertEqual(combinations[0][0], hyper_param_values[0][0])
    self.assertEqual(combinations[-1][-1], hyper_param_values[-1][-1])
def test_generate_all_combinations_for_the_number_of_generated_combinations(
        self):
    """The grid size must equal the product of each hyper-parameter's value count."""
    mock_object = model_developement.Model_Development(
        input_data=np.random.rand(10, 2))
    mock_combinations = mock_object.generate_all_combinations()
    # np.product was deprecated and removed in NumPy 2.0; np.prod is the
    # supported spelling. The intermediate list() wrapper was redundant.
    expected = np.prod(
        [len(values) for values in parameters.Hyper_Params().__dict__.values()])
    self.assertEqual(len(mock_combinations), expected)
def test_model_builder_for_building_valid_model_depending_on_the_given_params(
        self):
    """model_builder should return a keras Sequential for a valid hyper-param dict."""
    mock_hyper_param_dict = {
        "conv1_length": 64,
        "conv2_length": 64,
        "extra_conv_layer": True,
        "conv3_length": 64,
        "dense1_length": 64
    }
    mock_object = model_developement.Model_Development(
        input_data=np.random.rand(3, 100, 5))
    mock_model = mock_object.model_builder(**mock_hyper_param_dict)
    # assertIsInstance replaces the exact type() equality check: clearer
    # failure message and subclass-tolerant.
    self.assertIsInstance(mock_model, tf.keras.Sequential)
def test_turn_combinations_to_dict_for_the_dict_keys_of_random_sample_from_the_generated_dicts(
        self):
    """Every generated combination dict must expose the full hyper-param key set.

    Two random samples are checked, as in the original test.
    """
    mock_object = model_developement.Model_Development(
        input_data=np.random.rand(10, 2))
    mock_comb_dict = mock_object.turn_combinations_to_dict()
    expected = list(parameters.Hyper_Params().__dict__.keys())
    # random.choice is the idiomatic replacement for manual randint indexing;
    # the loop removes the duplicated assertion body.
    for _ in range(2):
        self.assertListEqual(list(random.choice(mock_comb_dict).keys()),
                             expected)
def test_create_models_dict_for_not_given_models(self):
    """With no models supplied, partial mode keeps 4 valid models out of 10 combos."""
    dev = model_developement.Model_Development(
        input_data=np.random.rand(3, 100, 5), running_mode="partial")
    # 4 of the 10 partial-mode combinations survive validation.
    self.assertEqual(len(dev.create_models_dict()), 4)
# End-to-end pipeline: data exploration/scaling -> learning-rate strategies ->
# model training per strategy -> best-model selection and prediction.
# NOTE(review): depends on train_set / test_set defined earlier in the file.
test_report = explore.custom_stat_report(data=train_set, name="Test")
explore.box_plots(data=train_set, show=True)
# Min-max scale train and test sets separately.
scaled_train_data = explore.scaler_min_max(data=train_set)
scaled_test_data = explore.scaler_min_max(data=test_set)
# Convert scaled frames into sliding-window arrays plus their targets.
sliding_window_train_data, sliding_window_train_target = explore.turn_dfs_into_arrays(
    given_data=scaled_train_data)
sliding_window_test_data, sliding_window_test_target = explore.turn_dfs_into_arrays(
    given_data=scaled_test_data)
""" Learning rate strategy """
learning_rate = learning_rate_strategy.Learning_Rate_Strategy()
learning_rate.plot_lr_strategy(show=True)
learning_rate.create_lr_log(strategy_index=1, save=True)
# Mapping of strategy key -> learning-rate callback (presumably; see
# learning_rate_strategy.create_lr_dict — TODO confirm).
learning_rate_dict = learning_rate_strategy.create_lr_dict()
""" Model development """
# Train the full model grid once per learning-rate strategy; folder_customisation
# routes each strategy's saved models to its own directory.
for key in learning_rate_dict:
    model_dev = model_developement.Model_Development(
        input_data=sliding_window_train_data, folder_customisation=key)
    models_dict = model_dev.create_models_dict()
    model_dev.train_models(given_models=models_dict,
                           training_targets=sliding_window_train_target,
                           lr_callback=learning_rate_dict[key])
""" Best Model Prediction """
model_selection = model_developement.Model_Selection()
best_model_char = model_selection.get_best_model_characteristics()
# NOTE(review): "saved_models" is string-concatenated with the strategy value,
# not os.path.join-ed — assumes best_lr_strategy carries its own suffix/
# separator (e.g. "_strategy1") so the folder name resolves — TODO confirm.
best_model = load_model(
    os.path.join(os.getcwd(),
                 "saved_models" + best_model_char["best_lr_strategy"],
                 "model_" + "1" + ".h5"))
prediction = best_model.predict([sliding_window_test_data])
""" Final evaluation """
# Metric objects for the final evaluation (evaluation itself appears to
# continue past this chunk).
mse = MeanSquaredError()
mae = MeanAbsoluteError()