def run_models(algorithm, similarity_score, test_data_path, results_path,
                   new_model_running, event):
        """

        :param algorithm:
        :param similarity_score:
        :param test_data_path:
        :param results_path:
        :param new_model_running:
        :param event:
        :return:
        """

        if new_model_running:
            (training_data_path, save_model, algorithms, threshold,
             features_list, target_features_list) = ModelsExecution.get_new_model_parameters()
        else:
            training_data_path, save_model, algorithms, threshold = \
                ModelsExecution.get_load_model_parameters()

        # Set new nested dictionary for a chosen algorithm
        results_data = InputSettings.get_results_metrics_data()
        results_data[algorithm] = dict()
        InputSettings.update_results_metrics_data(results_data)

        # Check whether the current flow is new model creation or loading an existing model
        if new_model_running:
            algorithm_model_path = None
            algorithm_features_list = features_list[algorithm]
            algorithm_target_features_list = target_features_list[algorithm]
            train_scaler_path = None
            target_scaler_path = None
        else:
            algorithm_path = InputSettings.get_existing_algorithm_path(
                algorithm)
            model_data_json_path = os.path.join(str(algorithm_path),
                                                'model_data.json')
            algorithm_json_file = read_json_file(model_data_json_path)
            algorithm_features_list = algorithm_json_file['features']
            algorithm_target_features_list = algorithm_json_file[
                'target_features']
            threshold = algorithm_json_file['threshold']
            algorithm_model_path = get_model_path(algorithm_path)
            train_scaler_path = get_scaler_path(algorithm_path, 'train')
            target_scaler_path = get_scaler_path(algorithm_path, 'target')

        # Dynamic execution for each chosen model
        model_execution_function = ModelsExecution.get_algorithm_execution_function(
            algorithm)
        model_execution_function(
            test_data_path, results_path, similarity_score, training_data_path,
            save_model, new_model_running, algorithm_model_path, threshold,
            algorithm_features_list, algorithm_target_features_list,
            train_scaler_path, target_scaler_path, event)
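
The final step resolves the execution function dynamically from the algorithm name. A minimal sketch of one way get_algorithm_execution_function could be backed by a name-to-runner registry; the registry and the stub runner names below are assumptions, not the project's actual code:

def run_lstm_execution(*args):
    ...  # placeholder for an assumed LSTM runner

def run_svr_execution(*args):
    ...  # placeholder for an assumed SVR runner

# Hypothetical registry mapping algorithm names to runner functions
ALGORITHM_REGISTRY = {
    'LSTM': run_lstm_execution,
    'SVR': run_svr_execution,
}

def get_algorithm_execution_function(algorithm):
    return ALGORITHM_REGISTRY[algorithm]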
    @classmethod
    def get_parameters(cls):
        """
        Get the parameters shared by both flows.
        :return: the shared flow parameters
        """

        return (
            InputSettings.get_similarity(),
            InputSettings.get_test_data_path(),
            InputSettings.get_results_path(),
            InputSettings.get_new_model_running(),
        )
    def test_set_new_model_parameters(self):
        """
        Test that valid input for each path field is stored in InputSettings.
        :return:
        """

        self.app.set_features_columns_options = MagicMock(return_value={})
        self.new_model_window.set_new_model_parameters()
        self.assertEqual(self.new_model_window.training_input.get(),
                         InputSettings.get_training_data_path())
        self.assertEqual(self.new_model_window.test_input.get(),
                         InputSettings.get_test_data_path())
        self.assertEqual(self.new_model_window.results_input.get(),
                         InputSettings.get_results_path())
    @classmethod
    def get_new_model_parameters(cls):
        """
        Get the parameters which belong to the new-model flow.
        :return: new model parameters
        """

        return (
            InputSettings.get_training_data_path(),
            InputSettings.get_saving_model(),
            InputSettings.get_algorithms(),
            None,  # threshold is not known yet in the new-model flow
            InputSettings.get_users_selected_features(),
            InputSettings.get_users_selected_target_features(),
        )
    @classmethod
    def get_load_model_parameters(cls):
        """
        Get the parameters which belong to the load-existing-model flow.
        :return: existing model parameters
        """

        # threshold is None here; run_models reads it from the model's model_data.json
        return (None, False, InputSettings.get_existing_algorithms(), None)
    @staticmethod
    def init_models():
        """
        Prepare shared state before executing the chosen algorithms - suitable for both flows.
        :return: the shared flow parameters
        """

        similarity_score, test_data_path, results_path, new_model_running = \
            ModelsExecution.get_parameters()

        # Init evaluation metrics data which will be presented in the results table
        InputSettings.init_results_metrics_data()

        # Set test data - flight routes
        flight_routes = get_subdirectories(test_data_path)
        InputSettings.set_flight_routes(flight_routes)

        return similarity_score, test_data_path, results_path, new_model_running
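
Taken together, get_parameters, init_models, and run_models suggest a driver loop along the following lines. This is a hedged sketch, not the project's actual entry point: the function name, the ModelsExecution import path, and the threading.Event are assumptions.

import threading

from utils.input_settings import InputSettings
from models_execution import ModelsExecution  # assumed module path

def run_selected_algorithms():
    similarity_score, test_data_path, results_path, new_model_running = \
        ModelsExecution.init_models()
    event = threading.Event()  # assumed synchronization object
    algorithms = (InputSettings.get_algorithms() if new_model_running
                  else InputSettings.get_existing_algorithms())
    for algorithm in algorithms:
        ModelsExecution.run_models(algorithm, similarity_score, test_data_path,
                                   results_path, new_model_running, event)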
    @classmethod
    def get_parameters(cls):
        """
        Get the tuning-flow parameters.
        :return: tuning flow parameters
        """

        return (InputSettings.get_tune_model_input_path(),
                InputSettings.get_tune_flow_input_features(),
                InputSettings.get_tune_flow_target_features(),
                InputSettings.get_tune_flow_window_size(),
                InputSettings.get_tune_flow_algorithm(),
                InputSettings.get_tune_model_results_path())
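
These tuning getters mirror the setters shown in the examples below (set_tune_model_input_path, set_tune_model_configuration, set_tune_model_results_path_path). A minimal sketch of wiring up a tuning run; the AppController class and all concrete values are assumptions:

controller = AppController()  # hypothetical facade exposing the setters shown below
controller.set_tune_model_input_path('/data/tune_input')            # assumed path
controller.set_tune_model_configuration(['roll', 'pitch'],          # assumed input features
                                        ['yaw'],                    # assumed target features
                                        15,                         # assumed window size
                                        'LSTM')                     # assumed algorithm
controller.set_tune_model_results_path_path('/data/tune_results')   # assumed path

After this setup, the get_parameters method above returns the same values for the tuning flow.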
Example #8

def get_window_size(self, algorithm):
    return InputSettings.get_window_size(algorithm)
Example #9

def set_tune_model_results_path_path(self, results_path):
    InputSettings.set_tune_model_results_path(results_path)
Example #10

def get_tune_flow_window_size(self):
    return InputSettings.get_tune_flow_window_size()
Example #11

def set_tune_model_configuration(self, input_features, target_features, window_size, algorithm):
    InputSettings.set_tune_model_configuration(input_features, target_features, window_size, algorithm)
Example #12

def set_tune_model_features(self):
    InputSettings.set_tune_model_features()
Example #13

def set_tune_model_input_path(self, input_path):
    InputSettings.set_tune_model_input_path(input_path)
Example #14

def set_results_selected_flight_route(self, selected_flight_route):
    InputSettings.set_results_selected_flight_route(selected_flight_route)
Example #15

def set_results_selected_algorithm(self, selected_algorithm):
    InputSettings.set_results_selected_algorithm(selected_algorithm)
Example #16

def get_results_selected_similarity_function(self):
    return InputSettings.get_results_selected_similarity_function()
Example #17

def get_results_metrics_data(self):
    return InputSettings.get_results_metrics_data()
Example #18

def get_results_selected_algorithm(self):
    return InputSettings.get_results_selected_algorithm()
Example #19

def get_tune_model_input_path(self):
    return InputSettings.get_tune_model_input_path()
Example #20

def get_results_selected_flight_route(self):
    return InputSettings.get_results_selected_flight_route()
Example #21

def get_tune_model_features(self):
    return InputSettings.get_tune_model_features()
Example #22

def reset_input_settings_params(self):
    InputSettings.reset_input_settings_params()
Example #23

def get_tune_flow_target_features(self):
    return InputSettings.get_tune_flow_target_features()
Example #24

def get_flight_routes(self):
    return InputSettings.get_flight_routes()
Example #25

def get_tune_flow_algorithm(self):
    return InputSettings.get_tune_flow_algorithm()
Example #26

def get_existing_algorithms(self):
    return InputSettings.get_existing_algorithms()
Example #27

def get_tune_model_results_path_path(self):
    return InputSettings.get_tune_model_results_path()
Example #28

def get_similarity_functions(self):
    return InputSettings.get_similarity()
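
Every wrapper in examples #8 through #28 delegates to InputSettings, which evidently keeps application state at class level so that the UI and the execution flows share one source of truth. A minimal sketch of that pattern, with an assumed attribute name (not the project's actual implementation):

class InputSettings:
    _flight_routes = []  # assumed class-level state

    @staticmethod
    def set_flight_routes(flight_routes):
        InputSettings._flight_routes = flight_routes

    @staticmethod
    def get_flight_routes():
        return InputSettings._flight_routes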
Example #29

import math
import os

import pandas as pd

# get_subdirectories is a project helper; its import is not shown in this excerpt
def report_results(results_dir_path,
                   test_data_path,
                   FLIGHT_ROUTES,
                   algorithm_name,
                   similarity_function,
                   routes_duration,
                   attack_duration,
                   verbose=1):
    """
    report all the results, according to the algorithm in the input
    :param results_dir_path: the path of results directory
    :param test_data_path: the path of test dataset directory
    :param FLIGHT_ROUTES: names of existing flight routes
    :param algorithm_name: the name of the algorithm that we want to report about
    :param similarity_function: the similarity function we currently report about
    :param routes_duration: routes time duration
    :param attack_duration: attacks time duration
    :param verbose: default = 1 , otherwise = can be changed to 0
    :return: all the reports are saved to suitable csv files
    """

    # Set a new nested dictionary for each flight route among all the existing flight routes
    from utils.input_settings import InputSettings  # local import, presumably to avoid a circular dependency
    results_data = InputSettings.get_results_metrics_data()

    # Iterate over all existing flight routes in order to present them in the final results table
    for flight_route in FLIGHT_ROUTES:
        flight_dir = os.path.join(test_data_path, flight_route)
        ATTACKS = get_subdirectories(flight_dir)

        # Ensure a nested dictionary exists for this flight route
        results_data[algorithm_name].setdefault(flight_route, dict())

        results_data[algorithm_name][flight_route][similarity_function] = dict()

    metrics_list = ['fpr', 'tpr', 'acc', 'delay']

    for metric in metrics_list:

        # Note: ATTACKS still holds the sub-directories of the last flight route from
        # the loop above; the code assumes every flight route has the same attack set
        results = pd.DataFrame(columns=ATTACKS)

        # Iterate over all the flight routes in order to save each result permutation in a CSV file
        for i, flight_route in enumerate(FLIGHT_ROUTES):
            flight_route_metric_path = os.path.join(
                str(results_dir_path), str(flight_route),
                f"{flight_route}_{metric}.csv")
            df = pd.read_csv(flight_route_metric_path)
            mean = df.mean(axis=0).values
            std = df.std(axis=0).values
            output = []
            for x, y in zip(mean, std):
                if math.isnan(y):
                    output.append(f"{round(x, 2)}")
                else:
                    output.append(f"{round(x, 2)}±{round(y, 2)}%")
            results.loc[i] = output

            results_data[algorithm_name][flight_route][similarity_function][
                metric] = dict()

            # Iterate over all existing attacks in the test data set
            for j, attack in enumerate(ATTACKS):
                results_data[algorithm_name][flight_route][
                    similarity_function][metric][attack] = output[j]

                results_data[algorithm_name][flight_route][
                    attack + '_duration'] = routes_duration[attack][0]

                results_data[algorithm_name][flight_route][
                    attack + '_attack_duration'] = attack_duration[attack][0]

        results.index = FLIGHT_ROUTES

        if verbose:
            print(results)

        # Update the evaluation metrics for each attack according to the current algorithm and metric
        InputSettings.update_results_metrics_data(results_data)

        final_metric_path = os.path.join(str(results_dir_path),
                                         f"final_{metric}.csv")
        results.to_csv(final_metric_path)
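
For reference, report_results reads each per-route metric table from <results_dir_path>/<flight_route>/<flight_route>_<metric>.csv and writes each summary table to final_<metric>.csv. Each summary cell combines the per-attack mean and standard deviation; a minimal illustration of the cell format with assumed sample values:

import math

mean, std = 0.9234, 0.0412  # assumed sample values
cell = f"{round(mean, 2)}" if math.isnan(std) else f"{round(mean, 2)}±{round(std, 2)}%"
print(cell)  # prints: 0.92±0.04%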
Example #30

def set_results_selected_similarity_function(self, similarity_function):
    InputSettings.set_results_selected_similarity_function(similarity_function)