def run_models(algorithm, similarity_score, test_data_path, results_path,
               new_model_running, event):
        """
        Run the chosen algorithm on the test data and report its results.

        :param algorithm: name of the algorithm to execute
        :param similarity_score: similarity function used to score the predictions
        :param test_data_path: path of the test dataset directory
        :param results_path: path of the results directory
        :param new_model_running: True when a new model is trained, False when an existing model is loaded
        :param event: event object forwarded to the algorithm's execution function
        :return: None; the results are written by the algorithm's execution function
        """

        if new_model_running:
            training_data_path, save_model, algorithms, threshold, features_list, target_features_list = ModelsExecution.get_new_model_parameters(
            )
        else:
            training_data_path, save_model, algorithms, threshold = ModelsExecution.get_load_model_parameters(
            )

        # Set new nested dictionary for a chosen algorithm
        results_data = InputSettings.get_results_metrics_data()
        results_data[algorithm] = dict()
        InputSettings.update_results_metrics_data(results_data)

        # Checks whether the current flow in the system is new model creation or loading an existing model
        if new_model_running:
            algorithm_model_path = None
            algorithm_features_list = features_list[algorithm]
            algorithm_target_features_list = target_features_list[algorithm]
            train_scaler_path = None
            target_scaler_path = None
        else:
            algorithm_path = InputSettings.get_existing_algorithm_path(
                algorithm)
            model_data_json_path = os.path.join(str(algorithm_path),
                                                'model_data.json')
            algorithm_json_file = read_json_file(model_data_json_path)
            algorithm_features_list = algorithm_json_file['features']
            algorithm_target_features_list = algorithm_json_file[
                'target_features']
            threshold = algorithm_json_file['threshold']
            algorithm_model_path = get_model_path(algorithm_path)
            train_scaler_path = get_scaler_path(algorithm_path, 'train')
            target_scaler_path = get_scaler_path(algorithm_path, 'target')
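            # Based on the keys read above, model_data.json is assumed to contain at
            # least the 'features', 'target_features' and 'threshold' entries saved
            # when the model was trained.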

        # Dynamic execution for each chosen model
        model_execution_function = ModelsExecution.get_algorithm_execution_function(
            algorithm)
        model_execution_function(
            test_data_path, results_path, similarity_score, training_data_path,
            save_model, new_model_running, algorithm_model_path, threshold,
            algorithm_features_list, algorithm_target_features_list,
            train_scaler_path, target_scaler_path, event)
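
A minimal invocation sketch follows; the algorithm name, similarity function, paths, and the threading.Event are illustrative assumptions rather than values taken from the original project.

import threading

# Hypothetical call to run_models; every argument value below is an assumption.
stop_event = threading.Event()
run_models(algorithm='LSTM',
           similarity_score='cosine_similarity',
           test_data_path='/path/to/test_data',
           results_path='/path/to/results',
           new_model_running=True,
           event=stop_event)
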
Example #2
import math
import os

import pandas as pd


def report_results(results_dir_path,
                   test_data_path,
                   FLIGHT_ROUTES,
                   algorithm_name,
                   similarity_function,
                   routes_duration,
                   attack_duration,
                   verbose=1):
    """
    Report all the results for the algorithm given in the input.

    :param results_dir_path: path of the results directory
    :param test_data_path: path of the test dataset directory
    :param FLIGHT_ROUTES: names of the existing flight routes
    :param algorithm_name: name of the algorithm to report on
    :param similarity_function: similarity function currently being reported
    :param routes_duration: the routes' time durations
    :param attack_duration: the attacks' time durations
    :param verbose: 1 (default) prints the results tables, 0 suppresses printing
    :return: None; all the reports are saved to suitable CSV files
    """

    # Set new nested dictionary for a flight route from all the existing flights routes
    from utils.input_settings import InputSettings
    results_data = InputSettings.get_results_metrics_data()

    # Iterate over all existing flight routes in order to present them in the final results table
    for flight_route in FLIGHT_ROUTES:
        flight_dir = os.path.join(test_data_path, flight_route)
        ATTACKS = get_subdirectories(flight_dir)

        if flight_route not in results_data[algorithm_name]:
            results_data[algorithm_name][flight_route] = dict()

        results_data[algorithm_name][flight_route][similarity_function] = dict()

    metrics_list = ['fpr', 'tpr', 'acc', 'delay']

    for metric in metrics_list:

        results = pd.DataFrame(columns=ATTACKS)
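        # Note: ATTACKS here is whatever was set in the last iteration of the loop
        # above, so this assumes every flight route has the same attack sub-folders.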

        # Iterate over all the flight routes in order to save each results' permutation in a csv file
        for i, flight_route in enumerate(FLIGHT_ROUTES):
            flight_route_metric_path = os.path.join(
                str(results_dir_path), str(flight_route),
                f"{flight_route}_{metric}.csv")
            df = pd.read_csv(flight_route_metric_path)
            mean = df.mean(axis=0).values
            std = df.std(axis=0).values
            output = []
            for x, y in zip(mean, std):
                if math.isnan(y):
                    output.append(f"{round(x, 2)}")
                else:
                    output.append(f"{round(x, 2)}±{round(y, 2)}%")
            results.loc[i] = output

            results_data[algorithm_name][flight_route][similarity_function][
                metric] = dict()

            # Iterate over all existing attacks in the test data set
            for j, attack in enumerate(ATTACKS):
                results_data[algorithm_name][flight_route][
                    similarity_function][metric][attack] = output[j]

                results_data[algorithm_name][flight_route][
                    attack + '_duration'] = routes_duration[attack][0]

                results_data[algorithm_name][flight_route][
                    attack + '_attack_duration'] = attack_duration[attack][0]

        results.index = FLIGHT_ROUTES

        if verbose:
            print(results)

        # Update the evaluated metric for each attack according to the current algorithm and metric
        InputSettings.update_results_metrics_data(results_data)

        final_metric_path = os.path.join(str(results_dir_path),
                                         f"final_{metric}.csv")
        results.to_csv(final_metric_path)
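
The deeply nested assignments above build a single results_data dictionary; a rough sketch of its final shape is given below, where the algorithm, route, attack, and metric values are illustrative assumptions.

# Approximate shape of results_data after report_results has run; the concrete
# names and numbers below are illustrative assumptions.
results_data = {
    'LSTM': {                                        # algorithm_name
        'route_a': {                                 # flight_route
            'cosine_similarity': {                   # similarity_function
                'tpr': {'spoofing': '0.92±0.03%'},   # metric -> attack -> "mean±std"
                'fpr': {'spoofing': '0.05±0.01%'},
            },
            'spoofing_duration': 180,                # routes_duration[attack][0]
            'spoofing_attack_duration': 30,          # attack_duration[attack][0]
        },
    },
}
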
Example #3
def get_results_metrics_data(self):
    """Return the results metrics data stored by InputSettings."""
    return InputSettings.get_results_metrics_data()