# Compares anomaly detection accuracy across representation transformations by
# evaluating a kNN detector for every k in K_VALUES, rendering one
# normalized-anomaly heat map per transformation.
# NOTE(review): TRANSFORMATIONS, TRANSFORMATION_NAMES, heat_map_plots, K_VALUES
# and TEST_FILE are assumed to be defined earlier in this file — confirm.
import copy

euclidean_dists = []
for transformation, name, heat_map_plot in zip(TRANSFORMATIONS,
                                               TRANSFORMATION_NAMES,
                                               heat_map_plots):
    # Set up one anomaly detector per k value.
    anomaly_detectors = []
    for k_value in K_VALUES:
        # BUG FIX: the original bound defaults.DEFAULT_KNN_CONFIG directly and
        # mutated its nested dicts in place, so all detectors (and any other
        # user of the module-level default) shared the values written on the
        # last iteration. A deep copy per detector makes each config independent.
        ad_config = copy.deepcopy(defaults.DEFAULT_KNN_CONFIG)
        ad_config['evaluator_config']['k'] = k_value
        if transformation is None:
            # No representation step for the identity transformation: drop the
            # key entirely (no-op if it is already absent).
            ad_config.pop('representation_config', None)
        else:
            ad_config['representation_config'] = {'method': transformation}
        anomaly_detectors.append(
            anomaly_detection.create_anomaly_detector(**ad_config))

    # Init test: a single sequence evaluated by every detector.
    test = [utils.load_sequence(TEST_FILE)]
    test_suite = utils.TestSuite(anomaly_detectors, K_VALUES, [test], ['test'])

    # Execute test.
    test_suite.evaluate(display_progress=True)

    # Render this transformation's results into its pre-allocated plot axis.
    results = test_suite.results
    utils.plots.plot_normalized_anomaly_vector_heat_map(
        results, K_VALUES, plot=heat_map_plot)
    heat_map_plot.set_title(name)
    heat_map_plot.set_ylabel('k')
"""
Visualizes the impact of the kNN k values on anomaly detection accuracy by
evaluating the accuracy of a given kNN anomaly detection problem on a single
sequence.
"""
# NOTE(review): the docstring originally followed `import defaults`, making it
# a no-op string expression rather than the module docstring; moved first.
import copy

import defaults

K_VALUES = range(1, 101, 1)
TEST_FILE = 'sequences/random_walk_added_noise'

# Set up one anomaly detector per k value.
# NOTE(review): `anomaly_detection` and `utils` are used below but not imported
# in this chunk — presumably imported elsewhere in the file; verify.
anomaly_detectors = []
for k_value in K_VALUES:
    # BUG FIX: the original bound defaults.DEFAULT_KNN_CONFIG directly and
    # mutated its nested 'evaluator_config' dict in place, so all detectors
    # (and any other user of the module-level default) shared the k written on
    # the last iteration. A deep copy per detector isolates each config.
    ad_config = copy.deepcopy(defaults.DEFAULT_KNN_CONFIG)
    ad_config['evaluator_config']['k'] = k_value
    anomaly_detectors.append(
        anomaly_detection.create_anomaly_detector(**ad_config))

# Init test: a single sequence evaluated by every detector.
test = [utils.load_sequence(TEST_FILE)]
test_suite = utils.TestSuite(anomaly_detectors, K_VALUES, [test], ['test'])

# Execute test.
test_suite.evaluate(display_progress=True)

# Get plots: anomaly heat map, mean error per k, and execution time per k.
results = test_suite.results
fig1, plot1 = utils.plot_normalized_anomaly_vector_heat_map(
    results, K_VALUES, ylabel='k')
fig2, plot2 = utils.plot_mean_error_values(
    results, K_VALUES, K_VALUES, xlabel='k')
fig3, plot3 = utils.plot_execution_times(
    results, K_VALUES, K_VALUES, xlabel='k')
TRANSFORMATION_NAMES, heat_map_plots): # set up anomaly detectors anomaly_detectors = [] ad_config = defaults.DEFAULT_KNN_CONFIG for k_value in K_VALUES: ad_config['evaluator_config']['k'] = k_value if transformation is None: if 'representation_config' in ad_config: ad_config.pop('representation_config') else: ad_config['representation_config'] = {'method': transformation} anomaly_detectors.append( anomaly_detection.create_anomaly_detector(**ad_config)) # init test test = [utils.load_sequence(TEST_FILE)] test_suite = utils.TestSuite(anomaly_detectors, K_VALUES, [test], ['test']) # execute test test_suite.evaluate(display_progress=True) # get plots results = test_suite.results utils.plots.plot_normalized_anomaly_vector_heat_map(results, K_VALUES, plot=heat_map_plot) heat_map_plot.set_title(name)
# Sweeps the full (k, width) grid: one kNN anomaly detector per combination of
# neighbor count k and filter width w, evaluated on a single noisy sequence.
import copy

K_VALUES = range(1, 50, 1)
W_VALUES = range(1, 50, 1)
TEST_FILE = 'sequences/random_walk_added_noise'

# Generate one anomaly detector per (k, w) pair, plus a parallel label matrix
# ("k,w" strings) laid out row-major to match the detector order.
anomaly_detectors = []
ad_label_matrix = []
for k in K_VALUES:
    ad_row = []
    for w in W_VALUES:
        # BUG FIX: the original bound defaults.DEFAULT_KNN_CONFIG once and
        # mutated its nested dicts in place, so all detectors (and any other
        # user of the module-level default) shared the values written on the
        # last iteration. A deep copy per detector isolates each config.
        ad_config = copy.deepcopy(defaults.DEFAULT_KNN_CONFIG)
        ad_config['evaluator_config']['k'] = k
        ad_config['evaluation_filter_config']['width'] = w
        ad_config['reference_filter_config']['width'] = w
        ad = anomaly_detection.create_anomaly_detector(**ad_config)
        anomaly_detectors.append(ad)
        ad_row.append(str(k) + ',' + str(w))
    ad_label_matrix.append(ad_row)
# Flatten row-major. (The original used sum(matrix, []), which is quadratic.)
ad_label_list = [label for row in ad_label_matrix for label in row]

test = [utils.load_sequence(TEST_FILE)]
# CONSISTENCY FIX: the sibling scripts in this file pass the test names as a
# list (['test']); the original passed the bare string 'test' here, which any
# code iterating the names would walk character by character.
test_suite = utils.TestSuite(anomaly_detectors, ad_label_list, [test], ['test'])

# Execute.
test_suite.evaluate(display_progress=True)
results = test_suite.results

# plot the distances