def store_losses(setting, per_ad_distances, row_ids, are_crashes, db: Database):
    """Persist per-image anomaly-detector distances for one evaluation setting.

    :param setting: evaluation Setting; only its ``id`` is stored per record.
    :param per_ad_distances: dict mapping AD name ("VAE", "SAE", "CAE", "DAE",
        "DEEPROAD") to an indexable sequence of per-image losses. All sequences
        are assumed to have the same length as the "VAE" one.
    :param row_ids: per-image row ids (``.item()`` is called on each entry,
        so tensor/numpy scalars are expected).
    :param are_crashes: per-image crash flags; 0 means "no crash".
    :param db: target database; committed every 1000 rows and once at the end.
    """
    for i in range(len(per_ad_distances["VAE"])):
        if are_crashes[i] == 0:
            is_crash = False
        else:
            is_crash = True
        # DEEPROAD losses arrive as 0-d tensors, hence the extra .item()
        deeproad_loss = per_ad_distances["DEEPROAD"][i].item()
        to_store = SingleImgDistance(setting_id=setting.id,
                                     row_id=row_ids[i].item(),
                                     is_crash=is_crash,
                                     vae_loss=per_ad_distances["VAE"][i],
                                     cae_loss=per_ad_distances["CAE"][i],
                                     dae_loss=per_ad_distances["DAE"][i],
                                     sae_loss=per_ad_distances["SAE"][i],
                                     deeproad_loss=deeproad_loss)
        to_store.insert_into_db(db)
        # BUGFIX: the original `if i % 1000:` committed on every row EXCEPT
        # multiples of 1000; the intent is a periodic commit every 1000 rows.
        if i % 1000 == 0:
            db.commit()
    db.commit()
def set_true_labels():
    """Recompute true labels for all stored single-image distance records.

    Removes previously stored window records, then, for every known setting,
    labels its single-image entries against the matching driving_log.csv and
    writes the labels back to the database. Sequence-based labelling is
    currently disabled (commented out in the original).
    """
    db = Database(name=get_db_path(), delete_existing=False)
    eval_window.remove_all_stored_records(db=db)
    for setting in eval_setting.get_all_settings(db):
        # driving log path is derived from the setting's track id
        driving_log = ("/home/zhongzzy9/Documents/self-driving-car/2020_CARLA_challenge/collected_data_customized/customized_0/"
                       + str(setting.track) + '/driving_log.csv')
        logger.info("labelling windows for setting " + str(setting.id))
        print('single')
        img_entries = eval_single_img_distances.load_all_for_setting(
            db, setting_id=setting.id)
        window_count = _set_true_labels(entries=img_entries,
                                        current_setting_id=setting.id,
                                        current_ad_type="single-img",
                                        current_window_count=0,
                                        db=db,
                                        driving_log=driving_log)
        eval_single_img_distances.update_true_label_on_db(db=db, records=img_entries)
        print('seq')
        # NOTE(review): sequence-based labelling ("seq" AD type via
        # eval_seq_img_distances) was commented out in the original, so
        # window_count is currently unused after the single-img pass.
        db.commit()
def store_losses(setting, per_ad_distances, row_ids, are_crashes, db: Database):
    """Persist per-image anomaly-detector distances for one evaluation setting.

    Unlike the original variant, not every AD has to be present in
    ``per_ad_distances``: any of "VAE", "SAE", "CAE", "DAE", "DEEPROAD" may be
    missing, in which case the corresponding loss column is stored as None.

    :param setting: evaluation Setting; only its ``id`` is stored per record.
    :param per_ad_distances: non-empty dict mapping AD name to an indexable
        sequence of per-image losses; all sequences share one length.
    :param row_ids: per-image row ids (``.item()`` is called on each entry).
    :param are_crashes: per-image crash flags; 0 means "no crash".
    :param db: target database; committed every 1000 rows and once at the end.
    """
    # Explicit check instead of `assert`: asserts are stripped under -O.
    if not per_ad_distances:
        raise ValueError("per_ad_distances must contain at least one AD")
    # Row count comes from whichever AD happens to be present
    # (cleaner than list(per_ad_distances.items())[0][1]).
    num_rows = len(next(iter(per_ad_distances.values())))
    for i in range(num_rows):
        if are_crashes[i] == 0:
            is_crash = False
        else:
            is_crash = True
        # Losses default to None when the corresponding AD was not evaluated.
        vae_loss = per_ad_distances["VAE"][i] if "VAE" in per_ad_distances else None
        sae_loss = per_ad_distances["SAE"][i] if "SAE" in per_ad_distances else None
        cae_loss = per_ad_distances["CAE"][i] if "CAE" in per_ad_distances else None
        dae_loss = per_ad_distances["DAE"][i] if "DAE" in per_ad_distances else None
        deeproad_loss = None
        if "DEEPROAD" in per_ad_distances:
            # DEEPROAD losses arrive as 0-d tensors, hence .item()
            deeproad_loss = per_ad_distances["DEEPROAD"][i].item()
        to_store = SingleImgDistance(setting_id=setting.id,
                                     row_id=row_ids[i].item(),
                                     is_crash=is_crash,
                                     vae_loss=vae_loss,
                                     cae_loss=cae_loss,
                                     dae_loss=dae_loss,
                                     sae_loss=sae_loss,
                                     deeproad_loss=deeproad_loss)
        to_store.insert_into_db(db)
        # BUGFIX: the original `if i % 1000:` committed on every row EXCEPT
        # multiples of 1000; the intent is a periodic commit every 1000 rows.
        if i % 1000 == 0:
            db.commit()
    db.commit()
def _create_all_settings(db: Database):
    """Create and persist one Setting per (agent, track) combination.

    Ids are assigned sequentially starting at SETTING_START_ID. All inserts
    are committed once at the end.

    :param db: database the settings are inserted into.
    :return: list of the created Setting objects, in insertion order.
    """
    created = []
    next_id = SETTING_START_ID
    for agent in EVAL_AGENTS:
        for track in EVAL_TRACKS:
            new_setting = Setting(id=next_id, agent=agent, track=track)
            new_setting.insert_into_db(db=db)
            created.append(new_setting)
            next_id += 1
    db.commit()
    return created
def store_seq_losses(setting, per_ad_distances, row_ids, are_crashes, db: Database):
    """Persist per-sequence LSTM distances for one evaluation setting.

    :param setting: evaluation Setting; only its ``id`` is stored per record.
    :param per_ad_distances: dict containing an "IMG-LSTM" key mapped to an
        indexable sequence of per-sequence losses.
    :param row_ids: per-sequence row ids (``.item()`` is called on each entry).
    :param are_crashes: per-sequence crash flags; 0 means "no crash".
    :param db: target database; committed every 1000 rows and once at the end.
    """
    for i in range(len(per_ad_distances["IMG-LSTM"])):
        if are_crashes[i] == 0:
            is_crash = False
        else:
            is_crash = True
        to_store = SeqBasedDistance(setting_id=setting.id,
                                    row_id=row_ids[i].item(),
                                    is_crash=is_crash,
                                    lstm_loss=per_ad_distances["IMG-LSTM"][i])
        to_store.insert_into_db(db)
        # BUGFIX: the original `if i % 1000:` committed on every row EXCEPT
        # multiples of 1000; the intent is a periodic commit every 1000 rows.
        if i % 1000 == 0:
            db.commit()
    db.commit()
def _create_all_settings(db: Database):
    """Create and persist a Setting for every (agent, track, time, weather)
    combination, except the ("DayOnly", "Sunny") time/weather pair.

    Ids are assigned sequentially starting at SETTING_START_ID. All inserts
    are committed once at the end.

    :param db: database the settings are inserted into.
    :return: list of the created Setting objects, in insertion order.
    """
    created = []
    next_id = SETTING_START_ID
    for agent in EVAL_AGENTS:
        for track in EVAL_TRACKS:
            for time in EVAL_TIME:
                for weather in EVAL_WEATHER:
                    # guard clause: this combination is deliberately skipped
                    if time == "DayOnly" and weather == "Sunny":
                        continue
                    new_setting = Setting(id=next_id, agent=agent,
                                          track=track, time=time,
                                          weather=weather)
                    new_setting.insert_into_db(db=db)
                    next_id += 1
                    created.append(new_setting)
    db.commit()
    return created
def set_true_labels():
    """Label evaluation windows for every setting, for both single-image and
    sequence-based anomaly detectors, and persist the labels.

    Previously stored window records are removed first. The window counter
    from the single-image pass is carried into the sequence pass so window
    ids stay distinct within a setting.
    """
    db = Database(name=db_path.DB_PATH, delete_existing=False)
    eval_window.remove_all_stored_records(db=db)
    for setting in eval_setting.get_all_settings(db):
        logger.info("labelling windows for setting " + str(setting.id))
        # Pass 1: single-image based detectors.
        img_entries = eval_single_img_distances.load_all_for_setting(
            db, setting_id=setting.id)
        window_count = _set_true_labels(entries=img_entries,
                                        current_setting_id=setting.id,
                                        current_ad_type="single-img",
                                        current_window_count=0,
                                        db=db)
        eval_single_img_distances.update_true_label_on_db(db=db, records=img_entries)
        # Pass 2: sequence based detectors (original reused the same local
        # variable name for these entries; renamed here for clarity).
        seq_entries = eval_seq_img_distances.load_all_for_setting(
            db, setting_id=setting.id)
        _set_true_labels(entries=seq_entries,
                         current_setting_id=setting.id,
                         current_ad_type="seq",
                         current_window_count=window_count,
                         db=db)
        eval_seq_img_distances.update_true_label_on_db(db=db, records=seq_entries)
        db.commit()
def calc_precision_recall():
    """Evaluate precision/recall for every AD using the hardcoded thresholds.

    Clears all previously stored precision/recall rows, then runs ``_eval``
    for each entry of THRESHOLDS (single-image ADs) and SEQ_THRESHOLDS
    (sequence ADs), committing after each AD.
    """
    logger.warning(
        "ATTENTION: Thresholds are hardcoded. Copy-paste after recalculating thresholds " +
        "(hence, after each training of the models)!")
    db = Database(name=db_path.DB_PATH, delete_existing=False)
    eval_prec_recall.remove_all_from_prec_recall(db=db)
    # Single-image and sequence threshold maps are processed identically,
    # in this order, so a single fused loop replaces the two original ones.
    for threshold_map in (THRESHOLDS, SEQ_THRESHOLDS):
        for ad_name, ad_thresholds in threshold_map.items():
            _eval(ad_name=ad_name, ad_thresholds=ad_thresholds, db=db)
            db.commit()
def main(): train_data_dir = train_args.data_dir # train_dataset_name = training_runner.dataset_name_from_dir(train_data_dir) db_name = "../models/trained-anomaly-detectors/collected_data_customized-based-eval.sqlite" # Prepare Database db = Database(db_name, True) # Prepare Settings settings = _create_all_settings(db) # Prepare ADs single_img_based_ads, sequence_based_ads = _prepare_ads(train_data_dir, train_args) for i, setting in enumerate(settings): data_dir = os.path.join(eval_dir, setting.get_folder_name()) if len(single_img_based_ads) > 0: handle_single_image_based_ads(db=db, data_dir=data_dir, setting=setting, single_img_based_ads=single_img_based_ads, raw_data_dir=eval_dir, mode=mode) if len(sequence_based_ads) > 0: handle_sequence_based_ads(db=db, data_dir=data_dir, setting=setting, sequence_based_ads=sequence_based_ads, raw_data_dir=eval_dir)
def main():
    """Run the full evaluation once per training dataset.

    For each directory in the loaded train args, a fresh per-dataset sqlite
    database is created, all settings are generated, the anomaly detectors
    are prepared, and every setting is processed by the single-image and
    sequence-based handlers.
    """
    # Eval Config, change this line to evaluate agains another set
    eval_dir = "../datasets/eval_data/preliminary-runs/"
    train_args = utils_args.load_train_args()
    train_args.always_calc_thresh = False
    for train_data_dir in train_args.data_dir:
        dataset_name = training_runner.dataset_name_from_dir(train_data_dir)
        # One eval database per training dataset; True recreates it.
        db = Database("../models/trained-anomaly-detectors/" + dataset_name + "-based-eval.sqlite", True)
        settings = _create_all_settings(db)
        train_args.delete_trained = False
        single_img_based_ads, sequence_based_ads = _prepare_ads(train_data_dir, train_args)
        # Evaluate every setting with whichever AD families are available.
        for setting in settings:
            data_dir = eval_dir + setting.get_folder_name()
            if single_img_based_ads:
                handle_single_image_based_ads(db=db,
                                              data_dir=data_dir,
                                              setting=setting,
                                              single_img_based_ads=single_img_based_ads)
            if sequence_based_ads:
                handle_sequence_based_ads(db=db,
                                          data_dir=data_dir,
                                          setting=setting,
                                          sequence_based_ads=sequence_based_ads)
import logging

import utils_logging
from eval_db.database import Database
from eval_scripts import db_path, b_precision_recall_auroc
from eval_scripts.utils import threshold_independent_plotters

logger = logging.Logger("c_timeline")
utils_logging.log_info(logger)

if __name__ == '__main__':
    db_name = db_path.DB_PATH
    db = Database(name=db_name, delete_existing=False)
    # Temporary security check to make sure we don't generate based on the
    # wrong db for the paper. BUGFIX: this was a bare `assert`, which is
    # silently stripped when Python runs with -O — use an explicit raise so
    # the guard always fires.
    if db_path.DB_PATH != "../../models/trained-anomaly-detectors/20190821-ALL-MODELS-MODIFIED_TRACKS.sqlite":
        raise RuntimeError("Refusing to run: unexpected database " + db_path.DB_PATH)
    if b_precision_recall_auroc.AUROC_CALC_SAMPLING_FACTOR != 1:
        logger.warning(
            "Sampling is >1, this is good for testing, but must not be the case for final version graphs"
        )
    # threshold_dependent_reaction_plotter = ThresholdDependentReactionTimePlotter(db=db)
    # threshold_dependent_reaction_plotter.compute_and_plot()
    reaction_plotter = threshold_independent_plotters.ReactionTimePlotter(
        db=db)
    reaction_plotter.compute_and_plot()
    # k_plotter = threshold_independent_plotters.KSizePlotter(db=db)
    # k_plotter.compute_and_plot()