def do_kitti_detection_evaluation(dataset, predictions, output_folder, logger):
    """Dump predictions in KITTI format and run the official offline eval binary.

    Args:
        dataset: dataset object; expected to expose a ``label_dir`` attribute
            pointing at the ground-truth KITTI label files (the original
            hard-coded a machine-specific absolute path here — fixed).
        predictions: mapping of image_id -> per-image prediction.
        output_folder: directory that receives a ``data`` subfolder with one
            KITTI-format ``<image_id>.txt`` file per image.
        logger: logger for progress messages and the eval tool's output.
    """
    predict_folder = os.path.join(output_folder, 'data')  # eval tool only recognizes a 'data' subfolder
    mkdir(predict_folder)

    for image_id, prediction in predictions.items():
        predict_txt = os.path.join(predict_folder, image_id + '.txt')
        generate_kitti_3d_detection(prediction, predict_txt)

    logger.info("Evaluate on KITTI dataset")
    output_dir = os.path.abspath(output_folder)
    original_dir = os.getcwd()
    os.chdir('./smoke/data/datasets/evaluation/kitti/kitti_eval')
    try:
        # Bug fix: take the label directory from the dataset instead of a
        # hard-coded, machine-specific absolute path.
        label_dir = getattr(dataset, 'label_dir')
        if not os.path.isfile('evaluate_object_3d_offline'):
            # Bug fix: check_call (not Popen) waits for g++ to finish, so the
            # binary actually exists before we try to run it below.
            subprocess.check_call(
                'g++ -O3 -DNDEBUG -o evaluate_object_3d_offline evaluate_object_3d_offline.cpp',
                shell=True)
        command = "./evaluate_object_3d_offline {} {}".format(label_dir, output_dir)
        output = subprocess.check_output(
            command, shell=True, universal_newlines=True).strip()
        logger.info(output)
    finally:
        # Restore the saved cwd instead of the fragile relative
        # '../../../../../../tools' hop; also runs if the eval fails.
        os.chdir(original_dir)
def kitti_evaluation(dataset, predictions, output_dir):
    """Do evaluation by running the KITTI offline eval program.

    Args:
        dataset (paddle.io.Dataset): dataset exposing ``TYPE_ID_CONVERSION``
            and ``label_dir`` attributes.
        predictions (paddle.Tensor): per-image predictions keyed by image id.
        output_dir (str): path where predictions are dumped.
    """
    # Clear the data dir before evaluating so stale files don't pollute results.
    predict_folder = os.path.join(output_dir, 'data')  # eval tool only recognizes 'data'
    if os.path.exists(predict_folder):
        shutil.rmtree(predict_folder)
    mkdir(predict_folder)

    type_id_conversion = getattr(dataset, 'TYPE_ID_CONVERSION')
    id_type_conversion = {value: key for key, value in type_id_conversion.items()}

    for image_id, prediction in predictions.items():
        predict_txt = os.path.join(predict_folder, image_id + '.txt')
        generate_kitti_3d_detection(prediction, predict_txt, id_type_conversion)

    output_dir = os.path.abspath(output_dir)
    root_dir = os.getcwd()
    os.chdir('./tools/kitti_eval_offline')
    try:
        label_dir = os.path.join(root_dir, getattr(dataset, 'label_dir'))
        if not os.path.isfile('evaluate_object_3d_offline'):
            # Bug fix: check_call waits for compilation to finish before the
            # binary is invoked (the original fired a Popen and raced ahead).
            subprocess.check_call(
                'g++ -O3 -DNDEBUG -o evaluate_object_3d_offline evaluate_object_3d_offline.cpp',
                shell=True)
        command = "./evaluate_object_3d_offline {} {}".format(label_dir, output_dir)
        # subprocess.call over os.system: same shell semantics, clearer API.
        subprocess.call(command, shell=True)
    finally:
        # Bug fix: restore the working directory (the original leaked the chdir).
        os.chdir(root_dir)
def do_kitti_detection_evaluation(dataset, predictions, output_folder, logger):
    """Dump predictions in KITTI format and (re)build the offline eval binary.

    NOTE(review): the actual invocation of ``evaluate_object_offline`` was
    commented out in the original, so this routine only writes prediction
    files and compiles the tool — it never runs the evaluation. Preserved.

    Args:
        dataset: dataset object exposing a ``label_dir`` attribute.
        predictions: mapping of image_id -> per-image prediction.
        output_folder: directory that receives a ``data`` subfolder of
            KITTI-format .txt files.
        logger: logger for progress messages.
    """
    predict_folder = os.path.join(output_folder, 'data')  # eval tool only recognizes 'data'
    mkdir(predict_folder)

    for image_id, prediction in predictions.items():
        predict_txt = os.path.join(predict_folder, image_id + '.txt')
        generate_kitti_3d_detection(prediction, predict_txt)

    logger.info("Evaluate on KITTI dataset")
    output_dir = os.path.abspath(output_folder)
    original_dir = os.getcwd()
    os.chdir('./smoke/data/datasets/evaluation/kitti/kitti_eval')
    try:
        label_dir = getattr(dataset, 'label_dir')
        if not os.path.isfile('evaluate_object_offline'):
            # Bug fix: call (not Popen) so compilation completes before anything
            # that might depend on the binary runs.
            subprocess.call(
                'g++ -O3 -DNDEBUG -o evaluate_object_offline evaluate_object_offline.cpp',
                shell=True)
        # Debug aid kept from the original: show what is in the eval directory.
        logger.info(os.listdir())
        # Disabled in the original (kept disabled here):
        #   command = "./evaluate_object_offline {} {}".format(
        #       "/app/datasets/kitti/training/label_2", output_dir)
        #   output = subprocess.check_output(
        #       command, shell=True, universal_newlines=True).strip()
        #   logger.info(output)
    finally:
        # Restore cwd even if something above raised.
        os.chdir(original_dir)
def default_setup(cfg, args):
    """Perform common run setup: output dir, logging, env dump, seeding, cudnn.

    Args:
        cfg: config node with OUTPUT_DIR, SEED and CUDNN_BENCHMARK entries.
        args: parsed CLI args with num_gpus, config_file and (optionally)
            eval_only attributes.
    """
    output_dir = cfg.OUTPUT_DIR
    if output_dir:
        mkdir(output_dir)

    rank = comm.get_rank()
    logger = setup_logger(output_dir, rank)

    # Record the environment and the full configuration for reproducibility.
    logger.info(f"Using {args.num_gpus} GPUs")
    logger.info("Collecting environment info")
    logger.info("\n" + collect_env_info())
    logger.info(args)
    logger.info(f"Loaded configuration file {args.config_file}")
    with open(args.config_file, "r") as cfg_file:
        logger.info("\n" + cfg_file.read())
    logger.info(f"Running with config:\n{cfg}")

    # Each worker gets a different yet deterministic seed when SEED >= 0;
    # a fully random seed otherwise.
    seed_all_rng(None if cfg.SEED < 0 else cfg.SEED + rank)

    # cudnn benchmark has large overhead; it shouldn't be used considering
    # the small size of a typical validation set, so skip it in eval-only runs.
    if not (hasattr(args, "eval_only") and args.eval_only):
        torch.backends.cudnn.benchmark = cfg.CUDNN_BENCHMARK
def do_kitti_detection_evaluation(dataset, predictions, output_folder, logger):
    """Dump predictions in KITTI format and run the 3D offline eval binary.

    Args:
        dataset: dataset object (its ``label_dir`` is read but then
            deliberately overridden below — behavior kept from the original).
        predictions: mapping of image_id -> per-image prediction.
        output_folder: directory that receives a ``data`` subfolder of
            KITTI-format .txt files.
        logger: logger for progress and the eval tool's output.
    """
    predict_folder = os.path.join(output_folder, 'data')  # eval tool only recognizes 'data'
    mkdir(predict_folder)

    for image_id, prediction in predictions.items():
        predict_txt = os.path.join(predict_folder, image_id + '.txt')
        generate_kitti_3d_detection(prediction, predict_txt)

    logger.info("Evaluate on KITTI dataset")
    output_dir = os.path.abspath(output_folder)
    cur_dir = os.getcwd()
    os.chdir('./smoke/data/datasets/evaluation/kitti/kitti_eval')
    try:
        # evaluate_object_offline  -> 40-point AP metric
        # evaluate_object_3d_offline -> 11-point AP metric (used here)
        executable_name = "evaluate_object_3d_offline"
        if not os.path.isfile(executable_name):
            logger.info(
                "Compiling executable for {} for first time!".format(executable_name))
            # subprocess.call waits for g++ to finish before the binary is run.
            subprocess.call(
                'g++ -O3 -DNDEBUG -o {} {}.cpp'.format(executable_name, executable_name),
                shell=True)
        else:
            logger.info(
                "Compiled executable {} already exists!".format(executable_name))

        # The dataset's label_dir is intentionally overridden with the
        # training label directory (original behavior — TODO confirm intent).
        label_dir = os.path.abspath(
            os.path.join(cur_dir, 'datasets/kitti/training/label_2'))
        command = "./{} {} {}".format(executable_name, label_dir, output_dir)
        logger.info("command: {}".format(command))
        output = subprocess.check_output(
            command, shell=True, universal_newlines=True).strip()
        logger.info(output)
    finally:
        # Original behavior: leave the process in <repo>/tools afterwards.
        os.chdir(os.path.join(cur_dir, "tools"))
def do_kitti_detection_evaluation(dataset, predictions, output_folder, logger, eval=True):
    """Dump predictions in KITTI format and optionally run Python-side evaluation.

    Args:
        dataset: dataset object exposing a ``label_dir`` attribute.
        predictions: mapping of image_id -> per-image prediction.
        output_folder: directory that receives a ``data`` subfolder of
            KITTI-format .txt files.
        logger: logger for progress and results.
        eval: when True, run the Python ``evaluate`` routine on the dumped
            files. NOTE(review): shadows the ``eval`` builtin; kept for
            caller compatibility.
    """
    predict_folder = os.path.join(output_folder, 'data')  # eval tool only recognizes 'data'
    mkdir(predict_folder)

    for image_id, prediction in predictions.items():
        predict_txt = os.path.join(predict_folder, image_id + '.txt')
        generate_kitti_3d_detection(prediction, predict_txt)

    # Guard clause replaces the original big `if eval:` wrapper.
    if not eval:
        return

    logger.info("Evaluate on KITTI dataset")
    output_dir = os.path.abspath(output_folder)
    smoke_dir = os.getcwd()
    os.chdir(os.path.join(smoke_dir, 'smoke/data/datasets/evaluation/kitti/'))
    try:
        label_dir = getattr(dataset, 'label_dir')
        eval_results = evaluate(
            os.path.join(smoke_dir, label_dir),
            os.path.join(output_dir, 'data'),
            label_split_file=os.path.join(
                smoke_dir, 'datasets/kitti/training/ImageSets/val.txt'))
        logger.info(eval_results)
    finally:
        # Bug fix: restore cwd even if evaluation raises (the original only
        # restored it on the success path).
        os.chdir(smoke_dir)
def run_test(cfg, model):
    """Run inference on every test dataset configured in cfg.DATASETS.TEST.

    Args:
        cfg: config node with DATASETS.TEST, OUTPUT_DIR and MODEL.DEVICE.
        model: the model to evaluate.
    """
    eval_types = ("detection",)
    dataset_names = cfg.DATASETS.TEST
    output_folders = [None] * len(dataset_names)
    if cfg.OUTPUT_DIR:
        # One inference output folder per test dataset.
        for idx, dataset_name in enumerate(dataset_names):
            output_folder = os.path.join(cfg.OUTPUT_DIR, "inference", dataset_name)
            mkdir(output_folder)
            output_folders[idx] = output_folder

    data_loaders_val = build_test_loader(cfg)
    for output_folder, dataset_name, data_loader_val in zip(
            output_folders, dataset_names, data_loaders_val):
        inference(
            model,
            # Bug fix: pass this dataset's loader, not the whole list of
            # loaders (the original passed `data_loaders_val`).
            data_loader_val,
            dataset_name=dataset_name,
            eval_types=eval_types,
            device=cfg.MODEL.DEVICE,
            output_folder=output_folder,
        )
        comm.synchronize()