def main():
    # torch.manual_seed(0)
    # torch.backends.cudnn.deterministic = True
    # torch.backends.cudnn.benchmark = False
    # np.random.seed(0)

    args = parse_args()

    cfg = Config.fromfile(args.config)
    cfg.local_rank = args.local_rank

    # update configs according to CLI args
    if args.work_dir is not None:
        cfg.work_dir = args.work_dir

    # torch.distributed.launch sets WORLD_SIZE in the environment
    distributed = False
    if "WORLD_SIZE" in os.environ:
        distributed = int(os.environ["WORLD_SIZE"]) > 1

    if distributed:
        torch.cuda.set_device(args.local_rank)
        torch.distributed.init_process_group(backend="nccl", init_method="env://")
        cfg.gpus = torch.distributed.get_world_size()
    else:
        cfg.gpus = args.gpus

    # init logger before other steps
    logger = get_root_logger(cfg.log_level)
    logger.info("Distributed testing: {}".format(distributed))
    logger.info(f"torch.backends.cudnn.benchmark: {torch.backends.cudnn.benchmark}")

    model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)

    dataset = build_dataset(cfg.data.val)
    data_loader = build_dataloader(
        dataset,
        batch_size=cfg.data.samples_per_gpu,
        workers_per_gpu=cfg.data.workers_per_gpu,
        dist=distributed,
        shuffle=False,
    )

    checkpoint = load_checkpoint(model, args.checkpoint, map_location="cpu")

    # put model on gpus
    if distributed:
        model = apex.parallel.convert_syncbn_model(model)
        model = DistributedDataParallel(
            model.cuda(cfg.local_rank),
            device_ids=[cfg.local_rank],
            output_device=cfg.local_rank,
            # broadcast_buffers=False,
            find_unused_parameters=True,
        )
    else:
        model = model.cuda()

    model.eval()
    mode = "val"

    logger.info(f"work dir: {args.work_dir}")

    if cfg.local_rank == 0:
        prog_bar = torchie.ProgressBar(len(data_loader.dataset) // cfg.gpus)

    detections = {}
    cpu_device = torch.device("cpu")

    for i, data_batch in enumerate(data_loader):
        with torch.no_grad():
            outputs = batch_processor(
                model, data_batch, train_mode=False, local_rank=args.local_rank,
            )
        for output in outputs:
            token = output["metadata"]["token"]
            # move all tensors to CPU so the results can be gathered and pickled
            for k, v in output.items():
                if k not in ["metadata"]:
                    output[k] = v.to(cpu_device)
            detections.update({token: output})
            if args.local_rank == 0:
                prog_bar.update()

    synchronize()

    all_predictions = all_gather(detections)

    # only rank 0 aggregates the per-rank results and runs evaluation
    if args.local_rank != 0:
        return

    predictions = {}
    for p in all_predictions:
        predictions.update(p)

    result_dict, _ = dataset.evaluation(predictions, output_dir=args.work_dir)

    for k, v in result_dict["results"].items():
        print(f"Evaluation {k}: {v}")

    if args.txt_result:
        res_dir = os.path.join(os.getcwd(), "predictions")
        os.makedirs(res_dir, exist_ok=True)  # fix: the directory was never created
        for k, dt in predictions.items():
            with open(
                os.path.join(res_dir, "%06d.txt" % int(dt["metadata"]["token"])), "w"
            ) as fout:
                lines = kitti.annos_to_kitti_label(dt)
                for line in lines:
                    fout.write(line + "\n")

        ap_result_str, ap_dict = kitti_evaluate(
            "/data/Datasets/KITTI/Kitti/object/training/label_2",
            res_dir,
            label_split_file="/data/Datasets/KITTI/Kitti/ImageSets/val.txt",
            current_class=0,
        )
        print(ap_result_str)
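
# NOTE: parse_args() is called above but not shown in this excerpt. The sketch
# below is a hypothetical reconstruction, not the repository's actual
# definition: every flag name is inferred from the attributes main() reads
# (args.config, args.checkpoint, args.work_dir, args.gpus, args.txt_result,
# args.local_rank).
def parse_args():
    import argparse

    parser = argparse.ArgumentParser(description="distributed test (sketch)")
    parser.add_argument("config", help="test config file path")
    parser.add_argument("checkpoint", help="checkpoint file to evaluate")
    parser.add_argument("--work_dir", default=None, help="directory for evaluation outputs")
    parser.add_argument("--gpus", type=int, default=1, help="GPU count for non-distributed runs")
    parser.add_argument("--txt_result", action="store_true", help="also dump KITTI-format txt files")
    parser.add_argument("--local_rank", type=int, default=0, help="set by torch.distributed.launch")
    return parser.parse_args()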
def main():
    args = parse_args()

    assert args.out or args.show or args.json_out, (
        "Please specify at least one operation (save or show the results) "
        'with the argument "--out" or "--show" or "--json_out"'
    )

    if args.out is not None and not args.out.endswith((".pkl", ".pickle")):
        raise ValueError("The output file must be a pkl file.")

    if args.json_out is not None and args.json_out.endswith(".json"):
        args.json_out = args.json_out[:-5]

    cfg = torchie.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get("cudnn_benchmark", False):
        torch.backends.cudnn.benchmark = True

    # cfg.model.pretrained = None
    cfg.data.test.test_mode = True
    # cfg.data.val.test_mode = True

    # init distributed env first, since the logger depends on the dist info.
    if args.launcher == "none":
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = build_dataset(cfg.data.test)
    # dataset = build_dataset(cfg.data.val)
    data_loader = build_dataloader(
        dataset,
        batch_size=cfg.data.samples_per_gpu,
        workers_per_gpu=cfg.data.workers_per_gpu,
        dist=distributed,
        shuffle=False,
    )

    # build the model and load checkpoint
    model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    checkpoint = load_checkpoint(model, args.checkpoint, map_location="cpu")
    # old versions did not save class info in checkpoints; this workaround is
    # for backward compatibility
    if "CLASSES" in checkpoint["meta"]:
        model.CLASSES = checkpoint["meta"]["CLASSES"]
    else:
        model.CLASSES = dataset.CLASSES

    model = MegDataParallel(model, device_ids=[0])
    result_dict, detections = test(
        data_loader, model, save_dir=None, distributed=distributed
    )

    for k, v in result_dict["results"].items():
        print(f"Evaluation {k}: {v}")

    rank, _ = get_dist_info()
    if args.out and rank == 0:
        print("\nwriting results to {}".format(args.out))
        torchie.dump(detections, args.out)

    if args.txt_result:
        res_dir = os.path.join(os.getcwd(), "predictions")
        os.makedirs(res_dir, exist_ok=True)  # fix: the directory was never created
        for dt in detections:
            with open(
                os.path.join(res_dir, "%06d.txt" % int(dt["metadata"]["token"])), "w"
            ) as fout:
                lines = kitti.annos_to_kitti_label(dt)
                for line in lines:
                    fout.write(line + "\n")

        ap_result_str, ap_dict = kitti_evaluate(
            "/data/Datasets/KITTI/Kitti/object/training/label_2",
            res_dir,
            label_split_file="/data/Datasets/KITTI/Kitti/ImageSets/val.txt",
            current_class=0,
        )
        print(ap_result_str)
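
# Usage sketch for the main() above (the script name and config/checkpoint
# paths are placeholders assumed for illustration; the flags match the
# attributes the function reads from parse_args()):
#
#   # single-GPU evaluation, dumping raw detections to a pickle file
#   python tools/test.py <config.py> <checkpoint.pth> --out results.pkl
#
#   # additionally write per-frame KITTI txt files and run offline AP evaluation
#   python tools/test.py <config.py> <checkpoint.pth> --out results.pkl --txt_result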
def test_v2(dataloader, model, device="cuda", distributed=False, eval_id=None, vis_id=None): ''' example: python test_v2.py --eval_id 6 8 --vis_id 6 ''' # prepare model if distributed: model = model.module model.eval() # prepare samples kitti_dataset = dataloader.dataset # det3d.datasets.kitti.kitti.KittiDataset samples = [] valid_ids = get_dataset_ids('val') for id in eval_id: index = valid_ids.index(id) samples.append(kitti_dataset[index]) batch_samples = collate_kitti(samples) example = example_to_device(batch_samples, device=torch.device(device)) # evaluation results_dict = {} with torch.no_grad(): # outputs: predicted results in lidar coord. outputs = model(example, return_loss=False, rescale=True) for output in outputs: token = output["metadata"]["token"] for k, v in output.items(): if k not in [ "metadata", ]: output[k] = v.to(torch.device("cpu")) results_dict.update({ token: output, }) # pred_annos: convert predictions in lidar to cam coord. res_dir = os.path.join("./", "sample_eval_results") os.makedirs(res_dir, exist_ok=True) pred_annos = kitti_dataset.convert_detection_to_kitti_annos( results_dict, partial=True) # save predicted results to txt files. for dt in pred_annos: with open( os.path.join(res_dir, "%06d.txt" % int(dt["metadata"]["token"])), "w") as fout: lines = kitti.annos_to_kitti_label(dt) for line in lines: fout.write(line + "\n") # visualization part if vis_id is not None: assert vis_id in eval_id from det3d.visualization.kitti_data_vis.kitti.kitti_object import show_lidar_with_boxes_rect import numpy as np index = eval_id.index(vis_id) pred_box_loc = pred_annos[index]['location'] pred_box_dim = pred_annos[index]['dimensions'] pred_box_ry = pred_annos[index]['rotation_y'].reshape(-1, 1) pred_boxes = np.concatenate( (pred_box_loc, pred_box_dim[:, [1, 2, 0]], pred_box_ry), axis=1) pred_scores = pred_annos[index]['score'] index = valid_ids.index(vis_id) show_lidar_with_boxes_rect( sample_id=vis_id, pred_boxes3d=pred_boxes, pred_scores=pred_scores, )