def eval_single_ckpt(model, test_loader, args, output_dir, eval_output_dir, logger, epoch_id, dist_test=False):
    """Load the checkpoint for ``epoch_id`` and print raw predictions for a few batches.

    Args:
        model: detector exposing ``load_params_from_file`` / ``forward``.
        test_loader: iterable yielding batch dicts.
        args: parsed CLI args (unused here; kept for interface compatibility).
        output_dir: Path to the experiment directory containing ``ckpt/``.
        eval_output_dir: unused here; kept for interface compatibility.
        logger: logger handed to checkpoint loading.
        epoch_id: epoch whose ``checkpoint_epoch_<epoch_id>.pth`` is loaded.
        dist_test: if True, load weights to CPU first (distributed testing).
    """
    ckpt_dir = output_dir / 'ckpt'
    # Fix: the original hard-coded epoch 110 and silently ignored the
    # epoch_id parameter; build the checkpoint name from epoch_id instead.
    ckpt = ckpt_dir / ('checkpoint_epoch_%s.pth' % epoch_id)

    # load checkpoint
    model.load_params_from_file(filename=ckpt, logger=logger, to_cpu=dist_test)
    model.cuda()
    model.eval()

    # start evaluation: dump ground truth and raw head outputs for inspection
    for i, batch_dict in enumerate(test_loader):
        load_data_to_gpu(batch_dict)
        with torch.no_grad():
            pred_dicts = model(batch_dict)
        print('GT BBOXES')
        print(batch_dict['gt_boxes'])
        print('CLS PREDS')
        print(pred_dicts['cls_preds'])
        print('BBOX PREDS')
        print(pred_dicts['box_preds'])
        if i > 5:  # only inspect the first few batches
            break
def run_model(self, points):
    """Run the PointRCNN network on one point cloud.

    Returns (scores, boxes_lidar, types) as numpy arrays for the single frame.
    """
    t_t = time.time()
    input_dict = {'points': points, 'frame_id': 0}

    # Preprocess and batch a single frame, then move it onto the GPU.
    data_dict = self.dataset.prepare_data(data_dict=input_dict)
    data_dict = self.dataset.collate_batch([data_dict])
    load_data_to_gpu(data_dict)

    torch.cuda.synchronize()
    t = time.time()
    with torch.no_grad():
        pred_dicts, _ = self.net(data_dict)
    torch.cuda.synchronize()

    # Track latency statistics across calls.
    inference_time = time.time() - t
    self.inference_times.append(inference_time)
    rospy.loginfo(f" PointRCNN inference cost time: {time.time() - t}")
    rospy.loginfo("Stdev: {}".format(np.std(self.inference_times)))

    frame_pred = pred_dicts[0]
    boxes_lidar = frame_pred["pred_boxes"].detach().cpu().numpy()
    scores = frame_pred["pred_scores"].detach().cpu().numpy()
    types = frame_pred["pred_labels"].detach().cpu().numpy()
    return scores, boxes_lidar, types
def detector(self, points):
    """Detect objects in a single point cloud and publish a visualization.

    Returns (pred_boxes, pred_scores, pred_labels) kept above the score threshold.
    """
    with torch.no_grad():
        # Build a one-frame batch and move it onto the GPU.
        input_dict = {'points': points, 'frame_id': 0}
        batch = self.demo_dataset.prepare_data(data_dict=input_dict)
        batch = self.demo_dataset.collate_batch([batch])
        load_data_to_gpu(batch)

        # Single-frame inference (batch size 1).
        frame_pred = self.model.forward(batch)[0][0]

        # Keep only detections above the configured confidence threshold.
        pred_scores = frame_pred['pred_scores'].detach().cpu().numpy()
        keep = pred_scores > self.score_threshold
        pred_scores = pred_scores[keep]
        pred_boxes = frame_pred['pred_boxes'].detach().cpu().numpy()[keep]
        pred_labels = frame_pred['pred_labels'].detach().cpu().numpy()[keep]

        self.viz(pred_boxes, "excavator/LiDAR_80_1")
        return pred_boxes, pred_scores, pred_labels
def main():
    """Quick OpenPCDet demo: run the detector over every sample and visualize it."""
    args, cfg = parse_config()
    logger = common_utils.create_logger()
    logger.info('-----------------Quick Demo of OpenPCDet-------------------------')

    demo_dataset = DemoDataset(
        dataset_cfg=cfg.DATA_CONFIG,
        class_names=cfg.CLASS_NAMES,
        training=False,
        root_path=Path(args.data_path),
        ext=args.ext,
        logger=logger,
    )
    logger.info(f'Total number of samples: \t{len(demo_dataset)}')

    # Build the detector, load the checkpoint, and switch to eval mode on GPU.
    model = build_network(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), dataset=demo_dataset)
    model.load_params_from_file(filename=args.ckpt, logger=logger, to_cpu=True)
    model.cuda()
    model.eval()

    with torch.no_grad():
        for idx, sample in enumerate(demo_dataset):
            logger.info(f'Visualized sample index: \t{idx + 1}')
            batch = demo_dataset.collate_batch([sample])
            load_data_to_gpu(batch)
            pred_dicts, _ = model.forward(batch)

            V.draw_scenes(
                points=batch['points'][:, 1:],
                ref_boxes=pred_dicts[0]['pred_boxes'],
                ref_scores=pred_dicts[0]['pred_scores'],
                ref_labels=pred_dicts[0]['pred_labels'],
            )
            # mayavi needs an explicit show; the open3d backend does not.
            if not OPEN3D_FLAG:
                mlab.show(stop=True)

    logger.info('Demo done.')
def update(self):
    """Run inference on the sample selected by ``self.offset`` and refresh the view."""
    idx = self.offset % len(self.dataset)
    with torch.no_grad():
        sample = self.dataset.__getitem__(idx)
        # NOTE(review): `logger` is not an attribute here — assumed to be a
        # module-level logger; confirm it exists in this module.
        logger.info(f'Visualized sample index: \t{idx + 1}')
        batch = self.dataset.collate_batch([sample])
        load_data_to_gpu(batch)
        pred_dicts, _ = self.model.forward(batch)

        # Ground-truth boxes are only available on the validation split.
        gt_objs = None
        if self.dataset.split == 'val':
            gt_objs = self.dataset.val_data_list[idx]['annos']['gt_boxes_lidar']

        self.update_view(
            idx,
            points=batch['points'][:, 1:].cpu().numpy(),
            objs=pred_dicts[0]['pred_boxes'].cpu(),
            ref_scores=pred_dicts[0]['pred_scores'].cpu().numpy(),
            ref_labels=pred_dicts[0]['pred_labels'].cpu().numpy(),
            gt_objs=gt_objs,
        )
def run(self):
    """Run PV-RCNN on ``self.points`` and return (scores, boxes_lidar, types)."""
    t_t = time.time()
    input_dict = {'points': self.points, 'frame_id': 0}

    # Preprocess, batch, and move the single frame onto the GPU.
    batch = self.demo_dataset.prepare_data(data_dict=input_dict)
    batch = self.demo_dataset.collate_batch([batch])
    load_data_to_gpu(batch)

    # Time only the forward pass, with explicit CUDA fences.
    torch.cuda.synchronize()
    t = time.time()
    pred_dicts, _ = self.net.forward(batch)
    torch.cuda.synchronize()
    print(f" pvrcnn inference cost time: {time.time() - t}")

    frame_pred = pred_dicts[0]
    boxes_lidar = frame_pred["pred_boxes"].detach().cpu().numpy()
    scores = frame_pred["pred_scores"].detach().cpu().numpy()
    types = frame_pred["pred_labels"].detach().cpu().numpy()
    return scores, boxes_lidar, types
def run(self, points, calib, frame):
    """Run detection on a raw point cloud and build a KITTI-style prediction dict.

    Args:
        points: flat float array reshaped to (N, 4) as X, Y, Z, intensity.
        calib: calibration object used to project boxes to camera/image frames.
        frame: frame index (NOTE: overwritten to 0 below, so the value is ignored).

    Returns:
        (scores, boxes_lidar, types, pred_dict) on success; the bare template
        ``pred_dict`` when nothing is detected (NOTE(review): inconsistent
        return arity preserved from the original — callers appear to rely on it).
    """
    num_features = 4  # X, Y, Z, intensity
    self.points = points.reshape([-1, num_features])

    # Append a constant timestamp channel and shift x by the lidar-center offset.
    frame = 0  # NOTE(review): parameter value deliberately discarded — confirm intent
    timestamps = np.empty((len(self.points), 1))
    timestamps[:] = frame
    self.points = np.append(self.points, timestamps, axis=1)
    self.points[:, 0] += move_lidar_center

    input_dict = {'points': self.points, 'frame_id': frame}
    data_dict = self.demo_dataset.prepare_data(data_dict=input_dict)
    data_dict = self.demo_dataset.collate_batch([data_dict])
    load_data_to_gpu(data_dict)

    torch.cuda.synchronize()
    t = time.time()
    pred_dicts, _ = self.net.forward(data_dict)
    torch.cuda.synchronize()
    # Fix: record latency only — the original also computed a mean that was
    # never used (dead work on every call).
    inference_time_list.append(time.time() - t)

    boxes_lidar = pred_dicts[0]["pred_boxes"].detach().cpu().numpy()
    scores = pred_dicts[0]["pred_scores"].detach().cpu().numpy()
    types = pred_dicts[0]["pred_labels"].detach().cpu().numpy()

    pred_boxes = np.copy(boxes_lidar)
    pred_dict = self.get_template_prediction(scores.shape[0])
    if scores.shape[0] == 0:
        return pred_dict

    # Project lidar boxes into camera coordinates and 2D image boxes.
    pred_boxes_camera = box_utils.boxes3d_lidar_to_kitti_camera(pred_boxes, calib)
    # NOTE(review): `image_shape` is not defined locally — assumed module-level; verify.
    pred_boxes_img = box_utils.boxes3d_kitti_camera_to_imageboxes(
        pred_boxes_camera, calib, image_shape=image_shape)

    pred_dict['name'] = np.array(cfg.CLASS_NAMES)[types - 1]
    pred_dict['alpha'] = -np.arctan2(-pred_boxes[:, 1], pred_boxes[:, 0]) + pred_boxes_camera[:, 6]
    pred_dict['bbox'] = pred_boxes_img
    pred_dict['dimensions'] = pred_boxes_camera[:, 3:6]
    pred_dict['location'] = pred_boxes_camera[:, 0:3]
    pred_dict['rotation_y'] = pred_boxes_camera[:, 6]
    pred_dict['score'] = scores
    pred_dict['boxes_lidar'] = pred_boxes
    return scores, boxes_lidar, types, pred_dict
def inference_with_info():
    """Run the detector over the demo dataset, accumulate per-IoU/per-range
    detection metrics, and save an annotated BEV image for every frame.

    Relies on module-level globals (not visible in this chunk): args, cfg,
    logger, ious, dist_ranges, total_num_tp, total_num_valid_det,
    total_num_valid_gt, total_dist_err, save_path, get_metrics,
    plot_multiframe_boxes — TODO confirm they are defined at module scope.
    """
    demo_dataset = DemoDataset(dataset_cfg=cfg.DATA_CONFIG,
                               class_names=cfg.CLASS_NAMES,
                               training=False,
                               root_path=Path(args.data_path),
                               logger=logger)
    logger.info(f'Total number of samples: \t{len(demo_dataset)}')
    model = build_network(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), dataset=demo_dataset)
    with torch.no_grad():
        # Load weights and switch to GPU eval mode.
        model.load_params_from_file(filename=args.ckpt, logger=logger, to_cpu=True)
        model.cuda()
        model.eval()
        for idx, data_dict in tqdm(enumerate(demo_dataset)):
            data_dict = demo_dataset.collate_batch([data_dict])
            load_data_to_gpu(data_dict)
            pred_dicts, _ = model.forward(data_dict)
            det_boxes = pred_dicts[0]['pred_boxes'].cpu().detach().numpy()
            scores = pred_dicts[0]['pred_scores'].cpu().numpy()
            labels = pred_dicts[0]['pred_labels'].cpu().numpy()
            gt_boxes = demo_dataset.val_data_list[idx]['annos']['gt_boxes_lidar']
            # Evaluate current frame: accumulate TP / valid-det / valid-gt /
            # distance-error counts for every (IoU threshold, distance range) cell.
            info = ''
            for iou_idx in range(len(ious)):
                for dist_range_idx in range(len(dist_ranges)):
                    tp, num_valid_det, num_valid_gt, dist_err = get_metrics(
                        gt_boxes, det_boxes, dist_ranges[dist_range_idx], ious[iou_idx])
                    total_num_tp[iou_idx, dist_range_idx] += tp
                    total_num_valid_det[iou_idx, dist_range_idx] += num_valid_det
                    total_num_valid_gt[iou_idx, dist_range_idx] += num_valid_gt
                    total_dist_err[iou_idx, dist_range_idx] += dist_err
                    info += 'tp: {}, dt: {}, gt: {}\n'.format(tp, num_valid_det, num_valid_gt)
            # Replicate boxes along a new axis — presumably to match the
            # multi-frame input format of plot_multiframe_boxes; TODO confirm.
            det_boxes = det_boxes[:, np.newaxis, :].repeat(3, axis=1)
            gt_boxes = gt_boxes[:, np.newaxis, :].repeat(3, axis=1)
            image = plot_multiframe_boxes(data_dict['points'][:, 1:].cpu().numpy(),
                                          det_boxes,
                                          cfg.DATA_CONFIG.POINT_CLOUD_RANGE,
                                          gt_boxes=gt_boxes,
                                          scores=scores,
                                          labels=labels)
            # Overlay the per-cell metric lines on the BEV image, one row per line.
            info = info.split("\n")
            fontScale = 0.6
            thickness = 1
            fontFace = cv2.FONT_HERSHEY_SIMPLEX
            text_size, baseline = cv2.getTextSize(str(info), fontFace, fontScale, thickness)
            for i, text in enumerate(info):
                if text:
                    draw_point = (10, 10 + (text_size[1] + 2 + baseline) * i)
                    cv2.putText(image, text, draw_point,
                                fontFace=fontFace, fontScale=fontScale,
                                color=(0, 255, 0), thickness=thickness)
            # lidar_idx has the form '<bag_name>/<...>/<frame>'; frame[:-4]
            # strips the file extension before composing the output name.
            [bag_name, _, frame] = demo_dataset.val_data_list[idx]['point_cloud']['lidar_idx'].split('/')
            image_file = os.path.join(save_path, bag_name + '_' + frame[:-4] + '.png')
            cv2.imwrite(image_file, image)
def main():
    """Run inference over the demo dataset, optionally writing BEV frames to a video.

    Fix: the original called ``out.release()`` unconditionally, raising
    NameError when ``--save_video_path`` was not supplied (no writer created).
    """
    args, cfg = parse_config()
    log_file = 'log_inference_%s.txt' % datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
    logger = common_utils.create_logger(log_file, rank=0)
    logger.info('-----------------Inference of OpenPCDet-------------------------')

    test_dataset = DemoDataset(dataset_cfg=cfg.DATA_CONFIG,
                               class_names=cfg.CLASS_NAMES,
                               training=False,
                               root_path=Path(args.data_path),
                               ext=args.ext,
                               logger=logger)
    logger.info(f'Total number of samples: \t{len(test_dataset)}')

    model = build_network(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), dataset=test_dataset)
    model.load_params_from_file(filename=args.ckpt, logger=logger, to_cpu=False)
    model.cuda()
    model.eval()

    # Create the video writer only when a save path was requested.
    out = None
    if args.save_video_path is not None:
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        out = cv2.VideoWriter(os.path.join(args.save_video_path, 'result.avi'),
                              fourcc, 10.0, (400, 1600))
    bev_range = [-5, -20, -2, 155, 20, 5]

    with torch.no_grad():
        for idx, data_dict in tqdm(enumerate(test_dataset)):
            data_dict = test_dataset.collate_batch([data_dict])
            load_data_to_gpu(data_dict)
            pred_dicts, _ = model.forward(data_dict)

            if args.save_video_path is not None:
                boxes = pred_dicts[0]['pred_boxes'].cpu().detach().numpy()
                boxes = boxes[:, np.newaxis, :].repeat(3, axis=1)
                gt_boxes = None
                if test_dataset.split == 'val':
                    gt_boxes = test_dataset.val_data_list[idx]['annos']['gt_boxes_lidar']
                    gt_boxes = gt_boxes[:, np.newaxis, :].repeat(3, axis=1)
                image = plot_multiframe_boxes(
                    data_dict['points'][:, 1:].cpu().numpy(), boxes, bev_range, gt_boxes=gt_boxes)
                cv2.imshow('show_result', image)
                cv2.waitKey(1)
                out.write(image)

    # Fix: guard the release — out exists only when saving was enabled.
    if out is not None:
        out.release()
def main():
    """Demo that runs the detector and pulls predictions back to host numpy arrays."""
    args, cfg = parse_config()
    logger = common_utils.create_logger()
    logger.info('-----------------Quick Demo of OpenPCDet-------------------------')
    demo_dataset = DemoDataset(
        dataset_cfg=cfg.DATA_CONFIG, class_names=cfg.CLASS_NAMES, training=False,
        root_path=Path(args.data_path), ext=args.ext, logger=logger,
    )
    logger.info(f'Total number of samples: \t{len(demo_dataset)}')

    model = build_network(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), dataset=demo_dataset)
    model.load_params_from_file(filename=args.ckpt, logger=logger, to_cpu=True)
    model.cuda()
    model.eval()

    with torch.no_grad():
        for idx, sample in enumerate(demo_dataset):
            logger.info(f'Visualized sample index: \t{idx + 1}')
            batch = demo_dataset.collate_batch([sample])
            load_data_to_gpu(batch)
            pred_dicts, _ = model.forward(batch)

            points = batch['points'][:, 1:]
            ref_boxes = pred_dicts[0]['pred_boxes']
            ref_scores = pred_dicts[0]['pred_scores']
            ref_labels = pred_dicts[0]['pred_labels']

            # Host-side numpy copies (saving to .npy / visualization are disabled).
            points_nu = points.cpu().numpy()
            ref_boxes_nu = ref_boxes.cpu().numpy()
            ref_scores_nu = ref_scores.cpu().numpy()
            ref_labels_nu = ref_labels.cpu().numpy()

            print(points.shape, ref_boxes.shape)
            print(ref_scores.shape, ref_labels.shape)

    logger.info('Demo done.')
def main():
    """Demo that either runs inference and pickles the result, or reloads a saved one,
    then renders the scene inside a virtual framebuffer."""
    args, cfg = parse_config()
    logger = common_utils.create_logger()
    logger.info('-----------------Quick Demo of OpenPCDet-------------------------')
    demo_dataset = DemoDataset(
        dataset_cfg=cfg.DATA_CONFIG, class_names=cfg.CLASS_NAMES, training=False,
        root_path=Path(args.data_path), ext=args.ext, logger=logger,
    )
    logger.info(f'Total number of samples: \t{len(demo_dataset)}')

    if args.saved_pred == "":
        # Fresh inference: run every sample, pickling the latest result each time.
        model = build_network(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), dataset=demo_dataset)
        model.load_params_from_file(filename=args.ckpt, logger=logger, to_cpu=True)
        model.cuda()
        model.eval()
        with torch.no_grad():
            for idx, sample in enumerate(demo_dataset):
                logger.info(f'Visualized sample index: \t{idx + 1}')
                data_dict = demo_dataset.collate_batch([sample])
                load_data_to_gpu(data_dict)
                pred_dicts, _ = model.forward(data_dict)
                with open('../saved_pred/curr_pickle.pkl', 'wb+') as f:
                    pkl.dump({"data_dict": data_dict['points'][:, 1:],
                              "pred_dicts": pred_dicts}, f)
    else:
        # Reload a previous run from disk instead of running the network.
        # NOTE(review): the saved "data_dict" is a points tensor, so the
        # 'points' indexing below looks inconsistent for this branch — verify.
        with open('../saved_pred/curr_pickle.pkl', 'rb') as f:
            data_ = pkl.load(f)
        data_dict = data_["data_dict"]
        pred_dicts = data_["pred_dicts"]

    # Render headlessly inside a virtual X framebuffer.
    vdisplay = Xvfb(width=1920, height=1080)
    vdisplay.start()
    V.draw_scenes(
        points=data_dict['points'][:, 1:], ref_boxes=pred_dicts[0]['pred_boxes'],
        ref_scores=pred_dicts[0]['pred_scores'], ref_labels=pred_dicts[0]['pred_labels']
    )
    vdisplay.stop()
    mlab.show(stop=True)
    mlab.savefig("./test_eg.png")
    logger.info('Demo done.')
def main():
    """Run the detector over the demo dataset and dump predictions as JSON per sample."""
    args, cfg = parse_config()
    logger = common_utils.create_logger()
    logger.info(
        '-----------------Quick Demo of OpenPCDet-------------------------')
    demo_dataset = DemoDataset(dataset_cfg=cfg.DATA_CONFIG,
                               class_names=cfg.CLASS_NAMES,
                               training=False,
                               root_path=Path(args.data_path),
                               ext=args.ext,
                               logger=logger)
    logger.info(f'Total number of samples: \t{len(demo_dataset)}')

    model = build_network(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), dataset=demo_dataset)
    model.load_params_from_file(filename=args.ckpt, logger=logger, to_cpu=True)
    model.cuda()
    model.eval()

    with torch.no_grad():
        # This dataset yields (sample, source_path) pairs.
        for idx, (sample, data_path) in enumerate(demo_dataset):
            logger.info(f'Visualized sample index: \t{idx + 1}')
            logger.info("Process: %s" % data_path)

            batch = demo_dataset.collate_batch([sample])
            load_data_to_gpu(batch)
            pred_dicts, _ = model.forward(batch)

            # Serialize predictions next to the input file as <name>.json.
            pred_s = {
                "data_path": data_path,
                'pred_boxes': pred_dicts[0]['pred_boxes'].cpu().numpy().tolist(),
                'pred_scores': pred_dicts[0]['pred_scores'].cpu().numpy().tolist(),
                'pred_labels': pred_dicts[0]['pred_labels'].cpu().numpy().tolist(),
            }
            json_path = os.path.splitext(data_path)[0] + ".json"
            with open(json_path, 'w') as fp:
                json.dump(pred_s, fp)

    logger.info('Demo done.')
def main():
    """Detect each sample, save boxes to evaluation/<sample>.txt, and visualize."""
    args, cfg = parse_config()
    logger = common_utils.create_logger()
    logger.info(
        '-----------------Quick Demo of OpenPCDet-------------------------')
    demo_dataset = DemoDataset(dataset_cfg=cfg.DATA_CONFIG,
                               class_names=cfg.CLASS_NAMES,
                               training=False,
                               root_path=Path(args.data_path),
                               ext=args.ext,
                               logger=logger)
    logger.info(f'Total number of samples: \t{len(demo_dataset)}')

    data_name_list = demo_dataset.sample_file_list
    print('evaluation data size=', len(data_name_list))

    model = build_network(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), dataset=demo_dataset)
    model.load_params_from_file(filename=args.ckpt, logger=logger, to_cpu=True)
    model.cuda()
    model.eval()

    with torch.no_grad():
        for idx, sample in enumerate(demo_dataset):
            logger.info(f'Detecte sample: \t{data_name_list[idx]}')
            batch = demo_dataset.collate_batch([sample])
            load_data_to_gpu(batch)
            pred_dicts, _ = model.forward(batch)
            print(pred_dicts)

            # Dump rounded box parameters to evaluation/<sample>.txt.
            res = pred_dicts[0]['pred_boxes'].cpu().numpy().round(8)
            save_filename = str(data_name_list[idx])
            out_name = save_filename[save_filename.rfind('/') + 1:].replace('.bin', '.txt')
            np.savetxt('evaluation/' + out_name, res, fmt='%.08f')

            V.draw_scenes(points=batch['points'][:, 1:],
                          ref_boxes=pred_dicts[0]['pred_boxes'],
                          ref_scores=pred_dicts[0]['pred_scores'],
                          ref_labels=pred_dicts[0]['pred_labels'])
            mlab.show(stop=True)

    logger.info('Demo done.')
def main():
    """Run the detector over the demo dataset and pickle boxes/scores/labels to disk.

    Fix: the original opened three output files and closed them manually,
    leaking the handles if pickling raised; use context managers instead.
    """
    args, cfg = parse_config()
    logger = common_utils.create_logger()
    logger.info(
        '-----------------Quick Demo of OpenPCDet-------------------------')
    demo_dataset = DemoDataset(dataset_cfg=cfg.DATA_CONFIG,
                               class_names=cfg.CLASS_NAMES,
                               training=False,
                               root_path=Path(args.data_path),
                               ext=args.ext,
                               logger=logger)
    logger.info(f'Total number of samples: \t{len(demo_dataset)}')

    model = build_network(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), dataset=demo_dataset)
    model.load_params_from_file(filename=args.ckpt, logger=logger, to_cpu=True)
    model.cuda()
    model.eval()

    with torch.no_grad():
        for idx, data_dict in enumerate(demo_dataset):
            logger.info(f'Visualized sample index: \t{idx + 1}')
            data_dict = demo_dataset.collate_batch([data_dict])
            load_data_to_gpu(data_dict)
            pred_dicts, _ = model.forward(data_dict)

            # Each sample overwrites args.save + '_{boxes,scores,labels}'.
            with open(args.save + '_boxes', 'wb') as boxes_file:
                pickle.dump(pred_dicts[0]['pred_boxes'].cpu().numpy(), boxes_file, protocol=4)
            with open(args.save + '_scores', 'wb') as scores_file:
                pickle.dump(pred_dicts[0]['pred_scores'].cpu().numpy(), scores_file, protocol=4)
            with open(args.save + '_labels', 'wb') as labels_file:
                pickle.dump(pred_dicts[0]['pred_labels'].cpu().numpy(), labels_file, protocol=4)

    logger.info('Demo done.')
def predict(self, pointcloud):
    """Preprocess a point cloud, run the model once, and return its prediction dicts."""
    sample = self.preprocesiing.preprocess_pointcloud(pointcloud)
    with torch.no_grad():
        # Batch the single sample and move it onto the GPU.
        batch = self.preprocesiing.collate_batch([sample])
        load_data_to_gpu(batch)
        t1 = time.time()
        pred_dicts, _ = self.model.forward(batch)
        print(pred_dicts)
        t2 = time.time()
        print("3D Model time= ", t2 - t1)
        return pred_dicts
def main():
    """Benchmark inference FPS for MAX_SWEEPS = 1..10 on the nuScenes dataset.

    Rewrites line index 4 of the dataset config in place for each sweep count,
    rebuilds dataset + model, then reports FPS statistics over ``args.frames``
    frames.

    Fix: the original leaked both config file handles via
    ``open(...).read()`` / ``open(..., 'w').write(...)``; use ``with`` blocks.
    """
    for sweep in range(1, 11):
        cfg_path = 'cfgs/dataset_configs/nuscenes_dataset.yaml'
        with open(cfg_path) as f:
            lines = f.read().splitlines()
        lines[4] = 'MAX_SWEEPS: ' + str(sweep)
        with open(cfg_path, 'w') as f:
            f.write('\n'.join(lines))

        args, cfg = parse_config()
        logger = common_utils.create_logger()
        demo_dataset = NuScenesDataset(dataset_cfg=cfg.DATA_CONFIG,
                                       class_names=cfg.CLASS_NAMES,
                                       training=False,
                                       root_path=Path(args.data_path),
                                       logger=logger)
        model = build_network(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), dataset=demo_dataset)
        model.load_params_from_file(filename=args.ckpt, logger=logger, to_cpu=True)
        model.cuda()
        model.eval()

        # Time the full per-frame pipeline: collate + H2D copy + forward.
        times = []
        for i in range(args.frames):
            with torch.no_grad():
                data_dict = demo_dataset[i]
                start = time.time()
                data_dict = demo_dataset.collate_batch([data_dict])
                load_data_to_gpu(data_dict)
                pred_dicts, _ = model.forward(data_dict)
                end = time.time()
                times.append(end - start)

        # Report FPS (inverse latency) statistics for this sweep setting.
        print("sweeps", sweep)
        print("min", 1 / min(times))
        print("1st_quantile", 1 / np.quantile(times, .25))
        print("median", 1 / statistics.median(times))
        # NOTE(review): label says "1st" but .75 is the 3rd quartile — output
        # string kept unchanged for log compatibility; confirm before renaming.
        print("1st_quantile", 1 / np.quantile(times, .75))
        print("max", 1 / max(times))
        print()
def main():
    """Demo loop that measures per-sample forward-pass latency/FPS and visualizes results."""
    args, cfg = parse_config()
    logger = common_utils.create_logger()
    logger.info('-----------------Quick Demo of OpenPCDet-------------------------')
    demo_dataset = DemoDataset(
        dataset_cfg=cfg.DATA_CONFIG, class_names=cfg.CLASS_NAMES, training=False,
        root_path=Path(args.data_path), ext=args.ext, logger=logger
    )
    logger.info(f'Total number of samples: \t{len(demo_dataset)}')
    model = build_network(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), dataset=demo_dataset)
    model.load_params_from_file(filename=args.ckpt, logger=logger, to_cpu=True)
    model.cuda()
    model.eval()
    # Running sums for average latency (ms) and FPS across samples.
    avg_time = 0
    avg_fps = 0
    with torch.no_grad():
        for idx, data_dict in enumerate(demo_dataset):
            logger.info(f'Visualized sample index: {idx + 1}')
            data_dict = demo_dataset.collate_batch([data_dict])
            load_data_to_gpu(data_dict)
            # Wall-clock timing of the forward pass only, converted to ms.
            # (`time` here is the bare function, i.e. `from time import time`.)
            start = time()
            pred_dicts, _ = model.forward(data_dict)
            end = time()
            time_forward = (end - start) * 1000
            # Skip the first sample (warm-up) when accumulating the averages.
            if idx > 0:
                avg_time += time_forward
                avg_fps += 1000 / time_forward
            logger.info(f'Time in ms for sample index: {idx + 1} is {"{:.2f}".format(time_forward)} ms')
            logger.info(f'FPS for sample index: {idx + 1} is {"{:.2f}".format(1000 / time_forward)} frame per second')
            V.draw_scenes(
                points=data_dict['points'][:, 1:], ref_boxes=pred_dicts[0]['pred_boxes'],
                ref_scores=pred_dicts[0]['pred_scores'], ref_labels=pred_dicts[0]['pred_labels']
            )
            mlab.show(stop=True)
    # Averages exclude the warm-up sample, hence len - 1.
    # NOTE(review): divides by zero when the dataset has exactly one sample.
    avg_time /= len(demo_dataset) - 1
    avg_fps /= len(demo_dataset) - 1
    # NOTE(review): `paper` is not defined in this function — presumably a
    # module-level object describing the evaluated model; confirm it exists,
    # otherwise these two lines raise NameError.
    logger.info(f'Average Time in ms for {paper.type}: is {"{:.2f}".format(avg_time)} ms')
    logger.info(f'Average FPS for {paper.type}: is {"{:.2f}".format(avg_fps)} frame per second')
    logger.info('Demo done.')
def main():
    """Quick OpenPCDet demo: build the dataset, load a checkpoint, visualize predictions."""
    args, cfg = parse_config()
    logger = common_utils.create_logger()
    logger.info(
        '-----------------Quick Demo of OpenPCDet-------------------------')

    # DemoDataset bundles everything about the input data: dataset config,
    # class names, eval mode, data root, file extension, and the shared logger.
    demo_dataset = DemoDataset(dataset_cfg=cfg.DATA_CONFIG,
                               class_names=cfg.CLASS_NAMES,
                               training=False,
                               root_path=Path(args.data_path),
                               ext=args.ext,
                               logger=logger)
    logger.info(f'Total number of samples: \t{len(demo_dataset)}')

    model = build_network(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), dataset=demo_dataset)
    model.load_params_from_file(filename=args.ckpt, logger=logger, to_cpu=True)
    model.cuda()
    model.eval()

    # Inference only: no gradients, no backward pass.
    with torch.no_grad():
        for idx, sample in enumerate(demo_dataset):
            logger.info(f'Visualized sample index: \t{idx + 1}')
            batch = demo_dataset.collate_batch([sample])
            load_data_to_gpu(batch)
            pred_dicts, _ = model.forward(batch)
            V.draw_scenes(points=batch['points'][:, 1:],
                          ref_boxes=pred_dicts[0]['pred_boxes'],
                          ref_scores=pred_dicts[0]['pred_scores'],
                          ref_labels=pred_dicts[0]['pred_labels'])
            mlab.show(stop=True)

    logger.info('Demo done.')
def run(self, points):
    """Run the network on one frame and return score-filtered detections."""
    t1 = time.time()
    print(f"input points shape: {points.shape}")
    num_features = 4  # kitti model (nuScenes-style models would use 5)
    self.points = points.reshape([-1, num_features])

    input_dict = {'points': self.points, 'frame_id': 0}
    batch = self.demo_datasets.prepare_data(data_dict=input_dict)
    batch = self.demo_datasets.collate_batch([batch])
    load_data_to_gpu(batch)

    torch.cuda.synchronize()
    pred_dicts, _ = self.net.forward(batch)
    torch.cuda.synchronize()
    t2 = time.time()
    print(f"net inference cost time: {t2 - t1}")

    # Per-class confidence thresholds applied to the raw predictions.
    class_scores = [0.5, 0.5, 0.5, 0.5, 0.3, 0.3, 0.3, 0.3, 0.3]
    pred = remove_low_score_ck(pred_dicts[0], class_scores)

    boxes_lidar = transform_to_original(pred['pred_boxes'].detach().cpu().numpy())
    scores = pred['pred_scores'].detach().cpu().numpy()
    types = pred['pred_labels'].detach().cpu().numpy()
    print(f" pred labels: {types}")
    print(f" pred scores: {scores}")
    return scores, boxes_lidar, types
def main():
    """PV-RCNN quick demo: parse config, build the detector, and visualize predictions."""
    # 1. CLI arguments + model config (e.g. tools/cfg/kitti_models/pv-rcnn.yaml).
    args, cfg = parse_config()
    logger = common_utils.create_logger()
    logger.info(
        '-----------------Quick Demo of OpenPCDet-------------------------')
    demo_dataset = DemoDataset(dataset_cfg=cfg.DATA_CONFIG,
                               class_names=cfg.CLASS_NAMES,
                               training=False,
                               root_path=Path(args.data_path),
                               ext=args.ext,
                               logger=logger)
    logger.info(f'Total number of samples: \t{len(demo_dataset)}')

    # 2. Build the detector (see pcdet/models/detectors for the concrete classes).
    model = build_network(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), dataset=demo_dataset)
    # 3. Load checkpoint weights, then switch to GPU inference mode.
    model.load_params_from_file(filename=args.ckpt, logger=logger, to_cpu=True)
    model.cuda()
    model.eval()

    with torch.no_grad():
        for idx, sample in enumerate(demo_dataset):
            logger.info(f'Visualized sample index: \t{idx + 1}')
            # 4. Batch one sample and move it to the GPU.
            batch = demo_dataset.collate_batch([sample])
            load_data_to_gpu(batch)
            # Forward pass through the detector gives per-frame predictions.
            pred_dicts, _ = model.forward(batch)
            # 5. Visualize points plus predicted boxes/scores/labels.
            V.draw_scenes(points=batch['points'][:, 1:],
                          ref_boxes=pred_dicts[0]['pred_boxes'],
                          ref_scores=pred_dicts[0]['pred_scores'],
                          ref_labels=pred_dicts[0]['pred_labels'])
            mlab.show(stop=True)

    logger.info('Demo done.')
def run_model(**kwargs):
    """Run inference on the pre-loaded OpenPCDet library model.

    Args:
        **kwargs: One keyword argument per input data field from the
            evaluation script.

    Returns:
        Dict from string to numpy ndarray with keys 'boxes', 'scores', 'classes'.
    """
    sample = _process_inputs(kwargs)
    with torch.no_grad():
        batch = dataset_processor.collate_batch([sample])
        load_data_to_gpu(batch)
        pred_dicts, _ = model.forward(batch)
    # Rename the outputs to the format expected by the evaluation script.
    top = pred_dicts[0]
    return {
        'boxes': top['pred_boxes'].cpu().numpy(),
        'scores': top['pred_scores'].cpu().numpy(),
        'classes': top['pred_labels'].cpu().numpy(),
    }
def run(self, points):
    """Detect objects in one frame; returns (scores, boxes_lidar, types)."""
    t_t = time.time()
    print(f"input points shape: {points.shape}")
    num_features = 4
    self.points = points.reshape([-1, num_features])

    # Append a zero timestamp column and shift x into the model's lidar frame.
    timestamps = np.zeros((len(self.points), 1))
    self.points = np.append(self.points, timestamps, axis=1)
    self.points[:, 0] += movelidarcenter

    input_dict = {'points': self.points, 'frame_id': 0}
    batch = self.demo_dataset.prepare_data(data_dict=input_dict)
    batch = self.demo_dataset.collate_batch([batch])
    load_data_to_gpu(batch)

    # Time only the forward pass, with explicit CUDA fences.
    torch.cuda.synchronize()
    t = time.time()
    pred_dicts, _ = self.net.forward(batch)
    torch.cuda.synchronize()
    print(f"inference time: {time.time() - t}")

    frame_pred = pred_dicts[0]
    boxes_lidar = frame_pred["pred_boxes"].detach().cpu().numpy()
    scores = frame_pred["pred_scores"].detach().cpu().numpy()
    types = frame_pred["pred_labels"].detach().cpu().numpy()
    return scores, boxes_lidar, types
def main():
    """Run inference over the demo dataset with optional voxel visualization and
    point-level Argoverse evaluation dumps."""
    args, cfg = parse_config()
    logger = common_utils.create_logger()
    logger.info(
        '-----------------Quick Inference of OpenPCDet-------------------------'
    )
    demo_dataset = DemoDataset(dataset_cfg=cfg.DATA_CONFIG,
                               class_names=cfg.CLASS_NAMES,
                               training=False,
                               root_path=Path(args.data_path),
                               ext=args.ext,
                               logger=logger)
    logger.info(f'Total number of samples: \t{len(demo_dataset)}')

    model = build_network(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), dataset=demo_dataset)
    model.load_params_from_file(filename=args.ckpt, logger=logger, to_cpu=True)
    model.cuda()
    model.eval()

    with torch.no_grad():
        for idx, sample in enumerate(demo_dataset):
            logger.info(f'Run inference of sample index: \t{idx + 1}')
            batch = demo_dataset.collate_batch([sample])
            load_data_to_gpu(batch)
            # This detector variant also returns the full batch dict.
            pred_dicts, recall_dicts, batch_dict = model.forward(batch)

            if args.vis:  # visualize voxel-level predictions
                V.visualize_voxel_prediction(batch_dict)
            if args.point:  # dump point-level predictions for Argoverse evaluation
                E.save_prediction_point_argoverse(batch_dict, save_gt=args.save_gt)

    logger.info('Quick inference done.')
def run(self, points, calib, image_shape):
    """Run detection on a frame and convert boxes to a KITTI-style prediction dict.

    Args:
        points: flat float array reshaped to (N, 4) as X, Y, Z, intensity.
        calib: calibration used to project lidar boxes to camera/image space.
        image_shape: image size used when projecting the 2D boxes.

    Returns:
        A KITTI-format prediction dict (the empty template when nothing is detected).
    """
    t_t = time.time()
    print(f"input points shape: {points.shape}")
    num_features = 4
    self.points = points.reshape([-1, num_features])

    # Append a zero timestamp channel and shift x into the model's lidar frame.
    timestamps = np.zeros((len(self.points), 1))
    self.points = np.append(self.points, timestamps, axis=1)
    self.points[:, 0] += movelidarcenter

    input_dict = {'points': self.points, 'frame_id': 0}
    batch = self.demo_dataset.prepare_data(data_dict=input_dict)
    batch = self.demo_dataset.collate_batch([batch])
    load_data_to_gpu(batch)

    torch.cuda.synchronize()
    t = time.time()
    pred_dicts, _ = self.net.forward(batch)
    torch.cuda.synchronize()
    print(f"inference time: {time.time() - t}")

    frame_pred = pred_dicts[0]
    boxes_lidar = frame_pred["pred_boxes"].detach().cpu().numpy()
    scores = frame_pred["pred_scores"].detach().cpu().numpy()
    types = frame_pred["pred_labels"].detach().cpu().numpy()

    pred_boxes = boxes_lidar
    pred_dict = self.get_template_prediction(scores.shape[0])
    if scores.shape[0] == 0:
        return pred_dict

    # Project lidar boxes into the camera frame and onto the image plane.
    pred_boxes_camera = box_utils.boxes3d_lidar_to_kitti_camera(pred_boxes, calib)
    pred_boxes_img = box_utils.boxes3d_kitti_camera_to_imageboxes(
        pred_boxes_camera, calib, image_shape=image_shape)

    pred_dict['name'] = np.array(cfg.CLASS_NAMES)[types - 1]
    pred_dict['alpha'] = -np.arctan2(-pred_boxes[:, 1], pred_boxes[:, 0]) + pred_boxes_camera[:, 6]
    pred_dict['bbox'] = pred_boxes_img
    pred_dict['dimensions'] = pred_boxes_camera[:, 3:6]
    pred_dict['location'] = pred_boxes_camera[:, 0:3]
    pred_dict['rotation_y'] = pred_boxes_camera[:, 6]
    pred_dict['score'] = scores
    pred_dict['boxes_lidar'] = pred_boxes
    return pred_dict
def eval_one_epoch(cfg, model, dataloader, epoch_id, logger, dist_test=False, save_to_file=False, result_dir=None):
    """Evaluate `model` over one full pass of `dataloader` and report recall/AP metrics.

    Args:
        cfg: global config; reads MODEL.POST_PROCESSING and LOCAL_RANK.
        model: detector; wrapped in DistributedDataParallel when dist_test.
        dataloader: evaluation data loader.
        epoch_id: epoch label used only for logging.
        logger: logger instance.
        dist_test: run under multi-GPU distributed testing.
        save_to_file: also dump per-sample predictions under final_result/data.
        result_dir: pathlib.Path output root (must not be None; .mkdir is called on it).

    Returns:
        dict of recall metrics plus whatever dataset.evaluation() reports;
        empty dict on non-zero ranks.
    """
    result_dir.mkdir(parents=True, exist_ok=True)
    final_output_dir = result_dir / 'final_result' / 'data'
    if save_to_file:
        final_output_dir.mkdir(parents=True, exist_ok=True)
    # Accumulators: gt count plus roi/rcnn recall counters per threshold.
    metric = {
        'gt_num': 0,
    }
    for cur_thresh in cfg.MODEL.POST_PROCESSING.RECALL_THRESH_LIST:
        metric['recall_roi_%s' % str(cur_thresh)] = 0
        metric['recall_rcnn_%s' % str(cur_thresh)] = 0
    dataset = dataloader.dataset
    class_names = dataset.class_names
    det_annos = []
    logger.info('*************** EPOCH %s EVALUATION *****************' % epoch_id)
    if dist_test:
        num_gpus = torch.cuda.device_count()
        local_rank = cfg.LOCAL_RANK % num_gpus
        model = torch.nn.parallel.DistributedDataParallel(
            model, device_ids=[local_rank], broadcast_buffers=False)
    model.eval()
    # Progress bar only on the main process.
    if cfg.LOCAL_RANK == 0:
        progress_bar = tqdm.tqdm(total=len(dataloader), leave=True, desc='eval', dynamic_ncols=True)
    start_time = time.time()
    for i, batch_dict in enumerate(dataloader):
        load_data_to_gpu(batch_dict)
        with torch.no_grad():
            pred_dicts, ret_dict = model(batch_dict)
        disp_dict = {}
        # Update recall counters in-place from this batch's ret_dict.
        statistics_info(cfg, ret_dict, metric, disp_dict)
        annos = dataset.generate_prediction_dicts(
            batch_dict, pred_dicts, class_names,
            output_path=final_output_dir if save_to_file else None)
        det_annos += annos
        if cfg.LOCAL_RANK == 0:
            progress_bar.set_postfix(disp_dict)
            progress_bar.update()
    if cfg.LOCAL_RANK == 0:
        progress_bar.close()
    if dist_test:
        # Gather per-rank results; `metric` becomes a list of per-rank dicts.
        rank, world_size = common_utils.get_dist_info()
        det_annos = common_utils.merge_results_dist(det_annos, len(dataset), tmpdir=result_dir / 'tmpdir')
        metric = common_utils.merge_results_dist([metric], world_size, tmpdir=result_dir / 'tmpdir')
    logger.info('*************** Performance of EPOCH %s *****************' % epoch_id)
    sec_per_example = (time.time() - start_time) / len(dataloader.dataset)
    logger.info('Generate label finished(sec_per_example: %.4f second).' % sec_per_example)
    # Only rank 0 computes and reports the final metrics.
    if cfg.LOCAL_RANK != 0:
        return {}
    ret_dict = {}
    if dist_test:
        # Sum counters across ranks into metric[0], then flatten.
        for key, val in metric[0].items():
            for k in range(1, world_size):
                metric[0][key] += metric[k][key]
        metric = metric[0]
    gt_num_cnt = metric['gt_num']
    for cur_thresh in cfg.MODEL.POST_PROCESSING.RECALL_THRESH_LIST:
        # max(gt_num_cnt, 1) guards against division by zero on empty GT.
        cur_roi_recall = metric['recall_roi_%s' % str(cur_thresh)] / max(
            gt_num_cnt, 1)
        cur_rcnn_recall = metric['recall_rcnn_%s' % str(cur_thresh)] / max(
            gt_num_cnt, 1)
        logger.info('recall_roi_%s: %f' % (cur_thresh, cur_roi_recall))
        logger.info('recall_rcnn_%s: %f' % (cur_thresh, cur_rcnn_recall))
        ret_dict['recall/roi_%s' % str(cur_thresh)] = cur_roi_recall
        ret_dict['recall/rcnn_%s' % str(cur_thresh)] = cur_rcnn_recall
    total_pred_objects = 0
    for anno in det_annos:
        total_pred_objects += anno['name'].__len__()
    logger.info('Average predicted number of objects(%d samples): %.3f' % (
        len(det_annos), total_pred_objects / max(1, len(det_annos))))
    # Persist raw detections, then run the dataset-specific AP evaluation.
    with open(result_dir / 'result.pkl', 'wb') as f:
        pickle.dump(det_annos, f)
    result_str, result_dict = dataset.evaluation(
        det_annos, class_names,
        eval_metric=cfg.MODEL.POST_PROCESSING.EVAL_METRIC,
        output_path=final_output_dir)
    logger.info(result_str)
    ret_dict.update(result_dict)
    with open(result_dir / 'ap_dict.pkl', 'wb') as f:
        pickle.dump(ret_dict, f)
    logger.info('Result is save to %s' % result_dir)
    logger.info('****************Evaluation done.*****************')
    return ret_dict
def save_pseudo_label_epoch(model, val_loader, rank, leave_pbar, ps_label_dir, cur_epoch):
    """
    Generate pseudo labels for the whole target set with the given model.

    Args:
        model: model used to predict results for pseudo labels (set to eval mode here)
        val_loader: data_loader iterated once to produce pseudo labels
        rank: process rank (only rank 0 owns the progress bar)
        leave_pbar: tqdm bar controller
        ps_label_dir: dir to save pseudo labels
        cur_epoch: current epoch index; memory ensembling is only applied after epoch 0
    """
    val_dataloader_iter = iter(val_loader)
    total_it_each_epoch = len(val_loader)
    if rank == 0:
        pbar = tqdm.tqdm(total=total_it_each_epoch, leave=leave_pbar,
                         desc='generate_ps_e%d' % cur_epoch, dynamic_ncols=True)
    pos_ps_meter = common_utils.AverageMeter()
    ign_ps_meter = common_utils.AverageMeter()
    model.eval()
    for cur_it in range(total_it_each_epoch):
        try:
            target_batch = next(val_dataloader_iter)
        except StopIteration:
            # BUGFIX: rebind the shared iterator. The previous code assigned a
            # brand-new local `target_dataloader_iter`, leaving
            # `val_dataloader_iter` permanently exhausted, so every following
            # iteration raised StopIteration again and re-fetched the FIRST
            # batch of a fresh iterator.
            val_dataloader_iter = iter(val_loader)
            target_batch = next(val_dataloader_iter)
        # generate gt_boxes for target_batch and update model weights
        with torch.no_grad():
            load_data_to_gpu(target_batch)
            pred_dicts, ret_dict = model(target_batch)
        pos_ps_batch, ign_ps_batch = save_pseudo_label_batch(
            target_batch, pred_dicts=pred_dicts,
            need_update=(cfg.SELF_TRAIN.get('MEMORY_ENSEMBLE', None) and
                         cfg.SELF_TRAIN.MEMORY_ENSEMBLE.ENABLED and
                         cur_epoch > 0))
        # log to console and tensorboard
        pos_ps_meter.update(pos_ps_batch)
        ign_ps_meter.update(ign_ps_batch)
        disp_dict = {
            'pos_ps_box': "{:.3f}({:.3f})".format(pos_ps_meter.val, pos_ps_meter.avg),
            'ign_ps_box': "{:.3f}({:.3f})".format(ign_ps_meter.val, ign_ps_meter.avg)
        }
        if rank == 0:
            pbar.update()
            pbar.set_postfix(disp_dict)
            pbar.refresh()
    if rank == 0:
        pbar.close()
    gather_and_dump_pseudo_label_result(rank, ps_label_dir, cur_epoch)
def main():
    """Quick demo: run one NuScenes sample through the detector and visualize
    the confident predictions with mayavi.

    Reads `args.data_path`, `args.ckpt` and `args.idx` from parse_config();
    no return value — blocks on the mlab window, then logs completion.
    """
    args, cfg = parse_config()
    logger = common_utils.create_logger()
    logger.info('-----------------Quick Demo of OpenPCDet-------------------------')
    demo_dataset = NuScenesDataset(
        dataset_cfg=cfg.DATA_CONFIG, class_names=cfg.CLASS_NAMES, training=False,
        root_path=Path(args.data_path), logger=logger
    )
    logger.info(f'Total number of samples: \t{len(demo_dataset)}')

    model = build_network(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), dataset=demo_dataset)
    model.load_params_from_file(filename=args.ckpt, logger=logger, to_cpu=True)
    model.cuda()
    model.eval()

    start = time.time()
    with torch.no_grad():
        data_dict = demo_dataset[args.idx]
        logger.info(f'Visualized sample index: \t{args.idx}')
        data_dict = demo_dataset.collate_batch([data_dict])
        load_data_to_gpu(data_dict)
        pred_dicts, _ = model.forward(data_dict)

        # Keep only confident detections. Boolean-mask indexing replaces the
        # old float-mask -> torch.nonzero -> fancy-index -> reshape sequence;
        # the resulting tensors are identical (boxes are 9-dim NuScenes boxes).
        keep = pred_dicts[0]['pred_scores'] > 0.3
        V.draw_scenes(
            points=data_dict['points'][:, 1:],
            ref_boxes=pred_dicts[0]['pred_boxes'][keep].reshape(-1, 9),
            ref_scores=pred_dicts[0]['pred_scores'][keep].reshape(-1),
            ref_labels=pred_dicts[0]['pred_labels'][keep].reshape(-1)
        )
        # Blocks until the visualization window is closed.
        mlab.show(stop=True)

    end = time.time()
    print(end - start)  # total wall time, including the interactive window
    logger.info('Demo done.')
def main():
    """
    Run detection over a KITTI-odometry sequence and dump, per frame:
      - a pickle with points, predictions and the matching GT boxes
      - rows appended to a per-sequence CSV in KITTI tracking format.

    Reads `args.seq_path`, `args.ckpt`, `args.ext`, `args.output_dir`
    from parse_config(); no return value.
    """
    args, cfg = parse_config()
    curr_seq = args.seq_path.split("/")[-1]
    logger = common_utils.create_logger()
    logger.info(
        '-----------------Quick Demo of OpenPCDet-------------------------')
    demo_dataset = DemoDataset(dataset_cfg=cfg.DATA_CONFIG, class_names=cfg.CLASS_NAMES,
                               training=False, root_path=Path(args.seq_path),
                               ext=args.ext, logger=logger)
    logger.info(f'Total number of samples: \t{len(demo_dataset)}')
    model = build_network(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), dataset=demo_dataset)
    model.load_params_from_file(filename=args.ckpt, logger=logger, to_cpu=True)
    model.cuda()
    model.eval()

    # Remove any stale CSV so the per-frame appends below start from scratch.
    csv_file_path = '%s/%s.csv' % (args.output_dir, curr_seq)
    if os.path.exists(csv_file_path):
        os.remove(csv_file_path)

    # Load ground truth from the kitti-odometry label file; selected columns
    # are: frame, type, x, y, z, h, w, l, rotation_y.
    gt_path = "/".join(
        args.seq_path.split("/")[0:-2]) + "/label_02/" + curr_seq + ".txt"
    gt_data = np.genfromtxt(gt_path, dtype=str)[:, [0, 2, 13, 14, 15, 10, 11, 12, 16]]
    # Keep only the classes we evaluate (np.isin replaces the nested
    # np.logical_or chain; identical selection).
    gt_data = gt_data[np.isin(gt_data[:, 1], ('Car', 'Cyclist', 'Pedestrian')), :]
    gt_data = gt_data[:, [0, 2, 3, 4, 5, 6, 7, 8]]
    # BUGFIX: np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin float is the documented replacement (same float64 dtype).
    gt_data = gt_data.astype(float)

    with torch.no_grad():
        for idx, data_dict in enumerate(demo_dataset):
            logger.info(f'Visualized sample index: \t{idx + 1}')
            data_dict = demo_dataset.collate_batch([data_dict])
            load_data_to_gpu(data_dict)
            pred_dicts, _ = model.forward(data_dict)

            # Creating output dir if it does not already exist
            Path(args.output_dir).mkdir(parents=True, exist_ok=True)

            relevant_gt_boxes = gt_data[gt_data[:, 0] == idx][:, 1:]
            data_ = {
                "data_dict": data_dict['points'][:, 1:].cpu().detach().numpy(),
                "pred_boxes": pred_dicts[0]["pred_boxes"].cpu().detach().numpy(),
                "pred_labels": pred_dicts[0]["pred_labels"].cpu().detach().numpy(),
                "pred_scores": pred_dicts[0]["pred_scores"].cpu().detach().numpy(),
                "gt_boxes": relevant_gt_boxes
            }
            with open('%s/curr_pickle_%s.pkl' % (args.output_dir, str(idx)), 'wb+') as f:
                pkl.dump(data_, f)

            # Writing to text file in kitti format for tracking step
            frame_data = np.zeros((data_["pred_labels"].shape[0], 15))
            frame_data[:, 0] = idx  # Frame ID
            frame_data[:, 1] = data_["pred_labels"]  # Labels
            frame_data[:, 2:6] = 0  # 2d bounding boxes (unused)
            frame_data[:, 6] = data_["pred_scores"]  # detection confidence
            frame_data[:, 7:10] = data_["pred_boxes"][:, 3:6]  # dimensions
            frame_data[:, 10:13] = data_["pred_boxes"][:, 0:3]  # location
            frame_data[:, 13] = data_["pred_boxes"][:, -1]  # rotation_y
            frame_data[:, 14] = 0  # Alpha
            with open('%s/%s.csv' % (args.output_dir, curr_seq), 'a') as f:
                np.savetxt(f, frame_data, delimiter=",")
def eval_one_epoch(cfg, model, dataloader, epoch_id, logger, dist_test=False, save_to_file=False, result_dir=None):
    """Debug/experimental variant of eval_one_epoch: instead of evaluating every
    batch, it caches a hard-coded 'target' frame (frame_id 27) and a 'query'
    frame (frame_id 450), runs the model on the [query, target] pair once both
    are cached, and stops at frame_id 451.

    NOTE(review): this is experiment scaffolding, not a general eval loop —
    the hard-coded frame ids and the commented-out evaluation section suggest
    it was used for a one-off similarity/query experiment; confirm before reuse.

    Args/returns mirror the standard eval_one_epoch; returns recall metrics
    only (dataset.evaluation is commented out below).
    """
    result_dir.mkdir(parents=True, exist_ok=True)
    final_output_dir = result_dir / 'final_result' / 'data'
    if save_to_file:
        final_output_dir.mkdir(parents=True, exist_ok=True)
    metric = {
        'gt_num': 0,
    }
    for cur_thresh in cfg.MODEL.POST_PROCESSING.RECALL_THRESH_LIST:
        metric['recall_roi_%s' % str(cur_thresh)] = 0
        metric['recall_rcnn_%s' % str(cur_thresh)] = 0
    dataset = dataloader.dataset
    class_names = dataset.class_names
    det_annos = []
    logger.info('*************** EPOCH %s EVALUATION *****************' % epoch_id)
    if dist_test:
        num_gpus = torch.cuda.device_count()
        local_rank = cfg.LOCAL_RANK % num_gpus
        model = torch.nn.parallel.DistributedDataParallel(
            model, device_ids=[local_rank], broadcast_buffers=False)
    model.eval()
    if cfg.LOCAL_RANK == 0:
        progress_bar = tqdm.tqdm(total=len(dataloader), leave=True, desc='eval', dynamic_ncols=True)
    start_time = time.time()
    run_time = 0
    query_batch = {}
    target_batch = {}
    # pred_dicts= {}
    # ret_dict = {}
    # print("FIRST FOWARD PASS")
    for i, batch_dict in enumerate(dataloader):
        # print("BATCH DICT",int(batch_dict['frame_id']))
        run_start_time = time.time()
        load_data_to_gpu(batch_dict)
        # Hard stop once frame 451 is reached.
        if int(batch_dict['frame_id']) == 451:
            break
        # print("BATCH FRAME ID", batch_dict['frame_id'])
        if int(batch_dict['frame_id']) == 27:
            # print("TARGET LINE PASSED")
            # NOTE(review): shallow copy — tensor values are shared with batch_dict.
            target_batch = batch_dict.copy()
            # print("SIZE",len(target_batch))
        if int(batch_dict['frame_id']) == 450:
            # print("QUERY LINE PASSED")
            query_batch = batch_dict.copy()
            # print("SIZE",len(query_batch))
        # print("PRE MODEL RUN")
        if len(query_batch) != 0 and len(target_batch) != 0:
            # print("query_batch", query_batch.keys())
            print("query_batch gt boxes", query_batch['gt_boxes'])
            print("query_batch voxels", query_batch['voxels'])
            print("query_batch voxel coords", query_batch['voxel_coords'])
            with torch.no_grad():
                # print("query",len(query_batch))
                # print("target",len(target_batch))
                # NOTE(review): this model variant apparently accepts a
                # [query, target] pair instead of a single batch — confirm the
                # forward signature of the model being tested.
                pred_dicts, ret_dict = model([query_batch, target_batch])
                # pred_dicts, ret_dict = model(batch_dict)
            print(pred_dicts)
        # print("FORWARD PASS COMPLETE")
        disp_dict = {}
        run_end_time = time.time()
        run_duration = run_end_time - run_start_time
        run_time += run_duration
        # NOTE(review): pred_dicts/ret_dict are only assigned inside the
        # query+target branch above — iterations before both frames have been
        # seen will raise NameError here. Likely tolerated for this one-off
        # experiment; confirm before reusing.
        statistics_info(cfg, ret_dict, metric, disp_dict)
        annos = dataset.generate_prediction_dicts(
            batch_dict, pred_dicts, class_names,
            output_path=final_output_dir if save_to_file else None)
        det_annos += annos
        if cfg.LOCAL_RANK == 0:
            progress_bar.set_postfix(disp_dict)
            progress_bar.update()
    if cfg.LOCAL_RANK == 0:
        progress_bar.close()
    if dist_test:
        rank, world_size = common_utils.get_dist_info()
        det_annos = common_utils.merge_results_dist(det_annos, len(dataset), tmpdir=result_dir / 'tmpdir')
        metric = common_utils.merge_results_dist([metric], world_size, tmpdir=result_dir / 'tmpdir')
    else:
        world_size = 1
    logger.info('*************** Performance of EPOCH %s *****************' % epoch_id)
    # Per-sample timing normalized by the per-rank share of the dataset.
    logger.info('Run time per sample: %.4f second.' % (run_time / (len(dataloader.dataset) / world_size)))
    sec_per_example = (time.time() - start_time) / (len(dataloader.dataset) / world_size)
    logger.info('Generate label finished(sec_per_example: %.4f second).' % sec_per_example)
    if cfg.LOCAL_RANK != 0:
        return {}
    ret_dict = {}
    if dist_test:
        for key, val in metric[0].items():
            for k in range(1, world_size):
                metric[0][key] += metric[k][key]
        metric = metric[0]
    gt_num_cnt = metric['gt_num']
    for cur_thresh in cfg.MODEL.POST_PROCESSING.RECALL_THRESH_LIST:
        # max(gt_num_cnt, 1) guards against division by zero on empty GT.
        cur_roi_recall = metric['recall_roi_%s' % str(cur_thresh)] / max(
            gt_num_cnt, 1)
        cur_rcnn_recall = metric['recall_rcnn_%s' % str(cur_thresh)] / max(
            gt_num_cnt, 1)
        logger.info('recall_roi_%s: %f' % (cur_thresh, cur_roi_recall))
        logger.info('recall_rcnn_%s: %f' % (cur_thresh, cur_rcnn_recall))
        ret_dict['recall/roi_%s' % str(cur_thresh)] = cur_roi_recall
        ret_dict['recall/rcnn_%s' % str(cur_thresh)] = cur_rcnn_recall
    total_pred_objects = 0
    for anno in det_annos:
        total_pred_objects += anno['name'].__len__()
    logger.info('Average predicted number of objects(%d samples): %.3f' % (
        len(det_annos), total_pred_objects / max(1, len(det_annos))))
    with open(result_dir / 'result.pkl', 'wb') as f:
        pickle.dump(det_annos, f)
    # Dataset-level AP evaluation deliberately disabled in this variant.
    # result_str, result_dict = dataset.evaluation(
    #     det_annos, class_names,
    #     eval_metric=cfg.MODEL.POST_PROCESSING.EVAL_METRIC,
    #     output_path=final_output_dir
    # )
    # logger.info(result_str)
    # ret_dict.update(result_dict)
    # logger.info('Result is save to %s' % result_dir)
    logger.info('****************Evaluation done.*****************')
    return ret_dict
def eval_one_epoch_for_semantic(cfg, model, dataloader, epoch_id, logger, dist_test=False, save_to_file=False, result_dir=None):
    """Evaluate one epoch of image semantic segmentation.

    Collects argmax class maps (and labels when present) for every sample and
    feeds them to dataset.evaluation(preds, targets). When save_to_file is set,
    the raw per-class score maps are additionally saved as .npy files.

    Args:
        cfg: global config; reads LOCAL_RANK.
        model: segmentation model; its forward returns the updated batch_dict
            containing 'pred_image_seg' (and optionally 'image_seg_label').
        dataloader: evaluation data loader.
        epoch_id: epoch label used only for logging.
        logger: logger instance.
        dist_test: wrap model in DistributedDataParallel (result merging across
            ranks is currently disabled below).
        save_to_file: dump raw score maps under final_result/data.
        result_dir: pathlib.Path output root (must not be None).

    Returns:
        dict from dataset.evaluation on rank 0; empty dict otherwise.
    """
    result_dir.mkdir(parents=True, exist_ok=True)
    final_output_dir = result_dir / 'final_result' / 'data'
    if save_to_file:
        final_output_dir.mkdir(parents=True, exist_ok=True)
    dataset = dataloader.dataset
    class_names = dataset.class_names
    det_annos = []
    logger.info('*************** EPOCH %s EVALUATION *****************' % epoch_id)
    if dist_test:
        num_gpus = torch.cuda.device_count()
        local_rank = cfg.LOCAL_RANK % num_gpus
        model = torch.nn.parallel.DistributedDataParallel(
            model, device_ids=[local_rank], broadcast_buffers=False)
    model.eval()
    if cfg.LOCAL_RANK == 0:
        progress_bar = tqdm.tqdm(total=len(dataloader), leave=True, desc='eval', dynamic_ncols=True)
    start_time = time.time()
    preds = []
    targets = []
    for i, batch_dict in enumerate(dataloader):
        load_data_to_gpu(batch_dict)
        with torch.no_grad():
            batch_dict = model(batch_dict)
        if 'image_seg_label' in batch_dict:
            target = batch_dict['image_seg_label'].cpu().numpy().astype(np.int32)
        else:
            # No labels available (e.g. test split): keep placeholders so
            # preds/targets stay aligned per sample.
            target = [None] * batch_dict['batch_size']
        pred = batch_dict['pred_image_seg'].cpu().numpy()
        # PERF: only retain the raw score maps when they will be written out;
        # the old code copied the full prediction tensor every batch.
        pred_copy = pred.copy() if save_to_file else None
        pred = np.argmax(pred, axis=1).astype(np.int32)
        preds.extend([pred[b] for b in range(batch_dict['batch_size'])])
        targets.extend([target[b] for b in range(batch_dict['batch_size'])])
        if save_to_file:
            for b in range(batch_dict['batch_size']):
                filename = final_output_dir / f'{batch_dict["sample_id"][b]}.npy'
                np.save(filename, pred_copy[b].astype(np.float32))
        disp_dict = {}
        if cfg.LOCAL_RANK == 0:
            progress_bar.set_postfix(disp_dict)
            progress_bar.update()
    if cfg.LOCAL_RANK == 0:
        progress_bar.close()
    # Cross-rank result merging intentionally disabled for this variant.
    # if dist_test:
    #     rank, world_size = common_utils.get_dist_info()
    #     det_annos = common_utils.merge_results_dist(det_annos, len(dataset), tmpdir=result_dir / 'tmpdir')
    #     metric = common_utils.merge_results_dist([metric], world_size, tmpdir=result_dir / 'tmpdir')
    logger.info('*************** Performance of EPOCH %s *****************' % epoch_id)
    sec_per_example = (time.time() - start_time) / len(dataloader.dataset)
    logger.info('Generate label finished(sec_per_example: %.4f second).' % sec_per_example)
    if cfg.LOCAL_RANK != 0:
        return {}
    ret_dict = {}
    # if dist_test:
    #     for key, val in metric[0].items():
    #         for k in range(1, world_size):
    #             metric[0][key] += metric[k][key]
    #     metric = metric[0]
    # gt_num_cnt = metric['gt_num']
    # NOTE: det_annos is never populated here (segmentation path); the pickle
    # is kept for interface parity with the detection eval.
    with open(result_dir / 'result.pkl', 'wb') as f:
        pickle.dump(det_annos, f)
    result_str, result_dict = dataset.evaluation(
        preds, targets,
    )
    logger.info(result_str)
    ret_dict.update(result_dict)
    logger.info('Result is save to %s' % result_dir)
    logger.info('****************Evaluation done.*****************')
    return ret_dict