Example #1
	def __init__(self, node_name=''):

		self.rosparam_(node_name)

		self.create_logger_(node_name)
		self.logger.info('ROS parameters loaded successfully')
		self.logger.info(
			'********************** Start logging **********************')
		save_config_to_file(cfg, logger=self.logger)
		self.logger.info('ROS buffer_len: {}'.format(self.buffer_len))
		self.logger.info('ROS lidar_topic: {}'.format(self.lidar_topic))
		self.logger.info('ROS debug_flag: {}'.format(self.debug_flag))
		self.logger.info(
			'ROS depth_threshold: {}'.format(self.depth_threshold))
		self.logger.info(
			'ROS score_threshold: {}'.format(self.score_threshold))
		self.logger.info('ROS model_checkpoint: {}'.format(self.model_checkpoint))

		self.model = PointRCNN(num_classes=self.num_class,
							   use_xyz=True, mode='TEST')
		self.model.cuda()
		load_checkpoint(
			self.model,
			filename=str(self.base_dir / self.model_checkpoint),
			logger=self.logger)
		self.model.eval()

		self.logger.info('Model initialization complete')
Example #2
def repeat_eval_ckpt(root_result_dir, ckpt_dir):
    root_result_dir = os.path.join(root_result_dir, 'eval', 'eval_all_' + args.extra_tag)
    os.makedirs(root_result_dir, exist_ok=True)

    log_file = os.path.join(root_result_dir, 'log_eval_all_%s.txt' % cfg.TEST.SPLIT)
    logger = create_logger(log_file)
    logger.info('**********************Start logging**********************')

    # save config
    for key, val in vars(args).items():
        logger.info("{:16} {}".format(key, val))
    save_config_to_file(cfg, logger=logger)

    # create dataloader & network
    test_loader = create_dataloader(logger)
    model = PointRCNN(num_classes=test_loader.dataset.num_class, use_xyz=True, mode='TEST')
    model.cuda()

    # copy important files to backup
    backup_dir = os.path.join(root_result_dir, 'backup_files')
    os.makedirs(backup_dir, exist_ok=True)
    os.system('cp *.py %s/' % backup_dir)
    os.system('cp ../lib/net/*.py %s/' % backup_dir)
    os.system('cp ../lib/datasets/kitti_rcnn_dataset.py %s/' % backup_dir)

    # evaluated ckpt record
    ckpt_record_file = os.path.join(root_result_dir, 'eval_list_%s.txt' % cfg.TEST.SPLIT)
    with open(ckpt_record_file, 'a'):
        pass

    # tensorboard log
    tb_log = SummaryWriter(log_dir=os.path.join(root_result_dir, 'tensorboard_%s' % cfg.TEST.SPLIT))

    while True:
        # check whether there is checkpoint which is not evaluated
        cur_epoch_id, cur_ckpt = get_no_evaluated_ckpt(ckpt_dir, ckpt_record_file)
        if cur_epoch_id == -1 or int(float(cur_epoch_id)) < args.start_epoch:
            wait_second = 30
            print('Wait %s seconds for the next check: %s' % (wait_second, ckpt_dir))
            time.sleep(wait_second)
            continue

        # load checkpoint
        train_utils.load_checkpoint(model, filename=cur_ckpt)

        # start evaluation
        cur_result_dir = os.path.join(root_result_dir, 'epoch_%s' % cur_epoch_id, cfg.TEST.SPLIT)
        tb_dict = eval_one_epoch(model, test_loader, cur_epoch_id, cur_result_dir, logger)

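        # write scalars only for integer epoch ids; fractional checkpoint tags are skipped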
        step = int(float(cur_epoch_id))
        if step == float(cur_epoch_id):
            for key, val in tb_dict.items():
                tb_log.add_scalar(key, val, step)

        # record this epoch which has been evaluated
        with open(ckpt_record_file, 'a') as f:
            print('%s' % cur_epoch_id, file=f)
        logger.info('Epoch %s has been evaluated' % cur_epoch_id)
Example #3
def eval_single_ckpt(root_result_dir):
    root_result_dir = os.path.join(root_result_dir, 'eval')
    # set epoch_id and output dir
    num_list = re.findall(r'\d+', args.ckpt) if args.ckpt is not None else []
    epoch_id = num_list[-1] if len(num_list) > 0 else 'no_number'
    root_result_dir = os.path.join(root_result_dir, 'epoch_%s' % epoch_id,
                                   cfg.TEST.SPLIT)
    if args.test:
        root_result_dir = os.path.join(root_result_dir, 'test_mode')

    if args.extra_tag != 'default':
        root_result_dir = os.path.join(root_result_dir, args.extra_tag)
    root_result_dir = os.path.join(
        root_result_dir,
        args.depth_ckpt.split('/')[-1].split('.')[0])
    os.makedirs(root_result_dir, exist_ok=True)

    log_file = os.path.join(root_result_dir, 'log_eval_one.txt')
    logger = create_logger(log_file)
    logger.info('**********************Start logging**********************')
    for key, val in vars(args).items():
        logger.info("{:16} {}".format(key, val))
    save_config_to_file(cfg, logger=logger)

    # create dataloader & network
    test_loader = create_dataloader(logger)
    model = PointRCNN(num_classes=test_loader.dataset.num_class,
                      use_xyz=True,
                      mode='TEST')
    model.cuda()

    # Create depth model
    depth_model = DepthModel(maxdepth=80,
                             maxdisp=192,
                             down=2,
                             pretrain=args.depth_ckpt,
                             save_tag=cfg.TAG,
                             mode='TEST',
                             mgpus=args.mgpus)

    # copy important files to backup
    backup_dir = os.path.join(root_result_dir, 'backup_files')
    os.makedirs(backup_dir, exist_ok=True)
    os.system('cp *.py %s/' % backup_dir)
    os.system('cp ../lib/net/*.py %s/' % backup_dir)
    os.system('cp ../lib/datasets/kitti_rcnn_dataset.py %s/' % backup_dir)

    # load checkpoint
    load_ckpt_based_on_args(model, logger)

    # start evaluation
    eval_one_epoch(model, depth_model, test_loader, epoch_id, root_result_dir,
                   logger)
Example #4
def eval_all_ckpt(root_result_dir):
    root_result_dir = os.path.join('/'.join(args.rcnn_ckpt.split('/')[:-1]),
                                   'all', 'eval')
    os.makedirs(root_result_dir, exist_ok=True)
    # set epoch_id and output dir
    ckpt_dir = '/'.join(args.rcnn_ckpt.split('/')[:-1])
    ckpt_list = os.listdir(ckpt_dir)
    ckpt_list = [x for x in ckpt_list if x[-4:] == '.pth']
    ckpt_list.sort()
    BEST_precision = 0.
    BEST_iter = None
    log_file = os.path.join(root_result_dir, 'log_eval_all.txt')
    logger = create_logger(log_file)
    logger.info('**********************Start logging**********************')
    for key, val in vars(args).items():
        logger.info("{:16} {}".format(key, val))
    save_config_to_file(cfg, logger=logger)

    # create dataloader & network
    test_loader = create_dataloader(logger)
    model = PointRCNN(num_classes=test_loader.dataset.num_class,
                      use_xyz=True,
                      mode='TEST')
    model.cuda()

    for ckpt in tqdm.tqdm(reversed(ckpt_list[25:])):
        args.rcnn_ckpt = os.path.join(ckpt_dir, ckpt)
        num_list = re.findall(
            r'\d+', args.rcnn_ckpt) if args.rcnn_ckpt is not None else []
        iter_id = num_list[-1] if len(num_list) > 0 else 'no_number'

        cur_root_result_dir = os.path.join(root_result_dir, cfg.TEST.SPLIT)
        if args.test:
            cur_root_result_dir = os.path.join(root_result_dir, 'test_mode')

        if args.extra_tag != 'default':
            cur_root_result_dir = os.path.join(cur_root_result_dir,
                                               args.extra_tag)
        os.makedirs(cur_root_result_dir, exist_ok=True)

        # load checkpoint
        load_ckpt_based_on_args(model, logger)

        precision, _, _ = eval_one_epoch_joint(model, test_loader, iter_id,
                                               cur_root_result_dir, logger)
        if precision > BEST_precision:
            BEST_precision = precision
            BEST_iter = iter_id
        print('best_precision: %.4f, best_iter: %s' %
              (BEST_precision, BEST_iter))
        print(args.rcnn_ckpt[-4:])  # debug: extension of the current checkpoint file
Example #5
def eval_single_ckpt(root_result_dir):
    root_result_dir = os.path.join(root_result_dir, 'eval')

    # set epoch_id and output dir
    num_list = re.findall(r'\d+', args.ckpt) if args.ckpt is not None else []
    epoch_id = num_list[-1] if len(num_list) > 0 else 'no_number'
    root_result_dir = os.path.join(root_result_dir, 'epoch_%s' % epoch_id,
                                   cfg.TEST.SPLIT)

    # Check whether TEST mode is on
    if args.test:
        root_result_dir = os.path.join(root_result_dir, 'test_mode')

    if args.extra_tag != 'default':
        root_result_dir = os.path.join(root_result_dir, args.extra_tag)
    # Create root_result_dir if it doesn't exist
    os.makedirs(root_result_dir, exist_ok=True)

    # Initialize the log file
    log_file = os.path.join(root_result_dir, 'log_eval_one.txt')
    logger = create_logger(log_file)
    logger.info('**********************Start logging**********************')
    for key, val in vars(args).items():
        logger.info("{:16} {}".format(key, val))
    save_config_to_file(cfg, logger=logger)

    # create dataloader & network
    test_loader = create_dataloader(logger)
    model = PointRCNN(num_classes=test_loader.dataset.num_class,
                      use_xyz=True,
                      mode='TEST')
    model.cuda()

    # copy important files to backup (disabled below, since the backups take extra disk space)
    '''
    backup_dir = os.path.join(root_result_dir, 'backup_files')
    os.makedirs(backup_dir, exist_ok=True)
    os.system('cp *.py %s/' % backup_dir)
    os.system('cp ../lib/net/*.py %s/' % backup_dir)
    os.system('cp ../lib/datasets/kitti_rcnn_dataset.py %s/' % backup_dir)
    '''

    # load checkpoint
    load_ckpt_based_on_args(model, logger)

    # start evaluation
    eval_one_epoch(model, test_loader, epoch_id, root_result_dir, logger)
Example #6
def eval_single_ckpt(root_result_dir):
    root_result_dir = os.path.join(root_result_dir, 'eval')
    # set epoch_id and output dir
    num_list = re.findall(r'\d+', args.ckpt) if args.ckpt is not None else []
    epoch_id = num_list[-1] if len(num_list) > 0 else 'no_number'
    root_result_dir = os.path.join(root_result_dir, 'epoch_%s' % epoch_id, cfg.TEST.SPLIT)
    if args.test:
        root_result_dir = os.path.join(root_result_dir, 'test_mode')

    if args.extra_tag != 'default':
        root_result_dir = os.path.join(root_result_dir, args.extra_tag)
    os.makedirs(root_result_dir, exist_ok=True)

    log_file = os.path.join(root_result_dir, 'log_eval_one.txt')
    logger = create_logger(log_file)
    logger.info('**********************Start logging**********************')
    for key, val in vars(args).items():
        logger.info("{:16} {}".format(key, val))
    save_config_to_file(cfg, logger=logger)

    # create dataloader & network
    while True:
        try:
            test_loader = create_dataloader(logger)
            while test_loader.dataset.npoints < 100 or len(test_loader.dataset.sample_id_list) == 0:
                test_loader = create_dataloader(logger)
            model = PointRCNN(num_classes=test_loader.dataset.num_class, use_xyz=True, mode='TEST')
            model.cuda()

            # copy important files to backup
            backup_dir = os.path.join(root_result_dir, 'backup_files')
            os.makedirs(backup_dir, exist_ok=True)
            os.system('cp *.py %s/' % backup_dir)
            os.system('cp ../lib/net/*.py %s/' % backup_dir)
            os.system('cp ../lib/datasets/kitti_rcnn_dataset.py %s/' % backup_dir)

            # load checkpoint
            load_ckpt_based_on_args(model, logger)

            # start evaluation
            eval_one_epoch(model, test_loader, epoch_id, root_result_dir, logger)
        except Exception as e:
            print("No point cloud found. Waiting for the next frame.")
            print("Sleeping for 2 s before retrying.")
            print(e)
            time.sleep(2)
Example #7
def eval_single_ckpt(root_result_dir):
    root_result_dir = os.path.join(root_result_dir, 'eval')
    # set epoch_id and output dir
    num_list = re.findall(r'\d+', args.ckpt) if args.ckpt is not None else []
    epoch_id = num_list[-1] if len(num_list) > 0 else 'no_number'
    root_result_dir = os.path.join(root_result_dir, 'epoch_%s' % epoch_id,
                                   cfg.TEST.SPLIT)
    if args.test:
        root_result_dir = os.path.join(root_result_dir, 'test_mode')

    if args.extra_tag != 'default':
        root_result_dir = os.path.join(root_result_dir, args.extra_tag)
    os.makedirs(root_result_dir, exist_ok=True)

    log_file = os.path.join(root_result_dir, 'log_eval_one.txt')
    logger = create_logger(log_file)
    logger.info('**********************Start logging**********************')
    for key, val in vars(args).items():
        logger.info("{:16} {}".format(key, val))
    save_config_to_file(cfg, logger=logger)

    # create dataloader & network
    test_loader = create_dataloader(logger)
    model = PointRCNN(num_classes=test_loader.dataset.num_class,
                      use_xyz=True,
                      mode='TEST')
    model.cuda()

    # copy important files to backup
    backup_dir = os.path.join(root_result_dir, 'backup_files')
    os.makedirs(backup_dir, exist_ok=True)
    os.system('cp *.py %s/' % backup_dir)
    os.system('cp ../lib/net/*.py %s/' % backup_dir)
    os.system('cp ../lib/datasets/kitti_rcnn_dataset.py %s/' % backup_dir)

    # load checkpoint
    load_ckpt_based_on_args(model, logger)

    # start evaluation
    ret_dict = eval_one_epoch(model, test_loader, epoch_id, root_result_dir,
                              logger)
    with open(os.path.join(root_result_dir, 'evaldict.json'), 'w') as json_file:
        json.dump(ret_dict, json_file)
Example #8
    def __init__(self, use_masked=True):
        super().__init__()
        np.random.seed(1024)

        # label path
        self.label_root = os.path.join(HOME_DIR,
                                       'data/KITTI/object/training/label_2/')
        # load config
        config_path = os.path.join(HOME_DIR, 'tools/configs/pg.json')
        self.config = load_config(config_path)
        self.npoints = cfg.RPN.NUM_POINTS

        root_result_dir = os.path.join('../', 'output', 'rcnn', cfg.TAG)
        ckpt_dir = os.path.join('../', 'output', 'rcnn', cfg.TAG, 'ckpt')

        # create logger
        logger = create_logger(os.path.join(OUTPUT_DIR, 'log_pg.txt'))
        logger.info(
            '**********************Start logging**********************')
        for key, val in self.config.items():
            logger.info("{:16} {}".format(key, val))
        save_config_to_file(cfg, logger=logger)

        # create PointRCNN dataloader & network
        self.test_loader = create_dataloader(self.config, logger)
        self.test_iter = iter(self.test_loader)
        self.model = PointRCNN(num_classes=self.test_loader.dataset.num_class,
                               use_xyz=True,
                               mode='TEST')

        self.use_masked = use_masked
        # load checkpoint
        load_ckpt_based_on_cfg(self.config, self.model, logger)

        # For multi-GPU inference, wrap the model in DataParallel:
        # self.model = torch.nn.DataParallel(self.model)
        self.model.cuda()
        self.model.eval()

        self.data = None
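Example #9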
def eval_single_ckpt(root_result_dir):
    root_result_dir = os.path.join(root_result_dir, 'eval')

    # set epoch_id and output dir
    epoch_id = 'no_number'
    root_result_dir = os.path.join(root_result_dir, 'epoch_%s' % epoch_id,
                                   cfg.TEST.SPLIT)

    # Check whether TEST mode is on
    if args.test:
        root_result_dir = os.path.join(root_result_dir, 'test_mode')

    # Create root_result_dir if it doesn't exist
    os.makedirs(root_result_dir, exist_ok=True)

    # Initialize the log file
    log_file = os.path.join(root_result_dir, 'log_eval_one.txt')
    logger = create_logger(log_file)
    logger.info('**********************Start logging**********************')
    for key, val in vars(args).items():
        logger.info("{:16} {}".format(key, val))
    save_config_to_file(cfg, logger=logger)

    # create network
    model = PointRCNN(num_classes=2, use_xyz=True, mode='TEST')
    model.cuda()
    # load checkpoint
    load_ckpt_based_on_args(model, logger)

    # Check whether a single input file was given
    if args.single_file is not None:
        eval_one_epoch_joint_single_file(model, get_lidar(args.single_file),
                                         args.single_file, root_result_dir,
                                         logger)

    else:
        # create dataloader & network
        test_loader = create_dataloader(logger)
        # start evaluation
        eval_one_epoch(model, test_loader, epoch_id, root_result_dir, logger)
Example #10
    def __init__(self):
        # dnn
        cfg_from_file(cfg_file)
        cfg.RCNN.ENABLED = True
        cfg.RPN.ENABLED = cfg.RPN.FIXED = True
        cfg.RPN.LOC_XZ_FINE = False
        self.pc_roi = [[-25, 25], [-3, 2], [-25, 25]]
        self.down_sample = {'axis': int(0), 'depth': self.pc_roi[0][1] / 2}  # [axis,depth]
        self.mode = 'TEST'
        with torch.no_grad():
            self.model = PointRCNN(num_classes=2, use_xyz=True, mode=self.mode)
            self.model.cuda()
            self.model.eval()
            load_checkpoint(model=self.model, optimizer=None, filename=pointrcnn_weight)

        # ros
        self.pc_sub = rospy.Subscriber(pc_topic, PointCloud2, self.pc_cb, queue_size=1, buff_size=2 ** 24)

        self.pc_pub = rospy.Publisher(pack_name + "/networks_input", PointCloud2, queue_size=1) if is_viz else None
        self.mk_pub = rospy.Publisher(pack_name + "/networks_output", MarkerArray, queue_size=1) if is_viz else None
        self.Tr_velo_kitti_cam = np.array([0.0, - 1.0, 0.0, 0.0,
                                           0.0, 0.0, -1.0, 1.5,
                                           1.0, 0.0, 0.0, 0.0,
                                           0.0, 0.0, 0.0, 1.0]).reshape(4, 4) if is_tf else np.identity(4)
Example #11
class ROSPointRCNN(object):
    def __init__(self):
        # dnn
        cfg_from_file(cfg_file)
        cfg.RCNN.ENABLED = True
        cfg.RPN.ENABLED = cfg.RPN.FIXED = True
        cfg.RPN.LOC_XZ_FINE = False
        self.pc_roi = [[-25, 25], [-3, 2], [-25, 25]]
        self.down_sample = {'axis': int(0), 'depth': self.pc_roi[0][1] / 2}  # [axis,depth]
        self.mode = 'TEST'
        with torch.no_grad():
            self.model = PointRCNN(num_classes=2, use_xyz=True, mode=self.mode)
            self.model.cuda()
            self.model.eval()
            load_checkpoint(model=self.model, optimizer=None, filename=pointrcnn_weight)

        # ros
        self.pc_sub = rospy.Subscriber(pc_topic, PointCloud2, self.pc_cb, queue_size=1, buff_size=2 ** 24)

        self.pc_pub = rospy.Publisher(pack_name + "/networks_input", PointCloud2, queue_size=1) if is_viz else None
        self.mk_pub = rospy.Publisher(pack_name + "/networks_output", MarkerArray, queue_size=1) if is_viz else None
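        # velodyne -> KITTI camera axis permutation (assumed ~1.5 m sensor height); identity when is_tf is False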
        self.Tr_velo_kitti_cam = np.array([0.0, - 1.0, 0.0, 0.0,
                                           0.0, 0.0, -1.0, 1.5,
                                           1.0, 0.0, 0.0, 0.0,
                                           0.0, 0.0, 0.0, 1.0]).reshape(4, 4) if is_tf else np.identity(4)

    def pc_cb(self, data):
        pts_input = self.extract_networks_input_from_pc2rosmsg(data)
        if self.pc_pub is not None:
            self.pc_pub.publish(numpy2pc2(pts_input, data.header.frame_id))

        np.random.seed(666)
        with torch.no_grad():
            # prepare the input tensor
            MEAN_SIZE = torch.from_numpy(cfg.CLS_MEAN_SIZE[0]).cuda()
            inputs = torch.from_numpy(pts_input).cuda(non_blocking=True).float()
            inputs = torch.unsqueeze(inputs, 0)

            # model inference
            input_data = {'pts_input': inputs}
            ret_dict = self.model(input_data)

            # parse the results
            batch_size = 1
            roi_scores_raw = ret_dict['roi_scores_raw']  # (B, M) proposal confidence scores
            roi_boxes3d = ret_dict['rois']  # (B, M, 7) proposal boxes
            seg_result = ret_dict['seg_result'].long()  # (B, N) foreground point segmentation

            rcnn_cls = ret_dict['rcnn_cls'].view(batch_size, -1, ret_dict['rcnn_cls'].shape[1])  # (B, M, n) bin classification results
            rcnn_reg = ret_dict['rcnn_reg'].view(batch_size, -1, ret_dict['rcnn_reg'].shape[1])  # (B, M, C) residual regression results

            # decode the 3D bounding boxes
            anchor_size = MEAN_SIZE
            pred_boxes3d = decode_bbox_target(roi_boxes3d.view(-1, 7), rcnn_reg.view(-1, rcnn_reg.shape[-1]),
                                              anchor_size=anchor_size,
                                              loc_scope=cfg.RCNN.LOC_SCOPE,
                                              loc_bin_size=cfg.RCNN.LOC_BIN_SIZE,
                                              num_head_bin=cfg.RCNN.NUM_HEAD_BIN,
                                              get_xz_fine=True, get_y_by_bin=cfg.RCNN.LOC_Y_BY_BIN,
                                              loc_y_scope=cfg.RCNN.LOC_Y_SCOPE, loc_y_bin_size=cfg.RCNN.LOC_Y_BIN_SIZE,
                                              get_ry_fine=True).view(batch_size, -1, 7)

            # cfg.RCNN.SCORE_THRESH is the confidence threshold
            if rcnn_cls.shape[2] == 1:
                batch_raw_scores = rcnn_cls  # (B, M, 1)
                batch_norm_scores = torch.sigmoid(batch_raw_scores)  # (B,M,1)
                batch_pred_classes = (batch_norm_scores > cfg.RCNN.SCORE_THRESH).long()  # (B,M,1)
            else:
                batch_pred_classes = torch.argmax(rcnn_cls, dim=1).view(-1)
                batch_raw_scores = rcnn_cls[:, batch_pred_classes]
                batch_norm_scores = F.softmax(rcnn_cls, dim=1)[:, batch_pred_classes]

            # scores threshold
            inds = batch_norm_scores > cfg.RCNN.SCORE_THRESH
            for batch in range(batch_size):
                inds_in_each_batch = inds[batch].view(-1)
                if inds_in_each_batch.sum() == 0:  # no 3D box above the threshold in this sample
                    continue

                pred_boxes3d_in_each_batch = pred_boxes3d[batch, inds_in_each_batch]
                raw_scores_in_each_batch = batch_raw_scores[batch, inds_in_each_batch]
                norm_scores_in_each_batch = batch_norm_scores[batch, inds_in_each_batch]

                # non-maximum suppression
                boxes_bev_in_each_batch = kitti_utils.boxes3d_to_bev_torch(pred_boxes3d_in_each_batch)
                keep_idx = iou3d_utils.nms_gpu(boxes_bev_in_each_batch,
                                               raw_scores_in_each_batch,
                                               cfg.RCNN.NMS_THRESH).view(-1)
                pred_boxes3d_in_each_batch = pred_boxes3d_in_each_batch[keep_idx]
                raw_scores_in_each_batch = raw_scores_in_each_batch[keep_idx]

                output = {'boxes3d': pred_boxes3d_in_each_batch.cpu().numpy(),
                          'scores': raw_scores_in_each_batch.cpu().numpy()}
                self.visualize(output, data.header.frame_id)

    def visualize(self, result, frame_id):
        boxes = result['boxes3d']
        scores = result['scores']

        self.visualize_lidar_plane(boxes, frame_id)
        print("Number of detections pr msg: ", boxes.shape[0])

    def visualize_lidar_plane(self, bbox3d, frame_id):
        marker_array = MarkerArray()
        marker = Marker()
        marker.header.frame_id = frame_id
        marker.type = marker.LINE_LIST
        marker.action = marker.ADD
        marker.header.stamp = rospy.Time.now()

        # marker scale (scale y and z not used due to being linelist)
        marker.scale.x = 0.08
        # marker color
        marker.color.a = 1.0
        marker.color.r = 1.0
        marker.color.g = 1.0
        marker.color.b = 0.0

        marker.pose.position.x = 0.0
        marker.pose.position.y = 0.0
        marker.pose.position.z = 0.0

        marker.pose.orientation.x = 0.0
        marker.pose.orientation.y = 0.0
        marker.pose.orientation.z = 0.0
        marker.pose.orientation.w = 1.0
        marker.points = []
        corner_for_box_list = [0, 1, 0, 3, 2, 3, 2, 1, 4, 5, 4, 7, 6, 7, 6, 5, 3, 7, 0, 4, 1, 5, 2, 6]
        corners3d = kitti_utils.boxes3d_to_corners3d(bbox3d)  # (N,8,3)
        for box_nr in range(corners3d.shape[0]):
            box3d_pts_3d_velo = corners3d[box_nr]  # (8,3)
            for corner in corner_for_box_list:
                p = np.array(box3d_pts_3d_velo[corner, 0:3])  # xyz of this corner
                transformed_p = transform_point(p, np.linalg.inv(self.Tr_velo_kitti_cam))
                p = Point()
                p.x = transformed_p[0]
                p.y = transformed_p[1]
                p.z = transformed_p[2]
                marker.points.append(p)
        marker_array.markers.append(marker)

        for i, m in enumerate(marker_array.markers):
            m.id = i
        self.mk_pub.publish(marker_array)
        marker_array.markers = []

    def extract_networks_input_from_pc2rosmsg(self, data):
        random_select = True

        pts_lidar = ros_numpy.point_cloud2.pointcloud2_to_xyz_array(data)
        pts_lidar = transform_pointcloud(pts_lidar, self.Tr_velo_kitti_cam)
        pts_lidar = cropped_roi(pts_lidar, self.pc_roi)
        pts_lidar = pts_lidar[:, 0:3]  # only xyz

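        # depth-based downsampling: keep all far points, randomly subsample the near ones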
        if cfg.RPN.NUM_POINTS < len(pts_lidar):
            pts_depth = pts_lidar[:, self.down_sample['axis']]
            pts_near_flag = pts_depth < self.down_sample['depth']
            far_idxs_choice = np.where(pts_near_flag == 0)[0]
            near_idxs = np.where(pts_near_flag == 1)[0]
            near_idxs_choice = np.random.choice(near_idxs, cfg.RPN.NUM_POINTS - len(far_idxs_choice), replace=False)

            choice = np.concatenate((near_idxs_choice, far_idxs_choice), axis=0) \
                if len(far_idxs_choice) > 0 else near_idxs_choice
            np.random.shuffle(choice)
        else:
            # fewer points than the network expects: keep them all
            choice = np.arange(len(pts_lidar))

        pts_input = pts_lidar[choice, :]
        return pts_input
Example #12
    # copy important files to backup
    backup_dir = os.path.join(root_result_dir, 'backup_files')
    os.makedirs(backup_dir, exist_ok=True)
    os.system('cp *.py %s/' % backup_dir)
    os.system('cp ../lib/net/*.py %s/' % backup_dir)
    os.system('cp ../lib/datasets/kitti_rcnn_dataset.py %s/' % backup_dir)

    # tensorboard log
    tb_log = SummaryWriter(
        log_dir=os.path.join(root_result_dir, 'tensorboard'))

    # create dataloader & network & optimizer
    train_loader, test_loader = create_dataloader(logger)
    model = PointRCNN(num_classes=train_loader.dataset.num_class,
                      use_xyz=True,
                      mode='TRAIN')
    optimizer = create_optimizer(model)

    if args.mgpus:
        model = nn.DataParallel(model)
    model.cuda()

    # load checkpoint if it is possible
    start_epoch = it = 0
    last_epoch = -1
    if args.ckpt is not None:
        pure_model = model.module if isinstance(
            model, torch.nn.DataParallel) else model
        it, start_epoch = train_utils.load_checkpoint(pure_model,
                                                      optimizer,
Example #13
    os.makedirs(backup_dir, exist_ok=True)
    os.system('cp *.py %s/' % backup_dir)
    os.system('cp ../lib/net/*.py %s/' % backup_dir)
    os.system('cp ../lib/datasets/kitti_rcnn_dataset.py %s/' % backup_dir)
    os.system('cp ../lib/datasets/kitti_boxplace_dataset.py %s/' % backup_dir)
    os.system('cp ./train_utils/train_utils.py %s/' % backup_dir)
    os.system('cp ../lib/utils/loss_utils.py %s/' % backup_dir)

    # tensorboard log
    tb_log = SummaryWriter(
        log_dir=os.path.join(root_result_dir, 'tensorboard'))

    # create dataloader & network & optimizer
    train_loader, test_loader = create_dataloader(logger)
    model = PointRCNN(num_classes=train_loader.dataset.num_class,
                      num_point=cfg.RCNN.NUM_POINTS,
                      use_xyz=True,
                      mode='TRAIN')
    optimizer = create_optimizer(model)

    if args.mgpus:
        model = nn.DataParallel(model)
    model.cuda()

    # load checkpoint if it is possible
    start_iter = it = 0
    last_iter = -1

    if args.pretrain_ckpt is not None:
        pure_model = model.module if isinstance(
            model, torch.nn.DataParallel) else model
        train_utils.load_part_ckpt(pure_model,
Example #14
class PointRCNNEnv():
    def __init__(self, use_masked=True):
        super().__init__()
        np.random.seed(1024)

        # label path
        self.label_root = os.path.join(HOME_DIR,
                                       'data/KITTI/object/training/label_2/')
        # load config
        config_path = os.path.join(HOME_DIR, 'tools/configs/pg.json')
        self.config = load_config(config_path)
        self.npoints = cfg.RPN.NUM_POINTS

        root_result_dir = os.path.join('../', 'output', 'rcnn', cfg.TAG)
        ckpt_dir = os.path.join('../', 'output', 'rcnn', cfg.TAG, 'ckpt')

        # create logger
        logger = create_logger(os.path.join(OUTPUT_DIR, 'log_pg.txt'))
        logger.info(
            '**********************Start logging**********************')
        for key, val in self.config.items():
            logger.info("{:16} {}".format(key, val))
        save_config_to_file(cfg, logger=logger)

        # create PointRCNN dataloader & network
        self.test_loader = create_dataloader(self.config, logger)
        self.test_iter = iter(self.test_loader)
        self.model = PointRCNN(num_classes=self.test_loader.dataset.num_class,
                               use_xyz=True,
                               mode='TEST')

        self.use_masked = use_masked
        # load checkpoint
        load_ckpt_based_on_cfg(self.config, self.model, logger)

        # For multi-GPU inference, wrap the model in DataParallel:
        # self.model = torch.nn.DataParallel(self.model)
        self.model.cuda()
        self.model.eval()

        self.data = None

    # def _batch_detector(self, batch_pts):
    #     """ Input a single or batch sample of point clouds, output prediction result
    #     """
    #     with torch.no_grad():
    #         self.model.eval()
    #         thresh_list = [0.1, 0.3, 0.5, 0.7, 0.9]

    def reset(self):
        """ reset env; here it is equivlent to load an image and a bin from the KITTI dataset. Set the image returned as s0

        data = {'sample_id': sample_id,
                'random_select': self.random_select,
                'pts_rect': pts_rect,
                'pts_intensity': pts_intensity,
                'gt_boxes3d': all_gt_boxes3d,
                'npoints': self.npoints,
                'image': image}
        """

        # load the data sample at the reset step
        self.data = next(self.test_iter)
        RGB_Image = self.data['image']

        return RGB_Image

    def step(self, action):
        """step [Input the sampled map, output ]
        """

        # TODO: this is where we need to
        obs_pts = self._get_obs(action)  # Here we output masked_pts as the obs
        rew = self._get_reward(obs_pts)

        # this is a one-step MDP, so done is always True
        done = True
        info = {}
        return obs_pts, rew, done, info

    def _get_reward(self, obs):
        """step [Input the sampled point cloud, output the detection success]
        """
        batch_mAP = self._eval_data(masked_pts=obs)
        return batch_mAP

    def _get_obs(self, scanning_mask):
        """Here we set next obs as the sampled point cloud 
        """
        masked_pts = self._get_pts_from_mask(scanning_mask)
        return masked_pts

    def _get_pts_from_mask(self, scanning_mask):
        """ mask pts from 2d angular map
        Input: 
            :param scanning_mask: (B, H, W)
        Return:
            :param pts: (B, N, 4)
        """

        # load ang_depth_map from dir
        ang_depth_map = self.data['angle_map']

        # expand mask 2d->3d to enable broadcast
        mask = np.expand_dims(scanning_mask, axis=3)
        masked_ang_depth_map = [
            ang_depth_map[k] * mask[k]
            for k in range(self.config['batch_size'])
        ]

        # masked_pts = masked_ang_depth_map.reshape((self.config['batch_size'], -1, 4))
        masked_pts_arr = [
            masked_pts[masked_pts[:, :, 0] > 0]
            for masked_pts in masked_ang_depth_map
        ]  # around ~(15000,4)

        adjusted_masked_pts = []
        for masked_pts in masked_pts_arr:
            if masked_pts.shape[0] <= self.npoints:
                padding = np.full((self.npoints - masked_pts.shape[0], 4), -1)
                adjusted_masked_pts.append(
                    np.concatenate((masked_pts, padding), axis=0))
            else:
                adjusted_masked_pts.append(masked_pts[:self.npoints, :])

        masked_pts = np.array(adjusted_masked_pts)
        return masked_pts

    def render(self):
        """Placeholder for the rendering capacity
        """
        raise NotImplementedError

    def _eval_data(self, masked_pts=None):
        """eval data with sampled pts
        """
        with torch.no_grad():
            MEAN_SIZE = torch.from_numpy(cfg.CLS_MEAN_SIZE[0]).cuda()
            batch_size = self.config['batch_size']

            # get valid point (projected points should be in image)
            sample_id, pts_rect, pts_intensity, gt_boxes3d, npoints, labels = (
                self.data['sample_id'], self.data['pts_rect'],
                self.data['pts_intensity'], self.data['gt_boxes3d'],
                self.data['npoints'], self.data['label'])

            cls_types = [[
                labels[k][i].cls_type for i in range(len(labels[k]))
            ] for k in range(batch_size)]

            calib = [
                self.test_loader.dataset.get_calib(idx) for idx in sample_id
            ]
            if self.use_masked:
                # use masked/sampled pts if True
                pts_rect = np.array([
                    c.lidar_to_rect(masked_pts[k][:, 0:3])
                    for k, c in enumerate(calib)
                ])
                pts_intensity = [
                    masked_pts[k][:, 3] for k in range(batch_size)
                ]
                npoints = masked_pts.shape[0]

            inputs = torch.from_numpy(pts_rect).cuda(
                non_blocking=True).float().view(self.config['batch_size'], -1,
                                                3)
            gt_boxes3d = torch.from_numpy(gt_boxes3d).cuda(non_blocking=True)
            input_data = {'pts_input': inputs}

            # model inference
            ret_dict = self.model(input_data)

            roi_scores_raw = ret_dict['roi_scores_raw']  # (B, M)
            roi_boxes3d = ret_dict['rois']  # (B, M, 7)
            # seg_result = ret_dict['seg_result'].long()  # (B, N)

            rcnn_cls = ret_dict['rcnn_cls'].view(batch_size, -1,
                                                 ret_dict['rcnn_cls'].shape[1])
            rcnn_reg = ret_dict['rcnn_reg'].view(
                batch_size, -1, ret_dict['rcnn_reg'].shape[1])  # (B, M, C)

            norm_scores = torch.sigmoid(rcnn_cls)

            # remove low confidence scores
            thresh_mask = norm_scores > cfg.RCNN.SCORE_THRESH

            # bounding box regression
            anchor_size = MEAN_SIZE

            pred_boxes3d = decode_bbox_target(
                roi_boxes3d.view(-1, 7),
                rcnn_reg.view(-1, rcnn_reg.shape[-1]),
                anchor_size=anchor_size,
                loc_scope=cfg.RCNN.LOC_SCOPE,
                loc_bin_size=cfg.RCNN.LOC_BIN_SIZE,
                num_head_bin=cfg.RCNN.NUM_HEAD_BIN,
                get_xz_fine=True,
                get_y_by_bin=cfg.RCNN.LOC_Y_BY_BIN,
                loc_y_scope=cfg.RCNN.LOC_Y_SCOPE,
                loc_y_bin_size=cfg.RCNN.LOC_Y_BIN_SIZE,
                get_ry_fine=True).view(batch_size, -1, 7)

            # select boxes (list of tensors)
            pred_boxes3d_selected = [
                pred_boxes3d[k][thresh_mask[k].view(-1)]
                for k in range(batch_size)
            ]
            raw_scores_selected = [
                roi_scores_raw[k][thresh_mask[k].view(-1)]
                for k in range(batch_size)
            ]
            norm_scores_selected = [
                norm_scores[k][thresh_mask[k].view(-1)]
                for k in range(batch_size)
            ]

            # rotated NMS
            boxes_bev_selected = [
                kitti_utils.boxes3d_to_bev_torch(bboxes)
                for bboxes in pred_boxes3d_selected
            ]
            keep_idx = [
                iou3d_utils.nms_gpu(boxes_bev_selected[k],
                                    raw_scores_selected[k],
                                    cfg.RCNN.NMS_THRESH).view(-1)
                for k in range(batch_size)
            ]
            pred_boxes3d_selected = [
                pred_boxes3d_selected[k][keep_idx[k]]
                for k in range(batch_size)
            ]
            scores_selected = [
                raw_scores_selected[k][keep_idx[k]] for k in range(batch_size)
            ]
            norm_scores_selected = [
                norm_scores_selected[k][keep_idx[k]] for k in range(batch_size)
            ]

            # want car gt_boxes
            keep_idx = [[
                i for i in range(len(cls_types[k])) if cls_types[k][i] == 'Car'
            ] for k in range(batch_size)]
            gt_boxes3d_selected = [
                gt_boxes3d[k][keep_idx[k]] for k in range(batch_size)
            ]

            # handle samples that have no car ground-truth boxes
            has_info = [k for k in range(batch_size) if len(keep_idx[k]) > 0]
            gt_boxes3d_selected = [gt_boxes3d_selected[x] for x in has_info]
            pred_boxes3d_selected = [
                pred_boxes3d_selected[x] for x in has_info
            ]
            batch_size = len(has_info)
            if batch_size == 0:
                return None

            # Intersect over union
            iou3d = [
                iou3d_utils.boxes_iou3d_gpu(gt_boxes3d_selected[k],
                                            pred_boxes3d_selected[k])
                for k in range(batch_size)
            ]

            # get the max iou for each ground truth bounding box
            gt_max_iou = [
                torch.max(iou3d[k], dim=0)[0] for k in range(batch_size)
            ]

            # get precision at each index (to get auc)
            precision_vals = []
            for k in range(batch_size):
                batch_iou = gt_max_iou[k]
                batch_precision = []
                num_correct = 0
                for i in range(len(batch_iou)):
                    if batch_iou[i] > 0.7:
                        num_correct += 1
                    batch_precision.append(num_correct / (i + 1))

                precision_vals.append(batch_precision)

            aps = []
            for k in range(batch_size):
                batch_prec = precision_vals[k]
                ap = 0
                for i in range(len(batch_prec)):
                    ap += max(batch_prec[i:])

                aps.append(ap)

            num_gt_boxes = sum([len(gt_max_iou[k]) for k in range(batch_size)])

            return sum(aps) / num_gt_boxes
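
For reference, a minimal driver for this one-step environment might look like the sketch below. It is not part of the original source: PointRCNNEnv and env.config['batch_size'] come from the class above, while the 64x512 angular-map size is an assumption.

import numpy as np

# hypothetical driver for PointRCNNEnv: one reset/step cycle per KITTI sample
env = PointRCNNEnv(use_masked=True)
rgb = env.reset()                             # s0: RGB image of the loaded sample
B, H, W = env.config['batch_size'], 64, 512   # H, W: assumed angular-map size
action = np.ones((B, H, W))                   # all-ones mask: scan everything
obs_pts, rew, done, info = env.step(action)   # obs_pts: (B, N, 4) masked points
assert done                                   # one-step MDP: episode always ends
print('batch AP:', rew)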
Example #15
    os.mkdir(save_dir)
ckpt_file = '/raid/meng/Pointcloud_Detection/PointRCNN4_weak/output/rpn/weaklyRPN0500/410_floss03_8000/ckpt/checkpoint_iter_07620.pth'
cfg_from_file(
    '/raid/meng/Pointcloud_Detection/PointRCNN1.1_weak/tools/cfgs/weaklyRPN.yaml'
)

cfg.RPN.SCORE_THRESH = 0.1
PROP_DIST = 0.3
BACKGROUND_ADDING = False
BACK_THRESH = 0.3
COSINE_DISTANCE = False
COS_THRESH = 0.3

from lib.net.point_rcnn import PointRCNN
model = PointRCNN(num_classes=data_loader.dataset.num_class,
                  use_xyz=True,
                  mode='TEST')
model.cuda()
checkpoint = torch.load(ckpt_file)
model_state = checkpoint['model_state']

update_model_state = {
    key: val
    for key, val in model_state.items() if key in model.state_dict()
}
state_dict = model.state_dict()
state_dict.update(update_model_state)
model.load_state_dict(state_dict)

update_keys = len(update_model_state)
model.eval()
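
As a follow-up sanity check (a sketch using the variables above, not in the original source):

# report how many checkpoint tensors actually matched the model
total_keys = len(model.state_dict())
print('Matched %d/%d parameter tensors from %s' % (update_keys, total_keys, ckpt_file))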
Example #16
    # copy important files to backup
    backup_dir = os.path.join(root_result_dir, 'backup_files')
    os.makedirs(backup_dir, exist_ok=True)
    os.system('cp *.py %s/' % backup_dir)
    os.system('cp ../lib/net/*.py %s/' % backup_dir)
    os.system('cp ../lib/datasets/kitti_rcnn_dataset.py %s/' % backup_dir)

    # tensorboard log
    tb_log = SummaryWriter(
        log_dir=os.path.join(root_result_dir, 'tensorboard'))

    # create dataloader & network & optimizer
    train_loader, test_loader = create_dataloader(logger)
    model = PointRCNN(num_classes=train_loader.dataset.num_class,
                      use_xyz=True,
                      mode='TRAIN').to(device)
    optimizer = create_optimizer(model)

    # if args.mgpus:
    #     model = nn.DataParallel(model)
    # model.cuda()

    model, optimizer = amp.initialize(model, optimizer, opt_level="O2")

    # load checkpoint if it is possible
    start_epoch = it = 0
    last_epoch = -1
    if args.ckpt is not None:
        pure_model = model.module if isinstance(
            model, torch.nn.DataParallel) else model