def filter_points_in_ann(self, ann_token: str, wlh_factor: float = 3.0):
    # Zero out z so the containment test is effectively 2D (bird's-eye view).
    points_xyz = np.vstack(
        (self.points[:2, :], np.zeros(self.nbr_points())))
    mask = points_in_box(box=self.box(ann_token),
                         points=points_xyz,
                         wlh_factor=wlh_factor)
    self.points = self.points[:, mask]  # self.points has shape 6 x N
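The snippet above relies on the points_in_box contract from the nuScenes devkit: it takes a Box and a 3 x N array of points and returns a boolean mask of length N. A minimal self-contained sketch of that contract (the box and points below are illustrative values, not part of the original snippet):

import numpy as np
from pyquaternion import Quaternion
from nuscenes.utils.data_classes import Box
from nuscenes.utils.geometry_utils import points_in_box

# A 2 x 2 x 2 axis-aligned box centred at the origin.
box = Box(center=[0.0, 0.0, 0.0], size=[2.0, 2.0, 2.0],
          orientation=Quaternion(axis=(0, 0, 1), angle=0.0))
# 3 x 2 array of points: the first inside the box, the second outside.
points = np.array([[0.0, 1.5],
                   [0.0, 0.0],
                   [0.0, 0.0]])
mask = points_in_box(box, points, wlh_factor=1.0)
print(mask)  # expected: [ True False]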
Example 2
    def test_points_in_box(self):
        """ Test the box.in_box method. """

        vel = (np.nan, np.nan, np.nan)

        def qyaw(yaw):
            return Quaternion(axis=(0, 0, 1), angle=yaw)

        # Check points inside box
        box = Box([0.0, 0.0, 0.0], [2.0, 2.0, 0.0], qyaw(0.0), 1, 2.0, vel)
        points = np.array([[0.0, 0.0, 0.0], [0.5, 0.5, 0.0]]).transpose()
        mask = points_in_box(box, points, wlh_factor=1.0)
        self.assertEqual(mask.all(), True)

        # Check points outside box: the second point lies outside, so mask.all() is False
        box = Box([0.0, 0.0, 0.0], [2.0, 2.0, 0.0], qyaw(0.0), 1, 2.0, vel)
        points = np.array([[0.1, 0.0, 0.0], [0.5, -1.1, 0.0]]).transpose()
        mask = points_in_box(box, points, wlh_factor=1.0)
        self.assertEqual(mask.all(), False)

        # Check corner cases
        box = Box([0.0, 0.0, 0.0], [2.0, 2.0, 0.0], qyaw(0.0), 1, 2.0, vel)
        points = np.array([[-1.0, -1.0, 0.0], [1.0, 1.0, 0.0]]).transpose()
        mask = points_in_box(box, points, wlh_factor=1.0)
        self.assertEqual(mask.all(), True)

        # Check rotation (45 degs) and translation (by [1,1])
        rot = 45
        trans = [1.0, 1.0]
        box = Box([0.0 + trans[0], 0.0 + trans[1], 0.0], [2.0, 2.0, 0.0],
                  qyaw(rot / 180.0 * np.pi), 1, 2.0, vel)
        points = np.array([[0.70 + trans[0], 0.70 + trans[1], 0.0],
                           [0.71 + trans[0], 0.71 + trans[1], 0.0]]).transpose()
        mask = points_in_box(box, points, wlh_factor=1.0)
        self.assertEqual(mask[0], True)
        self.assertEqual(mask[1], False)

        # Check 3d box
        box = Box([0.0, 0.0, 0.0], [2.0, 2.0, 2.0], qyaw(0.0), 1, 2.0, vel)
        points = np.array([[0.0, 0.0, 0.0], [0.5, 0.5, 0.5]]).transpose()
        mask = points_in_box(box, points, wlh_factor=1.0)
        self.assertEqual(mask.all(), True)

        # Check wlh factor
        for wlh_factor in [0.5, 1.0, 1.5, 10.0]:
            box = Box([0.0, 0.0, 0.0], [2.0, 2.0, 0.0], qyaw(0.0), 1, 2.0, vel)
            points = np.array([[0.0, 0.0, 0.0], [0.5, 0.5, 0.0]]).transpose()
            mask = points_in_box(box, points, wlh_factor=wlh_factor)
            self.assertEqual(mask.all(), True)

        for wlh_factor in [0.1, 0.49]:
            box = Box([0.0, 0.0, 0.0], [2.0, 2.0, 0.0], qyaw(0.0), 1, 2.0, vel)
            points = np.array([[0.0, 0.0, 0.0], [0.5, 0.5, 0.0]]).transpose()
            mask = points_in_box(box, points, wlh_factor=wlh_factor)
            self.assertEqual(mask[0], True)
            self.assertEqual(mask[1], False)
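The wlh_factor checks above imply an inclusive boundary test: a point is kept when its coordinate magnitude is at most wlh_factor times the box's half-extent along each axis. A sketch of that implied comparison for the 2 x 2 box and the point (0.5, 0.5, 0.0):

half_extent = 1.0  # half of the 2.0 box width/length
for wlh_factor in (0.49, 0.5, 1.0):
    inside = abs(0.5) <= wlh_factor * half_extent
    print(wlh_factor, inside)  # 0.49 -> False, 0.5 -> True, 1.0 -> True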
Example 3
def filter_eval_boxes(nusc: NuScenes,
                      eval_boxes: EvalBoxes,
                      max_dist: Dict[str, float],
                      verbose: bool = False) -> EvalBoxes:
    """
    Applies filtering to boxes. Distance, bike-racks and points per box.
    :param nusc: An instance of the NuScenes class.
    :param eval_boxes: An instance of the EvalBoxes class.
    :param max_dist: Maps the detection name to the eval distance threshold for that class.
    :param verbose: Whether to print to stdout.
    :return: The filtered EvalBoxes.
    """
    # Accumulators for number of filtered boxes.
    total, dist_filter, point_filter, bike_rack_filter = 0, 0, 0, 0
    for ind, sample_token in enumerate(eval_boxes.sample_tokens):

        # Filter on distance first
        total += len(eval_boxes[sample_token])
        eval_boxes.boxes[sample_token] = [box for box in eval_boxes[sample_token] if
                                          box.ego_dist < max_dist[box.detection_name]]
        dist_filter += len(eval_boxes[sample_token])

        # Then remove boxes with zero points in them. Eval boxes have -1 points by default.
        eval_boxes.boxes[sample_token] = [box for box in eval_boxes[sample_token] if box.num_pts != 0]
        point_filter += len(eval_boxes[sample_token])

        # Perform bike-rack filtering
        sample_anns = nusc.get('sample', sample_token)['anns']
        bikerack_recs = [nusc.get('sample_annotation', ann) for ann in sample_anns if
                         nusc.get('sample_annotation', ann)['category_name'] == 'static_object.bicycle_rack']
        bikerack_boxes = [Box(rec['translation'], rec['size'], Quaternion(rec['rotation'])) for rec in bikerack_recs]

        filtered_boxes = []
        for box in eval_boxes[sample_token]:
            if box.detection_name in ['bicycle', 'motorcycle']:
                in_a_bikerack = False
                for bikerack_box in bikerack_boxes:
                    if np.sum(points_in_box(bikerack_box, np.expand_dims(np.array(box.translation), axis=1))) > 0:
                        in_a_bikerack = True
                if not in_a_bikerack:
                    filtered_boxes.append(box)
            else:
                filtered_boxes.append(box)

        eval_boxes.boxes[sample_token] = filtered_boxes
        bike_rack_filter += len(eval_boxes.boxes[sample_token])
    if verbose:
        print("=> Original number of boxes: %d" % total)
        print("=> After distance based filtering: %d" % dist_filter)
        print("=> After LIDAR points based filtering: %d" % point_filter)
        print("=> After bike rack filtering: %d" % bike_rack_filter)

    return eval_boxes
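A hedged usage sketch for filter_eval_boxes; the max_dist mapping below is illustrative only (not the official nuScenes detection config), and nusc / eval_boxes are assumed to be constructed already:

max_dist = {'car': 50.0, 'pedestrian': 40.0,
            'bicycle': 40.0, 'motorcycle': 40.0}  # metres, per class
eval_boxes = filter_eval_boxes(nusc, eval_boxes, max_dist, verbose=True)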
Example 4
    def get_lidar_instances(self, nsweeps=1, enlarge=1):
        channel = 'LIDAR_TOP'
        sample_data_token = self.sample['data'][channel]
        sd_record = self.nusc.get('sample_data', sample_data_token)
        _, boxes, _ = self.nusc.get_sample_data(
            sample_data_token, use_flat_vehicle_coordinates=False)

        # Get aggregated point cloud in lidar frame.
        chan = sd_record['channel']
        pc, _ = LidarPointCloud.from_file_multisweep(self.nusc, self.sample,
                                                     chan, channel, nsweeps)
        points = pc.points
        anno_points = []
        anno_boxes = []
        for box in boxes:
            mask = points_in_box(box, points[0:3, :], wlh_factor=enlarge)
            if mask.any():
                anno_points.append(points[:, mask])
                anno_boxes.append(box)
        return anno_points, anno_boxes
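For reference, a hypothetical call on the enclosing helper object would look like anno_points, anno_boxes = self.get_lidar_instances(nsweeps=5, enlarge=1.1); it returns one point subset per annotated box that contains at least one lidar return.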
Example 5
    def render(self, events: DataFrame, timestamp: int,
               frame_gt: List[TrackingBox], frame_pred: List[TrackingBox],
               points=None, pose_record=None, cs_record=None, ifplotgt=False,
               threshold=0.1, ifpltsco=False, outscen_class=False, nusc=None,
               ifplthis=False, pltve=False) -> None:
        """
        Render function for a given scene timestamp.
        :param events: motmetrics events for that particular timestamp.
        :param timestamp: timestamp for the rendering.
        :param frame_gt: list of ground truth boxes.
        :param frame_pred: list of prediction boxes.
        """
        # Init.
        #print('Rendering {}'.format(timestamp))
        switches = events[events.Type == 'SWITCH']
        switch_ids = switches.HId.values  # tracking_ids in frame_pred
        switch_gt_ids = switches.OId.values  # tracking_ids in the GT
        FN = events[events.Type == 'MISS']
        #FN = FN.HId.values
        FN = FN.OId.values  # tracking_ids in the GT
        FP = events[events.Type == 'FP']
        FP = FP.HId.values  # tracking_ids in frame_pred

        fig, ax = plt.subplots()
        #plt.style.use('dark_background')  # black background
        plt.style.use('classic')  # white background

        points.render_height(ax,
                             view=np.eye(4),
                             x_lim=(-50, 50),
                             y_lim=(-50, 50),
                             marker_size=0.1)  #BEV
        #points.render_height(ax, view=np.eye(4) )#BEV
        #points = points.rotate(Quaternion( pose_record["rotation"]).inverse)
        sam_anno = []
        if len(frame_gt) > 0:
            sample = nusc.get('sample', frame_gt[0].sample_token)
            sample_annotation_tokens = sample['anns']  # annotations
            for anno_token in sample_annotation_tokens:
                sam_anno.append(
                    nusc.get('sample_annotation',
                             anno_token)["instance_token"])
        vislev = {'v0-40': 0, 'v40-60': 1, 'v60-80': 2, 'v80-100': 3}
        #points.render_intensity(ax)
        # Plot GT boxes.
        if ifplotgt:

            for i, b in enumerate(frame_gt):
                color = 'k'
                #qua = tuple(self.list_add(list(b.rotation),[0.924,0.0,0.0,0.383]))
                box = Box(np.array(b.ego_translation, dtype=float),
                          b.size,
                          Quaternion(b.rotation),
                          name=b.tracking_name,
                          token=b.tracking_id)
                #qua = tuple(self.list_add(list(pose_record["rotation"]),[ 0.831,0.0,0.0,0.556]))
                #box.translate(-np.array(pose_record["translation"]))
                box.rotate(Quaternion(pose_record["rotation"]).inverse)
                # move box to sensor coord system
                box.translate(-np.array(cs_record["translation"]))
                box.rotate(Quaternion(cs_record["rotation"]).inverse)
                if outscen_class:
                    #####TrackingRenderer.gt_ptsnumrange = {'0-5nums':0, '5-10nums':0, '10-50nums':0, '50-200nums':0, '>200nums': 0 }     # car: lidar point-count bins
                    #####TrackingRenderer.gt_ptsnumrange =  {'0-5nums':0, '5-10nums':0, '10-20nums':0, '20-30nums':0, '>30nums': 0 }     # ped: lidar point-count bins
                    num_pts = frame_gt[i].num_pts
                    #####car
                    if TrackingRenderer.outscen == 'car':
                        if num_pts > 0 and num_pts <= 5:
                            TrackingRenderer.gt_ptsnumrange['0-5nums'] += 1
                        elif num_pts > 5 and num_pts <= 10:
                            TrackingRenderer.gt_ptsnumrange['5-10nums'] += 1
                        elif num_pts > 10 and num_pts <= 50:
                            TrackingRenderer.gt_ptsnumrange['10-50nums'] += 1
                        elif num_pts > 50 and num_pts <= 200:
                            TrackingRenderer.gt_ptsnumrange['50-200nums'] += 1
                        elif num_pts > 200:
                            TrackingRenderer.gt_ptsnumrange['>200nums'] += 1
                    else:
                        ####ped
                        if num_pts > 0 and num_pts <= 5:
                            TrackingRenderer.gt_ptsnumrange['0-5nums'] += 1
                        elif num_pts > 5 and num_pts <= 10:
                            TrackingRenderer.gt_ptsnumrange['5-10nums'] += 1
                        elif num_pts > 10 and num_pts <= 20:
                            TrackingRenderer.gt_ptsnumrange['10-20nums'] += 1
                        elif num_pts > 20 and num_pts <= 30:
                            TrackingRenderer.gt_ptsnumrange['20-30nums'] += 1
                        elif num_pts > 30:
                            TrackingRenderer.gt_ptsnumrange['>30nums'] += 1
                    if box.token in FN:
                        #####distance
                        dis = math.sqrt(box.center[0]**2 + box.center[1]**2)
                        if dis > 0 and dis <= 15:
                            TrackingRenderer.fn_disrange[
                                '<15m'] = TrackingRenderer.fn_disrange[
                                    '<15m'] + 1
                        elif dis > 15 and dis <= 30:
                            TrackingRenderer.fn_disrange[
                                '15-30m'] = TrackingRenderer.fn_disrange[
                                    '15-30m'] + 1
                        elif dis > 30 and dis <= 45:
                            TrackingRenderer.fn_disrange[
                                '30-45m'] = TrackingRenderer.fn_disrange[
                                    '30-45m'] + 1
                        elif dis > 45 and dis <= 54:
                            TrackingRenderer.fn_disrange[
                                '45-54m'] = TrackingRenderer.fn_disrange[
                                    '45-54m'] + 1
                        else:
                            TrackingRenderer.fn_disrange[
                                '-1'] = TrackingRenderer.fn_disrange['-1'] + 1
                        #####ve TrackingRenderer.fn_verange = {'0-0.1m/s':0, '0.1-2.5m/s':0, '2.5-5m/s':0, '5-10m/s':0, '>10m/s': 0 }            # car: absolute speed bins
                        #####   TrackingRenderer.fn_verange = {'0-0.1m/s':0, '0.1-1.0m/s':0, '1.0-1.5m/s':0, '1.5-2m/s':0, '>2m/s': 0 }          # ped: absolute speed bins
                        ve = math.sqrt(frame_gt[i].velocity[0]**2 +
                                       frame_gt[i].velocity[1]**2)
                        if TrackingRenderer.outscen == 'car':
                            if ve > 0 and ve <= 0.1:
                                TrackingRenderer.fn_verange['0-0.1m/s'] += 1
                            elif ve > 0.1 and ve <= 2.5:
                                TrackingRenderer.fn_verange['0.1-2.5m/s'] += 1
                            elif ve > 2.5 and ve <= 5:
                                TrackingRenderer.fn_verange['2.5-5m/s'] += 1
                            elif ve > 5 and ve <= 10:
                                TrackingRenderer.fn_verange['5-10m/s'] += 1
                            else:
                                TrackingRenderer.fn_verange['>10m/s'] += 1
                        else:
                            if ve > 0 and ve <= 0.1:
                                TrackingRenderer.fn_verange['0-0.1m/s'] += 1
                            elif ve > 0.1 and ve <= 1.0:
                                TrackingRenderer.fn_verange['0.1-1.0m/s'] += 1
                            elif ve > 1.0 and ve <= 1.5:
                                TrackingRenderer.fn_verange['1.0-1.5m/s'] += 1
                            elif ve > 1.5 and ve <= 2:
                                TrackingRenderer.fn_verange['1.5-2m/s'] += 1
                            else:
                                TrackingRenderer.fn_verange['>2m/s'] += 1
                        #####num_pts TrackingRenderer.fn_ptsnumrange = {'0-5nums':0, '5-10nums':0, '10-50nums':0, '50-200nums':0, '>200nums': 0 }     # car: lidar point-count bins; sampling reference ratios: 0.21, 0.23, 0.26, 0.2, 0.1
                        #############TrackingRenderer.fn_ptsnumrange = {'0-5nums':0, '5-10nums':0, '10-20nums':0, '20-30nums':0, '>30nums': 0 }      # ped: lidar point-count bins
                        num_pts = frame_gt[i].num_pts
                        if TrackingRenderer.outscen == 'car':
                            if num_pts > 0 and num_pts <= 5:
                                TrackingRenderer.fn_ptsnumrange['0-5nums'] += 1
                            elif num_pts > 5 and num_pts <= 10:
                                TrackingRenderer.fn_ptsnumrange[
                                    '5-10nums'] += 1
                            elif num_pts > 10 and num_pts <= 50:
                                TrackingRenderer.fn_ptsnumrange[
                                    '10-50nums'] += 1
                            elif num_pts > 50 and num_pts <= 200:
                                TrackingRenderer.fn_ptsnumrange[
                                    '50-200nums'] += 1
                            elif num_pts > 200:
                                TrackingRenderer.fn_ptsnumrange[
                                    '>200nums'] += 1
                            else:
                                TrackingRenderer.fn_ptsnumrange['-1'] += 1
                        else:
                            if num_pts > 0 and num_pts <= 5:
                                TrackingRenderer.fn_ptsnumrange['0-5nums'] += 1
                            elif num_pts > 5 and num_pts <= 10:
                                TrackingRenderer.fn_ptsnumrange[
                                    '5-10nums'] += 1
                            elif num_pts > 10 and num_pts <= 20:
                                TrackingRenderer.fn_ptsnumrange[
                                    '10-20nums'] += 1
                            elif num_pts > 20 and num_pts <= 30:
                                TrackingRenderer.fn_ptsnumrange[
                                    '20-30nums'] += 1
                            elif num_pts > 30:
                                TrackingRenderer.fn_ptsnumrange['>30nums'] += 1
                            else:
                                TrackingRenderer.fn_ptsnumrange['-1'] += 1
                        ###### lookup
                        #sample = nusc.get('sample', frame_gt[i].sample_token)
                        #sample_annotation_tokens = sample['anns']  # annotations
                        try:
                            ind = sam_anno.index(b.tracking_id)
                            sample_annotation = nusc.get(
                                'sample_annotation',
                                sample_annotation_tokens[ind])
                            ####TrackingRenderer.vis_ratio = {'0-0.4':0, '0.4-0.6':0,  '0.6-0.8':0, '0.8-1.0':0}                                 # camera visibility bins: 0-40%, 40-60%, 60-80% and 80-100%. The visibility of an instance is the fraction of the annotation visible across all 6 images.
                            visibility = nusc.get(
                                'visibility',
                                sample_annotation['visibility_token'])
                            vis_level = vislev[visibility["level"]]
                            if vis_level == 0:
                                TrackingRenderer.vis_ratio['0-0.4'] += 1
                            elif vis_level == 1:
                                TrackingRenderer.vis_ratio['0.4-0.6'] += 1
                            elif vis_level == 2:
                                TrackingRenderer.vis_ratio['0.6-0.8'] += 1
                            elif vis_level == 3:
                                TrackingRenderer.vis_ratio['0.8-1.0'] += 1
                            ####TrackingRenderer.gt_ratio = {'ang_muta':0, 've_muta':0,  'aug_other':0, 've_other':0, 'firfn_trk':0, 'nonfirfn_trk':0}                            # only samples tracked continuously from the previous frame; angle and velocity mutations are each merged with 'other'
                            pre_token = sample_annotation['prev']
                            if pre_token == '':  # counts only as a first-frame FN
                                TrackingRenderer.gt_ratio['firfn_trk'] += 1

                            else:
                                TrackingRenderer.gt_ratio['nonfirfn_trk'] += 1
                                pre_annotation = nusc.get(
                                    'sample_annotation', pre_token)
                                vari_ang = abs(
                                    R.from_quat(list(frame_gt[i].rotation)).
                                    as_euler('zxy', degrees=False)[0] -
                                    R.from_quat(
                                        list(pre_annotation['rotation'])
                                    ).as_euler('zxy', degrees=False)[0])
                                if vari_ang > 0.52:  # 30 degrees
                                    TrackingRenderer.gt_ratio['angvar>30'] += 1
                                elif 0.52 > vari_ang and vari_ang > 0.35:
                                    TrackingRenderer.gt_ratio[
                                        '30>angvar>20'] += 1
                                elif 0.35 > vari_ang and vari_ang > 0.17:
                                    TrackingRenderer.gt_ratio[
                                        '20>angvar>10'] += 1
                                elif vari_ang < 0.17:
                                    TrackingRenderer.gt_ratio['10>angvar'] += 1
                                else:
                                    pass
                                pre_ve = nusc.box_velocity(
                                    pre_annotation['token'])
                                ve_varity = abs(ve - math.sqrt(pre_ve[0]**2 +
                                                               pre_ve[1]**2))
                                if ve_varity > TrackingRenderer.mutave_thr[0]:
                                    TrackingRenderer.gt_ratio[
                                        'vevari>%s' %
                                        (TrackingRenderer.mutave_thr[0])] += 1
                                elif ve_varity < TrackingRenderer.mutave_thr[
                                        0] and ve_varity >= TrackingRenderer.mutave_thr[
                                            1]:
                                    TrackingRenderer.gt_ratio[
                                        '%s<vevari<%s' %
                                        (TrackingRenderer.mutave_thr[1],
                                         TrackingRenderer.mutave_thr[0])] += 1
                                elif ve_varity < TrackingRenderer.mutave_thr[
                                        1] and ve_varity >= TrackingRenderer.mutave_thr[
                                            2]:
                                    TrackingRenderer.gt_ratio[
                                        '%s<vevari<%s' %
                                        (TrackingRenderer.mutave_thr[2],
                                         TrackingRenderer.mutave_thr[1])] += 1
                                else:
                                    TrackingRenderer.gt_ratio[
                                        'vevari<%s' %
                                        TrackingRenderer.mutave_thr[2]] += 1
                        except ValueError:  # annotation error
                            TrackingRenderer.fault_datas += 1
                box.render(ax,
                           view=np.eye(4),
                           colors=(color, color, color),
                           linewidth=1)
        else:
            pass

        # Plot predicted boxes.
        pred_trackid = []
        for i, b in enumerate(frame_pred):
            box = Box(
                b.ego_translation,
                b.size,
                Quaternion(b.rotation),
                name=b.tracking_name,
                token=b.tracking_id,
                score=b.tracking_score,
            )

            # move box to the ego vehicle coord system (the ego translation was already applied via ego_translation)
            box.rotate(Quaternion(pose_record["rotation"]).inverse)
            # move box to sensor coord system
            box.translate(-np.array(cs_record["translation"]))
            box.rotate(Quaternion(cs_record["rotation"]).inverse)
            pred_trackid.append(b.tracking_id)
            if outscen_class:
                if b.tracking_id in FP:
                    #####distance  TrackingRenderer.fp_disrange = {'<15m':0, '15-30m':0, '30-45m':0, '45-54m':0}
                    dis = math.sqrt(box.center[0]**2 + box.center[1]**2)
                    if dis > 0 and dis <= 15:
                        TrackingRenderer.fp_disrange[
                            '<15m'] = TrackingRenderer.fp_disrange['<15m'] + 1
                    elif dis > 15 and dis <= 30:
                        TrackingRenderer.fp_disrange[
                            '15-30m'] = TrackingRenderer.fp_disrange[
                                '15-30m'] + 1
                    elif dis > 30 and dis <= 45:
                        TrackingRenderer.fp_disrange[
                            '30-45m'] = TrackingRenderer.fp_disrange[
                                '30-45m'] + 1
                    elif dis > 45 and dis <= 54:
                        TrackingRenderer.fp_disrange[
                            '45-54m'] = TrackingRenderer.fp_disrange[
                                '45-54m'] + 1
                    else:
                        TrackingRenderer.fp_disrange[
                            '-1'] = TrackingRenderer.fp_disrange['-1'] + 1
                    #####ve TrackingRenderer.fp_verange = {'0-0.1m/s':0, '0.1-2.5m/s':0, '2.5-5m/s':0, '5-10m/s':0, '>10m/s': 0 }            # car: absolute speed bins
                    #####TrackingRenderer.fp_verange = {'0-0.1m/s':0, '0.1-1.0m/s':0, '1.0-1.5m/s':0, '1.5-2m/s':0, '>2m/s': 0 }         # ped: absolute speed bins
                    ve = math.sqrt(frame_pred[i].velocity[0]**2 +
                                   frame_pred[i].velocity[1]**2)
                    if TrackingRenderer.outscen == 'car':
                        if ve > 0 and ve <= 0.1:
                            TrackingRenderer.fp_verange['0-0.1m/s'] += 1
                        elif ve > 0.1 and ve <= 2.5:
                            TrackingRenderer.fp_verange['0.1-2.5m/s'] += 1
                        elif ve > 2.5 and ve <= 5:
                            TrackingRenderer.fp_verange['2.5-5m/s'] += 1
                        elif ve > 5 and ve <= 10:
                            TrackingRenderer.fp_verange['5-10m/s'] += 1
                        else:
                            TrackingRenderer.fp_verange['>10m/s'] += 1
                    else:
                        if ve > 0 and ve <= 0.1:
                            TrackingRenderer.fp_verange['0-0.1m/s'] += 1
                        elif ve > 0.1 and ve <= 1.0:
                            TrackingRenderer.fp_verange['0.1-1.0m/s'] += 1
                        elif ve > 1.0 and ve <= 1.5:
                            TrackingRenderer.fp_verange['1.0-1.5m/s'] += 1
                        elif ve > 1.5 and ve <= 2:
                            TrackingRenderer.fp_verange['1.5-2m/s'] += 1
                        else:
                            TrackingRenderer.fp_verange['>2m/s'] += 1
                    #####num_pts TrackingRenderer.fp_ptsnumrange = {'0-5nums':0, '5-10nums':0, '10-50nums':0, '50-200nums':0, '>200nums': 0 }     # car: lidar point-count bins; sampling reference ratios: 0.21, 0.23, 0.26, 0.2, 0.1
                    ########## TrackingRenderer.fp_ptsnumrange =  {'0-5nums':0, '5-10nums':0, '10-20nums':0, '20-30nums':0, '>30nums': 0 }     # ped: lidar point-count bins
                    # Count the lidar points that fall inside the predicted box.
                    points_mask = points_in_box(box, points.points[:3, :])
                    num_pts = int(points_mask.sum())
                    if TrackingRenderer.outscen == 'car':
                        if num_pts > 0 and num_pts <= 5:
                            TrackingRenderer.fp_ptsnumrange['0-5nums'] += 1
                        elif num_pts > 5 and num_pts <= 10:
                            TrackingRenderer.fp_ptsnumrange['5-10nums'] += 1
                        elif num_pts > 10 and num_pts <= 50:
                            TrackingRenderer.fp_ptsnumrange['10-50nums'] += 1
                        elif num_pts > 50 and num_pts <= 200:
                            TrackingRenderer.fp_ptsnumrange['50-200nums'] += 1
                        elif num_pts > 200:
                            TrackingRenderer.fp_ptsnumrange['>200nums'] += 1
                    else:
                        if num_pts > 0 and num_pts <= 5:
                            TrackingRenderer.fp_ptsnumrange['0-5nums'] += 1
                        elif num_pts > 5 and num_pts <= 10:
                            TrackingRenderer.fp_ptsnumrange['5-10nums'] += 1
                        elif num_pts > 10 and num_pts <= 20:
                            TrackingRenderer.fp_ptsnumrange['10-20nums'] += 1
                        elif num_pts > 20 and num_pts <= 30:
                            TrackingRenderer.fp_ptsnumrange['20-30nums'] += 1
                        elif num_pts > 30:
                            TrackingRenderer.fp_ptsnumrange['>30nums'] += 1
                    #####TrackingRenderer.fpscorrange = {'0-0.1':0, '0.2-0.4':0, '0.4-0.6':0,'0.6-1.0':0}
                    score = box.score
                    if 0 <= score <= 0.1:
                        TrackingRenderer.fpscorrange['0-0.1'] += 1
                    elif 0.2 <= score <= 0.4:
                        TrackingRenderer.fpscorrange['0.2-0.4'] += 1
                    elif 0.4 < score <= 0.6:
                        TrackingRenderer.fpscorrange['0.4-0.6'] += 1
                    elif 0.6 < score <= 1.0:
                        TrackingRenderer.fpscorrange['0.6-1.0'] += 1
                    #####TrackingRenderer.trk_ratio = {'ang_muta':0, 've_muta':0, 'aug_other':0, 've_other':0}                            # only samples tracked continuously from the previous frame; angle and velocity mutations are each merged with 'other'
                    if box.token in TrackingRenderer.his_trackid:
                        pre_box = TrackingRenderer.his_track[
                            TrackingRenderer.his_trackid.index(box.token)]
                        vari_ang = abs(
                            (R.from_quat(list(frame_pred[i].rotation)).
                             as_euler('zxy', degrees=False)[0]) -
                            (R.from_quat(list(pre_box.rotation)).as_euler(
                                'zxy', degrees=False)[0]))
                        if vari_ang > 0.52:  # 30 degrees
                            TrackingRenderer.trk_ratio['angvar>30'] += 1
                        elif 0.52 > vari_ang > 0.35:
                            TrackingRenderer.trk_ratio['30>angvar>20'] += 1
                        elif 0.35 > vari_ang > 0.17:
                            TrackingRenderer.trk_ratio['20>angvar>10'] += 1
                        elif vari_ang < 0.17:
                            TrackingRenderer.trk_ratio['10>angvar'] += 1
                        pre_ve = pre_box.velocity
                        ve = frame_pred[i].velocity
                        ve_varity = abs(
                            math.sqrt(ve[0]**2 + ve[1]**2) -
                            math.sqrt(pre_ve[0]**2 + pre_ve[1]**2))
                        if ve_varity > TrackingRenderer.mutave_thr[
                                0]:  # uniform car acceleration is 2.778 m/s^2; 3 * 0.5 s = 1.5
                            TrackingRenderer.trk_ratio[
                                'vevari>%s' %
                                TrackingRenderer.mutave_thr[0]] += 1
                        elif ve_varity < TrackingRenderer.mutave_thr[
                                0] and ve_varity >= TrackingRenderer.mutave_thr[
                                    1]:
                            TrackingRenderer.trk_ratio[
                                '%s<vevari<%s' %
                                (TrackingRenderer.mutave_thr[1],
                                 TrackingRenderer.mutave_thr[0])] += 1
                        elif ve_varity < TrackingRenderer.mutave_thr[
                                1] and ve_varity >= TrackingRenderer.mutave_thr[
                                    2]:
                            TrackingRenderer.trk_ratio[
                                '%s<vevari<%s' %
                                (TrackingRenderer.mutave_thr[2],
                                 TrackingRenderer.mutave_thr[1])] += 1
                        else:
                            TrackingRenderer.trk_ratio[
                                'vevari<%s' %
                                TrackingRenderer.mutave_thr[2]] += 1

            # Determine color for this tracking id.
            if b.tracking_id not in self.id2color.keys():
                self.id2color[b.tracking_id] = (
                    float(hash(b.tracking_id + 'r') % 256) / 255,
                    float(hash(b.tracking_id + 'g') % 256) / 255,
                    float(hash(b.tracking_id + 'b') % 256) / 255)

            if ifplthis:
                box_for_path = copy.deepcopy(box)
                box_for_path.rotate(Quaternion(cs_record["rotation"]))
                box_for_path.translate(np.array(cs_record["translation"]))

                box_for_path.rotate(Quaternion(pose_record["rotation"]))
                box_for_path.translate(np.array(
                    pose_record["translation"]))  # to the global frame
                # Record trajectory coordinates.
                if b.tracking_id in self.track_path.keys():
                    self.track_path[b.tracking_id].append(box_for_path)
                else:
                    self.track_path[b.tracking_id] = [box_for_path]

            # Render box. Highlight identity switches in red.
            if b.tracking_id in switch_ids:
                color = self.id2color[b.tracking_id]
                box.render(ax, view=np.eye(4), colors=('r', 'r', color))
                if outscen_class:
                    ###TrackingRenderer.ids_verange = {'0-0.1m/s':0, '0.1-2.5m/s':0, '2.5-5m/s':0, '5-10m/s':0, '>10m/s': 0 }           # car: absolute speed bins
                    ###TrackingRenderer.ids_verange = {'0-0.1m/s':0, '0.1-1.0m/s':0, '1.0-1.5m/s':0, '1.5-2m/s':0, '>2m/s': 0 }        # ped: absolute speed bins
                    ve = math.sqrt(frame_pred[i].velocity[0]**2 +
                                   frame_pred[i].velocity[1]**2)
                    if TrackingRenderer.outscen == 'car':
                        if ve > 0 and ve <= 0.1:
                            TrackingRenderer.ids_verange['0-0.1m/s'] += 1
                        elif ve > 0.1 and ve <= 2.5:
                            TrackingRenderer.ids_verange['0.1-2.5m/s'] += 1
                        elif ve > 2.5 and ve <= 5:
                            TrackingRenderer.ids_verange['2.5-5m/s'] += 1
                        elif ve > 5 and ve <= 10:
                            TrackingRenderer.ids_verange['5-10m/s'] += 1
                        else:
                            TrackingRenderer.ids_verange['>10m/s'] += 1
                    else:
                        if ve > 0 and ve <= 0.1:
                            TrackingRenderer.ids_verange['0-0.1m/s'] += 1
                        elif ve > 0.1 and ve <= 1.0:
                            TrackingRenderer.ids_verange['0.1-1.0m/s'] += 1
                        elif ve > 1.0 and ve <= 1.5:
                            TrackingRenderer.ids_verange['1.0-1.5m/s'] += 1
                        elif ve > 1.5 and ve <= 2:
                            TrackingRenderer.ids_verange['1.5-2m/s'] += 1
                        else:
                            TrackingRenderer.ids_verange['>2m/s'] += 1
                    ####TrackingRenderer.ids_factratio = {'delay_trk':0, 'del_oth_trk':0, 'reappear':0, 'reapother':0, 've_muta':0, 've_other':0, 'reapdeltrk':0 }
                    indx = np.where(switch_ids == b.tracking_id)
                    gt_id = switch_gt_ids[indx]
                    try:
                        x = sam_anno.index(gt_id)
                        #sample = nusc.get('sample', frame_gt[x].sample_token)
                        #sample_annotation_tokens = sample['anns']  # annotations
                        sample_annotation = nusc.get(
                            'sample_annotation', sample_annotation_tokens[x])
                        if sample_annotation['prev'] == '':  # of limited diagnostic value
                            TrackingRenderer.ids_factratio['del_oth_trk'] += 1
                        else:
                            TrackingRenderer.ids_factratio['delay_trk'] += 1
                            visibility = nusc.get(
                                'visibility',
                                sample_annotation['visibility_token'])
                            vis_level = vislev[visibility["level"]]
                            pre_annotation = nusc.get(
                                "sample_annotation", sample_annotation['prev'])
                            pre_vis = nusc.get(
                                'visibility',
                                pre_annotation['visibility_token'])
                            pre_vislevel = vislev[pre_vis["level"]]
                            if vis_level > pre_vislevel or (vis_level
                                                            == pre_vislevel
                                                            and vis_level < 3):
                                TrackingRenderer.ids_factratio['reappear'] += 1
                            elif vis_level == pre_vislevel:
                                TrackingRenderer.ids_factratio[
                                    'reapdeltrk'] += 1
                            else:
                                TrackingRenderer.ids_factratio[
                                    'reapother'] += 1
                            pre_ve = nusc.box_velocity(pre_annotation['token'])
                            ve = nusc.box_velocity(sample_annotation['token'])
                            ve_varity = abs(
                                math.sqrt(ve[0]**2 + ve[1]**2) -
                                math.sqrt(pre_ve[0]**2 + pre_ve[1]**2))
                            if ve_varity > TrackingRenderer.mutave_thr[
                                    0]:  # uniform car acceleration is 2.778 m/s^2; 3 * 0.5 s = 1.5
                                TrackingRenderer.ids_factratio[
                                    'vevari>%s' %
                                    (TrackingRenderer.mutave_thr[0])] += 1
                            elif ve_varity < TrackingRenderer.mutave_thr[
                                    0] and ve_varity >= TrackingRenderer.mutave_thr[
                                        1]:
                                TrackingRenderer.ids_factratio[
                                    '%s<vevari<%s' %
                                    (TrackingRenderer.mutave_thr[1],
                                     TrackingRenderer.mutave_thr[0])] += 1
                            elif ve_varity < TrackingRenderer.mutave_thr[
                                    1] and ve_varity >= TrackingRenderer.mutave_thr[
                                        2]:
                                TrackingRenderer.ids_factratio[
                                    '%s<vevari<%s' %
                                    (TrackingRenderer.mutave_thr[2],
                                     TrackingRenderer.mutave_thr[1])] += 1
                            else:
                                TrackingRenderer.ids_factratio[
                                    'vevari<%s' %
                                    TrackingRenderer.mutave_thr[2]] += 1
                    except Exception:  # annotation error
                        pass
            else:
                color = self.id2color[b.tracking_id]
                box.render(ax, view=np.eye(4), colors=(color, color, color))

            # Render other infos
            if ifpltsco:
                corners = view_points(box.corners(),
                                      view=np.eye(4),
                                      normalize=False)[:2, :]

                # ax.text(4,5,"hhaa0.8", fontsize=5)
                # ax.text(4,5,"%.2f\n%.2f,%.2f"%(b.tracking_score,b.velocity[0],b.velocity[1]), fontsize=5)

                ax.text(box.center[0],
                        box.center[1],
                        "%.2f\nvx=%.2f,vy=%.2f" %
                        (b.tracking_score, b.velocity[0], b.velocity[1]),
                        fontdict={
                            'size': '6',
                            'color': 'b'
                        })
                #ax.text(box.center[0],box.center[1],"%.2f\n%.2f,%.2f"%(b.tracking_score,b.velocity[0],b.velocity[1]), fontsize=5)
            #if pltve:

        # Drop track paths whose ids are absent from the current frame.
        keys = list(self.track_path.keys())
        for key in keys:
            if key not in pred_trackid:
                self.track_path.pop(key)
        # Draw historical track paths:
        if ifplthis:
            for id in self.track_path.keys():
                color = self.id2color[id]
                for box_path in self.track_path[id]:
                    # Transform to the current frame's local coordinates:
                    # move box to the ego vehicle coord system
                    box_path.translate(-np.array(pose_record["translation"]))
                    box_path.rotate(
                        Quaternion(pose_record["rotation"]).inverse)
                    # move box to sensor coord system
                    box_path.translate(-np.array(cs_record["translation"]))
                    box_path.rotate(Quaternion(cs_record["rotation"]).inverse)

                    ax.scatter(box_path.center[0], box_path.center[1], 10,
                               color)
                    # Transform back to the global frame.
                    box_path.rotate(Quaternion(cs_record["rotation"]))
                    box_path.translate(np.array(cs_record["translation"]))

                    box_path.rotate(Quaternion(pose_record["rotation"]))
                    box_path.translate(np.array(pose_record["translation"]))

        TrackingRenderer.his_track = frame_pred
        TrackingRenderer.his_trackid = pred_trackid
        # Plot MOTA metrics.
        # Target position: upper left, 0.01 of the way from the y-axis, 0.95 of the way from the x-axis.
        self.cursumfp += len(FP)  # cumulative count for the current scene
        self.cursumfn += len(FN)

        #print("FN=%d,FP=%d,switch_ids=%d,cursumfp=%d,cursumfn=%d" % ( len(FN), len(FP), len(switch_ids), self.cursumfp, self.cursumfn ))
        #ax.text(0.01, 0.95, "IDS:%d\nFP:%d\nFN:%d\ncur_sce sumfp:%d sumfn:%d\nthreshold:%f"%(len(switch_ids),len(FP),len(FN),self.cursumfp,self.cursumfn,threshold), transform=ax.transAxes, fontdict={'size': '10', 'color': 'b'})

        # Plot ego pose.
        plt.scatter(0, 0, s=96, facecolors='none', edgecolors='k', marker='o')
        plt.xlim(-50, 50)
        plt.ylim(-50, 50)
        plt.axis('off')
        # Save to disk and close figure.
        fig.savefig(os.path.join(self.save_path, '{}.png'.format(timestamp)))
        plt.close(fig)
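The repeated range-bucketing in the render method above could be collapsed into one helper. A sketch (the helper name and the edges, taken from the 'car' point-count bins, are assumptions, not part of the original code):

import bisect

def bucket_increment(counter, value, edges, labels):
    # edges are the inclusive upper bounds of all buckets except the last.
    counter[labels[bisect.bisect_left(edges, value)]] += 1

gt_ptsnumrange = {'0-5nums': 0, '5-10nums': 0, '10-50nums': 0,
                  '50-200nums': 0, '>200nums': 0}
bucket_increment(gt_ptsnumrange, 42, edges=[5, 10, 50, 200],
                 labels=['0-5nums', '5-10nums', '10-50nums',
                         '50-200nums', '>200nums'])
print(gt_ptsnumrange['10-50nums'])  # 1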
Example 6
def preprocess(nusc,
               split_names,
               root_dir,
               out_dir,
               keyword=None,
               keyword_action=None,
               subset_name=None,
               location=None):
    # cannot process day/night and location at the same time
    assert not (bool(keyword) and bool(location))
    if keyword:
        assert keyword_action in ['filter', 'exclude']

    # init dict to save
    pkl_dict = {}
    for split_name in split_names:
        pkl_dict[split_name] = []

    for i, sample in enumerate(nusc.sample):
        curr_scene_name = nusc.get('scene', sample['scene_token'])['name']

        # get if the current scene is in train, val or test
        curr_split = None
        for split_name in split_names:
            if curr_scene_name in getattr(splits, split_name):
                curr_split = split_name
                break
        if curr_split is None:
            continue

        if subset_name == 'night':
            if curr_split == 'train':
                if curr_scene_name in splits.val_night:
                    curr_split = 'val'
        if subset_name == 'singapore':
            if curr_split == 'train':
                if curr_scene_name in splits.val_singapore:
                    curr_split = 'val'

        # filter for day/night
        if keyword:
            scene_description = nusc.get("scene",
                                         sample["scene_token"])["description"]
            if keyword.lower() in scene_description.lower():
                if keyword_action == 'exclude':
                    # skip sample
                    continue
            else:
                if keyword_action == 'filter':
                    # skip sample
                    continue

        if location:
            scene = nusc.get("scene", sample["scene_token"])
            if location not in nusc.get("log", scene['log_token'])['location']:
                continue

        lidar_token = sample["data"]["LIDAR_TOP"]
        cam_front_token = sample["data"]["CAM_FRONT"]
        lidar_path, boxes_lidar, _ = nusc.get_sample_data(lidar_token)
        cam_path, boxes_front_cam, cam_intrinsic = nusc.get_sample_data(
            cam_front_token)

        print('{}/{} {} {}'.format(i + 1, len(nusc.sample), curr_scene_name,
                                   lidar_path))

        sd_rec_lidar = nusc.get('sample_data', sample['data']["LIDAR_TOP"])
        cs_record_lidar = nusc.get('calibrated_sensor',
                                   sd_rec_lidar['calibrated_sensor_token'])
        pose_record_lidar = nusc.get('ego_pose',
                                     sd_rec_lidar['ego_pose_token'])
        sd_rec_cam = nusc.get('sample_data', sample['data']["CAM_FRONT"])
        cs_record_cam = nusc.get('calibrated_sensor',
                                 sd_rec_cam['calibrated_sensor_token'])
        pose_record_cam = nusc.get('ego_pose', sd_rec_cam['ego_pose_token'])

        calib_infos = {
            "lidar2ego_translation": cs_record_lidar['translation'],
            "lidar2ego_rotation": cs_record_lidar['rotation'],
            "ego2global_translation_lidar": pose_record_lidar['translation'],
            "ego2global_rotation_lidar": pose_record_lidar['rotation'],
            "ego2global_translation_cam": pose_record_cam['translation'],
            "ego2global_rotation_cam": pose_record_cam['rotation'],
            "cam2ego_translation": cs_record_cam['translation'],
            "cam2ego_rotation": cs_record_cam['rotation'],
            "cam_intrinsic": cam_intrinsic,
        }

        # load lidar points
        pts = np.fromfile(lidar_path, dtype=np.float32,
                          count=-1).reshape([-1, 5])[:, :3].T

        # map point cloud into front camera image
        pts_valid_flag, pts_cam_coord, pts_img = map_pointcloud_to_image(
            pts, (900, 1600, 3), calib_infos)
        # fliplr so that indexing is row, col and not col, row
        pts_img = np.ascontiguousarray(np.fliplr(pts_img))

        # only use lidar points in the front camera image
        pts = pts[:, pts_valid_flag]

        num_pts = pts.shape[1]
        seg_labels = np.full(num_pts,
                             fill_value=len(class_names_to_id),
                             dtype=np.uint8)
        # only use boxes that are visible in camera
        valid_box_tokens = [box.token for box in boxes_front_cam]
        boxes = [box for box in boxes_lidar if box.token in valid_box_tokens]
        for box in boxes:
            # get points that lie inside of the box
            fg_mask = points_in_box(box, pts)
            det_class = category_to_detection_name(box.name)
            if det_class is not None:
                seg_labels[fg_mask] = class_names_to_id[det_class]

        # convert to relative path
        lidar_path = lidar_path.replace(root_dir + '/', '')
        cam_path = cam_path.replace(root_dir + '/', '')

        # transpose to yield shape (num_points, 3)
        pts = pts.T

        # append data to train, val or test list in pkl_dict
        data_dict = {
            'points': pts,
            'seg_labels': seg_labels,
            'points_img': pts_img,  # row, col format, shape: (num_points, 2)
            'lidar_path': lidar_path,
            'camera_path': cam_path,
            'boxes': boxes_lidar,
            "sample_token": sample["token"],
            "scene_name": curr_scene_name,
            "calib": calib_infos
        }
        pkl_dict[curr_split].append(data_dict)

    # save to pickle file
    save_dir = osp.join(out_dir, 'preprocess')
    os.makedirs(save_dir, exist_ok=True)
    for split_name in split_names:
        save_path = osp.join(
            save_dir,
            '{}{}.pkl'.format(split_name,
                              '_' + subset_name if subset_name else ''))
        with open(save_path, 'wb') as f:
            pickle.dump(pkl_dict[split_name], f)
            print('Wrote preprocessed data to ' + save_path)
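A hypothetical invocation of the preprocess routine above (the paths are placeholders; split names follow nuscenes.utils.splits):

preprocess(nusc, ['train', 'val'],
           root_dir='/data/nuscenes', out_dir='/data/nuscenes_preprocess',
           subset_name='singapore', location='singapore')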
Example 7
my_scene = nusc.scene[scene_index]
nbr_samples = my_scene['nbr_samples']
first_sample_token = my_scene['first_sample_token']
curr_sample = nusc.get('sample', first_sample_token)
sample_tokens = [None] * nbr_samples  # pre-allocate one slot per sample
prog = 0
for _ in range(nbr_samples):
    sample_tokens[prog] = curr_sample['token']
    if curr_sample['next']:
        next_token = curr_sample['next']
        curr_sample = nusc.get('sample', next_token)
    prog += 1
my_sample_token = sample_tokens[sample_index]
my_sample = nusc.get('sample', my_sample_token)
sensor_channel = "RADAR_FRONT"
my_sample_data = nusc.get('sample_data', my_sample['data'][sensor_channel])
radar_data = get_sensor_sample_data(nusc, my_sample, sensor_channel)
my_annotation_token = my_sample['anns'][18]
#note that the annotation here is in the global coordinate system
my_annotation_metadata = nusc.get('sample_annotation', my_annotation_token)
#pay attention to the Box class in nuscenes-devkit:
#get_sample_data transforms the coordinates from the global to the sensor frame
_, boxes, _ = nusc.get_sample_data(my_sample_data['token'])
box_list = []

for box in boxes:
    mask = points_in_box(box, radar_data[0:3, :])
    #count the number of points contained in the bounding box
    num_of_pts = np.count_nonzero(mask)
    if num_of_pts != 0:
        box_list.append((box.name, num_of_pts))
print(box_list)
Example 8
    def create_annotations(self, sample_token, sensor_channels):
        """
        Create annotations for the given sample token.

        :param sample_token: the sample_token to get the annotation for
        :param sensor_channels: list of channels for cropping the labels, e.g. ['CAM_FRONT', 'RADAR_FRONT']
            This works only for CAMERA atm

        :returns: 
            annotations dictionary:
            {
                'labels': [] # <list of n int>  
                'bboxes': [] # <list of n x 4 float> [xmin, ymin, xmax, ymax]
                'distances': [] # <list of n float>  Center of box given as x, y, z.
                'visibilities': [] # <list of n float>  Visibility of annotated object
            }
        """

        if any('RADAR' in s for s in sensor_channels):
            print("[WARNING] Cropping to RADAR is not supported atm")
            sensor_channels = [c for c in sensor_channels if 'CAM' in c]

        sample = self.nusc.get('sample', sample_token)
        annotations_count = 0
        annotations = {
            'labels': [],        # <list of n int>
            'bboxes': [],        # <list of n x 4 float> [xmin, ymin, xmax, ymax]
            'distances': [],     # <list of n float>  Center of box given as x, y, z.
            'visibilities': [],  # <list of n float>  Visibility of annotated object
            'num_radar_pts': []  # <list of n int>  number of radar points that cover that annotation
        }

        # Camera parameters
        for selected_sensor_channel in sensor_channels:
            sd_rec = self.nusc.get('sample_data',
                                   sample['data'][selected_sensor_channel])

            # Create Boxes:
            _, boxes, camera_intrinsic = self.nusc.get_sample_data(
                sd_rec['token'], box_vis_level=BoxVisibility.ANY)
            # nuscenes uses the (width, height) convention
            imsize_src = (sd_rec['width'], sd_rec['height'])

            bbox_resize = [1. / sd_rec['height'], 1. / sd_rec['width']]
            if not self.normalize_bbox:
                bbox_resize[0] *= float(self.image_min_side)
                bbox_resize[1] *= float(self.image_max_side)

            # Create labels for all boxes that are visible
            for box in boxes:

                # Add labels to boxes
                if box.name in self.classes:
                    box.label = self.classes[box.name]
                    # Check if box is visible and transform box to 1D vector
                    if box_in_image(box=box,
                                    intrinsic=camera_intrinsic,
                                    imsize=imsize_src,
                                    vis_level=BoxVisibility.ANY):

                        ## Points in box method for annotation filterS
                        # check if bounding box has an according radar point
                        if self.only_radar_annotated == 2:

                            pcs, times = RadarPointCloud.from_file_multisweep(self.nusc, sample, self.radar_sensors[0], \
                                selected_sensor_channel, nsweeps=self.n_sweeps, min_distance=0.0, merge=False)

                            for pc in pcs:
                                pc.points = radar.enrich_radar_data(pc.points)

                            if len(pcs) > 0:
                                radar_sample = np.concatenate(
                                    [pc.points for pc in pcs], axis=-1)
                            else:
                                print(
                                    "[WARNING] only_radar_annotated=2 and sweeps=0 removes all annotations"
                                )
                                radar_sample = np.zeros(
                                    shape=(len(radar.channel_map), 0))
                            radar_sample = radar_sample.astype(
                                dtype=np.float32)

                            mask = points_in_box(box, radar_sample[0:3, :])
                            if not mask.any():
                                continue

                        # If visible, we create the corresponding label
                        # box2d returns [xmin, ymin, xmax, ymax]
                        box2d = box.box2d(camera_intrinsic)
                        box2d[0] *= bbox_resize[1]
                        box2d[1] *= bbox_resize[0]
                        box2d[2] *= bbox_resize[1]
                        box2d[3] *= bbox_resize[0]

                        annotations['bboxes'].insert(annotations_count, box2d)
                        annotations['labels'].insert(annotations_count,
                                                     box.label)
                        annotations['num_radar_pts'].insert(
                            annotations_count,
                            self.nusc.get('sample_annotation',
                                          box.token)['num_radar_pts'])

                        distance = (box.center[0]**2 + box.center[1]**2 +
                                    box.center[2]**2)**0.5
                        annotations['distances'].insert(
                            annotations_count, distance)
                        annotations['visibilities'].insert(
                            annotations_count,
                            int(
                                self.nusc.get('sample_annotation',
                                              box.token)['visibility_token']))
                        annotations_count += 1
                else:
                    # The current name has been ignored
                    pass

        annotations['labels'] = np.array(annotations['labels'])
        annotations['bboxes'] = np.array(annotations['bboxes'])
        annotations['distances'] = np.array(annotations['distances'])
        annotations['num_radar_pts'] = np.array(annotations['num_radar_pts'])
        annotations['visibilities'] = np.array(annotations['visibilities'])

        # num_radar_pts method for annotation filtering
        if self.only_radar_annotated == 1:

            anns_to_keep = np.where(annotations['num_radar_pts'])[0]

            for key in annotations:
                annotations[key] = annotations[key][anns_to_keep]

        return annotations
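A sketch of the bbox_resize arithmetic above for a 1600 x 900 nuScenes camera image; the image_min_side / image_max_side values are assumptions for illustration:

height, width = 900, 1600
image_min_side, image_max_side = 450, 800
# Non-normalized case: scale pixel coords to the network input size.
bbox_resize = [image_min_side / height, image_max_side / width]
xmin, ymin, xmax, ymax = 320.0, 180.0, 480.0, 360.0
box2d = [xmin * bbox_resize[1], ymin * bbox_resize[0],
         xmax * bbox_resize[1], ymax * bbox_resize[0]]
print(box2d)  # [160.0, 90.0, 240.0, 180.0]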
Example 9
def get_points_after_view_transform(points, box_t):

    wlh = box_t.wlh
    box_len = np.sqrt(wlh[0]**2 + wlh[1]**2)
    points_new = points[:, 0:3].transpose()
    mask = points_in_box(box_t, points_new, wlh_factor=1.0)
    if mask.sum() <= 1:
        global number_of_less
        number_of_less += 1
        return None
    points_in = points_new.transpose()[mask]
    points_in = points_in - box_t.center
    points_in = points_in.transpose()

    ## 2. get normal vector of forwarding direction
    box_coors = box_t.corners().transpose()
    pq = box_coors[3] - box_coors[0]
    pr = box_coors[4] - box_coors[0]
    normal_1 = np.cross(pq, pr)

    ## 3. get the rotation angle between normal vector and base vector
    def unit_vector(vector):
        """ Returns the unit vector of the vector. """
        return vector / np.linalg.norm(vector)

    def angle_between(v1, v2):
        """ Returns the angle in radians between vectors 'v1' and 'v2'::

                >>> angle_between((1, 0, 0), (0, 1, 0))
                1.5707963267948966
                >>> angle_between((1, 0, 0), (1, 0, 0))
                0.0
                >>> angle_between((1, 0, 0), (-1, 0, 0))
                3.141592653589793
        """
        v1_u = unit_vector(v1)
        v2_u = unit_vector(v2)
        return np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0))

    base_vector = np.array([0, 1, 0])
    angle_t = angle_between(normal_1, base_vector)

    ### get the axis of rotation
    axis_t = np.cross(normal_1, base_vector)

    ## 4. get the matrix representation of view; angle --> Quaternion --> matrix representation

    quat = Quaternion(axis=axis_t, angle=angle_t)  # public axis-angle constructor
    FLOAT_EPS = np.finfo(float).eps

    def _quat_to_matrix(quat):
        w, x, y, z = quat[0], quat[1], quat[2], quat[3]
        Nq = w * w + x * x + y * y + z * z
        if Nq < FLOAT_EPS:
            return np.eye(3)
        s = 2.0 / Nq
        X = x * s
        Y = y * s
        Z = z * s
        wX = w * X
        wY = w * Y
        wZ = w * Z
        xX = x * X
        xY = x * Y
        xZ = x * Z
        yY = y * Y
        yZ = y * Z
        zZ = z * Z
        return np.array([[1.0 - (yY + zZ), xY - wZ, xZ + wY],
                         [xY + wZ, 1.0 - (xX + zZ), yZ - wX],
                         [xZ - wY, yZ + wX, 1.0 - (xX + yY)]])

    view_t = _quat_to_matrix(quat)

    ## 5. view transformation
    points_t = view_points(points_in, view_t, normalize=False)
    return points_t
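Note that pyquaternion already exposes the quaternion-to-matrix conversion, so the hand-rolled _quat_to_matrix above is equivalent (for a unit quaternion) to:

import numpy as np
from pyquaternion import Quaternion

quat = Quaternion(axis=np.array([0.0, 0.0, 1.0]), angle=np.pi / 4)
view_t = quat.rotation_matrix  # 3 x 3 rotation matrix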