Exemplo n.º 1
0
    def render(self, events: DataFrame, timestamp: int, frame_gt: List[TrackingBox], frame_pred: List[TrackingBox]) \
            -> None:
        """
        Render ground-truth and predicted tracking boxes for a single scene timestamp
        and save the resulting figure as '<timestamp>.png' under self.save_path.
        :param events: motmetrics events for this particular timestamp; rows with
            Type == 'SWITCH' mark identity switches (their HId is the predicted tracking id).
        :param timestamp: timestamp for the rendering; also used as the output file name.
        :param frame_gt: list of ground truth boxes.
        :param frame_pred: list of prediction boxes.
        """
        # Init.
        print('Rendering {}'.format(timestamp))
        switches = events[events.Type == 'SWITCH']
        switch_ids = switches.HId.values
        fig, ax = plt.subplots()

        # Plot GT boxes, all in black.
        for b in frame_gt:
            color = 'k'
            box = Box(b.ego_translation,
                      b.size,
                      Quaternion(b.rotation),
                      name=b.tracking_name,
                      token=b.tracking_id)
            box.render(ax,
                       view=np.eye(4),
                       colors=(color, color, color),
                       linewidth=1)

        # Plot predicted boxes.
        for b in frame_pred:
            box = Box(b.ego_translation,
                      b.size,
                      Quaternion(b.rotation),
                      name=b.tracking_name,
                      token=b.tracking_id)

            # Determine a pseudo-random but per-id-consistent color for this tracking id.
            # NOTE(review): hash() is salted per process (PYTHONHASHSEED), so colors are
            # stable within one run but may differ between runs — confirm this is intended.
            if b.tracking_id not in self.id2color:
                self.id2color[b.tracking_id] = (
                    float(hash(b.tracking_id + 'r') % 256) / 255,
                    float(hash(b.tracking_id + 'g') % 256) / 255,
                    float(hash(b.tracking_id + 'b') % 256) / 255)

            # Render box. Highlight identity switches with red front/side edges.
            color = self.id2color[b.tracking_id]
            if b.tracking_id in switch_ids:
                box.render(ax, view=np.eye(4), colors=('r', 'r', color))
            else:
                box.render(ax, view=np.eye(4), colors=(color, color, color))

        # Plot ego pose as a hollow circle at the origin of the 100m x 100m view.
        plt.scatter(0, 0, s=96, facecolors='none', edgecolors='k', marker='o')
        plt.xlim(-50, 50)
        plt.ylim(-50, 50)

        # Save to disk and close figure.
        fig.savefig(os.path.join(self.save_path, '{}.png'.format(timestamp)))
        plt.close(fig)
Exemplo n.º 2
0
    def render(self, events: DataFrame, timestamp: int, frame_gt: List[TrackingBox], frame_pred: List[TrackingBox], points=None, pose_record=None, cs_record=None, ifplotgt=False,\
                threshold=0.1, ifpltsco=False, outscen_class=False,nusc=None, ifplthis=False, pltve=False) \
            -> None:
        """
        Render function for a given scene timestamp
        :param events: motmetrics events for that particular
        :param timestamp: timestamp for the rendering
        :param frame_gt: list of ground truth boxes
        :param frame_pred: list of prediction boxes
        """
        # Init.
        #print('Rendering {}'.format(timestamp))
        switches = events[events.Type == 'SWITCH']
        switch_ids = switches.HId.values  #对应frame_pred的tracking_id
        switch_gt_ids = switches.OId.values  #对应GT的tracking_id
        FN = events[events.Type == 'MISS']
        #FN = FN.HId.values
        FN = FN.OId.values  #对应GT的tracking_id
        FP = events[events.Type == 'FP']
        FP = FP.HId.values  #对应frame_pred的tracking_id

        fig, ax = plt.subplots()
        #plt.style.use('dark_background')         #  黑  背景颜色
        plt.style.use('classic')  #  白  背景颜色

        points.render_height(ax,
                             view=np.eye(4),
                             x_lim=(-50, 50),
                             y_lim=(-50, 50),
                             marker_size=0.1)  #BEV
        #points.render_height(ax, view=np.eye(4) )#BEV
        #points = points.rotate(Quaternion( pose_record["rotation"]).inverse)
        sam_anno = []
        if len(frame_gt) > 0:
            sample = nusc.get('sample', frame_gt[0].sample_token)
            sample_annotation_tokens = sample['anns']  #标注
            for anno_token in sample_annotation_tokens:
                sam_anno.append(
                    nusc.get('sample_annotation',
                             anno_token)["instance_token"])
        vislev = {'v0-40': 0, 'v40-60': 1, 'v60-80': 2, 'v80-100': 3}
        #points.render_intensity(ax)
        # Plot GT boxes.
        if ifplotgt:

            for i, b in enumerate(frame_gt):
                color = 'k'
                #qua = tuple(self.list_add(list(b.rotation),[0.924,0.0,0.0,0.383]))
                box = Box(np.array(b.ego_translation, dtype=float),
                          b.size,
                          Quaternion(b.rotation),
                          name=b.tracking_name,
                          token=b.tracking_id)
                #qua = tuple(self.list_add(list(pose_record["rotation"]),[ 0.831,0.0,0.0,0.556]))
                #box.translate(-np.array(pose_record["translation"]))
                box.rotate(Quaternion(pose_record["rotation"]).inverse)
                # move box to sensor coord system
                box.translate(-np.array(cs_record["translation"]))
                box.rotate(Quaternion(cs_record["rotation"]).inverse)
                if outscen_class:
                    #####TrackingRenderer.gt_ptsnumrange = {'0-5nums':0, '5-10nums':0, '10-50nums':0, '50-200nums':0, '>200nums': 0 }     #car lidar点云数
                    #####TrackingRenderer.gt_ptsnumrange =  {'0-5nums':0, '5-10nums':0, '10-20nums':0, '20-30nums':0, '>30nums': 0 }     #ped lidar点云数
                    num_pts = frame_gt[i].num_pts
                    #####car
                    if TrackingRenderer.outscen == 'car':
                        if num_pts > 0 and num_pts <= 5:
                            TrackingRenderer.gt_ptsnumrange['0-5nums'] += 1
                        elif num_pts > 5 and num_pts <= 10:
                            TrackingRenderer.gt_ptsnumrange['5-10nums'] += 1
                        elif num_pts > 10 and num_pts <= 50:
                            TrackingRenderer.gt_ptsnumrange['10-50nums'] += 1
                        elif num_pts > 50 and num_pts <= 200:
                            TrackingRenderer.gt_ptsnumrange['50-200nums'] += 1
                        elif num_pts > 200:
                            TrackingRenderer.gt_ptsnumrange['>200nums'] += 1
                    else:
                        ####ped
                        if num_pts > 0 and num_pts <= 5:
                            TrackingRenderer.gt_ptsnumrange['0-5nums'] += 1
                        elif num_pts > 5 and num_pts <= 10:
                            TrackingRenderer.gt_ptsnumrange['5-10nums'] += 1
                        elif num_pts > 10 and num_pts <= 20:
                            TrackingRenderer.gt_ptsnumrange['10-20nums'] += 1
                        elif num_pts > 20 and num_pts <= 30:
                            TrackingRenderer.gt_ptsnumrange['20-30nums'] += 1
                        elif num_pts > 30:
                            TrackingRenderer.gt_ptsnumrange['>30nums'] += 1
                    if box.token in FN:
                        #####distance
                        dis = math.sqrt(box.center[0]**2 + box.center[1]**2)
                        if dis > 0 and dis <= 15:
                            TrackingRenderer.fn_disrange[
                                '<15m'] = TrackingRenderer.fn_disrange[
                                    '<15m'] + 1
                        elif dis > 15 and dis <= 30:
                            TrackingRenderer.fn_disrange[
                                '15-30m'] = TrackingRenderer.fn_disrange[
                                    '15-30m'] + 1
                        elif dis > 30 and dis <= 45:
                            TrackingRenderer.fn_disrange[
                                '30-45m'] = TrackingRenderer.fn_disrange[
                                    '30-45m'] + 1
                        elif dis > 45 and dis <= 54:
                            TrackingRenderer.fn_disrange[
                                '45-54m'] = TrackingRenderer.fn_disrange[
                                    '45-54m'] + 1
                        else:
                            TrackingRenderer.fn_disrange[
                                '-1'] = TrackingRenderer.fn_disrange['-1'] + 1
                        #####ve TrackingRenderer.fn_verange = {'0-0.1m/s':0, '0.1-2.5m/s':0, '2.5-5m/s':0, '5-10m/s':0, '>10m/s': 0 }            #car 绝对速度
                        #####   TrackingRenderer.fn_verange = {'0-0.1m/s':0, '0.1-1.0m/s':0, '1.0-1.5m/s':0, '1.5-2m/s':0, '>2m/s': 0 }          #ped 绝对速度
                        ve = math.sqrt(frame_gt[i].velocity[0]**2 +
                                       frame_gt[i].velocity[1]**2)
                        if TrackingRenderer.outscen == 'car':
                            if ve > 0 and ve <= 0.1:
                                TrackingRenderer.fn_verange['0-0.1m/s'] += 1
                            elif ve > 0.1 and ve <= 2.5:
                                TrackingRenderer.fn_verange['0.1-2.5m/s'] += 1
                            elif ve > 2.5 and ve <= 5:
                                TrackingRenderer.fn_verange['2.5-5m/s'] += 1
                            elif ve > 5 and ve <= 10:
                                TrackingRenderer.fn_verange['5-10m/s'] += 1
                            else:
                                TrackingRenderer.fn_verange['>10m/s'] += 1
                        else:
                            if ve > 0 and ve <= 0.1:
                                TrackingRenderer.fn_verange['0-0.1m/s'] += 1
                            elif ve > 0.1 and ve <= 1.0:
                                TrackingRenderer.fn_verange['0.1-1.0m/s'] += 1
                            elif ve > 1.0 and ve <= 1.5:
                                TrackingRenderer.fn_verange['1.0-1.5m/s'] += 1
                            elif ve > 1.5 and ve <= 2:
                                TrackingRenderer.fn_verange['1.5-2m/s'] += 1
                            else:
                                TrackingRenderer.fn_verange['>2m/s'] += 1
                        #####num_pts TrackingRenderer.fn_ptsnumrange = {'0-5nums':0, '5-10nums':0, '10-50nums':0, '50-200nums':0, '>200nums': 0 }     #car lidar点云数 抽样参考比例:0.21 0.23 0.26,0.2,0.1
                        #############TrackingRenderer.fn_ptsnumrange = {'0-5nums':0, '5-10nums':0, '10-20nums':0, '20-30nums':0, '>30nums': 0 }      #ped lidar点云数
                        num_pts = frame_gt[i].num_pts
                        if TrackingRenderer.outscen == 'car':
                            if num_pts > 0 and num_pts <= 5:
                                TrackingRenderer.fn_ptsnumrange['0-5nums'] += 1
                            elif num_pts > 5 and num_pts <= 10:
                                TrackingRenderer.fn_ptsnumrange[
                                    '5-10nums'] += 1
                            elif num_pts > 10 and num_pts <= 50:
                                TrackingRenderer.fn_ptsnumrange[
                                    '10-50nums'] += 1
                            elif num_pts > 50 and num_pts <= 200:
                                TrackingRenderer.fn_ptsnumrange[
                                    '50-200nums'] += 1
                            elif num_pts > 200:
                                TrackingRenderer.fn_ptsnumrange[
                                    '>200nums'] += 1
                            else:
                                TrackingRenderer.fn_ptsnumrange['-1'] += 1
                        else:
                            if num_pts > 0 and num_pts <= 5:
                                TrackingRenderer.fn_ptsnumrange['0-5nums'] += 1
                            elif num_pts > 5 and num_pts <= 10:
                                TrackingRenderer.fn_ptsnumrange[
                                    '5-10nums'] += 1
                            elif num_pts > 10 and num_pts <= 20:
                                TrackingRenderer.fn_ptsnumrange[
                                    '10-20nums'] += 1
                            elif num_pts > 20 and num_pts <= 30:
                                TrackingRenderer.fn_ptsnumrange[
                                    '20-30nums'] += 1
                            elif num_pts > 200:
                                TrackingRenderer.fn_ptsnumrange['>30nums'] += 1
                            else:
                                TrackingRenderer.fn_ptsnumrange['-1'] += 1
                        ######读取
                        #sample = nusc.get('sample', frame_gt[i].sample_token)
                        #sample_annotation_tokens = sample['anns'] #标注
                        try:
                            ind = sam_anno.index(b.tracking_id)
                            sample_annotation = nusc.get(
                                'sample_annotation',
                                sample_annotation_tokens[ind])
                            ####TrackingRenderer.vis_ratio = {'0-0.4':0, '0.4-0.6':0,  '0.6-0.8':0, '0.8-1.0':0}                                 #相机视角  0-40%, 40-60%, 60-80% and 80-100%  The visibility of an instance is the fraction of annotation visible in all 6 images.
                            visibility = nusc.get(
                                'visibility',
                                sample_annotation['visibility_token'])
                            vis_level = vislev[visibility["level"]]
                            if vis_level == 0:
                                TrackingRenderer.vis_ratio['0-0.4'] += 1
                            elif vis_level == 1:
                                TrackingRenderer.vis_ratio['0.4-0.6'] += 1
                            elif vis_level == 2:
                                TrackingRenderer.vis_ratio['0.6-0.8'] += 1
                            elif vis_level == 3:
                                TrackingRenderer.vis_ratio['0.8-1.0'] += 1
                            ####TrackingRenderer.gt_ratio = {'ang_muta':0, 've_muta':0,  'aug_other':0, 've_other':0, 'firfn_trk':0, 'nonfirfn_trk':0}                            #仅包含与上一帧持续追踪的样本,角度和速度突变分别与other相并
                            pre_token = sample_annotation['prev']
                            if pre_token == '':  #仅作为first_FN
                                TrackingRenderer.gt_ratio['firfn_trk'] += 1

                            else:
                                TrackingRenderer.gt_ratio['nonfirfn_trk'] += 1
                                pre_annotation = nusc.get(
                                    'sample_annotation', pre_token)
                                vari_ang = abs(
                                    R.from_quat(list(frame_gt[i].rotation)).
                                    as_euler('zxy', degrees=False)[0] -
                                    R.from_quat(
                                        list(pre_annotation['rotation'])
                                    ).as_euler('zxy', degrees=False)[0])
                                if vari_ang > 0.52:  #30度
                                    TrackingRenderer.gt_ratio['angvar>30'] += 1
                                elif 0.52 > vari_ang and vari_ang > 0.35:
                                    TrackingRenderer.gt_ratio[
                                        '30>angvar>20'] += 1
                                elif 0.35 > vari_ang and vari_ang > 0.17:
                                    TrackingRenderer.gt_ratio[
                                        '20>angvar>10'] += 1
                                elif vari_ang < 0.17:
                                    TrackingRenderer.gt_ratio['10>angvar'] += 1
                                else:
                                    pass
                                pre_ve = nusc.box_velocity(
                                    pre_annotation['token'])
                                ve_varity = abs(ve - math.sqrt(pre_ve[0]**2 +
                                                               pre_ve[1]**2))
                                if ve_varity > TrackingRenderer.mutave_thr[0]:
                                    TrackingRenderer.gt_ratio[
                                        'vevari>%s' %
                                        (TrackingRenderer.mutave_thr[0])] += 1
                                elif ve_varity < TrackingRenderer.mutave_thr[
                                        0] and ve_varity >= TrackingRenderer.mutave_thr[
                                            1]:
                                    TrackingRenderer.gt_ratio[
                                        '%s<vevari<%s' %
                                        (TrackingRenderer.mutave_thr[1],
                                         TrackingRenderer.mutave_thr[0])] += 1
                                elif ve_varity < TrackingRenderer.mutave_thr[
                                        1] and ve_varity >= TrackingRenderer.mutave_thr[
                                            2]:
                                    TrackingRenderer.gt_ratio[
                                        '%s<vevari<%s' %
                                        (TrackingRenderer.mutave_thr[2],
                                         TrackingRenderer.mutave_thr[1])] += 1
                                else:
                                    TrackingRenderer.gt_ratio[
                                        'vevari<%s' %
                                        TrackingRenderer.mutave_thr[2]] += 1
                        except ValueError:  #标注错误
                            TrackingRenderer.fault_datas += 1
                box.render(ax,
                           view=np.eye(4),
                           colors=(color, color, color),
                           linewidth=1)
        else:
            pass

        # Plot predicted boxes.
        pred_trackid = []
        for i, b in enumerate(frame_pred):
            box = Box(
                b.ego_translation,
                b.size,
                Quaternion(b.rotation),
                name=b.tracking_name,
                token=b.tracking_id,
                score=b.tracking_score,
            )

            # move box to ego vehicle coord system  before has done the translation
            box.rotate(Quaternion(pose_record["rotation"]).inverse)
            # move box to sensor coord system
            box.translate(-np.array(cs_record["translation"]))
            box.rotate(Quaternion(cs_record["rotation"]).inverse)
            pred_trackid.append(b.tracking_id)
            if outscen_class:
                if b.tracking_id in FP:
                    #####distance  TrackingRenderer.fp_disrange = {'<15m':0, '15-30m':0, '30-45m':0, '45-54m':0}
                    dis = math.sqrt(box.center[0]**2 + box.center[1]**2)
                    if dis > 0 and dis <= 15:
                        TrackingRenderer.fp_disrange[
                            '<15m'] = TrackingRenderer.fp_disrange['<15m'] + 1
                    elif dis > 15 and dis <= 30:
                        TrackingRenderer.fp_disrange[
                            '15-30m'] = TrackingRenderer.fp_disrange[
                                '15-30m'] + 1
                    elif dis > 30 and dis <= 45:
                        TrackingRenderer.fp_disrange[
                            '30-45m'] = TrackingRenderer.fp_disrange[
                                '30-45m'] + 1
                    elif dis > 45 and dis <= 54:
                        TrackingRenderer.fp_disrange[
                            '45-54m'] = TrackingRenderer.fp_disrange[
                                '45-54m'] + 1
                    else:
                        TrackingRenderer.fp_disrange[
                            '-1'] = TrackingRenderer.fp_disrange['-1'] + 1
                    #####ve TrackingRenderer.fp_verange = {'0-0.1m/s':0, '0.1-2.5m/s':0, '2.5-5m/s':0, '5-10m/s':0, '>10m/s': 0 }            #car 绝对速度
                    #####TrackingRenderer.fp_verange = {'0-0.1m/s':0, '0.1-1.0m/s':0, '1.0-1.5m/s':0, '1.5-2m/s':0, '>2m/s': 0 }         #ped 绝对速度
                    ve = math.sqrt(frame_pred[i].velocity[0]**2 +
                                   frame_pred[i].velocity[1]**2)
                    if TrackingRenderer.outscen == 'car':
                        if ve > 0 and ve <= 0.1:
                            TrackingRenderer.fp_verange['0-0.1m/s'] += 1
                        elif ve > 0.1 and ve <= 2.5:
                            TrackingRenderer.fp_verange['0.1-2.5m/s'] += 1
                        elif ve > 2.5 and ve <= 5:
                            TrackingRenderer.fp_verange['2.5-5m/s'] += 1
                        elif ve > 5 and ve <= 10:
                            TrackingRenderer.fp_verange['5-10m/s'] += 1
                        else:
                            TrackingRenderer.fp_verange['>10m/s'] += 1
                    else:
                        if ve > 0 and ve <= 0.1:
                            TrackingRenderer.fp_verange['0-0.1m/s'] += 1
                        elif ve > 0.1 and ve <= 1.0:
                            TrackingRenderer.fp_verange['0.1-1.0m/s'] += 1
                        elif ve > 1.0 and ve <= 1.5:
                            TrackingRenderer.fp_verange['1.0-1.5m/s'] += 1
                        elif ve > 1.5 and ve <= 2:
                            TrackingRenderer.fp_verange['1.5-2m/s'] += 1
                        else:
                            TrackingRenderer.fp_verange['>2m/s'] += 1
                    #####num_pts TrackingRenderer.fp_ptsnumrange = {'0-5nums':0, '5-10nums':0, '10-50nums':0, '50-200nums':0, '>200nums': 0 }     #car lidar点云数 抽样参考比例:0.21 0.23 0.26,0.2,0.1
                    ########## TrackingRenderer.fp_ptsnumrange =  {'0-5nums':0, '5-10nums':0, '10-20nums':0, '20-30nums':0, '>30nums': 0 }     #ped lidar点云数
                    points_xyzr = np.stack(points.points, 1)
                    points_xyz = points_xyzr[:, :3]

                    points_mask = points_in_box(box, points.points[:3])
                    mask_indx = np.arange(points_mask.shape[0])
                    mask_indx = mask_indx[points_mask]
                    num_pts = mask_indx.shape[0]
                    if TrackingRenderer.outscen == 'car':
                        if num_pts > 0 and num_pts <= 5:
                            TrackingRenderer.fp_ptsnumrange['0-5nums'] += 1
                        elif num_pts > 5 and num_pts <= 10:
                            TrackingRenderer.fp_ptsnumrange['5-10nums'] += 1
                        elif num_pts > 10 and num_pts <= 50:
                            TrackingRenderer.fp_ptsnumrange['10-50nums'] += 1
                        elif num_pts > 50 and num_pts <= 200:
                            TrackingRenderer.fp_ptsnumrange['50-200nums'] += 1
                        elif num_pts > 200:
                            TrackingRenderer.fp_ptsnumrange['>200nums'] += 1
                    else:
                        if num_pts > 0 and num_pts <= 5:
                            TrackingRenderer.fp_ptsnumrange['0-5nums'] += 1
                        elif num_pts > 5 and num_pts <= 10:
                            TrackingRenderer.fp_ptsnumrange['5-10nums'] += 1
                        elif num_pts > 10 and num_pts <= 20:
                            TrackingRenderer.fp_ptsnumrange['10-20nums'] += 1
                        elif num_pts > 20 and num_pts <= 30:
                            TrackingRenderer.fp_ptsnumrange['20-30nums'] += 1
                        elif num_pts > 30:
                            TrackingRenderer.fp_ptsnumrange['>30nums'] += 1
                    #####TrackingRenderer.fpscorrange = {'0-0.1':0, '0.2-0.4':0, '0.4-0.6':0,'0.6-1.0':0}
                    score = box.score
                    if score >= 0 and score <= 0.1:
                        TrackingRenderer.fpscorrange['0-0.1'] += 1
                    if score >= 0.2 and score <= 0.4:
                        TrackingRenderer.fpscorrange['0.2-0.4'] += 1
                    if score >= 0.4 and score <= 0.6:
                        TrackingRenderer.fpscorrange['0.4-0.6'] += 1
                    if score >= 0.6 and score <= 1.0:
                        TrackingRenderer.fpscorrange['0.6-1.0'] += 1
                    #####TrackingRenderer.trk_ratio = {'ang_muta':0, 've_muta':0, 'aug_other':0, 've_other':0}                            #仅包含与上一帧持续追踪的样本,角度和速度突变分别与other相并
                    if box.token in TrackingRenderer.his_trackid:
                        pre_box = TrackingRenderer.his_track[
                            TrackingRenderer.his_trackid.index(box.token)]
                        vari_ang = abs(
                            (R.from_quat(list(frame_pred[i].rotation)).
                             as_euler('zxy', degrees=False)[0]) -
                            (R.from_quat(list(pre_box.rotation)).as_euler(
                                'zxy', degrees=False)[0]))
                        if vari_ang > 0.52:  #30度
                            TrackingRenderer.trk_ratio['angvar>30'] += 1
                        elif 0.52 > vari_ang > 0.35:
                            TrackingRenderer.trk_ratio['30>angvar>20'] += 1
                        elif 0.35 > vari_ang > 0.17:
                            TrackingRenderer.trk_ratio['20>angvar>10'] += 1
                        elif vari_ang < 0.17:
                            TrackingRenderer.trk_ratio['10>angvar'] += 1
                        pre_ve = pre_box.velocity
                        ve = frame_pred[i].velocity
                        ve_varity = abs(
                            math.sqrt(ve[0]**2 + ve[1]**2) -
                            math.sqrt(pre_ve[0]**2 + pre_ve[1]**2))
                        if ve_varity > TrackingRenderer.mutave_thr[
                                0]:  # car均匀加速度为2.778m/s^2  3*0.5s=1.5
                            TrackingRenderer.trk_ratio[
                                'vevari>%s' %
                                TrackingRenderer.mutave_thr[0]] += 1
                        elif ve_varity < TrackingRenderer.mutave_thr[
                                0] and ve_varity >= TrackingRenderer.mutave_thr[
                                    1]:
                            TrackingRenderer.trk_ratio[
                                '%s<vevari<%s' %
                                (TrackingRenderer.mutave_thr[1],
                                 TrackingRenderer.mutave_thr[0])] += 1
                        elif ve_varity < TrackingRenderer.mutave_thr[
                                1] and ve_varity >= TrackingRenderer.mutave_thr[
                                    2]:
                            TrackingRenderer.trk_ratio[
                                '%s<vevari<%s' %
                                (TrackingRenderer.mutave_thr[2],
                                 TrackingRenderer.mutave_thr[1])] += 1
                        else:
                            TrackingRenderer.trk_ratio[
                                'vevari<%s' %
                                TrackingRenderer.mutave_thr[2]] += 1

            # Determine color for this tracking id.
            if b.tracking_id not in self.id2color.keys():
                self.id2color[b.tracking_id] = (
                    float(hash(b.tracking_id + 'r') % 256) / 255,
                    float(hash(b.tracking_id + 'g') % 256) / 255,
                    float(hash(b.tracking_id + 'b') % 256) / 255)

            if ifplthis:
                box_for_path = copy.deepcopy(box)
                box_for_path.rotate(Quaternion(cs_record["rotation"]))
                box_for_path.translate(np.array(cs_record["translation"]))

                box_for_path.rotate(Quaternion(pose_record["rotation"]))
                box_for_path.translate(np.array(
                    pose_record["translation"]))  #到全局
                # 记录轨迹坐标
                if b.tracking_id in self.track_path.keys():
                    self.track_path[b.tracking_id].append(box_for_path)
                else:
                    self.track_path[b.tracking_id] = [box_for_path]

            # Render box. Highlight identity switches in red.
            if b.tracking_id in switch_ids:
                color = self.id2color[b.tracking_id]
                box.render(ax, view=np.eye(4), colors=('r', 'r', color))
                if outscen_class:
                    ###TrackingRenderer.ids_verange = {'0-0.1m/s':0, '0.1-2.5m/s':0, '2.5-5m/s':0, '5-10m/s':0, '>10m/s': 0 }           #car 绝对速度
                    ###TrackingRenderer.ids_verange = {'0-0.1m/s':0, '0.1-1.0m/s':0, '1.0-1.5m/s':0, '1.5-2m/s':0, '>2m/s': 0 }        #ped 绝对速度
                    ve = math.sqrt(frame_pred[i].velocity[0]**2 +
                                   frame_pred[i].velocity[1]**2)
                    if TrackingRenderer.outscen == 'car':
                        if ve > 0 and ve <= 0.1:
                            TrackingRenderer.ids_verange['0-0.1m/s'] += 1
                        elif ve > 0.1 and ve <= 2.5:
                            TrackingRenderer.ids_verange['0.1-2.5m/s'] += 1
                        elif ve > 2.5 and ve <= 5:
                            TrackingRenderer.ids_verange['2.5-5m/s'] += 1
                        elif ve > 5 and ve <= 10:
                            TrackingRenderer.ids_verange['5-10m/s'] += 1
                        else:
                            TrackingRenderer.ids_verange['>10m/s'] += 1
                    else:
                        if ve > 0 and ve <= 0.1:
                            TrackingRenderer.ids_verange['0-0.1m/s'] += 1
                        elif ve > 0.1 and ve <= 1.0:
                            TrackingRenderer.ids_verange['0.1-1.0m/s'] += 1
                        elif ve > 1.0 and ve <= 1.5:
                            TrackingRenderer.ids_verange['1.0-1.5m/s'] += 1
                        elif ve > 1.5 and ve <= 2:
                            TrackingRenderer.ids_verange['1.5-2m/s'] += 1
                        else:
                            TrackingRenderer.ids_verange['>2m/s'] += 1
                    ####TrackingRenderer.ids_factratio = {'delay_trk':0, 'del_oth_trk':0, 'reappear':0, 'reapother':0, 've_muta':0, 've_other':0, 'reapdeltrk':0 }
                    indx = np.where(switch_ids == b.tracking_id)
                    gt_id = switch_gt_ids[indx]
                    try:
                        x = sam_anno.index(gt_id)
                        #sample = nusc.get('sample', frame_gt[x].sample_token)
                        #sample_annotation_tokens = sample['anns'] #标注
                        sample_annotation = nusc.get(
                            'sample_annotation', sample_annotation_tokens[x])
                        if sample_annotation['prev'] == '':  #参考意义不大
                            TrackingRenderer.ids_factratio['del_oth_trk'] += 1
                        else:
                            TrackingRenderer.ids_factratio['delay_trk'] += 1
                            visibility = nusc.get(
                                'visibility',
                                sample_annotation['visibility_token'])
                            vis_level = vislev[visibility["level"]]
                            pre_annotation = nusc.get(
                                "sample_annotation", sample_annotation['prev'])
                            pre_vis = nusc.get(
                                'visibility',
                                pre_annotation['visibility_token'])
                            pre_vislevel = vislev[pre_vis["level"]]
                            if vis_level > pre_vislevel or (vis_level
                                                            == pre_vislevel
                                                            and vis_level < 3):
                                TrackingRenderer.ids_factratio['reappear'] += 1
                            elif vis_level == pre_vislevel:
                                TrackingRenderer.ids_factratio[
                                    'reapdeltrk'] += 1
                            else:
                                TrackingRenderer.ids_factratio[
                                    'reapother'] += 1
                            pre_ve = nusc.box_velocity(pre_annotation['token'])
                            ve = nusc.box_velocity(sample_annotation['token'])
                            ve_varity = abs(
                                math.sqrt(ve[0]**2 + ve[1]**2) -
                                math.sqrt(pre_ve[0]**2 + pre_ve[1]**2))
                            if ve_varity > TrackingRenderer.mutave_thr[
                                    0]:  # car均匀加速度为2.778m/s^2  3*0.5s=1.5
                                TrackingRenderer.ids_factratio[
                                    'vevari>%s' %
                                    (TrackingRenderer.mutave_thr[0])] += 1
                            elif ve_varity < TrackingRenderer.mutave_thr[
                                    0] and ve_varity >= TrackingRenderer.mutave_thr[
                                        1]:
                                TrackingRenderer.ids_factratio[
                                    '%s<vevari<%s' %
                                    (TrackingRenderer.mutave_thr[1],
                                     TrackingRenderer.mutave_thr[0])] += 1
                            elif ve_varity < TrackingRenderer.mutave_thr[
                                    1] and ve_varity >= TrackingRenderer.mutave_thr[
                                        2]:
                                TrackingRenderer.ids_factratio[
                                    '%s<vevari<%s' %
                                    (TrackingRenderer.mutave_thr[2],
                                     TrackingRenderer.mutave_thr[1])] += 1
                            else:
                                TrackingRenderer.ids_factratio[
                                    'vevari<%s' %
                                    TrackingRenderer.mutave_thr[2]] += 1
                    except:  #标注错误
                        pass
            else:
                color = self.id2color[b.tracking_id]
                box.render(ax, view=np.eye(4), colors=(color, color, color))

            # Render other infos
            if ifpltsco:
                corners = view_points(box.corners(),
                                      view=np.eye(4),
                                      normalize=False)[:2, :]

                # ax.text(4,5,"hhaa0.8", fontsize=5)
                # ax.text(4,5,"%.2f\n%.2f,%.2f"%(b.tracking_score,b.velocity[0],b.velocity[1]), fontsize=5)

                ax.text(box.center[0],
                        box.center[1],
                        "%.2f\nvx=%.2f,vy=%.2f" %
                        (b.tracking_score, b.velocity[0], b.velocity[1]),
                        fontdict={
                            'size': '6',
                            'color': 'b'
                        })
                #ax.text(box.center[0],box.center[1],"%.2f\n%.2f,%.2f"%(b.tracking_score,b.velocity[0],b.velocity[1]), fontsize=5)
            #if pltve:

        #删去当前帧多余轨迹线
        keys = list(self.track_path.keys())
        for key in keys:
            if key not in pred_trackid:
                self.track_path.pop(key)
        # 画历史轨迹线:
        if ifplthis:
            for id in self.track_path.keys():
                color = self.id2color[id]
                for box_path in self.track_path[id]:
                    #转到当前帧局部
                    # move box to ego vehicle coord system  before has done the translation
                    box_path.translate(-np.array(pose_record["translation"]))
                    box_path.rotate(
                        Quaternion(pose_record["rotation"]).inverse)
                    # move box to sensor coord system
                    box_path.translate(-np.array(cs_record["translation"]))
                    box_path.rotate(Quaternion(cs_record["rotation"]).inverse)

                    ax.scatter(box_path.center[0], box_path.center[1], 10,
                               color)
                    #转回全局
                    box_path.rotate(Quaternion(cs_record["rotation"]))
                    box_path.translate(np.array(cs_record["translation"]))

                    box_path.rotate(Quaternion(pose_record["rotation"]))
                    box_path.translate(np.array(pose_record["translation"]))

        TrackingRenderer.his_track = frame_pred
        TrackingRenderer.his_trackid = pred_trackid
        # Plot MOTA metrics. ly
        # 目标位置 左上角,距离 Y 轴 0.01 倍距离,距离 X 轴 0.95倍距离
        self.cursumfp += len(FP)  #当前场景累积值
        self.cursumfn += len(FN)

        #print("FN=%d,FP=%d,switch_ids=%d,cursumfp=%d,cursumfn=%d" % ( len(FN), len(FP), len(switch_ids), self.cursumfp, self.cursumfn ))
        #ax.text(0.01, 0.95, "IDS:%d\nFP:%d\nFN:%d\ncur_sce sumfp:%d sumfn:%d\nthreshold:%f"%(len(switch_ids),len(FP),len(FN),self.cursumfp,self.cursumfn,threshold), transform=ax.transAxes, fontdict={'size': '10', 'color': 'b'})

        # Plot ego pose.
        plt.scatter(0, 0, s=96, facecolors='none', edgecolors='k', marker='o')
        plt.xlim(-50, 50)
        plt.ylim(-50, 50)
        plt.axis('off')
        # Save to disk and close figure.
        fig.savefig(os.path.join(self.save_path, '{}.png'.format(timestamp)))
        plt.close(fig)
Exemplo n.º 3
0
def vis_model_per_sample_data(bev_input_data,
                              data_dict,
                              frame_skip=3,
                              voxel_size=(0.25, 0.25, 0.4),
                              loaded_models=None,
                              which_model="MotionNet",
                              model_path=None,
                              img_save_dir=None,
                              use_adj_frame_pred=False,
                              use_motion_state_pred_masking=False,
                              frame_idx=0,
                              disp=True):
    """
    Visualize the prediction (ie, displacement field) results.

    Renders three side-by-side panels: the raw LIDAR points with GT boxes,
    the ground-truth displacement field, and the predicted displacement
    field, then saves the figure as '<frame_idx>.png' under img_save_dir.

    bev_input_data: the preprocessed sparse bev data
    data_dict: a dictionary storing the point cloud data and annotations
    frame_skip: how many frames we want to skip for future frames
    voxel_size: the size of each voxel
    loaded_models: the model which has loaded the pretrained weights
    which_model: which model to apply ['MotionNet'/'MotionNetMGDA']
    model_path: the path to the pretrained model
    img_save_dir: the directory for saving the predicted image
    use_adj_frame_pred: whether to predict the relative offsets between two adjacent frames
    use_motion_state_pred_masking: whether to threshold the prediction with motion state estimation results
    frame_idx: used for specifying the name of saved image frames
    disp: whether to immediately show the predicted results

    Raises ValueError when model_path or img_save_dir is missing.
    NOTE: mutates all_disp_field_gt / disp_pred in place when thresholding
    small displacements to static.
    """
    if model_path is None:
        raise ValueError("Need to specify saved model path.")
    if img_save_dir is None:
        raise ValueError("Need to specify image save path.")

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    fig, ax = plt.subplots(1, 3, figsize=(20, 8))

    # Select the pre-trained network(s): MotionNet is a single model,
    # MotionNetMGDA is split into a shared encoder plus a task head.
    if which_model == "MotionNet":
        model = loaded_models[0]
    else:
        model_encoder = loaded_models[0]
        model_head = loaded_models[1]

    # Unpack the preprocessed BEV sample for the network.
    padded_voxel_points, all_disp_field_gt, all_valid_pixel_maps,\
        non_empty_map, pixel_cat_map_gt, num_past_pcs, num_future_pcs = bev_input_data

    # Add the batch dimension expected by the network.
    padded_voxel_points = torch.unsqueeze(padded_voxel_points, 0).to(device)

    # Make prediction
    if which_model == "MotionNet":
        model.eval()
    else:
        model_encoder.eval()
        model_head.eval()

    with torch.no_grad():
        if which_model == "MotionNet":
            disp_pred, cat_pred, motion_pred = model(padded_voxel_points)
        else:
            shared_feats = model_encoder(padded_voxel_points)
            disp_pred, cat_pred, motion_pred = model_head(shared_feats)

        disp_pred = disp_pred.cpu().numpy()
        # (T, 2, h, w) -> (T, h, w, 2) so the last axis is the (dx, dy) vector.
        disp_pred = np.transpose(disp_pred, (0, 2, 3, 1))
        cat_pred = np.squeeze(cat_pred.cpu().numpy(), 0)

        if use_adj_frame_pred:  # The prediction are the displacement between adjacent frames
            # Accumulate adjacent-frame offsets so each frame becomes an
            # absolute displacement w.r.t. the first frame.
            for c in range(1, disp_pred.shape[0]):
                disp_pred[c, ...] = disp_pred[c, ...] + disp_pred[c - 1, ...]

        if use_motion_state_pred_masking:
            # Zero-out displacement for cells predicted static (motion class 0)
            # or classified as background while being non-empty.
            motion_pred_numpy = motion_pred.cpu().numpy()
            motion_pred_numpy = np.argmax(motion_pred_numpy, axis=1)
            motion_mask = motion_pred_numpy == 0

            cat_pred_numpy = np.argmax(cat_pred, axis=0)
            cat_mask = np.logical_and(cat_pred_numpy == 0, non_empty_map == 1)
            cat_mask = np.expand_dims(cat_mask, 0)

            cat_weight_map = np.ones_like(motion_pred_numpy, dtype=np.float32)
            cat_weight_map[motion_mask] = 0.0
            cat_weight_map[cat_mask] = 0.0
            cat_weight_map = cat_weight_map[:, :, :,
                                            np.newaxis]  # (1, h, w. 1)

            disp_pred = disp_pred * cat_weight_map

    # ------------------------- Visualization -------------------------
    # --- Load the point cloud data and annotations ---
    num_sweeps = data_dict['num_sweeps']
    times = data_dict['times']
    # Past sweeps have non-negative timestamps, future sweeps negative ones.
    num_past_sweeps = len(np.where(times >= 0)[0])
    num_future_sweeps = len(np.where(times < 0)[0])
    assert num_past_sweeps + num_future_sweeps == num_sweeps, "The number of sweeps is incorrect!"

    # Load point cloud
    pc_list = []

    for i in range(num_sweeps):
        pc = data_dict['pc_' + str(i)]
        pc_list.append(pc)

    # Reorder the pc, and skip sample frames if wanted
    tmp_pc_list_1 = pc_list[0:num_past_sweeps:(frame_skip + 1)]
    tmp_pc_list_1 = tmp_pc_list_1[::-1]
    tmp_pc_list_2 = pc_list[(num_past_sweeps + frame_skip)::(frame_skip + 1)]
    pc_list = tmp_pc_list_1 + tmp_pc_list_2
    num_past_pcs = len(tmp_pc_list_1)

    # Load box annotations, and reorder and skip some annotations if wanted
    num_instances = data_dict['num_instances']
    instance_box_list = list()

    for i in range(num_instances):
        instance = data_dict['instance_boxes_' + str(i)]

        # Reorder the boxes so they align with the reordered/subsampled pcs.
        tmp_instance = np.zeros((len(pc_list), instance.shape[1]),
                                dtype=np.float32)
        tmp_instance[(num_past_pcs -
                      1)::-1] = instance[0:num_past_sweeps:(frame_skip + 1)]
        tmp_instance[num_past_pcs:] = instance[(num_past_sweeps +
                                                frame_skip)::(frame_skip + 1)]
        instance = tmp_instance[:]

        instance_box_list.append(instance)

    # Draw the LIDAR and quiver plots
    # The distant points are very sparse and not reliable. We do not show them.
    border_meter = 4
    border_pixel = border_meter * 4
    x_lim = [-(32 - border_meter), (32 - border_meter)]
    y_lim = [-(32 - border_meter), (32 - border_meter)]

    # We only show the cells having one-hot category vectors
    max_prob = np.amax(pixel_cat_map_gt, axis=-1)
    filter_mask = max_prob == 1.0
    pixel_cat_map = np.argmax(
        pixel_cat_map_gt,
        axis=-1) + 1  # category starts from 1 (background), etc
    # NOTE: np.int was removed in NumPy 1.24; the builtin int is the exact
    # equivalent (platform default integer dtype).
    pixel_cat_map = (pixel_cat_map * non_empty_map * filter_mask).astype(int)

    cat_pred = np.argmax(cat_pred, axis=0) + 1
    cat_pred = (cat_pred * non_empty_map * filter_mask).astype(int)

    # --- Visualization ---
    # Show the current (latest past) frame in the left panel.
    idx = num_past_pcs - 1

    points = pc_list[idx]

    ax[0].scatter(points[0, :], points[1, :], c=points[2, :], s=1)
    ax[0].set_xlim(x_lim[0], x_lim[1])
    ax[0].set_ylim(y_lim[0], y_lim[1])
    ax[0].axis('off')
    ax[0].set_aspect('equal')
    ax[0].title.set_text('LIDAR data')

    for j in range(num_instances):
        inst = instance_box_list[j]

        box_data = inst[idx]
        # Skip instances without a valid annotation at this frame.
        if np.isnan(box_data).any():
            continue

        box = Box(center=box_data[0:3],
                  size=box_data[3:6],
                  orientation=Quaternion(box_data[6:]))
        box.render(ax[0])

    # Plot quiver. We only show non-empty vectors. Plot each category.
    field_gt = all_disp_field_gt[-1]
    idx_x = np.arange(field_gt.shape[0])
    idx_y = np.arange(field_gt.shape[1])
    idx_x, idx_y = np.meshgrid(idx_x, idx_y, indexing='ij')
    qk = [None] * len(color_map)  # for quiver key

    for k in range(len(color_map)):
        # ------------------------ Ground-truth ------------------------
        mask = pixel_cat_map == (k + 1)

        # For cells with very small movements, we threshold them to be static
        field_gt_norm = np.linalg.norm(field_gt, ord=2, axis=-1)  # out: (h, w)
        thd_mask = field_gt_norm <= 0.4
        field_gt[thd_mask, :] = 0

        # Get the displacement field
        X = idx_x[mask]
        Y = idx_y[mask]
        U = field_gt[:, :, 0][mask] / voxel_size[
            0]  # the distance between pixels is w.r.t. grid size (e.g., 0.2m)
        V = field_gt[:, :, 1][mask] / voxel_size[1]

        qk[k] = ax[1].quiver(X,
                             Y,
                             U,
                             V,
                             angles='xy',
                             scale_units='xy',
                             scale=1,
                             color=color_map[k])
        ax[1].quiverkey(qk[k],
                        X=0.0 + k / 5.0,
                        Y=1.1,
                        U=20,
                        label=cat_names[k],
                        labelpos='E')
        ax[1].set_xlim(border_pixel, field_gt.shape[0] - border_pixel)
        ax[1].set_ylim(border_pixel, field_gt.shape[1] - border_pixel)
        ax[1].set_aspect('equal')
        ax[1].title.set_text('Ground-truth')
        ax[1].axis('off')

        # ------------------------ Prediction ------------------------
        # Show the prediction results. We show the cells corresponding to the non-empty one-hot gt cells.
        mask_pred = cat_pred == (k + 1)
        field_pred = disp_pred[
            -1]  # Show last prediction, ie., the 20-th frame

        # For cells with very small movements, we threshold them to be static
        field_pred_norm = np.linalg.norm(field_pred, ord=2,
                                         axis=-1)  # out: (h, w)
        thd_mask = field_pred_norm <= 0.4
        field_pred[thd_mask, :] = 0

        # We use the same indices as the ground-truth, since we are currently focused on the foreground
        X_pred = idx_x[mask_pred]
        Y_pred = idx_y[mask_pred]
        U_pred = field_pred[:, :, 0][mask_pred] / voxel_size[0]
        V_pred = field_pred[:, :, 1][mask_pred] / voxel_size[1]

        ax[2].quiver(X_pred,
                     Y_pred,
                     U_pred,
                     V_pred,
                     angles='xy',
                     scale_units='xy',
                     scale=1,
                     color=color_map[k])
        ax[2].set_xlim(border_pixel, field_pred.shape[0] - border_pixel)
        ax[2].set_ylim(border_pixel, field_pred.shape[1] - border_pixel)
        ax[2].set_aspect('equal')
        ax[2].title.set_text('Prediction')
        ax[2].axis('off')

    print("finish sample {}".format(frame_idx))
    fig.savefig(os.path.join(img_save_dir, str(frame_idx) + '.png'))

    if disp:
        plt.pause(0.02)
    ax[0].clear()
    ax[1].clear()
    ax[2].clear()

    if disp:
        plt.show()