# Example no. 1 (score: 0)
def track_vis(gt_annos, dt_annos, output_dir, track_id):
    """Visualize a single track's trajectory in BEV (matplotlib) and 3D (mayavi).

    Args:
        gt_annos: list of ground-truth annotation dicts, or None for the test
            split (then only detections are drawn).
        dt_annos: list of detection annotation dicts; dt_annos[0] must carry
            ``metadata["image_prefix"]`` pointing at the dataset root.
        output_dir: directory where the trajectory images are written.
        track_id: id of the track to plot.
    """
    root_path = dt_annos[0]["metadata"]["image_prefix"]

    # Ground-truth BEV figure.
    plt.style.use(['science', 'ieee', 'retro'])
    gt_plt = plt.figure()
    plt.xlim(-40, 40)
    plt.ylim(-40, 40)
    plt.rcParams['figure.dpi'] = 500
    plt.rcParams['savefig.dpi'] = 500
    gt_ax = gt_plt.add_subplot(1, 1, 1)

    # Detection BEV figure (tighter limits when no ground truth is available).
    plt.style.use(['science', 'ieee', 'retro'])
    dt_plt = plt.figure()
    plt.xlim(-40, 40)
    plt.ylim(-40, 40)
    if gt_annos is None:
        plt.xlim(-20, 20)
        plt.ylim(-20, 20)
    plt.rcParams['figure.dpi'] = 500
    plt.rcParams['savefig.dpi'] = 500
    dt_ax = dt_plt.add_subplot(1, 1, 1)

    # 3D scene with the raw point cloud as background.
    fig = mlab.figure(figure=None, bgcolor=(0, 0, 0), fgcolor=None, engine=None, size=(1600, 1000))
    # NOTE(review): hard-coded point-cloud path — consider deriving it from root_path.
    pcd = o3d.io.read_point_cloud("/Extra/zhangmeng/3d_detection/BBOX_x4_track/testing/Lidar/PC_0.pcd")
    points = np.asarray(pcd.points)
    fig = draw_lidar(points, color=None, fig=fig)

    if gt_annos is not None:
        overlap, _, _, _ = calculate_iou_partly(gt_annos, dt_annos, 2, 50, 2, 0.5)
        dataset = lvx_object(root_path)
        # enumerate/zip replaces the original's repeated O(n) list.index()
        # lookups (assumes frame tokens are unique, as the original did implicitly).
        for count, (gt_anno, dt_anno) in enumerate(zip(gt_annos, dt_annos)):
            gt_objects = dataset.get_label_objects(gt_anno)
            dt_objects = dataset.get_label_objects(dt_anno)
            show_trajectory(gt_objects, dt_objects, gt_ax, dt_ax, overlap[count], track_id)
            # Draw the 3D boxes for every 5th frame only, to keep the scene readable.
            if count % 5 == 0:
                fig = draw_track_3d(dt_objects, fig, track_id)
    else:
        dataset = lvx_object(root_path, 'testing')
        for dt_anno in dt_annos:
            dt_objects = dataset.get_label_objects(dt_anno)
            show_trajectory([], dt_objects, None, dt_ax, [], track_id)

    gt_img_path = os.path.join(output_dir, f"gt_tracject_{track_id}.png")
    gt_plt.savefig(gt_img_path)
    dt_img_path = os.path.join(output_dir, f"dt_tracject_{track_id}.pdf")
    dt_plt.savefig(dt_img_path)

    # BUG FIX: the original wrote the mayavi scene to gt_img_path, clobbering
    # the matplotlib ground-truth figure saved above; give it its own file.
    mlab.savefig(os.path.join(output_dir, f"track3d_{track_id}.png"))
    mlab.clf()
# Example no. 2 (score: 0)
def eval_class_v3(
    gt_annos,
    dt_annos,
    current_classes,
    difficultys,
    metric,
    min_overlaps,
    compute_aos=False,
    z_axis=1,
    z_center=1.0,
    num_parts=50,
):
    """Kitti eval. support 2d/bev/3d/aos eval. support 0.5:0.05:0.95 coco AP.

    Args:
        gt_annos: dict, must from get_label_annos() in kitti_common.py
        dt_annos: dict, must from get_label_annos() in kitti_common.py
        current_classes: list of class ints, 0: car, 1: pedestrian, 2: cyclist
        difficultys: list of eval difficulty ints, 0: easy, 1: normal, 2: hard
        metric: eval type. 0: bbox, 1: bev, 2: 3d
        min_overlaps: float array of min overlaps, indexed
            [num_minoverlap, metric, class]. official:
            [[0.7, 0.5, 0.5], [0.7, 0.5, 0.5], [0.7, 0.5, 0.5]]
        compute_aos: if True, also accumulate average orientation similarity.
        z_axis: vertical-axis index forwarded to calculate_iou_partly.
        z_center: relative box origin along z_axis forwarded to
            calculate_iou_partly — TODO confirm exact semantics there.
        num_parts: int. a parameter for fast calculate algorithm

    Returns:
        dict with "recall", "precision", "orientation" (aos), "thresholds"
        and "min_overlaps"; the first four are shaped
        [num_class, num_difficulty, num_minoverlap, N_SAMPLE_PTS].
        NOTE(review): "recall" is returned all-zeros, because the
        interpolation code that would fill it is commented out below.
    """
    assert len(gt_annos) == len(dt_annos)
    num_examples = len(gt_annos)
    # Chunk the dataset so the pairwise IoU matrices are computed part-by-part.
    split_parts = get_split_parts(num_examples, num_parts)
    split_parts = [i for i in split_parts if i != 0]

    # dt_annos is passed first, so overlaps[i] is oriented [det, gt] per image.
    rets = calculate_iou_partly(dt_annos,
                                gt_annos,
                                metric,
                                num_parts,
                                z_axis=z_axis,
                                z_center=z_center)
    overlaps, parted_overlaps, total_dt_num, total_gt_num = rets
    # Fixed number of score-threshold sample points (width of the arrays below).
    N_SAMPLE_PTS = 41
    num_minoverlap = len(min_overlaps)
    num_class = len(current_classes)
    num_difficulty = len(difficultys)
    precision = np.zeros(
        [num_class, num_difficulty, num_minoverlap, N_SAMPLE_PTS])
    recall = np.zeros(
        [num_class, num_difficulty, num_minoverlap, N_SAMPLE_PTS])
    aos = np.zeros([num_class, num_difficulty, num_minoverlap, N_SAMPLE_PTS])
    all_thresholds = np.zeros(
        [num_class, num_difficulty, num_minoverlap, N_SAMPLE_PTS])
    for m, current_class in enumerate(current_classes):
        for l, difficulty in enumerate(difficultys):
            # clean_data is a module-level callable (defined outside this
            # chunk) that filters annotations per class/difficulty.
            rets = prepare_data(
                gt_annos,
                dt_annos,
                current_class,
                difficulty=difficulty,
                clean_data=clean_data,
            )
            (
                gt_datas_list,
                dt_datas_list,
                ignored_gts,
                ignored_dets,
                dontcares,
                total_dc_num,
                total_num_valid_gt,
            ) = rets
            for k, min_overlap in enumerate(min_overlaps[:, metric, m]):
                # Pass 1: collect matched-detection scores per image
                # (compute_fp=False) to derive the score thresholds at which
                # the PR curve will be sampled.
                thresholdss = []
                for i in range(len(gt_annos)):
                    rets = compute_statistics_jit(
                        overlaps[i],
                        gt_datas_list[i],
                        dt_datas_list[i],
                        ignored_gts[i],
                        ignored_dets[i],
                        dontcares[i],
                        metric,
                        min_overlap=min_overlap,
                        thresh=0.0,
                        compute_fp=False,
                    )
                    tp, fp, fn, similarity, thresholds = rets
                    thresholdss += thresholds.tolist()
                thresholdss = np.array(thresholdss)
                thresholds = get_thresholds(thresholdss, total_num_valid_gt)
                thresholds = np.array(thresholds)
                # print(thresholds)
                all_thresholds[m, l, k, :len(thresholds)] = thresholds
                # pr accumulates one [tp, fp, fn, similarity] row per threshold,
                # mutated in place by fused_compute_statistics below.
                pr = np.zeros([len(thresholds), 4])
                idx = 0
                # Pass 2: accumulate statistics chunk-wise at every threshold
                # using the pre-computed per-part IoU matrices.
                for j, num_part in enumerate(split_parts):
                    gt_datas_part = np.concatenate(
                        gt_datas_list[idx:idx + num_part], 0)
                    dt_datas_part = np.concatenate(
                        dt_datas_list[idx:idx + num_part], 0)
                    dc_datas_part = np.concatenate(
                        dontcares[idx:idx + num_part], 0)
                    ignored_dets_part = np.concatenate(
                        ignored_dets[idx:idx + num_part], 0)
                    ignored_gts_part = np.concatenate(
                        ignored_gts[idx:idx + num_part], 0)
                    fused_compute_statistics(
                        parted_overlaps[j],
                        pr,
                        total_gt_num[idx:idx + num_part],
                        total_dt_num[idx:idx + num_part],
                        total_dc_num[idx:idx + num_part],
                        gt_datas_part,
                        dt_datas_part,
                        dc_datas_part,
                        ignored_gts_part,
                        ignored_dets_part,
                        metric,
                        min_overlap=min_overlap,
                        thresholds=thresholds,
                        compute_aos=compute_aos,
                    )
                    idx += num_part
                # precision = tp / (tp + fp); aos normalized the same way.
                for i in range(len(thresholds)):
                    # recall[m, l, k, i] = pr[i, 0] / (pr[i, 0] + pr[i, 2])
                    precision[m, l, k, i] = pr[i, 0] / (pr[i, 0] + pr[i, 1])
                    if compute_aos:
                        aos[m, l, k, i] = pr[i, 3] / (pr[i, 0] + pr[i, 1])
                # Smooth the curve to be monotonically non-increasing by taking
                # the running max from the right (standard AP post-processing).
                for i in range(len(thresholds)):
                    precision[m, l, k, i] = np.max(precision[m, l, k, i:],
                                                   axis=-1)
                    if compute_aos:
                        aos[m, l, k, i] = np.max(aos[m, l, k, i:], axis=-1)
                # use interp to calculate recall
                """
                current_recalls = np.linspace(0, 1, 41)
                prec_unique, inds = np.unique(precision[m, l, k], return_index=True)
                current_recalls = current_recalls[inds]
                f = interp1d(prec_unique, current_recalls)
                precs_for_recall = np.linspace(0, 1, 41)
                max_prec = np.max(precision[m, l, k])
                valid_prec = precs_for_recall < max_prec
                num_valid_prec = valid_prec.sum()
                recall[m, l, k, :num_valid_prec] = f(precs_for_recall[valid_prec])
                """
    ret_dict = {
        "recall":
        recall,  # [num_class, num_difficulty, num_minoverlap, N_SAMPLE_PTS]
        "precision": precision,
        "orientation": aos,
        "thresholds": all_thresholds,
        "min_overlaps": min_overlaps,
    }
    return ret_dict
# Example no. 3 (score: 0)
def _crop_bev_points(points):
    """Return only the points inside the 80 m x 80 m BEV window, above z = 0.1.

    Equivalent to the five chained boolean filters the original repeated
    inline six times: -40 < x < 40, -40 < y < 40, z > 0.1.
    """
    mask = (
        (points[:, 0] > -40) & (points[:, 0] < 40)
        & (points[:, 1] > -40) & (points[:, 1] < 40)
        & (points[:, 2] > 0.1)
    )
    return points[mask, :]


def lvx_vis(gt_annos, dt_annos, output_dir):
    """Render per-frame BEV images (GT vs. detections) and stitch them into a video.

    Args:
        gt_annos: list of ground-truth annotation dicts, or None for the test
            split (then only detections are drawn).
        dt_annos: list of detection annotation dicts; dt_annos[0] must carry
            ``metadata["image_prefix"]`` pointing at the dataset root.
        output_dir: directory receiving the per-frame images, the video and
            (in the GT branch) the miss matrix.
    """
    root_path = dt_annos[0]["metadata"]["image_prefix"]

    if gt_annos is not None:
        # Miss matrix handed to show_bev_objects, one column per frame,
        # initialized to -3. NOTE(review): the 22 rows and the -3 sentinel
        # look dataset-specific — confirm against show_bev_objects.
        miss = -3 * np.ones((22, len(gt_annos)))
        overlap, _, _, _ = calculate_iou_partly(gt_annos, dt_annos, 2, 50, 2, 0.5)
        dataset = lvx_object(root_path)
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        vot = cv2.VideoWriter(os.path.join(output_dir, "bev_video.mp4"),
                              fourcc, 10, (3200, 2400), True)
        img_dir = os.path.join(output_dir, "bev_imgs")
        os.makedirs(img_dir, exist_ok=True)  # hoisted out of the frame loop

        # enumerate/zip replaces the original's repeated O(n) list.index()
        # lookups (assumes frame tokens are unique, as the original did implicitly).
        for count, (gt_anno, dt_anno) in enumerate(zip(gt_annos, dt_annos)):
            idx = str(gt_anno["token"])
            print(idx)

            # Current frame plus its temporal neighbours for context.
            points = _crop_bev_points(dataset.get_lidar(idx))
            points_1 = _crop_bev_points(dataset.get_lidar(f'{int(idx)-1}'))
            if count < len(gt_annos) - 1:
                points_2 = _crop_bev_points(dataset.get_lidar(f'{int(idx)+1}'))
            else:
                points_2 = []  # last frame has no successor

            gt_objects = dataset.get_label_objects(gt_anno)
            dt_objects = dataset.get_label_objects(dt_anno)

            img_path = os.path.join(img_dir, f"lvx_{idx}.png")
            show_bev_objects(points, points_1, points_2, gt_objects, dt_objects,
                             img_path, overlap[count], miss[:, count])
            vot.write(cv2.imread(img_path))
        vot.release()
        np.savetxt(os.path.join(output_dir, 'miss.txt'), miss, fmt="%d")

    else:
        dataset = lvx_object(root_path, split="testing")
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        vot = cv2.VideoWriter(os.path.join(output_dir, "beicao_video.mp4"),
                              fourcc, 10, (3200, 2400), True)
        img_dir = os.path.join(output_dir, "beicao_imgs")
        os.makedirs(img_dir, exist_ok=True)

        for count, dt_anno in enumerate(dt_annos):
            idx = dt_anno['metadata']['token']
            print(idx)

            points = _crop_bev_points(dataset.get_lidar(idx))
            points_1 = _crop_bev_points(dataset.get_lidar(f'{int(idx)-1}'))
            if count < len(dt_annos) - 1:
                points_2 = _crop_bev_points(dataset.get_lidar(f'{int(idx)+1}'))
            else:
                points_2 = []
            dt_objects = dataset.get_label_objects(dt_anno)

            img_path = os.path.join(img_dir, f"lvx_{idx}.png")
            show_bev_objects(points, points_1, points_2, [], dt_objects,
                             img_path, None, None)
            vot.write(cv2.imread(img_path))
        vot.release()