コード例 #1
0
    def load_dataset(self, path_to_dataset):
        """
        Load the features .pgz file and parse it into flat arrays.

        Every epoch of every file contributes one input row; the row's
        label is the file's 'state' and its group is the file's 'hour'.

        Parameters
        ----------
        path_to_dataset : str
            Path to the zipped-pickle feature file.

        Returns
        -------
        tuple of np.ndarray
            (inputs, labels, groups).
        """
        inputs = []
        labels = []

        # Groups for stratified validation: all 10-minute files from a
        # specific hour must land either all in the training set or all
        # in the validation set.
        groups = []

        # sys.stdout.write keeps the cursor on the same line and works on
        # Python 2 and 3, unlike the Python-2-only `print "...",` form.
        import sys
        sys.stdout.write("Loading dataset using features " + str(self.selected_features) + "..")

        # On-disk layout: [file_name][feature_name][epoch][..] or
        #                 [file_name][state/hour]
        dataset_dict = load_zipped_pickle(path_to_dataset)

        for file_name in dataset_dict:  # renamed: `file` shadows a builtin
            for sample in dataset_dict[file_name][self.selected_features]:  # `input` is a builtin too
                inputs.append(sample)
                labels.append(dataset_dict[file_name]['state'])
                groups.append(dataset_dict[file_name]['hour'])

        print(" done.")

        return (np.array(inputs), np.array(labels), np.array(groups))
コード例 #2
0
    def __init__(self, classes, npoints, center_perturbation, size_perturbation, angle_perturbation,
        classes_to_drop=None, classes_to_drop_prob=0, random_flip=False, random_shift=False, rotate_to_center=False, overwritten_data_path=None):
        """
        Load a zipped-pickle dataset and keep only samples whose class is
        in `classes`, optionally dropping a random fraction of some classes.

        Parameters
        ----------
        classes : container
            Class names to keep.
        npoints : int
            Number of points per frustum point cloud (stored for later use).
        center_perturbation, size_perturbation, angle_perturbation
            Augmentation magnitudes (stored; applied elsewhere).
        classes_to_drop : container, optional
            Classes whose samples are dropped with probability
            `classes_to_drop_prob` (default: none).  Was a mutable `[]`
            default argument; now None-guarded.
        classes_to_drop_prob : float
            Per-sample drop probability for `classes_to_drop`.
        random_flip, random_shift, rotate_to_center : bool
            Augmentation flags (stored; applied elsewhere).
        overwritten_data_path : str
            Path to the zipped-pickle data file; required.

        Raises
        ------
        ValueError
            If `overwritten_data_path` is missing.  (Was an `assert`,
            which is stripped under `python -O`.)
        """
        if classes_to_drop is None:  # avoid mutable default argument
            classes_to_drop = []
        if overwritten_data_path is None:
            raise ValueError("overwritten_data_path must be provided")

        self.classes = classes
        self.npoints = npoints
        self.random_flip = random_flip
        self.random_shift = random_shift
        self.rotate_to_center = rotate_to_center
        self.center_perturbation = center_perturbation
        self.size_perturbation = size_perturbation
        self.angle_perturbation = angle_perturbation
        self.prepared_batches = deque()

        idx_l, box2d_l, box3d_l, image_crop_l, points_l, label_l, cls_type_l, heading_l, size_l, \
            rtilt_l, k_l, frustum_angle_l, img_dims_l = load_zipped_pickle(overwritten_data_path)

        # Maps cls_type to its indices within the filtered lists below
        # (self.label_l, self.heading_l, ...) to allow class-wise sampling.
        self.cls_to_idx_map = {}
        # Filtering out classes that we do not want
        self.idx_l, self.box2d_l, self.box3d_l, self.image_crop_l, self.points_l, self.label_l, \
            self.cls_type_l, self.heading_l, self.size_l, self.rtilt_l, self.k_l, \
            self.frustum_angle_l, self.img_dims_l = [], [], [], [], [], [], [], [], [], [], [], [], []

        # Fixed seed so the random class-drop is reproducible across runs.
        # NOTE(review): this reseeds numpy's *global* RNG as a side effect.
        np.random.seed(20)
        cls_idx = 0
        for idx, box2d, box3d, image_crop, points, label, cls_type, heading, size, rtilt, k, \
            frustum_angle, img_dims in zip(idx_l, box2d_l, box3d_l, image_crop_l, points_l, \
            label_l, cls_type_l, heading_l, size_l, rtilt_l, k_l, frustum_angle_l, img_dims_l):

            if cls_type not in self.classes:
                continue

            # Randomly drop a fraction of the configured classes.
            if cls_type in classes_to_drop and (np.random.rand() < classes_to_drop_prob):
                continue

            self.cls_to_idx_map.setdefault(cls_type, []).append(cls_idx)
            cls_idx += 1

            self.idx_l.append(idx)
            self.box2d_l.append(box2d)
            self.box3d_l.append(box3d)
            #self.image_crop_l.append(image_crop)
            # NOTE(review): image_crop_l stays empty although it is
            # initialized above — confirm this is deliberate (e.g. memory).
            self.points_l.append(points)
            self.label_l.append(label)
            self.cls_type_l.append(cls_type)
            self.heading_l.append(heading)
            self.size_l.append(size)
            self.rtilt_l.append(rtilt)
            self.k_l.append(k)
            self.frustum_angle_l.append(frustum_angle)
            self.img_dims_l.append(img_dims)
コード例 #3
0
    def load_dataset(self, path_to_dataset):
        """
        Load the features .pgz test-set file and parse it into arrays.

        Unlike the training loader there are no labels: one input entry is
        produced per file, together with that file's name.

        Parameters
        ----------
        path_to_dataset : str
            Path to the zipped-pickle feature file.

        Returns
        -------
        tuple of np.ndarray
            (inputs, file_names).
        """
        inputs = []
        file_names = []

        # sys.stdout.write keeps the cursor on the same line and works on
        # Python 2 and 3, unlike the Python-2-only `print "...",` form.
        import sys
        sys.stdout.write("Loading testset using features " + str(self.selected_features) + "..")

        # On-disk layout: [file_name][feature_name][epoch][..] and
        #                 [file_name][state/hour]
        dataset_dict = load_zipped_pickle(path_to_dataset)

        for file_name in dataset_dict:  # renamed: `file` shadows a builtin
            inputs.append(dataset_dict[file_name][self.selected_features])
            file_names.append(file_name)

        print(" done.")

        return (np.array(inputs), np.array(file_names))
コード例 #4
0
# Visualization is optional: mayavi is only imported when --viz was given.
VISU = FLAGS.viz
if VISU:
    import mayavi.mlab as mlab
    from view_pc import draw_lidar, draw_gt_boxes3d

# Older manual unpickling, superseded by load_zipped_pickle below.
#with open(FLAGS.result_path, 'rb') as fp:
#    ps_list = pickle.load(fp)
#    segp_list = pickle.load(fp)
#    center_list = pickle.load(fp)
#    heading_cls_list = pickle.load(fp)
#    heading_res_list = pickle.load(fp)
#    size_cls_list = pickle.load(fp)
#    size_res_list = pickle.load(fp)
#    rot_angle_list = pickle.load(fp)
#    score_list = pickle.load(fp)
# One entry per detected object in each of these parallel lists.
ps_list, segp_list, center_list, heading_cls_list, heading_res_list, size_cls_list, size_res_list, rot_angle_list, score_list = load_zipped_pickle(
    FLAGS.result_path)

# Overall accuracy counters.
total_cnt = 0
correct_cnt = 0
# Classes included in the evaluation.
type_whitelist = [
    'bed', 'table', 'sofa', 'chair', 'toilet', 'desk', 'dresser',
    'night_stand', 'bookshelf', 'bathtub'
]
# Per-class counters keyed by class name.
class_correct_cnt = {classname: 0 for classname in type_whitelist}
class_total_cnt = {classname: 0 for classname in type_whitelist}
# Walk the predictions (loop body continues beyond this excerpt).
for i in range(len(segp_list)):
    # Python 2 print statement: progress indicator.
    print " ---- %d/%d" % (i, len(segp_list))
    img_id = TEST_DATASET.id_list[i]
    box2d = TEST_DATASET.box2d_list[i]
    classname = TEST_DATASET.type_list[i]
コード例 #5
0
def vis_predictions3D(pred_files, gt_file, number_to_show=10, filenums=None):
    """
    Build VTK actors visualizing 3D predictions from one or more
    prediction pickle files against one ground-truth file.

    Each prediction file occupies one row of the grid layout and
    `number_to_show` objects (spread roughly equally over the
    whitelisted classes, chosen from the first file) are drawn per row.
    Returns (vtk_pcs_with_col, key_to_actors_to_hide) where the dict
    maps keyboard keys to actor groups the viewer can toggle.

    NOTE(review): relies on module-level globals (FLAGS, dataset,
    WHITE_LIST, CLASSES, SEP, IMG_DIR, IMG_SCALE, TEXT_SCALE, TEXT_ROT,
    NUM_HEADING_BIN, vis, ...).  `filenums` is accepted but unused in
    this body — confirm whether it is dead.
    """
    from roi_seg_box3d_dataset import class2type
    
    idx = 0
    COLS = number_to_show
    ap_infos = {}
    # Parallel per-object bookkeeping for the text overlays.
    classes, file_nums, mean_box_ious, mean_seg_ious, box_ious, seg_ious = [], [], [], [], [], []
    vtk_pcs_with_col, vtk_pcs_wo_col, vtk_imgs, vtk_gt_boxes, vtk_pred_boxes, vtk_texts = [], [], [], [], [], []
    choices = []

    test_dataset = ROISegBoxDataset(WHITE_LIST, npoints=2048, 
                                    split='val', rotate_to_center=True, 
                                    overwritten_data_path=gt_file, 
                                    from_rgb_detection=False)

    for n, pred_file in enumerate(pred_files):
        # Lists of different items from predictions
        predictions = load_zipped_pickle(pred_file)
        ps_l, seg_gt_l, seg_pred_l, center_l, heading_cls_l, heading_res_l, size_cls_l, size_res_l, rot_angle_l, \
            score_l, cls_type_l, file_num_l, box2d_l, box3d_l = predictions
        if n == 0:
            # Choosing equal number of objects per class to display
            cls_types = []
            options = {}
            # Group sample indices by class, restricted to the whitelist.
            for i, cls_type in enumerate(cls_type_l):
                if not class2type[cls_type] in WHITE_LIST: continue
                if options.get(cls_type) is None:
                    options[cls_type] = [i]
                    cls_types.append(cls_type)
                else:
                    options[cls_type].append(i)

            # Make use of array_split to divide into fairly equal groups
            arr = np.array_split([1] * number_to_show, len(options.keys()))
            random.shuffle(arr)
            for i, group in enumerate(arr):
                cls_type = cls_types[i]
                choice_list = np.random.choice(options[cls_type], len(group), replace=False) #replace=True)
                choices.extend(choice_list)
            print('Number of objects in whitelist: %d' % len(options))

        # Compute overall statistics
        if not FLAGS.rgb_detection:
            print('==== Computing overall statistics for %s ====' % pred_file)
            from evaluate import evaluate_predictions, get_ap_info
            rec, prec, ap, mean_ap = evaluate_predictions(predictions, dataset, CLASSES, 
                                                          test_dataset, WHITE_LIST)
            ap['Mean AP'] = mean_ap
            # Accumulate one formatted AP string per class per model.
            for classname in ap.keys():
                if ap_infos.get(classname) is None: ap_infos[classname] = []
                ap_infos[classname].append('%11s: [%.1f]' % (classname, 100. * ap[classname]))

            # Mean box / segmentation IOU over every object of this model.
            box_iou_sum, seg_iou_sum = 0, 0
            for i in range(len(ps_l)):
                seg_gt = seg_gt_l[i]
                box3d = box3d_l[i]
                seg_pred = seg_pred_l[i]
                center = center_l[i]
                heading_cls = heading_cls_l[i]
                heading_res = heading_res_l[i]
                size_cls = size_cls_l[i]
                size_res = size_res_l[i]
                rot_angle = rot_angle_l[i]

                # Decode the predicted box from class+residual encoding.
                gt_box3d = rotate_pc_along_y(np.copy(box3d), rot_angle)
                heading_angle = class2angle(heading_cls, heading_res, NUM_HEADING_BIN)
                box_size = class2size(size_cls, size_res) 
                pred_box3d = get_3d_box(box_size, heading_angle, center)

                # Box IOU
                # shift_arr reorders GT corners to match pred corner order.
                shift_arr = np.array([4,5,6,7,0,1,2,3])
                box_iou3d, _ = box3d_iou(gt_box3d[shift_arr,:], pred_box3d)
                # Seg IOU
                seg_iou = get_seg_iou(seg_gt, seg_pred, 2)

                box_iou_sum += box_iou3d
                seg_iou_sum += seg_iou
            mean_box_iou = box_iou_sum / len(ps_l)
            mean_seg_iou = seg_iou_sum / len(ps_l)
            mean_box_ious.append(mean_box_iou)
            mean_seg_ious.append(mean_seg_iou)
             
        # Render the chosen subset of objects for this model's row.
        for i in choices:
            row, col = idx // COLS, idx % COLS
            idx += 1
            ps = ps_l[i]
            seg_pred = seg_pred_l[i]
            center = center_l[i]
            heading_cls = heading_cls_l[i]
            heading_res = heading_res_l[i]
            size_cls = size_cls_l[i]
            size_res = size_res_l[i]
            rot_angle = rot_angle_l[i]
            score = score_l[i]
            cls_type = cls_type_l[i]
            file_num = file_num_l[i]
            seg_gt = seg_gt_l[i] if not FLAGS.rgb_detection else []
            box2d = box2d_l[i]
            box3d = box3d_l[i] if not FLAGS.rgb_detection else []

            # Visualize point cloud (with and without color)
            vtk_pc_wo_col = vis.VtkPointCloud(ps)
            vtk_pc = vis.VtkPointCloud(ps, gt_points=seg_gt, pred_points=seg_pred)
            vis.vtk_transform_actor(vtk_pc_wo_col.vtk_actor, translate=(SEP*col,SEP*row,0))
            vis.vtk_transform_actor(vtk_pc.vtk_actor, translate=(SEP*col,SEP*row,0))
            vtk_pcs_wo_col.append(vtk_pc_wo_col.vtk_actor)
            vtk_pcs_with_col.append(vtk_pc.vtk_actor)

            # Visualize GT 3D box
            if FLAGS.rgb_detection:
                # No GT box in the predictions; look it up from the dataset.
                objects = dataset.get_label_objects(file_num)
                calib = dataset.get_calibration(file_num)
                for obj in objects:
                    if obj.classname not in WHITE_LIST: continue
                    box3d_pts_2d, box3d_pts_3d = compute_box_3d(obj, calib)
                    box3d_pts_3d = calib.project_upright_depth_to_upright_camera(box3d_pts_3d)
                    box3d_pts_3d = rotate_pc_along_y(np.copy(box3d_pts_3d), rot_angle)
                    vtk_box3D = vis.vtk_box_3D(box3d_pts_3d, color=vis.Color.LightGreen)
                    vis.vtk_transform_actor(vtk_box3D, translate=(SEP*col,SEP*row,0))
                    vtk_gt_boxes.append(vtk_box3D)
            else:
                gt_box3d = rotate_pc_along_y(np.copy(box3d), rot_angle)
                vtk_gt_box3D = vis.vtk_box_3D(gt_box3d, color=vis.Color.LightGreen)
                vis.vtk_transform_actor(vtk_gt_box3D, translate=(SEP*col,SEP*row,0))
                vtk_gt_boxes.append(vtk_gt_box3D)

            # Visualize Pred 3D box
            heading_angle = class2angle(heading_cls, heading_res, NUM_HEADING_BIN)
            box_size = class2size(size_cls, size_res) 
            pred_box3d = get_3d_box(box_size, heading_angle, center)
            vtk_pred_box3D = vis.vtk_box_3D(pred_box3d, color=vis.Color.White)
            vis.vtk_transform_actor(vtk_pred_box3D, translate=(SEP*col,SEP*row,0))
            vtk_pred_boxes.append(vtk_pred_box3D)

            # Visualize Images
            box2d_col = vis.Color.LightGreen if not FLAGS.rgb_detection else vis.Color.Orange
            img_filename = os.path.join(IMG_DIR, '%06d.jpg' % file_num)
            vtk_img = vis.vtk_image(img_filename, box2Ds_list=[[box2d]], box2Ds_cols=[box2d_col])
            vis.vtk_transform_actor(vtk_img, scale=(IMG_SCALE,IMG_SCALE,IMG_SCALE), 
                                    rot=(0,180,180), translate=(-2+SEP*col,2+SEP*row,10))
            vtk_imgs.append(vtk_img)

            # Other information
            classes.append(class2type[cls_type].capitalize())
            file_nums.append(str(file_num))
            if not FLAGS.rgb_detection:
                # Per-object IOUs for the text overlay.
                shift_arr = np.array([4,5,6,7,0,1,2,3])
                box_iou3d, _ = box3d_iou(gt_box3d[shift_arr,:], pred_box3d)
                box_ious.append(box_iou3d)
                seg_iou = get_seg_iou(seg_gt, seg_pred, 2)
                seg_ious.append(seg_iou)

    # Visualize overall statistics
    vtk_texts.extend(vis.vtk_text([('Model: %s' % pred_file.split('/')[-1]) for pred_file in pred_files], arr_type='text', sep=SEP, cols=1, scale=TEXT_SCALE, rot=TEXT_ROT, translate=(-14.5,2.5,2)))
    vtk_texts.extend(vis.vtk_text(['Mean Box IOU:'] * len(pred_files), arr_type='text', sep=SEP, cols=1, scale=TEXT_SCALE, rot=TEXT_ROT, translate=(-14.5,3,2)))
    vtk_texts.extend(vis.vtk_text(mean_box_ious, arr_type='float', color=True, sep=SEP, cols=1, scale=TEXT_SCALE, rot=TEXT_ROT, translate=(-10,3,2)))
    vtk_texts.extend(vis.vtk_text(['Mean Seg IOU:'] * len(pred_files), arr_type='text', sep=SEP, cols=1, scale=TEXT_SCALE, rot=TEXT_ROT, translate=(-14.5,3.5,2)))
    vtk_texts.extend(vis.vtk_text(mean_seg_ious, arr_type='float', color=True, sep=SEP, cols=1, scale=TEXT_SCALE, rot=TEXT_ROT, translate=(-10,3.5,2)))
    for i, (cls_name, ap_info) in enumerate(ap_infos.items()):
        vtk_texts.extend(vis.vtk_text(ap_info, arr_type='text', color=True, sep=SEP, cols=1, scale=TEXT_SCALE, rot=TEXT_ROT, translate=(-14.5,4+i*0.5,2)))

    # Visualize text information
    vtk_texts.extend(vis.vtk_text(['Class:'] * len(classes), arr_type='text', sep=SEP, cols=COLS, scale=TEXT_SCALE, rot=TEXT_ROT, translate=(-1.5,3,2)))
    vtk_texts.extend(vis.vtk_text(classes, arr_type='text', sep=SEP, cols=COLS, scale=TEXT_SCALE, rot=TEXT_ROT, translate=(0.5,3,2)))
    vtk_texts.extend(vis.vtk_text(['File:'] * len(file_nums), arr_type='text', sep=SEP, cols=COLS, scale=TEXT_SCALE, rot=TEXT_ROT, translate=(-1.5,3.5,2)))
    vtk_texts.extend(vis.vtk_text(file_nums, arr_type='text', sep=SEP, cols=COLS, scale=TEXT_SCALE, rot=TEXT_ROT, translate=(0.25,3.5,2)))
    if not FLAGS.rgb_detection:
        vtk_texts.extend(vis.vtk_text(['Box:'] * len(box_ious), arr_type='text', sep=SEP, cols=COLS, scale=TEXT_SCALE, rot=TEXT_ROT, translate=(-1.5,4,2)))
        vtk_texts.extend(vis.vtk_text(box_ious, arr_type='float', color=True, sep=SEP, cols=COLS, scale=TEXT_SCALE, rot=TEXT_ROT, translate=(0,4,2)))
        vtk_texts.extend(vis.vtk_text(['Seg:'] * len(seg_ious), arr_type='text', sep=SEP, cols=COLS, scale=TEXT_SCALE, rot=TEXT_ROT, translate=(-1.5,4.5,2)))
        vtk_texts.extend(vis.vtk_text(seg_ious, arr_type='float', color=True, sep=SEP, cols=COLS, scale=TEXT_SCALE, rot=TEXT_ROT, translate=(0,4.5,2)))

    # Keyboard toggles for the interactive viewer.
    key_to_actors_to_hide = { 'g': vtk_gt_boxes, 'p': vtk_pred_boxes, 'i': vtk_imgs, 'c': vtk_pcs_wo_col, 't': vtk_texts }
    return vtk_pcs_with_col, key_to_actors_to_hide
コード例 #6
0
ファイル: viz_eval.py プロジェクト: joosm/frustum-pointnets
# Visualization is optional: mayavi is only imported when --viz was given.
VISU = FLAGS.viz
if VISU:
    import mayavi.mlab as mlab
    from view_pc import draw_lidar, draw_gt_boxes3d

# Older manual unpickling, superseded by load_zipped_pickle below.
#with open(FLAGS.result_path, 'rb') as fp:
#    ps_list = pickle.load(fp)
#    segp_list = pickle.load(fp)
#    center_list = pickle.load(fp)
#    heading_cls_list = pickle.load(fp)
#    heading_res_list = pickle.load(fp)
#    size_cls_list = pickle.load(fp)
#    size_res_list = pickle.load(fp)
#    rot_angle_list = pickle.load(fp)
#    score_list = pickle.load(fp)
# One entry per detected object in each of these parallel lists.
ps_list, segp_list, center_list, heading_cls_list, heading_res_list, size_cls_list, size_res_list, rot_angle_list, score_list = load_zipped_pickle(FLAGS.result_path)

# Overall accuracy counters.
total_cnt = 0
correct_cnt = 0
# Classes included in the evaluation.
type_whitelist=['bed','table','sofa','chair','toilet','desk','dresser','night_stand','bookshelf','bathtub']
# Per-class counters keyed by class name.
class_correct_cnt = {classname:0 for classname in type_whitelist}
class_total_cnt = {classname:0 for classname in type_whitelist}
# Walk the predictions (loop body continues beyond this excerpt).
for i in range(len(segp_list)):
    # Python 2 print statement: progress indicator.
    print " ---- %d/%d"%(i,len(segp_list))
    img_id = TEST_DATASET.id_list[i] 
    box2d = TEST_DATASET.box2d_list[i]
    classname = TEST_DATASET.type_list[i]

    # Match the prediction against GT objects of the same frame.
    objects = dataset.get_label_objects(img_id)
    target_obj = None
    for obj in objects: # **Assuming we use GT box2d for 3D box estimation evaluation**
コード例 #7
0
    def __init__(self, npoints, split, random_flip=False, random_shift=False, rotate_to_center=False, overwritten_data_path=None, from_rgb_detection=False, one_hot=False):
        """
        Load a pickled ROI-segmentation / 3D-box dataset split.

        When `overwritten_data_path` is not given, the pickle shipped next
        to this module for the requested split is used.  The unpacked
        per-sample lists depend on whether the data came from an RGB 2D
        detector (`from_rgb_detection=True`) or from GT annotations.
        """
        self.npoints = npoints
        self.random_flip = random_flip
        self.random_shift = random_shift
        self.rotate_to_center = rotate_to_center
        self.one_hot = one_hot
        self.from_rgb_detection = from_rgb_detection

        # Fall back to the default pickle for this split.
        if overwritten_data_path is None:
            overwritten_data_path = os.path.join(BASE_DIR, '%s_1002.zip.pickle' % (split))

        data = load_zipped_pickle(overwritten_data_path)
        if from_rgb_detection:
            # Detector output: no labels/boxes, but a detection probability.
            (self.id_list, self.box2d_list, self.input_list,
             self.type_list, self.frustum_angle_list, self.prob_list) = data
        else:
            # GT annotations: full labels, 3D boxes and box parameters.
            (self.id_list, self.box2d_list, self.box3d_list,
             self.input_list, self.label_list, self.type_list,
             self.heading_list, self.size_list, self.frustum_angle_list) = data
コード例 #8
0
    def __init__(self,
                 classes,
                 npoints,
                 split,
                 classes_to_drop=None,
                 classes_to_drop_prob=0,
                 random_flip=False,
                 random_shift=False,
                 rotate_to_center=False,
                 overwritten_data_path=None,
                 from_rgb_detection=False,
                 one_hot=False):
        """
        Load a zipped-pickle frustum dataset, keeping only samples whose
        class is in `classes`.

        Two on-disk layouts are supported: RGB 2D-detector output
        (`from_rgb_detection=True`, carries a detection probability) and
        ground-truth annotations (carries 3D boxes, point labels and
        camera parameters).

        Parameters
        ----------
        classes : container
            Class names to keep.
        npoints : int
            Number of points per frustum point cloud (stored).
        split : str
            Split name.  NOTE(review): unused in this body — confirm.
        classes_to_drop : container, optional
            Classes dropped with probability `classes_to_drop_prob`
            (GT layout only).  Was a mutable `[]` default argument.
        random_flip, random_shift, rotate_to_center, one_hot : bool
            Augmentation / encoding flags (stored; applied elsewhere).
        overwritten_data_path : str
            Path to the zipped-pickle file; required.
        from_rgb_detection : bool
            Selects the on-disk layout.

        Raises
        ------
        ValueError
            If `overwritten_data_path` is missing.  (Was an `assert`,
            which is stripped under `python -O`.)
        """
        if classes_to_drop is None:  # avoid mutable default argument
            classes_to_drop = []
        if overwritten_data_path is None:
            raise ValueError("overwritten_data_path must be provided")

        self.classes = classes
        self.npoints = npoints
        self.random_flip = random_flip
        self.random_shift = random_shift
        self.rotate_to_center = rotate_to_center
        self.one_hot = one_hot

        self.from_rgb_detection = from_rgb_detection
        if from_rgb_detection:
            idx_l, box2d_l, image_crop_l, points_l, cls_type_l, frustum_angle_l, prob_l = \
                load_zipped_pickle(overwritten_data_path)

            # Filtering out classes that we do not want
            self.idx_l, self.box2d_l, self.image_crop_l, self.points_l, self.cls_type_l, \
                self.frustum_angle_l, self.prob_l = [], [], [], [], [], [], []

            for idx, box2d, image_crop, points, cls_type, frustum_angle, prob in \
                zip(idx_l, box2d_l, image_crop_l, points_l, cls_type_l, frustum_angle_l, prob_l):
                if cls_type in self.classes:
                    self.idx_l.append(idx)
                    self.box2d_l.append(box2d)
                    self.image_crop_l.append(image_crop)
                    self.points_l.append(points)
                    self.cls_type_l.append(cls_type)
                    self.frustum_angle_l.append(frustum_angle)
                    self.prob_l.append(prob)
        else:
            idx_l, box2d_l, box3d_l, image_crop_l, points_l, label_l, cls_type_l, heading_l, size_l, \
                rtilt_l, k_l, frustum_angle_l, img_dims_l = load_zipped_pickle(overwritten_data_path)

            # Filtering out classes that we do not want
            self.idx_l, self.box2d_l, self.box3d_l, self.image_crop_l, self.points_l, self.label_l, \
                self.cls_type_l, self.heading_l, self.size_l, self.rtilt_l, self.k_l, \
                self.frustum_angle_l, self.img_dims_l = [], [], [], [], [], [], [], [], [], [], [], [], []

            for idx, box2d, box3d, image_crop, points, label, cls_type, heading, size, rtilt, k, \
                frustum_angle, img_dims in zip(idx_l, box2d_l, box3d_l, image_crop_l, points_l, \
                label_l, cls_type_l, heading_l, size_l, rtilt_l, k_l, frustum_angle_l, img_dims_l):

                if cls_type in self.classes:

                    # Randomly drop a fraction of the configured classes.
                    if cls_type in classes_to_drop and (np.random.rand() <
                                                        classes_to_drop_prob):
                        continue

                    self.idx_l.append(idx)
                    self.box2d_l.append(box2d)
                    self.box3d_l.append(box3d)
                    self.image_crop_l.append(image_crop)
                    self.points_l.append(points)
                    self.label_l.append(label)
                    self.cls_type_l.append(cls_type)
                    self.heading_l.append(heading)
                    self.size_l.append(size)
                    self.rtilt_l.append(rtilt)
                    self.k_l.append(k)
                    self.frustum_angle_l.append(frustum_angle)
                    self.img_dims_l.append(img_dims)
コード例 #9
0
ファイル: run_cl.py プロジェクト: sungsoo-ahn/pseudo-data
    # One task per "age": train on the task, then evaluate on all tasks
    # seen so far (continual-learning protocol).  The enclosing `def`
    # starts above this excerpt.
    for age, task_idx in enumerate(range(nb_tasks)):
        print("Age is {}".format(age))
        X_train, y_train = training_data[task_idx]

        cl.pre_task_updates()
        cl.fit(X_train, y_train, protocol['epochs_per_task'])

        # Evaluate on the validation sets of every task up to and
        # including the current one.
        ftask = []
        for X_valid, y_valid in valid_data[:task_idx + 1]:
            f_ = cl.evaluate(X_valid, y_valid)
            ftask.append(f_)

        print(np.mean(ftask))
        evals.append(ftask)

        # Let the learner consolidate, unless this was the last task.
        next_task = True if task_idx != nb_tasks - 1 else False
        cl.post_task_updates(X_train, y_train, next_task=next_task)

    return evals, tmp_evals


total_evals = []
evals, tmp_evals = run_fits(training_dataset, validation_dataset)

# Mean score per task age.
print([np.mean(e) for e in evals])
print([np.mean(e) for e in tmp_evals])

# Persist the evaluations, then reload them (round-trip check).
utils.save_zipped_pickle(evals, datafile_name)
evals = utils.load_zipped_pickle(datafile_name)
コード例 #10
0
    def __init__(self,
                 classes3D,
                 classes2D,
                 npoints,
                 data3D_keep_prob=1,
                 add3D_for_classes2D_prob=0,
                 random_flip=False,
                 random_shift=False,
                 rotate_to_center=False,
                 overwritten_data_path=None):
        """
        Load a zipped-pickle dataset, splitting samples into a 3D-labelled
        pool and a 2D-labelled pool.

        Classes3D are the classes that we want the 3D bounding boxes of
        while Classes2D are the classes that we want the 2D bounding
        boxes of.

        Parameters
        ----------
        classes3D, classes2D : container
            Classes routed to the 3D / 2D pools respectively.
        npoints : int
            Number of points per frustum point cloud (stored).
        data3D_keep_prob : float
            Probability of keeping a 3D-class sample.
        add3D_for_classes2D_prob : float
            Probability of promoting any sample into the 3D pool.
        random_flip, random_shift, rotate_to_center : bool
            Augmentation flags (stored; applied elsewhere).
        overwritten_data_path : str
            Path to the zipped-pickle file; required.

        Raises
        ------
        ValueError
            If `overwritten_data_path` is missing.  (Was an `assert`,
            which is stripped under `python -O`.)
        """
        self.classes3D = classes3D
        self.classes2D = classes2D
        self.npoints = npoints
        self.random_flip = random_flip
        self.random_shift = random_shift
        self.rotate_to_center = rotate_to_center
        if overwritten_data_path is None:
            raise ValueError("overwritten_data_path must be provided")

        idx_l, box2d_l, box3d_l, image_crop_l, points_l, label_l, cls_type_l, heading_l, size_l, \
            rtilt_l, k_l, frustum_angle_l, img_dims_l = load_zipped_pickle(overwritten_data_path)

        # Maps cls_type to its indices within (self.label_3Dl, self.heading_3Dl,
        # etc.) to allow class-wise sampling.
        self.cls_to_idx_map3D = {}
        # Choosing the Classes to provide 3D bounding boxes for
        self.idx_3Dl, self.box2d_3Dl, self.box3d_3Dl, self.image_crop_3Dl, self.points_3Dl, self.label_3Dl, \
            self.cls_type_3Dl, self.heading_3Dl, self.size_3Dl, self.rtilt_3Dl, self.k_3Dl, \
            self.frustum_angle_3Dl, self.img_dims_3Dl = [], [], [], [], [], [], [], [], [], [], [], [], []

        # Maps cls_type to its indices within (self.label_2Dl, self.heading_2Dl,
        # etc.) to allow class-wise sampling.
        self.cls_to_idx_map2D = {}
        # Choosing the Classes to only provide 2D bounding boxes for
        self.idx_2Dl, self.box2d_2Dl, self.box3d_2Dl, self.image_crop_2Dl, self.points_2Dl, self.label_2Dl, \
            self.cls_type_2Dl, self.heading_2Dl, self.size_2Dl, self.rtilt_2Dl, self.k_2Dl, \
            self.frustum_angle_2Dl, self.img_dims_2Dl = [], [], [], [], [], [], [], [], [], [], [], [], []

        # Fixed seed so the random sub-sampling is reproducible.
        # NOTE(review): this reseeds numpy's *global* RNG as a side effect.
        np.random.seed(20)
        cls3D_idx, cls2D_idx = 0, 0
        for idx, box2d, box3d, image_crop, points, label, cls_type, heading, size, rtilt, k, \
            frustum_angle, img_dims in zip(idx_l, box2d_l, box3d_l, image_crop_l, points_l, \
            label_l, cls_type_l, heading_l, size_l, rtilt_l, k_l, frustum_angle_l, img_dims_l):

            # NOTE(review): each clause draws a fresh rand(), and the second
            # clause can promote a sample of *any* class (not just classes2D)
            # into the 3D pool; also `<=` vs `<` differ — confirm intended.
            if (cls_type in self.classes3D and (np.random.rand() <= data3D_keep_prob)) or \
               (np.random.rand() < add3D_for_classes2D_prob):

                self.cls_to_idx_map3D.setdefault(cls_type, []).append(cls3D_idx)
                cls3D_idx += 1

                self.idx_3Dl.append(idx)
                self.box2d_3Dl.append(box2d)
                self.box3d_3Dl.append(box3d)
                #self.image_crop_3Dl.append(image_crop)
                # NOTE(review): image_crop_3Dl stays empty although it is
                # initialized above — confirm this is deliberate.
                self.points_3Dl.append(points)
                self.label_3Dl.append(label)
                self.cls_type_3Dl.append(cls_type)
                self.heading_3Dl.append(heading)
                self.size_3Dl.append(size)
                self.rtilt_3Dl.append(rtilt)
                self.k_3Dl.append(k)
                self.frustum_angle_3Dl.append(frustum_angle)
                self.img_dims_3Dl.append(img_dims)

            # A sample may land in both pools; the 2D pool is filtered
            # strictly by classes2D membership.
            if self.classes2D is not None and cls_type in self.classes2D:
                self.cls_to_idx_map2D.setdefault(cls_type, []).append(cls2D_idx)
                cls2D_idx += 1

                self.idx_2Dl.append(idx)
                self.box2d_2Dl.append(box2d)
                self.box3d_2Dl.append(box3d)
                #self.image_crop_2Dl.append(image_crop)
                self.points_2Dl.append(points)
                self.label_2Dl.append(label)
                self.cls_type_2Dl.append(cls_type)
                self.heading_2Dl.append(heading)
                self.size_2Dl.append(size)
                self.rtilt_2Dl.append(rtilt)
                self.k_2Dl.append(k)
                self.frustum_angle_2Dl.append(frustum_angle)
                self.img_dims_2Dl.append(img_dims)
コード例 #11
0
    # Tail of a reporting helper whose `def` starts above this excerpt:
    # append per-class AP lines and the mean AP to `string`.
    # NOTE(review): `ap.keys()` followed by `.sort()` only works on
    # Python 2, where dict.keys() returns a list; on Python 3 use
    # sorted(ap) instead.
    sorted_keys = ap.keys()
    sorted_keys.sort()
    for classname in sorted_keys:
        string += ('%11s: [%.1f]\n' % (classname, 100. * ap[classname]))
    string += ('    Mean AP:  %.1f' % (100. * mean_ap))
    return string

if __name__ == '__main__':

    parser = argparse.ArgumentParser()
    parser.add_argument('--gt_path', default=None, help='GT path for .pickle file, the one used for val in train.py [default: None]')
    parser.add_argument('--pred_path', default=None, help='Prediction path for .pickle file from test.py [default: None]')
    FLAGS = parser.parse_args()

    # Full evaluation class list vs the subset actually tested.
    CLASSES      = ['bed','table','sofa','chair','toilet','desk','dresser','night_stand','bookshelf','bathtub']
    TEST_CLASSES = ['table','sofa','dresser','night_stand','bookshelf'] #['bed','chair','toilet','desk','bathtub']
    SUNRGBD_DATASET_DIR = '/home/yewsiang/Transferable3D/dataset/mysunrgbd'
    IMG_DIR = pjoin(SUNRGBD_DATASET_DIR, 'training', 'image')

    # Load GT
    TEST_DATASET = roi_seg_box3d_dataset.ROISegBoxDataset(TEST_CLASSES, npoints=2048, 
                                                          split='val', rotate_to_center=True, 
                                                          overwritten_data_path=FLAGS.gt_path, 
                                                          from_rgb_detection=False)
    DATASET = sunrgbd_object(SUNRGBD_DATASET_DIR, 'training')
    
    # Load predictions, compute AP per class, then report and plot.
    predictions = load_zipped_pickle(FLAGS.pred_path)
    rec, prec, ap, mean_ap = evaluate_predictions(predictions, DATASET, CLASSES, TEST_DATASET, TEST_CLASSES)
    print(get_ap_info(ap, mean_ap))
    plot_ap(ap)