            pred_boxes_right = pred_boxes_right.squeeze()

            pred_kpts = torch.cat(
                (pred_kpts, kpts_type, max_prob, pred_left, pred_right), 2)
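            # pred_kpts now stacks, along dim 2: the keypoint coordinates,
            # the keypoint type and its probability, and the left/right
            # boundary keypoints (layout inferred from the variable names)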
            pred_kpts = pred_kpts.squeeze()
            dim_orien = dim_orien.squeeze()

            det_toc = time.time()
            detect_time = det_toc - det_tic

            img_path = imdb.img_left_path_at(i)
            split_path = img_path.split('/')
            image_number = split_path[-1].split('.')[0]
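            # the KITTI folders image_2 (left camera), calib, label_2 and
            # velodyne sit side by side, so the calibration, label and lidar
            # paths can be derived from the left-image path by substitution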
            calib_path = img_path.replace("image_2", "calib")
            calib_path = calib_path.replace("png", "txt")
            calib = kitti_utils.read_obj_calibration(calib_path)
            label_path = calib_path.replace("calib", "label_2")
            lidar_path = calib_path.replace("calib", "velodyne")
            lidar_path = lidar_path.replace("txt", "bin")

            im2show_left = np.copy(cv2.imread(imdb.img_left_path_at(i)))
            im2show_right = np.copy(cv2.imread(imdb.img_right_path_at(i)))

            pointcloud = kitti_utils.get_point_cloud(lidar_path, calib)
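            # get_point_cloud reads the velodyne scan; the calibration is
            # presumably used to map the points into the camera frame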
            im_box = vis_utils.vis_lidar_in_bev(
                pointcloud, width=im2show_left.shape[0] * 2)

            for j in range(1, imdb.num_classes):
                inds = torch.nonzero(scores[:, j] > eval_thresh).view(-1)
                # if there is det
Example #2
        pred_left /= im_info[0, 2].data
        pred_right /= im_info[0, 2].data

        scores = scores.squeeze()
        pred_boxes_left = pred_boxes_left.squeeze()
        pred_boxes_right = pred_boxes_right.squeeze()

        pred_kpts = torch.cat(
            (pred_kpts, kpts_type, max_prob, pred_left, pred_right), 2)
        pred_kpts = pred_kpts.squeeze()
        dim_orien = dim_orien.squeeze()

        det_toc = time.time()
        detect_time = det_toc - det_tic

        calib = kitti_utils.read_obj_calibration('demo/calib.txt')

        im2show_left = np.copy(cv2.imread(img_l_path))
        im2show_right = np.copy(cv2.imread(img_r_path))

        pointcloud = kitti_utils.get_point_cloud('demo/lidar.bin', calib)
        im_box = vis_utils.vis_lidar_in_bev(pointcloud,
                                            width=im2show_left.shape[0] * 2)

        for j in range(1, len(kitti_classes)):
            inds = torch.nonzero(scores[:, j] > eval_thresh).view(-1)
            # if there is det
            if inds.numel() > 0:
                cls_scores = scores[:, j][inds]
                _, order = torch.sort(cls_scores, 0, True)
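                # the example is truncated here; a typical continuation
                # (a sketch, not the original code) gathers the class boxes
                # in score order and suppresses overlaps, e.g. with
                # torchvision:
                #   from torchvision.ops import nms
                #   cls_boxes = pred_boxes_left[inds][:, j * 4:(j + 1) * 4]
                #   keep = nms(cls_boxes[order], cls_scores[order], 0.4)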
Example #3
    def _load_kitti_annotation(self, index):
        if self._image_set == 'test':
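            # the test split has no label_2 annotations; note that this
            # branch falls through without a return, so callers get None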
            objects = []
        else:
            filename = os.path.join(self._data_path, 'training', 'label_2',
                                    index + '.txt')

            calib_file = os.path.join(self._data_path, 'training', 'calib',
                                      index + '.txt')
            calib_it = kitti_utils.read_obj_calibration(calib_file)
            im_left = cv2.imread(self.img_left_path_from_index(index))
            objects_origin = kitti_utils.read_obj_data(filename, calib_it,
                                                       im_left.shape)
            objects = []

            objects_origin = self.remove_occluded_keypoints(objects_origin)
            objects_origin = self.remove_occluded_keypoints(objects_origin,
                                                            left=False)

            # keep objects that are mostly visible: not heavily truncated or
            # occluded, of a known class, tall enough in the left image, and
            # with enough visible width in both views
            for obj in objects_origin:
                if obj.truncate < 0.98 and obj.occlusion < 3 and \
                   obj.cls in self._classes and \
                   obj.boxes[0].box[3] - obj.boxes[0].box[1] > 10 and \
                   obj.boxes[0].visible_right - obj.boxes[0].visible_left > 3 and \
                   obj.boxes[1].visible_right - obj.boxes[1].visible_left > 3:
                    objects.append(obj)

            f = calib_it.p2[0, 0]
            cx = calib_it.p2[0, 2]
            base_line = (calib_it.p2[0, 3] - calib_it.p3[0, 3]) / f
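            # f and cx are the focal length (pixels) and principal point from
            # the left projection matrix P2; base_line recovers the stereo
            # baseline from the horizontal offsets of P2/P3 (KITTI
            # convention), so depth follows as z = f * base_line / disparity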

            num_objs = len(objects)

            boxes_left = np.zeros((num_objs, 4), dtype=np.float32)
            boxes_right = np.zeros((num_objs, 4), dtype=np.float32)
            boxes_merge = np.zeros((num_objs, 4), dtype=np.float32)
            dim_orien = np.zeros((num_objs, 4), dtype=np.float32)
            kpts = np.zeros((num_objs, 6), dtype=np.float32)
            kpts_right = np.zeros((num_objs, 6), dtype=np.float32)
            truncation = np.zeros((num_objs), dtype=np.float32)
            occlusion = np.zeros((num_objs), dtype=np.float32)
            gt_classes = np.zeros((num_objs), dtype=np.int32)
            overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)

            for i in range(len(objects)):
                cls = self._class_to_ind[objects[i].cls]
                boxes_left[i, :] = objects[i].boxes[0].box
                boxes_right[i, :] = objects[i].boxes[1].box
                boxes_merge[i, :] = objects[i].boxes[2].box
                dim_orien[i, 0:3] = objects[i].dim
                dim_orien[i, 3] = objects[i].alpha
                kpts[i, :4] = objects[i].boxes[0].keypoints
                kpts[i, 4] = objects[i].boxes[0].visible_left
                kpts[i, 5] = objects[i].boxes[0].visible_right
                kpts_right[i, :4] = objects[i].boxes[1].keypoints
                kpts_right[i, 4] = objects[i].boxes[1].visible_left
                kpts_right[i, 5] = objects[i].boxes[1].visible_right
                occlusion[i] = objects[i].occlusion
                truncation[i] = objects[i].truncate
                gt_classes[i] = cls
                overlaps[i, cls] = 1.0

            overlaps = scipy.sparse.csr_matrix(overlaps)
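            # the subclass fields below are zero-filled placeholders,
            # apparently kept so the returned dict matches the roidb
            # interface expected elsewhere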
            gt_subclasses = np.zeros((num_objs), dtype=np.int32)
            gt_subclasses_flipped = np.zeros((num_objs), dtype=np.int32)
            subindexes = np.zeros((num_objs, self.num_classes), dtype=np.int32)
            subindexes_flipped = np.zeros((num_objs, self.num_classes),
                                          dtype=np.int32)
            subindexes = scipy.sparse.csr_matrix(subindexes)
            subindexes_flipped = scipy.sparse.csr_matrix(subindexes_flipped)

            return {
                'boxes_left': boxes_left,
                'boxes_right': boxes_right,
                'boxes_merge': boxes_merge,
                'dim_orien': dim_orien,
                'kpts': kpts,
                'kpts_right': kpts_right,
                'truncation': truncation,
                'occlusion': occlusion,
                'gt_classes': gt_classes,
                'gt_subclasses': gt_subclasses,
                'gt_subclasses_flipped': gt_subclasses_flipped,
                'gt_overlaps': overlaps,
                'gt_subindexes': subindexes,
                'gt_subindexes_flipped': subindexes_flipped,
                'flipped': False
            }
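
# Minimal usage sketch (hypothetical names, not part of the original file):
# the loader is typically called once per training index to assemble the
# ground-truth roidb.
def build_gt_roidb(dataset, image_index):
    # dataset: a kitti imdb exposing _load_kitti_annotation(index)
    return [dataset._load_kitti_annotation(index) for index in image_index]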