Example 1
def visualize_2d_boxes_on_top_image(bboxes_grid,
                                    top_view,
                                    grid_meters,
                                    bbox_voxel_size,
                                    prediction=False):
    top_image_vis = []
    for boxes, top_image in zip(bboxes_grid, top_view):  # iterate over batch
        top_image = top_image.numpy()
        shape = top_image.shape
        rgb_image = np.zeros((shape[0], shape[1], 3))
        rgb_image[top_image[:, :, 0] > 0] = 1

        box, labels, _ = get_boxes_from_box_grid(boxes, bbox_voxel_size)
        box = box.numpy()
        box, orientation_3d = make_eight_points_boxes(box)

        if prediction:
            labels = np.argmax(labels, axis=-1)
        if len(box) > 0:
            rgb_image = draw_boxes_top_view(rgb_image, box, grid_meters,
                                            labels, orientation_3d)

        # rgb_image = np.rot90(rgb_image)
        top_image_vis.append(rgb_image)
    return np.asarray(top_image_vis)
def reproject_and_save(dataset_dir, inference_dir, camera_name, seq):
    """Projects the 3D boxes of a sequence from the lidar frame into the given
    camera and saves the resulting overlay images under inference_dir."""
    seq_number = seq.split("/")[-1]
    bbox_list = sorted(glob.glob(seq + "/*.txt"))
    image_f_dir = os.path.join(dataset_dir, seq_number, "camera", camera_name)
    intrinsics_f = read_json(image_f_dir + "/intrinsics.json")
    intr_f = intrinscs_to_matrix(**intrinsics_f)
    poses_f = read_json(image_f_dir + "/poses.json")
    lidar_poses = read_json(
        os.path.join(dataset_dir, seq_number, "lidar", "poses.json"))
    output_dir = os.path.join(inference_dir, "image_bboxes", seq_number,
                              camera_name)
    os.makedirs(output_dir, exist_ok=True)
    for idx, bbox_path in enumerate(bbox_list):
        name = os.path.splitext(os.path.basename(bbox_path))[0]
        cam_T_lidar = get_extrinsic(lidar_poses[idx], poses_f[idx])

        # lidar_file = os.path.join(
        #    dataset_dir, seq_number, "lidar_processed", name + ".bin"
        # )
        # lidar = load_lidar(lidar_file)
        # lidar = lidar[:, :3]
        # Load boxes

        bboxes = load_bboxes(bbox_path, label_string=False)
        labels = bboxes[:, -1]

        lidar_corners_3d, _ = make_eight_points_boxes(bboxes[:, :-1])
        orient_3d = get_orient(lidar_corners_3d)

        lidar_corners_3d = project_boxes_to_cam(lidar_corners_3d, cam_T_lidar)
        orient_3d = project_boxes_to_cam(orient_3d, cam_T_lidar)

        # lidar = project_lidar_to_cam(lidar, cam_T_lidar)
        # figure = visualize_lidar(lidar)
        # figure = visualize_bboxes_3d(lidar_corners_3d, figure)
        # project to image
        image_path = os.path.join(image_f_dir, name + ".jpg")
        image_f = np.asarray(Image.open(image_path)) / 255
        box_2d = project_to_image(lidar_corners_3d, intr_f)
        orient_2d = project_to_image(orient_3d, intr_f)
        image = visualize_bboxes_on_image(image_f, box_2d, labels,
                                          orient_2d) * 255

        image_name = os.path.join(output_dir, name + ".png")
        img = Image.fromarray(image.astype("uint8"))
        img.save(image_name)
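
The projection chain above relies on project_boxes_to_cam and project_to_image, whose implementations are not shown. As a point of reference, here is a minimal NumPy sketch of the assumed math (homogeneous lidar-to-camera transform followed by a pinhole projection); the actual helpers may differ in axis conventions, clipping, or distortion handling.

import numpy as np

def project_points_to_image(points_xyz, cam_T_lidar, intrinsics):
    # Hypothetical re-implementation of project_boxes_to_cam + project_to_image
    # for N x 3 points; conventions are assumptions, not taken from the repo.
    pts_h = np.concatenate(
        (points_xyz.T, np.ones((1, points_xyz.shape[0]))), axis=0)  # (4, N)
    pts_cam = (cam_T_lidar @ pts_h)[:3]   # rigid transform into the camera frame
    pts_cam = pts_cam[:, pts_cam[2] > 0]  # keep points in front of the camera
    uvw = intrinsics @ pts_cam            # 3x3 pinhole projection
    uv = uvw[:2] / uvw[2]                 # normalize by depth
    return uv.T                           # (M, 2) pixel coordinates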
Example 3
    def load_data(self, data_input):
        """
        Loads image and semseg and resizes it
        Note: This is numpy function.
        """
        lidar_file, bboxes_file = np.asarray(data_input).astype("U")

        lidar = load_lidar(lidar_file)
        bboxes = load_bboxes(bboxes_file)
        labels = bboxes[:, -1]
        lidar_corners_3d, _ = make_eight_points_boxes(bboxes[:, :-1])
        if self.augmentation:
            np.random.shuffle(lidar)
            if np.random.uniform(
                    0, 1) < 0.50:  # 50% probability to flip over x axis
                lidar, lidar_corners_3d = random_flip_x_lidar_boxes(
                    lidar, lidar_corners_3d)
            if np.random.uniform(
                    0, 1) < 0.50:  # 50% probability to flip over y axis
                lidar, lidar_corners_3d = random_flip_y_lidar_boxes(
                    lidar, lidar_corners_3d)
            if np.random.uniform(0, 1) < 0.80:  # 80% probability to rotate
                lidar, lidar_corners_3d = random_rotate_lidar_boxes(
                    lidar, lidar_corners_3d)

        # Shift lidar coordinates to the positive quadrant
        lidar_coord = np.asarray(self.param_settings["lidar_offset"],
                                 dtype=np.float32)
        lidar = lidar + lidar_coord
        lidar_corners_3d = lidar_corners_3d + lidar_coord[:3]
        # Process data
        top_view = make_top_view_image(lidar,
                                       self.param_settings["grid_meters"],
                                       self.param_settings["voxel_size"])
        box_grid = get_bboxes_grid(
            labels,
            lidar_corners_3d,
            self.param_settings["grid_meters"],
            self.param_settings["bbox_voxel_size"],
        )
        return top_view, box_grid, lidar_file
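
Since load_data is a plain NumPy function, it presumably gets attached to the input pipeline through tf.numpy_function. The wiring below is a minimal sketch under that assumption; the output dtypes and the batching policy are guesses, not the project's actual DetectionDataset code.

import tensorflow as tf

def make_dataset(loader, file_pairs, batch_size=4):
    # loader: a callable like DetectionDataset.load_data that takes a
    # [lidar_file, bboxes_file] string pair and returns numpy arrays.
    ds = tf.data.Dataset.from_tensor_slices(file_pairs)

    def _load(pair):
        # Output dtypes are assumptions about what load_data returns.
        top_view, box_grid, lidar_file = tf.numpy_function(
            loader, [pair], [tf.float32, tf.float32, tf.string])
        return top_view, box_grid, lidar_file

    return ds.map(_load, num_parallel_calls=tf.data.AUTOTUNE).batch(batch_size)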
def preprocess_data(dataset_dir):
    """
    The function visualizes data from pandaset.
    Arguments:
        dataset_dir: directory with  Pandaset data
    """
    shift_lidar = [25, 50, 2.5]
    # Get list of data samples
    search_string = os.path.join(dataset_dir, "*")
    seq_list = sorted(glob.glob(search_string))
    for seq in tqdm(seq_list, desc="Process sequences", total=len(seq_list)):
        search_string = os.path.join(seq, "lidar", "*.pkl.gz")
        lidar_list = sorted(glob.glob(search_string))
        lidar_pose_path = os.path.join(seq, "lidar", "poses.json")
        lidar_pose = read_json(lidar_pose_path)
        for idx, lidar_path in enumerate(lidar_list):
            # Get pose of the lidar
            translation = lidar_pose[idx]["position"]
            translation = np.asarray([translation[key] for key in translation])
            rotation = lidar_pose[idx]["heading"]
            rotation = np.asarray([rotation[key] for key in rotation])
            rotation = quaternion_to_euler(*rotation)
            Rt = to_transform_matrix(translation, rotation)

            # Get respective bboxes
            bbox_path = lidar_path.split("/")
            bbox_path[-2] = "annotations/cuboids"
            bbox_path = os.path.join(*bbox_path)

            # Load data
            lidar = np.asarray(pd.read_pickle(lidar_path))
            # Get only lidar 0 (there is also lidar 1)
            lidar = lidar[lidar[:, -1] == 0]
            intensity = lidar[:, 3]
            lidar = transform_lidar_box_3d(lidar, Rt)
            # add intensity
            lidar = np.concatenate((lidar, intensity[:, None]), axis=-1)

            # Load bboxes
            bboxes = np.asarray(pd.read_pickle(bbox_path))
            labels, bboxes = make_xzyhwly(bboxes)
            corners_3d, orientation_3d = make_eight_points_boxes(bboxes)
            corners_3d = np.asarray(
                [transform_lidar_box_3d(box, Rt) for box in corners_3d])
            orientation_3d = np.asarray(
                [transform_lidar_box_3d(box, Rt) for box in orientation_3d])
            labels, corners_3d, orientation_3d = filter_boxes(
                labels, corners_3d, orientation_3d, lidar)
            centroid, width, length, height, yaw = get_bboxes_parameters_from_points(
                corners_3d)

            boxes_new = np.concatenate(
                (
                    centroid,
                    length[:, None],
                    width[:, None],
                    height[:, None],
                    yaw[:, None],
                ),
                axis=-1,
            )
            lidar[:, :3] = lidar[:, :3] + shift_lidar

            corners_3d, orientation_3d = make_eight_points_boxes(boxes_new)
            corners_3d = corners_3d + shift_lidar
            orientation_3d = orientation_3d + shift_lidar
            figure = visualize_bboxes_3d(corners_3d, None, orientation_3d)
            figure = visualize_lidar(lidar, figure)
            mlab.show(1)
            input()
            mlab.close(figure)
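
The pose handling above (quaternion_to_euler followed by to_transform_matrix) amounts to building a 4x4 lidar-to-world transform from the PandaSet pose record. A convention-dependent NumPy stand-in is sketched below; the (w, x, y, z) quaternion ordering is an assumption.

import numpy as np

def pose_to_transform(quat_wxyz, translation):
    # Hypothetical equivalent of quaternion_to_euler + to_transform_matrix:
    # unit quaternion (w, x, y, z) and translation -> 4x4 rigid transform.
    w, x, y, z = quat_wxyz
    R = np.array([
        [1 - 2 * (y * y + z * z), 2 * (x * y - w * z), 2 * (x * z + w * y)],
        [2 * (x * y + w * z), 1 - 2 * (x * x + z * z), 2 * (y * z - w * x)],
        [2 * (x * z - w * y), 2 * (y * z + w * x), 1 - 2 * (x * x + y * y)],
    ])
    Rt = np.eye(4)
    Rt[:3, :3] = R
    Rt[:3, 3] = translation
    return Rt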
Example 5
def preprocess_data(dataset_dir):
    """
    The function visualizes data from pandaset.
    Arguments:
        dataset_dir: directory with  Pandaset data
    """

    # Get list of data samples
    search_string = os.path.join(dataset_dir, "*")
    seq_list = sorted(glob.glob(search_string))
    for seq in tqdm(seq_list, desc="Process sequences", total=len(seq_list)):
        # Make output dirs for data
        lidar_out_dir = os.path.join(seq, "lidar_processed")
        bbox_out_dir = os.path.join(seq, "bbox_processed")
        os.makedirs(lidar_out_dir, exist_ok=True)
        os.makedirs(bbox_out_dir, exist_ok=True)
        search_string = os.path.join(seq, "lidar", "*.pkl.gz")
        lidar_list = sorted(glob.glob(search_string))
        lidar_pose_path = os.path.join(seq, "lidar", "poses.json")
        lidar_pose = read_json(lidar_pose_path)
        for idx, lidar_path in enumerate(lidar_list):
            # Strip both the .gz and .pkl extensions to get the sample index
            sample_idx = os.path.splitext(
                os.path.basename(lidar_path))[0].split(".")[0]
            # Get pose of the lidar
            translation = lidar_pose[idx]["position"]
            translation = np.asarray([translation[key] for key in translation])
            rotation = lidar_pose[idx]["heading"]
            rotation = np.asarray([rotation[key] for key in rotation])
            rotation = quaternion_to_euler(*rotation)
            Rt = to_transform_matrix(translation, rotation)

            # Get respective bboxes
            bbox_path = lidar_path.split("/")
            bbox_path[-2] = "annotations/cuboids"
            bbox_path = os.path.join(*bbox_path)

            # Load data
            lidar = np.asarray(pd.read_pickle(lidar_path))
            # Get only lidar 0 (there is also lidar 1)
            lidar = lidar[lidar[:, -1] == 0]
            intensity = lidar[:, 3]
            lidar = transform_lidar_box_3d(lidar, Rt)
            # add intensity
            lidar = np.concatenate((lidar, intensity[:, None]), axis=-1)

            # Load bboxes
            bboxes = np.asarray(pd.read_pickle(bbox_path))
            labels, bboxes = make_xzyhwly(bboxes)
            corners_3d, orientation_3d = make_eight_points_boxes(bboxes)
            corners_3d = np.asarray(
                [transform_lidar_box_3d(box, Rt) for box in corners_3d]
            )
            orientation_3d = np.asarray(
                [transform_lidar_box_3d(box, Rt) for box in orientation_3d]
            )
            # Filter boxes containing fewer than 20 lidar points
            labels, corners_3d, orientation_3d = filter_boxes(
                labels, corners_3d, orientation_3d, lidar
            )
            centroid, width, length, height, yaw = get_bboxes_parameters_from_points(
                corners_3d
            )

            # Save data
            lidar_filename = os.path.join(lidar_out_dir, sample_idx + ".bin")
            save_lidar(lidar_filename, lidar.astype(np.float32))
            box_filename = os.path.join(bbox_out_dir, sample_idx + ".txt")
            save_bboxes_to_file(
                box_filename, centroid, width, length, height, yaw, labels
            )
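
The .bin files written by save_lidar are assumed here to be flat float32 dumps (KITTI-style); the round trip below is a sketch under that assumption and may not match the repository's save_lidar / load_lidar exactly.

import numpy as np

def save_lidar_bin(path, points):
    # Flat float32 dump, one row per point (x, y, z, intensity).
    points.astype(np.float32).tofile(path)

def load_lidar_bin(path, num_cols=4):
    return np.fromfile(path, dtype=np.float32).reshape(-1, num_cols)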
cam_T_lid = get_extrinsic(pose_l[0], pose_c[0])

lidar = load_lidar(lidar_file)
lidar = lidar[:, :3]
lidar = np.transpose(lidar)
ones = np.ones_like(lidar[0])
lidar_hom = np.concatenate((lidar, ones[None, :]), axis=0)
lidar_cam = np.matmul(cam_T_lid, lidar_hom)
lidar_cam = np.transpose(lidar_cam)
lidar = lidar_cam[:, :3]
lidar = lidar[lidar[:, 2] >= 0]
bboxes = load_bboxes(bboxes_file)
labels = bboxes[:, -1]

print(f"bboxes[:, 1:] {bboxes.shape}")
lidar_corners_3d, orient = make_eight_points_boxes(bboxes[:, :-1])
(
    centroid,
    width,
    length,
    height,
    yaw,
) = get_bboxes_parameters_from_points(lidar_corners_3d)
print(f"shape {centroid.shape}, height {height.shape} yaw {yaw.shape}")
box_xyzlwhy = np.concatenate(
    (centroid, length[:, None], width[:, None], height[:, None], yaw[:, None]),
    axis=-1)
lidar_corners_3d, orient = make_eight_points_boxes(box_xyzlwhy)

# Shift lidar coordinates to the positive quadrant
lidar_coord = np.asarray(param_settings["lidar_offset"], dtype=np.float32)
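
The snippet converts boxes to corner points, recovers the (centroid, length, width, height, yaw) parameters, and rebuilds the corners, i.e. a parameterization round trip. Below is a self-contained sketch of that round trip for a single yaw-only box; it is not the repository's make_eight_points_boxes.

import numpy as np

def box_to_corners(centroid, length, width, height, yaw):
    # Corner offsets in the box frame (x along length, y along width),
    # rotated about z by yaw and shifted to the centroid.
    dx, dy, dz = length / 2, width / 2, height / 2
    corners = np.array([[sx * dx, sy * dy, sz * dz]
                        for sz in (-1, 1) for sy in (-1, 1) for sx in (-1, 1)])
    c, s = np.cos(yaw), np.sin(yaw)
    R = np.array([[c, -s, 0], [s, c, 0], [0, 0, 1]])
    return corners @ R.T + centroid

corners = box_to_corners(np.array([10.0, 2.0, 0.5]), 4.2, 1.8, 1.6, 0.3)
assert np.allclose(corners.mean(axis=0), [10.0, 2.0, 0.5])  # centroid recovered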
Example 7
def validation_inference(param_settings, dataset_file, model_dir, output_dir):
    setup_gpu()

    # Load model
    model = tf.keras.models.load_model(model_dir)
    bbox_voxel_size = np.asarray(param_settings["bbox_voxel_size"],
                                 dtype=np.float32)
    lidar_coord = np.array(param_settings["lidar_offset"], dtype=np.float32)
    grid_meters = param_settings["grid_meters"]

    val_dataset = DetectionDataset(param_settings, dataset_file, shuffle=False)
    param_settings["val_size"] = val_dataset.num_samples
    for val_samples in tqdm(
            val_dataset.dataset,
            desc=f"val_inference",
            total=val_dataset.num_it_per_epoch,
    ):
        top_view, gt_boxes, lidar_filenames = val_samples
        predictions = model(top_view, training=False)
        for image, predict, gt, filename in zip(top_view, predictions,
                                                gt_boxes, lidar_filenames):
            filename = str(filename.numpy())
            seq_folder = filename.split("/")[-3]
            name = os.path.splitext(os.path.basename(filename))[0]
            # Ensure that output dir exists or create it
            top_view_dir = os.path.join(output_dir, "top_view", seq_folder)
            bboxes_dir = os.path.join(output_dir, "bboxes", seq_folder)
            os.makedirs(top_view_dir, exist_ok=True)
            os.makedirs(bboxes_dir, exist_ok=True)
            p_top_view = (visualize_2d_boxes_on_top_image(
                [predict],
                [image],
                grid_meters,
                bbox_voxel_size,
                prediction=True,
            ) * 255)
            gt_top_view = (visualize_2d_boxes_on_top_image(
                [gt],
                [image],
                grid_meters,
                bbox_voxel_size,
                prediction=False,
            ) * 255)
            result = np.vstack((p_top_view[0], gt_top_view[0]))
            file_to_save = os.path.join(top_view_dir, name + ".png")
            img = Image.fromarray(result.astype("uint8"))
            img.save(file_to_save)

            box, labels, _ = get_boxes_from_box_grid(predict, bbox_voxel_size)
            box = box.numpy()
            box, _ = make_eight_points_boxes(box)
            if len(box) > 0:
                box = box - lidar_coord[:3]
                labels = np.argmax(labels, axis=-1)
                (
                    centroid,
                    width,
                    length,
                    height,
                    yaw,
                ) = get_bboxes_parameters_from_points(box)
                bboxes_name = os.path.join(bboxes_dir, name + ".txt")
                save_bboxes_to_file(bboxes_name, centroid, width, length,
                                    height, yaw, labels)
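
A hypothetical invocation of validation_inference; the keys below mirror those read inside the function, but the concrete values and paths are illustrative assumptions, not values from the repository.

param_settings = {
    "bbox_voxel_size": [0.4, 0.4, 0.4],
    "lidar_offset": [25.0, 50.0, 2.5],  # matches shift_lidar used in preprocessing
    "grid_meters": [50.0, 100.0],
}
validation_inference(
    param_settings,
    dataset_file="val_dataset.txt",     # assumed list of validation samples
    model_dir="checkpoints/model",      # assumed Keras SavedModel directory
    output_dir="inference_output",
)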