def reproject_and_save(dataset_dir, inference_dir, camera_name, seq):

    seq_number = seq.split("/")[-1]
    bbox_list = sorted(glob.glob(seq + "/*.txt"))
    image_f_dir = os.path.join(dataset_dir, seq_number, "camera", camera_name)
    intrinsics_f = read_json(image_f_dir + "/intrinsics.json")
    intr_f = intrinscs_to_matrix(**intrinsics_f)
    poses_f = read_json(image_f_dir + "/poses.json")
    lidar_poses = read_json(
        os.path.join(dataset_dir, seq_number, "lidar", "poses.json"))
    output_dir = os.path.join(inference_dir, "image_bboxes", seq_number,
                              camera_name)
    os.makedirs(output_dir, exist_ok=True)
    for idx, bbox_path in enumerate(bbox_list):
        name = os.path.splitext(os.path.basename(bbox_path))[0]
        cam_T_lidar = get_extrinsic(lidar_poses[idx], poses_f[idx])

        # lidar_file = os.path.join(
        #    dataset_dir, seq_number, "lidar_processed", name + ".bin"
        # )
        # lidar = load_lidar(lidar_file)
        # lidar = lidar[:, :3]
        # Load boxes

        bboxes = load_bboxes(bbox_path, label_string=False)
        labels = bboxes[:, -1]

        lidar_corners_3d, _ = make_eight_points_boxes(bboxes[:, :-1])
        orient_3d = get_orient(lidar_corners_3d)

        lidar_corners_3d = project_boxes_to_cam(lidar_corners_3d, cam_T_lidar)
        orient_3d = project_boxes_to_cam(orient_3d, cam_T_lidar)

        # lidar = project_lidar_to_cam(lidar, cam_T_lidar)
        # figure = visualize_lidar(lidar)
        # figure = visualize_bboxes_3d(lidar_corners_3d, figure)
        # project to image
        image_path = os.path.join(image_f_dir, name + ".jpg")
        image_f = np.asarray(Image.open(image_path)) / 255
        box_2d = project_to_image(lidar_corners_3d, intr_f)
        orient_2d = project_to_image(orient_3d, intr_f)
        image = visualize_bboxes_on_image(image_f, box_2d, labels,
                                          orient_2d) * 255

        image_name = os.path.join(output_dir, name + ".png")
        img = Image.fromarray(image.astype("uint8"))
        img.save(image_name)
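

# A minimal usage sketch for reproject_and_save (paths and camera name below are
# illustrative assumptions, not taken from the original source):
#
#     seq = "/path/to/inference/bboxes/001"
#     reproject_and_save(
#         dataset_dir="/path/to/PandaSet",
#         inference_dir="/path/to/inference",
#         camera_name="front_camera",
#         seq=seq,
#     )
#
# Reprojected images are written to
# <inference_dir>/image_bboxes/<seq_number>/<camera_name>/<frame>.png.
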
def report(
    checkpoints_dir,
    report_dir,
    from_file=None,
    full_report=False,
    latex_pdf=True,
    remove_plots=True,
):
    """
    The generate json report and save it and also generate pdf report with plots
    Arguments:
        checkpoints_dir: ditectory containing all checkpoints from training
        report_dir: directory to save reprot
        from_file: if not None generate pdf report from specified .json file
        full_report: generate .json file containing all information
                     from all epoch (not only from the best)
        latex_pdf: if true generate pdf file otherwise only .json
        remove_plots: if true removes all generated .png plots
    """
    if from_file is None:
        report_json = os.path.join(report_dir, "experiment_report.json")
        graphics_dir = os.path.join(report_dir, "experiment_report")
        os.makedirs(graphics_dir, exist_ok=True)

        report_name = "experiment_report"
        # Metrics to plot
        plot_metrics = ["train_loss", "val_loss"]
        main_metric = "val_loss"  # Metric to define best epoch
        report_dict = get_report_json(
            checkpoints_dir,
            report_dir,
            plot_metrics,
            main_metric,
            full_report=full_report,
        )
        save_to_json(report_json, report_dict)

    else:
        report_dict = read_json(from_file)
        graphics_dir = os.path.splitext(from_file)[0]
        report_dir = "/".join(graphics_dir.split("/")[:-1])
        report_name = graphics_dir.split("/")[-1]
        os.makedirs(graphics_dir, exist_ok=True)

    # Get metrics to plot
    plot_dict = report_dict["plot_metrics"]
    best_epoch = int(report_dict["best_epoch"])

    # Create plot images for the loss and the learning rate
    plot_metric_to_file(graphics_dir,
                        plot_dict,
                        metric_name="loss",
                        best_epoch=best_epoch)
    plot_learning_rate_to_file(graphics_dir, report_dict)

    if latex_pdf:
        generate_latex_pdf(graphics_dir, report_dir, report_dict, report_name)
    if remove_plots:
        shutil.rmtree(graphics_dir)
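

# Usage sketch for report (directory names are hypothetical):
#
#     report(
#         checkpoints_dir="train_checkpoints",
#         report_dir="reports",
#     )
#
# With the defaults this writes reports/experiment_report.json, renders the loss and
# learning-rate plots into reports/experiment_report/, builds the PDF report via
# generate_latex_pdf, and finally removes the temporary plot directory.
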
def preprocess_data(dataset_dir):
    """
    The function visualizes data from Pandaset.
    Arguments:
        dataset_dir: directory with Pandaset data
    """
    shift_lidar = [25, 50, 2.5]
    # Get list of data samples
    search_string = os.path.join(dataset_dir, "*")
    seq_list = sorted(glob.glob(search_string))
    for seq in tqdm(seq_list, desc="Process sequences", total=len(seq_list)):
        search_string = os.path.join(seq, "lidar", "*.pkl.gz")
        lidar_list = sorted(glob.glob(search_string))
        lidar_pose_path = os.path.join(seq, "lidar", "poses.json")
        lidar_pose = read_json(lidar_pose_path)
        for idx, lidar_path in enumerate(lidar_list):
            # Get pose of the lidar
            translation = lidar_pose[idx]["position"]
            translation = np.asarray([translation[key] for key in translation])
            rotation = lidar_pose[idx]["heading"]
            rotation = np.asarray([rotation[key] for key in rotation])
            rotation = quaternion_to_euler(*rotation)
            Rt = to_transform_matrix(translation, rotation)

            # Get respective bboxes
            bbox_path = lidar_path.split("/")
            bbox_path[-2] = "annotations/cuboids"
            bbox_path = os.path.join(*bbox_path)

            # Load data
            lidar = np.asarray(pd.read_pickle(lidar_path))
            # Get only lidar 0 (there is also lidar 1)
            lidar = lidar[lidar[:, -1] == 0]
            intensity = lidar[:, 3]
            lidar = transform_lidar_box_3d(lidar, Rt)
            # add intensity
            lidar = np.concatenate((lidar, intensity[:, None]), axis=-1)

            # Load bboxes
            bboxes = np.asarray(pd.read_pickle(bbox_path))
            labels, bboxes = make_xzyhwly(bboxes)
            corners_3d, orientation_3d = make_eight_points_boxes(bboxes)
            corners_3d = np.asarray(
                [transform_lidar_box_3d(box, Rt) for box in corners_3d])
            orientation_3d = np.asarray(
                [transform_lidar_box_3d(box, Rt) for box in orientation_3d])
            labels, corners_3d, orientation_3d = filter_boxes(
                labels, corners_3d, orientation_3d, lidar)
            centroid, width, length, height, yaw = get_bboxes_parameters_from_points(
                corners_3d)

            boxes_new = np.concatenate(
                (
                    centroid,
                    length[:, None],
                    width[:, None],
                    height[:, None],
                    yaw[:, None],
                ),
                axis=-1,
            )
            lidar[:, :3] = lidar[:, :3] + shift_lidar

            corners_3d, orientation_3d = make_eight_points_boxes(boxes_new)
            corners_3d = corners_3d + shift_lidar
            orientation_3d = orientation_3d + shift_lidar
            figure = visualize_bboxes_3d(corners_3d, None, orientation_3d)
            figure = visualize_lidar(lidar, figure)
            mlab.show(1)
            input()
            mlab.close(figure)
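

# This variant is interactive: for each frame it opens a mayavi figure with the
# transformed point cloud and boxes, then blocks on input() until Enter is pressed
# before closing the figure and moving on. A hypothetical call:
#
#     preprocess_data("/path/to/PandaSet")
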
Example #4
def preprocess_data(dataset_dir):
    """
    The function preprocesses Pandaset data: lidar scans and boxes are transformed
    to the world frame, boxes are filtered, and the results are saved to disk.
    Arguments:
        dataset_dir: directory with Pandaset data
    """

    # Get list of data samples
    search_string = os.path.join(dataset_dir, "*")
    seq_list = sorted(glob.glob(search_string))
    for seq in tqdm(seq_list, desc="Process sequences", total=len(seq_list)):
        # Make output dirs for data
        lidar_out_dir = os.path.join(seq, "lidar_processed")
        bbox_out_dir = os.path.join(seq, "bbox_processed")
        os.makedirs(lidar_out_dir, exist_ok=True)
        os.makedirs(bbox_out_dir, exist_ok=True)
        search_string = os.path.join(seq, "lidar", "*.pkl.gz")
        lidar_list = sorted(glob.glob(search_string))
        lidar_pose_path = os.path.join(seq, "lidar", "poses.json")
        lidar_pose = read_json(lidar_pose_path)
        for idx, lidar_path in enumerate(lidar_list):
            sample_idx = os.path.splitext(os.path.basename(lidar_path))[0].split(".")[0]
            # Get pose of the lidar
            translation = lidar_pose[idx]["position"]
            translation = np.asarray([translation[key] for key in translation])
            rotation = lidar_pose[idx]["heading"]
            rotation = np.asarray([rotation[key] for key in rotation])
            rotation = quaternion_to_euler(*rotation)
            Rt = to_transform_matrix(translation, rotation)

            # Get respective bboxes
            bbox_path = lidar_path.split("/")
            bbox_path[-2] = "annotations/cuboids"
            bbox_path = os.path.join(*bbox_path)

            # Load data
            lidar = np.asarray(pd.read_pickle(lidar_path))
            # Get only lidar 0 (there is also lidar 1)
            lidar = lidar[lidar[:, -1] == 0]
            intensity = lidar[:, 3]
            lidar = transform_lidar_box_3d(lidar, Rt)
            # add intensity
            lidar = np.concatenate((lidar, intensity[:, None]), axis=-1)

            # Load bboxes
            bboxes = np.asarray(pd.read_pickle(bbox_path))
            labels, bboxes = make_xzyhwly(bboxes)
            corners_3d, orientation_3d = make_eight_points_boxes(bboxes)
            corners_3d = np.asarray(
                [transform_lidar_box_3d(box, Rt) for box in corners_3d]
            )
            orientation_3d = np.asarray(
                [transform_lidar_box_3d(box, Rt) for box in orientation_3d]
            )
            # filter boxes containing fewer than 20 lidar points inside
            labels, corners_3d, orientation_3d = filter_boxes(
                labels, corners_3d, orientation_3d, lidar
            )
            centroid, width, length, height, yaw = get_bboxes_parameters_from_points(
                corners_3d
            )

            # Save data
            lidar_filename = os.path.join(lidar_out_dir, sample_idx + ".bin")
            save_lidar(lidar_filename, lidar.astype(np.float32))
            box_filename = os.path.join(bbox_out_dir, sample_idx + ".txt")
            save_bboxes_to_file(
                box_filename, centroid, width, length, height, yaw, labels
            )
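

# Usage sketch (the dataset path is an assumption): running
#
#     preprocess_data("/path/to/PandaSet")
#
# creates, for every sequence, a lidar_processed/ folder with world-frame point
# clouds stored as float32 .bin files and a bbox_processed/ folder with one .txt
# file of box parameters (centroid, width, length, height, yaw, label) per frame.
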
Example #5
def get_report_json(
    checkpoints_dir, report_dir, plot_metrics, main_metric, full_report=False
):
    """
    Parse checkpoints_dir and returns json report with all information
    Arguments:
        checkpoints_dir: ditectory containing all checkpoints from training
        report_dir: directory to save reprot
        plot_metrics: metrics to plot
        main_metric: main metric to define best epoch
        full_report: generate .json file containing all information
                     from all epoch (not only from the best)
    Returns:
        report_dict: the dictionary with information for report
    """
    search_string = os.path.join(checkpoints_dir, "*")
    model_list = sorted(glob.glob(search_string))
    date_now = datetime.datetime.now().strftime("%d %B %Y")

    report_dict = {
        "model_name": None,
        "date": date_now,
        "parameters": None,
        "best_epoch": None,
        "main_metric": main_metric,
        "epoch_metrics": None,
        "plot_metrics": None,
    }

    if len(model_list) == 0:
        raise ValueError(
            "The checkpoint folder {} is empty".format(checkpoints_dir))
    else:
        epoch_metrics = {}
        for model_folder in model_list:
            model_name, epoch = model_folder.split("/")[-1].split("-")

            # Fill header
            if report_dict["model_name"] is None and report_dict["parameters"] is None:
                param_filename = os.path.join(model_folder, "parameters.json")
                report_dict["model_name"] = model_name
                report_dict["parameters"] = read_json(param_filename)
            # Check that we have only one model name inside checkpoints
            if model_name != report_dict["model_name"]:
                raise ValueError(
                    "model name in report {} is not the same as "
                    "current model folder {}".format(
                        report_dict["model_name"], model_name
                    )
                )
            # Fill epoch metrics
            metrics_filename = os.path.join(model_folder, "epoch_metrics.json")
            epoch_metrics[epoch] = read_json(metrics_filename)
        # Find best epoch by metric

        report_dict["epoch_metrics"] = epoch_metrics

        # Get metrics to plot
        plot_metrics = parse_report_for_epoch_metrics(report_dict, metrics=plot_metrics)

        # Find best checkpoint idx, epoch
        best_ckpt_idx = np.argmax(plot_metrics[main_metric]["value"])
        best_epoch = "{0:04d}".format(plot_metrics[main_metric]["epoch"][best_ckpt_idx])
        report_dict["best_epoch"] = best_epoch
        report_dict["plot_metrics"] = plot_metrics
        if not full_report:
            # Override the epoch metrics with info from only best epoch
            report_dict["epoch_metrics"] = {best_epoch: epoch_metrics[best_epoch]}

    return report_dict
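

# Shape of the returned dictionary, sketched from the code above (values are
# illustrative):
#
#     {
#         "model_name": "my_model",
#         "date": "01 January 2021",
#         "parameters": {...},                 # contents of parameters.json
#         "best_epoch": "0004",
#         "main_metric": "val_loss",
#         "epoch_metrics": {"0004": {...}},    # only the best epoch unless full_report
#         "plot_metrics": {...},               # from parse_report_for_epoch_metrics
#     }
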
from detection_3d.tools.visualization_tools import visualize_lidar, visualize_bboxes_3d
import mayavi.mlab as mlab
import numpy as np
from detection_3d.tools.file_io import read_json
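
# NOTE: Parameters, load_lidar, and load_bboxes are also used in this snippet but
# their imports are not shown here; they are assumed to come from the project's
# config and file I/O modules.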

param_settings = Parameters().settings

from detection_3d.reproject_to_image import intrinscs_to_matrix, get_extrinsic

bboxes_file = "/home/denis/lidar_dynamic_objects_detection/detection_3d/dataset/001/bbox_processed/00.txt"
lidar_file = "/home/denis/Pandaset/PandaSet/001/lidar_processed/00.bin"
pose_lidar = "/home/denis/Pandaset/PandaSet/001/lidar/poses.json"
pose_camera = "/home/denis/Pandaset/PandaSet/001/camera/front_camera/poses.json"
intrinsics = "/home/denis/Pandaset/PandaSet/001/camera/front_camera/intrinsics.json"

pose_l = read_json(pose_lidar)
pose_c = read_json(pose_camera)
intr_c = read_json(intrinsics)

cam_T_lid = get_extrinsic(pose_l[0], pose_c[0])

lidar = load_lidar(lidar_file)
lidar = lidar[:, :3]
lidar = np.transpose(lidar)
ones = np.ones_like(lidar[0])
lidar_hom = np.concatenate((lidar, ones[None, :]), axis=0)
lidar_cam = np.matmul(cam_T_lid, lidar_hom)
lidar_cam = np.transpose(lidar_cam)
lidar = lidar_cam[:, :3]
lidar = lidar[lidar[:, 2] >= 0]
bboxes = load_bboxes(bboxes_file)
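
# A possible continuation, using the visualization helpers imported above to inspect
# the camera-frame point cloud and boxes (the box handling mirrors reproject_and_save
# and assumes make_eight_points_boxes / project_boxes_to_cam are available):
#
#     corners_3d, _ = make_eight_points_boxes(bboxes[:, :-1])
#     corners_3d = project_boxes_to_cam(corners_3d, cam_T_lid)
#     figure = visualize_lidar(lidar)
#     figure = visualize_bboxes_3d(corners_3d, figure)
#     mlab.show()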