Example #1
def get_lidar_point_cloud_with_color(img_idx,
                                     img_dir,
                                     calib_dir,
                                     velo_dir,
                                     im_size=None):
    """ Calculates the lidar point cloud, and optionally returns only the
    points that are projected to the image.

    :param img_idx: image index
    :param img_dir: directory with image files
    :param calib_dir: directory with calibration files
    :param velo_dir: directory with velodyne files
    :param im_size: (optional) 2 x 1 list containing the size of the image
                      to filter the point cloud [w, h]

    :return: (3, N) point_cloud in the form [[x,...][y,...][z,...]]; when
        im_size is given, also returns the (3, N) RGB colors of the kept
        points
    """

    # Read calibration info and raw lidar points
    frame_calib = calib_utils.read_calibration(calib_dir, img_idx)
    x, y, z, _ = calib_utils.read_lidar(velo_dir=velo_dir, img_idx=img_idx)

    # Calculate the point cloud
    pts = np.vstack((x, y, z)).T
    pts = calib_utils.lidar_to_cam_frame(pts, frame_calib)

    # If no image size is given, return the full point cloud
    if not im_size:
        point_cloud = pts.T
        return point_cloud

    else:
        # Only keep points in front of camera (positive z)
        pts = pts[pts[:, 2] > 0]
        point_cloud = pts.T

        # Project to image frame
        point_in_im = calib_utils.project_to_image(point_cloud,
                                                   p=frame_calib.p2).T

        # Filter based on the given image size
        image_filter = (point_in_im[:, 0] > 0) & \
                       (point_in_im[:, 0] < im_size[0]) & \
                       (point_in_im[:, 1] > 0) & \
                       (point_in_im[:, 1] < im_size[1])

        # Load the image and look up the color at each projected point
        img_path = img_dir + "/%06d.png" % img_idx
        img = np.array(Image.open(img_path))
        point_colors = img[point_in_im[image_filter, 1].astype(int),
                           point_in_im[image_filter, 0].astype(int)]

    return pts[image_filter].T, point_colors.T
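
A minimal usage sketch for the function above. The KITTI-style directory paths and the image resolution are hypothetical placeholders, and the snippet assumes the same module context (calib_utils, numpy, PIL already imported):

# Usage sketch (hypothetical paths; 1242 x 375 is a common KITTI resolution).
img_dir = 'kitti/object/training/image_2'    # hypothetical path
calib_dir = 'kitti/object/training/calib'    # hypothetical path
velo_dir = 'kitti/object/training/velodyne'  # hypothetical path

points, colors = get_lidar_point_cloud_with_color(
    0, img_dir, calib_dir, velo_dir, im_size=[1242, 375])

print(points.shape)  # (3, N): x, y, z rows in the camera frame
print(colors.shape)  # (3, N): RGB color of each projected point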
Example #2
def get_lidar_point_cloud_new(img_idx,
                              frame_calib,
                              velo_dir,
                              im_size=None,
                              min_intensity=None):
    """ Calculates the lidar point cloud, and optionally returns only the
    points that are projected to the image.

    :param img_idx: image index
    :param frame_calib: frame calibration (as returned by
                        calib_utils.read_calibration)
    :param velo_dir: directory with velodyne files
    :param im_size: (optional) 2 x 1 list containing the size of the image
                      to filter the point cloud [w, h]
    :param min_intensity: (optional) minimum intensity required to keep a point

    :return: (3, N) point_cloud in the form [[x,...][y,...][z,...]]
    """

    # Read the raw lidar points
    x, y, z, i = calib_utils.read_lidar(velo_dir=velo_dir, img_idx=img_idx)

    # Calculate the point cloud
    pts = np.vstack((x, y, z)).T
    pts = calib_utils.lidar_to_cam_frame(pts, frame_calib)

    # If no image size is given, return the full point cloud
    if not im_size:
        point_cloud = pts.T
        return point_cloud

    else:
        # Only keep points in front of camera (positive z), keeping the
        # intensities aligned with the surviving points
        front_filter = pts[:, 2] > 0
        pts = pts[front_filter]
        i = i[front_filter]
        point_cloud = pts.T

        # Project to image frame
        point_in_im = calib_utils.project_to_image(point_cloud,
                                                   p=frame_calib.p2).T

        # Filter based on the given image size
        image_filter = (point_in_im[:, 0] > 0) & \
                       (point_in_im[:, 0] < im_size[0]) & \
                       (point_in_im[:, 1] > 0) & \
                       (point_in_im[:, 1] < im_size[1])

    # Explicit None check so a threshold of 0 is not silently ignored
    if min_intensity is None:
        return pts[image_filter].T

    else:
        intensity_filter = i > min_intensity
        point_filter = np.logical_and(image_filter, intensity_filter)
        return pts[point_filter].T
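
A short sketch of how the optional filters compose. The paths and the 0.2 threshold are illustrative values, not taken from the source:

# Sketch: composing the optional filters (hypothetical paths, illustrative threshold).
velo_dir = 'kitti/object/training/velodyne'  # hypothetical path
frame_calib = calib_utils.read_calibration('kitti/object/training/calib', 0)

# Full cloud, no filtering:
all_pts = get_lidar_point_cloud_new(0, frame_calib, velo_dir)

# Keep only points that project into a 1242 x 375 image AND whose
# reflectance exceeds 0.2 (KITTI intensities lie in [0, 1]):
bright_pts = get_lidar_point_cloud_new(
    0, frame_calib, velo_dir, im_size=[1242, 375], min_intensity=0.2)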
Example #3
    def test_read_lidar(self):
        test_data_dir = ROOTDIR + "/tests/test_data/calib"
        velo_mat = scipy.io.loadmat(test_data_dir + '/test_velo.mat')
        velo_true = velo_mat['current_frame']['xyz_velodyne'][0][0][:, 0:3]

        x, y, z, i = calib.read_lidar(velo_dir=test_data_dir,
                                      img_idx=0)

        velo_test = np.vstack((x, y, z)).T
        np.testing.assert_almost_equal(velo_true, velo_test, decimal=5, verbose=True)

        velo_mat = scipy.io.loadmat(test_data_dir + '/test_velo_tf.mat')
        velo_true_tf = velo_mat['velo_cam_frame']

        calib_out = calib.read_calibration(test_data_dir, 0)
        xyz_cam = calib.lidar_to_cam_frame(velo_test, calib_out)

        np.testing.assert_almost_equal(velo_true_tf, xyz_cam, decimal=5, verbose=True)
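
The transform this test exercises, lidar_to_cam_frame, applies the rigid [R | t] transform from the calibration file. A minimal numpy sketch of the same operation, assuming `tr` is a (3, 4) matrix in KITTI's Tr_velo_to_cam layout (an assumption about the calib object, not the library's actual API):

# Sketch of a lidar-to-camera rigid transform with a (3, 4) [R | t] matrix.
import numpy as np

def lidar_to_cam_sketch(pts_velo, tr):
    """pts_velo: (N, 3) velodyne-frame points -> (N, 3) camera-frame points."""
    n = pts_velo.shape[0]
    pts_h = np.hstack([pts_velo, np.ones((n, 1))])  # homogeneous (N, 4)
    return pts_h @ tr.T                             # (N, 4) @ (4, 3) -> (N, 3)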
Example #4
def main():
    """This demo shows RPN proposals and AVOD predictions in 3D
    and 2D in image space. Given certain thresholds for proposals
    and predictions, it selects and draws the bounding boxes on
    the image sample. It goes through the entire proposal and
    prediction samples for the given dataset split.

    The proposals, overlaid, and prediction images can be toggled on or off
    separately in the options section.
    The prediction score and IoU with ground truth can be toggled on or off
    as well, shown as (score, IoU) above the detection.
    """
    dataset_config = DatasetBuilder.copy_config(DatasetBuilder.KITTI_VAL)

    ##############################
    # Options
    ##############################
    dataset_config = DatasetBuilder.merge_defaults(dataset_config)
    dataset_config.data_split = 'val'

    fig_size = (10, 6.1)

    rpn_score_threshold = 0.1
    avod_score_threshold = 0.1

    # gt_classes = ['Car']
    gt_classes = ['Pedestrian']
    # gt_classes = ['Pedestrian', 'Cyclist']

    # Overwrite this to select a specific checkpoint
    global_step = 115000  # None
    checkpoint_name = 'pplp_pedestrian_kitti'

    # Drawing Toggles
    draw_proposals_separate = False
    draw_overlaid = True  # To draw both proposal and prediction bounding boxes
    draw_predictions_separate = False

    # Show orientation for both GT and proposals/predictions
    draw_orientations_on_prop = True
    draw_orientations_on_pred = True

    # Draw 2D bounding boxes
    draw_projected_2d_boxes = True

    # Draw pointclouds
    draw_point_cloud = True
    point_cloud_source = 'lidar'
    slices_config = \
        """
        slices {
            height_lo: -1.0 # -0.2
            height_hi: 2 # 2.3
            num_slices: 1 # 5
        }
        """

    print('slices_config = ', slices_config)

    text_format.Merge(slices_config,
                      dataset_config.kitti_utils_config.bev_generator)

    # Save images for samples with no detections
    save_empty_images = False

    draw_score = True
    draw_iou = True
    ##############################
    # End of Options
    ##############################

    # Get the dataset
    dataset = DatasetBuilder.build_kitti_dataset(dataset_config,
                                                 use_defaults=False)

    # Setup Paths
    predictions_dir = pplp.root_dir() + \
        '/data/outputs/' + checkpoint_name + '/predictions'

    proposals_and_scores_dir = predictions_dir + \
        '/proposals_and_scores/' + dataset.data_split

    predictions_and_scores_dir = predictions_dir + \
        '/final_predictions_and_scores/' + dataset.data_split

    # Output images directories
    output_dir_base = predictions_dir + '/images_2d'

    # Get checkpoint step
    steps = os.listdir(proposals_and_scores_dir)
    steps.sort(key=int)
    print('Available steps: {}'.format(steps))

    # Use latest checkpoint if no index provided
    if global_step is None:
        global_step = steps[-1]

    if draw_proposals_separate:
        prop_out_dir = output_dir_base + '/proposals/{}/{}/{}'.format(
            dataset.data_split, global_step, rpn_score_threshold)

        if not os.path.exists(prop_out_dir):
            os.makedirs(prop_out_dir)

        print('Proposal images saved to:', prop_out_dir)

    if draw_overlaid:
        overlaid_out_dir = output_dir_base + '/overlaid/{}/{}/{}'.format(
            dataset.data_split, global_step, avod_score_threshold)

        if not os.path.exists(overlaid_out_dir):
            os.makedirs(overlaid_out_dir)

        print('Overlaid images saved to:', overlaid_out_dir)

    if draw_predictions_separate:
        pred_out_dir = output_dir_base + '/predictions/{}/{}/{}'.format(
            dataset.data_split, global_step, avod_score_threshold)

        if not os.path.exists(pred_out_dir):
            os.makedirs(pred_out_dir)

        print('Prediction images saved to:', pred_out_dir)

    # Rolling average array of times for time estimation
    avg_time_arr_length = 10
    last_times = np.repeat(time.time(), avg_time_arr_length) + \
        np.arange(avg_time_arr_length)

    # for sample_idx in range(dataset.num_samples):
    for sample_idx in [6]:
        # Estimate time remaining with 5 slowest times
        start_time = time.time()
        last_times = np.roll(last_times, -1)
        last_times[-1] = start_time
        avg_time = np.mean(np.sort(np.diff(last_times))[-5:])
        samples_remaining = dataset.num_samples - sample_idx
        est_time_left = avg_time * samples_remaining

        # Print progress and time remaining estimate
        sys.stdout.write('\rSaving {} / {}, Avg Time: {:.3f}s, '
                         'Time Remaining: {:.2f}s'.format(
                             sample_idx + 1, dataset.num_samples, avg_time,
                             est_time_left))
        sys.stdout.flush()

        sample_name = dataset.sample_names[sample_idx]
        img_idx = int(sample_name)

        ##############################
        # Proposals
        ##############################
        if draw_proposals_separate or draw_overlaid:
            # Load proposals from files
            proposals_file_path = proposals_and_scores_dir + \
                "/{}/{}.txt".format(global_step, sample_name)
            if not os.path.exists(proposals_file_path):
                print('Sample {}: No proposals, skipping'.format(sample_name))
                continue
            print('Sample {}: Drawing proposals'.format(sample_name))

            # atleast_2d guards against the single-proposal case, where
            # np.loadtxt returns a 1-D array
            proposals_and_scores = np.atleast_2d(
                np.loadtxt(proposals_file_path))

            proposal_boxes_3d = proposals_and_scores[:, 0:7]
            proposal_scores = proposals_and_scores[:, 7]

            # Apply score mask to proposals
            score_mask = proposal_scores > rpn_score_threshold
            proposal_boxes_3d = proposal_boxes_3d[score_mask]
            proposal_scores = proposal_scores[score_mask]

            proposal_objs = \
                [box_3d_encoder.box_3d_to_object_label(proposal,
                                                       obj_type='Proposal')
                 for proposal in proposal_boxes_3d]

        ##############################
        # Predictions
        ##############################
        if draw_predictions_separate or draw_overlaid:
            predictions_file_path = predictions_and_scores_dir + \
                "/{}/{}.txt".format(global_step,
                                    sample_name)
            if not os.path.exists(predictions_file_path):
                continue

            # Load predictions from file (atleast_2d guards against the
            # single-prediction case, where np.loadtxt returns a 1-D array)
            predictions_and_scores = np.atleast_2d(
                np.loadtxt(predictions_file_path))

            prediction_boxes_3d = predictions_and_scores[:, 0:7]
            prediction_scores = predictions_and_scores[:, 7]
            prediction_class_indices = predictions_and_scores[:, 8]

            # Process predictions only if any were loaded
            if len(prediction_boxes_3d) > 0:

                # Apply score mask
                avod_score_mask = prediction_scores >= avod_score_threshold
                prediction_boxes_3d = prediction_boxes_3d[avod_score_mask]
                prediction_scores = prediction_scores[avod_score_mask]
                prediction_class_indices = \
                    prediction_class_indices[avod_score_mask]

                # # Swap l, w for predictions where w > l
                # swapped_indices = \
                #     prediction_boxes_3d[:, 4] > prediction_boxes_3d[:, 3]
                # prediction_boxes_3d = np.copy(prediction_boxes_3d)
                # prediction_boxes_3d[swapped_indices, 3] = \
                #     prediction_boxes_3d[swapped_indices, 4]
                # prediction_boxes_3d[swapped_indices, 4] = \
                #     prediction_boxes_3d[swapped_indices, 3]

        ##############################
        # Ground Truth
        ##############################

        # Get ground truth labels
        if dataset.has_labels:
            gt_objects = obj_utils.read_labels(dataset.label_dir, img_idx)
        else:
            gt_objects = []

        # Filter objects to desired difficulty
        filtered_gt_objs = dataset.kitti_utils.filter_labels(
            gt_objects, classes=gt_classes)

        boxes2d, _, _ = obj_utils.build_bbs_from_objects(
            filtered_gt_objs, class_needed=gt_classes)

        image_path = dataset.get_rgb_image_path(sample_name)
        image = Image.open(image_path)
        image_size = image.size

        # Read the stereo calibration matrix for visualization
        stereo_calib = calib_utils.read_calibration(dataset.calib_dir, img_idx)
        calib_p2 = stereo_calib.p2

        ##############################
        # Reformat and prepare to draw
        ##############################
        if draw_proposals_separate or draw_overlaid:
            proposals_as_anchors = box_3d_encoder.box_3d_to_anchor(
                proposal_boxes_3d)

            proposal_boxes, _ = anchor_projector.project_to_image_space(
                proposals_as_anchors, calib_p2, image_size)

            num_of_proposals = proposal_boxes_3d.shape[0]

            prop_fig, prop_2d_axes, prop_3d_axes = \
                vis_utils.visualization(dataset.rgb_image_dir,
                                        img_idx,
                                        display=False)

            draw_proposals(filtered_gt_objs, calib_p2, num_of_proposals,
                           proposal_objs, proposal_boxes, prop_2d_axes,
                           prop_3d_axes, draw_orientations_on_prop)
            if draw_point_cloud:
                # Filter the useful points from the full point cloud
                kitti_utils = dataset.kitti_utils
                point_cloud = kitti_utils.get_point_cloud(
                    point_cloud_source, img_idx, image_size)
                print('point_cloud = ', point_cloud)
                ground_plane = kitti_utils.get_ground_plane(sample_name)
                all_points = np.transpose(point_cloud)
                height_lo = dataset_config.kitti_utils_config.bev_generator.slices.height_lo
                height_hi = dataset_config.kitti_utils_config.bev_generator.slices.height_hi

                # config = dataset.config.kitti_utils_config
                # print('config = ', config)
                area_extents = [[0., 70.], [-40, 40], [-1.1, -1]]
                # ?,x,?
                area_extents = np.reshape(area_extents, (3, 2))
                print('area_extents = ', area_extents)
                print('ground_plane = ', ground_plane)
                slice_filter = get_point_filter(point_cloud,
                                                area_extents,
                                                ground_plane=None,
                                                offset_dist=2.0)

                # slice_filter = kitti_utils.create_slice_filter(
                #     point_cloud,
                #     area_extents,
                #     ground_plane,
                #     height_lo,
                #     height_hi)
                # Apply slice filter
                slice_points = all_points[slice_filter]
                print('slice_points = ', slice_points)
                # Project the filtered points onto the 2D image
                point_2d = obj_utils.get_lidar_points_on_2D_image(
                    img_idx, dataset.calib_dir, dataset.velo_dir, image_size,
                    True, slice_points)
                draw_points(prop_2d_axes, point_2d, 'red')

            if draw_proposals_separate:
                # Save just the proposals
                filename = prop_out_dir + '/' + sample_name + '.png'
                plt.savefig(filename)

                if not draw_overlaid:
                    plt.close(prop_fig)

        if draw_overlaid or draw_predictions_separate:
            if len(prediction_boxes_3d) > 0:
                # Project the 3D box predictions to image space
                image_filter = []
                final_boxes_2d = []
                for i in range(len(prediction_boxes_3d)):
                    box_3d = prediction_boxes_3d[i, 0:7]
                    img_box = box_3d_projector.project_to_image_space(
                        box_3d,
                        calib_p2,
                        truncate=True,
                        image_size=image_size,
                        discard_before_truncation=False)
                    if img_box is not None:
                        image_filter.append(True)
                        final_boxes_2d.append(img_box)
                    else:
                        image_filter.append(False)
                final_boxes_2d = np.asarray(final_boxes_2d)
                final_prediction_boxes_3d = prediction_boxes_3d[image_filter]
                final_scores = prediction_scores[image_filter]
                final_class_indices = prediction_class_indices[image_filter]

                num_of_predictions = final_boxes_2d.shape[0]

                # Convert to objs
                final_prediction_objs = \
                    [box_3d_encoder.box_3d_to_object_label(
                        prediction, obj_type='Prediction')
                        for prediction in final_prediction_boxes_3d]
                for (obj, score) in zip(final_prediction_objs, final_scores):
                    obj.score = score
            else:
                if save_empty_images:
                    pred_fig, pred_2d_axes, pred_3d_axes = \
                        vis_utils.visualization(dataset.rgb_image_dir,
                                                img_idx,
                                                display=False,
                                                fig_size=fig_size)
                    filename = pred_out_dir + '/' + sample_name + '.png'
                    plt.savefig(filename)
                    plt.close(pred_fig)
                continue

            if draw_overlaid:
                # Overlay prediction boxes on image
                draw_predictions(filtered_gt_objs, calib_p2,
                                 num_of_predictions, final_prediction_objs,
                                 final_class_indices, final_boxes_2d,
                                 prop_2d_axes, prop_3d_axes, draw_score,
                                 draw_iou, gt_classes,
                                 draw_orientations_on_pred)
                filename = overlaid_out_dir + '/' + sample_name + '.png'
                plt.savefig(filename)

                plt.close(prop_fig)

            if draw_predictions_separate:
                # Now only draw prediction boxes on images
                # on a new figure handler
                if draw_projected_2d_boxes:
                    pred_fig, pred_2d_axes, pred_3d_axes = \
                        vis_utils.visualization(dataset.rgb_image_dir,
                                                img_idx,
                                                display=False,
                                                fig_size=fig_size)

                    draw_predictions(filtered_gt_objs, calib_p2,
                                     num_of_predictions, final_prediction_objs,
                                     final_class_indices, final_boxes_2d,
                                     pred_2d_axes, pred_3d_axes, draw_score,
                                     draw_iou, gt_classes,
                                     draw_orientations_on_pred)
                else:
                    pred_fig, pred_3d_axes = \
                        vis_utils.visualize_single_plot(
                            dataset.rgb_image_dir, img_idx, display=False)

                    draw_3d_predictions(filtered_gt_objs, calib_p2,
                                        num_of_predictions,
                                        final_prediction_objs,
                                        final_class_indices, final_boxes_2d,
                                        pred_3d_axes, draw_score, draw_iou,
                                        gt_classes, draw_orientations_on_pred)
                filename = pred_out_dir + '/' + sample_name + '.png'
                plt.savefig(filename)
                plt.close(pred_fig)

    print('\nDone')
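
The progress readout in the loop above keeps a rolling buffer of timestamps and averages the five slowest recent per-sample durations to estimate time remaining. Pulled out on its own, the idea is roughly this sketch (the helper name and buffer length are illustrative):

# Sketch of the rolling-average ETA estimate used in the loop above.
import time
import numpy as np

last_times = np.repeat(time.time(), 10) + np.arange(10)  # seed the buffer

def update_eta(last_times, samples_remaining):
    # Drop the oldest timestamp, append the current one
    last_times = np.roll(last_times, -1)
    last_times[-1] = time.time()
    # Average the 5 largest gaps between consecutive timestamps
    avg_time = np.mean(np.sort(np.diff(last_times))[-5:])
    return last_times, avg_time * samples_remaining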
Example #5
def get_lidar_point_cloud_sub(img_idx,
                              calib_dir,
                              velo_dir,
                              im_size=None,
                              min_intensity=None,
                              stride_sub=1):
    """ Calculates the lidar point cloud, and optionally returns only the
    points that are projected to the image.

    :param img_idx: image index
    :param calib_dir: directory with calibration files
    :param velo_dir: directory with velodyne files
    :param im_size: (optional) 2 x 1 list containing the size of the image
                      to filter the point cloud [w, h]
    :param min_intensity: (optional) minimum intensity required to keep a point
    :param stride_sub: (optional) keep only every stride_sub-th laser ring

    :return: (3, N) point_cloud in the form [[x,...][y,...][z,...]]
    """

    # Read calibration info and raw lidar points
    frame_calib = calib_utils.read_calibration(calib_dir, img_idx)
    x, y, z, i = calib_utils.read_lidar(velo_dir=velo_dir, img_idx=img_idx)

    # Segment the sweep into individual lasers: a negative-to-positive
    # sign flip in y marks the start of a new laser ring
    starts = np.where(np.diff(np.sign(y)) > 0)[0] + 1
    true_starts = np.append(np.diff(starts) > 2, [True])
    starts = starts[true_starts]
    n_lasers = starts.shape[0] + 1
    starts = [0] + starts.tolist() + [len(x)]

    # Keep only every stride_sub-th laser
    include = np.zeros(len(x), dtype=bool)
    lasers_num = range(0, SINFields.LASERS_NUM, stride_sub)
    for laser in lasers_num:
        if laser < n_lasers:
            include[starts[laser]:starts[laser + 1]] = True

    i = i[include]

    # Calculate the point cloud
    pts = np.vstack((x[include], y[include], z[include])).T
    pts = calib_utils.lidar_to_cam_frame(pts, frame_calib)

    # If no image size is given, return the full point cloud
    if not im_size:
        point_cloud = pts.T
        return point_cloud

    else:
        # Only keep points in front of camera (positive z), keeping the
        # intensities aligned with the surviving points
        front_filter = pts[:, 2] > 0
        pts = pts[front_filter]
        i = i[front_filter]
        point_cloud = pts.T

        # Project to image frame
        point_in_im = calib_utils.project_to_image(point_cloud,
                                                   p=frame_calib.p2).T

        # Filter based on the given image size
        image_filter = (point_in_im[:, 0] > 0) & \
                       (point_in_im[:, 0] < im_size[0]) & \
                       (point_in_im[:, 1] > 0) & \
                       (point_in_im[:, 1] < im_size[1])

    # Explicit None check so a threshold of 0 is not silently ignored
    if min_intensity is None:
        return pts[image_filter].T

    else:
        intensity_filter = i > min_intensity
        point_filter = np.logical_and(image_filter, intensity_filter)
        return pts[point_filter].T
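
The laser segmentation above relies on y flipping from negative to positive once per laser sweep. A tiny synthetic check of that idea (the fake y values are illustrative, not real velodyne data):

# Synthetic check of the ring segmentation trick used above.
import numpy as np

# Two fake "lasers", each sweeping y from negative to positive.
y = np.array([-2., -1., 1., 2.,   # laser 0
              -2., -1., 1., 2.])  # laser 1

starts = np.where(np.diff(np.sign(y)) > 0)[0] + 1
print(starts)  # [2 6]: upward zero-crossings, treated as laser boundaries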
Example #6
def plotSINImage(data_dir,
                 out_dir,
                 img_idx,
                 sin_type,
                 sin_level,
                 on_img,
                 sin_input_name,
                 mask_2d=None):
    fname = '{:06d}'.format(img_idx)
    path_img = os.path.join(data_dir, 'image_2')
    path_velo = os.path.join(data_dir, 'velodyne')
    path_calib = os.path.join(data_dir, 'calib')

    # Load image
    cv_bgr_image = cv2.imread(os.path.join(path_img, fname + '.png'))
    rgb_image = cv_bgr_image[..., ::-1]
    image_shape = rgb_image.shape[0:2]

    # Load point cloud
    frame_calib = calib_utils.read_calibration(path_calib, img_idx)
    x, y, z, _ = calib_utils.read_lidar(velo_dir=path_velo, img_idx=img_idx)
    if sin_type == 'lowres':
        starts = np.where(np.diff(np.sign(y)) > 0)[0] + 1
        true_starts = np.append(np.diff(starts) > 2, [True])
        starts = starts[true_starts]
        n_lasers = starts.shape[0] + 1
        starts = [0] + starts.tolist() + [len(x)]

        include = np.zeros(len(x), dtype=bool)
        stride_sub = get_stride_sub(sin_level)
        lasers_num = range(0, SINFields.LASERS_NUM, stride_sub)
        for laser in lasers_num:
            if laser < n_lasers:
                include[starts[laser]:starts[laser + 1]] = True

        pts_lowres = np.vstack((x[include], y[include], z[include])).T  # N x 3
        pts_lowres = calib_utils.lidar_to_cam_frame(pts_lowres, frame_calib)
        # Only keep points in front of camera (positive z)
        pts_lowres = pts_lowres[pts_lowres[:, 2] > 0]
        # Project to image frame
        point_in_im_lowres = calib_utils.project_to_image(
            pts_lowres.T, p=frame_calib.p2).T  # N x 2
        im_size = [image_shape[1], image_shape[0]]
        # Filter based on the given image size
        image_filter_lowres = (point_in_im_lowres[:, 0] > 0) & \
                              (point_in_im_lowres[:, 0] < im_size[0]) & \
                              (point_in_im_lowres[:, 1] > 0) & \
                              (point_in_im_lowres[:, 1] < im_size[1])
        point_cloud_lowres = pts_lowres[image_filter_lowres, :].T
    else:
        include = np.ones(len(x), dtype=bool)

    pts = np.vstack((x, y, z)).T  # N x 3
    pts = calib_utils.lidar_to_cam_frame(pts, frame_calib)
    # Only keep points in front of camera (positive z)
    pts = pts[pts[:, 2] > 0]
    # Project to image frame
    point_in_im = calib_utils.project_to_image(pts.T,
                                               p=frame_calib.p2).T  # N x 2
    im_size = [image_shape[1], image_shape[0]]
    # Filter based on the given image size
    image_filter = (point_in_im[:, 0] > 0) & \
                   (point_in_im[:, 0] < im_size[0]) & \
                   (point_in_im[:, 1] > 0) & \
                   (point_in_im[:, 1] < im_size[1])
    point_cloud = pts[image_filter, :].T
    point_in_im = point_in_im[image_filter, :]

    image_input_sin, point_cloud_sin = genSINtoInputs(
        rgb_image,
        point_cloud,
        sin_type=sin_type,
        sin_level=sin_level,
        sin_input_name=sin_input_name,
        mask_2d=mask_2d,
        frame_calib_p2=frame_calib.p2)
    if sin_type == 'lowres':
        point_cloud_sin = point_cloud_lowres

    fname_out = fname + '_{}_sin_{}_{}'.format(sin_input_name, sin_type,
                                               sin_level)
    if sin_input_name == 'image':
        cv2.imwrite(os.path.join(out_dir, fname + '_image_org.png'),
                    cv_bgr_image)
        cv2.imwrite(os.path.join(out_dir, fname_out + '.png'),
                    image_input_sin[..., ::-1])
    elif sin_input_name == 'lidar':
        # Distances used to color the points (the clipping below is disabled)
        pointsDist = point_cloud[2, :]
        # for i_pt,pdist in enumerate(pointsDist):
        #     pointsDist[i_pt] = D_MAX if pdist>D_MAX \
        #                        else pdist if pdist>D_MIN \
        #                        else D_MIN
        image_w_pts = points_on_img(point_in_im,
                                    pointsDist,
                                    rgb_image,
                                    on_img=on_img)

        point_in_im2 = calib_utils.project_to_image(point_cloud_sin,
                                                    p=frame_calib.p2).T
        # Distances used to color the points (the clipping below is disabled)
        pointsDist2 = point_cloud_sin[2, :]
        # for i_pt,pdist in enumerate(pointsDist2):
        #     pointsDist2[i_pt] =  D_MAX if pdist>D_MAX \
        #                          else pdist if pdist>D_MIN \
        #                          else D_MIN

        image_filter2 = (point_in_im2[:, 0] > 0) & \
                        (point_in_im2[:, 0] < im_size[0]) & \
                        (point_in_im2[:, 1] > 0) & \
                        (point_in_im2[:, 1] < im_size[1])
        point_in_im2 = point_in_im2[image_filter2, :]
        pointsDist2 = pointsDist2[image_filter2]
        image_w_pts2 = points_on_img(point_in_im2,
                                     pointsDist2,
                                     rgb_image,
                                     on_img=on_img)

        cv2.imwrite(os.path.join(out_dir, fname + '_lidar_org.png'),
                    image_w_pts[..., ::-1])
        if on_img:
            cv2.imwrite(os.path.join(out_dir, fname_out + '.png'),
                        image_w_pts2[..., ::-1])
        else:
            cv2.imwrite(os.path.join(out_dir, fname_out + '_black.png'),
                        image_w_pts2[..., ::-1])
    else:
        raise ValueError("Invalid sin_input_name {}".format(sin_input_name))
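
A usage sketch for the function above. The paths are hypothetical placeholders; the argument values come from the branches the function handles (sin_input_name is 'image' or 'lidar', and sin_type 'lowres' triggers the laser-subsampling path), and the sketch assumes genSINtoInputs, get_stride_sub, and points_on_img are importable in the same context:

# Usage sketch (hypothetical paths and illustrative sin_level).
plotSINImage(data_dir='kitti/object/training',  # hypothetical path
             out_dir='/tmp/sin_vis',            # hypothetical path
             img_idx=0,
             sin_type='lowres',
             sin_level=2,        # illustrative; passed to get_stride_sub
             on_img=True,
             sin_input_name='lidar')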