Example #1
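The examples below assume a common import header along these lines. The exact module paths are assumptions based on the avod/wavedata project layout (which MLOD mirrors), so treat this block as a sketch rather than the repos' actual headers.

import os
import time

import cv2
import numpy as np
import vtk

import avod  # or: import mlod, for Example #1
from avod.builders import config_builder_util
from avod.builders.dataset_builder import DatasetBuilder
from avod.core import anchor_filter, box_3d_encoder
from wavedata.tools.core.voxel_grid import VoxelGrid
from wavedata.tools.obj_detection import obj_utils
from wavedata.tools.visualization import vis_utils
from wavedata.tools.visualization.vtk_boxes import VtkBoxes
from wavedata.tools.visualization.vtk_point_cloud import VtkPointCloud
from wavedata.tools.visualization.vtk_voxel_grid import VtkVoxelGrid

# demo_utils, VtkBox8c, VtkPyramidBoxes, VtkGroundPlane, VtkTextLabels,
# COLOUR_SCHEME_PREDICTIONS, perspective_utils, trust_utils, and cfg come
# from the demo scripts' own packages; their import paths are not shown here.
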
def main():
    """This demo visualizes box 8C format predicted by MLOD, before
    getting converted to Box 3D.

    Keys:
        F1: Toggle predictions
        F2: Toggle easy ground truth objects (Green)
        F3: Toggle medium ground truth objects (Orange)
        F4: Toggle hard ground truth objects (Red)
        F5: Toggle all ground truth objects (default off)

        F6: Toggle 3D voxel grid
        F7: Toggle point cloud
    """
    ##############################
    # Options
    ##############################
    mlod_score_threshold = 0.1
    show_orientations = True

    checkpoint_name = 'mlod_exp_8c'

    global_step = None

    sample_name = None

    dataset_config = DatasetBuilder.copy_config(DatasetBuilder.KITTI_VAL_HALF)

    dataset = DatasetBuilder.build_kitti_dataset(dataset_config)
    ##############################
    # Setup Paths
    ##############################

    # # # Cars # # #
    # sample_name = '000050'
    # sample_name = '000104'
    # sample_name = '000169'
    # sample_name = '000191'
    # sample_name = '000360'
    # sample_name = '001783'
    # sample_name = '001820'
    # sample_name = '006338'

    # # # People # # #
    # val_half split
    # sample_name = '000001'
    sample_name = '000005'  # Easy, 1 ped
    # sample_name = '000122'  # Easy, 1 cyc
    # sample_name = '000134'  # Hard, lots of people
    # sample_name = '000167'  # Medium, 1 ped, 2 cycs
    # sample_name = '000187'  # Medium, 1 ped on left
    # sample_name = '000381'  # Easy, 1 ped
    # sample_name = '000398'  # Easy, 1 ped
    # sample_name = '000401'  # Hard, obscured peds
    # sample_name = '000407'  # Easy, 1 ped
    # sample_name = '000448'  # Hard, several far people
    # sample_name = '000486'  # Hard, 2 obscured peds
    # sample_name = '000509'  # Easy, 1 ped
    # sample_name = '000718'  # Hard, lots of people
    # sample_name = '002216'  # Easy, 1 cyc

    # Random sample
    if sample_name is None:
        sample_idx = np.random.randint(0, dataset.num_samples)
        sample_name = dataset.sample_list[sample_idx]

    img_idx = int(sample_name)

    # Text files directory
    predictions_and_scores_dir = mlod.root_dir() + \
        '/data/outputs/' + checkpoint_name + '/predictions' +  \
        '/final_boxes_8c_and_scores/' + dataset.data_split

    # Get checkpoint step
    steps = os.listdir(predictions_and_scores_dir)
    steps.sort(key=int)
    print('Available steps: {}'.format(steps))

    # Use latest checkpoint if no index provided
    if global_step is None:
        global_step = steps[-1]

    ##############################
    # Predictions
    ##############################
    # Load predictions from files
    predictions_and_scores = np.loadtxt(
        predictions_and_scores_dir +
        "/{}/{}.txt".format(global_step, sample_name))

    predictions_boxes_8c = predictions_and_scores[:, 0:24]
    prediction_scores = predictions_and_scores[:, 24]

    score_mask = prediction_scores >= mlod_score_threshold
    predictions_boxes_8c = predictions_boxes_8c[score_mask]

    all_vtk_box_corners = []
    # Each prediction is 24 values, reshaped into [x, y, z] rows for 8 corners
    predictions_boxes_8c = np.reshape(predictions_boxes_8c, [-1, 3, 8])
    for i in range(len(predictions_boxes_8c)):
        box_8c = predictions_boxes_8c[i, :, :]
        vtk_box_corners = VtkBox8c()
        vtk_box_corners.set_objects(box_8c)
        all_vtk_box_corners.append(vtk_box_corners)

    ##############################
    # Ground Truth
    ##############################
    if dataset.has_labels:
        easy_gt_objs, medium_gt_objs, \
            hard_gt_objs, all_gt_objs = \
            demo_utils.get_gts_based_on_difficulty(dataset,
                                                   img_idx)
    else:
        easy_gt_objs = medium_gt_objs = hard_gt_objs = all_gt_objs = []

    ##############################
    # Point Cloud
    ##############################
    image_path = dataset.get_rgb_image_path(sample_name)
    image = cv2.imread(image_path)

    points, point_colours = demo_utils.get_filtered_pc_and_colours(
        dataset, image, img_idx)

    # Voxelize the point cloud for visualization
    voxel_grid = VoxelGrid()
    voxel_grid.voxelize(points, voxel_size=0.1, create_leaf_layout=False)

    ##############################
    # Visualization
    ##############################
    # Create VtkVoxelGrid
    vtk_voxel_grid = VtkVoxelGrid()
    vtk_voxel_grid.set_voxels(voxel_grid)

    vtk_point_cloud = VtkPointCloud()
    vtk_point_cloud.set_points(points, point_colours)

    # Create VtkAxes
    axes = vtk.vtkAxesActor()
    axes.SetTotalLength(5, 5, 5)

    # Create VtkBoxes for ground truth
    vtk_easy_gt_boxes, vtk_medium_gt_boxes, \
        vtk_hard_gt_boxes, vtk_all_gt_boxes = \
        demo_utils.create_gt_vtk_boxes(easy_gt_objs,
                                       medium_gt_objs,
                                       hard_gt_objs,
                                       all_gt_objs,
                                       show_orientations)

    # Create Voxel Grid Renderer in bottom half
    vtk_renderer = vtk.vtkRenderer()
    vtk_renderer.AddActor(vtk_voxel_grid.vtk_actor)
    vtk_renderer.AddActor(vtk_point_cloud.vtk_actor)

    vtk_box_actors = vtk.vtkAssembly()

    # Create VtkBoxes for prediction boxes
    for i in range(len(all_vtk_box_corners)):
        # Adding labels, slows down rendering
        # vtk_renderer.AddActor(all_vtk_box_corners[i].
        # vtk_text_labels.vtk_actor)
        vtk_box_actors.AddPart(all_vtk_box_corners[i].vtk_actor)

    vtk_renderer.SetBackground(0.2, 0.3, 0.4)

    vtk_renderer.AddActor(vtk_hard_gt_boxes.vtk_actor)
    vtk_renderer.AddActor(vtk_medium_gt_boxes.vtk_actor)
    vtk_renderer.AddActor(vtk_easy_gt_boxes.vtk_actor)
    vtk_renderer.AddActor(vtk_all_gt_boxes.vtk_actor)

    vtk_renderer.AddActor(vtk_box_actors)

    vtk_renderer.AddActor(axes)

    # Set initial properties for some actors
    vtk_point_cloud.vtk_actor.GetProperty().SetPointSize(2)
    vtk_voxel_grid.vtk_actor.SetVisibility(0)
    vtk_all_gt_boxes.vtk_actor.SetVisibility(0)

    # Setup Camera
    current_cam = vtk_renderer.GetActiveCamera()
    current_cam.Pitch(160.0)
    current_cam.Roll(180.0)

    # Zooms out to fit all points on screen
    vtk_renderer.ResetCamera()

    # Zoom in slightly
    current_cam.Zoom(2.5)

    # Reset the clipping range to show all points
    vtk_renderer.ResetCameraClippingRange()

    # Setup Render Window
    vtk_render_window = vtk.vtkRenderWindow()
    vtk_render_window.SetWindowName(
        "Predictions: Step {}, Sample {}, Min Score {}".format(
            global_step,
            sample_name,
            mlod_score_threshold,
        ))

    vtk_render_window.SetSize(900, 600)
    vtk_render_window.AddRenderer(vtk_renderer)

    # Setup custom interactor style, which handles mouse and key events
    vtk_render_window_interactor = vtk.vtkRenderWindowInteractor()
    vtk_render_window_interactor.SetRenderWindow(vtk_render_window)

    vtk_render_window_interactor.SetInteractorStyle(
        vis_utils.ToggleActorsInteractorStyle([
            vtk_box_actors, vtk_easy_gt_boxes.vtk_actor,
            vtk_medium_gt_boxes.vtk_actor, vtk_hard_gt_boxes.vtk_actor,
            vtk_all_gt_boxes.vtk_actor, vtk_voxel_grid.vtk_actor,
            vtk_point_cloud.vtk_actor
        ]))

    vtk_render_window_interactor.Start()
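
Since these 8-corner boxes are later converted to box_3d, a quick sanity check is to recover a centroid and axis-aligned extents from the corners. The helper below is hypothetical (not part of MLOD), assuming only the 3 x 8 [x, y, z]-row layout used in the reshape above; for a rotated box the extents describe the axis-aligned bounding box, not l, w, h.

def box_8c_summary(box_8c):
    """Centroid and axis-aligned extents of a 3 x 8 corner box (hypothetical)."""
    centroid = box_8c.mean(axis=1)                     # (3,) mean of corners
    extents = box_8c.max(axis=1) - box_8c.min(axis=1)  # axis-aligned spans
    return centroid, extents
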
Example #2
    def test_get_empty_anchor_filter_in_2d(self):
        # Generic flat-ground setup (ground normal pointing straight up);
        # area extents are [(x_min, x_max), (y_min, y_max), (z_min, z_max)]
        area_extent = [(0., 2.), (-1., 0.), (0., 2.)]

        # Creates a voxel grid in the following format at the y bin (-1.5, -0.5]:
        # [ ][ ][ ][ ]
        # [ ][ ][x][ ]
        # [ ][ ][ ][ ]
        # [ ][ ][x][ ]
        pts = np.array([[0.51, -0.5, 1.1], [1.51, -0.5, 1.1]])

        voxel_size = 0.5
        voxel_grid = VoxelGrid()
        voxel_grid.voxelize(pts, voxel_size, extents=area_extent)

        # Define anchors to test
        boxes_3d = np.array([
            [0.51, 0, 0.51, 1, 1, 1, 0],
            [0.51, 0, 0.51, 1, 1, 1, np.pi / 2.],
            [0.51, 0, 1.1, 1, 1, 1, 0],
            [0.51, 0, 1.1, 1, 1, 1, np.pi / 2.],
            [1.51, 0, 0.51, 1, 1, 1, 0],
            [1.51, 0, 0.51, 1, 1, 1, np.pi / 2.],
            [1.51, 0, 1.1, 1, 1, 1, 0],
            [1.51, 0, 1.1, 1, 1, 1, np.pi / 2.],
        ])

        anchors = box_3d_encoder.box_3d_to_anchor(boxes_3d)

        # Test anchor locations; numbers indicate anchor indices
        # [ ][ ][ ][ ]
        # [ ][1][3][ ]
        # [ ][ ][ ][ ]
        # [ ][5][7][ ]

        gen_filter = anchor_filter.get_empty_anchor_filter(anchors,
                                                           voxel_grid,
                                                           density_threshold=1)

        expected_filter = np.array(
            [False, False, True, True, False, False, True, True])

        self.assertTrue((gen_filter == expected_filter).all())

        boxes_3d = np.array([
            [0.5, 0, 0.5, 2, 1, 1, 0],  # case 1
            [0.5, 0, 0.5, 2, 1, 1, np.pi / 2.],
            [0.5, 0, 1.5, 1, 2, 1, 0],  # case 2
            [0.5, 0, 1.5, 1, 2, 1, np.pi / 2.],
            [1.5, 0, 0.5, 2, 1, 1, 0],  # case 3
            [1.5, 0, 0.5, 2, 1, 1, np.pi / 2.],
            [1.5, 0, 1.5, 1, 2, 1, 0],  # case 4
            [1.5, 0, 1.5, 1, 2, 1, np.pi / 2.]
        ])

        anchors = box_3d_encoder.box_3d_to_anchor(boxes_3d)

        # case 1
        # [ ][ ][ ][ ]   [ ][ ][ ][ ]
        # [ ][o][ ][ ]   [ ][o][o][ ]
        # [ ][o][ ][ ]   [ ][ ][ ][ ]
        # [ ][ ][ ][ ]   [ ][ ][ ][ ]

        # case 2
        # [ ][ ][ ][ ]   [ ][ ][ ][ ]
        # [ ][ ][o][o]   [ ][ ][o][ ]
        # [ ][ ][ ][ ]   [ ][ ][o][ ]
        # [ ][ ][ ][ ]   [ ][ ][ ][ ]

        # case 3
        # [ ][ ][ ][ ]   [ ][ ][ ][ ]
        # [ ][ ][ ][ ]   [ ][ ][ ][ ]
        # [ ][o][ ][ ]   [ ][o][o][ ]
        # [ ][o][ ][ ]   [ ][ ][ ][ ]

        # case 4
        # [ ][ ][ ][ ]   [ ][ ][ ][ ]
        # [ ][ ][ ][ ]   [ ][ ][ ][ ]
        # [ ][ ][o][o]   [ ][ ][o][ ]
        # [ ][ ][ ][ ]   [ ][ ][o][ ]

        gen_filter = anchor_filter.get_empty_anchor_filter(anchors,
                                                           voxel_grid,
                                                           density_threshold=1)
        expected_filter = np.array(
            [False, True, True, True, False, True, True, True])

        self.assertTrue((gen_filter == expected_filter).all())
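
A minimal sketch of the behaviour this test pins down, under the assumption (not avod's actual implementation) that an anchor passes the filter when its bird's-eye-view footprint covers at least density_threshold occupied voxels:

def empty_anchor_filter_2d_sketch(anchors, occupied_xz, density_threshold=1):
    """anchors: N x 6 rows [x, y, z, dim_x, dim_y, dim_z] (hypothetical helper).

    occupied_xz: M x 2 x-z centres of the occupied voxels.
    """
    keep = np.zeros(len(anchors), dtype=bool)
    for i, (x, y, z, dim_x, dim_y, dim_z) in enumerate(anchors):
        # Voxel centres covered by the anchor's x-z footprint
        in_x = np.abs(occupied_xz[:, 0] - x) <= dim_x / 2.0
        in_z = np.abs(occupied_xz[:, 1] - z) <= dim_z / 2.0
        keep[i] = np.count_nonzero(in_x & in_z) >= density_threshold
    return keep

Example #3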
def main():
    """This demo shows RPN proposals and AVOD predictions in the
    3D point cloud.

    Keys:
        F1: Toggle proposals
        F2: Toggle predictions
        F3: Toggle 3D voxel grid
        F4: Toggle point cloud

        F5: Toggle easy ground truth objects (Green)
        F6: Toggle medium ground truth objects (Orange)
        F7: Toggle hard ground truth objects (Red)
        F8: Toggle all ground truth objects (default off)

        F9: Toggle ground slice filter (default off)
        F10: Toggle offset slice filter (default off)
    """

    ##############################
    # Options
    ##############################
    rpn_score_threshold = 0.1
    avod_score_threshold = 0.1

    proposals_line_width = 1.0
    predictions_line_width = 3.0
    show_orientations = True

    point_cloud_source = 'depth'

    # Config file folder, default (<avod_root>/data/outputs/<checkpoint_name>)
    config_dir = None

    checkpoint_name = 'pyramid_cars_with_aug_example'
    global_step = None  # Latest checkpoint
    global_step = 83000  # Override with a specific checkpoint step

    # data_split = 'val_half'
    data_split = 'val'
    # data_split = 'test'

    # Show 3D iou text
    draw_ious_3d = True

    name_list = []

    # Optionally load sample names from a file:
    # name_file = '/media/wavelab/d3cd89ab-7705-4996-94f3-01da25ba8f50/moosey/val.txt'
    # with open(name_file) as f:
    #     for line in f:
    #         name_list.append(line.rstrip('\n'))

    # name_list = ['0000000003', '0000000009', '0000000016', '0000000233',
    #              '0000000234', '0000000236', '0000000422', '0000000473',
    #              '0000000490', '0000000494', '0000000547', '0000000655',
    #              '0000000679', '0000000690', '0000000692', '0000000781']
    name_list = ['0000000004']

    for names in name_list:

        sample_name = names
        #sample_name = None

        # # # Cars # # #
        # sample_name = '000050'
        # sample_name = '000104'
        # sample_name = '000169'
        # sample_name = '000191'
        # sample_name = '000360'
        # sample_name = '001783'
        # sample_name = '001820'

        # val split
        # sample_name = '000181'
        # sample_name = '000751'
        # sample_name = '000843'
        # sample_name = '000944'
        # sample_name = '006338'

        # # # People # # #
        # val_half split
        # sample_name = '000001'  # Hard, 1 far cyc
        # sample_name = '000005'  # Easy, 1 ped
        # sample_name = '000122'  # Easy, 1 cyc
        # sample_name = '000134'  # Hard, lots of people
        # sample_name = '000167'  # Medium, 1 ped, 2 cycs
        # sample_name = '000187'  # Medium, 1 ped on left
        # sample_name = '000381'  # Easy, 1 ped
        # sample_name = '000398'  # Easy, 1 ped
        # sample_name = '000401'  # Hard, obscured peds
        # sample_name = '000407'  # Easy, 1 ped
        # sample_name = '000448'  # Hard, several far people
        # sample_name = '000486'  # Hard, 2 obscured peds
        # sample_name = '000509'  # Easy, 1 ped
        # sample_name = '000718'  # Hard, lots of people
        # sample_name = '002216'  # Easy, 1 cyc

        # val split
        # sample_name = '000015'
        # sample_name = '000048'
        # sample_name = '000058'
        # sample_name = '000076'    # Medium, few ped, 1 cyc
        # sample_name = '000108'
        # sample_name = '000118'
        # sample_name = '000145'
        # sample_name = '000153'
        # sample_name = '000186'
        # sample_name = '000195'
        # sample_name = '000199'
        # sample_name = '000397'
        # sample_name = '004425'
        # sample_name = '004474'    # Hard, many ped, 1 cyc
        # sample_name = '004657'    # Hard, Few cycl, few ped
        # sample_name = '006071'
        # sample_name = '006828'    # Hard, Few cycl, few ped
        # sample_name = '006908'    # Hard, Few cycl, few ped
        # sample_name = '007412'
        # sample_name = '007318'    # Hard, Few cycl, few ped

        ##############################
        # End of Options
        ##############################

        if data_split == 'test':
            draw_ious_3d = False

        if config_dir is None:
            config_dir = avod.root_dir() + '/data/outputs/' + checkpoint_name

        # Parse experiment config
        pipeline_config_file = \
            config_dir + '/' + checkpoint_name + '.config'
        _, _, _, dataset_config = \
            config_builder_util.get_configs_from_pipeline_file(
                pipeline_config_file, is_training=False)

        dataset_config.data_split = data_split

        if data_split == 'test':
            dataset_config.data_split_dir = 'testing'
            dataset_config.has_labels = False

        dataset = DatasetBuilder.build_kitti_dataset(dataset_config,
                                                     use_defaults=False)

        # Random sample
        if sample_name is None:
            sample_idx = np.random.randint(0, dataset.num_samples)
            sample_name = dataset.sample_names[sample_idx]

        ##############################
        # Setup Paths
        ##############################
        img_idx = int(sample_name)

        # Text files directory
        proposals_and_scores_dir = avod.root_dir() + \
            '/data/outputs/' + checkpoint_name + '/predictions' +  \
            '/proposals_and_scores/' + dataset.data_split

        predictions_and_scores_dir = avod.root_dir() + \
            '/data/outputs/' + checkpoint_name + '/predictions' +  \
            '/final_predictions_and_scores/' + dataset.data_split

        # Get checkpoint step
        steps = os.listdir(proposals_and_scores_dir)
        steps.sort(key=int)
        print('Available steps: {}'.format(steps))

        # Use latest checkpoint if no index provided
        if global_step is None:
            global_step = steps[-1]

        # Output images directory
        img_out_dir = avod.root_dir() + '/data/outputs/' + checkpoint_name + \
            '/predictions/images_3d/{}/{}/{}'.format(dataset.data_split,
                                                     global_step,
                                                     rpn_score_threshold)

        if not os.path.exists(img_out_dir):
            os.makedirs(img_out_dir)

        ##############################
        # Proposals
        ##############################
        # Load proposals from files
        proposals_and_scores = np.loadtxt(proposals_and_scores_dir +
                                          "/{}/{}.txt".format(global_step,
                                                              sample_name))

        # Each row: 7 box_3d values [x, y, z, l, w, h, ry] + objectness score
        proposals = proposals_and_scores[:, 0:7]
        proposal_scores = proposals_and_scores[:, 7]

        rpn_score_mask = proposal_scores > rpn_score_threshold

        proposals = proposals[rpn_score_mask]
        proposal_scores = proposal_scores[rpn_score_mask]
        print('Proposals:', len(proposal_scores), proposal_scores)

        proposal_objs = \
            [box_3d_encoder.box_3d_to_object_label(proposal,
                                                   obj_type='Proposal')
             for proposal in proposals]

        ##############################
        # Predictions
        ##############################
        # Load predictions from files
        predictions_and_scores = np.loadtxt(predictions_and_scores_dir +
                                            "/{}/{}.txt".format(
                                                global_step,
                                                sample_name)).reshape(-1, 9)

        # Each row: 7 box_3d values, the prediction score, and a class index
        prediction_boxes_3d = predictions_and_scores[:, 0:7]
        prediction_scores = predictions_and_scores[:, 7]
        prediction_types = np.asarray(predictions_and_scores[:, 8],
                                      dtype=np.int32)

        avod_score_mask = prediction_scores >= avod_score_threshold
        prediction_boxes_3d = prediction_boxes_3d[avod_score_mask]
        prediction_scores = prediction_scores[avod_score_mask]
        print('Predictions: ', len(prediction_scores), prediction_scores)

        final_predictions = np.copy(prediction_boxes_3d)

        # # Swap l, w for predictions where w > l
        # swapped_indices = prediction_boxes_3d[:, 4] > prediction_boxes_3d[:, 3]
        # final_predictions[swapped_indices, 3] = \
        #     prediction_boxes_3d[swapped_indices, 4]
        # final_predictions[swapped_indices, 4] = \
        #     prediction_boxes_3d[swapped_indices, 3]

        prediction_objs = []
        for pred_idx in range(len(final_predictions)):
            prediction_box_3d = final_predictions[pred_idx]
            prediction_type = dataset.classes[prediction_types[pred_idx]]
            prediction_obj = box_3d_encoder.box_3d_to_object_label(
                prediction_box_3d, obj_type=prediction_type)
            prediction_objs.append(prediction_obj)

        ##############################
        # Ground Truth
        ##############################
        if dataset.has_labels:
            # Get ground truth labels
            easy_gt_objs, medium_gt_objs, \
                hard_gt_objs, all_gt_objs = \
                demo_utils.get_gts_based_on_difficulty(dataset, img_idx)
        else:
            easy_gt_objs = medium_gt_objs = hard_gt_objs = all_gt_objs = []

        ##############################
        # 3D IoU
        ##############################
        if draw_ious_3d:
            # Convert to box_3d
            all_gt_boxes_3d = [box_3d_encoder.object_label_to_box_3d(gt_obj)
                               for gt_obj in all_gt_objs]
            pred_boxes_3d = [box_3d_encoder.object_label_to_box_3d(pred_obj)
                             for pred_obj in prediction_objs]
            max_ious_3d = demo_utils.get_max_ious_3d(all_gt_boxes_3d,
                                                     pred_boxes_3d)

        ##############################
        # Point Cloud
        ##############################
        image_path = dataset.get_rgb_image_path(sample_name)
        image = cv2.imread(image_path)

        print("***************")
        print(point_cloud_source)
        print(img_idx)
        print(image.shape)

        point_cloud = dataset.kitti_utils.get_point_cloud(point_cloud_source,
                                                          img_idx,
                                                          image_shape=image.shape)


        print("This is the shape of the point_cloud")
        print(point_cloud.shape)
        point_cloud = np.asarray(point_cloud)

        # Filter point cloud to extents
        area_extents = np.asarray([[-40, 40], [-5, 3], [0, 70]])
        bev_extents = area_extents[[0, 2]]

        points = point_cloud.T
        point_filter = obj_utils.get_point_filter(point_cloud, area_extents)
        points = points[point_filter]

        point_colours = vis_utils.project_img_to_point_cloud(points,
                                                             image,
                                                             dataset.calib_dir,
                                                             img_idx)

        # Voxelize the point cloud for visualization
        voxel_grid = VoxelGrid()
        voxel_grid.voxelize(points, voxel_size=0.1,
                            create_leaf_layout=False)

        # Ground plane
        ground_plane = obj_utils.get_road_plane(img_idx, dataset.planes_dir)

        ##############################
        # Visualization
        ##############################
        # Create VtkVoxelGrid
        vtk_voxel_grid = VtkVoxelGrid()
        vtk_voxel_grid.set_voxels(voxel_grid)

        vtk_point_cloud = VtkPointCloud()
        vtk_point_cloud.set_points(points, point_colours)

        # Create VtkAxes
        vtk_axes = vtk.vtkAxesActor()
        vtk_axes.SetTotalLength(5, 5, 5)

        # Create VtkBoxes for proposal boxes
        vtk_proposal_boxes = VtkBoxes()
        vtk_proposal_boxes.set_line_width(proposals_line_width)
        vtk_proposal_boxes.set_objects(proposal_objs,
                                       COLOUR_SCHEME_PREDICTIONS)

        # Create VtkBoxes for prediction boxes
        vtk_prediction_boxes = VtkPyramidBoxes()
        vtk_prediction_boxes.set_line_width(predictions_line_width)
        vtk_prediction_boxes.set_objects(prediction_objs,
                                         COLOUR_SCHEME_PREDICTIONS,
                                         show_orientations)

        # Create VtkBoxes for ground truth
        vtk_hard_gt_boxes = VtkBoxes()
        vtk_medium_gt_boxes = VtkBoxes()
        vtk_easy_gt_boxes = VtkBoxes()
        vtk_all_gt_boxes = VtkBoxes()

        vtk_hard_gt_boxes.set_objects(hard_gt_objs, COLOUR_SCHEME_PREDICTIONS,
                                      show_orientations)
        vtk_medium_gt_boxes.set_objects(medium_gt_objs, COLOUR_SCHEME_PREDICTIONS,
                                        show_orientations)
        vtk_easy_gt_boxes.set_objects(easy_gt_objs, COLOUR_SCHEME_PREDICTIONS,
                                      show_orientations)
        vtk_all_gt_boxes.set_objects(all_gt_objs, VtkBoxes.COLOUR_SCHEME_KITTI,
                                     show_orientations)

        # Create VtkTextLabels for 3D ious
        vtk_text_labels = VtkTextLabels()

        if draw_ious_3d and len(all_gt_boxes_3d) > 0:
            gt_positions_3d = np.asarray(all_gt_boxes_3d)[:, 0:3]
            vtk_text_labels.set_text_labels(
                gt_positions_3d,
                ['{:0.3f}'.format(iou_3d) for iou_3d in max_ious_3d])

        # Create VtkGroundPlane
        vtk_ground_plane = VtkGroundPlane()
        vtk_slice_bot_plane = VtkGroundPlane()
        vtk_slice_top_plane = VtkGroundPlane()

        vtk_ground_plane.set_plane(ground_plane, bev_extents)
        vtk_slice_bot_plane.set_plane(ground_plane + [0, 0, 0, -0.2], bev_extents)
        vtk_slice_top_plane.set_plane(ground_plane + [0, 0, 0, -2.0], bev_extents)

        # Create Voxel Grid Renderer in bottom half
        vtk_renderer = vtk.vtkRenderer()
        vtk_renderer.AddActor(vtk_voxel_grid.vtk_actor)
        vtk_renderer.AddActor(vtk_point_cloud.vtk_actor)

        vtk_renderer.AddActor(vtk_proposal_boxes.vtk_actor)
        vtk_renderer.AddActor(vtk_prediction_boxes.vtk_actor)

        vtk_renderer.AddActor(vtk_hard_gt_boxes.vtk_actor)
        vtk_renderer.AddActor(vtk_medium_gt_boxes.vtk_actor)
        vtk_renderer.AddActor(vtk_easy_gt_boxes.vtk_actor)
        vtk_renderer.AddActor(vtk_all_gt_boxes.vtk_actor)

        vtk_renderer.AddActor(vtk_text_labels.vtk_actor)

        # Add ground plane and slice planes
        vtk_renderer.AddActor(vtk_ground_plane.vtk_actor)
        vtk_renderer.AddActor(vtk_slice_bot_plane.vtk_actor)
        vtk_renderer.AddActor(vtk_slice_top_plane.vtk_actor)

        vtk_renderer.AddActor(vtk_axes)
        vtk_renderer.SetBackground(0.2, 0.3, 0.4)

        # Set initial properties for some actors
        vtk_point_cloud.vtk_actor.GetProperty().SetPointSize(3)
        vtk_proposal_boxes.vtk_actor.SetVisibility(0)
        vtk_voxel_grid.vtk_actor.SetVisibility(0)
        vtk_all_gt_boxes.vtk_actor.SetVisibility(0)

        vtk_ground_plane.vtk_actor.SetVisibility(0)
        vtk_slice_bot_plane.vtk_actor.SetVisibility(0)
        vtk_slice_top_plane.vtk_actor.SetVisibility(0)
        vtk_ground_plane.vtk_actor.GetProperty().SetOpacity(0.9)
        vtk_slice_bot_plane.vtk_actor.GetProperty().SetOpacity(0.9)
        vtk_slice_top_plane.vtk_actor.GetProperty().SetOpacity(0.9)

        # Setup Camera
        current_cam = vtk_renderer.GetActiveCamera()
        current_cam.Pitch(140.0)
        current_cam.Roll(180.0)

        # Zooms out to fit all points on screen
        vtk_renderer.ResetCamera()
        # Zoom in slightly
        current_cam.Zoom(2)

        # Reset the clipping range to show all points
        vtk_renderer.ResetCameraClippingRange()

        # Setup Render Window
        vtk_render_window = vtk.vtkRenderWindow()
        vtk_render_window.SetWindowName(
            "Predictions: Step {}, Sample {}, Min Score {}".format(
                global_step,
                sample_name,
                avod_score_threshold,
            ))
        vtk_render_window.SetSize(900, 600)
        vtk_render_window.AddRenderer(vtk_renderer)

        # Setup custom interactor style, which handles mouse and key events
        vtk_render_window_interactor = vtk.vtkRenderWindowInteractor()
        vtk_render_window_interactor.SetRenderWindow(vtk_render_window)

        # Add custom interactor to toggle actor visibilities
        custom_interactor = vis_utils.CameraInfoInteractorStyle([
            vtk_proposal_boxes.vtk_actor,
            vtk_prediction_boxes.vtk_actor,
            vtk_voxel_grid.vtk_actor,
            vtk_point_cloud.vtk_actor,

            vtk_easy_gt_boxes.vtk_actor,
            vtk_medium_gt_boxes.vtk_actor,
            vtk_hard_gt_boxes.vtk_actor,
            vtk_all_gt_boxes.vtk_actor,

            vtk_ground_plane.vtk_actor,
            vtk_slice_bot_plane.vtk_actor,
            vtk_slice_top_plane.vtk_actor,
            vtk_text_labels.vtk_actor,
        ])

        vtk_render_window_interactor.SetInteractorStyle(custom_interactor)
        # Render in VTK
        vtk_render_window.Render()

        # Take a screenshot
        window_to_image_filter = vtk.vtkWindowToImageFilter()
        window_to_image_filter.SetInput(vtk_render_window)
        window_to_image_filter.Update()

        png_writer = vtk.vtkPNGWriter()
        file_name = img_out_dir + "/{}.png".format(sample_name)
        png_writer.SetFileName(file_name)
        png_writer.SetInputData(window_to_image_filter.GetOutput())
        png_writer.Write()

        print('Screenshot saved to ', file_name)

        # vtk_render_window_interactor.Start()  # Blocking
        vtk_render_window_interactor.Initialize()  # Non-blocking
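
Initialize() returns without blocking, so the loop can save a screenshot and move on to the next sample in name_list; switch back to Start() to keep a window open for interaction.

Example #4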
def main():
    # Setting Paths
    cam = 2

    # dataset_dir = '/media/bradenhurl/hd/gta/object/'
    data_set = 'training'
    dataset_dir = os.path.expanduser('~') + '/wavedata-dev/demos/gta'
    #dataset_dir = os.path.expanduser('~') + '/Kitti/object/'
    dataset_dir = os.path.expanduser(
        '~') + '/GTAData/TruPercept/object_tru_percept8/'

    # Set to True to see predictions (results) from all perspectives
    use_results = True
    altPerspective = False
    perspID = 48133
    perspStr = '%07d' % perspID
    altPerspect_dir = os.path.join(dataset_dir, data_set + '/alt_perspective/')
    if altPerspective:
        data_set = data_set + '/alt_perspective/' + perspStr

    fromWiseWindows = False
    useEVE = False
    if fromWiseWindows:
        data_set = 'object'
        if useEVE:
            dataset_dir = '/media/bradenhurl/hd/data/eve/'
        else:
            dataset_dir = '/media/bradenhurl/hd/data/'
    image_dir = os.path.join(dataset_dir, data_set) + '/image_2'
    velo_dir = os.path.join(dataset_dir, data_set) + '/velodyne'
    calib_dir = os.path.join(dataset_dir, data_set) + '/calib'

    if use_results:
        label_dir = os.path.join(dataset_dir, data_set) + '/predictions'
    else:
        label_dir = os.path.join(dataset_dir, data_set) + '/label_2'

    base_dir = os.path.join(dataset_dir, data_set)

    comparePCs = False
    if comparePCs:
        velo_dir2 = os.path.join(dataset_dir, data_set) + '/velodyne'

    tracking = False
    if tracking:
        seq_idx = 1
        data_set = '%04d' % seq_idx
        dataset_dir = '/media/bradenhurl/hd/GTAData/August-01/tracking'
        image_dir = os.path.join(dataset_dir, 'images', data_set)
        label_dir = os.path.join(dataset_dir, 'labels', data_set)
        velo_dir = os.path.join(dataset_dir, 'velodyne', data_set)
        calib_dir = os.path.join(dataset_dir, 'training', 'calib', '0000')

    # Used for visualizing inferences:
    # label_dir = '/media/bradenhurl/hd/avod/avod/data/outputs/pyramid_people_gta_40k'
    # label_dir = label_dir + '/predictions/kitti_predictions_3d/test/0.02/154000/data/'

    closeView = False
    pitch = 170
    pointSize = 3
    zoom = 1
    if closeView:
        pitch = 180.5
        pointSize = 3
        zoom = 35

    image_list = os.listdir(image_dir)

    fulcrum_of_points = True
    use_intensity = False
    img_idx = 2

    print('=== Loading image: {:06d}.png ==='.format(img_idx))
    print(image_dir)

    image = cv2.imread(image_dir + '/{:06d}.png'.format(img_idx))
    image_shape = (image.shape[1], image.shape[0])

    if use_intensity:
        point_cloud, intensity = obj_utils.get_lidar_point_cloud(
            img_idx, calib_dir, velo_dir, ret_i=use_intensity)
    else:
        point_cloud = obj_utils.get_lidar_point_cloud(img_idx,
                                                      calib_dir,
                                                      velo_dir,
                                                      im_size=image_shape)

    if comparePCs:
        point_cloud2 = obj_utils.get_lidar_point_cloud(img_idx,
                                                       calib_dir,
                                                       velo_dir2,
                                                       im_size=image_shape)
        point_cloud = np.hstack((point_cloud, point_cloud2))

    # Reshape points into N x [x, y, z]
    all_points = np.array(point_cloud).transpose().reshape((-1, 3))

    # Define Fixed Sizes for the voxel grid
    x_min = -85
    x_max = 85
    y_min = -5
    y_max = 5
    z_min = 3
    z_max = 85

    # Comment these out to filter points by area
    x_min = min(point_cloud[0])
    x_max = max(point_cloud[0])
    y_min = min(point_cloud[1])
    y_max = max(point_cloud[1])
    #z_min = min(point_cloud[2])
    z_max = max(point_cloud[2])

    # Filter points within certain xyz range
    area_filter = (point_cloud[0] > x_min) & (point_cloud[0] < x_max) & \
                  (point_cloud[1] > y_min) & (point_cloud[1] < y_max) & \
                  (point_cloud[2] > z_min) & (point_cloud[2] < z_max)

    all_points = all_points[area_filter]

    #point_colours = np.zeros(point_cloud.shape[1],0)
    #print(point_colours.shape)

    if fulcrum_of_points:
        # Get point colours
        point_colours = vis_utils.project_img_to_point_cloud(
            all_points, image, calib_dir, img_idx)
        print("Point colours shape: ", point_colours.shape)
        print("Sample 0 of colour: ", point_colours[0])
    elif use_intensity:
        adjusted = intensity == 65535
        intensity = intensity > 0
        intensity = np.expand_dims(intensity, -1)
        point_colours = np.hstack(
            (intensity * 255, intensity * 255 - adjusted * 255,
             intensity * 255 - adjusted * 255))
        print("Intensity shape:", point_colours.shape)
        print("Intensity sample: ", point_colours[0])

    # Create Voxel Grid
    voxel_grid = VoxelGrid()
    voxel_grid_extents = [[x_min, x_max], [y_min, y_max], [z_min, z_max]]
    print(voxel_grid_extents)

    start_time = time.time()
    voxel_grid.voxelize(all_points, 0.2, voxel_grid_extents)
    end_time = time.time()
    print("Voxelized in {} s".format(end_time - start_time))

    # Get bounding boxes
    gt_detections = obj_utils.read_labels(label_dir,
                                          img_idx,
                                          results=use_results)
    if gt_detections is None:
        gt_detections = []

    #perspective_utils.to_world(gt_detections, base_dir, img_idx)
    #perspective_utils.to_perspective(gt_detections, base_dir, img_idx)
    for entity_str in os.listdir(altPerspect_dir):
        if os.path.isdir(os.path.join(altPerspect_dir, entity_str)):
            perspect_detections = perspective_utils.get_detections(
                base_dir,
                altPerspect_dir,
                img_idx,
                perspID,
                entity_str,
                results=use_results)
            if perspect_detections is not None:
                if use_results:
                    stripped_detections = trust_utils.strip_objs(
                        perspect_detections)
                    gt_detections = gt_detections + stripped_detections
                else:
                    gt_detections = gt_detections + perspect_detections

    # Create VtkPointCloud for visualization
    vtk_point_cloud = VtkPointCloud()
    if fulcrum_of_points or use_intensity:
        vtk_point_cloud.set_points(all_points, point_colours)
    else:
        vtk_point_cloud.set_points(all_points)
    vtk_point_cloud.vtk_actor.GetProperty().SetPointSize(pointSize)

    # Create VtkVoxelGrid for visualization
    vtk_voxel_grid = VtkVoxelGrid()
    vtk_voxel_grid.set_voxels(voxel_grid)

    COLOUR_SCHEME_PAPER = {
        "Car": (0, 0, 255),  # Blue
        "Pedestrian": (255, 0, 0),  # Red
        "Bus": (0, 0, 255),  # Blue
        "Cyclist": (150, 50, 100),  # Purple
        "Van": (255, 150, 150),  # Peach
        "Person_sitting": (150, 200, 255),  # Sky Blue
        "Truck": (0, 0, 255),  # Blue
        "Tram": (150, 150, 150),  # Grey
        "Misc": (100, 100, 100),  # Dark Grey
        "DontCare": (255, 255, 255),  # White
    }

    # Create VtkBoxes for boxes
    vtk_boxes = VtkBoxes()
    vtk_boxes.set_objects(gt_detections,
                          COLOUR_SCHEME_PAPER)  # or: vtk_boxes.COLOUR_SCHEME_KITTI

    # Create Axes
    axes = vtk.vtkAxesActor()
    axes.SetTotalLength(5, 5, 5)

    # Create Voxel Grid Renderer in bottom half
    vtk_renderer = vtk.vtkRenderer()
    vtk_renderer.AddActor(vtk_point_cloud.vtk_actor)
    vtk_renderer.AddActor(vtk_voxel_grid.vtk_actor)
    vtk_renderer.AddActor(vtk_boxes.vtk_actor)
    #vtk_renderer.AddActor(axes)
    vtk_renderer.SetBackground(0.2, 0.3, 0.4)

    # Setup Camera
    current_cam = vtk_renderer.GetActiveCamera()
    current_cam.Pitch(pitch)
    current_cam.Roll(180.0)

    # Zooms out to fit all points on screen
    vtk_renderer.ResetCamera()

    # Zoom in slightly
    current_cam.Zoom(zoom)

    # Reset the clipping range to show all points
    vtk_renderer.ResetCameraClippingRange()

    # Setup Render Window
    vtk_render_window = vtk.vtkRenderWindow()
    vtk_render_window.SetWindowName(
        "Point Cloud and Voxel Grid, Image {}".format(img_idx))
    vtk_render_window.SetSize(1920, 1080)
    vtk_render_window.AddRenderer(vtk_renderer)

    # Setup custom interactor style, which handles mouse and key events
    vtk_render_window_interactor = vtk.vtkRenderWindowInteractor()
    vtk_render_window_interactor.SetRenderWindow(vtk_render_window)

    # Add custom interactor to toggle actor visibilities

    vtk_render_window_interactor.SetInteractorStyle(
        vis_utils.ToggleActorsInteractorStyle([
            vtk_point_cloud.vtk_actor,
            vtk_voxel_grid.vtk_actor,
            vtk_boxes.vtk_actor,
        ]))

    # Show image
    image = cv2.imread(image_dir + "/%06d.png" % img_idx)
    cv2.imshow("Press any key to continue", image)
    cv2.waitKey()

    # Render in VTK
    vtk_render_window.Render()
    vtk_render_window_interactor.Start()  # Blocking
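
ToggleActorsInteractorStyle is the same vis_utils helper used in Example #1: it takes the list of actors whose visibility the function keys toggle (presumably F1, F2, ... in list order, judging by the Example #1 docstring), on top of VTK's standard mouse navigation.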
Example #5
def vis_pc(pc, obj_list, frustum_points=None):

    # Define Fixed Sizes for the voxel grid
    x_min = -85
    x_max = 85
    y_min = -5
    y_max = 5
    z_min = 3
    z_max = 85

    # Comment these out to filter points by area
    x_min = min(pc[0])
    x_max = max(pc[0])
    y_min = min(pc[1])
    y_max = max(pc[1])
    z_min = min(pc[2])
    z_max = max(pc[2])

    # Reshape points into N x [x, y, z]
    all_points = np.array(pc).transpose().reshape((-1, 3))

    # Filter points within certain xyz range
    area_filter = (pc[0] > x_min) & (pc[0] < x_max) & \
                  (pc[1] > y_min) & (pc[1] < y_max) & \
                  (pc[2] > z_min) & (pc[2] < z_max)

    all_points = all_points[area_filter]

    # Create Voxel Grid
    voxel_grid = VoxelGrid()
    voxel_grid_extents = [[x_min, x_max], [y_min, y_max], [z_min, z_max]]
    print(voxel_grid_extents)

    start_time = time.time()
    voxel_grid.voxelize(all_points, 0.2, voxel_grid_extents)
    end_time = time.time()
    print("Voxelized in {} s".format(end_time - start_time))

    # Some settings for the initial camera view and point size
    closeView = False
    pitch = 170
    pointSize = 4
    zoom = 1
    if closeView:
        pitch = 180.5
        pointSize = 3
        zoom = 35

    # Create VtkPointCloud for visualization
    vtk_point_cloud = VtkPointCloud()
    vtk_point_cloud.set_points(all_points)
    vtk_point_cloud.vtk_actor.GetProperty().SetPointSize(pointSize)

    # Create VtkVoxelGrid for visualization
    vtk_voxel_grid = VtkVoxelGrid()
    vtk_voxel_grid.set_voxels(voxel_grid)

    # Create VtkBoxes for boxes
    vtk_boxes = VtkBoxes()
    vtk_boxes.set_objects(obj_list, COLOUR_SCHEME, False)

    # Create Axes
    axes = vtk.vtkAxesActor()
    axes.SetTotalLength(2, 2, 2)

    # Create Voxel Grid Renderer in bottom half
    vtk_renderer = vtk.vtkRenderer()
    vtk_renderer.AddActor(vtk_point_cloud.vtk_actor)
    vtk_renderer.AddActor(vtk_voxel_grid.vtk_actor)
    vtk_renderer.AddActor(vtk_boxes.vtk_actor)
    vtk_renderer.AddActor(axes)
    vtk_renderer.SetBackground(0.2, 0.3, 0.4)


    # Add lines for frustum
    if frustum_points is not None:
        frustum_actor = get_frustum_actor(frustum_points)
        vtk_renderer.AddActor(frustum_actor)

    # Setup Camera
    current_cam = vtk_renderer.GetActiveCamera()
    current_cam.Pitch(pitch)
    current_cam.Roll(180.0)

    # Zooms out to fit all points on screen
    vtk_renderer.ResetCamera()

    # Zoom in slightly
    current_cam.Zoom(zoom)

    # To get a repeatable view: navigate to the desired camera pose, then exit.
    # Three "current_cam.Set*" lines are printed on exit; paste them here.
    # "Above forward" view:
    current_cam.SetPosition(7.512679241328601, -312.20497623371926, -130.38469206536766)
    current_cam.SetViewUp(-0.01952407393317445, -0.44874501090739727, 0.893446543293314)
    current_cam.SetFocalPoint(11.624950999358777, 14.835920755080867, 33.965665867613836)

    # Reset the clipping range to show all points
    vtk_renderer.ResetCameraClippingRange()

    # Setup Render Window
    vtk_render_window = vtk.vtkRenderWindow()
    vtk_render_window.SetWindowName(
        "Point Cloud and Voxel Grid")
    vtk_render_window.SetSize(1920, 1080)
    vtk_render_window.AddRenderer(vtk_renderer)

    # Setup custom interactor style, which handles mouse and key events
    vtk_render_window_interactor = vtk.vtkRenderWindowInteractor()
    vtk_render_window_interactor.SetRenderWindow(vtk_render_window)

    # Add custom interactor to toggle actor visibilities

    vtk_render_window_interactor.SetInteractorStyle(
        vis_utils.ToggleActorsInteractorStyle([
            vtk_point_cloud.vtk_actor,
            vtk_voxel_grid.vtk_actor,
            vtk_boxes.vtk_actor,
        ]))

    # Render in VTK
    vtk_render_window.Render()

    vtk_render_window_interactor.Start()  # Blocking
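
The hard-coded current_cam.Set* calls above come from the workflow described in the comments: run once, navigate to a good view, and paste in the three lines that Example #6 prints on exit (current_cam.GetPosition(), GetViewUp(), and GetFocalPoint()).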
Example #6
def visualize_objects_in_pointcloud(objects, COLOUR_SCHEME, dataset_dir,
                                    img_idx, fulcrum_of_points, use_intensity,
                                    receive_from_perspective, compare_pcs=False,
                                    show_3d_point_count=False,
                                    show_orientation=cfg.VISUALIZE_ORIENTATION,
                                    final_results=False, show_score=False,
                                    compare_with_gt=False, show_image=True,
                                    _text_positions=None, _text_labels=None):

    image_dir = os.path.join(dataset_dir, 'image_2')
    velo_dir = os.path.join(dataset_dir, 'velodyne')
    calib_dir = os.path.join(dataset_dir, 'calib')

    if compare_pcs:
        fulcrum_of_points = False

    print('=== Loading image: {:06d}.png ==='.format(img_idx))
    print(image_dir)

    image = cv2.imread(image_dir + '/{:06d}.png'.format(img_idx))
    image_shape = (image.shape[1], image.shape[0])

    if use_intensity:
        point_cloud, intensity = obj_utils.get_lidar_point_cloud(
            img_idx, calib_dir, velo_dir, ret_i=use_intensity)
    else:
        point_cloud = obj_utils.get_lidar_point_cloud(
            img_idx, calib_dir, velo_dir, im_size=image_shape)

    if compare_pcs:
        # NOTE: altPerspect_dir is assumed to be defined at module level
        # (the alternate-perspective root, as in Example #4 above)
        receive_persp_dir = os.path.join(
            altPerspect_dir, '{:07d}'.format(receive_from_perspective))
        velo_dir2 = os.path.join(receive_persp_dir, 'velodyne')
        print(velo_dir2)
        if not os.path.isdir(velo_dir2):
            print("Error: cannot find velo_dir2: ", velo_dir2)
            exit()
        point_cloud2 = obj_utils.get_lidar_point_cloud(
            img_idx, calib_dir, velo_dir2, im_size=image_shape)

        # Set to True to display point clouds in world coordinates (debugging)
        display_in_world = False
        if display_in_world:
            point_cloud = perspective_utils.pc_to_world(
                point_cloud.T, receive_persp_dir, img_idx)
            point_cloud2 = perspective_utils.pc_to_world(
                point_cloud2.T, dataset_dir, img_idx)
            point_cloud = np.hstack((point_cloud.T, point_cloud2.T))
        else:
            point_cloud2 = perspective_utils.pc_persp_transform(
                point_cloud2.T, receive_persp_dir, dataset_dir, img_idx)
            point_cloud = np.hstack((point_cloud, point_cloud2.T))

    # Reshape points into N x [x, y, z]
    all_points = np.array(point_cloud).transpose().reshape((-1, 3))

    # Define Fixed Sizes for the voxel grid
    x_min = -85
    x_max = 85
    y_min = -5
    y_max = 5
    z_min = 3
    z_max = 85

    # Comment these out to filter points by area
    x_min = min(point_cloud[0])
    x_max = max(point_cloud[0])
    y_min = min(point_cloud[1])
    y_max = max(point_cloud[1])
    z_min = min(point_cloud[2])
    z_max = max(point_cloud[2])

    # Filter points within certain xyz range
    area_filter = (point_cloud[0] > x_min) & (point_cloud[0] < x_max) & \
                  (point_cloud[1] > y_min) & (point_cloud[1] < y_max) & \
                  (point_cloud[2] > z_min) & (point_cloud[2] < z_max)

    all_points = all_points[area_filter]

    point_colours = None
    if fulcrum_of_points:
        # Get point colours
        point_colours = vis_utils.project_img_to_point_cloud(all_points, image,
                                                             calib_dir, img_idx)
    elif use_intensity:
        adjusted = intensity == 65535
        intensity = intensity > 0
        intensity = np.expand_dims(intensity, -1)
        point_colours = np.hstack(
            (intensity * 255, intensity * 255 - adjusted * 255,
             intensity * 255 - adjusted * 255))

    # Create Voxel Grid
    voxel_grid = VoxelGrid()
    voxel_grid_extents = [[x_min, x_max], [y_min, y_max], [z_min, z_max]]
    print(voxel_grid_extents)

    start_time = time.time()
    voxel_grid.voxelize(all_points, 0.2, voxel_grid_extents)
    end_time = time.time()
    print("Voxelized in {} s".format(end_time - start_time))

    # Some settings for the initial camera view and point size
    closeView = False
    pitch = 170
    pointSize = 2
    zoom = 1
    if closeView:
        pitch = 180.5
        pointSize = 3
        zoom = 35

    # Create VtkPointCloud for visualization
    vtk_point_cloud = VtkPointCloud()
    if point_colours is not None:
        vtk_point_cloud.set_points(all_points, point_colours)
    else:
        vtk_point_cloud.set_points(all_points)
    vtk_point_cloud.vtk_actor.GetProperty().SetPointSize(pointSize)

    # Create VtkVoxelGrid for visualization
    vtk_voxel_grid = VtkVoxelGrid()
    vtk_voxel_grid.set_voxels(voxel_grid)

    # Create VtkBoxes for boxes
    vtk_boxes = VtkBoxes()
    vtk_boxes.set_objects(objects, COLOUR_SCHEME,
                          show_orientation)  # or: vtk_boxes.COLOUR_SCHEME_KITTI

    # Create Axes
    axes = vtk.vtkAxesActor()
    axes.SetTotalLength(5, 5, 5)

    # Create Voxel Grid Renderer in bottom half
    vtk_renderer = vtk.vtkRenderer()
    vtk_renderer.AddActor(vtk_point_cloud.vtk_actor)
    vtk_renderer.AddActor(vtk_voxel_grid.vtk_actor)
    vtk_renderer.AddActor(vtk_boxes.vtk_actor)
    vtk_renderer.AddActor(axes)
    if _text_positions is not None:
        vtk_text_labels = VtkTextLabels()
        vtk_text_labels.set_text_labels(_text_positions, _text_labels)
        vtk_renderer.AddActor(vtk_text_labels.vtk_actor)
    vtk_renderer.SetBackground(0.2, 0.3, 0.4)

    # Setup Camera
    current_cam = vtk_renderer.GetActiveCamera()
    current_cam.Pitch(pitch)
    current_cam.Roll(180.0)

    # Zooms out to fit all points on screen
    vtk_renderer.ResetCamera()

    # Zoom in slightly
    current_cam.Zoom(zoom)

    # To get a repeatable view: navigate to the desired camera pose, then exit.
    # Three "current_cam.Set*" lines are printed on exit (see the prints at the
    # end of this function); paste them here. Later blocks override earlier
    # ones, so the last uncommented block wins.
    # "Above forward" view:
    current_cam.SetPosition(7.512679241328601, -312.20497623371926, -130.38469206536766)
    current_cam.SetViewUp(-0.01952407393317445, -0.44874501090739727, 0.893446543293314)
    current_cam.SetFocalPoint(11.624950999358777, 14.835920755080867, 33.965665867613836)

    # Top down view of synchronization
    current_cam.SetPosition(28.384757950371405, -125.46190537888288, 63.60263366961189)
    current_cam.SetViewUp(-0.02456679343399302, 0.0030507437719906913, 0.9996935358512673)
    current_cam.SetFocalPoint(27.042134804730317, 15.654378427929846, 63.13899801247614)

    current_cam.SetPosition(30.3869590224831, -50.28910856489952, 60.097631136698965)
    current_cam.SetViewUp(-0.0237472244952177, -0.06015048799392083, 0.997906803325274)
    current_cam.SetFocalPoint(27.06695416156647, 15.347824332314035, 63.97499987548391)


    # current_cam.SetPosition(14.391008769593322, -120.06549828061613, -1.567028749253062)
    # current_cam.SetViewUp(-0.02238762832327178, -0.1049057307562059, 0.9942301452644481)
    # current_cam.SetFocalPoint(10.601112314728102, 20.237110061924664, 13.151596441968126)

    # # Top down view of whole detection area
    # current_cam.SetPosition(11.168659642537031, -151.97163016078756, 17.590894639193227)
    # current_cam.SetViewUp(-0.02238762832327178, -0.1049057307562059, 0.9942301452644481)
    # current_cam.SetFocalPoint(6.5828849321501055, 17.79452593368671, 35.400431120570865)

    # Top down view of scenario
    current_cam.SetPosition(2.075612197299923, -76.19063612245675, 5.948366424752178)
    current_cam.SetViewUp(-0.02238762832327178, -0.1049057307562059, 0.9942301452644481)
    current_cam.SetFocalPoint(-0.5129380758134061, 19.637933198314016, 16.00138547483155)

    # Reset the clipping range to show all points
    vtk_renderer.ResetCameraClippingRange()

    # Setup Render Window
    vtk_render_window = vtk.vtkRenderWindow()
    vtk_render_window.SetWindowName(
        "Point Cloud and Voxel Grid, Image {}".format(img_idx))
    vtk_render_window.SetSize(1920, 1080)
    vtk_render_window.AddRenderer(vtk_renderer)

    # Setup custom interactor style, which handles mouse and key events
    vtk_render_window_interactor = vtk.vtkRenderWindowInteractor()
    vtk_render_window_interactor.SetRenderWindow(vtk_render_window)

    # Add custom interactor to toggle actor visibilities

    vtk_render_window_interactor.SetInteractorStyle(
        vis_utils.ToggleActorsInteractorStyle([
            vtk_point_cloud.vtk_actor,
            vtk_voxel_grid.vtk_actor,
            vtk_boxes.vtk_actor,
        ]))

    # Show image
    if show_image:
        image = cv2.imread(image_dir + "/%06d.png" % img_idx)
        cv2.imshow("Press any key to continue", image)
        cv2.waitKey()

    # Render in VTK
    vtk_render_window.Render()

    vtk_render_window_interactor.Start()  # Blocking
    # vtk_render_window_interactor.Initialize()  # Non-blocking

    # Obtain camera positional information for repeatable views
    print("current_cam.SetPosition{}".format(current_cam.GetPosition()))
    print("current_cam.SetViewUp{}".format(current_cam.GetViewUp()))
    print("current_cam.SetFocalPoint{}".format(current_cam.GetFocalPoint()))