dataset_dir = '/home/bradenhurl/GTAData/'
data_dir = dataset_dir + label_dir

startIdx = 0
endIdx = 50000

x = []
y = []
z = []
for idx in range(startIdx, endIdx):
    sys.stdout.write("\rProcessing index {} / {}".format(
        idx + 1 - startIdx, endIdx - startIdx))

    filepath = data_dir + '/' + "{:06d}.txt".format(idx)
    if os.stat(filepath).st_size != 0:
        obj_list = obj_utils.read_labels(data_dir, idx)
        for obj in obj_list:
            if obj.type == 'Pedestrian':
                x.append(obj.t[0])
                y.append(obj.t[1])
                z.append(obj.t[2])

# x/y for this visualization represent right/forward (x/z in KITTI cam coords)
x = np.array(x)
y = np.array(z)

data = np.vstack([x, y]).T

# Create a figure with 6 plot areas
fig, axes = plt.subplots(ncols=6, nrows=1, figsize=(21, 5))

# Everything starts with a scatter plot
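# Hedged sketch (not part of the original demo): one way the density panels
# above could be filled. The original builds six axes from the pedestrian
# right/forward positions in `data`; this toy version fills three
# representative panels and uses synthetic positions so it runs without the
# GTAData labels.
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import gaussian_kde

rng = np.random.default_rng(0)
x = rng.normal(0.0, 10.0, 2000)   # stand-in for right offsets (m)
y = rng.uniform(0.0, 80.0, 2000)  # stand-in for forward distances (m)
data = np.vstack([x, y]).T

fig, axes = plt.subplots(ncols=3, nrows=1, figsize=(12, 4))

# Scatter plot of raw positions
axes[0].scatter(x, y, s=2)
axes[0].set_title('Scatter')

# Hexbin density
axes[1].hexbin(x, y, gridsize=40, cmap='viridis')
axes[1].set_title('Hexbin')

# Gaussian KDE evaluated on a regular grid
kde = gaussian_kde(data.T)
xi, yi = np.mgrid[x.min():x.max():100j, y.min():y.max():100j]
zi = kde(np.vstack([xi.ravel(), yi.ravel()])).reshape(xi.shape)
axes[2].pcolormesh(xi, yi, zi, shading='auto')
axes[2].set_title('Gaussian KDE')

plt.tight_layout()
plt.show()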
def main(): """This demo shows RPN proposals and AVOD predictions in 3D and 2D in image space. Given certain thresholds for proposals and predictions, it selects and draws the bounding boxes on the image sample. It goes through the entire proposal and prediction samples for the given dataset split. The proposals, overlaid, and prediction images can be toggled on or off separately in the options section. The prediction score and IoU with ground truth can be toggled on or off as well, shown as (score, IoU) above the detection. """ fig_size = (10, 6.1) gt_classes = ['Car', 'Pedestrian', 'Cyclist'] # Output images directories output_dir_base = 'images_2d' data_dir = '../../DATA/Kitti/object/' label_dir = data_dir + 'training/label_2' image_dir = data_dir + 'training/image_2' filepath = data_dir + 'val.txt' calib_dir = data_dir + 'training/calib' filenames = open(filepath, 'r').readlines() filenames = [int(filename) for filename in filenames] i = 0 i_max = len(filenames) for filename in filenames: ############################## # Ground Truth ############################## # Get ground truth labels gt_objects = obj_utils.read_labels(label_dir, filename) boxes2d, _, _ = obj_utils.build_bbs_from_objects( gt_objects, class_needed=gt_classes) image_path = image_dir + "/%06d.png" % filename image = Image.open(image_path) image_size = image.size prop_fig, prop_2d_axes, prop_3d_axes = \ vis_utils.visualization(image_dir, filename, display=False) # Read the stereo calibration matrix for visualization stereo_calib = calib_utils.read_calibration(calib_dir, filename) calib_p2 = stereo_calib.p2 draw_gt(gt_objects, prop_2d_axes, prop_3d_axes, calib_p2) out_name = output_dir_base + "/%06d.png" % filename plt.savefig(out_name) plt.close(prop_fig) i += 1 print(str(i) + '/' + str(i_max)) print('\nDone')
def main(): """ This demo shows example mini batch info for full MlodModel training. This includes ground truth, ortho rotated ground truth, negative proposal anchors, positive proposal anchors, and a sampled mini batch. The 2D iou can be modified to show the effect of changing the iou threshold for mini batch sampling. In order to let this demo run without training an RPN, the proposals shown are being read from a text file. Keys: F1: Toggle ground truth F2: Toggle ortho rotated ground truth F3: Toggle negative proposal anchors F4: Toggle positive proposal anchors F5: Toggle mini batch anchors """ ############################## # Options ############################## # Config file folder, default (<mlod_root>/data/outputs/<checkpoint_name>) config_dir = None # checkpoint_name = None checkpoint_name = 'mlod_exp_example' data_split = 'val_half' # global_step = None global_step = 100000 # # # Cars # # # # sample_name = "000050" sample_name = "000104" # sample_name = "000764" # # # People # # # # val_half # sample_name = '000001' # Hard, 1 far cyc # sample_name = '000005' # Easy, 1 ped # sample_name = '000122' # Easy, 1 cyc # sample_name = '000134' # Hard, lots of people # sample_name = '000167' # Medium, 1 ped, 2 cycs # sample_name = '000187' # Medium, 1 ped on left # sample_name = '000381' # Easy, 1 ped # sample_name = '000398' # Easy, 1 ped # sample_name = '000401' # Hard, obscured peds # sample_name = '000407' # Easy, 1 ped # sample_name = '000448' # Hard, several far people # sample_name = '000486' # Hard 2 obscured peds # sample_name = '000509' # Easy, 1 ped # sample_name = '000718' # Hard, lots of people # sample_name = '002216' # Easy, 1 cyc mini_batch_size = 512 neg_proposal_2d_iou_hi = 0.6 pos_proposal_2d_iou_lo = 0.65 bkg_proposals_line_width = 0.5 neg_proposals_line_width = 0.5 mid_proposals_line_width = 0.5 pos_proposals_line_width = 1.0 ############################## # End of Options ############################## img_idx = int(sample_name) print("Showing mini batch for sample {}".format(sample_name)) # Read proposals from file if checkpoint_name is None: # Use VAL Dataset dataset = DatasetBuilder.build_kitti_dataset(DatasetBuilder.KITTI_VAL) # Load demo proposals proposals_and_scores_dir = mlod.top_dir() + \ '/demos/data/predictions/' + checkpoint_name + \ '/proposals_and_scores/' + dataset.data_split else: if config_dir is None: config_dir = mlod.root_dir() + '/data/outputs/' + checkpoint_name # Parse experiment config pipeline_config_file = \ config_dir + '/' + checkpoint_name + '.config' _, _, _, dataset_config = \ config_builder_util.get_configs_from_pipeline_file( pipeline_config_file, is_training=False) dataset_config.data_split = data_split dataset = DatasetBuilder.build_kitti_dataset(dataset_config, use_defaults=False) # Overwrite mini_batch_utils = dataset.kitti_utils.mini_batch_utils mini_batch_utils.mlod_neg_iou_range[1] = neg_proposal_2d_iou_hi mini_batch_utils.mlod_pos_iou_range[0] = pos_proposal_2d_iou_lo # Load proposals from outputs folder proposals_and_scores_dir = mlod.root_dir() + \ '/data/outputs/' + checkpoint_name + \ '/predictions/proposals_and_scores/' + dataset.data_split # Get checkpoint step steps = os.listdir(proposals_and_scores_dir) steps.sort(key=int) print('Available steps: {}'.format(steps)) # Use latest checkpoint if no index provided if global_step is None: global_step = steps[-1] proposals_and_scores = np.loadtxt( proposals_and_scores_dir + "/{}/{}.txt".format(global_step, sample_name)) proposal_boxes_3d = proposals_and_scores[:, 0:7] 
proposal_anchors = box_3d_encoder.box_3d_to_anchor(proposal_boxes_3d) # Get filtered ground truth obj_labels = obj_utils.read_labels(dataset.label_dir, img_idx) filtered_objs = dataset.kitti_utils.filter_labels(obj_labels) # Convert ground truth to anchors gt_boxes_3d = np.asarray([ box_3d_encoder.object_label_to_box_3d(obj_label) for obj_label in filtered_objs ]) gt_anchors = box_3d_encoder.box_3d_to_anchor(gt_boxes_3d, ortho_rotate=True) # Ortho rotate ground truth gt_ortho_boxes_3d = box_3d_encoder.anchors_to_box_3d(gt_anchors) gt_ortho_objs = [ box_3d_encoder.box_3d_to_object_label(box_3d, obj_type='OrthoGt') for box_3d in gt_ortho_boxes_3d ] # Project gt and anchors into BEV gt_bev_anchors, _ = \ anchor_projector.project_to_bev(gt_anchors, dataset.kitti_utils.bev_extents) bev_anchors, _ = \ anchor_projector.project_to_bev(proposal_anchors, dataset.kitti_utils.bev_extents) # Reorder boxes into (y1, x1, y2, x2) order gt_bev_anchors_tf_order = anchor_projector.reorder_projected_boxes( gt_bev_anchors) bev_anchors_tf_order = anchor_projector.reorder_projected_boxes( bev_anchors) # Convert to box_list format for iou calculation gt_anchor_box_list = box_list.BoxList( tf.cast(gt_bev_anchors_tf_order, tf.float32)) anchor_box_list = box_list.BoxList( tf.cast(bev_anchors_tf_order, tf.float32)) # Get IoU for every anchor tf_all_ious = box_list_ops.iou(gt_anchor_box_list, anchor_box_list) valid_ious = True # Make sure the calculated IoUs contain values. Since its a [N, M] # tensor, if there are no gt's for instance, that entry will be zero. if tf_all_ious.shape[0] == 0 or tf_all_ious.shape[1] == 0: print('#################################################') print('Warning: This sample does not contain valid IoUs') print('#################################################') valid_ious = False if valid_ious: tf_max_ious = tf.reduce_max(tf_all_ious, axis=0) tf_max_iou_indices = tf.argmax(tf_all_ious, axis=0) # Sample an RPN mini batch from the non empty anchors mini_batch_utils = dataset.kitti_utils.mini_batch_utils # Overwrite mini batch size and sample a mini batch mini_batch_utils.mlod_mini_batch_size = mini_batch_size mb_mask_tf, _ = mini_batch_utils.sample_mlod_mini_batch(tf_max_ious) # Create a session config = tf.ConfigProto() config.gpu_options.allow_growth = True sess = tf.Session(config=config) # Run the graph to calculate ious for every proposal and # to get the mini batch mask all_ious, max_ious, max_iou_indices = sess.run( [tf_all_ious, tf_max_ious, tf_max_iou_indices]) mb_mask = sess.run(mb_mask_tf) mb_anchors = proposal_anchors[mb_mask] mb_anchor_boxes_3d = box_3d_encoder.anchors_to_box_3d(mb_anchors) mb_anchor_ious = max_ious[mb_mask] else: # We have no valid IoU's, so assume all IoUs are zeros # and the mini-batch contains all the anchors since we cannot # mask without IoUs. 
max_ious = np.zeros(proposal_boxes_3d.shape[0]) mb_anchor_ious = max_ious mb_anchors = proposal_anchors mb_anchor_boxes_3d = box_3d_encoder.anchors_to_box_3d(mb_anchors) # Create list of positive/negative proposals based on iou pos_proposal_objs = [] mid_proposal_objs = [] neg_proposal_objs = [] bkg_proposal_objs = [] for i in range(len(proposal_boxes_3d)): box_3d = proposal_boxes_3d[i] if max_ious[i] == 0.0: # Background proposals bkg_proposal_objs.append( box_3d_encoder.box_3d_to_object_label( box_3d, obj_type='BackgroundProposal')) elif max_ious[i] < neg_proposal_2d_iou_hi: # Negative proposals neg_proposal_objs.append( box_3d_encoder.box_3d_to_object_label( box_3d, obj_type='NegativeProposal')) elif max_ious[i] < pos_proposal_2d_iou_lo: # Middle proposals (in between negative and positive) mid_proposal_objs.append( box_3d_encoder.box_3d_to_object_label( box_3d, obj_type='MiddleProposal')) elif max_ious[i] <= 1.0: # Positive proposals pos_proposal_objs.append( box_3d_encoder.box_3d_to_object_label( box_3d, obj_type='PositiveProposal')) else: raise ValueError('Invalid IoU > 1.0') print('{} bkg, {} neg, {} mid, {} pos proposals:'.format( len(bkg_proposal_objs), len(neg_proposal_objs), len(mid_proposal_objs), len(pos_proposal_objs))) # Convert the mini_batch anchors to object list mb_obj_list = [] for i in range(len(mb_anchor_ious)): if valid_ious and (mb_anchor_ious[i] > mini_batch_utils.mlod_pos_iou_range[0]): obj_type = "Positive" else: obj_type = "Negative" obj = box_3d_encoder.box_3d_to_object_label(mb_anchor_boxes_3d[i], obj_type) mb_obj_list.append(obj) # Point cloud image = cv2.imread(dataset.get_rgb_image_path(sample_name)) points, point_colours = demo_utils.get_filtered_pc_and_colours( dataset, image, img_idx) # Visualize from here vis_utils.visualization(dataset.rgb_image_dir, img_idx) plt.show(block=False) # VtkPointCloud vtk_point_cloud = VtkPointCloud() vtk_point_cloud.set_points(points, point_colours) # VtkAxes axes = vtk.vtkAxesActor() axes.SetTotalLength(5, 5, 5) # VtkBoxes for ground truth vtk_gt_boxes = VtkBoxes() vtk_gt_boxes.set_objects(filtered_objs, COLOUR_SCHEME) # VtkBoxes for ortho ground truth vtk_gt_ortho_boxes = VtkBoxes() vtk_gt_ortho_boxes.set_objects(gt_ortho_objs, COLOUR_SCHEME) # VtkBoxes for background proposals vtk_bkg_proposal_boxes = VtkBoxes() vtk_bkg_proposal_boxes.set_objects(bkg_proposal_objs, COLOUR_SCHEME) vtk_bkg_proposal_boxes.set_line_width(bkg_proposals_line_width) # VtkBoxes for negative proposals vtk_neg_proposal_boxes = VtkBoxes() vtk_neg_proposal_boxes.set_objects(neg_proposal_objs, COLOUR_SCHEME) vtk_neg_proposal_boxes.set_line_width(neg_proposals_line_width) # VtkBoxes for middle proposals vtk_mid_proposal_boxes = VtkBoxes() vtk_mid_proposal_boxes.set_objects(mid_proposal_objs, COLOUR_SCHEME) vtk_mid_proposal_boxes.set_line_width(mid_proposals_line_width) # VtkBoxes for positive proposals vtk_pos_proposal_boxes = VtkBoxes() vtk_pos_proposal_boxes.set_objects(pos_proposal_objs, COLOUR_SCHEME) vtk_pos_proposal_boxes.set_line_width(pos_proposals_line_width) # Create VtkBoxes for mini batch anchors vtk_mb_boxes = VtkBoxes() vtk_mb_boxes.set_objects(mb_obj_list, COLOUR_SCHEME) # Create Voxel Grid Renderer in bottom half vtk_renderer = vtk.vtkRenderer() vtk_renderer.SetBackground(0.2, 0.3, 0.4) # Add actors vtk_renderer.AddActor(axes) vtk_renderer.AddActor(vtk_point_cloud.vtk_actor) vtk_renderer.AddActor(vtk_gt_boxes.vtk_actor) vtk_renderer.AddActor(vtk_gt_ortho_boxes.vtk_actor) vtk_renderer.AddActor(vtk_bkg_proposal_boxes.vtk_actor) 
vtk_renderer.AddActor(vtk_neg_proposal_boxes.vtk_actor) vtk_renderer.AddActor(vtk_mid_proposal_boxes.vtk_actor) vtk_renderer.AddActor(vtk_pos_proposal_boxes.vtk_actor) vtk_renderer.AddActor(vtk_mb_boxes.vtk_actor) # Setup Camera current_cam = vtk_renderer.GetActiveCamera() current_cam.Pitch(160.0) current_cam.Roll(180.0) # Zooms out to fit all points on screen vtk_renderer.ResetCamera() # Zoom in slightly current_cam.Zoom(2.5) # Reset the clipping range to show all points vtk_renderer.ResetCameraClippingRange() # Setup Render Window vtk_render_window = vtk.vtkRenderWindow() vtk_render_window.SetWindowName("MLOD Mini Batch") vtk_render_window.SetSize(900, 500) vtk_render_window.AddRenderer(vtk_renderer) # Setup custom interactor style, which handles mouse and key events vtk_render_window_interactor = vtk.vtkRenderWindowInteractor() vtk_render_window_interactor.SetRenderWindow(vtk_render_window) vtk_render_window_interactor.SetInteractorStyle( vis_utils.ToggleActorsInteractorStyle([ vtk_gt_boxes.vtk_actor, vtk_gt_ortho_boxes.vtk_actor, vtk_bkg_proposal_boxes.vtk_actor, vtk_neg_proposal_boxes.vtk_actor, vtk_mid_proposal_boxes.vtk_actor, vtk_pos_proposal_boxes.vtk_actor, vtk_mb_boxes.vtk_actor, ])) # Render in VTK vtk_render_window.Render() vtk_render_window_interactor.Start()
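# Hedged sketch (not the project's sample_mlod_mini_batch): the rough idea
# behind IoU-based mini batch sampling used in the demo above. Anchors are
# split into negative and positive pools by their max IoU with ground truth,
# then a fixed-size mini batch is drawn from both pools. Thresholds mirror
# the options above; the real implementation is a TensorFlow op.
import numpy as np

def sample_mini_batch(max_ious, mini_batch_size=512,
                      neg_iou_hi=0.6, pos_iou_lo=0.65, seed=0):
    """Return a boolean mask selecting a mini batch of anchors."""
    rng = np.random.default_rng(seed)
    neg_indices = np.where(max_ious < neg_iou_hi)[0]
    pos_indices = np.where(max_ious >= pos_iou_lo)[0]

    # Take up to half the batch from positives, fill the rest with negatives
    num_pos = min(len(pos_indices), mini_batch_size // 2)
    num_neg = min(len(neg_indices), mini_batch_size - num_pos)

    pos_pick = rng.permutation(pos_indices)[:num_pos]
    neg_pick = rng.permutation(neg_indices)[:num_neg]
    chosen = np.concatenate([pos_pick, neg_pick])

    mask = np.zeros(len(max_ious), dtype=bool)
    mask[chosen] = True
    return mask

# Example: 2000 anchors with random IoUs
mask = sample_mini_batch(np.random.default_rng(1).random(2000))
print(mask.sum(), 'anchors selected')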
def main(): """ Visualization of anchor filtering using 3D integral images """ anchor_colour_scheme = { "Car": (0, 255, 0), # Green "Pedestrian": (255, 150, 50), # Orange "Cyclist": (150, 50, 100), # Purple "DontCare": (255, 0, 0), # Red "Anchor": (0, 0, 255), # Blue } # Create Dataset dataset = DatasetBuilder.build_kitti_dataset(DatasetBuilder.KITTI_TRAINVAL) # Options clusters, _ = dataset.get_cluster_info() sample_name = "000000" img_idx = int(sample_name) anchor_stride = [0.5, 0.5] ground_plane = obj_utils.get_road_plane(img_idx, dataset.planes_dir) anchor_3d_generator = grid_anchor_3d_generator.GridAnchor3dGenerator( anchor_3d_sizes=clusters, anchor_stride=anchor_stride) area_extents = np.array([[-40, 40], [-5, 3], [0, 70]]) # Generate anchors in box_3d format start_time = time.time() anchor_boxes_3d = anchor_3d_generator.generate(area_3d=area_extents, ground_plane=ground_plane) end_time = time.time() print("Anchors generated in {} s".format(end_time - start_time)) point_cloud = obj_utils.get_lidar_point_cloud(img_idx, dataset.calib_dir, dataset.velo_dir) offset_dist = 2.0 # Filter points within certain xyz range and offset from ground plane offset_filter = obj_utils.get_point_filter(point_cloud, area_extents, ground_plane, offset_dist) # Filter points within 0.2m of the road plane road_filter = obj_utils.get_point_filter(point_cloud, area_extents, ground_plane, 0.1) slice_filter = np.logical_xor(offset_filter, road_filter) point_cloud = point_cloud.T[slice_filter] # Generate Voxel Grid vx_grid_3d = voxel_grid.VoxelGrid() vx_grid_3d.voxelize(point_cloud, 0.1, area_extents) # Anchors in anchor format all_anchors = box_3d_encoder.box_3d_to_anchor(anchor_boxes_3d) # Filter the boxes here! start_time = time.time() empty_filter = \ anchor_filter.get_empty_anchor_filter(anchors=all_anchors, voxel_grid_3d=vx_grid_3d, density_threshold=1) anchor_boxes_3d = anchor_boxes_3d[empty_filter] end_time = time.time() print("Anchors filtered in {} s".format(end_time - start_time)) # Visualize GT boxes # Grab ground truth ground_truth_list = obj_utils.read_labels(dataset.label_dir, img_idx) # ---------- # Test Sample extraction # Visualize from here vis_utils.visualization(dataset.rgb_image_dir, img_idx) plt.show(block=False) image_path = dataset.get_rgb_image_path(sample_name) image_shape = np.array(Image.open(image_path)).shape rgb_boxes, rgb_normalized_boxes = \ anchor_projector.project_to_image_space(all_anchors, dataset, image_shape, img_idx) # Overlay boxes on images anchor_objects = [] for anchor_idx in range(len(anchor_boxes_3d)): anchor_box_3d = anchor_boxes_3d[anchor_idx] obj_label = box_3d_encoder.box_3d_to_object_label( anchor_box_3d, 'Anchor') # Append to a list for visualization in VTK later anchor_objects.append(obj_label) for idx in range(len(ground_truth_list)): ground_truth_obj = ground_truth_list[idx] # Append to a list for visualization in VTK later anchor_objects.append(ground_truth_obj) # Create VtkAxes axes = vtk.vtkAxesActor() axes.SetTotalLength(5, 5, 5) # Create VtkBoxes for boxes vtk_boxes = VtkBoxes() vtk_boxes.set_objects(anchor_objects, anchor_colour_scheme) vtk_point_cloud = VtkPointCloud() vtk_point_cloud.set_points(point_cloud) vtk_voxel_grid = VtkVoxelGrid() vtk_voxel_grid.set_voxels(vx_grid_3d) # Create Voxel Grid Renderer in bottom half vtk_renderer = vtk.vtkRenderer() vtk_renderer.AddActor(vtk_boxes.vtk_actor) # vtk_renderer.AddActor(vtk_point_cloud.vtk_actor) vtk_renderer.AddActor(vtk_voxel_grid.vtk_actor) vtk_renderer.AddActor(axes) vtk_renderer.SetBackground(0.2, 
0.3, 0.4) # Setup Camera current_cam = vtk_renderer.GetActiveCamera() current_cam.Pitch(170.0) current_cam.Roll(180.0) # Zooms out to fit all points on screen vtk_renderer.ResetCamera() # Zoom in slightly current_cam.Zoom(2.5) # Reset the clipping range to show all points vtk_renderer.ResetCameraClippingRange() # Setup Render Window vtk_render_window = vtk.vtkRenderWindow() vtk_render_window.SetWindowName("Anchors") vtk_render_window.SetSize(900, 500) vtk_render_window.AddRenderer(vtk_renderer) # Setup custom interactor style, which handles mouse and key events vtk_render_window_interactor = vtk.vtkRenderWindowInteractor() vtk_render_window_interactor.SetRenderWindow(vtk_render_window) vtk_render_window_interactor.SetInteractorStyle( vtk.vtkInteractorStyleTrackballCamera()) # Render in VTK vtk_render_window.Render() vtk_render_window_interactor.Start() # Blocking
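# Hedged sketch illustrating the idea named in the docstring above: a 3D
# integral image (summed volume table) over a voxel occupancy grid lets us
# count the points inside any axis-aligned anchor in O(1), so empty anchors
# can be filtered quickly. This is a toy version, not the project's
# anchor_filter.get_empty_anchor_filter code.
import numpy as np

# Toy occupancy grid: 1 where a voxel contains points, 0 otherwise
rng = np.random.default_rng(0)
occupancy = (rng.random((80, 8, 70)) > 0.97).astype(np.int32)

# 3D integral image, padded with a zero border so index 0 means "empty sum"
integral = occupancy.cumsum(0).cumsum(1).cumsum(2)
integral = np.pad(integral, ((1, 0), (1, 0), (1, 0)))

def box_sum(x1, y1, z1, x2, y2, z2):
    """Occupied-voxel count in the half-open box [x1:x2, y1:y2, z1:z2]."""
    return (integral[x2, y2, z2] - integral[x1, y2, z2]
            - integral[x2, y1, z2] - integral[x2, y2, z1]
            + integral[x1, y1, z2] + integral[x1, y2, z1]
            + integral[x2, y1, z1] - integral[x1, y1, z1])

# Keep only anchors whose voxel count meets a density threshold
density_threshold = 1
print('non-empty:', box_sum(10, 0, 20, 20, 8, 30) >= density_threshold)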
def main(): """This demo runs through all samples in the trainval set, and checks that the 3D box projection of all 'Car', 'Van', 'Pedestrian', and 'Cyclist' objects are in the correct flipped 2D location after applying modifications to the stereo p2 matrix. """ dataset = DatasetBuilder.build_kitti_dataset(DatasetBuilder.KITTI_TRAINVAL, use_defaults=True) np.set_printoptions(formatter={'float': lambda x: "{0:0.3f}".format(x)}) all_samples = dataset.sample_names all_pixel_errors = [] all_max_pixel_errors = [] total_flip_time = 0.0 for sample_idx in range(dataset.num_samples): sys.stdout.write('\r{} / {}'.format(sample_idx, dataset.num_samples - 1)) sample_name = all_samples[sample_idx] img_idx = int(sample_name) # Run the main loop to run throughout the images frame_calibration_info = calib_utils.read_calibration( dataset.calib_dir, img_idx) # Load labels gt_labels = obj_utils.read_labels(dataset.label_dir, img_idx) gt_labels = dataset.kitti_utils.filter_labels( gt_labels, ['Car', 'Van', 'Pedestrian', 'Cyclist']) image = cv2.imread(dataset.get_rgb_image_path(sample_name)) image_size = [image.shape[1], image.shape[0]] # Flip p2 matrix calib_p2 = frame_calibration_info.p2 flipped_p2 = np.copy(calib_p2) flipped_p2[0, 2] = image.shape[1] - flipped_p2[0, 2] flipped_p2[0, 3] = -flipped_p2[0, 3] for obj_idx in range(len(gt_labels)): obj = gt_labels[obj_idx] # Get original 2D bounding boxes orig_box_3d = box_3d_encoder.object_label_to_box_3d(obj) orig_bbox_2d = box_3d_projector.project_to_image_space( orig_box_3d, calib_p2, truncate=True, image_size=image_size) # Skip boxes outside image if orig_bbox_2d is None: continue orig_bbox_2d_flipped = flip_box_2d(orig_bbox_2d, image_size) # Do flipping start_time = time.time() flipped_obj = kitti_aug.flip_label_in_3d_only(obj) flip_time = time.time() - start_time total_flip_time += flip_time box_3d_flipped = box_3d_encoder.object_label_to_box_3d(flipped_obj) new_bbox_2d_flipped = box_3d_projector.project_to_image_space( box_3d_flipped, flipped_p2, truncate=True, image_size=image_size) pixel_errors = new_bbox_2d_flipped - orig_bbox_2d_flipped max_pixel_error = np.amax(np.abs(pixel_errors)) all_pixel_errors.append(pixel_errors) all_max_pixel_errors.append(max_pixel_error) if max_pixel_error > 5: print(' Error > 5px', sample_idx, max_pixel_error) print(np.round(orig_bbox_2d_flipped, 3), np.round(new_bbox_2d_flipped, 3)) print('Avg flip time:', total_flip_time / dataset.num_samples) # Convert to ndarrays all_pixel_errors = np.asarray(all_pixel_errors) all_max_pixel_errors = np.asarray(all_max_pixel_errors) # Print max values print(np.amax(all_max_pixel_errors)) # Plot pixel errors fig, axes = plt.subplots(nrows=3, ncols=1) ax0, ax1, ax2 = axes.flatten() ax0.hist(all_pixel_errors[:, 0], 50, histtype='bar', facecolor='green') ax1.hist(all_pixel_errors[:, 2], 50, histtype='bar', facecolor='green') ax2.hist(all_max_pixel_errors, 50, histtype='bar', facecolor='green') plt.show()
if __name__ == '__main__':
    frame_id = sys.argv[1]

    pointcloud_file_path = './kitti/velodyne/{0}.bin'.format(frame_id)
    with open(pointcloud_file_path, 'rb') as fid:
        data_array = np.fromfile(fid, np.single)
    points = data_array.reshape(-1, 4)

    calib_filename = os.path.join('./kitti/calib/', '{0}.txt'.format(frame_id))
    calib = kitti_util.Calibration(calib_filename)

    fig = draw_lidar(points)

    # ground truth
    obj_labels = obj_utils.read_labels('./kitti/label_2', int(frame_id))
    gt_boxes = []
    for obj in obj_labels:
        if obj.type not in ['Car']:
            continue
        _, corners = kitti_util.compute_box_3d(obj, calib.P)
        corners_velo = calib.project_rect_to_velo(corners)
        gt_boxes.append(corners_velo)
    fig = draw_gt_boxes3d(gt_boxes, fig, color=(1, 0, 0))

    # proposals
    proposal_objs = load_proposals(frame_id)
    boxes = []
    box_scores = []
    for obj in proposal_objs:
        _, corners = kitti_util.compute_box_3d(obj, calib.P)
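# Hedged sketch: the snippet above ends mid-loop, and compute_box_3d comes
# from kitti_util, which is not shown here. A minimal stand-in that builds
# the eight corners of a 3D box from KITTI-style (h, w, l, x, y, z, ry) in
# camera coordinates could look like this (image projection omitted).
import numpy as np

def box_corners_3d(h, w, l, x, y, z, ry):
    """Return an (8, 3) array of box corners in camera coordinates."""
    # Corners centred at the origin; y is measured down from the box bottom
    x_c = np.array([l, l, -l, -l, l, l, -l, -l]) / 2.0
    y_c = np.array([0, 0, 0, 0, -h, -h, -h, -h], dtype=np.float64)
    z_c = np.array([w, -w, -w, w, w, -w, -w, w]) / 2.0

    # Rotate around the camera y axis, then translate to the box centre
    rot = np.array([[np.cos(ry), 0.0, np.sin(ry)],
                    [0.0, 1.0, 0.0],
                    [-np.sin(ry), 0.0, np.cos(ry)]])
    corners = rot @ np.vstack([x_c, y_c, z_c])
    return (corners + np.array([[x], [y], [z]])).T

print(box_corners_3d(1.5, 1.6, 3.9, 2.0, 1.7, 20.0, 0.3).shape)  # (8, 3)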
def main(): # Setting Paths cam = 2 # dataset_dir = '/media/bradenhurl/hd/gta/object/' data_set = 'training' dataset_dir = os.path.expanduser('~') + '/wavedata-dev/demos/gta' #dataset_dir = os.path.expanduser('~') + '/Kitti/object/' dataset_dir = os.path.expanduser( '~') + '/GTAData/TruPercept/object_tru_percept8/' #Set to true to see predictions (results) from all perspectives use_results = True altPerspective = False perspID = 48133 perspStr = '%07d' % perspID altPerspect_dir = os.path.join(dataset_dir, data_set + '/alt_perspective/') if altPerspective: data_set = data_set + '/alt_perspective/' + perspStr fromWiseWindows = False useEVE = False if fromWiseWindows: data_set = 'object' if useEVE: dataset_dir = '/media/bradenhurl/hd/data/eve/' else: dataset_dir = '/media/bradenhurl/hd/data/' image_dir = os.path.join(dataset_dir, data_set) + '/image_2' velo_dir = os.path.join(dataset_dir, data_set) + '/velodyne' calib_dir = os.path.join(dataset_dir, data_set) + '/calib' if use_results: label_dir = os.path.join(dataset_dir, data_set) + '/predictions' else: label_dir = os.path.join(dataset_dir, data_set) + '/label_2' base_dir = os.path.join(dataset_dir, data_set) comparePCs = False if comparePCs: velo_dir2 = os.path.join(dataset_dir, data_set) + '/velodyne' tracking = False if tracking: seq_idx = 1 data_set = '%04d' % seq_idx dataset_dir = '/media/bradenhurl/hd/GTAData/August-01/tracking' image_dir = os.path.join(dataset_dir, 'images', data_set) label_dir = os.path.join(dataset_dir, 'labels', data_set) velo_dir = os.path.join(dataset_dir, 'velodyne', data_set) calib_dir = os.path.join(dataset_dir, 'training', 'calib', '0000') #Used for visualizing inferences #label_dir = '/media/bradenhurl/hd/avod/avod/data/outputs/pyramid_people_gta_40k' #label_dir = label_dir + '/predictions/kitti_predictions_3d/test/0.02/154000/data/' closeView = False pitch = 170 pointSize = 3 zoom = 1 if closeView: pitch = 180.5 pointSize = 3 zoom = 35 image_list = os.listdir(image_dir) fulcrum_of_points = True use_intensity = False img_idx = 2 print('=== Loading image: {:06d}.png ==='.format(img_idx)) print(image_dir) image = cv2.imread(image_dir + '/{:06d}.png'.format(img_idx)) image_shape = (image.shape[1], image.shape[0]) if use_intensity: point_cloud, intensity = obj_utils.get_lidar_point_cloud( img_idx, calib_dir, velo_dir, ret_i=use_intensity) else: point_cloud = obj_utils.get_lidar_point_cloud(img_idx, calib_dir, velo_dir, im_size=image_shape) if comparePCs: point_cloud2 = obj_utils.get_lidar_point_cloud(img_idx, calib_dir, velo_dir2, im_size=image_shape) point_cloud = np.hstack((point_cloud, point_cloud2)) # Reshape points into N x [x, y, z] all_points = np.array(point_cloud).transpose().reshape((-1, 3)) # Define Fixed Sizes for the voxel grid x_min = -85 x_max = 85 y_min = -5 y_max = 5 z_min = 3 z_max = 85 x_min = min(point_cloud[0]) x_max = max(point_cloud[0]) y_min = min(point_cloud[1]) y_max = max(point_cloud[1]) #z_min = min(point_cloud[2]) z_max = max(point_cloud[2]) # Filter points within certain xyz range area_filter = (point_cloud[0] > x_min) & (point_cloud[0] < x_max) & \ (point_cloud[1] > y_min) & (point_cloud[1] < y_max) & \ (point_cloud[2] > z_min) & (point_cloud[2] < z_max) all_points = all_points[area_filter] #point_colours = np.zeros(point_cloud.shape[1],0) #print(point_colours.shape) if fulcrum_of_points: # Get point colours point_colours = vis_utils.project_img_to_point_cloud( all_points, image, calib_dir, img_idx) print("Point colours shape: ", point_colours.shape) print("Sample 0 of colour: ", 
point_colours[0]) elif use_intensity: adjusted = intensity == 65535 intensity = intensity > 0 intensity = np.expand_dims(intensity, -1) point_colours = np.hstack( (intensity * 255, intensity * 255 - adjusted * 255, intensity * 255 - adjusted * 255)) print("Intensity shape:", point_colours.shape) print("Intensity sample: ", point_colours[0]) # Create Voxel Grid voxel_grid = VoxelGrid() voxel_grid_extents = [[x_min, x_max], [y_min, y_max], [z_min, z_max]] print(voxel_grid_extents) start_time = time.time() voxel_grid.voxelize(all_points, 0.2, voxel_grid_extents) end_time = time.time() print("Voxelized in {} s".format(end_time - start_time)) # Get bounding boxes gt_detections = obj_utils.read_labels(label_dir, img_idx, results=use_results) if gt_detections is None: gt_detections = [] #perspective_utils.to_world(gt_detections, base_dir, img_idx) #perspective_utils.to_perspective(gt_detections, base_dir, img_idx) for entity_str in os.listdir(altPerspect_dir): if os.path.isdir(os.path.join(altPerspect_dir, entity_str)): perspect_detections = perspective_utils.get_detections( base_dir, altPerspect_dir, img_idx, perspID, entity_str, results=use_results) if perspect_detections != None: if use_results: stripped_detections = trust_utils.strip_objs( perspect_detections) gt_detections = gt_detections + stripped_detections else: gt_detections = gt_detections + perspect_detections # Create VtkPointCloud for visualization vtk_point_cloud = VtkPointCloud() if fulcrum_of_points or use_intensity: vtk_point_cloud.set_points(all_points, point_colours) else: vtk_point_cloud.set_points(all_points) vtk_point_cloud.vtk_actor.GetProperty().SetPointSize(pointSize) # Create VtkVoxelGrid for visualization vtk_voxel_grid = VtkVoxelGrid() vtk_voxel_grid.set_voxels(voxel_grid) COLOUR_SCHEME_PAPER = { "Car": (0, 0, 255), # Blue "Pedestrian": (255, 0, 0), # Red "Bus": (0, 0, 255), #Blue "Cyclist": (150, 50, 100), # Purple "Van": (255, 150, 150), # Peach "Person_sitting": (150, 200, 255), # Sky Blue "Truck": (0, 0, 255), # Light Grey "Tram": (150, 150, 150), # Grey "Misc": (100, 100, 100), # Dark Grey "DontCare": (255, 255, 255), # White } # Create VtkBoxes for boxes vtk_boxes = VtkBoxes() vtk_boxes.set_objects(gt_detections, COLOUR_SCHEME_PAPER) #vtk_boxes.COLOUR_SCHEME_KITTI) # Create Axes axes = vtk.vtkAxesActor() axes.SetTotalLength(5, 5, 5) # Create Voxel Grid Renderer in bottom half vtk_renderer = vtk.vtkRenderer() vtk_renderer.AddActor(vtk_point_cloud.vtk_actor) vtk_renderer.AddActor(vtk_voxel_grid.vtk_actor) vtk_renderer.AddActor(vtk_boxes.vtk_actor) #vtk_renderer.AddActor(axes) vtk_renderer.SetBackground(0.2, 0.3, 0.4) # Setup Camera current_cam = vtk_renderer.GetActiveCamera() current_cam.Pitch(pitch) current_cam.Roll(180.0) # Zooms out to fit all points on screen vtk_renderer.ResetCamera() # Zoom in slightly current_cam.Zoom(zoom) # Reset the clipping range to show all points vtk_renderer.ResetCameraClippingRange() # Setup Render Window vtk_render_window = vtk.vtkRenderWindow() vtk_render_window.SetWindowName( "Point Cloud and Voxel Grid, Image {}".format(img_idx)) vtk_render_window.SetSize(1920, 1080) vtk_render_window.AddRenderer(vtk_renderer) # Setup custom interactor style, which handles mouse and key events vtk_render_window_interactor = vtk.vtkRenderWindowInteractor() vtk_render_window_interactor.SetRenderWindow(vtk_render_window) # Add custom interactor to toggle actor visibilities vtk_render_window_interactor.SetInteractorStyle( vis_utils.ToggleActorsInteractorStyle([ vtk_point_cloud.vtk_actor, 
vtk_voxel_grid.vtk_actor, vtk_boxes.vtk_actor, ])) # Show image image = cv2.imread(image_dir + "/%06d.png" % img_idx) cv2.imshow("Press any key to continue", image) cv2.waitKey() # Render in VTK vtk_render_window.Render() vtk_render_window_interactor.Start() # Blocking
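# Hedged sketch of what VoxelGrid.voxelize conceptually does in the demo
# above: quantize points into fixed-size voxels within given extents and
# keep the set of occupied voxel indices. This is a toy version, not the
# wavedata VoxelGrid class.
import numpy as np

def voxelize(points, voxel_size, extents):
    """points: (N, 3); extents: [[xmin, xmax], [ymin, ymax], [zmin, zmax]]."""
    extents = np.asarray(extents, dtype=np.float64)
    # Keep only points inside the extents
    inside = np.all((points >= extents[:, 0]) & (points < extents[:, 1]),
                    axis=1)
    points = points[inside]
    # Convert to integer voxel indices
    indices = np.floor((points - extents[:, 0]) / voxel_size).astype(np.int32)
    # Unique occupied voxels and the number of points in each
    occupied, counts = np.unique(indices, axis=0, return_counts=True)
    return occupied, counts

pts = np.random.default_rng(0).uniform(-5, 5, (1000, 3))
occupied, counts = voxelize(pts, 0.2, [[-5, 5], [-5, 5], [-5, 5]])
print(len(occupied), 'occupied voxels')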
def main(): """Flip RPN Mini Batch Visualization of the mini batch anchors for RpnModel training. Keys: F1: Toggle mini batch anchors F2: Flipped """ anchor_colour_scheme = { "Car": (255, 0, 0), # Red "Pedestrian": (255, 150, 50), # Orange "Cyclist": (150, 50, 100), # Purple "DontCare": (255, 255, 255), # White "Anchor": (150, 150, 150), # Gray "Regressed Anchor": (255, 255, 0), # Yellow "Positive": (0, 255, 255), # Teal "Negative": (255, 0, 255) # Purple } dataset_config_path = mlod.root_dir() + \ '/configs/mb_rpn_demo_cars.config' # dataset_config_path = mlod.root_dir() + \ # '/configs/mb_rpn_demo_people.config' ############################## # Options ############################## # # # Random sample # # # sample_name = None # # # Cars # # # # sample_name = "000001" # sample_name = "000050" # sample_name = "000104" # sample_name = "000112" # sample_name = "000169" # sample_name = "000191" sample_name = "003801" # # # Pedestrians # # # # sample_name = "000000" # sample_name = "000011" # sample_name = "000015" # sample_name = "000028" # sample_name = "000035" # sample_name = "000134" # sample_name = "000167" # sample_name = '000379' # sample_name = '000381' # sample_name = '000397' # sample_name = '000398' # sample_name = '000401' # sample_name = '000407' # sample_name = '000486' # sample_name = '000509' # # Cyclists # # # # sample_name = '000122' # sample_name = '000448' # # # Multiple classes # # # # sample_name = "000764" ############################## # End of Options ############################## # Create Dataset dataset = DatasetBuilder.load_dataset_from_config(dataset_config_path) # Random sample if sample_name is None: sample_idx = np.random.randint(0, dataset.num_samples) sample_name = dataset.sample_list[sample_idx] anchor_strides = dataset.kitti_utils.anchor_strides img_idx = int(sample_name) print("Showing mini batch for sample {}".format(sample_name)) image = cv2.imread(dataset.get_rgb_image_path(sample_name)) image_shape = [image.shape[1], image.shape[0]] # KittiUtils class dataset_utils = dataset.kitti_utils ground_plane = obj_utils.get_road_plane(img_idx, dataset.planes_dir) point_cloud = obj_utils.get_depth_map_point_cloud(img_idx, dataset.calib_dir, dataset.depth_dir, image_shape) points = point_cloud.T # Grab ground truth ground_truth_list = obj_utils.read_labels(dataset.label_dir, img_idx) ground_truth_list = dataset_utils.filter_labels(ground_truth_list) stereo_calib_p2 = calib_utils.read_calibration(dataset.calib_dir, img_idx).p2 ############################## # Flip sample info ############################## start_time = time.time() flipped_image = kitti_aug.flip_image(image) flipped_point_cloud = kitti_aug.flip_point_cloud(point_cloud) flipped_gt_list = [ kitti_aug.flip_label_in_3d_only(obj) for obj in ground_truth_list ] flipped_ground_plane = kitti_aug.flip_ground_plane(ground_plane) flipped_calib_p2 = kitti_aug.flip_stereo_calib_p2(stereo_calib_p2, image_shape) print('flip sample', time.time() - start_time) flipped_points = flipped_point_cloud.T point_colours = vis_utils.project_img_to_point_cloud( points, image, dataset.calib_dir, img_idx) ############################## # Generate anchors ############################## clusters, _ = dataset.get_cluster_info() anchor_generator = grid_anchor_3d_generator.GridAnchor3dGenerator() # Read mini batch info anchors_info = dataset_utils.get_anchors_info(sample_name) all_anchor_boxes_3d = [] all_ious = [] all_offsets = [] for class_idx in range(len(dataset.classes)): anchor_boxes_3d = anchor_generator.generate( 
area_3d=dataset.kitti_utils.area_extents, anchor_3d_sizes=clusters[class_idx], anchor_stride=anchor_strides[class_idx], ground_plane=ground_plane) if len(anchors_info[class_idx]) > 0: indices, ious, offsets, classes = anchors_info[class_idx] # Get non empty anchors from the indices non_empty_anchor_boxes_3d = anchor_boxes_3d[indices] all_anchor_boxes_3d.extend(non_empty_anchor_boxes_3d) all_ious.extend(ious) all_offsets.extend(offsets) if not len(all_anchor_boxes_3d) > 0: # Exit early if anchors_info is empty print("No anchors, Please try a different sample") return # Convert to ndarrays all_anchor_boxes_3d = np.asarray(all_anchor_boxes_3d) all_ious = np.asarray(all_ious) all_offsets = np.asarray(all_offsets) ############################## # Flip anchors ############################## start_time = time.time() # Flip anchors and offsets flipped_anchor_boxes_3d = kitti_aug.flip_boxes_3d(all_anchor_boxes_3d, flip_ry=False) all_offsets[:, 0] = -all_offsets[:, 0] print('flip anchors and offsets', time.time() - start_time) # Overwrite with flipped things all_anchor_boxes_3d = flipped_anchor_boxes_3d points = flipped_points ground_truth_list = flipped_gt_list ground_plane = flipped_ground_plane ############################## # Mini batch sampling ############################## # Sample an RPN mini batch from the non empty anchors mini_batch_utils = dataset.kitti_utils.mini_batch_utils mb_mask_tf, _ = mini_batch_utils.sample_rpn_mini_batch(all_ious) config = tf.ConfigProto() config.gpu_options.allow_growth = True sess = tf.Session(config=config) mb_mask = sess.run(mb_mask_tf) mb_anchor_boxes_3d = all_anchor_boxes_3d[mb_mask] mb_anchor_ious = all_ious[mb_mask] mb_anchor_offsets = all_offsets[mb_mask] # ObjectLabel list that hold all boxes to visualize obj_list = [] # Convert the mini_batch anchors to object list for i in range(len(mb_anchor_boxes_3d)): if mb_anchor_ious[i] > mini_batch_utils.rpn_pos_iou_range[0]: obj_type = "Positive" else: obj_type = "Negative" obj = box_3d_encoder.box_3d_to_object_label(mb_anchor_boxes_3d[i], obj_type) obj_list.append(obj) # Convert all non-empty anchors to object list non_empty_anchor_objs = \ [box_3d_encoder.box_3d_to_object_label( anchor_box_3d, obj_type='Anchor') for anchor_box_3d in all_anchor_boxes_3d] ############################## # Regress Positive Anchors ############################## # Convert anchor_boxes_3d to anchors and apply offsets mb_pos_mask = mb_anchor_ious > mini_batch_utils.rpn_pos_iou_range[0] mb_pos_anchor_boxes_3d = mb_anchor_boxes_3d[mb_pos_mask] mb_pos_anchor_offsets = mb_anchor_offsets[mb_pos_mask] mb_pos_anchors = box_3d_encoder.box_3d_to_anchor(mb_pos_anchor_boxes_3d) regressed_pos_anchors = anchor_encoder.offset_to_anchor( mb_pos_anchors, mb_pos_anchor_offsets) # Convert regressed anchors to ObjectLabels for visualization regressed_anchor_boxes_3d = box_3d_encoder.anchors_to_box_3d( regressed_pos_anchors, fix_lw=True) regressed_anchor_objs = \ [box_3d_encoder.box_3d_to_object_label( box_3d, obj_type='Regressed Anchor') for box_3d in regressed_anchor_boxes_3d] ############################## # Visualization ############################## cv2.imshow('{} flipped'.format(sample_name), flipped_image) cv2.waitKey() # Create VtkAxes axes = vtk.vtkAxesActor() axes.SetTotalLength(5, 5, 5) # Create VtkBoxes for mini batch anchors vtk_pos_anchor_boxes = VtkBoxes() vtk_pos_anchor_boxes.set_objects(obj_list, anchor_colour_scheme) # VtkBoxes for non empty anchors vtk_non_empty_anchors = VtkBoxes() 
vtk_non_empty_anchors.set_objects(non_empty_anchor_objs, anchor_colour_scheme) vtk_non_empty_anchors.set_line_width(0.1) # VtkBoxes for regressed anchors vtk_regressed_anchors = VtkBoxes() vtk_regressed_anchors.set_objects(regressed_anchor_objs, anchor_colour_scheme) vtk_regressed_anchors.set_line_width(5.0) # Create VtkBoxes for ground truth vtk_gt_boxes = VtkBoxes() vtk_gt_boxes.set_objects(ground_truth_list, anchor_colour_scheme, show_orientations=True) vtk_point_cloud = VtkPointCloud() vtk_point_cloud.set_points(points, point_colours) vtk_ground_plane = VtkGroundPlane() vtk_ground_plane.set_plane(ground_plane, dataset.kitti_utils.bev_extents) # Create Voxel Grid Renderer in bottom half vtk_renderer = vtk.vtkRenderer() vtk_renderer.AddActor(vtk_point_cloud.vtk_actor) vtk_renderer.AddActor(vtk_non_empty_anchors.vtk_actor) vtk_renderer.AddActor(vtk_pos_anchor_boxes.vtk_actor) vtk_renderer.AddActor(vtk_regressed_anchors.vtk_actor) vtk_renderer.AddActor(vtk_gt_boxes.vtk_actor) vtk_renderer.AddActor(vtk_ground_plane.vtk_actor) vtk_renderer.AddActor(axes) vtk_renderer.SetBackground(0.2, 0.3, 0.4) # Setup Camera current_cam = vtk_renderer.GetActiveCamera() current_cam.Pitch(160.0) current_cam.Roll(180.0) # Zooms out to fit all points on screen vtk_renderer.ResetCamera() # Zoom in slightly current_cam.Zoom(2.5) # Reset the clipping range to show all points vtk_renderer.ResetCameraClippingRange() # Setup Render Window vtk_render_window = vtk.vtkRenderWindow() vtk_render_window.SetWindowName("RPN Mini Batch") vtk_render_window.SetSize(900, 500) vtk_render_window.AddRenderer(vtk_renderer) # Setup custom interactor style, which handles mouse and key events vtk_render_window_interactor = vtk.vtkRenderWindowInteractor() vtk_render_window_interactor.SetRenderWindow(vtk_render_window) vtk_render_window_interactor.SetInteractorStyle( vis_utils.ToggleActorsInteractorStyle([ vtk_non_empty_anchors.vtk_actor, vtk_pos_anchor_boxes.vtk_actor, vtk_regressed_anchors.vtk_actor, vtk_ground_plane.vtk_actor, ])) # Render in VTK vtk_render_window.Render() vtk_render_window_interactor.Start()
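# Hedged sketch of the anchor offset decode used above (offset_to_anchor):
# centres are shifted proportionally to the anchor size and dimensions are
# scaled exponentially, mirroring the usual log-space box encoding. Not the
# project's implementation; anchors are assumed [x, y, z, dim_x, dim_y, dim_z].
import numpy as np

def offsets_to_anchors(anchors, offsets):
    anchors = np.asarray(anchors, dtype=np.float64)
    offsets = np.asarray(offsets, dtype=np.float64)
    decoded = np.empty_like(anchors)
    # Centre offsets are normalized by the anchor dimensions
    decoded[:, 0:3] = anchors[:, 0:3] + offsets[:, 0:3] * anchors[:, 3:6]
    # Dimension offsets are in log space
    decoded[:, 3:6] = anchors[:, 3:6] * np.exp(offsets[:, 3:6])
    return decoded

anchors = np.array([[0.0, 1.0, 20.0, 3.9, 1.6, 1.5]])
offsets = np.array([[0.1, 0.0, -0.05, 0.2, 0.0, -0.1]])
print(offsets_to_anchors(anchors, offsets))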
def visualize_objects(objects, img_idx, show_results, alt_persp, perspID, fulcrum_of_points=True, use_intensity=False, view_received_detections=True, receive_from_perspective=-1, only_receive_dets=False, compare_pcs=False, show_3d_point_count=False, show_orientation=cfg.VISUALIZE_ORIENTATION, final_results=False, show_score=False, compare_with_gt=True, show_image=True, vis_scores=False): if cfg.VISUALIZE_AGG_EVALS: show_image = False # Setting Paths cam = 2 dataset_dir = cfg.DATASET_DIR print("dataset_dir: ", cfg.DATASET_DIR) if img_idx == -1: print( "Please set the TEST_IDX in the config.py file to see a specific index." ) img_idx = random.randint(0, 101) print("Using random index: ", img_idx) perspStr = '%07d' % perspID altPerspect_dir = os.path.join(dataset_dir, 'alt_perspective') if alt_persp: dataset_dir = dataset_dir + '/alt_perspective/' + perspStr else: perspID = const.ego_id() if show_results: label_dir = os.path.join(dataset_dir, 'predictions') else: label_dir = os.path.join(dataset_dir, 'label_2') COLOUR_SCHEME = { "Car": (0, 0, 255), # Blue "Pedestrian": (255, 0, 0), # Red "Bus": (0, 0, 255), #Blue "Cyclist": (150, 50, 100), # Purple "Van": (255, 150, 150), # Peach "Person_sitting": (150, 200, 255), # Sky Blue "Truck": (0, 0, 255), # Light Grey "Tram": (150, 150, 150), # Grey "Misc": (100, 100, 100), # Dark Grey "DontCare": (255, 255, 255), # White "Received": (255, 150, 150), # Peach "OwnObject": (51, 255, 255), # Cyan "GroundTruth": (0, 255, 0), # Green } # Load points_in_3d_boxes for each object if vis_scores: text_positions = [] text_labels = [] else: text_positions = None text_labels = None if objects is not None: for obj in objects: if vis_scores: text_positions.append(obj.t) txt = '{}'.format(obj.score) text_labels.append(txt) if compare_with_gt: label_dir = os.path.join(dataset_dir, cfg.LABEL_DIR) real_gt_data = obj_utils.read_labels(label_dir, img_idx, results=False) if real_gt_data is not None: for obj in real_gt_data: obj.type = "GroundTruth" objects = objects + real_gt_data vis_utils.visualize_objects_in_pointcloud( objects, COLOUR_SCHEME, dataset_dir, img_idx, fulcrum_of_points, use_intensity, receive_from_perspective, compare_pcs, show_3d_point_count, show_orientation, final_results, show_score, compare_with_gt, show_image, text_positions, text_labels)
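# Hedged sketch of the score-label preparation done above: each detection
# contributes a text anchor (its translation) and a label string with its
# confidence, which the visualizer can draw next to the box. The Detection
# type below is a toy stand-in for the repo's object labels.
from collections import namedtuple

Detection = namedtuple('Detection', ['type', 't', 'score'])

def build_score_labels(objects):
    text_positions, text_labels = [], []
    for obj in objects:
        text_positions.append(obj.t)
        text_labels.append('{:.2f}'.format(obj.score))
    return text_positions, text_labels

dets = [Detection('Car', (2.0, 1.5, 20.0), 0.91),
        Detection('Pedestrian', (-1.0, 1.6, 8.5), 0.47)]
positions, labels = build_score_labels(dets)
print(list(zip(labels, positions)))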
def load_samples(self, indices): """ Loads input-output data for a set of samples. Should only be called when a particular sample dict is required. Otherwise, samples should be provided by the next_batch function Args: indices: A list of sample indices from the dataset.sample_list to be loaded Return: samples: a list of data sample dicts """ sample_dicts = [] for sample_idx in indices: sample = self.sample_list[sample_idx] sample_name = sample.name # Only read labels if they exist if self.has_labels: # Read mini batch first to see if it is empty anchors_info = self.get_anchors_info(sample_name) if (not anchors_info) and self.train_val_test == 'train' \ and (not self.train_on_all_samples): empty_sample_dict = { constants.KEY_SAMPLE_NAME: sample_name, constants.KEY_ANCHORS_INFO: anchors_info } return [empty_sample_dict] obj_labels = obj_utils.read_labels(self.label_dir, int(sample_name)) # Only use objects that match dataset classes obj_labels = self.kitti_utils.filter_labels(obj_labels) else: obj_labels = None anchors_info = [] label_anchors = np.zeros((1, 6)) label_boxes_3d = np.zeros((1, 7)) label_classes = np.zeros(1) img_idx = int(sample_name) # Load image (BGR -> RGB) cv_bgr_image = cv2.imread(self.get_rgb_image_path(sample_name)) rgb_image = cv_bgr_image[..., ::-1] image_shape = rgb_image.shape[0:2] image_input = rgb_image # Get ground plane ground_plane = obj_utils.get_road_plane(int(sample_name), self.planes_dir) # Get calibration stereo_calib = calib_utils.read_calibration( self.calib_dir, int(sample_name)) stereo_calib_p2 = stereo_calib.p2 point_cloud = self.kitti_utils.get_point_cloud( self.bev_source, img_idx, image_shape) # Augmentation (Flipping) # WZN: the flipping augmentation flips both image(in camera frame), pointcloud (in Lidar frame), and calibration #matrix(between cam and Lidar) so the correspondence is still true. if kitti_aug.AUG_FLIPPING in sample.augs: image_input = kitti_aug.flip_image(image_input) point_cloud = kitti_aug.flip_point_cloud(point_cloud) obj_labels = [ kitti_aug.flip_label_in_3d_only(obj) for obj in obj_labels ] ground_plane = kitti_aug.flip_ground_plane(ground_plane) stereo_calib_p2 = kitti_aug.flip_stereo_calib_p2( stereo_calib_p2, image_shape) # Augmentation (Image Jitter) if kitti_aug.AUG_PCA_JITTER in sample.augs: image_input[:, :, 0:3] = kitti_aug.apply_pca_jitter(image_input[:, :, 0:3]) if obj_labels is not None: label_boxes_3d = np.asarray([ box_3d_encoder.object_label_to_box_3d(obj_label) for obj_label in obj_labels ]) label_classes = [ self.kitti_utils.class_str_to_index(obj_label.type) for obj_label in obj_labels ] label_classes = np.asarray(label_classes, dtype=np.int32) label_h2d = [ obj_label.y2 - obj_label.y1 for obj_label in obj_labels ] # Return empty anchors_info if no ground truth after filtering if len(label_boxes_3d) == 0: anchors_info = [] if self.train_on_all_samples: # If training without any positive labels, we cannot # set these to zeros, because later on the offset calc # uses log on these anchors. So setting any arbitrary # number here that does not break the offset calculation # should work, since the negative samples won't be # regressed in any case. 
dummy_anchors = [[-1000, -1000, -1000, 1, 1, 1]] label_anchors = np.asarray(dummy_anchors) dummy_boxes = [[-1000, -1000, -1000, 1, 1, 1, 0]] label_boxes_3d = np.asarray(dummy_boxes) else: label_anchors = np.zeros((1, 6)) label_boxes_3d = np.zeros((1, 7)) label_classes = np.zeros(1) label_h2d = np.zeros(1) else: label_anchors = box_3d_encoder.box_3d_to_anchor( label_boxes_3d, ortho_rotate=True) # Create BEV maps bev_images = self.kitti_utils.create_bev_maps( point_cloud, ground_plane, output_indices=self.output_indices) #WZN produce input for sparse pooling if self.output_indices: voxel_indices = bev_images[1] pts_in_voxel = bev_images[2] bev_images = bev_images[0] height_maps = bev_images.get('height_maps') density_map = bev_images.get('density_map') bev_input = np.dstack((*height_maps, density_map)) #shape: (H, W, C) #import pdb #pdb.set_trace() #WZN produce input for sparse pooling if self.output_indices: feat_stride = 2**(int( self.config.use_pyramid_level_at_SHPL[-1])) spinput = gen_sparse_pooling_input_avod(pts_in_voxel, voxel_indices, stereo_calib, \ [image_shape[1], image_shape[0]], bev_input.shape[0:2]) spinput = produce_sparse_pooling_input( spinput, stride=[feat_stride, feat_stride]) sparse_pooling_input = [spinput] #sparse_pooling_input1 = produce_sparse_pooling_input(gen_sparse_pooling_input_avod(pts_in_voxel,voxel_indices, # stereo_calib,[image_shape[1],image_shape[0]],bev_input.shape[0:2]),stride=[4,4]) #for retinaNet #stereo_calib,[image_shape[1],image_shape[0]],bev_input.shape[0:2]),stride=[1,1]) #WZN: Note here avod padded the vgg input by 4, so add it #bev_input_padded = np.copy(bev_input.shape[0:2]) #bev_input_padded[0] = bev_input_padded[0]+4 #sparse_pooling_input2 = produce_sparse_pooling_input(gen_sparse_pooling_input_avod(pts_in_voxel,voxel_indices, # stereo_calib,[image_shape[1],image_shape[0]],bev_input_padded),stride=[8,8]) #sparse_pooling_input = [sparse_pooling_input1,sparse_pooling_input2] else: sparse_pooling_input = None sample_dict = { constants.KEY_LABEL_BOXES_3D: label_boxes_3d, constants.KEY_LABEL_ANCHORS: label_anchors, constants.KEY_LABEL_CLASSES: label_classes, constants.KEY_LABEL_H2D: label_h2d, constants.KEY_IMAGE_INPUT: image_input, constants.KEY_BEV_INPUT: bev_input, #WZN: for sparse pooling constants.KEY_SPARSE_POOLING_INPUT: sparse_pooling_input, constants.KEY_ANCHORS_INFO: anchors_info, constants.KEY_POINT_CLOUD: point_cloud, constants.KEY_GROUND_PLANE: ground_plane, constants.KEY_STEREO_CALIB_P2: stereo_calib_p2, constants.KEY_SAMPLE_NAME: sample_name, constants.KEY_SAMPLE_AUGS: sample.augs } sample_dicts.append(sample_dict) return sample_dicts
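# Hedged illustration of the dummy-anchor note above: the offset targets use
# a log of the anchor dimensions, so an all-zero placeholder anchor would
# produce inf/NaN targets, while any positive dummy size (here 1 m) keeps
# the arithmetic finite. Toy numbers, not the project's encoding code.
import numpy as np

gt_dims = np.array([3.9, 1.6, 1.5])

zero_anchor_dims = np.array([0.0, 0.0, 0.0])
dummy_anchor_dims = np.array([1.0, 1.0, 1.0])

with np.errstate(divide='ignore'):
    print('zero anchor: ', np.log(gt_dims / zero_anchor_dims))   # inf
    print('dummy anchor:', np.log(gt_dims / dummy_anchor_dims))  # finite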
def visualize(img_idx, show_results, alt_persp, perspID, fulcrum_of_points, use_intensity, view_received_detections, receive_from_perspective, receive_det_id, only_receive_dets, change_rec_colour, compare_pcs, alt_colour_peach=False, show_3d_point_count=False, show_orientation=cfg.VISUALIZE_ORIENTATION, final_results=False, show_score=False, compare_with_gt=False, show_image=True, filter_area=cfg.VISUALIZE_AREA_FILTER): # Setting Paths cam = 2 dataset_dir = cfg.DATASET_DIR print("dataset_dir: ", cfg.DATASET_DIR) if img_idx == -1: print("Please set the TEST_IDX in the config.py file to see a specific index.") img_idx = random.randint(0,101) print("Using random index: ", img_idx) global text_labels global text_positions global COLOUR_SCHEME if show_3d_point_count or show_score: text_labels = [] text_positions = [] perspStr = '%07d' % perspID altPerspect_dir = os.path.join(dataset_dir,'alt_perspective') if alt_persp: dataset_dir = dataset_dir + '/alt_perspective/' + perspStr else: perspID = const.ego_id() if show_results: label_dir = os.path.join(dataset_dir, 'predictions') else: label_dir = os.path.join(dataset_dir, 'label_2') # Load points_in_3d_boxes for each object points_dict = points_in_3d_boxes.load_points_in_3d_boxes(img_idx, perspID) gt_detections = [] # Get bounding boxes if final_results: if filter_area: label_dir = os.path.join(dataset_dir, cfg.FINAL_DETS_SUBDIR_AF) else: label_dir = os.path.join(dataset_dir, cfg.FINAL_DETS_SUBDIR) gt_detections = obj_utils.read_labels(label_dir, img_idx, results=show_results) if compare_with_gt and not show_results: for obj in gt_detections: obj.type = "GroundTruth" addScoreText(gt_detections, show_3d_point_count, show_score) else: if (not view_received_detections or receive_from_perspective != -1) and not only_receive_dets: gt_detections = perspective_utils.get_detections(dataset_dir, dataset_dir, img_idx, perspID, perspID, results=show_results, filter_area=filter_area) setPointsText(gt_detections, points_dict, show_3d_point_count) addScoreTextTrustObjs(gt_detections, show_3d_point_count, show_score) gt_detections = trust_utils.strip_objs(gt_detections) gt_detections[0].type = "OwnObject" if view_received_detections: stripped_detections = [] if receive_from_perspective == -1: perspect_detections = perspective_utils.get_all_detections(img_idx, perspID, show_results, filter_area) if change_rec_colour: for obj_list in perspect_detections: obj_list[0].obj.type = "OwnObject" if obj_list[0].detector_id == perspID: if compare_with_gt: if obj_list is not None: for obj in obj_list: obj.obj.type = "GroundTruth" continue color_str = "Received{:07d}".format(obj_list[0].detector_id) prime_val = obj_list[0].detector_id * 47 entity_colour = (prime_val + 13 % 255, (prime_val / 255) % 255, prime_val % 255) COLOUR_SCHEME[color_str] = entity_colour first_obj = True for obj in obj_list: if first_obj: first_obj = False continue obj.obj.type = color_str for obj_list in perspect_detections: setPointsText(obj_list, points_dict, show_3d_point_count) addScoreTextTrustObjs(obj_list, show_3d_point_count, show_score) stripped_detections = trust_utils.strip_objs_lists(perspect_detections) else: receive_entity_str = '{:07d}'.format(receive_from_perspective) receive_dir = os.path.join(altPerspect_dir, receive_entity_str) if os.path.isdir(receive_dir): print("Using detections from: ", receive_dir) perspect_detections = perspective_utils.get_detections(dataset_dir, receive_dir, img_idx, receive_from_perspective, receive_entity_str, results=show_results, filter_area=filter_area) 
if perspect_detections is not None: color_str = "Received{:07d}".format(receive_from_perspective) prime_val = receive_from_perspective * 47 entity_colour = (prime_val + 13 % 255, (prime_val / 255) % 255, prime_val % 255) COLOUR_SCHEME[color_str] = entity_colour first_obj = True for obj in perspect_detections: if first_obj: first_obj = False continue obj.obj.type = color_str setPointsText(perspect_detections, points_dict, show_3d_point_count) addScoreTextTrustObjs(perspect_detections, show_3d_point_count, show_score) stripped_detections = trust_utils.strip_objs(perspect_detections) else: print("Could not find directory: ", receive_dir) if receive_det_id != -1 and len(stripped_detections) > 0: single_det = [] single_det.append(stripped_detections[receive_det_id]) stripped_detections = single_det if change_rec_colour and alt_colour_peach: for obj in stripped_detections: obj.type = "Received" if len(stripped_detections) > 0: stripped_detections[0].type = "OwnObject" if only_receive_dets: gt_detections = stripped_detections print("Not using main perspective detections") else: gt_detections = gt_detections + stripped_detections if compare_with_gt and show_results: label_dir = os.path.join(dataset_dir, cfg.LABEL_DIR) real_gt_data = obj_utils.read_labels(label_dir, img_idx, results=False) for obj in real_gt_data: if obj.type != "DontCare": obj.type = "GroundTruth" gt_detections = gt_detections + real_gt_data visualize_objects_in_pointcloud(gt_detections, COLOUR_SCHEME, dataset_dir, img_idx, fulcrum_of_points, use_intensity, receive_from_perspective, compare_pcs, show_3d_point_count, show_orientation, final_results, show_score, compare_with_gt, show_image, _text_positions=text_positions, _text_labels=text_labels)
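# Hedged note on the entity colour lines above: `prime_val + 13 % 255`
# parses as `prime_val + (13 % 255)`, so the red channel can exceed 255, and
# `prime_val / 255` is a float in Python 3. A deterministic per-detector
# colour helper with the presumably intended wrap-around could look like
# this sketch (not the project's code).
def detector_colour(detector_id):
    """Map a detector id to a stable (r, g, b) tuple in 0-255."""
    prime_val = detector_id * 47
    return ((prime_val + 13) % 255,
            (prime_val // 255) % 255,
            prime_val % 255)

print(detector_colour(48133))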