Example #1
def fetch_data(frame_idx):
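    """Fetch one frame and build a single training sample.

    Loads the RGB-colored camera points, applies the optional crop
    augmentation and the configured augmentation function, builds the
    multi-level graph, selects the input features, assigns per-vertex
    class labels, and encodes the 3D bounding boxes with box_encoding_fn.
    """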
    cam_rgb_points = dataset.get_cam_points_in_image_with_rgb(frame_idx,
        config['downsample_by_voxel_size'])
    box_label_list = dataset.get_label(frame_idx)
    if 'crop_aug' in train_config:
        cam_rgb_points, box_label_list = sampler.crop_aug(cam_rgb_points,
            box_label_list,
            sample_rate=train_config['crop_aug']['sample_rate'],
            parser_kwargs=train_config['crop_aug']['parser_kwargs'])
    cam_rgb_points, box_label_list = aug_fn(cam_rgb_points, box_label_list)
    graph_generate_fn = get_graph_generate_fn(config['graph_gen_method'])
    (vertex_coord_list, keypoint_indices_list, edges_list) = \
        graph_generate_fn(cam_rgb_points.xyz, **config['graph_gen_kwargs'])
    if config['input_features'] == 'irgb':
        input_v = cam_rgb_points.attr
    elif config['input_features'] == '0rgb':
        input_v = np.hstack([np.zeros((cam_rgb_points.attr.shape[0], 1)),
            cam_rgb_points.attr[:, 1:]])
    elif config['input_features'] == '0000':
        input_v = np.zeros_like(cam_rgb_points.attr)
    elif config['input_features'] == 'i000':
        input_v = np.hstack([cam_rgb_points.attr[:, [0]],
            np.zeros((cam_rgb_points.attr.shape[0], 3))])
    elif config['input_features'] == 'i':
        input_v = cam_rgb_points.attr[:, [0]]
    elif config['input_features'] == '0':
        input_v = np.zeros((cam_rgb_points.attr.shape[0], 1))
    last_layer_graph_level = config['model_kwargs'][
        'layer_configs'][-1]['graph_level']
    last_layer_points_xyz = vertex_coord_list[last_layer_graph_level+1]
    if config['label_method'] == 'yaw':
        cls_labels, boxes_3d, valid_boxes, label_map = \
            dataset.assign_classaware_label_to_points(box_label_list,
            last_layer_points_xyz,
            expend_factor=train_config.get('expend_factor', (1.0, 1.0, 1.0)))
    if config['label_method'] == 'Car':
        cls_labels, boxes_3d, valid_boxes, label_map = \
            dataset.assign_classaware_car_label_to_points(box_label_list,
            last_layer_points_xyz,
            expend_factor=train_config.get('expend_factor', (1.0, 1.0, 1.0)))
    if config['label_method'] == 'Pedestrian_and_Cyclist':
        (cls_labels, boxes_3d, valid_boxes, label_map) =\
            dataset.assign_classaware_ped_and_cyc_label_to_points(
            box_label_list, last_layer_points_xyz,
            expend_factor=train_config.get('expend_factor', (1.0, 1.0, 1.0)))
    encoded_boxes = box_encoding_fn(cls_labels, last_layer_points_xyz,
        boxes_3d, label_map)
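    # reducing memory usage by casting to 32 bits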
    input_v = input_v.astype(np.float32)
    vertex_coord_list = [p.astype(np.float32) for p in vertex_coord_list]
    keypoint_indices_list = [e.astype(np.int32) for e in keypoint_indices_list]
    edges_list = [e.astype(np.int32) for e in edges_list]
    cls_labels = cls_labels.astype(np.int32)
    encoded_boxes = encoded_boxes.astype(np.float32)
    valid_boxes = valid_boxes.astype(np.float32)
    return (input_v, vertex_coord_list, keypoint_indices_list, edges_list,
        cls_labels, encoded_boxes, valid_boxes)
Example #2
def fetch_data(frame_idx):
    cam_points = dataset.get_cam_points_in_image(
        frame_idx, config['downsample_by_voxel_size'])
    box_label_list = dataset.get_label(frame_idx)
    cam_points, box_label_list = aug_fn(cam_points, box_label_list)
    graph_generate_fn = get_graph_generate_fn(config['graph_gen_method'])
    (vertex_coord_list, keypoint_indices_list,
     edges_list) = graph_generate_fn(cam_points.xyz,
                                     **config['graph_gen_kwargs'])
    input_v = cam_points.attr[:, [0]]
    last_layer_graph_level = config['model_kwargs']['layer_configs'][-1][
        'graph_level']
    last_layer_points_xyz = vertex_coord_list[last_layer_graph_level + 1]
    if config['label_method'] == 'yaw':
        (cls_labels, boxes_3d, valid_boxes, label_map) =\
            dataset.assign_classaware_label_to_points(box_label_list,
                last_layer_points_xyz, expend_factor=(1.0, 1.0, 1.0))
    if config['label_method'] == 'Car':
        cls_labels, boxes_3d, valid_boxes, label_map =\
            dataset.assign_classaware_car_label_to_points(box_label_list,
                last_layer_points_xyz,  expend_factor=(1.0, 1.0, 1.0))
    if config['label_method'] == 'Pedestrian_and_Cyclist':
        cls_labels, boxes_3d, valid_boxes, label_map =\
            dataset.assign_classaware_ped_and_cyc_label_to_points(
                box_label_list,
                last_layer_points_xyz,  expend_factor=(1.0, 1.0, 1.0))
    encoded_boxes = box_encoding_fn(cls_labels, last_layer_points_xyz,
                                    boxes_3d, label_map)
    # reducing memory usage by casting to 32 bits
    input_v = input_v.astype(np.float32)
    vertex_coord_list = [p.astype(np.float32) for p in vertex_coord_list]
    keypoint_indices_list = [e.astype(np.int32) for e in keypoint_indices_list]
    edges_list = [e.astype(np.int32) for e in edges_list]
    cls_labels = cls_labels.astype(np.int32)
    encoded_boxes = encoded_boxes.astype(np.float32)
    valid_boxes = valid_boxes.astype(np.float32)
    return (input_v, vertex_coord_list, keypoint_indices_list, edges_list,
            cls_labels, encoded_boxes, valid_boxes)
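Note: both examples return the same tuple of 32-bit NumPy arrays. The sketch below is a hypothetical usage illustration, not part of the original source; it assumes `fetch_data` and its module-level dependencies (`dataset`, `config`, `aug_fn`, `box_encoding_fn`) are already set up as in the examples above, and the names `frame_indices` and `sample_buffer` are illustrative only.

import numpy as np

# Pre-fetch a handful of frames into a simple in-memory buffer.
frame_indices = range(4)
sample_buffer = [fetch_data(i) for i in frame_indices]

for (input_v, vertex_coord_list, keypoint_indices_list, edges_list,
     cls_labels, encoded_boxes, valid_boxes) in sample_buffer:
    # The casting step in fetch_data guarantees 32-bit arrays.
    assert input_v.dtype == np.float32
    assert cls_labels.dtype == np.int32
    assert encoded_boxes.dtype == np.float32
    assert valid_boxes.dtype == np.float32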
Example #3
start_time = time.time()
if VISUALIZATION_LEVEL == 2:
    pcd = open3d.PointCloud()
    line_set = open3d.LineSet()
    graph_line_set = open3d.LineSet()
# provide input ======================================================
cam_rgb_points = dataset.get_cam_points_in_image_with_rgb(
    frame_idx, config['downsample_by_voxel_size'])
calib = dataset.get_calib(frame_idx)
image = dataset.get_image(frame_idx)
if not IS_TEST:
    box_label_list = dataset.get_label(frame_idx)
input_time = time.time()
time_dict['fetch input'] = time_dict.get('fetch input', 0) \
    + input_time - start_time
graph_generate_fn = get_graph_generate_fn(config['graph_gen_method'])
(vertex_coord_list, keypoint_indices_list, edges_list) = \
    graph_generate_fn(
        cam_rgb_points.xyz, **config['runtime_graph_gen_kwargs'])
graph_time = time.time()
time_dict['gen graph'] = time_dict.get('gen graph', 0) \
    + graph_time - input_time
if config['input_features'] == 'irgb':
    input_v = cam_rgb_points.attr
elif config['input_features'] == '0rgb':
    input_v = np.hstack([
        np.zeros((cam_rgb_points.attr.shape[0], 1)),
        cam_rgb_points.attr[:, 1:]
    ])
elif config['input_features'] == '0000':
    input_v = np.zeros_like(cam_rgb_points.attr)