Exemplo n.º 1
0
def vis_crop_aug_sampler(crop_filename, dataset):
    """Visualize the effect of crop augmentation on the first 10 frames.

    For each frame: loads labels and RGB-colored camera points from
    *dataset*, pastes in sampled object crops via CropAugSampler, applies a
    random global box rotation augmentation, and renders the result.
    """
    # Per-class duplication rates for crop sampling.
    class_sample_rate = {"Car": 2, "Pedestrian": 10, "Cyclist": 10}
    # Placement/overlap parameters for the crop parser.
    crop_parser_kwargs = {
        'max_overlap_num_allowed': 50,
        'max_trails': 100,
        'method_name': 'normal',
        'yaw_std': np.pi / 16,
        'expand_factor': (1.1, 1.1, 1.1),
        'auto_box_height': True,
        'overlap_mode': 'box_and_point',
        'max_overlap_rate': 1e-6,
        'appr_factor': 100,
        'must_have_ground': True,
    }
    # Post-crop augmentation: random global rotation of boxes.
    rotation_aug_configs = [{
        'method_name': 'random_box_global_rotation',
        'method_kwargs': {
            'max_overlap_num_allowed': 100,
            'max_trails': 100,
            'appr_factor': 100,
            'method_name': 'normal',
            'yaw_std': np.pi / 8,
            'expend_factor': (1.1, 1.1, 1.1)
        }
    }]

    sampler = CropAugSampler(crop_filename)
    for frame_idx in range(10):
        frame_labels = dataset.get_label(frame_idx)
        points = dataset.get_cam_points_in_image_with_rgb(frame_idx)
        points, frame_labels = sampler.crop_aug(
            points,
            frame_labels,
            sample_rate=class_sample_rate,
            parser_kwargs=crop_parser_kwargs)
        # Rebuild the augmentation fn each iteration, as the original did.
        aug_fn = preprocess.get_data_aug(rotation_aug_configs)
        points, frame_labels = aug_fn(points, frame_labels)
        dataset.vis_points(points, frame_labels, expend_factor=(1.1, 1.1, 1.1))
Exemplo n.º 2
0
                       num_classes=config['num_classes'])
NUM_CLASSES = dataset.num_classes

# Evaluate every file unless a non-negative NUM_TEST_SAMPLE is configured.
if 'NUM_TEST_SAMPLE' in train_config and train_config['NUM_TEST_SAMPLE'] >= 0:
    NUM_TEST_SAMPLE = train_config['NUM_TEST_SAMPLE']
else:
    NUM_TEST_SAMPLE = dataset.num_files

# One encoding method drives the encoding length and both codec functions.
_box_encoding_method = config['box_encoding_method']
BOX_ENCODING_LEN = get_encoding_len(_box_encoding_method)
box_encoding_fn = get_box_encoding_fn(_box_encoding_method)
box_decoding_fn = get_box_decoding_fn(_box_encoding_method)

aug_fn = preprocess.get_data_aug(train_config['data_aug_configs'])

# The crop sampler is only created when crop augmentation is configured;
# code that uses `sampler` must guard on the same key.
if 'crop_aug' in train_config:
    sampler = CropAugSampler(train_config['crop_aug']['crop_filename'])

def fetch_data(frame_idx):
    """Load one frame's RGB-colored camera points and box labels, applying
    crop-based ground-truth augmentation when configured.

    Args:
        frame_idx: index of the frame to load from the module-level `dataset`.

    NOTE(review): this snippet appears truncated — it builds the augmented
    points/labels but never returns or uses them; confirm against the
    original file.
    """
    cam_rgb_points = dataset.get_cam_points_in_image_with_rgb(
        frame_idx, config['downsample_by_voxel_size'])
    box_label_list = dataset.get_label(frame_idx)
    # `sampler` is a module-level global created only when 'crop_aug' is
    # present in train_config, hence the matching guard here.
    if 'crop_aug' in train_config:
        cam_rgb_points, box_label_list = sampler.crop_aug(
            cam_rgb_points,
            box_label_list,
            sample_rate=train_config['crop_aug']['sample_rate'],
            parser_kwargs=train_config['crop_aug']['parser_kwargs'])
Exemplo n.º 3
0
def fetch_data(dataset, frame_idx, train_config, config):
    """Fetch and preprocess one sample for training.

    Loads the frame's camera points and labels, applies optional crop
    augmentation and the configured data augmentations, builds the
    multi-level graph, selects the input feature columns, assigns
    per-point class labels, and encodes ground-truth boxes.

    Args:
        dataset: dataset object providing points, labels, and the
            class-aware label assigners.
        frame_idx: index of the frame to fetch.
        train_config: training configuration dict ('data_aug_configs',
            optional 'crop_aug' and 'expend_factor').
        config: model/data configuration dict.

    Returns:
        Tuple (input_v, vertex_coord_list, keypoint_indices_list,
        edges_list, cls_labels, encoded_boxes, valid_boxes) cast to
        float32/int32 numpy dtypes.

    Raises:
        ValueError: if config['input_features'] or config['label_method']
            is not a supported value. (The original code silently fell
            through to a NameError in those cases.)
    """
    aug_fn = preprocess.get_data_aug(train_config['data_aug_configs'])
    box_encoding_fn = get_box_encoding_fn(config['box_encoding_method'])
    graph_generate_fn = get_graph_generate_fn(config['graph_gen_method'])

    cam_rgb_points = dataset.get_cam_points_in_image_with_rgb(
        frame_idx, config['downsample_by_voxel_size'])
    box_label_list = dataset.get_label(frame_idx)
    if 'crop_aug' in train_config:
        # NOTE(review): `sampler` is a module-level global created elsewhere;
        # it must exist whenever 'crop_aug' is configured.
        cam_rgb_points, box_label_list = sampler.crop_aug(
            cam_rgb_points,
            box_label_list,
            sample_rate=train_config['crop_aug']['sample_rate'],
            parser_kwargs=train_config['crop_aug']['parser_kwargs'])

    cam_rgb_points, box_label_list = aug_fn(cam_rgb_points, box_label_list)

    (vertex_coord_list, keypoint_indices_list, edges_list) = \
        graph_generate_fn(cam_rgb_points.xyz, **config['graph_gen_kwargs'])

    input_v = _select_input_features(cam_rgb_points.attr,
                                     config['input_features'])

    # Per-point labels are assigned at the coordinates of the last graph
    # level (graph_level + 1 indexes into vertex_coord_list).
    last_layer_graph_level = config['model_kwargs']['layer_configs'][-1][
        'graph_level']
    last_layer_points_xyz = vertex_coord_list[last_layer_graph_level + 1]
    expend_factor = train_config.get('expend_factor', (1.0, 1.0, 1.0))
    label_method = config['label_method']
    if label_method == 'yaw':
        cls_labels, boxes_3d, valid_boxes, label_map = \
            dataset.assign_classaware_label_to_points(
                box_label_list, last_layer_points_xyz,
                expend_factor=expend_factor)
    elif label_method == 'Car':
        cls_labels, boxes_3d, valid_boxes, label_map = \
            dataset.assign_classaware_car_label_to_points(
                box_label_list, last_layer_points_xyz,
                expend_factor=expend_factor)
    elif label_method == 'Pedestrian_and_Cyclist':
        cls_labels, boxes_3d, valid_boxes, label_map = \
            dataset.assign_classaware_ped_and_cyc_label_to_points(
                box_label_list, last_layer_points_xyz,
                expend_factor=expend_factor)
    else:
        raise ValueError('Unknown label_method: %s' % label_method)

    encoded_boxes = box_encoding_fn(cls_labels, last_layer_points_xyz,
                                    boxes_3d, label_map)

    # Cast everything to the fixed dtypes the model expects.
    input_v = input_v.astype(np.float32)
    vertex_coord_list = [p.astype(np.float32) for p in vertex_coord_list]
    keypoint_indices_list = [e.astype(np.int32) for e in keypoint_indices_list]
    edges_list = [e.astype(np.int32) for e in edges_list]
    cls_labels = cls_labels.astype(np.int32)
    encoded_boxes = encoded_boxes.astype(np.float32)
    valid_boxes = valid_boxes.astype(np.float32)
    return (input_v, vertex_coord_list, keypoint_indices_list, edges_list,
            cls_labels, encoded_boxes, valid_boxes)


def _select_input_features(attr, feature_mode):
    """Build the per-point input feature matrix from point attributes.

    Based on the indexing below, column 0 of *attr* is treated as a scalar
    ('i', presumably intensity — TODO confirm) and columns 1: as extra
    channels (presumably RGB).

    Modes: 'irgb' (all columns), '0rgb' (zero first column, keep the rest),
    '0000' (all zeros, same shape), 'i000' (first column + 3 zero columns),
    'i' (first column only), '0' (a single zero column).
    """
    num_points = attr.shape[0]
    if feature_mode == 'irgb':
        return attr
    if feature_mode == '0rgb':
        return np.hstack([np.zeros((num_points, 1)), attr[:, 1:]])
    if feature_mode == '0000':
        return np.zeros_like(attr)
    if feature_mode == 'i000':
        return np.hstack([attr[:, [0]], np.zeros((num_points, 3))])
    if feature_mode == 'i':
        return attr[:, [0]]
    if feature_mode == '0':
        return np.zeros((num_points, 1))
    raise ValueError('Unknown input_features: %s' % feature_mode)
Exemplo n.º 4
0
                       num_classes=config['num_classes'])
NUM_CLASSES = dataset.num_classes
print(dataset)

# Evaluate every file unless a non-negative NUM_TEST_SAMPLE is configured.
if 'NUM_TEST_SAMPLE' in eval_config and eval_config['NUM_TEST_SAMPLE'] >= 0:
    NUM_TEST_SAMPLE = eval_config['NUM_TEST_SAMPLE']
else:
    NUM_TEST_SAMPLE = dataset.num_files

# One encoding method drives the encoding length and both codec functions.
_box_encoding_method = config['box_encoding_method']
BOX_ENCODING_LEN = get_encoding_len(_box_encoding_method)
box_encoding_fn = get_box_encoding_fn(_box_encoding_method)
box_decoding_fn = get_box_decoding_fn(_box_encoding_method)

aug_fn = preprocess.get_data_aug(eval_config['data_aug_configs'])


def fetch_data(frame_idx):
    cam_points = dataset.get_cam_points_in_image(
        frame_idx, config['downsample_by_voxel_size'])
    box_label_list = dataset.get_label(frame_idx)
    cam_points, box_label_list = aug_fn(cam_points, box_label_list)
    graph_generate_fn = get_graph_generate_fn(config['graph_gen_method'])
    (vertex_coord_list, keypoint_indices_list,
     edges_list) = graph_generate_fn(cam_points.xyz,
                                     **config['graph_gen_kwargs'])
    input_v = cam_points.attr[:, [0]]
    last_layer_graph_level = config['model_kwargs']['layer_configs'][-1][
        'graph_level']
    last_layer_points_xyz = vertex_coord_list[last_layer_graph_level + 1]