Example #1
def _nuscenes_modify_step(config,
                          epochs,
                          eval_epoch,
                          data_sample_factor,
                          num_examples=28130):
    # Note: num_examples is unused here; the step count is derived from
    # the actual length of the sampled dataset below.
    input_cfg = config.train_input_reader
    train_cfg = config.train_config
    batch_size = input_cfg.batch_size
    data_sample_factor_to_name = {
        1: "NuScenesDataset",
        2: "NuScenesDatasetD2",
        3: "NuScenesDatasetD3",
        4: "NuScenesDatasetD4",
        5: "NuScenesDatasetD5",
        6: "NuScenesDatasetD6",
        7: "NuScenesDatasetD7",
        8: "NuScenesDatasetD8",
    }
    # Pick the dataset variant matching the requested sampling factor
    # (the D<N> suffixes select progressively subsampled variants).
    dataset_name = data_sample_factor_to_name[data_sample_factor]
    input_cfg.dataset.dataset_class_name = dataset_name
    ds = get_dataset_class(dataset_name)(
        root_path=input_cfg.dataset.kitti_root_path,
        info_path=input_cfg.dataset.kitti_info_path,
    )
    num_examples_after_sample = len(ds)
    # Convert epoch counts into step counts, rounding partial batches up.
    step_per_epoch = _div_up(num_examples_after_sample, batch_size)
    step_per_eval = step_per_epoch * eval_epoch
    total_step = step_per_epoch * epochs
    train_cfg.steps = total_step
    train_cfg.steps_per_eval = step_per_eval
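
The step-calculation examples here rely on a _div_up helper that is not shown. A minimal sketch, assuming it performs ceiling division so that a final partial batch still counts as one training step:

def _div_up(a, b):
    # Ceiling division without floats: (a + b - 1) // b rounds up.
    return (a + b - 1) // b
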
Example #2
def readinfo():
    global BACKEND
    # The request body is expected to carry root_path, info_path and
    # dataset_class_name keys.
    instance = request.json
    root_path = Path(instance["root_path"])
    response = {"status": "normal"}
    BACKEND.root_path = root_path
    info_path = Path(instance["info_path"])
    dataset_class_name = instance["dataset_class_name"]
    BACKEND.dataset = get_dataset_class(dataset_class_name)(
        root_path=root_path, info_path=info_path)
    BACKEND.image_idxes = list(range(len(BACKEND.dataset)))
    response["image_indexes"] = BACKEND.image_idxes
    response = jsonify(results=[response])
    # Allow cross-origin requests, e.g. from a browser-based viewer.
    response.headers['Access-Control-Allow-Headers'] = '*'
    return response
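
A minimal client sketch for exercising this handler. The route path and port are assumptions, since the snippet omits the Flask route decorator; the payload keys come from the handler itself:

import requests

payload = {
    "root_path": "/data/kitti",            # example path, not from the source
    "info_path": "/data/kitti/infos.pkl",  # example path, not from the source
    "dataset_class_name": "KittiDataset",  # any registered dataset class
}
# Hypothetical endpoint; the actual route registration is not shown above.
resp = requests.post("http://localhost:5000/api/readinfo", json=payload)
print(resp.json()["results"][0]["image_indexes"][:10])
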
Example #3
def set_train_step(config, epochs, eval_epoch):
    input_cfg = config.train_input_reader
    train_cfg = config.train_config
    batch_size = input_cfg.batch_size
    # The dataset class is taken directly from the config here, rather
    # than derived from a sample factor as in the NuScenes variant above.
    dataset_name = input_cfg.dataset.dataset_class_name
    ds = get_dataset_class(dataset_name)(
        root_path=input_cfg.dataset.kitti_root_path,
        info_path=input_cfg.dataset.kitti_info_path,
    )
    num_examples_after_sample = len(ds)
    step_per_epoch = _div_up(num_examples_after_sample, batch_size)
    step_per_eval = step_per_epoch * eval_epoch
    total_step = step_per_epoch * epochs
    train_cfg.steps = total_step
    train_cfg.steps_per_eval = step_per_eval
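
A usage sketch for set_train_step. Loading the config via protobuf text format is an assumption about the surrounding pipeline, as are the TrainEvalPipelineConfig proto module and the config path:

from google.protobuf import text_format
from second.protos import pipeline_pb2  # assumed proto module

config = pipeline_pb2.TrainEvalPipelineConfig()
with open("configs/car.fhd.config") as f:  # hypothetical config path
    text_format.Merge(f.read(), config)

# Train for 50 epochs, evaluating every 5 epochs.
set_train_step(config, epochs=50, eval_epoch=5)
print(config.train_config.steps, config.train_config.steps_per_eval)
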
Example #4
def readinfo():
    app.logger.info("readinfo")
    global BACKEND
    instance = request.json
    root_path = Path(instance["root_path"])
    response = {"status": "normal"}
    BACKEND.root_path = root_path
    info_path = Path(instance["info_path"])
    dataset_class_name = instance["dataset_class_name"]
    BACKEND.dataset = get_dataset_class(dataset_class_name)(
        root_path=root_path, info_path=info_path)
    BACKEND.image_idxes = list(range(len(BACKEND.dataset)))
    response["image_indexes"] = BACKEND.image_idxes
    response = jsonify(results=[response])
    return response
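
Both readinfo variants assume a module-level BACKEND object that carries state between requests. A minimal sketch of such a container; the class name and initialization are assumptions, with the attribute names taken from the handlers above:

class Backend:
    """Per-server state shared across Flask request handlers."""

    def __init__(self):
        self.root_path = None    # dataset root directory (pathlib.Path)
        self.dataset = None      # instance built by get_dataset_class(...)
        self.image_idxes = None  # list of valid example indexes


BACKEND = Backend()
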
Example #5
def build(input_reader_config,
          model_config,
          training,
          voxel_generator,
          target_assigner,
          multi_gpu=False):
    """Builds a tensor dictionary based on the InputReader config.

    Args:
        input_reader_config: A input_reader_pb2.InputReader object.

    Returns:
        A tensor dict based on the input_reader_config.

    Raises:
        ValueError: On invalid input reader proto.
        ValueError: If no input paths are specified.
    """
    if not isinstance(input_reader_config, input_reader_pb2.InputReader):
        raise ValueError('input_reader_config not of type '
                         'input_reader_pb2.InputReader.')
    prep_cfg = input_reader_config.preprocess
    dataset_cfg = input_reader_config.dataset
    num_point_features = model_config.num_point_features
    out_size_factor = get_downsample_factor(model_config)
    assert out_size_factor > 0
    db_sampler_cfg = prep_cfg.database_sampler
    db_sampler = None
    if len(db_sampler_cfg.sample_groups) > 0:  # ground-truth sampling enabled
        db_sampler = dbsampler_builder.build(db_sampler_cfg)
    grid_size = voxel_generator.grid_size
    # Downsampled bird's-eye-view feature-map size, e.g. [352, 400].
    feature_map_size = grid_size[:2] // out_size_factor
    # Append a depth dimension of 1 and reverse the axis order for anchor
    # generation.
    feature_map_size = [*feature_map_size, 1][::-1]
    print("feature_map_size", feature_map_size)
    assert all([n != '' for n in target_assigner.classes]), \
        "you must specify class_name in anchor_generators."
    dataset_cls = get_dataset_class(dataset_cfg.dataset_class_name)
    assert dataset_cls.NumPointFeatures >= 3, \
        "NumPointFeatures must be set to a correct value (>= 3)"
    assert dataset_cls.NumPointFeatures == num_point_features, \
        "the dataset and model point-feature counts must currently match"
    prep_func = partial(
        prep_pointcloud,
        root_path=dataset_cfg.kitti_root_path,
        voxel_generator=voxel_generator,
        target_assigner=target_assigner,
        training=training,
        max_voxels=prep_cfg.max_number_of_voxels,
        remove_outside_points=False,
        remove_unknown=prep_cfg.remove_unknown_examples,
        create_targets=training,
        shuffle_points=prep_cfg.shuffle_points,
        gt_rotation_noise=list(prep_cfg.groundtruth_rotation_uniform_noise),
        gt_loc_noise_std=list(prep_cfg.groundtruth_localization_noise_std),
        global_rotation_noise=list(prep_cfg.global_rotation_uniform_noise),
        global_scaling_noise=list(prep_cfg.global_scaling_uniform_noise),
        global_random_rot_range=list(
            prep_cfg.global_random_rotation_range_per_object),
        global_translate_noise_std=list(prep_cfg.global_translate_noise_std),
        db_sampler=db_sampler,
        num_point_features=dataset_cls.NumPointFeatures,
        anchor_area_threshold=prep_cfg.anchor_area_threshold,
        gt_points_drop=prep_cfg.groundtruth_points_drop_percentage,
        gt_drop_max_keep=prep_cfg.groundtruth_drop_max_keep_points,
        remove_points_after_sample=prep_cfg.remove_points_after_sample,
        remove_environment=prep_cfg.remove_environment,
        use_group_id=prep_cfg.use_group_id,
        out_size_factor=out_size_factor,
        multi_gpu=multi_gpu)

    # Generate anchors once for the fixed feature-map size; they are cached
    # below so prep_pointcloud does not regenerate them per example.
    ret = target_assigner.generate_anchors(feature_map_size)
    class_names = target_assigner.classes
    anchors_dict = target_assigner.generate_anchors_dict(feature_map_size)
    anchors = ret["anchors"]
    anchors = anchors.reshape([-1, 7])
    matched_thresholds = ret["matched_thresholds"]
    unmatched_thresholds = ret["unmatched_thresholds"]
    # Axis-aligned bird's-eye-view boxes approximating the rotated anchors.
    anchors_bv = box_np_ops.rbbox2d_to_near_bbox(anchors[:, [0, 1, 3, 4, 6]])
    anchor_cache = {
        "anchors": anchors,
        "anchors_bv": anchors_bv,
        "matched_thresholds": matched_thresholds,
        "unmatched_thresholds": unmatched_thresholds,
        "anchors_dict": anchors_dict,
    }
    prep_func = partial(prep_func, anchor_cache=anchor_cache)

    dataset = dataset_cls(info_path=dataset_cfg.kitti_info_path,
                          root_path=dataset_cfg.kitti_root_path,
                          class_names=class_names,
                          prep_func=prep_func)

    return dataset
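
A usage sketch for build. It assumes a parsed pipeline config plus pre-built voxel_generator and target_assigner objects; their builders, and the config.model.second layout, are outside this snippet and are assumptions:

# Sketch only: config, voxel_generator and target_assigner are assumed
# to be constructed elsewhere from the pipeline and model configs.
train_dataset = build(input_reader_config=config.train_input_reader,
                      model_config=config.model.second,
                      training=True,
                      voxel_generator=voxel_generator,
                      target_assigner=target_assigner,
                      multi_gpu=False)
print("train examples:", len(train_dataset))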