# Standard and third-party imports used by the snippets below; project-specific
# modules (odes, pipeline_pb2, kitti_dataset_pb2, config_builder, KittiDataset,
# KittiDatasetConfig, balanced_positive_negative_sampler, inference, train,
# do_preprocessing, ...) come from the surrounding repository and are assumed
# to be importable.
import argparse
import os
import shutil
import subprocess
import sys
from copy import deepcopy
from distutils import dir_util

import numpy as np
from google.protobuf import text_format


def get_configs_from_pipeline_file(pipeline_config_path, is_training):
    """Reads model configuration from a pipeline_pb2.NetworkPipelineConfig.
    Args:
        pipeline_config_path: A path directory to the network pipeline config
        is_training: A boolean flag to indicate training stage, used for
            creating the checkpoint directory which must be created at the
            first training iteration.
    Returns:
        model_config: A model_pb2.ModelConfig
        train_config: A train_pb2.TrainConfig
        eval_config: An eval_pb2.EvalConfig
        dataset_config: A kitti_dataset_pb2.KittiDatasetConfig
    """

    pipeline_config = pipeline_pb2.NetworkPipelineConfig()
    with open(pipeline_config_path, 'r') as f:
        text_format.Merge(f.read(), pipeline_config)

    model_config = pipeline_config.model_config

    # Make sure the checkpoint name matches the config file name
    config_file_name = \
        os.path.splitext(os.path.basename(pipeline_config_path))[0]
    checkpoint_name = model_config.checkpoint_name
    if config_file_name != checkpoint_name:
        raise ValueError(
            'Config file name {} must match checkpoint name {}'.format(
                config_file_name, checkpoint_name))

    output_root_dir = odes.root_dir() + '/data/outputs/' + checkpoint_name

    # Construct paths
    paths_config = model_config.paths_config
    if not paths_config.checkpoint_dir:
        checkpoint_dir = output_root_dir + '/checkpoints'

        if is_training:
            if not os.path.exists(checkpoint_dir):
                os.makedirs(checkpoint_dir)

        paths_config.checkpoint_dir = checkpoint_dir

    if not paths_config.logdir:
        paths_config.logdir = output_root_dir + '/logs/'

    if not paths_config.pred_dir:
        paths_config.pred_dir = output_root_dir + '/predictions'

    train_config = pipeline_config.train_config
    eval_config = pipeline_config.eval_config
    dataset_config = pipeline_config.dataset_config

    if is_training:
        # Copy the config to the experiments folder
        experiment_config_path = output_root_dir + '/' + \
            model_config.checkpoint_name + '.config'
        # Copy this even if the config exists, in case some parameters
        # were modified
        shutil.copy(pipeline_config_path, experiment_config_path)

    return model_config, train_config, eval_config, dataset_config
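

# A minimal usage sketch (hypothetical config path; '_example_*' helpers are
# illustrative only). The config file name must match
# model_config.checkpoint_name, e.g. 'vgg_car.config' -> checkpoint 'vgg_car':
def _example_load_configs():
    pipeline_config_path = odes.root_dir() + '/configs/vgg_car.config'
    return get_configs_from_pipeline_file(pipeline_config_path,
                                          is_training=False)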


def copy_kitti_native_code(checkpoint_name):
    """Copies and compiles kitti native code.

    It also creates neccessary directories for storing the results
    of the kitti native evaluation code.
    """

    avod_root_dir = odes.root_dir()
    kitti_native_code_copy = avod_root_dir + '/data/outputs/' + \
        checkpoint_name + '/predictions/kitti_native_eval/'

    # Only copy if the code has not been already copied over
    if not os.path.exists(kitti_native_code_copy):

        os.makedirs(kitti_native_code_copy)
        original_kitti_native_code = odes.top_dir() + \
            '/scripts/offline_eval/kitti_native_eval/'

        predictions_dir = avod_root_dir + '/data/outputs/' + \
            checkpoint_name + '/predictions/'
        # Copy the native code into the predictions directory
        dir_util.copy_tree(original_kitti_native_code, kitti_native_code_copy)
        # Run the make script to compile the C++ code
        script_folder = predictions_dir + 'kitti_native_eval/'
        make_script = script_folder + 'run_make.sh'
        subprocess.call([make_script, script_folder])

    # Set up the results folders if they don't exist
    results_dir = odes.top_dir() + '/scripts/offline_eval/results'
    results_05_dir = odes.top_dir() + '/scripts/offline_eval/results_05_iou'
    if not os.path.exists(results_dir):
        os.makedirs(results_dir)
    if not os.path.exists(results_05_dir):
        os.makedirs(results_05_dir)


def main(_):
    parser = argparse.ArgumentParser()

    # Example usage
    # --checkpoint_name='vgg_car'
    # --data_split='test'
    # --ckpt_indices=50 100 112
    # Optional arg:
    # --device=0

    parser.add_argument('--checkpoint_name',
                        type=str,
                        dest='checkpoint_name',
                        required=True,
                        help='Checkpoint name must be specified as a str '
                             'and must match the experiment config file name.')

    parser.add_argument('--data_split',
                        type=str,
                        dest='data_split',
                        required=True,
                        help='Data split must be specified e.g. val or test')

    parser.add_argument(
        '--ckpt_indices',
        type=int,
        nargs='+',
        dest='ckpt_indices',
        required=True,
        help='Checkpoint indices must be a set of '
             'integers separated by spaces -> 0 10 20 etc')

    parser.add_argument('--device',
                        type=str,
                        dest='device',
                        default='0',
                        help='CUDA device id')

    # Check sys.argv before parsing: with required args, parse_args() would
    # exit with a terse error before this check could print the full help
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)

    args = parser.parse_args()

    experiment_config = args.checkpoint_name + '.config'

    # Read the config from the experiment folder
    experiment_config_path = odes.root_dir() + '/data/outputs/' +\
        args.checkpoint_name + '/' + experiment_config

    model_config, _, eval_config, dataset_config = \
        config_builder.get_configs_from_pipeline_file(
            experiment_config_path, is_training=False)

    os.environ['CUDA_VISIBLE_DEVICES'] = args.device
    inference(model_config, eval_config,
              dataset_config, args.data_split,
              args.ckpt_indices)


    # Fragment: constructor of a label-cluster utility class (the enclosing
    # class definition is not included in this example).
    def __init__(self, dataset):

        self._dataset = dataset

        self.cluster_split = dataset.cluster_split

        self.data_dir = odes.root_dir() + "/data/label_clusters"
        self.clusters = []
        self.std_devs = []


def main(_):
    parser = argparse.ArgumentParser()

    # Defaults
    default_pipeline_config_path = odes.root_dir() + \
        '/configs/vgg19_cars.config'
    default_data_split = 'train'
    default_device = '1'

    parser.add_argument('--pipeline_config',
                        type=str,
                        dest='pipeline_config_path',
                        default=default_pipeline_config_path,
                        help='Path to the pipeline config')

    parser.add_argument('--data_split',
                        type=str,
                        dest='data_split',
                        default=default_data_split,
                        help='Data split for training')

    parser.add_argument('--device',
                        type=str,
                        dest='device',
                        default=default_device,
                        help='CUDA device id')

    args = parser.parse_args()

    # Parse pipeline config
    model_config, train_config, _, dataset_config = \
        config_builder.get_configs_from_pipeline_file(
            args.pipeline_config_path, is_training=True)

    # Overwrite data split
    dataset_config.data_split = args.data_split

    # Set CUDA device id
    os.environ['CUDA_VISIBLE_DEVICES'] = args.device

    train(model_config, train_config, dataset_config)


def run_kitti_native_script(checkpoint_name, score_threshold, global_step):
    """Runs the kitti native code script."""

    eval_script_dir = odes.root_dir() + '/data/outputs/' + \
        checkpoint_name + '/predictions'
    make_script = eval_script_dir + \
        '/kitti_native_eval/run_eval.sh'
    script_folder = eval_script_dir + \
        '/kitti_native_eval/'

    results_dir = odes.top_dir() + '/scripts/offline_eval/results/'

    # Round this because protobuf encodes default values as full decimal
    score_threshold = round(score_threshold, 3)

    subprocess.call([
        make_script, script_folder,
        str(score_threshold),
        str(global_step),
        str(checkpoint_name),
        str(results_dir)
    ])
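

# Putting the two native-eval helpers together; a sketch with a hypothetical
# checkpoint name, score threshold, and global step:
def _example_offline_eval():
    checkpoint_name = 'vgg_car'
    copy_kitti_native_code(checkpoint_name)  # copy and compile once
    run_kitti_native_script(checkpoint_name,
                            score_threshold=0.1,
                            global_step=120000)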


def main(dataset=None):
    """Generates anchors info which is used for mini batch sampling.

    Processing on 'Cars' can be split into multiple processes, see the Options
    section for configuration.

    Args:
        dataset: KittiDataset (optional)
            If a dataset is provided, info is only generated for that dataset.
            Otherwise, info is generated for the classes enabled in the
            Options section below.
    """

    if dataset is not None:
        do_preprocessing(dataset, None)
        return

    car_dataset_config_path = odes.root_dir() + \
        '/configs/mb_preprocessing/rpn_cars.config'
    ped_dataset_config_path = odes.root_dir() + \
        '/configs/mb_preprocessing/rpn_pedestrians.config'
    cyc_dataset_config_path = odes.root_dir() + \
        '/configs/mb_preprocessing/rpn_cyclists.config'
    ppl_dataset_config_path = odes.root_dir() + \
        '/configs/mb_preprocessing/rpn_people.config'

    ##############################
    # Options
    ##############################
    # Serial vs parallel processing
    in_parallel = True

    process_car = True  # Cars
    process_ped = False  # Pedestrians
    process_cyc = False  # Cyclists
    process_ppl = False  # People (Pedestrians + Cyclists)

    # Number of child processes to fork, samples will
    # be divided evenly amongst the processes (in_parallel must be True)
    num_car_children = 8
    num_ped_children = 8
    num_cyc_children = 8
    num_ppl_children = 8

    ##############################
    # Dataset setup
    ##############################
    if process_car:
        car_dataset = DatasetBuilder.load_dataset_from_config(
            car_dataset_config_path)
    if process_ped:
        ped_dataset = DatasetBuilder.load_dataset_from_config(
            ped_dataset_config_path)
    if process_cyc:
        cyc_dataset = DatasetBuilder.load_dataset_from_config(
            cyc_dataset_config_path)
    if process_ppl:
        ppl_dataset = DatasetBuilder.load_dataset_from_config(
            ppl_dataset_config_path)

    ##############################
    # Serial Processing
    ##############################
    if not in_parallel:
        if process_car:
            do_preprocessing(car_dataset, None)
        if process_ped:
            do_preprocessing(ped_dataset, None)
        if process_cyc:
            do_preprocessing(cyc_dataset, None)
        if process_ppl:
            do_preprocessing(ppl_dataset, None)

        print('All Done (Serial)')

    ##############################
    # Parallel Processing
    ##############################
    else:

        # List of all child pids to wait on
        all_child_pids = []

        # Cars
        if process_car:
            car_indices_split = split_indices(car_dataset, num_car_children)
            split_work(all_child_pids, car_dataset, car_indices_split,
                       num_car_children)

        # Pedestrians
        if process_ped:
            ped_indices_split = split_indices(ped_dataset, num_ped_children)
            split_work(all_child_pids, ped_dataset, ped_indices_split,
                       num_ped_children)

        # Cyclists
        if process_cyc:
            cyc_indices_split = split_indices(cyc_dataset, num_cyc_children)
            split_work(all_child_pids, cyc_dataset, cyc_indices_split,
                       num_cyc_children)

        # People (Pedestrians + Cyclists)
        if process_ppl:
            ppl_indices_split = split_indices(ppl_dataset, num_ppl_children)
            split_work(all_child_pids, ppl_dataset, ppl_indices_split,
                       num_ppl_children)

        # Wait for all child processes to finish
        print('num children:', len(all_child_pids))
        for child_pid in all_child_pids:
            os.waitpid(child_pid, 0)

        print('All Done (Parallel)')
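

# The helpers split_indices and split_work are called above but not shown in
# this example. A minimal sketch consistent with those call sites, assuming
# dataset.num_samples exists and do_preprocessing(dataset, indices) accepts an
# index subset (os.fork is POSIX-only):
def split_indices(dataset, num_children):
    """Splits the dataset's sample indices into num_children equal-ish chunks."""
    return np.array_split(np.arange(dataset.num_samples), num_children)


def split_work(all_child_pids, dataset, indices_split, num_children):
    """Forks one child per index chunk; each child processes its share."""
    for child_idx in range(num_children):
        pid = os.fork()
        if pid == 0:
            # Child process: preprocess only this chunk, then exit
            do_preprocessing(dataset, indices_split[child_idx])
            os._exit(0)
        all_child_pids.append(pid)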


    # Fragment: constructor of a mini-batch utilities class (the enclosing
    # class definition is not included in this example).
    def __init__(self, dataset):

        self._dataset = dataset

        self._mini_batch_sampler = \
            balanced_positive_negative_sampler.BalancedPositiveNegativeSampler()

        ##############################
        # Parse KittiUtils config
        ##############################
        self.kitti_utils_config = dataset.config.kitti_utils_config
        self._area_extents = self.kitti_utils_config.area_extents
        self._anchor_strides = np.reshape(
            self.kitti_utils_config.anchor_strides, (-1, 2))

        ##############################
        # Parse MiniBatchUtils config
        ##############################
        self.config = self.kitti_utils_config.mini_batch_config
        self._density_threshold = self.config.density_threshold

        # RPN mini batches
        rpn_config = self.config.rpn_config

        rpn_iou_type = rpn_config.WhichOneof('iou_type')
        if rpn_iou_type == 'iou_2d_thresholds':
            self.rpn_iou_type = '2d'
            self.rpn_iou_thresholds = rpn_config.iou_2d_thresholds

        elif rpn_iou_type == 'iou_3d_thresholds':
            self.rpn_iou_type = '3d'
            self.rpn_iou_thresholds = rpn_config.iou_3d_thresholds

        else:
            raise ValueError(
                'Invalid rpn_config iou_type: {}'.format(rpn_iou_type))

        self.rpn_neg_iou_range = [self.rpn_iou_thresholds.neg_iou_lo,
                                  self.rpn_iou_thresholds.neg_iou_hi]
        self.rpn_pos_iou_range = [self.rpn_iou_thresholds.pos_iou_lo,
                                  self.rpn_iou_thresholds.pos_iou_hi]

        self.rpn_mini_batch_size = rpn_config.mini_batch_size

        # AVOD mini batches
        avod_config = self.config.avod_config
        self.avod_iou_type = '2d'
        self.avod_iou_thresholds = avod_config.iou_2d_thresholds

        self.avod_neg_iou_range = [self.avod_iou_thresholds.neg_iou_lo,
                                   self.avod_iou_thresholds.neg_iou_hi]
        self.avod_pos_iou_range = [self.avod_iou_thresholds.pos_iou_lo,
                                   self.avod_iou_thresholds.pos_iou_hi]

        self.avod_mini_batch_size = avod_config.mini_batch_size

        # Setup paths
        self.mini_batch_dir = odes.root_dir() + '/data/mini_batches/' + \
            'iou_{}/'.format(self.rpn_iou_type) + \
            dataset.name + '/' + dataset.cluster_split + '/' + \
            dataset.bev_source

        # Array column indices for saving to files
        ## benz
        self.col_length = 7          # total row length
        self.col_anchor_indices = 0  # anchor index
        self.col_ious = 1            # IoU with ground truth
        self.col_offsets_lo = 2      # offsets occupy the slice [2:6)
        self.col_offsets_hi = 6
        self.col_class_idx = 6       # class index (last column)
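

# The column constants above describe a flat (N, col_length) row layout; a
# minimal sketch of unpacking a saved mini-batch array back into its parts
# ('_example_unpack_mini_batch_rows' is illustrative, not part of the project):
def _example_unpack_mini_batch_rows(sample_info):
    anchor_indices = sample_info[:, 0]   # col_anchor_indices
    ious = sample_info[:, 1]             # col_ious
    offsets = sample_info[:, 2:6]        # col_offsets_lo : col_offsets_hi
    class_indices = sample_info[:, 6]    # col_class_idx
    return anchor_indices, ious, offsets, class_indices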


class DatasetBuilder(object):
    """
    Static class to return preconfigured dataset objects
    """

    KITTI_UNITTEST = KittiDatasetConfig(
        name="unittest-kitti",
        dataset_dir=odes.root_dir() + "/tests/datasets/Kitti/object",
        data_split="train",
        data_split_dir="training",
        has_labels=True,
        cluster_split="train",
        classes=["Car", "Pedestrian", "Cyclist"],
        num_clusters=[2, 1, 1],
    )

    KITTI_TRAIN = KittiDatasetConfig(
        name="kitti",
        data_split="train",
        data_split_dir="training",
        has_labels=True,
        cluster_split="train",
        classes=["Car"],
        num_clusters=[2],
    )

    KITTI_VAL = KittiDatasetConfig(
        name="kitti",
        data_split="val",
        data_split_dir="training",
        has_labels=True,
        cluster_split="train",
        classes=["Car"],
        num_clusters=[2],
    )

    KITTI_TEST = KittiDatasetConfig(
        name="kitti",
        data_split="test",
        data_split_dir="testing",
        has_labels=False,
        cluster_split="train",
        classes=["Car"],
        num_clusters=[2],
    )

    KITTI_TRAINVAL = KittiDatasetConfig(
        name="kitti",
        data_split="trainval",
        data_split_dir="training",
        has_labels=True,
        cluster_split="trainval",
        classes=["Car"],
        num_clusters=[2],
    )

    KITTI_TRAIN_MINI = KittiDatasetConfig(
        name="kitti",
        data_split="train_mini",
        data_split_dir="training",
        has_labels=True,
        cluster_split="train",
        classes=["Car"],
        num_clusters=[2],
    )
    KITTI_VAL_MINI = KittiDatasetConfig(
        name="kitti",
        data_split="val_mini",
        data_split_dir="training",
        has_labels=True,
        cluster_split="train",
        classes=["Car"],
        num_clusters=[2],
    )
    KITTI_TEST_MINI = KittiDatasetConfig(
        name="kitti",
        data_split="test_mini",
        data_split_dir="testing",
        has_labels=False,
        cluster_split="train",
        classes=["Car"],
        num_clusters=[2],
    )

    CONFIG_DEFAULTS_PROTO = \
    '''
    '''
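    # Note: CONFIG_DEFAULTS_PROTO is currently empty, so merging it (in
    # merge_defaults and build_kitti_dataset) leaves the config unchanged.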

    @staticmethod
    def load_dataset_from_config(dataset_config_path):

        dataset_config = kitti_dataset_pb2.KittiDatasetConfig()
        with open(dataset_config_path, 'r') as f:
            text_format.Merge(f.read(), dataset_config)

        return DatasetBuilder.build_kitti_dataset(dataset_config,
                                                  use_defaults=False)

    @staticmethod
    def copy_config(cfg):
        return deepcopy(cfg)

    @staticmethod
    def merge_defaults(cfg):
        cfg_copy = DatasetBuilder.copy_config(cfg)
        text_format.Merge(DatasetBuilder.CONFIG_DEFAULTS_PROTO, cfg_copy)
        return cfg_copy

    @staticmethod
    def build_kitti_dataset(base_cfg,
                            use_defaults=True,
                            new_cfg=None) -> KittiDataset:
        """Builds a KittiDataset object using the provided configurations

        Args:
            base_cfg: a base dataset configuration
            use_defaults: whether to use the default config values
            new_cfg: (optional) a custom dataset configuration, no default
                values will be used, all config values must be provided

        Returns:
            KittiDataset object
        """
        cfg_copy = DatasetBuilder.copy_config(base_cfg)

        if use_defaults:
            # Use default values
            text_format.Merge(DatasetBuilder.CONFIG_DEFAULTS_PROTO, cfg_copy)

        if new_cfg:
            # Use new config values if provided
            cfg_copy.MergeFrom(new_cfg)

        return KittiDataset(cfg_copy)
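

# Usage sketch: build datasets from the preconfigured configs above (assumes
# the KITTI data referenced by the configs exists on disk; '_example_*' is
# illustrative only):
def _example_build_datasets():
    train_dataset = DatasetBuilder.build_kitti_dataset(
        DatasetBuilder.KITTI_TRAIN)
    val_dataset = DatasetBuilder.build_kitti_dataset(DatasetBuilder.KITTI_VAL)
    return train_dataset, val_dataset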


def save_predictions_in_kitti_format(model, checkpoint_name, data_split,
                                     score_threshold, global_step):
    """Converts a set of network predictions into the text files required for
    KITTI evaluation.
    """

    dataset = model.dataset
    # Round this because protobuf encodes default values as full decimal
    score_threshold = round(score_threshold, 3)

    # Get available prediction folders
    predictions_root_dir = odes.root_dir() + '/data/outputs/' + \
        checkpoint_name + '/predictions'

    final_predictions_root_dir = predictions_root_dir + \
        '/final_predictions_and_scores/' + dataset.data_split

    final_predictions_dir = final_predictions_root_dir + \
        '/' + str(global_step)

    # 3D prediction directories
    kitti_predictions_3d_dir = predictions_root_dir + \
        '/kitti_native_eval/' + \
        str(score_threshold) + '/' + \
        str(global_step) + '/data'

    if not os.path.exists(kitti_predictions_3d_dir):
        os.makedirs(kitti_predictions_3d_dir)

    # Do conversion
    num_samples = dataset.num_samples
    num_valid_samples = 0

    print('\nGlobal step:', global_step)
    print('Converting detections from:', final_predictions_dir)

    print('3D Detections being saved to:', kitti_predictions_3d_dir)

    for sample_idx in range(num_samples):

        # Print progress
        sys.stdout.write('\rConverting {} / {}'.format(sample_idx + 1,
                                                       num_samples))
        sys.stdout.flush()

        sample_name = dataset.sample_names[sample_idx]

        prediction_file = sample_name + '.txt'

        kitti_predictions_3d_file_path = kitti_predictions_3d_dir + \
            '/' + prediction_file

        predictions_file_path = final_predictions_dir + \
            '/' + prediction_file

        # If no predictions, skip to next file
        if not os.path.exists(predictions_file_path):
            np.savetxt(kitti_predictions_3d_file_path, [])
            continue

        all_predictions = np.loadtxt(predictions_file_path)

        # # Swap l, w for predictions where w > l
        # swapped_indices = all_predictions[:, 4] > all_predictions[:, 3]
        # fixed_predictions = np.copy(all_predictions)
        # fixed_predictions[swapped_indices, 3] = all_predictions[
        #     swapped_indices, 4]
        # fixed_predictions[swapped_indices, 4] = all_predictions[
        #     swapped_indices, 3]

        ## benz, confidence is stored at column index 4
        score_filter = all_predictions[:, 4] >= score_threshold
        all_predictions = all_predictions[score_filter]

        # If no predictions, skip to next file
        if len(all_predictions) == 0:
            np.savetxt(kitti_predictions_3d_file_path, [])
            continue

        # Project to image space
        sample_name = prediction_file.split('.')[0]
        img_idx = int(sample_name)

        # Load image for truncation

        #image = Image.open(dataset.get_rgb_image_path(sample_name))   ## benz

        #stereo_calib_p2 = calib_utils.read_calibration(dataset.calib_dir,
        #                                               img_idx).p2

        boxes = []
        image_filter = []
        for i in range(len(all_predictions)):
            #box_3d = all_predictions[i, 0:7]
            #img_box = box_3d_projector.project_to_image_space(
            #    box_3d, stereo_calib_p2,
            #    truncate=True, image_size=image.size)

            ## Skip invalid boxes (outside image space)
            #if img_box is None:
            #    image_filter.append(False)
            #    continue

            image_filter.append(True)
            #boxes.append(img_box)
            boxes.append(all_predictions[i, 0:4])

        boxes = np.asarray(boxes)
        all_predictions = all_predictions[image_filter]

        # If no predictions, skip to next file
        if len(boxes) == 0:
            np.savetxt(kitti_predictions_3d_file_path, [])
            continue

        num_valid_samples += 1

        # To keep each value in its appropriate position, an array of zeros
        # (N, 16) is allocated, but only values [3:16] are used
        kitti_predictions = np.zeros([len(boxes), 16])
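        # Column layout follows the KITTI devkit label format, plus a score:
        #   0: type (written separately)  1: truncated  2: occluded  3: alpha
        #   4-7: 2D bbox (left, top, right, bottom)
        #   8-10: 3D dimensions  11-13: 3D location (camera coords)
        #   14: rotation_y  15: score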

        # Get object types
        all_pred_classes = all_predictions[:, 5].astype(np.int32)
        obj_types = [
            dataset.classes[class_idx] for class_idx in all_pred_classes
        ]

        # Truncation and Occlusion are always empty (see below)

        # Alpha (Not computed)
        kitti_predictions[:, 3] = -10 * np.ones(
            (len(kitti_predictions)), dtype=np.int32)

        # 2D predictions
        kitti_predictions[:, 4:8] = boxes[:, 0:4]

        # 3D predictions
        # (l, w, h)
        ## benz, I do not predict 3D bounding boxes, so keep all these terms negative
        kitti_predictions[:, 8] = -1  #all_predictions[:, 5]
        kitti_predictions[:, 9] = -1  #all_predictions[:, 4]
        kitti_predictions[:, 10] = -1  #all_predictions[:, 3]
        # (x, y, z)
        kitti_predictions[:, 11:14] = -1000  #all_predictions[:, 0:3]
        # (ry, score)
        kitti_predictions[:, 14] = -10  #all_predictions[:, 6:8]
        kitti_predictions[:, 15] = all_predictions[:, 4]
        # Round detections to 3 decimal places
        kitti_predictions = np.round(kitti_predictions, 3)

        # Empty Truncation, Occlusion
        kitti_empty_1 = -1 * np.ones(
            (len(kitti_predictions), 2), dtype=np.int32)

        # Stack 3D predictions text
        kitti_text_3d = np.column_stack(
            [obj_types, kitti_empty_1, kitti_predictions[:, 3:16]])

        # Save to text files
        np.savetxt(kitti_predictions_3d_file_path,
                   kitti_text_3d,
                   newline='\r\n',
                   fmt='%s')

    print('\nNum valid:', num_valid_samples)
    print('Num samples:', num_samples)