Example #1
def get_all_detections(idx, persp_id, results, filter_area=False):
    all_perspect_detections = []

    # Load predictions from persp_id vehicle
    persp_dir = get_folder(persp_id)
    perspect_detections = get_detections(persp_dir, persp_dir, idx, persp_id,
                                         persp_id, results, filter_area)
    if perspect_detections:
        all_perspect_detections.append(perspect_detections)

    # Load detections from cfg.DATASET_DIR if ego_vehicle is not the persp_id
    if persp_id != const.ego_id():
        perspect_detections = get_detections(persp_dir, cfg.DATASET_DIR, idx,
                                             persp_id, const.ego_id(), results,
                                             filter_area)
        if perspect_detections:
            all_perspect_detections.append(perspect_detections)

    # Load detections from remaining perspectives
    for entity_str in const.valid_perspectives():
        other_persp_dir = os.path.join(cfg.ALT_PERSP_DIR, entity_str)
        if os.path.isdir(other_persp_dir):
            # Skip own detections since they're loaded first
            if int(entity_str) != persp_id:
                perspect_detections = get_detections(persp_dir,
                                                     other_persp_dir,
                                                     idx, persp_id,
                                                     int(entity_str), results,
                                                     filter_area)
                if perspect_detections:
                    all_perspect_detections.append(perspect_detections)

    return all_perspect_detections
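The helper const.valid_perspectives(), used throughout these examples, is not part of the listing. A minimal sketch consistent with the call sites (numeric entity-ID directory names under cfg.ALT_PERSP_DIR, sortable with key=float as in Example #9) might look like the following; this is an assumption, not the actual constants module:

import os

import config as cfg

def valid_perspectives():
    # Assumed behavior: every numeric subdirectory of ALT_PERSP_DIR is a
    # perspective; the ego vehicle lives in cfg.DATASET_DIR and is excluded.
    return [d for d in sorted(os.listdir(cfg.ALT_PERSP_DIR))
            if d.isdigit() and os.path.isdir(os.path.join(cfg.ALT_PERSP_DIR, d))]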
Example #2
def main():
    filepath = os.path.join(
        cfg.DATASET_DIR, cfg.FALSE_DETECTIONS_SUBDIR,
        'random_{}.txt'.format(cfg.RANDOM_MALICIOUS_PROBABILITY))

    with open(filepath, 'w') as f:
        for entity_str in const.valid_perspectives():
            if std_utils.decision_true(cfg.RANDOM_MALICIOUS_PROBABILITY):
                f.write('%s\n' % entity_str)
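std_utils.decision_true is likewise not shown. Since it gates each write on cfg.RANDOM_MALICIOUS_PROBABILITY, a plausible minimal implementation is a single Bernoulli draw (an assumption, not the actual std_utils code):

import random

def decision_true(probability):
    # Assumed semantics: return True with the given probability in [0, 1]
    return random.random() < probability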
Example #3
def main():
    # Copy the ego-vehicle dirs
    copy_perspect_dir(cfg.DATASET_DIR)
    # The label dir is sometimes only filtered for the main perspective (used for evaluation)
    dirpath = cfg.DATASET_DIR + '/' + cfg.LABEL_DIR + '/'
    copy_files_in_dir(dirpath, 'txt')

    # Then for all the alternate perspectives
    for entity_str in const.valid_perspectives():
        perspect_dir = os.path.join(cfg.ALT_PERSP_DIR, entity_str)
        copy_perspect_dir(perspect_dir)
Example #4
def save_false_dets():

    print("Beginning save of false detections")

    # First for the ego vehicle
    save_false_dets_persp(cfg.DATASET_DIR, const.ego_id())

    # Then for all the alternate perspectives
    persp_count = len(os.listdir(cfg.ALT_PERSP_DIR))
    persp_idx = 0
    for entity_str in const.valid_perspectives():
        persp_dir = os.path.join(cfg.ALT_PERSP_DIR, entity_str)
        save_false_dets_persp(persp_dir, int(entity_str))

        persp_idx += 1
        sys.stdout.write(
            '\rFinished saving detections for perspective {}: {} / {}'.format(
                int(entity_str), persp_idx, persp_count))
        sys.stdout.flush()
Example #5
def compute_points_in_3d_boxes():

    print("Beginning calculation of points_in_3d_boxes")

    std_utils.delete_all_subdirs(cfg.POINTS_IN_3D_BOXES_DIR)

    # First for the ego vehicle
    compute_perspect_points_in_3d_boxes(cfg.DATASET_DIR, const.ego_id())

    # Then for all the alternate perspectives
    persp_count = len(os.listdir(cfg.ALT_PERSP_DIR))
    persp_idx = 0
    for entity_str in const.valid_perspectives():
        perspect_dir = os.path.join(cfg.ALT_PERSP_DIR, entity_str)
        compute_perspect_points_in_3d_boxes(perspect_dir, int(entity_str))

        persp_idx += 1
        sys.stdout.write(
            '\rFinished point count for perspective {}: {} / {}'.format(
                int(entity_str), persp_idx, persp_count))
        sys.stdout.flush()
Example #6
def calculate_vehicle_trusts():

    # Before calculating, first delete all previous vehicle trust values
    std_utils.delete_subdir(cfg.V_TRUST_SUBDIR)

    # Initialize dictionary for vehicle trust values
    # Entity ID/VehicleTrust object pairs
    trust_dict = {}

    velo_dir = cfg.DATASET_DIR + '/velodyne'
    for idx in range(cfg.MIN_IDX, cfg.MAX_IDX + 1):
        filepath = velo_dir + '/{:06d}.bin'.format(idx)

        if not os.path.isfile(filepath):
            logging.debug("Could not find file: %s", filepath)
            logging.debug("Stopping at idx: %d", idx)
            break

        # Load stale trust dict if we need it (past msg fresh period)
        stale_trust_dict = {}
        if (idx - cfg.STALE_EVALS_TIME) >= 0:
            stale_trust_dict = load_vehicle_trust_objs(idx -
                                                       cfg.STALE_EVALS_TIME)

        # First for the ego vehicle
        compute_vehicle_trust(cfg.DATASET_DIR, const.ego_id(), idx, trust_dict,
                              stale_trust_dict)

        # Then for all the alternate perspectives
        for entity_str in const.valid_perspectives():
            perspect_dir = os.path.join(cfg.ALT_PERSP_DIR, entity_str)
            compute_vehicle_trust(perspect_dir, int(entity_str), idx,
                                  trust_dict, stale_trust_dict)

        write_trust_vals(trust_dict, idx)

    print("Finished calculating vehicle trusts")
Example #7
def correct_synchro():
    std_utils.delete_all_subdirs(cfg.SYNCHRONIZED_PREDS_DIR)

    # Need to use augmented labels since they contain the speed and entity ID
    aug_label_dir = cfg.DATASET_DIR + '/label_aug_2'
    velo_dir = cfg.DATASET_DIR + '/velodyne'

    # Do this for every sample index
    velo_files = os.listdir(velo_dir)
    num_files = len(velo_files)
    file_idx = 0

    for file in velo_files:
        idx = int(os.path.splitext(file)[0])

        file_idx += 1
        sys.stdout.write(
            '\rSynchronizing index: {} / {}'.format(file_idx, num_files))
        sys.stdout.flush()

        if idx < cfg.MIN_IDX or idx > cfg.MAX_IDX:
            continue
        logging.debug("**********************************Index: %d", idx)

        # Create dictionary for quickly obtaining speed of object
        ego_gt = obj_utils.read_labels(aug_label_dir,
                                       idx,
                                       results=False,
                                       synthetic=True)
        dict_ego_gt = {}
        for obj in ego_gt:
            dict_ego_gt[obj.id] = obj

        # Ego vehicle does not need synchronization
        # Simply copy file
        src_file = '{}/{}/{:06d}.txt'.format(cfg.DATASET_DIR,
                                             cfg.PREDICTIONS_SUBDIR, idx)
        dst_dir = '{}/{}/'.format(cfg.DATASET_DIR, cfg.SYNCHRONIZED_PREDS_DIR)
        dst_file = dst_dir + '{:06d}.txt'.format(idx)
        std_utils.make_dir(dst_dir)
        copyfile(src_file, dst_file)

        # Do for all the alternate perspectives
        for entity_str in const.valid_perspectives():
            persp_dir = os.path.join(cfg.ALT_PERSP_DIR, entity_str)

            # Do we want to save the ego detection here?
            ego_detection = p_utils.get_own_vehicle_object(
                persp_dir, idx, int(entity_str))
            persp_det = get_synchronized_dets(persp_dir, cfg.DATASET_DIR, idx,
                                              ego_detection, dict_ego_gt)

            if persp_det == -1:
                continue

            # Make sure directory exists if we've made it this far
            out_dir = persp_dir + '/predictions_synchro/'
            std_utils.make_dir(out_dir)

            # If there are no detections then stop but make empty file
            # since empty file exists for predictions
            if persp_det is None:
                # Write a file with nothing as there are no detections
                with open('{}/{:06d}.txt'.format(out_dir, idx), 'w+'):
                    pass
                continue

            std_utils.save_objs_to_file(persp_det, idx, out_dir, True)

    print("Finished synchronizing perspectives.")
Example #8
def delete_all_subdirs(subdir):
    delete_subdir(subdir)

    for entity_str in const.valid_perspectives():
        perspect_dir = os.path.join(cfg.ALT_PERSP_DIR, entity_str)
        delete_subdir(subdir, perspect_dir)
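delete_subdir itself is not included in the listing. From the two call sites, delete_subdir(subdir) for the ego vehicle and delete_subdir(subdir, perspect_dir) for an alternate perspective, a plausible signature defaults the base directory to cfg.DATASET_DIR. A hypothetical sketch:

import os
import shutil

import config as cfg

def delete_subdir(subdir, base_dir=cfg.DATASET_DIR):
    # Assumed behavior: remove <base_dir>/<subdir> and all its contents
    dirpath = os.path.join(base_dir, subdir)
    if os.path.isdir(dirpath):
        shutil.rmtree(dirpath)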
Example #9
def inference(model_config,
              eval_config,
              dataset_config,
              base_dir,
              ckpt_indices,
              additional_cls,
              start_perspective=0):

    logging.info("Additional class: {}".format(additional_cls))
    logging.info("ckpt_indices: {}".format(ckpt_indices))
    # Overwrite the defaults
    dataset_config = config_builder.proto_to_obj(dataset_config)

    dataset_config.data_dir = base_dir
    dataset_config.dataset_dir = base_dir

    dataset_config.data_split = 'train'
    dataset_config.data_split_dir = 'training'

    eval_config.eval_mode = 'test'
    eval_config.evaluate_repeatedly = False

    dataset_config.has_labels = False
    # Enable this to see the actual memory being used
    eval_config.allow_gpu_mem_growth = True

    eval_config = config_builder.proto_to_obj(eval_config)
    # Grab the checkpoint indices to evaluate
    eval_config.ckpt_indices = ckpt_indices

    # Remove augmentation during evaluation in test mode
    dataset_config.aug_list = []

    # Setup the model
    # Overwrite repeated field
    model_config = config_builder.proto_to_obj(model_config)
    # Turn path drop off during evaluation
    model_config.path_drop_probabilities = [1.0, 1.0]

    if start_perspective == 0:
        # Create validation split
        create_split.create_split(
            dataset_config.dataset_dir,
            dataset_config.dataset_dir + dataset_config.data_split_dir + '/',
            'val')

        inferPerspective(model_config, eval_config, dataset_config,
                         additional_cls)

    altPerspect_dir = base_dir + dataset_config.data_split_dir + '/alt_perspective/'

    p_idx = 0
    perspective_dirs = sorted(const.valid_perspectives(), key=float)
    p_count = len(perspective_dirs)
    for entity_str in perspective_dirs:
        if not os.path.isdir(os.path.join(altPerspect_dir, entity_str)):
            continue

        p_idx += 1

        # Option to skip some perspectives
        if int(entity_str) < start_perspective:
            continue

        dataset_config.data_split = entity_str
        dataset_config.data_split_dir = entity_str
        dataset_config.dataset_dir = altPerspect_dir
        inferPerspective(model_config, eval_config, dataset_config,
                         additional_cls)
        logging.info(
            '\n\n********************Finished perspective: {} / {} ***********************\n\n'
            .format(p_idx, p_count))
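A typical invocation would pass configs produced by the project's config_builder along with a checkpoint index. The values below are placeholders for illustration, not taken from the source:

# Hypothetical call; paths and indices are placeholders
inference(model_config, eval_config, dataset_config,
          base_dir='/path/to/dataset/',
          ckpt_indices=[120],
          additional_cls=False,
          start_perspective=0)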
Example #10
import os

import config as cfg
import constants as const

# For all the alternate perspectives
for entity_str in const.valid_perspectives():
    perspect_dir = os.path.join(cfg.ALT_PERSP_DIR, entity_str)

    velo_dir = perspect_dir + '/velodyne'

    # Do this for every sample index
    velo_files = os.listdir(velo_dir)

    for file in velo_files:
        idx = int(os.path.splitext(file)[0])

        if idx < cfg.MIN_IDX or idx > cfg.MAX_IDX:
            continue

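        # Print the perspective ID once the first in-range sample is found,
        # then move on to the next perspective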
        print(entity_str)
        break