Example #1
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("-e", "--expected_rois_file", type=str, required=True)
    parser.add_argument("-a", "--actual_rois_file", type=str, required=True)
    parser.add_argument("-i", "--images_folder", type=str, required=False)
    parser.add_argument("-o", "--result_file", type=str, required=False)
    args = parser.parse_args()

    expected_rois_file = args.expected_rois_file
    actual_rois_file = args.actual_rois_file
    images_folder = args.images_folder

    expected_metadata = proto_api.read_metadata(expected_rois_file)
    actual_metadata = proto_api.read_metadata(actual_rois_file)

    random.seed(1337)

    expected_dictionary = proto_api.create_metadata_dictionary(
        expected_metadata, True)
    actual_dictionary = proto_api.create_metadata_dictionary(
        actual_metadata, False)
    # smallest ROI side, in pixels, taken into account by the statistics
    min_size = 25
    statistics_dict = get_model_statistics(expected_dictionary,
                                           actual_dictionary, images_folder,
                                           min_size)
    output_statistics(statistics_dict, args.result_file)
Example #2
def main():
    log_util.config(__file__)
    logger = logging.getLogger(__name__)
    # parse arguments
    args = sys.argv[1:]
    args = parse_args(args)
    # get metadata
    expected_metadata = meta.read_metadata(args.expected_rois_file)
    actual_metadata = meta.read_metadata(args.actual_rois_file)
    # calculate best thresholds
    best_thresholds = get_thresholds(
        meta.create_metadata_dictionary(expected_metadata, True),
        meta.create_metadata_dictionary(actual_metadata, True))
    io_utils.json_dump(best_thresholds, args.result_file)
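io_utils.json_dump is a project-specific helper; the standard-library json module gives the same effect. A minimal sketch, assuming best_thresholds is a plain dict of floats (the class names and output file name below are illustrative):

import json

def json_dump(data, result_file):
    # write the data as pretty-printed JSON, mirroring how io_utils.json_dump is used above
    with open(result_file, "w") as f:
        json.dump(data, f, indent=4, sort_keys=True)

json_dump({"stop_sign": 0.62, "speed_limit": 0.55}, "best_thresholds.json")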
Example #3
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--input_path", type=str, required=True)
    parser.add_argument("-o", "--output_path", type=str, required=True)
    args = parser.parse_args()

    dirs = ["trainval/", "test/"]
    dirs = [args.output_path + d for d in dirs]
    utils.make_dirs(dirs)

    conf = configuration.load_configuration()
    images = utils.collect_images(os.path.dirname(args.input_path))

    # reproducible datasets
    random.seed(1337)
    random.shuffle(images)

    rois = roi_metadata.read_metadata(args.input_path)

    features_train = set(
        os.path.basename(i)
        for i in images[0:int(len(images) * conf.split_ratio)])
    generate_dataset(rois, os.path.dirname(args.input_path), features_train,
                     dirs[0])

    features_test = set(
        os.path.basename(i)
        for i in images[int(len(images) * conf.split_ratio):])
    generate_dataset(rois, os.path.dirname(args.input_path), features_test,
                     dirs[1])
Example #4
def load_valid_rois(input_path):
    """
    Creates a valid metadata by selecting only the images and rois in which all the rois in an image meet the size
    condition
    :param input_path: rois path
    :return:
    """

    metadata = roi_metadata.read_metadata(input_path)
    valid_metadata = orbb_metadata_pb2.ImageSet()
    valid_metadata.name = ""
    for image_rois in metadata.images:
        current_image = os.path.dirname(
            input_path) + "/" + image_rois.metadata.image_path
        if not os.path.exists(current_image):
            continue

        width, height = image.get_resolution(current_image)

        if not image.valid_image(width, height, conf.ratio, conf.epsilon):
            logger = logging.getLogger(__name__)
            logger.info("invalid image because aspect ratio {} {} {}".format(
                width, height, image_rois.metadata.image_path))
            continue

        scaling_factor, width_crop_size = image.get_crop_scale(width, height)
        # get_crop_scale returns width_crop_size at the scaled-down resolution,
        # so convert it back to original-image pixels
        width_crop_size = int(width_crop_size * scaling_factor)
        image_min_size = scaling_factor * conf.min_size

        valid_rois = []
        for roi in image_rois.rois:

            roi.rect.br.row = min(roi.rect.br.row, height - 1)
            roi.rect.br.col = min(roi.rect.br.col, width - width_crop_size - 1)
            roi.rect.tl.col = max(roi.rect.tl.col, width_crop_size)

            r_width = roi.rect.br.col - roi.rect.tl.col
            r_height = roi.rect.br.row - roi.rect.tl.row
            if r_width >= image_min_size and r_height >= image_min_size:
                valid_roi = orbb_metadata_pb2.Roi()
                valid_roi.rect.CopyFrom(roi.rect)
                valid_roi.algorithm = "DNN"
                valid_roi.manual = False
                valid_roi.type = roi.type
                valid_roi.validation = roi.validation
                valid_rois.append(valid_roi)

        if valid_rois:
            valid_image_rois = valid_metadata.images.add()
            valid_image_rois.rois.extend(valid_rois)
            valid_image_rois.metadata.image_path = image_rois.metadata.image_path
            valid_image_rois.metadata.trip_id = ""
            valid_image_rois.metadata.region = ""
            valid_image_rois.metadata.image_index = -1
    return valid_metadata
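The size filter above clamps each ROI to the horizontally cropped image area before measuring it. The same check on plain integers, with illustrative crop and size values:

def roi_is_valid(tl_col, tl_row, br_col, br_row, width, height,
                 width_crop_size, image_min_size):
    # clamp the ROI to the cropped image bounds, as load_valid_rois does
    br_row = min(br_row, height - 1)
    br_col = min(br_col, width - width_crop_size - 1)
    tl_col = max(tl_col, width_crop_size)
    # keep the ROI only if both clamped sides reach the minimum size
    return (br_col - tl_col >= image_min_size and
            br_row - tl_row >= image_min_size)

print(roi_is_valid(400, 200, 480, 290, width=2592, height=1944,
                   width_crop_size=324, image_min_size=50))  # True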
Example #5
def main():

    log_util.config(__file__)
    logger = logging.getLogger(__name__)
    logger.info('Dataset Augmentation')

    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--input_path",
                        type=str, required=True)
    parser.add_argument("-o", "--output_path",
                        type=str, required=True)

    args = parser.parse_args()
    utils.make_dirs([args.output_path])
    threads_number = int(multiprocessing.cpu_count() / 2)
    metadata = proto_api.read_metadata(args.input_path)
    output_metadata = augment(
        metadata, os.path.dirname(args.input_path), threads_number, args.output_path)
    output_metadata.name = ""
    proto_api.serialize_metadata(output_metadata, args.output_path)
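augment itself is project-specific, and whether it uses threads or processes is not shown here; one common pattern for a worker count like threads_number is a multiprocessing.Pool over the images. A minimal sketch under that assumption, with a placeholder worker:

import multiprocessing

def augment_one(image_path):
    # placeholder worker: a real implementation would load, transform and save the image
    return image_path + " augmented"

def augment_all(image_paths, workers):
    with multiprocessing.Pool(workers) as pool:
        return pool.map(augment_one, image_paths)

if __name__ == "__main__":
    print(augment_all(["a.jpg", "b.jpg"], workers=2))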
Example #6
def load_all_rois(input_path, roi_filter=dummy_roi_filter):
    """
    Loads rois that pass the roi_filter
    :param input_path: path to metadata roi
    :param roi_filter: filters false positives or true positives
    :return: selected rois
    """
    metadata = roi_metadata.read_metadata(input_path)
    rois = defaultdict(list)
    for image in metadata.images:
        current_image = os.path.normpath(
            os.path.dirname(input_path) + "/" + image.metadata.image_path)
        if not os.path.exists(current_image):
            continue

        for roi in image.rois:
            if roi_filter(roi):
                continue
            rois[current_image].append(Roi(roi))
    return rois
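roi_filter is any callable that returns True for ROIs to skip. A hypothetical usage, assuming the ROI objects expose the manual flag seen in example #4 (the input path below is illustrative):

def skip_manual_rois(roi):
    # skip manually annotated ROIs; keep everything else
    return roi.manual

rois_per_image = load_all_rois("metadata/rois.bin", roi_filter=skip_manual_rois)
for image_path, image_rois in rois_per_image.items():
    print(image_path, len(image_rois))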
Example #7
 def __init__(self,
              generator,
              ground_truth_proto_file,
              train_proto_file,
              resolution=2592,
              max_number_of_images=100,
              roi_min_side_size=25,
              lowest_score_threshold=0.5):
     self.generator = generator
     self.ground_truth_proto_file = ground_truth_proto_file
     self.rois_labels = RoisLabels(train_proto_file)
     self.resolution = resolution
     self.lowest_score_threshold = lowest_score_threshold
     self.images_folder = os.path.dirname(
         os.path.abspath(ground_truth_proto_file))
     self.max_number_of_images = max_number_of_images
     self.logger = logging.getLogger(__name__)
     self.expected_metadata = meta.read_metadata(
         self.ground_truth_proto_file)
     self.expected_dict = meta.create_metadata_dictionary(
         self.expected_metadata, True)
     self.roi_min_side_size = roi_min_side_size
     super().__init__()
Example #8
 def __get_rois_dict_from_file_name(self):
     roi_metadata = meta.read_metadata(self.rois_file_name)
     rois_dict = meta.create_metadata_dictionary(roi_metadata)
     return rois_dict