def main():
    log_util.config(__file__)
    logger = logging.getLogger(__name__)

    # parse arguments
    args = sys.argv[1:]
    args = parse_args(args)

    # get metadata
    expected_metadata = meta.read_metadata(args.expected_rois_file)
    actual_metadata = meta.read_metadata(args.actual_rois_file)

    # calculate best thresholds
    best_thresholds = get_thresholds(
        meta.create_metadata_dictionary(expected_metadata, True),
        meta.create_metadata_dictionary(actual_metadata, True))
    io_utils.json_dump(best_thresholds, args.result_file)
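# A hypothetical parse_args for the script above, inferred from the attributes
# the code reads (expected_rois_file, actual_rois_file, result_file); the flag
# spellings and help texts are assumptions, not the project's actual parser.
import argparse

def parse_args(args):
    parser = argparse.ArgumentParser()
    parser.add_argument("--expected_rois_file", type=str, required=True,
                        help="metadata with ground-truth ROIs")
    parser.add_argument("--actual_rois_file", type=str, required=True,
                        help="metadata with predicted ROIs")
    parser.add_argument("--result_file", type=str, required=True,
                        help="where to dump the per-class thresholds as JSON")
    return parser.parse_args(args)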
def main():
    log_util.config(__file__)
    logger = logging.getLogger(__name__)
    logger.info('Dataset Augmentation')

    # parse arguments
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--input_path", type=str, required=True)
    parser.add_argument("-o", "--output_path", type=str, required=True)
    args = parser.parse_args()

    utils.make_dirs([args.output_path])

    # augment in parallel on half of the available cores
    threads_number = multiprocessing.cpu_count() // 2
    metadata = proto_api.read_metadata(args.input_path)
    output_metadata = augment(
        metadata, os.path.dirname(args.input_path), threads_number,
        args.output_path)
    output_metadata.name = ""
    proto_api.serialize_metadata(output_metadata, args.output_path)
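# A minimal sketch of one way augment() could use its threads_number argument,
# assuming per-ROI work is distributed over a multiprocessing pool. The worker
# below and the metadata.rois field are illustrative assumptions, not the
# project's actual API.
import multiprocessing

def _augment_one(task):
    roi, base_dir, output_path = task
    # read the source image for this roi, apply flips/crops/color jitter,
    # and write the augmented copies under output_path
    return roi

def augment(metadata, base_dir, threads_number, output_path):
    tasks = [(roi, base_dir, output_path) for roi in metadata.rois]
    with multiprocessing.Pool(processes=threads_number) as pool:
        pool.map(_augment_one, tasks)
    return metadata  # the real function builds and returns new output metadata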
def main():
    log_util.config(__file__)
    logger = logging.getLogger(__name__)

    # parse arguments
    args = sys.argv[1:]
    args = __parse_args(args)

    rois_labels = RoisLabels(args.train_meta_file)

    # 'SAME' applies one global threshold to every class; otherwise load the
    # per-class thresholds from the given JSON file
    if args.threshold_file == 'SAME':
        score_threshold_per_class = {
            class_label: args.lowest_score_threshold
            for class_label in rois_labels.classes.keys()
        }
    else:
        score_threshold_per_class = io_utils.json_load(args.threshold_file)
    logger.info("Score thresholds: {}".format(score_threshold_per_class))

    model = get_model_for_pred(args)
    predict_one_folder(args.input_images_path, args.output_images_path,
                       rois_labels, model, score_threshold_per_class)
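# Hypothetical contents of a threshold file accepted by io_utils.json_load,
# shown as the equivalent Python dict. It mirrors the shape built in the
# 'SAME' branch above (class label -> minimum detection score); the class
# names and values are placeholders.
score_threshold_per_class = {
    "class_a": 0.40,
    "class_b": 0.55,
}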
def main():
    log_util.config(__file__)
    logger = logging.getLogger(__name__)
    logger.info('Prepare segmentation')

    # parse arguments
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--input_path", type=str, required=True)
    parser.add_argument("-o", "--output_path", type=str, required=True)
    args = parser.parse_args()

    rois = dataset.load_all_rois(args.input_path, dataset.no_false_positives)

    # create the output directory layout
    utils.make_dirs([args.output_path])
    dirs = [args.output_path + d for d in ["/images/", "/labels/"]]
    utils.make_dirs(dirs)

    # generate the feature images and their matching label images
    do_work(dataset.FeatureGenerator(args.output_path + "/images/"), rois)
    do_work(dataset.LabelGenerator(args.output_path + "/labels/"), rois)
def main():
    log_util.config(__file__)
    logger = logging.getLogger(__name__)
    logger.info('Prepare classification')

    # parse arguments
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--input_path", type=str, required=True)
    parser.add_argument("-o", "--output_path", type=str, required=True)
    args = parser.parse_args()

    # one output directory per class id, plus one for false positives
    conf = configuration.load_configuration()
    dirs = [str(v) for v in conf.type2class_id.values()] + ["false_pos"]
    dirs = [args.output_path + "/" + d for d in dirs]
    utils.make_dirs(dirs)

    # crop the true-positive ROIs first, then the false-positive ones
    rois = dataset.load_all_rois(args.input_path, dataset.no_false_positives)
    generate_rois(os.path.dirname(args.input_path), rois, args.output_path)
    rois = dataset.load_all_rois(args.input_path, dataset.only_false_positives)
    generate_rois(os.path.dirname(args.input_path), rois, args.output_path)
def main():
    log_util.config(__file__)
    logger = logging.getLogger(__name__)
    logger.info('Clean dataset')

    # parse arguments
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--input_path", type=str, required=True)
    parser.add_argument("-o", "--output_path", type=str, required=True)
    args = parser.parse_args()

    if not os.path.exists(args.output_path):
        os.makedirs(args.output_path)

    # keep only the valid ROIs and write them in both binary and text form
    metadata = dataset.load_valid_rois(args.input_path)
    with open(args.output_path + "/rois.bin", "wb") as f_meta:
        f_meta.write(metadata.SerializeToString())
    with open(args.output_path + "/rois.txt", "w") as f_meta:
        f_meta.write(str(metadata))
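# A hypothetical read-back of the two files written above, assuming metadata
# is a protobuf message; the generated module and message names below are
# placeholders for the project's actual ones.
from google.protobuf import text_format
from rois_pb2 import Metadata  # placeholder generated protobuf module

with open("rois.bin", "rb") as f:
    binary_metadata = Metadata()
    binary_metadata.ParseFromString(f.read())

with open("rois.txt", "r") as f:
    text_metadata = text_format.Parse(f.read(), Metadata())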
def main(args=None):
    log_util.config(__file__)
    logger = logging.getLogger(__name__)

    # parse arguments
    if args is None:
        args = sys.argv[1:]
    args = parse_args(args)

    # make sure keras is the minimum required version
    check_keras_version()

    # optionally choose specific GPU
    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    keras.backend.tensorflow_backend.set_session(get_session())

    # create the generators
    train_generator, validation_generator = create_generators(args)

    # pick the retinanet constructor that matches the requested backbone
    if 'resnet' in args.backbone:
        from keras_retinanet.models.resnet import resnet_retinanet as retinanet, custom_objects, download_imagenet
    elif 'mobilenet' in args.backbone:
        from keras_retinanet.models.mobilenet import mobilenet_retinanet as retinanet, custom_objects, download_imagenet
    elif 'vgg' in args.backbone:
        from keras_retinanet.models.vgg import vgg_retinanet as retinanet, custom_objects, download_imagenet
    elif 'densenet' in args.backbone:
        from keras_retinanet.models.densenet import densenet_retinanet as retinanet, custom_objects, download_imagenet
    else:
        raise NotImplementedError(
            "Backbone '{}' not implemented.".format(args.backbone))

    # create the model
    if args.snapshot is not None:
        logger.info('Loading model, this may take a second...')
        model = keras.models.load_model(args.snapshot,
                                        custom_objects=custom_objects)
        training_model = model
        prediction_model = model
    else:
        weights = args.weights
        # default to imagenet if nothing else is specified
        if weights is None and args.imagenet_weights:
            weights = download_imagenet(args.backbone)

        logger.info('Creating model, this may take a second...')
        model, training_model, prediction_model = create_models(
            backbone_retinanet=retinanet,
            backbone=args.backbone,
            num_classes=train_generator.num_classes(),
            weights=weights,
            multi_gpu=args.multi_gpu,
            freeze_backbone=args.freeze_backbone)

    # summary() prints and returns None, so route its lines through the logger
    model.summary(print_fn=logger.info)

    # this lets the generator compute backbone layer shapes using the actual
    # backbone model
    if 'vgg' in args.backbone or 'densenet' in args.backbone:
        compute_anchor_targets = functools.partial(
            anchor_targets_bbox, shapes_callback=make_shapes_callback(model))
        train_generator.compute_anchor_targets = compute_anchor_targets
        if validation_generator is not None:
            validation_generator.compute_anchor_targets = compute_anchor_targets

    # create the callbacks
    callbacks = create_callbacks(
        model,
        training_model,
        prediction_model,
        validation_generator,
        args,
    )

    # start training
    training_model.fit_generator(generator=train_generator,
                                 steps_per_epoch=args.steps,
                                 epochs=args.epochs,
                                 verbose=1,
                                 callbacks=callbacks,
                                 workers=4)
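# keras-retinanet training scripts of this TF1 era conventionally define
# get_session() as a TensorFlow session with GPU memory growth enabled; this
# is a sketch of that common pattern, not necessarily this repository's exact
# implementation.
import tensorflow as tf

def get_session():
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    return tf.Session(config=config)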
    def detector(x):
        return detect_batch(self.net_segm, x)

    batch_reader = batch.BatchReader(
        images_path, self.transformer_segm,
        network_setup.get_batch_size(self.net_segm))
    batch_handler = BatchHandler(detector, classifier)
    batch_processor = batch.BatchProcessor(batch_handler)

    # read and process batches on separate threads
    batch_reader.start()
    batch_processor.start()
    batch_reader.join()
    batch_processor.join()
    return batch_handler.get_result()


if __name__ == "__main__":
    log_util.config(__file__)

    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--input_path", type=str, required=True)
    parser.add_argument("-o", "--output_path", type=str, required=False,
                        default="./")
    args = parser.parse_args()

    logger = logging.getLogger(__name__)
    if not utils.exists_paths(required_files):
        logger.error("Paths missing {}".format(required_files))
        sys.exit(-1)
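# A hypothetical skeleton of BatchHandler, inferred only from how it is used
# above: it is constructed from a detector and a classifier callable, is
# driven batch by batch by batch.BatchProcessor, and exposes the accumulated
# output via get_result(). The handle() method name is an assumption.
class BatchHandler:
    def __init__(self, detector, classifier):
        self.detector = detector
        self.classifier = classifier
        self.results = []

    def handle(self, image_batch):
        # segment/detect first, then classify the resulting detections
        detections = self.detector(image_batch)
        self.results.append(self.classifier(detections))

    def get_result(self):
        return self.results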