Example #1
def main(args=None):

    # parse arguments
    if args is None:
        args = sys.argv[1:]
    args = parse_args(args)

    # create object that stores backbone information
    backbone = models.backbone(args.backbone)

    # optionally load config parameters
    if args.config:
        args.config = read_config_file(args.config)

    # create the generators
    train_generator = create_generators(args, backbone.preprocess_image)

    # create the model
    if args.snapshot is not None:
        print('Loading model')
        model = models.load_model(args.snapshot, backbone_name=args.backbone)
        training_model = model
        anchor_params = None
        if args.config and 'anchor_parameters' in args.config:
            anchor_params = parse_anchor_parameters(args.config)
        prediction_model = retinanet_bbox(model=model,
                                          anchor_params=anchor_params)
    else:
        weights = args.weights
        if weights is None and args.imagenet_weights:
            weights = backbone.download_imagenet()
        print('Creating model')
        model, training_model, prediction_model = create_models(
            backbone_retinanet=backbone.retinanet,
            num_classes=train_generator.num_classes(),
            weights=weights,
            multi_gpu=args.multi_gpu,
            freeze_backbone=args.freeze_backbone,
            lr=args.lr,
            config=args.config)

    # this lets the generator compute backbone layer shapes using the actual backbone model
    if 'densenet' in args.backbone:
        train_generator.compute_shapes = make_shapes_callback(model)

    # create the callbacks
    callbacks = create_callbacks(model, training_model, prediction_model, args)

    # start training
    return training_model.fit_generator(
        generator=train_generator,
        steps_per_epoch=train_generator.size() // args.batch_size,
        epochs=args.epochs,
        verbose=1,
        callbacks=callbacks)
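Example #1 assumes args.config points at an INI file that read_config_file can parse. A minimal sketch of such a file and of the parsing round-trip follows; the /tmp path and the specific anchor values are illustrative, not taken from the example:

from keras_retinanet.utils.config import read_config_file, parse_anchor_parameters

# write a throwaway anchor config in the format keras-retinanet expects
with open('/tmp/anchors.ini', 'w') as f:
    f.write('[anchor_parameters]\n'
            'sizes   = 32 64 128 256 512\n'
            'strides = 8 16 32 64 128\n'
            'ratios  = 0.5 1 2\n'
            'scales  = 1 1.2 1.6\n')

config = read_config_file('/tmp/anchors.ini')
anchor_params = parse_anchor_parameters(config)
print(anchor_params.num_anchors())  # 3 ratios * 3 scales = 9 anchors per location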
Example #2
def get_joint_detection_model(model_path, model_type):
    """
    Input -> Model path for the object detection model
            Model type-> Foot or Hand
    Output -> Inference model for getting the predictions on test images
    
    """
    # config_file_path = '/usr/local/bin/config'
    if model_type == 'Foot_detection':
        # with open('/usr/local/bin/src/config.ini','w') as f:
        #     f.write('[anchor_parameters]\nsizes   = 32 64 128 256 512 1024\nstrides = 8 16 32 64 128 256\nratios  = 1.2 1.5 2 2.5 3\nscales  =1 1.5 2\n')

        foot_config = read_config_file('/usr/local/bin/Config files/config_foot.ini')
        model, training_model, prediction_model = create_models(
            backbone_retinanet=backbone('resnet50').retinanet,
            num_classes=5,
            weights=None,
            multi_gpu=False,
            freeze_backbone=True,
            lr=1e-3,
            config=foot_config)

        training_model.load_weights(model_path)
        infer_model = convert_model(
            training_model,
            anchor_params=parse_anchor_parameters(foot_config))

    elif model_type == 'Hand_detection':
        # with open('/usr/local/bin/src/config.ini','w') as f:
        #     f.write('[anchor_parameters]\nsizes   = 32 64 128 256 512 1024\nstrides = 8 16 32 64 128 256\nratios  = 1 1.5 2 2.5 3\nscales  = 1 1.2 1.6\n')

        hand_config = read_config_file('/usr/local/bin/Config files/config_hand.ini')
        model, training_model, prediction_model = create_models(
            backbone_retinanet=backbone('resnet50').retinanet,
            num_classes=6,
            weights=None,
            multi_gpu=False,
            freeze_backbone=True,
            lr=1e-3,
            config=hand_config)

        training_model.load_weights(model_path)
        infer_model = convert_model(
            training_model,
            anchor_params=parse_anchor_parameters(hand_config))
    
    else:
        raise ValueError('Unknown model_type: {}'.format(model_type))

    return infer_model
Example #3
    @classmethod
    def from_conf(cls,
                  conf_path: Optional[str] = None) -> 'AnchorParametersWrap':
        ret: Optional['AnchorParametersWrap'] = None
        if conf_path:
            config = read_config_file(conf_path)
            anchors = parse_anchor_parameters(config)
            ret = cls(anchors)
            log.info(f'loaded anchors from {conf_path}')
        else:
            log.info('using default anchors')
            ret = cls()

        return ret
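A hypothetical usage of the classmethod above; the config path is made up, and AnchorParametersWrap is the wrapper class the method belongs to:

anchors = AnchorParametersWrap.from_conf('anchors.ini')  # parses the file and logs the load
defaults = AnchorParametersWrap.from_conf()              # no path: falls back to default anchors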
Example #4
def test_parse_anchor_parameters():
    config = create_anchor_params_config()
    anchor_params_parsed = parse_anchor_parameters(config)

    sizes   = [32, 64, 128, 256, 512]
    strides = [8, 16, 32, 64, 128]
    ratios  = np.array([0.5, 1], dtype=keras.backend.floatx())
    scales  = np.array([1, 1.2, 1.6], dtype=keras.backend.floatx())

    assert sizes   == anchor_params_parsed.sizes
    assert strides == anchor_params_parsed.strides
    np.testing.assert_equal(ratios, anchor_params_parsed.ratios)
    np.testing.assert_equal(scales, anchor_params_parsed.scales)
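The create_anchor_params_config helper used by this test is not shown on this page. A plausible sketch, assuming it simply builds an in-memory configparser object matching the values asserted above:

import configparser

def create_anchor_params_config():
    # mirror the values the assertions above expect
    config = configparser.ConfigParser()
    config['anchor_parameters'] = {
        'sizes':   '32 64 128 256 512',
        'strides': '8 16 32 64 128',
        'ratios':  '0.5 1',
        'scales':  '1 1.2 1.6',
    }
    return config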
Example #5
    def __init__(self):
        # Read the classes and their respective indices from the classes.json file
        with open("classes.json", "r") as f:
            self.classes = {
                value["id"] - 1: value["name"]
                for value in json.load(f).values()
            }
        self.num_classes = 2
        self.colors_classes = [
            np.random.randint(0, 256, 3).tolist()
            for _ in range(self.num_classes)
        ]
        # Score threshold to filter detections (the library default is 0.05).
        self.score_threshold = 0.5
        # IoU threshold for counting a positive detection (the library default is 0.5).
        self.iou_threshold = 0.05
        # Max Detections per image (defaults to 100).
        self.max_detections = 100
        # Setup GPU device
        self.gpu = 0
        setup_gpu(self.gpu)
        # Rescale the image so the smallest side is min_side.
        self.image_min_side = 800
        # Rescale the image if the largest side is larger than max_side.
        self.image_max_side = 1333
        # make save path if it doesn't exist
        self.save_path = "/eveluation"
        if self.save_path is not None and not os.path.exists(self.save_path):
            os.makedirs(self.save_path)

        # Optionally load anchor parameters when the inference model was generated
        # alongside training with a custom anchor config, e.g.
        # self.config = read_config_file("path_to_config_file")
        self.config = None
        self.anchor_params = None
        if self.config and "anchor_parameters" in self.config:
            self.anchor_params = parse_anchor_parameters(self.config)

        # Backbone Network
        self.backbone_network = "resnet50"
        self.weight_dir = "snapshots"
        # Model to be evaluated
        self.model_to_load = os.path.join(self.weight_dir,
                                          "resnet50_csv_17.h5")
        # Convert the trained model to an inference model
        self.convert_model = True

        # load the model
        print("Loading model, this may take a second...")
        self.model = models.load_model(self.model_to_load,
                                       backbone_name=self.backbone_network)
        self.model = models.convert_model(self.model,
                                          anchor_params=self.anchor_params)
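A hypothetical usage sketch for the class above, here called Evaluator (the snippet does not show the class name or a test image, so both are assumptions):

import numpy as np
from keras_retinanet.utils.image import read_image_bgr, preprocess_image, resize_image

evaluator = Evaluator()
image = preprocess_image(read_image_bgr('test.jpg'))
image, scale = resize_image(image, min_side=evaluator.image_min_side,
                            max_side=evaluator.image_max_side)
boxes, scores, labels = evaluator.model.predict(np.expand_dims(image, axis=0))
boxes /= scale  # map detections back to original image coordinates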
Example #6
def main(args=None):
    # parse arguments
    if args is None:
        args = sys.argv[1:]
    args = parse_args(args)

    # make sure keras is the minimum required version
    check_keras_version()

    # optionally choose specific GPU
    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    keras.backend.tensorflow_backend.set_session(get_session())

    # optionally load config parameters
    if args.config:
        args.config = read_config_file(args.config)

    # create the generators
    generator = create_generators(args)

    # create the model
    print('Loading model, this may take a second...')
    model = models.load_model(args.snapshot, backbone_name=args.backbone)
    anchor_params = None
    if args.config and 'anchor_parameters' in args.config:
        anchor_params = parse_anchor_parameters(args.config)
    prediction_model = retinanet_bbox(model=model, anchor_params=anchor_params)

    # print model summary
    prediction_model.summary()

    # this lets the generator compute backbone layer shapes using the actual backbone model
    if 'vgg' in args.backbone or 'densenet' in args.backbone:
        generator.compute_shapes = make_shapes_callback(model)

    # inference
    boxes, scores, labels = prediction_model.predict_generator(
        generator=generator,
        verbose=1,
    )

    # generate COCO annotations
    create_coco_annotations(boxes, scores, labels, generator.image_ids)
Example #7
def main(args=None):
    from keras_retinanet.utils.config import parse_anchor_parameters
    from keras.utils import custom_object_scope

    # parse arguments
    if args is None:
        args = sys.argv[1:]
    args = parse_args(args)

    anchor_params = None
    if False:  # flip to True to override the default anchor parameters
        config = dict()
        config['anchor_parameters'] = dict()
        config['anchor_parameters']['sizes'] = '16 32 64 128 256 512'
        config['anchor_parameters']['strides'] = '8 16 32 64 128'
        config['anchor_parameters']['ratios'] = '0.1 0.5 1 2 4 8'
        config['anchor_parameters']['scales'] = '1 1.25 1.5 1.75'
        anchor_params = parse_anchor_parameters(config)

    # load and convert model
    with custom_object_scope({
            'AdamAccumulate': AdamAccumulate,
            'AccumOptimizer': Adam
    }):
        model = models.load_model(args.model_in, backbone_name=args.backbone)
        model = models.convert_model(
            model,
            nms=args.nms,
            class_specific_filter=args.class_specific_filter,
            max_detections=500,
            nms_threshold=0.3,
            score_threshold=0.01,
            anchor_params=anchor_params)

    # save model
    model.save(args.model_out)
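The custom_object_scope in Example #7 is needed because Keras resolves serialized class names through a registry: a model saved with a custom object can only be reloaded if that name is mapped back to a concrete class. A minimal self-contained sketch of the same pattern (the Double layer is hypothetical):

import keras
from keras.utils import custom_object_scope

class Double(keras.layers.Layer):
    # trivial custom layer: doubles its input
    def call(self, inputs):
        return inputs * 2

inp = keras.layers.Input(shape=(4,))
keras.models.Model(inp, Double()(inp)).save('double.h5')

with custom_object_scope({'Double': Double}):
    reloaded = keras.models.load_model('double.h5')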
Example #8
def main(args=None):
    import json
    with open(
            os.path.expanduser('~') + '/.maskrcnn-modanet/' +
            'savedvars.json') as f:
        savedvars = json.load(f)

    # parse arguments
    if args is None:
        print(
            '\n\n\nExample usage: maskrcnn-modanet train --epochs 15 --workers 0 --batch-size 1 coco\n\n\n'
        )
        args = ['-h']
    args = parse_args(args, savedvars)

    # make sure keras is the minimum required version
    check_keras_version()

    # create object that stores backbone information
    backbone = models.backbone(args.backbone)

    # optionally choose specific GPU
    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    keras.backend.tensorflow_backend.set_session(get_session())

    # optionally load config parameters
    if args.config:
        args.config = read_config_file(args.config)

    # create the generators
    train_generator, validation_generator = create_generators(args)

    # create the model
    if args.snapshot is not None:
        print('Loading model, this may take a second...')
        model = models.load_model(args.snapshot, backbone_name=args.backbone)
        training_model = model
        prediction_model = model
    else:
        weights = args.weights
        # default to imagenet if nothing else is specified
        if weights is None and args.imagenet_weights:
            weights = backbone.download_imagenet()

        anchor_params = None
        if args.config and 'anchor_parameters' in args.config:
            anchor_params = parse_anchor_parameters(args.config)

        print('Creating model, this may take a second...')
        model, training_model, prediction_model = create_models(
            backbone_retinanet=backbone.maskrcnn,
            num_classes=train_generator.num_classes(),
            weights=weights,
            freeze_backbone=args.freeze_backbone,
            class_specific_filter=args.class_specific_filter,
            anchor_params=anchor_params)

    # print model summary
    print(model.summary())

    # create the callbacks
    callbacks = create_callbacks(
        model,
        training_model,
        prediction_model,
        validation_generator,
        args,
    )

    # use multiprocessing if workers > 0
    use_multiprocessing = args.workers > 0

    # start training
    training_model.fit_generator(generator=train_generator,
                                 steps_per_epoch=args.steps,
                                 epochs=args.epochs,
                                 verbose=1,
                                 callbacks=callbacks,
                                 workers=args.workers,
                                 use_multiprocessing=use_multiprocessing,
                                 max_queue_size=args.max_queue_size)
    
Example #9
# note: 'args' and 'b' come from earlier in the original script;
# 'b' is presumably a backbone object, e.g. b = models.backbone('resnet50')
train_gen, valid_gen = create_generators(args, b.preprocess_image)

model, training_model, prediction_model = create_models(
    backbone_retinanet=b.retinanet,
    num_classes=train_gen.num_classes(),
    weights=None,
    multi_gpu=True,
    freeze_backbone=True,
    lr=1e-9,
    config=args.config)
 
training_model.load_weights("C:\\Users\\Pawan\\Documents\\ML\\snapshots12\\resnet50_csv_07.h5")

infer_model = convert_model(
    training_model,
    anchor_params=parse_anchor_parameters(read_config_file('C:\\Users\\Pawan\\Documents\\config.ini')))


def test_gen(image_ids, bs=2, size=672, test=True):
    imgs = []
    scale = None
    idx = 0
    if test:
        path = 'C:\\Users\\Pawan\\Downloads\\dataset_test_rgb\\rgb\\test\\'
    else:
        # note: the original snippet points both branches at the same test directory
        path = 'C:\\Users\\Pawan\\Downloads\\dataset_test_rgb\\rgb\\test\\'

    while idx < len(image_ids):
        if len(imgs) < bs:
            img, img_scale = resize_image(
                preprocess_image(read_image_bgr(path + image_ids[idx] + '.png')),
                min_side=size, max_side=size)
            imgs.append(img)
            if scale is None:
                scale = img_scale
            idx += 1
        else:
            # the original snippet is truncated after the scale check; a plausible
            # completion is to yield full batches with the shared rescaling factor
            yield np.array(imgs), scale
            imgs = []
    if imgs:
        yield np.array(imgs), scale
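A hypothetical driver for the generator above, pairing it with the infer_model created earlier in this example (the image IDs are made up):

for batch, scale in test_gen(['image_001', 'image_002'], bs=2, size=672):
    boxes, scores, labels = infer_model.predict_on_batch(batch)
    boxes /= scale  # undo the resize so boxes refer to the original image coordinates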
Example #10
def main(args=None):
    # parse arguments

    if args is None:
        args = sys.argv[1:]
    args = parse_args(args)

    # make sure keras is the minimum required version
    check_keras_version()

    # optionally choose specific GPU
    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    keras.backend.tensorflow_backend.set_session(get_session())

    # make save path if it doesn't exist
    if args.save_path is not None and not os.path.exists(args.save_path):
        os.makedirs(args.save_path)

    # optionally load config parameters
    if args.config:
        args.config = read_config_file(args.config)

    # create the generator
    generator = create_generator(args)

    # optionally load anchor parameters
    anchor_params = None
    if args.config and 'anchor_parameters' in args.config:
        anchor_params = parse_anchor_parameters(args.config)

    # load the model
    print('Loading model, this may take a second...')
    model = load_model(args.model, backbone_name=args.backbone)

    # optionally convert the model
    if args.convert_model:
        model = convert_model(model=model,
                              nms_threshold=args.nms_threshold,
                              score_threshold=args.nms_score,
                              max_detections=args.nms_detections,
                              anchor_params=anchor_params)

    # print model summary
    # print(model.summary())

    # start evaluation
    if args.dataset_type == 'coco':
        from ..utils.coco_eval import evaluate_coco
        evaluate_coco(generator, model, args.score_threshold)
    else:
        average_precisions, pr_curves = evaluate(
            generator,
            model,
            iou_threshold=args.iou_threshold,
            score_threshold=args.score_threshold,
            max_detections=args.max_detections,
            save_path=args.save_path)

        # print evaluation
        total_instances = []
        precisions = []
        f1_scores = []
        mean_ious = []
        for label, (average_precision,
                    num_annotations) in average_precisions.items():
            #print('{:.0f} instances of class'.format(num_annotations),
            #      generator.label_to_name(label), 'with average precision: {:.4f}'.format(average_precision))
            total_instances.append(num_annotations)
            precisions.append(average_precision)
            f1_scores.append(max(pr_curves[label]['f1_score']))
            mean_ious.append(np.mean(pr_curves[label]['average_iou']))

        if sum(total_instances) == 0:
            print('No test instances found.')
            return

        #print('mAP using the weighted average of precisions among classes: {:.4f}'.format(sum([a * b for a, b in zip(total_instances, precisions)]) / sum(total_instances)))
        #print('mAP: {:.4f}'.format(sum(precisions) / sum(x > 0 for x in total_instances)))

        mean_ap = sum(precisions) / sum(x > 0 for x in total_instances)
        mean_f1 = sum(f1_scores) / sum(x > 0 for x in total_instances)
        mean_iou = sum(mean_ious) / sum(x > 0 for x in total_instances)

        for label in range(generator.num_classes()):
            class_label = generator.label_to_name(label)
            instances = int(total_instances[label])
            predictions = len(pr_curves[label]['precision'])
            true_positives = int(pr_curves[label]['TP'][-1]) if len(
                pr_curves[label]['TP']) > 0 else 0
            false_positives = int(pr_curves[label]['FP'][-1]) if len(
                pr_curves[label]['FP']) > 0 else 0

            print(
                '\nClass {}: Instances: {} | Predictions: {} | False positives: {} | True positives: {}'
                .format(class_label, instances, predictions, false_positives,
                        true_positives))

        print('mAP: {:.4f}'.format(mean_ap),
              'mF1-score: {:.4f}'.format(mean_f1),
              'mIoU: {:.4f}'.format(mean_iou))
        print(args.save_path)

        print('Saving results: ')
        with open(os.path.join(args.save_path, "result.txt"), "w+") as f:
            f.write('mAP: {:.4f}; '.format(mean_ap))
            f.write('mF1-score: {:.4f}; '.format(mean_f1))
            f.write('mIoU: {:.4f}'.format(mean_iou))

        # save stats
        if args.logs:
            makedirs(args.logs)
            np.save(os.path.join(args.logs, 'pr_curves'), pr_curves)
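The evaluation scripts on this page report mAP in two ways: weighted by per-class instance counts, and as a plain mean over classes that have at least one instance. A small worked example with made-up numbers:

total_instances = [10, 0, 30]    # ground-truth instances per class
precisions      = [0.80, 0.00, 0.60]

# weighted by instance count: (10*0.80 + 0*0.00 + 30*0.60) / 40 = 0.65
weighted_map = sum(a * b for a, b in zip(total_instances, precisions)) / sum(total_instances)

# plain mean over the two classes that have instances: (0.80 + 0.60) / 2 = 0.70
mean_ap = sum(precisions) / sum(x > 0 for x in total_instances)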
Example #11
def main(args=None, model_filename=None):
    # parse arguments
    if args is None:
        args = sys.argv[1:]
    args = parse_args(args)

    # make sure keras and tensorflow are the minimum required version
    check_keras_version()
    check_tf_version()

    # optionally choose specific GPU
    if args.gpu:
        setup_gpu(args.gpu)

    # make save path if it doesn't exist
    if args.save_path is not None and not os.path.exists(args.save_path):
        os.makedirs(args.save_path)

    # optionally load config parameters
    if args.config:
        args.config = read_config_file(args.config)

    # create the generator
    backbone = models.backbone(args.backbone)
    generator = create_generator(args, backbone.preprocess_image)

    # optionally load anchor parameters
    anchor_params = None
    if args.config and 'anchor_parameters' in args.config:
        anchor_params = parse_anchor_parameters(args.config)

    # load the model
    print('Loading model, this may take a second...')
    if args.continual_learning_model == 'dual_memory':  # Continual learning dual-memory modelling treatment
        base_models = LoadModels(args.historical_snapshots_folder,
                                 args.backbone, args.day_number)
        all_models = []
        for model in base_models:
            generator.compute_shapes = make_shapes_callback(model)
            if args.convert_model:
                model = models.convert_model(model,
                                             anchor_params=anchor_params)
            all_models.append(model)

        (average_precisions, inference_time, detections_per_model,
         final_detections) = evaluate_dual_memory_model(
             generator,
             all_models,
             iou_threshold=args.iou_threshold,
             score_threshold=args.score_threshold,
             max_detections=args.max_detections,
             save_path=args.save_path)

        # if a bbox_savepath is given, save the bounding box coordinates from the dual-memory model predictions:
        if args.bbox_savepath:
            detections_per_model = [[
                [class_predictions.tolist() for class_predictions in image]
                for image in model_predictions
            ] for model_predictions in detections_per_model]
            detections_with_filenames = {
                'final_detections': final_detections,
                'annotations': args.annotations,
                'detections_per_model': detections_per_model
            }
            with open(args.bbox_savepath, 'wt') as outf:
                json.dump(detections_with_filenames, outf)

            print("Finished dual memory model")
            print(average_precisions, inference_time)

    else:
        if model_filename is None:
            model_filename = args.model
        model = models.load_model(model_filename, backbone_name=args.backbone)

        generator.compute_shapes = make_shapes_callback(model)

        # optionally convert the model
        if args.convert_model:
            model = models.convert_model(model, anchor_params=anchor_params)

        # print model summary
        # print(model.summary())

        # start evaluation
        if args.dataset_type == 'coco':
            from ..utils.coco_eval import evaluate_coco
            evaluate_coco(generator, model, args.score_threshold)
        else:
            average_precisions, inference_time = evaluate(
                generator,
                model,
                iou_threshold=args.iou_threshold,
                score_threshold=args.score_threshold,
                max_detections=args.max_detections,
                save_path=args.save_path)

    # print evaluation
    total_instances = []
    precisions = []
    #labels = []
    for label, (average_precision,
                num_annotations) in average_precisions.items():
        print('{:.0f} instances of class'.format(num_annotations),
              generator.label_to_name(label),
              'with average precision: {:.4f}'.format(average_precision))
        #labels.append(label)
        total_instances.append(num_annotations)
        precisions.append(average_precision)

    if sum(total_instances) == 0:
        print('No test instances found.')
        return

    print('Inference time for {:.0f} images: {:.4f}'.format(
        generator.size(), inference_time))

    print('mAP using the weighted average of precisions among classes: {:.4f}'.
          format(
              sum([a * b for a, b in zip(total_instances, precisions)]) /
              sum(total_instances)))
    print('mAP: {:.4f}'.format(
        sum(precisions) / sum(x > 0 for x in total_instances)))

    #print(labels)
    print(precisions)
    print(total_instances)

    # Save mAP and other accuracy statistics to mAP_savepath:

    mAP = sum(precisions) / sum(x > 0 for x in total_instances)
    date = datetime.now().strftime("%Y%m%d%H%M")
    with open(args.mAP_savepath, 'a') as outf:
        outf.write(
            f"{date}, {mAP}, {precisions}, {total_instances}, {model_filename}, {args.continual_learning_model}"
            + "\n")
    return mAP
Example #12
    def __init__(self, batch_size, fusionnet_model_config, vision_model_config, translation_model_config, log_path):
        self.num_nodes = fusionnet_model_config['node_number']
        if 'fusionnet_layers' in fusionnet_model_config:
            self.fusionnet_layers = fusionnet_model_config['fusionnet_layers']
        else:
            self.fusionnet_layers = None
        if 'config' in fusionnet_model_config:
            fusionnet_model_config['fusionnet_params'] = read_config_file(fusionnet_model_config['config'])
            if 'layer_parameters' in fusionnet_model_config['fusionnet_params']:
                self.num_nodes = int(fusionnet_model_config['fusionnet_params']['layer_parameters']['num_nodes'])
                if self.fusionnet_layers is not None:
                    fusionnet_model_config['fusionnet_params']['layer_parameters']['layers'] = str(self.fusionnet_layers)
            if log_path is not None:
                write_config_file(fusionnet_model_config['fusionnet_params'],
                                  os.path.join(log_path, 'fusion_config.ini'))
        if self.fusionnet_layers is not None and isinstance(self.fusionnet_layers, str):
            self.fusionnet_layers = ast.literal_eval(self.fusionnet_layers)

        if vision_model_config is not None:
            if vision_model_config['name'] == 'retinanet':
                # load anchor parameters, or pass None (so that defaults will be used)
                anchor_params = None
                num_anchors = None
                if 'config' in vision_model_config:
                    vision_model_config['retinanet_params'] = read_config_file(vision_model_config['config'])
                    if 'anchor_parameters' in vision_model_config['retinanet_params']:
                        anchor_params = parse_anchor_parameters(vision_model_config['retinanet_params'])
                        num_anchors = anchor_params.num_anchors()
                    # write config to log directory
                    if log_path is not None:
                        write_config_file(vision_model_config['retinanet_params'],
                                          os.path.join(log_path, 'vision_config.ini'))
                vision_model_config['anchor_params'] = anchor_params
                vision_model_config['num_anchors'] = num_anchors
                # TODO (fabawi): will need to flip 'image-max-side' and 'image-min-side' if the images are horizontal; make it dynamic
                vision_model_input = keras.layers.Input(batch_shape=(batch_size, vision_model_config['image_min_side'], vision_model_config['image_max_side'], 3))
                self.vision_model = self.create_vision_retinanet(input=vision_model_input, **vision_model_config)
                # default to imagenet if nothing else is specified
                if vision_model_config['weights'] is None and vision_model_config['imagenet_weights']:
                    weights = vision_model_config['backbone'].download_imagenet()
                else:
                    weights = vision_model_config['weights']
                self.vision_model = self.model_with_weights(self.vision_model, weights, skip_mismatch=False)
                self.vision_model_config = vision_model_config

            if vision_model_config['name'] == 'simple_cnn':
                # TODO (fabawi): will need to flip 'image-max-side' and 'image-min-side' if the images are horizontal; make it dynamic
                vision_model_input = keras.layers.Input(batch_shape=(batch_size, vision_model_config['image_min_side'], vision_model_config['image_max_side'], 3))
                self.vision_model = self.create_vision_cnn(vision_model_input)
                self.vision_model_config = vision_model_config

        if translation_model_config is not None:
            if translation_model_config['name'] == 'transformer':
                if 'config' in translation_model_config:
                    translation_model_config['transformer_params'] = read_config_file(translation_model_config['config'])
                    translation_model_config['transformer_params']['init']['len_limit'] = str(translation_model_config['sequence_max_length'])
                else:
                    translation_model_config['transformer_params'] = {'init': {'len_limit': translation_model_config['sequence_max_length']}}
                # write config to log directory
                if log_path is not None:
                    write_config_file(translation_model_config['transformer_params'],
                                      os.path.join(log_path, 'language_translation_config.ini'))

                translation_model_inputs = [keras.layers.Input(batch_shape=(batch_size, translation_model_config['sequence_max_length'])),
                                             keras.layers.Input(batch_shape=(batch_size, translation_model_config['sequence_max_length']))]
                self.language_translation_model = self.create_language_translation_transformer(inputs=translation_model_inputs, **translation_model_config)
                self.language_translation_model_config = translation_model_config


        self.fusionnet_model = None
        self.fusionnet_model_config = fusionnet_model_config
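Across these examples, num_anchors() is simply len(ratios) * len(scales). A quick sanity check against keras-retinanet's default anchor parameters (3 ratios, 3 scales):

from keras_retinanet.utils.anchors import AnchorParameters

assert AnchorParameters.default.num_anchors() == 9  # 3 ratios * 3 scales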