Example #1
def create_callbacks(model, prediction_model, validation_generator, cfg):
    """ Creates the callbacks to use during training.

    :param model:                   <keras.Model>           The base model.
    :param prediction_model:        <keras.Model>           The model that should be used for validation.
    :param validation_generator:    <Generator>             The generator for creating validation data.
    :param cfg:                     <Configuration>         Config class with config parameters.

    :return callbacks:              <list>                  A list of callbacks used for training.
    """
    callbacks = []

    # Add progbar
    progbar_callback = keras.callbacks.ProgbarLogger(count_mode='steps',
                                                     stateful_metrics=None)
    callbacks.append(progbar_callback)

    if cfg.tensorboard:
        tb_logdir = os.path.join(cfg.tb_logdir, cfg.model_name)
        makedirs(tb_logdir)
        tensorboard_callback = tf.keras.callbacks.TensorBoard(
            log_dir=tb_logdir,
            histogram_freq=0,
            batch_size=cfg.batchsize,
            write_graph=True,
            write_grads=False,
            write_images=True,
            embeddings_freq=0,
            embeddings_layer_names=None,
            embeddings_metadata=None)
        tensorboard_callback.set_model(model)

        callbacks.append(tensorboard_callback)
    else:
        tensorboard_callback = None

    if cfg.data_set == 'coco':
        from .utils.coco import CocoEval

        # use prediction model for evaluation
        evaluation = CocoEval(validation_generator,
                              tensorboard=tensorboard_callback)
    else:
        save_path = None
        if cfg.save_val_img_path:
            save_path = cfg.save_val_img_path + cfg.model_name
            os.makedirs(save_path)
        evaluation = Evaluate(validation_generator,
                              distance=cfg.distance_detection,
                              tensorboard=tensorboard_callback,
                              weighted_average=cfg.weighted_map,
                              render=False,
                              save_path=save_path,
                              workers=cfg.workers)
    evaluation = RedirectModel(evaluation, prediction_model)
    callbacks.append(evaluation)

    # save the model
    if cfg.save_model:
        # ensure the directory is created first; otherwise h5py will error after the first epoch.
        makedirs(cfg.save_model)
        checkpoint = keras.callbacks.ModelCheckpoint(
            os.path.join(cfg.save_model,
                         '{model_name}.h5'.format(model_name=cfg.model_name)),
            verbose=1,
            save_best_only=True,
            monitor='mAP',
            mode='max')
        checkpoint = RedirectModel(checkpoint, model)
        callbacks.append(checkpoint)

    callbacks.append(
        keras.callbacks.ReduceLROnPlateau(monitor='loss',
                                          factor=0.75,
                                          patience=2,
                                          verbose=1,
                                          mode='auto',
                                          min_delta=0.0001,
                                          cooldown=0,
                                          min_lr=1e-6))

    return callbacks
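
For orientation, a minimal sketch of how these callbacks might be wired into training follows. It is not part of the example above: `train_generator` and the `cfg.steps_per_epoch` / `cfg.epochs` attributes are assumptions about objects created elsewhere in the repository.

# Hypothetical call site (sketch only); model, training_model, prediction_model,
# the generators and cfg are assumed to come from the surrounding training script.
callbacks = create_callbacks(model, prediction_model, validation_generator, cfg)
training_model.fit_generator(generator=train_generator,
                             steps_per_epoch=cfg.steps_per_epoch,  # assumed config field
                             epochs=cfg.epochs,                     # assumed config field
                             verbose=1,
                             callbacks=callbacks)
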
Example #2
                                          class_specific_filter=False)

        all_detections = _get_detections(test_generator,
                                         prediction_model,
                                         distance=cfg.distance_detection,
                                         score_threshold=score_threshold,
                                         max_detections=100,
                                         save_path=None,
                                         render=args.render,
                                         distance_scale=100,
                                         workers=cfg.workers,
                                         cfg=cfg)
        all_annotations = _get_annotations(test_generator)

        pickle_path = './saved_models/detection_pickles'
        makedirs(pickle_path)
        with open(os.path.join(pickle_path, cfg.model_name), 'wb') as fp:
            pickle.dump([all_detections, all_annotations], fp)

    iou_threshold = 0.5
    dist_list_all = np.zeros((0, ))
    dist_errors_all = np.zeros((0, ))

    average_precisions = {}
    recalls = {}
    precisions = {}
    dist_lists = {}
    dist_error_lists = {}
    unoccured_labels = []

    for label in range(test_generator.num_classes()):
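
For context, the per-label loop in this style of evaluation typically matches each detection to the best-overlapping ground-truth box at `iou_threshold` and then derives recall, precision and average precision from the accumulated true/false positives. The following is a rough sketch of that logic, not the original code: `compute_overlap` and `_compute_ap` are assumed helpers and the array layouts are illustrative.

import numpy as np

def average_precision_for_label(all_detections, all_annotations, label,
                                iou_threshold=0.5):
    # all_detections[i][label]: (N, 5) array of [x1, y1, x2, y2, score] per image i
    # all_annotations[i][label]: (M, 4) array of ground-truth boxes per image i
    scores, true_positives, num_annotations = [], [], 0
    for i in range(len(all_annotations)):
        detections = all_detections[i][label]
        annotations = all_annotations[i][label]
        num_annotations += annotations.shape[0]
        matched = set()
        for d in detections:
            scores.append(d[4])
            if annotations.shape[0] == 0:
                true_positives.append(0)
                continue
            # compute_overlap: assumed IoU helper returning pairwise overlaps
            overlaps = compute_overlap(np.expand_dims(d[:4], axis=0), annotations)[0]
            best = np.argmax(overlaps)
            if overlaps[best] >= iou_threshold and best not in matched:
                true_positives.append(1)
                matched.add(best)
            else:
                true_positives.append(0)
    # rank detections by descending score, then accumulate precision / recall
    order = np.argsort(-np.asarray(scores))
    tp = np.cumsum(np.asarray(true_positives)[order])
    fp = np.cumsum(1 - np.asarray(true_positives)[order])
    recall = tp / max(num_annotations, 1)
    precision = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
    return _compute_ap(recall, precision)  # _compute_ap: assumed AP-from-PR helper
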
Example #3
def create_models(backbone_retinanet,
                  num_classes,
                  weights,
                  multi_gpu=0,
                  freeze_backbone=False,
                  distance=False,
                  distance_alpha=1.0,
                  lr=1e-5,
                  cfg=None,
                  inputs=(None, None, 3)):
    """ Creates three models (model, training_model, prediction_model).

    :param backbone_retinanet:      <func>              A function that creates a retinanet model with the given backbone
    :param num_classes:             <int>               The number of classes to train
    :param weights:                 <keras.Weights>     The weights to load into the model
    :param multi_gpu:               <int>               The number of GPUs to use for training
    :param freeze_backbone:         <bool>              If True, disables learning for the backbone
    :param distance:                <bool>              If True, distance detection is enabled
    :param distance_alpha:          <float>             Weighted loss factor for distance loss
    :param lr:                      <float>             Learning rate for network training
    :param cfg:                     <Configuration>     Config class with config parameters
    :param inputs:                  <tuple>             Input shape for neural network

    :return model:                  <keras.Model>       The base model. This is also the model that is saved in snapshots.
    :return training_model:         <keras.Model>       The training model. If multi_gpu=0, this is identical to model.
    :return prediction_model:       <keras.Model>       The model wrapped with utility functions to perform object detection 
                                                        (applies regression values and performs NMS).
    """

    modifier = freeze_model if freeze_backbone else None

    # load anchor parameters, or pass None (so that defaults will be used)
    if 'small' in cfg.anchor_params:
        anchor_params = AnchorParameters.small
        num_anchors = AnchorParameters.small.num_anchors()
    else:
        anchor_params = None
        num_anchors = None

    # Keras recommends initialising a multi-gpu model on the CPU to ease weight sharing, and to prevent OOM errors.
    # optionally wrap in a parallel model
    if multi_gpu > 1:
        from keras.utils import multi_gpu_model

        with tf.device('/cpu:0'):
            model = model_with_weights(
                backbone_retinanet(num_classes,
                                   num_anchors=num_anchors,
                                   modifier=modifier,
                                   inputs=inputs,
                                   distance=distance),
                weights=weights,
                skip_mismatch=True,
                config=copy.deepcopy(cfg),
                num_classes=num_classes)

        training_model = multi_gpu_model(model, gpus=multi_gpu)
    else:
        model = model_with_weights(backbone_retinanet(num_classes,
                                                      num_anchors=num_anchors,
                                                      modifier=modifier,
                                                      inputs=inputs,
                                                      distance=distance,
                                                      cfg=cfg),
                                   weights=weights,
                                   skip_mismatch=True,
                                   config=copy.deepcopy(cfg),
                                   num_classes=num_classes)
        training_model = model

    try:
        from keras.utils import plot_model
        # Write the keras model plot into a file
        plot_path = os.path.join(cfg.tb_logdir, cfg.model_name)
        makedirs(plot_path)
        plot_model(training_model,
                   to_file=(os.path.join(plot_path, cfg.network) + '.png'),
                   show_shapes=True)
    except Exception:
        # TODO: Catch the particular exceptions
        print(traceback.format_exc())
        print(sys.exc_info()[2])

    # make prediction model
    prediction_model = retinanet_bbox(
        model=model,
        anchor_params=anchor_params,
        score_thresh_train=cfg.score_thresh_train,
        class_specific_filter=cfg.class_specific_nms)

    # compile model
    if distance:
        training_model.compile(
            loss={
                'regression': losses.smooth_l1(),
                'classification': losses.focal(),
                'distance': losses.smooth_l1(alpha=distance_alpha)
            },
            optimizer=keras.optimizers.Adam(lr=lr, clipnorm=0.001))
    else:
        training_model.compile(
            loss={
                'regression': losses.smooth_l1(),
                'classification': losses.focal(),
            },
            optimizer=keras.optimizers.Adam(lr=lr, clipnorm=0.001))

    return model, training_model, prediction_model
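
A rough sketch of how this factory might be called from the training script, combined with `create_callbacks` from Example #1. The backbone factory and most `cfg` attribute names below are assumptions, not part of the example.

# Hypothetical call site (sketch only); backbone, generators and cfg come from
# the surrounding training script, and the flagged cfg attribute names are assumed.
model, training_model, prediction_model = create_models(
    backbone_retinanet=backbone.retinanet,     # assumed backbone factory
    num_classes=train_generator.num_classes(),
    weights=weights,
    multi_gpu=cfg.multi_gpu,                   # assumed config field
    freeze_backbone=cfg.freeze_backbone,       # assumed config field
    distance=cfg.distance_detection,
    distance_alpha=cfg.distance_alpha,         # assumed config field
    lr=cfg.learning_rate,                      # assumed config field
    cfg=cfg)

callbacks = create_callbacks(model, prediction_model, validation_generator, cfg)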