Example #1
def test_network_instantiation(shapes_image_size, nb_channels,
                               shapes_nb_labels):
    """Test a simple semantic segmentation network instantiation

    """
    net = SemanticSegmentationNetwork(
        "ss_test",
        image_size=shapes_image_size,
        nb_channels=nb_channels,
        nb_labels=shapes_nb_labels,
    )
    m = Model(net.X, net.Y)
    input_shape = m.input_shape
    output_shape = m.output_shape
    assert len(input_shape) == 4
    assert input_shape[1:] == (
        shapes_image_size,
        shapes_image_size,
        nb_channels,
    )
    assert len(output_shape) == 4
    assert output_shape[1:] == (
        shapes_image_size,
        shapes_image_size,
        shapes_nb_labels,
    )
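
The `shapes_image_size`, `nb_channels` and `shapes_nb_labels` arguments (and the `mapillary_*` ones in the next example) are pytest fixtures defined elsewhere in the test suite; a minimal `conftest.py` sketch with purely illustrative values:

import pytest


@pytest.fixture
def shapes_image_size():
    # size in pixels of the square test images (illustrative value)
    return 64


@pytest.fixture
def nb_channels():
    # RGB images
    return 3


@pytest.fixture
def shapes_nb_labels():
    # number of output labels in the toy dataset (illustrative value)
    return 4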
Example #2
def test_unet_network_architecture(mapillary_image_size, nb_channels,
                                   mapillary_nb_labels):
    """Test a Unet architecture for semantic segmentation network

    """
    net = SemanticSegmentationNetwork(
        "ss_test",
        image_size=mapillary_image_size,
        nb_channels=nb_channels,
        nb_labels=mapillary_nb_labels,
    )
    m = Model(net.X, net.Y)
    input_shape = m.input_shape
    output_shape = m.output_shape
    assert len(input_shape) == 4
    assert input_shape[1:] == (
        mapillary_image_size,
        mapillary_image_size,
        nb_channels,
    )
    assert len(output_shape) == 4
    assert output_shape[1:] == (
        mapillary_image_size,
        mapillary_image_size,
        mapillary_nb_labels,
    )
Example #3
def init_model(
    problem, instance_name, image_size, nb_labels, dropout, network
):
    """Initialize a convolutional neural network with Keras API starting from a
    set of parameters

    Parameters
    ----------
    problem : str
        Type of problem to solve, either `feature_detection` or
        `semantic_segmentation`
    instance_name : str
        Name of the instance, for identification purposes
    image_size : int
        Image size, in pixels (height=width)
    nb_labels : int
        Number of output labels
    dropout : float
        Dropout rate
    network : str
        Network architecture

    Returns
    -------
    keras.models.Model
        Convolutional neural network
    """
    K.clear_session()
    if problem == "feature_detection":
        net = FeatureDetectionNetwork(
            network_name=instance_name,
            image_size=image_size,
            nb_labels=nb_labels,
            dropout=dropout,
            architecture=network,
        )
    elif problem == "semantic_segmentation":
        net = SemanticSegmentationNetwork(
            network_name=instance_name,
            image_size=image_size,
            nb_labels=nb_labels,
            dropout=dropout,
            architecture=network,
        )
    else:
        logger.error(
            (
                "Unrecognized model. Please enter 'feature_detection' "
                "or 'semantic_segmentation'."
            )
        )
        sys.exit(1)
    return Model(net.X, net.Y)
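
A minimal usage sketch for `init_model`; every value below is illustrative rather than taken from the project:

model = init_model(
    problem="semantic_segmentation",
    instance_name="semseg_demo",
    image_size=224,
    nb_labels=10,
    dropout=0.5,
    network="unet",
)
model.summary()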
Example #4
def get_trained_model(datapath, dataset, image_size, nb_labels):
    """Recover model weights stored on the file system, and assign them into
    the `model` structure

    Parameters
    ----------
    datapath : str
        Path of the data on the file system
    dataset : str
        Name of the dataset
    image_size : int
        Image size, in pixels (height=width)
    nb_labels : int
        Number of output labels

    Returns
    -------
    keras.models.Model
        Convolutional neural network
    """
    K.clear_session()
    net = SemanticSegmentationNetwork(
        network_name="semseg_postprocessing",
        image_size=image_size,
        nb_labels=nb_labels,
        dropout=1.0,
        architecture="unet",
    )
    model = Model(net.X, net.Y)
    output_folder = utils.prepare_output_folder(datapath, dataset, "semseg")
    checkpoint_filename = "best-model-{}-full.h5".format(image_size)
    checkpoint_full_path = os.path.join(output_folder, checkpoint_filename)
    if os.path.isfile(checkpoint_full_path):
        model.load_weights(checkpoint_full_path)
        logger.info("Model weights have been recovered from %s",
                    checkpoint_full_path)
    else:
        logger.info(("No available trained model for this image size"
                     " with optimized hyperparameters. The "
                     "inference will be done on an untrained model"))
    return model
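
A hedged usage sketch; the path, dataset name and sizes below are placeholders, and the expected directory layout is whatever `utils.prepare_output_folder` builds:

model = get_trained_model(
    datapath="./data", dataset="shapes", image_size=224, nb_labels=4
)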
Example #5
def get_trained_model(model_filepath, image_size, nb_labels):
    """Recover model weights stored on the file system, and assign them into
    the `model` structure

    Parameters
    ----------
    model_filepath : str
        Path of the model on the file system
    image_size : int
        Image size, in pixels (height=width)
    nb_labels : int
        Number of output labels

    Returns
    -------
    keras.models.Model
        Convolutional neural network
    """
    K.clear_session()
    net = SemanticSegmentationNetwork(
        network_name="semseg_postprocessing",
        image_size=image_size,
        nb_labels=nb_labels,
        dropout=1.0,
        architecture="unet",
    )
    model = Model(net.X, net.Y)
    if os.path.isfile(model_filepath):
        model.load_weights(model_filepath)
        logger.info("Model weights have been recovered from %s", model_filepath)
    else:
        logger.info(
            (
                "No available trained model for this image size"
                " with optimized hyperparameters. The "
                "inference will be done on an untrained model"
            )
        )
    return model
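
Once the weights are recovered, the model can run inference directly. A sketch assuming the default 3 input channels, with a random array standing in for a real, resized and [0, 1]-scaled image batch (the checkpoint path is a placeholder):

import numpy as np

model = get_trained_model("./best-model-224-full.h5", image_size=224, nb_labels=4)
batch = np.random.rand(1, 224, 224, 3)  # stand-in for a real image batch
predictions = model.predict(batch)  # shape: (1, 224, 224, nb_labels)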
Example #6
def run_model(
    train_generator,
    validation_generator,
    dl_model,
    output_folder,
    instance_name,
    image_size,
    nb_labels,
    nb_epochs,
    nb_training_image,
    nb_validation_image,
    batch_size,
    dropout,
    network,
    learning_rate,
    learning_rate_decay,
):
    """Run deep learning `dl_model` starting from training and validation data
    generators, depending on a range of hyperparameters

    Parameters
    ----------
    train_generator : generator
        Training data generator
    validation_generator : generator
        Validation data generator
    dl_model : str
        Name of the addressed research problem (either `featdet` or
        `semseg`)
    output_folder : str
        Name of the folder where the trained model will be stored on the
        file system
    instance_name : str
        Name of the instance
    image_size : int
        Size of images, in pixel (height=width)
    nb_labels : int
        Number of labels in the dataset
    nb_epochs : int
        Number of epochs during which models will be trained
    nb_training_image : int
        Number of images in the training dataset
    nb_validation_image : int
        Number of images in the validation dataset
    batch_size : int
        Number of images in each batch
    dropout : float
        Probability of keeping a neuron during the dropout phase
    network : str
        Neural network architecture (*e.g.* `simple`, `vgg`, `inception`)
    learning_rate : float
        Starting learning rate
    learning_rate_decay : float
        Learning rate decay

    Returns
    -------
    dict
        Dictionary that summarizes the instance and the corresponding model
        performance (measured by validation accuracy)
    """
    if dl_model == "featdet":
        net = FeatureDetectionNetwork(
            network_name=instance_name,
            image_size=image_size,
            nb_channels=3,
            nb_labels=nb_labels,
            architecture=network,
        )
        loss_function = "binary_crossentropy"
    elif dl_model == "semseg":
        net = SemanticSegmentationNetwork(
            network_name=instance_name,
            image_size=image_size,
            nb_channels=3,
            nb_labels=nb_labels,
            architecture=network,
        )
        loss_function = "categorical_crossentropy"
    else:
        logger.error("Unrecognized model: %s. Please choose amongst %s",
                     dl_model, AVAILABLE_MODELS)
        sys.exit(1)
    model = Model(net.X, net.Y)
    opt = Adam(lr=learning_rate, decay=learning_rate_decay)
    metrics = ["acc", iou, dice_coef]
    model.compile(loss=loss_function, optimizer=opt, metrics=metrics)

    # Model training
    steps = max(nb_training_image // batch_size, 1)
    val_steps = max(nb_validation_image // batch_size, 1)

    checkpoint_files = [
        item for item in os.listdir(output_folder)
        if "checkpoint-epoch" in item
    ]
    if len(checkpoint_files) > 0:
        model_checkpoint = max(checkpoint_files)
        # the epoch number is zero-padded to three digits in the filename
        trained_model_epoch = int(model_checkpoint[-6:-3])
        checkpoint_complete_path = os.path.join(output_folder,
                                                model_checkpoint)
        model.load_weights(checkpoint_complete_path)
        logger.info(
            "Model weights have been recovered from %s",
            checkpoint_complete_path,
        )
    else:
        logger.info(("No available checkpoint for this configuration. "
                     "The model will be trained from scratch."))
        trained_model_epoch = 0

    checkpoint_filename = os.path.join(output_folder,
                                       "checkpoint-epoch-{epoch:03d}.h5")
    checkpoint = callbacks.ModelCheckpoint(
        checkpoint_filename,
        monitor="val_loss",
        verbose=0,
        save_best_only=True,
        save_weights_only=False,
        mode="auto",
        period=1,
    )
    terminate_on_nan = callbacks.TerminateOnNaN()
    earlystop = callbacks.EarlyStopping(monitor="val_loss",
                                        patience=10,
                                        verbose=1,
                                        mode="max")
    csv_logger = callbacks.CSVLogger(os.path.join(output_folder,
                                                  "training_metrics.csv"),
                                     append=True)

    hist = model.fit_generator(
        train_generator,
        epochs=nb_epochs,
        initial_epoch=trained_model_epoch,
        steps_per_epoch=steps,
        validation_data=validation_generator,
        validation_steps=val_steps,
        callbacks=[checkpoint, earlystop, terminate_on_nan, csv_logger],
    )
    ref_metric = max(hist.history.get("val_acc", [np.nan]))
    return {
        "model": model,
        "val_acc": ref_metric,
        "batch_size": batch_size,
        "network": network,
        "dropout": dropout,
        "learning_rate": learning_rate,
        "learning_rate_decay": learning_rate_decay,
    }
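
`iou` and `dice_coef` are custom metrics whose definitions are not part of this example. A common Keras-backend formulation, given as a sketch rather than as the project's actual implementation:

from keras import backend as K


def iou(y_true, y_pred, smooth=1.0):
    # intersection over union on the label axis; `smooth` avoids a
    # division by zero on empty masks
    intersection = K.sum(y_true * y_pred, axis=-1)
    union = K.sum(y_true, axis=-1) + K.sum(y_pred, axis=-1) - intersection
    return K.mean((intersection + smooth) / (union + smooth))


def dice_coef(y_true, y_pred, smooth=1.0):
    # Dice coefficient: 2 * |A ∩ B| / (|A| + |B|)
    intersection = K.sum(y_true * y_pred, axis=-1)
    denominator = K.sum(y_true, axis=-1) + K.sum(y_pred, axis=-1)
    return K.mean((2.0 * intersection + smooth) / (denominator + smooth))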
Example #7
        sys.exit(1)
    nb_labels = len(label_ids)

    # Model creation
    if args.model == "feature_detection":
        net = FeatureDetectionNetwork(network_name=instance_name,
                                      image_size=args.image_size,
                                      nb_channels=3,
                                      nb_labels=nb_labels,
                                      dropout=args.dropout,
                                      architecture=args.network)
        loss_function = "binary_crossentropy"
    elif args.model == "semantic_segmentation":
        net = SemanticSegmentationNetwork(network_name=instance_name,
                                          image_size=args.image_size,
                                          nb_channels=3,
                                          nb_labels=nb_labels,
                                          dropout=args.dropout,
                                          architecture=args.network)
        loss_function = "categorical_crossentropy"
    else:
        utils.logger.error(("Unrecognized model. Please enter 'feature_detection' "
                            "or 'semantic_segmentation'."))
        sys.exit(1)
    model = Model(net.X, net.Y)
    opt = Adam(lr=args.learning_rate, decay=args.learning_rate_decay)
    model.compile(loss=loss_function,
                  optimizer=opt,
                  metrics=['acc'])

    # Model training
    STEPS = args.nb_training_image // args.batch_size
Example #8
def run_model(train_generator, validation_generator, dl_model, output_folder,
              instance_name, image_size, aggregate_value, nb_labels, nb_epochs,
              nb_training_image, nb_validation_image, batch_size, dropout,
              network, learning_rate, learning_rate_decay):
    """Run deep learning `dl_model` starting from training and validation data generators, depending on a
              range of hyperparameters

    Parameters
    ----------
    train_generator : generator
        Training data generator
    validation_generator : generator
        Validation data generator
    dl_model : str
        Name of the addressed research problem (either `feature_detection`
        or `semantic_segmentation`)
    output_folder : str
        Name of the folder where the trained model will be stored on the
        file system
    instance_name : str
        Name of the instance
    image_size : int
        Size of images, in pixel (height=width)
    aggregate_value : str
        Label aggregation policy (either `full` or `aggregated`)
    nb_labels : int
        Number of labels in the dataset
    nb_epochs : int
        Number of epochs during which models will be trained
    nb_training_image : int
        Number of images in the training dataset
    nb_validation_image : int
        Number of images in the validation dataset
    batch_size : int
        Number of images in each batch
    dropout : float
        Probability of keeping a neuron during the dropout phase
    network : str
        Neural network architecture (*e.g.* `simple`, `vgg`, `inception`)
    learning_rate : float
        Starting learning rate
    learning_rate_decay : float
        Learning rate decay

    Returns
    -------
    dict
        Dictionary that summarizes the instance and the corresponding model
        performance (measured by validation accuracy)
    """
    if dl_model == "feature_detection":
        net = FeatureDetectionNetwork(network_name=instance_name,
                                      image_size=image_size,
                                      nb_channels=3,
                                      nb_labels=nb_labels,
                                      architecture=network)
        loss_function = "binary_crossentropy"
    elif dl_model == "semantic_segmentation":
        net = SemanticSegmentationNetwork(network_name=instance_name,
                                          image_size=image_size,
                                          nb_channels=3,
                                          nb_labels=nb_labels,
                                          architecture=network)
        loss_function = "categorical_crossentropy"
    else:
        utils.logger.error(
            ("Unrecognized model. Please enter 'feature_detection' "
             "or 'semantic_segmentation'."))
        sys.exit(1)
    model = Model(net.X, net.Y)
    opt = Adam(lr=learning_rate, decay=learning_rate_decay)
    metrics = ['acc', iou, dice_coef]
    model.compile(loss=loss_function, optimizer=opt, metrics=metrics)

    # Model training
    steps = nb_training_image // batch_size
    val_steps = nb_validation_image // batch_size

    # keep only checkpoint files, so that max() picks the latest epoch
    # rather than an unrelated file such as training_metrics.csv
    checkpoint_files = [
        item for item in os.listdir(output_folder)
        if "checkpoint-epoch" in item
    ]
    if len(checkpoint_files) > 0:
        model_checkpoint = max(checkpoint_files)
        # the epoch number is zero-padded to three digits in the filename
        trained_model_epoch = int(model_checkpoint[-6:-3])
        checkpoint_complete_path = os.path.join(output_folder,
                                                model_checkpoint)
        model.load_weights(checkpoint_complete_path)
        utils.logger.info("Model weights have been recovered from %s",
                          checkpoint_complete_path)
    else:
        utils.logger.info(("No available checkpoint for this configuration. "
                           "The model will be trained from scratch."))
        trained_model_epoch = 0

    checkpoint_filename = os.path.join(output_folder,
                                       "checkpoint-epoch-{epoch:03d}.h5")
    checkpoint = callbacks.ModelCheckpoint(checkpoint_filename,
                                           monitor='val_loss',
                                           verbose=0,
                                           save_best_only=True,
                                           save_weights_only=False,
                                           mode='auto',
                                           period=1)
    terminate_on_nan = callbacks.TerminateOnNaN()
    earlystop = callbacks.EarlyStopping(monitor='val_acc',
                                        min_delta=0.001,
                                        patience=10,
                                        verbose=1,
                                        mode='max')
    csv_logger = callbacks.CSVLogger(
        os.path.join(output_folder, 'training_metrics.csv'))

    hist = model.fit_generator(
        train_generator,
        epochs=nb_epochs,
        initial_epoch=trained_model_epoch,
        steps_per_epoch=steps,
        validation_data=validation_generator,
        validation_steps=val_steps,
        callbacks=[checkpoint, earlystop, terminate_on_nan, csv_logger])
    ref_metric = max(hist.history.get("val_acc", [np.nan]))
    return {
        'model': model,
        'val_acc': ref_metric,
        'batch_size': batch_size,
        'network': network,
        'dropout': dropout,
        'learning_rate': learning_rate,
        'learning_rate_decay': learning_rate_decay
    }
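
A usage sketch for this variant of `run_model`; the generator below is a stand-in for a real data pipeline, and every numeric value is illustrative:

import numpy as np


def dummy_generator(batch_size=10, image_size=224, nb_labels=10):
    # stand-in generator yielding (images, pixel-wise labels) batches
    while True:
        images = np.random.rand(batch_size, image_size, image_size, 3)
        labels = np.random.rand(batch_size, image_size, image_size, nb_labels)
        yield images, labels


result = run_model(
    dummy_generator(), dummy_generator(),
    dl_model="semantic_segmentation",
    output_folder="./output/semseg",
    instance_name="semseg_demo",
    image_size=224,
    aggregate_value="full",
    nb_labels=10,
    nb_epochs=5,
    nb_training_image=1000,
    nb_validation_image=200,
    batch_size=10,
    dropout=0.5,
    network="unet",
    learning_rate=1e-3,
    learning_rate_decay=1e-4,
)
print(result["val_acc"])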