# Example 1
# 0
def main(predict: ("Do prediction", "flag", "p"),
         image: ("Show this specific image", "option", "i")=""):
    """Show validation samples, optionally filtered to a single image.

    Args:
        predict: when set, run the model and display its predictions
            (``predict_and_show``) instead of just showing the samples.
        image: substring of an image name; when given, the dataflow is
            restricted to the first sample whose pre- or post-disaster
            image name contains it.
    """
    df = flow.Dataflow(files=flow.get_validation_files(), shuffle=True,
                       batch_size=1, buildings_only=True, return_stacked=True,
                       transform=0.5, return_average=False, return_postmask=True)
    if image != "":
        # Restrict df to the first matching sample.
        for sample in df.samples:
            if image in sample[0].img_name or image in sample[1].img_name:
                df.samples = [sample]
                show(df)
                # BUG FIX: stop here. The original kept iterating indices from
                # the stale `range(len(df.samples))` after truncating
                # df.samples to one element, raising IndexError on the next
                # iteration.
                break

    if predict:
        predict_and_show(df)
    else:
        show(df)
def main(epochs, noaction=False, restore=False, limit=64):
    """Train the damaged-building classifier.

    Args:
        epochs: number of training epochs.
        noaction: build the dataflows but skip model construction and
            training (dry run).
        restore: load previously saved weights from S.DMG_MODELSTRING
            before training.
        limit: cap on the number of batches per epoch for both the
            training and validation sequences.
    """
    if not noaction:
        model = build_model()
        if restore:
            model = load_weights(model, S.DMG_MODELSTRING)
            logger.info("Weights loaded from {} successfully.".format(
                S.DMG_MODELSTRING))
        model.compile(
            optimizer=tf.keras.optimizers.RMSprop(),
            loss='categorical_crossentropy',
            metrics=['categorical_accuracy', score.damage_f1_score])
    else:
        model = None
    train_seq = DamagedBuildingDataflow(files=get_training_files())
    valid_seq = DamagedBuildingDataflow(files=get_validation_files())
    callback = tf.keras.callbacks.ModelCheckpoint(
        S.DMG_MODELSTRING.replace(".hdf5", "-best.hdf5"),
        save_weights_only=True,
        save_best_only=True)
    train_seq.limit = limit
    valid_seq.limit = limit

    # BUG FIX: with --noaction, model is None and the original fell straight
    # into model.fit(...), raising AttributeError. Stop after setup instead.
    if model is None:
        return

    try:
        model.fit(train_seq,
                  validation_data=valid_seq,
                  epochs=epochs,
                  verbose=1,
                  callbacks=[callback],
                  validation_steps=len(valid_seq),
                  shuffle=False,
                  use_multiprocessing=False,
                  max_queue_size=10)
    except KeyboardInterrupt:
        # NOTE(review): original said `model.model.save_weights`, which fails
        # with AttributeError on a plain Keras model (the same object receives
        # .compile()/.fit() above) and would lose the interrupted run's
        # weights. Saving on the model itself — confirm against build_model().
        model.save_weights(S.DMG_MODELSTRING)
        logger.info("Saved to {}".format(S.DMG_MODELSTRING))
# (stray `"""` delimiter from the scraped example removed — it swallowed the
# following def signature into a string literal)
def main(restore: ("Restore from checkpoint", "flag", "r"),
         damage: ("Train a damage classifier (default is localization)",
                  "flag", "d"),
         deeplab: ("Build and train a DeeplabV3+ model", "flag", "D"),
         motokimura: ("Build and train a Motokimura-designed Unet", "flag",
                      "M"),
         verbose: ("Keras verbosity level", "option", "v", int) = 1,
         epochs: ("Number of epochs", "option", "e", int) = 50,
         initial_epoch: ("Initial epoch to continue from", "option", "i",
                         int) = 1,
         optimizer: ("Keras optimizer to use", "option", "o", str) = 'RMSprop',
         loss='categorical_crossentropy'):
    """
    Train a model (localization by default, damage classification with -d).

    Exactly one of -D (DeeplabV3+) or -M (Motokimura Unet) must be given;
    otherwise the process exits with status -1. Best weights are checkpointed
    after each epoch; CTRL+C interrupts training.
    """
    if deeplab:
        logger.info("Building DeeplabV3+ model.")
        model = build_deeplab_model(classes=S.N_CLASSES,
                                    damage=damage,
                                    train=True)
        S.ARCHITECTURE = "deeplab-xception"
    elif motokimura:
        logger.info("Building MotokimuraUnet model.")
        model = build_model(classes=S.N_CLASSES, damage=damage, train=True)
        S.ARCHITECTURE = "motokimura"
    else:
        logger.error("Use -M (motokimura) or -D (deeplab) parameter.")
        sys.exit(-1)

    S.DAMAGE = bool(damage)  # simplified from `True if damage else False`
    save_path = S.MODELSTRING = f"{S.ARCHITECTURE}.hdf5"
    S.DMG_MODELSTRING = f"damage-{save_path}"
    if restore:
        load_weights(model, save_path)

    metrics = [
        'accuracy', score.num_correct, score.recall, score.tensor_f1_score
    ]

    # Keep only the best weights seen so far after each epoch.
    callbacks = [
        keras.callbacks.ModelCheckpoint(save_path.replace(".hdf5",
                                                          "-best.hdf5"),
                                        save_weights_only=True,
                                        save_best_only=True),
    ]

    model.compile(optimizer=optimizer, loss=loss, metrics=metrics)

    # The original passed constant ternaries here (`True if damage else True`,
    # `False if damage else False`); they are simplified to the constants.
    flowcall = flow.DamagedDataflow if damage else flow.Dataflow
    train_seq = flowcall(files=flow.get_training_files(),
                         batch_size=S.BATCH_SIZE,
                         transform=0.3,
                         shuffle=True,
                         buildings_only=True,
                         return_postmask=bool(damage),
                         return_stacked=True,
                         return_post_only=False,
                         return_average=False)
    val_seq = flow.Dataflow(files=flow.get_validation_files(),
                            batch_size=S.BATCH_SIZE,
                            buildings_only=True,
                            shuffle=True,
                            return_postmask=bool(damage),
                            return_stacked=True,
                            return_post_only=False,
                            return_average=False)

    logger.info(
        "Training and saving best weights after each epoch (CTRL+C to interrupt)."
    )
    train_stepper(model, train_seq, verbose, epochs, callbacks, save_path,
                  val_seq, initial_epoch)
    save_model(model, save_path)
# Example 4
# 0
    plt.title(df.samples[idx].img_name)

    fig.add_subplot(1,3,2)
    plt.imshow(mask_img1)
    plt.title("Mask channels 0:3")

    fig.add_subplot(1,3,3)
    plt.imshow(mask_img2)
    plt.title("Mask channels 3:")

    scores = score.f1_score(convert_prediction(y_true), np.argmax(np.dstack([mask_img1, mask_img2]), axis=2))
    gt = convert_prediction(y_true, argmax=True)
    if len(gt[gt != 0]) == 0:
        plt.close(fig)
        logger.info("Skipping image with no buildings")
        return show_random(model, df)
    print("F1-Score: {}\n".format(scores))
    plt.show()
    return


if __name__ == "__main__":
    # Manual testing / visual inspection: show random predictions on
    # validation samples forever (interrupt with CTRL+C).
    model = train.load_weights(train.build_model())
    S.BATCH_SIZE = 1
    df = Dataflow(files=get_validation_files(), batch_size=1, shuffle=True)
    while True:
        show_random(model, df)
        time.sleep(1)
    if SAMPLES_SEEN >= NUM_SAMPLES:
        initialize_f1(NUM_SAMPLES)

    # xView2 contest formula (harmonic mean of all 4 damage class F1-scores)
    df1 = 4 / np.sum([1 / (scores[i] + 1e-8) for i in range(1, 5)])
    return df1


if __name__ == '__main__':
    import infer
    import math
    import train
    model = train.build_deeplab_model(classes=6, damage=True, train=False)
    S.BATCH_SIZE = 1
    model = train.load_weights(model, S.MODELSTRING)
    df = flow.DamagedDataflow(files=flow.get_validation_files(),
                              batch_size=1,
                              shuffle=True,
                              return_postmask=True,
                              return_stacked=True)
    totals = 0  #[0,0,0]
    num = 1
    pbar = tqdm.tqdm(df, desc="Scoring")
    initialize_f1(len(df) * 2)

    for x, y_ in pbar:
        pred = model.predict(x)  #np.expand_dims(x[j], axis=0))
        #y_pred = infer.convert_prediction(pred)
        #y_true = infer.convert_prediction(y_)
        num += 1
        scores = running_damage_f1_score(