import os

import numpy as np
import tensorflow as tf
import tqdm

# Project-local modules used below. `S` (the project settings object),
# write_solution(), randomize_damage() and display_images() are defined
# elsewhere in this project.
import damage
import flow
import infer
import train


def from_logits(path):
    # A tiny Keras model that only applies a per-pixel softmax to logits that
    # were saved to disk; Input dims must be integers, hence floor division.
    inp = tf.keras.layers.Input((3145728 // 6, 6))
    #x = tf.keras.layers.Reshape((-1,6))(inp)
    x = tf.keras.layers.Activation('softmax')(inp)
    m = tf.keras.models.Model(inputs=[inp], outputs=[x])

    pbar = tqdm.tqdm(total=933)  # hard-coded number of (pre, post) test pairs

    for (pre, post) in flow.get_test_files():
        filename = os.path.basename(pre)

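        # Ensemble step: load the logits saved for this tile by the two models
        # (files "logits/1-<name>" and "logits/2-<name>") and average them
        # before applying the softmax model built above.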
        logits1 = np.fromfile("logits/1-{}".format(filename)).astype(
            np.int32).reshape((-1, 6))
        logits2 = np.fromfile("logits/2-{}".format(filename)).astype(
            np.int32).reshape((-1, 6))
        logits = (logits1 + logits2) / 2

        pred = m.predict(np.expand_dims(logits, axis=0))
        mask = infer.convert_prediction(pred).astype(np.uint8)

        filename = filename.replace("pre", "localization").replace(
            ".png", "_prediction.png")
        write_solution(names=[filename], images=[mask], path=path)
        filename = filename.replace("localization", "damage")
        write_solution(names=[filename], images=[mask], path=path)

        pbar.update(1)


def damage_random(path):
    """
    Generate solution .png files using random damage.
    """
    model = train.build_model(
        train=False)  #, save_path="motokimura-stacked-2.hdf5")
    model = train.load_weights(model, S.MODELSTRING_BEST)
    df = flow.Dataflow(files=flow.get_test_files(),
                       transform=False,
                       batch_size=1,
                       buildings_only=False,
                       shuffle=False,
                       return_postmask=False,
                       return_stacked=True,
                       return_average=False)
    pbar = tqdm.tqdm(total=len(df))

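    # The dataflow yields (stacked pre+post image, filename) pairs one at a
    # time (batch_size=1, shuffle disabled), so predictions stay aligned with
    # the original test files.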
    for image, filename in df:
        filename = os.path.basename(filename)
        filename = filename.replace("pre", "localization").replace(
            ".png", "_prediction.png")
        #if os.path.exists(os.path.join("solution", filename)):
        #    continue

        # localization (segmentation)
        pred = model.predict([image])
        mask = infer.convert_prediction(pred)
        write_solution(names=[filename], images=[mask], path=path)

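        # Replace the predicted building pixels with random damage classes
        # (presumably what randomize_damage() does) to produce a baseline
        # damage submission.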
        mask = randomize_damage(mask)
        filename = filename.replace("localization", "damage")
        write_solution(names=[filename], images=[mask], path=path)

        pbar.update(1)


def damage_by_segmentation(path):
    """
    Generate solution .png files, using a single multiclass segmentation
    model to do so.
    """
    model = train.build_model(classes=6, damage=True)
    model = train.load_weights(model,
                               "damage-motokimura-mobilenetv2-best.hdf5")
    #model.load_individual_weights()# = train.load_weights(model, S.DMG_MODELSTRING_BEST)
    df = flow.Dataflow(files=flow.get_test_files(),
                       transform=False,
                       batch_size=1,
                       buildings_only=False,
                       shuffle=False,
                       return_postmask=False,
                       return_stacked=True,
                       return_average=False)
    pbar = tqdm.tqdm(total=len(df))

    for image, filename in df:
        filename = os.path.basename(filename)
        filename = filename.replace("pre", "localization").replace(
            ".png", "_prediction.png")
        #if os.path.exists(os.path.join("solution", filename)):
        #    continue

        # localization (segmentation)
        pred = model.predict([image])
        mask = infer.convert_prediction(pred)
        write_solution(names=[filename], images=[mask], path=path)

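        # The same multiclass mask is reused for the damage file: the model
        # was built with classes=6 and damage=True, so its output classes
        # double as damage levels.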
        filename = filename.replace("localization", "damage")
        write_solution(names=[filename], images=[mask], path=path)

        pbar.update(1)


def damage_by_building_classification(path):
    """
    Generate solution .png files, classifying damage using contiguous
    regions in the segmentation model's predicted masks in order to extract
    individual building polygons from pre-disaster and post-disaster images.
    """
    # load the localization (segmentation) model
    S.BATCH_SIZE = 1
    model = train.build_model(architecture=S.ARCHITECTURE, train=True)
    model = train.load_weights(
        model, S.MODELSTRING_BEST)  #.replace(".hdf5", "-best.hdf5"))

    # load the damage classification model
    dmg_model = damage.build_model()
    dmg_model = damage.load_weights(dmg_model, S.DMG_MODELSTRING_BEST)

    # get a dataflow for the test files
    df = flow.Dataflow(files=flow.get_test_files(),
                       transform=False,
                       shuffle=False,
                       buildings_only=False,
                       batch_size=1,
                       return_stacked=True)
    i = 0
    pbar = tqdm.tqdm(total=len(df))
    # each item is a stacked pre/post-disaster image pair plus its filename
    for stacked, filename in df:
        filename = os.path.basename(filename)
        x = stacked
        #filename = os.path.basename(df.samples[i][0].img_name)
        filename = filename.replace("pre", "localization").replace(
            ".png", "_prediction.png")
        #if os.path.exists(os.path.join("solution", filename)):
        #    continue

        # localization (segmentation)
        pred = model.predict(x)
        mask = infer.convert_prediction(pred)
        write_solution(names=[filename], images=[mask], path=path)

        # damage classification
        filename = filename.replace("localization", "damage")
        pre, post = stacked[..., :3], stacked[..., 3:]
        #pre, post = df.samples[i][0].image(), df.samples[i][1].image()
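        # Crop each contiguous building region out of the pre/post images;
        # `boxes` holds the per-building crops fed to the classifier and
        # `coords` their (x, y, w, h) locations in the mask.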
        boxes, coords = flow.Building.get_all_in(pre, post, mask)
        if len(boxes) > 0:
            labels = dmg_model.predict(boxes)
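            # argmax(labels[k]) + 1 below shifts the predicted class index by
            # one so that 0 stays reserved for background in the mask.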
            for k, c in enumerate(coords):
                x, y, w, h = c
                mask[y:y + h, x:x + w] = np.argmax(labels[k]) + 1

        write_solution(names=[filename], images=[mask], path=path)
        pbar.update(1)
        i += 1


def predict_and_show(df, argmax=True):
    model = train.build_model()
    model = train.load_weights(model, S.MODELSTRING_BEST)

    for imgs, mask in df:
        pre = imgs[...,:3]
        post = imgs[...,3:]

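        # Run the model, then convert both the ground-truth mask and the
        # prediction for display: the argmax call gives hard labels, while
        # argmax=False returns raw per-channel scores (shown separately below).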
        pred = model.predict(imgs)
        mask = infer.convert_prediction(mask)

        maxed = infer.convert_prediction(pred, argmax=True)
        pred, _ = infer.convert_prediction(pred, argmax=False)
        pred1 = pred[...,0]
        pred2 = pred[...,1]

        try:
            display_images(
                [pre, post, maxed, pred1, pred2, mask],
                ["Pre", "Post", "Argmax", "Pred1", "Pred2", "Ground Truth"])
        except Exception as exc:
            for arr in (pre, post, maxed, pred1, pred2, mask):
                print(arr.shape)
            raise exc


def show(df):
    i = 0
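    # df.samples[i] is assumed to stay aligned with the iteration order here,
    # which holds as long as the dataflow is not shuffled.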
    for imgs, masks in df:
        pre = imgs[...,:3]
        post = imgs[...,3:]

        mask = infer.convert_prediction(masks)

        prename = df.samples[i][0].img_name
        postname = df.samples[i][1].img_name

        display_images([pre, post, mask], [prename, postname, "Mask"])
        i += 1
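

# Minimal usage sketch (assumptions: this module is executed directly and
# "solution" is the desired output directory, mirroring the commented-out
# os.path.join("solution", ...) checks above); the real project may expose
# these entry points through its own CLI instead.
if __name__ == "__main__":
    out_path = "solution"  # hypothetical output directory
    os.makedirs(out_path, exist_ok=True)
    damage_by_building_classification(out_path)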