Example #1
def damage_by_segmentation(path):
    """
    Generate solution .png files, using a single multiclass segmentation
    model to do so.
    """
    model = train.build_model(classes=6, damage=True)
    model = train.load_weights(model,
                               "damage-motokimura-mobilenetv2-best.hdf5")
    df = flow.Dataflow(files=flow.get_test_files(),
                       transform=False,
                       batch_size=1,
                       buildings_only=False,
                       shuffle=False,
                       return_postmask=False,
                       return_stacked=True,
                       return_average=False)
    pbar = tqdm.tqdm(total=len(df))

    for image, filename in df:
        filename = os.path.basename(filename)
        filename = filename.replace("pre", "localization").replace(
            ".png", "_prediction.png")
        #if os.path.exists(os.path.join("solution", filename)):
        #    continue

        # localization (segmentation)
        pred = model.predict([image])
        mask = infer.convert_prediction(pred)
        write_solution(names=[filename], images=[mask], path=path)

        filename = filename.replace("localization", "damage")
        write_solution(names=[filename], images=[mask], path=path)

        pbar.update(1)
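
The write_solution helper called above is not part of this listing. A minimal sketch of what it might look like, assuming each mask is a 2-D class map saved as an 8-bit PNG (the helper below is hypothetical, not the project's actual implementation):

import os
import numpy as np
from PIL import Image

def write_solution(names, images, path):
    # Hypothetical: save each class-map mask as an 8-bit PNG under `path`.
    os.makedirs(path, exist_ok=True)
    for name, img in zip(names, images):
        Image.fromarray(img.astype(np.uint8)).save(os.path.join(path, name))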
Example #2
def damage_random(path):
    """
    Generate solution .png files using random damage.
    """
    model = train.build_model(train=False)
    model = train.load_weights(model, S.MODELSTRING_BEST)
    df = flow.Dataflow(files=flow.get_test_files(),
                       transform=False,
                       batch_size=1,
                       buildings_only=False,
                       shuffle=False,
                       return_postmask=False,
                       return_stacked=True,
                       return_average=False)
    pbar = tqdm.tqdm(total=len(df))

    for image, filename in df:
        filename = os.path.basename(filename)
        filename = filename.replace("pre", "localization").replace(
            ".png", "_prediction.png")
        #if os.path.exists(os.path.join("solution", filename)):
        #    continue

        # localization (segmentation)
        pred = model.predict([image])
        mask = infer.convert_prediction(pred)
        write_solution(names=[filename], images=[mask], path=path)

        mask = randomize_damage(mask)
        filename = filename.replace("localization", "damage")
        write_solution(names=[filename], images=[mask], path=path)

        pbar.update(1)
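
randomize_damage is referenced above but not defined in this listing. A plausible sketch, assuming non-zero mask pixels mark buildings and damage classes run from 1 to 4 (the body below is an assumption, not the project's code):

import numpy as np

def randomize_damage(mask):
    # Hypothetical: assign a random damage class (1-4) to every building pixel.
    mask = mask.copy()
    building = mask > 0
    mask[building] = np.random.randint(1, 5, size=int(building.sum()))
    return mask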
Example #3
def damage_by_building_classification(path):
    """
    Generate solution .png files, classifying damage using contiguous
    regions in the segmentation model's predicted masks in order to extract
    individual building polygons from pre-disaster and post-disaster images.
    """
    # load the localization (segmentation) model
    S.BATCH_SIZE = 1
    model = train.build_model(architecture=S.ARCHITECTURE, train=True)
    model = train.load_weights(model, S.MODELSTRING_BEST)

    # load the damage classification model
    dmg_model = damage.build_model()
    dmg_model = damage.load_weights(dmg_model, S.DMG_MODELSTRING_BEST)

    # get a dataflow for the test files
    df = flow.Dataflow(files=flow.get_test_files(),
                       transform=False,
                       shuffle=False,
                       buildings_only=False,
                       batch_size=1,
                       return_stacked=True)
    pbar = tqdm.tqdm(total=len(df))
    # each batch is a stacked pre/post-disaster image plus its filename
    for stacked, filename in df:
        filename = os.path.basename(filename)
        x = stacked
        filename = filename.replace("pre", "localization").replace(
            ".png", "_prediction.png")
        #if os.path.exists(os.path.join("solution", filename)):
        #    continue

        # localization (segmentation)
        pred = model.predict(x)
        mask = infer.convert_prediction(pred)
        write_solution(names=[filename], images=[mask], path=path)

        # damage classification
        filename = filename.replace("localization", "damage")
        pre, post = stacked[..., :3], stacked[..., 3:]
        boxes, coords = flow.Building.get_all_in(pre, post, mask)
        if len(boxes) > 0:
            labels = dmg_model.predict(boxes)
            for k, c in enumerate(coords):
                bx, by, bw, bh = c
                # +1 offset: damage classes are 1-4, with 0 reserved for background
                mask[by:by + bh, bx:bx + bw] = np.argmax(labels[k]) + 1

        write_solution(names=[filename], images=[mask], path=path)
        pbar.update(1)
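
The np.argmax(labels[k]) + 1 step above maps each building's 4-way classifier output onto the mask's damage labels; the offset keeps 0 free for background, with 1-4 presumably the xView2 damage classes. A worked example with illustrative values:

import numpy as np

labels = np.array([[0.1, 0.7, 0.1, 0.1],     # building 0 -> damage class 2
                   [0.9, 0.05, 0.03, 0.02]])  # building 1 -> damage class 1
print(np.argmax(labels, axis=1) + 1)  # [2 1]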
Example #4
def predict():
    """测试集上进行预测, 输出分类结果
    """
    model_id = input("model id: ")
   
    record_files = [os.path.join(basic.test_record_path, name)
                    for name in os.listdir(basic.test_record_path)]
    dataset = get_tfdataset(record_files, mode='test')

    model = flexible_model(input_shape=(90,90,3), num_classes=9)
    model = load_weights(model, model_id)
    # model = load_model(ckpt_dir)
    # NOTE: steps=1 runs prediction on a single batch only
    predictions = model.predict(dataset, steps=1)

    save_predictions(predictions, basic.result_path, model_id)
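
save_predictions is not shown in this listing. A minimal sketch, assuming it reduces the softmax outputs to class indices and writes them to a file named after the model (the body below is hypothetical):

import os
import numpy as np

def save_predictions(predictions, result_path, model_id):
    # Hypothetical: write predicted class indices, one per line.
    os.makedirs(result_path, exist_ok=True)
    classes = np.argmax(predictions, axis=1)
    np.savetxt(os.path.join(result_path, f"{model_id}.csv"), classes, fmt="%d")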
Example #5
def evaluate():
    """验证模型性能
    """
    model_id = input("model id: ")
   
    record_files = [os.path.join(basic.train_record_path, name)
                    for name in os.listdir(basic.train_record_path)]
    val_files = record_files[-1]  # hold out the last record file for validation

    val_dataset = get_tfdataset(val_files, mode='valid')

    model = flexible_model(input_shape=(90,90,3), num_classes=9)
    model = load_weights(model, model_id)

    eval_loss, eval_accuracy = model.evaluate(val_dataset, steps=1)  # a single batch only

    print(f'Eval loss: {eval_loss}, Eval accuracy: {eval_accuracy}')
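
get_tfdataset, shared by the two examples above, is also not shown. A rough sketch of a TFRecord parser sized for the 90x90x3 inputs; the feature names 'image' and 'label' are assumptions:

import tensorflow as tf

def get_tfdataset(record_files, mode='train', batch_size=32):
    # Hypothetical parser: decode raw bytes into a 90x90x3 float image plus label.
    spec = {
        'image': tf.io.FixedLenFeature([], tf.string),
        'label': tf.io.FixedLenFeature([], tf.int64),
    }

    def parse(example):
        parsed = tf.io.parse_single_example(example, spec)
        image = tf.io.decode_raw(parsed['image'], tf.uint8)
        image = tf.cast(tf.reshape(image, (90, 90, 3)), tf.float32) / 255.0
        return image, parsed['label']

    ds = tf.data.TFRecordDataset(record_files).map(parse)
    if mode == 'train':
        ds = ds.shuffle(1000)
    return ds.batch(batch_size)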
Example #6
def test_predict():
    """测试集上进行预测, 输出分类结果
    """
    ckpt_dir = input("specify the dir to load ckpt: ")

    dataset = read_data_sets('../data/MNIST_data', one_hot=True)
    
    images = dataset.test.images
    images = images.reshape((-1, 28,28,1))

    data = tf.data.Dataset.from_tensor_slices(images)
    data = data.batch(100)

    model = flexible_model(input_shape=(28,28,1), num_classes=10)
    model = load_weights(model, ckpt_dir)

    # 100 steps x batch size 100 covers all 10,000 MNIST test images
    predictions = model.predict(data, steps=100)

    save_predictions(predictions, config.result_path, ckpt_dir)
Example #7
def predict_and_show(df, argmax=True):
    model = train.build_model()
    model = train.load_weights(model, S.MODELSTRING_BEST)

    for imgs, mask in df:
        pre = imgs[...,:3]
        post = imgs[...,3:]

        pred = model.predict(imgs)
        mask = infer.convert_prediction(mask)  # ground truth, converted for display

        maxed = infer.convert_prediction(pred, argmax=True)
        pred, _ = infer.convert_prediction(pred, argmax=False)
        pred1 = pred[...,0]
        pred2 = pred[...,1]

        try:
            display_images([pre, post, maxed, pred1, pred2, mask],
                           ["Pre", "Post", "Argmax", "Pred1", "Pred2", "Ground Truth"])
        except Exception:
            for arr in (pre, post, maxed, pred1, pred2, mask):
                print(arr.shape)
            raise
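
display_images never appears in this listing either. A minimal matplotlib sketch, assuming it tiles the arrays in a single row under their titles (hypothetical):

import matplotlib.pyplot as plt

def display_images(images, titles):
    # Hypothetical: show all images side by side with their titles.
    fig, axes = plt.subplots(1, len(images), figsize=(4 * len(images), 4))
    for ax, img, title in zip(axes, images, titles):
        ax.imshow(img)
        ax.set_title(title)
        ax.axis('off')
    plt.show()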
Example #8
    plt.title(df.samples[idx].img_name)

    fig.add_subplot(1,3,2)
    plt.imshow(mask_img1)
    plt.title("Mask channels 0:3")

    fig.add_subplot(1,3,3)
    plt.imshow(mask_img2)
    plt.title("Mask channels 3:")

    scores = score.f1_score(convert_prediction(y_true),
                            np.argmax(np.dstack([mask_img1, mask_img2]), axis=2))
    gt = convert_prediction(y_true, argmax=True)
    if len(gt[gt != 0]) == 0:
        plt.close(fig)
        logger.info("Skipping image with no buildings")
        return show_random(model, df)
    print("F1-Score: {}\n".format(scores))
    plt.show()
    return


if __name__ == "__main__":
    # Testing and inspection
    model = train.build_model()
    model = train.load_weights(model)
    S.BATCH_SIZE = 1
    df = Dataflow(files=get_validation_files(), batch_size=1, shuffle=True)
    while True:
        show_random(model, df)
        time.sleep(1)
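
infer.convert_prediction is central to most of these examples but never defined. A sketch of its apparent contract, reconstructed from the call sites (an inference, not the real implementation): with argmax=True it collapses a (1, H, W, C) prediction into a single class map, and with argmax=False the callers unpack a tuple.

import numpy as np

def convert_prediction(pred, argmax=True):
    # Reconstructed contract: drop the batch axis, then optionally argmax channels.
    pred = np.squeeze(pred, axis=0)
    if argmax:
        return np.argmax(pred, axis=-1)
    return pred, None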
Example #9
    SAMPLES_SEEN += 1
    if SAMPLES_SEEN >= NUM_SAMPLES:
        # re-initialize the running totals once every sample has been scored
        initialize_f1(NUM_SAMPLES)

    # xView2 contest formula (harmonic mean of all 4 damage class F1-scores)
    df1 = 4 / np.sum([1 / (scores[i] + 1e-8) for i in range(1, 5)])
    return df1


if __name__ == '__main__':
    import infer
    import math
    import train
    model = train.build_deeplab_model(classes=6, damage=True, train=False)
    S.BATCH_SIZE = 1
    model = train.load_weights(model, S.MODELSTRING)
    df = flow.DamagedDataflow(files=flow.get_validation_files(),
                              batch_size=1,
                              shuffle=True,
                              return_postmask=True,
                              return_stacked=True)
    totals = 0
    num = 1
    pbar = tqdm.tqdm(df, desc="Scoring")
    initialize_f1(len(df) * 2)

    for x, y_ in pbar:
        pred = model.predict(x)
        #y_pred = infer.convert_prediction(pred)
        #y_true = infer.convert_prediction(y_)
        num += 1
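
The xView2 contest formula near the top of this example is the harmonic mean of the four damage-class F1 scores, which punishes a single weak class much harder than an arithmetic mean would. A quick check with illustrative values:

import numpy as np

scores = [0.0, 0.8, 0.6, 0.7, 0.5]  # index 0 unused; classes 1-4 are illustrative
df1 = 4 / np.sum([1 / (scores[i] + 1e-8) for i in range(1, 5)])
print(round(df1, 4))  # 0.6304, below the arithmetic mean of 0.65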