Example #1
from keras.models import Model
from keras_segmentation.models import pspnet


def get_segmentation_model(freeze):
    # Build a PSPNet (8 classes, 192x192 input) and cut it off at an
    # intermediate activation so it can be reused as a feature extractor
    segmentation_model = pspnet.pspnet(8, 192, 192)
    x = segmentation_model.get_layer("activation_10").output
    # Explicitly define new model input and output by slicing out old model layers
    model_new = Model(inputs=segmentation_model.layers[0].input, outputs=x)

    # Optionally freeze the reused layers so their pretrained weights stay fixed
    if freeze:
        for layer in model_new.layers:
            layer.trainable = False

    return model_new
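
A possible usage sketch (not from the original project): treat the sliced model as a frozen feature extractor and attach a small head on top; the pooling layer and the 10-class output are assumptions.

from keras.layers import Dense, GlobalAveragePooling2D

backbone = get_segmentation_model(freeze=True)
pooled = GlobalAveragePooling2D()(backbone.output)
out = Dense(10, activation="softmax")(pooled)  # hypothetical class count
classifier = Model(inputs=backbone.input, outputs=out)
classifier.compile(optimizer="adam", loss="categorical_crossentropy")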
Example #2
import glob

import cv2

# Collect the raw 0/255 mask images; the glob pattern is an assumption
filenames = glob.glob("dataset/mask/*.png")
filenames.sort()

for img in filenames:
    mask = cv2.imread(img)
    # Convert 0/255 pixel values into the 0/1 class indices that
    # keras_segmentation expects; store as uint8 so imwrite keeps them exact
    binary = (mask / 255).astype("uint8")
    img = img.replace("mask", "mask_binary")
    cv2.imwrite(img, binary)
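
A quick optional sanity check, reusing the filenames list above, that the rewritten masks hold only the class indices 0 and 1:

import numpy as np

check = cv2.imread(filenames[0].replace("mask", "mask_binary"))
print(np.unique(check))  # expect [0 1] when the mask contains both classes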

from keras_segmentation.models.pspnet import pspnet
from keras_segmentation.pretrained import model_from_checkpoint_path
from keras_segmentation.train import find_latest_checkpoint

# train model
model = pspnet(n_classes=2)
model.train(train_images="dataset/image",
            train_annotations="dataset/mask_binary",  # the binarized masks written above
            checkpoints_path="/usr/code/tmp/checkpoints",
            epochs=5)

# load model
model_config = {
    "model_class": "pspnet",
    "n_classes": 2,
    "input_height": 384,
    "input_width": 576
}
latest_weight = find_latest_checkpoint("/usr/code/tmp/checkpoints")
model = model_from_checkpoint_path(model_config, latest_weight)
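
With the checkpoint restored, the model can run inference; predict_segmentation is the standard keras_segmentation call, and the file names below are placeholders.

out = model.predict_segmentation(
    inp="dataset/image/sample.png",  # placeholder test image
    out_fname="prediction.png")      # colorized class map is written here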
data_path = "/work/LAS/jannesar-lab/mburke/image-segmentation-keras/cityscape/prepped/"
# data_path = "/work/LAS/jannesar-lab/mburke/image-segmentation-keras/dataset1/"
# data_path = "/Users/MatthewBurke/PycharmProjects/image-segmentation-keras/cityscape/prepped/"
print("data path is ", data_path)

# pret_model = pspnet_101_cityscapes()  # load the pretrained model trained on Cityscapes dataset
# print("pret_model")
# # evaluating the pretrained model
# print(pret_model.evaluate_segmentation(inp_images_dir=data_path + "images_prepped_test/",
#                                        annotations_dir=data_path + "annotations_prepped_test/"))

print("loading pspnet")
# psp_101 produces OOM error when training
# input_height=1024, input_width=2048 actual image dims, use defaults of input_height=384, input_width=576 instead
pspnet = pspnet(20)  # n_classes changed from 19 to 20
print("model beginning training is ", pspnet.model_name)

psp_model.train(
    train_images=data_path + "images_prepped_train/",
    train_annotations=data_path + "annotations_prepped_train/",
    input_height=None,
    input_width=None,
    n_classes=None,
    verify_dataset=True,
    checkpoints_path="./checkpoints/pspnet",
    epochs=5,  # doesn't do anything now
    batch_size=4,  # default 2
    validate=True,
    val_images=data_path + "images_prepped_val",
    val_annotations=data_path + "annotations_prepped_val",
    val_steps_per_epoch=512,
    gen_use_multiprocessing=True,  # default False
    optimizer_name='adadelta',
    do_augment=False,
    history_csv="./checkpoints/pspnet/model_history_log.csv")
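
If the fork's history_csv option writes a standard Keras CSVLogger file (an assumption here), the log can be inspected after training:

import pandas as pd

hist = pd.read_csv("./checkpoints/pspnet/model_history_log.csv")
print(hist.tail())  # last few epochs of the logged loss/metric columns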

print("Evaluating ", fcn8.name)
# evaluating the model
print(
    fcn8.evaluate_segmentation(
        inp_images_dir=data_path + "images_prepped_test/",
        annotations_dir=data_path + "annotations_prepped_test/"))
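
evaluate_segmentation returns a dict of IoU scores; a small sketch of reading individual entries, with key names as in upstream keras_segmentation:

scores = psp_model.evaluate_segmentation(
    inp_images_dir=data_path + "images_prepped_test/",
    annotations_dir=data_path + "annotations_prepped_test/")
print("mean IoU:", scores["mean_IU"])
print("per-class IoU:", scores["class_wise_IU"])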

# psp_101 produces an OOM error when training
psp_gtfine = pspnet(
    20, input_height=1024,
    input_width=2048)  # change to vgg_unet?  # n_classes changed from 19 to 20
print("model beginning training is", psp_gtfine.model_name)

psp_gtfine.train(
    train_images=data_path + "images_prepped_train/",
    train_annotations=data_path + "annotations_prepped_train/",
    input_height=None,
    input_width=None,
    n_classes=None,
    verify_dataset=True,
    checkpoints_path="./checkpoints/psp_gtfine",
    epochs=5,  # doesn't do anything now
    batch_size=4,  # default 2
    validate=True,
    val_images=data_path + "images_prepped_val",
Example #5
disc_segnet_reg.compile(loss='binary_crossentropy',
                        optimizer='adam',
                        metrics=['accuracy',
                                 tf.keras.metrics.AUC()])
gan_segnet_reg = gan_disc.make_gan_reg(gen_segnet, disc_segnet_reg)
gan_segnet_reg.compile(loss='binary_crossentropy',
                       optimizer='adam',
                       metrics=['accuracy', tf.keras.metrics.AUC()])
train_alternately(gen_model=gen_segnet,
                  d_model=disc_segnet_reg,
                  gan_model=gan_segnet_reg,
                  gen_model_name="segnet",
                  reg_or_stacked="reg",
                  train_gen_first=False)
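
train_alternately is a project-specific helper not shown here. Purely as a hypothetical illustration of the alternating scheme its arguments suggest (discriminator updated on real and generated masks, then the generator updated through the combined model), one step might look like this; batch shapes and the single-unit sigmoid label convention are assumptions:

import numpy as np

def alternating_step(gen_model, d_model, gan_model, imgs, real_masks):
    fake_masks = gen_model.predict(imgs)        # generator proposals
    real_y = np.ones((len(imgs), 1))            # "real" labels, assumed 1-unit sigmoid output
    fake_y = np.zeros((len(imgs), 1))
    d_model.train_on_batch(real_masks, real_y)  # discriminator on ground-truth masks
    d_model.train_on_batch(fake_masks, fake_y)  # ...and on generated ones
    d_model.trainable = False                   # freeze the discriminator
    gan_model.train_on_batch(imgs, real_y)      # generator update through the GAN
    d_model.trainable = True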

# --------------------- pspnet ---------------------------------------
gen_pspnet = pspnet(20, input_height=128,
                    input_width=256)  # n_classes changed from 19 to 20
gen_pspnet.compile(loss='categorical_crossentropy',
                   optimizer='adam',
                   metrics=['accuracy'])
gen_checkpoints_path = get_path("gen_pspnet")
train_gen(gen_pspnet, gen_checkpoints_path, data_path=data_path)
# gen_pspnet.load_weights("")

# Train my stacked input gan
disc_pspnet_stacked = gan_disc.discriminator(gen_pspnet)
disc_pspnet_stacked.compile(loss='binary_crossentropy',
                            optimizer='adam',
                            metrics=['accuracy'])
gan_pspnet_stacked = gan_disc.make_gan(gen_pspnet, disc_pspnet_stacked)
gan_pspnet_stacked.compile(loss='binary_crossentropy',
                           optimizer='adam',