Example #1
                                             horizontal_flip=True,
                                             vertical_flip=True,
                                             fill_mode='constant'))
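
# NOTE: the original snippet begins mid-call; the arguments above are the tail of a
# data_gen_args dict. A hedged sketch of the full call, assuming keras_unet's
# get_augmented helper (with x_train / y_train prepared beforehand):
#
# train_gen = get_augmented(
#     x_train, y_train, batch_size=2,
#     data_gen_args=dict(rotation_range=5.,
#                        width_shift_range=0.05,
#                        height_shift_range=0.05,
#                        shear_range=40,
#                        zoom_range=0.2,
#                        horizontal_flip=True,
#                        vertical_flip=True,
#                        fill_mode='constant'))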

#Plot images with mask overlays
batch = next(train_gen)
xx, yy = batch
print(xx.shape, yy.shape)
plot_imgs(org_imgs=xx, mask_imgs=yy, nm_img_to_plot=5, figsize=6)

# Importing Satellite Unet model from the keras_unet package

input_shape = x_train[0].shape

model = satellite_unet(input_shape,
                       num_classes=1,
                       output_activation='sigmoid',
                       num_layers=4)

#Creating Checkpoint
model_filename = '/model.h5'
callback_checkpoint = ModelCheckpoint(
    model_filename,
    verbose=1,
    monitor='val_loss',
    save_best_only=True,
)

#Compiling the model with appropriate optimizer

model.compile(optimizer=SGD(lr=0.01, momentum=0.99),
              loss='binary_crossentropy',
              # the original snippet is cut off here; the metrics below are an
              # assumption, using the IoU metrics shipped with keras_unet
              metrics=[iou, iou_thresholded])
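
# Hypothetical training call (not part of the original snippet): it wires the augmented
# generator and the checkpoint callback defined above into model.fit; the step count,
# epoch count and x_val / y_val are placeholders.
model.fit(train_gen,
          steps_per_epoch=200,
          epochs=10,
          validation_data=(x_val, y_val),
          callbacks=[callback_checkpoint])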
Example #2
                              # NOTE: the snippet starts mid-call; these are the tail of
                              # the augmentation arguments, as in Example #1 above
                              width_shift_range=0,
                              height_shift_range=0,
                              shear_range=0,
                              channel_shift_range=channel_shift_range,
                              zoom_range=zoom_range,
                              horizontal_flip=True,
                              vertical_flip=True,
                              fill_mode='constant'))

###########################
# SETUP AND TRAIN NETWORK #
###########################

# Initialize network
input_shape = images_train[0].shape
model = satellite_unet(input_shape)
model.summary()

# Compile & train
if val_ratio != 0:
    callback_checkpoint = ModelCheckpoint(model_filename_best,
                                          verbose=1,
                                          monitor='val_loss',
                                          save_best_only=True)
else:
    callback_checkpoint = ModelCheckpoint(model_filename_best,
                                          verbose=1,
                                          monitor='loss',
                                          save_best_only=True)

model.compile(
    # the original snippet ends mid-call here; see Example #1 for a full compile call
)
Example #3
def main():
    keras.backend.clear_session()
    # The original images should be downloaded to ../../ONLINE_RESULTS_DIRECTORY:
    # download the annotation_results folder from BOKU-drive and place it
    # accordingly in your file system.
    if not os.path.exists(VAL_IMAGE_PATH) or not os.path.exists(
            TRAIN_MASK_PATH):
        # crop images according to our hierarchy: only ears in plots are considered
        prepare_images(new_image_size=NEW_IMAGE_SIZE_2d,
                       path=os.path.join("..", "..", ONLINE_RESULTS_DIRECTORY),
                       mask_path=EAR_LABEL_IMAGE)
        # get all image-names
        all_images = get_all_resized_image_names(parent_directory=os.path.join(
            DATA_PATH, RESIZED),
                                                 file_ending_original=".jpg")
        #split the images into train / val-set
        train_set, test_set = split_train_set(all_images, 0.2)
        save_train_and_test_set(train_set, test_set)
        print("restart the program")
        # don't know why, but isn't it funny?
        return
    # create 4 image generators, because it only runs like this. Could be improved but...
    train_image_Generator = kunet.utils.ImageDataGenerator(
        horizontal_flip=True, rescale=(1.0 / 255))
    train_mask_Generator = kunet.utils.ImageDataGenerator(horizontal_flip=True)
    val_image_Generator = kunet.utils.ImageDataGenerator(rescale=(1.0 / 255))
    val_mask_Generator = kunet.utils.ImageDataGenerator()

    train_image_generator = train_image_Generator.flow_from_directory(
        os.path.join(IMG_PATH, "train"),
        batch_size=BATCH_SIZE,
        target_size=NEW_IMAGE_SIZE_2d,
        seed=412,
        class_mode='input')
    train_mask_generator = train_mask_Generator.flow_from_directory(
        os.path.join(MASK_PATH, "train"),
        batch_size=BATCH_SIZE,
        target_size=NEW_IMAGE_SIZE_2d,
        color_mode="grayscale",
        seed=412,
        class_mode='input')
    val_image_generator = val_image_Generator.flow_from_directory(
        os.path.join(IMG_PATH, "val"),
        batch_size=BATCH_SIZE,
        target_size=NEW_IMAGE_SIZE_2d,
        seed=42,
        class_mode='input')
    val_mask_generator = val_mask_Generator.flow_from_directory(
        os.path.join(MASK_PATH, "val"),
        batch_size=BATCH_SIZE,
        target_size=NEW_IMAGE_SIZE_2d,
        color_mode="grayscale",
        seed=42,
        class_mode='input')

    ### look into the generator:
    # x, y = train_image_generator.next()
    # plt.imshow(x[0])
    # plt.show()
    # return

    train_generator = zip(train_image_generator, train_mask_generator)
    val_generator = zip(val_image_generator, val_mask_generator)
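
    # Note (not part of the original code): with class_mode='input' each generator above
    # yields (batch, batch) tuples, so the zipped generators produce
    # ((img, img), (mask, mask)). When zipping separate image and mask generators,
    # class_mode=None is the more common choice, since it yields only the raw batches.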
    # creates a new model.h5-file (and overwrites the old!!!!)
    if not CONTINUE_LEARNING:
        print("attention I will start learning from 0!")
        model = satellite_unet(NEW_IMAGE_SIZE, num_classes=1)
        # model = unet()
        # model = vanilla_unet(NEW_IMAGE_SIZE, num_classes=1, filters=FILTER_SIZE, num_layers=NUM_LAYERS)
        opt = Adam(lr=1E-5, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
        # Alternative optimizer (commented out below): rmsprop. Its arguments:
        #   lr: float >= 0. Learning rate.
        #   rho: float >= 0.
        #   epsilon: float >= 0. Fuzz factor.
        #   decay: float >= 0. Learning rate decay over each update.
        # my_optimizer = rmsprop(lr=1, rho=0.9, epsilon=1e-08, decay=0.05)
        model.compile(optimizer=opt,
                      loss=jaccard_distance,
                      metrics=[jaccard_coef])  # metrics expects a list of metric functions
    else:
        print("I WILL USE THE ALREADY LEARNED WEIGHTS from output.h5")
        model = keras.models.load_model(os.path.join(DATA_PATH,
                                                     "weights/output.h5"),
                                        custom_objects={
                                            'jaccard_distance':
                                            jaccard_distance,
                                            'jaccard_coef': jaccard_coef
                                        })
        model.load_weights(os.path.join(DATA_PATH, "weights/output.h5"))
    #backup after each epoch
    checkpoint = keras.callbacks.ModelCheckpoint(os.path.join(
        WEIGHTS_PATH, "output.h5"),
                                                 monitor='loss',
                                                 verbose=1)
    #nice to have
    csv_logger = keras.callbacks.CSVLogger(os.path.join(
        WEIGHTS_PATH, "log.out"),
                                           append=True,
                                           separator=';')
    # early stopping was never triggered in practice, but could come in handy;
    # min_delta and mode would probably need tuning
    earlystopping = keras.callbacks.EarlyStopping(monitor='loss',
                                                  verbose=1,
                                                  min_delta=0.01,
                                                  patience=3,
                                                  mode='min')
    # mode='min' for loss-like quantities (e.g. mse), 'max' for score-like metrics

    model.summary()  # prints a summary of the network architecture

    model.fit(train_generator,
              epochs=NO_OF_EPOCHS,
              steps_per_epoch=(NO_OF_TRAINING_IMAGES // BATCH_SIZE),
              validation_data=val_generator,
              validation_steps=(NO_OF_VAL_IMAGES // BATCH_SIZE),
              callbacks=[checkpoint, csv_logger, earlystopping])
    # when learning is finished, model gets saved in model.h5
    model.save(os.path.join(DATA_PATH, "model.h5"))
    return
Example #4
from keras_unet.models import satellite_unet
from keras.callbacks import ModelCheckpoint
from keras.optimizers import Adam, SGD
from keras_unet.metrics import iou, iou_thresholded
from keras import metrics
from keras.preprocessing.image import load_img, img_to_array
from keras.models import Model
import tensorflow as tf

#Define model here
input_shape = (512, 512, 3)

model = satellite_unet(
    input_shape,
    #use_batch_norm=False,
    num_classes=1,
    #filters=64,
    #dropout=0.2,
    output_activation='sigmoid',
    num_layers=4)

#Model checkpoints
model_filename = ''  # set this to the desired checkpoint file path before training

callback_checkpoint = ModelCheckpoint(
    model_filename,
    verbose=1,
    monitor='val_loss',
    save_best_only=True,
)

#Compile model here
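
# A hedged completion: the original example ends at the comment above, so the optimizer,
# loss and metrics below are assumptions consistent with the imports in this example.
model.compile(optimizer=Adam(),
              loss='binary_crossentropy',
              metrics=[iou, iou_thresholded])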