Example #1
# required imports (the standalone Keras API is assumed here)
import os
from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, CSVLogger

# build model
unet = UNet(input_size=(img_width, img_height, 1),
            n_filters=64,
            pretrained_weights=pretrained_weights)
unet.build()

# create a checkpoint callback so that the best weight configuration is saved
model_checkpoint = ModelCheckpoint(filepath=model_path,
                                   monitor='val_loss',
                                   verbose=1,
                                   save_best_only=True)

# learning-rate schedule: halve the LR when val_loss plateaus for 7 epochs
reduceLR = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=7)

# CSV logger callback that records per-epoch metrics
logs_path = os.path.join(SETTINGS_JSON['UNET_LOG_DIR'], "log.csv")
csvlogger = CSVLogger(logs_path)
callbacks = [csvlogger, model_checkpoint, reduceLR]

# model training
# steps_per_epoch should equal the number of samples in the dataset divided by
# the batch size; here that is 528 / 2 = 264
unet.fit_generator(
    generator=train_gen,
    steps_per_epoch=len(os.listdir(os.path.join(train_path, 'images'))) // batch_size,
    epochs=200,
    validation_data=val_gen,
    validation_steps=val_samples // batch_size,
    callbacks=callbacks)
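
The example assumes that `train_gen` and `val_gen` already exist; they are not defined in this snippet. Below is a minimal sketch of how such paired image/mask generators are often built with Keras's `ImageDataGenerator`, reusing the `train_path/images` layout visible above. The helper `make_generator` and the name `val_path` are assumptions for illustration; the original project's generators may differ.

# Hypothetical sketch: paired image/mask generators for a UNet, assuming a
# directory layout of <path>/images and <path>/masks as used above.
from keras.preprocessing.image import ImageDataGenerator

def make_generator(path, batch_size, target_size, seed=1):
    # identical seeds keep the image and mask streams in sync
    image_gen = ImageDataGenerator(rescale=1. / 255).flow_from_directory(
        path, classes=['images'], class_mode=None,
        color_mode='grayscale', target_size=target_size,
        batch_size=batch_size, seed=seed)
    mask_gen = ImageDataGenerator(rescale=1. / 255).flow_from_directory(
        path, classes=['masks'], class_mode=None,
        color_mode='grayscale', target_size=target_size,
        batch_size=batch_size, seed=seed)
    # zip is lazy, so this yields (image_batch, mask_batch) tuples indefinitely
    return zip(image_gen, mask_gen)

train_gen = make_generator(train_path, batch_size, (img_width, img_height))
val_gen = make_generator(val_path, batch_size, (img_width, img_height))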
Example #2
    # assumed guard (the original condition is not shown in this excerpt):
    # load pretrained weights only when a weights file already exists
    if os.path.exists(model_weights_name):
        pretrained_weights = model_weights_name
    else:
        pretrained_weights = None

    # build model
    unet = UNet(input_size=(img_width, img_height, 1),
                n_filters=64,
                pretrained_weights=pretrained_weights)
    unet.build()

    # create a checkpoint callback so that the best weight configuration is saved
    model_checkpoint = unet.checkpoint(model_name)

    # model training
    # steps_per_epoch should equal the number of samples in the dataset divided
    # by the batch size; here that is 528 / 2 = 264
    unet.fit_generator(train_gen,
                       steps_per_epoch=264,
                       epochs=5,
                       callbacks=[model_checkpoint])

    # saving model weights
    unet.save_model(model_weights_name)

    # generate the testing set
    test_gen = test_generator(test_path, 30, img_size)

    # run inference on the test set and save the resulting masks
    results = unet.predict_generator(test_gen, 30, verbose=1)
    save_results(save_path, results)
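
`save_results` is project-specific and not shown in this excerpt. A plausible sketch is given below, assuming it writes each predicted mask as an 8-bit grayscale PNG; the use of `skimage.io` and the `%d_predict.png` naming scheme are assumptions, not the project's confirmed implementation.

# Hypothetical sketch of save_results: writes each predicted mask to disk.
import os
import numpy as np
import skimage.io as io

def save_results(save_path, results):
    for i, mask in enumerate(results):
        # drop the channel axis and scale probabilities to 8-bit grayscale
        img = (np.squeeze(mask) * 255).astype(np.uint8)
        io.imsave(os.path.join(save_path, "%d_predict.png" % i), img)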