# GPU memory allocation: cap TensorFlow at 80% of the available GPU memory
import os
import tensorflow as tf
from keras import backend as K

config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.8
K.tensorflow_backend.set_session(tf.Session(config=config))

# Restrict the process to a single GPU (device 0, in PCI bus order)
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = '0'
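# Note (assumption): the two settings above target TF 1.x with standalone Keras.
# On TF 2.x a comparable effect is usually achieved with memory growth, e.g.:
#   for gpu in tf.config.list_physical_devices('GPU'):
#       tf.config.experimental.set_memory_growth(gpu, True)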

img_height = PARAMS_JSON['UNET_INPUT_SIZE']
img_width = PARAMS_JSON['UNET_INPUT_SIZE']
img_size = (img_height, img_width)
test_path = SETTINGS_JSON['TEST_IMAGES_DIR_SEGMENTATION']
save_path = SETTINGS_JSON['PREDICT_DIR_SEGMENTATION']
model_weights_name = SETTINGS_JSON['UNET_PREDICT_DIR']

# build model
unet = UNet(
    input_size=(img_width, img_height, 1),
    n_filters=64,
    pretrained_weights=model_weights_name
)
unet.build()


# generate the testing set
test_gen = test_generator(test_path, img_size)
# run prediction over every image in the test directory
results = unet.predict_generator(test_gen, len(os.listdir(test_path)), verbose=1)

save_results(save_path, results)
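
The save_results helper is not defined in this snippet; a minimal sketch of what it
might look like, assuming the predictions are single-channel masks in [0, 1] written
out as numbered PNGs (the function body and file-name pattern are assumptions):

import os
import numpy as np
from skimage import io

def save_results(save_path, results):
    for i, mask in enumerate(results):
        # drop the channel axis and convert the [0, 1] mask to an 8-bit image
        img = (np.squeeze(mask) * 255).astype(np.uint8)
        io.imsave(os.path.join(save_path, '%d_predict.png' % i), img)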
Example #2
import sys

test_path = 'data/test'
save_path = 'data/results'
model_weights_name = 'unet_bones_weights.hdf5'

if __name__ == "__main__":
    """ Prediction Script
    Run this Python script with a command line
    argument that defines number of test samples
    e.g. python predict.py 6
    Note that test samples names should be:
    1.jpg, 2.jpg, 3.jpg ...
    """

    # get number of samples from command line
    samples_number = int(sys.argv[1])

    # build model
    unet = UNet(
        input_size=(img_width, img_height, 1),
        n_filters=64,
        pretrained_weights=model_weights_name
    )
    unet.build()

    # generate the testing set
    test_gen = test_generator(test_path, samples_number, img_size)

    # run prediction on the requested number of samples
    results = unet.predict_generator(test_gen, samples_number, verbose=1)
    save_results(save_path, results)
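
The test_generator helper is likewise not shown; a sketch of a generator matching the
docstring's naming convention (1.jpg, 2.jpg, ...), assuming grayscale inputs resized to
the model's input size (all implementation details here are assumptions):

import os
import numpy as np
from skimage import io, transform

def test_generator(test_path, num_images, target_size=(256, 256)):
    for i in range(1, num_images + 1):
        img = io.imread(os.path.join(test_path, '%d.jpg' % i), as_gray=True)
        img = transform.resize(img, target_size)   # match the network input size
        img = np.reshape(img, img.shape + (1,))    # add the channel axis
        yield np.reshape(img, (1,) + img.shape)    # add the batch axis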
Example #3
        pretrained_weights = model_weights_name
    else:
        pretrained_weights = None

    # build model
    unet = UNet(input_size=(img_width, img_height, 1),
                n_filters=64,
                pretrained_weights=pretrained_weights)
    unet.build()

    # create a checkpoint callback so that the best weights found during training are saved
    model_checkpoint = unet.checkpoint(model_name)

    # model training
    # steps_per_epoch should equal the number of training samples divided by the batch size;
    # in this case, 528 samples / batch size 2 = 264
    unet.fit_generator(train_gen,
                       steps_per_epoch=264,
                       epochs=5,
                       callbacks=[model_checkpoint])
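    # (assumption) rather than hard-coding 264, the step count can be derived:
    #   steps_per_epoch = math.ceil(num_train_samples / batch_size)  # ceil(528 / 2) = 264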

    # saving model weights
    unet.save_model(model_weights_name)

    # generate the testing set (30 samples)
    test_gen = test_generator(test_path, 30, img_size)

    # run prediction and save the results
    results = unet.predict_generator(test_gen, 30, verbose=1)
    save_results(save_path, results)
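
The UNet wrapper class used by all three snippets is not included here. Below is a
minimal sketch of the interface the snippets rely on (build, checkpoint, fit_generator,
predict_generator, save_model), assuming a TF 1.x / standalone Keras backend; the layer
layout shows a single contracting/expanding level, whereas a real U-Net repeats the
pattern several times. Everything in this sketch is an assumption, not the original class.

from keras.models import Model
from keras.layers import Input, Conv2D, MaxPooling2D, UpSampling2D, concatenate
from keras.callbacks import ModelCheckpoint

class UNet:
    def __init__(self, input_size=(256, 256, 1), n_filters=64, pretrained_weights=None):
        self.input_size = input_size
        self.n_filters = n_filters
        self.pretrained_weights = pretrained_weights
        self.model = None

    def build(self):
        inputs = Input(self.input_size)
        # contracting path (one level shown)
        c1 = Conv2D(self.n_filters, 3, activation='relu', padding='same')(inputs)
        p1 = MaxPooling2D(pool_size=(2, 2))(c1)
        # bottleneck
        c2 = Conv2D(self.n_filters * 2, 3, activation='relu', padding='same')(p1)
        # expanding path with a skip connection back to c1
        u1 = UpSampling2D(size=(2, 2))(c2)
        m1 = concatenate([c1, u1], axis=3)
        c3 = Conv2D(self.n_filters, 3, activation='relu', padding='same')(m1)
        outputs = Conv2D(1, 1, activation='sigmoid')(c3)
        self.model = Model(inputs=inputs, outputs=outputs)
        self.model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
        if self.pretrained_weights:
            self.model.load_weights(self.pretrained_weights)

    def checkpoint(self, model_name):
        # save only the best weights seen so far, judged by training loss
        return ModelCheckpoint(model_name, monitor='loss', save_best_only=True, verbose=1)

    def fit_generator(self, *args, **kwargs):
        return self.model.fit_generator(*args, **kwargs)

    def predict_generator(self, *args, **kwargs):
        return self.model.predict_generator(*args, **kwargs)

    def save_model(self, path):
        self.model.save_weights(path)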