Example #1
def main():
    train, test, train_dataset, test_dataset, info = load_dataset_oxford_iiit_pet()

    for image, mask in train.take(6):
        sample_image, sample_mask = image, mask
    display([sample_image, sample_mask])

    model = unet_model()

    train_model(model, train_dataset, test_dataset, info)
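
# The helpers load_dataset_oxford_iiit_pet, display, unet_model and train_model are
# defined elsewhere in this project. A minimal sketch of the first two, assuming the
# TensorFlow Datasets Oxford-IIIT Pet pipeline (image size, batch size and
# normalization below are assumptions, not taken from the original code):
import tensorflow as tf
import tensorflow_datasets as tfds
import matplotlib.pyplot as plt


def load_dataset_oxford_iiit_pet(batch_size=64, buffer_size=1000):
    dataset, info = tfds.load('oxford_iiit_pet:3.*.*', with_info=True)

    def load_image(datapoint):
        # resize and scale the image; shift mask labels {1,2,3} to {0,1,2}
        image = tf.image.resize(datapoint['image'], (128, 128)) / 255.0
        mask = tf.image.resize(datapoint['segmentation_mask'], (128, 128),
                               method='nearest') - 1
        return image, mask

    train = dataset['train'].map(load_image)
    test = dataset['test'].map(load_image)
    train_dataset = train.cache().shuffle(buffer_size).batch(batch_size).repeat()
    test_dataset = test.batch(batch_size)
    return train, test, train_dataset, test_dataset, info


def display(display_list):
    # show an image / mask pair side by side
    plt.figure(figsize=(10, 10))
    for i, item in enumerate(display_list):
        plt.subplot(1, len(display_list), i + 1)
        plt.imshow(tf.keras.preprocessing.image.array_to_img(item))
        plt.axis('off')
    plt.show()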
Example #2
def predict(parent_folder):
    print_text('Loading and pre-processing test data.')
    model = unet_model(parent_folder)

    test_images, test_image_names = load_test_data(parent_folder)
    test_images = pre_process(test_images)

    test_images = test_images.astype('float32')
    mean = np.mean(test_images)  # mean for data centering
    std = np.std(test_images)  # std for data normalization

    if parent_folder == "carla":
        test_images -= mean
        test_images /= std

    print_text('Loading saved weights.')
    model_json_name = 'tl_model_detector_' + str(parent_folder) + '.json'
    model_name = 'tl_weights_detector_' + str(parent_folder) + '.h5'
    print_text(model_json_name)
    model.load_weights(os.path.join(MODEL_DIR, model_name))

    # serialize model to JSON
    model_json = model.to_json()
    with open(os.path.join(MODEL_DIR, model_json_name), "w") as json_file:
        json_file.write(model_json)
    print_text('Saved model to disk')

    print_text('Predicting masks on test data.')
    predicted_image_masks = model.predict(test_images, verbose=1)

    print_text('Saving predicted masks to files.')
    if not os.path.exists(PRED_DIR):
        os.mkdir(PRED_DIR)
    for pred_image, image_name in zip(predicted_image_masks, test_image_names):
        pred_image = (pred_image[:, :, 0] * 255.).astype(np.uint8)
        imsave(os.path.join(PRED_DIR, image_name + '.pred.png'), pred_image)
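
# Usage sketch: "carla" is the only parent_folder this snippet special-cases for
# mean/std normalization, so a call would look like this (assumed entry point):
if __name__ == '__main__':
    predict('carla')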
Example #3
    # Check loss weights
    args.style_weight = std_input_list(args.style_weight, args.nb_classes,
                                       'Style weight')
    args.content_weight = std_input_list(args.content_weight, args.nb_classes,
                                         'Content weight')
    args.tv_weight = std_input_list(args.tv_weight, args.nb_classes,
                                    'TV weight')

    config_gpu(args.gpu, args.allow_growth)

    print('Creating model...', args.model)
    class_targets = K.placeholder(shape=(None, ), dtype=tf.int32)
    # The model will be trained with 256 x 256 images of the coco dataset.
    if (args.model == "unet"):
        model = unet_model(256,
                           width_factor=args.width_factor,
                           nb_classes=args.nb_classes,
                           targets=class_targets)
    else:
        # model = pastiche_model(256, width_factor=args.width_factor)
        model = pastiche_model(256,
                               width_factor=args.width_factor,
                               nb_classes=args.nb_classes,
                               targets=class_targets)
    x = model.input
    o = model.output

    print('Loading loss network...')
    loss_net, outputs_dict, content_targets_dict = get_loss_net(
        model.output, input_tensor=model.input)

    # Placeholder sizes
Example #4
# imports inferred from usage in this snippet; set_tf_loglevel and unet_model are
# project helpers defined elsewhere (assumed)
import logging
import random as r
import numpy as np

set_tf_loglevel(logging.FATAL)

# training examples with augmentation
total_examples = 8000

# img size of the mri images
img_size = 120

print("Loading dataset...")
train_X = np.load(
    'C:/Users/merid/Documents/DeepHealth/MRI/x_{}.npy'.format(img_size))
train_Y = np.load(
    'C:/Users/merid/Documents/DeepHealth/MRI/y_{}.npy'.format(img_size))
print("Dataset loaded.")

model = unet_model()

print("Loading the model...")
model.load_weights('weights/dice_weights_120_10.h5')
print("The model loaded.")

# predict on the 100 examples held out after the 8,000 augmented training examples
pred = model.predict(train_X[total_examples:total_examples + 100])

for n in range(6):
    i = int(r.random() * pred.shape[0])
    x = train_X[i + total_examples, 0, :, :]
    y = train_Y[i + total_examples, 0, :, :]
    y_predicted = pred[i, 0, :, :]
    combined_predicted = x + y_predicted
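
# The example stops before displaying the results; a minimal visualization sketch
# (assumed, using matplotlib) for the last slice sampled in the loop above:
import matplotlib.pyplot as plt

fig, axes = plt.subplots(1, 4, figsize=(14, 4))
for ax, img, title in zip(axes,
                          (x, y, y_predicted, combined_predicted),
                          ('MRI slice', 'ground truth', 'prediction', 'MRI + prediction')):
    ax.imshow(img, cmap='gray')
    ax.set_title(title)
    ax.axis('off')
plt.show()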
Example #5
from model import unet_model
from constants import *
from data import test_dataset, train_dataset
from display import show_predictions

import tensorflow as tf
import tensorflow_datasets as tfds
tfds.disable_progress_bar()

import matplotlib.pyplot as plt

if __name__ == "__main__":
    model = unet_model(OUTPUT_CHANNELS)

    losses = {
        "mask_output": tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
        "color_output": "mse",
    }

    model.compile(optimizer='adam', loss=losses, metrics=['accuracy'])

    model.summary()

    model_history = model.fit(
        train_dataset,
        epochs=EPOCHS,
        steps_per_epoch=STEPS_PER_EPOCH,
        validation_steps=VALIDATION_STEPS,
        validation_data=test_dataset,
    )
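
    # matplotlib is imported above but the plotting code is cut off here; a minimal
    # sketch (assumed) of a training-curve plot from the History object returned by fit:
    plt.figure()
    plt.plot(model_history.history['loss'], label='train loss')
    plt.plot(model_history.history['val_loss'], label='val loss')
    plt.xlabel('epoch')
    plt.ylabel('loss')
    plt.legend()
    plt.show()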
Example #6
    config_gpu(args.gpu, args.allow_growth)

    # Strip the extension if there is one
    checkpoint_path = os.path.splitext(args.checkpoint_path)[0]

    with h5py.File(checkpoint_path + '.h5', 'r') as f:
        model_args = yaml.load(f.attrs['args'])
        style_names = f.attrs['style_names']

    print('Creating pastiche model...')
    class_targets = K.placeholder(shape=(None, ), dtype=tf.int32)
    # Instantiate the model using information stored in the yaml file
    if (args.model == "unet"):
        model = unet_model(None,
                           width_factor=model_args.width_factor,
                           nb_classes=model_args.nb_classes,
                           targets=class_targets)
    else:
        model = pastiche_model(None,
                               width_factor=model_args.width_factor,
                               nb_classes=model_args.nb_classes,
                               targets=class_targets)
    with h5py.File(checkpoint_path + '.h5', 'r') as f:
        model.load_weights_from_hdf5_group(f['model_weights'])

    inputs = [model.input, class_targets, K.learning_phase()]

    transfer_style = K.function(inputs, [model.output])

    num_batches = int(np.ceil(model_args.nb_classes / float(args.batch_size)))

    for img_name in os.listdir(args.input_path):
Example #7
def train_model(hdf5_dir,
                brains_idx_dir,
                view,
                modified_unet=True,
                batch_size=16,
                val_batch_size=32,
                lr=0.01,
                epochs=100,
                hor_flip=False,
                ver_flip=False,
                zoom_range=0.0,
                save_dir='./save/',
                start_chs=64,
                levels=3,
                multiprocessing=False,
                load_model_dir=None):
    """

    The function that builds/loads UNet model, initializes the data generators for training and validation, and finally 
    trains the model.

    """
    # preparing generators
    hdf5_file = tables.open_file(hdf5_dir, mode='r+')
    brain_idx = np.load(brains_idx_dir)
    datagen_train = CustomDataGenerator(hdf5_file,
                                        brain_idx,
                                        batch_size,
                                        view,
                                        'train',
                                        hor_flip,
                                        ver_flip,
                                        zoom_range,
                                        shuffle=True)
    datagen_val = CustomDataGenerator(hdf5_file,
                                      brain_idx,
                                      val_batch_size,
                                      view,
                                      'validation',
                                      shuffle=False)

    # add callbacks
    save_dir = os.path.join(
        save_dir, '{}_{}'.format(view,
                                 os.path.basename(brains_idx_dir)[:5]))
    if not os.path.isdir(save_dir):
        os.mkdir(save_dir)
    logger = CSVLogger(os.path.join(save_dir, 'log.txt'))
    checkpointer = ModelCheckpoint(filepath=os.path.join(save_dir, 'model.hdf5'),
                                   verbose=1,
                                   save_best_only=True)
    tensorboard = TensorBoard(os.path.join(save_dir, 'tensorboard'))
    callbacks = [logger, checkpointer, tensorboard]

    # building the model
    model_input_shape = datagen_train.data_shape[1:]
    model = unet_model(model_input_shape, modified_unet, lr, start_chs, levels)
    # training the model
    model.fit_generator(datagen_train,
                        epochs=epochs,
                        use_multiprocessing=multiprocessing,
                        callbacks=callbacks,
                        validation_data=datagen_val)
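
# Hypothetical invocation; the HDF5/index paths, view name, and fold layout below are
# placeholders, not taken from the original repository:
if __name__ == '__main__':
    train_model(hdf5_dir='data/brains.hdf5',
                brains_idx_dir='data/fold0_idx.npy',
                view='axial',
                batch_size=16,
                epochs=100,
                save_dir='./save/')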
Example #8
from tensorflow import keras
from model import unet_model
from data import Washer_Data, get_data_path

img_size = (160, 160)
batch_size = 4
num_classes = 1

model = unet_model(img_size, num_classes)
#model.summary()

train_path, valid_path = get_data_path()

train_gen = Washer_Data(batch_size, img_size, train_path)
val_gen = Washer_Data(batch_size, img_size, valid_path)


model.compile(optimizer="adam", loss="categorical_crossentropy")

callbacks = [
    keras.callbacks.ModelCheckpoint("washer_segmentation.h5", save_best_only=True)
]

# Train the model, doing validation at the end of each epoch.
epochs = 30
model.fit(train_gen, epochs=epochs, validation_data=val_gen, callbacks=callbacks)
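
# Follow-up sketch (assumed): run inference on one validation batch. This assumes
# Washer_Data is a keras.utils.Sequence yielding (images, masks) batches and that
# the single-channel output can be thresholded at 0.5.
import numpy as np

val_images, val_masks = val_gen[0]
pred_masks = model.predict(val_images)
binary_masks = (pred_masks > 0.5).astype(np.uint8)
print(binary_masks.shape)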
Example #9
def train_and_predict(parent_folder):
    print_text('Loading and pre-processing train data.')
    imgs_train, imgs_mask_train = load_train_data(parent_folder)
    imgs_train = pre_process(imgs_train)
    imgs_mask_train = pre_process(imgs_mask_train)
    imgs_train = imgs_train.astype('float32')

    mean = np.mean(imgs_train)  # mean for data centering
    std = np.std(imgs_train)  # std for data normalization

    if parent_folder == "carla":
        imgs_train -= mean
        imgs_train /= std
    imgs_mask_train = imgs_mask_train.astype('float32')
    imgs_mask_train /= 255.  # scale masks to [0, 1]

    tf_board = TensorBoard(log_dir='./Graph',
                           histogram_freq=0,
                           write_graph=True,
                           write_images=True)

    print_text('Creating and compiling model.')
    model = unet_model(parent_folder)
    file_model_name = 'tl_model_detector_' + str(parent_folder) + '.json'
    file_weights_name = 'tl_weights_detector_' + str(parent_folder) + '.h5'
    model_checkpoint = ModelCheckpoint(os.path.join(MODEL_DIR,
                                                    file_weights_name),
                                       monitor='val_loss',
                                       save_best_only=True,
                                       save_weights_only=True,
                                       verbose=1)

    # serialize model to JSON
    model_json = model.to_json()
    with open(os.path.join(MODEL_DIR, file_model_name), "w") as json_file:
        json_file.write(model_json)
    print_text('Saved model to disk')

    print_text('Fitting model.')

    model.fit(imgs_train,
              imgs_mask_train,
              batch_size=16,
              epochs=130,
              verbose=1,
              shuffle=True,
              validation_split=0.2,
              callbacks=[model_checkpoint, tf_board])

    print_text('Loading and pre-processing test data.')
    imgs_test, test_image_names = load_test_data(parent_folder)
    imgs_test = pre_process(imgs_test)
    imgs_test = imgs_test.astype('float32')

    if parent_folder == "carla":
        imgs_test -= mean
        imgs_test /= std

    print_text('Loading saved weights.')
    model.load_weights(os.path.join(MODEL_DIR, file_weights_name))

    print_text('Predicting masks on test data.')
    predicted_image_masks = model.predict(imgs_test, verbose=1)

    print_text('Saving predicted masks to files.')
    if not os.path.exists(PREDS_DIR):
        os.mkdir(PREDS_DIR)
    for image_mask, image_name in zip(predicted_image_masks, test_image_names):
        image_mask = (image_mask[:, :, 0] * 255.).astype(np.uint8)
        imsave(os.path.join(PREDS_DIR, image_name + '.pred.png'), image_mask)