Example #1
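# Imports assumed by this snippet (training_utils is a project-local module):
import os
from typing import Tuple

from tensorflow.keras.optimizers import Adam

from keras_unet.metrics import iou, iou_thresholded
from keras_unet.models import custom_unet

import training_utils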
def create_model(resize_dim_: Tuple[int, int],
                 lr_: float,
                 loss_function_name_: str,
                 object_storing_dir_: str,
                 num_filters_first_level_: int = 32,
                 num_classes_: int = 1,
                 dropout_rate_: float = 0.2,
                 use_batch_norm_: bool = True,
                 **loss_kwargs):
    """
    Create 2D Unet model that will be used to predict segmentation masks on single slices of the 3D scan.

    Returns a Keras model that predicts single-channel masks on single-channel 2D images.

    :param resize_dim_: 2D dimensions of the images that the model can predict on.
    :param lr_: learning rate
    :param loss_function_name_: name of loss function to use
    :param object_storing_dir_: Directory where model objects will be stored. In this case, the model's summary.
    :param num_filters_first_level_: number of convolution filters at the first U-Net level
    :param num_classes_: number of output classes (channels of the final layer)
    :param dropout_rate_: dropout rate used in the convolutional blocks
    :param use_batch_norm_: whether to apply batch normalization
    :param loss_kwargs: In case the loss function takes arguments, add them as a dictionary

    :return: Keras.model 2D Unet
    """

    # Build model with the `custom_unet` function from the `keras_unet` library
    model = custom_unet(input_shape=resize_dim_ + (1, ),
                        use_batch_norm=use_batch_norm_,
                        num_classes=num_classes_,
                        filters=num_filters_first_level_,
                        dropout=dropout_rate_,
                        output_activation='sigmoid')

    with open(os.path.join(object_storing_dir_, 'model_summary.txt'),
              'w') as f:
        model.summary(print_fn=lambda x: f.write(x + '\n'))

    # Get loss function to use
    loss_function = training_utils.get_loss_function(loss_function_name_,
                                                     **loss_kwargs)

    # Compile the model
    model.compile(optimizer=Adam(learning_rate=lr_),
                  loss=loss_function,
                  metrics=[iou, iou_thresholded])

    return model
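# Hypothetical usage (the loss name and directory are placeholders):
# unet_2d = create_model(resize_dim_=(256, 256),
#                        lr_=1e-4,
#                        loss_function_name_='binary_crossentropy',
#                        object_storing_dir_='./artifacts')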
Example #2
def __init__(self,
             inputShape=(512, 512, 3),
             modelWeights=None,
             useGPU=True,
             dropout=0.05):
    self.inputShape = inputShape
    # build the U-Net with keras_unet's custom_unet
    self.model = custom_unet(input_shape=inputShape,
                             use_batch_norm=False,
                             filters=64,
                             dropout=dropout,
                             num_layers=4,
                             output_activation='sigmoid')
    self.useGPU = useGPU
    # load pretrained weights when a path is given
    if modelWeights is not None:
        try:
            self.model.load_weights(modelWeights)
            self.trained = True
        except IOError as e:
            print(e)
            self.trained = False
    else:
        self.trained = False
Example #3
# force TensorFlow onto the CPU by hiding all GPUs
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'

# load data
preprocess_input = None  # sm.get_preprocessing('resnet18')
X_train_gen, Y_train_gen, X_test, Y_test = get_data(config.data_folder,
                                                    config.batch_size,
                                                    config.test_split_ratio,
                                                    preprocess_input)

# load model or create new
if config.load_model:
    model = load_model(str(config.load_model), compile=False)
else:
    model = custom_unet(input_shape=config.input_shape,
                        use_batch_norm=True,
                        num_classes=1,
                        filters=4,
                        dropout=0.2,
                        output_activation='sigmoid')
    # model = sm.Unet('resnet18', input_shape=config.input_shape, classes=1, activation='sigmoid',
    #                 decoder_filters=(256, 128, 64, 32, 16), encoder_freeze=False, encoder_weights='imagenet')
opt = Adam(learning_rate=config.learning_rate)
model.compile(opt,
              sm.losses.bce_jaccard_loss,
              metrics=[
                  'mean_squared_error', sm.metrics.iou_score,
                  sm.losses.bce_jaccard_loss, sigmoid_iou_loss
              ])

# print network info
model.summary()
X_test = X_test / 255

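Example #4
# NOTE: this fragment assumes train_ids, X_train and Y_train were loaded earlier.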
#DISPLAYING THE IMAGE AND THE CORRESPONDING MASK

img_new = random.randint(0, len(train_ids) - 1)  # randint is inclusive on both ends
io.imshow(X_train[img_new])
plt.show()
io.imshow(np.squeeze(Y_train[img_new]))
plt.show()

#IMPORTING KERAS UNET MODEL
from keras_unet.models import custom_unet

model = custom_unet(input_shape=(128, 128, 3),
                    use_batch_norm=False,
                    num_classes=1,
                    filters=128,
                    dropout=0.2,
                    output_activation='sigmoid')

model.summary()

# single-channel sigmoid output -> use binary cross-entropy
model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=['accuracy'])

results = model.fit(X_train, Y_train, validation_split=0.1, epochs=20)

from keras_unet.utils import plot_segm_history

plot_segm_history(results,
                  metrics=['accuracy', 'val_accuracy'],
                  # NOTE: the closing arguments are assumed; the source
                  # fragment is truncated here.
                  losses=['loss', 'val_loss'])
Example #5
print(xx.shape, yy.shape)
from keras_unet.utils import plot_imgs

plot_imgs(org_imgs=xx, mask_imgs=yy, nm_img_to_plot=2, figsize=6)

# %% [markdown]
# ## Initialize network

# %%
from keras_unet.models import custom_unet

input_shape = x_train[0].shape

model = custom_unet(input_shape,
                    filters=32,
                    use_batch_norm=True,
                    dropout=0.3,
                    dropout_change_per_layer=0.0,
                    num_layers=4)

# %%
model.summary()

# %% [markdown]
# ## Compile + train

# %%
from tensorflow.keras.callbacks import ModelCheckpoint

model_filename = 'segm_model_v3.h5'
callback_checkpoint = ModelCheckpoint(
    model_filename,
    # NOTE: the remaining arguments are assumed; the source fragment is
    # truncated here.
    verbose=1,
    monitor='val_loss',
    save_best_only=True,
)
Example #6
def main():
    #training settings

    #Multi GPU set up
    strategy = tf.distribute.MirroredStrategy()    


    #load dataset
    skin_path = 'Data/skin_lesions'

    img_path = skin_path + '/image/'
    msk_path = skin_path + '/mask/'

    #get possible images and masks
    imgs = os.listdir(img_path)
    msks = os.listdir(msk_path)
    #sort the images and masks 
    msks = sorted(msks)
    imgs = sorted(imgs)
     
    #read in the images 
    images,masks = read_in_skin_images(skin_path,msks,imgs)

    #preprocess the dataset
    for i in range(0,len(masks)):
        m = masks[i]
        m = m[:,:,0] #keep a single channel of the mask
        m = m.reshape(m.shape[0],m.shape[1]) #ensure the mask is 2D
        m = cv2.resize(m,(256,256)) #resize the skin masks for memory constraints
        masks[i] = m
        #images
        im = images[i]
        images[i] = cv2.resize(im,(256,256)) #resize the images for memory constraints

    #make Arrays 
    images = np.asarray(images)
    masks = np.asarray(masks)
    masks = masks / masks.max() #normalize the masks
    masks = masks.reshape(masks.shape[0],masks.shape[1],masks.shape[2],1) #add a channel axis to the masks
    

    #split the data
    img_overall_train, img_test, mask_overall_train, mask_test = train_test_split(images, masks, test_size=0.16667, random_state=42)
    img_train, img_val, mask_train, mask_val = train_test_split(img_overall_train, mask_overall_train, test_size = 0.166667, random_state = 32)
    

    #data augmentation settings
    data_aug_dict = dict(
       # rotation_range=15.,
        width_shift_range=0.05,
        height_shift_range=0.05,
        shear_range=0.05,
        zoom_range=0.05,
        horizontal_flip=True,
        vertical_flip=True,
        fill_mode='nearest'
    )

    #data generator 
    train_datagen = ImageDataGenerator(**data_aug_dict)  #unpack the settings as keyword arguments

    train_generator = train_datagen.flow(
        img_train, mask_train,
        batch_size=16)

    val_generator = train_datagen.flow(
        img_val, mask_val)
   
  
    #standardize steps per epoch for the dataset and batch size
    STEPS_PER_EPOCH = len(img_train) // 16

    #Get U Net

    input_shape = img_train[0].shape
    #use multi GPU strategy to run the custom U-Net
    with strategy.scope():
        #run the custom model with parameters you want to analyze
        model = custom_unet(
            input_shape,
            filters=64,
            use_batch_norm=False,
            use_dropout_on_upsampling=False,
            dropout=0.55,
            activation='relu',
            dropout_change_per_layer=0.00,
            num_layers=4,
            decoder_type='simple',
        )

        ##Compile and Train
        #save model with this name **Change model name for each run to save model**
        model_filename = 'basic_600_BDice_LR001_BASIC_SKIN_UNET.h5'
        callback_checkpoint = ModelCheckpoint(
            model_filename,
            verbose=1,
            monitor='val_loss',
            save_best_only=True,
        )
        opt = keras.optimizers.Adam(learning_rate=0.0001)

        model.compile(
            optimizer=opt,
            #optimizer=SGD(lr=0.01, momentum=0.99),
            loss=bce_dice_loss,
            metrics=['mse', iou, iou_thresholded],
        )

        history = model.fit_generator(
            train_generator,
            steps_per_epoch=STEPS_PER_EPOCH,
            epochs=600,
            validation_data=(img_val, mask_val),
            callbacks=[callback_checkpoint],
        )

    # serialize model to JSON
    model_json = model.to_json()
    with open("bcdloss_LGG_basic_UNetModel.json", "w") as json_file:
        json_file.write(model_json)
    # serialize weights to HDF5
    model.save_weights("LGG_basic_UNetmodel.h5")
    print("Saved model to disk")
Example #7
def main():
    train_x = np.empty(0, dtype='uint8')
    train_y = np.empty(0, dtype='uint8')
    batch_size = 4

    x_dir = 'DataForSegmentator/input'
    y_dir = 'DataForSegmentator/output_filters'

    samples_num = len(os.listdir(x_dir))

    for x_path, y_path in zip(sorted(os.listdir(x_dir)),
                              sorted(os.listdir(y_dir))):
        train_x = np.append(train_x,
                            np.array(Image.open("%s/%s" % (x_dir, x_path))))
        train_y = np.append(train_y,
                            np.array(Image.open("%s/%s" % (y_dir, y_path))))

    train_x.shape = (samples_num, 256, 256, 3)
    train_y.shape = (samples_num, 256, 256, 3)

    if not os.path.isfile('train_unet_x.npy'):
        np.save('train_unet_x', train_x)
    # memory-map the saved array, skipping the .npy file header (uint8 data)
    train_x = np.memmap('train_unet_x.npy', shape=train_x.shape, offset=128)

    x_generator = ImageDataGenerator(
        shear_range=0.1,
        zoom_range=0.1,
        horizontal_flip=True,
        vertical_flip=True,
    ).flow(
        x=train_x,
        batch_size=batch_size,
        seed=42,
    )
    y_generator = ImageDataGenerator(
        shear_range=0.1,
        zoom_range=0.1,
        horizontal_flip=True,
        vertical_flip=True,
    ).flow(
        x=train_y,
        batch_size=batch_size,
        seed=42,
    )

    train_generator = zip(x_generator, y_generator)  # shared seed keeps image/label pairs aligned

    model = custom_unet(input_shape=train_x.shape[1:],
                        use_batch_norm=False,
                        num_classes=3,
                        filters=32,
                        dropout=0.2,
                        output_activation='relu')

    # model = get_full_model(json_path='models/model_unet_70.json', h5_path='models/model_unet_70.h5')

    model.compile(optimizer=Adam(learning_rate=1e-8), loss='mae', metrics=['accuracy'])

    callbacks = [
        ModelCheckpoint("model_unet.h5",
                        monitor='accuracy',
                        verbose=True,
                        save_best_only=True),
        # EarlyStopping(monitor='acc',
        #               patience=0,
        #               baseline=90,
        #               verbose=True,
        #               ),
    ]

    full_history = {"acc": np.empty(0), "loss": np.empty(0)}

    continue_train = True
    epochs_sum = 0
    while continue_train:

        epochs = get_input_int("How many epochs?", 0, 100)

        history = model.fit_generator(
            generator=train_generator,
            steps_per_epoch=int(samples_num / batch_size),
            # validation_steps=train_x.shape[0] / batch_size,
            epochs=epochs,
            shuffle=True,
            verbose=True,
            callbacks=callbacks,
        )

        if epochs != 0:
            full_history['acc'] = np.append(full_history['acc'],
                                            history.history['accuracy'])
            full_history['loss'] = np.append(full_history['loss'],
                                             history.history['loss'])
            epochs_sum = len(full_history['acc'])

            #####################################################################
            # ----------------------- evaluate model ----------------------------
            #####################################################################
            print("\nacc        %.2f%%\n" %
                  (history.history['accuracy'][-1] * 100),
                  end='')

            epochs = len(history.history['accuracy'])

            plot_history_separate_from_dict(history_dict=full_history,
                                            save_path_acc=None,
                                            save_path_loss=None,
                                            show=True,
                                            save=False)

        print("epochs: %d - %d" % (epochs_sum - epochs, epochs_sum))

        #####################################################################
        # ----------------------- CMD UI ------------------------------------
        #####################################################################
        if get_stdin_answer("Show image of prediction?"):
            show_predict_on_window(
                train_x, train_y,
                np.array(model.predict(train_x), dtype='uint8'))

        if get_stdin_answer(text='Save model?'):
            save_model_to_json(model,
                               "models/model_unet_%d.json" % (epochs_sum))
            model.save_weights('models/model_unet_%d.h5' % (epochs_sum))

        continue_train = get_stdin_answer(text="Continue?")
Example #8
from keras.models import Sequential
from keras.layers import Conv2D, MaxPool2D, UpSampling2D

from keras.models import load_model
import cv2
import numpy as np
#edit with your model
IMAGE_SIZE = (480, 480)

from keras_unet.models import custom_unet

model = custom_unet(input_shape=(480, 480, 3),
                    use_batch_norm=True,
                    num_classes=1,
                    filters=16,
                    dropout=0,
                    num_layers=4,
                    output_activation='sigmoid')

model.load_weights('my_model_final_in_480_f_16.h5')

import glob

path = glob.glob("Test/Input/*.jpg")

for myfile in path:
    test_im = cv2.imread(myfile)
    true_size = test_im.shape
    imshow_size = (512, round(true_size[0] * 512 / true_size[1]))
    #cv2.imshow('Input',cv2.resize(test_im, imshow_size))
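    # NOTE: the source loop is truncated here; a minimal completion, assuming
    # the model predicts on a resized, rescaled copy of the frame:
    inp = cv2.resize(test_im, IMAGE_SIZE) / 255.0
    pred = model.predict(inp[np.newaxis, ...])[0]
    mask = (pred[..., 0] > 0.5).astype(np.uint8) * 255
    cv2.imshow('Prediction', cv2.resize(mask, imshow_size))
    cv2.waitKey(0)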

Example #9
# NOTE: this fragment begins mid-call in the source; the opening of the
# get_augmented(...) call below is assumed (x_train and y_train come from
# earlier in the original example).
from keras_unet.utils import get_augmented

train_gen = get_augmented(x_train,
                          y_train,
                          batch_size=2,
                          data_gen_args=dict(
                              rotation_range=180,
                              width_shift_range=0.3,
                              height_shift_range=0.3,
                              shear_range=0.1,
                              zoom_range=0.1,
                              horizontal_flip=True,
                              vertical_flip=True,
                              fill_mode='constant',
                          ))

from keras_unet.models import custom_unet

model = custom_unet(
    x_train[0].shape,
    use_batch_norm=True,
    num_classes=1,
    filters=64,
    dropout=0.2,
    output_activation='sigmoid',
)

from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping

callbacks = [
    EarlyStopping(monitor='val_loss', patience=3, verbose=1, min_delta=1e-4),
    ModelCheckpoint('./lesson2-model/model.h5',
                    monitor='val_loss',
                    verbose=1,
                    save_best_only=True,
                    mode='auto'),
]
Example #10
def main():
    #training settings

    #Multi GPU strategy
    strategy = tf.distribute.MirroredStrategy()

    #load dataset
    image_paths, mask_paths = load_images(
        "Data/lgg-mri-segmentation/kaggle_3m/")

    #check to see if each image has a mask if so, read in the images
    if image_mask_check(image_paths, mask_paths):
        masks, images = read_in_images(image_paths, mask_paths)

    #keep a single channel of each mask and ensure it is 2D
    for i in range(0, len(masks)):
        m = masks[i]
        m = m[:, :, 0]
        m = m.reshape(m.shape[0], m.shape[1])
        masks[i] = m

    #make images and masks into numpy arrays and normalize the masks
    images = np.asarray(images)
    masks = np.asarray(masks)
    masks = masks / 255
    masks = masks.reshape(masks.shape[0], masks.shape[1], masks.shape[2], 1)

    #split the data
    img_overall_train, img_test, mask_overall_train, mask_test = train_test_split(
        images, masks, test_size=0.16667, random_state=42)
    img_train, img_val, mask_train, mask_val = train_test_split(
        img_overall_train,
        mask_overall_train,
        test_size=0.166667,
        random_state=32)

    #create data generator for running model
    #batch size of 16
    train_datagen = ImageDataGenerator()

    train_generator = train_datagen.flow(img_train, mask_train, batch_size=16)

    val_generator = train_datagen.flow(img_val, mask_val)

    #steps within each epoch is the total number of training images divided by the batch size
    STEPS_PER_EPOCH = len(img_train) // 16

    #Get U Net

    input_shape = img_train[0].shape

    #allow model to be trained over multiple GPUs
    with strategy.scope():

        #load in the custom u net from the models section
        model = custom_unet(input_shape,
                            filters=32,
                            use_batch_norm=True,
                            dropout=0.3,
                            dropout_change_per_layer=0.0,
                            num_layers=4)

        print(model.summary())

        ##Compile and Train

        #save each model if validation loss improves
        model_filename = 'UNet_Model'
        callback_checkpoint = ModelCheckpoint(
            model_filename,
            verbose=1,
            monitor='val_loss',
            save_best_only=True,
        )

        opt = keras.optimizers.Adam(learning_rate=0.001)
        #set compile and run for model

        model.compile(
            optimizer=opt,
            #loss='binary_crossentropy',
            loss=bce_dice_loss,
            metrics=[iou, iou_thresholded])

        #use fit_generator because using data generator
        #fit the model for
        history = model.fit_generator(train_generator,
                                      steps_per_epoch=STEPS_PER_EPOCH,
                                      epochs=300,
                                      validation_data=(img_val, mask_val),
                                      callbacks=[callback_checkpoint])

    # serialize model to JSON
    #save model as JSON and weight file
    model_json = model.to_json()
    with open("UNetModel.json", "w") as json_file:
        json_file.write(model_json)
    # serialize weights to HDF5
    model.save_weights("model.h5")
    print("Saved model to disk")
Example #11
def main():
    #training settings

    #Multi GPU strategy
    strategy = tf.distribute.MirroredStrategy()

    #load dataset
    image_paths, mask_paths = load_images(
        "Data/lgg-mri-segmentation/kaggle_3m/")

    #sort images and masks
    image_paths = sorted(image_paths)
    mask_paths = sorted(mask_paths)

    #read in images and masks
    if image_mask_check(image_paths, mask_paths):
        masks, images = read_in_MR_images(image_paths, mask_paths)

    #basic pre-processing with MR additives
    for i in range(0, len(masks)):
        m = masks[i]
        m = m[:, :, 0]  #keep a single channel of the mask
        m = m.reshape(m.shape[0], m.shape[1])  #ensure a 2D mask
        masks[i] = m
        #MRI Values
        im = images[i]
        images[i] = normalize_MRIvolume(
            im)  #normalize the MR images with histogram normalization

    #make Arrays
    images = np.asarray(images)
    masks = np.asarray(masks)
    masks = masks / masks.max()  #normalize masks
    masks = masks.reshape(masks.shape[0], masks.shape[1], masks.shape[2],
                          1)  #add a channel axis to the masks

    #split the data
    img_overall_train, img_test, mask_overall_train, mask_test = train_test_split(
        images, masks, test_size=0.16667, random_state=42)
    img_train, img_val, mask_train, mask_val = train_test_split(
        img_overall_train,
        mask_overall_train,
        test_size=0.166667,
        random_state=32)

    #data generator
    train_datagen = ImageDataGenerator()

    train_generator = train_datagen.flow(img_train, mask_train, batch_size=16)

    val_generator = train_datagen.flow(img_val, mask_val)

    #standardize steps per epoch based on image dataset and batch size
    STEPS_PER_EPOCH = len(img_train) // 16
    # STEPS_PER_EPOCH = 250

    #Get U Net

    input_shape = img_train[0].shape  #input shape for the U-Net

    #multiGPU processing
    with strategy.scope():
        #train custom U-Net model based on parameters you want to train
        model = custom_unet(input_shape,
                            filters=64,
                            use_batch_norm=False,
                            dropout=0.55,
                            dropout_change_per_layer=0.00,
                            num_layers=4,
                            decoder_type='simple',
                            use_dropout_on_upsampling=False,
                            activation='relu')

        ##Compile and Train
        #save model name for evaluation
        model_filename = 'Basic_LGG_aug_UNET_01LR.h5'
        callback_checkpoint = ModelCheckpoint(
            model_filename,
            verbose=1,
            monitor='loss',
            save_best_only=True,
        )
        opt = keras.optimizers.Adam(learning_rate=0.0001)

        model.compile(
            optimizer=opt,
            #optimizer=SGD(lr=0.01, momentum=0.99),
            loss=bce_dice_loss,
            metrics=['mse', iou, iou_thresholded],
        )

        history = model.fit_generator(train_generator,
                                      steps_per_epoch=STEPS_PER_EPOCH,
                                      epochs=600,
                                      validation_data=(img_val, mask_val),
                                      callbacks=[callback_checkpoint])

    # serialize model to JSON
    model_json = model.to_json()
    with open("bcdloss_LGG_basic_UNetModel.json", "w") as json_file:
        json_file.write(model_json)
    # serialize weights to HDF5
    model.save_weights("LGG_basic_UNetmodel.h5")
    print("Saved model to disk")
    # Add a per-pixel classification layer
    outputs = layers.Conv2D(num_classes,
                            3,
                            activation="softmax",
                            padding="same")(x)

    # Define the model
    model = keras.Model(inputs, outputs)
    return model


# Free up RAM in case the model definition cells were run multiple times
#keras.backend.clear_session()
model = custom_unet(input_shape=img_size + (3, ),
                    use_batch_norm=True,
                    num_classes=2,
                    filters=32,
                    dropout=0.2,
                    output_activation='softmax')
# Build model
# model = get_model(img_size, num_classes)
model.summary()

#%% Train the model

# Configure the model for training.
# We use the "sparse" version of categorical_crossentropy
# because our target data is integers.
model.compile(optimizer="adam", loss="sparse_categorical_crossentropy")

callbacks = [
    # NOTE: the tail of this call is truncated in the source; save_best_only
    # is assumed.
    keras.callbacks.ModelCheckpoint("insulators-augmented.h5",
                                    save_best_only=True)
]
Example #13
def main():
    #training settings

    #load dataset

    image_paths = 'Data/Lung Segmentation/CXR_png/'
    mask_paths = 'Data/Lung Segmentation/masks/'

    images, masks = load_lung_images(image_paths, mask_paths)

    masks, images = read_in_images(images, masks)

    for i in range(0, len(masks)):
        #resize
        m = cv2.resize(masks[i], (1024, 1024))
        im = cv2.resize(images[i], (1024, 1024))
        #keep a single channel of the mask and ensure it is 2D
        m = m[:, :, 0]
        m = m.reshape(m.shape[0], m.shape[1])

        masks[i] = m
        images[i] = im

    #make Arrays
    images = np.asarray(images)
    masks = np.asarray(masks)
    masks = masks / 255
    masks = masks.reshape(masks.shape[0], masks.shape[1], masks.shape[2], 1)

    #split the data
    img_overall_train, img_test, mask_overall_train, mask_test = train_test_split(
        images, masks, test_size=0.16667, random_state=42)
    img_train, img_val, mask_train, mask_val = train_test_split(
        img_overall_train,
        mask_overall_train,
        test_size=0.166667,
        random_state=32)

    #data generator
    train_datagen = ImageDataGenerator()

    train_generator = train_datagen.flow(img_train, mask_train, batch_size=16)

    val_generator = train_datagen.flow(img_val, mask_val)

    STEPS_PER_EPOCH = len(img_train) // 16
    #Get U Net

    input_shape = img_train[0].shape

    model = custom_unet(input_shape,
                        filters=32,
                        use_batch_norm=True,
                        dropout=0.3,
                        dropout_change_per_layer=0.0,
                        num_layers=4)

    print(model.summary())

    ##Compile and Train

    model_filename = 'chest_segm_model_v3.h5'
    callback_checkpoint = ModelCheckpoint(
        model_filename,
        verbose=1,
        monitor='val_loss',
        save_best_only=True,
    )

    model.compile(
        optimizer='adam',
        #optimizer=SGD(lr=0.01, momentum=0.99),
        loss='binary_crossentropy',
        #loss=jaccard_distance,
        metrics=[iou, iou_thresholded])

    history = model.fit_generator(train_generator,
                                  steps_per_epoch=STEPS_PER_EPOCH,
                                  epochs=75,
                                  validation_data=(img_val, mask_val),
                                  callbacks=[callback_checkpoint])

    # serialize model to JSON
    model_json = model.to_json()
    with open("UNetChestModel.json", "w") as json_file:
        json_file.write(model_json)
    # serialize weights to HDF5
    model.save_weights("basic_chest_model.h5")
    print("Saved model to disk")
Example #14
def main():
    #training settings

    #Multi GPU set up
    strategy = tf.distribute.MirroredStrategy()

    #load dataset
    lung_path = 'Data/lung-masks'

    img_path = lung_path + '/2d_images/'
    msk_path = lung_path + '/2d_masks/'

    #list all images and masks
    imgs = os.listdir(img_path)
    msks = os.listdir(msk_path)
    #sort images and masks
    msks = sorted(msks)
    imgs = sorted(imgs)

    #read in images and masks
    images, masks = read_in_lung_images(lung_path, msks, imgs)

    #set up basic preprocessing
    for i in range(0, len(masks)):
        m = masks[i]
        m = m[:, :, 0]  #keep a single channel of the mask
        m = m.reshape(m.shape[0], m.shape[1])  #ensure a 2D mask
        m = cv2.resize(m, (256, 256))  #resize the masks due to memory constraints
        masks[i] = m
        #images
        im = images[i]
        images[i] = cv2.resize(im, (256, 256))  #resize the images due to memory constraints

    #make Arrays
    images = np.asarray(images)
    masks = np.asarray(masks)
    masks = masks / masks.max()  #normalize the masks
    masks = masks.reshape(masks.shape[0], masks.shape[1], masks.shape[2],
                          1)  #add a channel axis to the masks

    #split the data
    img_overall_train, img_test, mask_overall_train, mask_test = train_test_split(
        images, masks, test_size=0.16667, random_state=42)
    img_train, img_val, mask_train, mask_val = train_test_split(
        img_overall_train,
        mask_overall_train,
        test_size=0.166667,
        random_state=32)

    #data generator
    train_datagen = ImageDataGenerator()

    train_generator = train_datagen.flow(img_train, mask_train, batch_size=16)

    val_generator = train_datagen.flow(img_val, mask_val)

    #standardize steps per epoch based on the dataset
    STEPS_PER_EPOCH = len(img_train) // 16
    #Get U Net

    input_shape = img_train[0].shape  #get input shape for the U-Net model

    #use multiGPU training strategy
    with strategy.scope():
        #change parameters based on model you want to train
        #set up custom u net model
        model = custom_unet(input_shape,
                            filters=64,
                            use_batch_norm=False,
                            use_dropout_on_upsampling=False,
                            dropout=0.55,
                            activation='relu',
                            dropout_change_per_layer=0.00,
                            num_layers=4,
                            decoder_type='simple')

        ##Compile and Train
        #save model name based on model you are training and want to evaluate
        model_filename = 'traditional_600_LR0001_BASIC_LUNG_UNET.h5'
        callback_checkpoint = ModelCheckpoint(
            model_filename,
            verbose=1,
            monitor='val_loss',
            save_best_only=True,
        )
        opt = keras.optimizers.Adam(learning_rate=0.0001)

        model.compile(
            optimizer=opt,
            #optimizer=SGD(lr=0.01, momentum=0.99),
            loss=bce_dice_loss,
            metrics=['mse', iou, iou_thresholded],
        )

        history = model.fit_generator(train_generator,
                                      steps_per_epoch=STEPS_PER_EPOCH,
                                      epochs=600,
                                      validation_data=(img_val, mask_val),
                                      callbacks=[callback_checkpoint])

    # serialize model to JSON
    model_json = model.to_json()
    with open("bcdloss_LGG_basic_UNetModel.json", "w") as json_file:
        json_file.write(model_json)
    # serialize weights to HDF5
    model.save_weights("LGG_basic_UNetmodel.h5")
    print("Saved model to disk")
Example #15
    # NOTE: the source fragment begins mid-call; the opening of this
    # train_test_split(...) call is assumed (X and y are the full dataset).
    X_train, X_test, y_train, y_test = train_test_split(X, y,
                                                        test_size=0.2,
                                                        shuffle=False)

    # data augmentation with horizontal and vertical flips
    train_gen = get_augmented(X_train,
                              y_train,
                              batch_size=2,
                              data_gen_args=dict(
                                  horizontal_flip=True,
                                  vertical_flip=True,
                              ))

    # initialize network
    model = custom_unet(input_shape=X_train[0].shape,
                        filters=32,
                        use_batch_norm=True,
                        num_classes=1,
                        dropout=0.3,
                        num_layers=4)

    # compile and train
    callback_checkpoint = ModelCheckpoint(
        MODEL_PATH,
        verbose=1,
        monitor='val_loss',
        save_best_only=True,
    )

    model.compile(optimizer=Adam(), loss='binary_crossentropy', metrics=[iou])

    # train model
    # NOTE: the call is truncated in the source; the remaining arguments are
    # assumed, following the other examples in this collection.
    history = model.fit_generator(train_gen,
                                  steps_per_epoch=len(X_train) // 2,
                                  epochs=50,
                                  validation_data=(X_test, y_test),
                                  callbacks=[callback_checkpoint])