Example #1
def train():
    training_generator = get_training_generator()
    for epoch_index in range(epochs):
        # Why iterate over the epochs here instead of passing them to fit_generator as a parameter?
        # The problem is that I need to clear the graph regularly, or I run into a memory error,
        # and I did not find a way to free the memory held by stale operations without resetting the whole graph.
        # But resetting the whole graph also requires re-loading all the weights.
        # This is obviously not an acceptable long-term solution. See the issue on GitHub:
        # https://github.com/tensorflow/tensorflow/issues/31419
        graph = tf.Graph()
        K.clear_session()
        gen = training_generator(graph,
                                 starting_index=epoch_index * steps_per_epoch)
        with graph.as_default():
            if (epoch_index == 0):
                unet = Unet(
                    "resnet34",
                    encoder_weights="imagenet",
                    classes=1,
                    activation="sigmoid",
                    input_shape=(tf_image_size, tf_image_size, 3),
                )
                unet.compile(optimizer=Adam(lr=learning_rate),
                             loss=calculate_loss)
            else:
                unet = load_model(
                    file_path,
                    custom_objects={"calculate_loss": calculate_loss})
            unet.fit_generator(gen, steps_per_epoch=steps_per_epoch, epochs=1)
            save_model(unet, file_path)
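Example #1 works around the leak by serializing the whole model every epoch and reloading it with custom_objects. A lighter sketch of the same idea, assuming the same segmentation_models API and a caller-supplied generator factory and loss (an illustration, not the author's code): rebuild the architecture each epoch and restore only the weights, so no custom_objects are needed on reload.

from keras import backend as K
from keras.optimizers import Adam
from segmentation_models import Unet

def train_weights_only(make_generator, loss_fn, epochs, steps_per_epoch,
                       image_size=256, learning_rate=1e-4,
                       weights_path="unet_weights.h5"):
    for epoch_index in range(epochs):
        K.clear_session()  # drop graph state accumulated by previous epochs
        unet = Unet("resnet34", encoder_weights="imagenet", classes=1,
                    activation="sigmoid", input_shape=(image_size, image_size, 3))
        unet.compile(optimizer=Adam(lr=learning_rate), loss=loss_fn)
        if epoch_index > 0:
            unet.load_weights(weights_path)  # carry progress over from the previous epoch
        gen = make_generator(starting_index=epoch_index * steps_per_epoch)
        unet.fit_generator(gen, steps_per_epoch=steps_per_epoch, epochs=1)
        unet.save_weights(weights_path)  # weights only, no custom_objects needed when reloading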
Example #2
File: unet.py Project: anssar/salt
def train_stage_1(x_train, y_train, x_valid, y_valid):
    opt = optimizers.adam(lr=0.001)
    model = Unet(backbone_name=BACKBONE,
                 encoder_weights='imagenet',
                 freeze_encoder=True)
    model.compile(loss=bce_dice_jaccard_loss,
                  optimizer=opt,
                  metrics=[my_iou_metric])
    model_checkpoint = ModelCheckpoint(
        OUTPUT_DIR + "/{}/models/{}_fold_{}_stage1.model".format(
            BASE_NAME, BASE_NAME, CUR_FOLD_INDEX),
        monitor='val_my_iou_metric',
        mode='max',
        save_best_only=True,
        verbose=1)
    reduce_lr = ReduceLROnPlateau(monitor='val_my_iou_metric',
                                  mode='max',
                                  factor=0.5,
                                  patience=6,
                                  min_lr=0.00001,
                                  verbose=1)
    early_stopping = EarlyStopping(monitor='val_my_iou_metric',
                                   mode='max',
                                   patience=20,
                                   verbose=1)
    logger = CSVLogger(OUTPUT_DIR + '/{}/logs/{}_fold_{}_stage1.log'.format(
        BASE_NAME, BASE_NAME, CUR_FOLD_INDEX))
    model.fit_generator(
        TrainGenerator(x_train,
                       y_train,
                       batch_size=int(np.ceil(BATCH_SIZE / (len(AUGS) + 1))),
                       img_size_target=IMG_SIZE_TARGET),
        steps_per_epoch=int(
            np.ceil(len(x_train) / int(np.ceil(BATCH_SIZE /
                                               (len(AUGS) + 1))))),
        epochs=WARM_EPOCHS,
        validation_data=ValidGenerator(x_valid,
                                       y_valid,
                                       batch_size=BATCH_SIZE,
                                       img_size_target=IMG_SIZE_TARGET),
        callbacks=[model_checkpoint],
        shuffle=True)
    segmentation_utils.set_trainable(model)
    model.fit_generator(
        TrainGenerator(x_train,
                       y_train,
                       batch_size=int(np.ceil(BATCH_SIZE / (len(AUGS) + 1))),
                       img_size_target=IMG_SIZE_TARGET),
        steps_per_epoch=int(
            np.ceil(len(x_train) / int(np.ceil(BATCH_SIZE /
                                               (len(AUGS) + 1))))),
        epochs=EPOCHS,
        validation_data=ValidGenerator(x_valid,
                                       y_valid,
                                       batch_size=BATCH_SIZE,
                                       img_size_target=IMG_SIZE_TARGET),
        callbacks=[early_stopping, model_checkpoint, reduce_lr, logger],
        shuffle=True)
Example #3
def model_init(path_1, path_2):
    model = Unet(BACKBONE_NAME, input_shape=(None, None, 1), classes=1, encoder_weights=None)
    model.compile(optimizer=OPTIMIZER, loss=LOSS, metrics=[my_iou])
    history = model.fit_generator(train_generator,
                                steps_per_epoch=TRAIN_STEPS_PER_EPOCH,
                                validation_data=valid_generator,
                                validation_steps=VALID_STEPS_PER_EPOCH,
                                callbacks=callbacks,
                                epochs=50)
Example #4
def network_setup():
    global preprocess, model, idx, train_batches, valid_batches
    # LOAD UNET WITH PRETRAINING FROM IMAGENET
    preprocess = get_preprocessing(
        'resnet34')  # for resnet, img = (img-110.0)/1.0
    model = Unet('resnet34',
                 input_shape=(img_resize_shape[0], img_resize_shape[1],
                              in_channels),
                 classes=out_channels,
                 activation='sigmoid')
    model.compile(optimizer='adam',
                  loss='binary_crossentropy',
                  metrics=[dice_coef])
    # TRAIN AND VALIDATE MODEL
    idx = int(0.8 * len(train2))
    print()
    train_batches = DataGenerator(train2.iloc[:idx],
                                  shuffle=True,
                                  preprocess=preprocess)
    valid_batches = DataGenerator(train2.iloc[idx:], preprocess=preprocess)
    history = model.fit_generator(train_batches,
                                  validation_data=valid_batches,
                                  epochs=epochs,
                                  verbose=1)
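dice_coef is used as a metric here (and again in Example #11) but is not defined in the excerpt; a standard Keras-backend formulation is sketched below (the smooth constant is a common default, not necessarily the author's).

from keras import backend as K

def dice_coef(y_true, y_pred, smooth=1.0):
    # 2*|A∩B| / (|A| + |B|), computed on the flattened probability maps
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2.0 * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)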
Example #5
def main():
    # with open('/home/kunal/Desktop/Feature-Learning-for-Disease-Classification/temp_patch.txt') as f:
    # 	lines = f.readlines()

    with open('/home/rbuddhad/NIH-XRAY/train_sml.txt') as f1:
        lines1 = f1.readlines()

    with open('/home/rbuddhad/NIH-XRAY/validation_sml.txt') as f2:
        lines2 = f2.readlines()

    # print((lines1))

    train_datagen = ImageDataGenerator()
    train_batches = train_datagen.flow_from_directory(TRAIN_DATASET_PATH,
                                                      target_size=(1024, 1024),
                                                      shuffle=True,
                                                      class_mode=None,
                                                      batch_size=BATCH_SIZE)

    valid_datagen = ImageDataGenerator()
    valid_batches = valid_datagen.flow_from_directory(VALID_DATASET_PATH,
                                                      target_size=(1024, 1024),
                                                      shuffle=False,
                                                      class_mode=None,
                                                      batch_size=BATCH_SIZE)

    train_crops_orig = crop_generator(train_batches, CROP_LENGTH,
                                      lines1)  # 224
    valid_crops_orig = crop_generator(valid_batches, CROP_LENGTH, lines2)

    # batch_x_random_crop, batch_y_targeted_crop = next(train_crops)
    # valid_x, valid_y = next(valid_crops)
    # print(train_crops_orig.shape)
    # train_crops_orig=np.reshape(train_crops_orig,(train_crops_orig.shape[0]*train_crops_orig.shape[1],224,224,3))
    # print(train_crops_orig.shape)
    # in_painted_x= out_painting_mask(train_crops_orig)
    # valid_in_x=in_painting_mask(valid_x,valid_y)

    # train_crops_1_ch=rgb2gray(train_crops_orig)
    # train_crops_1_ch=np.reshape(train_crops_1_ch,(train_crops_1_ch.shape[0],224,224,1))

    # valid_x=rgb2gray(valid_x)
    # valid_x=np.reshape(valid_x,(valid_x.shape[0],224,224,1))

    # model = Unet(backbone_name='resnet18', encoder_weights='imagenet', decoder_block_type='transpose') # build U-Net
    model = Unet(backbone_name='resnet18', encoder_weights=None)  # build U-Net
    model.load_weights('best_model.h5')
    model.compile(optimizer='Adam', loss='mean_squared_error')
    model.summary()
    # print('inpaited',in_painted_x.shape)
    # print('1 channel y',train_crops_1_ch.shape)
    # print(in_painted_x.shape)
    # print(train_crops_1_ch.shape)

    callbacks = [
        EarlyStopping(monitor='val_loss', patience=70),
        ModelCheckpoint(filepath='best_model70_withgray_finetuned.h5',
                        monitor='val_loss',
                        save_best_only=True),
        TensorBoard(log_dir='./logs',
                    histogram_freq=0,
                    write_graph=True,
                    write_images=True)
    ]
    model.fit_generator(generator=train_crops_orig,
                        steps_per_epoch=100,
                        validation_data=valid_crops_orig,
                        callbacks=callbacks,
                        validation_steps=200,
                        epochs=300)
    model.save('outpaint70_withgray_finetuned.h5')
Example #6
    opt = Adam(lr=1e-4)
elif args.opt == 2:
    opt = SGD(lr=0.01, decay=1e-6, momentum=0.99, nesterov=True)
else:
    opt = Adadelta(lr=1, rho=0.95, epsilon=1e-08, decay=0.0)

m.compile(optimizer=opt, loss='categorical_crossentropy', metrics=[iou_score])

# fit model
weights_path = args.ckpt_path + 'weights.{epoch:02d}-{val_loss:.2f}-{val_iou_score:.2f}.hdf5'
callbacks = get_callbacks(weights_path, args.ckpt_path, 5, args.opt)

history = m.fit_generator(train_gen,
                          epochs=args.epochs,
                          steps_per_epoch=(NO_OF_TRAINING_IMAGES //
                                           BATCH_SIZE),
                          validation_data=(val_x / 255, val_y),
                          shuffle=True,
                          callbacks=callbacks)
''' save model structure '''
model_json = m.to_json()
with open(os.path.join(args.ckpt_path, "model.json"), "w") as json_file:
    json_file.write(model_json)
# serialize the full model (architecture + weights) to HDF5
m.save(os.path.join(args.ckpt_path, 'model.h5'))
print("Saved model to disk")
'''Evaluate and Test '''
print('======Start Evaluating======')
# evaluate directly from the arrays instead of using the generator
score = m.evaluate(val_x / 255, val_y, verbose=0)
print("%s: %.2f%%" % (m.metrics_names[0], score[0] * 100))
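get_callbacks is project code that is not included in this excerpt; a hypothetical stand-in that would let the snippet run might look like the following (the meaning of the third argument and of args.opt is a guess).

import os
from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, CSVLogger

def get_callbacks(weights_path, ckpt_path, patience, opt_id):
    return [
        ModelCheckpoint(weights_path, monitor='val_loss', save_best_only=True, verbose=1),
        ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=patience, verbose=1),
        CSVLogger(os.path.join(ckpt_path, 'training_opt{}.csv'.format(opt_id))),
    ]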
Example #7
                                                       ''),
                                 monitor='val_acc',
                                 verbose=0,
                                 save_best_only=False,
                                 mode='auto',
                                 period=2)
    trainingData = sio.loadmat('Training.mat')
    validationData = sio.loadmat('Validation.mat')
    train_generator = data_generator(trainingData, batchsize=batch)
    validation_generator = data_generator(validationData, batchsize=batch)
    model.fit_generator(
        generator=train_generator,
        # steps_per_epoch=2,
        steps_per_epoch=200,
        epochs=epochs,
        max_queue_size=100,
        validation_data=validation_generator,
        validation_steps=50,
        callbacks=[tensorboard, checkpoint, lrate],
        workers=1,
        use_multiprocessing=False)

elif status == 'Test':

    path = os.path.join('', '')
    model.load_weights(os.path.join(path, '', ''))
    import scipy.io as sio

    testData = sio.loadmat('Training.mat')
    images = testData['images']
    masks = testData['masks']
Example #8
    def train_track3(self, net='unet', check_folder=params.CHECKPOINT_DIR):
        os.environ["CUDA_VISIBLE_DEVICES"] = params.GPUS
        if os.path.exists(check_folder) == 0:
            os.mkdir(check_folder)
        CHECKPOINT_DIR = check_folder
        CHECKPOINT_PATH = os.path.join(check_folder,
                                       'weights.{epoch:02d}.hdf5')

        data_folder = 'C:/TrainData/Track3/Train/patch_473/'
        img_train, dsm_train, lable_train, img_val, dsm_val, label_val = load_all_data_files(
            data_folder)

        num_training_sample = len(img_train)
        batch_size = 1
        n_batch_per_epoch = num_training_sample // batch_size

        num_val_sample = len(img_val)
        n_batch_per_epoch_val = num_val_sample // batch_size

        nb_epoch = 200
        NUM_CATEGORIES = 5
        train_generator = input_generator_RGBH(img_train, dsm_train,
                                               lable_train, batch_size)
        val_generator = input_generator_RGBH(img_val, dsm_val, label_val,
                                             batch_size)

        if net == 'psp':
            from segmentation_models import pspnet  #PSPNet
            model_name = 'pspnet101_cityscapes'
            input_shape = (473, 473, 9)
            model = pspnet.PSPNet101(nb_classes=NUM_CATEGORIES,
                                     input_shape=input_shape,
                                     weights=model_name)
            model = model.model
        elif net == 'unet':
            input_shape = [256, 256, 9]
            from keras.layers import Input
            input_tensor = Input(shape=(input_shape[0], input_shape[1],
                                        input_shape[2]))
            model = Unet(input_shape=input_shape,
                         input_tensor=input_tensor,
                         backbone_name=params.BACKBONE,
                         encoder_weights=None,
                         classes=2)

            model.load_weights(
                os.path.join('./checkpoint_track3-1/', 'weights.80.hdf5'))

        from keras.optimizers import Adam, SGD
        from keras.callbacks import ModelCheckpoint, CSVLogger
        #loss=params.SEMANTIC_LOSS
        #   loss=my_weighted_loss
        weights = np.array([1.0, 10.0, 10.0, 20., 30.])
        loss = weighted_categorical_crossentropy(weights)

        optimizer = Adam(lr=1E-3, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
        model.compile(optimizer, loss=loss)
        model.summary()
        csv_logger = CSVLogger(os.path.join(CHECKPOINT_DIR, 'train.csv'))

        checkpoint = ModelCheckpoint(filepath=CHECKPOINT_PATH,
                                     monitor='loss',
                                     verbose=1,
                                     save_best_only=False,
                                     save_weights_only=True,
                                     mode='auto',
                                     period=params.MODEL_SAVE_PERIOD)
        callbacks = [csv_logger, checkpoint]

        model.fit_generator(train_generator,
                            steps_per_epoch=n_batch_per_epoch,
                            validation_data=val_generator,
                            validation_steps=n_batch_per_epoch_val,
                            epochs=nb_epoch,
                            callbacks=callbacks)
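weighted_categorical_crossentropy is referenced above but not defined in the excerpt; a common Keras-backend sketch of such a class-weighted loss follows (an assumption about the helper, not the author's exact implementation).

import numpy as np
from keras import backend as K

def weighted_categorical_crossentropy(weights):
    weights = K.variable(np.asarray(weights, dtype='float32'))

    def loss(y_true, y_pred):
        # clip to avoid log(0), then weight each class's log-likelihood
        y_pred = K.clip(y_pred, K.epsilon(), 1.0 - K.epsilon())
        return -K.sum(weights * y_true * K.log(y_pred), axis=-1)

    return loss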
Example #9
#for i,batch in enumerate(myGene):
#    if(i >= num_batch):
#        break

## original U-Net without pretrained weights
#model = unet(input_size=(512,512,3), lr = 0.0001)
## resnet34 (or another backbone) U-Net with pretrained weights
model = Unet('resnet34', encoder_weights='imagenet')
model.compile(optimizer=Adam(lr=0.0001),
              loss='binary_crossentropy',
              metrics=['accuracy'])

model_checkpoint = ModelCheckpoint('unet_MoNuSeg.hdf5',
                                   monitor='loss',
                                   verbose=1,
                                   save_best_only=True)
reduce_lr = ReduceLROnPlateau(monitor='loss',
                              factor=0.1,
                              patience=5,
                              mode='auto',
                              verbose=1)
early_stopping = EarlyStopping(monitor='loss', patience=10, verbose=1)
model.fit_generator(myGene,
                    steps_per_epoch=200,
                    epochs=1000,
                    callbacks=[model_checkpoint, reduce_lr, early_stopping])

testGene = testGenerator("data/MoNuSeg/test", as_gray=False)
results = model.predict_generator(testGene, 7, verbose=1)
saveResult("data/MoNuSeg/test", results)
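testGenerator and saveResult are helpers from the data script of the classic Keras U-Net project and are not shown here; a minimal reader-side sketch, assuming test images named 0.png, 1.png, ... (an illustration, not the original helper):

import os
import numpy as np
import skimage.io as io
import skimage.transform as trans

def testGenerator(test_path, num_image=7, target_size=(256, 256), as_gray=False):
    for i in range(num_image):
        img = io.imread(os.path.join(test_path, "%d.png" % i), as_gray=as_gray)
        img = trans.resize(img, target_size)   # resize also rescales uint8 input to [0, 1]
        if as_gray:
            img = np.expand_dims(img, axis=-1)
        yield np.expand_dims(img, axis=0)      # add the batch dimension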
Example #10
def train(config_path):
    with open(config_path, 'r') as f:
        config = json.load(f)

    train_datagen = ImageDataGenerator(rescale=1. / 255,
                                       brightness_range=(0.7, 1.0),
                                       zoom_range=(0.8, 1.2))
    val_datagen = ImageDataGenerator(rescale=1. / 255)

    seed = 42
    train_frames_dir = os.path.join(config['dataset'], 'train_frames')
    train_image_gen = train_datagen.flow_from_directory(
        directory=train_frames_dir,
        target_size=config['image_size'],
        class_mode=None,
        batch_size=config['train_batch_size'],
        seed=seed)

    train_mask_gen = train_datagen.flow_from_directory(
        directory=os.path.join(config['dataset'], 'train_masks'),
        target_size=config['image_size'],
        color_mode='grayscale',
        class_mode=None,
        batch_size=config['train_batch_size'],
        shuffle=True,
        seed=seed)

    val_frames_dir = os.path.join(config['dataset'], 'val_frames')
    val_image_gen = val_datagen.flow_from_directory(
        directory=val_frames_dir,
        target_size=config['image_size'],
        class_mode=None,
        batch_size=config['val_batch_size'],
        shuffle=False)

    val_mask_gen = val_datagen.flow_from_directory(
        directory=os.path.join(config['dataset'], 'val_masks'),
        target_size=config['image_size'],
        color_mode='grayscale',
        class_mode=None,
        batch_size=config['val_batch_size'],
        shuffle=False)

    train_gen = zip(train_image_gen, train_mask_gen)
    val_gen = zip(val_image_gen, val_mask_gen)

    model = Unet(backbone_name='resnet34',
                 input_shape=(config['image_size'][0], config['image_size'][1],
                              3),
                 classes=1,
                 activation='sigmoid',
                 encoder_weights='imagenet',
                 decoder_block_type='transpose')

    optimizer = AdamAccumulate(lr=config['lr'], accum_iters=4)
    loss = dice_coef_loss
    metrics = [dice_coef]

    model.compile(optimizer, loss=loss, metrics=metrics)

    checkpoints_path = os.path.join(
        os.path.dirname(config['dataset']), 'experiments',
        datetime.datetime.now().strftime("%y-%m-%d-%H-%M"),
        'epoch_{epoch:03d}.hdf5')

    callbacks = [
        ModelCheckpoint(checkpoints_path),
        MergeGraphsTensorBoard(['val'],
                               os.path.join(os.path.dirname(checkpoints_path),
                                            'tbruns'))
    ]

    model.fit_generator(train_gen,
                        steps_per_epoch=len(train_image_gen),
                        epochs=config['epochs'],
                        shuffle=False,
                        callbacks=callbacks,
                        validation_data=val_gen,
                        validation_steps=len(val_image_gen),
                        verbose=True)
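The zip above works because both directory iterators are infinite and were built with the same seed, so their shuffling stays in sync; an equivalent explicit generator (a sketch using the names from this example) makes the pairing easier to see.

def combine_generators(image_gen, mask_gen):
    # yields (frames_batch, masks_batch) tuples, as fit_generator expects
    while True:
        yield next(image_gen), next(mask_gen)

train_gen = combine_generators(train_image_gen, train_mask_gen)
val_gen = combine_generators(val_image_gen, val_mask_gen)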
Example #11
             input_shape=(128, 800, 3),
             classes=4,
             activation='sigmoid')
model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=[dice_coef])

# TRAIN AND VALIDATE MODEL
idx = int(0.8 * len(train2))
print()
train_batches = DataGenerator(train2.iloc[:idx],
                              shuffle=True,
                              preprocess=preprocess)
valid_batches = DataGenerator(train2.iloc[idx:], preprocess=preprocess)
history = model.fit_generator(train_batches,
                              validation_data=valid_batches,
                              epochs=30,
                              verbose=2)
# PLOT TRAINING
plt.figure(figsize=(15, 5))
plt.plot(range(history.epoch[-1] + 1),
         history.history['val_dice_coef'],
         label='val_dice_coef')
plt.plot(range(history.epoch[-1] + 1),
         history.history['dice_coef'],
         label='trn_dice_coef')
plt.title('Training Accuracy')
plt.xlabel('Epoch')
plt.ylabel('Dice_coef')
plt.legend()
plt.show()
# PREDICT FROM VALIDATION SET (ONLY IMAGES WITH DEFECTS)
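The prediction code announced by the comment above was truncated; a minimal sketch, assuming the same valid_batches generator and a 0.5 threshold on the sigmoid outputs (both are assumptions):

import numpy as np

preds = model.predict_generator(valid_batches, verbose=1)
defect_masks = (preds > 0.5).astype(np.uint8)  # one binary mask per defect class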
            #         y=y_train,
            #         batch_size=n_batch,
            #         epochs=n_epochs,
            #         class_weight = class_weight,
            #         validation_data=(x_val, y_val),callbacks = callbacks
            #     )
            train_val_p2 = n_batch * 50
            s_p_e = train_val_p2 // n_batch
            val_p2 = n_batch * 10
            val_pe = val_p2 // n_batch
            print(type(s_p_e))
            model.fit_generator(train_generator_2(train_val_p2, number_train,
                                                  n_batch, folder_train, size,
                                                  N),
                                validation_data=val_generator_2(
                                    train_val_p2, number_val, n_batch,
                                    folder_train, size, N),
                                validation_steps=val_pe,
                                steps_per_epoch=s_p_e,
                                epochs=n_epochs)

            times = time_callback.times
            dic_times = {}
            dic_times['times'] = times
            savemat(
                combinations + "_" + BACKBONE + '_' + name_model +
                '_times.mat', dic_times)
            model.save_weights(combinations + "_" + BACKBONE + "_" +
                               name_model + "_model_wIoU" + str(size) + ".h5")
            ############END TRAINING#############
Example #13
def train_model(args):
    # Keep TensorFlow from eating up all available GPU memory
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    K.set_session(sess)

    # Set file paths
    args.log_path = os.path.join(args.save_dir, 'logs')
    args.model_path = os.path.join(args.save_dir, 'seg_model.h5')
    args.data_path_x = os.path.join(args.data_dir, 'x.npy')
    args.data_path_y = os.path.join(args.data_dir, 'y.npy')
    args.data_path_weights = None
    if args.use_weights:
        args.data_path_weights = os.path.join(args.data_dir, 'weights.npy')

    # Set up training data generator
    training_generator = DataGenerator(args.data_path_x,
                                       args.data_path_y,
                                       args.data_path_weights,
                                       batch_size=args.batch_size,
                                       in_shape=(256, 320, 4),
                                       out_shape=(256, 320, 1))

    # Set up model
    model = Unet('resnet18',
                 input_shape=(256, 320, 4),
                 activation='sigmoid',
                 classes=1,
                 encoder_weights=None)
    adam = keras.optimizers.Adam(lr=1e-4,
                                 beta_1=0.9,
                                 beta_2=0.999,
                                 epsilon=1e-08,
                                 decay=0.0)
    callbacks_list = [
        TensorBoardImage(args.data_path_x, args.log_path),
        keras.callbacks.TensorBoard(log_dir=args.log_path,
                                    update_freq=1000,
                                    write_graph=True),
        keras.callbacks.ModelCheckpoint(args.model_path,
                                        save_weights_only=True)
    ]

    # Run model on multiple GPUs if available
    try:
        model = multi_gpu_model(model)
        print("Training model on multiple GPUs")
    except ValueError:
        print("Training model on single GPU")

    # Load weights if specified
    if args.saved_model_path is not None:
        model.load_weights(args.saved_model_path)

    # Set loss
    loss = mask_loss
    acc = mask_acc
    if args.use_weights:
        loss = mask_loss_weighted
        acc = mask_acc_weighted

    # Compile model and start training
    model.compile(loss=[loss], optimizer=adam, metrics=[acc])
    model.fit_generator(generator=training_generator,
                        epochs=args.epochs,
                        use_multiprocessing=True,
                        workers=8,
                        callbacks=callbacks_list)

    return model
                                        mode='max',
                                        verbose=1)

    model_id = int(datetime.utcnow().timestamp())

    checkpoint_callback = ModelCheckpoint(
        filepath=f'{path_checkpoint}/{MODEL_NAME}_{img_size}_{model_id}.hdf5',
        monitor='val_dice_coef',
        mode='max',
        verbose=1,
        save_best_only=True)

    history = model.fit_generator(
        train_generator,
        epochs=epochs,
        steps_per_epoch=no_train_steps,
        validation_data=valid_generator,
        validation_steps=no_valid_steps,
        callbacks=[early_stop_callback, checkpoint_callback])

    plt.figure(figsize=(10, 8), dpi=100)

    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])

    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.title('Loss Over Time')
    plt.legend(['Train', 'Valid'])
    plt.savefig(f'learning_curves/{MODEL_NAME}_{img_size}_{model_id}.png')
reduce_lr = ReduceLROnPlateau(monitor='my_iou_metric',
                              mode='max',
                              factor=0.5,
                              patience=5,
                              min_lr=0.0001,
                              verbose=1)

training_generator = DataGenerator(train_df, batch_size=batch_size)
validation_generator = DataGenerator(val_df,
                                     batch_size=batch_size,
                                     augment=False)

history = model1.fit_generator(generator=training_generator,
                               validation_data=validation_generator,
                               epochs=epochs,
                               callbacks=[model_checkpoint, reduce_lr],
                               verbose=2,
                               use_multiprocessing=True,
                               workers=4)

post_process.plot_history_result(history, TAG, 'model1')

print('load first model')
model2 = load_model(save_model_name,
                    custom_objects={
                        'bce_dice_loss': custom_loss.bce_dice_loss,
                        'my_iou_metric': custom_loss.my_iou_metric
                    })
input_x = model2.layers[0].input

output_layer = model2.layers[-1].input
# create training and validation generators that yield images and masks
train_generator = zip(x, y)
val_generator = zip(x_val, y_val)

# pip install segmentation-models

from segmentation_models import Unet

model = Unet('resnet34', encoder_weights='imagenet', classes=1, input_shape=(512,512, 3), activation='sigmoid')

model.compile('Adam', loss="binary_crossentropy", metrics=["acc"])

from keras.models import load_model
model.load_weights('Unet_weights.h5')

results = model.fit_generator(train_generator, validation_data=val_generator, validation_steps=500, steps_per_epoch=1000,epochs=10)
model.save_weights('Unet_weights.h5')
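A sketch of inference with the trained weights, whose binarized output could then be scored with the SEG_EVAL helper defined below (x_test and the 0.5 threshold are illustrative assumptions):

import numpy as np

preds = model.predict(x_test)                  # x_test: (N, 512, 512, 3), preprocessed like the training data
pred_masks = (preds[..., 0] > 0.5).astype(np.uint8)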



def SEG_EVAL(Seg,GT):
    # Seg : Segmented image, must be binary (1 = regions of interest 0 = background)
    # GT : Ground truth, must be binary (1 = regions of interest 0 = background)
    Seg = Seg.astype(bool)
    GT = GT.astype(bool)
    
    #dice_coefficient
    intersection = np.logical_and(Seg, GT)
    dice_coefficient = 2. * intersection.sum() / (Seg.sum() + GT.sum())
    
    #IoU
    union = np.logical_or(Seg, GT)
    iou = intersection.sum() / union.sum()

    return dice_coefficient, iou  # assumed return values; the original snippet is truncated here
Example #17
def main():
    #########################################
    # input parser
    #########################################
    parser = argparse.ArgumentParser(
        description='Road Segmentation Challenge- EPFL.')

    group_data = parser.add_argument_group('model arguments')
    group_data.add_argument(
        '--model',
        type=str,
        default="unet",
        choices=["unet", "manunet4", "manunet5", "manunet6"],
        help='select the Neural Network model you desired to use.')
    args = parser.parse_args()
    for arg in vars(args):
        print(arg, getattr(args, arg))
    modelname = args.model

    #########################################
    # generate data
    #########################################
    # 1: Devide the data
    data_division.make_folders()

    # 2 : Load entire images

    # Generators
    training_generator = generator.DataGenerator(
        constants.train_image_path,
        constants.train_mask_path,
        augmentation=helpers.aug_with_crop,
        batch_size=1,
    )
    validation_generator = generator.DataGenerator(constants.val_image_path,
                                                   constants.val_mask_path)

    #########################################
    # Model and training
    #########################################
    if (modelname == "manunet4"):
        model = unetManual()
        model.summary()
    elif (modelname == "manunet5"):
        model = unetManualFiveDeep()
        model.summary()
    elif (modelname == "manunet6"):
        model = unetManualSixDeep()
        model.summary()
    else:
        model = Unet(backbone_name='efficientnetb7',
                     encoder_weights='imagenet',
                     encoder_freeze=False)
        model.compile(optimizer='Adam',
                      loss=bce_jaccard_loss,
                      metrics=[sm.metrics.FScore(threshold=0.5)])

    history = model.fit_generator(training_generator,
                                  shuffle=True,
                                  epochs=30,
                                  workers=4,
                                  use_multiprocessing=True,
                                  validation_data=validation_generator,
                                  verbose=1,
                                  callbacks=[callbacks.lr_reducer])
    # plotting history
    #helpers.plot_training_history(history)

    # Save model
    model.save(constants.PATH + "saved_" + modelname + ".h5")
    print("Trained model was successfully saved on disk.")
    #model = load_model(constants.PATH + "saved_"+modelname+".h5")

    #########################################
    # Testing and make predictions
    #########################################
    test = helpers.listdir_fullpath(constants.IMAGE_PATH)
    os.makedirs(constants.MASK_TEST_PATH)

    for pth in test:
        name = os.listdir(pth)[0]
        path = pth + "/" + name
        print(path)
        image = mpimg.imread(path) / 255
        if (modelname == "manunet4" or modelname == "manunet5"
                or modelname == "manunet6"):
            image = cv2.resize(
                image, dsize=(384, 384), interpolation=cv2.INTER_CUBIC
            )  # resize test images to (384,384) to feed to manual Unet
            prediction = cv2.resize(model.predict(np.expand_dims(
                image, axis=0)).reshape(384, 384),
                                    dsize=(608, 608),
                                    interpolation=cv2.INTER_CUBIC
                                    )  # resize the predictions to (608,608)
        else:
            prediction = model.predict(np.expand_dims(image, axis=0)).reshape(
                608, 608)
        mpimg.imsave(constants.MASK_TEST_PATH + name, prediction)
        print("Image " + name + " saved")

    submission_filename = constants.PATH + "test_final_" + modelname + ".csv"
    image_filenames = helpers.listdir_fullpath(constants.MASK_TEST_PATH)
    make_submission.masks_to_submission(submission_filename, *image_filenames)
Example #18
                                                  target_size=(1024, 1024),
                                                  shuffle=False,
                                                  class_mode=None,
                                                  batch_size=BATCH_SIZE)
train_crops = crop_generator(train_batches, CROP_LENGTH)  #224
valid_crops = crop_generator(valid_batches, CROP_LENGTH)

batch_x, batch_y = next(train_crops)
print(np.array(batch_x).shape)
print(np.array(batch_y).shape)
plt.imshow(batch_x[0])
plt.show()
plt.imshow(batch_y[0])
plt.show()
# build a U-Net segmentation model on a pre-trained ResNet18 encoder:
# 1. the encoder is initialized with ImageNet weights
# 2. the decoder uses transposed-convolution upsampling blocks
# 3. the model is compiled with Adam, binary cross-entropy loss, and
#    binary accuracy as the reported metric
model = Unet(backbone_name='resnet18',
             encoder_weights='imagenet',
             decoder_block_type='transpose')  # build U-Net
model.compile('Adam', 'binary_crossentropy', ['binary_accuracy'])

model.fit_generator(generator=train_crops,
                    steps_per_epoch=12,
                    validation_data=valid_crops,
                    validation_steps=12,
                    epochs=100)
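crop_generator is not shown in this excerpt; a plausible sketch that takes random CROP_LENGTH x CROP_LENGTH crops from each batch and yields (crop, crop) pairs for the reconstruction-style training used here (an assumption, not the author's implementation):

import numpy as np

def crop_generator(batches, crop_length):
    while True:
        batch_x = next(batches)  # class_mode=None, so the iterator yields images only
        h, w = batch_x.shape[1:3]
        crops = np.empty((batch_x.shape[0], crop_length, crop_length, batch_x.shape[3]),
                         dtype=batch_x.dtype)
        for i, img in enumerate(batch_x):
            y = np.random.randint(0, h - crop_length + 1)
            x = np.random.randint(0, w - crop_length + 1)
            crops[i] = img[y:y + crop_length, x:x + crop_length]
        yield crops, crops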
    horizontal_flip = True, # flip images horizontally
    fill_mode = "nearest" # how to fill pixels exposed after rotation or shifting
)

train_gen = train_image_generator("image","label",0,29,2,None) # build a training data generator
#validate_gen = train_image_generator("image","label",21,29,3,None) # build a validation data generator

# define and compile the model
model = Unet('resnet34', input_shape = (512, 512, 1), encoder_weights = None) # the 1 is the number of channels
model.compile(optimizer = Adam(lr = 1e-4), loss = 'binary_crossentropy', metrics = ['accuracy'])

# train the model; `his` records the training history so the curves can be plotted afterwards
his = model.fit_generator( 
    generator = train_gen, # training-set generator
    steps_per_epoch = 300, # training steps per epoch
    # validation_data = validate_gen, # validation-set generator
    # validation_steps = 3, # validation steps per epoch
    epochs = 1 # number of epochs
)

model.save("model_v3.h5") # save the model

print("Saved model to disk")


# plot the training curves
# N = 51
# plt.style.use("ggplot")
# plt.figure()
# plt.plot(np.arange(0, N), his.history["loss"], label="train_loss")
# plt.plot(np.arange(0, N), his.history["val_loss"], label="val_loss")
def launch():
    ######################
    #
    # HYPER PARAMS
    #
    ######################
    BATCH_SIZE = 8
    TRAINSIZE_RATIO = 0.8
    N_THREADS = 16
    CLASSES = getClassesLabelList()
    N_CLASSES = 1 if not isMulticlassDataset() else (len(CLASSES) + 1)
    print('NB CLASS ====> ' + str(N_CLASSES))
    FINAL_ACTIVATION_LAYER = 'sigmoid' if N_CLASSES == 1 else 'softmax'

    ### OLD ###########
    LOSS = "binary_crossentropy" if N_CLASSES == 1 else "categorical_crossentropy"
    METRICS = "binary_accuracy" if N_CLASSES == 1 else "categorical_accuracy"
    print('ACTI ====> ' + str(FINAL_ACTIVATION_LAYER))


    TRAIN_PATH = "data/img"
    IMG_DIR_NAME = "ori"
    MASK_DIR_NAME = "mask"

    DIR_MODEL = "result/model"
    MODEL_NAME = "model.h5"

    DIR_LOGS = "result/log/metric"
    LOGS_FILE_NAME = "metrics.csv"

    DIR_TRAINED_MODEL = os.path.join(DIR_MODEL, MODEL_NAME)
    DIR_TRAINED_LOGS = os.path.join(DIR_LOGS, LOGS_FILE_NAME)

    #NUM_SAMPLES = len(os.listdir(os.path.join(os.getcwd(), TRAIN_PATH, IMG_DIR_NAME)))
    EPOCH = 999

    #DATASET = pd.read_csv("data\\label\\datasetAugmented.csv", sep=',', index_col=0)
    DATASET = pd.read_csv("data\\label\\datasetAugmented.csv", sep=',', index_col=0)
    NUM_SAMPLES = len(DATASET)


    SAMPLE_TRAIN = int(NUM_SAMPLES * TRAINSIZE_RATIO)
    SAMPLE_VALID = int(NUM_SAMPLES * (1 - TRAINSIZE_RATIO))

    print("TRAIN_SIZE " + str(len(DATASET[:SAMPLE_TRAIN])))
    print("VAL_SIZE " + str(len(DATASET[SAMPLE_TRAIN:])))




    # TRAIN_STEPS = len(os.listdir((os.path.join(train_path, "images")))) // batch_size
    # VALIDATION_STEPS = len(os.listdir((os.path.join(val_path, "images")))) // batch_size

    # metrics = ["acc", tf.keras.metrics.Recall(), tf.keras.metrics.Precision(), iou]

    ######################
    #
    # CALLBACK
    #
    ######################
    savemodelCallback = ModelCheckpoint(DIR_TRAINED_MODEL,
                                        verbose=1,
                                        save_best_only=True,
                                        save_weights_only=False,
                                        mode='min',
                                        period=1,
                                        monitor='val_loss')
    # monitor='val_acc')
    # logsCallback = TensorBoard(log_dir=DIR_TRAINED_MODEL_LOGS, histogram_freq=0, write_graph=True, write_images=True)
    csv_logger = CSVLogger(DIR_TRAINED_LOGS, append=False, separator=',')
    earlyStopping = EarlyStopping(verbose=1, monitor='val_loss', min_delta=0, patience=5, mode='min')
    reduceLearningrate = ReduceLROnPlateau(monitor='val_loss', factor=0.1,
                                           patience=3, min_lr=0.0001, mode='min', verbose=1)

    ######################
    #
    # MODEL
    #
    ######################
    # COMPILATION MODEL
    model = Unet(backbone_name='resnet18',
                 encoder_weights='imagenet',
                 #decoder_block_type='transpose',
                 classes=N_CLASSES,
                 activation=FINAL_ACTIVATION_LAYER)
    model.compile(optimizer=Adam(lr=0.01),
                  loss=FocalTverskyLoss,
                  metrics=[dice_coef])

    #loss = FocalTverskyLoss,
    #loss = lovasz_loss,
    #metrics = [dice_coef])
    #metrics = [mean_iou])

    ######################
    #
    # GENERATOR
    #
    ######################
    # model_checkpoint = ModelCheckpoint('unet_membrane.hdf5', monitor='loss',verbose=1, save_best_only=True)
    # model.fit_generator(myGene,steps_per_epoch=2000,epochs=5,callbacks=[model_checkpoint])
    # trainGen = trainGenerator(TRAIN_PATH, IMG_DIR_NAME, MASK_DIR_NAME, BATCH_SIZE, 0.2)
    # validationGen = validationGenerator(TRAIN_PATH, IMG_DIR_NAME, MASK_DIR_NAME, BATCH_SIZE, 0.2)

    trainGen = DatasetLoader(data=DATASET[:SAMPLE_TRAIN],
                             xLabel='x_path',
                             yLabel='y_path',
                             batchSize=BATCH_SIZE,
                             shuffle=True,
                             targetSize=(256, 256),
                             nClass=N_CLASSES)
    validationGen = DatasetLoader(data=DATASET[SAMPLE_TRAIN:],
                                  xLabel='x_path',
                                  yLabel='y_path',
                                  batchSize=BATCH_SIZE,
                                  shuffle=True,
                                  targetSize=(256, 256),
                                  nClass=N_CLASSES)




    model.fit_generator(generator=trainGen,
                        validation_data=validationGen,
                        epochs=EPOCH,
                        callbacks=[csv_logger, earlyStopping, reduceLearningrate, savemodelCallback],
                        #use_multiprocessing=True,
                        #workers=4
                        )
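FocalTverskyLoss and dice_coef used by launch() are project helpers that are not part of this excerpt; a common Keras-backend formulation of the focal Tversky loss is sketched below (alpha and gamma are illustrative defaults, not necessarily the values used here).

from keras import backend as K

def FocalTverskyLoss(y_true, y_pred, alpha=0.7, gamma=0.75, smooth=1e-6):
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    tp = K.sum(y_true_f * y_pred_f)
    fn = K.sum(y_true_f * (1.0 - y_pred_f))
    fp = K.sum((1.0 - y_true_f) * y_pred_f)
    # the Tversky index weights false negatives more heavily than false positives
    tversky = (tp + smooth) / (tp + alpha * fn + (1.0 - alpha) * fp + smooth)
    return K.pow(1.0 - tversky, gamma)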
Example #21
# preprocess input
# x_train = preprocess_input(x_train)
# x_val = preprocess_input(x_val)

# define model
model = Unet(BACKBONE, encoder_weights='imagenet')
model.compile('Adam', loss=bce_jaccard_loss, metrics=[iou_score])

model.fit_generator(training_generator,
                    steps_per_epoch=training_steps_per_epoch,
                    epochs=1,
                    verbose=1,
                    callbacks=None,
                    validation_data=validation_generator,
                    validation_steps=validation_steps_per_epoch,
                    class_weight=None,
                    max_queue_size=10,
                    workers=1,
                    use_multiprocessing=False,
                    shuffle=True,
                    initial_epoch=0)

# fit model
# model.fit(
#     x=x_train,
#     y=y_train,
#     batch_size=16,
#     epochs=100,
#     validation_data=(x_val, y_val),
# )
Example #22
def train(backbone,
          load_pretrain,
          data_path,
          split_path,
          save_path,
          n_split=5,
          seed=960630,
          batch_size=4,
          fold=0):

    # split by all data
    get_train_val_split(data_path=data_path + 'image_set/',
                        save_path=split_path,
                        n_splits=n_split,
                        seed=seed)

    # split by folders
    # get_train_val_split(data_path=data_path+'images/',
    #                     save_path=split_path,
    #                     n_splits=n_split,
    #                     seed=seed)

    if load_pretrain is not None:
        model = load_model(load_pretrain, compile=False)
    elif backbone is not None:
        model = Unet(backbone, classes=1, encoder_weights='imagenet')
    else:
        model = Unet(classes=1, encoder_weights='imagenet')

    model.compile('Adam', loss=bce_jaccard_loss, metrics=[iou_score])
    model.summary()

    # split by all images
    train_data = Carotid_DataGenerator(
        df_path=split_path +
        'split/train_fold_{}_seed_{}.csv'.format(fold, seed),
        image_path=data_path + 'image_set/',
        mask_path=data_path + '/mask_set/',
        batch_size=batch_size,
        target_shape=(512, 512),
        augmentation=True,
        shuffle=False)
    val_data = Carotid_DataGenerator(
        df_path=split_path +
        'split/val_fold_{}_seed_{}.csv'.format(fold, seed),
        image_path=data_path + 'image_set/',
        mask_path=data_path + '/mask_set/',
        batch_size=batch_size,
        target_shape=(512, 512),
        augmentation=True,
        shuffle=False)

    # split by folder
    # train_data = Carotid_DataGenerator(
    #     df_path=split_path+'split/train_fold_{}_seed_{}.csv'.format(fold, seed),
    #     image_path=data_path + 'images/',
    #     mask_path=data_path + '/masks/',
    #     batch_size=batch_size,
    #     target_shape=(512, 512),
    #     augmentation=True,
    #     shuffle=False)
    # val_data = Carotid_DataGenerator(
    #     df_path=split_path + 'split/val_fold_{}_seed_{}.csv'.format(fold, seed),
    #     image_path=data_path + 'images/',
    #     mask_path=data_path + '/masks/',
    #     batch_size=batch_size,
    #     target_shape=(512, 512),
    #     augmentation=True,
    #     shuffle=False)

    callbacks = [
        EarlyStopping(monitor='val_loss',
                      patience=8,
                      verbose=1,
                      min_delta=1e-4),
        ReduceLROnPlateau(monitor='val_loss',
                          factor=0.1,
                          patience=4,
                          verbose=1,
                          epsilon=1e-4),
        ModelCheckpoint(monitor='val_loss',
                        filepath=save_path,
                        verbose=True,
                        save_best_only=True)
    ]

    model.fit_generator(train_data,
                        validation_data=val_data,
                        epochs=10,
                        callbacks=callbacks,
                        verbose=1)
Example #23
image_datagen = ImageDataGenerator(**data_gen_args)
mask_datagen = ImageDataGenerator(**data_gen_args)

seed = 42
bs = 32  # batch size
nb_epochs = 100  # number of epochs

image_generator = image_datagen.flow(X_train,
                                     seed=seed,
                                     batch_size=bs,
                                     shuffle=True)
mask_generator = mask_datagen.flow(y_train,
                                   seed=seed,
                                   batch_size=bs,
                                   shuffle=True)

# Just zip the two generators to get a generator that provides augmented images and masks at the same time
train_generator = zip(image_generator, mask_generator)

results = model.fit_generator(train_generator,
                              steps_per_epoch=(spe),
                              epochs=nb_epochs,
                              validation_data=(X_valid, y_valid),
                              callbacks=[save, lr_schedule, reduce_lr])

# save final model
model.save_weights(CKPT_PATH +
                   '{}_{}_{}_model.h5'.format(t, model_name, backbone_name))

# predicte valid data
predicted = model.predict(X_valid)
    print('Setting up experiment record directory ...')
    record_dir = record.prepare_record_dir()
    record.save_params(record_dir, args)

    # copy training script
    copyfile(__file__, os.path.join(record_dir, __file__))

    checkpoint_path = os.path.join(record_dir, 'checkpoints',
                                   'weights.{epoch:03d}.hdf5')
    callbacks = [
        TerminateOnNaN(),
        ModelCheckpoint(checkpoint_path,
                        period=5,
                        save_weights_only=True,
                        verbose=1),
        CSVLogger(os.path.join(record_dir, 'history.csv'), append=True),
        ReduceLROnPlateau('loss', factor=0.5, min_lr=1e-5, verbose=1),
        TensorBoard(os.path.join(record_dir, 'log')),
        MaskVisualization(model, record_dir, x_val, y_val),
    ]

    steps_per_epoch = int(math.ceil(config.PATCHES_PER_EPOCH /
                                    args.batch_size))

    model.fit_generator(train_datagen,
                        validation_data=[x_val, y_val],
                        steps_per_epoch=steps_per_epoch,
                        epochs=args.epochs,
                        callbacks=callbacks)
                             save_best_only=True, mode='min')

reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=2, 
                                   verbose=1, mode='min')



log_fname = 'training_log.csv'
csv_logger = CSVLogger(filename=log_fname,
                       separator=',',
                       append=False)

callbacks_list = [checkpoint, earlystopper, csv_logger, reduce_lr]

history = model.fit_generator(train_gen, steps_per_epoch=train_steps, epochs=5, 
                              validation_data=val_gen, validation_steps=val_steps,
                             verbose=1,
                             callbacks=callbacks_list)

#initialize the test generator
test_gen = test_generator(batch_size=1)

model.load_weights('model.h5')
predictions = model.predict_generator(test_gen, 
                                      steps=len(df_test),  
                                      verbose=1)

!ls

preds_test_thresh = (predictions >= 0.7).astype(np.uint8)
preds_test_thresh.shape
Example #26
model = PSPNet(BACKBONE, classes=len(class_ids), encoder_weights='imagenet')
model = Linknet(BACKBONE, classes=len(class_ids), encoder_weights='imagenet')

model.compile('Adam', loss=bce_jaccard_loss, metrics=[iou_score])
model.summary()

modelCheckpoint = keras.callbacks.ModelCheckpoint(filepath='segmod_weights.{epoch:02d}-{val_loss:.4f}.hdf5',
                                                  monitor='val_loss',
                                                  verbose=0, save_best_only=False, save_weights_only=False,
                                                  mode='auto', period=1)
reduceLROnPlateau = keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=7, verbose=1,
                                                      mode='auto', min_delta=0.001, cooldown=0, min_lr=10e-7)


model.fit_generator(generator=train_generator, steps_per_epoch=None, epochs=10, verbose=1,
                    callbacks=[reduceLROnPlateau, modelCheckpoint],
                    validation_data=val_generator, validation_steps=None, class_weight=None, max_queue_size=10,
                    workers=2, use_multiprocessing=False, shuffle=True, initial_epoch=0)


os._exit(0)
from segmentation_models import Unet
from segmentation_models.utils import set_trainable

''' 
Sometimes it is useful to train only the randomly initialized decoder, so that the weights of the properly pre-trained encoder are not damaged by huge gradients during the first steps of training.
In this case, all you need is to pass the freeze_encoder = True argument when initializing the model.
'''

model = Unet(backbone_name='resnet34', classes=len(class_ids), encoder_weights='imagenet', freeze_encoder=True)
model.compile('Adam', loss=cce_jaccard_loss, metrics=[jaccard_score])
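A sketch of the two-stage schedule this enables, assuming generators named train_gen and val_gen (not defined in this excerpt): warm up the decoder while the encoder stays frozen, then unfreeze everything with set_trainable and continue training.

model.fit_generator(train_gen, steps_per_epoch=100, epochs=2,
                    validation_data=val_gen, validation_steps=20)

set_trainable(model)  # make all layers trainable again; recompiles the model by default

model.fit_generator(train_gen, steps_per_epoch=100, epochs=20,
                    validation_data=val_gen, validation_steps=20)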
Example #27

start_time = time.time() # measuring modelling time

# basic .fit method
model.fit(X_train, y_train_multidim, epochs = 2, batch_size = batch_size, validation_data = (X_test, y_test_multidim)) 

set_trainable(model, recompile = False) # Make all layers of the model trainable, lifting the encoder freeze. recompile = True does not work with Tensorflow 2.0
model.compile(optimizer, 
              loss = JaccardLoss(per_image = False), 
              metrics = ['categorical_accuracy', IOUScore(per_image = False, threshold = 0.5)])

# fit_generator method for image augmentation
model.fit_generator(train_generator, 
                    validation_data=test_generator, 
                    steps_per_epoch=len(X_train) // batch_size, 
                    validation_steps=len(X_test) // batch_size, 
                    epochs=epoch_no, 
                    callbacks=backroom.callbacks)

elapsed_time = time.time()-start_time # measuring modelling time
print(time.strftime("%H:%M:%S", time.gmtime(elapsed_time))) # beautifying time format




#------------------------------------------------------------------------------
# 4. STEP
# Prediction 

model.load_weights(SSD_path + "model/weights/fin_M1_unet_best.h5")
y_pred = model.predict(X_test)
Example #28
    target_shape=(512, 512),
    shuffle=False)
val_data = Carotid_DataGenerator(
    df_path='dataset/split/val_fold_1_seed_960630.csv',
    image_path='../Carotid-Data/Carotid-Data/images/',
    mask_path='../Carotid-Data/Carotid-Data/masks/',
    batch_size=4,
    target_shape=(512, 512),
    shuffle=False)

save_path = 'Unet_Pretrained_bce_jaccard_loss_iou_newsplit1' + '.hdf5'

callbacks = [
    EarlyStopping(monitor='val_loss', patience=8, verbose=1, min_delta=1e-4),
    ReduceLROnPlateau(monitor='val_loss',
                      factor=0.1,
                      patience=4,
                      verbose=1,
                      epsilon=1e-4),
    ModelCheckpoint(monitor='val_loss',
                    filepath=save_path,
                    verbose=True,
                    save_best_only=True)
]

model.fit_generator(train_data,
                    validation_data=val_data,
                    epochs=6,
                    callbacks=callbacks,
                    verbose=1)
Example #29
epochs = 60
BATCH_SIZE = 8
steps_per_epoch = x_train.shape[0] // BATCH_SIZE

schedule = SGDRScheduler(min_lr=1e-5,
                         max_lr=1e-4,
                         steps_per_epoch=np.ceil(x_train.shape[0] / 8),
                         cycle_length=30,
                         lr_decay=0.8,
                         mult_factor=1)

history = model1.fit_generator(generator(x_train, y_train, BATCH_SIZE,
                                         seq_det),
                               validation_data=[x_valid, y_valid],
                               epochs=epochs,
                               steps_per_epoch=steps_per_epoch,
                               callbacks=[model_checkpoint, schedule],
                               verbose=2)

# In[17]:

model = load_model(save_model_name,
                   custom_objects={'my_iou_metric': my_iou_metric})

input_x = model1.layers[0].input

# take the input of the final activation layer (i.e. the logits before the sigmoid)
# so the model can be retrained with the Lovász loss, which operates on logits
output_layer = model1.layers[-1].input
model = Model(input_x, output_layer)
c = optimizers.adam(lr=0.0001)
model.compile(loss=lovasz_loss, optimizer=c, metrics=[my_iou_metric_2])
Example #30
train_gen, valid_gen = utils.preproc_data_with_masks(BATCH_SIZE, TARGET_SIZE)

# If loading actual numpy arrays, need:
# x_val = preprocess_input(x_val)

# define model
model = Unet(
    BACKBONE,
    encoder_weights='imagenet',
    classes=1,
    activation='sigmoid',
    encoder_freeze=True,
)
model.compile('Adam', loss=bce_jaccard_loss, metrics=[iou_score])
# model.compile('Adadelta', loss='binary_crossentropy')
print(model.summary())

callbacks = [
    ModelCheckpoint('model_weights.h5', monitor='val_loss', save_best_only=True, verbose=0)
]

# fit model
model.fit_generator(
    train_gen,
    steps_per_epoch=80,
    epochs=50,
    callbacks=callbacks,
    validation_data=valid_gen,
)
model.save("unet.h5")