def train():
    # load training images
    images = []
    for image in os.listdir(im_path):
        imi = cv.imread(os.path.join(im_path, image))
        images.append(imi)

    # load masks as grayscale and binarize them
    masks = []
    for mask in os.listdir(mask_path):
        mask_in = cv.imread(os.path.join(mask_path, mask), 0)
        ret_val, threshed_mask = cv.threshold(mask_in, 37, 1, cv.THRESH_BINARY)  # pixels above 37 become 1, the rest 0
        masks.append(threshed_mask)

    model = Unet('resnet34',
                 encoder_weights='imagenet',
                 input_shape=(128, 128, 3))
    model.compile('Adam',
                  loss=bce_jaccard_loss,
                  metrics=[iou_score, 'accuracy'])
    model.summary()
    hist = model.fit(x=np.array(images).reshape(-1, 128, 128, 3),
                     y=np.array(masks).reshape(-1, 128, 128, 1),
                     batch_size=10,
                     epochs=15)

    # save the model without optimizer state
    filename = 'trained_model.h5'
    model.save(filename, include_optimizer=False)
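Like most snippets on this page, the one above is shown without its imports or the im_path / mask_path settings. A minimal preamble it appears to assume, using OpenCV, NumPy, and segmentation_models (a hedged sketch; the original may differ):

import os

import cv2 as cv
import numpy as np
from segmentation_models import Unet
from segmentation_models.losses import bce_jaccard_loss
from segmentation_models.metrics import iou_score

im_path = 'images/'   # assumed location of the training images (not in the original)
mask_path = 'masks/'  # assumed location of the masks (not in the original)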
Example 2
def train_unet_mobilenetv2(saveModelFn, tensorboardPath):
    # train_imgDir = "/home/xiping/mydisk2/imglib/my_imglib/coco/train2014_person"
    train_imgDir = "/coco/train2014_person"
    (train_data, train_mask_data), (val_data, val_mask_data) = get_data(
        train_imgDir, maxNum=12000, valMaxNum=1000)

    # print(train_data.shape)
    # print(mask_data.shape)
    # print(mask_data[0])
    # cv2.imwrite("xx.bmp", mask_data[1]*255)
    # exit(0)

    print("================================")
    BACKBONE = 'mobilenetv2'
    # define model
    model = Unet(
        BACKBONE,
        classes=1,
        input_shape=(224, 224, 3),  # fixed input size so the checkpoint callback can save the full model
        activation='sigmoid',  # 'sigmoid' for binary masks, 'softmax' for multi-class
        encoder_weights='imagenet')

    # Show network structure.
    # model.summary()

    model.compile('Adam', loss='jaccard_loss', metrics=['iou_score'])
    # model.compile('SGD', loss="bce_dice_loss", metrics=["dice_score"])
    # model.compile('SGD', loss="bce_jaccard_loss", metrics=["iou_score"])
    # model.compile('adam', loss="binary_crossentropy", metrics=["iou_score"])

    checkpointer = ModelCheckpoint(
        filepath="weights.epoch={epoch:02d}-val_loss={val_loss:.2f}-val_iou_score={val_iou_score:.2f}.hdf5",
        verbose=1,
        save_best_only=True)

    print("================================")
    print("Start train...")
    # fit model
    # if you use a data generator, use model.fit_generator(...) instead of model.fit(...)
    # more about `fit_generator` here: https://keras.io/models/sequential/#fit_generator
    model.fit(
        x=train_data,
        y=train_mask_data,
        batch_size=32,
        epochs=200,
        validation_data=(val_data, val_mask_data),  # the checkpoint callback monitors val_loss, so validation data is required
        callbacks=[TensorBoard(log_dir=tensorboardPath), checkpointer])

    model.save(saveModelFn)
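The snippet above feeds the arrays from get_data straight into the network. If those arrays are raw 0-255 images, the backbone-specific normalization segmentation_models provides (and which Example 4 below applies) would normally come first; a hedged sketch, assuming get_data returns unnormalized arrays:

from segmentation_models import get_preprocessing

preprocess_input = get_preprocessing('mobilenetv2')
train_data = preprocess_input(train_data)  # scale/shift pixels as MobileNetV2 expects
val_data = preprocess_input(val_data)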
Example 3
def main():

    image_path = "image"  #加载训练图片
    im_start = 0  #确定编号
    im_end = 29
    im_array = load_png_files(image_path, im_start, im_end)
    im_array = im_array[:, :, :, np.newaxis]  # 需要加代表图片的通道数,这里是黑白图片所以是1,因此直接加一维
    print("train_image shape : " + im_array.shape)

    label_path = "label"  #加载训练图片对应的标签图片
    la_start = 0
    la_end = 29
    la_array = load_png_files(label_path, la_start, la_end)
    la_array = la_array[:, :, :, np.newaxis]
    print("train_label shape : " + la_array.shape)

    test_path = "test"  #加载测试集的图片
    te_start = 0
    te_end = 4
    te_array = load_png_files(test_path, te_start, te_end)
    te_array = te_array[:, :, :, np.newaxis]
    print("test_image shape : " + te_array.shape)

    model = Unet('resnet34', input_shape=(512, 512, 1),
                 encoder_weights=None)  # single-channel input, so no pretrained encoder weights
    model.compile('Adam', loss='binary_crossentropy', metrics=['accuracy'])
    model.fit(
        x=im_array,
        y=la_array,
        batch_size=10,
        epochs=8,
        validation_split=0.2,  # hold out 20% of the training set for validation
        shuffle=True)
    model.save("model_v1.h5")  # save the model

    print("Saved model to disk")
    print("done!!!!!")
Example 4
def train_net():
    args = parser.parse_args()
    final_path = args.model_final_path
    checkpoint_path = args.model_checkpoint_path
    dataset_path = args.dataset_path
    with tf.device("/gpu:0"):
        backbone = 'resnet50'
        preprocess_input = get_preprocessing(backbone)

        # load your data
        x_train, y_train, x_val, y_val = from_directory_datagen(dataset_path)

        # preprocess input
        x_train = preprocess_input(x_train)
        x_val = preprocess_input(x_val)

        # define model
        model = Unet(backbone, encoder_weights='imagenet', input_shape=(256, 256, 3))
        model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=1e-3), loss=dice_loss,
                      metrics=[f1_score, iou_score])

        check_point = [ModelCheckpoint(checkpoint_path + 'model-{epoch:03d}-{val_f1-score:03f}.h5', verbose=1,
                                       monitor='val_f1-score',
                                       save_best_only=True, mode='max')]

        # fit model; zipping the two iterators yields (image_batch, mask_batch) pairs
        model.fit(
            x=(pair for pair in zip(x_train, y_train)),
            epochs=10,
            steps_per_epoch=x_train.n // x_train.batch_size,
            validation_data=(pair for pair in zip(x_val, y_val)),
            validation_steps=x_val.n // x_val.batch_size,
            verbose=1,
            shuffle=True,  # ignored for generator input
            callbacks=check_point,
        )
        model.save(final_path + 'final_model.h5')
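Zipping two independent Keras directory iterators only keeps images and masks aligned if neither reshuffles between epochs (or both shuffle with the same seed). A hedged alternative that pairs the iterators by batch index via keras.utils.Sequence — a generic pattern, not the original from_directory_datagen:

from tensorflow import keras

class PairedBatches(keras.utils.Sequence):
    # yields (image_batch, mask_batch) pairs from two synchronized iterators
    def __init__(self, image_iter, mask_iter):
        self.image_iter = image_iter
        self.mask_iter = mask_iter

    def __len__(self):
        return len(self.image_iter)

    def __getitem__(self, idx):
        return self.image_iter[idx], self.mask_iter[idx]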
Example 5

# validate_gen = train_image_generator("image", "label", 21, 29, 3, None)  # build a validation data generator

# define and compile a model
model = Unet('resnet34', input_shape=(512, 512, 1), encoder_weights=None)  # single-channel (grayscale) input
model.compile(optimizer=Adam(lr=1e-4), loss='binary_crossentropy', metrics=['accuracy'])

# train the model; `his` records the training history so curves can be plotted afterwards
his = model.fit_generator(
    generator=train_gen,  # training-set generator
    steps_per_epoch=300,  # batches per epoch
    # validation_data = validate_gen,  # validation-set generator
    # validation_steps = 3,  # validation batches per epoch
    epochs=1  # number of epochs
)

model.save("model_v3.h5") #保存模型 

print("Saved model to disk")


# plot the training curves
# N = 51
# plt.style.use("ggplot")
# plt.figure()
# plt.plot(np.arange(0, N), his.history["loss"], label="train_loss")
# plt.plot(np.arange(0, N), his.history["val_loss"], label="val_loss")
# plt.plot(np.arange(0, N), his.history["acc"], label="train_acc")
# plt.plot(np.arange(0, N), his.history["val_acc"], label="val_acc")
# plt.title("Training Loss and Accuracy on Dataset")
# plt.xlabel("Epoch #")
# plt.ylabel("Loss/Accuracy")
Example 6
def main():
    # with open('/home/kunal/Desktop/Feature-Learning-for-Disease-Classification/temp_patch.txt') as f:
    # 	lines = f.readlines()

    with open('/home/rbuddhad/NIH-XRAY/train_sml.txt') as f1:
        lines1 = f1.readlines()

    with open('/home/rbuddhad/NIH-XRAY/validation_sml.txt') as f2:
        lines2 = f2.readlines()

    # print((lines1))

    train_datagen = ImageDataGenerator()
    train_batches = train_datagen.flow_from_directory(TRAIN_DATASET_PATH,
                                                      target_size=(1024, 1024),
                                                      shuffle=True,
                                                      class_mode=None,
                                                      batch_size=BATCH_SIZE)

    valid_datagen = ImageDataGenerator()
    valid_batches = valid_datagen.flow_from_directory(VALID_DATASET_PATH,
                                                      target_size=(1024, 1024),
                                                      shuffle=False,
                                                      class_mode=None,
                                                      batch_size=BATCH_SIZE)

    train_crops_orig = crop_generator(train_batches, CROP_LENGTH, lines1)  # CROP_LENGTH is 224
    valid_crops_orig = crop_generator(valid_batches, CROP_LENGTH, lines2)

    # batch_x_random_crop, batch_y_targeted_crop = next(train_crops)
    # valid_x, valid_y = next(valid_crops)
    # print(train_crops_orig.shape)
    # train_crops_orig=np.reshape(train_crops_orig,(train_crops_orig.shape[0]*train_crops_orig.shape[1],224,224,3))
    # print(train_crops_orig.shape)
    # in_painted_x= out_painting_mask(train_crops_orig)
    # valid_in_x=in_painting_mask(valid_x,valid_y)

    # train_crops_1_ch=rgb2gray(train_crops_orig)
    # train_crops_1_ch=np.reshape(train_crops_1_ch,(train_crops_1_ch.shape[0],224,224,1))

    # valid_x=rgb2gray(valid_x)
    # valid_x=np.reshape(valid_x,(valid_x.shape[0],224,224,1))

    # model = Unet(backbone_name='resnet18', encoder_weights='imagenet', decoder_block_type='transpose') # build U-Net
    model = Unet(backbone_name='resnet18', encoder_weights=None)  # build U-Net
    model.load_weights('best_model.h5')
    model.compile(optimizer='Adam', loss='mean_squared_error')
    model.summary()
    # print('inpaited',in_painted_x.shape)
    # print('1 channel y',train_crops_1_ch.shape)
    # print(in_painted_x.shape)
    # print(train_crops_1_ch.shape)

    callbacks = [
        EarlyStopping(monitor='val_loss', patience=70),
        ModelCheckpoint(filepath='best_model70_withgray_finetuned.h5',
                        monitor='val_loss',
                        save_best_only=True),
        TensorBoard(log_dir='./logs',
                    histogram_freq=0,
                    write_graph=True,
                    write_images=True)
    ]
    model.fit_generator(generator=train_crops_orig,
                        steps_per_epoch=100,
                        validation_data=valid_crops_orig,
                        callbacks=callbacks,
                        validation_steps=200,
                        epochs=300)
    model.save('outpaint70_withgray_finetuned.h5')
Example 7
callbacks = get_callbacks(weights_path, args.ckpt_path, 5, args.opt)

history = m.fit_generator(train_gen,
                          epochs=args.epochs,
                          steps_per_epoch=(NO_OF_TRAINING_IMAGES //
                                           BATCH_SIZE),
                          validation_data=(val_x / 255, val_y),
                          shuffle=True,
                          callbacks=callbacks)
''' save model structure '''
model_json = m.to_json()
with open(os.path.join(args.ckpt_path, "model.json"), "w") as json_file:
    json_file.write(model_json)
# serialize weights to HDF5
print("Saved model to disk")
m.save(os.path.join(args.ckpt_path, 'model.h5'))
'''Evaluate and Test '''
print('======Start Evaluating======')
# evaluate directly from arrays rather than through a generator
score = m.evaluate(val_x / 255, val_y, verbose=0)
print("%s: %.2f%%" % (m.metrics_names[0], score[0] * 100))
print("%s: %.2f%%" % (m.metrics_names[1], score[1] * 100))
with open(os.path.join(args.ckpt_path, 'output.txt'), "w") as file:
    file.write("%s: %.2f%%\n" % (m.metrics_names[0], score[0] * 100))
    file.write("%s: %.2f%%\n" % (m.metrics_names[1], score[1] * 100))

print('======Start Testing======')
#test_x, test_y = xy_formarray(mask_path, frame_path, 'test',256, cl)
# test_y = np.eye(cl)[test_y]
#predict_y = m.predict(test_x / 255)
Example 8
train_gen, valid_gen = utils.preproc_data_with_masks(BATCH_SIZE, TARGET_SIZE)

# If loading actual numpy arrays, need:
# x_val = preprocess_input(x_val)

# define model
model = Unet(
    BACKBONE,
    encoder_weights='imagenet',
    classes=1,
    activation='sigmoid',
    encoder_freeze=True,
)
model.compile('Adam', loss=bce_jaccard_loss, metrics=[iou_score])
# model.compile('Adadelta', loss='binary_crossentropy')
model.summary()  # summary() prints directly; wrapping it in print() just adds "None"

callbacks = [
    ModelCheckpoint('model_weights.h5', monitor='val_loss', save_best_only=True, verbose=0)
]

# fit model
model.fit_generator(
    train_gen,
    steps_per_epoch=80,
    epochs=50,
    callbacks=callbacks,
    validation_data=valid_gen,
)
model.save("unet.h5")
Example 9
def main():
    #########################################
    # input parser
    #########################################
    parser = argparse.ArgumentParser(
        description='Road Segmentation Challenge - EPFL.')

    group_data = parser.add_argument_group('model arguments')
    group_data.add_argument(
        '--model',
        type=str,
        default="unet",
        choices=["unet", "manunet4", "manunet5", "manunet6"],
        help='select the neural network model you want to use.')
    args = parser.parse_args()
    for arg in vars(args):
        print(arg, getattr(args, arg))
    modelname = args.model

    #########################################
    # generate data
    #########################################
    # 1: Divide the data
    data_division.make_folders()

    # 2 : Load entire images

    # Generators
    training_generator = generator.DataGenerator(
        constants.train_image_path,
        constants.train_mask_path,
        augmentation=helpers.aug_with_crop,
        batch_size=1,
    )
    validation_generator = generator.DataGenerator(constants.val_image_path,
                                                   constants.val_mask_path)

    #########################################
    # Model and training
    #########################################
    if (modelname == "manunet4"):
        model = unetManual()
        model.summary()
    elif (modelname == "manunet5"):
        model = unetManualFiveDeep()
        model.summary()
    elif (modelname == "manunet6"):
        model = unetManualSixDeep()
        model.summary()
    else:
        model = Unet(backbone_name='efficientnetb7',
                     encoder_weights='imagenet',
                     encoder_freeze=False)
        model.compile(optimizer='Adam',
                      loss=bce_jaccard_loss,
                      metrics=[sm.metrics.FScore(threshold=0.5)])

    history = model.fit_generator(training_generator,
                                  shuffle=True,
                                  epochs=30,
                                  workers=4,
                                  use_multiprocessing=True,
                                  validation_data=validation_generator,
                                  verbose=1,
                                  callbacks=[callbacks.lr_reducer])
    # plotting history
    #helpers.plot_training_history(history)

    # Save model
    model.save(constants.PATH + "saved_" + modelname + ".h5")
    print("Trained model was successfully saved on disk.")
    #model = load_model(constants.PATH + "saved_"+modelname+".h5")

    #########################################
    # Testing and make predictions
    #########################################
    test = helpers.listdir_fullpath(constants.IMAGE_PATH)
    os.makedirs(constants.MASK_TEST_PATH)

    for pth in test:
        name = os.listdir(pth)[0]
        path = pth + "/" + name
        print(path)
        image = mpimg.imread(path) / 255
        if (modelname == "manunet4" or modelname == "manunet5"
                or modelname == "manunet6"):
            image = cv2.resize(
                image, dsize=(384, 384), interpolation=cv2.INTER_CUBIC
            )  # resize test images to (384,384) to feed to manual Unet
            prediction = cv2.resize(model.predict(np.expand_dims(
                image, axis=0)).reshape(384, 384),
                                    dsize=(608, 608),
                                    interpolation=cv2.INTER_CUBIC
                                    )  # resize the predictions to (608,608)
        else:
            prediction = model.predict(np.expand_dims(image, axis=0)).reshape(
                608, 608)
        mpimg.imsave(constants.MASK_TEST_PATH + name, prediction)
        print("Image " + name + " saved")

    submission_filename = constants.PATH + "test_final_" + modelname + ".csv"
    image_filenames = helpers.listdir_fullpath(constants.MASK_TEST_PATH)
    make_submission.masks_to_submission(submission_filename, *image_filenames)
Example 10
# define and compile a model
model = Unet('resnet34', input_shape=(512, 512, 1), encoder_weights=None)  # single-channel (grayscale) input
model.compile(optimizer=Adam(lr=1e-4), loss='binary_crossentropy', metrics=['accuracy'])

# train the model; `his` records the training history so curves can be plotted afterwards
his = model.fit_generator(
    generator=train_gen,  # training-set generator
    steps_per_epoch=300,  # batches per epoch
    # validation_data = validate_gen,  # validation-set generator
    # validation_steps = 3,  # validation batches per epoch
    # validation_split = 0.2  # not supported by fit_generator
    epochs=10  # number of epochs
)

model.save("model_v2_050_plusgo.h5") #保存模型 

print("Saved model to disk")


# plot the training curves
# N = 51
# plt.style.use("ggplot")
# plt.figure()
# plt.plot(np.arange(0, N), his.history["loss"], label="train_loss")
# plt.plot(np.arange(0, N), his.history["val_loss"], label="val_loss")
# plt.plot(np.arange(0, N), his.history["acc"], label="train_acc")
# plt.plot(np.arange(0, N), his.history["val_acc"], label="val_acc")
# plt.title("Training Loss and Accuracy on Dataset")
# plt.xlabel("Epoch #")
# plt.ylabel("Loss/Accuracy")
Example 11
x_train = preprocess_input(x_train)

x_val = np.reshape(x_train[-2:, :, :, :], (2, 224, 224, 3))
y_val = np.reshape(y_train[-2:, :, :, :], (2, 224, 224, 4))

x_train = x_train[:-2, :, :, :]
y_train = y_train[:-2, :, :, :]

print(x_train.shape)
print(y_train.shape)

model = Unet(BACKBONE,
             input_shape=(224, 224, 3),
             classes=4,
             encoder_weights='imagenet')
adam = optimizers.Adam(lr=0.0005,
                       beta_1=0.9,
                       beta_2=0.999,
                       epsilon=None,
                       decay=1e-6,
                       amsgrad=True)
model.compile(adam, loss=bce_jaccard_loss, metrics=[iou_score])  # use the configured optimizer (the original passed 'Adam', discarding these settings)

model.fit(x=x_train,
          y=y_train,
          batch_size=8,
          epochs=30,
          validation_data=(x_val, y_val))

model.save('nail_unet.h5', include_optimizer=False)
Example 12

model = Unet('resnet34', input_shape=(512, 512, 1),
             encoder_weights=None)  # single-channel (grayscale) input
model.compile(optimizer=Adam(lr=1e-4),
              loss='binary_crossentropy',
              metrics=['accuracy'])

# train the model; `his` records the training history so curves can be plotted afterwards
his = model.fit_generator(
    generator=train_gen,  # training-set generator
    steps_per_epoch=100,  # batches per epoch
    # validation_data = validate_gen,  # validation-set generator
    # validation_steps = 3,  # validation batches per epoch
    epochs=1  # number of epochs
)

model.save("model_v2_060.h5")  #保存模型

print("Saved model to disk")

# plot the training curves
# N = 51
# plt.style.use("ggplot")
# plt.figure()
# plt.plot(np.arange(0, N), his.history["loss"], label="train_loss")
# plt.plot(np.arange(0, N), his.history["val_loss"], label="val_loss")
# plt.plot(np.arange(0, N), his.history["acc"], label="train_acc")
# plt.plot(np.arange(0, N), his.history["val_acc"], label="val_acc")
# plt.title("Training Loss and Accuracy on Dataset")
# plt.xlabel("Epoch #")
# plt.ylabel("Loss/Accuracy")
# plt.legend(loc="lower left")
Example 13
n_train = len(X_train)
n_val = len(X_val)
batch_size = 2
my_training_batch_generator = DataGenerator(X_train, Y_train, batch_size)
my_validation_batch_generator = DataGenerator(X_val, Y_val, batch_size)

print(n_train, int(n_train / batch_size))
print(n_val, int(n_val / batch_size))
# pretrain model decoder
model.fit_generator(generator=my_training_batch_generator,
                    epochs=2,
                    steps_per_epoch=int(n_train / batch_size),
                    validation_data=my_validation_batch_generator,
                    verbose=1,
                    validation_steps=int(n_val / batch_size))
# model.fit(X, y, epochs=2)
model.save('./model/2ndepoch_model.h5')

# release all layers for training
set_trainable(model)  # set all layers trainable and recompile model

# continue training
model.fit_generator(generator=my_training_batch_generator,
                    epochs=100,
                    steps_per_epoch=int(n_train / batch_size),
                    validation_data=my_validation_batch_generator,
                    verbose=1,
                    validation_steps=int(n_val / batch_size))
# model.fit(X, y, epochs=100)
model.save("./model/102thepoch_model.h5")
Example 14
datagen = ImageDataGenerator(
    featurewise_center=False,
    samplewise_center=True,  # set each sample mean to 0
    featurewise_std_normalization=False,
    samplewise_std_normalization=True)  # divide each sample by its std
X_train, Y_train = load()
X_test, Y_test = load_val()
X_train = X_train.reshape(len(X_train), len(X_train[0]), len(X_train[0][0]), 1)
X_test = X_test.reshape(len(X_test), len(X_test[0]), len(X_test[0][0]), 1)
Y_train = Y_train.reshape(len(Y_train), len(Y_train[0]), len(Y_train[0][0]), 1)
Y_test = Y_test.reshape(len(Y_test), len(Y_test[0]), len(Y_test[0][0]), 1)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
datagen.fit(X_train)
for i in range(len(X_test)):
    X_test[i] = datagen.standardize(X_test[i])
earlystop = EarlyStopping(monitor='val_iou_score', min_delta=0, patience=80,
                          verbose=1, mode='max', restore_best_weights=True)
callbacks_list = [earlystop]
history = model.fit_generator(datagen.flow(X_train, Y_train, batch_size=batch_size),
                              steps_per_epoch=32,
                              epochs=1000,
                              validation_data=(X_test, Y_test),
                              callbacks=callbacks_list,
                              verbose=1)
score, acc = model.evaluate(X_test,Y_test,batch_size=batch_size)
print('Test score:', score)
print('Test accuracy:', acc)
model.save("UNET_" + str(time.time())+".h5") #model are saved with time stamps
print("Model saved")



Example 15
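`scheduler` is defined somewhere outside this fragment; a minimal sketch of the (epoch, lr) signature that tf.keras.callbacks.LearningRateScheduler expects, assuming a simple step decay rather than the original schedule:

def scheduler(epoch, lr):
    # hypothetical: hold the initial rate for 10 epochs, then decay 4% per epoch
    return lr if epoch < 10 else lr * 0.96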
lr_schedule = tf.keras.callbacks.LearningRateScheduler(scheduler)

start_time = time.time()
# train model
model.fit(
    train_sequence_generator,
    steps_per_epoch=100,
    epochs=1000,
    verbose=1,
    callbacks=[earlystopper, tensorboard, lr_schedule],
    validation_data=validation_sequence_generator,
    validation_steps=5
)
end_time = time.time()

os.makedirs(f'../../reports/logs_and_plots/{test_name}/', exist_ok=True)

with open(f'../../reports/logs_and_plots/{test_name}/{test_name}_log.txt', 'w+') as file:
    file.write(f'Training completed in {end_time-start_time:0.1f} seconds.\n')

# save trained model
model.save(f'../../models/{test_name}.h5')

# get metrics on full test set
get_metrics_and_cm(
    model_name=test_name,
    split='test',
    model=model
)
Example 16
# (fragment: plotting code from inside a loop over the four defect classes)
        plt.subplot(2, 2, j + 1)
        sns.distplot([x for x in correct[j] if x < limit], label='correct')
        sns.distplot([x for x in incorrect[j] if x < limit], label='incorrect')
        plt.title('Defect ' + str(j + 1) + ' mask sizes with threshold = ' +
                  str(THRESHOLD))
        plt.legend()
    plt.show()
    for j in range(4):
        c1 = np.array(correct[j])
        c2 = np.array(incorrect[j])
        print('With threshold =', THRESHOLD, ', defect', j + 1, 'has',
              len(c1[c1 != 0]), 'correct and', len(c2[c2 != 0]),
              'incorrect masks')
    print()
# SAVE MODEL
model.save('UNET.h5')

# LOAD MODEL
from keras.models import load_model
model = load_model('UNET.h5', custom_objects={'dice_coef': dice_coef})

# PREDICT 1 BATCH TEST DATASET
test = pd.read_csv(path + 'sample_submission.csv')
test['ImageId'] = test['ImageId_ClassId'].map(lambda x: x.split('_')[0])
test_batches = DataGenerator(test.iloc[::4],
                             subset='test',
                             batch_size=256,
                             preprocess=preprocess)
test_preds = model.predict_generator(test_batches, steps=1, verbose=1)

# NEXT CONVERT MASKS TO RLE, ADD TO CSV, PROCESS REMAINING BATCHES, AND SUBMIT !!
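The closing comment points at run-length encoding the predicted masks. A hedged sketch of the usual Kaggle-style encoder (column-major pixel order, 1-indexed run starts) that such submission pipelines rely on — an assumed helper, not taken from this notebook:

import numpy as np

def mask2rle(mask):
    # flatten column-major, locate where runs of 1s start and stop,
    # and emit 'start length' pairs
    pixels = np.concatenate([[0], mask.T.flatten(), [0]])
    runs = np.where(pixels[1:] != pixels[:-1])[0] + 1
    runs[1::2] -= runs[::2]
    return ' '.join(str(x) for x in runs)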
Example 17
def train_model():
    train_generator = DataGeneratorFolder(
        root_dir='./dataset/training',
        image_folder='input/',
        mask_folder='output/',
        augmentation=aug_with_crop,
        batch_size=4,
        # image_size=512,
        nb_y_features=1)

    test_generator = DataGeneratorFolder(root_dir='./dataset/testing',
                                         image_folder='input/',
                                         mask_folder='output/',
                                         batch_size=1,
                                         nb_y_features=1)

    lr_reducer = ReduceLROnPlateau(factor=0.1,
                                   cooldown=10,
                                   patience=10,
                                   verbose=1,
                                   min_lr=0.1e-5)

    mode_auto_save = ModelCheckpoint(
        "checkpoints/saved_model_epoch_{epoch:02d}_iou_{val_iou_score:.2f}.h5",
        monitor='val_iou_score',
        verbose=1,
        save_best_only=False,
        save_weights_only=False,
        mode='auto',
        period=1)

    early_stopping = EarlyStopping(patience=10, verbose=1, mode='auto')

    # tensor_board = TensorBoard(log_dir='./logs/tensor_board',
    #                            histogram_freq=0,
    #                            write_graph=False,
    #                            write_images=False)

    callbacks = [mode_auto_save, lr_reducer, early_stopping]
    # callbacks = [mode_auto_save, lr_reducer, tensor_board, early_stopping]

    model = Unet(backbone_name='efficientnetb0',
                 encoder_weights='imagenet',
                 encoder_freeze=False)

    model.compile(optimizer=Adam(), loss=bce_jaccard_loss, metrics=[iou_score])

    epochs = 100

    history = model.fit_generator(
        train_generator,
        epochs=epochs,
        # workers=4,
        # use_multiprocessing=True,
        validation_data=test_generator,
        shuffle=True,
        verbose=1,
        callbacks=callbacks)

    model_name = 'trained_model_on_' + str(epochs) + '_epochs_' + \
                 str(datetime.datetime.now().strftime('%d.%m.%Y_%H:%M:%S'))

    model.save(os.path.join('trained_models', model_name + '.h5'))

    plot_training_history(history, model_name)

    """
        Reduce learning rate when a metric has stopped improving.
        monitor: quantity to be monitored.
        factor: factor by which the learning rate will be reduced. new_lr = lr * factor
        patience: number of epochs that produced the monitored quantity with no improvement after which training will be stopped. Validation quantities may not be produced for every epoch, if the validation frequency (model.fit(validation_freq=5)) is greater than one.
        verbose: int. 0: quiet, 1: update messages.
        mode: one of {auto, min, max}. In min mode, lr will be reduced when the quantity monitored has stopped decreasing; in max mode it will be reduced when the quantity monitored has stopped increasing; in auto mode, the direction is automatically inferred from the name of the monitored quantity.
        min_delta: threshold for measuring the new optimum, to only focus on significant changes.
        cooldown: number of epochs to wait before resuming normal operation after lr has been reduced.
        min_lr: lower bound on the learning rate.
    """ \
 \
    """
        Save the model after every epoch.
        filepath: string, path to save the model file.
        monitor: quantity to monitor.
        verbose: verbosity mode, 0 or 1.
        save_best_only: if save_best_only=True, the latest best model according to the quantity monitored will not be overwritten.
        save_weights_only: if True, then only the model's weights will be saved (model.save_weights(filepath)), else the full model is saved (model.save(filepath)).
        mode: one of {auto, min, max}. If save_best_only=True, the decision to overwrite the current save file is made based on either the maximization or the minimization of the monitored quantity. For val_acc, this should be max, for val_loss this should be min, etc. In auto mode, the direction is automatically inferred from the name of the monitored quantity.
        period: Interval (number of epochs) between checkpoints.
    """ \
 \
    """
        Stop training when a monitored quantity has stopped improving.
        monitor: quantity to be monitored.
        min_delta: minimum change in the monitored quantity to qualify as an improvement, i.e. an absolute change of less than min_delta, will count as no improvement.
        patience: number of epochs that produced the monitored quantity with no improvement after which training will be stopped. Validation quantities may not be produced for every epoch, if the validation frequency (model.fit(validation_freq=5)) is greater than one.
        verbose: verbosity mode.
        mode: one of {auto, min, max}. In min mode, training will stop when the quantity monitored has stopped decreasing; in max mode it will stop when the quantity monitored has stopped increasing; in auto mode, the direction is automatically inferred from the name of the monitored quantity.
        baseline: Baseline value for the monitored quantity to reach. Training will stop if the model doesn't show improvement over the baseline.
        restore_best_weights: whether to restore model weights from the epoch with the best value of the monitored quantity. If False, the model weights obtained at the last step of training are used.
    """ \
 \
    """
Example 18
# (fragment: body of a training loop; `i`, `x`, `y_train`, `epc_num`, and `model` come from earlier notebook cells)
    model.fit(x,
              y_train,
              validation_data=(X_val, y_val),
              callbacks=[TestCallback((X_val, y_val))],
              batch_size=1,
              epochs=epc_num,
              verbose=True)
    mname = 'PSPNet_relabeled_' + str(epc_num * (i + 1)) + '.h5'

    print('==============================')
    print('saving model after epoch ', (i + 1) * epc_num)
    print('==============================')

    #     save_name = model_name+"_noaug_epoch_"+str(10*(i+1))+".h5"
    model.save(mname)

# ## Result visualization

# In[5]:

# model = keras.models.load_model('./Unet_epoch_90.h5_noaug_epoch_90.h5')

# In[7]:

# pred = model.predict(X_val, batch_size=None, verbose=1, steps=None)

# In[ ]:

# # resize to original