Example No. 1
image_datagen = ImageDataGenerator(**data_gen_args)
mask_datagen = ImageDataGenerator(**data_gen_args)

seed = 42
bs = 32  # batch size
nb_epochs = 100  # number of epochs

image_generator = image_datagen.flow(X_train,
                                     seed=seed,
                                     batch_size=bs,
                                     shuffle=True)
mask_generator = mask_datagen.flow(y_train,
                                   seed=seed,
                                   batch_size=bs,
                                   shuffle=True)

# Zip the two generators so they yield augmented images and their masks together; the shared seed keeps the augmentations aligned
train_generator = zip(image_generator, mask_generator)

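# The callbacks and step count referenced below are defined outside this
# snippet; a rough sketch of what they might look like (every name and value
# here is an assumption, not part of the original example):
# from keras.callbacks import ModelCheckpoint, LearningRateScheduler, ReduceLROnPlateau
# save = ModelCheckpoint('best_model.h5', save_best_only=True)
# lr_schedule = LearningRateScheduler(lambda epoch, lr: lr * 0.95)
# reduce_lr = ReduceLROnPlateau(factor=0.5, patience=5)
# spe = len(X_train) // bs  # steps per epoch
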
results = model.fit_generator(train_generator,
                              steps_per_epoch=spe,
                              epochs=nb_epochs,
                              validation_data=(X_valid, y_valid),
                              callbacks=[save, lr_schedule, reduce_lr])

# Save the final weights
model.save_weights(CKPT_PATH +
                   '{}_{}_{}_model.h5'.format(t, model_name, backbone_name))

# Predict on the validation data
predicted = model.predict(X_valid)
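# A possible follow-up step (not part of the original snippet): binarize the
# sigmoid probabilities into 0/1 masks with a 0.5 cut-off.
predicted_masks = (predicted > 0.5).astype('uint8')
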
                                                  n_batch, folder_train, size,
                                                  N),
                                validation_data=val_generator_2(
                                    train_val_p2, number_val, n_batch,
                                    folder_train, size, N),
                                validation_steps=val_pe,
                                steps_per_epoch=s_p_e,
                                epochs=n_epochs)

            times = time_callback.times
            dic_times = {}
            dic_times['times'] = times
            savemat(
                combinations + "_" + BACKBONE + '_' + name_model +
                '_times.mat', dic_times)
            model.save_weights(combinations + "_" + BACKBONE + "_" +
                               name_model + "_model_wIoU" + str(size) + ".h5")
            ############END TRAINING#############

            # Load best model
            model.load_weights(combinations + "_" + BACKBONE + "_" +
                               name_model + "_model_wIoU" + str(size) + ".h5")

            #    model.evaluate(x_val, y_val, verbose=1)
            # Predict on train, val and test
            if name_model == "PSPNet":
                preds_train = model.predict(x_train2, verbose=1)
                preds_val = model.predict(x_val2, verbose=1)
                for k in range(0, x_val.shape[0], int(x_val.shape[0] / 100)):
                    x_val_1 = x_val2[k, :, :, :]
                    y_val_1 = y_val2[k, :, :, :]
                    pred_val_1 = preds_val[k, :, :, :]
train_generator = zip(x, y)
val_generator = zip(x_val, y_val)

# pip install segmentation-models

from segmentation_models import Unet

model = Unet('resnet34', encoder_weights='imagenet', classes=1, input_shape=(512, 512, 3), activation='sigmoid')

model.compile('Adam', loss="binary_crossentropy", metrics=["acc"])

from keras.models import load_model
model.load_weights('Unet_weights.h5')

results = model.fit_generator(train_generator, validation_data=val_generator, validation_steps=500, steps_per_epoch=1000,epochs=10)
model.save_weights('Unet_weights.h5')



import numpy as np


def SEG_EVAL(Seg, GT):
    # Seg : segmented image, must be binary (1 = region of interest, 0 = background)
    # GT  : ground truth, must be binary (1 = region of interest, 0 = background)
    Seg = Seg.astype(bool)
    GT = GT.astype(bool)
    
    #dice_coefficient
    intersection = np.logical_and(Seg, GT)
    dice_coefficient = 2. * intersection.sum() / (Seg.sum() + GT.sum())
    
    #IoU
    TP = np.logical_and(Seg, GT)
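    # The original snippet is cut off above; a rough sketch of how the IoU
    # computation might continue (the union/iou names and the return values
    # are assumptions, not part of the original):
    union = np.logical_or(Seg, GT)
    iou = TP.sum() / union.sum()
    return dice_coefficient, iou
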
Example No. 4
    def workflow(self):
        # define model
        model = Unet(backbone_name='resnet50', encoder_weights='imagenet')
        adam = keras.optimizers.Adam(lr=self.cfgs["LEARNING_RATE"])
        model.summary()
        # model.compile('Adam', sigmoid_cross_entropy_balanced)
        model.compile(
            'Adam',
            # cross_entropy_balanced
            loss=self.define_loss(),
            # 'binary_crossentropy'
        )
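        # Note: compile() receives the optimizer as the string 'Adam', so the
        # `adam` instance created above is not actually used; the learning rate
        # is adjusted later via K.set_value(model.optimizer.lr, ...).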

        test_images, test_ulabels, test_elabels, test_rlabels, filelist = self.dl.get_test_data()

        if self.cfgs["RESTORE"]:
            model.load_weights(
                os.path.join(self.cfgs["SAVE_DIR"], "weights", "epoch150.h5"))
            print("RETORE SUCCESSFULLY!")

        callback = TensorBoard('./graph')
        callback.set_model(model)
        train_names = [
            'loss', 'u_outputs_sig_loss', 'e_fuse_sig_loss', 'r_fuse_sig_loss',
            'fuse_dir_loss'
        ]

        current_learning_rate = self.cfgs["LEARNING_RATE"]
        K.set_value(model.optimizer.lr, current_learning_rate)
        for i in range(self.cfgs["EPOCH"]):
            print("[I] EPOCH {}".format(i))
            # TRAIN
            for j in tqdm(range(self.cfgs["STEP"])):
                images_batch, ulabels_batch, elabels_batch, rlabels_batch, d_labels_batch = self.dl.next_batch(
                    "train")
                Logs = model.train_on_batch(
                    images_batch,
                    self.define_train_y(ulabels_batch, elabels_batch,
                                        rlabels_batch, d_labels_batch),
                )

            write_log(callback, train_names, Logs, i)
            if i % self.cfgs["INTERVAL"] == 0 and i >= 0:

                # TEST:
                results = model.predict(test_images, batch_size=10, verbose=0)
                logits = results[-1]
                r_logits = results[-2]

                # result analyse and show
                rlt_worker = ResultManager(i, logits, test_ulabels)
                # r_analyst.compute_roc(savename='roc_vegas_{}.csv'.format(i))
                # rlt_worker_r = ResultManager(i, r_logits, test_rlabels)

                rlt_worker.run()
                # rlt_worker_r.run()

                for ii in range(results[0].shape[0]):
                    #                     cv2.imwrite(os.path.join(self.cfgs["SAVE_DIR"], 'main_outputs/images/{}'.format(filelist[ii][0])),
                    #                                 test_images[ii, :] * 255)
                    #                     cv2.imwrite(os.path.join(self.cfgs["SAVE_DIR"], 'main_outputs/labels/{}'.format(filelist[ii][1])),
                    #                                 test_ulabels[ii, :] * 255)
                    #                     cv2.imwrite(os.path.join(self.cfgs["SAVE_DIR"], 'main_outputs/labels_e/{}'.format(filelist[ii][1])),
                    #                                 test_elabels[ii, :] * 255)
                    #                     cv2.imwrite(os.path.join(self.cfgs["SAVE_DIR"], 'main_outputs/labels_r/{}'.format(filelist[ii][1])),
                    #                                 test_rlabels[ii, :] * 255)

                    #cv2.imwrite(os.path.join(self.cfgs["SAVE_DIR"], 'main_outputs/preds/{}'.format(filelist[ii][1])),
                    #           results[-1][ii, :])
                    pred_threshold = threshold(results[-1][ii, :])
                    cv2.imwrite(
                        os.path.join(
                            self.cfgs["SAVE_DIR"],
                            'main_outputs/preds_threshold/{}'.format(
                                filelist[ii][1])), pred_threshold * 255)


#                     cv2.imwrite(os.path.join(self.cfgs["SAVE_DIR"], 'main_outputs/out_e1/{}'.format(filelist[ii][1])),
#                                 threshold(results[1][ii, :]) * 255)
#                     cv2.imwrite(os.path.join(self.cfgs["SAVE_DIR"], 'main_outputs/out_e2/{}'.format(filelist[ii][1])),
#                                 threshold(results[2][ii, :]) * 255)
#                     cv2.imwrite(os.path.join(self.cfgs["SAVE_DIR"], 'main_outputs/out_e3/{}'.format(filelist[ii][1])),
#                                 threshold(results[3][ii, :]) * 255)
#                     cv2.imwrite(os.path.join(self.cfgs["SAVE_DIR"], 'main_outputs/out_e4/{}'.format(filelist[ii][1])),
#                                 threshold(results[4][ii, :]) * 255)
#                     cv2.imwrite(os.path.join(self.cfgs["SAVE_DIR"], 'main_outputs/out_e5/{}'.format(filelist[ii][1])),
#                                 threshold(results[5][ii, :]) * 255)
#                     cv2.imwrite(os.path.join(self.cfgs["SAVE_DIR"], 'main_outputs/out_r1/{}'.format(filelist[ii][1])),
#                                 threshold(results[6][ii, :]) * 255)
#                     cv2.imwrite(os.path.join(self.cfgs["SAVE_DIR"], 'main_outputs/out_r2/{}'.format(filelist[ii][1])),
#                                 threshold(results[7][ii, :]) * 255)
#                     cv2.imwrite(os.path.join(self.cfgs["SAVE_DIR"], 'main_outputs/out_r3/{}'.format(filelist[ii][1])),
#                                 threshold(results[8][ii, :]) * 255)
#                     cv2.imwrite(os.path.join(self.cfgs["SAVE_DIR"], 'main_outputs/out_r4/{}'.format(filelist[ii][1])),
#                                 threshold(results[9][ii, :]) * 255)
#                     cv2.imwrite(os.path.join(self.cfgs["SAVE_DIR"], 'main_outputs/out_r5/{}'.format(filelist[ii][1])),
#                                 threshold(results[10][ii, :]) * 255)

                # Decay the learning rate, then save the model definition and weights
                current_learning_rate = current_learning_rate * self.cfgs["LEARNING_RATE_DECAY"]
                K.set_value(model.optimizer.lr, current_learning_rate)
                print('[I] Current Learning Rate: ', current_learning_rate)
                model_json = model.to_json()
                with open("model.json", "w") as json_file:
                    json_file.write(model_json)
                model.save_weights(
                    os.path.join(self.cfgs["SAVE_DIR"],
                                 "epoch{}.h5".format(i)))
Example No. 5
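# The imports this snippet relies on are not shown; a plausible set, assuming
# the segmentation_models package (BACKBONE itself is set elsewhere in the code):
# from segmentation_models import Unet, get_preprocessing
# from segmentation_models.losses import bce_jaccard_loss
# from segmentation_models.metrics import iou_score
# preprocess_input = get_preprocessing(BACKBONE)
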
#print(x_train)
print(x_train.shape)
print(y_train.shape)
print(x_val.shape)
#print(y_val.shape)
#x_train = x_train.reshape(x_train.shape[0], 320, 240, 3)
#x_val = x_val.reshape(x_val.shape[0], 320, 240, 3)
x_train = x_train.astype('float32')
x_test = x_val.astype('float32')
x_train /= 255
x_test /= 255
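# Note: x_test holds the normalized copy of x_val but is not used again in this
# snippet; x_val itself goes through preprocess_input without the /255 scaling.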

# preprocess input
x_train = preprocess_input(x_train)
x_val = preprocess_input(x_val)

# define model
model = Unet(BACKBONE, classes=21)
model.compile('Adam', loss=bce_jaccard_loss, metrics=[iou_score])

# fit model
model.fit(
    x=x_train,
    y=y_train,
    batch_size=1,
    epochs=100,
    validation_data=(x_val, y_val),
)
model.save_weights("savedModel")
train_generator = zip(train_image_generator, train_mask_generator)

BACKBONE = 'resnet34'

# define model
model = Unet(BACKBONE, encoder_weights='imagenet')
opt = Adam(lr=0.001)
model.compile(opt, loss=bce_jaccard_loss, metrics=[iou_score])

model.load_weights(net_file)
model.summary()


#show(train_generator,model)
#show_validation(test_image_generator, model)


history = model.fit_generator(train_generator, steps_per_epoch=steps_per_epoch, epochs=epoches)
model.save_weights(net_file)
printHistory(history)

show(train_generator, model)
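
# printHistory is a project-specific helper not shown here; a rough sketch of
# what it might do (the matplotlib usage and the 'iou_score' key are assumptions):
# def printHistory(history):
#     import matplotlib.pyplot as plt
#     plt.plot(history.history['loss'], label='loss')
#     plt.plot(history.history['iou_score'], label='iou_score')
#     plt.legend()
#     plt.show()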