Example #1
# Assumed framework imports for this snippet (datetime, Adam and
# BinaryFocalLoss are used below; BinaryFocalLoss comes from the
# `focal-loss` pip package)
from datetime import datetime
from tensorflow.keras.optimizers import Adam
from focal_loss import BinaryFocalLoss
from models import Attention_ResUNet, UNet, Attention_UNet, dice_coef, dice_coef_loss, jacard_coef
'''
UNet
'''
unet_model = UNet(input_shape)
unet_model.compile(optimizer=Adam(learning_rate=1e-2),
                   loss=BinaryFocalLoss(gamma=2),
                   metrics=['accuracy', jacard_coef])

unet_model.summary()  # summary() prints directly and returns None

start1 = datetime.now()
unet_history = unet_model.fit(X_train,
                              y_train,
                              verbose=1,
                              batch_size=batch_size,
                              validation_data=(X_test, y_test),
                              shuffle=False,
                              epochs=50)

stop1 = datetime.now()
# Execution time of the model
execution_time_Unet = stop1 - start1
print("UNet execution time is: ", execution_time_Unet)

unet_model.save('mitochondria_UNet_50epochs_B_focal.hdf5')
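
Reloading the saved HDF5 model later requires re-registering the custom objects. A minimal sketch, assuming BinaryFocalLoss and jacard_coef are the only custom components:

from tensorflow.keras.models import load_model

reloaded = load_model('mitochondria_UNet_50epochs_B_focal.hdf5',
                      custom_objects={'BinaryFocalLoss': BinaryFocalLoss,
                                      'jacard_coef': jacard_coef})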
#____________________________________________
'''
Attention UNet
'''
att_unet_model = Attention_UNet(input_shape)
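
The snippet ends with the model construction. A plausible continuation mirroring the UNet block above (a hedged sketch, not from the original source):

att_unet_model.compile(optimizer=Adam(learning_rate=1e-2),
                       loss=BinaryFocalLoss(gamma=2),
                       metrics=['accuracy', jacard_coef])

start2 = datetime.now()
att_unet_history = att_unet_model.fit(X_train,
                                      y_train,
                                      verbose=1,
                                      batch_size=batch_size,
                                      validation_data=(X_test, y_test),
                                      shuffle=False,
                                      epochs=50)
stop2 = datetime.now()
print("Attention UNet execution time is: ", stop2 - start2)
att_unet_model.save('mitochondria_Attention_UNet_50epochs_B_focal.hdf5')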
Example #2
# Assumed imports; the project helpers (UNet, fix_size, img_to_normal,
# img_to_ohe, ohe_to_img, calc_metrics, generalized_dice_loss, RANDOM_SEED,
# set_random_seed) are defined elsewhere in this repository.
import glob
import os
import time

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow.keras.backend as K
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.optimizers import Adam


def main():
    # Gets the current directory path
    cdir = os.getcwd()

    # Parameters
    epochs = 100
    batch_size = 1
    depth = 3
    loss_label = 'GDL'
    loss_func = generalized_dice_loss

    learning_rate = 1e-4

    df = pd.DataFrame(columns=['dataset', 'size', 'time elapsed during training',
                               'epochs', 'val_loss', 'test_loss', 'test_acc', 'test_precision', 'test_recall'])

    datasets = ["ds0", "ds1", "ds2", "ds3"]
    datasets_label = ["EvaLady", "AosugiruHaru",
                      "JijiBabaFight", "MariaSamaNihaNaisyo"]

    for d, ds in enumerate(datasets):
        # Gets all .jpg input images and .png target masks
        inputs_train = glob.glob(
            str(cdir)+"/../../datasets/D1_"+ds+"/input/*.jpg")
        targets_train = glob.glob(
            str(cdir)+"/../../datasets/D1_"+ds+"/target/*.png")

        inputs_val = glob.glob(
            str(cdir)+"/../../datasets/TT_"+ds+"/input/*.jpg")
        targets_val = glob.glob(
            str(cdir)+"/../../datasets/TT_"+ds+"/target/*.png")

        # Sort paths
        inputs_train.sort()
        targets_train.sort()
        inputs_val.sort()
        targets_val.sort()

        opt = Adam(learning_rate=learning_rate)

        # Fixes an initial seed for reproducibility
        np.random.seed(RANDOM_SEED)
        set_random_seed(RANDOM_SEED)

        X_train = []
        Y_train = []
        X_val = []
        Y_val = []
        # Iterates through the files and loads the training and validation images

        for i, _ in enumerate(inputs_train):
            x = plt.imread(inputs_train[i])
            if len(x.shape) == 3:
                x = x[:, :, 0]
            X_train.append(fix_size(x, depth))
            Y_train.append(fix_size(plt.imread(targets_train[i]), depth))
        for i, _ in enumerate(inputs_val):
            x = plt.imread(inputs_val[i])
            if len(x.shape) == 3:
                x = x[:, :, 0]
            X_val.append(fix_size(x, depth))
            Y_val.append(fix_size(plt.imread(targets_val[i]), depth))

        X_train = img_to_normal(np.array(X_train)[..., np.newaxis])
        Y_train = img_to_ohe(np.array(Y_train))

        X_val = img_to_normal(np.array(X_val)[..., np.newaxis])
        Y_val = img_to_ohe(np.array(Y_val))

        # Shuffles inputs and targets with the same permutation
        indexes = list(range(0, len(inputs_val)))
        np.random.shuffle(indexes)
        X_val = X_val[indexes]
        Y_val = Y_val[indexes]

        X_val1 = X_val[:5]
        Y_val1 = Y_val[:5]
        X_val2 = X_val[5:10]
        Y_val2 = Y_val[5:10]
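        # X_val1/Y_val1 monitor training via validation_data; X_val2/Y_val2
        # serve as a small held-out test set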

        fig, ax = plt.subplots(1, 1, figsize=(15, 10))
        plt.subplots_adjust(hspace=0.4)

        for s in range(1, 11):
            mc = ModelCheckpoint(
                "unet_{0}_{1}.hdf5".format(ds, s), monitor='val_loss', verbose=1, save_best_only=True, mode='min')
            #es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=5)

            # Initializes model
            model = UNet(depth)
            model.compile(loss=loss_func, optimizer=opt)

            # Trains model
            start = time.time()
            history = model.fit(X_train[:s], Y_train[:s], validation_data=(
                X_val1, Y_val1), epochs=epochs, batch_size=batch_size, verbose=2, callbacks=[mc])
            end = time.time()

            # Plots some performance graphs
            loss = history.history['loss']
            val_loss = history.history['val_loss']

            np.save('history_{0}_{1}.npy'.format(ds, s), history.history)

            epochs_range = list(range(0, len(loss)))

            '''
            Loss 
            '''
            ax.plot(epochs_range[1:], val_loss[1:],
                    label='Validation loss with {0} examples'.format(s))
            ax.xaxis.set_ticks(np.arange(0, 101, 10))
            ax.yaxis.set_ticks(np.arange(0, 1, 0.1))
            ax.set_xlabel('Epochs')
            ax.set_ylabel('Loss')
            ax.set_title('Learning curve - {0}'.format(datasets_label[d]))
            ax.legend()

            fig.savefig('learning_curve_{0}.png'.format(ds))

            model = UNet(depth)

            model.load_weights("unet_{0}_{1}.hdf5".format(ds, s))

            Y_pred = model.predict(X_val2)

            test_loss = loss_func(K.constant(
                Y_val2), K.constant(Y_pred)).numpy()

            Y_pred = ohe_to_img(Y_pred)
            Y_val2 = ohe_to_img(Y_val2)

            metrics = calc_metrics(Y_val2, Y_pred)

            Y_val2 = img_to_ohe(Y_val2)
            
            df2 = pd.DataFrame(data={'dataset': [datasets_label[d]], 'size': [s],
                                     'time elapsed during training': [end-start], 'epochs': [len(loss)],
                                     'val_loss': [np.amin(val_loss)],
                                     'test_loss': [test_loss], 'test_acc': [metrics['accuracy']],
                                     'test_precision': [metrics['precision']],
                                     'test_recall': [metrics['recall']]})
            # DataFrame.append was removed in pandas 2.0; concat is the replacement
            df = pd.concat([df, df2], ignore_index=True)

            df.to_csv('results.csv', index=False)
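
The generalized_dice_loss referenced above is not shown in this listing. A minimal Keras-backend sketch of the usual formulation (Sudre et al., 2017); the repository's actual implementation may differ in details such as smoothing:

# Hedged sketch of a generalized Dice loss for one-hot targets of shape
# (batch, H, W, classes); not the repository's verbatim implementation
import tensorflow.keras.backend as K

def generalized_dice_loss(y_true, y_pred, eps=1e-6):
    # Per-class weights: inverse square of each class's total volume
    w = 1.0 / (K.square(K.sum(y_true, axis=(0, 1, 2))) + eps)
    numerator = K.sum(w * K.sum(y_true * y_pred, axis=(0, 1, 2)))
    denominator = K.sum(w * K.sum(y_true + y_pred, axis=(0, 1, 2)))
    return 1.0 - 2.0 * numerator / (denominator + eps)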
Example #3
# Imports mirror Example #2, plus CategoricalCrossentropy, EarlyStopping,
# Precision and Recall from tensorflow.keras.
def main():
    # Fixes an initial seed for reproducibility
    np.random.seed(RANDOM_SEED)
    set_random_seed(RANDOM_SEED)

    epochs = 100
    batch_size = 1
    depth = 3
    loss_func = CategoricalCrossentropy()
    learning_rate = 1e-4
    opt = Adam(learning_rate=learning_rate)

    # Gets the current directory path
    cdir = os.getcwd()

    # Gets all .jpg input images and .png target masks; note the leading "/"
    # after the working directory, without which the glob finds nothing
    inputs_train = glob.glob(
        str(cdir) + "/../../subconjuntos/D1_ds0/inputs/*.jpg")
    targets_train = glob.glob(
        str(cdir) + "/../../subconjuntos/D1_ds0/target/*.png")

    inputs_val = glob.glob(
        str(cdir) + "/../../subconjuntos/TT_ds0/input/*.jpg")
    targets_val = glob.glob(
        str(cdir) + "/../../subconjuntos/TT_ds0/target/*.png")

    # Sort paths so inputs and targets align (glob order is not guaranteed)
    inputs_train.sort()
    targets_train.sort()
    inputs_val.sort()
    targets_val.sort()

    X_train = []
    Y_train = []
    X_val = []
    Y_val = []
    # Iterates through the files and loads the training and validation images

    for i, _ in enumerate(inputs_train):
        x = plt.imread(inputs_train[i])
        if len(x.shape) == 3:
            x = x[:, :, 0]
        X_train.append(fix_size(x, depth))
        Y_train.append(fix_size(plt.imread(targets_train[i]), depth))
    for i, _ in enumerate(inputs_val):
        x = plt.imread(inputs_val[i])
        if len(x.shape) == 3:
            x = x[:, :, 0]
        X_val.append(fix_size(x, depth))
        Y_val.append(fix_size(plt.imread(targets_val[i]), depth))

    X_train = np.array(X_train)[..., np.newaxis]
    Y_train = img_to_ohe(np.array(Y_train))

    X_val = np.array(X_val)[..., np.newaxis]
    Y_val = img_to_ohe(np.array(Y_val))

    # Shuffles inputs and targets with the same permutation
    indexes = list(range(0, len(inputs_val)))
    np.random.shuffle(indexes)
    X_val = X_val[indexes]
    Y_val = Y_val[indexes]

    X_val1 = X_val[:5]
    Y_val1 = Y_val[:5]
    X_val2 = X_val[5:10]
    Y_val2 = Y_val[5:10]
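    # Note: X_val1/Y_val1 and X_val2/Y_val2 go unused in this script; the
    # fit call below validates on the full X_val/Y_val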

    mc = ModelCheckpoint("unet.hdf5",
                         monitor='val_loss',
                         verbose=1,
                         save_best_only=True,
                         mode='min')
    es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=5)

    # Initializes model
    model = UNet(depth)
    model.compile(loss=loss_func,
                  optimizer=opt,
                  metrics=['accuracy', Precision(),
                           Recall()])

    # Trains model
    start = time.time()
    history = model.fit(X_train,
                        Y_train,
                        validation_data=(X_val, Y_val),
                        epochs=epochs,
                        batch_size=batch_size,
                        verbose=2,
                        callbacks=[mc, es])
    end = time.time()

    # Plots some performance graphs
    # Keras 2.x logs these under 'accuracy'/'val_accuracy', not 'acc'/'val_acc'
    acc = history.history['accuracy']
    val_acc = history.history['val_accuracy']
    loss = history.history['loss']
    val_loss = history.history['val_loss']
    precision = history.history['precision']
    val_precision = history.history['val_precision']
    recall = history.history['recall']
    val_recall = history.history['val_recall']

    df = pd.DataFrame(
        data={
            'acc': [np.amax(acc)],
            'val_acc': [np.amax(val_acc)],
            'loss': [np.amin(loss)],
            'val_loss': [np.amin(val_loss)],
            'precision': [np.amax(precision)],
            'val_precision': [np.amax(val_precision)],
            'recall': [np.amax(recall)],
            'val_recall': [np.amax(val_recall)]
        })

    epochs_range = list(range(0, len(acc)))

    fig, ax = plt.subplots(2, 2, figsize=(12, 12))
    ax[0][0].plot(epochs_range, loss, 'bo', label='Training loss')
    ax[0][0].plot(epochs_range, val_loss, 'b', label='Validation loss')
    ax[0][0].set_title('Training and validation loss - UNet')
    ax[0][0].set_xlabel('Epochs')
    ax[0][0].set_ylabel('Loss')
    ax[0][0].legend()

    ax[0][1].plot(epochs_range, acc, 'bo', label='Training acc')
    ax[0][1].plot(epochs_range, val_acc, 'b', label='Validation acc')
    ax[0][1].set_title('Training and validation accuracy - UNet')
    ax[0][1].set_xlabel('Epochs')
    ax[0][1].set_ylabel('Accuracy')
    ax[0][1].legend()

    ax[1][0].plot(epochs_range, precision, 'bo', label='Training precision')
    ax[1][0].plot(epochs_range,
                  val_precision,
                  'b',
                  label='Validation precision')
    ax[1][0].set_title('Training and validation precision - UNet')
    ax[1][0].set_xlabel('Epochs')
    ax[1][0].set_ylabel('Precision')
    ax[1][0].legend()

    ax[1][1].plot(epochs_range, recall, 'bo', label='Training recall')
    ax[1][1].plot(epochs_range, val_recall, 'b', label='Validation recall')
    ax[1][1].set_title('Training and validation recall - UNet')
    ax[1][1].set_xlabel('Epochs')
    ax[1][1].set_ylabel('Recall')
    ax[1][1].legend()
    plt.subplots_adjust(hspace=0.5)
    fig.savefig('learning_curve.png')
    plt.clf()

    df.to_csv('results.csv')
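
The img_to_ohe and ohe_to_img helpers used throughout are also defined elsewhere. A hypothetical sketch, assuming integer label maps with consecutive class indices (the repository's versions may map specific gray levels instead):

import numpy as np

def img_to_ohe(labels, num_classes=None):
    # (N, H, W) integer class maps -> (N, H, W, C) one-hot encoding
    labels = labels.astype(np.int64)
    if num_classes is None:
        num_classes = int(labels.max()) + 1
    return np.eye(num_classes, dtype=np.float32)[labels]

def ohe_to_img(ohe):
    # (N, H, W, C) probabilities or one-hot -> (N, H, W) label maps
    return np.argmax(ohe, axis=-1)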
Example #4
# Imports mirror Example #2, plus CategoricalCrossentropy and clear_session.
def main():
    # Gets the current directory path
    cdir = os.getcwd()

    # Gets all .jpg input images and .png target masks (again with the
    # leading "/" after the working directory)
    inputs_train = glob.glob(
        str(cdir) + "/../../subconjuntos/D1_ds0/inputs/*.jpg")
    targets_train = glob.glob(
        str(cdir) + "/../../subconjuntos/D1_ds0/target/*.png")

    inputs_val = glob.glob(
        str(cdir) + "/../../subconjuntos/TT_ds0/input/*.jpg")
    targets_val = glob.glob(
        str(cdir) + "/../../subconjuntos/TT_ds0/target/*.png")

    # Sort paths
    inputs_train.sort()
    targets_train.sort()
    inputs_val.sort()
    targets_val.sort()

    # Parameters (the optimizer is created per depth inside the loop)
    epochs = 100
    batch_size = 1
    depths = [1, 2, 3, 4, 5]
    loss_func = CategoricalCrossentropy()
    learning_rate = 1e-4

    df = pd.DataFrame(columns=[
        'depth', 'loss_func', 'time elapsed during training', 'epochs', 'loss',
        'val_loss', 'test_loss', 'test_acc', 'test_precision', 'test_recall'
    ])

    fig, ax = plt.subplots(2, 1, figsize=(15, 15))

    for depth in depths:
        opt = Adam(learning_rate=learning_rate)

        # Fixes an initial seed for reproducibility
        np.random.seed(RANDOM_SEED)
        set_random_seed(RANDOM_SEED)

        X_train = []
        Y_train = []
        X_val = []
        Y_val = []
        # Iterates through the files and loads the training and validation images

        for i, _ in enumerate(inputs_train):
            x = plt.imread(inputs_train[i])
            if len(x.shape) == 3:
                x = x[:, :, 0]
            X_train.append(fix_size(x, depth))
            Y_train.append(fix_size(plt.imread(targets_train[i]), depth))
        for i, _ in enumerate(inputs_val):
            x = plt.imread(inputs_val[i])
            if len(x.shape) == 3:
                x = x[:, :, 0]
            X_val.append(fix_size(x, depth))
            Y_val.append(fix_size(plt.imread(targets_val[i]), depth))

        X_train = img_to_normal(np.array(X_train)[..., np.newaxis])
        Y_train = img_to_ohe(np.array(Y_train))

        X_val = img_to_normal(np.array(X_val)[..., np.newaxis])
        Y_val = img_to_ohe(np.array(Y_val))

        # Shuffles inputs and targets with the same permutation
        indexes = list(range(0, len(inputs_val)))
        np.random.shuffle(indexes)
        X_val = X_val[indexes]
        Y_val = Y_val[indexes]

        X_val1 = X_val[:5]
        Y_val1 = Y_val[:5]
        X_val2 = X_val[5:10]
        Y_val2 = Y_val[5:10]

        mc = ModelCheckpoint("unet_{0}.hdf5".format(depth),
                             monitor='val_loss',
                             verbose=1,
                             save_best_only=True,
                             mode='min')

        # Initializes model
        model = UNet(depth)
        model.compile(loss=loss_func, optimizer=opt)

        # Trains the model
        start = time.time()
        history = model.fit(X_train,
                            Y_train,
                            validation_data=(X_val1, Y_val1),
                            epochs=epochs,
                            batch_size=batch_size,
                            verbose=2,
                            callbacks=[mc])
        end = time.time()

        # Plots some performance graphs
        loss = history.history['loss']
        val_loss = history.history['val_loss']

        np.save('history_{0}.npy'.format(depth), history.history)

        clear_session()

        epochs_range = list(range(0, len(loss)))

        ax[0].plot(epochs_range[1:], loss[1:], label='Depth {0}'.format(depth))
        ax[0].xaxis.set_ticks(np.arange(0, 101, 5))
        ax[0].yaxis.set_ticks(np.arange(0, 1, 0.1))
        ax[0].set_title('Training loss - UNet')
        ax[0].set_xlabel('Epochs')
        ax[0].set_ylabel('Loss')
        ax[0].legend()

        ax[1].plot(epochs_range[1:],
                   val_loss[1:],
                   label='Depth {0}'.format(depth))
        ax[1].xaxis.set_ticks(np.arange(0, 101, 5))
        ax[1].yaxis.set_ticks(np.arange(0, 1, 0.1))
        ax[1].set_title('Validation loss - UNet')
        ax[1].set_xlabel('Epochs')
        ax[1].set_ylabel('Loss')
        ax[1].legend()

        fig.savefig('learning_curve.png')

        model = UNet(depth)

        model.load_weights("unet_{0}.hdf5".format(depth))

        Y_pred = model.predict(X_val2)

        # Computes the test loss on the one-hot tensors before converting
        # them back to label images (as in Example #2)
        test_loss = loss_func(K.constant(Y_val2), K.constant(Y_pred)).numpy()

        Y_pred = ohe_to_img(Y_pred)
        Y_val2 = ohe_to_img(Y_val2)

        metrics = calc_metrics(Y_val2, Y_pred)

        df2 = pd.DataFrame(
            data={
                'depth': [depth],
                'loss_func': ["CE"],
                'time elapsed during training': [end - start],
                'epochs': [len(loss)],
                'loss': [np.amin(loss)],
                'val_loss': [np.amin(val_loss)],
                'test_loss': [test_loss],
                'test_acc': [metrics['accuracy']],
                'test_precision': [metrics['precision']],
                'test_recall': [metrics['recall']]
            })
        # DataFrame.append was removed in pandas 2.0; concat is the replacement
        df = pd.concat([df, df2], ignore_index=True)

        df.to_csv('results.csv', index=False)
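
fix_size is another undefined helper here. For a U-Net of depth d, input sides must be divisible by 2**d for the pooling and upsampling stages to line up, so a plausible sketch pads each 2-D image accordingly (the repository's version may crop or resize instead):

import numpy as np

def fix_size(img, depth):
    # Pads height and width up to the next multiple of 2**depth
    factor = 2 ** depth
    h, w = img.shape[:2]
    pad_h = (-h) % factor
    pad_w = (-w) % factor
    return np.pad(img, ((0, pad_h), (0, pad_w)), mode='reflect')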
Example #5
# ------- MODEL TRAINING ----------

# Assumed context: LOSS, BATCH_SIZE, EPOCHS_NUMBER, SAVING_NAME, UNet, F1Score
# and the train/test arrays are defined earlier in the full script; F1Score is
# typically tensorflow_addons' tfa.metrics.F1Score.
import cv2
import numpy as np
import tensorflow as tf

model = UNet()
model.compile(optimizer='adam',
              loss=LOSS,
              metrics=[
                  'binary_crossentropy', 'accuracy',
                  tf.keras.metrics.Precision(),
                  tf.keras.metrics.Recall(),
                  F1Score()
              ])

# Train the model
fit = model.fit(train_images,
                train_masks,
                batch_size=BATCH_SIZE,
                epochs=EPOCHS_NUMBER,
                shuffle=True,
                validation_data=(test_images, test_masks))
test_loss, _, test_acc, _, _, _ = model.evaluate(test_images, test_masks)

# Saves the model weights
model.save_weights('models/{}model.ckpt'.format(SAVING_NAME))

# Predict and save masks for the test set
predicted_masks = model.predict(test_images, batch_size=BATCH_SIZE)

for i in range(10):
    pred_mask = np.squeeze(predicted_masks[i])
    true_mask = test_masks[i]
    cv2.imwrite('results/{}{}.{}.png'.format(EPOCHS_NUMBER, BATCH_SIZE, i),
                np.ceil(np.concatenate((true_mask, pred_mask), axis=1) * 255.))
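
Since save_weights stores only the weights, restoring the model later means rebuilding the architecture first. A minimal sketch, assuming the same UNet() constructor:

# Restore weights into a freshly built model before predicting
restored = UNet()
restored.load_weights('models/{}model.ckpt'.format(SAVING_NAME))
new_masks = restored.predict(test_images, batch_size=BATCH_SIZE)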