Example #1
def main():
    # Gets current directory path
    cdir = os.getcwd()

    # Parameters
    epochs = 100
    batch_size = 1
    depth = 3
    loss_label = 'GDL'
    loss_func = generalized_dice_loss

    learning_rate = 1e-4

    df = pd.DataFrame(columns=['dataset', 'size', 'time elapsed during training',
                               'epochs', 'val_loss', 'test_loss', 'test_acc', 'test_precision', 'test_recall'])

    datasets = ["ds0", "ds1", "ds2", "ds3"]
    datasets_label = ["EvaLady", "AosugiruHaru",
                      "JijiBabaFight", "MariaSamaNihaNaisyo"]

    for d, ds in enumerate(datasets):
        # Gets all .jpg files
        inputs_train = glob.glob(
            str(cdir)+"/../../datasets/D1_"+ds+"/input/*.jpg")
        # Gets all .png files
        targets_train = glob.glob(
            str(cdir)+"/../../datasets/D1_"+ds+"/target/*.png")

        # Gets all .jpg files
        inputs_val = glob.glob(
            str(cdir)+"/../../datasets/TT_"+ds+"/input/*.jpg")
        # Gets all .png files
        targets_val = glob.glob(
            str(cdir)+"/../../datasets/TT_"+ds+"/target/*.png")

        # Sort paths
        inputs_train.sort()
        targets_train.sort()
        inputs_val.sort()
        targets_val.sort()

        opt = Adam(lr=learning_rate)

        # Fixes an initial seed for randomness
        np.random.seed(RANDOM_SEED)
        set_random_seed(RANDOM_SEED)

        X_train = []
        Y_train = []
        X_val = []
        Y_val = []
        # Iterates through files and extracts patches for training, validation and testing

        for i, _ in enumerate(inputs_train):
            x = plt.imread(inputs_train[i])
            if len(x.shape) == 3:
                x = x[:, :, 0]
            X_train.append(fix_size(x, depth))
            Y_train.append(fix_size(plt.imread(targets_train[i]), depth))
        for i, _ in enumerate(inputs_val):
            x = plt.imread(inputs_val[i])
            if len(x.shape) == 3:
                x = x[:, :, 0]
            X_val.append(fix_size(x, depth))
            Y_val.append(fix_size(plt.imread(targets_val[i]), depth))

        X_train = img_to_normal(np.array(X_train)[..., np.newaxis])
        Y_train = img_to_ohe(np.array(Y_train))

        X_val = img_to_normal(np.array(X_val)[..., np.newaxis])
        Y_val = img_to_ohe(np.array(Y_val))

        # Shuffles both the inputs and targets set
        indexes = list(range(0, len(inputs_val)))
        np.random.shuffle(indexes)
        X_val = X_val[indexes]
        Y_val = Y_val[indexes]

        X_val1 = X_val[:5]
        Y_val1 = Y_val[:5]
        X_val2 = X_val[5:10]
        Y_val2 = Y_val[5:10]

        fig, ax = plt.subplots(1, 1, figsize=(15, 10))
        plt.subplots_adjust(hspace=0.4)

        for s in range(1, 11):
            mc = ModelCheckpoint(
                "unet_{0}_{1}.hdf5".format(ds, s), monitor='val_loss', verbose=1, save_best_only=True, mode='min')
            #es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=5)

            # Initializes model
            model = UNet(depth)
            model.compile(loss=loss_func, optimizer=opt)

            # Trains model
            start = time.time()
            history = model.fit(X_train[:s], Y_train[:s], validation_data=(
                X_val1, Y_val1), epochs=epochs, batch_size=batch_size, verbose=2, callbacks=[mc])
            end = time.time()

            # Plots some performance graphs
            loss = history.history['loss']
            val_loss = history.history['val_loss']

            np.save('history_{0}_{1}.npy'.format(ds, s), history.history)

            epochs_range = list(range(0, len(loss)))

            # Loss curve
            ax.plot(epochs_range[1:], val_loss[1:],
                    label='Validation loss with {0} examples'.format(s))
            ax.xaxis.set_ticks(np.arange(0, 101, 10))
            ax.yaxis.set_ticks(np.arange(0, 1, 0.1))
            ax.set_xlabel('Epochs')
            ax.set_ylabel('Loss')
            ax.set_title('Learning curve - {0}'.format(datasets_label[d]))
            ax.legend()

            fig.savefig('learning_curve_{0}.png'.format(ds))

            model = UNet(depth)

            model.load_weights("unet_{0}_{1}.hdf5".format(ds, s))

            Y_pred = model.predict(X_val2)

            test_loss = loss_func(K.constant(
                Y_val2), K.constant(Y_pred)).numpy()

            Y_pred = ohe_to_img(Y_pred)
            Y_val2 = ohe_to_img(Y_val2)

            metrics = calc_metrics(Y_val2, Y_pred)

            Y_val2 = img_to_ohe(Y_val2)
            
            df2 = pd.DataFrame(data={'dataset': [datasets_label[d]], 'size': [s],
                                     'time elapsed during training': [end-start], 'epochs': [len(loss)],
                                     'val_loss': [np.amin(val_loss)],
                                     'test_loss': [test_loss], 'test_acc': [metrics['accuracy']],
                                     'test_precision': [metrics['precision']],
                                     'test_recall': [metrics['recall']]})
            df = pd.concat([df, df2], ignore_index=True)

            df.to_csv('results.csv', index=False)
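
The `generalized_dice_loss` used above is a project-specific helper that the snippet does not define. For reference, a minimal sketch of the standard generalized Dice loss (Sudre et al., 2017) for one-hot (batch, height, width, classes) tensors could look like the following; the actual implementation in robonauta/MAC0499 may differ:

import tensorflow.keras.backend as K

def generalized_dice_loss(y_true, y_pred):
    # Per-class weight: inverse squared class volume, so rare classes
    # contribute as much as frequent ones (Sudre et al., 2017)
    w = 1.0 / (K.square(K.sum(y_true, axis=(0, 1, 2))) + K.epsilon())
    intersection = K.sum(w * K.sum(y_true * y_pred, axis=(0, 1, 2)))
    union = K.sum(w * K.sum(y_true + y_pred, axis=(0, 1, 2)))
    return 1.0 - 2.0 * intersection / (union + K.epsilon())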
Example #2
File: driver.py Project: robonauta/MAC0499
def main():
    # Gets current directory path
    cdir = os.getcwd()

    # Parameters
    epochs = 100
    batch_size = 1
    depth = 3
    loss_label = 'GDL'
    loss_func = generalized_dice_loss

    learning_rate = 1e-4

    datasets = ["ds0", "ds1", "ds2", "ds3"]
    datasets_label = [
        "EvaLady", "AosugiruHaru", "JijiBabaFight", "MariaSamaNihaNaisyo"
    ]

    df = pd.DataFrame(columns=[
        'Trained on', 'Tested on', 'Loss', 'Accuracy', 'Precision', 'Recall'
    ])

    for m, ml in enumerate(datasets):
        model = UNet(depth)

        model.load_weights("unet_{0}.hdf5".format(ml))

        for d, ds in enumerate(datasets):
            # Skips the dataset the model was trained on
            if ds == ml:
                continue

            # Gets all .jpg files
            inputs_train = glob.glob(
                str(cdir) + "/../../datasets/D1_" + ds + "/input/*.jpg")
            # Gets all .png files
            targets_train = glob.glob(
                str(cdir) + "/../../datasets/D1_" + ds + "/target/*.png")

            # Gets all .jpg files
            inputs_val = glob.glob(
                str(cdir) + "/../../datasets/TT_" + ds + "/input/*.jpg")
            # Gets all .png files
            targets_val = glob.glob(
                str(cdir) + "/../../datasets/TT_" + ds + "/target/*.png")

            # Sort paths
            inputs_train.sort()
            targets_train.sort()
            inputs_val.sort()
            targets_val.sort()

            opt = Adam(lr=learning_rate)

            # Fixes an initial seed for randomness
            np.random.seed(RANDOM_SEED)
            set_random_seed(RANDOM_SEED)

            X_train = []
            Y_train = []
            X_val = []
            Y_val = []
            # Iterates through files and extracts patches for evaluation
            for i, _ in enumerate(inputs_val):
                x = plt.imread(inputs_val[i])
                if len(x.shape) == 3:
                    x = x[:, :, 0]
                X_val.append(fix_size(x, depth))
                Y_val.append(fix_size(plt.imread(targets_val[i]), depth))

            X_val = img_to_normal(np.array(X_val)[..., np.newaxis])
            Y_val = img_to_ohe(np.array(Y_val))

            # Shuffles both the inputs and targets set
            indexes = list(range(0, len(inputs_val)))
            np.random.shuffle(indexes)
            X_val = X_val[indexes]
            Y_val = Y_val[indexes]
            inputs_val = np.array(inputs_val)[indexes]

            X_val2 = X_val[5:]
            Y_val2 = Y_val[5:]

            Y_pred = model.predict(X_val2)

            test_loss = loss_func(K.constant(Y_val2),
                                  K.constant(Y_pred)).numpy()

            Y_pred = ohe_to_img(Y_pred)
            Y_val2 = ohe_to_img(Y_val2)

            metrics = calc_metrics(Y_val2, Y_pred)
            df = pd.concat([
                df,
                pd.DataFrame(
                    data={
                        'Trained on': [datasets_label[m]],
                        'Tested on': [datasets_label[d]],
                        'Loss': [test_loss],
                        'Accuracy': [metrics['accuracy']],
                        'Precision': [metrics['precision']],
                        'Recall': [metrics['recall']]
                    })
            ], ignore_index=True)

            df.to_csv('results.csv', index=False)

    clear_session()
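
`fix_size(x, depth)` is another helper from the project. Since a UNet with `depth` pooling stages halves the spatial resolution `depth` times, it presumably crops or pads each image so that height and width are divisible by 2**depth. A hypothetical sketch under that assumption (the helper's name comes from the snippets; this body is not the project's code):

import numpy as np

def fix_size(img, depth):
    # Hypothetical: crop height and width down to the nearest multiple of
    # 2**depth, so the UNet's pooling stages divide the image evenly
    f = 2 ** depth
    h, w = img.shape[:2]
    return img[:(h // f) * f, :(w // f) * f]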
Example #3
def main():
    # Gets current directory path
    cdir = os.getcwd()

    # Gets all .jpg files (note the leading slash: os.getcwd() has no trailing one)
    inputs_train = glob.glob(
        str(cdir) + "/../../subconjuntos/D1_ds0/inputs/*.jpg")
    # Gets all .png files
    targets_train = glob.glob(
        str(cdir) + "/../../subconjuntos/D1_ds0/target/*.png")

    # Gets all .jpg files
    inputs_val = glob.glob(
        str(cdir) + "/../../subconjuntos/TT_ds0/input/*.jpg")
    # Gets all .png files
    targets_val = glob.glob(
        str(cdir) + "/../../subconjuntos/TT_ds0/target/*.png")

    # Sort paths
    inputs_train.sort()
    targets_train.sort()
    inputs_val.sort()
    targets_val.sort()

    # Parameters
    epochs = 100
    batch_size = 1
    depths = [1, 2, 3, 4, 5]
    loss_func = CategoricalCrossentropy()
    learning_rate = 1e-4
    opt = Adam(lr=learning_rate)

    df = pd.DataFrame(columns=[
        'depth', 'loss_func', 'time elapsed during training', 'epochs', 'loss',
        'val_loss', 'test_loss', 'test acc', 'test precision', 'test_recall'
    ])

    fig, ax = plt.subplots(2, 1, figsize=(15, 15))

    for depth in depths:
        opt = Adam(lr=learning_rate)

        # Fixes an initial seed for randomness
        np.random.seed(RANDOM_SEED)
        set_random_seed(RANDOM_SEED)

        X_train = []
        Y_train = []
        X_val = []
        Y_val = []
        # Iterates through files and extracts patches for training, validation and testing

        for i, _ in enumerate(inputs_train):
            x = plt.imread(inputs_train[i])
            if len(x.shape) == 3:
                x = x[:, :, 0]
            X_train.append(fix_size(x, depth))
            Y_train.append(fix_size(plt.imread(targets_train[i]), depth))
        for i, _ in enumerate(inputs_val):
            x = plt.imread(inputs_val[i])
            if len(x.shape) == 3:
                x = x[:, :, 0]
            X_val.append(fix_size(x, depth))
            Y_val.append(fix_size(plt.imread(targets_val[i]), depth))

        X_train = img_to_normal(np.array(X_train)[..., np.newaxis])
        Y_train = img_to_ohe(np.array(Y_train))

        X_val = img_to_normal(np.array(X_val)[..., np.newaxis])
        Y_val = img_to_ohe(np.array(Y_val))

        # Shuffles both the inputs and targets set
        indexes = list(range(0, len(inputs_val)))
        np.random.shuffle(indexes)
        X_val = X_val[indexes]
        Y_val = Y_val[indexes]

        X_val1 = X_val[:5]
        Y_val1 = Y_val[:5]
        X_val2 = X_val[5:10]
        Y_val2 = Y_val[5:10]

        mc = ModelCheckpoint("unet_{0}.hdf5".format(depth),
                             monitor='val_loss',
                             verbose=1,
                             save_best_only=True,
                             mode='min')

        # Initializes model
        model = UNet(depth)
        model.compile(loss=loss_func, optimizer=opt)

        # Trains the model
        start = time.time()
        history = model.fit(X_train,
                            Y_train,
                            validation_data=(X_val1, Y_val1),
                            epochs=epochs,
                            batch_size=batch_size,
                            verbose=2,
                            callbacks=[mc])
        end = time.time()

        # Plots some performance graphs
        loss = history.history['loss']
        val_loss = history.history['val_loss']

        np.save('history_{0}.npy'.format(depth), history.history)

        clear_session()

        epochs_range = list(range(0, len(loss)))

        ax[0].plot(epochs_range[1:], loss[1:], label='Depth {0}'.format(depth))
        ax[0].xaxis.set_ticks(np.arange(0, 101, 5))
        ax[0].yaxis.set_ticks(np.arange(0, 1, 0.1))
        ax[0].set_title('Training loss - UNet')
        ax[0].set_xlabel('Epochs')
        ax[0].set_ylabel('Loss')
        ax[0].legend()

        ax[1].plot(epochs_range[1:],
                   val_loss[1:],
                   label='Depth {0}'.format(depth))
        ax[1].xaxis.set_ticks(np.arange(0, 101, 5))
        ax[1].yaxis.set_ticks(np.arange(0, 1, 0.1))
        ax[1].set_title('Validation loss - UNet')
        ax[1].set_xlabel('Epochs')
        ax[1].set_ylabel('Loss')
        ax[1].legend()

        fig.savefig('learning_curve.png')

        model = UNet(depth)

        model.load_weights("unet_{0}.hdf5".format(depth))

        Y_pred = model.predict(X_val2)

        # Computes the test loss on the one-hot tensors before they are
        # converted back to label images (cross-entropy expects one-hot input)
        test_loss = loss_func(K.constant(Y_val2), K.constant(Y_pred)).numpy()

        Y_pred = ohe_to_img(Y_pred)
        Y_val2 = ohe_to_img(Y_val2)

        metrics = calc_metrics(Y_val2, Y_pred)

        df2 = pd.DataFrame(
            data={
                'depth': [depth],
                'loss_func': ["CE"],
                'time elapsed during training': [end - start],
                'epochs': [len(loss)],
                'loss': [np.amin(loss)],
                'val_loss': [np.amin(val_loss)],
                'test_loss': [test_loss],
                'test acc': [metrics['accuracy']],
                'test precision': [metrics['precision']],
                'test_recall': [metrics['recall']]
            })
        df = pd.concat([df, df2], ignore_index=True)

        df.to_csv('results.csv', index=False)
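
The `img_to_ohe`/`ohe_to_img` pair converts between label images and one-hot tensors. A minimal sketch, assuming the targets are integer class maps of shape (batch, height, width); plt.imread returns PNG pixels as floats in [0, 1], so the real helper likely rescales first:

import numpy as np

def img_to_ohe(y):
    # One-hot encode integer label maps: (batch, H, W) -> (batch, H, W, classes)
    y = y.astype(np.int64)
    n_classes = int(y.max()) + 1
    return np.eye(n_classes, dtype=np.float32)[y]

def ohe_to_img(y):
    # Inverse: argmax over the class axis back to integer label maps
    return np.argmax(y, axis=-1)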
Example #4
# (snippet begins mid-way through a model.compile(..., metrics=[...]) call)
                  F1Score()
              ])

# Train the model
fit = model.fit(train_images,
                train_masks,
                batch_size=BATCH_SIZE,
                epochs=EPOCHS_NUMBER,
                shuffle=True,
                validation_data=(test_images, test_masks))
test_loss, _, test_acc, _, _, _ = model.evaluate(test_images, test_masks)

# Save the model weights
model.save_weights('models/{}model.ckpt'.format(SAVING_NAME))

# Predict and save masks for the test set
predicted_masks = model.predict(test_images, batch_size=BATCH_SIZE)

for i in range(10):
    pred_mask = np.squeeze(predicted_masks[i])
    true_mask = test_masks[i]
    cv2.imwrite('results/{}{}.{}.png'.format(EPOCHS_NUMBER, BATCH_SIZE, i),
                np.ceil(np.concatenate((true_mask, pred_mask), axis=1) * 255.))

results_file = open(
    'models/{}test_loss{}test_accuracy.txt'.format(test_loss, test_acc), 'w')
results_file.write(SAVING_NAME)
results_file.close()

plot_acc_loss(fit.history['accuracy'], fit.history['loss'], SAVING_NAME)
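
`plot_acc_loss` is not shown either. A hypothetical version, assuming it simply plots the two training curves over epochs and saves the figure under the given name:

import matplotlib.pyplot as plt

def plot_acc_loss(acc, loss, name):
    # Hypothetical helper: plot accuracy and loss per epoch side by side
    fig, (ax0, ax1) = plt.subplots(1, 2, figsize=(12, 4))
    ax0.plot(acc)
    ax0.set_title('Accuracy')
    ax0.set_xlabel('Epoch')
    ax1.plot(loss)
    ax1.set_title('Loss')
    ax1.set_xlabel('Epoch')
    fig.savefig('{}_acc_loss.png'.format(name))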