# Example #1
# 0
def save_images(autoencoder, args, test_folds):
    """Binarize every test image with the autoencoder and write the results.

    For each grayscale image in *test_folds*, slides a square window of size
    ``args.window`` with stride ``args.step``, predicts each patch, thresholds
    the prediction at ``args.threshold``, reassembles the binary image, and
    writes it to a sibling folder (``_GR/`` replaced by ``_PR-<modelpath>/``).

    Args:
        autoencoder: trained model exposing a Keras-style ``predict`` method.
        args: parsed options; uses ``path``, ``window``, ``step``,
            ``threshold``, and ``modelpath``.
        test_folds: fold identifiers forwarded to ``util.load_array_of_files``.

    Raises:
        ValueError: if ``args.threshold`` is still the sentinel value ``-1``.
    """
    # Explicit validation instead of `assert`, which is stripped under `-O`.
    if args.threshold == -1:
        raise ValueError('args.threshold must be set (sentinel -1 found)')

    array_files = util.load_array_of_files(args.path, test_folds)

    for fname in array_files:
        print('Processing image', fname)

        img = cv2.imread(fname, cv2.IMREAD_GRAYSCALE)
        # cv2.imread returns None for missing/corrupt files; skip instead of
        # crashing on the subsequent `.shape` access.
        if img is None:
            print('Warning: could not read image, skipping:', fname)
            continue

        rows, cols = img.shape[0], img.shape[1]

        # Upscale images smaller than the window so at least one full window
        # fits in each dimension (cv2.resize takes (width, height)).
        if rows < args.window or cols < args.window:
            new_rows = max(rows, args.window)
            new_cols = max(cols, args.window)
            img = cv2.resize(img, (new_cols, new_rows),
                             interpolation=cv2.INTER_CUBIC)

        img = np.asarray(img).astype('float32')
        img = 255. - img  # invert so ink becomes the high-valued foreground

        finalImg = np.zeros(img.shape, dtype=bool)

        for (x, y, window) in utilDataGenerator.sliding_window(
                img, stepSize=args.step,
                windowSize=(args.window, args.window)):
            # Ignore partial windows at the right/bottom borders.
            if window.shape[0] != args.window or window.shape[1] != args.window:
                continue

            roi = img[y:(y + args.window), x:(x + args.window)].copy()
            roi = roi.reshape(1, args.window, args.window, 1)
            roi = roi.astype('float32')  #/ 255.

            prediction = autoencoder.predict(roi)
            prediction = (prediction > args.threshold)

            finalImg[y:(y + args.window),
                     x:(x + args.window)] = prediction[0].reshape(
                         args.window, args.window)

        # Invert back (foreground -> 0, background -> 255) as 8-bit image.
        finalImg = (1 - finalImg).astype('uint8') * 255

        # Restore the original size if the input was upscaled above.
        if finalImg.shape[0] != rows or finalImg.shape[1] != cols:
            finalImg = cv2.resize(finalImg, (cols, rows),
                                  interpolation=cv2.INTER_CUBIC)

        outFilename = fname.replace('_GR/', '_PR-' + args.modelpath + '/')

        util.mkdirp(os.path.dirname(outFilename))

        cv2.imwrite(outFilename, finalImg)
# Example #2
# 0
    weights_filename = 'model_weights_' + BASE_LOG_NAME + '.h5'


# --- Script-level setup: load folds, build test chunks and train generator ---

print('Loading data...')

# Folder suffixes distinguishing input (grayscale) from ground-truth images.
x_sufix = '_GR'
y_sufix = '_GT'

train_folds, test_folds = load_dataset_folds(args.db, args.dbp)

# augmentation ?
if args.aug:       # Add the augmented folders
    # Iterate over a snapshot because we append to train_folds while looping.
    for f in list(train_folds):
        train_folds.append( util.rreplace(f, "/", "/aug_", 1) )

array_test_files = util.load_array_of_files(args.path, test_folds)
x_test, y_test = utilDataGenerator.generate_chunks(array_test_files, x_sufix, y_sufix, args.window, args.window)

if not args.test:
    # Training data is chunked lazily, page by page, to bound memory use.
    array_train_files = util.load_array_of_files(args.path, train_folds)
    train_data_generator = utilDataGenerator.LazyChunkGenerator(array_train_files, x_sufix, y_sufix, args.page, args.window, args.step)
    train_data_generator.shuffle()

    # Optionally resume from a saved position within the shuffled generator.
    if args.start_from > 0:
        train_data_generator.set_pos(args.start_from)


# Echo the effective configuration for the run log.
print('# Processing path:', args.path)
print('# Database:', args.db)
print('# Db param:', args.dbp)
print('# Train data:', len(train_data_generator) if not args.test else '--')
def main(args=None):
    """Entry point: load data, optionally train the SAE, then save outputs.

    NOTE(review): the *args* parameter is ignored — it is immediately
    overwritten by ``parse_menu()``.
    """
    args = parse_menu()

    # Folder suffixes distinguishing input (grayscale) from ground-truth images.
    x_sufix = '_GR'
    y_sufix = '_GT'

    weights_filename = define_weights_filename(args)

    print('Loading data...')

    train_folds, test_folds = load_dataset_folds(args.db, args.dbp)

    # Run data augmentation ?
    if args.aug == True:  # Add the augmented folders
        # Iterate over a snapshot because we append to train_folds while looping.
        for f in list(train_folds):
            train_folds.append(util.rreplace(f, '/', '/aug_', 1))

    array_test_files = util.load_array_of_files(args.path, test_folds)
    x_test, y_test = utilDataGenerator.generate_chunks(array_test_files,
                                                       x_sufix, y_sufix,
                                                       args.window,
                                                       args.window)

    if args.test == False:
        # Training data is chunked lazily, page by page, to bound memory use.
        array_train_files = util.load_array_of_files(args.path, train_folds)
        train_data_generator = utilDataGenerator.LazyChunkGenerator(
            array_train_files, x_sufix, y_sufix, args.page, args.window,
            args.step)
        train_data_generator.shuffle()

        # Optionally resume from a saved position within the shuffled generator.
        if args.start_from > 0:
            train_data_generator.set_pos(args.start_from)

    # Echo the effective configuration for the run log.
    print('# Processing path:', args.path)
    print('# Database:', args.db)
    print('# Db param:', args.dbp)
    print('# Train data:',
          len(train_data_generator) if args.test == False else '--')
    print('# Test data:', x_test.shape)
    print('# Augmentation:', args.aug)
    print('# Window size:', args.window)
    print('# Step size:', args.step)
    print('# Init weights:', args.loadmodel)
    print('# nb_filters:', args.nb_filters)
    print('# kernel size:', args.kernel)
    print('# Dropout:', args.dropout)
    print('# nb_super_epoch:', args.nb_super_epoch)
    print('# nb_pages:', args.page)
    print('# nb_epoch:', args.nb_epoch)
    print('# batch:', args.batch)
    print('# early_stopping_mode:', args.early_stopping_mode)
    print('# early_stopping_patience:', args.early_stopping_patience)
    print('# Threshold:', args.threshold)
    print('# Weights filename:', weights_filename)

    autoencoder = build_SAE_network(args, weights_filename)

    best_th = args.threshold

    if args.test == False:
        args.monitor = 'min'
        # Fit returns the threshold that performed best on validation data.
        best_th = utilFit.batch_fit_with_data_generator(
            autoencoder, train_data_generator, x_test, y_test, args,
            weights_filename)

        # Re-Load last weights
        autoencoder.load_weights(weights_filename)

    # Save output images
    args.modelpath = weights_filename
    args.threshold = best_th
    save_images(autoencoder, args, test_folds)