test_predictions = []

# 15x15 structuring element shared by the erode/dilate cleanup below.
KERNEL = np.ones((15, 15), np.uint8)

# Predict a mask for every test image, post-process it, and subtract the
# masked region from the source image.
for index, test_image_name in enumerate(test_image_names):
    sys.stdout.write("Predicting: {}/{} ...\r".format(index + 1,
                                                      len(test_image_names)))
    sys.stdout.flush()

    img = io.imread(os.path.join(path_to_test_images, test_image_name),
                    as_gray=True)
    # NOTE(review): skimage's as_gray=True typically yields floats already
    # in [0, 1]; dividing by 255 again may double-scale — confirm the input
    # format before relying on this.
    img = img / 255.
    img = reshape_image(img, unet_size)

    # Single-image batch through the network; keep the first (only) output.
    result = unet.predict_on_batch(img)
    mask = normalize_mask(result[0])
    mask = (mask * 255).astype('uint8')

    # Erode to drop small noise, then dilate to re-grow (and slightly
    # expand) the surviving mask regions.
    mask = cv2.erode(mask, KERNEL, iterations=2)
    mask = cv2.dilate(mask, KERNEL, iterations=3)
    # Add a trailing channel axis so the mask lines up with the image below.
    mask = np.expand_dims(mask, axis=2)

    mask = cv2.bitwise_not(mask)

    img = img[0]
    img = (img * 255).astype('uint8')

    # Saturating add, then zero out the pixels that ended up equal to the
    # inverted-mask value (the masked-out region).
    img = cv2.add(img, mask)
    img[img == mask] = 0

    # FIX: the accumulator declared above was never filled — each
    # iteration's result was computed and then silently discarded.
    test_predictions.append(img)
# Destination directory for the segmented training images.
save_path = SETTINGS_JSON['SEGMENTED_TRAIN_IMAGES_DIR']

# Assemble the U-Net and load its pretrained weights.
unet = UNet(
    input_size=(img_width, img_height, 1),
    n_filters=64,
    pretrained_weights=model_weights_name,
)
unet.build()

# Two independent generators over the same directory: one feeds the
# network, the other yields the matching source image for compositing.
src_gen = test_generator(train_path, img_size)
train_gen = test_generator(train_path, img_size)

# NOTE(review): "KERNAL" looks like a typo for the KERNEL defined earlier
# in this file (same shape and dtype); kept as-is because the loop below
# refers to this name.
KERNAL = np.ones((15, 15), dtype=np.uint8)

# Segment each training image: predict a mask, clean it up, and invert it
# for compositing with the matching source image. (The loop body continues
# beyond this excerpt.)
for src, img_name in tqdm(zip(src_gen, os.listdir(train_path)), leave=False):

    # Network output for the single-image batch; keep the first result.
    result = unet.predict_on_batch(src)
    result = result[0]
    img = normalize_mask(result)
    img = (img * 255).astype('uint8')

    # Erode to drop small noise, then dilate to re-grow the surviving
    # mask regions.
    img = cv2.erode(img, KERNAL, iterations=2)
    img = cv2.dilate(img, KERNAL, iterations=3)

    # Add a trailing channel axis so the mask lines up with the image.
    img = np.expand_dims(img, axis=2)

    img = cv2.bitwise_not(img)

    # Matching source image from the parallel generator.
    # NOTE(review): this pairing assumes os.listdir order matches the
    # generator's iteration order — confirm test_generator sorts the
    # directory the same way.
    train_img = next(train_gen)
    train_img = train_img[0]
    train_img = (train_img * 255).astype('uint8')