Example #1
def measure_SSIM_PSNRs(GT_dir, Gen_dir):
    """
      Assumes:
        * GT_dir contain ground-truths {filename.ext}
        * Gen_dir contain generated images {filename_gen.png}
        * Images are of same-size
    """
    GT_paths, Gen_paths = getPaths(GT_dir), getPaths(Gen_dir)
    ssims, psnrs = [], []
    for img_path in GT_paths:
        name_split = ntpath.basename(img_path).split('.')
        gen_path = os.path.join(Gen_dir,
                                name_split[0] + '_gen.png')  #+name_split[1])
        if (gen_path in Gen_paths):
            r_im = misc.imread(img_path)
            g_im = misc.imread(gen_path)
            assert r_im.shape == g_im.shape, "Images should be the same size"
            ssim = getSSIM(r_im, g_im)
            psnr = getPSNR(r_im, g_im)
            #print ("{0}, {1}: {2}".format(img_path,gen_path, ssim))
            #print ("{0}, {1}: {2}".format(img_path,gen_path, psnr))
            ssims.append(ssim)
            psnrs.append(psnr)
    return np.array(ssims), np.array(psnrs)
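
# Minimal usage sketch (the directory paths below are hypothetical placeholders,
# not from the original example):
ssims, psnrs = measure_SSIM_PSNRs("data/test/hr/", "data/output/")
print("SSIM >> Mean: {0} std: {1}".format(np.mean(ssims), np.std(ssims)))
print("PSNR >> Mean: {0} std: {1}".format(np.mean(psnrs), np.std(psnrs)))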
Example #2
def testGenerator():
    # test all images in the directory
    assert exists(test_dir), "local image path doesn't exist"
    imgs = []
    for p in getPaths(test_dir):
        # read and scale inputs
        img = Image.open(p).resize((im_w, im_h))
        img = np.array(img) / 255.
        img = np.expand_dims(img, axis=0)
        # inference
        out_img = model.predict(img)
        # thresholding
        out_img[out_img > 0.5] = 1.
        out_img[out_img <= 0.5] = 0.
        print("tested: {0}".format(p))
        # get filename
        img_name = ntpath.basename(p).split('.')[0] + '.bmp'
        # save individual output masks
        ROs = np.reshape(out_img[0, :, :, 0], (im_h, im_w))
        FVs = np.reshape(out_img[0, :, :, 1], (im_h, im_w))
        HDs = np.reshape(out_img[0, :, :, 2], (im_h, im_w))
        RIs = np.reshape(out_img[0, :, :, 3], (im_h, im_w))
        WRs = np.reshape(out_img[0, :, :, 4], (im_h, im_w))
        Image.fromarray(np.uint8(ROs * 255.)).save(RO_dir + img_name)
        Image.fromarray(np.uint8(FVs * 255.)).save(FB_dir + img_name)
        Image.fromarray(np.uint8(HDs * 255.)).save(HD_dir + img_name)
        Image.fromarray(np.uint8(RIs * 255.)).save(RI_dir + img_name)
        Image.fromarray(np.uint8(WRs * 255.)).save(WR_dir + img_name)
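
# The two masked assignments above binarize the sigmoid output in place; an
# equivalent one-step formulation, sketched with a random array standing in for
# model.predict(img) (shape and the 0.5 threshold taken from the example above):
import numpy as np
im_w, im_h = 320, 240  # the test resolution used elsewhere in these examples
out_img = np.random.rand(1, im_h, im_w, 5).astype(np.float32)  # stand-in for model output
out_img = (out_img > 0.5).astype(np.float32)  # 1.0 where score > 0.5, else 0.0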
Example #3
def measure_UIQMs(dir_name):
    paths = getPaths(dir_name)
    uqims = []
    for img_path in paths:
        im = misc.imread(img_path)
        uqims.append(getUIQM(im))
    return np.array(uqims)
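
# getPaths comes from utils.data_utils and is not shown in these snippets.
# Example #9 indexes its result with a NumPy index array, so it presumably
# returns an np.array of image paths; a rough sketch under that assumption:
import os
import numpy as np

def getPaths(data_dir):
    # collect common image files from a directory (the extension list is a guess)
    exts = ('.jpg', '.jpeg', '.png', '.bmp')
    fs = sorted(os.listdir(data_dir))
    return np.array([os.path.join(data_dir, f) for f in fs if f.lower().endswith(exts)])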
Example #4
def testGenerator():
    # test all images in the directory
    assert os.path.exists(test_dir), "local image path doesn't exist"
    imgs = []
    for p in getPaths(test_dir):
        # read and scale inputs
        img = data.imread(p, as_grey=False)
        img = trans.resize(img, im_shape)
        img = np.expand_dims(img, axis=0)
        # inference
        out_img = model.predict(img)
        # thresholding
        out_img[out_img>0.5] = 1.
        out_img[out_img<=0.5] = 0.
        print ("tested: {0}".format(p))
        # get filename
        img_name = ntpath.basename(p)
        img_name = img_name.split('.')[0]
        # save individual output masks
        ROs = np.reshape(out_img[0,:,:,0], (im_h, im_w))
        FVs = np.reshape(out_img[0,:,:,1], (im_h, im_w))
        HDs = np.reshape(out_img[0,:,:,2], (im_h, im_w))
        RIs = np.reshape(out_img[0,:,:,3], (im_h, im_w))
        WRs = np.reshape(out_img[0,:,:,4], (im_h, im_w))
        misc.imsave(RO_dir+img_name+'.bmp', ROs)
        misc.imsave(FB_dir+img_name+'.bmp', FVs)
        misc.imsave(HD_dir+img_name+'.bmp', HDs)
        misc.imsave(RI_dir+img_name+'.bmp', RIs)
        misc.imsave(WR_dir+img_name+'.bmp', WRs)
        # combine the masks in a single RGB and save
        mask_rgb = get_rgb_from_masks(HDs, ROs, WRs, RIs, FVs)
        misc.imsave(samples_dir+img_name+'.bmp', mask_rgb)
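
# get_rgb_from_masks is not defined in these snippets; one plausible sketch that
# overlays the five binary masks with illustrative colors (the repo's actual
# color coding may differ):
import numpy as np

def get_rgb_from_masks(HDs, ROs, WRs, RIs, FVs):
    h, w = HDs.shape
    rgb = np.zeros((h, w, 3), dtype=np.uint8)
    # (mask, (R, G, B)) pairs; colors chosen only for visualization
    layers = [(HDs, (255, 0, 0)), (ROs, (0, 255, 0)), (WRs, (0, 0, 255)),
              (RIs, (255, 255, 0)), (FVs, (255, 0, 255))]
    for mask, color in layers:
        rgb[mask > 0.5] = color
    return rgb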
Example #5
def measure_UIQMs(dir_name, file_ext=None):
    """
      # measured in RGB
      Assumes:
        * dir_name contain generated images 
        * to evaluate on all images: file_ext = None 
        * to evaluate images that ends with "_SESR.png" or "_En.png"  
            * use file_ext = "_SESR.png" or "_En.png" 
    """
    if file_ext:
        paths = [p for p in getPaths(dir_name) if p.endswith(file_ext)]
    else:
        paths = getPaths(dir_name)
    uqims = []
    for img_path in paths:
        #print (paths)
        im = misc.imresize(misc.imread(img_path), (im_h, im_w))
        uqims.append(getUIQM(im))
    return np.array(uqims)
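
# Minimal usage sketch (the directory path and suffix are hypothetical placeholders):
gen_uqims = measure_UIQMs("data/output/", file_ext="_SESR.png")
print("UIQM >> Mean: {0} std: {1}".format(np.mean(gen_uqims), np.std(gen_uqims)))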
Example #6
def measure_PSNR(GT_dir, Gen_dir):
    """
      # measured in lightness channel 
      Assumes:
        * GT_dir contain ground-truths {filename.ext}
        * Gen_dir contain generated images {filename_SESR.png}
    """
    GT_paths, Gen_paths = getPaths(GT_dir), getPaths(Gen_dir)
    psnrs = []
    for img_path in GT_paths:
        name_split = ntpath.basename(img_path).split('.')
        gen_path = os.path.join(Gen_dir, name_split[0] + '_SESR.png')
        ## >> To evaluate enhancement only, use:
        #gen_path = os.path.join(Gen_dir, name_split[0]+'_En.png')
        if (gen_path in Gen_paths):
            r_im = misc.imresize(misc.imread(img_path, mode='L'), (im_h, im_w))
            g_im = misc.imresize(misc.imread(gen_path, mode='L'), (im_h, im_w))
            assert r_im.shape == g_im.shape, "Images should be the same size"
            psnr = getPSNR(r_im, g_im)
            psnrs.append(psnr)
    return np.array(psnrs)
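
# getPSNR comes from utils.ssm_psnr_utils and is not shown in these snippets;
# a standard 8-bit PSNR computation looks roughly like this:
import numpy as np

def getPSNR(ref, gen):
    mse = np.mean((ref.astype(np.float64) - gen.astype(np.float64)) ** 2)
    if mse == 0:
        return float('inf')  # identical images
    return 20.0 * np.log10(255.0 / np.sqrt(mse))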
Example #7
# Maintainer: Jahid (email: [email protected])
# Interactive Robotics and Vision Lab (http://irvlab.cs.umn.edu/)
# Any part of this repo can be used for academic and educational purposes only
"""
import os
import time
import ntpath
import numpy as np
from scipy import misc
from keras.models import model_from_json
## local libs
from utils.data_utils import getPaths, preprocess, deprocess

## for testing arbitrary local data
data_dir = "data/test/low_res_8x/"
test_paths = getPaths(data_dir)
print("{0} test images are loaded".format(len(test_paths)))

## load specific model
model_name = "srdrm"
ckpt_name = "model_30_"
checkpoint_dir = os.path.join("checkpoints/saved/8x/", model_name)
model_h5 = os.path.join(checkpoint_dir, ckpt_name + ".h5")
model_json = os.path.join(checkpoint_dir, ckpt_name + ".json")
# sanity
assert (os.path.exists(model_h5) and os.path.exists(model_json))
# load json and create model
with open(model_json, "r") as json_file:
    loaded_model_json = json_file.read()
generator = model_from_json(loaded_model_json)
# load weights into new model
Example #8
import ntpath
import numpy as np
from scipy import misc
## local libs
from utils.data_utils import getPaths
from utils.uiqm_utils import getUIQM
from utils.ssm_psnr_utils import getSSIM, getPSNR

# all measurements are computed at a common resolution
im_w, im_h = 320, 240

## data paths
REAL_im_dir = "data/sample_test_ufo/lrd/"  # real/input im-dir with {f.ext}
GEN_im_dir = "data/output/keras_out/"  # generated im-dir with {f_gen.ext}
GTr_im_dir = "data/sample_test_ufo/hr/"  # ground truth im-dir with {f.ext}
REAL_paths, GEN_paths = getPaths(REAL_im_dir), getPaths(GEN_im_dir)


## measures UIQM for all images in a directory
def measure_UIQMs(dir_name, file_ext=None):
    """
      # measured in RGB
      Assumes:
        * dir_name contain generated images 
        * to evaluate on all images: file_ext = None 
        * to evaluate images that ends with "_SESR.png" or "_En.png"  
            * use file_ext = "_SESR.png" or "_En.png" 
    """
    if file_ext:
        paths = [p for p in getPaths(dir_name) if p.endswith(file_ext)]
    else:
Example #9
# checkpoint particulars
ckpt = tf.train.get_checkpoint_state(EXPERIMENT_DIR)
if ckpt and ckpt.model_checkpoint_path:
    print "Restoring previous model..."
    try:
        saver.restore(sess, ckpt.model_checkpoint_path)
        print "Model restored"
    except:
        print "Could not restore model"
        pass
step = int(sess.run(global_step))
merged_summary_op = tf.summary.merge_all()

# feed data to the graph's input
trainA_paths = getPaths(data_dir + DATA + "/trainA/")  # underwater photos
trainB_paths = getPaths(data_dir + DATA + "/trainB/")  # normal photos (ground truth)
val_paths = getPaths(data_dir + DATA + "/validation/")
num_train, num_val = len(trainA_paths), len(val_paths)
print("{0} training pairs\n".format(len(trainB_paths)))

# training loop begins
TOTAL_STEP = (EPOCHS * num_train) // BATCH_SIZE
while step < TOTAL_STEP:
    # pick random images every time for D
    for itr in range(n_critic):
        idx = np.random.choice(np.arange(num_train), BATCH_SIZE, replace=False)
        batchA_paths = trainA_paths[idx]
        batchB_paths = trainB_paths[idx]
        batchA_images = np.empty((BATCH_SIZE, 256, 256, 3), dtype=np.float32)
Example #10
im_shape = (im_h, im_w, 3)
mask_shape = (im_h, im_w)


# for reading and scaling input images
def read_and_bin(im_path):
    img = data.imread(im_path, as_grey=True)
    img = trans.resize(img, mask_shape)
    #img[img > 0.5] = 1
    #img[img <= 0.5] = 0
    return img


# accumulate F1/iou values in the lists
Ps, Rs, F1s, IoUs = [], [], [], []
for p in getPaths(real_mask_dir):
    img_name = ntpath.basename(p)
    img_name = img_name.split('.')[0]
    gen_path = gen_mask_dir + img_name + ".bmp"
    real, gen = read_and_bin(p), read_and_bin(gen_path)
    if (np.sum(real) > 0):
        precision, recall, F1 = db_eval_boundary(real, gen)
        iou = IoU_bin(real, gen)
        print("{0}:>> P: {1}, R: {2}, F1: {3}, IoU: {4}".format(
            img_name, precision, recall, F1, iou))
        Ps.append(precision)
        Rs.append(recall)
        F1s.append(F1)
        IoUs.append(iou)

# print F-score and mIOU in [0, 100] scale
Example #11
## input/output shapes
im_res = (320, 240)


# for reading and scaling input images
def read_and_bin(im_path):
    img = Image.open(im_path).resize(im_res)
    img = np.array(img) / 255.
    img[img >= 0.5] = 1
    img[img < 0.5] = 0
    return img


# accumulate F1/iou values in the lists
Ps, Rs, F1s, IoUs = [], [], [], []
gen_paths = sorted(getPaths(gen_mask_dir))
real_paths = sorted(getPaths(real_mask_dir))
for gen_p, real_p in zip(gen_paths, real_paths):
    gen, real = read_and_bin(gen_p), read_and_bin(real_p)
    if (np.sum(real) > 0):
        precision, recall, F1 = db_eval_boundary(real, gen)
        iou = IoU_bin(real, gen)
        #print ("{0}:>> P: {1}, R: {2}, F1: {3}, IoU: {4}".format(gen_p, precision, recall, F1, iou))
        Ps.append(precision)
        Rs.append(recall)
        F1s.append(F1)
        IoUs.append(iou)

# print F-score and mIOU in [0, 100] scale
print("Avg. F: {0}".format(100.0 * np.mean(F1s)))
print("Avg. IoU: {0}".format(100.0 * np.mean(IoUs)))
Example #12
# Any part of this repo can be used for academic and educational purposes only
"""
## python libs
import os
import ntpath
import numpy as np
from scipy import misc
## local libs
from utils.data_utils import getPaths
from utils.uqim_utils import getUIQM
from utils.ssm_psnr_utils import getSSIM, getPSNR

## data paths
GTr_im_dir = 'data/test/hr/'  # ground truth im-dir with {f.ext}
GEN_im_dir = "data/output/4x/srdrm-gan/"  # generated im-dir with {f_gen.png}
GEN_paths = getPaths(GEN_im_dir)


## measures UIQM for all images in a directory
def measure_UIQMs(dir_name):
    paths = getPaths(dir_name)
    uqims = []
    for img_path in paths:
        im = misc.imread(img_path)
        uqims.append(getUIQM(im))
    return np.array(uqims)


## measures SSIM and PSNR for matched GT/generated image pairs
def measure_SSIM_PSNRs(GT_dir, Gen_dir):
    """