def resolve_and_plot(lr_image_path, hr_image_path, save_dir, img_id):
    """Super-resolve one LR image with both generators, save the outputs,
    and write a 2x2 comparison figure (LR / HR / SRResNet / SRGAN).

    Args:
        lr_image_path: path to the low-resolution input image.
        hr_image_path: path to the ground-truth high-resolution image.
        save_dir: directory receiving the resolved images and the plot.
        img_id: identifier used in the output file names.
    """
    lr = load_image(lr_image_path)
    hr = load_image(hr_image_path)

    # NOTE(review): pre_generator / gan_generator are module-level globals.
    pre_sr = resolve_single(pre_generator, lr)
    gan_sr = resolve_single(gan_generator, lr)

    save_image(pre_sr, save_dir, 'resolved_SRResNet', str(img_id))
    save_image(gan_sr, save_dir, 'resolved_SRGAN', str(img_id))

    images = [lr, hr, pre_sr, gan_sr]
    titles = [
        'Low Resolution', 'High Resolution', 'Super Resolution (PRE)',
        'Super Resolution (GAN)'
    ]
    positions = [1, 2, 3, 4]

    plt.figure(figsize=(20, 20))

    for img, title, pos in zip(images, titles, positions):
        plt.subplot(2, 2, pos)
        # Bug fix: imshow was commented out, so the saved figure contained
        # only titles over empty axes.
        plt.imshow(img)
        plt.title(title)
        plt.xticks([])
        plt.yticks([])

    plt.savefig(
        os.path.join(save_dir, 'comparison_plots',
                     'test_img_' + str(img_id) + '.png'))
    plt.close()
Пример #2
0
def test(depth, scale, inputs_path, outputs_path, is_validate, weight_file):
    """Run an EDSR model in inference mode or validation mode.

    Args:
        depth: number of residual blocks (also part of the weights dir name).
        scale: super-resolution scale factor.
        inputs_path: directory of input images.
        outputs_path: directory for output images.
        is_validate: 0 = pure inference; otherwise compute PSNR/SSIM against
            LR/HR pairs generated from the inputs.
        weight_file: weights file name inside the weights directory.
    """
    weights_dir = f'weights/edsr-{depth}-x{scale}'
    weights_file = os.path.join(weights_dir, weight_file)

    model = edsr(scale=scale, num_res_blocks=depth)
    model.load_weights(weights_file)
    print("[*] weights loaded: ", weight_file)

    if is_validate == 0:
        print("[*] inferring, scale = ", scale)
        image_path_list = get_image_path(inputs_path)
        total_num = len(image_path_list)
        # Idiom fix: enumerate replaces the manual counter.
        for cnt, img_path in enumerate(image_path_list, 1):
            t_start = time.time()
            img_name = get_image_name(img_path)
            print("[*] processing[%d/%d]:%s" % (cnt, total_num, img_name))
            lr_img = load_image(img_path)
            print("   [*] low res image shape = ", lr_img.shape)
            sr_img = resolve_single(model, lr_img)
            img_name_before, img_name_after = divide_image_name(img_name)
            # Bug fix: the suffix hard-coded "4x" even though `scale` is a
            # parameter (identical output for the common scale=4 case).
            output_img_name = img_name_before + f"_EDSR_{scale}x" + img_name_after
            output_img_path = os.path.join(outputs_path, output_img_name)
            print("output_img_name = ", output_img_name)
            cv2.imwrite(output_img_path, sr_img)
            t_end = time.time()
            print("   [*] done! Time = %.1fs" % (t_end - t_start))
    else:
        print("   image_name                   PSNR/SSIM        PSNR/SSIM (higher,better)")
        image_path_list = get_image_path(inputs_path)
        for img_path in image_path_list:
            img_name = get_image_name(img_path)
            raw_img = cv2.imread(img_path)
            # Generate an LR/HR pair from the original image.
            # Bug fix: use the `scale` parameter instead of a hard-coded 4.
            lr_img, hr_img = create_lr_hr_pair(raw_img, scale)
            sr_img = resolve_single(model, lr_img)
            # Bicubic baseline at the same scale.
            bic_img = imresize_np(lr_img, scale).astype(np.uint8)
            str_format = "  [{}] Bic={:.2f}db/{:.2f}, SR={:.2f}db/{:.2f}"
            sr_img = sr_img.numpy()
            # Metrics are computed on the Y channel (YCbCr).
            print(str_format.format(
                img_name + ' ' * max(0, 20 - len(img_name)),
                calculate_psnr(rgb2ycbcr(bic_img), rgb2ycbcr(hr_img)),
                calculate_ssim(rgb2ycbcr(bic_img), rgb2ycbcr(hr_img)),
                calculate_psnr(rgb2ycbcr(sr_img), rgb2ycbcr(hr_img)),
                calculate_ssim(rgb2ycbcr(sr_img), rgb2ycbcr(hr_img))))
            # Removed: dead code that computed output paths for image writes
            # which were commented out, plus an unused `b = rgb2ycbcr(sr_img)`.
Пример #3
0
def upscale_image(filePath, model):
    """Upscale the image at *filePath* twice with *model*, shrinking to
    256x256 between passes, then overwrite the file with a grayscale result."""
    source = np.asarray(Image.open(filePath).convert('RGB'))
    first_pass = resolve_single(model, source)
    shrunk = Image.fromarray(np.asarray(first_pass)).resize((256, 256))
    second_pass = resolve_single(model, np.asarray(shrunk))
    final = Image.fromarray(np.asarray(second_pass)).convert('L')
    final.save(filePath)
def main():
    """Super-resolve every frame of a video with SRGAN.

    Usage: script <lr_video_path> <hr_video_path>
    """
    # Bug fix: indexing sys.argv raises IndexError, not AssertionError, so
    # the original try/except handlers could never fire and the variables
    # could stay unbound; validate argc explicitly instead.
    if len(sys.argv) < 3:
        print("Usage: <lr_video_path> <hr_video_path>")
        return
    LR_VID_LOCATION = sys.argv[1]
    HR_VID_LOCATION = sys.argv[2]

    gpus = tf.config.experimental.list_physical_devices('GPU')
    if gpus:
        try:
            # Currently, memory growth needs to be the same across GPUs
            for gpu in gpus:
                tf.config.experimental.set_memory_growth(gpu, True)
            logical_gpus = tf.config.experimental.list_logical_devices('GPU')
            print(len(gpus), "Physical GPUs,", len(logical_gpus),
                  "Logical GPUs")
        except RuntimeError as e:
            # Memory growth must be set before GPUs have been initialized
            print(e)

    # SRGAN generator with pre-trained weights.
    srgan_model = generator()
    srgan_model.load_weights('weights/srgan/gan_generator.h5')

    cap = cv2.VideoCapture(LR_VID_LOCATION)

    success, frame = cap.read()
    out = None
    if success:
        # Probe the first frame to size the output video.
        upscale = resolve_single(srgan_model, frame)
        dim = (int(upscale.shape[1]), int(upscale.shape[0]))
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        fps = cap.get(cv2.CAP_PROP_FPS)
        out = cv2.VideoWriter(HR_VID_LOCATION, fourcc, fps, dim)

        # Bug fix: the original reused `frame` as an int counter, clobbering
        # the image (the first resolve ran on the integer 0) and later
        # incrementing a numpy frame; keep a dedicated counter.
        frame_number = 0
        while success:
            upscale = resolve_single(srgan_model, frame).numpy()
            out.write(upscale)
            frame_number += 1
            print("Buffering Frame:", frame_number)
            success, frame = cap.read()

    cap.release()
    # Bug fix: `out` was undefined (NameError) when the first read failed.
    if out is not None:
        out.release()
def resolve_and_tensorboard_plot(our_model,
                                 lr_image_paths,
                                 title='',
                                 make_input_img_bw=False):
    """Resolve each LR image with *our_model* and log a (LR, SR) comparison
    figure to TensorBoard under *title*."""
    samples = []
    for path in lr_image_paths:
        low_res = load_image(path, make_input_img_bw)
        samples.append((low_res, resolve_single(our_model, low_res)))

    fig = plot_samples(samples,
                       interpolate_lr=True,
                       input_img_bw=make_input_img_bw)

    buf = io.BytesIO()
    plt.savefig(buf, format='png')
    # Close the figure so it is not also rendered inline in a notebook.
    plt.close(fig)
    buf.seek(0)

    # Decode the PNG bytes into a TF image tensor and add a batch dimension.
    image = tf.expand_dims(
        tf.image.decode_png(buf.getvalue(), channels=4), 0)

    with tb_file_writer.as_default():
        tf.summary.image(title, image, step=0)
Пример #6
0
def resolve_and_save(lr_image_path):
    """Super-resolve *lr_image_path* with the global SRGAN generator and save
    the result to the fixed path SR-output/sr_image.png."""
    lr = load_image(lr_image_path)
    gan_sr = resolve_single(gan_generator, lr)
    # Removed: an `img_name` derived from the path was computed but never
    # used — the output path below is fixed.
    save_image(gan_sr, 'SR-output/sr_image.png')
Пример #7
0
def main():
    """Super-resolve a single image with SRGAN and write it as a PNG.

    Usage: script <lr_image_path> <hr_image_path>
    """
    # Bug fix: `if sys.argv[1]:` raises IndexError when the argument is
    # missing, and the locations stayed unbound on empty strings; validate
    # argc explicitly.
    if len(sys.argv) < 3:
        print("Usage: <lr_image_path> <hr_image_path>")
        return
    LR_IMG_LOCATION = sys.argv[1]
    HR_IMG_LOCATION = sys.argv[2]

    gpus = tf.config.experimental.list_physical_devices('GPU')
    if gpus:
        try:
            # Currently, memory growth needs to be the same across GPUs
            for gpu in gpus:
                tf.config.experimental.set_memory_growth(gpu, True)
            logical_gpus = tf.config.experimental.list_logical_devices('GPU')
            print(len(gpus), "Physical GPUs,", len(logical_gpus),
                  "Logical GPUs")
        except RuntimeError as e:
            # Memory growth must be set before GPUs have been initialized
            print(e)

    # SRGAN generator with pre-trained weights.
    srgan_model = generator()
    srgan_model.load_weights('weights/srgan/gan_generator.h5')

    low_res = cv2.imread(LR_IMG_LOCATION)
    high_res = resolve_single(srgan_model, low_res).numpy()

    cv2.imwrite(HR_IMG_LOCATION, high_res, [cv2.IMWRITE_PNG_COMPRESSION, 9])
Пример #8
0
def test():
    """Restore the latest EDSR checkpoint, evaluate PSNR/SSIM on the video
    validation set, compare against bilinear upscaling, and display one
    super-resolved sample frame.

    Relies on module-level globals: scale, depth, channels.
    """
    model = edsr(scale=scale, num_res_blocks=depth, num_filters=channels)
    checkpoint = tf.train.Checkpoint(step=tf.Variable(0),
                                     psnr=tf.Variable(-1.0),
                                     optimizer=Adam(1e-04),
                                     model=model)
    manager = tf.train.CheckpointManager(checkpoint=checkpoint,
                                         directory='./ckpt',
                                         max_to_keep=3)
    restore(checkpoint, manager)

    # One pass over the validation split, no augmentation.
    valid_ds = video_ds(subset='valid').dataset(batch_size=1,
                                                random_transform=False,
                                                repeat_count=1)

    psnr, ssim = evaluate(checkpoint.model, valid_ds)
    print('PSNR:%.3f, SSIM:%.3f' % (psnr, ssim))

    psnr_b = bilinear_upscale('../image_240', '../image_960', scale=scale)
    print('bilinear upscale PSNR:%.3f' % psnr_b)

    sample = load_image('../image_240/frame1500.jpg')
    plt.imshow(resolve_single(checkpoint.model, sample))
    plt.show()
Пример #9
0
def main():
    """Live webcam demo: downscale each frame to `scale` percent of its size,
    SRGAN-upscale it back, and display input / crop / SR windows until 'q'.

    Usage: script [scale_percent]
    """
    scale = 30
    # Bug fix: `sys.argv[1]` raised IndexError when no argument was given.
    if len(sys.argv) > 1 and sys.argv[1]:
        scale = int(sys.argv[1])

    gpus = tf.config.experimental.list_physical_devices('GPU')
    if gpus:
        try:
            # Currently, memory growth needs to be the same across GPUs
            for gpu in gpus:
                tf.config.experimental.set_memory_growth(gpu, True)
            logical_gpus = tf.config.experimental.list_logical_devices('GPU')
            print(len(gpus), "Physical GPUs,", len(logical_gpus),
                  "Logical GPUs")
        except RuntimeError as e:
            # Memory growth must be set before GPUs have been initialized
            print(e)

    # SRGAN generator with pre-trained weights.
    srgan_model = generator()
    srgan_model.load_weights('weights/srgan/gan_generator.h5')

    cap = cv2.VideoCapture(0)
    ret, frame = cap.read()

    # Bug fix: the original looped on `while (True)` and crashed on
    # `frame.shape` when the camera returned no frame.
    while ret:
        # Dimensions of the downscaled ("cropped") image, from the previous
        # frame (matches the original ordering).
        small_dim = (int(frame.shape[1] * (scale / 100)),
                     int(frame.shape[0] * (scale / 100)))

        # Capture frame-by-frame.
        ret, frame = cap.read()
        if not ret:
            break

        # Downscale, then super-resolve.
        crop = cv2.resize(frame, small_dim, interpolation=cv2.INTER_AREA)
        upscale = resolve_single(srgan_model, crop).numpy()

        # Resize both back to the input frame's size for side-by-side display.
        full_dim = (int(frame.shape[1]), int(frame.shape[0]))
        crop = cv2.resize(crop, full_dim, interpolation=cv2.INTER_AREA)
        upscale = cv2.resize(upscale, full_dim, interpolation=cv2.INTER_AREA)

        cv2.imshow('input', frame)
        cv2.imshow('cropped', crop)
        cv2.imshow('srgan', upscale)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # When everything is done, release the capture.
    cap.release()
    cv2.destroyAllWindows()
Пример #10
0
def resolve_and_plot(model_fine_tuned, lr_image_path):
    """Super-resolve *lr_image_path* with the fine-tuned model and present the
    result in the Streamlit UI together with a download link."""
    low_res = load_image(lr_image_path)
    upscaled = resolve_single(model_fine_tuned, low_res).numpy()
    st.info('Image Uploaded Successfully')
    st.text('Converted High Resolution Image:')
    st.image(upscaled, width=300)
    downloadable = Image.fromarray(upscaled)
    st.markdown(get_image_download_link(downloadable), unsafe_allow_html=True)
    def get_upscaled_frame(self):
        """Read one frame from self.video, shrink it to 30 % of its size,
        SRGAN-upscale it, and return the result as JPEG bytes.

        Returns:
            bytes: JPEG-encoded upscaled frame.
        """
        # NOTE(review): `success` is ignored — if the read fails, `image` is
        # None and the shape access below raises AttributeError; confirm
        # callers guarantee an open, non-exhausted stream.
        success, image = self.video.read()
        # Downscale to 30 % before super-resolving.
        scale = 30
        width = int(image.shape[1] * (scale / 100))
        height = int(image.shape[0] * (scale / 100))
        dim = (width, height)

        crop = cv2.resize(image, dim, interpolation=cv2.INTER_AREA)

        # `srgan_model` is presumably a module-level global, not an attribute.
        upscale = resolve_single(srgan_model, crop).numpy()

        ret, jpeg = cv2.imencode('.jpg', upscale)
        return jpeg.tobytes()
Пример #12
0
def resolve_and_plot(lr_image_path):
    """Plot the LR input next to the PRE and GAN super-resolved outputs on a
    2x2 grid and save the figure as test.png."""
    low_res = load_image(lr_image_path)
    panels = [
        ('LR', low_res, 1),
        ('SR (PRE)', resolve_single(pre_generator, low_res), 3),
        ('SR (GAN)', resolve_single(gan_generator, low_res), 4),
    ]

    plt.figure(figsize=(20, 20))

    for title, img, pos in panels:
        plt.subplot(2, 2, pos)
        plt.imshow(img)
        plt.title(title)
        plt.xticks([])
        plt.yticks([])

    plt.savefig('test.png')


#resolve_and_plot('demo/0869x4-crop.png')
Пример #13
0
def results():
    """Flask view: accept an uploaded image, super-resolve it with the global
    model, save the output, and render the results page.

    Returns None (implicitly, like the original) for non-POST requests or
    requests without files.
    """
    if request.method != "POST":
        return
    if not request.files:
        return

    upload = request.files["file"]
    filename = secure_filename(upload.filename)
    file_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)
    upload.save(file_path)

    sr_tensor = resolve_single(model, load_image(file_path))
    Image.fromarray(sr_tensor.numpy(), 'RGB').save(
        os.path.join(OUTPUT_FOLDER, filename))

    return render_template('results.html', filename=filename)
def plot_some_tests():
    """Translate a validation image by several sub-pixel offsets, downsample,
    super-resolve, and write the translated / predicted / bicubic images."""
    img_address = "../dataset/DIV2K_valid_HR/0892.png"
    eval_model = load_pre_trained_model()
    img = np.array(Image.open(img_address)).astype(float)
    h, w, _ = img.shape
    cv2.imwrite("data.png", img)
    # Horizontal sub-pixel translation amounts to test (unused enumerate
    # index removed).
    for t in [0, 0.25, 1]:
        img_test = ffttranslate(img, t)
        img_test = down_sampling(img_test)
        pred = K.eval(resolve_single(eval_model, img_test))
        # Bicubic upscale back to the original size as a baseline.
        pred2 = cv2.resize(img_test, (w, h), interpolation=cv2.INTER_CUBIC)
        cv2.imwrite("VT_" + str(t) + ".png", img_test)
        cv2.imwrite("Pred_" + str(t) + ".png", pred)
        cv2.imwrite("base_" + str(t) + ".png", pred2)
Пример #15
0
def generateSR(lr_image_path):
    """Super-resolve an image and save it under a user-chosen name/format.

    Prompts on stdin for the output name and format (1 = .jpg, 2 = .png;
    any other option falls back to .png).
    """
    lr = np.array(Image.open(lr_image_path))
    sr = resolve_single(model, lr)
    img = Image.fromarray(sr.numpy(), 'RGB')
    # Removed: unused `n = img.size`.
    name = input("Enter image name : ")
    ext = int(input("Choose format option 1 or 2 ( 1 -> .jpg , 2 -> .png) : "))
    # Deduplicated the save/print logic the original repeated per branch.
    if ext == 1:
        extension = ".jpg"
    elif ext == 2:
        extension = ".png"
    else:
        print("Invalid option. Choosing .png as default...")
        extension = ".png"
    out_path = "./outputs/" + name + extension
    img.save(out_path)
    print("Image saved at " + out_path)
Пример #16
0
    def resolve_and_plot(self, lr_image_path, idx):
        """Super-resolve `<lr_image_path>.png` with the checkpointed model and
        save an LR/SR comparison figure named `<lr_image_path>_<idx>.png`."""
        low_res = load_image(lr_image_path + '.png')
        super_res = resolve_single(self.checkpoint.model, low_res)

        plt.figure(figsize=(20, 20))

        panels = zip([low_res, super_res], ['LR', 'SR (PRE)'], [1, 3])
        for img, title, pos in panels:
            plt.subplot(2, 2, pos)
            plt.imshow(img)
            plt.title(title)
            plt.xticks([])
            plt.yticks([])

        plt.savefig('%s_%d.png' % (lr_image_path, idx))
 def resolve_and_plot(self, lr, fileName):
     """Super-resolve `lr` with the GAN generator, save the rendered plot to
     demo/HighQuality/<fileName>, and return the saved file's bytes."""
     gan_sr = resolve_single(self.gan_generator, lr)
     plt.imshow(gan_sr, interpolation='nearest')
     plt.axis('off')
     print('File name si ----------', fileName)
     out_path = os.path.join(self.CWD_PATH, 'demo', 'HighQuality', fileName)
     # Bug fix: `papertype` and `frameon` were deprecated in Matplotlib 3.3
     # and removed in 3.5+, so savefig raised TypeError; drop them.
     plt.savefig(out_path,
                 dpi=None,
                 facecolor='w',
                 edgecolor='w',
                 orientation='portrait',
                 format=None,
                 transparent=False,
                 bbox_inches='tight',
                 pad_inches=0.0,
                 metadata=None)
     # Bug fix: the original opened the file and never closed it (handle
     # leak); use a context manager.
     with open(out_path, "rb") as saved:
         return saved.read()
Пример #18
0
def generate():
    """Flask endpoint: take raw PNG bytes from the request body, super-resolve
    them with the global GAN generator, and return the output file name."""
    global gan_generator

    raw = request.get_data()
    with open("input.png", 'wb') as output:
        output.write(raw)

    gan_sr = resolve_single(gan_generator, load_image("input.png"))

    # Timestamp the output file to avoid collisions.
    outputfile = 'output_%s.png' % int(time.time())
    imsave(outputfile, gan_sr.numpy())

    return jsonify({'result': outputfile})
Пример #19
0
def supres_generator(list_of_param_dict):
    """Generator: super-resolve each dict's 'rgb_10' image, sharpen it, and
    downscale to half size.

    Each yielded dict gains:
        'rgb_5': uint8 image, clipped to [1, 255];
        'transf_5': copy of 'transf_10' with entries 0 and 4 halved
            (presumably a geo-transform adjusted for the new pixel grid —
            TODO confirm index semantics).
    """
    import tensorflow as tf
    import os
    import numpy as np
    # NOTE(review): enabling GPU memory growth and then forcing
    # CUDA_VISIBLE_DEVICES=-1 (CPU-only) is contradictory; the env var wins.
    physical_devices = tf.config.experimental.list_physical_devices('GPU')
    # Bug fix: indexing [0] raised IndexError on machines without a GPU.
    if physical_devices:
        tf.config.experimental.set_memory_growth(physical_devices[0], True)
    os.environ['CUDA_VISIBLE_DEVICES'] = '-1'

    from model import generator
    from model import resolve_single
    from skimage.transform import rescale
    from skimage.filters import unsharp_mask

    weights_dir = 'weights'

    # NOTE(review): gan_generator is loaded but never used below.
    pre_generator = generator()
    gan_generator = generator()
    # Idiom fix: replaced a lambda assigned to a name (PEP 8 E731) with
    # direct os.path.join calls.
    pre_generator.load_weights(os.path.join(weights_dir, 'pre_generator.h5'))
    gan_generator.load_weights(os.path.join(weights_dir, 'gan_generator.h5'))

    for param_dict in list_of_param_dict:
        sharpened = unsharp_mask(
            resolve_single(pre_generator, param_dict['rgb_10']),
            radius=3,
            amount=1,
            multichannel=True,
        )
        rgb5 = (rescale(sharpened,
                        0.5,
                        multichannel=True,
                        anti_aliasing=True) * 255).astype('uint8')
        # Clip to [1, 255] — presumably keeps 0 free as a nodata value;
        # TODO confirm with downstream consumers.
        rgb5 = np.clip(rgb5, 1, 255)
        transf_rgb5 = list(param_dict['transf_10'])
        transf_rgb5[0] *= 0.5
        transf_rgb5[4] *= 0.5
        param_dict['rgb_5'] = rgb5
        param_dict['transf_5'] = transf_rgb5
        yield param_dict
Пример #20
0
import os

import cv2
import PIL
import tensorflow as tf
from PIL import Image
from tensorflow.python.types import core as core_tf_types

from model import resolve_single
from model import resolve_single
from model.srgan import generator, discriminator
from utils import load_image
from utils import load_image

# Batch super-resolution: run the SRGAN generator over every image in
# `folder` and save each result as <frame_number>.png under `current_path`.
weights_dir = 'weights/srgan'
os.makedirs(weights_dir, exist_ok=True)

gan_generator = generator()
# Idiom fix: replaced a lambda assigned to a name (PEP 8 E731) with a
# direct os.path.join call.
gan_generator.load_weights(os.path.join(weights_dir, 'gan_generator.h5'))

frame_number = 0
folder = r"C:\Users\Hari\Desktop\super-resolution-master\data"
current_path = "C:\\Users\\Hari\\Desktop\\super-resolution-master\\save\\"

for filename in os.listdir(folder):
    frame_number += 1
    print(frame_number)
    lr = load_image(os.path.join(folder, filename))
    gan_sr = resolve_single(gan_generator, lr)
    tf.keras.preprocessing.image.save_img(
        current_path + str(frame_number) + ".png", gan_sr)
Пример #21
0
def resolve_and_plot(model, lr):
    """Return the super-resolved version of `lr` produced by `model`."""
    return resolve_single(model, lr)
Пример #22
0
def run(args):
    """Generate a face-image dataset with ConfigNet.

    Renders images for a grid of yaw/pitch angles and for 100 randomized
    variations (head pose, gaze, expression), optionally passing each render
    through an SRGAN super-resolution model when args.enable_sr == 1.

    Args:
        args: raw argument list, parsed here via parse_args.
    """
    args = parse_args(args)
    if args.image_path is not None:
        input_images = process_image(args.image_path, args.resolution)
        latentgan_model = None
    else:
        input_images = None
        print(
            "WARNING: no input image directory specified, embeddings will be sampled using Laten GAN"
        )
        latentgan_model = LatentGAN.load(args.latent_gan_model_path)
    confignet_model = ConfigNet.load(args.confignet_model_path)

    #basic_ui = BasicUI(confignet_model)

    # Sample latent embeddings from input images if available and if not sample from Latent GAN
    current_embedding_unmodified, current_rotation, orig_images = get_new_embeddings(
        input_images, latentgan_model, confignet_model)
    # Set next embedding value for rendering
    if args.enable_sr == 1:
        # SRGAN generator used to upscale the 256x256 renders.
        modelSR = generator()
        modelSR.load_weights('evaluation/weights/srgan/gan_generator.h5')

    # Symmetric yaw/pitch sweep in 5-degree steps.
    yaw_min_angle = -args.max_angle
    pitch_min_angle = -args.max_angle
    yaw_max_angle = args.max_angle
    pitch_max_angle = args.max_angle
    delta_angle = 5

    rotation_offset = np.zeros((1, 3))

    eye_rotation_offset = np.zeros((1, 3))

    facemodel_param_names = list(
        confignet_model.config["facemodel_inputs"].keys())
    # remove eye rotation as in the demo it is controlled separately
    eye_rotation_param_idx = facemodel_param_names.index(
        "bone_rotations:left_eye")
    facemodel_param_names.pop(eye_rotation_param_idx)

    render_input_interp_0 = current_embedding_unmodified
    render_input_interp_1 = current_embedding_unmodified

    # NOTE(review): with interpolation_coef == 0 and both endpoints equal,
    # the "interpolations" below are no-ops — presumably leftovers from the
    # interactive demo this was adapted from.
    interpolation_coef = 0
    # NOTE(review): `dataset_directory` is not defined in this function —
    # assumed to be a module-level global; confirm.
    if not os.path.exists(dataset_directory):
        os.makedirs(dataset_directory)
    # This interpolates between the previous and next set embeddings
    current_renderer_input = render_input_interp_0 * (
        1 - interpolation_coef) + render_input_interp_1 * interpolation_coef
    # Set eye gaze direction as controlled by the user
    current_renderer_input = set_gaze_direction_in_embedding(
        current_renderer_input, eye_rotation_offset, confignet_model)

    # all angles
    #image = Image.open(args.image_path)
    #print(np.array(image))
    #return
    i = 1
    print('All angles')
    for yaw in range(yaw_min_angle, yaw_max_angle + 1, delta_angle):
        for pitch in range(pitch_min_angle, pitch_max_angle + 1, delta_angle):
            rotation_offset[0, 0] = to_rad(yaw)
            rotation_offset[0, 1] = to_rad(pitch)
            generated_imgs = confignet_model.generate_images(
                current_renderer_input, current_rotation + rotation_offset)
            if args.enable_sr == 1:
                # Render small, then super-resolve.
                img = cv2.resize(generated_imgs[0], (256, 256))
                sr_img = resolve_single(modelSR, img)
                cv2.imwrite(dataset_directory + '/%d_%d.png' % (yaw, pitch),
                            np.array(sr_img))
            else:
                # No SR: plain bilinear upscale to the target resolution.
                img = cv2.resize(generated_imgs[0], (1024, 1024))
                cv2.imwrite(dataset_directory + '/%d_%d.png' % (yaw, pitch),
                            img)
            print(i)
            i += 1

    #all random
    # 100 images with random head rotations in [-20, 20] degrees, random eye
    # rotations, and random facial expressions.
    print('All random')
    current_attribute_name = facemodel_param_names[1]  #blendshape_values
    frame_embedding = render_input_interp_0 * (
        1 - interpolation_coef) + render_input_interp_1 * interpolation_coef
    for i in range(100):
        eye_rotation_offset[0, 2] = to_rad(np.random.randint(-40, 40))
        eye_rotation_offset[0, 0] = to_rad(np.random.randint(-40, 40))
        rotation_offset[0, 0] = to_rad(np.random.randint(-20, 20))
        rotation_offset[0, 1] = to_rad(np.random.randint(-20, 20))
        frame_embedding = set_gaze_direction_in_embedding(
            frame_embedding, eye_rotation_offset, confignet_model)
        new_embedding_value = get_embedding_with_new_attribute_value(
            current_attribute_name, frame_embedding, confignet_model)

        generated_imgs = confignet_model.generate_images(
            new_embedding_value, current_rotation + rotation_offset)

        if args.enable_sr == 1:
            img = cv2.resize(generated_imgs[0], (256, 256))
            sr_img = resolve_single(modelSR, img)
            cv2.imwrite(dataset_directory + '/random_%d.png' % (i),
                        np.array(sr_img))
        else:
            img = cv2.resize(generated_imgs[0], (1024, 1024))
            cv2.imwrite(dataset_directory + '/random_%d.png' % (i), img)
        print(i)
Пример #23
0
def func(fn_img, fn_model, psf=None,
         fn_img_hr=None, suptitle=None,
         fnfigout='test.pdf', vm=75, nbit=8):
    """Load a low-res image (.npy or .png), optionally build the LR input by
    convolving a high-res reference with a PSF, super-resolve it with a
    WDSR-B model, and plot LR/SR/HR side by side.

    Args:
        fn_img: path to the LR image (.npy or .png).
        fn_model: path to the WDSR-B weights file.
        psf: None, 'gaussian'/'Gaussian', or a path to a .npy kernel.
        fn_img_hr: optional path to the HR reference (.npy or .png).
        suptitle: figure title passed through to the plotter.
        fnfigout: output figure file name.
        vm: display scaling passed through to the plotter.
        nbit: bit depth; 8 uses resolve_single, otherwise resolve_single16.
    """
    if fn_img.endswith('npy'):
        datalr = np.load(fn_img)[:, :]
    elif fn_img.endswith('png'):
        # Bug fix: a bare `except:` also swallowed KeyboardInterrupt and
        # SystemExit; catch Exception and keep the demo-image fallback.
        try:
            datalr = load_image(fn_img)
        except Exception:
            datalr = load_image('demo/0851x4-crop.png')
    else:
        print('Do not recognize input image file type, exiting')
        exit()

    # Bug fix: unrecognized HR extensions previously left `datahr` unbound
    # (UnboundLocalError at use); default to None up front.
    datahr = None
    if fn_img_hr is not None:
        if fn_img_hr.endswith('.npy'):
            datahr = np.load(fn_img_hr)
        elif fn_img_hr.endswith('png'):
            datahr = load_image(fn_img_hr)

    if psf is not None:
        # NOTE(review): this branch requires datahr; the original silently
        # `pass`ed when it was None, which then crashes in imshow — confirm
        # intended handling.
        print("Convolving data")
        if psf in ('gaussian', 'Gaussian'):
            kernel1D = signal.gaussian(8, std=1).reshape(8, 1)
            kernel = np.outer(kernel1D, kernel1D)
        elif psf.endswith('.npy'):
            kernel = np.load(psf)
            nkern = len(kernel)
            # Central 512x512 cut-out of the kernel.
            kernel = kernel[nkern // 2 - 256:nkern // 2 + 256,
                            nkern // 2 - 256:nkern // 2 + 256]
        else:
            print("Can't interpret kernel")
            exit()
        plt.figure()
        plt.subplot(121)
        plt.imshow(datahr, vmax=25, vmin=5)
        datalr = hr2lr.convolvehr(datahr, kernel, rebin=1)
        datahr = hr2lr.normalize_data(datahr, nbit=nbit)
        plt.subplot(122)
        print(datalr.shape)
        datalr = hr2lr.normalize_data(datalr, nbit=nbit)
        plt.imshow(datalr, vmax=50, vmin=20)
        plt.show()
    else:
        print("Assuming data is already convolved")

    model = wdsr_b(scale=4, num_res_blocks=32)
    model.load_weights(fn_model)
    # Add a trailing channel axis for the model input.
    datalr = datalr[:, :, None]
    if nbit == 8:
        datasr = resolve_single(model, datalr)
    else:
        datasr = resolve_single16(model, datalr)
    plotter(datalr, datasr, datahr=datahr,
            suptitle=suptitle, fnfigout=fnfigout, vm=vm,
            nbit=nbit)
def test_sub_pix_translation(translat):
    """Measure how sub-pixel translation affects super-resolution quality.

    Notation: P is the prediction function, G a ground-truth image, I the
    downsampled version of G. G_t is G translated horizontally by t and I_t
    its downsampled version.

        sc       = || G - P(I) ||
        sc_tr    = || G_t - P(I_t) ||
        sc_mu    = || P(I_t) - P(I) ||
        sc_tau   = || P(I_t)_(-t) - P(I) ||
        best_tau = argmin over t' of || P(I_t)_(t') - P(I) ||

    Args:
        translat (float): translation amount t.

    Returns:
        Tuple of means of the scores above plus the std of best_tau:
        (score, score_tr, score_tr_tr, score_mutuel, score_mutuel2,
         score_tau, mean(best_tau), std(best_tau)).
    """
    eval_model = load_pre_trained_model()
    _, test_ground_truth = dataset.images_paths()

    # Per-image accumulators for each metric defined above.
    score = []
    score_tr = []
    score_tr_tr = []
    score_mutuel = []
    score_mutuel2 = []
    score_tau = []
    best_tau = []

    for ind in range(len(test_ground_truth)):
        img = np.array(Image.open(test_ground_truth[ind])).astype(float)
        h, w, _ = img.shape
        img_ground_truth = img
        img_test = down_sampling(img_ground_truth)

        # RMSE of the prediction against the untranslated ground truth.
        prediction = resolve_single(eval_model, img_test)
        prediction = K.eval(prediction).astype(float)
        score.append(np.sqrt(np.mean((prediction - img_ground_truth)**2)))

        # Same pipeline on the sub-pixel-translated image.
        img_ground_truth_t = ffttranslate(img_ground_truth, translat)
        img_test_t = down_sampling(img_ground_truth_t)

        prediction2 = resolve_single(eval_model, img_test_t)
        prediction2 = K.eval(prediction2).astype(float)
        score_tr.append(np.sqrt(np.mean(
            (prediction2 - img_ground_truth_t)**2)))

        # Cross-comparisons between the two predictions, with and without
        # undoing the translation.
        score_mutuel.append(np.sqrt(np.mean((prediction2 - prediction)**2)))
        prediction_tau = ffttranslate(prediction2, -translat)
        score_mutuel2.append(
            np.sqrt(np.mean((prediction2 - prediction_tau)**2)))
        score_tau.append(np.sqrt(np.mean((prediction_tau - prediction)**2)))
        score_tr_tr.append(
            np.sqrt(np.mean((prediction_tau - img_ground_truth)**2)))

        # Search for the back-translation t' that best aligns the two
        # predictions (starting from t).
        func = lambda x: np.mean(
            (ffttranslate(prediction2, -x) - prediction)**2)
        tau_opt = opt.minimize(func, translat)
        best_tau.append(tau_opt.x)
        print(
            "t = %f - testing pre-trained model %i/%i : score - %f ; score tr - %f ; score mutuel - %f ; score translation inv - %f ; best tau - %f"
            % (translat, ind, len(test_ground_truth), score[-1], score_tr[-1],
               score_mutuel[-1], score_tau[-1], best_tau[-1]))
    return np.mean(score), np.mean(score_tr), np.mean(score_tr_tr), np.mean(
        score_mutuel), np.mean(score_mutuel2), np.mean(score_tau), np.mean(
            best_tau), np.std(best_tau)
# Standalone demo: open `video`, downscale the first frame to 30 % of its
# size, SRGAN-upscale it, and set up VideoWriters for the cropped and the
# upscaled streams.
# NOTE(review): this fragment appears truncated — the loop below only
# re-crops each frame; writing/upscaling inside the loop is missing.
model = generator()
model.load_weights('weights/srgan/gan_generator.h5')

#lr = utils.load_image('demo/0805x4-crop.png')
#sr = resolve_single(model, lr)
#utils.plot_sample(lr, sr)
# NOTE(review): `video` is presumably defined earlier at module level.
cap = cv2.VideoCapture(video)
ret, frame = cap.read()

# Crop dimensions: 30 % of the first frame's size.
scale = 30
width = int(frame.shape[1] * (scale / 100))
height = int(frame.shape[0] * (scale / 100))
dim = (width, height)

crop = cv2.resize(frame, dim, interpolation=cv2.INTER_AREA)
upscale = resolve_single(model, crop).numpy()

print("Upscale Width: {}".format(upscale.shape[1]))
print("Upscale Height: {}".format(upscale.shape[0]))

# 30 fps MJPG writers, sized to the crop and the upscaled first frame.
crop_out = cv2.VideoWriter('Outputs/crop.avi',
                           cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), 30, dim)
upscale_out = cv2.VideoWriter('Outputs/upscale.avi',
                              cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), 30,
                              (upscale.shape[1], upscale.shape[0]))
i = 1
while (True):
    # Capture frame-by-frame
    ret, frame = cap.read()
    if ret == True:
        crop = cv2.resize(frame, dim, interpolation=cv2.INTER_AREA)
Пример #26
0
def resolve_and_plot(lr_image_path):
    """Load an LR image, super-resolve it with the global model, and plot the
    LR/SR pair."""
    low_res = load_image(lr_image_path)
    super_res = resolve_single(model, low_res)
    plot_sample(low_res, super_res)
Пример #27
0
from model import resolve_single
from model.edsr import edsr

from utils import load_image, plot_sample

# EDSR x4 baseline: 16 residual blocks, pre-trained weights.
model = edsr(scale=4, num_res_blocks=16)
model.load_weights('weights/edsr-16-x4/weights.h5')

# NOTE(review): the demo path has no file extension — confirm the file
# exists exactly as spelled.
lr = load_image('demo/frame-1214_frame_blend')
sr = resolve_single(model, lr)

plot_sample(lr, sr)
Пример #28
0
def Use_SRGAN():
    """Super-resolve the global `filename` with the SRGAN generator and save
    the result."""
    srgan = generator()
    srgan.load_weights('weights/srgan/gan_generator.h5')
    Save_Image(resolve_single(srgan, Load_Image(filename)))
Пример #29
0
def Use_WDSR():
    """Super-resolve the global `filename` with a WDSR-B x4 model and save
    the result."""
    wdsr_model = wdsr_b(scale=4, num_res_blocks=32)
    wdsr_model.load_weights('weights/wdsr-b-32-x4/weights.h5')
    Save_Image(resolve_single(wdsr_model, Load_Image(filename)))
Пример #30
0
def Use_EDSR():
    """Super-resolve the global `filename` with an EDSR x4 model and save
    the result."""
    edsr_model = edsr(scale=4, num_res_blocks=16)
    edsr_model.load_weights('weights/edsr-16-x4/weights.h5')
    Save_Image(resolve_single(edsr_model, Load_Image(filename)))