def compare(batch_size, input_dir, output_dir):
    data = load_images(input_dir, batch_size)
    y_test, x_test = data['B'], data['A']
    weights = [
        'generator.h5',
        'weights/DIV2K_1/generator_3_374.h5',
        'weights/DIV2K_2/generator_3_507.h5'
    ]
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    generated = []
    for weight in weights:
        g = generator_model()
        g.load_weights(weight)
        generated_images = g.predict(x=x_test, batch_size=batch_size)
        generated.append([deprocess_image(img) for img in generated_images])
    generated = np.array(generated)
    x_test = deprocess_image(x_test)
    y_test = deprocess_image(y_test)
    for i in range(generated_images.shape[0]):
        y = y_test[i, :, :, :]
        x = x_test[i, :, :, :]
        img_0 = generated[0, i, :, :, :]  # original weights
        img_1 = generated[1, i, :, :, :]  # transfer learning
        img_2 = generated[2, i, :, :, :]  # transfer learning with frozen parameters
        # Combine sharp target, blurred input, and the three generator outputs side by side.
        output = np.concatenate((y, x, img_0, img_1, img_2), axis=1)
        im = Image.fromarray(output.astype(np.uint8))
        im.save(os.path.join(output_dir, 'results{}.png'.format(i)))
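# A minimal sketch of the preprocessing helpers the functions in this file rely on,
# assuming the generator works on images normalized to [-1, 1] (consistent with the
# (x - 127.5) / 127.5 normalization used elsewhere in this section). The real
# load_images / preprocess_image / deprocess_image live in the project's utility
# module and may differ in detail; the "_sketch" names are illustrative only.
def preprocess_image_sketch(img):
    # PIL image or HxWx3 array in [0, 255] -> float array in [-1, 1]
    return (np.asarray(img, dtype=np.float32) - 127.5) / 127.5

def deprocess_image_sketch(img):
    # float array in [-1, 1] -> array in [0, 255], ready for .astype(np.uint8)
    return np.clip(img * 127.5 + 127.5, 0, 255)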
def deblur(weight_path, input_dir, output_dir):
    g = generator_model()
    g.load_weights(weight_path)
    for image_name in os.listdir(input_dir):
        # Read the image with OpenCV and normalize it without resizing.
        img_tif = cv2.imread(os.path.join(input_dir, image_name))
        image = np.array([preprocess_image_no_resize(img_tif)])
        x_test = image
        generated_images = g.predict(x=x_test)
        generated = np.array([deprocess_image(img) for img in generated_images])
        x_test = deprocess_image(x_test)
        for i in range(generated_images.shape[0]):
            img = generated[i, :, :, :]
            # Preview the deblurred result, then save it under the original file name.
            plt.figure()
            plt.imshow(img)
            plt.show()
            img_gen = Image.fromarray(img.astype(np.uint8))
            img_gen.show()
            img_gen.save(os.path.join(output_dir, image_name))
        print("ok")
def test(batch_size):
    data = load_images('./images/test', batch_size)
    y_test, x_test = data['B'], data['A']
    g = generator_model()
    g.load_weights('generator.h5')
    generated_images = g.predict(x=x_test, batch_size=batch_size)
    generated = np.array([deprocess_image(img) for img in generated_images])
    x_test = deprocess_image(x_test)
    y_test = deprocess_image(y_test)
    acc = 0
    for i in range(generated_images.shape[0]):
        y = y_test[i, :, :, :]
        x = x_test[i, :, :, :]
        img = generated[i, :, :, :]
        # Per-image MSE averaged over height * width * channels, then PSNR in dB.
        mse = np.sum((y - img) ** 2) / (
            generated.shape[1] * generated.shape[2] * generated.shape[3])
        psnr = 10 * math.log10((255 ** 2) / mse)
        acc = acc + psnr
        # Save sharp target, blurred input, and generated output side by side.
        output = np.concatenate((y, x, img), axis=1)
        im = Image.fromarray(output.astype(np.uint8))
        im.save('results{}.png'.format(i))
    final_acc = acc / generated_images.shape[0]
    print('test accuracy (mean PSNR)', final_acc)
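# The PSNR computed inside test() above is equivalent to this standalone helper,
# shown as a sanity check: PSNR = 10 * log10(MAX^2 / MSE) with MAX = 255 for
# 8-bit images. Assumes numpy (np) and math are imported as in the rest of this
# file; the helper name is illustrative.
def psnr_255(y_true, y_pred):
    mse = np.mean((y_true.astype(np.float64) - y_pred.astype(np.float64)) ** 2)
    if mse == 0:
        return float('inf')  # identical images
    return 10 * math.log10((255 ** 2) / mse)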
def deblur(weight_path, input_dir, output_dir):
    g = generator_model()
    g.load_weights(weight_path)
    for image_name in os.listdir(input_dir):
        image = np.array([preprocess_image(load_image(os.path.join(input_dir, image_name)))])
        x_test = image
        generated_images = g.predict(x=x_test)
        generated = np.array([deprocess_image(img) for img in generated_images])
        x_test = deprocess_image(x_test)
        for i in range(generated_images.shape[0]):
            x = x_test[i, :, :, :]
            img = generated[i, :, :, :]
            output = np.concatenate((x, img), axis=1)
            im = Image.fromarray(output.astype(np.uint8))
            im.save(os.path.join(output_dir, image_name))
def test(batch_size):
    data = load_images('./images/test', batch_size)
    y_test, x_test = data['B'], data['A']
    g = generator_model()
    g.load_weights('generator.h5')
    generated_images = g.predict(x=x_test, batch_size=batch_size)
    generated = np.array([deprocess_image(img) for img in generated_images])
    x_test = deprocess_image(x_test)
    y_test = deprocess_image(y_test)
    for i in range(generated_images.shape[0]):
        y = y_test[i, :, :, :]
        x = x_test[i, :, :, :]
        img = generated[i, :, :, :]
        output = np.concatenate((y, x, img), axis=1)
        im = Image.fromarray(output.astype(np.uint8))
        im.save('results{}.png'.format(i))
def deblur(image_path):
    data = {
        'A_paths': [image_path],
        'A': np.array([preprocess_image(load_image(image_path))])
    }
    x_test = data['A']
    g = generator_model()
    g.load_weights('generator.h5')
    generated_images = g.predict(x=x_test)
    generated = np.array([deprocess_image(img) for img in generated_images])
    x_test = deprocess_image(x_test)
    for i in range(generated_images.shape[0]):
        x = x_test[i, :, :, :]
        img = generated[i, :, :, :]
        output = np.concatenate((x, img), axis=1)
        im = Image.fromarray(output.astype(np.uint8))
        im.save('deblur' + image_path)
def deblurOne(self, imageByte: bytes):
    image = np.array([preprocess_image(Image.open(io.BytesIO(imageByte)))])
    # Drop the alpha channel when the uploaded image is RGBA.
    try:
        image = np.delete(image, 3, 3)
    except IndexError:
        pass
    x_test = image
    with self.graph.as_default():
        generated_images = self.g.predict(x=x_test)
    generated = np.array([deprocess_image(img) for img in generated_images])
    x_test = deprocess_image(x_test)
    for i in range(generated_images.shape[0]):
        img = generated[i, :, :, :]
        im = Image.fromarray(img.astype(np.uint8))
        # Encode the first (and only) generated image as JPEG and return the buffer.
        output = io.BytesIO()
        im.save(output, format="JPEG")
        output.seek(0)
        return output
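# Hypothetical usage of deblurOne(), assuming `service` is an instance of the
# class this method belongs to (holding the loaded generator in self.g and the
# TF graph in self.graph). The file names and the `service` variable are
# illustrative assumptions, not part of the original code.
with open('blurred.jpg', 'rb') as f:
    jpeg_buffer = service.deblurOne(f.read())   # returns an io.BytesIO positioned at 0
with open('deblurred.jpg', 'wb') as f:
    f.write(jpeg_buffer.getvalue())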
def DeBlur_GAN_Decomposition():
    st.subheader("GAN based Blur removal")
    g = generator_model()
    g.load_weights(dblur_weight_path)
    img_file_buffer = st.file_uploader("Upload an image", type=["jpg"])
    if img_file_buffer is not None:
        sample_image = Image.open(img_file_buffer)
        image_resize = sample_image.resize((256, 256))
        image_in = np.array([preprocess_image(image_resize)])
        x_test = image_in
        generated_images = g.predict(x=x_test)
        generated = np.array([deprocess_image(img) for img in generated_images])
        x_test = deprocess_image(x_test)
        for i in range(generated_images.shape[0]):
            img = generated[i, :, :, :]
            im = Image.fromarray(img.astype(np.uint8))
            with st.spinner('Enhancing Image...'):
                # Display the original and deblurred images side by side.
                plt.figure(figsize=(25, 20))
                plt.subplot(121)
                plt.title('Original Image')
                plt.axis('off')
                plt.imshow(image_resize)
                plt.subplot(122)
                plt.title('Final Image')
                plt.axis('off')
                plt.imshow(im)
                st.pyplot()
                time.sleep(10)
def deblur(weight_path, input_dir, output_dir):
    g = generator_model()
    g.load_weights(weight_path)
    lst_grap_img = []
    lst_crop_img = []
    count = 0
    for image_name in os.listdir(input_dir):
        path_in = os.path.join(input_dir, image_name)    # input image path
        path_out = os.path.join(output_dir, image_name)  # output image path
        image_blur = cv2.imread(path_in)                 # read the blurred image
        img_add_padding = add_padding(image_blur)        # pad so both sides are multiples of 256
        lst_crop_img = crop_image(img_add_padding)       # crop the padded image into 256 x 256 tiles
        for crop in range(len(lst_crop_img)):
            image_preprocess = np.array(lst_crop_img[crop])
            image_swap = (image_preprocess - 127.5) / 127.5  # normalize to [-1, 1]
            image = np.array([image_swap])
            x_test = image
            generator_images = g.predict(x=x_test)
            generator = np.array([deprocess_image(img) for img in generator_images])
            x_test = deprocess_image(x_test)
            for i in range(generator_images.shape[0]):
                img = generator[i, :, :, :]
                im = Image.fromarray(img.astype(np.uint8))
                lst_grap_img.append(np.asarray(im))
        # Stitch the deblurred tiles back onto the padded canvas and save the result.
        img_sharp = graph_image(lst_grap_img, img_add_padding)
        cv2.imwrite(path_out, img_sharp)
        count += 1
        print("done", count / len(os.listdir(input_dir)))
        lst_grap_img.clear()
        lst_crop_img.clear()
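# A minimal sketch of what the tiling helpers used above are assumed to do:
# add_padding() pads the image so both sides are multiples of 256, crop_image()
# splits it into 256x256 tiles in row-major order, and graph_image() stitches
# the deblurred tiles back into the padded shape. The real helpers may differ;
# the "_sketch" names and the edge-padding choice are assumptions. Assumes
# numpy (np) is imported as in the rest of this file.
def add_padding_sketch(img, tile=256):
    h, w = img.shape[:2]
    pad_h = (tile - h % tile) % tile
    pad_w = (tile - w % tile) % tile
    return np.pad(img, ((0, pad_h), (0, pad_w), (0, 0)), mode='edge')

def crop_image_sketch(img, tile=256):
    h, w = img.shape[:2]
    return [img[r:r + tile, c:c + tile]
            for r in range(0, h, tile)
            for c in range(0, w, tile)]

def graph_image_sketch(tiles, padded_img, tile=256):
    h, w = padded_img.shape[:2]
    out = np.zeros_like(padded_img)
    idx = 0
    for r in range(0, h, tile):
        for c in range(0, w, tile):
            out[r:r + tile, c:c + tile] = tiles[idx]
            idx += 1
    return out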
def test(batch_size):
    # Evaluate on 300 test image pairs.
    data = load_images('../images/test', 300)
    y_test, x_test = data['B'], data['A']
    g = generator_model()
    # Alternative checkpoints used in earlier experiments:
    # g.load_weights('./weights/331/generator_3_1538.h5')
    # g.load_weights('./weights_hard/331/generator_3_1746.h5')
    # g.load_weights('../deblur-40.h5')
    g.load_weights('../generator4-40.h5')
    psnr = 0
    ssim = 0
    num_batches = int(300 / batch_size)
    for index in tqdm.tqdm(range(num_batches)):
        batch_test = x_test[index * batch_size:(index + 1) * batch_size]
        batch_label = y_test[index * batch_size:(index + 1) * batch_size]
        generated_images = g.predict(x=batch_test, batch_size=batch_size)
        generated = np.array([deprocess_image(img) for img in generated_images])
        batch_test = deprocess_image(batch_test)
        batch_label = deprocess_image(batch_label)
        # Compute SSIM and PSNR for the whole batch; a new TF session is opened per batch.
        with tf.Session() as sess:
            sess.run(tf.initialize_all_variables())
            yy = tf.convert_to_tensor(batch_label, dtype=tf.float32)
            imgimg = tf.convert_to_tensor(generated, dtype=tf.float32)
            ss = sess.run(tf.image.ssim(yy, imgimg, max_val=255))
            ssim += np.mean(ss)
            pp = sess.run(tf.image.psnr(yy, imgimg, max_val=255))
            psnr += np.mean(pp)
    # Average the per-batch means over all batches.
    print(psnr / num_batches)
    print(ssim / num_batches)
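# In TensorFlow 2.x the same batch metrics can be computed eagerly, without the
# Session / initialize_all_variables() boilerplate used above. A sketch only,
# assuming `labels` and `preds` are arrays in [0, 255] with shape (N, H, W, C)
# and tf is imported as in the rest of this file; the helper name is illustrative.
def batch_ssim_psnr_tf2(labels, preds):
    yy = tf.convert_to_tensor(labels, dtype=tf.float32)
    gg = tf.convert_to_tensor(preds, dtype=tf.float32)
    ssim = float(tf.reduce_mean(tf.image.ssim(yy, gg, max_val=255)))
    psnr = float(tf.reduce_mean(tf.image.psnr(yy, gg, max_val=255)))
    return ssim, psnr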