# Single-image inference demo: mask a photo with a random mask, inpaint it
# with a trained PConvUnet, then display and save original/masked/predicted.
# NOTE(review): cv2, deepcopy and imsave must be imported by surrounding code
# not shown in this fragment.
from libs.pconv_model import PConvUnet
from libs.util import random_mask, plot_images

# Load image; OpenCV reads BGR, convert to RGB and scale to [0, 1]
img = cv2.imread('./data/building.jpg')
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = img / 255
shape = img.shape
print(f"Shape of image is: {shape}")

# Load mask (random, same spatial size as the image)
mask = random_mask(shape[0], shape[1])

# Image + mask: paint the masked (0) pixels white on a copy
masked_img = deepcopy(img)
masked_img[mask == 0] = 1

model = PConvUnet(weight_filepath='result/logs/')
model.load(r"result/logs/1_weights_2019-02-21-04-59-53.h5", train_bn=False)

# Run prediction quickly
pred = model.scan_predict((img, mask))

# Show result
plot_images([img, masked_img, pred])
imsave('result/test_original.png', img)  # BUG FIX: was 'test_orginal.png' (typo)
imsave('result/test_masked.png', masked_img)
imsave('result/test_pred.png', pred)
print("finish")
# Evaluation + resumed training: run a trained PConvUnet over a test
# directory, save masked/predicted/original triptychs, then continue
# training from the latest checkpoint.
# NOTE(review): DataGenerator, cst, BATCH_SIZE, tqdm, plt, datetime,
# get_latest_weights_file, the generators and plot_callback come from
# surrounding code not shown in this fragment.
test_datagen = DataGenerator(rescale=1. / 255)
test_generator = test_datagen.flow_from_directory(cst.TEST_PATH,
                                                  target_size=(256, 256),
                                                  batch_size=BATCH_SIZE,
                                                  seed=1)

# Pick out an example
test_data = next(test_generator)
(masked, mask), ori = test_data

# Load weights from previous run
model = PConvUnet(weight_filepath='data/model/')
model.load('{}/data/model/weight/3000_weights_2018-09-29-08-46-51.h5'.format(
    cst.MNT_PATH),
           train_bn=False,
           lr=0.00005)

n = 0
for (masked, mask), ori in tqdm(test_generator):
    # Run predictions for this batch of images
    pred_img = model.predict([masked, mask])
    pred_time = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')

    # Clear current output and display test images.
    # BUG FIX: the original allocated a 1x2 grid but then indexed axes[2],
    # which raises IndexError — three panels need a 1x3 grid.
    for i in range(len(ori)):
        _, axes = plt.subplots(1, 3, figsize=(10, 5))
        axes[0].imshow(masked[i, :, :, :])
        axes[1].imshow(pred_img[i, :, :, :] * 1.)
        axes[2].imshow(ori[i, :, :, :])
        axes[0].set_title('Masked Image')
        axes[1].set_title('Predicted Image')
        axes[2].set_title('Original Image')
        plt.savefig(r'data/custom_test_samples/img_{}_{}.png'.format(
            i, pred_time))
        plt.close()

# Instantiate the model.
# (Removed the dead `model = PConvUnet()` that was immediately overwritten.)
model = PConvUnet(weight_filepath='data/logs/')
latest_weights = get_latest_weights_file()
print(latest_weights)
model.load(latest_weights)

# Run training for certain amount of epochs
model.fit(train_generator,
          steps_per_epoch=100,
          validation_data=val_generator,
          validation_steps=10,
          epochs=50,
          plot_callback=plot_callback,
          callbacks=[
              TensorBoard(log_dir='../data/logs/initial_training',
                          write_graph=False)
          ])

# Load weights from previous run
latest_weights = get_latest_weights_file()
# Interactive inpainting demo: pick a random image from a local folder,
# let the user sketch a mask, and save the sketch on demand.
import cv2
import numpy as np
from Sketcher import Sketcher
from libs.util import MaskGenerator, ImageChunker
from libs.pconv_model import PConvUnet
import sys
from copy import deepcopy

print('load model...')
model = PConvUnet(vgg_weights=None, inference_only=True)
model.load('data/logs/MyDataset_phase1/weights.75-0.51.h5', train_bn=False)
# model.summary()

import os

# BUG FIX: the original literal relied on invalid escape sequences
# ('\g', '\c' only work by accident and raise DeprecationWarning);
# every backslash is now explicit.  Same resulting path.
src = 'D:\\git\\crawler\\zigbang\\test\\class\\'
dest = 'images\\'


def random_pick(path):
    """Return a uniformly random entry name from directory *path*."""
    entries = os.listdir(path)  # list the directory once, not twice
    return entries[np.random.randint(0, len(entries))]


img = cv2.imread(os.path.join(src, random_pick(src)), cv2.IMREAD_COLOR)
img_masked = img.copy()
mask = np.zeros(img.shape[:2], np.uint8)

# Sketcher paints white (255) into both the display copy and the mask
sketcher = Sketcher('image', [img_masked, mask],
                    lambda: ((255, 255, 255), 255))
chunker = ImageChunker(512, 512, 30)

# Event loop (fragment ends inside the loop body)
while True:
    key = cv2.waitKey()
    if key == ord('s'):  #save
        sketcher.save_files(dest)
# Fragment: normalize and invert a mask, mask the original CT slice, batch
# everything, run inpainting, and save each prediction.
# NOTE(review): `masks`, `maskeds`, `i`, `ori`, `img_list`, `args` and `plt`
# come from surrounding code not shown in this fragment.
mask = mask / 255.
mask = 1 - mask
print('original max {}'.format(ori.max()))
#ori = ori / ori.max()
ori = ori / 255.
masked = deepcopy(ori)
masked[mask == mask.min()] = 1  # white out the hole pixels
print("mask shape {}".format(masked.shape))

# Add a leading batch axis for the model
ori = np.expand_dims(ori, axis=0)
mask = np.uint8(np.expand_dims(mask, axis=0))
masked = np.expand_dims(masked, axis=0)
#mask = np.stack([random_mask_line_for_ct(ori.shape[1],ori.shape[2]) for _ in range(ori.shape[0])], axis=0)
#masked = deepcopy(ori)
masked[mask == 0] = 1
masks[i, ] = mask
maskeds[i, ] = masked

model = PConvUnet(weight_filepath='data/logs_ct_new/')
model.load("data/logs_ct_new/316_weights_2018-12-11-10-45-45.h5")
pred_img = model.predict([maskeds, masks])

for i in range(len(img_list)):
    print("saving " + str(i))  # BUG FIX: was "saveing" (typo)
    split = img_list[i]
    splited = split.split('test_')[-1]
    plt.imsave(args.save_name + splited, pred_img[i] * 1.)
def main():
    """Batch-inpaint the MSRA10K test set with a pretrained PConvUnet.

    Reads image/mask pairs, dilates and inverts each mask, inpaints at
    half resolution in 512x512 chunks, resizes back, and composites the
    network output over the masked region only.
    """
    #CALL PARSER
    args = parse_args()

    # # Change to root path
    if os.path.basename(os.getcwd()) != 'PConvInpainting':
        os.chdir('..')

    # SETTINGS
    TEST_FOLDER_IMG = args.img_path
    TEST_FOLDER_MASK = args.mask_path
    OUTPUT_FOLDER = args.out_path
    BATCH_SIZE = args.batch_size

    #
    model = PConvUnet(vgg_weights=None, inference_only=True)
    model.load("pconv_imagenet.h5", train_bn=False)

    fileList = os.listdir(TEST_FOLDER_IMG)

    # Used for chunking up images & stiching them back together
    chunker = ImageChunker(512, 512, 30)
    kernel = np.ones((7, 7), np.uint8)

    for i in range(0, len(fileList), BATCH_SIZE):
        ####
        # Lists for saving images and masks
        imgs, masks, indices = [], [], []
        for j in range(0, BATCH_SIZE):
            # NOTE(review): assumes MSRA10K file names are contiguous and
            # i + j never runs past the dataset — the last partial batch
            # would raise on Image.open; confirm.
            imgName = "MSRA10K_image_{:06d}.jpg".format(i + j)
            imFile = Image.open(TEST_FOLDER_IMG + imgName)
            im = np.array(imFile) / 255  # convert to float
            maskName = imgName.replace(".jpg", ".png")
            maskName = maskName.replace("image", "mask")
            maskFile = Image.open(TEST_FOLDER_MASK + maskName)
            mask = np.array(maskFile)
            # extend from 1 channel to 3
            mask3d = np.tile(mask[:, :, None], [1, 1, 3])
            # dilate mask to process additional border
            mask3d = cv2.dilate(mask3d, kernel, iterations=1)
            mask3d = mask3d / 255  # convert to float
            mask3d = 1.0 - mask3d  # need to invert mask due to framework
            imgs.append(im)
            masks.append(mask3d)
            indices.append(i + j)
            imFile.close()
            maskFile.close()
            print(imgName, maskName)
        ####
        #
        print("testing....")
        for img, mask, index in zip(imgs, masks, indices):
            ###begin resize
            height, width, depth = img.shape
            imgScale = 0.5
            newX, newY = int(width * imgScale), int(height * imgScale)
            new_img = cv2.resize(img, (newX, newY))
            new_mask = cv2.resize(mask, (newX, newY))

            # Split into 512x512 tiles, predict, then stitch back together
            chunked_images = chunker.dimension_preprocess(deepcopy(new_img))
            chunked_masks = chunker.dimension_preprocess(deepcopy(new_mask))
            pred_imgs = model.predict([chunked_images, chunked_masks])
            reconstructed_image_resized = chunker.dimension_postprocess(
                pred_imgs, new_img)
            reconstructed_image_original_size = cv2.resize(
                reconstructed_image_resized, (int(width), int(height)))

            # Erode the mask so only the core masked area takes the
            # network output; the border keeps original pixels
            maskExpanded = cv2.erode(mask, kernel, iterations=3)
            reconstructed_image_final = np.where(
                maskExpanded == 0, reconstructed_image_original_size,
                img)  #apply generated over masked area only
            result = Image.fromarray(
                (reconstructed_image_final * 255).astype(np.uint8))
            result.save(OUTPUT_FOLDER +
                        "MSRA10K_image_{:06d}.png".format(index))
# Single image/mask pair inference with the pretrained ImageNet PConvUnet.
# NOTE(review): os, Image (PIL), np and plt must be imported by surrounding
# code not shown in this fragment.
if os.path.basename(os.getcwd()) != 'PConv-Keras':
    os.chdir('..')

from libs.pconv_model import PConvUnet

#Both msk and img should be of order (512,512,3) exactly or else use Image Chunker
img = '/image.jpg'  #Path of image and mask
msk = '/mask.jpg'
im = Image.open(img)
mk = Image.open(msk)
mk = np.array(mk) / 255
im = np.array(im) / 255
mk = mk.reshape(-1, 512, 512, 3)
im = im.reshape(-1, 512, 512, 3)  #The model takes 4D input

model = PConvUnet(vgg_weights=None, inference_only=True)
model.load(r"/content/PConv-Keras/pconv_imagenet.26-1.07.h5",
           train_bn=False)  #See more about weight in readme
pred_imgs = model.predict([im, mk])


def plot_images(images, s=5):
    """Display *images* side by side, s inches per panel."""
    _, axes = plt.subplots(1, len(images), figsize=(s * len(images), s))
    if len(images) == 1:
        axes = [axes]
    for img, ax in zip(images, axes):
        ax.imshow(img)
    plt.show()


plot_images(pred_imgs)

import cv2

# BUG FIX: cv2.imwrite cannot encode a 4-D float batch in [0, 1]; take the
# first prediction, scale to 8-bit, and convert RGB -> BGR for OpenCV.
cv2.imwrite('inpainted.jpg',
            cv2.cvtColor((pred_imgs[0] * 255).astype(np.uint8),
                         cv2.COLOR_RGB2BGR))
# Fragment: save a 3-panel comparison figure, restart phase-1 training from
# a checkpoint, then run phase-2 fine-tuning with batch norm frozen.
# NOTE(review): `masked`, `pred_img`, `ori`, `i`, `pred_time`, the data
# generators and plot_callback come from surrounding code not shown here;
# the first statements appear to sit inside a loop whose header is missing.
_, axes = plt.subplots(1, 3, figsize=(20, 5))
axes[0].imshow(masked[i, :, :, :])
axes[1].imshow(pred_img[i, :, :, :] * 1.)
axes[2].imshow(ori[i, :, :, :])
axes[0].set_title('Masked Image')
axes[1].set_title('Predicted Image')
axes[2].set_title('Original Image')
plt.savefig(r'data/test_samples/img_{}_{}.png'.format(i, pred_time))
plt.close()

# Instantiate the model
model = PConvUnet(weight_filepath='data/logs/')
model.load(
    r"C:\Users\MAFG\Documents\Github-Public\PConv-Keras\data\logs\50_weights_2018-06-01-16-41-43.h5"
)

# Run training for certain amount of epochs
model.fit(train_generator,
          steps_per_epoch=10000,
          validation_data=val_generator,
          validation_steps=100,
          epochs=50,
          plot_callback=plot_callback,
          callbacks=[
              TensorBoard(log_dir='../data/logs/initial_training',
                          write_graph=False)
          ])

# Load weights from previous run
# NOTE(review): the bare argument list below belongs to a fit(...) call
# whose opening line is outside this fragment — kept verbatim.
train_generator, steps_per_epoch=10000, validation_data=val_generator, validation_steps=100, epochs=50, plot_callback=plot_callback, callbacks=[ TensorBoard(log_dir='../data/logs/initial_training', write_graph=False) ] )

print("Phase two training...")
#phase two without bn
model = PConvUnet(weight_filepath='model/logs/')
model.load(
    "./model/logs/latest_weights.h5",
    train_bn=False,
    lr=0.00005
)

# Run training for certain amount of epochs
model.fit(
    train_generator,
    steps_per_epoch=10000,
    validation_data=val_generator,
    validation_steps=100,
    epochs=20,
    workers=3,
    plot_callback=plot_callback,
    callbacks=[
        TensorBoard(log_dir='../data/logs/fine_tuning', write_graph=False)
    ]
)
def to_gray(image):
    # Convert a BGR image (OpenCV layout) to single-channel grayscale.
    return cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)


# Candidate models for checkpoint comparison; only model1 is active, the
# commented lines record other checkpoints tried during experiments.
model1 = PConvUnet(vgg_weights=None, inference_only=True)
# model2 = PConvUnet(vgg_weights=None, inference_only=True)
#model3 = PConvUnet(vgg_weights=None, inference_only=True)
#model4 = PConvUnet(vgg_weights=None, inference_only=True)
#model5 = PConvUnet(vgg_weights=None, inference_only=True)
#model6 = PConvUnet(vgg_weights=None, inference_only=True)

# Other checkpoints previously compared (p1t172, p1t159, p1t171, p1t55,
# p1t44, p1t36, p1t16) are kept disabled.
model1.load(
    r"D:\PycharmProjects2\PConv-Keras\data\logs\Thanka_phase1\p1t169\weights.13-0.98.h5",
    train_bn=False)

models = []
models.append(model1)
# models.append(model2)
#models.append(model3)
#models.append(model4)
#models.append(model5)
#models.append(model6)

# Per-model metric accumulator (4 slots)
mse = [0, 0, 0, 0]
def inpainting(quiz, debug=True):
    """Inpaint the green-marked region of *quiz* with a PConvUnet.

    Detects pure-green marker pixels, builds an inverted (1 = keep) and
    eroded mask, runs the pretrained model, and pastes the prediction back
    over the marked pixels only.

    Args:
        quiz: object exposing .raw_image (H x W x 3 array) and .bbox.
        debug: when True, dump intermediate images under temp/.

    Returns:
        The inpainted image (same layout as quiz.raw_image).
    """
    print('Step 2: 使用你的模型,補全影像\n')
    print('...')

    raw_image = quiz.raw_image.copy()
    bbox = quiz.bbox

    masked = raw_image
    # Marker pixels: strong green channel, almost no red/blue
    to_filling = (masked[:, :, 1] > 245) & (masked[:, :, 0] < 10) & (
        masked[:, :, 2] < 10)
    # NOTE(review): mask size is hard-coded to 256x256 — assumes the quiz
    # image is exactly that size; confirm against the caller.
    mask_roi = np.zeros((256, 256, 3), np.uint8)
    mask_roi[to_filling] = 1
    mask = 1. - mask_roi  # framework convention: 1 = keep, 0 = hole

    # Grow the hole a little by eroding the keep-mask
    erosion_size = 11
    erosion_type = 0  # cv2.MORPH_RECT
    # NOTE(review): val_type is never passed to getStructuringElement —
    # confirm whether an elliptical element was intended.
    val_type = cv2.MORPH_ELLIPSE
    element = cv2.getStructuringElement(
        erosion_type, (2 * erosion_size + 1, 2 * erosion_size + 1),
        (erosion_size, erosion_size))
    erosion_mask = cv2.erode(mask, element)

    # Model expects 4-D batches
    masked_na = np.array([masked])
    mask_na = np.array([erosion_mask])

    model = PConvUnet(
        weight_filepath='{}/PConv-Keras/data/model/'.format(path_prefix))
    model.load("{}/PConv-Keras/data/model/12_weights_2018-09-26-14-05-28.h5".
               format(path_prefix))
    pred_img_set = model.predict([masked_na, mask_na])
    pred_img = 255. * pred_img_set[0, :, :, :]

    gen_image = masked.copy()
    gen_image[to_filling] = pred_img[to_filling]

    # BUG FIX: the original reassigned `debug = True` here, which made the
    # debug=False argument impossible to honor.
    if debug:
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', category=UserWarning)
            os.makedirs('temp', exist_ok=True)
            cv2.imwrite("temp/raw_image.jpg", raw_image)
            cv2.imwrite("temp/mask.jpg", mask[:, :, 0])
            cv2.imwrite("temp/gen_image.jpg", gen_image)
    print('=====================')
    return gen_image
# Fragment: build masked crops, load the pretrained ImageNet PConvUnet and
# start chunked inference.
# NOTE(review): `im`, `crop`, `ax`, `imgs` and `masks` are created by a
# surrounding loop not shown here; the final loop body is truncated.
im = np.array(im) / 255
mask_gen = MaskGenerator(*crop)
mask = mask_gen._generate_mask()
im[mask == 0] = 1  # paint hole pixels white

# Store for prediction
imgs.append(im)
masks.append(mask)

# Show image
ax.imshow(im)
ax.set_title("{}x{}".format(crop[0], crop[1]))

from libs.pconv_model import PConvUnet

model = PConvUnet(vgg_weights=None, inference_only=True)
model.load(r"/content/pconv_imagenet.26-1.07.h5", train_bn=False)
chunker = ImageChunker(512, 512, 30)


def plot_images(images, s=5):
    # Display the given images side by side, s inches per panel.
    _, axes = plt.subplots(1, len(images), figsize=(s * len(images), s))
    if len(images) == 1:
        axes = [axes]
    for img, ax in zip(images, axes):
        ax.imshow(img)
    plt.show()


for img, mask in zip(imgs, masks):
    print("Image with size: {}".format(img.shape))
model1 = PConvUnet(vgg_weights=None, inference_only=True) # model2 = PConvUnet(vgg_weights=None, inference_only=True) # model3 = PConvUnet(vgg_weights=None, inference_only=True) # model4 = PConvUnet(vgg_weights=None, inference_only=True) #model5 = PConvUnet(vgg_weights=None, inference_only=True) #model6 = PConvUnet(vgg_weights=None, inference_only=True) # model1.load(r"D:\PycharmProjects2\PConv-Keras\data\logs\Thanka_phase1\p1t147\weights.01-0.65.h5", train_bn=False) # model1.load(r"D:\PycharmProjects2\PConv-Keras\data\logs\Thanka_phase1\p1t165\weights.46-1.23.h5", train_bn=False) # model1.load(r"D:\PycharmProjects2\PConv-Keras\data\logs\Thanka_phase1\p1t153\weights.52-0.31.h5", train_bn=False) # model1.load(r"D:\PycharmProjects2\PConv-Keras\data\logs\Thanka_phase1\p1t170\weights.26-1.19.h5", train_bn=False) # model1.load(r"D:\PycharmProjects2\PConv-Keras\data\logs\Thanka_phase1\p1t172\weights.15-1.18.h5", train_bn=False) # model3.load(r"D:\PycharmProjects2\PConv-Keras\data\logs\Thanka_phase1\p1t160\weights.12-1.53.h5", train_bn=False) # model1.load(r"D:\PycharmProjects2\PConv-Keras\data\logs\Thanka_phase1\p1t152\weights.31-1.20.h5", train_bn=False) model1.load( r"D:\PycharmProjects2\PConv-Keras\data\logs\Thanka_phase1\p1t34\weights.07-1.29.h5", train_bn=False) #model6.load(r"D:\PycharmProjects2\PConv-Keras\data\logs\Thanka_phase1\p1t36\weights.59-0.30.h5", train_bn=False) # model1.load(r"D:\PycharmProjects2\PConv-Keras\data\logs\Thanka_phase1\p1t16\weights.11-1.15.h5", train_bn=False) models = [] models.append(model1) # models.append(model2) # models.append(model3) # models.append(model4) #models.append(model5) #models.append(model6) mse = [0, 0, 0, 0] psnr = [0, 0, 0, 0] ssim = [0, 0, 0, 0] image_num = 0
# Clear current output and display test images for i in range(len(ori)): _, axes = plt.subplots(1, 3, figsize=(20, 5)) axes[0].imshow(masked[i, :, :, :]) axes[1].imshow(pred_img[i, :, :, :] * 1.) axes[2].imshow(ori[i, :, :, :]) axes[0].set_title('Masked Image') axes[1].set_title('Predicted Image') axes[2].set_title('Original Image') plt.savefig(r'data/custom_test_samples/img_{}_{}.png'.format(i, pred_time)) plt.close() # Instantiate the model model = PConvUnet() model.load('./data/logs/382_weights_2018-10-16-23-26-19.h5') n = 0 for (masked, mask), ori in tqdm(test_generator): # Run predictions for this batch of images pred_img = model.predict([masked, mask]) pred_time = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S') # Clear current output and display test images for i in range(len(ori)): _, axes = plt.subplots(1, 2, figsize=(10, 5)) axes[0].imshow(masked[i,:,:,:]) axes[1].imshow(pred_img[i,:,:,:] * 1.) axes[0].set_title('Masked Image') axes[1].set_title('Predicted Image') axes[0].xaxis.set_major_formatter(NullFormatter()) axes[0].yaxis.set_major_formatter(NullFormatter())
class Paint(object):
    """Tkinter GUI for interactive image inpainting.

    Lets the user load an image, mark regions with rectangles or free-hand
    strokes, and shows the outputs of four PConvUnet checkpoints on four
    half-size canvases.
    """

    MARKER_COLOR = 'white'  # color used to draw the mask on the canvas

    def __init__(self, config):
        self.config = config
        self.root = Tk()
        self.root.title("Image Inpainting (V1.0)")

        # Main drawing canvas plus four half-size output canvases
        self.c = Canvas(self.root, bg='white',
                        width=config.img_shapes[1] + 4,
                        height=config.img_shapes[0])
        self.c.grid(row=0, column=0, rowspan=6)
        self.out1 = Canvas(self.root, bg='white',
                           width=config.img_shapes[1] / 2 + 4,
                           height=config.img_shapes[0] / 2)
        self.out1.grid(row=0, column=1, rowspan=3)
        self.out2 = Canvas(self.root, bg='white',
                           width=config.img_shapes[1] / 2 + 4,
                           height=config.img_shapes[0] / 2)
        self.out2.grid(row=0, column=2, rowspan=3)
        self.out3 = Canvas(self.root, bg='white',
                           width=config.img_shapes[1] / 2 + 4,
                           height=config.img_shapes[0] / 2)
        self.out3.grid(row=3, column=1, rowspan=3)
        self.out4 = Canvas(self.root, bg='white',
                           width=config.img_shapes[1] / 2 + 4,
                           height=config.img_shapes[0] / 2)
        self.out4.grid(row=3, column=2, rowspan=3)

        # Tool buttons
        self.load_button = Button(self.root, text='load', command=self.load,
                                  width=12, height=3)
        self.load_button.grid(row=0, column=3)
        self.rect_button = Button(self.root, text='rectangle',
                                  command=self.use_rect, width=12, height=3)
        self.rect_button.grid(row=1, column=3)
        self.poly_button = Button(self.root, text='stroke',
                                  command=self.use_poly, width=12, height=3)
        self.poly_button.grid(row=2, column=3)
        self.fill_button = Button(self.root, text='fill', command=self.fill,
                                  width=12, height=3)
        self.fill_button.grid(row=3, column=3)
        self.clear_button = Button(self.root, text='clear',
                                   command=self.clear, width=12, height=3)
        self.clear_button.grid(row=4, column=3)
        self.save_button = Button(self.root, text="save", command=self.save,
                                  width=12, height=3)
        self.save_button.grid(row=5, column=3)

        self.filename = None
        self.setup()
        self.root.mainloop()

    def setup(self):
        """Initialize painting state, the four models, and the TF session."""
        self.predicted_img1 = None
        self.predicted_img2 = None
        self.predicted_img3 = None
        self.predicted_img4 = None
        self.old_x = None
        self.old_y = None
        self.start_x = None
        self.start_y = None
        self.end_x = None
        self.end_y = None
        self.eraser_on = False
        self.active_button = self.rect_button
        self.isPainting = False
        self.c.bind('<B1-Motion>', self.paint)
        self.c.bind('<ButtonRelease-1>', self.reset)
        self.c.bind('<Button-1>', self.beginPaint)
        self.c.bind('<Enter>', self.icon2pen)
        self.c.bind('<Leave>', self.icon2mice)
        self.mode = 'poly'
        self.rect_buf = None
        self.line_buf = None
        assert self.mode in ['rect', 'poly']
        self.paint_color = self.MARKER_COLOR
        self.mask_candidate = []
        self.rect_candidate = []
        self.im_h = None
        self.im_w = None
        self.mask = None
        # GMCNN output; stays None while its session run is disabled below
        self.result = None
        self.blank = None
        self.line_width = 24

        # wny: GMCNN model object is constructed but its evaluation is
        # disabled; only the four PConvUnet checkpoints are actually run.
        self.model = GMCNNModel()
        self.model1 = PConvUnet()
        self.model2 = PConvUnet()
        self.model3 = PConvUnet()
        self.model4 = PConvUnet()
        self.reuse = False
        sess_config = tf.ConfigProto()
        sess_config.gpu_options.allow_growth = False
        self.sess = tf.Session(config=sess_config)
        self.input_image_tf = tf.placeholder(
            dtype=tf.float32,
            shape=[1, self.config.img_shapes[0], self.config.img_shapes[1], 3])
        self.input_mask_tf = tf.placeholder(
            dtype=tf.float32,
            shape=[1, self.config.img_shapes[0], self.config.img_shapes[1], 1])

        # Four phase-1 checkpoints, batch norm frozen for inference
        self.model1.load(
            r"D:\PycharmProjects2\PConv-Keras\data\logs\Thanka_phase1\p1t152\weights.31-1.20.h5",
            train_bn=False)
        self.model2.load(
            r"D:\PycharmProjects2\PConv-Keras\data\logs\Thanka_phase1\p1t167\weights.40-1.13.h5",
            train_bn=False)
        self.model3.load(
            r"D:\PycharmProjects2\PConv-Keras\data\logs\Thanka_phase1\p1t168\weights.32-0.29.h5",
            train_bn=False)
        self.model4.load(
            r"D:\PycharmProjects2\PConv-Keras\data\logs\Thanka_phase1\p1t169\weights.13-0.98.h5",
            train_bn=False)
        print('Model loaded.')

    def checkResp(self):
        # rectangles on canvas and recorded mask boxes must stay in sync
        assert len(self.mask_candidate) == len(self.rect_candidate)

    def load(self):
        """Ask for an image file and show it on the main canvas."""
        self.filename = tkFileDialog.askopenfilename(
            initialdir='./imgs', title="Select file",
            filetypes=(("all files", "*.*"), ("png files", "*.png"),
                       ("jpg files", "*.jpg")))
        self.filename_ = self.filename.split('/')[-1][:-4]
        self.filepath = '/'.join(self.filename.split('/')[:-1])
        print(self.filename_, self.filepath)
        try:
            photo = Image.open(self.filename)
            self.image = cv2.imread(self.filename)
        except:
            print('do not load image')
        else:
            self.im_w, self.im_h = photo.size
            self.mask = np.zeros((self.im_h, self.im_w, 3)).astype(np.uint8)
            print(photo.size)
            self.displayPhoto = photo
            self.displayPhoto = self.displayPhoto.resize(
                (self.im_w, self.im_h))
            self.draw = ImageDraw.Draw(self.displayPhoto)
            self.photo_tk = ImageTk.PhotoImage(image=self.displayPhoto)
            self.c.create_image(0, 0, image=self.photo_tk, anchor=NW)
            self.rect_candidate.clear()
            self.mask_candidate.clear()
            # Placeholder shown on the four output canvases until fill()
            if self.blank is None:
                self.blank = Image.open('imgs/blank.png')
            self.blank = self.blank.resize(
                (int(self.im_w / 2), int(self.im_h / 2)))
            self.blank_tk = ImageTk.PhotoImage(image=self.blank)
            self.out1.create_image(0, 0, image=self.blank_tk, anchor=NW)
            self.out2.create_image(0, 0, image=self.blank_tk, anchor=NW)
            self.out3.create_image(0, 0, image=self.blank_tk, anchor=NW)
            self.out4.create_image(0, 0, image=self.blank_tk, anchor=NW)

    def save(self):
        """Write the mask and the four predicted results next to the input."""
        if self.mode == 'rect':
            self.mask[:, :, :] = 0
            for rect in self.mask_candidate:
                self.mask[rect[1]:rect[3], rect[0]:rect[2], :] = 1
        cv2.imwrite(
            os.path.join(self.filepath, self.filename_ + '_mask.png'),
            (1 - self.mask) * 255)  #wny
        # BUG FIX: self.result is produced only by the (disabled) GMCNN
        # session run; guard so saving does not crash on None.
        if self.result is not None:
            cv2.imwrite(
                os.path.join(self.filepath,
                             self.filename_ + '_gm_result.png'),
                self.result[0][:, :, ::-1])
        cv2.imwrite(
            os.path.join(self.filepath, self.filename_ + '_result1.png'),
            cv2.cvtColor(self.predicted_img1, cv2.COLOR_BGR2RGB))
        cv2.imwrite(
            os.path.join(self.filepath, self.filename_ + '_result2.png'),
            cv2.cvtColor(self.predicted_img2, cv2.COLOR_BGR2RGB))
        cv2.imwrite(
            os.path.join(self.filepath, self.filename_ + '_result3.png'),
            cv2.cvtColor(self.predicted_img3, cv2.COLOR_BGR2RGB))
        cv2.imwrite(
            os.path.join(self.filepath, self.filename_ + '_result4.png'),
            cv2.cvtColor(self.predicted_img4, cv2.COLOR_BGR2RGB))

    def fill(self):
        """Run the four inpainting models on the current image + mask."""
        if self.mode == 'rect':
            for rect in self.mask_candidate:
                self.mask[rect[1]:rect[3], rect[0]:rect[2], :] = 1

        # wny: broadcast the single-layer mask into all three channels
        mask_channel = np.zeros_like(self.image)
        mask_channel[:, :, 0] = self.mask[:, :, 0]
        mask_channel[:, :, 1] = self.mask[:, :, 0]
        mask_channel[:, :, 2] = self.mask[:, :, 0]
        self.mask = mask_channel

        image_temp = Image.open(self.filename)
        image_temp = np.array(image_temp) / 255
        # wny: add mask to input image (hole pixels painted white)
        image_temp[self.mask == 1] = 1
        image = np.expand_dims(image_temp, 0)
        mask = np.expand_dims(1 - self.mask, 0)
        print(image.shape)
        print(mask.shape)
        image_temp = Image.fromarray(np.uint8(image_temp * 255))
        image_temp.save('./imgs/current_masked_input.png')
        cv2.imwrite('./imgs/current_mask.png', (1 - self.mask) * 255)

        # output the predicted image from each checkpoint
        self.predicted_img1 = (self.model1.predict([image, mask])[0]) * 255
        self.predicted_img2 = (self.model2.predict([image, mask])[0]) * 255
        self.predicted_img3 = (self.model3.predict([image, mask])[0]) * 255
        self.predicted_img4 = (self.model4.predict([image, mask])[0]) * 255

        # BUG FIX: guard the GMCNN result (always None while its session
        # run is commented out) instead of crashing on self.result[0].
        if self.result is not None:
            cv2.imwrite('./imgs/tmp.png', self.result[0][:, :, ::-1])
        cv2.imwrite('./imgs/current_result1.png',
                    cv2.cvtColor(self.predicted_img1, cv2.COLOR_BGR2RGB))
        cv2.imwrite('./imgs/current_result2.png',
                    cv2.cvtColor(self.predicted_img2, cv2.COLOR_BGR2RGB))
        cv2.imwrite('./imgs/current_result3.png',
                    cv2.cvtColor(self.predicted_img3, cv2.COLOR_BGR2RGB))
        cv2.imwrite('./imgs/current_result4.png',
                    cv2.cvtColor(self.predicted_img4, cv2.COLOR_BGR2RGB))

        photo1 = Image.open('./imgs/current_result1.png')
        photo2 = Image.open('./imgs/current_result2.png')
        photo3 = Image.open('./imgs/current_result3.png')
        photo4 = Image.open('./imgs/current_result4.png')
        half = (int(self.im_w / 2), int(self.im_h / 2))
        # BUG FIX: results 3 and 4 were resized from result 2 (copy-paste
        # error), so canvases out3/out4 showed the wrong prediction.
        self.displayPhotoResult1 = photo1.resize(half)
        self.displayPhotoResult2 = photo2.resize(half)
        self.displayPhotoResult3 = photo3.resize(half)
        self.displayPhotoResult4 = photo4.resize(half)
        self.photo_tk_result1 = ImageTk.PhotoImage(
            image=self.displayPhotoResult1)
        self.photo_tk_result2 = ImageTk.PhotoImage(
            image=self.displayPhotoResult2)
        self.photo_tk_result3 = ImageTk.PhotoImage(
            image=self.displayPhotoResult3)
        self.photo_tk_result4 = ImageTk.PhotoImage(
            image=self.displayPhotoResult4)
        self.out1.create_image(0, 0, image=self.photo_tk_result1, anchor=NW)
        self.out2.create_image(0, 0, image=self.photo_tk_result2, anchor=NW)
        self.out3.create_image(0, 0, image=self.photo_tk_result3, anchor=NW)
        self.out4.create_image(0, 0, image=self.photo_tk_result4, anchor=NW)
        return

    def use_rect(self):
        self.activate_button(self.rect_button)
        self.mode = 'rect'

    def use_poly(self):
        self.activate_button(self.poly_button)
        self.mode = 'poly'

    def revoke(self):
        """Undo the most recently drawn rectangle."""
        if len(self.rect_candidate) > 0:
            self.c.delete(self.rect_candidate[-1])
            self.rect_candidate.remove(self.rect_candidate[-1])
            self.mask_candidate.remove(self.mask_candidate[-1])
        self.checkResp()

    def clear(self):
        """Reset the mask and redraw the untouched source image."""
        # NOTE(review): clear() allocates a 1-channel mask while load() and
        # fill() use 3 channels — confirm downstream indexing tolerates it.
        self.mask = np.zeros((self.im_h, self.im_w, 1)).astype(np.uint8)
        if self.mode == 'poly':
            photo = Image.open(self.filename)
            self.image = cv2.imread(self.filename)
            self.displayPhoto = photo
            self.displayPhoto = self.displayPhoto.resize(
                (self.im_w, self.im_h))
            self.draw = ImageDraw.Draw(self.displayPhoto)
            self.photo_tk = ImageTk.PhotoImage(image=self.displayPhoto)
            self.c.create_image(0, 0, image=self.photo_tk, anchor=NW)
        else:
            if self.rect_candidate is None or len(self.rect_candidate) == 0:
                return
            for item in self.rect_candidate:
                self.c.delete(item)
            self.rect_candidate.clear()
            self.mask_candidate.clear()
            self.checkResp()

    #TODO: reset canvas
    #TODO: undo and redo
    #TODO: draw triangle, rectangle, oval, text

    def activate_button(self, some_button, eraser_mode=False):
        # visually depress the newly active tool button
        self.active_button.config(relief=RAISED)
        some_button.config(relief=SUNKEN)
        self.active_button = some_button
        self.eraser_on = eraser_mode

    def beginPaint(self, event):
        self.start_x = event.x
        self.start_y = event.y
        self.isPainting = True

    def paint(self, event):
        """Mouse-drag handler: rubber-band a rectangle or draw a stroke."""
        if self.start_x and self.start_y and self.mode == 'rect':
            self.end_x = max(min(event.x, self.im_w), 0)
            self.end_y = max(min(event.y, self.im_h), 0)
            rect = self.c.create_rectangle(self.start_x, self.start_y,
                                           self.end_x, self.end_y,
                                           fill=self.paint_color)
            if self.rect_buf is not None:
                self.c.delete(self.rect_buf)  # replace the rubber-band rect
            self.rect_buf = rect
        elif self.old_x and self.old_y and self.mode == 'poly':
            line = self.c.create_line(self.old_x, self.old_y, event.x,
                                      event.y, width=self.line_width,
                                      fill=self.paint_color, capstyle=ROUND,
                                      smooth=True, splinesteps=36)
            # mirror the stroke into the mask array
            cv2.line(self.mask, (self.old_x, self.old_y),
                     (event.x, event.y), (1), self.line_width)
        self.old_x = event.x
        self.old_y = event.y

    def reset(self, event):
        """Mouse-release handler: finalize the rectangle selection."""
        self.old_x, self.old_y = None, None
        if self.mode == 'rect':
            self.isPainting = False
            rect = self.c.create_rectangle(self.start_x, self.start_y,
                                           self.end_x, self.end_y,
                                           fill=self.paint_color)
            if self.rect_buf is not None:
                self.c.delete(self.rect_buf)
            self.rect_buf = None
            self.rect_candidate.append(rect)
            # normalize to (upper-left, lower-right) corners
            x1, y1, x2, y2 = min(self.start_x, self.end_x), \
                min(self.start_y, self.end_y), \
                max(self.start_x, self.end_x), \
                max(self.start_y, self.end_y)
            self.mask_candidate.append((x1, y1, x2, y2))
            print(self.mask_candidate[-1])

    def icon2pen(self, event):
        return

    def icon2mice(self, event):
        return
# Fragment: closing arguments of a phase-1 fit call, then phase-2 setup
# (batch normalization frozen, reduced lr) and the start of another call.
# NOTE(review): both the opening of the first call and the closing of the
# last are outside this fragment — kept verbatim.
        ),
        TQDMNotebookCallback()
    ]
)

# ## Phase 2 - without batch normalization

# In[ ]:

# Load weights from previous run
model = PConvUnet(vgg_weights='./data/logs/pytorch_vgg16.h5')
model.load(
    r"C:\Users\Mathias Felix Gruber\Documents\GitHub\PConv-Keras\data\logs\imagenet_phase1\weights.23-1.18.h5",
    train_bn=False,
    lr=0.00005
)

# In[ ]:

# Run training for certain amount of epochs
model.fit_generator(
    train_generator,
    steps_per_epoch=10000,
    validation_data=val_generator,
    validation_steps=1000,
    epochs=50,
    verbose=0,
axes[1].set_title('Predicted Image') axes[2].set_title('Original Image') plt.savefig(os.path.join(path, '/img_{}_{}.png'.format(i, pred_time))) plt.close() # Load the model if args.vgg_path: model = PConvUnet(vgg_weights=args.vgg_path) else: model = PConvUnet() # Loading of checkpoint if args.checkpoint: if args.stage == 'train': model.load(args.checkpoint) elif args.stage == 'finetune': model.load(args.checkpoint, train_bn=False, lr=0.00005) # Fit model model.fit_generator( train_generator, steps_per_epoch=10000, validation_data=val_generator, validation_steps=1000, epochs=args.epochs, verbose=0, callbacks=[ TensorBoard( log_dir=os.path.join(args.log_path, args.name+'_'+args.stage), write_graph=False
# Inference on one image with a hand-prepared mask file.
# NOTE(review): `im` (float RGB image in [0, 1]), cv2 and np are prepared
# by surrounding code not shown in this fragment.
# use following 2 line to load a single mask:
mask = cv2.imread(r"C:\Users\dell\Desktop\paper2\\figure\Fig6\\r2_mask.png")
mask = mask / 255

# This is to fuse mask to input image,and get a masked image
im[mask == 0] = 1

# Create a model instance and import pre trained image_net weights provided by the author.
from libs.pconv_model import PConvUnet

model = PConvUnet(vgg_weights=None, inference_only=True)
# Other checkpoints previously compared (pconv_imagenet, p1t39, p1t34,
# p1t159) are kept disabled.
model.load(
    r"D:\PycharmProjects2\PConv-Keras\data\logs\Thanka_phase1\p1t152\weights.31-1.20.h5",
    train_bn=False)

#output the predicted image
predicted_img = model.predict(
    [np.expand_dims(im, 0), np.expand_dims(mask, 0)])[0]

# save result
result_name = r"C:\Users\dell\Desktop\paper2\\figure\Fig6\\r2_mapconv.png"
# BUG FIX: cv2.imwrite expects an 8-bit image; the float prediction in
# [0, 1] is scaled and cast before the channel swap.
cv2.imwrite(result_name,
            cv2.cvtColor((predicted_img * 255).astype(np.uint8),
                         cv2.COLOR_BGR2RGB))
# Phase transition: persist phase-1 loss history, then set up phase 2
# (batch normalization frozen, lr = 5e-5) for grayscale training.
# NOTE(review): PATH, GPUS, history_dictionary_*, ModelCheckpoint,
# EarlyStopping and my_callback come from surrounding code not shown here.
np.save(FOLDER + 'loss.npy', history_dictionary_loss)
np.save(FOLDER + 'val_loss.npy', history_dictionary_val_loss)
print('Ending phase 1')
## ------------- End phase 1 - with batch normalization -----------------------

## ---------- Phase 2 - batch normalization off - lr 5e-5 ---------------------
print('Starting phase 2')

# output path
FOLDER = PATH + 'phase2_GRAY/'

## ********************* Model definition **************************************
model = PConvUnet(vgg_weights=PATH + 'vgg_grayscale.h5', gpus=GPUS)
model.load(
    FOLDER + 'weights.h5',
    train_bn=False,
    lr=0.00005)

## ************************* Callbacks *****************************************
# Keep only the best weights (by validation loss) on disk
checkpoint = ModelCheckpoint(
    FOLDER + 'weights.h5',
    monitor='val_loss',
    save_best_only=True,
    save_weights_only=True)
early_stopping = EarlyStopping(monitor='val_loss',
                               min_delta=0,
                               patience=10,
                               restore_best_weights=True)
history = my_callback.Histories()
## ============================================================================ PATH = args.root save_path = PATH + 'outputs/' MASK_TEST_DIR = PATH + 'dataset/mask/full/test/' IM_TEST_DIR = PATH + 'dataset/gt/full/test/' ## ============================================================================ ## Data generator ## ============================================================================ test_datagen = AugmentingDataGenerator() test_generator = test_datagen.flow_from_directory(IM_TEST_DIR, MASK_TEST_DIR, target_size=(256, 256), color_mode='grayscale', batch_size=BATCH_SIZE, seed=42) ## ============================================================================ ## Load model ## ============================================================================ model = PConvUnet(vgg_weights=None, inference_only=True) model.load(PATH + 'phase2_gray/weights.h5', train_bn=False) ## ============================================================================ ## Predict ## ============================================================================ for n in range(steps_test): test_data = next(test_generator) (masked, mask), ori = test_data plot_callback(model, n, save_path, masked, mask, ori)
# Interactive inpainting demo: load the image given on the command line,
# let the user sketch a mask with the Sketcher tool, and save the sketch.
import cv2
import numpy as np
from Sketcher import Sketcher
from libs.util import MaskGenerator, ImageChunker
from libs.pconv_model import PConvUnet
import sys
from copy import deepcopy

print('load model...')
model = PConvUnet(vgg_weights=None, inference_only=True)
model.load('pconv_imagenet.h5', train_bn=False)
# model.summary()

# Image path comes from the first CLI argument
img = cv2.imread(sys.argv[1], cv2.IMREAD_COLOR)
img_masked = img.copy()
mask = np.zeros(img.shape[:2], np.uint8)

# Sketcher paints white (255) into both the display copy and the mask
sketcher = Sketcher('image', [img_masked, mask],
                    lambda: ((255, 255, 255), 255))
chunker = ImageChunker(512, 512, 30)

cv2.imwrite('Sketcher_img_masked.png', img_masked)
cv2.imwrite('Sketcher_mask.png', mask)

# Event loop (body truncated at the end of this fragment)
while True:
    key = cv2.waitKey()
    if key == ord('q'):  # quit
axes[2].set_title('Original Image') plt.savefig( os.path.join(path, '/img_{}_{}.png'.format(i, pred_time))) plt.close() # Load the model if args.vgg_path: model = PConvUnet(vgg_weights=args.vgg_path) else: model = PConvUnet() # Loading of checkpoint if args.checkpoint: if args.stage == 'train': model.load(args.checkpoint) elif args.stage == 'finetune': model.load(args.checkpoint, lr=0.00005) # Fit model model.fit_generator( train_generator, steps_per_epoch=10000, validation_data=val_generator, validation_steps=1000, epochs=100, verbose=0, callbacks=[ TensorBoard(log_dir=os.path.join(args.log_path, args.name + '_phase1'), write_graph=False),