def img_analysis(original_url, mask_ori_url):
    print('original_url ::: ', original_url)
    print('mask_ori_url ::: ', mask_ori_url)

    # Input image check
    ori = img_load(original_url)
    mask = img_load(mask_ori_url)
    masked = img_load(original_url)

    mask = np.where(mask == 0, 1, 0)  # background == white / mask == black
    masked[mask == 0] = 1

    # Add the batch dimension expected by the model
    model_input_masked = masked[np.newaxis, ...]
    model_input_mask = mask[np.newaxis, ...]
    print(model_input_masked.shape, model_input_mask.shape)

    # Model
    trained_path = '/home/jin/flask/model/pconv_imagenet.26-1.07.h5'
    # vgg_weights = '/home/jin/flask/model/pytorch_to_keras_vgg16.h5'
    vgg_weights = '/home/jin/flask/model/weights.48-0.53.h5'
    model = PConvUnet(vgg_weights=vgg_weights, inference_only=True)
    model.load(trained_path, train_bn=False)

    pred_imgs = model.predict([model_input_masked, model_input_mask])
    return pred_imgs[0], masked
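# Note: `img_load` is not defined in this snippet. A minimal sketch of what it is
# assumed to do (load an image, resize to the 512x512 input that the ImageNet
# checkpoint expects, and scale to [0, 1] floats). The exact resizing and
# normalization used by the author may differ; treat this helper as hypothetical.
import numpy as np
from PIL import Image


def img_load(path, size=(512, 512)):
    """Hypothetical helper: load an image as a float32 RGB array in [0, 1]."""
    img = Image.open(path).convert('RGB').resize(size)
    return np.array(img, dtype=np.float32) / 255.0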
class InPantingEngine(object):

    def __init__(self, model_path=MODAL_PATH):
        self.model_path = model_path
        tf.keras.backend.set_learning_phase(0)  # Ignore dropout at inference
        self.model = PConvUnet(vgg_weights=None, inference_only=True)
        self.model.load(self.model_path)

    def preprocess(self, image, mask):
        # _A is an array-conversion helper defined elsewhere in this module (e.g. np.asarray)
        im = _A(image, dtype=np.float32) / 255.0
        msk = _A(mask, dtype=np.float32) / 255.0
        msk = 1.0 - msk   # invert: 1 = valid pixel, 0 = hole
        im[msk == 0] = 1  # white out the hole region in the input image
        return im, msk

    def __call__(self, image, mask):
        im, msk = self.preprocess(image, mask)
        pred_imgs = self.model.predict([[im], [msk]])[0]
        pred_imgs = (pred_imgs * 255).astype(np.uint8)
        return pred_imgs

    def dump_to_pb(self):
        import shutil

        # The export path contains the name and the version of the model.
        # Fetch the Keras session and save the model; the signature definition
        # is defined by the input and output tensors and stored with the
        # default serving key.
        try:
            shutil.rmtree(EXPORT_PATH)
            print("Deleted previous export path", EXPORT_PATH)
        except OSError:
            pass

        with tf.keras.backend.get_session() as sess:
            sess.run(tf.global_variables_initializer())
            tf.saved_model.simple_save(
                sess,
                EXPORT_PATH,
                inputs={
                    'inputs_img': self.model.model.inputs[0],
                    'inputs_mask': self.model.model.inputs[1]
                },
                outputs={t.name: t for t in self.model.model.outputs})

    def dump_to_estimator(self):
        self.model_dir = str(ROOT / 'exported_models' / 'estimator2')
        tf.io.write_graph(self.model.model.output.graph, self.model_dir,
                          'saved_model.pbtxt', as_text=True)
        tf.io.write_graph(self.model.model.output.graph, self.model_dir,
                          'saved_model.pb', as_text=False)
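# Minimal usage sketch for the engine above (assumes MODAL_PATH points at a
# PConvUnet .h5 checkpoint and that the image/mask are uint8 arrays of the size
# the model was trained on; the variable names here are illustrative only):
#
#   engine = InPantingEngine()
#   # mask: 255 over the region to remove, 0 elsewhere (it is inverted in preprocess)
#   inpainted = engine(image_rgb, mask)            # returns a uint8 RGB array
#   Image.fromarray(inpainted).save('inpainted.png')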
            com_image[i, j, 0] = pred[i, j, 0]
            com_image[i, j, 1] = pred[i, j, 1]
            com_image[i, j, 2] = pred[i, j, 2]
    return com_image


weight = options.weight
model = PConvUnet(vgg_weights=None, inference_only=True)
model.load(weight, train_bn=False)

imid = int(options.id)
masks = mpimg.imread('./datasets/mask/data/' + str(imid) + 'pict.png')[:, :, :3]
Planck_Image = mpimg.imread('./datasets/test/data/' + str(imid) + 'pict.png')[:, :, :3]

CosmoVae = complete_image(
    model.predict([np.expand_dims(Planck_Image, 0),
                   np.expand_dims(masks, 0)])[0],
    Planck_Image, masks)

masked = deepcopy(Planck_Image)
masked[masks == 0] = 1

if not os.path.exists('./datasets/predicted/'):
    os.makedirs('./datasets/predicted/')
fig = plt.imsave('./datasets/predicted/' + str(imid) + 'pred.png', CosmoVae)

#_, axes = plt.subplots(1, 3, figsize=(20, 5))
#axes[0].imshow(masks)
#axes[1].imshow(masked)
#axes[2].imshow(CosmoVae)
#axes[0].set_title('Mask')
#axes[1].set_title('Masked Image')
#axes[2].set_title('Predicted Image')
#
#plt.savefig(r'dataset/predicted/predicted_{imid}_img.png')
input_img = img_masked.copy()
input_img = input_img.astype(np.float32) / 255.

# The framework expects mask == 1 for valid pixels, so invert the hole mask
# and expand it to three channels.
input_mask = cv2.bitwise_not(mask)
cv2.imshow('input_mask', input_mask)
input_mask = input_mask.astype(np.float32) / 255.
input_mask = np.repeat(np.expand_dims(input_mask, axis=-1), repeats=3, axis=-1)
# cv2.imshow('input_img', input_img)
# cv2.imshow('input_mask', input_mask)

print('processing...')

# Chunk the (possibly non-512x512) image into model-sized tiles
chunked_imgs = chunker.dimension_preprocess(deepcopy(input_img))
chunked_masks = chunker.dimension_preprocess(deepcopy(input_mask))
# for i, im in enumerate(chunked_imgs):
#     cv2.imshow('im %s' % i, im)
#     cv2.imshow('mk %s' % i, chunked_masks[i])

pred_imgs = model.predict([chunked_imgs, chunked_masks])
result_img = chunker.dimension_postprocess(pred_imgs, input_img)

print('completed!')
cv2.imshow('result', result_img)
cv2.waitKey(0)  # wait for a key press so the result window is actually rendered

result_img = cv2.convertScaleAbs(result_img, alpha=255.0)  # back to uint8 [0, 255]
cv2.imwrite('result.png', result_img)
cv2.destroyAllWindows()
import os
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image

if os.path.basename(os.getcwd()) != 'PConv-Keras':
    os.chdir('..')
from libs.pconv_model import PConvUnet

# Both msk and img should be of shape (512, 512, 3) exactly, or else use ImageChunker
img = '/image.jpg'  # Path of image and mask
msk = '/mask.jpg'

im = Image.open(img)
mk = Image.open(msk)
mk = np.array(mk) / 255
im = np.array(im) / 255
# The model takes 4D input
mk = mk.reshape(-1, 512, 512, 3)
im = im.reshape(-1, 512, 512, 3)

model = PConvUnet(vgg_weights=None, inference_only=True)
# See more about the weights in the README
model.load(r"/content/PConv-Keras/pconv_imagenet.26-1.07.h5", train_bn=False)

pred_imgs = model.predict([im, mk])


def plot_images(images, s=5):
    _, axes = plt.subplots(1, len(images), figsize=(s * len(images), s))
    if len(images) == 1:
        axes = [axes]
    for img, ax in zip(images, axes):
        ax.imshow(img)
    plt.show()


plot_images(pred_imgs)

import cv2
# predict() returns a batch of float images in [0, 1]; convert the first one
# to uint8 BGR before writing it with OpenCV.
cv2.imwrite('inpainted.jpg',
            cv2.cvtColor((pred_imgs[0] * 255).astype(np.uint8), cv2.COLOR_RGB2BGR))
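# If the inputs are not exactly 512x512, the repo's ImageChunker (used in the
# batch scripts below) can tile the image, run the model per tile, and stitch
# the tiles back together. A minimal sketch, assuming float [0, 1] arrays
# `im_full` and `mk_full` without the batch dimension; the import path is
# assumed from the repo layout:
#
#   from copy import deepcopy
#   from libs.util import ImageChunker
#
#   chunker = ImageChunker(512, 512, 30)   # tile height, width, and overlap
#   chunked_imgs = chunker.dimension_preprocess(deepcopy(im_full))
#   chunked_msks = chunker.dimension_preprocess(deepcopy(mk_full))
#   preds = model.predict([chunked_imgs, chunked_msks])
#   result = chunker.dimension_postprocess(preds, im_full)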
def main():
    # Parse command-line arguments
    args = parse_args()

    # Change to root path
    if os.path.basename(os.getcwd()) != 'PConvInpainting':
        os.chdir('..')

    # Settings
    TEST_FOLDER_IMG = args.img_path
    TEST_FOLDER_MASK = args.mask_path
    OUTPUT_FOLDER = args.out_path
    BATCH_SIZE = args.batch_size

    model = PConvUnet(vgg_weights=None, inference_only=True)
    model.load("pconv_imagenet.h5", train_bn=False)

    fileList = os.listdir(TEST_FOLDER_IMG)

    # Used for chunking up images & stitching them back together
    chunker = ImageChunker(512, 512, 30)
    kernel = np.ones((7, 7), np.uint8)

    for i in range(0, len(fileList), BATCH_SIZE):
        # Lists for saving images and masks
        imgs, masks, indices = [], [], []
        for j in range(0, BATCH_SIZE):
            imgName = "MSRA10K_image_{:06d}.jpg".format(i + j)
            imFile = Image.open(TEST_FOLDER_IMG + imgName)
            im = np.array(imFile) / 255  # convert to float

            maskName = imgName.replace(".jpg", ".png")
            maskName = maskName.replace("image", "mask")
            maskFile = Image.open(TEST_FOLDER_MASK + maskName)
            mask = np.array(maskFile)

            # extend from 1 channel to 3
            mask3d = np.tile(mask[:, :, None], [1, 1, 3])
            # dilate mask to process additional border
            mask3d = cv2.dilate(mask3d, kernel, iterations=1)
            mask3d = mask3d / 255  # convert to float
            mask3d = 1.0 - mask3d  # need to invert mask due to framework

            imgs.append(im)
            masks.append(mask3d)
            indices.append(i + j)
            imFile.close()
            maskFile.close()
            print(imgName, maskName)

        # print("testing....")
        for img, mask, index in zip(imgs, masks, indices):
            # Resize to half resolution before inpainting
            height, width, depth = img.shape
            imgScale = 0.5
            newX, newY = int(width * imgScale), int(height * imgScale)
            new_img = cv2.resize(img, (newX, newY))
            new_mask = cv2.resize(mask, (newX, newY))

            chunked_images = chunker.dimension_preprocess(deepcopy(new_img))
            chunked_masks = chunker.dimension_preprocess(deepcopy(new_mask))
            pred_imgs = model.predict([chunked_images, chunked_masks])
            reconstructed_image_resized = chunker.dimension_postprocess(
                pred_imgs, new_img)
            reconstructed_image_original_size = cv2.resize(
                reconstructed_image_resized, (int(width), int(height)))

            maskExpanded = cv2.erode(mask, kernel, iterations=3)
            reconstructed_image_final = np.where(
                maskExpanded == 0, reconstructed_image_original_size,
                img)  # apply generated content over masked area only

            result = Image.fromarray(
                (reconstructed_image_final * 255).astype(np.uint8))
            result.save(OUTPUT_FOLDER + "MSRA10K_image_{:06d}.png".format(index))
class Paint(object):

    MARKER_COLOR = 'white'

    def __init__(self, config):
        self.config = config

        self.root = Tk()
        self.root.title("Image Inpainting (V1.0)")

        # self.LabelArea = Label(text="中国", bg="green", font=("Arial", 12), width=10, height=2)
        # self.LabelArea.grid(row=1, column=0, rowspan=1)

        self.c = Canvas(self.root, bg='white',
                        width=config.img_shapes[1] + 4,
                        height=config.img_shapes[0])
        self.c.grid(row=0, column=0, rowspan=6)

        # self.masked_input = Canvas(self.root, bg='white', width=config.img_shapes[1] + 4, height=config.img_shapes[0])
        # self.masked_input.grid(row=0, column=1, rowspan=8)

        # Four half-size output canvases, one per model
        self.out1 = Canvas(self.root, bg='white',
                           width=config.img_shapes[1] / 2 + 4,
                           height=config.img_shapes[0] / 2)
        self.out1.grid(row=0, column=1, rowspan=3)
        self.out2 = Canvas(self.root, bg='white',
                           width=config.img_shapes[1] / 2 + 4,
                           height=config.img_shapes[0] / 2)
        self.out2.grid(row=0, column=2, rowspan=3)
        self.out3 = Canvas(self.root, bg='white',
                           width=config.img_shapes[1] / 2 + 4,
                           height=config.img_shapes[0] / 2)
        self.out3.grid(row=3, column=1, rowspan=3)
        self.out4 = Canvas(self.root, bg='white',
                           width=config.img_shapes[1] / 2 + 4,
                           height=config.img_shapes[0] / 2)
        self.out4.grid(row=3, column=2, rowspan=3)

        # self.Label(self.root, text="First").grid(row=0)

        self.load_button = Button(self.root, text='load', command=self.load, width=12, height=3)
        self.load_button.grid(row=0, column=3)
        self.rect_button = Button(self.root, text='rectangle', command=self.use_rect, width=12, height=3)
        self.rect_button.grid(row=1, column=3)
        self.poly_button = Button(self.root, text='stroke', command=self.use_poly, width=12, height=3)
        self.poly_button.grid(row=2, column=3)
        self.fill_button = Button(self.root, text='fill', command=self.fill, width=12, height=3)
        self.fill_button.grid(row=3, column=3)
        self.clear_button = Button(self.root, text='clear', command=self.clear, width=12, height=3)
        self.clear_button.grid(row=4, column=3)
        self.save_button = Button(self.root, text="save", command=self.save, width=12, height=3)
        self.save_button.grid(row=5, column=3)

        # self.revoke_button = Button(self.root, text='unused1', command=self.revoke, width=12, height=3)
        # self.revoke_button.grid(row=2, column=6)
        # self.exit_button = Button(self.root, text='Exit', command=self.revoke, width=12, height=3)
        # self.exit_button.grid(row=2, column=7)

        self.filename = None
        self.setup()
        self.root.mainloop()

    def setup(self):
        self.predicted_img1 = None
        self.predicted_img2 = None
        self.predicted_img3 = None
        self.predicted_img4 = None
        self.old_x = None
        self.old_y = None
        self.start_x = None
        self.start_y = None
        self.end_x = None
        self.end_y = None
        self.eraser_on = False
        self.active_button = self.rect_button
        self.isPainting = False

        self.c.bind('<B1-Motion>', self.paint)
        self.c.bind('<ButtonRelease-1>', self.reset)
        self.c.bind('<Button-1>', self.beginPaint)
        self.c.bind('<Enter>', self.icon2pen)
        self.c.bind('<Leave>', self.icon2mice)

        self.mode = 'poly'
        self.rect_buf = None
        self.line_buf = None
        assert self.mode in ['rect', 'poly']
        self.paint_color = self.MARKER_COLOR
        self.mask_candidate = []
        self.rect_candidate = []
        self.im_h = None
        self.im_w = None
        self.mask = None
        self.result = None
        self.blank = None
        self.line_width = 24

        ##################################################################
        # wny
        self.model = GMCNNModel()
        self.model1 = PConvUnet()
        self.model2 = PConvUnet()
        self.model3 = PConvUnet()
        self.model4 = PConvUnet()

        self.reuse = False
        sess_config = tf.ConfigProto()
        sess_config.gpu_options.allow_growth = False
        self.sess = tf.Session(config=sess_config)
        self.input_image_tf = tf.placeholder(
            dtype=tf.float32,
            shape=[1, self.config.img_shapes[0], self.config.img_shapes[1], 3])
        self.input_mask_tf = tf.placeholder(
            dtype=tf.float32,
            shape=[1, self.config.img_shapes[0], self.config.img_shapes[1], 1])

        ###############################################################
        # wny
        # output = self.model.evaluate(self.input_image_tf, self.input_mask_tf, config=self.config, reuse=self.reuse)
        # output = (output + 1) * 127.5
        # output = tf.minimum(tf.maximum(output[:, :, :, ::-1], 0), 255)
        # self.output = tf.cast(output, tf.uint8)
        # # load pretrained model
        # vars_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
        # assign_ops = list(map(lambda x: tf.assign(x, tf.contrib.framework.load_variable(config.load_model_dir, x.name)),
        #                       vars_list))
        # self.sess.run(assign_ops)

        # self.model.load(r"D:\PycharmProjects2\PConv-Keras\data\logs\pconv_imagenet.26-1.07.h5", train_bn=False)  # P1T1
        # self.model.load(r"D:\PycharmProjects2\PConv-Keras\data\logs\weights.07-1.89.h5", train_bn=False)  # P1T2
        # self.model.load(r"D:\PycharmProjects2\PConv-Keras\data\logs\weights.10-1.74.h5", train_bn=False)  # P1T3
        self.model1.load(
            r"D:\PycharmProjects2\PConv-Keras\data\logs\Thanka_phase1\p1t152\weights.31-1.20.h5",
            train_bn=False)
        self.model2.load(
            r"D:\PycharmProjects2\PConv-Keras\data\logs\Thanka_phase1\p1t167\weights.40-1.13.h5",
            train_bn=False)
        self.model3.load(
            r"D:\PycharmProjects2\PConv-Keras\data\logs\Thanka_phase1\p1t168\weights.32-0.29.h5",
            train_bn=False)
        self.model4.load(
            r"D:\PycharmProjects2\PConv-Keras\data\logs\Thanka_phase1\p1t169\weights.13-0.98.h5",
            train_bn=False)
        # wny
        ###############################################################
        print('Model loaded.')

    def checkResp(self):
        assert len(self.mask_candidate) == len(self.rect_candidate)

    def load(self):
        self.filename = tkFileDialog.askopenfilename(
            initialdir='./imgs', title="Select file",
            filetypes=(("all files", "*.*"), ("png files", "*.png"),
                       ("jpg files", "*.jpg")))
        self.filename_ = self.filename.split('/')[-1][:-4]
        self.filepath = '/'.join(self.filename.split('/')[:-1])
        print(self.filename_, self.filepath)

        try:
            photo = Image.open(self.filename)
            self.image = cv2.imread(self.filename)
        except Exception:
            print('could not load image')
        else:
            self.im_w, self.im_h = photo.size
            self.mask = np.zeros((self.im_h, self.im_w, 3)).astype(np.uint8)
            # self.mask = np.zeros_like(self.image)
            print(photo.size)

            self.displayPhoto = photo
            self.displayPhoto = self.displayPhoto.resize((self.im_w, self.im_h))
            self.draw = ImageDraw.Draw(self.displayPhoto)
            self.photo_tk = ImageTk.PhotoImage(image=self.displayPhoto)
            self.c.create_image(0, 0, image=self.photo_tk, anchor=NW)

            self.rect_candidate.clear()
            self.mask_candidate.clear()

            if self.blank is None:
                self.blank = Image.open('imgs/blank.png')
                self.blank = self.blank.resize(
                    (int(self.im_w / 2), int(self.im_h / 2)))
                self.blank_tk = ImageTk.PhotoImage(image=self.blank)
            self.out1.create_image(0, 0, image=self.blank_tk, anchor=NW)
            self.out2.create_image(0, 0, image=self.blank_tk, anchor=NW)
            self.out3.create_image(0, 0, image=self.blank_tk, anchor=NW)
            self.out4.create_image(0, 0, image=self.blank_tk, anchor=NW)

    def save(self):
        # img = np.array(self.displayPhoto)
        # cv2.imwrite(os.path.join(self.filepath, 'tmp.png'), img)
        if self.mode == 'rect':
            self.mask[:, :, :] = 0
            for rect in self.mask_candidate:
                self.mask[rect[1]:rect[3], rect[0]:rect[2], :] = 1
        # self.mask = 1 - self.mask
        cv2.imwrite(os.path.join(self.filepath, self.filename_ + '_mask.png'),
                    (1 - self.mask) * 255)
        # wny
        if self.result is not None:  # GMCNN output, only set if the commented branch above is enabled
            cv2.imwrite(os.path.join(self.filepath,
                                     self.filename_ + '_gm_result.png'),
                        self.result[0][:, :, ::-1])
        # cv2.imwrite(os.path.join(self.filepath, self.filename_ + '_gm_result.png'), self.predicted_img)
        cv2.imwrite(
            os.path.join(self.filepath, self.filename_ + '_result1.png'),
            cv2.cvtColor(self.predicted_img1, cv2.COLOR_BGR2RGB))
        cv2.imwrite(
            os.path.join(self.filepath, self.filename_ + '_result2.png'),
            cv2.cvtColor(self.predicted_img2, cv2.COLOR_BGR2RGB))
        cv2.imwrite(
            os.path.join(self.filepath, self.filename_ + '_result3.png'),
            cv2.cvtColor(self.predicted_img3, cv2.COLOR_BGR2RGB))
        cv2.imwrite(
            os.path.join(self.filepath, self.filename_ + '_result4.png'),
            cv2.cvtColor(self.predicted_img4, cv2.COLOR_BGR2RGB))

    def fill(self):
        if self.mode == 'rect':
            for rect in self.mask_candidate:
                self.mask[rect[1]:rect[3], rect[0]:rect[2], :] = 1

        ########################################################################
        # wny: create a three-channel mask by copying the original single-layer
        # mask's values into each channel of the new mask:
        mask_channel = np.zeros_like(self.image)
        mask_channel[:, :, 0] = self.mask[:, :, 0]
        mask_channel[:, :, 1] = self.mask[:, :, 0]
        mask_channel[:, :, 2] = self.mask[:, :, 0]
        self.mask = mask_channel

        # wny: to swap 0s and 1s in the new mask:
        # self.mask = 1 - self.mask
        image_temp = Image.open(self.filename)
        image_temp = np.array(image_temp) / 255
        # wny: apply the mask to the input image:
        image_temp[self.mask == 1] = 1
        image = np.expand_dims(image_temp, 0)
        mask = np.expand_dims(1 - self.mask, 0)
        ########################################################################

        print(image.shape)
        print(mask.shape)
        image_temp = Image.fromarray(np.uint8(image_temp * 255))
        image_temp.save('./imgs/current_masked_input.png')
        cv2.imwrite('./imgs/current_mask.png', (1 - self.mask) * 255)

        #########################################################################
        # wny
        # self.result = self.sess.run(self.output, feed_dict={self.input_image_tf: image * 1.0,
        #                                                     self.input_mask_tf: mask * 1.0})

        # output the predicted images (converted to uint8 for OpenCV)
        self.predicted_img1 = ((self.model1.predict([image, mask])[0]) * 255).astype(np.uint8)
        self.predicted_img2 = ((self.model2.predict([image, mask])[0]) * 255).astype(np.uint8)
        self.predicted_img3 = ((self.model3.predict([image, mask])[0]) * 255).astype(np.uint8)
        self.predicted_img4 = ((self.model4.predict([image, mask])[0]) * 255).astype(np.uint8)
        # wny
        if self.result is not None:  # GMCNN output, only set if the commented branch above is enabled
            cv2.imwrite('./imgs/tmp.png', self.result[0][:, :, ::-1])
        cv2.imwrite('./imgs/current_result1.png',
                    cv2.cvtColor(self.predicted_img1, cv2.COLOR_BGR2RGB))
        cv2.imwrite('./imgs/current_result2.png',
                    cv2.cvtColor(self.predicted_img2, cv2.COLOR_BGR2RGB))
        cv2.imwrite('./imgs/current_result3.png',
                    cv2.cvtColor(self.predicted_img3, cv2.COLOR_BGR2RGB))
        cv2.imwrite('./imgs/current_result4.png',
                    cv2.cvtColor(self.predicted_img4, cv2.COLOR_BGR2RGB))
        ##########################################################################

        photo1 = Image.open('./imgs/current_result1.png')
        photo2 = Image.open('./imgs/current_result2.png')
        photo3 = Image.open('./imgs/current_result3.png')
        photo4 = Image.open('./imgs/current_result4.png')
        self.displayPhotoResult1 = photo1
        self.displayPhotoResult2 = photo2
        self.displayPhotoResult3 = photo3
        self.displayPhotoResult4 = photo4
        self.displayPhotoResult1 = self.displayPhotoResult1.resize(
            (int(self.im_w / 2), int(self.im_h / 2)))
        self.displayPhotoResult2 = self.displayPhotoResult2.resize(
            (int(self.im_w / 2), int(self.im_h / 2)))
        self.displayPhotoResult3 = self.displayPhotoResult3.resize(
            (int(self.im_w / 2), int(self.im_h / 2)))
        self.displayPhotoResult4 = self.displayPhotoResult4.resize(
            (int(self.im_w / 2), int(self.im_h / 2)))
        self.photo_tk_result1 = ImageTk.PhotoImage(
            image=self.displayPhotoResult1)
        self.photo_tk_result2 = ImageTk.PhotoImage(
            image=self.displayPhotoResult2)
        self.photo_tk_result3 = ImageTk.PhotoImage(
            image=self.displayPhotoResult3)
        self.photo_tk_result4 = ImageTk.PhotoImage(
            image=self.displayPhotoResult4)
        self.out1.create_image(0, 0, image=self.photo_tk_result1, anchor=NW)
        self.out2.create_image(0, 0, image=self.photo_tk_result2, anchor=NW)
        self.out3.create_image(0, 0, image=self.photo_tk_result3, anchor=NW)
        self.out4.create_image(0, 0, image=self.photo_tk_result4, anchor=NW)
        return

    def use_rect(self):
        self.activate_button(self.rect_button)
        self.mode = 'rect'

    def use_poly(self):
        self.activate_button(self.poly_button)
        self.mode = 'poly'

    def revoke(self):
        if len(self.rect_candidate) > 0:
            self.c.delete(self.rect_candidate[-1])
            self.rect_candidate.remove(self.rect_candidate[-1])
            self.mask_candidate.remove(self.mask_candidate[-1])
            self.checkResp()

    def clear(self):
        self.mask = np.zeros((self.im_h, self.im_w, 1)).astype(np.uint8)
        if self.mode == 'poly':
            photo = Image.open(self.filename)
            self.image = cv2.imread(self.filename)
            self.displayPhoto = photo
            self.displayPhoto = self.displayPhoto.resize((self.im_w, self.im_h))
            self.draw = ImageDraw.Draw(self.displayPhoto)
            self.photo_tk = ImageTk.PhotoImage(image=self.displayPhoto)
            self.c.create_image(0, 0, image=self.photo_tk, anchor=NW)
        else:
            if self.rect_candidate is None or len(self.rect_candidate) == 0:
                return
            for item in self.rect_candidate:
                self.c.delete(item)
            self.rect_candidate.clear()
            self.mask_candidate.clear()
            self.checkResp()

    # TODO: reset canvas
    # TODO: undo and redo
    # TODO: draw triangle, rectangle, oval, text
    def activate_button(self, some_button, eraser_mode=False):
        self.active_button.config(relief=RAISED)
        some_button.config(relief=SUNKEN)
        self.active_button = some_button
        self.eraser_on = eraser_mode

    def beginPaint(self, event):
        self.start_x = event.x
        self.start_y = event.y
        self.isPainting = True

    def paint(self, event):
        if self.start_x and self.start_y and self.mode == 'rect':
            self.end_x = max(min(event.x, self.im_w), 0)
            self.end_y = max(min(event.y, self.im_h), 0)
            rect = self.c.create_rectangle(self.start_x, self.start_y,
                                           self.end_x, self.end_y,
                                           fill=self.paint_color)
            if self.rect_buf is not None:
                self.c.delete(self.rect_buf)
            self.rect_buf = rect
        elif self.old_x and self.old_y and self.mode == 'poly':
            line = self.c.create_line(self.old_x, self.old_y, event.x, event.y,
                                      width=self.line_width,
                                      fill=self.paint_color,
                                      capstyle=ROUND, smooth=True,
                                      splinesteps=36)
            cv2.line(self.mask, (self.old_x, self.old_y), (event.x, event.y),
                     (1), self.line_width)
        self.old_x = event.x
        self.old_y = event.y

    def reset(self, event):
        self.old_x, self.old_y = None, None
        if self.mode == 'rect':
            self.isPainting = False
            rect = self.c.create_rectangle(self.start_x, self.start_y,
                                           self.end_x, self.end_y,
                                           fill=self.paint_color)
            if self.rect_buf is not None:
                self.c.delete(self.rect_buf)
            self.rect_buf = None
            self.rect_candidate.append(rect)
            # upper-left corner, lower-right corner
            x1, y1, x2, y2 = min(self.start_x, self.end_x), min(self.start_y, self.end_y), \
                max(self.start_x, self.end_x), max(self.start_y, self.end_y)
            self.mask_candidate.append((x1, y1, x2, y2))
            print(self.mask_candidate[-1])

    def icon2pen(self, event):
        return

    def icon2mice(self, event):
        return
    callbacks=[
        TensorBoard(log_dir='../data/logs/fine_tuning', write_graph=False)
    ])

# Load weights from previous run
latest_weights = get_latest_weights_file()
model = PConvUnet(weight_filepath='data/logs/')
model.load(latest_weights, train_bn=False, lr=0.00005)

n = 0
for (masked, mask), ori in tqdm(test_generator):
    print(masked, mask, "-----")

    # Run predictions for this batch of images
    pred_img = model.predict([masked, mask])
    pred_time = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')

    # Clear current output and display test images
    for i in range(len(ori)):
        _, axes = plt.subplots(1, 2, figsize=(10, 5))
        axes[0].imshow(masked[i, :, :, :])
        axes[1].imshow(pred_img[i, :, :, :] * 1.)
        axes[0].set_title('Masked Image')
        axes[1].set_title('Predicted Image')
        axes[0].xaxis.set_major_formatter(NullFormatter())
        axes[0].yaxis.set_major_formatter(NullFormatter())
        axes[1].xaxis.set_major_formatter(NullFormatter())
        axes[1].yaxis.set_major_formatter(NullFormatter())
        plt.savefig(r'data/custom_test_samples/img_{}_{}.png'.format(i, pred_time))
    mask = mask / 255.
    mask = 1 - mask
    print('original max {}'.format(ori.max()))
    # ori = ori / ori.max()
    ori = ori / 255.

    masked = deepcopy(ori)
    masked[mask == mask.min()] = 1
    print("mask shape {}".format(masked.shape))

    ori = np.expand_dims(ori, axis=0)
    mask = np.uint8(np.expand_dims(mask, axis=0))
    masked = np.expand_dims(masked, axis=0)
    # mask = np.stack([random_mask_line_for_ct(ori.shape[1], ori.shape[2]) for _ in range(ori.shape[0])], axis=0)
    # masked = deepcopy(ori)
    masked[mask == 0] = 1

    masks[i, ] = mask
    maskeds[i, ] = masked

model = PConvUnet(weight_filepath='data/logs_ct_new/')
model.load("data/logs_ct_new/316_weights_2018-12-11-10-45-45.h5")

pred_img = model.predict([maskeds, masks])

for i in range(len(img_list)):
    print("saving " + str(i))
    split = img_list[i]
    splited = split.split('test_')[-1]
    plt.imsave(args.save_name + splited, pred_img[i] * 1.)
# Fuse the mask into the input image to get a masked image
im[mask == 0] = 1

# Create a model instance and load the pre-trained ImageNet weights provided by the author.
from libs.pconv_model import PConvUnet

model = PConvUnet(vgg_weights=None, inference_only=True)
# model.load(r"D:\PycharmProjects2\PConv-Keras\data\logs\pconv_imagenet.26-1.07.h5", train_bn=False)
# model.load(r"D:\PycharmProjects2\PConv-Keras\data\logs\Thanka_phase1\p1t39\weights.16-1.13.h5", train_bn=False)
# model.load(r"D:\PycharmProjects2\PConv-Keras\data\logs\Thanka_phase1\p1t34\weights.07-1.29.h5", train_bn=False)
model.load(
    r"D:\PycharmProjects2\PConv-Keras\data\logs\Thanka_phase1\p1t152\weights.31-1.20.h5",
    train_bn=False)
# model.load(r"D:\PycharmProjects2\PConv-Keras\data\logs\Thanka_phase1\p1t159\weights.31-1.17.h5", train_bn=False)

# Output the predicted image
predicted_img = model.predict([np.expand_dims(im, 0), np.expand_dims(mask, 0)])[0]

# Save the result (convert to uint8 and to BGR channel order for OpenCV)
result_name = r"C:\Users\dell\Desktop\paper2\\figure\Fig6\\r2_mapconv.png"
cv2.imwrite(result_name, cv2.cvtColor((predicted_img * 255).astype(np.uint8), cv2.COLOR_BGR2RGB))

# cv2.imwrite("./data/masked_input.png", im*255)
# masked_input = cv2.imread("./data/masked_input.png")
# # change to right color
# cv2.imwrite("./data/masked_input.png", cv2.cvtColor(masked_input, cv2.COLOR_BGR2RGB))
# cv2.imwrite("./data/mask.png", mask*255)

# Plot the mask and the masked image together.
fig, ax = plt.subplots(1, 4)
ax[0].imshow(input_img)
ax[1].imshow(mask * 255)
ax[2].imshow(im)
model = PConvUnet()
model.load(
    r"C:\Users\Mathias Felix Gruber\Documents\GitHub\PConv-Keras\data\logs\imagenet_phase2\weights.26-1.07.h5",
    train_bn=False,
    lr=0.00005
)

# In[ ]:

n = 0
for (masked, mask), ori in tqdm(test_generator):

    # Run predictions for this batch of images
    pred_img = model.predict([masked, mask])
    pred_time = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')

    # Clear current output and display test images
    for i in range(len(ori)):
        _, axes = plt.subplots(1, 2, figsize=(10, 5))
        axes[0].imshow(masked[i, :, :, :])
        axes[1].imshow(pred_img[i, :, :, :] * 1.)
        axes[0].set_title('Masked Image')
        axes[1].set_title('Predicted Image')
        axes[0].xaxis.set_major_formatter(NullFormatter())
        axes[0].yaxis.set_major_formatter(NullFormatter())
        axes[1].xaxis.set_major_formatter(NullFormatter())
        axes[1].yaxis.set_major_formatter(NullFormatter())
        plt.savefig(r'data/test_samples/img_{}_{}.png'.format(i, pred_time))
def inpainting(quiz, debug=True):
    print('Step 2: Use your model to complete the image\n')
    print('...')

    # Your code may go here...
    # ====================== #
    # gen_image = some_black_magic(quiz)
    # ====================== #

    # Demo: mean-color inpainting
    raw_image = quiz.raw_image.copy()
    bbox = quiz.bbox

    # mean_color = quiz.raw_image.mean(axis=(0, 1))  # shape: (3,)
    # raw_roi = raw_image[bbox['y']:bbox['y']+bbox['h'], bbox['x']:bbox['x']+bbox['w'], :]
    # mask = np.zeros(raw_image.shape[:2])
    # mask_roi = mask[bbox['y']:bbox['y']+bbox['h'], bbox['x']:bbox['x']+bbox['w']]
    # to_filling = (raw_roi[:, :, 1] == 255) & (raw_roi[:, :, 0] < 10) & (raw_roi[:, :, 2] < 10)
    # mask_roi[to_filling] = 1
    # mask = ski_morph.dilation(mask, ski_morph.square(7))
    # mask = np.expand_dims(mask, axis=-1)
    # gen_image = (raw_image * (1 - mask) + mean_color * mask).astype(np.uint8)

    masked = raw_image
    # Pixels painted bright green mark the region to fill
    to_filling = (masked[:, :, 1] > 245) & (masked[:, :, 0] < 10) & (masked[:, :, 2] < 10)
    mask_roi = np.zeros((256, 256, 3), np.uint8)
    mask_roi[to_filling] = 1
    mask = 1. - mask_roi

    # Erode the valid region so the hole is slightly enlarged
    erosion_size = 11
    erosion_type = 0
    val_type = cv2.MORPH_ELLIPSE
    element = cv2.getStructuringElement(erosion_type,
                                        (2 * erosion_size + 1, 2 * erosion_size + 1),
                                        (erosion_size, erosion_size))
    erosion_mask = cv2.erode(mask, element)

    # Add the batch dimension expected by the model
    masked_tmp_list = []
    masked_tmp_list.append(masked)
    masked_na = np.array(masked_tmp_list)
    mask_tmp_list = []
    mask_tmp_list.append(erosion_mask)
    mask_na = np.array(mask_tmp_list)

    model = PConvUnet(weight_filepath='{}/PConv-Keras/data/model/'.format(path_prefix))
    model.load("{}/PConv-Keras/data/model/12_weights_2018-09-26-14-05-28.h5".format(path_prefix))
    pred_img_set = model.predict([masked_na, mask_na])
    pred_img = 255. * pred_img_set[0, :, :, :]

    # Copy the prediction back only over the masked region
    gen_image = masked.copy()
    gen_image[to_filling] = pred_img[to_filling]

    if debug:
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', category=UserWarning)
            os.makedirs('temp', exist_ok=True)
            cv2.imwrite("temp/raw_image.jpg", raw_image)
            cv2.imwrite("temp/mask.jpg", mask[:, :, 0])
            cv2.imwrite("temp/gen_image.jpg", gen_image)

    print('=====================')
    return gen_image