def FaceSwap_DL(prn, pos, ref_pos, image, ref_image):
    """Swap the face from ref_image onto image using precomputed PRNet position maps.

    Args:
        prn: PRNet wrapper exposing get_vertices / get_colors_from_texture / triangles.
        pos: position map for `image` (H x W x 3).
        ref_pos: position map for `ref_image`.
        image: target BGR image, uint8 range [0, 255].
        ref_image: source (reference) BGR image, uint8 range [0, 255].

    Returns:
        uint8 image with the reference face blended in via seamless cloning.
    """
    [h, w, _] = image.shape

    #-- 1. 3D reconstruction -> get texture.
    vertices = prn.get_vertices(pos)
    image = image / 255.
    # Sampled but unused; kept to mirror the sibling face-swap functions.
    texture = cv2.remap(image, pos[:, :, :2].astype(np.float32), None,
                        interpolation=cv2.INTER_NEAREST,
                        borderMode=cv2.BORDER_CONSTANT, borderValue=(0))

    #-- 2. Texture editing: take the whole texture from the reference image (face swap).
    ref_image = ref_image / 255.
    ref_texture = cv2.remap(ref_image, ref_pos[:, :, :2].astype(np.float32), None,
                            interpolation=cv2.INTER_NEAREST,
                            borderMode=cv2.BORDER_CONSTANT, borderValue=(0))
    ref_vertices = prn.get_vertices(ref_pos)  # unused, kept from original
    new_texture = ref_texture  # alternative: (texture + ref_texture) / 2.

    #-- 3. Remap to input image (render).
    vis_colors = np.ones((vertices.shape[0], 1))
    face_mask = render_texture(vertices.T, vis_colors.T, prn.triangles.T, h, w, c=1)
    face_mask = np.squeeze(face_mask > 0).astype(np.float32)
    new_colors = prn.get_colors_from_texture(new_texture)
    new_image = render_texture(vertices.T, new_colors.T, prn.triangles.T, h, w, c=3)
    new_image = image * (1 - face_mask[:, :, np.newaxis]) \
        + new_image * face_mask[:, :, np.newaxis]

    # Poisson editing to blend the rendered face into the original image.
    vis_ind = np.argwhere(face_mask > 0)
    vis_min = np.min(vis_ind, 0)
    vis_max = np.max(vis_ind, 0)
    center = (int((vis_min[1] + vis_max[1]) / 2 + 0.5),
              int((vis_min[0] + vis_max[0]) / 2 + 0.5))
    output = cv2.seamlessClone((new_image * 255).astype(np.uint8),
                               (image * 255).astype(np.uint8),
                               (face_mask * 255).astype(np.uint8),
                               center, cv2.NORMAL_CLONE)
    return output
def prnetSwapOneFace(prn, image, ref_image, pos, ref_pos, h, w):
    """Swap the face from ref_image onto image (position maps already computed).

    Same pipeline as FaceSwap_DL but takes h/w explicitly and blends with
    cv2.MIXED_CLONE instead of NORMAL_CLONE.
    """
    vertices = prn.get_vertices(pos)
    image = image / 255.
    # Sampled but unused; kept to mirror the sibling face-swap functions.
    texture = cv2.remap(image, pos[:, :, :2].astype(np.float32), None,
                        interpolation=cv2.INTER_NEAREST,
                        borderMode=cv2.BORDER_CONSTANT, borderValue=(0))

    ref_image = ref_image / 255.
    ref_texture = cv2.remap(ref_image, ref_pos[:, :, :2].astype(np.float32), None,
                            interpolation=cv2.INTER_NEAREST,
                            borderMode=cv2.BORDER_CONSTANT, borderValue=(0))
    ref_vertices = prn.get_vertices(ref_pos)  # unused, kept from original
    new_texture = ref_texture  # alternative: (texture + ref_texture) / 2.

    #-- 3. Remap to input image (render).
    vis_colors = np.ones((vertices.shape[0], 1))
    face_mask = render_texture(vertices.T, vis_colors.T, prn.triangles.T, h, w, c=1)
    face_mask = np.squeeze(face_mask > 0).astype(np.float32)
    new_colors = prn.get_colors_from_texture(new_texture)
    new_image = render_texture(vertices.T, new_colors.T, prn.triangles.T, h, w, c=3)
    new_image = image * (1 - face_mask[:, :, np.newaxis]) \
        + new_image * face_mask[:, :, np.newaxis]

    # Poisson editing to blend the rendered face into the original image.
    vis_ind = np.argwhere(face_mask > 0)
    vis_min = np.min(vis_ind, 0)
    vis_max = np.max(vis_ind, 0)
    center = (int((vis_min[1] + vis_max[1]) / 2 + 0.5),
              int((vis_min[0] + vis_max[0]) / 2 + 0.5))
    out = cv2.seamlessClone((new_image * 255).astype(np.uint8),
                            (image * 255).astype(np.uint8),
                            (face_mask * 255).astype(np.uint8),
                            center, cv2.MIXED_CLONE)
    return out
def render(self, vertices, new_colors, h, w):
    """Render a face image plus its binary coverage mask.

    Args:
        vertices: (n, 3) vertex positions in image coordinates.
        new_colors: (n, 3) per-vertex colors.
        h, w: output image height and width.

    Returns:
        (face_mask, new_image): float32 mask of rendered pixels and the
        rendered 3-channel image.
    """
    vis_colors = np.ones((vertices.shape[0], 1))
    face_mask = render_texture(vertices.T, vis_colors.T, self.prn.triangles.T, h, w, c=1)
    face_mask = np.squeeze(face_mask > 0).astype(np.float32)
    new_image = render_texture(vertices.T, new_colors.T, self.prn.triangles.T, h, w, c=3)
    return face_mask, new_image
def synthesize(image, label, face_ind, triangles, mode):
    """Randomly rotate a ground-truth position map and re-render the face.

    Args:
        image: input BGR image, uint8 range [0, 255].
        label: ground-truth UV position map (256 x 256 x 3).
        face_ind: indices of valid face points in the flattened UV map.
        triangles: face mesh triangles.
        mode: controls the yaw range (0: [-10, 0], 1: [-30, 30], 2: [0, 10]).

    Returns:
        (pic_gt2, uv_position_map): the re-rendered rotated face image and the
        updated UV position map containing the rotated vertices.
    """
    # time.clock() was removed in Python 3.8; perf_counter is the replacement.
    t1 = time.perf_counter()
    turn_angle = 0
    if mode == 1:
        turn_angle = random.randint(-30, 30)
    if mode == 0:
        turn_angle = random.randint(-10, 0)
    if mode == 2:
        turn_angle = random.randint(0, 10)
    pitch_angle = random.randint(-10, 10)

    input_image = image / 255.
    pos_gt = label

    all_vertices_gt = np.reshape(pos_gt, [256**2, -1])
    temp = all_vertices_gt  # alias: writes through to all_vertices_gt below
    vertices_gt = all_vertices_gt[face_ind, :]
    # Clamp coordinates into the 256x256 image.
    vertices_gt[:, 0] = np.minimum(np.maximum(vertices_gt[:, 0], 0), 256 - 1)  # x
    vertices_gt[:, 1] = np.minimum(np.maximum(vertices_gt[:, 1], 0), 256 - 1)  # y

    ref_texture_r = cv2.remap(input_image, pos_gt[:, :, :2].astype(np.float32), None,
                              interpolation=cv2.INTER_NEAREST,
                              borderMode=cv2.BORDER_CONSTANT, borderValue=(0))
    all_colors_r = np.reshape(ref_texture_r, [256**2, -1])
    text_c_r = all_colors_r[face_ind, :]

    rv = rotate(vertices_gt, [pitch_angle, turn_angle, 0])
    temp[face_ind, :] = rv
    uv_position_map = np.reshape(temp, [256, 256, 3])
    pic_gt2 = render_texture(rv.T, text_c_r.T, triangles.T, 256, 256)

    print(time.perf_counter() - t1)
    return pic_gt2, uv_position_map
def get_uv_mask(vertices_vis, triangles, uv_coords, h, w, resolution):
    """Render per-vertex visibility into UV space and clean it morphologically.

    NOTE(review): `h` and `w` are unused (rendering uses `resolution`); kept
    for signature compatibility with callers. A second identical definition
    of this function exists later in the file and shadows this one.

    Returns:
        float32 (resolution x resolution) binary UV visibility mask.
    """
    triangles = triangles.T
    vertices_vis = vertices_vis.astype(np.float32)
    uv_mask = render_texture(uv_coords.T, vertices_vis[np.newaxis, :], triangles,
                             resolution, resolution, 1)
    uv_mask = np.squeeze(uv_mask > 0)
    # Close small holes, then repeatedly erode to shrink the mask away from
    # unreliable boundary pixels.
    uv_mask = ndimage.binary_closing(uv_mask)
    uv_mask = ndimage.binary_erosion(uv_mask, structure=np.ones((4, 4)))
    uv_mask = ndimage.binary_closing(uv_mask)
    uv_mask = ndimage.binary_erosion(uv_mask, structure=np.ones((4, 4)))
    uv_mask = ndimage.binary_erosion(uv_mask, structure=np.ones((4, 4)))
    uv_mask = ndimage.binary_erosion(uv_mask, structure=np.ones((4, 4)))
    uv_mask = uv_mask.astype(np.float32)
    return np.squeeze(uv_mask)
def get_uv_mask(vertices_vis, triangles, uv_coords, h, w, resolution):
    """Render per-vertex visibility into UV space and clean it morphologically.

    NOTE(review): duplicate of the earlier get_uv_mask definition — this one
    wins at import time. `h` and `w` are unused; kept for compatibility.

    Returns:
        float32 (resolution x resolution) binary UV visibility mask.
    """
    triangles = triangles.T
    vertices_vis = vertices_vis.astype(np.float32)
    uv_mask = render_texture(uv_coords.T, vertices_vis[np.newaxis, :], triangles,
                             resolution, resolution, 1)
    uv_mask = np.squeeze(uv_mask > 0)
    # Close small holes, then repeatedly erode away unreliable boundary pixels.
    uv_mask = ndimage.binary_closing(uv_mask)
    uv_mask = ndimage.binary_erosion(uv_mask, structure=np.ones((4, 4)))
    uv_mask = ndimage.binary_closing(uv_mask)
    uv_mask = ndimage.binary_erosion(uv_mask, structure=np.ones((4, 4)))
    uv_mask = ndimage.binary_erosion(uv_mask, structure=np.ones((4, 4)))
    uv_mask = ndimage.binary_erosion(uv_mask, structure=np.ones((4, 4)))
    uv_mask = uv_mask.astype(np.float32)
    return np.squeeze(uv_mask)
def swap_image(hd_image, better_hd_info):
    """Render the face described by better_hd_info and blend it into hd_image.

    Args:
        hd_image: target uint8 image (already in [0, 255]; not re-scaled here).
        better_hd_info: dict with keys "img" (uint8 source image), "pos"
            (position map) and "warp_vertices" (vertices warped into
            hd_image coordinates).  Uses the module-level `prn`.

    Returns:
        uint8 image with the rendered face seamlessly cloned in.
    """
    texture_ref = cv2.remap(better_hd_info["img"] / 255.0,
                            better_hd_info["pos"][:, :, :2].astype(np.float32), None,
                            interpolation=cv2.INTER_NEAREST,
                            borderMode=cv2.BORDER_CONSTANT, borderValue=(0))
    [h, w, c] = hd_image.shape
    color = prn.get_colors_from_texture(texture_ref)
    color_mask = np.ones((better_hd_info["warp_vertices"].shape[0], 1))
    new_image = render_texture(better_hd_info["warp_vertices"].T, color.T,
                               prn.triangles.T, h, w, c=3)
    facemask = render_texture(better_hd_info["warp_vertices"].T, color_mask.T,
                              prn.triangles.T, h, w, c=3)

    # Seamless cloning to blend the rendered face into the target image.
    vis_ind = np.argwhere(facemask > 0)
    vis_min = np.min(vis_ind, 0)
    vis_max = np.max(vis_ind, 0)
    center = (int((vis_min[1] + vis_max[1]) / 2 + 0.5),
              int((vis_min[0] + vis_max[0]) / 2 + 0.5))
    output = cv2.seamlessClone((new_image * 255).astype(np.uint8),
                               (hd_image).astype(np.uint8),
                               (facemask * 255).astype(np.uint8),
                               center, cv2.NORMAL_CLONE)
    return output
def utility(prn, image, ref_image):
    """Swap the face from ref_image onto image; run PRNet on both images.

    Returns:
        uint8 blended image, or None when no face is detected in either input.
    """
    [h, w, _] = image.shape

    #-- 1. 3D reconstruction -> get texture.
    pos = prn.process(image)
    # BUG FIX: was `if np.any(pos) == None:` which is always False
    # (np.any returns a bool, never None), so detection failures fell through.
    if pos is None:
        return None
    vertices = prn.get_vertices(pos)
    image = image / 255.
    texture = cv2.remap(image, pos[:, :, :2].astype(np.float32), None,
                        interpolation=cv2.INTER_NEAREST,
                        borderMode=cv2.BORDER_CONSTANT, borderValue=(0))

    ref_pos = prn.process(ref_image)
    if ref_pos is None:  # same guard for the reference face
        return None
    ref_image = ref_image / 255.
    ref_texture = cv2.remap(ref_image, ref_pos[:, :, :2].astype(np.float32), None,
                            interpolation=cv2.INTER_NEAREST,
                            borderMode=cv2.BORDER_CONSTANT, borderValue=(0))
    ref_vertices = prn.get_vertices(ref_pos)  # unused, kept from original
    new_texture = ref_texture  # alternative: (texture + ref_texture) / 2

    vis_colors = np.ones((vertices.shape[0], 1))
    face_mask = render_texture(vertices.T, vis_colors.T, prn.triangles.T, h, w, c=1)
    face_mask = np.squeeze(face_mask > 0).astype(np.float32)
    new_colors = prn.get_colors_from_texture(new_texture)
    new_image = render_texture(vertices.T, new_colors.T, prn.triangles.T, h, w, c=3)
    new_image = image * (1 - face_mask[:, :, np.newaxis]) \
        + new_image * face_mask[:, :, np.newaxis]

    # Poisson editing to blend the swapped face into the original image.
    vis_ind = np.argwhere(face_mask > 0)
    vis_min = np.min(vis_ind, 0)
    vis_max = np.max(vis_ind, 0)
    center = (int((vis_min[1] + vis_max[1]) / 2 + 0.5),
              int((vis_min[0] + vis_max[0]) / 2 + 0.5))
    output = cv2.seamlessClone((new_image * 255).astype(np.uint8),
                               (image * 255).astype(np.uint8),
                               (face_mask * 255).astype(np.uint8),
                               center, cv2.NORMAL_CLONE)
    return output
def render_new_image(info):
    """Render a face image from info's texture/vertices using module-level prn.

    Args:
        info: dict with "img" (uint8 image), "pos" (position map) and
            "warp_vertices" (vertices in output coordinates).

    Returns:
        rendered 3-channel float image sized like info["img"].
    """
    texture_ref = cv2.remap(info["img"] / 255.0,
                            info["pos"][:, :, :2].astype(np.float32), None,
                            interpolation=cv2.INTER_NEAREST,
                            borderMode=cv2.BORDER_CONSTANT, borderValue=(0))
    # BUG FIX: originally read `info_syn["img"].shape`, but `info_syn` is not
    # defined in this function — the intended source of h/w is this info dict.
    [h, w, c] = info["img"].shape
    color = prn.get_colors_from_texture(texture_ref)
    new_image = render_texture(info["warp_vertices"].T, color.T,
                               prn.triangles.T, h, w, c=3)
    return new_image
def texture_editing(prn, args):
    """Texture-edit or face-swap a still image or a video, per args.

    args.mode 0: replace only the eye region of the texture.
    args.mode 1: full face swap; with args.video set, processes every frame of
    the video at args.image_path (args.face 1: swap in args.ref_path's face;
    args.face 2: swap the two faces found in the frame with each other).
    """
    # read image
    if args.video == 0:
        image = imread(args.image_path)
        [h, w, _] = image.shape
        #-- 1. 3D reconstruction -> get texture.
        pos = prn.process(image)
        vertices = prn.get_vertices(pos)
        image = image / 255.
        texture = cv2.remap(image, pos[:, :, :2].astype(np.float32), None,
                            interpolation=cv2.INTER_NEAREST,
                            borderMode=cv2.BORDER_CONSTANT, borderValue=(0))

    #-- 2. Texture editing
    Mode = args.mode
    if Mode == 0:
        # Change part of the texture (eyes) — e.g. for data augmentation.
        uv_face_eye = imread('Data/uv-data/uv_face_eyes.png', as_grey=True) / 255.
        uv_face = imread('Data/uv-data/uv_face.png', as_grey=True) / 255.
        eye_mask = (abs(uv_face_eye - uv_face) > 0).astype(np.float32)
        # Texture from another image or a processed texture.
        ref_image = imread(args.ref_path)
        ref_pos = prn.process(ref_image)
        ref_image = ref_image / 255.
        ref_texture = cv2.remap(ref_image, ref_pos[:, :, :2].astype(np.float32), None,
                                interpolation=cv2.INTER_NEAREST,
                                borderMode=cv2.BORDER_CONSTANT, borderValue=(0))
        new_texture = texture * (1 - eye_mask[:, :, np.newaxis]) \
            + ref_texture * eye_mask[:, :, np.newaxis]
    elif Mode == 1 and (not args.video):
        # Whole-face swap on a single image.
        ref_image = imread(args.ref_path)
        ref_pos = prn.process(ref_image)
        ref_image = ref_image / 255.
        ref_texture = cv2.remap(ref_image, ref_pos[:, :, :2].astype(np.float32), None,
                                interpolation=cv2.INTER_NEAREST,
                                borderMode=cv2.BORDER_CONSTANT, borderValue=(0))
        ref_vertices = prn.get_vertices(ref_pos)  # unused, kept from original
        new_texture = ref_texture  # alternative: (texture + ref_texture)/2.
    elif Mode == 1 and args.video:
        cap = cv2.VideoCapture(args.image_path)
        ret, trial = cap.read()
        h, w, _ = trial.shape
        print(h, w)
        vidWriter = cv2.VideoWriter(args.output_name,
                                    cv2.VideoWriter_fourcc(*'mp4v'), 24, (w, h))
        detector, predictor = initializeDlib(args.shape_predictor)
        i = 0
        while cap.isOpened():
            print('Frame Number {}'.format(i))
            i += 1
            ret, image = cap.read()
            if ret == False:
                break
            if args.face == 1:
                ref_image = cv2.imread(args.ref_path)
                output = utility(prn, image, ref_image)
                # BUG FIX: was `np.any(output) == None` which is always False,
                # letting None frames reach vidWriter.write.
                if output is None:
                    continue
                vidWriter.write(output)
            elif args.face == 2:
                img_tar = copy.deepcopy(image)
                rects = detector(image, 1)
                index = np.max((0, len(rects) - 2))
                if len(rects) == 2:
                    # Crop both detected faces with a 50px margin.
                    img_src = img_tar[int(rects[len(rects)-1].top()-50):int(rects[len(rects)-1].bottom()+50),
                                      int(rects[len(rects)-1].left()-50):int(rects[len(rects)-1].right()+50)]
                    img_tar = img_tar[int(rects[len(rects)-2].top()-50):int(rects[len(rects)-2].bottom()+50),
                                      int(rects[len(rects)-2].left()-50):int(rects[len(rects)-2].right()+50)]
                    warped = utility(prn, img_tar, img_src)
                    if warped is None:  # see BUG FIX note above
                        continue
                    image[int(rects[index].top()-50):int(rects[index].bottom()+50),
                          int(rects[index].left()-50):int(rects[index].right()+50)] = warped
                    warped2 = utility(prn, img_src, img_tar)
                    if warped2 is None:  # see BUG FIX note above
                        continue
                    image[int(rects[len(rects)-1].top()-50):int(rects[len(rects)-1].bottom()+50),
                          int(rects[len(rects)-1].left()-50):int(rects[len(rects)-1].right()+50)] = warped2
                vidWriter.write(image)
        vidWriter.release()
    else:
        print('Wrong Mode or Input type! Mode and input type should be 0 or 1.')
        exit()

    #-- 3. Remap to input image (render) — single-image path only.
    if args.video == 0:
        vis_colors = np.ones((vertices.shape[0], 1))
        face_mask = render_texture(vertices.T, vis_colors.T, prn.triangles.T, h, w, c=1)
        face_mask = np.squeeze(face_mask > 0).astype(np.float32)
        new_colors = prn.get_colors_from_texture(new_texture)
        new_image = render_texture(vertices.T, new_colors.T, prn.triangles.T, h, w, c=3)
        new_image = image * (1 - face_mask[:, :, np.newaxis]) \
            + new_image * face_mask[:, :, np.newaxis]

        # Poisson editing to blend the new face into the original image.
        vis_ind = np.argwhere(face_mask > 0)
        vis_min = np.min(vis_ind, 0)
        vis_max = np.max(vis_ind, 0)
        center = (int((vis_min[1] + vis_max[1]) / 2 + 0.5),
                  int((vis_min[0] + vis_max[0]) / 2 + 0.5))
        output = cv2.seamlessClone((new_image * 255).astype(np.uint8),
                                   (image * 255).astype(np.uint8),
                                   (face_mask * 255).astype(np.uint8),
                                   center, cv2.NORMAL_CLONE)
        # save output
        imsave(args.output_path, output)
    print('Done.')
def run_two_image(bfm, uv_coords, uv_kpt_ind, face_ind, triangles, s_uv_coords,
                  image_path_A, mat_path_A, image_path_B, mat_path_B,
                  save_folder, name, mode=1,
                  uv_h=256, uv_w=256, image_h=256, image_w=256):
    """Texture-edit image A with image B's texture and save the blended result.

    mode 0: replace only the eye region; mode (anything else): whole-face swap
    using the UV face mask.  Writes `<name>_eyes.jpg` or `<name>_swap.jpg`
    into save_folder.  `uv_kpt_ind` and `s_uv_coords` are unused; kept for
    signature compatibility with callers.
    """
    image, cropped_image, center, size, pos, vertices = \
        run_one_image(bfm, uv_coords, face_ind, image_path_A, mat_path_A,
                      uv_h, uv_w, image_h, image_w)
    ref_image, ref_cropped_image, ref_center, ref_size, ref_pos, ref_vertices = \
        run_one_image(bfm, uv_coords, face_ind, image_path_B, mat_path_B,
                      uv_h, uv_w, image_h, image_w)

    texture = cv2.remap(cropped_image, pos[:, :, :2].astype(np.float32), None,
                        interpolation=cv2.INTER_NEAREST,
                        borderMode=cv2.BORDER_CONSTANT, borderValue=(0))
    ref_texture = cv2.remap(ref_cropped_image, ref_pos[:, :, :2].astype(np.float32), None,
                            interpolation=cv2.INTER_NEAREST,
                            borderMode=cv2.BORDER_CONSTANT, borderValue=(0))

    if mode == 0:
        # Load eye mask and modify only that region of the texture.
        uv_face_eye = imread('images/uv_face_eyes.png', as_grey=True) / 255.
        uv_face = imread('images/uv_face.png', as_grey=True) / 255.
        eye_mask = (abs(uv_face_eye - uv_face) > 0).astype(np.float32)
        new_texture = texture * (1 - eye_mask[:, :, np.newaxis]) \
            + ref_texture * eye_mask[:, :, np.newaxis]
    else:
        # Whole-face swap blended by the UV face mask.
        uv_whole_face = imread('images/uv_face_mask.png', as_grey=True) / 255.
        new_texture = texture * (1 - uv_whole_face[:, :, np.newaxis]) \
            + ref_texture * uv_whole_face[:, :, np.newaxis]
        # new_texture = ref_texture

    #-- 3. Remap to input image (render).
    vis_colors = np.ones((vertices.shape[0], 1))
    face_mask = render_texture(vertices.T, vis_colors.T, triangles.T,
                               image_h, image_w, c=1)
    face_mask = np.squeeze(face_mask > 0).astype(np.float32)
    new_colors = get_colors_from_texture(new_texture, face_ind, uv_h)
    new_image = render_texture(vertices.T, new_colors.T, triangles.T,
                               image_h, image_w, c=3)
    new_image = cropped_image * (1 - face_mask[:, :, np.newaxis]) \
        + new_image * face_mask[:, :, np.newaxis]

    # Poisson editing to blend the new face into the cropped image.
    vis_ind = np.argwhere(face_mask > 0)
    vis_min = np.min(vis_ind, 0)
    vis_max = np.max(vis_ind, 0)
    center = (int((vis_min[1] + vis_max[1]) / 2 + 0.5),
              int((vis_min[0] + vis_max[0]) / 2 + 0.5))
    output = cv2.seamlessClone((new_image * 255).astype(np.uint8),
                               (cropped_image * 255).astype(np.uint8),
                               (face_mask * 255).astype(np.uint8),
                               center, cv2.NORMAL_CLONE)

    if mode == 0:
        imsave(os.path.join(save_folder, name + '_eyes.jpg'), output)
    else:
        imsave(os.path.join(save_folder, name + '_swap.jpg'), output)
def get_depth_image(vertices, triangles, h, w, isShow=False):
    """Render a depth image from vertex z-values.

    Args:
        vertices: (n, 3) vertex positions; column 2 is depth.
        triangles: face mesh triangles.
        h, w: output image size.
        isShow: when True, normalize depth by its maximum for visualization.

    Returns:
        (h, w) depth image.
    """
    z = vertices[:, 2:]
    if isShow:
        # BUG FIX: builtin max() on a 2-D (n, 1) array iterates rows and
        # compares arrays; np.max gives the intended scalar maximum.
        z = z / np.max(z)
    depth_image = render_texture(vertices.T, z.T, triangles.T, h, w, 1)
    return np.squeeze(depth_image)
def texture_editing(prn, args):
    """Texture-edit/face-swap a still image (grayscale/RGBA tolerant variant).

    Besides the blended output image, also saves the reference texture, the
    frontalized mesh (.mat) and prints pose diagnostics.
    """
    # read image and normalize to 3-channel BGR
    image = imread(args.image_path)
    if len(image.shape) > 2:
        [h, w, c] = image.shape
        if c > 3:
            image = image[:, :, :3]  # drop alpha channel
    else:
        [h, w] = image.shape
        image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)

    #-- 1. 3D reconstruction -> get texture.
    pos = prn.process(image)
    print("Pose shape: ", pos.shape)
    vertices = prn.get_vertices(pos)
    print("Vertice shape: ", vertices.shape)
    image = image / 255.
    texture = cv2.remap(image, pos[:, :, :2].astype(np.float32), None,
                        interpolation=cv2.INTER_NEAREST,
                        borderMode=cv2.BORDER_CONSTANT, borderValue=(0))

    #-- 2. Texture editing
    Mode = args.mode
    if Mode == 0:
        # Change part of the texture (eyes).
        uv_face_eye = imread('Data/uv-data/uv_face_eyes.png', as_grey=True) / 255.
        uv_face = imread('Data/uv-data/uv_face.png', as_grey=True) / 255.
        eye_mask = (abs(uv_face_eye - uv_face) > 0).astype(np.float32)
        ref_image = imread(args.ref_path)
        ref_pos = prn.process(ref_image)
        ref_image = ref_image / 255.
        ref_texture = cv2.remap(ref_image, ref_pos[:, :, :2].astype(np.float32), None,
                                interpolation=cv2.INTER_NEAREST,
                                borderMode=cv2.BORDER_CONSTANT, borderValue=(0))
        new_texture = texture * (1 - eye_mask[:, :, np.newaxis]) \
            + ref_texture * eye_mask[:, :, np.newaxis]
    elif Mode == 1:
        # Whole-face swap.
        ref_image = imread(args.ref_path)
        if len(ref_image.shape) > 2:
            # BUG FIX: originally tested stale `c` from the *input* image;
            # check the reference image's own channel count.
            if ref_image.shape[2] > 3:
                ref_image = ref_image[:, :, :3]
        else:
            ref_image = cv2.cvtColor(ref_image, cv2.COLOR_GRAY2BGR)
        ref_pos = prn.process(ref_image)
        ref_image = ref_image / 255.
        ref_texture = cv2.remap(ref_image, ref_pos[:, :, :2].astype(np.float32), None,
                                interpolation=cv2.INTER_NEAREST,
                                borderMode=cv2.BORDER_CONSTANT, borderValue=(0))
        filename, _ = os.path.splitext(args.ref_path)
        cv2.imwrite(filename + '_tex.jpg', ref_texture[:, :, ::-1] * 255)
        ref_vertices = prn.get_vertices(ref_pos)  # unused, kept from original
        new_texture = ref_texture  # alternative: (texture + ref_texture)/2.
    else:
        print('Wrong Mode! Mode should be 0 or 1.')
        exit()

    #-- 3. Remap to input image (render).
    vis_colors = np.ones((vertices.shape[0], 1))
    face_mask = render_texture(vertices.T, vis_colors.T, prn.triangles.T, h, w, c=1)
    face_mask = np.squeeze(face_mask > 0).astype(np.float32)
    new_colors = prn.get_colors_from_texture(new_texture)
    new_image = render_texture(vertices.T, new_colors.T, prn.triangles.T, h, w, c=3)
    new_image = image * (1 - face_mask[:, :, np.newaxis]) \
        + new_image * face_mask[:, :, np.newaxis]

    # Poisson editing to blend the new face into the original image.
    vis_ind = np.argwhere(face_mask > 0)
    vis_min = np.min(vis_ind, 0)
    vis_max = np.max(vis_ind, 0)
    center = (int((vis_min[1] + vis_max[1]) / 2 + 0.5),
              int((vis_min[0] + vis_max[0]) / 2 + 0.5))
    output = cv2.seamlessClone((new_image * 255).astype(np.uint8),
                               (image * 255).astype(np.uint8),
                               (face_mask * 255).astype(np.uint8),
                               center, cv2.NORMAL_CLONE)

    # Save output: frontalized mesh plus the blended image.
    print("Vertice:", vertices)
    camera_matrix, pose, rotation_matrix = estimate_pose(vertices)
    center_pt = np.mean(vertices, axis=0)
    vertices_trans = vertices - center_pt
    save_vertices = frontalize(vertices_trans, rotation_matrix)
    save_vertices = save_vertices + center_pt
    if not os.path.exists(os.path.dirname(args.output_path)):
        os.makedirs(os.path.dirname(args.output_path))
    sio.savemat(os.path.join(os.path.dirname(args.output_path),
                             os.path.basename(args.output_path) + '_mesh.mat'),
                {'vertices': save_vertices,
                 'colors': new_colors,
                 'triangles': prn.triangles})
    imsave(args.output_path, output)
    print('Done.')
def texture_editing(prn, args):
    """Texture-edit/face-swap a still image (MIXED_CLONE blending variant).

    args.mode 0: replace only the eye region; args.mode 1: whole-face swap.
    Saves the blended result to args.output_path.
    """
    # read image
    image = imread(args.image_path)
    [h, w, _] = image.shape

    # -- 1. 3D reconstruction -> get texture.
    pos = prn.process(image)
    vertices = prn.get_vertices(pos)
    image = image / 255.
    texture = cv2.remap(image, pos[:, :, :2].astype(np.float32), None,
                        interpolation=cv2.INTER_NEAREST,
                        borderMode=cv2.BORDER_CONSTANT, borderValue=(0))

    # -- 2. Texture editing
    Mode = args.mode
    if Mode == 0:
        # Change part of the texture (eyes).
        uv_face_eye = imread('Data/uv-data/uv_face_eyes.png', as_grey=True) / 255.
        uv_face = imread('Data/uv-data/uv_face.png', as_grey=True) / 255.
        eye_mask = (abs(uv_face_eye - uv_face) > 0).astype(np.float32)
        ref_image = imread(args.ref_path)
        ref_pos = prn.process(ref_image)
        ref_image = ref_image / 255.
        ref_texture = cv2.remap(ref_image, ref_pos[:, :, :2].astype(np.float32), None,
                                interpolation=cv2.INTER_NEAREST,
                                borderMode=cv2.BORDER_CONSTANT, borderValue=(0))
        new_texture = texture * (1 - eye_mask[:, :, np.newaxis]) \
            + ref_texture * eye_mask[:, :, np.newaxis]
    elif Mode == 1:
        # Whole-face swap.
        ref_image = imread(args.ref_path)
        ref_pos = prn.process(ref_image)
        ref_image = ref_image / 255.
        ref_texture = cv2.remap(ref_image, ref_pos[:, :, :2].astype(np.float32), None,
                                interpolation=cv2.INTER_NEAREST,
                                borderMode=cv2.BORDER_CONSTANT, borderValue=(0))
        ref_vertices = prn.get_vertices(ref_pos)  # unused, kept from original
        new_texture = ref_texture  # alternative: (texture + ref_texture)/2.
    else:
        print('Wrong Mode! Mode should be 0 or 1.')
        exit()

    # -- 3. Remap to input image (render).
    vis_colors = np.ones((vertices.shape[0], 1))
    face_mask = render_texture(vertices.T, vis_colors.T, prn.triangles.T, h, w, c=1)
    face_mask = np.squeeze(face_mask > 0).astype(np.float32)
    new_colors = prn.get_colors_from_texture(new_texture)
    new_image = render_texture(vertices.T, new_colors.T, prn.triangles.T, h, w, c=3)
    new_image = image * (1 - face_mask[:, :, np.newaxis]) \
        + new_image * face_mask[:, :, np.newaxis]

    # Poisson editing to blend the new face into the original image.
    vis_ind = np.argwhere(face_mask > 0)
    vis_min = np.min(vis_ind, 0)
    vis_max = np.max(vis_ind, 0)
    center = (int((vis_min[1] + vis_max[1]) / 2 + 0.5),
              int((vis_min[0] + vis_max[0]) / 2 + 0.5))
    # This variant deliberately uses MIXED_CLONE (NORMAL_CLONE and
    # MONOCHROME_TRANSFER were tried and commented out in the original).
    output = cv2.seamlessClone((new_image * 255).astype(np.uint8),
                               (image * 255).astype(np.uint8),
                               (face_mask * 255).astype(np.uint8),
                               center, cv2.MIXED_CLONE)

    # save output
    imsave(args.output_path, output)
    print('Done.')
def main(args):
    """Train a PRNet model, rendering a fixed example mesh after every epoch.

    Reads training data from args.train_data_file, restores weights from
    args.model_path, and writes logs to ./logs, per-epoch renders/meshes to
    ./results, and checkpoints to ./Data/train_result.

    NOTE(review): reconstructed from a whitespace-mangled source; dead
    commented-out evaluation/TensorBoard code was dropped.
    """
    trainConfig = Options()
    opt = trainConfig.get_config()

    # Some arguments
    os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpu
    batch_size = opt.batch_size
    epochs = opt.epochs
    train_data_file = args.train_data_file
    model_path = args.model_path
    # Evaluation sets are currently unused (evaluation code was disabled).
    eval_pixel_file = args.eval_pixel_file
    eval_3DFAW_file = args.eval_3DFAW_file
    eval_300W_file = args.eval_300W_file

    # Training data
    data = TrainData(train_data_file)
    begin_epoch = 0

    # Model
    model = PRNet(opt)
    model.setup(model_path)
    model.train()

    # Begin training
    error_f = open('./results/error.txt', 'w')
    time_now = datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
    fp_log = open("./logs/log_" + time_now + ".txt", "w")
    iters_total_each_epoch = int(math.ceil(1.0 * data.num_data / batch_size))
    print('iters_total_each_epoch: ', iters_total_each_epoch)

    # Fixed example used to visualize progress after each epoch.
    image = cv2.imread('./examples/10173-other_3-1.jpg')
    face_ind = np.loadtxt('./Data/uv-data/face_ind.txt').astype(np.int32)
    triangles = np.loadtxt('./Data/uv-data/triangles.txt').astype(np.int32)
    input_image = image / 255.
    pos_gt = np.load('./examples/10173-other_3-1.npy')
    pos_gt = np.array(pos_gt).astype(np.float32)

    # Render the ground-truth mesh once for reference.
    ref_texture_gt = cv2.remap(input_image, pos_gt[:, :, :2].astype(np.float32), None,
                               interpolation=cv2.INTER_NEAREST,
                               borderMode=cv2.BORDER_CONSTANT, borderValue=(0))
    all_colors_gt = np.reshape(ref_texture_gt, [256**2, -1])
    text_c_gt = all_colors_gt[face_ind, :] * 255
    all_vertices_gt = np.reshape(pos_gt, [256**2, -1])
    vertices_gt = all_vertices_gt[face_ind, :]
    pic_gt = render_texture(vertices_gt.T, text_c_gt.T, triangles.T, 256, 256, 3, image)
    vertices_gt[:, 0] = np.minimum(np.maximum(vertices_gt[:, 0], 0), 256 - 1)  # x
    vertices_gt[:, 1] = np.minimum(np.maximum(vertices_gt[:, 1], 0), 256 - 1)  # y
    ind = np.round(vertices_gt).astype(np.int32)
    col = image[ind[:, 1], ind[:, 0], :]  # n x 3
    imsave('./results/results_gt' + '.png', pic_gt)
    write_obj_with_colors('./results/results_gt' + '.obj', vertices_gt, triangles, col)

    test_list = []
    for epoch in range(begin_epoch, epochs):
        train_loss_mean = 0
        for iters in range(iters_total_each_epoch):
            if True:  # iters % 100 == 0:
                batch = data(batch_size, 0)
            else:
                batch = data(batch_size, 1)
            loss_res = model.optimize_parameters(x=batch[0], label=batch[1])
            train_loss_mean = train_loss_mean + loss_res
            time_now_tmp = datetime.now().strftime("%Y-%m-%d_%H:%M:%S")
            for_show = 'epoch: ' + str(epoch) + ' iters: ' + str(iters) \
                + ' loss: ' + str(loss_res)
            log_line = str(loss_res)
            print(for_show)
            fp_log.writelines(log_line + "\n")
        train_loss_mean = train_loss_mean / iters_total_each_epoch

        # Render the model's current prediction on the fixed example.
        posmap = model.generate(x=input_image[np.newaxis, :, :, :])
        posmap = np.squeeze(posmap)
        posmap = posmap * 256 * 1.1  # undo the normalized position-map scale
        cropped_vertices = np.reshape(posmap, [-1, 3]).T
        pos = np.reshape(cropped_vertices.T, [256, 256, 3])
        ref_texture = cv2.remap(input_image, pos[:, :, :2].astype(np.float32), None,
                                interpolation=cv2.INTER_NEAREST,
                                borderMode=cv2.BORDER_CONSTANT, borderValue=(0))
        all_colors = np.reshape(ref_texture, [256**2, -1])
        text_c = all_colors[face_ind, :] * 255
        test_list.append(pos)
        all_vertices = np.reshape(pos, [256**2, -1])
        vertices = all_vertices[face_ind, :]
        pic = render_texture(vertices.T, text_c.T, triangles.T, 256, 256, 3, image)
        imsave('./results/result' + str(epoch) + '.png', pic)
        # NOTE(review): `triangles` is passed where per-vertex colors are
        # expected, as in the original — verify against write_obj_with_colors.
        write_obj_with_colors('./results/result' + str(epoch) + '.obj',
                              vertices, triangles, triangles)

        model.save(save_path='./Data/train_result/256_256_resfcn256' + '_' + str(epoch))

    fp_log.close()
    error_f.close()
def texture_editing(prn, image_path, ref_path, output_path, mode=1):
    """Swap the whole face texture of the image at ``image_path`` with the
    face found in the reference image, and write the blended result.

    Parameters
    ----------
    prn : PRN
        Position-map regression network wrapper (provides ``process``,
        ``get_vertices``, ``get_colors_from_texture`` and ``triangles``).
    image_path : str
        Path of the target image whose face is replaced.
    ref_path : ndarray
        Despite its name, this parameter holds the reference *image array*
        itself — the original ``imread(ref_path)`` call is commented out in
        the historical code, and the value is used directly as an image.
    output_path : str
        Path where the blended output image is saved.
    mode : int, optional
        Editing mode; only ``1`` (whole-face swap) is supported by this
        variant.
    """
    # Validate the mode up front, before any expensive reconstruction work.
    # (The original validated it only after processing the target image.)
    if mode != 1:
        print('Wrong Mode! Mode should be 0 or 1.')
        exit()

    # read image
    image = imread(image_path)
    [h, w, _] = image.shape
    # Downscale very large inputs to keep reconstruction time reasonable.
    if h > 2000 or w > 2000:
        h = int(h * 0.6)
        w = int(w * 0.6)
        image = cv2.resize(image, (w, h), interpolation=cv2.INTER_AREA)

    #-- 1. 3d reconstruction -> get vertices of the target face.
    pos = prn.process(image)
    vertices = prn.get_vertices(pos)
    image = image / 255.
    # NOTE: the original code also sampled the target's own UV texture and
    # the reference vertices here; neither was used in the whole-face-swap
    # path, so those redundant computations have been removed.

    #-- 2. Texture from the reference image (whole face swap).
    ref_image = ref_path  # ref_path already holds the image array (see docstring)
    ref_pos = prn.process(ref_image)
    ref_image = ref_image / 255.
    ref_texture = cv2.remap(ref_image, ref_pos[:, :, :2].astype(np.float32), None,
                            interpolation=cv2.INTER_NEAREST,
                            borderMode=cv2.BORDER_CONSTANT, borderValue=(0))
    new_texture = ref_texture  # (texture + ref_texture)/2. would blend both faces

    #-- 3. remap to input image.(render)
    # Render a binary mask of the pixels covered by the reconstructed face mesh.
    vis_colors = np.ones((vertices.shape[0], 1))
    face_mask = render_texture(vertices.T, vis_colors.T, prn.triangles.T, h, w, c=1)
    face_mask = np.squeeze(face_mask > 0).astype(np.float32)

    # Render the swapped texture back onto the image plane and composite it
    # over the original using the face mask.
    new_colors = prn.get_colors_from_texture(new_texture)
    new_image = render_texture(vertices.T, new_colors.T, prn.triangles.T, h, w, c=3)
    new_image = image * (1 - face_mask[:, :, np.newaxis]) + new_image * face_mask[:, :, np.newaxis]

    # Poisson editing (OpenCV seamlessClone) to blend the swapped face,
    # centred on the bounding box of the face mask.
    vis_ind = np.argwhere(face_mask > 0)
    vis_min = np.min(vis_ind, 0)
    vis_max = np.max(vis_ind, 0)
    center = (int((vis_min[1] + vis_max[1]) / 2 + 0.5),
              int((vis_min[0] + vis_max[0]) / 2 + 0.5))
    output = cv2.seamlessClone((new_image * 255).astype(np.uint8),
                               (image * 255).astype(np.uint8),
                               (face_mask * 255).astype(np.uint8),
                               center, cv2.NORMAL_CLONE)

    # save output
    imsave(output_path, output)
    print('Done.')
def texture_editing(prn, args):
    """Edit the texture of the face in ``args.image_path`` and save the result.

    mode 0: replace only the eye region with the reference face's texture
            (data augmentation / selfie editing).
    mode 1: replace the whole face texture (face swap).

    NOTE(review): this definition shares its name with another
    ``texture_editing`` in this file; at module level, whichever is defined
    last shadows the other. Verify which one callers intend to use.
    """
    # read image
    image = imread(args.image_path)
    [h, w, _] = image.shape

    #-- 1. 3d reconstruction -> get texture.
    pos = prn.process(image)
    vertices = prn.get_vertices(pos)
    image = image/255.
    # Sample the target's own UV texture by warping the image with the
    # predicted position map (nearest-neighbour, zero outside the face).
    texture = cv2.remap(image, pos[:,:,:2].astype(np.float32), None, interpolation=cv2.INTER_NEAREST, borderMode=cv2.BORDER_CONSTANT,borderValue=(0))

    #-- 2. Texture Editing
    Mode = args.mode
    # change part of texture(for data augumentation/selfie editing. Here modify eyes for example)
    if Mode == 0:
        # load eye mask
        # NOTE(review): `as_grey` is the deprecated scikit-image spelling
        # (newer versions use `as_gray`) — confirm against the pinned version.
        uv_face_eye = imread('Data/uv-data/uv_face_eyes.png', as_grey=True)/255.
        uv_face = imread('Data/uv-data/uv_face.png', as_grey=True)/255.
        # Eye region = pixels where the two masks differ.
        eye_mask = (abs(uv_face_eye - uv_face) > 0).astype(np.float32)

        # texture from another image or a processed texture
        ref_image = imread(args.ref_path)
        ref_pos = prn.process(ref_image)
        ref_image = ref_image/255.
        ref_texture = cv2.remap(ref_image, ref_pos[:,:,:2].astype(np.float32), None, interpolation=cv2.INTER_NEAREST, borderMode=cv2.BORDER_CONSTANT,borderValue=(0))

        # modify texture: keep the target's texture outside the eye mask,
        # take the reference's texture inside it.
        new_texture = texture*(1 - eye_mask[:,:,np.newaxis]) + ref_texture*eye_mask[:,:,np.newaxis]
    # change whole face(face swap)
    elif Mode == 1:
        # texture from another image or a processed texture
        ref_image = imread(args.ref_path)
        ref_pos = prn.process(ref_image)
        ref_image = ref_image/255.
        ref_texture = cv2.remap(ref_image, ref_pos[:,:,:2].astype(np.float32), None, interpolation=cv2.INTER_NEAREST, borderMode=cv2.BORDER_CONSTANT,borderValue=(0))
        # NOTE(review): ref_vertices is computed but never used in this path.
        ref_vertices = prn.get_vertices(ref_pos)
        new_texture = ref_texture#(texture + ref_texture)/2.
    else:
        print('Wrong Mode! Mode should be 0 or 1.')
        exit()

    #-- 3. remap to input image.(render)
    # Render a binary mask of the pixels covered by the face mesh.
    vis_colors = np.ones((vertices.shape[0], 1))
    face_mask = render_texture(vertices.T, vis_colors.T, prn.triangles.T, h, w, c = 1)
    face_mask = np.squeeze(face_mask > 0).astype(np.float32)

    # Render the edited texture back onto the image plane and composite it
    # over the original using the face mask.
    new_colors = prn.get_colors_from_texture(new_texture)
    new_image = render_texture(vertices.T, new_colors.T, prn.triangles.T, h, w, c = 3)
    new_image = image*(1 - face_mask[:,:,np.newaxis]) + new_image*face_mask[:,:,np.newaxis]

    # Possion Editing for blending image
    # (Poisson blending via OpenCV seamlessClone, centred on the face
    # mask's bounding box.)
    vis_ind = np.argwhere(face_mask>0)
    vis_min = np.min(vis_ind, 0)
    vis_max = np.max(vis_ind, 0)
    center = (int((vis_min[1] + vis_max[1])/2+0.5), int((vis_min[0] + vis_max[0])/2+0.5))
    output = cv2.seamlessClone((new_image*255).astype(np.uint8), (image*255).astype(np.uint8), (face_mask*255).astype(np.uint8), center, cv2.NORMAL_CLONE)

    # save output
    imsave(args.output_path, output)
    print('Done.')
def get_depth_image(vertices, triangles, h, w, isShow=False):
    """Render a depth image of the mesh by using vertex z-values as colors.

    Parameters
    ----------
    vertices : ndarray, shape (n, 3)
        Mesh vertices in image coordinates; column 2 is depth.
    triangles : ndarray
        Triangle vertex indices of the mesh.
    h, w : int
        Output image height and width.
    isShow : bool, optional
        If True, normalize depth by its maximum so the output is suitable
        for display.

    Returns
    -------
    ndarray, shape (h, w)
        Rendered depth values (channel axis squeezed away).
    """
    z = vertices[:, 2:]
    if isShow:
        # Use np.max instead of the builtin max(): the builtin iterates the
        # (n, 1) array row-by-row in Python — slow, and it only works by
        # accident for single-column arrays.
        z = z / np.max(z)
    depth_image = render_texture(vertices.T, z.T, triangles.T, h, w, 1)
    return np.squeeze(depth_image)
# # xr, yr = prnAugment_torch(x, y) # xr, yr = prnAugment_torch(x, y) # cv2.imshow('pic',xr) # cv2.waitKey(0) # print(time.clock() - t1) image = cv2.imread('../10173-other_3-1.jpg') face_ind = np.loadtxt('../Data/uv-data/face_ind.txt').astype(np.int32) triangles = np.loadtxt('../Data/uv-data/triangles.txt').astype(np.int32) input_image = image/255. pos_gt = np.load('../10173-other_3-1.npy') pos_gt = np.array(pos_gt).astype(np.float32) ref_texture_gt = cv2.remap(input_image, pos_gt[:,:,:2].astype(np.float32), None, interpolation=cv2.INTER_NEAREST, borderMode=cv2.BORDER_CONSTANT,borderValue=(0)) all_colors_gt = np.reshape(ref_texture_gt, [256**2, -1]) text_c_gt = all_colors_gt[face_ind, :]*255 all_vertices_gt = np.reshape(pos_gt, [256**2, -1]) vertices_gt = all_vertices_gt[face_ind, :] pic_gt = render_texture(vertices_gt.T,text_c_gt.T,triangles.T,256,256,3,image) cv2.imshow('ori',pic_gt) #cv2.waitKey(0) vertices_gt[:,0] = np.minimum(np.maximum(vertices_gt[:,0], 0), 256 - 1) # x vertices_gt[:,1] = np.minimum(np.maximum(vertices_gt[:,1], 0), 256 - 1) # y ref_texture_r = cv2.remap(input_image, pos_gt[:,:,:2].astype(np.float32), None, interpolation=cv2.INTER_NEAREST, borderMode=cv2.BORDER_CONSTANT,borderValue=(0)) all_colors_r = np.reshape(ref_texture_r, [256**2, -1]) text_c_r = all_colors_r[face_ind, :] rv = rotate(vertices_gt,[0,30,0]) rc = rotate(text_c_r,[0,10,0]) pic_r = render_texture(rv.T,text_c_r.T,triangles.T,256,256) cv2.imshow('pic_r',pic_r) cv2.waitKey(0)