def __call__(self, predicted_vertices, ground_truth_vertices, alignment_data=None, save_vertices=False, save_output='aligned_vertices.obj'):
    """Rigidly align predicted vertices to ground truth via ICP and return the NMSE.

    Parameters
    ----------
    predicted_vertices : (N, 3) array of predicted mesh vertices.
    ground_truth_vertices : (M, 3) array of reference mesh vertices.
    alignment_data : optional array; rows 0-3 are taken as the 4x4 initial ICP
        pose and alignment_data[4][0] as a uniform scale factor -- TODO confirm
        this layout against whoever writes alignment_data.
    save_vertices : if True, write the aligned full predicted mesh to `save_output`.
    save_output : path of the .obj written when save_vertices is True.

    Returns
    -------
    The value of self.nmse(aligned full predicted vertices, original ground truth).
    """
    if alignment_data is not None:
        # First four rows: homogeneous initial pose; fifth row carries the scale.
        init_pose = alignment_data[:4]
        scale = alignment_data[4][0]
    else:
        init_pose = None
        scale = 1.0
    # Keep untrimmed copies: the error is computed on the full clouds, not the
    # count-matched ones fed to ICP.
    original_predicted_vertices = predicted_vertices.copy() * scale
    original_f_vertices = ground_truth_vertices.copy()
    # ICP needs equal point counts: drop leading rows from the larger cloud.
    # NOTE(review): only the trimmed cloud is multiplied by `scale` here, so
    # when the counts already match only the ground truth gets scaled -- verify
    # this asymmetry is intentional.
    if (predicted_vertices.shape[0] > ground_truth_vertices.shape[0]):
        diff = predicted_vertices.shape[0] - ground_truth_vertices.shape[0]
        predicted_vertices = predicted_vertices[diff:, :] * scale
    else:
        diff = ground_truth_vertices.shape[0] - predicted_vertices.shape[0]
        ground_truth_vertices = ground_truth_vertices[diff:, :] * scale
    # Iterative closest point: returns homogeneous transform, per-point
    # distances, and iterations used.
    tform, distances, i = icp(predicted_vertices, ground_truth_vertices, max_iterations=100, tolerance=0.0001, init_pose=init_pose)
    # Apply the recovered transform to both the trimmed and the full clouds.
    aligned_predicted_vertices = apply_homogenous_tform(tform, predicted_vertices)
    aligned_original_vertices = apply_homogenous_tform(tform, original_predicted_vertices)
    if save_vertices:
        # Flat white per-vertex colors for the debug mesh dump.
        colors = np.ones((aligned_original_vertices.shape))
        write_obj_with_colors(save_output, aligned_original_vertices, self.triangles, colors)
    error = self.nmse(aligned_original_vertices, original_f_vertices)
    return error
def get_3d_pkl_lrw(pkl, root, bbb=0):
    """Reconstruct a PRNet 3D face mesh for the key frame of every LRW sample
    listed in the pickle `pkl`.

    Each pickle item is a sequence whose first cell is the clip path prefix and
    whose last cell is the key-frame number; the frame image is expected at
    '<prefix>_%05d.png'. Outputs '<prefix>_prnet.npy' (68 landmarks) and
    '<prefix>_original.obj' (colored mesh) next to the input.

    Parameters
    ----------
    pkl : path to the pickled sample list.
    root : unused here -- kept for call-site compatibility.
    bbb : shard index; see NOTE below.
    """
    # ---- init PRN
    # os.environ['CUDA_VISIBLE_DEVICES'] = '0'  # GPU number, -1 for CPU
    prn = PRN(is_dlib=True)
    _file = open(pkl, "rb")
    data = pickle.load(_file)
    _file.close()
    gg = len(data)
    # NOTE(review): with a stride of gg * 1 this slice is the whole list for
    # bbb == 0 and empty for any other shard -- looks like a leftover of a
    # sharding scheme (cf. the 0.1 stride in get_3d). Confirm intent.
    data = data[int(gg * 1 * (bbb)):int(gg * 1 * (bbb + 1))]
    for kk, item in enumerate(data):
        print(kk)
        print(item)
        if os.path.exists(item[0] + '_original.obj'):
            continue  # already processed: skip
        target_id = item[-1]
        img_path = item[0] + '_%05d.png' % target_id
        print(img_path)
        target_frame = cv2.imread(img_path)
        target_frame = cv2.cvtColor(target_frame, cv2.COLOR_BGR2RGB)
        image = target_frame  # read image
        [h, w, c] = image.shape
        pos = prn.process(image)  # use dlib to detect face
        image = image / 255.
        if pos is None:
            # No face detected in this frame.
            print('+++++')
            continue
        # landmark
        kpt = prn.get_landmarks(pos)
        # Flip y so landmarks use bottom-left-origin coordinates.
        kpt[:, 1] = h - kpt[:, 1]
        np.save(item[0] + '_prnet.npy', kpt)
        # 3D vertices
        vertices = prn.get_vertices(pos)
        save_vertices = vertices.copy()
        save_vertices[:, 1] = h - 1 - save_vertices[:, 1]
        # corresponding colors
        colors = prn.get_colors(image, vertices)
        write_obj_with_colors(item[0] + '_original.obj', save_vertices, prn.triangles, colors)  # save 3d face (can open with meshlab)
def get_3d_single(video_path=None, target_id=None, img_path=None):
    """Run PRNet on a single frame and save its landmarks and 3D mesh.

    Exactly one of `video_path` or `img_path` should be given. With
    `video_path`, frame `target_id` is grabbed (and also dumped as a .png);
    with `img_path` the still image is used directly. Writes
    '<stem>_prnet.npy' (68 landmarks) and '<stem>_original.obj' next to the
    input.

    Parameters
    ----------
    video_path : optional path to a video file.
    target_id : 0-based index of the frame to grab when video_path is given.
    img_path : optional path to a still image.
    """
    # ---- init PRN
    os.environ['CUDA_VISIBLE_DEVICES'] = '0'  # GPU number, -1 for CPU
    prn = PRN(is_dlib=True)
    if video_path is not None:
        if not os.path.exists(video_path):
            print(video_path)
            print('+++++')
        if os.path.exists(video_path[:-4] + '.obj'):
            print('-----')
        cap = cv2.VideoCapture(video_path)
        # Skip the first target_id frames, then read the target frame.
        for i in range(target_id):
            ret, frame = cap.read()
        ret, target_frame = cap.read()
        cv2.imwrite(video_path[:-4] + '_%05d.png' % target_id, target_frame)
        # NOTE(review): the video frame is processed in BGR order here while
        # the img_path branch converts to RGB -- confirm which is intended.
    elif img_path is not None:
        target_frame = cv2.imread(img_path)
        target_frame = cv2.cvtColor(target_frame, cv2.COLOR_BGR2RGB)
    image = target_frame  # read image
    [h, w, c] = image.shape
    pos = prn.process(image)  # use dlib to detect face
    image = image / 255.
    if pos is None:
        # BUG FIX: bail out when no face is detected instead of crashing
        # inside prn.get_landmarks(None).
        print('no face detected')
        return
    # landmark
    kpt = prn.get_landmarks(pos)
    kpt[:, 1] = h - kpt[:, 1]  # flip y to bottom-left-origin coordinates
    if video_path is not None:
        np.save(video_path[:-4] + '_prnet.npy', kpt)
    else:
        np.save(img_path[:-4] + '_prnet.npy', kpt)
    # 3D vertices
    vertices = prn.get_vertices(pos)
    # BUG FIX: save_vertices was used without being defined (the copy line was
    # commented out, so this raised NameError); restore it before the y flip.
    save_vertices = vertices.copy()
    save_vertices[:, 1] = h - 1 - save_vertices[:, 1]
    # corresponding colors
    colors = prn.get_colors(image, vertices)
    if video_path is not None:
        write_obj_with_colors(video_path[:-4] + '_original.obj', save_vertices, prn.triangles, colors)  # save 3d face (can open with meshlab)
        print('The generated 3d mesh model is stored in ' + video_path[:-4] + '_original.obj')
    else:
        write_obj_with_colors(img_path[:-4] + '_original.obj', save_vertices, prn.triangles, colors)  # save 3d face (can open with meshlab)
        print('The generated 3d mesh model is stored in ' + img_path[:-4] + '_original.obj')
def get_3d_single_video(img_path):
    """Run PRNet on one extracted video frame (the most visible frame).

    Writes '<prefix>__prnet.npy' (68 landmarks) and '<prefix>__original.obj'
    (colored mesh), where the prefix is img_path with its last 11 characters
    dropped (the '_%05d.png' frame suffix -- TODO confirm this naming scheme).

    Parameters
    ----------
    img_path : path to the frame image to reconstruct.
    """
    # ---- init PRN
    # os.environ['CUDA_VISIBLE_DEVICES'] = '0'  # GPU number, -1 for CPU
    prn = PRN(is_dlib=True)
    print(img_path)
    target_frame = cv2.imread(img_path)
    target_frame = cv2.cvtColor(target_frame, cv2.COLOR_BGR2RGB)
    image = target_frame  # read image
    [h, w, c] = image.shape
    pos = prn.process(image)  # use dlib to detect face
    image = image / 255.
    if pos is None:
        print('No pos')
        # BUG FIX: previously execution fell through after the message and
        # crashed inside prn.get_landmarks(None); abort instead.
        return
    # landmark
    kpt = prn.get_landmarks(pos)
    kpt[:, 1] = h - kpt[:, 1]  # flip y to bottom-left-origin coordinates
    np.save(img_path[:-11] + '__prnet.npy', kpt)
    # 3D vertices
    vertices = prn.get_vertices(pos)
    save_vertices = vertices.copy()
    save_vertices[:, 1] = h - 1 - save_vertices[:, 1]
    # corresponding colors
    colors = prn.get_colors(image, vertices)
    write_obj_with_colors(img_path[:-11] + '__original.obj', save_vertices, prn.triangles, colors)  # save 3d face (can open with meshlab)
def frames_to_objs(frames, save_folder, name=""):
    # Export each frame of `frames` (an N x H x W x C stack) as a colored .obj
    # mesh under `save_folder`, using the module-level PRNet instance `prn`.
    # Files are named '<name>_<index>.obj'.
    for frame_idx, frame in enumerate(frames):
        obj_path = os.path.join(save_folder, name + "_" + str(frame_idx) + ".obj")
        mat_path = os.path.join(save_folder, name + "_" + str(frame_idx) + "_mesh.mat")
        pos = frames[frame_idx, :, :, :]
        pos = prn.process(frame)
        # Landmarks are computed but only kept locally (txt export disabled).
        kpt = prn.get_landmarks(pos)
        # Dense 3D vertices and their per-vertex colors sampled from the frame.
        vertices = prn.get_vertices(pos)
        colors = prn.get_colors(frame, vertices)
        print(colors)
        print(colors.shape)
        write_obj_with_colors(obj_path, vertices, prn.triangles, colors)
        # np.savetxt(os.path.join(save_folder, name + "_" + str(frame_idx) + '.txt'), kpt)
        print("Outputted {}".format(frame_idx))
def main(args):
    """Batch PRNet demo: for every .jpg/.png in args.inputDir, regress the UV
    position map and, depending on flags, save the mesh (colored or textured),
    depth, .mat dump, landmarks, pose, and/or show visualizations.
    """
    # cv_plot helpers are only needed (and importable) for display/texture modes.
    if args.isShow or args.isTexture:
        import cv2
        from utils.cv_plot import plot_kpt, plot_vertices, plot_pose_box
    # ---- init PRN
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu  # GPU number, -1 for CPU
    prn = PRN(is_dlib=args.isDlib)
    # ------------- load data
    image_folder = args.inputDir
    save_folder = args.outputDir
    print("save_folder:", save_folder)
    if not os.path.exists(save_folder):
        os.mkdir(save_folder)
    types = ('*.jpg', '*.png')
    image_path_list = []
    for files in types:
        image_path_list.extend(glob(os.path.join(image_folder, files)))
    total_num = len(image_path_list)
    for i, image_path in enumerate(image_path_list):
        # Basename without extension, used for all output files.
        name = image_path.strip().split(os.sep)[-1][:-4]
        # read image
        image = imread(image_path)
        [h, w, c] = image.shape
        if c > 3:
            image = image[:, :, :3]  # drop alpha channel
        # the core: regress position map
        if args.isDlib:
            # Dlib's detector is slow on huge images: cap the longer side at 1000.
            max_size = max(image.shape[0], image.shape[1])
            if max_size > 1000:
                image = rescale(image, 1000. / max_size)
                image = (image * 255).astype(np.uint8)
            pos = prn.process(image)  # use dlib to detect face
        else:
            if image.shape[0] == image.shape[1]:
                image = resize(image, (256, 256))
                pos = prn.net_forward(image / 255.)  # input image has been cropped to 256x256
            else:
                box = np.array([0, image.shape[1] - 1, 0, image.shape[0] - 1])  # cropped with bounding box
                pos = prn.process(image, box)
        # Work in [0, 1] floats from here on.
        image = image / 255.
        if pos is None:
            continue  # no face found
        if args.is3d or args.isMat or args.isPose or args.isShow:
            # 3D vertices
            vertices = prn.get_vertices(pos)
            if args.isFront:
                save_vertices = frontalize(vertices)
            else:
                save_vertices = vertices.copy()
            # Flip y to bottom-left-origin coordinates for the .obj.
            save_vertices[:, 1] = h - 1 - save_vertices[:, 1]
        if args.isImage:
            imsave(os.path.join(save_folder, name + ".jpg"), image)
        if args.is3d:
            # corresponding colors
            colors = prn.get_colors(image, vertices)
            if args.isTexture:
                if args.texture_size != 256:
                    pos_interpolated = resize(pos, (args.texture_size, args.texture_size), preserve_range=True)
                else:
                    pos_interpolated = pos.copy()
                # Unwarp the input image into UV texture space.
                texture = cv2.remap(image, pos_interpolated[:, :, :2].astype(np.float32), None, interpolation=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT, borderValue=(0))
                if args.isMask:
                    # Zero out texels belonging to self-occluded vertices.
                    vertices_vis = get_visibility(vertices, prn.triangles, h, w)
                    uv_mask = get_uv_mask(vertices_vis, prn.triangles, prn.uv_coords, h, w, prn.resolution_op)
                    uv_mask = resize(uv_mask, (args.texture_size, args.texture_size), preserve_range=True)
                    texture = texture * uv_mask[:, :, np.newaxis]
                write_obj_with_texture(os.path.join(save_folder, name + '.obj'), save_vertices, prn.triangles, texture, prn.uv_coords / prn.resolution_op)  # save 3d face with texture (can open with meshlab)
            else:
                write_obj_with_colors(os.path.join(save_folder, name + '.obj'), save_vertices, prn.triangles, colors)  # save 3d face (can open with meshlab)
        if args.isDepth:
            depth_image = get_depth_image(vertices, prn.triangles, h, w, True)
            depth = get_depth_image(vertices, prn.triangles, h, w)
            imsave(os.path.join(save_folder, name + '_depth.jpg'), depth_image)
            sio.savemat(os.path.join(save_folder, name + '_depth.mat'), {'depth': depth})
        if args.isMat:
            sio.savemat(os.path.join(save_folder, name + '_mesh.mat'), {'vertices': vertices, 'colors': colors, 'triangles': prn.triangles})
        if args.isKpt or args.isShow:
            # get landmarks
            kpt = prn.get_landmarks(pos)
            np.savetxt(os.path.join(save_folder, name + '_kpt.txt'), kpt)
        if args.isPose or args.isShow:
            # estimate pose
            camera_matrix, pose = estimate_pose(vertices)
            np.savetxt(os.path.join(save_folder, name + '_pose.txt'), pose)
            np.savetxt(os.path.join(save_folder, name + '_camera_matrix.txt'), camera_matrix)
            # NOTE(review): _pose.txt is written twice with the same data --
            # this second savetxt is redundant.
            np.savetxt(os.path.join(save_folder, name + '_pose.txt'), pose)
        if args.isShow:
            # ---------- Plot
            # NOTE(review): image_pose is never used; plot_pose_box is called
            # again below for the imshow.
            image_pose = plot_pose_box(image, camera_matrix, kpt)
            cv2.imshow('sparse alignment', plot_kpt(image, kpt))
            cv2.imshow('dense alignment', plot_vertices(image, vertices))
            cv2.imshow('pose', plot_pose_box(image, camera_matrix, kpt))
            cv2.waitKey(0)
def get_3d(bbb):
    """Process one 10% shard of the 'front_rt.pkl' sample list: grab each
    sample's key frame from its video and save PRNet landmarks and mesh.

    Parameters
    ----------
    bbb : shard index in [0, 9]; samples [10%*bbb, 10%*(bbb+1)) are processed.

    NOTE(review): `root` is a module-level global, not a parameter.
    """
    # ---- init PRN
    os.environ['CUDA_VISIBLE_DEVICES'] = '0'  # GPU number, -1 for CPU
    prn = PRN(is_dlib=True)
    # ------------- load data
    # frame_id = "test_video/id00419/3U0abyjM2Po/00024"
    # mesh_file = os.path.join(root, frame_id + ".obj")
    # rt_file = os.path.join(root, frame_id + "_sRT.npy")
    # _file = open(os.path.join(root, 'txt', "front_rt.pkl"), "rb")
    # data = pickle._Unpickler(_file)
    # data.encoding = 'latin1'
    # data = data.load()
    _file = open(os.path.join(root, 'txt', "front_rt.pkl"), "rb")
    data = pickle.load(_file)
    _file.close()
    gg = len(data)
    print(len(data))
    # Take this worker's 10% shard of the sample list.
    data = data[int(gg * 0.1 * bbb):int(gg * 0.1 * (bbb + 1))]
    for kk, item in enumerate(data):
        print(kk)
        target_id = item[-1]  # key-frame index within the clip
        video_path = os.path.join(root, 'unzip', item[0] + '.mp4')
        if not os.path.exists(video_path):
            print(video_path)
            print('+++++')
            continue
        if os.path.exists(video_path[:-4] + '.obj'):
            print('-----')
            continue  # already processed
        cap = cv2.VideoCapture(video_path)
        # Skip to the key frame, read it, and keep a .png copy next to the video.
        for i in range(target_id):
            ret, frame = cap.read()
        ret, target_frame = cap.read()
        cv2.imwrite(video_path[:-4] + '_%05d.png' % target_id, target_frame)
        target_frame = cv2.cvtColor(target_frame, cv2.COLOR_BGR2RGB)
        image = target_frame  # read image
        [h, w, c] = image.shape
        pos = prn.process(image)  # use dlib to detect face
        image = image / 255.
        if pos is None:
            continue  # no face detected
        # landmark
        kpt = prn.get_landmarks(pos)
        # NOTE(review): hard-codes a 224-px frame height for the y flip while
        # the mesh flip below uses h -- confirm frames are always 224x224.
        kpt[:, 1] = 224 - kpt[:, 1]
        np.save(video_path[:-4] + '_prnet.npy', kpt)
        # 3D vertices
        vertices = prn.get_vertices(pos)
        # save_vertices, p = frontalize(vertices)
        # np.save(video_path[:-4] + '_p.npy', p)
        save_vertices = vertices.copy()
        save_vertices[:, 1] = h - 1 - save_vertices[:, 1]
        # corresponding colors
        colors = prn.get_colors(image, vertices)
        write_obj_with_colors(video_path[:-4] + '_original.obj', save_vertices, prn.triangles, colors)  # save 3d face (can open with meshlab)
def get_3d_single(video_path=None, target_id=None, img_path=None, device_id='3'):
    """Dump every frame of `video_path` and run PRNet on each, saving per-frame
    .png, landmark .npy, and colored .obj files next to the video.

    Parameters
    ----------
    video_path : path to the input video (required despite the default).
    target_id : ignored -- recomputed as the clip's total frame count.
    img_path : fallback output prefix used only when video_path is None.
    device_id : CUDA device to expose; '-1' for CPU.
    """
    # ---- init PRN
    target_id = count_frames(video_path)  # process the whole clip
    os.environ['CUDA_VISIBLE_DEVICES'] = device_id  # GPU number, -1 for CPU
    prn = PRN(is_dlib=True)
    # BUG FIX: the body referenced an undefined name `target_path`, raising
    # NameError on the first frame; outputs go next to the input video.
    target_path = video_path
    cap = cv2.VideoCapture(video_path)
    for i in range(target_id):
        ret, frame = cap.read()
        print(target_path[:-4] + '_%05d.png' % i)
        cv2.imwrite(target_path[:-4] + '_%05d.png' % i, frame)
        target_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        image = target_frame
        [h, w, c] = image.shape
        pos = prn.process(image)  # use dlib to detect face
        image = image / 255
        if pos is None:
            # BUG FIX: skip frames with no detected face instead of crashing
            # inside prn.get_landmarks(None).
            print('no face in frame %d' % i)
            continue
        kpt = prn.get_landmarks(pos)
        # NOTE(review): hard-codes a 224-px frame height for the y flip while
        # the mesh flip below uses h -- confirm frames are always 224x224.
        kpt[:, 1] = 224 - kpt[:, 1]
        if video_path is not None:
            print(target_path[:-4] + '_%05d' % i + '_prnet.npy')
            np.save(target_path[:-4] + '_%05d' % i + '_prnet.npy', kpt)
        else:
            np.save(img_path[:-4] + '_%05d' % i + '_prnet.npy', kpt)
        # 3D vertices
        vertices = prn.get_vertices(pos)
        save_vertices = vertices.copy()
        save_vertices[:, 1] = h - 1 - save_vertices[:, 1]
        # corresponding colors
        colors = prn.get_colors(image, vertices)
        if video_path is not None:
            write_obj_with_colors(target_path[:-4] + '_%05d' % i + '_original.obj', save_vertices, prn.triangles, colors)  # save 3d face (can open with meshlab)
        else:
            write_obj_with_colors(img_path[:-4] + '_%05d' % i + '_original.obj', save_vertices, prn.triangles, colors)  # save 3d face (can open with meshlab)
# NOTE(review): this chunk is the tail of a demo script -- `image_path_list`,
# `image_folder`, `files`, `prn`, and `save_folder` are defined earlier in the
# (unseen) surrounding script.
image_path_list.extend(glob(os.path.join(image_folder, files)))
total_num = len(image_path_list)
for i, image_path in enumerate(image_path_list):
    # read image
    image = imread(image_path)
    # the core: regress position map
    if 'AFLW2000' in image_path:
        # AFLW2000 ships ground-truth 68 3D landmarks in a sibling .mat file.
        mat_path = image_path.replace('jpg', 'mat')
        info = sio.loadmat(mat_path)
        kpt = info['pt3d_68']
        pos = prn.process(image, kpt)  # kpt information is only used for detecting face and cropping image
    else:
        pos = prn.process(image)  # use dlib to detect face
    # -- Basic Applications
    # get landmarks
    kpt = prn.get_landmarks(pos)
    # 3D vertices
    vertices = prn.get_vertices(pos)
    # corresponding colors
    colors = prn.get_colors(image, vertices)
    # -- save (basename without extension)
    name = image_path.strip().split('/')[-1][:-4]
    np.savetxt(os.path.join(save_folder, name + '.txt'), kpt)
    write_obj_with_colors(os.path.join(save_folder, name + '.obj'), vertices, prn.triangles, colors)  # save 3d face (can open with meshlab)
    sio.savemat(os.path.join(save_folder, name + '_mesh.mat'), {'vertices': vertices, 'colors': colors, 'triangles': prn.triangles})
def main(args):
    """Batch PRNet demo (FaceBoxes-capable variant): for every .jpg/.png in
    args.inputDir, regress the UV position map and, per flags, save mesh,
    depth, .mat, landmarks, dlib-aligned crops, pose, and/or show plots.
    """
    if args.isShow or args.isTexture:
        import cv2
        from utils.cv_plot import plot_kpt, plot_vertices, plot_pose_box
    # ---- init PRN
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu  # GPU number, -1 for CPU
    prn = PRN(is_dlib=args.isDlib, is_faceboxes=args.isFaceBoxes)
    # ---- load data
    image_folder = args.inputDir
    save_folder = args.outputDir
    if not os.path.exists(save_folder):
        os.mkdir(save_folder)
    types = ('*.jpg', '*.png')
    image_path_list = []
    for files in types:
        image_path_list.extend(glob(os.path.join(image_folder, files)))
    total_num = len(image_path_list)
    for i, image_path in enumerate(image_path_list):
        # Basename without extension, used for all output files.
        name = image_path.strip().split('/')[-1][:-4]
        # read image
        image = imread(image_path)
        [h, w, c] = image.shape
        if c > 3:
            image = image[:, :, :3]  # RGBA image: drop the alpha channel
        # the core: regress position map
        if args.isDlib:
            # Dlib's detector is slow on huge images: cap the longer side at 1000.
            max_size = max(image.shape[0], image.shape[1])
            if max_size > 1000:
                image = rescale(image, 1000. / max_size)
                image = (image * 255).astype(np.uint8)
            pos = prn.process(image)  # use dlib to detect face
        elif args.isFaceBoxes:
            pos, cropped_img = prn.process(image)  # use faceboxes to detect face
        else:
            if image.shape[0] == image.shape[1]:
                image = resize(image, (256, 256))
                pos = prn.net_forward(image / 255.)  # input image has been cropped to 256x256
            else:
                box = np.array([0, image.shape[1] - 1, 0, image.shape[0] - 1])  # cropped with bounding box
                pos = prn.process(image, box)
        # Work in [0, 1] floats from here on.
        image = image / 255.
        if pos is None:
            continue  # no face found
        if args.is3d or args.isMat or args.isPose or args.isShow:
            # 3D vertices
            vertices = prn.get_vertices(pos)
            if args.isFront:
                save_vertices = frontalize(vertices)
            else:
                save_vertices = vertices.copy()
            save_vertices[:, 1] = h - 1 - save_vertices[:, 1]
        # Disabled: 3D face rotation-alignment rendering experiment.
        # if args.isImage:
        #     vertices = prn.get_vertices(pos)
        #     scale_init = 180 / (np.max(vertices[:, 1]) - np.min(vertices[:, 1]))
        #     colors = prn.get_colors(image, vertices)
        #     triangles = prn.triangles
        #     camera_matrix, pose = estimate_pose(vertices)
        #     yaw, pitch, roll = pos * ANGULAR
        #     vertices1 = vertices - np.mean(vertices, 0)[np.newaxis, :]
        #     obj = {'s': scale_init, 'angles': [-pitch, yaw, -roll + 180], 't': [0, 0, 0]}
        #     camera = {'eye': [0, 0, 256], 'proj_type': 'perspective', 'at': [0, 0, 0],
        #               'near': 1000, 'far': -100, 'fovy': 30, 'up': [0, 1, 0]}
        #     image1 = transform_test(vertices1, obj, camera, triangles, colors, h=256, w=256) * 255
        #     image1 = image1.astype(np.uint8)
        #     imsave(os.path.join(save_folder, name + '.jpg'), image1)
        if args.is3d:
            # corresponding colors
            colors = prn.get_colors(image, vertices)
            if args.isTexture:
                if args.texture_size != 256:
                    pos_interpolated = resize(pos, (args.texture_size, args.texture_size), preserve_range=True)
                else:
                    pos_interpolated = pos.copy()
                # Unwarp the input image into UV texture space.
                texture = cv2.remap(image, pos_interpolated[:, :, :2].astype(np.float32), None, interpolation=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT, borderValue=(0))
                if args.isMask:
                    # Zero out texels belonging to self-occluded vertices.
                    vertices_vis = get_visibility(vertices, prn.triangles, h, w)
                    uv_mask = get_uv_mask(vertices_vis, prn.triangles, prn.uv_coords, h, w, prn.resolution_op)
                    uv_mask = resize(uv_mask, (args.texture_size, args.texture_size), preserve_range=True)
                    texture = texture * uv_mask[:, :, np.newaxis]
                write_obj_with_texture(os.path.join(save_folder, name + '.obj'), save_vertices, prn.triangles, texture, prn.uv_coords / prn.resolution_op)  # save 3d face with texture (can open with meshlab)
            else:
                write_obj_with_colors(os.path.join(save_folder, name + '.obj'), save_vertices, prn.triangles, colors)  # save 3d face (can open with meshlab)
        if args.isDepth:
            depth_image = get_depth_image(vertices, prn.triangles, h, w, True)
            depth = get_depth_image(vertices, prn.triangles, h, w)
            imsave(os.path.join(save_folder, name + '_depth.jpg'), depth_image)
            sio.savemat(os.path.join(save_folder, name + '_depth.mat'), {'depth': depth})
        if args.isMat:
            sio.savemat(os.path.join(save_folder, name + '_mesh.mat'), {'vertices': vertices, 'colors': colors, 'triangles': prn.triangles})
        if args.isKpt:
            # get landmarks
            kpt = prn.get_landmarks(pos)
            np.savetxt(os.path.join(save_folder, name + '_kpt.txt'), kpt)
        if args.is2dKpt and args.is68Align:
            # Align the face crop via dlib using the 2D landmarks.
            ori_kpt = prn.get_landmarks_2d(pos)
            dlib_aligner = DlibAlign()
            dst_img = dlib_aligner.dlib_68_align(image, ori_kpt, 256, 0.5)
            imsave(os.path.join(save_folder, name + '.jpg'), dst_img)
        if args.isPose:
            # estimate pose (this variant's estimate_pose returns 3 values)
            camera_matrix, pose, rot = estimate_pose(vertices)
            np.savetxt(os.path.join(save_folder, name + '_pose.txt'), np.array(pose) * ANGULAR)
            np.savetxt(os.path.join(save_folder, name + '_camera_matrix.txt'), camera_matrix)
        if args.isShow:
            kpt = prn.get_landmarks(pos)
            cv2.imshow('sparse alignment', plot_kpt(image, kpt))
            # cv2.imshow('dense alignment', plot_vertices(image, vertices))
            # cv2.imshow('pose', plot_pose_box(image, camera_matrix, kpt))
            cv2.waitKey(1)
def run_one_image(uv_kpt_ind, face_ind, triangles, s_uv_coords, image_path, npy_path, save_folder, name, uv_h=256, uv_w=256, image_h=256, image_w=256):
    """Rebuild all PRNet artifacts for one image from a saved UV position map:
    a colored mesh ('<name>_c.obj'), a textured mesh ('<name>.obj'), and a
    landmark overlay image ('<name>_kpt.jpg')."""
    # 1. load image
    cropped_image = imread(image_path) / 255.
    print('input image is ok!')
    # 2. load uv position map
    pos = np.load(npy_path)
    print('uv map is ok!')
    # 3. deal uv map: scale normalized map back to pixel units
    pos = pos * image_h
    # 4. pick the mesh vertices out of the uv map
    vertices = get_vertices(pos, face_ind, uv_h)
    flipped = vertices.copy()
    flipped[:, 1] = image_h - 1 - flipped[:, 1]  # flip y for the .obj
    # 5. sample per-vertex colors and write the colored mesh
    colors = get_colors(cropped_image, vertices)
    write_obj_with_colors(os.path.join(save_folder, name + '_c.obj'), flipped, triangles, colors)
    print('color 3d face is ok!')
    # 6. unwarp the image into uv texture space, masked by visibility
    uv_positions = pos.copy()
    texture = cv2.remap(cropped_image, uv_positions[:, :, :2].astype(np.float32), None, interpolation=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT, borderValue=(0))
    visible = get_visibility(vertices, triangles, image_h, image_w)
    mask = get_uv_mask(visible, triangles, s_uv_coords, image_h, image_w, uv_h)
    mask = resize(mask, (256, 256), preserve_range=True)
    texture = texture * mask[:, :, np.newaxis]
    write_obj_with_texture(os.path.join(save_folder, name + '.obj'), flipped, triangles, texture, s_uv_coords / uv_h)
    print('texture 3d face is ok!')
    # 7. composite the landmark plot over the input image
    base = (cropped_image * 255.).astype(np.uint8)
    kpt = get_landmarks(pos, uv_kpt_ind)
    overlay = plot_kpt(cropped_image, kpt).astype(np.uint8)
    overlay_gray = cv2.cvtColor(overlay, cv2.COLOR_RGB2GRAY)
    ret, overlay_mask = cv2.threshold(overlay_gray, 127, 255, cv2.THRESH_BINARY)
    overlay_mask = cv2.bitwise_not(overlay_mask)
    background = cv2.bitwise_and(base, base, mask=overlay_mask)
    kpt_image = cv2.add(background, overlay)
    imsave(os.path.join(save_folder, name + '_kpt.jpg'), kpt_image / 255.)
    print('kpt image is ok!')
def main(args):
    """Batch PRNet demo (PyTorch variant): resize every input image to 256x256,
    run the network directly, and per flags save mesh/texture/landmarks/pose
    or show visualizations.
    """
    if args.isShow or args.isTexture:
        import cv2
        from utils.cv_plot import plot_kpt, plot_vertices, plot_pose_box
    # ---- transform: to tensor + dataset normalization
    transform_img = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(FLAGS["normalize_mean"], FLAGS["normalize_std"])
    ])
    # ---- init PRN
    prn = PRN(args.model)
    # ------------- load data
    image_folder = args.inputDir
    save_folder = args.outputDir
    if not os.path.exists(save_folder):
        os.mkdir(save_folder)
    types = ('*.jpg', '*.png')
    image_path_list = []
    for files in types:
        image_path_list.extend(glob(os.path.join(image_folder, files)))
    total_num = len(image_path_list)
    print("#" * 25)
    print("[PRNet Inference] {} picture were under processing~".format(total_num))
    print("#" * 25)
    for i, image_path in enumerate(image_path_list):
        # Basename without extension, used for all output files.
        name = image_path.strip().split('/')[-1][:-4]
        # read image (BGR, as loaded by OpenCV)
        image = cv2.imread(image_path)
        [h, w, c] = image.shape
        # the core: regress position map
        image = cv2.resize(image, (256, 256))
        image_t = transform_img(image)
        image_t = image_t.unsqueeze(0)  # add batch dimension
        pos = prn.net_forward(image_t)  # input image has been cropped to 256x256
        # CHW network output -> HWC position map in pixel units.
        out = pos.cpu().detach().numpy()
        pos = np.squeeze(out)
        cropped_pos = pos * 255
        pos = cropped_pos.transpose(1, 2, 0)
        if pos is None:
            continue
        if args.is3d or args.isMat or args.isPose or args.isShow:
            # 3D vertices
            vertices = prn.get_vertices(pos)
            if args.isFront:
                save_vertices = frontalize(vertices)
            else:
                save_vertices = vertices.copy()
            save_vertices[:, 1] = h - 1 - save_vertices[:, 1]
        if args.isImage:
            cv2.imwrite(os.path.join(save_folder, name + '.jpg'), image)
        if args.is3d:
            # corresponding colors
            colors = prn.get_colors(image, vertices)
            if args.isTexture:
                if args.texture_size != 256:
                    # NOTE(review): cv2.resize has no `preserve_range` keyword
                    # (that is skimage.transform.resize) -- this raises
                    # TypeError when texture_size != 256. Same below for the
                    # uv_mask resize. Should be cv2.resize(pos, (size, size)).
                    pos_interpolated = cv2.resize(pos, (args.texture_size, args.texture_size), preserve_range=True)
                else:
                    pos_interpolated = pos.copy()
                texture = cv2.remap(image, pos_interpolated[:, :, :2].astype(np.float32), None, interpolation=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT, borderValue=(0))
                if args.isMask:
                    vertices_vis = get_visibility(vertices, prn.triangles, h, w)
                    uv_mask = get_uv_mask(vertices_vis, prn.triangles, prn.uv_coords, h, w, prn.resolution_op)
                    uv_mask = cv2.resize(uv_mask, (args.texture_size, args.texture_size), preserve_range=True)
                    texture = texture * uv_mask[:, :, np.newaxis]
                write_obj_with_texture(os.path.join(save_folder, name + '.obj'), save_vertices, prn.triangles, texture, prn.uv_coords / prn.resolution_op)  # save 3d face with texture (can open with meshlab)
            else:
                write_obj_with_colors(os.path.join(save_folder, name + '.obj'), save_vertices, prn.triangles, colors)  # save 3d face (can open with meshlab)
        # Disabled: depth-map export.
        # if args.isDepth:
        #     depth_image = get_depth_image(vertices, prn.triangles, h, w, True)
        #     depth = get_depth_image(vertices, prn.triangles, h, w)
        #     cv2.imwrite(os.path.join(save_folder, name + '_depth.jpg'), depth_image)
        #     sio.savemat(os.path.join(save_folder, name + '_depth.mat'), {'depth': depth})
        if args.isKpt or args.isShow:
            # get landmarks
            kpt = prn.get_landmarks(pos)
            np.savetxt(os.path.join(save_folder, name + '_kpt.txt'), kpt)
        if args.isPose or args.isShow:
            # estimate pose
            camera_matrix, pose = estimate_pose(vertices)
            np.savetxt(os.path.join(save_folder, name + '_pose.txt'), pose)
            np.savetxt(os.path.join(save_folder, name + '_camera_matrix.txt'), camera_matrix)
            # NOTE(review): _pose.txt is written twice with the same data --
            # this second savetxt is redundant.
            np.savetxt(os.path.join(save_folder, name + '_pose.txt'), pose)
        if args.isShow:
            # ---------- Plot
            # NOTE(review): image_pose is never used; plot_pose_box is called
            # again below for the imshow.
            image_pose = plot_pose_box(image, camera_matrix, kpt)
            cv2.imshow('sparse alignment', plot_kpt(image, kpt))
            cv2.imshow('dense alignment', plot_vertices(image, vertices))
            cv2.imshow('pose', plot_pose_box(image, camera_matrix, kpt))
            cv2.waitKey(0)
def main(args):
    """Train PRNet: build config and model, render a ground-truth reference
    mesh once from a fixed probe image, then per epoch optimize over all
    batches, render the current prediction of the probe image, and checkpoint.

    NOTE(review): relies on ./results, ./logs and ./Data/train_result existing,
    and on module-level names (Options, TrainData, PRNet, render_texture,
    write_obj_with_colors, imsave, cv2, np, math, datetime).
    """
    trainConfig = Options()
    opt = trainConfig.get_config()
    #prn = PRN(is_dlib = True)
    # Some arguments
    os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpu
    batch_size = opt.batch_size
    epochs = opt.epochs
    train_data_file = args.train_data_file
    model_path = args.model_path
    # Eval-set paths are read, but all evaluation code below is disabled.
    eval_pixel_file = args.eval_pixel_file
    eval_3DFAW_file = args.eval_3DFAW_file
    eval_300W_file = args.eval_300W_file
    #res_dir = args.res_dir
    #ref_dir = args.ref_dir
    '''
    save_dir = args.checkpoint
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    '''
    # Training data
    data = TrainData(train_data_file)
    #eval_pixel = TrainData(eval_pixel_file)
    #eval_3DFAW = TrainData(eval_3DFAW_file)
    #eval_300W = TrainData(eval_300W_file)
    #show_data = TrainData(train_data_file)
    begin_epoch = 0
    # Disabled: resume epoch counter from an existing checkpoint name.
    # if os.path.exists(model_path + '.data-00000-of-00001'):
    #     begin_epoch = int(model_path.split('_')[-1]) + 1
    #     print('begin: ', begin_epoch)
    '''
    epoch_iters = data.num_data / batch_size
    global_step = tf.Variable(epoch_iters * begin_epoch, trainable=False)
    # Declay learning rate half every 5 epochs
    decay_steps = 5 * epoch_iters
    # learning_rate = learning_rate * 0.5 ^ (global_step / decay_steps)
    learning_rate = tf.train.exponential_decay(opt.learning_rate, global_step, decay_steps, 0.5, staircase=True)
    '''
    # Model
    model = PRNet(opt)
    model.setup(model_path)
    model.train()
    # Begining train: open error/loss log files.
    error_f = open('./results/error.txt', 'w')
    time_now = datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
    fp_log = open("./logs/log_" + time_now + ".txt", "w")
    iters_total_each_epoch = int(math.ceil(1.0 * data.num_data / batch_size))
    print('iters_total_each_epoch: ', iters_total_each_epoch)
    # Disabled: initial evaluation on the held-out sets.
    #eval_pixel_batch = eval_pixel(eval_pixel.num_data,1)
    #eval_3DFAW_batch = eval_3DFAW(eval_3DFAW.num_data,1)
    #eval_300W_batch = eval_300W(eval_300W.num_data,1)
    #loss_pixel = model.optmize_parameters(x=eval_pixel_batch[0], label=eval_pixel_batch[1])
    #loss_3DFAW = model.optmize_parameters(x=eval_3DFAW_batch[0], label=eval_3DFAW_batch[1])
    #loss_300W = model.optmize_parameters(x=eval_300W_batch[0], label=eval_300W_batch[1])
    #print('error of Pixel start: ',loss_pixel)
    #('error of 3DFAW start: ',loss_3DFAW)
    #print('error of 300W start: ',loss_300W)
    #error_f.write('error in pixel 1st : '+str(loss_pixel)+' error in 3DFAW 1st : '+str(loss_3DFAW)+'\n')
    # Fixed probe image + its ground-truth position map, rendered once as a
    # visual reference for the per-epoch renders below.
    image = cv2.imread('./examples/10173-other_3-1.jpg')
    face_ind = np.loadtxt('./Data/uv-data/face_ind.txt').astype(np.int32)
    triangles = np.loadtxt('./Data/uv-data/triangles.txt').astype(np.int32)
    input_image = image / 255.
    pos_gt = np.load('./examples/10173-other_3-1.npy')
    pos_gt = np.array(pos_gt).astype(np.float32)
    # Unwarp the probe image into UV texture space using the GT position map.
    ref_texture_gt = cv2.remap(input_image, pos_gt[:, :, :2].astype(np.float32), None, interpolation=cv2.INTER_NEAREST, borderMode=cv2.BORDER_CONSTANT, borderValue=(0))
    all_colors_gt = np.reshape(ref_texture_gt, [256**2, -1])
    text_c_gt = all_colors_gt[face_ind, :] * 255
    all_vertices_gt = np.reshape(pos_gt, [256**2, -1])
    vertices_gt = all_vertices_gt[face_ind, :]
    pic_gt = render_texture(vertices_gt.T, text_c_gt.T, triangles.T, 256, 256, 3, image)
    # Clamp vertices into the image so they can be used as pixel indices.
    vertices_gt[:, 0] = np.minimum(np.maximum(vertices_gt[:, 0], 0), 256 - 1)  # x
    vertices_gt[:, 1] = np.minimum(np.maximum(vertices_gt[:, 1], 0), 256 - 1)  # y
    ind = np.round(vertices_gt).astype(np.int32)
    col = image[ind[:, 1], ind[:, 0], :]  # n x 3
    imsave('./results/results_gt' + '.png', pic_gt)
    write_obj_with_colors('./results/results_gt' + '.obj', vertices_gt, triangles, col)
    # cv2.imshow('ref',ref_texture_gt)
    # cv2.waitKey(0)
    test_list = []
    for epoch in range(begin_epoch, epochs):
        train_loss_mean = 0
        #show_data_ = show_data(1)
        for iters in range(iters_total_each_epoch):
            if True:  # iters % 100 == 0:
                batch = data(batch_size, 0)
            else:
                batch = data(batch_size, 1)
            loss_res = model.optimize_parameters(x=batch[0], label=batch[1])
            train_loss_mean = train_loss_mean + loss_res
            #summary_str = sess.run(merged_summary_op,feed_dict={x: batch[0], label: batch[1]})
            time_now_tmp = datetime.now().strftime("%Y-%m-%d_%H:%M:%S")
            for_show = 'epoch: ' + str(epoch) + ' iters: ' + str(iters) + ' loss: ' + str(loss_res)
            log_line = str(loss_res)
            print(for_show)
            fp_log.writelines(log_line + "\n")
        train_loss_mean = train_loss_mean / iters_total_each_epoch
        #eval_pixel_batch_ = eval_pixel(eval_pixel.num_data,1)
        #eval_3DFAW_batch_ = eval_3DFAW(eval_3DFAW.num_data,1)
        #eval_300W_batch_ = eval_300W(eval_300W.num_data,1)
        #text_c = prn.get_colors_from_texture(ref_texture)
        #print('colors: ',text_c)
        #pic = render_texture(cropped_vertices.T,text_c.T,prn.triangles.T,256,256)
        # pos = np.load('./10082-15-1.npy')
        # pos = np.array(pos).astype(np.float32)
        # Render the current prediction for the probe image.
        posmap = model.generate(x=input_image[np.newaxis, :, :, :])
        posmap = np.squeeze(posmap)
        # Undo the 1/(256*1.1) label normalization back to pixel units.
        posmap = posmap * 256 * 1.1
        cropped_vertices = np.reshape(posmap, [-1, 3]).T
        pos = np.reshape(cropped_vertices.T, [256, 256, 3])
        ref_texture = cv2.remap(input_image, pos[:, :, :2].astype(np.float32), None, interpolation=cv2.INTER_NEAREST, borderMode=cv2.BORDER_CONSTANT, borderValue=(0))
        all_colors = np.reshape(ref_texture, [256**2, -1])
        text_c = all_colors[face_ind, :] * 255
        test_list.append(pos)
        all_vertices = np.reshape(pos, [256**2, -1])
        vertices = all_vertices[face_ind, :]
        pic = render_texture(vertices.T, text_c.T, triangles.T, 256, 256, 3, image)
        imsave('./results/result' + str(epoch) + '.png', pic)
        # NOTE(review): `triangles` is passed where per-vertex colors are
        # expected (compare the _gt call above, which passes `col`) -- almost
        # certainly should be a color array such as text_c. Confirm and fix.
        write_obj_with_colors('./results/result' + str(epoch) + '.obj', vertices, triangles, triangles)
        #picture = np.multiply(pic,255)
        #print('pic',picture)
        # cv2.imshow('ref',ref_texture)
        # cv2.waitKey(0)
        # image_pic = tf.image.decode_png(picture, channels=4)
        # image_pic = tf.expand_dims(input_image, 0)
        # ori_pic = tf.image.decode_png(image, channels=4)
        # ori_pic = tf.expand_dims(ori_pic, 0)
        '''
        _, loss_pixel_, _ = model.forward(x=eval_pixel_batch_[0], label=eval_pixel_batch_[1])
        #loss_3DFAW_ = sess.run(error,feed_dict={x: eval_3DFAW_batch_[0], label: eval_3DFAW_batch_[1]})
        _, _, loss_300W_ = model.forward(x=eval_300W_batch_[0], label=eval_300W_batch_[1])
        '''
        '''
        summary =tf.Summary(value=[
            tf.Summary.Value(tag="error_pixel", simple_value=loss_pixel_),
            #tf.Summary.Value(tag="error_3DFAW", simple_value=loss_3DFAW_),
            tf.Summary.Value(tag="error_300W", simple_value=loss_300W_),
            tf.Summary.Value(tag="train loss", simple_value=train_loss_mean)])
        summary_writer.add_summary(summary, epoch)
        '''
        # Checkpoint after every epoch.
        model.save(save_path='./Data/train_result/256_256_resfcn256' + '_' + str(epoch))
        # Test
        # eval_pixel_batcht = eval_pixel(eval_pixel.num_data,1)
        # eval_3DFAW_batcht = eval_3DFAW(eval_3DFAW.num_data,1)
        # loss_pixel2 = sess.run(loss,feed_dict={x: eval_pixel_batcht[0], label: eval_pixel_batcht[1]})
        # loss_3DFAW2 = sess.run(loss,feed_dict={x: eval_3DFAW_batcht[0], label: eval_3DFAW_batcht[1]})
        #print('error of Pixel: ',loss_pixel2)
        #print('error of 3DFAW: ',loss_3DFAW2)
        #error_f.write('error in pixel: '+str(loss_pixel2)+' error in 3DFAW: '+str(loss_3DFAW2)+'\n')
    '''
    model.eval()
    eval_meshes(test_list, res_dir, ref_dir)
    '''
    fp_log.close()
    error_f.close()
def main(args):
    """Run PRNet on every *.jpg / *.png in ``args.inputDir`` and write the
    requested outputs to ``args.outputDir``.

    Depending on flags, per image this saves: the (possibly rescaled) input
    (`isImage`), a colored or textured .obj mesh (`is3d` / `isTexture`),
    depth map as .jpg + .mat (`isDepth`), a full mesh .mat (`isMat`),
    68 landmarks (`isKpt`), camera matrix + pose (`isPose`), and optionally
    shows OpenCV preview windows (`isShow`).

    Fixes vs. previous revision:
      * `<name>_pose.txt` was written twice — now saved once.
      * `plot_pose_box` was computed twice for the 'pose' window — the
        `image_pose` result is now reused.
      * removed unused local `total_num`.
    """
    if args.isShow or args.isTexture:
        # cv2 / plotting helpers only needed for display & texture remap
        import cv2
        from utils.cv_plot import plot_kpt, plot_vertices, plot_pose_box

    # ---- init PRN
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu  # GPU number, -1 for CPU
    prn = PRN(is_dlib=args.isDlib)

    # ------------- load data
    image_folder = args.inputDir
    save_folder = args.outputDir
    if not os.path.exists(save_folder):
        os.mkdir(save_folder)

    types = ('*.jpg', '*.png')
    image_path_list = []
    for files in types:
        image_path_list.extend(glob(os.path.join(image_folder, files)))

    for i, image_path in enumerate(image_path_list):
        name = image_path.strip().split('/')[-1][:-4]

        # read image
        image = imread(image_path)
        [h, w, c] = image.shape
        if c > 3:
            image = image[:, :, :3]  # drop alpha channel

        # the core: regress position map
        if args.isDlib:
            max_size = max(image.shape[0], image.shape[1])
            if max_size > 1000:
                # keep dlib's detector fast on very large images
                image = rescale(image, 1000. / max_size)
                image = (image * 255).astype(np.uint8)
            pos = prn.process(image)  # use dlib to detect face
        else:
            if image.shape[0] == image.shape[1]:
                image = resize(image, (256, 256))
                pos = prn.net_forward(image / 255.)  # input image has been cropped to 256x256
            else:
                box = np.array([0, image.shape[1] - 1, 0, image.shape[0] - 1])  # cropped with bounding box
                pos = prn.process(image, box)

        image = image / 255.
        if pos is None:
            # no face detected in this image
            continue

        if args.is3d or args.isMat or args.isPose or args.isShow:
            # 3D vertices
            vertices = prn.get_vertices(pos)
            if args.isFront:
                save_vertices = frontalize(vertices)
            else:
                save_vertices = vertices.copy()
            # flip y so the mesh is upright in standard OBJ viewers
            save_vertices[:, 1] = h - 1 - save_vertices[:, 1]

        if args.isImage:
            imsave(os.path.join(save_folder, name + '.jpg'), image)

        if args.is3d:
            # corresponding colors
            colors = prn.get_colors(image, vertices)

            if args.isTexture:
                if args.texture_size != 256:
                    pos_interpolated = resize(pos, (args.texture_size, args.texture_size),
                                              preserve_range=True)
                else:
                    pos_interpolated = pos.copy()
                # sample the input image through the UV position map
                texture = cv2.remap(image, pos_interpolated[:, :, :2].astype(np.float32), None,
                                    interpolation=cv2.INTER_LINEAR,
                                    borderMode=cv2.BORDER_CONSTANT, borderValue=(0))
                if args.isMask:
                    # zero out texture for self-occluded vertices
                    vertices_vis = get_visibility(vertices, prn.triangles, h, w)
                    uv_mask = get_uv_mask(vertices_vis, prn.triangles, prn.uv_coords, h, w,
                                          prn.resolution_op)
                    uv_mask = resize(uv_mask, (args.texture_size, args.texture_size),
                                     preserve_range=True)
                    texture = texture * uv_mask[:, :, np.newaxis]
                write_obj_with_texture(os.path.join(save_folder, name + '.obj'), save_vertices,
                                       prn.triangles, texture,
                                       prn.uv_coords / prn.resolution_op)  #save 3d face with texture(can open with meshlab)
            else:
                write_obj_with_colors(os.path.join(save_folder, name + '.obj'), save_vertices,
                                      prn.triangles, colors)  #save 3d face(can open with meshlab)

        if args.isDepth:
            depth_image = get_depth_image(vertices, prn.triangles, h, w, True)
            depth = get_depth_image(vertices, prn.triangles, h, w)
            imsave(os.path.join(save_folder, name + '_depth.jpg'), depth_image)
            sio.savemat(os.path.join(save_folder, name + '_depth.mat'), {'depth': depth})

        if args.isMat:
            sio.savemat(os.path.join(save_folder, name + '_mesh.mat'),
                        {'vertices': vertices, 'colors': colors, 'triangles': prn.triangles})

        if args.isKpt or args.isShow:
            # get landmarks
            kpt = prn.get_landmarks(pos)
            np.savetxt(os.path.join(save_folder, name + '_kpt.txt'), kpt)

        if args.isPose or args.isShow:
            # estimate pose; save each output exactly once (pose was
            # previously written twice)
            camera_matrix, pose = estimate_pose(vertices)
            np.savetxt(os.path.join(save_folder, name + '_pose.txt'), pose)
            np.savetxt(os.path.join(save_folder, name + '_camera_matrix.txt'), camera_matrix)

        if args.isShow:
            # ---------- Plot
            image_pose = plot_pose_box(image, camera_matrix, kpt)
            cv2.imshow('sparse alignment', plot_kpt(image, kpt))
            cv2.imshow('dense alignment', plot_vertices(image, vertices))
            cv2.imshow('pose', image_pose)  # reuse instead of recomputing
            cv2.waitKey(0)
def run_one_image(image_path, uv_kpt_ind, face_ind, triangles, s_uv_coords,
                  pnet, rnet, onet, x, y, Tsess, minsize=30,
                  threshold=(0.6, 0.7, 0.7), factor=0.709, best_score=0.7,
                  uv_h=256, uv_w=256, image_h=256, image_w=256):
    """Detect faces with MTCNN, regress a PRNet position map per face and
    write debug artifacts, returning the annotated image in [0, 1].

    Per face above ``best_score`` this saves a colored mesh
    (``images/test_c.obj``), a landmark overlay (``images/test_kpt.jpg``)
    and a dense-vertex overlay (``images/test_ver.jpg``), and pastes the
    vertex overlay back into a copy of the input image.

    Fixes vs. previous revision:
      * ``threshold`` default was a mutable list — now an immutable tuple
        (same values; only read, never mutated here).
      * ``write_obj_with_colors`` was imported inside the per-face loop —
        hoisted to function top.
      * ``t_image`` was computed twice per face; unused ``ret`` results
        from cv2.threshold discarded explicitly.

    Returns:
        np.ndarray: ``output_image / 255.`` — BGR float image with the
        resized vertex overlay pasted over each accepted face box.
    """
    # hoisted: was re-imported on every loop iteration
    from utils.write import write_obj_with_colors

    input_image = cv2.imread(image_path, 1)
    output_image = input_image.copy()

    boxes, pnts = face_detect.detect_face(input_image, minsize, pnet, rnet, onet,
                                          threshold, factor)
    faces = process_bbox(boxes, input_image.shape)

    for idx, (x0, y1, x1, y0, conf_score) in enumerate(faces):
        if conf_score > best_score:
            # crop the detected face and normalize to [0, 1] at 256x256
            det_face = input_image[int(x0):int(x1), int(y0):int(y1), :]
            face_shape = (int(y1) - int(y0), int(x1) - int(x0))
            det_face = cv2.resize(det_face, (256, 256)) / 255.

            # regress the UV position map with the loaded TF graph
            pos = Tsess.run(y, feed_dict={x: det_face[np.newaxis, :, :, :]})
            pos = np.squeeze(pos)
            max_pos = image_h
            pos = pos * max_pos

            vertices = get_vertices(pos, face_ind, uv_h)

            # save colored mesh (y flipped for standard OBJ viewers)
            save_vertices = vertices.copy()
            save_vertices[:, 1] = image_h - 1 - save_vertices[:, 1]
            colors = get_colors(det_face, vertices)
            write_obj_with_colors(os.path.join('images', 'test' + '_c.obj'),
                                  save_vertices, triangles, colors)

            t_image = (det_face * 255.).astype(np.uint8)

            # landmark overlay: mask out plotted pixels, then add the plot
            kpt = get_landmarks(pos, uv_kpt_ind)
            kpt_origin = plot_kpt(det_face, kpt).astype(np.uint8)
            kpt_gray = cv2.cvtColor(kpt_origin, cv2.COLOR_RGB2GRAY)
            _, kpt_mask = cv2.threshold(kpt_gray, 127, 255, cv2.THRESH_BINARY)
            kpt_mask = cv2.bitwise_not(kpt_mask)
            kpt_and = cv2.bitwise_and(t_image, t_image, mask=kpt_mask)
            kpt_image = cv2.add(kpt_and, kpt_origin)
            imsave(os.path.join('images', 'test' + '_kpt.jpg'), kpt_image / 255.)

            # dense-vertex overlay, same masking scheme (t_image reused)
            ver_origin = plot_vertices(det_face, vertices).astype(np.uint8)
            ver_gray = cv2.cvtColor(ver_origin, cv2.COLOR_RGB2GRAY)
            _, ver_mask = cv2.threshold(ver_gray, 127, 255, cv2.THRESH_BINARY)
            ver_mask = cv2.bitwise_not(ver_mask)
            ver_and = cv2.bitwise_and(t_image, t_image, mask=ver_mask)
            ver_image = cv2.add(ver_and, ver_origin)
            imsave(os.path.join('images', 'test' + '_ver.jpg'), ver_image / 255.)

            # paste the overlay back into the full-size output image
            resize_ver_image = cv2.resize(ver_image, face_shape)
            output_image[int(x0):int(x1), int(y0):int(y1)] = resize_ver_image

    return output_image / 255.
def get_3d_folder( pkl ):
    """Extract the key frame of each video listed in a pickle file and save
    PRNet outputs next to the video: the key frame as PNG, 68 landmarks as
    .npy, and a colored 3D mesh as '<video>_original.obj'.

    Each pickle entry is a sequence whose first cell is the video path
    (joined under <root>/unzip) and whose last cell is the key-frame index.
    """
    # the first cell is video path the last cell is the key frame nnuumber
    # ---- init PRN
    # os.environ['CUDA_VISIBLE_DEVICES'] = '0' # GPU number, -1 for CPU
    prn = PRN(is_dlib=True)
    _file = open(pkl, "rb")
    data = pickle.load(_file)
    _file.close()
    gg = len(data)
    print(len(data))
    # data = data[int(gg * 0.1 *bbb ): int(gg * 0.1 * (bbb + 1) ) ]
    for kk, item in enumerate(data):
        print(kk)
        target_id = item[-1]  # key-frame index within the video
        # NOTE(review): `root` is not a parameter of this function and is not
        # defined locally — presumably a module-level global (cf.
        # get_3d_pkl_lrw, which takes `root` as an argument). TODO confirm;
        # otherwise this line raises NameError.
        video_path = os.path.join(root, 'unzip', item[0])
        if not os.path.exists(video_path):
            print(video_path)
            print('+++++')
            continue
        if os.path.exists(video_path[:-4] + '.obj'):
            # already processed — skip
            print('-----')
            continue
        cap = cv2.VideoCapture(video_path)
        # advance to the key frame by reading and discarding frames
        for i in range(target_id):
            ret, frame = cap.read()
        ret, target_frame = cap.read()
        cv2.imwrite(video_path[:-4] + '_%05d.png' % target_id, target_frame)
        target_frame = cv2.cvtColor(target_frame, cv2.COLOR_BGR2RGB)
        image = target_frame
        # read image
        [h, w, c] = image.shape
        pos = prn.process(image)  # use dlib to detect face
        image = image / 255.
        if pos is None:
            # no face found in the key frame
            continue
        # landmark
        kpt = prn.get_landmarks(pos)
        # NOTE(review): flips y with a hard-coded 224 although the mesh below
        # uses h - 1 (and get_3d_pkl_lrw uses h) — looks inconsistent; confirm
        # whether frames here are assumed 224px tall.
        kpt[:, 1] = 224 - kpt[:, 1]
        np.save(video_path[:-4] + '_prnet.npy', kpt)
        # 3D vertices
        vertices = prn.get_vertices(pos)
        # save_vertices, p = frontalize(vertices)
        # np.save(video_path[:-4] + '_p.npy', p)
        # if os.path.exists(video_path[:-4] + '.obj'):
        #     continue
        save_vertices = vertices.copy()
        # flip y so the mesh is upright in standard OBJ viewers
        save_vertices[:, 1] = h - 1 - save_vertices[:, 1]
        # corresponding colors
        colors = prn.get_colors(image, vertices)
        # print (colors.shape)
        # print ('=========')
        # cv2.imwrite('./mask.png', colors * 255)
        write_obj_with_colors(video_path[:-4] + '_original.obj', save_vertices, prn.triangles, colors)  #save 3d face(can open with meshlab)
# NOTE(review): fragment of a per-image demo loop — `image_path`, `image`,
# `prn` and `save_folder` come from an enclosing scope not visible here.
if 'AFLW2000' in image_path:
    # AFLW2000 ships ground-truth 3D landmarks in a sibling .mat file;
    # use them instead of the dlib detector to locate/crop the face.
    mat_path = image_path.replace('jpg', 'mat')
    info = sio.loadmat(mat_path)
    kpt = info['pt3d_68']
    pos = prn.process( image, kpt )  # kpt information is only used for detecting face and cropping image
else:
    pos = prn.process(image)  # use dlib to detect face

# -- Basic Applications
# get landmarks
kpt = prn.get_landmarks(pos)
# 3D vertices
vertices = prn.get_vertices(pos)
# corresponding colors
colors = prn.get_colors(image, vertices)

# -- save
name = image_path.strip().split('/')[-1][:-4]
np.savetxt(os.path.join(save_folder, name + '.txt'), kpt)
write_obj_with_colors(os.path.join(save_folder, name + '.obj'), vertices, prn.triangles, colors)  #save 3d face(can open with meshlab)
sio.savemat(os.path.join(save_folder, name + '_mesh.mat'), {
    'vertices': vertices,
    'colors': colors,
    'triangles': prn.triangles
})
def main(args):
    """Run PRNet either on a live webcam stream (``args.isCamera``) or on
    every *.jpg / *.png in ``args.inputDir``, writing the requested outputs
    to ``args.outputDir``.

    Camera mode shows a live sparse-alignment preview until the stream ends
    or 'q' is pressed. Folder mode mirrors the batch pipeline: mesh with
    colors or texture (`is3d` / `isTexture`), depth (`isDepth`), mesh .mat
    (`isMat`), landmarks (`isKpt`), camera matrix + pose (`isPose`), and
    BGR-converted preview windows (`isShow`).

    Fixes vs. previous revision:
      * `<name>_pose.txt` was written twice — now saved once.
      * `plot_pose_box` was computed twice for the 'pose' window — the
        `image_pose` result is now reused.
      * removed unused local `total_num`.
    """
    if args.isShow or args.isTexture or args.isCamera:
        import cv2
        from utils.cv_plot import plot_kpt, plot_vertices, plot_pose_box

    # ---- init PRN
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu  # GPU number, -1 for CPU
    prn = PRN(is_dlib=args.isDlib)

    # ------------- load data
    image_folder = args.inputDir
    save_folder = args.outputDir
    if not os.path.exists(save_folder):
        os.mkdir(save_folder)

    types = ('*.jpg', '*.png')
    image_path_list = []
    for files in types:
        image_path_list.extend(glob(os.path.join(image_folder, files)))

    if args.isCamera:
        # Create a VideoCapture object; 0 selects the default camera.
        cap = cv2.VideoCapture(0)
        if not cap.isOpened():
            print("Error opening video stream or file")

        # Read until video is completed
        while cap.isOpened():
            ret, frame = cap.read()
            if ret:
                if args.isDlib:
                    max_size = max(frame.shape[0], frame.shape[1])
                    if max_size > 1000:
                        # keep dlib's detector fast on very large frames
                        frame = rescale(frame, 1000. / max_size)
                        frame = (frame * 255).astype(np.uint8)
                    pos = prn.process(frame)  # use dlib to detect face
                else:
                    if frame.shape[0] == frame.shape[1]:
                        frame = resize(frame, (256, 256))
                        pos = prn.net_forward(frame / 255.)  # input frame has been cropped to 256x256
                    else:
                        box = np.array([0, frame.shape[1] - 1, 0, frame.shape[0] - 1])  # cropped with bounding box
                        pos = prn.process(frame, box)

                # Normalizing the frame and skipping if there was no one in the frame
                frame = frame / 255.
                if pos is None:
                    continue

                # Get landmarks and display the resulting frame
                kpt = prn.get_landmarks(pos)
                cv2.imshow('sparse alignment', plot_kpt(frame, kpt))

                # Press Q on keyboard to exit
                if cv2.waitKey(25) & 0xFF == ord('q'):
                    break
            else:
                # stream ended
                break

        # release the capture object and close all windows
        cap.release()
        cv2.destroyAllWindows()
    else:
        for i, image_path in enumerate(image_path_list):
            name = image_path.strip().split('/')[-1][:-4]

            # read image
            image = imread(image_path)
            [h, w, c] = image.shape
            if c > 3:
                image = image[:, :, :3]  # drop alpha channel

            # the core: regress position map
            if args.isDlib:
                max_size = max(image.shape[0], image.shape[1])
                if max_size > 1000:
                    image = rescale(image, 1000. / max_size)
                    image = (image * 255).astype(np.uint8)
                pos = prn.process(image)  # use dlib to detect face
            else:
                if image.shape[0] == image.shape[1]:
                    image = resize(image, (256, 256))
                    pos = prn.net_forward(image / 255.)  # input image has been cropped to 256x256
                else:
                    box = np.array([0, image.shape[1] - 1, 0, image.shape[0] - 1])  # cropped with bounding box
                    pos = prn.process(image, box)

            image = image / 255.
            if pos is None:
                # no face detected in this image
                continue

            if args.is3d or args.isMat or args.isPose or args.isShow:
                # 3D vertices
                vertices = prn.get_vertices(pos)
                if args.isFront:
                    save_vertices = frontalize(vertices)
                else:
                    save_vertices = vertices.copy()
                # flip y so the mesh is upright in standard OBJ viewers
                save_vertices[:, 1] = h - 1 - save_vertices[:, 1]

            if args.isImage:
                imsave(os.path.join(save_folder, name + '.jpg'), image)

            if args.is3d:
                # corresponding colors
                colors = prn.get_colors(image, vertices)

                if args.isTexture:
                    if args.texture_size != 256:
                        pos_interpolated = resize(pos, (args.texture_size, args.texture_size),
                                                  preserve_range=True)
                    else:
                        pos_interpolated = pos.copy()
                    # sample the input image through the UV position map
                    texture = cv2.remap(image, pos_interpolated[:, :, :2].astype(np.float32),
                                        None, interpolation=cv2.INTER_LINEAR,
                                        borderMode=cv2.BORDER_CONSTANT, borderValue=(0))
                    if args.isMask:
                        # zero out texture for self-occluded vertices
                        vertices_vis = get_visibility(vertices, prn.triangles, h, w)
                        uv_mask = get_uv_mask(vertices_vis, prn.triangles, prn.uv_coords, h, w,
                                              prn.resolution_op)
                        uv_mask = resize(uv_mask, (args.texture_size, args.texture_size),
                                         preserve_range=True)
                        texture = texture * uv_mask[:, :, np.newaxis]
                    write_obj_with_texture(
                        os.path.join(save_folder, name + '.obj'), save_vertices, prn.triangles,
                        texture, prn.uv_coords / prn.resolution_op
                    )  #save 3d face with texture(can open with meshlab)
                else:
                    write_obj_with_colors(
                        os.path.join(save_folder, name + '.obj'), save_vertices, prn.triangles,
                        colors)  #save 3d face(can open with meshlab)

            if args.isDepth:
                depth_image = get_depth_image(vertices, prn.triangles, h, w, True)
                depth = get_depth_image(vertices, prn.triangles, h, w)
                imsave(os.path.join(save_folder, name + '_depth.jpg'), depth_image)
                sio.savemat(os.path.join(save_folder, name + '_depth.mat'), {'depth': depth})

            if args.isMat:
                sio.savemat(
                    os.path.join(save_folder, name + '_mesh.mat'), {
                        'vertices': vertices,
                        'colors': colors,
                        'triangles': prn.triangles
                    })

            if args.isKpt or args.isShow:
                # get landmarks
                kpt = prn.get_landmarks(pos)
                np.savetxt(os.path.join(save_folder, name + '_kpt.txt'), kpt)

            if args.isPose or args.isShow:
                # estimate pose; save each output exactly once (pose was
                # previously written twice)
                camera_matrix, pose = estimate_pose(vertices)
                np.savetxt(os.path.join(save_folder, name + '_pose.txt'), pose)
                np.savetxt(os.path.join(save_folder, name + '_camera_matrix.txt'), camera_matrix)

            if args.isShow:
                # ---------- Plot (convert RGB plots to BGR for cv2 windows)
                image_pose = plot_pose_box(image, camera_matrix, kpt)
                cv2.imshow(
                    'sparse alignment',
                    cv2.cvtColor(np.float32(plot_kpt(image, kpt)), cv2.COLOR_RGB2BGR))
                cv2.imshow(
                    'dense alignment',
                    cv2.cvtColor(np.float32(plot_vertices(image, vertices)), cv2.COLOR_RGB2BGR))
                cv2.imshow(
                    'pose',
                    cv2.cvtColor(np.float32(image_pose), cv2.COLOR_RGB2BGR))  # reuse instead of recomputing
                cv2.waitKey(0)