def getFaceTextureCoords(textImag):
    checkpoint_fp = 'models/MobDenseNet.pth.tar'
    arch = 'densemobilenetv4_19'
    checkpoint = torch.load(
        checkpoint_fp, map_location=lambda storage, loc: storage)['state_dict']
    model = getattr(MobDenseNet, arch)(
        num_classes=62)  # 62 = 12(pose) + 40(shape) + 10(expression)
    model_dict = model.state_dict()
    # because the model is trained by multiple gpus, prefix module should be removed
    for k in checkpoint.keys():
        model_dict[k.replace('module.', '')] = checkpoint[k]
    model.load_state_dict(model_dict)
    cudnn.benchmark = True
    model = model.cuda()
    model.eval()

    face_detector = dlib.get_frontal_face_detector()

    # 3. forward
    transform = transforms.Compose(
        [ToTensorGjz(), NormalizeGjz(mean=127.5, std=128)])
    img_ori = textImag
    rects = face_detector(img_ori, 1)
    for rect in rects:
        # - use detected face bbox
        bbox = [rect.left(), rect.top(), rect.right(), rect.bottom()]
        roi_box = parse_roi_box_from_bbox(bbox)
        img = crop_img(img_ori, roi_box)

        # forward: one step
        img = cv2.resize(img, dsize=(STD_SIZE, STD_SIZE),
                         interpolation=cv2.INTER_LINEAR)
        input = transform(img).unsqueeze(0)
        with torch.no_grad():
            input = input.cuda()
            param = model(input)
            param = param.squeeze().cpu().numpy().flatten().astype(np.float32)

        # 68 pts; return only the x and y rows (2 x 68)
        pts68 = predict_68pts(param, roi_box)
        return pts68[[0, 1], :]
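# Usage sketch (hedged): getFaceTextureCoords expects a decoded image array,
# not a file path; the sample path below is hypothetical.
demo_texture_img = cv2.imread('samples/texture_face.jpg')
demo_coords = getFaceTextureCoords(demo_texture_img)  # 2 x 68 array of (x, y) landmarks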
def main(args):
    # 1. load the pre-trained model
    checkpoint_fp = 'models/phase1_wpdc_vdc.pth.tar'
    arch = 'mobilenet_1'
    checkpoint = torch.load(
        checkpoint_fp, map_location=lambda storage, loc: storage)['state_dict']
    model = getattr(mobilenet_v1, arch)(
        num_classes=62)  # 62 = 12(pose) + 40(shape) + 10(expression)
    model_dict = model.state_dict()
    # the model was trained on multiple GPUs, so remove the 'module.' prefix
    for k in checkpoint.keys():
        model_dict[k.replace('module.', '')] = checkpoint[k]
    model.load_state_dict(model_dict)
    if args.mode == 'gpu':
        cudnn.benchmark = True
        model = model.cuda()
    model.eval()

    # 2. load dlib models for face detection and for the landmark predictor used in face cropping
    if args.dlib_landmark:
        dlib_landmark_model = 'models/shape_predictor_68_face_landmarks.dat'
        face_regressor = dlib.shape_predictor(dlib_landmark_model)
    if args.dlib_bbox:
        face_detector = dlib.get_frontal_face_detector()

    # 3. forward
    tri = sio.loadmat('visualize/tri.mat')['tri']
    transform = transforms.Compose(
        [ToTensorGjz(), NormalizeGjz(mean=127.5, std=128)])
    for img_fp in args.files:
        img_ori = cv2.imread(img_fp)
        if args.dlib_bbox:
            rects = face_detector(img_ori, 1)
        else:
            rects = []

        if len(rects) == 0:
            rects = dlib.rectangles()
            rect_fp = img_fp + '.bbox'
            try:
                lines = open(rect_fp).read().strip().split('\n')[1:]
            except FileNotFoundError:
                print('Cannot load bbox file')
                continue
            for l in lines:
                l, r, t, b = [int(_) for _ in l.split(' ')[1:]]
                rect = dlib.rectangle(l, r, t, b)
                rects.append(rect)

        pts_res = []
        Ps = []  # camera matrix collection
        poses = []  # pose collection, [todo: validate it]
        vertices_lst = []  # store multiple face vertices
        ind = 0
        suffix = get_suffix(img_fp)
        for rect in rects:
            # use dlib landmarks to crop the image when available; otherwise derive the ROI box from the face bbox alone
            if args.dlib_landmark:
                # - use landmarks for cropping
                pts = face_regressor(img_ori, rect).parts()
                pts = np.array([[pt.x, pt.y] for pt in pts]).T
                roi_box = parse_roi_box_from_landmark(pts)
            else:
                # - use detected face bbox
                bbox = [rect.left(), rect.top(), rect.right(), rect.bottom()]
                roi_box = parse_roi_box_from_bbox(bbox)

            img = crop_img(img_ori, roi_box)

            # forward: one step
            img = cv2.resize(img, dsize=(STD_SIZE, STD_SIZE),
                             interpolation=cv2.INTER_LINEAR)
            input = transform(img).unsqueeze(0)
            with torch.no_grad():
                if args.mode == 'gpu':
                    input = input.cuda()
                param = model(input)
                param = param.squeeze().cpu().numpy().flatten().astype(np.float32)

            # 68 pts
            pts68 = predict_68pts(param, roi_box)

            # two-step: refine the bbox from the first-pass landmarks for a more accurate crop
            if args.bbox_init == 'two':
                roi_box = parse_roi_box_from_landmark(pts68)
                img_step2 = crop_img(img_ori, roi_box)
                img_step2 = cv2.resize(img_step2, dsize=(STD_SIZE, STD_SIZE),
                                       interpolation=cv2.INTER_LINEAR)
                input = transform(img_step2).unsqueeze(0)
                with torch.no_grad():
                    if args.mode == 'gpu':
                        input = input.cuda()
                    param = model(input)
                    param = param.squeeze().cpu().numpy().flatten().astype(np.float32)
                pts68 = predict_68pts(param, roi_box)

            pts_res.append(pts68)
            P, pose = parse_pose(param)
            Ps.append(P)
            poses.append(pose)

            # dense face 3d vertices
            if args.dump_ply or args.dump_vertex or args.dump_depth or args.dump_pncc or args.dump_obj:
                vertices = predict_dense(param, roi_box)
                vertices_lst.append(vertices)
            if args.dump_ply:
                dump_to_ply(vertices, tri,
                            '{}_{}.ply'.format(img_fp.replace(suffix, ''), ind))
            if args.dump_vertex:
                dump_vertex(vertices,
                            '{}_{}.mat'.format(img_fp.replace(suffix, ''), ind))
            if args.dump_pts:
                wfp = '{}_{}.txt'.format(img_fp.replace(suffix, ''), ind)
                np.savetxt(wfp, pts68, fmt='%.3f')
                print('Save 68 3d landmarks to {}'.format(wfp))
            if args.dump_roi_box:
                wfp = '{}_{}.roibox'.format(img_fp.replace(suffix, ''), ind)
                np.savetxt(wfp, roi_box, fmt='%.3f')
                print('Save roi box to {}'.format(wfp))
            if args.dump_paf:
                wfp_paf = '{}_{}_paf.jpg'.format(img_fp.replace(suffix, ''), ind)
                wfp_crop = '{}_{}_crop.jpg'.format(img_fp.replace(suffix, ''), ind)
                paf_feature = gen_img_paf(img_crop=img, param=param,
                                          kernel_size=args.paf_size)
                cv2.imwrite(wfp_paf, paf_feature)
                cv2.imwrite(wfp_crop, img)
                print('Dump to {} and {}'.format(wfp_crop, wfp_paf))
            if args.dump_obj:
                wfp = '{}_{}.obj'.format(img_fp.replace(suffix, ''), ind)
                colors = get_colors(img_ori, vertices)
                write_obj_with_colors(wfp, vertices, tri, colors)
                print('Dump obj with sampled texture to {}'.format(wfp))
            ind += 1

        if args.dump_pose:
            # P, pose = parse_pose(param)  # camera matrix (without scale) and pose (yaw, pitch, roll; to verify)
            img_pose = plot_pose_box(img_ori, Ps, pts_res)
            wfp = img_fp.replace(suffix, '_pose.jpg')
            cv2.imwrite(wfp, img_pose)
            print('Dump to {}'.format(wfp))
        if args.dump_depth:
            wfp = img_fp.replace(suffix, '_depth.png')
            # depths_img = get_depths_image(img_ori, vertices_lst, tri - 1)  # python version
            depths_img = cget_depths_image(img_ori, vertices_lst, tri - 1)  # cython version
            cv2.imwrite(wfp, depths_img)
            print('Dump to {}'.format(wfp))
        if args.dump_pncc:
            wfp = img_fp.replace(suffix, '_pncc.png')
            pncc_feature = cpncc(img_ori, vertices_lst, tri - 1)  # cython version
            cv2.imwrite(wfp, pncc_feature[:, :, ::-1])  # cv2.imwrite will swap RGB -> BGR
            print('Dump to {}'.format(wfp))
        if args.dump_res:
            draw_landmarks(img_ori, pts_res,
                           wfp=img_fp.replace(suffix, '_3DDFA.jpg'),
                           show_flg=args.show_flg)
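# A hedged sketch of the argparse driver that would supply `args` to main().
# The flag names mirror exactly the attributes read above; the defaults and the
# helper _str2bool are assumptions, not necessarily the original script's values.
import argparse


def _str2bool(v):
    # argparse's type=bool treats any non-empty string as True, so parse explicitly
    return str(v).lower() in ('true', '1', 'yes')


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='3DDFA inference pipeline (sketch)')
    parser.add_argument('-f', '--files', nargs='+', default=[], help='image files fed to the network')
    parser.add_argument('-m', '--mode', default='cpu', type=str, help='gpu or cpu')
    parser.add_argument('--bbox_init', default='one', type=str, help='one- or two-step bbox initialization')
    parser.add_argument('--dlib_bbox', default=True, type=_str2bool)
    parser.add_argument('--dlib_landmark', default=True, type=_str2bool)
    parser.add_argument('--show_flg', default=False, type=_str2bool)
    parser.add_argument('--dump_res', default=True, type=_str2bool)
    parser.add_argument('--dump_vertex', default=False, type=_str2bool)
    parser.add_argument('--dump_ply', default=False, type=_str2bool)
    parser.add_argument('--dump_pts', default=False, type=_str2bool)
    parser.add_argument('--dump_roi_box', default=False, type=_str2bool)
    parser.add_argument('--dump_pose', default=False, type=_str2bool)
    parser.add_argument('--dump_depth', default=False, type=_str2bool)
    parser.add_argument('--dump_pncc', default=False, type=_str2bool)
    parser.add_argument('--dump_paf', default=False, type=_str2bool)
    parser.add_argument('--paf_size', default=3, type=int)
    parser.add_argument('--dump_obj', default=False, type=_str2bool)
    main(parser.parse_args())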
    print('len(folder) :', len(folder))
    for item in tqdm(folder):
        try:
            img_ori = cv2.imread(str(folder[item]))
            rects = face_detector(img_ori, 1)
            if len(rects) != 0:
                for rect in rects:
                    bbox = [rect.left(), rect.top(),
                            rect.right(), rect.bottom()]
                    roi_box = parse_roi_box_from_bbox(bbox)
                    img = crop_img(img_ori, roi_box)
                    img = cv2.resize(img, dsize=(STD_SIZE, STD_SIZE),
                                     interpolation=cv2.INTER_LINEAR)
                    input = transform(img).unsqueeze(0)
                    with torch.no_grad():
                        if args.mode == 'gpu':
                            input = input.cuda()
                        param = model(input)
                        param = param.squeeze().cpu().numpy().flatten().astype(np.float32)
                    vertices_lst = []
                    vertices = predict_dense(param, roi_box)
                    vertices_lst.append(vertices)
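# Note (hedged): the fragment above is truncated -- the matching `except` for
# its `try:` is not shown, so it is left as-is. `folder` is used with len(),
# iteration, and item lookup, so it is assumed to be a dict-like mapping of
# keys to image paths; one hypothetical way to build it:
from pathlib import Path

folder = {p.stem: p for p in Path('data/images').glob('*.jpg')}  # hypothetical location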
def get_landmark_2d(root, image_path):
    # 0. read the image
    img_ori = cv2.imread(os.path.join(root, image_path))

    # 1. load the pre-trained model
    checkpoint_fp = 'models/MobDenseNet.pth.tar'
    arch = 'mobdensenet_v1'
    checkpoint = torch.load(
        checkpoint_fp, map_location=lambda storage, loc: storage)['state_dict']
    model = getattr(MobDenseNet, arch)(
        num_classes=62)  # 62 = 12(pose) + 40(shape) + 10(expression)
    model_dict = model.state_dict()
    # bug fix: the original never copied the checkpoint weights, so the model ran
    # uninitialized; strip the multi-GPU 'module.' prefix and load, as elsewhere
    for k in checkpoint.keys():
        model_dict[k.replace('module.', '')] = checkpoint[k]
    model.load_state_dict(model_dict)
    if args.mode == 'gpu':
        cudnn.benchmark = True
        model = model.cuda()
    model.eval()

    # 2. load dlib models for face detection and for the landmark predictor used in face cropping
    if args.dlib_landmark:
        dlib_landmark_model = 'models/shape_predictor_68_face_landmarks.dat'
        face_regressor = dlib.shape_predictor(dlib_landmark_model)
    if args.dlib_bbox:
        face_detector = dlib.get_frontal_face_detector()

    # 3. forward
    tri = sio.loadmat('visualize/tri.mat')['tri'] - 1
    transform = transforms.Compose(
        [ToTensorGjz(), NormalizeGjz(mean=127.5, std=128)])

    # downscale large images before detection, then map boxes back to full resolution
    imgScale = 1
    scaledImg = img_ori
    if max(img_ori.shape) > maxImgSizeForDetection:
        imgScale = maxImgSizeForDetection / float(max(img_ori.shape))
        scaledImg = cv2.resize(img_ori, (int(img_ori.shape[1] * imgScale),
                                         int(img_ori.shape[0] * imgScale)))
    rects = face_detector(scaledImg, 1)
    for rect in rects:
        if args.dlib_landmark:
            faceRectangle = rectangle(int(rect.left() / imgScale),
                                      int(rect.top() / imgScale),
                                      int(rect.right() / imgScale),
                                      int(rect.bottom() / imgScale))
            # - use landmarks for cropping
            pts = face_regressor(img_ori, faceRectangle).parts()
            pts = np.array([[pt.x, pt.y] for pt in pts]).T
            roi_box = parse_roi_box_from_landmark(pts)
        else:
            bbox = [int(rect.left() / imgScale), int(rect.top() / imgScale),
                    int(rect.right() / imgScale), int(rect.bottom() / imgScale)]
            roi_box = parse_roi_box_from_bbox(bbox)
        img = crop_img(img_ori, roi_box)

        # forward: one step
        img = cv2.resize(img, dsize=(STD_SIZE, STD_SIZE),
                         interpolation=cv2.INTER_LINEAR)
        input = transform(img).unsqueeze(0)
        with torch.no_grad():
            if args.mode == 'gpu':
                input = input.cuda()
            param = model(input)
            param = param.squeeze().cpu().numpy().flatten().astype(np.float32)

        # 68 pts
        pts68 = predict_68pts(param, roi_box)
        return pts68
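# Usage sketch (hedged): predict_68pts returns a 3 x 68 array (x, y, z rows),
# so the 2D positions are the first two rows. If no face is detected, the loop
# never returns and the function yields None. Paths here are hypothetical.
lmk = get_landmark_2d('data/images', 'face_01.jpg')
if lmk is not None:
    xs, ys = lmk[0, :], lmk[1, :]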
def main(args):
    # 1. load the pre-trained model
    checkpoint_fp = 'models/phase1_wpdc_vdc.pth.tar'
    arch = 'mobilenet_1'
    checkpoint = torch.load(
        checkpoint_fp, map_location=lambda storage, loc: storage)['state_dict']
    model = getattr(mobilenet_v1, arch)(
        num_classes=62)  # 62 = 12(pose) + 40(shape) + 10(expression)
    model_dict = model.state_dict()
    # the model was trained on multiple GPUs, so remove the 'module.' prefix
    for k in checkpoint.keys():
        model_dict[k.replace('module.', '')] = checkpoint[k]
    model.load_state_dict(model_dict)
    if args.mode == 'gpu':
        cudnn.benchmark = True
        model = model.cuda()
    model.eval()

    # 2. load dlib models for face detection and for the landmark predictor used in face cropping
    if args.dlib_landmark:
        dlib_landmark_model = 'models/shape_predictor_68_face_landmarks.dat'
        face_regressor = dlib.shape_predictor(dlib_landmark_model)
    if args.dlib_bbox:
        face_detector = dlib.get_frontal_face_detector()

    # 3. forward
    tri = sio.loadmat('visualize/tri.mat')['tri']
    transform = transforms.Compose(
        [ToTensorGjz(), NormalizeGjz(mean=127.5, std=128)])
    files = sorted(glob.glob(os.path.join(args.folder, '*.jpg')) +
                   glob.glob(os.path.join(args.folder, '*.png')))
    p_bar = tqdm(total=len(files))
    for img_fp in files:
        img_ori = cv2.imread(img_fp)
        if args.dlib_bbox:
            rects = face_detector(img_ori, 1)
        else:
            rects = []

        if len(rects) == 0:
            rects = dlib.rectangles()
            rect_fp = img_fp + '.bbox'
            lines = open(rect_fp).read().strip().split('\n')[1:]
            for l in lines:
                l, r, t, b = [int(_) for _ in l.split(' ')[1:]]
                rect = dlib.rectangle(l, r, t, b)
                rects.append(rect)

        pts_res = []
        Ps = []  # camera matrix collection
        poses = []  # pose collection, [todo: validate it]
        vertices_lst = []  # store multiple face vertices
        ind = 0
        suffix = get_suffix(img_fp)
        for rect in rects:
            # use dlib landmarks to crop the image when available; otherwise derive the ROI box from the face bbox alone
            if args.dlib_landmark:
                # - use landmarks for cropping
                pts = face_regressor(img_ori, rect).parts()
                pts = np.array([[pt.x, pt.y] for pt in pts]).T
                roi_box = parse_roi_box_from_landmark(pts)
            else:
                # - use detected face bbox
                bbox = [rect.left(), rect.top(), rect.right(), rect.bottom()]
                roi_box = parse_roi_box_from_bbox(bbox)

            img = crop_img(img_ori, roi_box)

            # forward: one step
            img = cv2.resize(img, dsize=(STD_SIZE, STD_SIZE),
                             interpolation=cv2.INTER_LINEAR)
            input_ = transform(img).unsqueeze(0)
            with torch.no_grad():
                if args.mode == 'gpu':
                    input_ = input_.cuda()
                param = model(input_)
                param = param.squeeze().cpu().numpy().flatten().astype(np.float32)

            # 68 pts
            pts68 = predict_68pts(param, roi_box)

            # two-step: refine the bbox from the first-pass landmarks for a more accurate crop
            if args.bbox_init == 'two':
                roi_box = parse_roi_box_from_landmark(pts68)
                img_step2 = crop_img(img_ori, roi_box)
                img_step2 = cv2.resize(img_step2, dsize=(STD_SIZE, STD_SIZE),
                                       interpolation=cv2.INTER_LINEAR)
                input_ = transform(img_step2).unsqueeze(0)
                with torch.no_grad():
                    if args.mode == 'gpu':
                        input_ = input_.cuda()
                    param = model(input_)
                    param = param.squeeze().cpu().numpy().flatten().astype(np.float32)
                pts68 = predict_68pts(param, roi_box)

            pts_res.append(pts68)
            P, pose = parse_pose(param)
            Ps.append(P)
            poses.append(pose)

        # take the first face's landmarks, rotate pi around the y axis, and scale to [-1, 1]
        points = np.array(pts_res)[0].T
        rotated = eulerAnglesToRotationMatrix(np.array([0., np.pi, 0.]))
        points = points.dot(rotated)
        scaler = MinMaxScaler(feature_range=(-1., 1.))
        scaled_points = scaler.fit_transform(points)
        points = scaled_points
        f_name = img_fp.replace(args.folder + '/', '').replace('.png', '').replace('.jpg', '')
        np.save('./results/{}.npy'.format(f_name), points.reshape(-1))
        if args.plot:
            plot_face(points, img_fp)
            if args.show_flg:
                plt.show()
            else:
                plt.savefig('./results/{}.png'.format(f_name))
        p_bar.update(1)
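# Reading back a saved landmark file (hedged sketch; the file name is
# hypothetical). `points` above is the transposed 68-point array (68 x 3),
# rotated, scaled to [-1, 1], and flattened before saving, so a reshape
# restores it:
restored = np.load('./results/some_frame.npy').reshape(68, 3)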
def test_video(args):
    start_time = time.time()
    x = 1  # display the frame rate every 1 second
    counter = 0

    # 1. load the pre-trained model
    # checkpoint_fp = 'models/phase1_wpdc_vdc_v2.pth.tar'
    # arch = 'mobilenet_1'
    checkpoint_fp = 'models/MobDenseNet.pth.tar'
    arch = 'mobdensenet_v1'
    checkpoint = torch.load(
        checkpoint_fp, map_location=lambda storage, loc: storage)['state_dict']
    model = getattr(MobDenseNet, arch)(
        num_classes=62)  # 62 = 12(pose) + 40(shape) + 10(expression)
    model_dict = model.state_dict()
    # the model was trained on multiple GPUs, so remove the 'module.' prefix
    for k in checkpoint.keys():
        model_dict[k.replace('module.', '')] = checkpoint[k]
    model.load_state_dict(model_dict)
    if args.mode == 'gpu':
        cudnn.benchmark = True
        model = model.cuda()
    model.eval()

    # 2. load dlib models for face detection and for the landmark predictor used in face cropping
    if args.dlib_landmark:
        dlib_landmark_model = 'models/shape_predictor_68_face_landmarks.dat'
        face_regressor = dlib.shape_predictor(dlib_landmark_model)
    if args.dlib_bbox:
        face_detector = dlib.get_frontal_face_detector()

    # 3. forward
    tri = sio.loadmat('visualize/tri.mat')['tri'] - 1
    tri_pts68 = sio.loadmat('visualize/pats68_tri.mat')['tri']
    textureImg = cv2.imread(image_name)
    cameraImg = cap.read()[1]
    # textureCoords = df.getFaceTextureCoords(textureImg)
    # drawface = Drawing3DFace.Draw3DFace(cameraImg, textureImg, textureCoords, tri_pts68.T)
    transform = transforms.Compose(
        [ToTensorGjz(), NormalizeGjz(mean=127.5, std=128)])
    while True:
        # get a frame
        img_ori = cap.read()[1]

        # downscale large frames before detection, then map boxes back to full resolution
        imgScale = 1
        scaledImg = img_ori
        if max(img_ori.shape) > maxImgSizeForDetection:
            imgScale = maxImgSizeForDetection / float(max(img_ori.shape))
            scaledImg = cv2.resize(img_ori, (int(img_ori.shape[1] * imgScale),
                                             int(img_ori.shape[0] * imgScale)))
        rects = face_detector(scaledImg, 1)

        Ps = []  # camera matrix collection
        poses = []  # pose collection
        pts_res = []
        # suffix = get_suffix(img_ori)
        for rect in rects:
            if args.dlib_landmark:
                faceRectangle = rectangle(int(rect.left() / imgScale),
                                          int(rect.top() / imgScale),
                                          int(rect.right() / imgScale),
                                          int(rect.bottom() / imgScale))
                # - use landmarks for cropping
                pts = face_regressor(img_ori, faceRectangle).parts()
                pts = np.array([[pt.x, pt.y] for pt in pts]).T
                roi_box = parse_roi_box_from_landmark(pts)
            else:
                bbox = [int(rect.left() / imgScale), int(rect.top() / imgScale),
                        int(rect.right() / imgScale), int(rect.bottom() / imgScale)]
                roi_box = parse_roi_box_from_bbox(bbox)
            img = crop_img(img_ori, roi_box)

            # forward: one step
            img = cv2.resize(img, dsize=(STD_SIZE, STD_SIZE),
                             interpolation=cv2.INTER_LINEAR)
            input = transform(img).unsqueeze(0)
            with torch.no_grad():
                if args.mode == 'gpu':
                    input = input.cuda()
                param = model(input)
                param = param.squeeze().cpu().numpy().flatten().astype(np.float32)

            # 68 pts
            pts68 = predict_68pts(param, roi_box)
            # df.triDelaunay(pts68)

            densePts = predict_dense(param, roi_box)
            P, pose = parse_pose(param)
            Ps.append(P)
            poses.append(pose)

            # two-step: refine the bbox from the first-pass landmarks for a more accurate crop
            if args.bbox_init == 'two':
                roi_box = parse_roi_box_from_landmark(pts68)
                img_step2 = crop_img(img_ori, roi_box)
                img_step2 = cv2.resize(img_step2, dsize=(STD_SIZE, STD_SIZE),
                                       interpolation=cv2.INTER_LINEAR)
                input = transform(img_step2).unsqueeze(0)
                with torch.no_grad():
                    if args.mode == 'gpu':
                        input = input.cuda()
                    param = model(input)
                    param = param.squeeze().cpu().numpy().flatten().astype(np.float32)
                pts68 = predict_68pts(param, roi_box)

            pts_res.append(pts68)
            pts = []
            # draw landmarks (cv2.circle needs integer pixel coordinates)
            for indx in range(68):
                pos = (int(round(pts68[0, indx])), int(round(pts68[1, indx])))
                pts.append(pos)
                cv2.circle(img_ori, pos, 3, color=(255, 255, 255), thickness=-1)

        # draw pose box
        if args.dump_pose:
            img_ori = plot_pose_box(img_ori, Ps, pts_res)
        # draw face mesh
        if args.dump_2D_face_mesh:
            img_ori = df.drawMesh(img_ori, densePts.T, tri.T)
        if args.dump_3D_face_mesh:
            pass
            # img = drawface.render(pts68)

        cv2.imshow("faceDetector", img_ori)
        counter += 1
        if (time.time() - start_time) > x:
            print("FPS: ", counter / (time.time() - start_time))
            counter = 0
            start_time = time.time()
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()
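# test_video() reads several module-level globals defined elsewhere in the
# script; a hedged sketch of plausible values (all three are assumptions):
cap = cv2.VideoCapture(0)            # webcam capture source
maxImgSizeForDetection = 640         # downscale cap for the dlib detector
image_name = 'samples/texture.jpg'   # texture image for the commented-out 3D draw path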
def main(args):
    # 1. load pre-trained model
    checkpoint_fp = 'models/phase1_wpdc_vdc.pth.tar'
    arch = 'mobilenet_1'
    checkpoint = torch.load(
        checkpoint_fp, map_location=lambda storage, loc: storage)['state_dict']
    model = getattr(mobilenet_v1, arch)(
        num_classes=62)  # 62 = 12(pose) + 40(shape) + 10(expression)
    model_dict = model.state_dict()
    # because the model is trained by multiple gpus, prefix module should be removed
    for k in checkpoint.keys():
        model_dict[k.replace('module.', '')] = checkpoint[k]
    model.load_state_dict(model_dict)
    if args.mode == 'gpu':
        cudnn.benchmark = True
        model = model.cuda()
    model.eval()

    # 2. load pre-trained model uv-gan
    if args.uvgan:
        if args.checkpoint_uv_gan == "":
            print("Specify the path to checkpoint uv_gan")
            exit()
        uvgan = infer_uv_gan.UV_GAN(args.checkpoint_uv_gan)

    # 3. load dlib model for face detection and landmark used for face cropping
    if args.dlib_landmark:
        dlib_landmark_model = 'models/shape_predictor_68_face_landmarks.dat'
        face_regressor = dlib.shape_predictor(dlib_landmark_model)
    if args.dlib_bbox:
        face_detector = dlib.get_frontal_face_detector()

    # 4. forward
    tri = sio.loadmat('visualize/tri.mat')['tri']
    transform = transforms.Compose(
        [ToTensorGjz(), NormalizeGjz(mean=127.5, std=128)])
    for img_fp in args.files:
        img_ori = cv2.imread(img_fp)
        if args.dlib_bbox:
            rects = face_detector(img_ori, 1)
        else:
            rects = []

        if len(rects) == 0:
            rects = dlib.rectangles()
            rect_fp = img_fp + '.bbox'
            lines = open(rect_fp).read().strip().split('\n')[1:]
            for l in lines:
                l, r, t, b = [int(_) for _ in l.split(' ')[1:]]
                rect = dlib.rectangle(l, r, t, b)
                rects.append(rect)

        pts_res = []
        Ps = []  # Camera matrix collection
        poses = []  # pose collection, [todo: validate it]
        vertices_lst = []  # store multiple face vertices
        ind = 0
        suffix = get_suffix(img_fp)
        for rect in rects:
            # whether use dlib landmark to crop image, if not, use only face bbox to calc roi bbox for cropping
            if args.dlib_landmark:
                # - use landmark for cropping
                pts = face_regressor(img_ori, rect).parts()
                pts = np.array([[pt.x, pt.y] for pt in pts]).T
                roi_box = parse_roi_box_from_landmark(pts)
            else:
                # - use detected face bbox
                bbox = [rect.left(), rect.top(), rect.right(), rect.bottom()]
                roi_box = parse_roi_box_from_bbox(bbox)

            img = crop_img(img_ori, roi_box)

            # forward: one step
            img = cv2.resize(img, dsize=(STD_SIZE, STD_SIZE),
                             interpolation=cv2.INTER_LINEAR)
            input = transform(img).unsqueeze(0)
            with torch.no_grad():
                if args.mode == 'gpu':
                    input = input.cuda()
                param = model(input)
                param = param.squeeze().cpu().numpy().flatten().astype(np.float32)

            # 68 pts
            pts68 = predict_68pts(param, roi_box)

            # two-step for more accurate bbox to crop face
            if args.bbox_init == 'two':
                roi_box = parse_roi_box_from_landmark(pts68)
                img_step2 = crop_img(img_ori, roi_box)
                img_step2 = cv2.resize(img_step2, dsize=(STD_SIZE, STD_SIZE),
                                       interpolation=cv2.INTER_LINEAR)
                input = transform(img_step2).unsqueeze(0)
                with torch.no_grad():
                    if args.mode == 'gpu':
                        input = input.cuda()
                    param = model(input)
                    param = param.squeeze().cpu().numpy().flatten().astype(np.float32)
                pts68 = predict_68pts(param, roi_box)

            pts_res.append(pts68)
            P, pose = parse_pose(param)
            Ps.append(P)
            poses.append(pose)

            if args.dump_obj:
                vertices = predict_dense(param, roi_box)
                vertices_lst.append(vertices)
                wfp = '{}_{}.obj'.format(img_fp.replace(suffix, ''), ind)
                colors = get_colors(img_ori, vertices)
                # rebuild dense vertices from the 3DMM parameters (shape + expression)
                p, offset, alpha_shp, alpha_exp = _parse_param(param)
                vertices = (u + w_shp @ alpha_shp + w_exp @ alpha_exp).reshape(
                    3, -1, order='F') + offset
                vertices = vertices.T
                tri = tri.T - 1
                print('Dump obj with sampled texture to {}'.format(wfp))
                unwraps = create_unwraps(vertices)
                h, w = args.height, args.width
                tcoords = process_uv(unwraps[:, :2], h, w)
                texture = render_colors(tcoords, tri, colors, h, w, c=3).astype('uint8')
                scaled_tcoords = scale_tcoords(tcoords)
                if args.uvgan:
                    texture = uvgan.infer(texture)
                else:
                    texture = cv2.cvtColor(texture, cv2.COLOR_BGR2RGB)
                vertices, colors, uv_coords = (vertices.astype(np.float32).copy(),
                                               colors.astype(np.float32).copy(),
                                               scaled_tcoords.astype(np.float32).copy())
                write_obj_with_colors_texture(wfp, vertices, colors, tri,
                                              texture * 255.0, uv_coords)
            ind += 1
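# Hypothetical command-line invocation for the uv-gan variant above. The flag
# names mirror the attributes read from `args`; the argparse block itself is
# not shown, so treat this as a sketch:
#   python main.py --files samples/test1.jpg --mode gpu --dump_obj true \
#       --uvgan true --checkpoint_uv_gan models/uvgan --height 256 --width 256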
def main(args):
    # 1. load the pre-trained model
    checkpoint_fp = 'models/phase1_wpdc_vdc.pth.tar'
    arch = 'mobilenet_1'
    app = RenderPipeline(**cfg)
    checkpoint = torch.load(
        checkpoint_fp, map_location=lambda storage, loc: storage)['state_dict']
    model = getattr(mobilenet_v1, arch)(
        num_classes=62)  # 62 = 12(pose) + 40(shape) + 10(expression)
    model_dict = model.state_dict()
    # the model was trained on multiple GPUs, so remove the 'module.' prefix
    for k in checkpoint.keys():
        model_dict[k.replace('module.', '')] = checkpoint[k]
    model.load_state_dict(model_dict)
    if args.mode == 'gpu':
        cudnn.benchmark = True
        model = model.cuda()
    model.eval()

    face_detector = dlib.get_frontal_face_detector()

    # 3. forward
    tri = sio.loadmat('tri_refine.mat')['tri']
    tri = _to_ctype(tri).astype(np.int32)  # for type compatibility
    transform = transforms.Compose(
        [ToTensorGjz(), NormalizeGjz(mean=127.5, std=128)])

    last_frame_lmks = []
    vc = cv2.VideoCapture("C:\\Users\\zh13\\Videos\\TrackingTest.mov")
    success, frame = vc.read()
    while success:
        # run the detector only when no landmarks were tracked in the previous
        # frame; otherwise derive the ROI boxes from the tracked landmarks
        roi_box = []
        if len(last_frame_lmks) == 0:
            rects = face_detector(frame, 1)
            for rect in rects:
                bbox = [rect.left(), rect.top(), rect.right(), rect.bottom()]
                roi_box.append(parse_roi_box_from_bbox(bbox))
        else:
            for lmk in last_frame_lmks:
                roi_box.append(parse_roi_box_from_landmark(lmk))

        this_frame_lmk = []
        params = []
        for box in roi_box:
            img_to_net = crop_img(frame, box)
            img_to_net = cv2.resize(img_to_net, dsize=(STD_SIZE, STD_SIZE),
                                    interpolation=cv2.INTER_LINEAR)
            input = transform(img_to_net).unsqueeze(0)
            with torch.no_grad():
                if args.mode == 'gpu':
                    input = input.cuda()
                param = model(input)
                param = param.squeeze().cpu().numpy().flatten().astype(np.float32)
            params.append(param)
            this_frame_lmk.append(predict_68pts(param, box))
        last_frame_lmks = this_frame_lmk

        if args.render_mesh:
            for box, param in zip(roi_box, params):
                vertices = predict_dense(param, box)
                frame = app(_to_ctype(vertices.T), tri,
                            _to_ctype(frame.astype(np.float32) / 255.))
        else:
            for lmk in last_frame_lmks:
                for p in lmk.T:
                    cv2.circle(frame,
                               (int(round(p[0] * draw_multiplier)),
                                int(round(p[1] * draw_multiplier))),
                               draw_multiplier, (255, 0, 0), 1, cv2.LINE_AA,
                               draw_shiftbits)
        cv2.imshow("3ddfa video demo", frame)
        cv2.waitKey(1)
        success, frame = vc.read()
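# The drawing branch above assumes these module-level globals. cv2's `shift`
# argument interprets coordinates as fixed-point with that many fractional
# bits, hence the pre-multiplication by 2**shift; the exact values here are
# assumptions, as is `cfg`, the RenderPipeline configuration dict:
draw_shiftbits = 4
draw_multiplier = 1 << draw_shiftbits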
def classify(model, inputs):
    in_img = inputs['photo']
    img_ori = np.array(in_img)
    img_fp = 'samples/test1.jpg'

    face_detector = dlib.get_frontal_face_detector()

    # 3. forward
    tri = sio.loadmat('visualize/tri.mat')['tri']
    transform = transforms.Compose(
        [ToTensorGjz(), NormalizeGjz(mean=127.5, std=128)])
    # print(transform)
    rects = face_detector(img_ori, 1)

    pts_res = []
    Ps = []  # Camera matrix collection
    poses = []  # pose collection, [todo: validate it]
    vertices_lst = []  # store multiple face vertices
    ind = 0
    suffix = get_suffix(img_fp)
    for rect in rects:
        # - use detected face bbox
        bbox = [rect.left(), rect.top(), rect.right(), rect.bottom()]
        roi_box = parse_roi_box_from_bbox(bbox)
        img = crop_img(img_ori, roi_box)

        # forward: one step
        img = cv2.resize(img, dsize=(STD_SIZE, STD_SIZE),
                         interpolation=cv2.INTER_LINEAR)
        input = transform(img).unsqueeze(0)
        print(input)
        with torch.no_grad():
            if mode == 'gpu':
                input = input.cuda()
            param = model(input)
            param = param.squeeze().cpu().numpy().flatten().astype(np.float32)

        # 68 pts
        pts68 = predict_68pts(param, roi_box)

        # two-step for more accurate bbox to crop face
        if bbox_init == 'two':
            roi_box = parse_roi_box_from_landmark(pts68)
            img_step2 = crop_img(img_ori, roi_box)
            img_step2 = cv2.resize(img_step2, dsize=(STD_SIZE, STD_SIZE),
                                   interpolation=cv2.INTER_LINEAR)
            input = transform(img_step2).unsqueeze(0)
            with torch.no_grad():
                if mode == 'gpu':
                    input = input.cuda()
                param = model(input)
                param = param.squeeze().cpu().numpy().flatten().astype(np.float32)
            pts68 = predict_68pts(param, roi_box)

        pts_res.append(pts68)
        P, pose = parse_pose(param)
        Ps.append(P)
        poses.append(pose)
        vertices = predict_dense(param, roi_box)
        vertices_lst.append(vertices)
        ind += 1

    pncc_feature = cpncc(img_ori, vertices_lst, tri - 1)
    output = pncc_feature[:, :, ::-1]
    print(type(output))
    pilImg = transforms.ToPILImage()(np.uint8(output))
    return {"image": pilImg}
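# Usage sketch (hedged): classify() takes a dict with a PIL image under 'photo'
# and returns the PNCC rendering as a PIL image. It assumes `mode` and
# `bbox_init` exist as module-level globals and that `model` was built the same
# way as in main(); the paths here are hypothetical.
from PIL import Image

result = classify(model, {'photo': Image.open('samples/test1.jpg')})
result['image'].save('results/test1_pncc.png')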