# Imports assume the standard 3DDFA repo layout. The flag globals used below
# (use_gpu, use_two_step_bbox_init, the is_dump_* switches, paf_size,
# show_landmarks_fig) and the objects model, face_detector, face_regressor,
# transform, tri, and the project-local helper draw_landmarks_opencv are
# expected to be defined at module level before process_one_frame is called.
import numpy as np
import cv2
import dlib
import torch
import torch.backends.cudnn as cudnn
import scipy.io as sio
import torchvision.transforms as transforms

import mobilenet_v1
from utils.ddfa import ToTensorGjz, NormalizeGjz
from utils.inference import (get_suffix, parse_roi_box_from_landmark,
                             parse_roi_box_from_bbox, crop_img, predict_68pts,
                             predict_dense, dump_to_ply, dump_vertex,
                             draw_landmarks, get_colors, write_obj_with_colors)
from utils.estimate_pose import parse_pose
from utils.cv_plot import plot_pose_box
from utils.render import cget_depths_image, cpncc
from utils.paf import gen_img_paf

STD_SIZE = 120


def process_one_frame(frame):
    if frame is None:
        print("No frame, check your camera")
        return
    img_ori = frame
    img_fp = "camera.png"  # arbitrary placeholder filename; only used to build the dump paths below
    rects = face_detector(img_ori, 1)

    pts_res = []
    Ps = []  # camera matrix collection
    poses = []  # pose collection, [todo: validate it]
    vertices_lst = []  # store multiple face vertices
    ind = 0
    suffix = get_suffix(img_fp)
    for i in range(len(rects)):
        if i != 0:
            break  # only process the first detected face
        rect = rects[i]
        # whether to use dlib landmarks to crop the image; if not, use only the
        # face bbox to compute the roi bbox for cropping
        # - use landmarks for cropping
        pts = face_regressor(img_ori, rect).parts()
        pts = np.array([[pt.x, pt.y] for pt in pts]).T
        roi_box = parse_roi_box_from_landmark(pts)

        img = crop_img(img_ori, roi_box)

        # forward: one step
        img = cv2.resize(img, dsize=(STD_SIZE, STD_SIZE), interpolation=cv2.INTER_LINEAR)
        input = transform(img).unsqueeze(0)
        with torch.no_grad():
            if use_gpu:
                input = input.cuda()
            param = model(input)
            param = param.squeeze().cpu().numpy().flatten().astype(np.float32)

        # 68 pts
        pts68 = predict_68pts(param, roi_box)

        # two-step for a more accurate bbox to crop the face
        if use_two_step_bbox_init:
            roi_box = parse_roi_box_from_landmark(pts68)
            img_step2 = crop_img(img_ori, roi_box)
            img_step2 = cv2.resize(img_step2, dsize=(STD_SIZE, STD_SIZE),
                                   interpolation=cv2.INTER_LINEAR)
            input = transform(img_step2).unsqueeze(0)
            with torch.no_grad():
                if use_gpu:
                    input = input.cuda()
                param = model(input)
                param = param.squeeze().cpu().numpy().flatten().astype(np.float32)
            pts68 = predict_68pts(param, roi_box)

        pts_res.append(pts68)
        P, pose = parse_pose(param)
        Ps.append(P)
        poses.append(pose)

        # dense face 3d vertices
        vertices = predict_dense(param, roi_box)
        vertices_lst.append(vertices)
        if is_dump_to_ply:
            dump_to_ply(vertices, tri, '{}_{}.ply'.format(img_fp.replace(suffix, ''), ind))
        if is_dump_vertex:
            dump_vertex(vertices, '{}_{}.mat'.format(img_fp.replace(suffix, ''), ind))
        if is_dump_pts:
            wfp = '{}_{}.txt'.format(img_fp.replace(suffix, ''), ind)
            np.savetxt(wfp, pts68, fmt='%.3f')
            print('Save 68 3d landmarks to {}'.format(wfp))
        if is_dump_roi_box:
            wfp = '{}_{}.roibox'.format(img_fp.replace(suffix, ''), ind)
            np.savetxt(wfp, roi_box, fmt='%.3f')
            print('Save roi box to {}'.format(wfp))
        if is_dump_paf:
            wfp_paf = '{}_{}_paf.jpg'.format(img_fp.replace(suffix, ''), ind)
            wfp_crop = '{}_{}_crop.jpg'.format(img_fp.replace(suffix, ''), ind)
            paf_feature = gen_img_paf(img_crop=img, param=param, kernel_size=paf_size)

            cv2.imwrite(wfp_paf, paf_feature)
            cv2.imwrite(wfp_crop, img)
            print('Dump to {} and {}'.format(wfp_crop, wfp_paf))
        if is_dump_obj:
            wfp = '{}_{}.obj'.format(img_fp.replace(suffix, ''), ind)
            colors = get_colors(img_ori, vertices)
            write_obj_with_colors(wfp, vertices, tri, colors)
            print('Dump obj with sampled texture to {}'.format(wfp))
        ind += 1

    if is_dump_pose:
        # P, pose = parse_pose(param)  # camera matrix (without scale) and pose (yaw, pitch, roll, to verify)
        img_pose = plot_pose_box(img_ori, Ps, pts_res)
        wfp = img_fp.replace(suffix, '_pose.jpg')
        cv2.imwrite(wfp, img_pose)
        print('Dump to {}'.format(wfp))
    if is_dump_depth:
        wfp = img_fp.replace(suffix, '_depth.png')
        # depths_img = get_depths_image(img_ori, vertices_lst, tri - 1)  # python version
        depths_img = cget_depths_image(img_ori, vertices_lst, tri - 1)  # cython version
        cv2.imwrite(wfp, depths_img)
        print('Dump to {}'.format(wfp))
    if is_dump_pncc:
        wfp = img_fp.replace(suffix, '_pncc.png')
        pncc_feature = cpncc(img_ori, vertices_lst, tri - 1)  # cython version
        cv2.imwrite(wfp, pncc_feature[:, :, ::-1])  # cv2.imwrite will swap RGB -> BGR
        print('Dump to {}'.format(wfp))
    if is_dump_res:
        draw_landmarks(img_ori, pts_res, wfp=img_fp.replace(suffix, '_3DDFA.jpg'),
                       show_flg=show_landmarks_fig)

    # blank out the original image with a white canvas so only the pose box
    # and landmarks remain visible
    img_ori = np.ones_like(img_ori) * 255
    img_pose = plot_pose_box(img_ori, Ps, pts_res)
    draw_landmarks_opencv(img_pose, pts_res)
    cv2.imshow("pose", img_pose)
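
# A minimal capture-loop sketch for driving process_one_frame from a webcam.
# This loop is not part of the original code: the camera index 0 and the 'q'
# quit key are assumptions, and the module-level model/detector/flag globals
# listed above must be initialized before calling it.
def run_camera_demo():
    cap = cv2.VideoCapture(0)  # assumed default camera
    if not cap.isOpened():
        print("Cannot open camera")
        return
    try:
        while True:
            ret, frame = cap.read()
            process_one_frame(frame if ret else None)
            if cv2.waitKey(1) & 0xFF == ord('q'):  # assumed quit key
                break
    finally:
        cap.release()
        cv2.destroyAllWindows()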
def main(args):
    # 1. load pre-trained model
    checkpoint_fp = 'models/phase1_wpdc_vdc.pth.tar'
    arch = 'mobilenet_1'

    checkpoint = torch.load(checkpoint_fp,
                            map_location=lambda storage, loc: storage)['state_dict']
    model = getattr(mobilenet_v1, arch)(num_classes=62)  # 62 = 12(pose) + 40(shape) + 10(expression)

    model_dict = model.state_dict()
    # because the model was trained on multiple gpus, the 'module.' prefix must be removed
    for k in checkpoint.keys():
        model_dict[k.replace('module.', '')] = checkpoint[k]
    model.load_state_dict(model_dict)
    if args.mode == 'gpu':
        cudnn.benchmark = True
        model = model.cuda()
    model.eval()

    # 2. load dlib models for face detection and landmark-based face cropping
    if args.dlib_landmark:
        dlib_landmark_model = 'models/shape_predictor_68_face_landmarks.dat'
        face_regressor = dlib.shape_predictor(dlib_landmark_model)
    if args.dlib_bbox:
        face_detector = dlib.get_frontal_face_detector()

    # 3. forward
    tri = sio.loadmat('visualize/tri.mat')['tri']
    transform = transforms.Compose([ToTensorGjz(), NormalizeGjz(mean=127.5, std=128)])
    for img_fp in args.files:
        img_ori = cv2.imread(img_fp)
        if args.dlib_bbox:
            rects = face_detector(img_ori, 1)
        else:
            rects = []

        if len(rects) == 0:
            rects = dlib.rectangles()
            rect_fp = img_fp + '.bbox'
            try:
                lines = open(rect_fp).read().strip().split('\n')[1:]
            except FileNotFoundError:
                print('Cannot load bbox file')
                continue
            for line in lines:
                l, r, t, b = [int(_) for _ in line.split(' ')[1:]]
                # note: dlib.rectangle takes (left, top, right, bottom)
                rect = dlib.rectangle(l, r, t, b)
                rects.append(rect)

        pts_res = []
        Ps = []  # camera matrix collection
        poses = []  # pose collection, [todo: validate it]
        vertices_lst = []  # store multiple face vertices
        ind = 0
        suffix = get_suffix(img_fp)
        for rect in rects:
            # whether to use dlib landmarks to crop the image; if not, use only
            # the face bbox to compute the roi bbox for cropping
            if args.dlib_landmark:
                # - use landmarks for cropping
                pts = face_regressor(img_ori, rect).parts()
                pts = np.array([[pt.x, pt.y] for pt in pts]).T
                roi_box = parse_roi_box_from_landmark(pts)
            else:
                # - use the detected face bbox
                bbox = [rect.left(), rect.top(), rect.right(), rect.bottom()]
                roi_box = parse_roi_box_from_bbox(bbox)

            img = crop_img(img_ori, roi_box)

            # forward: one step
            img = cv2.resize(img, dsize=(STD_SIZE, STD_SIZE), interpolation=cv2.INTER_LINEAR)
            input = transform(img).unsqueeze(0)
            with torch.no_grad():
                if args.mode == 'gpu':
                    input = input.cuda()
                param = model(input)
                param = param.squeeze().cpu().numpy().flatten().astype(np.float32)

            # 68 pts
            pts68 = predict_68pts(param, roi_box)

            # two-step for a more accurate bbox to crop the face
            if args.bbox_init == 'two':
                roi_box = parse_roi_box_from_landmark(pts68)
                img_step2 = crop_img(img_ori, roi_box)
                img_step2 = cv2.resize(img_step2, dsize=(STD_SIZE, STD_SIZE),
                                       interpolation=cv2.INTER_LINEAR)
                input = transform(img_step2).unsqueeze(0)
                with torch.no_grad():
                    if args.mode == 'gpu':
                        input = input.cuda()
                    param = model(input)
                    param = param.squeeze().cpu().numpy().flatten().astype(np.float32)
                pts68 = predict_68pts(param, roi_box)

            pts_res.append(pts68)
            P, pose = parse_pose(param)
            Ps.append(P)
            poses.append(pose)

            # dense face 3d vertices
            if args.dump_ply or args.dump_vertex or args.dump_depth or args.dump_pncc or args.dump_obj:
                vertices = predict_dense(param, roi_box)
                vertices_lst.append(vertices)
            if args.dump_ply:
                dump_to_ply(vertices, tri, '{}_{}.ply'.format(img_fp.replace(suffix, ''), ind))
            if args.dump_vertex:
                dump_vertex(vertices, '{}_{}.mat'.format(img_fp.replace(suffix, ''), ind))
            if args.dump_pts:
                wfp = '{}_{}.txt'.format(img_fp.replace(suffix, ''), ind)
                np.savetxt(wfp, pts68, fmt='%.3f')
                print('Save 68 3d landmarks to {}'.format(wfp))
            if args.dump_roi_box:
                wfp = '{}_{}.roibox'.format(img_fp.replace(suffix, ''), ind)
                np.savetxt(wfp, roi_box, fmt='%.3f')
                print('Save roi box to {}'.format(wfp))
            if args.dump_paf:
                wfp_paf = '{}_{}_paf.jpg'.format(img_fp.replace(suffix, ''), ind)
                wfp_crop = '{}_{}_crop.jpg'.format(img_fp.replace(suffix, ''), ind)
                paf_feature = gen_img_paf(img_crop=img, param=param, kernel_size=args.paf_size)

                cv2.imwrite(wfp_paf, paf_feature)
                cv2.imwrite(wfp_crop, img)
                print('Dump to {} and {}'.format(wfp_crop, wfp_paf))
            if args.dump_obj:
                wfp = '{}_{}.obj'.format(img_fp.replace(suffix, ''), ind)
                colors = get_colors(img_ori, vertices)
                write_obj_with_colors(wfp, vertices, tri, colors)
                print('Dump obj with sampled texture to {}'.format(wfp))
            ind += 1

        if args.dump_pose:
            # P, pose = parse_pose(param)  # camera matrix (without scale) and pose (yaw, pitch, roll, to verify)
            img_pose = plot_pose_box(img_ori, Ps, pts_res)
            wfp = img_fp.replace(suffix, '_pose.jpg')
            cv2.imwrite(wfp, img_pose)
            print('Dump to {}'.format(wfp))
        if args.dump_depth:
            wfp = img_fp.replace(suffix, '_depth.png')
            # depths_img = get_depths_image(img_ori, vertices_lst, tri - 1)  # python version
            depths_img = cget_depths_image(img_ori, vertices_lst, tri - 1)  # cython version
            cv2.imwrite(wfp, depths_img)
            print('Dump to {}'.format(wfp))
        if args.dump_pncc:
            wfp = img_fp.replace(suffix, '_pncc.png')
            pncc_feature = cpncc(img_ori, vertices_lst, tri - 1)  # cython version
            cv2.imwrite(wfp, pncc_feature[:, :, ::-1])  # cv2.imwrite will swap RGB -> BGR
            print('Dump to {}'.format(wfp))
        if args.dump_res:
            draw_landmarks(img_ori, pts_res, wfp=img_fp.replace(suffix, '_3DDFA.jpg'),
                           show_flg=args.show_flg)
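
# A sketch of the argument parser that would produce the namespace main()
# consumes. Every attribute below is read somewhere in main(); the defaults,
# short options, and help strings are assumptions, as is the str2bool helper.
def parse_args():
    import argparse

    def str2bool(v):
        return str(v).lower() in ('true', '1', 'yes')

    parser = argparse.ArgumentParser(description='3DDFA inference pipeline')
    parser.add_argument('-f', '--files', nargs='+', default=[],
                        help='image files fed into the network')
    parser.add_argument('-m', '--mode', default='cpu', type=str, help='gpu or cpu mode')
    parser.add_argument('--bbox_init', default='one', type=str,
                        help='one: one-step bbox init; two: two-step bbox init')
    parser.add_argument('--dlib_bbox', default=True, type=str2bool,
                        help='use dlib face detection')
    parser.add_argument('--dlib_landmark', default=True, type=str2bool,
                        help='use dlib landmarks to crop the face')
    parser.add_argument('--paf_size', default=3, type=int, help='PAF kernel size')
    parser.add_argument('--show_flg', default=False, type=str2bool,
                        help='show the visualization result')
    for flag in ('dump_res', 'dump_vertex', 'dump_ply', 'dump_pts', 'dump_roi_box',
                 'dump_pose', 'dump_depth', 'dump_pncc', 'dump_paf', 'dump_obj'):
        parser.add_argument('--' + flag, default=False, type=str2bool)
    return parser.parse_args()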
def classify(model, inputs):
    # `mode` and `bbox_init` were undefined free variables in the original
    # snippet; the defaults below are assumptions, not values from the source.
    mode = 'cpu'
    bbox_init = 'two'

    in_img = inputs['photo']
    img_ori = np.array(in_img)
    img_fp = 'samples/test1.jpg'  # placeholder path; only used to derive the suffix

    face_detector = dlib.get_frontal_face_detector()
    # forward
    tri = sio.loadmat('visualize/tri.mat')['tri']
    transform = transforms.Compose([ToTensorGjz(), NormalizeGjz(mean=127.5, std=128)])

    rects = face_detector(img_ori, 1)
    pts_res = []
    Ps = []  # camera matrix collection
    poses = []  # pose collection, [todo: validate it]
    vertices_lst = []  # store multiple face vertices
    ind = 0
    suffix = get_suffix(img_fp)
    for rect in rects:
        # - use the detected face bbox
        bbox = [rect.left(), rect.top(), rect.right(), rect.bottom()]
        roi_box = parse_roi_box_from_bbox(bbox)

        img = crop_img(img_ori, roi_box)
        # forward: one step
        img = cv2.resize(img, dsize=(STD_SIZE, STD_SIZE), interpolation=cv2.INTER_LINEAR)
        input = transform(img).unsqueeze(0)
        with torch.no_grad():
            if mode == 'gpu':
                input = input.cuda()
            param = model(input)
            param = param.squeeze().cpu().numpy().flatten().astype(np.float32)

        # 68 pts
        pts68 = predict_68pts(param, roi_box)

        # two-step for a more accurate bbox to crop the face
        if bbox_init == 'two':
            roi_box = parse_roi_box_from_landmark(pts68)
            img_step2 = crop_img(img_ori, roi_box)
            img_step2 = cv2.resize(img_step2, dsize=(STD_SIZE, STD_SIZE),
                                   interpolation=cv2.INTER_LINEAR)
            input = transform(img_step2).unsqueeze(0)
            with torch.no_grad():
                if mode == 'gpu':
                    input = input.cuda()
                param = model(input)
                param = param.squeeze().cpu().numpy().flatten().astype(np.float32)
            pts68 = predict_68pts(param, roi_box)

        pts_res.append(pts68)
        P, pose = parse_pose(param)
        Ps.append(P)
        poses.append(pose)

        vertices = predict_dense(param, roi_box)
        vertices_lst.append(vertices)
        ind += 1

    pncc_feature = cpncc(img_ori, vertices_lst, tri - 1)  # cython version
    output = pncc_feature[:, :, ::-1]  # swap channels back for PIL
    pilImg = transforms.ToPILImage()(np.uint8(output))
    return {"image": pilImg}
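
# Hypothetical entry points. The CLI path below goes through the parse_args()
# sketch above; the serving path assumes a framework that passes a PIL image
# under the 'photo' key. The sample path and output filename are illustrative
# only, e.g.:
#
#   from PIL import Image
#   result = classify(model, {'photo': Image.open('samples/test1.jpg').convert('RGB')})
#   result['image'].save('test1_pncc.png')

if __name__ == '__main__':
    main(parse_args())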