Code Example #1
File: lighting.py  Project: wjgaas/work-in-horizon
    def __call__(self, vertices, triangles, background):
        height, width = background.shape[:2]  # image size

        # 1. compute triangle/face normals and vertex normals
        # Old style: very slow Python loop over all triangles
        normal = np.zeros((vertices.shape[0], 3), dtype=np.float32)
        # surface_count = np.zeros((vertices.shape[0], 1))
        # TODO: why does the transposed triangle array need the -1 offset?
        # (tri.mat likely stores 1-based MATLAB indices)
        for i in range(triangles.shape[0]):
            i1, i2, i3 = triangles[i, :]
            v1, v2, v3 = vertices[[i1, i2, i3], :]
            surface_normal = np.cross(v2 - v1, v3 - v1)
            normal[[i1, i2, i3], :] += surface_normal
            # surface_count[[i1, i2, i3], :] += 1

        # normal /= surface_count
        # normal /= np.linalg.norm(normal, axis=1, keepdims=True)
        normal = _norm(normal)

        # Cython style
        # normal = np.zeros((vertices.shape[0], 3), dtype=np.float32)
        # mesh_core_cython.get_normal(normal, vertices, triangles, vertices.shape[0], triangles.shape[0])

        # 2. lighting
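        # the model below is ambient + Lambertian diffuse + Phong specular:
        #   color = I_amb*C_amb + I_dir*C_dir*max(N·L, 0)
        #         + I_spec*C_dir*max(R·V, 0)**specular_exp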
        color = np.zeros_like(vertices, dtype=np.float32)
        # ambient component
        if self.intensity_ambient > 0:
            color += self.intensity_ambient * self.color_ambient

        vertices_n = norm_vertices(vertices.copy())
        if self.intensity_directional > 0:
            # diffuse component
            direction = _norm(self.light_pos - vertices_n)
            cos = np.sum(normal * direction, axis=1)[:, None]
            # Lambertian diffuse term: clamp N·L to [0, 1]
            color += self.intensity_directional * (self.color_directional *
                                                   np.clip(cos, 0, 1))

            # specular component (Phong): R = 2(N·L)N - L, max(R·V, 0)**specular_exp
            if self.intensity_specular > 0:
                v2v = _norm(self.view_pos - vertices_n)
                reflection = 2 * cos * normal - direction
                # take the dot product first, then apply the specular exponent
                spe = np.clip(np.sum(v2v * reflection, axis=1)[:, None],
                              0, 1) ** self.specular_exp
                # no specular highlight on vertices facing away from the light
                spe = np.where(cos > 0, spe, np.zeros_like(spe))
                color += self.intensity_specular * self.color_directional * spe
        color = np.clip(color, 0, 1)

        # 3. rasterization, [0, 1]
        render_img = render.crender_colors(vertices,
                                           triangles,
                                           color,
                                           height,
                                           width,
                                           BG=background)
        render_img = (render_img * 255).astype(np.uint8)
        return render_img
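
The per-triangle loop above is the "very slow" path its own comment warns about. Below is a vectorized NumPy sketch of the same computation; it assumes vertices is an (N, 3) float array and triangles an (M, 3) integer index array, and folds in the row-wise normalization that _norm presumably performs (the function name here is illustrative, not project code):

import numpy as np

def vertex_normals(vertices, triangles):
    # face normals, same cross product as the loop body
    v1, v2, v3 = (vertices[triangles[:, i]] for i in range(3))
    face_normals = np.cross(v2 - v1, v3 - v1)  # (M, 3)
    # scatter-add each face normal onto its three vertices;
    # np.add.at accumulates over repeated indices, unlike fancy-indexed +=
    normals = np.zeros_like(vertices, dtype=np.float32)
    np.add.at(normals, triangles.reshape(-1),
              np.repeat(face_normals, 3, axis=0))
    # row-wise normalization, guarding degenerate (zero-length) rows
    lengths = np.linalg.norm(normals, axis=1, keepdims=True)
    return normals / np.maximum(lengths, 1e-8)

This replaces the Python-level loop with a handful of array operations, the same idea as the commented-out mesh_core_cython.get_normal path.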
Code Example #2
def getPoses(img_ori):
    # 1. load the pre-trained model
    checkpoint_fp = 'models/phase1_wpdc_vdc.pth.tar'
    arch = 'mobilenet_1'

    checkpoint = torch.load(checkpoint_fp, map_location=lambda storage, loc: storage)['state_dict']
    model = getattr(mobilenet_v1, arch)(num_classes=62)  # 62 = 12 (pose) + 40 (shape) + 10 (expression)

    model_dict = model.state_dict()
    # the model was trained on multiple GPUs, so the 'module.' prefix must be stripped
    for k in checkpoint.keys():
        model_dict[k.replace('module.', '')] = checkpoint[k]
    model.load_state_dict(model_dict)
    cudnn.benchmark = True
    model = model.cuda()
    model.eval()

    tri = sio.loadmat('visualize/tri.mat')['tri']
    transform = transforms.Compose([ToTensorGjz(), NormalizeGjz(mean=127.5, std=128)])

    alignment_model = face_alignment.FaceAlignment(face_alignment.LandmarksType._2D, flip_input=False, device='cuda')

    # the face alignment model expects RGB input (hence the BGR->RGB channel flip); it returns per-face landmark arrays
    preds = alignment_model.get_landmarks(img_ori[:, :, ::-1])
    pts_2d_68 = preds[0]
    roi_box = parse_roi_box_from_landmark(pts_2d_68.T)

    img = crop_img(img_ori, roi_box)
    # import pdb; pdb.set_trace()

    # forward: one step
    img = cv2.resize(img, dsize=(STD_SIZE, STD_SIZE), interpolation=cv2.INTER_LINEAR)
    input = transform(img).unsqueeze(0)
    with torch.no_grad():
        input = input.cuda()
        param = model(input)
        param = param.squeeze().cpu().numpy().flatten().astype(np.float32)

    # 68 pts
    pts68 = predict_68pts(param, roi_box)

    roi_box = parse_roi_box_from_landmark(pts68)
    img_step2 = crop_img(img_ori, roi_box)
    img_step2 = cv2.resize(img_step2, dsize=(STD_SIZE, STD_SIZE), interpolation=cv2.INTER_LINEAR)
    input = transform(img_step2).unsqueeze(0)
    with torch.no_grad():
        input = input.cuda()
        param = model(input)
        param = param.squeeze().cpu().numpy().flatten().astype(np.float32)

    P, pose = parse_pose(param)
    # dense 3D face vertices
    vertices = predict_dense(param, roi_box)
    colors = get_colors(img_ori, vertices)
    # aligned_param = get_aligned_param(param)
    # vertices_aligned = predict_dense(aligned_param, roi_box)
    # h, w, c = 120, 120, 3
    h, w, c = img_ori.shape
    img_2d = crender_colors(vertices.T, (tri - 1).T, colors[:, ::-1], h, w)
    img_2d = img_2d[:, :, ::-1]

    return img_2d, pose
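
The num_classes=62 comment in these examples describes the layout of the parameter vector the network regresses. A hypothetical splitter (the function name and exact reshapes are assumptions, not project code) makes that layout explicit:

import numpy as np

def split_62_param(param):
    # assumed layout per the comment: 12 pose + 40 shape + 10 expression = 62
    assert param.shape == (62,)
    pose = param[:12].reshape(3, 4)            # camera matrix [R | t] (assumed row-major)
    alpha_shape = param[12:52].reshape(-1, 1)  # 40 shape coefficients
    alpha_exp = param[52:].reshape(-1, 1)      # 10 expression coefficients
    return pose, alpha_shape, alpha_exp

parse_pose and predict_dense above consume the same vector; this sketch only documents the slicing implied by the comment.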
Code Example #3
def main(args):
    # 1. load the pre-trained model
    checkpoint_fp = 'models/phase1_wpdc_vdc.pth.tar'
    arch = 'mobilenet_1'

    checkpoint = torch.load(checkpoint_fp, map_location=lambda storage, loc: storage)['state_dict']
    model = getattr(mobilenet_v1, arch)(num_classes=62)  # 62 = 12 (pose) + 40 (shape) + 10 (expression)

    model_dict = model.state_dict()
    # the model was trained on multiple GPUs, so the 'module.' prefix must be stripped
    for k in checkpoint.keys():
        model_dict[k.replace('module.', '')] = checkpoint[k]
    model.load_state_dict(model_dict)
    if args.mode == 'gpu':
        cudnn.benchmark = True
        model = model.cuda()
    model.eval()

    tri = sio.loadmat('visualize/tri.mat')['tri']
    transform = transforms.Compose([ToTensorGjz(), NormalizeGjz(mean=127.5, std=128)])

    # 2. parse the image list
    with open(args.img_list) as f:
        img_list = [x.strip() for x in f.readlines()]
    landmark_list = []

    alignment_model = face_alignment.FaceAlignment(face_alignment.LandmarksType._2D, flip_input=False)

    if not os.path.exists(args.save_dir):
        os.mkdir(args.save_dir)
    if not os.path.exists(args.save_lmk_dir):
        os.mkdir(args.save_lmk_dir)

    for img_idx, img_fp in enumerate(tqdm(img_list)):
        img_ori = cv2.imread(os.path.join(args.img_prefix, img_fp))

        pts_res = []
        Ps = []  # Camera matrix collection
        poses = []  # pose collection, [todo: validate it]
        vertices_lst = []  # store multiple face vertices
        ind = 0
        suffix = get_suffix(img_fp)

        # the face alignment model expects RGB input and returns per-face landmark arrays; cv2 loads images in BGR order, hence the channel flip
        preds = alignment_model.get_landmarks(img_ori[:, :, ::-1])
        pts_2d_68 = preds[0]
        pts_2d_5 = get_5lmk_from_68lmk(pts_2d_68)
        landmark_list.append(pts_2d_5)
        roi_box = parse_roi_box_from_landmark(pts_2d_68.T)      # determine the ROI from the 68 landmarks

        img = crop_img(img_ori, roi_box)
        # import pdb; pdb.set_trace()

        # forward: one step
        img = cv2.resize(img, dsize=(STD_SIZE, STD_SIZE), interpolation=cv2.INTER_LINEAR)
        input = transform(img).unsqueeze(0)
        with torch.no_grad():
            if args.mode == 'gpu':
                input = input.cuda()
            param = model(input)                        # face pose (R, t) plus shape and expression coefficients
            param = param.squeeze().cpu().numpy().flatten().astype(np.float32)

        # 68 pts
        pts68 = predict_68pts(param, roi_box)                   # pts68 is a 3 x 68 array of landmark coordinates

        # two-step for more accurate bbox to crop face
        if args.bbox_init == 'two':
            roi_box = parse_roi_box_from_landmark(pts68)
            img_step2 = crop_img(img_ori, roi_box)
            img_step2 = cv2.resize(img_step2, dsize=(STD_SIZE, STD_SIZE), interpolation=cv2.INTER_LINEAR)
            input = transform(img_step2).unsqueeze(0)
            with torch.no_grad():
                if args.mode == 'gpu':
                    input = input.cuda()
                param = model(input)
                param = param.squeeze().cpu().numpy().flatten().astype(np.float32)

            pts68 = predict_68pts(param, roi_box)

        pts_res.append(pts68)
        P, pose = parse_pose(param)
        Ps.append(P)
        poses.append(pose)

        # dense face 3d vertices
        vertices = predict_dense(param, roi_box)

        if args.dump_2d_img:                # dump a 2D render of the face region
            wfp_2d_img = os.path.join(args.save_dir, os.path.basename(img_fp))
            colors = get_colors(img_ori, vertices)
            # aligned_param = get_aligned_param(param)
            # vertices_aligned = predict_dense(aligned_param, roi_box)
            # h, w, c = 120, 120, 3
            h, w, c = img_ori.shape
            img_2d = crender_colors(vertices.T, (tri - 1).T, colors[:, ::-1], h, w)
            cv2.imwrite(wfp_2d_img, img_2d[:, :, ::-1])
        if args.dump_param:
            split = img_fp.split('/')
            save_name = os.path.join(args.save_dir, '{}.txt'.format(os.path.splitext(split[-1])[0]))
            this_param = param * param_std + param_mean
            this_param = np.concatenate((this_param, roi_box))
            this_param.tofile(save_name, sep=' ')
    if args.dump_lmk:                   # save the 5-point landmarks
        save_path = os.path.join(args.save_lmk_dir, 'realign_lmk')
        with open(save_path, 'w') as f:
            for idx, (fname, land) in enumerate(zip(img_list, landmark_list)):
                # f.write('{} {} {} {}')
                land = land.astype(int)
                land_str = ' '.join([str(x) for x in land])
                msg = f'{fname} {idx} {land_str}\n'
                f.write(msg)
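
The dump_param branch above writes the de-normalized 62-value parameter vector followed by the 4 roi_box values as space-separated text via tofile. A minimal reader for that format (a sketch assuming exactly that layout; load_dumped_param is not a project function) would be:

import numpy as np

def load_dumped_param(path):
    # written by this_param.tofile(save_name, sep=' '):
    # 62 de-normalized 3DMM parameters, then the 4 roi_box values
    values = np.fromfile(path, dtype=np.float32, sep=' ')
    assert values.size == 62 + 4
    return values[:62], values[62:]   # (param, roi_box)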
Code Example #4
File: inference.py  Project: hchouchen/faceswap
def main(args):
    # 1. load the pre-trained model
    checkpoint_fp = 'models/phase1_wpdc_vdc.pth.tar'
    arch = 'mobilenet_1'

    checkpoint = torch.load(
        checkpoint_fp, map_location=lambda storage, loc: storage)['state_dict']
    model = getattr(mobilenet_v1, arch)(
        num_classes=62)  # 62 = 12 (pose) + 40 (shape) + 10 (expression)

    model_dict = model.state_dict()
    # the model was trained on multiple GPUs, so the 'module.' prefix must be stripped
    for k in checkpoint.keys():
        model_dict[k.replace('module.', '')] = checkpoint[k]
    model.load_state_dict(model_dict)
    if args.mode == 'gpu':
        cudnn.benchmark = True
        model = model.cuda()
    model.eval()

    tri = sio.loadmat('visualize/tri.mat')['tri']
    transform = transforms.Compose(
        [ToTensorGjz(), NormalizeGjz(mean=127.5, std=128)])

    # 2. parse the image list
    img_list = listdir(args.img_prefix)

    alignment_model = face_alignment.FaceAlignment(
        face_alignment.LandmarksType._2D, flip_input=False,
        device='cuda' if args.mode == 'gpu' else 'cpu')

    if not os.path.exists(args.save_dir):
        os.mkdir(args.save_dir)

    for img_idx, img_fp in enumerate(tqdm(img_list)):
        img_ori = cv2.imread(os.path.join(args.img_prefix, img_fp))

        pts_res = []
        Ps = []  # Camera matrix collection
        poses = []  # pose collection, [todo: validate it]
        vertices_lst = []  # store multiple face vertices
        ind = 0
        suffix = get_suffix(img_fp)

        # the face alignment model expects RGB input (hence the BGR->RGB channel flip); it returns per-face landmark arrays
        preds = alignment_model.get_landmarks(img_ori[:, :, ::-1])
        # get_landmarks returns None when no face is detected
        if not preds:
            continue
        pts_2d_68 = preds[0]
        roi_box = parse_roi_box_from_landmark(pts_2d_68.T)

        img = crop_img(img_ori, roi_box)
        # import pdb; pdb.set_trace()

        # forward: one step
        img = cv2.resize(img,
                         dsize=(STD_SIZE, STD_SIZE),
                         interpolation=cv2.INTER_LINEAR)
        input = transform(img).unsqueeze(0)
        with torch.no_grad():
            if args.mode == 'gpu':
                input = input.cuda()
            param = model(input)
            param = param.squeeze().cpu().numpy().flatten().astype(np.float32)

        # 68 pts
        pts68 = predict_68pts(param, roi_box)

        # two-step for more accurate bbox to crop face
        if args.bbox_init == 'two':
            roi_box = parse_roi_box_from_landmark(pts68)
            img_step2 = crop_img(img_ori, roi_box)
            img_step2 = cv2.resize(img_step2,
                                   dsize=(STD_SIZE, STD_SIZE),
                                   interpolation=cv2.INTER_LINEAR)
            input = transform(img_step2).unsqueeze(0)
            with torch.no_grad():
                if args.mode == 'gpu':
                    input = input.cuda()
                param = model(input)
                param = param.squeeze().cpu().numpy().flatten().astype(
                    np.float32)

            pts68 = predict_68pts(param, roi_box)

        pts_res.append(pts68)
        P, pose = parse_pose(param)
        Ps.append(P)
        poses.append(pose)

        # dense face 3d vertices
        vertices = predict_dense(param, roi_box)

        if args.dump_2d_img:
            wfp_2d_img = os.path.join(args.save_dir, os.path.basename(img_fp))
            colors = get_colors(img_ori, vertices)
            # aligned_param = get_aligned_param(param)
            # vertices_aligned = predict_dense(aligned_param, roi_box)
            # h, w, c = 120, 120, 3
            h, w, c = img_ori.shape
            img_2d = crender_colors(vertices.T, (tri - 1).T, colors[:, ::-1],
                                    h, w)
            cv2.imwrite(wfp_2d_img, img_2d[:, :, ::-1])
        del img_ori
        del img
        if args.dump_2d_img:
            del img_2d
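
Example #4's main reads several attributes off args; a minimal argparse driver (argument names mirror the args.* accesses above, defaults are illustrative guesses, not project values) could look like:

import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='3DDFA-style batch inference')
    parser.add_argument('--img_prefix', default='samples/', type=str)
    parser.add_argument('--save_dir', default='results/', type=str)
    parser.add_argument('--mode', default='gpu', type=str, choices=['gpu', 'cpu'])
    parser.add_argument('--bbox_init', default='two', type=str,
                        help='"two" enables the two-step bbox refinement')
    parser.add_argument('--dump_2d_img', action='store_true',
                        help='save the rendered 2D face images')
    main(parser.parse_args())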