Example #1
# Imports assumed for this standalone snippet (the mesh utilities and MorphabelModel come from the face3d package).
import cv2
import numpy as np
import pandas as pd
from skimage import io

from face3d import mesh
from face3d.morphable_model import MorphabelModel


def get_depth_image(image_path):
    # Load the image and the pre-detected 68 landmarks pickled next to it.
    im = cv2.imread(image_path)
    h, w, c = im.shape
    landmarks = pd.read_pickle(image_path.replace('.jpg', '.pkl'))

    bfm = MorphabelModel('Data/BFM/Out/BFM.mat')
    x = mesh.transform.from_image(landmarks, h, w)
    X_ind = bfm.kpt_ind

    fitted_sp, fitted_ep, fitted_s, fitted_angles, fitted_t = bfm.fit(
        x, X_ind, max_iter=200, isShow=False)
    colors = bfm.generate_colors(np.random.rand(bfm.n_tex_para, 1))
    colors = np.minimum(np.maximum(colors, 0), 1)

    fitted_vertices = bfm.generate_vertices(fitted_sp, fitted_ep)
    transformed_vertices = bfm.transform(fitted_vertices, fitted_s,
                                         fitted_angles, fitted_t)
    image_vertices = mesh.transform.to_image(transformed_vertices, h, w)

    triangles = bfm.triangles
    # Use the normalized per-vertex depth (z coordinate) as the attribute to render.
    z = image_vertices[:, 2:]
    z = z - np.min(z)
    z = z / np.max(z)
    attribute = z
    depth_image = mesh.render.render_colors(image_vertices,
                                            triangles,
                                            attribute,
                                            h,
                                            w,
                                            c=1)
    depth_image = (depth_image * 255).astype('uint8')

    savename = image_path.replace('.jpg', '-depth.jpg')
    io.imsave(savename, np.squeeze(depth_image))
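A minimal way to exercise this helper, assuming a hypothetical input image examples/face.jpg whose 68 landmarks are pickled alongside it as examples/face.pkl (the function derives both the .pkl path and the output path from the .jpg path):

# Hypothetical paths; a matching examples/face.pkl with the detected landmarks must exist.
get_depth_image('examples/face.jpg')  # writes the depth map to examples/face-depth.jpg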
Example #2
File: 2_3dmm.py Project: nwdxbx/xbx_nd
print('init bfm model success')

# --- 2. generate face mesh: vertices(represent shape) & colors(represent texture)
sp = bfm.get_shape_para('random')
ep = bfm.get_exp_para('random')
vertices = bfm.generate_vertices(sp, ep)

tp = bfm.get_tex_para('random')
colors = bfm.generate_colors(tp)
colors = np.minimum(np.maximum(colors, 0), 1)

# --- 3. transform vertices to proper position
s = 8e-04
angles = [10, 30, 20]
t = [0, 0, 0]
transformed_vertices = bfm.transform(vertices, s, angles, t)
projected_vertices = transformed_vertices.copy()  # using standard camera & orthographic projection

# --- 4. render(3d obj --> 2d image)
# set prop of rendering
h = w = 256
c = 3
image_vertices = mesh.transform.to_image(projected_vertices, h, w)
image = mesh.render.render_colors(image_vertices, bfm.triangles, colors, h, w)

# -------------------- Back:  2D image points and corresponding 3D vertex indices-->  parameters(pose, shape, expression) ------
## only use 68 key points to fit
x = projected_vertices[bfm.kpt_ind, :2]  # 2d keypoints, which can be detected from an image
X_ind = bfm.kpt_ind  # indices of the keypoints in the 3DMM; fixed
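The fragment stops just before the fitting step; as Example #4 below shows, the 2D keypoints and their fixed 3DMM indices are then passed to bfm.fit (a sketch, with max_iter chosen arbitrarily):

# Sketch: recover pose, shape and expression parameters from the 68 keypoints.
fitted_sp, fitted_ep, fitted_s, fitted_angles, fitted_t = bfm.fit(x, X_ind, max_iter=3)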
Example #3
# Fragment: im, h, w, landmarks and the dlib `shape` object are defined earlier in the original script.
for i, p in enumerate(shape.parts()):
    landmarks[i] = [p.x, p.y]
    im = cv2.circle(im, (p.x, p.y), radius=3, color=(0, 0, 255), thickness=5)

bfm = MorphabelModel('Data/BFM/Out/BFM.mat')
x = mesh.transform.from_image(landmarks, h, w)
X_ind = bfm.kpt_ind

global colors
global triangles
fitted_sp, fitted_ep, fitted_s, fitted_angles, fitted_t = bfm.fit(x, X_ind, max_iter=200, isShow=False)
colors = bfm.generate_colors(np.random.rand(bfm.n_tex_para, 1))
colors = np.minimum(np.maximum(colors, 0), 1)

fitted_vertices = bfm.generate_vertices(fitted_sp, fitted_ep)
transformed_vertices = bfm.transform(fitted_vertices, fitted_s, fitted_angles, fitted_t)
image_vertices = mesh.transform.to_image(transformed_vertices, h, w)

vertices = image_vertices
triangles = bfm.triangles
colors = colors/np.max(colors)

# load data 
#C = sio.loadmat('Data/example1.mat')
#vertices = C['vertices']; colors = C['colors']; triangles = C['triangles']
#colors = colors/np.max(colors)
# transform
vertices = vertices - np.mean(vertices, 0)[np.newaxis, :]
s = 180/(np.max(vertices[:,1]) - np.min(vertices[:,1]))
R = mesh.transform.angle2matrix([0, 0, 0]) 
t = [0, 0, 0]
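The fragment ends with s, R and t computed but not yet applied; a minimal continuation sketch, assuming face3d's mesh.transform.similarity_transform and an output resolution of h = w = 256:

# Sketch: apply the similarity transform and render (resolution and variable reuse are assumptions).
h = w = 256
transformed_vertices = mesh.transform.similarity_transform(vertices, s, R, t)
image_vertices = mesh.transform.to_image(transformed_vertices, h, w)
image = mesh.render.render_colors(image_vertices, triangles, colors, h, w)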
Example #4
File: 2_3dmm.py Project: Gzzgz/face3d
print('init bfm model success')

# --- 2. generate face mesh: vertices(represent shape) & colors(represent texture)
sp = bfm.get_shape_para('random')
ep = bfm.get_exp_para('random')
vertices = bfm.generate_vertices(sp, ep)

tp = bfm.get_tex_para('random')
colors = bfm.generate_colors(tp)
colors = np.minimum(np.maximum(colors, 0), 1)

# --- 3. transform vertices to proper position
s = 8e-04
angles = [10, 30, 20]
t = [0, 0, 0]
transformed_vertices = bfm.transform(vertices, s, angles, t)
projected_vertices = transformed_vertices.copy() # using standard camera & orthographic projection

# --- 4. render(3d obj --> 2d image)
# set prop of rendering
h = w = 256; c = 3
image_vertices = mesh.transform.to_image(projected_vertices, h, w)
image = mesh_cython.render.render_colors(image_vertices, bfm.triangles, colors, h, w)

# -------------------- Back:  2D image points and corresponding 3D vertex indices-->  parameters(pose, shape, expression) ------
## only use 68 key points to fit
x = projected_vertices[bfm.kpt_ind, :2] # 2d keypoints, which can be detected from an image
X_ind = bfm.kpt_ind # indices of the keypoints in the 3DMM; fixed

# fit
fitted_sp, fitted_ep, fitted_s, fitted_angles, fitted_t = bfm.fit(x, X_ind, max_iter = 3)
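To inspect the result of the fit, the fitted parameters can be turned back into an image, mirroring the reconstruction step of Example #1 (a sketch reusing the names from the snippet above):

# Sketch: rebuild the mesh from the fitted parameters and re-render it.
fitted_vertices = bfm.generate_vertices(fitted_sp, fitted_ep)
transformed_vertices = bfm.transform(fitted_vertices, fitted_s, fitted_angles, fitted_t)
image_vertices = mesh.transform.to_image(transformed_vertices, h, w)
fitted_image = mesh_cython.render.render_colors(image_vertices, bfm.triangles, colors, h, w)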
Example #5
	sp = bfm.get_shape_para('zero')
	# ep (the expression parameters) is provided by the enclosing scope in the original script:
	#ep = bfm.get_exp_para('zero')


	tp = bfm.get_tex_para('zero')
	colors = bfm.generate_colors(tp)
	colors = np.minimum(np.maximum(colors, 0), 1)

	# --- 3. transform vertices to proper position

	s = pose[-1]
	angles = pose[:3]
	t = np.r_[pose[3:5], [0]]

	vertices = bfm.generate_vertices(sp, ep)
	transformed_vertices = bfm.transform(vertices, s, angles, t)
	projected_vertices = transformed_vertices.copy() # using standard camera & orthographic projection

	# --- 4. render(3d obj --> 2d image)
	# set prop of rendering
	light_intensity = np.array([[1, 1, 1]])
	light_position = np.array([[0, 0, 300]])
	lit_colors = mesh_cython.light.add_light(transformed_vertices, bfm.triangles, colors, light_position, light_intensity)
	colors = (0.8 * lit_colors + 1.2 * colors) / 2.0
	image_vertices = mesh.transform.to_image(projected_vertices, h, w)
	image = mesh_cython.render.render_colors(image_vertices, bfm.triangles, colors, h, w, c)



	print(pose)
	image = np.clip(image, 0, 1)
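After clipping to [0, 1], the float image can be written to disk; a sketch assuming skimage (img_as_ubyte converts the floats to uint8, as Example #7 does) and a placeholder output path:

# Sketch: save the rendered image; the output filename is a placeholder.
from skimage import io, img_as_ubyte
io.imsave('rendered_face.png', img_as_ubyte(image))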
Example #6
    tp = bfm.get_tex_para('zero', std)
    colors = bfm.generate_colors(tp)
    colors = np.minimum(np.maximum(colors, 0), 1)

    # --- 3. transform vertices to proper position
    s = np.random.normal(0, 8, 1)
    t = np.random.normal(0, 6, 3)
    angles = np.random.normal(0, 10, 3)

    # s = 0
    # angles = np.array([0, 0, 0])
    # t = np.array([0, 0])

    pose = np.r_[angles.flatten(), t[:2].flatten(), s]
    transformed_vertices = bfm.transform(vertices, pose[-1], pose[:3],
                                         np.r_[pose[3:5], [0]])
    projected_vertices = transformed_vertices.copy()  # using standard camera & orthographic projection

    # --- 4. render(3d obj --> 2d image)
    # set prop of rendering

    light_intensity = np.array([[1, 1, 1]])
    light_position = np.array([[0, 0, 300]])
    lit_colors = mesh_cython.light.add_light(transformed_vertices,
                                             bfm.triangles, colors,
                                             light_position, light_intensity)
    colors = (0.8 * lit_colors + 1.2 * colors) / 2.0
    image_vertices = mesh.transform.to_image(projected_vertices, h, w)
    image = mesh_cython.render.render_colors(image_vertices, bfm.triangles,
                                             colors, h, w, c, BG)
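The pose vector assembled above packs the three rotation angles, the x/y translation and the scale into one array; a sketch of unpacking it back into the same pieces (matching the layout used in Examples #5 and #6):

# Sketch: invert np.r_[angles, t[:2], s].
angles = pose[:3]            # rotation angles
t = np.r_[pose[3:5], [0]]    # translation, z fixed to 0
s = pose[-1]                 # scale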
Example #7
import os

import cv2
import dlib
import numpy as np
from skimage import io, img_as_ubyte
from tqdm import tqdm

# mesh_numpy and MorphabelModel are assumed to come from the face3d package used elsewhere in this project.
from face3d import mesh_numpy
from face3d.morphable_model import MorphabelModel


def main(args):
    with open(args.img_list) as f:
        img_list = [x.strip() for x in f.readlines()]
    landmark_list = []
    if not os.path.exists(args.save_dir):
        os.mkdir(args.save_dir)
    if not os.path.exists(args.save_lmk_dir):
        os.mkdir(args.save_lmk_dir)

    for img_idx, img_fp in enumerate(tqdm(img_list)):
        im = cv2.imread(os.path.join(args.img_prefix, img_fp), 1)
        gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
        h, w, c = im.shape

        detector = dlib.get_frontal_face_detector()
        predictor = dlib.shape_predictor(
            "face3d/models/shape_predictor_68_face_landmarks.dat")

        rects = detector(gray, 1)
        shape = predictor(gray, rects[0])
        tl_corner_x = rects[0].center().x - rects[0].width() / 2
        tl_corner_y = rects[0].center().y - rects[0].height() / 2
        br_corner_x = rects[0].center().x + rects[0].width() / 2
        br_corner_y = rects[0].center().y + rects[0].height() / 2
        rects = [(tl_corner_x, tl_corner_y), (br_corner_x, br_corner_y)]
        landmarks = np.zeros((68, 2))

        for i, p in enumerate(shape.parts()):
            landmarks[i] = [p.x, p.y]
            im = cv2.circle(im, (p.x, p.y),
                            radius=3,
                            color=(0, 0, 255),
                            thickness=5)

        bfm = MorphabelModel('face3d/Data/BFM/Out/BFM.mat')
        x = mesh_numpy.transform.from_image(landmarks, h, w)
        X_ind = bfm.kpt_ind

        fitted_sp, fitted_ep, fitted_s, fitted_angles, fitted_t = bfm.fit(
            x, X_ind, max_iter=200, isShow=False)
        colors = bfm.generate_colors(np.random.rand(bfm.n_tex_para, 1))
        colors = np.minimum(np.maximum(colors, 0), 1)

        fitted_vertices = bfm.generate_vertices(fitted_sp, fitted_ep)
        transformed_vertices = bfm.transform(fitted_vertices, fitted_s,
                                             fitted_angles, fitted_t)
        image_vertices = mesh_numpy.transform.to_image(transformed_vertices, h,
                                                       w)

        triangles = bfm.triangles
        colors = colors / np.max(colors)

        attribute = colors
        color_image = mesh_numpy.render.render_colors(image_vertices,
                                                      triangles,
                                                      attribute,
                                                      h,
                                                      w,
                                                      c=3)
        io.imsave(os.path.join(args.save_lmk_dir,
                               str(img_idx) + ".png"),
                  img_as_ubyte(color_image))
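main() only reads the img_list, img_prefix, save_dir and save_lmk_dir attributes of args; a hedged sketch of an argparse entry point that would satisfy it (the flag names and help strings are assumptions, only the attribute names come from the function body):

# Sketch: a possible entry point; flag names are assumptions.
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--img_list', help='text file listing one image path per line')
    parser.add_argument('--img_prefix', default='', help='directory prepended to each listed path')
    parser.add_argument('--save_dir', help='output directory')
    parser.add_argument('--save_lmk_dir', help='output directory for the rendered color images')
    main(parser.parse_args())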
Example #8
class MaskRenderer:
    def __init__(self, model_dir, render_only=False):
        self.bfm = MorphabelModel(osp.join(model_dir, 'BFM.mat'))
        self.index_ind = self.bfm.kpt_ind
        uv_coords = face3d.morphable_model.load.load_uv_coords(
            osp.join(model_dir, 'BFM_UV.mat'))
        self.uv_size = (224, 224)
        self.mask_stxr = 0.1
        self.mask_styr = 0.33
        self.mask_etxr = 0.9
        self.mask_etyr = 0.7
        self.tex_h, self.tex_w, self.tex_c = self.uv_size[1], self.uv_size[0], 3
        texcoord = np.zeros_like(uv_coords)
        texcoord[:, 0] = uv_coords[:, 0] * (self.tex_h - 1)
        texcoord[:, 1] = uv_coords[:, 1] * (self.tex_w - 1)
        texcoord[:, 1] = self.tex_w - texcoord[:, 1] - 1
        self.texcoord = np.hstack((texcoord, np.zeros((texcoord.shape[0], 1))))
        self.X_ind = self.bfm.kpt_ind
        if not render_only:
            from image_3d68 import Handler
            self.if3d68_handler = Handler(osp.join(model_dir, 'if1k3d68'),
                                          0,
                                          192,
                                          ctx_id=0)

    def transform(self, shape3D, R):
        s = 1.0
        shape3D[:2, :] = shape3D[:2, :]
        shape3D = s * np.dot(R, shape3D)
        return shape3D

    def preprocess(self, vertices, w, h):
        R1 = mesh.transform.angle2matrix([0, 180, 180])
        t = np.array([-w // 2, -h // 2, 0])
        vertices = vertices.T
        vertices += t
        vertices = self.transform(vertices.T, R1).T
        return vertices

    def project_to_2d(self, vertices, s, angles, t):
        transformed_vertices = self.bfm.transform(vertices, s, angles, t)
        projected_vertices = transformed_vertices.copy()  # using standard camera & orthographic projection
        return projected_vertices[self.bfm.kpt_ind, :2]

    def params_to_vertices(self, params, H, W):
        fitted_sp, fitted_ep, fitted_s, fitted_angles, fitted_t = params
        fitted_vertices = self.bfm.generate_vertices(fitted_sp, fitted_ep)
        transformed_vertices = self.bfm.transform(fitted_vertices, fitted_s,
                                                  fitted_angles, fitted_t)
        transformed_vertices = self.preprocess(transformed_vertices.T, W, H)
        image_vertices = mesh.transform.to_image(transformed_vertices, H, W)
        return image_vertices

    def build_params(self, face_image):
        landmark = self.if3d68_handler.get(face_image)
        if landmark is None:
            return None  # face not found
        landmark = landmark[:, :2]
        #print(landmark.shape, landmark.dtype)
        fitted_sp, fitted_ep, fitted_s, fitted_angles, fitted_t = self.bfm.fit(
            landmark, self.X_ind, max_iter=3)
        return [fitted_sp, fitted_ep, fitted_s, fitted_angles, fitted_t]

    def generate_mask_uv(self, mask, positions):
        uv_size = (self.uv_size[1], self.uv_size[0], 3)
        h, w, c = uv_size
        uv = np.zeros(shape=uv_size, dtype=np.uint8)
        stxr, styr = positions[0], positions[1]
        etxr, etyr = positions[2], positions[3]
        stx, sty = int(w * stxr), int(h * styr)
        etx, ety = int(w * etxr), int(h * etyr)
        height = ety - sty
        width = etx - stx
        mask = cv2.resize(mask, (width, height))
        uv[sty:ety, stx:etx] = mask
        return uv

    def render_mask(self,
                    face_image,
                    mask_image,
                    params,
                    auto_blend=True,
                    positions=[0.1, 0.33, 0.9, 0.7]):
        uv_mask_image = self.generate_mask_uv(mask_image, positions)
        h, w, c = face_image.shape
        image_vertices = self.params_to_vertices(params, h, w)
        output = (1 - mesh.render.render_texture(
            image_vertices, self.bfm.full_triangles, uv_mask_image,
            self.texcoord, self.bfm.full_triangles, h, w)) * 255
        output = output.astype(np.uint8)
        if auto_blend:
            mask_bd = (output == 255).astype(np.uint8)
            final = face_image * mask_bd + (1 - mask_bd) * output
            return final
        return output
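A minimal usage sketch for the class, assuming a hypothetical model_dir containing BFM.mat, BFM_UV.mat and the if1k3d68 models, plus placeholder paths for the face photo and the mask texture:

# Sketch: all paths are placeholders.
renderer = MaskRenderer('assets/bfm_models')
face = cv2.imread('face.jpg')
mask = cv2.imread('mask_texture.png')
params = renderer.build_params(face)   # fit 3DMM pose/shape/expression from detected landmarks
if params is not None:
    result = renderer.render_mask(face, mask, params)  # face image with the mask blended on
    cv2.imwrite('face_with_mask.jpg', result)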