Example #1
def img2tex(self, img, coeffs):
    """Unwrap the face pixels of img into texture space using the estimated coeffs."""
    renderer = mesh_renderer.get_mesh_renderer()
    # warp the mean head mesh to the estimated subject and expression shape
    head_mesh_warped = face3d.head_mesh(self.pix2face_data.head_mesh)
    head_mesh_warped.apply_coefficients(
        self.pix2face_data.subject_components,
        self.pix2face_data.expression_components, coeffs.subject_coeffs(),
        coeffs.expression_coeffs(0))
    # project the image onto the warped mesh to produce a floating-point texture map
    tex = face3d.image_to_texture_float(img, head_mesh_warped,
                                        coeffs.camera(0), renderer)
    return tex
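A minimal usage sketch, assuming coefficients have already been estimated for the image; `estimator` and its `estimate_coefficients` call are hypothetical stand-ins for an instance of the class that defines img2tex:

import numpy as np
from PIL import Image

img = np.array(Image.open('face.jpg'))         # input photo as an RGB uint8 array
coeffs = estimator.estimate_coefficients(img)  # hypothetical: yields subject/expression coeffs and a camera
tex = estimator.img2tex(img, coeffs)           # unwrap the observed face pixels into texture space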
Example #2
def composite_texture(self, img, subj_coeffs, expr_coeffs, camera, tex):
    """Render tex onto the warped head mesh and seamlessly blend the result into img."""
    renderer = mesh_renderer.get_mesh_renderer()
    head_mesh_warped = face3d.head_mesh(self.pix2face_data.head_mesh)
    head_mesh_warped.apply_coefficients(
        self.pix2face_data.subject_components,
        self.pix2face_data.expression_components, subj_coeffs, expr_coeffs)
    # render RGBA in [0,1]; channel 3 is the alpha (face coverage) mask
    render_walpha = face3d.texture_to_image_float(tex, head_mesh_warped,
                                                  camera, renderer)
    render_rgb = (render_walpha[:, :, 0:3] * 255).astype(np.uint8)
    render_mask = (render_walpha[:, :, 3] * 255).astype(np.uint8)
    # binarize the mask, dropping nearly-transparent pixels
    render_mask[render_mask < 10] = 0
    render_mask[render_mask > 0] = 255
    # mark opposite corners so the mask's bounding box spans the whole image,
    # keeping seamlessClone's center argument aligned with the image center
    render_mask[0, 0] = 255
    render_mask[-1, -1] = 255
    img_out = cv2.seamlessClone(render_rgb, img, render_mask,
                                (img.shape[1] // 2, img.shape[0] // 2),
                                cv2.NORMAL_CLONE)
    return img_out
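Combined with img2tex above, this enables a simple texture-transfer sketch; again, `estimator` and `estimate_coefficients` are hypothetical stand-ins for the enclosing class:

src = np.array(Image.open('src.jpg'))
dst = np.array(Image.open('dst.jpg'))
src_coeffs = estimator.estimate_coefficients(src)  # hypothetical estimation step
dst_coeffs = estimator.estimate_coefficients(dst)
tex = estimator.img2tex(src, src_coeffs)           # texture extracted from the source photo
out = estimator.composite_texture(dst, dst_coeffs.subject_coeffs(),
                                  dst_coeffs.expression_coeffs(0),
                                  dst_coeffs.camera(0), tex)
Image.fromarray(out).save('composited.jpg')

Example #3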
def render_coefficients(coeffs, pix2face_data, img_idx=0):
    """
    Returns an image of the face described by coeffs
    pix2face_data should be created by calling load_pix2face_data
    """
    # lazily create renderer (of which there should only be one.)
    renderer = mesh_renderer.get_mesh_renderer()

    texture_res = 64  # resolution not important as this will be a solid color texture
    green_tex = np.zeros((texture_res, texture_res, 3), np.uint8)
    green_tex[:, :, 1] = 255
    head_mesh_warped = face3d.head_mesh(pix2face_data.head_mesh)
    head_mesh_warped.apply_coefficients(pix2face_data.subject_components,
                                        pix2face_data.expression_components,
                                        coeffs.subject_coeffs(),
                                        coeffs.expression_coeffs(img_idx))
    meshes = head_mesh_warped.meshes()
    for mesh in meshes:
        mesh.set_texture(green_tex)
    renderer.set_ambient_weight(0.5)
    synth = renderer.render(meshes, coeffs.camera(img_idx))
    return synth
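A hedged end-to-end sketch tying this to load_pix2face_data (Example #5) and the estimation call from Example #4; the pncc and offsets arrays are assumed to have been produced by pix2face.test.test:

pix2face_data = load_pix2face_data()  # loads the head mesh, PCA matrices, and a coefficient estimator
coeffs, result = pix2face_data.coeff_estimator.estimate_coefficients_perspective(
    ['img0'], [pncc], [offsets])      # pncc/offsets assumed precomputed by the network
synth = render_coefficients(coeffs, pix2face_data, img_idx=0)  # solid-green render of the fitted face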
Example #4
def run_pipeline():
	# Read pipeline type from command line
	parser = argparse.ArgumentParser()
	parser.add_argument('--pipeline', help='name of the pipeline configuration to run', default='default')
	args = parser.parse_args()

	# Read JSON config
	with open('/pix2face/config.json') as json_file:
		data = json.load(json_file)
	pipeline_config = data["pipelines"][args.pipeline]

	# Load pretrained model
	cuda_device = 0
	model = pix2face.test.load_pretrained_model(cuda_device=cuda_device)

	pvr_data_dir = '/pix2face/lib/face3d/data_3DMM/'
	debug_dir = ''
	debug_mode = False
	num_subject_coeffs = 199  # max 199
	num_expression_coeffs = 29  # max 29

	# load needed data files
	head_mesh = face3d.head_mesh(pvr_data_dir)
	subject_components = np.load(os.path.join(pvr_data_dir, 'pca_components_subject.npy'))
	expression_components = np.load(os.path.join(pvr_data_dir, 'pca_components_expression.npy'))
	subject_ranges = np.load(os.path.join(pvr_data_dir, 'pca_coeff_ranges_subject.npy'))
	expression_ranges = np.load(os.path.join(pvr_data_dir, 'pca_coeff_ranges_expression.npy'))

	# keep only the PCA components that we will be estimating
	subject_components = vxl.vnl.matrix(subject_components[0:num_subject_coeffs,:])
	expression_components = vxl.vnl.matrix(expression_components[0:num_expression_coeffs,:])
	subject_ranges = vxl.vnl.matrix(subject_ranges[0:num_subject_coeffs,:])
	expression_ranges = vxl.vnl.matrix(expression_ranges[0:num_expression_coeffs,:])

	# create rendering object (encapsulates OpenGL context)
	renderer = face3d.mesh_renderer()
	# create coefficient estimator
	coeff_estimator = face3d.media_coefficient_from_PNCC_and_offset_estimator(
		head_mesh, subject_components, expression_components,
		subject_ranges, expression_ranges, debug_mode, debug_dir)

	data_dir = pipeline_config["inputDir"]
	output_dir = pipeline_config["outputDir"]
	directories = os.listdir(data_dir)
	for directory in directories:
		out_dir = os.path.join(output_dir, directory)
		os.makedirs(out_dir, exist_ok=True)  # ensure the output directory exists before saving
		files = os.listdir(os.path.join(data_dir, directory))
		for file_item in files:
			file_path, ext_path = os.path.splitext(file_item)  # ext_path includes the leading '.'
			img_fname = os.path.join(data_dir, directory, file_item)
			print(data_dir, directory, file_item)
			img = np.array(Image.open(img_fname))
			outputs = pix2face.test.test(model, [img,])
			pncc = outputs[0][0]
			offsets = outputs[0][1]
			pncc_rgb = pncc / 300.0 + 0.5       # normalized PNCC visualization (not used below)
			offsets_rgb = offsets / 60.0 + 0.5  # normalized offsets visualization (not used below)

			# Estimate Coefficients from PNCC and Offsets
			print('Estimating Coefficients..')
			img_ids = ['img0',]
			coeffs, result = coeff_estimator.estimate_coefficients_perspective(img_ids, [pncc,], [offsets,])

			# Print Yaw, Pitch, Roll of Head
			R_cam = np.array(coeffs.camera(0).rotation.as_matrix())  # rotation matrix of estimated camera
			R0 = np.diag((1,-1,-1))  # R0 is the rotation matrix of a frontal camera
			R_head = np.dot(R0,R_cam)
			yaw, pitch, roll = geometry_utils.matrix_to_Euler_angles(R_head, order='YXZ')
			print('yaw, pitch, roll = %0.1f, %0.1f, %0.1f (degrees)' % (np.rad2deg(yaw), np.rad2deg(pitch), np.rad2deg(roll)))

			# Render 3D-Jittered Images
			print('Rendering Jittered Images..')
			jitterer = face3d.media_jitterer_perspective([img,], coeffs, head_mesh, subject_components, expression_components, renderer, "")

			for entity in pipeline_config["entities"]:
				if entity["type"] == "emote":
					# manually alter expression
					new_expression_coeffs = np.zeros_like(coeffs.expression_coeffs(0))
					parameters = entity["parameters"]
					new_expression_coeffs[0] = parameters["anger"]
					new_expression_coeffs[1] = parameters["disgust"]
					new_expression_coeffs[2] = parameters["fear"]
					new_expression_coeffs[3] = parameters["happiness"]
					new_expression_coeffs[4] = parameters["sadness"]
					new_expression_coeffs[5] = parameters["surprise"]
					render_img = jitterer.render(coeffs.camera(0), coeffs.subject_coeffs(), new_expression_coeffs, subject_components, expression_components)
				elif entity["type"] == "pose":
					# manually alter pose
					delta_R = vxl.vgl.rotation_3d(geometry_utils.Euler_angles_to_quaternion(np.pi/3, 0, 0, order='YXZ'))
					cam = coeffs.camera(0)
					new_R = cam.rotation * delta_R
					new_cam = face3d.perspective_camera_parameters(cam.focal_len, cam.principal_point, new_R, cam.translation, cam.nx, cam.ny)
					render_img = jitterer.render(new_cam, coeffs.subject_coeffs(), coeffs.expression_coeffs(0), subject_components, expression_components)

				Image.fromarray(render_img[:,:,0:3]).save(os.path.join(os.path.join(output_dir, directory), file_path + "_" + entity["name"] + "." + ext_path))
			Image.fromarray(img).save(os.path.join(os.path.join(output_dir, directory), file_path + "_original." + ext_path))
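run_pipeline assumes a config.json shaped like the keys it reads; a hypothetical example is sketched in the comment below, followed by the usual entry point:

# hypothetical /pix2face/config.json, inferred from the fields accessed above:
# {
#   "pipelines": {
#     "default": {
#       "inputDir": "/data/input",
#       "outputDir": "/data/output",
#       "entities": [
#         {"name": "happy", "type": "emote",
#          "parameters": {"anger": 0, "disgust": 0, "fear": 0,
#                         "happiness": 2.0, "sadness": 0, "surprise": 0}},
#         {"name": "yaw60", "type": "pose"}
#       ]
#     }
#   }
# }

if __name__ == '__main__':
	run_pipeline()  # e.g. python run_pipeline.py --pipeline default

Example #5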
def load_pix2face_data(pvr_data_dir=None,
                       num_subject_coeffs=None,
                       num_expression_coeffs=None,
                       use_offsets_for_estimation=True):
    """
    Load PCA components and ranges.
    Returns a structure containing the following  matrices loaded as instances of vxl.vnl_matrix
    """
    if pvr_data_dir is None:
        # guess the pvr_data_dir
        this_dir = os.path.dirname(__file__)
        pvr_data_dir = os.path.join(this_dir, '../../face3d/data_3DMM')

    # load data files as numpy arrays
    head_mesh = face3d.head_mesh(pvr_data_dir)
    subject_components = np.load(
        os.path.join(pvr_data_dir, 'pca_components_subject.npy'))
    expression_components = np.load(
        os.path.join(pvr_data_dir, 'pca_components_expression.npy'))
    subject_ranges = np.load(
        os.path.join(pvr_data_dir, 'pca_coeff_ranges_subject.npy'))
    expression_ranges = np.load(
        os.path.join(pvr_data_dir, 'pca_coeff_ranges_expression.npy'))

    if num_subject_coeffs is None:
        num_subject_coeffs = subject_components.shape[0]
    if num_expression_coeffs is None:
        num_expression_coeffs = expression_components.shape[0]

    # keep only needed rows of subject and expression matrices and convert to vnl matrices
    subject_components = vxl.vnl.matrix(
        subject_components[0:num_subject_coeffs, :])
    subject_ranges = vxl.vnl.matrix(subject_ranges[0:num_subject_coeffs, :])
    expression_components = vxl.vnl.matrix(
        expression_components[0:num_expression_coeffs, :])
    expression_ranges = vxl.vnl.matrix(
        expression_ranges[0:num_expression_coeffs, :])

    debug_mode = False
    debug_dir = ""

    if use_offsets_for_estimation:
        coeff_estimator = \
            face3d.media_coefficient_from_PNCC_and_offset_estimator(head_mesh,
                                                                    subject_components, expression_components,
                                                                    subject_ranges, expression_ranges,
                                                                    debug_mode, debug_dir)
    else:
        coeff_estimator = \
            face3d.media_coefficient_from_PNCC_estimator(head_mesh,
                                                         subject_components, expression_components,
                                                         subject_ranges, expression_ranges,
                                                         debug_mode, debug_dir)

    # return everything in a Pix2FaceData structure
    return Pix2FaceData(head_mesh=head_mesh,
                        subject_components=subject_components,
                        subject_ranges=subject_ranges,
                        expression_components=expression_components,
                        expression_ranges=expression_ranges,
                        coeff_estimator=coeff_estimator,
                        use_offsets=use_offsets_for_estimation)
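Both knobs in the signature matter in practice; a short usage sketch of the two common variations:

# estimate from PNCC alone (no per-vertex offsets)
data_pncc_only = load_pix2face_data(use_offsets_for_estimation=False)

# or trade fit fidelity for speed by keeping fewer PCA modes
data_small = load_pix2face_data(num_subject_coeffs=50, num_expression_coeffs=10)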
Example #6
outputs = pix2face.test.test(model, [img,], cuda_device=cuda_device)
pncc = outputs[0][0]
offsets = outputs[0][1]
print('..Done')

pvr_data_dir = os.path.join(this_dir, '../face3d/data_3DMM/')
debug_dir = ''
debug_mode = False

num_subject_coeffs = 199  # max 199
num_expression_coeffs = 29  # max 29

# load needed data files
head_mesh = face3d.head_mesh(pvr_data_dir)
subject_components = np.load(
    os.path.join(pvr_data_dir, 'pca_components_subject.npy'))
expression_components = np.load(
    os.path.join(pvr_data_dir, 'pca_components_expression.npy'))
subject_ranges = np.load(
    os.path.join(pvr_data_dir, 'pca_coeff_ranges_subject.npy'))
expression_ranges = np.load(
    os.path.join(pvr_data_dir, 'pca_coeff_ranges_expression.npy'))

# keep only the PCA components that we will be estimating
subject_components = vxl.vnl.matrix(
    subject_components[0:num_subject_coeffs, :])
expression_components = vxl.vnl.matrix(
    expression_components[0:num_expression_coeffs, :])
subject_ranges = vxl.vnl.matrix(subject_ranges[0:num_subject_coeffs, :])
expression_ranges = vxl.vnl.matrix(
    expression_ranges[0:num_expression_coeffs, :])
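The fragment ends after building the vnl matrices; a sketch of the steps that typically follow, mirroring Example #4 (pncc and offsets were computed at the top of this example):

coeff_estimator = face3d.media_coefficient_from_PNCC_and_offset_estimator(
    head_mesh, subject_components, expression_components,
    subject_ranges, expression_ranges, debug_mode, debug_dir)
coeffs, result = coeff_estimator.estimate_coefficients_perspective(
    ['img0'], [pncc], [offsets])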