Example 1
def get_transformed_geometry(translation, rotation, scale):

    # Build a bent square in object space; three vertices lie on the z = 0 plane, and the fourth is bent out of it
    vertices_object = tf.constant(
        [[-1, -1, 0.], [-1, 1, 0], [1, 1, 0], [1, -1, -1.3]],
        dtype=tf.float32) * square_size / 2
    faces = [[0, 1, 2], [0, 2, 3]]

    # TODO: add an occluding triangle, and a non-planar meeting of faces

    vertices_object, faces = lighting.split_vertices_by_face(
        vertices_object, faces)

    # Convert vertices to homogeneous coordinates
    vertices_object = tf.concat(
        [vertices_object,
         tf.ones_like(vertices_object[:, -1:])], axis=1)

    # Transform vertices from object to world space: rotate around the z-axis, then scale and translate
    vertices_world = tf.matmul(
        vertices_object, matrices.rodrigues(
            [0., 0., rotation])) * scale + tf.concat([translation, [0.]],
                                                     axis=0)

    # Calculate vertex normals (since the mesh was split by face, these equal the face normals)
    normals_world = lighting.vertex_normals(vertices_world, faces)

    # Transform vertices from world to camera space; note that the camera points along the negative-z axis in camera space
    view_matrix = matrices.translation([-0.5, 0., -3.5
                                        ])  # translate it away from the camera
    vertices_camera = tf.matmul(vertices_world, view_matrix)

    # Transform vertices from camera to clip space
    projection_matrix = matrices.perspective_projection(
        near=0.1,
        far=20.,
        right=0.1,
        aspect=float(canvas_height) / canvas_width)
    vertices_clip = tf.matmul(vertices_camera, projection_matrix)

    # Define per-vertex colours: one triangle orange, the other green
    vertex_colours = tf.concat(
        [tf.ones([3, 3]) * [0.8, 0.5, 0.],
         tf.ones([3, 3]) * [0.5, 0.8, 0.]],
        axis=0)

    return vertices_clip, faces, normals_world, vertex_colours
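
A minimal usage sketch (not part of the original example): the clip-space geometry returned above can be drawn with dirt.rasterise, assuming the module-level constants canvas_width and canvas_height referenced in the function are defined, and ignoring the returned normals (which the full sample would use for lighting):

def render_square(translation, rotation, scale):
    # Rasterise the transformed square into an RGB image
    vertices_clip, faces, normals_world, vertex_colours = get_transformed_geometry(
        translation, rotation, scale)
    return dirt.rasterise(
        vertices=vertices_clip,
        faces=faces,
        vertex_colors=vertex_colours,
        background=tf.zeros([canvas_height, canvas_width, 3]),
        width=canvas_width,
        height=canvas_height,
        channels=3)
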
Example 2
def main():

    # Build the scene geometry, which is just an axis-aligned cube centred at the origin in world space
    cube_vertices_object = []
    cube_uvs = []
    cube_faces = []

    def add_quad(vertices, uvs):
        index = len(cube_vertices_object)
        cube_faces.extend([[index + 2, index + 1, index],
                           [index, index + 3, index + 2]])
        cube_vertices_object.extend(vertices)
        cube_uvs.extend(uvs)

    add_quad(vertices=[[-1, -1, 1], [1, -1, 1], [1, 1, 1], [-1, 1, 1]],
             uvs=[[0.1, 0.9], [0.9, 0.9], [0.9, 0.1], [0.1, 0.1]])  # front
    add_quad(vertices=[[-1, -1, -1], [1, -1, -1], [1, 1, -1], [-1, 1, -1]],
             uvs=[[1, 1], [0, 1], [0, 0], [1, 0]])  # back

    add_quad(vertices=[[1, 1, 1], [1, 1, -1], [1, -1, -1], [1, -1, 1]],
             uvs=[[0.3, 0.25], [0.6, 0.25], [0.6, 0.55], [0.3, 0.55]])  # right
    add_quad(vertices=[[-1, 1, 1], [-1, 1, -1], [-1, -1, -1], [-1, -1, 1]],
             uvs=[[0.4, 0.4], [0.5, 0.4], [0.5, 0.5], [0.4, 0.5]])  # left

    add_quad(vertices=[[-1, 1, -1], [1, 1, -1], [1, 1, 1], [-1, 1, 1]],
             uvs=[[0, 0], [2, 0], [2, 2], [0, 2]])  # top
    add_quad(vertices=[[-1, -1, -1], [1, -1, -1], [1, -1, 1], [-1, -1, 1]],
             uvs=[[0, 0], [2, 0], [2, 2], [0, 2]])  # bottom

    cube_vertices_object = np.asarray(cube_vertices_object, np.float32)
    cube_uvs = np.asarray(cube_uvs, np.float32)

    # Load the texture image
    texture = tf.cast(
        tf.image.decode_jpeg(
            tf.read_file(os.path.dirname(__file__) + '/cat.jpg')),
        tf.float32) / 255.

    # Convert vertices to homogeneous coordinates
    cube_vertices_object = tf.concat(
        [cube_vertices_object,
         tf.ones_like(cube_vertices_object[:, -1:])],
        axis=1)

    # Transform vertices from object to world space, by rotating around the vertical axis
    cube_vertices_world = tf.matmul(cube_vertices_object,
                                    matrices.rodrigues([0., 0.6, 0.]))

    # Calculate vertex normals (no vertex is shared between faces, so these equal the face normals)
    cube_normals_world = lighting.vertex_normals(cube_vertices_world,
                                                 cube_faces)

    # Transform vertices from world to camera space; note that the camera points along the negative-z axis in camera space
    view_matrix = matrices.compose(
        matrices.translation([0., -2.,
                              -3.2]),  # translate it away from the camera
        matrices.rodrigues([-0.5, 0., 0.])  # tilt the view downwards
    )
    cube_vertices_camera = tf.matmul(cube_vertices_world, view_matrix)

    # Transform vertices from camera to clip space
    projection_matrix = matrices.perspective_projection(
        near=0.1, far=20., right=0.1, aspect=float(frame_height) / frame_width)
    cube_vertices_clip = tf.matmul(cube_vertices_camera, projection_matrix)

    # The following function is applied to the G-buffer, which is a multi-channel image containing all the vertex attributes. It
    # uses these to calculate the shading (texture and lighting), and hence the final intensity, at each pixel
    def shader_fn(gbuffer, texture, light_direction):

        # Unpack the different attributes from the G-buffer
        mask = gbuffer[:, :, :1]
        uvs = gbuffer[:, :, 1:3]
        normals = gbuffer[:, :, 3:]

        # Sample the texture at locations corresponding to each pixel; this defines the unlit material color at each point
        unlit_colors = sample_texture(
            texture, uvs_to_pixel_indices(uvs,
                                          tf.shape(texture)[:2]))

        # Calculate a simple grey ambient lighting component
        ambient_contribution = unlit_colors * [0.4, 0.4, 0.4]

        # Calculate a diffuse (Lambertian) lighting component
        diffuse_contribution = lighting.diffuse_directional(
            tf.reshape(normals, [-1, 3]),
            tf.reshape(unlit_colors, [-1, 3]),
            light_direction,
            light_color=[0.6, 0.6, 0.6],
            double_sided=True)
        diffuse_contribution = tf.reshape(diffuse_contribution,
                                          [frame_height, frame_width, 3])

        # The final pixel intensities inside the shape are given by combining the ambient and diffuse components;
        # outside the shape, they are set to a uniform background color
        pixels = (diffuse_contribution +
                  ambient_contribution) * mask + [0., 0., 0.3] * (1. - mask)

        return pixels

    # Render the G-buffer channels (mask, UVs, and normals at each pixel), then perform the deferred shading calculation
    # In general, any tensor required by shader_fn and with respect to which we need derivatives should be included in
    # shader_additional_inputs; although in this example they are constant, we pass the texture and lighting direction through this route as an illustration
    light_direction = tf.linalg.l2_normalize([1., -0.3, -0.5])
    pixels = dirt.rasterise_deferred(
        vertices=cube_vertices_clip,
        vertex_attributes=tf.concat(
            [
                tf.ones_like(cube_vertices_object[:, :1]),  # mask
                cube_uvs,  # texture coordinates
                cube_normals_world  # normals
            ],
            axis=1),
        faces=cube_faces,
        background_attributes=tf.zeros([frame_height, frame_width, 6]),
        shader_fn=shader_fn,
        shader_additional_inputs=[texture, light_direction])

    save_pixels = tf.write_file(
        'textured.jpg', tf.image.encode_jpeg(tf.cast(pixels * 255, tf.uint8)))

    session = tf.Session(config=tf.ConfigProto(gpu_options=tf.GPUOptions(
        allow_growth=True)))
    with session.as_default():

        save_pixels.run()
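
The shader above calls two helpers, sample_texture and uvs_to_pixel_indices, that are not shown in this snippet. A minimal sketch of what they might look like (nearest-neighbour sampling with wrapped pixel indices; the helpers in the original sample may well differ, e.g. by using bilinear filtering):

def uvs_to_pixel_indices(uvs, texture_shape):
    # One plausible mapping: u indexes columns and v indexes rows, with v
    # flipped because image rows run top-to-bottom while v runs bottom-to-top
    texture_shape = tf.cast(texture_shape, tf.float32)
    rows = (1. - uvs[..., 1:2]) * texture_shape[0]
    cols = uvs[..., 0:1] * texture_shape[1]
    return tf.concat([rows, cols], axis=-1)

def sample_texture(texture, pixel_indices):
    # Nearest-neighbour lookup; the modulo wraps indices so that UVs outside
    # [0, 1] (e.g. on the cube's top and bottom faces) tile the texture
    pixel_indices = tf.cast(pixel_indices, tf.int32) % tf.shape(texture)[:2]
    return tf.gather_nd(texture, pixel_indices)
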
Example 3
def main():  # hypothetical wrapper; the snippet's original enclosing def line is missing
    n_tex_para = 40

    mat_data = sio.loadmat(mat_filename)

    shape_param = tf.constant(mat_data['Shape_Para'], dtype=tf.float32)
    shape_param = tf.expand_dims(shape_param, 0)
    exp_param = tf.constant(mat_data['Exp_Para'], dtype=tf.float32)
    exp_param = tf.expand_dims(exp_param, 0)
    tex_param = tf.constant(mat_data['Tex_Para'][:n_tex_para, :],
                            dtype=tf.float32)
    tex_param = tf.expand_dims(tex_param, 0)
    color_param = tf.constant(mat_data['Color_Para'], dtype=tf.float32)
    color_param = tf.expand_dims(color_param, 0)
    illum_param = tf.constant(mat_data['Illum_Para'], dtype=tf.float32)
    illum_param = tf.expand_dims(illum_param, 0)
    pose_param = tf.constant(mat_data['Pose_Para'], dtype=tf.float32)
    pose_param = tf.expand_dims(pose_param, 0)

    tf_bfm = TfMorphableModel(model_path='../../examples/Data/BFM/Out/BFM.mat',
                              n_tex_para=n_tex_para)

    vertices = tf_bfm.get_vertices(shape_param=shape_param,
                                   exp_param=exp_param,
                                   batch_size=1)

    from dirt import lighting

    vertex_norm = lighting.vertex_normals(vertices, tf_bfm.triangles)
    texture = tf_bfm.get_vertex_colors(tex_param, color_param, illum_param,
                                       -vertex_norm, 1)
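
This snippet ends after computing the per-vertex colors; a short, hypothetical continuation that evaluates them with a TF1 session might look like this:

with tf.Session() as sess:
    vertices_np, colors_np = sess.run([vertices, texture])
    print(vertices_np.shape)  # expected [1, n_vertices, 3] for batch size 1
    print(colors_np.shape)    # per-vertex colors, with the same leading dimensions
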
Example 4
def dirt_rendering_orthographic( vertices, faces, reflectances, \
                    frame_width, frame_height, cx, cy, vertices_to_keep_track = None ):

    if vertices.shape[-1] == 2:
        vertices = tf.concat(
            [vertices, tf.zeros_like(vertices[..., -1:])], axis=-1)

    vertical_flip = False
    if frame_height < 0:
        frame_height = -frame_height
        vertical_flip = True

    if reflectances is None:
        reflectances = tf.ones_like(vertices)

    rendering_batch_size = tf.shape(vertices)[0]
    ones = tf.ones([rendering_batch_size], dtype=tf.float32)
    normals = lighting.vertex_normals(vertices, faces)

    bz_facemodel_vertices_object, bz_facemodel_vertex_colors, bz_facemodel_vertex_normals, bz_facemodel_faces = split_vertices_by_color_normal_face(
        vertices, reflectances, normals, faces)

    if vertices_to_keep_track is None:
        landmark_vertices = vertices
        landmark_normals = normals
    else:
        landmark_vertices, landmark_normals = get_landmarks(
            vertices, normals, vertices_to_keep_track)

    translation_parameters = np.array([[0, 0, 100]], dtype=np.float32)
    translation_parameters = tf.tile(
        tf.constant(translation_parameters, dtype=tf.float32),
        [rendering_batch_size, 1])
    rotation_parameters = np.array([[1e-10, 0, 0]], dtype=np.float32)
    rotation_parameters = tf.tile(
        tf.constant(rotation_parameters, dtype=tf.float32),
        [rendering_batch_size, 1])
    cx = tf.ones([rendering_batch_size], dtype=tf.float32) * cx
    cy = tf.ones([rendering_batch_size],
                 dtype=tf.float32) * (frame_height - cy)

    # Transform vertices from world to camera space; note that the camera points along the negative-z axis in camera space
    view_matrix = matrices.compose(
        matrices.translation(
            translation_parameters),  # translate it away from the camera
        matrices.rodrigues(rotation_parameters)  # apply the (near-zero) rotation
    )
    """
    with tf.Session() as sess: 
        _batch = sess.run( rendering_batch_size )
        _a = sess.run( matrices.translation(translation_parameters) )
        _b = sess.run( matrices.rodrigues(rotation_parameters)  )
        _c = sess.run( view_matrix )

        print("translation", np.transpose(_a[0]))
        print("rotation",  np.transpose(_b[0]))
        print("camera",  np.transpose(_c[0]))
    """

    # Convert vertices to homogeneous coordinates
    bz_facemodel_vertices_object = tf.concat([
        bz_facemodel_vertices_object,
        tf.ones_like(bz_facemodel_vertices_object[..., -1:])
    ],
                                             axis=-1)

    landmark_vertices = tf.concat(
        [landmark_vertices,
         tf.ones_like(landmark_vertices[..., -1:])],
        axis=-1)

    # Fixed viewpoint direction along the negative z-axis
    viewpoint_direction = np.array([0, 0, -1], np.float32)
    viewpoint_direction = np.expand_dims(viewpoint_direction, axis=0)
    viewpoint_direction = tf.constant(viewpoint_direction)
    viewpoint_direction = tf.tile(viewpoint_direction,
                                  [rendering_batch_size, 1])

    # Object space coincides with world space here, so no object-to-world rotation is applied
    bz_facemodel_vertices_world = bz_facemodel_vertices_object
    landmark_vertices_world = landmark_vertices

    # Tile the face list across the batch
    bz_facemodel_faces = tf.expand_dims(bz_facemodel_faces, axis=0)
    bz_facemodel_faces = tf.tile(bz_facemodel_faces,
                                 (rendering_batch_size, 1, 1))

    # Use the precomputed per-vertex normals; the mesh was pre-split, so no faces share a vertex
    bz_facemodel_normals_world = bz_facemodel_vertex_normals
    bz_facemodel_vertices_camera = tf.matmul(bz_facemodel_vertices_world,
                                             view_matrix)

    # Transform vertices from camera to clip space
    near = 10.0 * ones
    far = 200.0 * ones

    projection_matrix = orthographic_projection(near=near,
                                                far=far,
                                                w=frame_width,
                                                ones=ones,
                                                h=frame_height,
                                                cx=cx,
                                                cy=cy)
    bz_facemodel_vertices_clip = tf.matmul(bz_facemodel_vertices_camera,
                                           projection_matrix)
    landmark_vertices_vertices_clip = tf.matmul(landmark_vertices_world,
                                                projection_matrix)
    """
    with tf.Session() as sess: 
        
        _v0 = sess.run( bz_facemodel_vertices_world )
        _v1 = sess.run( bz_facemodel_vertices_camera )
        _v2 = sess.run( bz_facemodel_vertices_clip )


        print("vertices\n", (_v0[0]))
        print("vertices\n", (_v1[0]))
        print("vertices\n", (_v2[0]))
    """

    full_model_pixels = dirt.rasterise_batch(
        vertices=bz_facemodel_vertices_clip,
        faces=bz_facemodel_faces,
        vertex_colors=bz_facemodel_vertex_colors,
        background=tf.zeros(
            [rendering_batch_size, frame_height, frame_width, 3]),
        width=frame_width,
        height=frame_height,
        channels=3)

    # landmarks
    denom = landmark_vertices_vertices_clip[:, :, 3]
    landmark_points = landmark_vertices_vertices_clip[:, :, 0:2] / (
        denom[..., tf.newaxis])
    landmark_xpoint = (1.0 + landmark_points[:, :, 0:1]) * frame_width * 0.5
    landmark_ypoint = (1.0 + landmark_points[:, :, 1:2]) * frame_height * 0.5
    landmark_points = tf.concat([landmark_xpoint, landmark_ypoint], axis=-1)

    output_dictionary = {}
    output_dictionary["rendering_results"] = full_model_pixels
    output_dictionary["vertices"] = landmark_points
    output_dictionary["landmark_normals"] = landmark_normals
    """
    with tf.Session() as sess:
        _vertices = sess.run( bz_facemodel_vertices_clip )
        print( _vertices.shape )
        print( _vertices )
    """

    if vertical_flip:
        output_dictionary["rendering_results"] = output_dictionary[
            "rendering_results"][:, ::-1, :, :]

    return output_dictionary
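
The orthographic_projection helper used above is not shown. A hypothetical reconstruction, using the same row-vector convention as dirt's matrices module (vertices multiply the matrix on the left) and built so that the landmark recovery above, pixel = (1 + ndc) * size / 2, returns camera coordinates offset by (cx, cy); the sign and depth conventions of the original helper may differ:

def orthographic_projection(near, far, w, ones, h, cx, cy):
    # near, far, cx, cy and ones are [batch]-shaped; w and h are Python scalars
    zeros = tf.zeros_like(ones)
    return tf.stack([
        tf.stack([2. / w * ones, zeros, zeros, zeros], axis=-1),
        tf.stack([zeros, 2. / h * ones, zeros, zeros], axis=-1),
        tf.stack([zeros, zeros, 2. / (far - near), zeros], axis=-1),
        tf.stack([2. * cx / w - 1., 2. * cy / h - 1.,
                  -(far + near) / (far - near), ones], axis=-1),
    ], axis=1)  # [batch, 4, 4]; the last column leaves clip-space w at 1
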
Example 5
def dirt_rendering( vertices, faces, reflectances, \
                    spherical_harmonics_parameters, translation_parameters, camera_rotation_parameters, base_rotation, rotation_parameters, \
                    frame_width, frame_height, focal_length, cx, cy, vertices_indices ):

    rendering_batch_size = tf.shape(vertices)[0]
    vertices = tf.matmul(vertices,
                         matrices.rodrigues(rotation_parameters)[..., :3, :3])

    vertex_normals = normals = lighting.vertex_normals(vertices, faces)

    landmark_vertices, landmark_normals = get_landmarks(
        vertices, normals, vertices_indices)
    bz_facemodel_vertices_object, bz_facemodel_vertex_colors, bz_facemodel_vertex_normals, bz_facemodel_faces = split_vertices_by_color_normal_face(
        vertices, reflectances, normals, faces)

    # Transform vertices from world to camera space; note that the camera points along the negative-z axis in camera space
    view_matrix = matrices.compose(
        #        matrices.rodrigues(rotation_parameters),
        matrices.translation(-translation_parameters
                             ),  # translate it away from the camera
        matrices.rodrigues(
            camera_rotation_parameters),  # apply the camera rotation
        matrices.rodrigues(base_rotation))
    """

    view_matrix = matrices.compose(
        matrices.translation(translation_parameters),  # translate it away from the camera                
        matrices.rodrigues(camera_rotation_parameters),        # tilt the view downwards
        matrices.translation(-translation_parameters)  # translate it away from the camera        
    )
    """

    # Convert vertices to homogeneous coordinates
    bz_facemodel_vertices_object = tf.concat([
        bz_facemodel_vertices_object,
        tf.ones_like(bz_facemodel_vertices_object[..., -1:])
    ],
                                             axis=-1)

    landmark_vertices = tf.concat(
        [landmark_vertices,
         tf.ones_like(landmark_vertices[..., -1:])],
        axis=-1)
    """
    with tf.Session() as sess:
        _view_matrix = sess.run( view_matrix )
        print( _view_matrix )
    """

    # Fixed viewpoint direction along the positive z-axis
    viewpoint_direction = np.array([0, 0, 1], np.float32)
    viewpoint_direction = np.expand_dims(viewpoint_direction, axis=0)
    viewpoint_direction = tf.constant(viewpoint_direction)
    viewpoint_direction = tf.tile(viewpoint_direction,
                                  [rendering_batch_size, 1])

    # Object space coincides with world space here, so no object-to-world rotation is applied
    bz_facemodel_vertices_world = bz_facemodel_vertices_object
    landmark_vertices_world = landmark_vertices

    # Tile the face list across the batch
    bz_facemodel_faces = tf.expand_dims(bz_facemodel_faces, axis=0)
    bz_facemodel_faces = tf.tile(bz_facemodel_faces,
                                 (rendering_batch_size, 1, 1))

    # Use the precomputed per-vertex normals; the mesh was pre-split, so no faces share a vertex
    bz_facemodel_normals_world = bz_facemodel_vertex_normals
    landmark_vertices_normals = landmark_normals

    bz_facemodel_vertices_camera = tf.matmul(bz_facemodel_vertices_world,
                                             view_matrix)
    landmark_vertices_camera = tf.matmul(landmark_vertices_world, view_matrix)

    # Transform vertices from camera to clip space
    near = focal_length * 0.1  # arbitrary scale factors for the near/far clip planes
    far = focal_length * 100
    #    right = frame_width * 0.5 * 0.01
    #    projection_matrix = matrices.perspective_projection(near=near, far=1000., right=right, aspect=float(frame_height) / frame_width)
    projection_matrix = matrices.perspective_projection( near=near, far=far, fx = focal_length, fy=focal_length, \
        w = frame_width, h = frame_height, cx = cx, cy = cy )

    #    projection_matrix = tf.expand_dims( projection_matrix, axis = 0)
    #    projection_matrix = tf.tile( projection_matrix, (rendering_batch_size,1,1))

    bz_facemodel_vertices_clip = tf.matmul(bz_facemodel_vertices_camera,
                                           projection_matrix)
    landmark_vertices_vertices_clip = tf.matmul(landmark_vertices_camera,
                                                projection_matrix)

    #K = tf.constant(
    #    np.array( [ [focal_length,0,frame_width*0.5],[0,focal_length,frame_height*0.5],[0.0,0.0,1.0]] ), dtype = tf.float32)
    """
    ### if you want check projection matrix 
    with tf.Session() as sess:
        _landmarks = sess.run( landmark_vertices )    
        _view_matrix = sess.run( view_matrix )
        _projection_matrix = sess.run( projection_matrix )
        #_K = sess.run(K )
        _T = sess.run( matrices.translation(translation_parameters) )
        _R = sess.run( matrices.rodrigues(rotation_parameters)  )

        print(np.array2string(_T, separator=', ')) 
        print(np.array2string(_R, separator=', ')) 
        print(np.array2string(_landmarks, separator=', ')) 
        print(np.array2string(_view_matrix, separator=', ')) 
        #print(np.array2string(_K, separator=', ')) 
        print(np.array2string(_projection_matrix, separator=', ')) 
    """
    """
    # Calculate lighting, as combination of diffuse and ambient
    vertex_colors_lit = diffuse_directional_lights(
        bz_facemodel_normals_world, bz_facemodel_vertex_colors,
        light_direction=light_directions, viewpoint_direction=viewpoint_direction, light_color=light_colors
    )  * 1.0 #+ bz_facemodel_vertex_colors * 0.2
    """

    # geometry
    use_spherical_harmonics = False

    if use_spherical_harmonics:
        vertex_colors_lit = spherical_harmonics(
            bz_facemodel_normals_world,
            bz_facemodel_vertex_colors,
            spherical_harmonics_parameters,
            viewpoint_direction=viewpoint_direction)

        geometry_visualization_spherical_harmonics_parameters = np.zeros(
            [27], dtype=np.float32)
        geometry_visualization_spherical_harmonics_parameters[3:3 + 9] = 1.0
        geometry_visualization_spherical_harmonics_parameters = tf.constant(
            geometry_visualization_spherical_harmonics_parameters,
            dtype=tf.float32)
        geometry_visualization_vertex_colors_lit = spherical_harmonics( bz_facemodel_normals_world, tf.ones_like(bz_facemodel_vertex_colors), \
                                geometry_visualization_spherical_harmonics_parameters, viewpoint_direction=viewpoint_direction )
    else:
        light_directions = np.array([[0, 0, 1]])
        norm = np.linalg.norm(light_directions, axis=-1)
        light_directions = light_directions / norm[:, np.newaxis]
        light_directions = tf.constant(light_directions, dtype=tf.float32)
        light_directions = tf.tile(light_directions, (rendering_batch_size, 1))

        light_colors = tf.constant([1., 1., 1.], dtype=tf.float32)
        light_colors = tf.expand_dims(light_colors, axis=0)
        light_colors = tf.tile(light_colors, (rendering_batch_size, 1))

        geometry_visualization_vertex_colors_lit = diffuse_directional_lights( bz_facemodel_normals_world,  tf.ones_like(bz_facemodel_vertex_colors), \
                                    light_direction=light_directions, viewpoint_direction=viewpoint_direction, light_color=light_colors, double_sided = False ) * 1.0 #+ bz_facemodel_vertex_colors * 0.2
        vertex_colors_lit = diffuse_directional_lights( bz_facemodel_normals_world,  bz_facemodel_vertex_colors, light_direction=light_directions, \
                                            viewpoint_direction=viewpoint_direction, light_color=light_colors ) * 1.0 #+ bz_facemodel_vertex_colors * 0.2

    # reflectance
    reflectance_visualization_spherical_harmonics_parameters = np.zeros(
        [27], dtype=np.float32)
    reflectance_visualization_spherical_harmonics_parameters[0:3] = 3.0
    reflectance_visualization_spherical_harmonics_parameters = tf.constant(
        reflectance_visualization_spherical_harmonics_parameters,
        dtype=tf.float32)
    reflectance_visualization_vertex_colors_lit = spherical_harmonics( bz_facemodel_normals_world, bz_facemodel_vertex_colors, \
                                reflectance_visualization_spherical_harmonics_parameters, viewpoint_direction=viewpoint_direction )

    # illumination
    illumination_visualization_vertex_colors_lit = spherical_harmonics( bz_facemodel_normals_world, tf.ones_like(bz_facemodel_vertex_colors), \
                            spherical_harmonics_parameters, viewpoint_direction=viewpoint_direction )

    # depth
    depth_vertex_colors_lit = tf.concat([
        geometry_visualization_vertex_colors_lit[:, :, 0:1],
        bz_facemodel_normals_world[:, :, 2:3],
        bz_facemodel_vertices_camera[:, :, 2:3]
    ],
                                        axis=-1)

    # landmarks
    denom = landmark_vertices_vertices_clip[:, :, 3]
    landmark_points = landmark_vertices_vertices_clip[:, :, 0:2] / (
        denom[..., tf.newaxis])
    landmark_xpoint = (1.0 + landmark_points[:, :, 0:1]) * frame_width * 0.5
    landmark_ypoint = (1.0 - landmark_points[:, :, 1:2]) * frame_height * 0.5
    landmark_points = tf.concat([landmark_xpoint, landmark_ypoint], axis=-1)
    """
    with tf.Session() as sess:
        _AAA = sess.run( landmark_points )
        print(_AAA)
    """

    # landmark visibility
    landmark_visibility = tf.matmul(landmark_vertices_normals,
                                    viewpoint_direction[..., tf.newaxis])
    #visibility = tf.minimum( landmark_visibility+0.9999, 1.0)
    #visibility = tf.cast( visibility, tf.int32 )

    full_model_pixels = dirt.rasterise_batch(
        vertices=bz_facemodel_vertices_clip,
        faces=bz_facemodel_faces,
        vertex_colors=vertex_colors_lit,
        background=tf.zeros(
            [rendering_batch_size, frame_height, frame_width, 3]),
        width=frame_width,
        height=frame_height,
        channels=3)

    geometry_model_pixels = dirt.rasterise_batch(
        vertices=bz_facemodel_vertices_clip,
        faces=bz_facemodel_faces,
        vertex_colors=geometry_visualization_vertex_colors_lit,
        background=tf.zeros(
            [rendering_batch_size, frame_height, frame_width, 3]),
        width=frame_width,
        height=frame_height,
        channels=3)

    reflectance_model_pixels = dirt.rasterise_batch(
        vertices=bz_facemodel_vertices_clip,
        faces=bz_facemodel_faces,
        vertex_colors=tf.ones_like(
            reflectance_visualization_vertex_colors_lit),
        background=tf.zeros(
            [rendering_batch_size, frame_height, frame_width, 3]),
        width=frame_width,
        height=frame_height,
        channels=3)

    illumination_model_pixels = dirt.rasterise_batch(
        vertices=bz_facemodel_vertices_clip,
        faces=bz_facemodel_faces,
        vertex_colors=illumination_visualization_vertex_colors_lit,
        background=tf.zeros(
            [rendering_batch_size, frame_height, frame_width, 3]),
        width=frame_width,
        height=frame_height,
        channels=3)

    depth_maps = -dirt.rasterise_batch(
        vertices=bz_facemodel_vertices_clip,
        faces=bz_facemodel_faces,
        vertex_colors=depth_vertex_colors_lit,
        background=tf.zeros(
            [rendering_batch_size, frame_height, frame_width, 3]),
        width=frame_width,
        height=frame_height,
        channels=3)

    output1_dict = {}
    output1_dict["full_model_pixels"] = full_model_pixels
    output1_dict["vertices_clip"] = bz_facemodel_vertices_clip
    output1_dict["landmark_points"] = landmark_points
    output1_dict["landmark_visibility"] = landmark_visibility
    #output1_dict["depth_masks"] =  tf.clip_by_value( 5*( -depth_maps[:,:,:,0] ), 0, 1 ) # tf.stop_gradient( ???
    #output1_dict["depth_masks"] =   tf.stop_gradient( tf.clip_by_value( 100*( -depth_maps[:,:,:,0] ), 0, 1 ) )
    output1_dict["depth_maps"] = depth_maps[:, :, :, 2]
    output1_dict["geometry_model_pixels"] = geometry_model_pixels
    output1_dict["reflectance_model_pixels"] = reflectance_model_pixels
    output1_dict["illumination_model_pixels"] = illumination_model_pixels
    output1_dict["surface_normals"] = (1 + depth_maps[:, :, :, 1]) / 2.0
    output1_dict["vertex_normals"] = vertex_normals

    return output1_dict
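
The get_landmarks helper used in the last two examples is not shown. A minimal sketch, assuming vertices_indices is a 1-D tensor (or list) of landmark vertex indices into the batched vertex arrays:

def get_landmarks(vertices, normals, vertices_indices):
    # Gather the vertices and normals at the landmark indices, batched along axis 0
    landmark_vertices = tf.gather(vertices, vertices_indices, axis=1)
    landmark_normals = tf.gather(normals, vertices_indices, axis=1)
    return landmark_vertices, landmark_normals
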
Example 6
def main():

    # Build the scene geometry, which is just an axis-aligned cube centred at the origin in world space
    cube_vertices_object = []
    cube_uvs = []
    cube_faces = []

    def add_quad(vertices, uvs):
        index = len(cube_vertices_object)
        cube_faces.extend([[index + 2, index + 1, index],
                           [index, index + 3, index + 2]])
        cube_vertices_object.extend(vertices)
        cube_uvs.extend(uvs)

    add_quad(vertices=[[-1, -1, 1], [1, -1, 1], [1, 1, 1], [-1, 1, 1]],
             uvs=[[0.1, 0.9], [0.9, 0.9], [0.9, 0.1], [0.1, 0.1]])  # front
    add_quad(vertices=[[-1, -1, -1], [1, -1, -1], [1, 1, -1], [-1, 1, -1]],
             uvs=[[1, 1], [0, 1], [0, 0], [1, 0]])  # back

    add_quad(vertices=[[1, 1, 1], [1, 1, -1], [1, -1, -1], [1, -1, 1]],
             uvs=[[0.4, 0.35], [0.5, 0.35], [0.5, 0.45], [0.4, 0.45]])  # right
    add_quad(vertices=[[-1, 1, 1], [-1, 1, -1], [-1, -1, -1], [-1, -1, 1]],
             uvs=[[0.4, 0.4], [0.5, 0.4], [0.5, 0.5], [0.4, 0.5]])  # left

    add_quad(vertices=[[-1, 1, -1], [1, 1, -1], [1, 1, 1], [-1, 1, 1]],
             uvs=[[0, 0], [2, 0], [2, 2], [0, 2]])  # top
    add_quad(vertices=[[-1, -1, -1], [1, -1, -1], [1, -1, 1], [-1, -1, 1]],
             uvs=[[0, 0], [2, 0], [2, 2], [0, 2]])  # bottom

    cube_vertices_object = np.asarray(cube_vertices_object, np.float32)
    cube_uvs = np.asarray(cube_uvs, np.float32)

    # Load the texture image
    texture = tf.constant(
        imageio.imread(os.path.dirname(__file__) + '/cat.jpg'),
        dtype=tf.float32) / 255.

    # Convert vertices to homogeneous coordinates
    cube_vertices_object = tf.concat(
        [cube_vertices_object,
         tf.ones_like(cube_vertices_object[:, -1:])],
        axis=1)

    # Transform vertices from object to world space, by rotating around the vertical axis
    cube_vertices_world = tf.matmul(cube_vertices_object,
                                    matrices.rodrigues([0., 0.6, 0.]))

    # Calculate vertex normals (no vertex is shared between faces, so these equal the face normals)
    cube_normals_world = lighting.vertex_normals(cube_vertices_world,
                                                 cube_faces)

    # Transform vertices from world to camera space; note that the camera points along the negative-z axis in camera space
    view_matrix = matrices.compose(
        matrices.translation([0., -2.,
                              -3.2]),  # translate it away from the camera
        matrices.rodrigues([-0.5, 0., 0.])  # tilt the view downwards
    )
    cube_vertices_camera = tf.matmul(cube_vertices_world, view_matrix)

    # Transform vertices from camera to clip space
    projection_matrix = matrices.perspective_projection(
        near=0.1, far=20., right=0.1, aspect=float(frame_height) / frame_width)
    cube_vertices_clip = tf.matmul(cube_vertices_camera, projection_matrix)

    # Render the G-buffer channels (mask, UVs, and normals at each pixel) needed for deferred shading
    gbuffer_mask = dirt.rasterise(
        vertices=cube_vertices_clip,
        faces=cube_faces,
        vertex_colors=tf.ones_like(cube_vertices_object[:, :1]),
        background=tf.zeros([frame_height, frame_width, 1]),
        width=frame_width,
        height=frame_height,
        channels=1)[..., 0]
    background_value = -1.e4
    gbuffer_vertex_uvs = dirt.rasterise(
        vertices=cube_vertices_clip,
        faces=cube_faces,
        vertex_colors=tf.concat(
            [cube_uvs, tf.zeros_like(cube_uvs[:, :1])], axis=1),
        background=tf.ones([frame_height, frame_width, 3]) * background_value,
        width=frame_width,
        height=frame_height,
        channels=3)[..., :2]
    gbuffer_vertex_normals_world = dirt.rasterise(
        vertices=cube_vertices_clip,
        faces=cube_faces,
        vertex_colors=cube_normals_world,
        background=tf.ones([frame_height, frame_width, 3]) * background_value,
        width=frame_width,
        height=frame_height,
        channels=3)

    # Dilate the normals and UVs to ensure correct gradients on the silhouette
    gbuffer_mask = gbuffer_mask[:, :, None]
    gbuffer_vertex_normals_world_dilated = tf.nn.max_pool(
        gbuffer_vertex_normals_world[None, ...],
        ksize=[1, 3, 3, 1],
        strides=[1, 1, 1, 1],
        padding='SAME')[0]
    gbuffer_vertex_normals_world = gbuffer_vertex_normals_world * gbuffer_mask + gbuffer_vertex_normals_world_dilated * (
        1. - gbuffer_mask)
    gbuffer_vertex_uvs_dilated = tf.nn.max_pool(gbuffer_vertex_uvs[None, ...],
                                                ksize=[1, 3, 3, 1],
                                                strides=[1, 1, 1, 1],
                                                padding='SAME')[0]
    gbuffer_vertex_uvs = gbuffer_vertex_uvs * gbuffer_mask + gbuffer_vertex_uvs_dilated * (
        1. - gbuffer_mask)

    # Calculate the colour buffer, by sampling the texture according to the rasterised UVs
    gbuffer_colours = gbuffer_mask * sample_texture(
        texture, uvs_to_pixel_indices(gbuffer_vertex_uvs,
                                      tf.shape(texture)[:2]))

    # Calculate a simple grey ambient lighting component
    ambient_contribution = gbuffer_colours * [0.4, 0.4, 0.4]

    # Calculate a diffuse (Lambertian) lighting component
    light_direction = unit([1., -0.3, -0.5])
    diffuse_contribution = lighting.diffuse_directional(
        tf.reshape(gbuffer_vertex_normals_world, [-1, 3]),
        tf.reshape(gbuffer_colours, [-1, 3]),
        light_direction,
        light_color=[0.6, 0.6, 0.6],
        double_sided=True)
    diffuse_contribution = tf.reshape(diffuse_contribution,
                                      [frame_height, frame_width, 3])

    # Final pixels are given by combining the ambient and diffuse components
    pixels = diffuse_contribution + ambient_contribution

    session = tf.Session(config=tf.ConfigProto(gpu_options=tf.GPUOptions(
        allow_growth=True)))
    with session.as_default():

        pixels_eval = pixels.eval()
        imageio.imsave('textured.jpg', (pixels_eval * 255).astype(np.uint8))
Example 7
def get_vertex_normals(vertices, faces):
    """ Get vertex normals. [Vtx, Face_def] -> [Vtx_normals] """
    vtx_nrm = lighting.vertex_normals(vertices=vertices, faces=faces)

    return vtx_nrm
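
Example usage (a sketch): for a single counter-clockwise triangle in the z = 0 plane, every vertex normal equals the face normal:

triangle_vertices = tf.constant([[0., 0., 0.], [1., 0., 0.], [0., 1., 0.]])
triangle_faces = [[0, 1, 2]]
normals = get_vertex_normals(triangle_vertices, triangle_faces)
# each row of normals should be [0., 0., 1.] (up to winding convention)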