Example #1
def get_pixels_deferred_v2(transformed_vertices, faces, vertex_normals,
                           vertex_colours, light_intensity, background):

    # Per-vertex attributes: a constant one (the mask), then colours and normals
    vertex_attributes = tf.concat([
        tf.ones_like(transformed_vertices[:, :1]),
        vertex_colours,
        vertex_normals
    ], axis=1)
    background_attributes = tf.zeros([canvas_height, canvas_width, 1 + 3 + 3])

    def shader_fn(gbuffer, light_intensity, background):
        mask = gbuffer[..., :1]
        colours = gbuffer[..., 1:4]
        normals = gbuffer[..., 4:7]
        pixels = mask * calculate_shading(
            colours, normals, light_intensity) + (1. - mask) * background
        return pixels

    pixels = dirt.rasterise_deferred(background_attributes,
                                     transformed_vertices, vertex_attributes,
                                     faces, shader_fn,
                                     [light_intensity, background])

    return pixels
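
This example assumes a calculate_shading helper (and canvas_height/canvas_width globals) defined elsewhere in the sample. A minimal sketch of such a helper, assuming simple Lambertian shading under a hard-coded directional light; both the light direction and the shading model are illustrative, not the sample's actual implementation:

def calculate_shading(colours, normals, light_intensity):
    # Hypothetical stand-in: Lambertian (diffuse) shading under a fixed
    # directional light, scaled by the scalar light_intensity
    light_direction = tf.constant([0., 0., 1.], tf.float32)
    lambertian = tf.maximum(
        tf.reduce_sum(normals * light_direction, axis=-1, keepdims=True), 0.)
    return colours * lambertian * light_intensity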
Example #2
def main():

    # Build the scene geometry, which is just an axis-aligned cube centred at the origin in world space
    cube_vertices_object = []
    cube_uvs = []
    cube_faces = []

    def add_quad(vertices, uvs):
        # Append the quad's four vertices and UVs, plus two triangles covering it
        index = len(cube_vertices_object)
        cube_faces.extend([[index + 2, index + 1, index],
                           [index, index + 3, index + 2]])
        cube_vertices_object.extend(vertices)
        cube_uvs.extend(uvs)

    add_quad(vertices=[[-1, -1, 1], [1, -1, 1], [1, 1, 1], [-1, 1, 1]],
             uvs=[[0.1, 0.9], [0.9, 0.9], [0.9, 0.1], [0.1, 0.1]])  # front
    add_quad(vertices=[[-1, -1, -1], [1, -1, -1], [1, 1, -1], [-1, 1, -1]],
             uvs=[[1, 1], [0, 1], [0, 0], [1, 0]])  # back

    add_quad(vertices=[[1, 1, 1], [1, 1, -1], [1, -1, -1], [1, -1, 1]],
             uvs=[[0.3, 0.25], [0.6, 0.25], [0.6, 0.55], [0.3, 0.55]])  # right
    add_quad(vertices=[[-1, 1, 1], [-1, 1, -1], [-1, -1, -1], [-1, -1, 1]],
             uvs=[[0.4, 0.4], [0.5, 0.4], [0.5, 0.5], [0.4, 0.5]])  # left

    add_quad(vertices=[[-1, 1, -1], [1, 1, -1], [1, 1, 1], [-1, 1, 1]],
             uvs=[[0, 0], [2, 0], [2, 2], [0, 2]])  # top
    add_quad(vertices=[[-1, -1, -1], [1, -1, -1], [1, -1, 1], [-1, -1, 1]],
             uvs=[[0, 0], [2, 0], [2, 2], [0, 2]])  # bottom

    cube_vertices_object = np.asarray(cube_vertices_object, np.float32)
    cube_uvs = np.asarray(cube_uvs, np.float32)

    # Load the texture image
    texture = tf.cast(
        tf.image.decode_jpeg(
            tf.read_file(os.path.dirname(__file__) + '/cat.jpg')),
        tf.float32) / 255.

    # Convert vertices to homogeneous coordinates
    cube_vertices_object = tf.concat(
        [cube_vertices_object,
         tf.ones_like(cube_vertices_object[:, -1:])],
        axis=1)

    # Transform vertices from object to world space, by rotating around the vertical axis
    cube_vertices_world = tf.matmul(cube_vertices_object,
                                    matrices.rodrigues([0., 0.6, 0.]))

    # Calculate vertex normals; since no vertices are shared between quads, these are effectively per-face normals
    cube_normals_world = lighting.vertex_normals(cube_vertices_world,
                                                 cube_faces)

    # Transform vertices from world to camera space; note that the camera points along the negative-z axis in camera space
    view_matrix = matrices.compose(
        matrices.translation([0., -2., -3.2]),  # translate it away from the camera
        matrices.rodrigues([-0.5, 0., 0.])  # tilt the view downwards
    )
    cube_vertices_camera = tf.matmul(cube_vertices_world, view_matrix)

    # Transform vertices from camera to clip space
    projection_matrix = matrices.perspective_projection(
        near=0.1, far=20., right=0.1, aspect=float(frame_height) / frame_width)
    cube_vertices_clip = tf.matmul(cube_vertices_camera, projection_matrix)
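    # (the rasteriser takes homogeneous clip-space vertices and performs the
    # perspective divide itself)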

    # The following function is applied to the G-buffer, which is a multi-channel image containing all the vertex attributes.
    # It uses these to calculate the shading (texture and lighting) at each pixel, and hence the final pixel intensities
    def shader_fn(gbuffer, texture, light_direction):

        # Unpack the different attributes from the G-buffer
        mask = gbuffer[:, :, :1]
        uvs = gbuffer[:, :, 1:3]
        normals = gbuffer[:, :, 3:]

        # Sample the texture at locations corresponding to each pixel; this defines the unlit material color at each point
        unlit_colors = sample_texture(
            texture, uvs_to_pixel_indices(uvs, tf.shape(texture)[:2]))

        # Calculate a simple grey ambient lighting component
        ambient_contribution = unlit_colors * [0.4, 0.4, 0.4]

        # Calculate a diffuse (Lambertian) lighting component
        diffuse_contribution = lighting.diffuse_directional(
            tf.reshape(normals, [-1, 3]),
            tf.reshape(unlit_colors, [-1, 3]),
            light_direction,
            light_color=[0.6, 0.6, 0.6],
            double_sided=True)
        diffuse_contribution = tf.reshape(diffuse_contribution,
                                          [frame_height, frame_width, 3])

        # The final pixel intensities inside the shape are given by combining the ambient and diffuse components;
        # outside the shape, they are set to a uniform background color
        pixels = (diffuse_contribution +
                  ambient_contribution) * mask + [0., 0., 0.3] * (1. - mask)

        return pixels

    # Render the G-buffer channels (mask, UVs, and normals at each pixel), then perform the deferred shading calculation
    # In general, any tensor required by shader_fn and wrt which we need derivatives should be included in shader_additional_inputs;
    # although in this example they are constant, we pass the texture and lighting direction through this route as an illustration
    light_direction = tf.linalg.l2_normalize([1., -0.3, -0.5])
    pixels = dirt.rasterise_deferred(
        vertices=cube_vertices_clip,
        vertex_attributes=tf.concat(
            [
                tf.ones_like(cube_vertices_object[:, :1]),  # mask
                cube_uvs,  # texture coordinates
                cube_normals_world  # normals
            ],
            axis=1),
        faces=cube_faces,
        background_attributes=tf.zeros([frame_height, frame_width, 6]),
        shader_fn=shader_fn,
        shader_additional_inputs=[texture, light_direction])

    save_pixels = tf.write_file(
        'textured.jpg', tf.image.encode_jpeg(tf.cast(pixels * 255, tf.uint8)))

    session = tf.Session(config=tf.ConfigProto(gpu_options=tf.GPUOptions(
        allow_growth=True)))
    with session.as_default():

        save_pixels.run()
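
This example relies on two helpers, uvs_to_pixel_indices and sample_texture, defined elsewhere in the sample file (along with the frame_height/frame_width globals). A rough sketch of the interface they need to satisfy, assuming wrap-around UVs and nearest-neighbour lookup; the sample's own helpers may instead use bilinear filtering, which also yields useful derivatives with respect to the UVs:

def uvs_to_pixel_indices(uvs, texture_shape):
    # Map UVs (wrapping values outside [0, 1], as used for the top and bottom
    # faces above) to floating-point pixel indices; v indexes rows, u columns
    uvs = uvs % 1.0
    texture_shape = tf.cast(texture_shape, tf.float32)
    return tf.stack([
        uvs[..., 1] * (texture_shape[0] - 1),
        uvs[..., 0] * (texture_shape[1] - 1)
    ], axis=-1)

def sample_texture(texture, pixel_indices):
    # Nearest-neighbour lookup at each pixel's texture coordinates
    indices = tf.cast(tf.round(pixel_indices), tf.int32)
    return tf.gather_nd(texture, indices)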
Example #3
def main():

    # Build the scene geometry, which is just an axis-aligned cube centred at the origin in world space
    # We replicate vertices that are shared, so normals are effectively per-face instead of smoothed
    cube_vertices_object, cube_faces = build_cube()
    cube_vertices_object = tf.constant(cube_vertices_object, dtype=tf.float32)
    cube_vertices_object, cube_faces = lighting.split_vertices_by_face(cube_vertices_object, cube_faces)
    cube_vertex_colors = tf.ones_like(cube_vertices_object)

    # Convert vertices to homogeneous coordinates
    cube_vertices_object = tf.concat([
        cube_vertices_object,
        tf.ones_like(cube_vertices_object[:, -1:])
    ], axis=1)

    # Transform vertices from object to world space, by rotating around the vertical axis
    cube_vertices_world = tf.matmul(cube_vertices_object, matrices.rodrigues([0., 0.5, 0.]))

    # Calculate face normals; pre_split implies that no faces share a vertex
    cube_normals_world = lighting.vertex_normals_pre_split(cube_vertices_world, cube_faces)

    # Transform vertices from world to camera space; note that the camera points along the negative-z axis in camera space
    view_matrix = matrices.compose(
        matrices.translation([0., -1.5, -3.5]),  # translate it away from the camera
        matrices.rodrigues([-0.3, 0., 0.])  # tilt the view downwards
    )
    cube_vertices_camera = tf.matmul(cube_vertices_world, view_matrix)

    # Transform vertices from camera to clip space
    projection_matrix = matrices.perspective_projection(near=0.1, far=20., right=0.1, aspect=float(frame_height) / frame_width)
    cube_vertices_clip = tf.matmul(cube_vertices_camera, projection_matrix)

    # The following function is applied to the G-buffer, which is a multi-channel image containing all the vertex attributes.
    # It uses these to calculate the shading at each pixel, and hence the final pixel intensities
    def shader_fn(gbuffer, view_matrix, light_direction):

        # Unpack the different attributes from the G-buffer
        mask = gbuffer[:, :, :1]
        positions = gbuffer[:, :, 1:4]
        unlit_colors = gbuffer[:, :, 4:7]
        normals = gbuffer[:, :, 7:]

        # Calculate a simple grey ambient lighting component
        ambient_contribution = unlit_colors * [0.2, 0.2, 0.2]

        # Calculate a red diffuse (Lambertian) lighting component
        diffuse_contribution = lighting.diffuse_directional(
            tf.reshape(normals, [-1, 3]),
            tf.reshape(unlit_colors, [-1, 3]),
            light_direction, light_color=[1., 0., 0.], double_sided=False
        )
        diffuse_contribution = tf.reshape(diffuse_contribution, [frame_height, frame_width, 3])

        # Calculate a white specular (Phong) lighting component; in this row-vector
        # convention, the camera's world-space position is the last row of the
        # inverted view matrix
        camera_position_world = tf.linalg.inv(view_matrix)[3, :3]
        specular_contribution = lighting.specular_directional(
            tf.reshape(positions, [-1, 3]),
            tf.reshape(normals, [-1, 3]),
            tf.reshape(unlit_colors, [-1, 3]),
            light_direction, light_color=[1., 1., 1.],
            camera_position=camera_position_world,
            shininess=6., double_sided=False
        )
        specular_contribution = tf.reshape(specular_contribution, [frame_height, frame_width, 3])

        # The final pixel intensities inside the shape are given by combining the three lighting components;
        # outside the shape, they are set to a uniform background color. We clip the final values as the specular
        # component saturates some pixels
        pixels = tf.clip_by_value(
            (diffuse_contribution + specular_contribution + ambient_contribution) * mask + [0., 0., 0.3] * (1. - mask),
            0., 1.
        )

        return pixels

    # Render the G-buffer channels (mask, vertex positions, vertex colours, and normals at each pixel), then perform
    # the deferred shading calculation. In general, any tensor required by shader_fn and wrt which we need derivatives
    # should be included in shader_additional_inputs; although in this example they are constant, we pass the view
    # matrix and lighting direction through this route as an illustration
    light_direction = tf.linalg.l2_normalize([1., -0.3, -0.5])
    pixels = dirt.rasterise_deferred(
        vertices=cube_vertices_clip,
        vertex_attributes=tf.concat([
            tf.ones_like(cube_vertices_object[:, :1]),  # mask
            cube_vertices_world[:, :3],  # vertex positions
            cube_vertex_colors,  # vertex colors
            cube_normals_world  # normals
        ], axis=1),
        faces=cube_faces,
        background_attributes=tf.zeros([frame_height, frame_width, 10]),
        shader_fn=shader_fn,
        shader_additional_inputs=[view_matrix, light_direction]
    )

    pixels = tf.cast(pixels * 255, tf.uint8)

    session = tf.compat.v1.Session(config=tf.compat.v1.ConfigProto(gpu_options=tf.compat.v1.GPUOptions(allow_growth=True)))
    with session.as_default():
        image = pixels.eval()  # evaluate the graph to obtain a uint8 numpy array
        Image.fromarray(image).save("test_def.png")
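
Example #3 also calls a build_cube helper that is not shown. A possible sketch, returning the eight cube corners and twelve triangular faces (two per side); the winding order below is illustrative, and the real helper would orient every face consistently outward so that the single-sided (double_sided=False) lighting terms shade them correctly:

def build_cube():
    # Eight corners of an axis-aligned cube centred at the origin
    vertices = np.array([
        [-1., -1., -1.], [1., -1., -1.], [-1., 1., -1.], [1., 1., -1.],
        [-1., -1., 1.], [1., -1., 1.], [-1., 1., 1.], [1., 1., 1.]
    ], np.float32)
    # Two triangles per face, indexing into the corners above
    faces = [
        [0, 2, 3], [0, 3, 1],  # back (z = -1)
        [4, 5, 7], [4, 7, 6],  # front (z = +1)
        [0, 4, 6], [0, 6, 2],  # left (x = -1)
        [1, 3, 7], [1, 7, 5],  # right (x = +1)
        [2, 6, 7], [2, 7, 3],  # top (y = +1)
        [0, 1, 5], [0, 5, 4],  # bottom (y = -1)
    ]
    return vertices, faces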