Example #1
File: textured.py Project: won21kr/dirt
    def shader_fn(gbuffer, texture, light_direction):

        # Unpack the different attributes from the G-buffer
        mask = gbuffer[:, :, :1]
        uvs = gbuffer[:, :, 1:3]
        normals = gbuffer[:, :, 3:]

        # Sample the texture at locations corresponding to each pixel; this defines the unlit material color at each point
        unlit_colors = sample_texture(
            texture, uvs_to_pixel_indices(uvs,
                                          tf.shape(texture)[:2]))

        # Calculate a simple grey ambient lighting component
        ambient_contribution = unlit_colors * [0.4, 0.4, 0.4]

        # Calculate a diffuse (Lambertian) lighting component
        diffuse_contribution = lighting.diffuse_directional(
            tf.reshape(normals, [-1, 3]),
            tf.reshape(unlit_colors, [-1, 3]),
            light_direction,
            light_color=[0.6, 0.6, 0.6],
            double_sided=True)
        diffuse_contribution = tf.reshape(diffuse_contribution,
                                          [frame_height, frame_width, 3])

        # The final pixel intensities inside the shape are given by combining the ambient and diffuse components;
        # outside the shape, they are set to a uniform background color
        pixels = (diffuse_contribution +
                  ambient_contribution) * mask + [0., 0., 0.3] * (1. - mask)

        return pixels
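
sample_texture and uvs_to_pixel_indices are defined elsewhere in textured.py and not shown in this excerpt. A minimal sketch of what they might look like, assuming 'repeat' UV wrapping and bilinear filtering (the project's actual implementation may differ):

def uvs_to_pixel_indices(uvs, texture_shape):
    # Map UVs in [0, 1] to continuous (row, column) pixel indices, with u indexing
    # columns, v indexing rows, and 'repeat' wrapping for UVs outside [0, 1]
    texture_shape = tf.cast(texture_shape, tf.float32)
    uvs = uvs - tf.floor(uvs)
    return tf.stack([uvs[..., 1] * (texture_shape[0] - 1.),
                     uvs[..., 0] * (texture_shape[1] - 1.)], axis=-1)

def sample_texture(texture, pixel_indices):
    # Bilinearly interpolate the texture at the given continuous (row, column) indices
    floors = tf.floor(pixel_indices)
    fracs = pixel_indices - floors
    floors = tf.cast(floors, tf.int32)
    ceils = tf.minimum(floors + 1, tf.shape(texture)[:2] - 1)
    top_left = tf.gather_nd(texture, floors)
    top_right = tf.gather_nd(texture, tf.stack([floors[..., 0], ceils[..., 1]], axis=-1))
    bottom_left = tf.gather_nd(texture, tf.stack([ceils[..., 0], floors[..., 1]], axis=-1))
    bottom_right = tf.gather_nd(texture, ceils)
    top = top_left * (1. - fracs[..., 1:]) + top_right * fracs[..., 1:]
    bottom = bottom_left * (1. - fracs[..., 1:]) + bottom_right * fracs[..., 1:]
    return top * (1. - fracs[..., :1]) + bottom * fracs[..., :1]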
Example #2
def main():

    # Build the scene geometry, which is just an axis-aligned cube centred at the origin in world space
    # We replicate vertices that are shared, so normals are effectively per-face instead of smoothed
    cube_vertices_object, cube_faces = build_cube()
    cube_vertices_object = tf.constant(cube_vertices_object, dtype=tf.float32)
    cube_vertices_object, cube_faces = lighting.split_vertices_by_face(
        cube_vertices_object, cube_faces)
    cube_vertex_colors = tf.ones_like(cube_vertices_object)

    # Convert vertices to homogeneous coordinates
    cube_vertices_object = tf.concat(
        [cube_vertices_object,
         tf.ones_like(cube_vertices_object[:, -1:])],
        axis=1)

    # Transform vertices from object to world space, by rotating around the vertical axis
    cube_vertices_world = tf.matmul(cube_vertices_object,
                                    matrices.rodrigues([0., 0.5, 0.]))

    # Calculate face normals; pre_split implies that no faces share a vertex
    cube_normals_world = lighting.vertex_normals_pre_split(
        cube_vertices_world, cube_faces)

    # Transform vertices from world to camera space; note that the camera points along the negative-z axis in camera space
    view_matrix = matrices.compose(
        matrices.translation([0., -1.5, -3.5]),  # translate it away from the camera
        matrices.rodrigues([-0.3, 0., 0.])  # tilt the view downwards
    )
    cube_vertices_camera = tf.matmul(cube_vertices_world, view_matrix)

    # Transform vertices from camera to clip space
    projection_matrix = matrices.perspective_projection(
        near=0.1, far=20., right=0.1, aspect=float(frame_height) / frame_width)
    cube_vertices_clip = tf.matmul(cube_vertices_camera, projection_matrix)

    # Calculate lighting, as combination of diffuse and ambient
    vertex_colors_lit = lighting.diffuse_directional(
        cube_normals_world,
        cube_vertex_colors,
        light_direction=[1., 0., 0.],
        light_color=[1., 1., 1.]) * 0.8 + cube_vertex_colors * 0.2

    pixels = dirt.rasterise(vertices=cube_vertices_clip,
                            faces=cube_faces,
                            vertex_colors=vertex_colors_lit,
                            background=tf.zeros([frame_height, frame_width, 3]),
                            width=frame_width,
                            height=frame_height,
                            channels=3)

    session = tf.Session()
    with session.as_default():

        pixels_eval = pixels.eval()
        cv2.imshow('simple.py', pixels_eval[:, :, (2, 1, 0)])
        cv2.waitKey(0)
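
build_cube is not shown in this excerpt; a plausible sketch that returns the eight corners and twelve triangles of a side-2 cube centred at the origin (the project's actual vertex ordering and winding may differ):

import numpy as np

def build_cube():
    # Eight corners of an axis-aligned cube with side length 2, centred at the origin
    vertices = np.asarray(
        [[x, y, z] for z in (-1., 1.) for y in (-1., 1.) for x in (-1., 1.)],
        np.float32)
    # Two triangles per face, indexing into the vertex list above
    faces = np.asarray([
        [0, 1, 3], [0, 3, 2],  # back   (z = -1)
        [4, 6, 7], [4, 7, 5],  # front  (z = +1)
        [0, 4, 5], [0, 5, 1],  # bottom (y = -1)
        [2, 3, 7], [2, 7, 6],  # top    (y = +1)
        [0, 2, 6], [0, 6, 4],  # left   (x = -1)
        [1, 5, 7], [1, 7, 3],  # right  (x = +1)
    ], np.int32)
    return vertices, faces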
Example #3
def calculate_shading(colours, normals, light_intensity):

    ambient = colours * [0.4, 0.4, 0.4]

    light_direction = tf.linalg.l2_normalize([1., -0.3, -0.5])
    diffuse_contribution = lighting.diffuse_directional(
        tf.reshape(normals, [-1, 3]),
        tf.reshape(colours, [-1, 3]),
        light_direction,
        light_color=tf.constant([0., 1., 0.]) * light_intensity,
        double_sided=True)
    diffuse = tf.reshape(diffuse_contribution, colours.get_shape())

    return ambient + diffuse
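
A minimal usage sketch; the 64x64 resolution and flat geometry here are made up, and any [height, width, 3] colour and normal maps would work:

colours = tf.ones([64, 64, 3])  # flat white albedo
normals = tf.tile(tf.constant([[[0., 0., 1.]]]), [64, 64, 1])  # all normals facing +z
shaded = calculate_shading(colours, normals, light_intensity=2.)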
Example #4
File: deferred.py Project: won21kr/dirt
    def shader_fn(gbuffer, view_matrix, light_direction):

        # Unpack the different attributes from the G-buffer
        mask = gbuffer[:, :, :1]
        positions = gbuffer[:, :, 1:4]
        unlit_colors = gbuffer[:, :, 4:7]
        normals = gbuffer[:, :, 7:]

        # Calculate a simple grey ambient lighting component
        ambient_contribution = unlit_colors * [0.2, 0.2, 0.2]

        # Calculate a red diffuse (Lambertian) lighting component
        diffuse_contribution = lighting.diffuse_directional(
            tf.reshape(normals, [-1, 3]),
            tf.reshape(unlit_colors, [-1, 3]),
            light_direction,
            light_color=[1., 0., 0.],
            double_sided=False)
        diffuse_contribution = tf.reshape(diffuse_contribution,
                                          [frame_height, frame_width, 3])

        # Calculate a white specular (Phong) lighting component
        camera_position_world = tf.matrix_inverse(view_matrix)[3, :3]
        specular_contribution = lighting.specular_directional(
            tf.reshape(positions, [-1, 3]),
            tf.reshape(normals, [-1, 3]),
            tf.reshape(unlit_colors, [-1, 3]),
            light_direction,
            light_color=[1., 1., 1.],
            camera_position=camera_position_world,
            shininess=6.,
            double_sided=False)
        specular_contribution = tf.reshape(specular_contribution,
                                           [frame_height, frame_width, 3])

        # The final pixel intensities inside the shape are given by combining the three lighting components;
        # outside the shape, they are set to a uniform background color. We clip the final values as the specular
        # component saturates some pixels
        pixels = tf.clip_by_value(
            (diffuse_contribution + specular_contribution + ambient_contribution) * mask +
            [0., 0., 0.3] * (1. - mask),
            0., 1.)

        return pixels
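
This shader expects the mask, world-space positions, unlit colours and normals packed along the channel axis. A sketch of assembling such a G-buffer from separately rasterised channels, reusing the names from Examples #5 and #6 below:

gbuffer = tf.concat(
    [
        gbuffer_mask,                    # [frame_height, frame_width, 1]
        gbuffer_vertex_positions_world,  # [frame_height, frame_width, 3]
        gbuffer_vertex_colours_world,    # [frame_height, frame_width, 3]
        gbuffer_vertex_normals_world,    # [frame_height, frame_width, 3]
    ],
    axis=2)
pixels = shader_fn(gbuffer, view_matrix, light_direction)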
Example #5
def main():

    # Build the scene geometry, which is just an axis-aligned cube centred at the origin in world space
    cube_vertices_object = []
    cube_uvs = []
    cube_faces = []

    def add_quad(vertices, uvs):
        index = len(cube_vertices_object)
        cube_faces.extend([[index + 2, index + 1, index],
                           [index, index + 3, index + 2]])
        cube_vertices_object.extend(vertices)
        cube_uvs.extend(uvs)

    add_quad(vertices=[[-1, -1, 1], [1, -1, 1], [1, 1, 1], [-1, 1, 1]],
             uvs=[[0.1, 0.9], [0.9, 0.9], [0.9, 0.1], [0.1, 0.1]])  # front
    add_quad(vertices=[[-1, -1, -1], [1, -1, -1], [1, 1, -1], [-1, 1, -1]],
             uvs=[[1, 1], [0, 1], [0, 0], [1, 0]])  # back

    add_quad(vertices=[[1, 1, 1], [1, 1, -1], [1, -1, -1], [1, -1, 1]],
             uvs=[[0.4, 0.35], [0.5, 0.35], [0.5, 0.45], [0.4, 0.45]])  # right
    add_quad(vertices=[[-1, 1, 1], [-1, 1, -1], [-1, -1, -1], [-1, -1, 1]],
             uvs=[[0.4, 0.4], [0.5, 0.4], [0.5, 0.5], [0.4, 0.5]])  # left

    add_quad(vertices=[[-1, 1, -1], [1, 1, -1], [1, 1, 1], [-1, 1, 1]],
             uvs=[[0, 0], [2, 0], [2, 2], [0, 2]])  # top
    add_quad(vertices=[[-1, -1, -1], [1, -1, -1], [1, -1, 1], [-1, -1, 1]],
             uvs=[[0, 0], [2, 0], [2, 2], [0, 2]])  # bottom

    cube_vertices_object = np.asarray(cube_vertices_object, np.float32)
    cube_uvs = np.asarray(cube_uvs, np.float32)

    # Load the texture image
    texture = tf.constant(
        imageio.imread(os.path.dirname(__file__) + '/cat.jpg'),
        dtype=tf.float32) / 255.

    # Convert vertices to homogeneous coordinates
    cube_vertices_object = tf.concat(
        [cube_vertices_object,
         tf.ones_like(cube_vertices_object[:, -1:])],
        axis=1)

    # Transform vertices from object to world space, by rotating around the vertical axis
    cube_vertices_world = tf.matmul(cube_vertices_object,
                                    matrices.rodrigues([0., 0.6, 0.]))

    # Calculate normals; each quad has its own four vertices, so these are effectively per-face
    cube_normals_world = lighting.vertex_normals(cube_vertices_world,
                                                 cube_faces)

    # Transform vertices from world to camera space; note that the camera points along the negative-z axis in camera space
    view_matrix = matrices.compose(
        matrices.translation([0., -2., -3.2]),  # translate it away from the camera
        matrices.rodrigues([-0.5, 0., 0.])  # tilt the view downwards
    )
    cube_vertices_camera = tf.matmul(cube_vertices_world, view_matrix)

    # Transform vertices from camera to clip space
    projection_matrix = matrices.perspective_projection(
        near=0.1, far=20., right=0.1, aspect=float(frame_height) / frame_width)
    cube_vertices_clip = tf.matmul(cube_vertices_camera, projection_matrix)

    # Render the G-buffer channels (mask, UVs, and normals at each pixel) needed for deferred shading
    gbuffer_mask = dirt.rasterise(
        vertices=cube_vertices_clip,
        faces=cube_faces,
        vertex_colors=tf.ones_like(cube_vertices_object[:, :1]),
        background=tf.zeros([frame_height, frame_width, 1]),
        width=frame_width,
        height=frame_height,
        channels=1)[..., 0]
    background_value = -1.e4
    gbuffer_vertex_uvs = dirt.rasterise(
        vertices=cube_vertices_clip,
        faces=cube_faces,
        vertex_colors=tf.concat(
            [cube_uvs, tf.zeros_like(cube_uvs[:, :1])], axis=1),
        background=tf.ones([frame_height, frame_width, 3]) * background_value,
        width=frame_width,
        height=frame_height,
        channels=3)[..., :2]
    gbuffer_vertex_normals_world = dirt.rasterise(
        vertices=cube_vertices_clip,
        faces=cube_faces,
        vertex_colors=cube_normals_world,
        background=tf.ones([frame_height, frame_width, 3]) * background_value,
        width=frame_width,
        height=frame_height,
        channels=3)

    # Dilate the normals and UVs to ensure correct gradients on the silhouette
    gbuffer_mask = gbuffer_mask[:, :, None]
    gbuffer_vertex_normals_world_dilated = tf.nn.max_pool(
        gbuffer_vertex_normals_world[None, ...],
        ksize=[1, 3, 3, 1],
        strides=[1, 1, 1, 1],
        padding='SAME')[0]
    gbuffer_vertex_normals_world = (
        gbuffer_vertex_normals_world * gbuffer_mask +
        gbuffer_vertex_normals_world_dilated * (1. - gbuffer_mask))
    gbuffer_vertex_uvs_dilated = tf.nn.max_pool(gbuffer_vertex_uvs[None, ...],
                                                ksize=[1, 3, 3, 1],
                                                strides=[1, 1, 1, 1],
                                                padding='SAME')[0]
    gbuffer_vertex_uvs = (
        gbuffer_vertex_uvs * gbuffer_mask +
        gbuffer_vertex_uvs_dilated * (1. - gbuffer_mask))

    # Calculate the colour buffer, by sampling the texture according to the rasterised UVs
    gbuffer_colours = gbuffer_mask * sample_texture(
        texture, uvs_to_pixel_indices(gbuffer_vertex_uvs,
                                      tf.shape(texture)[:2]))

    # Calculate a simple grey ambient lighting component
    ambient_contribution = gbuffer_colours * [0.4, 0.4, 0.4]

    # Calculate a diffuse (Lambertian) lighting component
    light_direction = unit([1., -0.3, -0.5])
    diffuse_contribution = lighting.diffuse_directional(
        tf.reshape(gbuffer_vertex_normals_world, [-1, 3]),
        tf.reshape(gbuffer_colours, [-1, 3]),
        light_direction,
        light_color=[0.6, 0.6, 0.6],
        double_sided=True)
    diffuse_contribution = tf.reshape(diffuse_contribution,
                                      [frame_height, frame_width, 3])

    # Final pixels are given by combining the ambient and diffuse components
    pixels = diffuse_contribution + ambient_contribution

    session = tf.Session(config=tf.ConfigProto(gpu_options=tf.GPUOptions(
        allow_growth=True)))
    with session.as_default():

        pixels_eval = pixels.eval()
        imageio.imsave('textured.jpg', (pixels_eval * 255).astype(np.uint8))
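
The unit(...) helper used above (and again in Example #6) is not part of this excerpt; a plausible stand-in:

def unit(vector):
    # Normalise a vector to unit length
    return tf.convert_to_tensor(vector) / tf.norm(vector)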
Example #6
def main():

    # Build the scene geometry, which is just an axis-aligned cube centred at the origin in world space
    # We replicate vertices that are shared, so normals are effectively per-face instead of smoothed
    cube_vertices_object, cube_faces = build_cube()
    cube_vertices_object = tf.constant(cube_vertices_object, dtype=tf.float32)
    cube_vertices_object, cube_faces = lighting.split_vertices_by_face(
        cube_vertices_object, cube_faces)
    cube_vertex_colors = tf.ones_like(cube_vertices_object)

    # Convert vertices to homogeneous coordinates
    cube_vertices_object = tf.concat(
        [cube_vertices_object,
         tf.ones_like(cube_vertices_object[:, -1:])],
        axis=1)

    # Transform vertices from object to world space, by rotating around the vertical axis
    cube_vertices_world = tf.matmul(cube_vertices_object,
                                    matrices.rodrigues([0., 0.5, 0.]))

    # Calculate face normals; pre_split implies that no faces share a vertex
    cube_normals_world = lighting.vertex_normals_pre_split(
        cube_vertices_world, cube_faces)

    # Transform vertices from world to camera space; note that the camera points along the negative-z axis in camera space
    view_matrix = matrices.compose(
        matrices.translation([0., -1.5, -3.5]),  # translate it away from the camera
        matrices.rodrigues([-0.3, 0., 0.])  # tilt the view downwards
    )
    cube_vertices_camera = tf.matmul(cube_vertices_world, view_matrix)

    # Transform vertices from camera to clip space
    projection_matrix = matrices.perspective_projection(
        near=0.1, far=20., right=0.1, aspect=float(frame_height) / frame_width)
    cube_vertices_clip = tf.matmul(cube_vertices_camera, projection_matrix)

    # Render the G-buffer channels (vertex position, colour and normal at each pixel) needed for deferred shading
    gbuffer_vertex_positions_world = dirt.rasterise(
        vertices=cube_vertices_clip,
        faces=cube_faces,
        vertex_colors=cube_vertices_world[:, :3],
        background=tf.ones([frame_height, frame_width, 3]) * float('-inf'),
        width=frame_width,
        height=frame_height,
        channels=3)
    gbuffer_vertex_colours_world = dirt.rasterise(
        vertices=cube_vertices_clip,
        faces=cube_faces,
        vertex_colors=cube_vertex_colors,
        background=tf.zeros([frame_height, frame_width, 3]),
        width=frame_width,
        height=frame_height,
        channels=3)
    gbuffer_vertex_normals_world = dirt.rasterise(
        vertices=cube_vertices_clip,
        faces=cube_faces,
        vertex_colors=cube_normals_world,
        background=tf.ones([frame_height, frame_width, 3]) * float('-inf'),
        width=frame_width,
        height=frame_height,
        channels=3)

    # Dilate the position and normal channels at the silhouette boundary; this doesn't affect the image, but
    # ensures correct gradients for pixels just outside the silhouette
    background_mask = tf.cast(
        tf.equal(gbuffer_vertex_positions_world, float('-inf')), tf.float32)
    gbuffer_vertex_positions_world_dilated = tf.nn.max_pool(
        gbuffer_vertex_positions_world[None, ...],
        ksize=[1, 3, 3, 1],
        strides=[1, 1, 1, 1],
        padding='SAME')[0]
    gbuffer_vertex_positions_world = (
        gbuffer_vertex_positions_world * (1. - background_mask) +
        gbuffer_vertex_positions_world_dilated * background_mask)
    gbuffer_vertex_normals_world_dilated = tf.nn.max_pool(
        gbuffer_vertex_normals_world[None, ...],
        ksize=[1, 3, 3, 1],
        strides=[1, 1, 1, 1],
        padding='SAME')[0]
    gbuffer_vertex_normals_world = (
        gbuffer_vertex_normals_world * (1. - background_mask) +
        gbuffer_vertex_normals_world_dilated * background_mask)

    # Calculate a simple grey ambient lighting component
    ambient_contribution = gbuffer_vertex_colours_world * [0.2, 0.2, 0.2]

    # Calculate a red diffuse (Lambertian) lighting component
    light_direction = unit([1., -0.3, -0.5])
    diffuse_contribution = lighting.diffuse_directional(
        tf.reshape(gbuffer_vertex_normals_world, [-1, 3]),
        tf.reshape(gbuffer_vertex_colours_world, [-1, 3]),
        light_direction,
        light_color=[1., 0., 0.],
        double_sided=False)
    diffuse_contribution = tf.reshape(diffuse_contribution,
                                      [frame_height, frame_width, 3])

    # Calculate a white specular (Phong) lighting component
    camera_position_world = tf.matrix_inverse(view_matrix)[3, :3]
    specular_contribution = lighting.specular_directional(
        tf.reshape(gbuffer_vertex_positions_world, [-1, 3]),
        tf.reshape(gbuffer_vertex_normals_world, [-1, 3]),
        tf.reshape(gbuffer_vertex_colours_world, [-1, 3]),
        light_direction,
        light_color=[1., 1., 1.],
        camera_position=camera_position_world,
        shininess=6.,
        double_sided=False)
    specular_contribution = tf.reshape(specular_contribution,
                                       [frame_height, frame_width, 3])

    # Final pixels are given by combining ambient, diffuse, and specular components
    pixels = diffuse_contribution + specular_contribution + ambient_contribution

    session = tf.Session()
    with session.as_default():

        pixels_eval = pixels.eval()
        cv2.imshow('deferred.py', pixels_eval[:, :, (2, 1, 0)])
        cv2.waitKey(0)
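
The -inf background plus max-pooling is what makes the dilation work: the max over each 3x3 window ignores -inf background values and copies the largest neighbouring foreground value outwards, and the subsequent masked blend leaves foreground pixels unchanged. A toy illustration (the values are made up):

neg_inf = float('-inf')
toy = tf.constant([[neg_inf, 2., neg_inf],
                   [neg_inf, neg_inf, neg_inf]])[None, :, :, None]
dilated = tf.nn.max_pool(toy, ksize=[1, 3, 3, 1], strides=[1, 1, 1, 1],
                         padding='SAME')[0, :, :, 0]
# every 3x3 window sees the single foreground value, so
# dilated evaluates to [[2., 2., 2.], [2., 2., 2.]]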