Example #1
def get_pixels_deferred_v1(transformed_vertices, faces, vertex_normals,
                           vertex_colours, light_intensity, background):

    # This is a naive implementation of deferred shading that gives incorrect gradients; see
    # get_pixels_deferred_v2 below for a correct implementation!

    gbuffer_mask = dirt.rasterise(
        vertices=transformed_vertices,
        faces=faces,
        vertex_colors=tf.ones_like(transformed_vertices[:, :1]),
        background=tf.zeros([canvas_height, canvas_width, 1]),
        width=canvas_width,
        height=canvas_height,
        channels=1)[..., 0]
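    # A large negative background value ensures the max-pool dilation below always
    # prefers genuine surface values over background pixels at the silhouette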
    background_value = -1.e4
    gbuffer_vertex_colours_world = dirt.rasterise(
        vertices=transformed_vertices,
        faces=faces,
        vertex_colors=vertex_colours,
        background=tf.ones([canvas_height, canvas_width, 3]) * background,
        width=canvas_width,
        height=canvas_height,
        channels=3)
    gbuffer_vertex_normals_world = dirt.rasterise(
        vertices=transformed_vertices,
        faces=faces,
        vertex_colors=vertex_normals,
        background=tf.ones([canvas_height, canvas_width, 3]) * background_value,
        width=canvas_width,
        height=canvas_height,
        channels=3)

    # Dilate the normals to ensure correct gradients on the silhouette
    gbuffer_mask = gbuffer_mask[:, :, None]
    gbuffer_vertex_normals_world_dilated = tf.nn.max_pool(
        gbuffer_vertex_normals_world[None, ...],
        ksize=[1, 3, 3, 1],
        strides=[1, 1, 1, 1],
        padding='SAME')[0]
    gbuffer_vertex_normals_world = (
        gbuffer_vertex_normals_world * gbuffer_mask +
        gbuffer_vertex_normals_world_dilated * (1. - gbuffer_mask))

    pixels = gbuffer_mask * calculate_shading(
        gbuffer_vertex_colours_world, gbuffer_vertex_normals_world,
        light_intensity) + (1. - gbuffer_mask) * background

    return pixels
Example #2
    def call(self, x):
        # N: number of vertices, F: number of faces
        vertices = x[0]  # (1 x N x 3), same for each batch element
        faces = tf.cast(x[1], tf.int32)  # (1 x F x 3), same for each batch element
        poses = x[2]  # (1 x 4 x 4), same for each batch element

        # Transform vertices from object space through camera space to clip space
        vert_obj, vert_3d, vert_clip, normals = self.transform_vertices(
            vertices[0], poses[0], faces[0])

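        # G-buffer layout (11 channels): 1 mask + 3 camera-space positions + 3 normals
        # + 4 homogeneous object-space positions (assuming vert_obj is N x 4)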
        gbuffer_temp = dirt.rasterise(
            background=tf.zeros([self.img_h, self.img_w, 11]),
            vertices=vert_clip,
            vertex_colors=tf.concat(
                [
                    tf.ones_like(vert_obj[:, :1]),  #1 mask
                    vert_3d,
                    normals,
                    vert_obj
                ],
                axis=1),
            faces=faces[0],
            height=self.img_h,
            width=self.img_w,
            channels=11)
        return tf.expand_dims(gbuffer_temp, axis=0)
Example #3
def render_colored(m_v, m_f, m_vc, width, height, camera_f, camera_c, bgcolor=np.zeros(3, dtype=np.float32),
                   num_channels=3, camera_t=np.zeros(3, dtype=np.float32), camera_rt=np.zeros(3, dtype=np.float32),
                   name=None):
    with ops.name_scope(name, "render", [m_v]) as name:
        assert num_channels == m_vc.shape[-1] == bgcolor.shape[0]

        projection_matrix = perspective_projection(
            camera_f, camera_c, width, height, .1, 10)
        # projection_matrix = matrices.perspective_projection(near=0.1, far=20., right=0.1, aspect=1.)

        view_matrix = matrices.compose(
            matrices.rodrigues(camera_rt.astype(np.float32)),
            matrices.translation(camera_t.astype(np.float32)),
        )

        bg = tf.tile(bgcolor.astype(np.float32)[np.newaxis, np.newaxis, :],
                     (height, width, 1))

        m_v = tf.cast(m_v, tf.float32)
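        # Append w = 1 to convert the vertices to homogeneous coordinates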
        m_v = tf.concat([m_v, tf.ones_like(m_v[:, -1:])], axis=1)

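        # Row-vector convention: points multiply on the left of the view and projection matrices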
        m_v = tf.matmul(m_v, view_matrix)
        m_v = tf.matmul(m_v, projection_matrix)

        return dirt.rasterise(bg, m_v, tf.cast(m_vc, tf.float32), tf.cast(m_f, tf.int32), name=name)
Example #4
def main():

    # Build the scene geometry, which is just an axis-aligned cube centred at the origin in world space
    # We replicate vertices that are shared, so normals are effectively per-face instead of smoothed
    cube_vertices_object, cube_faces = build_cube()
    cube_vertices_object = tf.constant(cube_vertices_object, dtype=tf.float32)
    cube_vertices_object, cube_faces = lighting.split_vertices_by_face(
        cube_vertices_object, cube_faces)
    cube_vertex_colors = tf.ones_like(cube_vertices_object)

    # Convert vertices to homogeneous coordinates
    cube_vertices_object = tf.concat(
        [cube_vertices_object,
         tf.ones_like(cube_vertices_object[:, -1:])],
        axis=1)

    # Transform vertices from object to world space, by rotating around the vertical axis
    cube_vertices_world = tf.matmul(cube_vertices_object,
                                    matrices.rodrigues([0., 0.5, 0.]))

    # Calculate face normals; pre_split implies that no faces share a vertex
    cube_normals_world = lighting.vertex_normals_pre_split(
        cube_vertices_world, cube_faces)

    # Transform vertices from world to camera space; note that the camera points along the negative-z axis in camera space
    view_matrix = matrices.compose(
        matrices.translation([0., -1.5, -3.5]),  # translate it away from the camera
        matrices.rodrigues([-0.3, 0., 0.])  # tilt the view downwards
    )
    cube_vertices_camera = tf.matmul(cube_vertices_world, view_matrix)

    # Transform vertices from camera to clip space
    projection_matrix = matrices.perspective_projection(
        near=0.1, far=20., right=0.1, aspect=float(frame_height) / frame_width)
    cube_vertices_clip = tf.matmul(cube_vertices_camera, projection_matrix)

    # Calculate lighting, as combination of diffuse and ambient
    vertex_colors_lit = lighting.diffuse_directional(
        cube_normals_world,
        cube_vertex_colors,
        light_direction=[1., 0., 0.],
        light_color=[1., 1., 1.]) * 0.8 + cube_vertex_colors * 0.2

    pixels = dirt.rasterise(vertices=cube_vertices_clip,
                            faces=cube_faces,
                            vertex_colors=vertex_colors_lit,
                            background=tf.zeros([frame_height, frame_width, 3]),
                            width=frame_width,
                            height=frame_height,
                            channels=3)

    session = tf.Session()
    with session.as_default():

        pixels_eval = pixels.eval()
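        # Swizzle RGB to BGR for display with OpenCV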
        cv2.imshow('simple.py', pixels_eval[:, :, (2, 1, 0)])
        cv2.waitKey(0)
Example #5
def get_pixels_direct(transformed_vertices, faces, vertex_normals,
                      vertex_colours, light_intensity, background):

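    # height, width, and channels are omitted here; rasterise can infer them
    # from the static shape of the background tensor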
    return dirt.rasterise(
        vertices=transformed_vertices,
        faces=faces,
        vertex_colors=calculate_shading(vertex_colours, vertex_normals,
                                        light_intensity),
        background=tf.ones([canvas_height, canvas_width, 3]) * background)
Example #6
def get_dirt_pixels():

    # Build square in screen space
    square_vertices = tf.constant([[0, 0], [0, 1], [1, 1], [1, 0]], dtype=tf.float32) * square_size - square_size / 2.
    square_vertices += [centre_x, centre_y]

    # Transform to homogeneous coordinates in clip space
    square_vertices = square_vertices * 2. / [canvas_width, canvas_height] - 1.
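    # (the line above maps pixel coordinates to normalised device coordinates in [-1, 1])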
    square_vertices = tf.concat([square_vertices, tf.zeros([4, 1]), tf.ones([4, 1])], axis=1)

    return dirt.rasterise(
        vertices=square_vertices,
        faces=[[0, 1, 2], [0, 2, 3]],
        vertex_colors=tf.ones([4, 1]),
        background=tf.zeros([canvas_height, canvas_width, 1]),
        height=canvas_height, width=canvas_width, channels=1
    )[:, :, 0]
Example #7
def get_dirt_pixels(width=canvas_width, height=canvas_height):

    square_vertices = tf.constant([[-1, -1, 0, 1], [-1, 1, 0, 1], [1, 1, 0, 1], [1, -1, 0, 1]], dtype=tf.float32)

    #background = skimage.io.imread('/n/fs/shaderml/datas_oceanic/test_img/test_middle_ground00000.png')
    #background = tf.constant(skimage.img_as_float(background), dtype=tf.float32)
    background = tf.zeros([height, width, 3], dtype=tf.float32)
    
    camera_pos = tf.placeholder(tf.float32, 8)
    
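    # NOTE: camera_pos is not a parameter of stock dirt.rasterise; this example
    # appears to target a modified build of DIRT that accepts it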
    return dirt.rasterise(
        vertices=square_vertices,
        faces=[[0, 1, 2], [0, 2, 3]],
        vertex_colors=tf.ones([4, 3]),
        background=background,
        camera_pos=camera_pos,
        height=height, width=width, channels=3
    ), camera_pos
Example #8
def main():

    # Build the scene geometry, which is just an axis-aligned cube centred at the origin in world space
    cube_vertices_object = []
    cube_uvs = []
    cube_faces = []

    def add_quad(vertices, uvs):
        index = len(cube_vertices_object)
        cube_faces.extend([[index + 2, index + 1, index],
                           [index, index + 3, index + 2]])
        cube_vertices_object.extend(vertices)
        cube_uvs.extend(uvs)

    add_quad(vertices=[[-1, -1, 1], [1, -1, 1], [1, 1, 1], [-1, 1, 1]],
             uvs=[[0.1, 0.9], [0.9, 0.9], [0.9, 0.1], [0.1, 0.1]])  # front
    add_quad(vertices=[[-1, -1, -1], [1, -1, -1], [1, 1, -1], [-1, 1, -1]],
             uvs=[[1, 1], [0, 1], [0, 0], [1, 0]])  # back

    add_quad(vertices=[[1, 1, 1], [1, 1, -1], [1, -1, -1], [1, -1, 1]],
             uvs=[[0.4, 0.35], [0.5, 0.35], [0.5, 0.45], [0.4, 0.45]])  # right
    add_quad(vertices=[[-1, 1, 1], [-1, 1, -1], [-1, -1, -1], [-1, -1, 1]],
             uvs=[[0.4, 0.4], [0.5, 0.4], [0.5, 0.5], [0.4, 0.5]])  # left

    add_quad(vertices=[[-1, 1, -1], [1, 1, -1], [1, 1, 1], [-1, 1, 1]],
             uvs=[[0, 0], [2, 0], [2, 2], [0, 2]])  # top
    add_quad(vertices=[[-1, -1, -1], [1, -1, -1], [1, -1, 1], [-1, -1, 1]],
             uvs=[[0, 0], [2, 0], [2, 2], [0, 2]])  # bottom

    cube_vertices_object = np.asarray(cube_vertices_object, np.float32)
    cube_uvs = np.asarray(cube_uvs, np.float32)

    # Load the texture image
    texture = tf.constant(
        imageio.imread(os.path.dirname(__file__) + '/cat.jpg'),
        dtype=tf.float32) / 255.

    # Convert vertices to homogeneous coordinates
    cube_vertices_object = tf.concat(
        [cube_vertices_object,
         tf.ones_like(cube_vertices_object[:, -1:])],
        axis=1)

    # Transform vertices from object to world space, by rotating around the vertical axis
    cube_vertices_world = tf.matmul(cube_vertices_object,
                                    matrices.rodrigues([0., 0.6, 0.]))

    # Calculate face normals
    cube_normals_world = lighting.vertex_normals(cube_vertices_world,
                                                 cube_faces)

    # Transform vertices from world to camera space; note that the camera points along the negative-z axis in camera space
    view_matrix = matrices.compose(
        matrices.translation([0., -2., -3.2]),  # translate it away from the camera
        matrices.rodrigues([-0.5, 0., 0.])  # tilt the view downwards
    )
    cube_vertices_camera = tf.matmul(cube_vertices_world, view_matrix)

    # Transform vertices from camera to clip space
    projection_matrix = matrices.perspective_projection(
        near=0.1, far=20., right=0.1, aspect=float(frame_height) / frame_width)
    cube_vertices_clip = tf.matmul(cube_vertices_camera, projection_matrix)

    # Render the G-buffer channels (mask, UVs, and normals at each pixel) needed for deferred shading
    gbuffer_mask = dirt.rasterise(
        vertices=cube_vertices_clip,
        faces=cube_faces,
        vertex_colors=tf.ones_like(cube_vertices_object[:, :1]),
        background=tf.zeros([frame_height, frame_width, 1]),
        width=frame_width,
        height=frame_height,
        channels=1)[..., 0]
    background_value = -1.e4
    gbuffer_vertex_uvs = dirt.rasterise(
        vertices=cube_vertices_clip,
        faces=cube_faces,
        vertex_colors=tf.concat(
            [cube_uvs, tf.zeros_like(cube_uvs[:, :1])], axis=1),
        background=tf.ones([frame_height, frame_width, 3]) * background_value,
        width=frame_width,
        height=frame_height,
        channels=3)[..., :2]
    gbuffer_vertex_normals_world = dirt.rasterise(
        vertices=cube_vertices_clip,
        faces=cube_faces,
        vertex_colors=cube_normals_world,
        background=tf.ones([frame_height, frame_width, 3]) * background_value,
        width=frame_width,
        height=frame_height,
        channels=3)

    # Dilate the normals and UVs to ensure correct gradients on the silhouette
    gbuffer_mask = gbuffer_mask[:, :, None]
    gbuffer_vertex_normals_world_dilated = tf.nn.max_pool(
        gbuffer_vertex_normals_world[None, ...],
        ksize=[1, 3, 3, 1],
        strides=[1, 1, 1, 1],
        padding='SAME')[0]
    gbuffer_vertex_normals_world = (
        gbuffer_vertex_normals_world * gbuffer_mask +
        gbuffer_vertex_normals_world_dilated * (1. - gbuffer_mask))
    gbuffer_vertex_uvs_dilated = tf.nn.max_pool(gbuffer_vertex_uvs[None, ...],
                                                ksize=[1, 3, 3, 1],
                                                strides=[1, 1, 1, 1],
                                                padding='SAME')[0]
    gbuffer_vertex_uvs = (
        gbuffer_vertex_uvs * gbuffer_mask +
        gbuffer_vertex_uvs_dilated * (1. - gbuffer_mask))

    # Calculate the colour buffer, by sampling the texture according to the rasterised UVs
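    # (sample_texture and uvs_to_pixel_indices are helpers assumed to be defined elsewhere)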
    gbuffer_colours = gbuffer_mask * sample_texture(
        texture, uvs_to_pixel_indices(gbuffer_vertex_uvs,
                                      tf.shape(texture)[:2]))

    # Calculate a simple grey ambient lighting component
    ambient_contribution = gbuffer_colours * [0.4, 0.4, 0.4]

    # Calculate a diffuse (Lambertian) lighting component
    light_direction = unit([1., -0.3, -0.5])
    diffuse_contribution = lighting.diffuse_directional(
        tf.reshape(gbuffer_vertex_normals_world, [-1, 3]),
        tf.reshape(gbuffer_colours, [-1, 3]),
        light_direction,
        light_color=[0.6, 0.6, 0.6],
        double_sided=True)
    diffuse_contribution = tf.reshape(diffuse_contribution,
                                      [frame_height, frame_width, 3])

    # Final pixels are given by combining the ambient and diffuse components
    pixels = diffuse_contribution + ambient_contribution

    session = tf.Session(config=tf.ConfigProto(
        gpu_options=tf.GPUOptions(allow_growth=True)))
    with session.as_default():

        pixels_eval = pixels.eval()
        imageio.imsave('textured.jpg', (pixels_eval * 255).astype(np.uint8))
Example #9
def main():

    # Build the scene geometry, which is just an axis-aligned cube centred at the origin in world space
    # We replicate vertices that are shared, so normals are effectively per-face instead of smoothed
    cube_vertices_object, cube_faces = build_cube()
    cube_vertices_object = tf.constant(cube_vertices_object, dtype=tf.float32)
    cube_vertices_object, cube_faces = lighting.split_vertices_by_face(
        cube_vertices_object, cube_faces)
    cube_vertex_colors = tf.ones_like(cube_vertices_object)

    # Convert vertices to homogeneous coordinates
    cube_vertices_object = tf.concat(
        [cube_vertices_object,
         tf.ones_like(cube_vertices_object[:, -1:])],
        axis=1)

    # Transform vertices from object to world space, by rotating around the vertical axis
    cube_vertices_world = tf.matmul(cube_vertices_object,
                                    matrices.rodrigues([0., 0.5, 0.]))

    # Calculate face normals; pre_split implies that no faces share a vertex
    cube_normals_world = lighting.vertex_normals_pre_split(
        cube_vertices_world, cube_faces)

    # Transform vertices from world to camera space; note that the camera points along the negative-z axis in camera space
    view_matrix = matrices.compose(
        matrices.translation([0., -1.5, -3.5]),  # translate it away from the camera
        matrices.rodrigues([-0.3, 0., 0.])  # tilt the view downwards
    )
    cube_vertices_camera = tf.matmul(cube_vertices_world, view_matrix)

    # Transform vertices from camera to clip space
    projection_matrix = matrices.perspective_projection(
        near=0.1, far=20., right=0.1, aspect=float(frame_height) / frame_width)
    cube_vertices_clip = tf.matmul(cube_vertices_camera, projection_matrix)

    # Render the G-buffer channels (vertex position, colour and normal at each pixel) needed for deferred shading
    gbuffer_vertex_positions_world = dirt.rasterise(
        vertices=cube_vertices_clip,
        faces=cube_faces,
        vertex_colors=cube_vertices_world[:, :3],
        background=tf.ones([frame_height, frame_width, 3]) * float('-inf'),
        width=frame_width,
        height=frame_height,
        channels=3)
    gbuffer_vertex_colours_world = dirt.rasterise(
        vertices=cube_vertices_clip,
        faces=cube_faces,
        vertex_colors=cube_vertex_colors,
        background=tf.zeros([frame_height, frame_width, 3]),
        width=frame_width,
        height=frame_height,
        channels=3)
    gbuffer_vertex_normals_world = dirt.rasterise(
        vertices=cube_vertices_clip,
        faces=cube_faces,
        vertex_colors=cube_normals_world,
        background=tf.ones([frame_height, frame_width, 3]) * float('-inf'),
        width=frame_width,
        height=frame_height,
        channels=3)

    # Dilate the position and normal channels at the silhouette boundary; this doesn't affect the image, but
    # ensures correct gradients for pixels just outside the silhouette
    background_mask = tf.cast(
        tf.equal(gbuffer_vertex_positions_world, float('-inf')), tf.float32)
    gbuffer_vertex_positions_world_dilated = tf.nn.max_pool(
        gbuffer_vertex_positions_world[None, ...],
        ksize=[1, 3, 3, 1],
        strides=[1, 1, 1, 1],
        padding='SAME')[0]
    gbuffer_vertex_positions_world = (
        gbuffer_vertex_positions_world * (1. - background_mask) +
        gbuffer_vertex_positions_world_dilated * background_mask)
    gbuffer_vertex_normals_world_dilated = tf.nn.max_pool(
        gbuffer_vertex_normals_world[None, ...],
        ksize=[1, 3, 3, 1],
        strides=[1, 1, 1, 1],
        padding='SAME')[0]
    gbuffer_vertex_normals_world = (
        gbuffer_vertex_normals_world * (1. - background_mask) +
        gbuffer_vertex_normals_world_dilated * background_mask)

    # Calculate a simple grey ambient lighting component
    ambient_contribution = gbuffer_vertex_colours_world * [0.2, 0.2, 0.2]

    # Calculate a red diffuse (Lambertian) lighting component
    light_direction = unit([1., -0.3, -0.5])
    diffuse_contribution = lighting.diffuse_directional(
        tf.reshape(gbuffer_vertex_normals_world, [-1, 3]),
        tf.reshape(gbuffer_vertex_colours_world, [-1, 3]),
        light_direction,
        light_color=[1., 0., 0.],
        double_sided=False)
    diffuse_contribution = tf.reshape(diffuse_contribution,
                                      [frame_height, frame_width, 3])

    # Calculate a white specular (Phong) lighting component
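    # With row vectors, the last row of the inverted view matrix holds the camera's world-space position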
    camera_position_world = tf.matrix_inverse(view_matrix)[3, :3]
    specular_contribution = lighting.specular_directional(
        tf.reshape(gbuffer_vertex_positions_world, [-1, 3]),
        tf.reshape(gbuffer_vertex_normals_world, [-1, 3]),
        tf.reshape(gbuffer_vertex_colours_world, [-1, 3]),
        light_direction,
        light_color=[1., 1., 1.],
        camera_position=camera_position_world,
        shininess=6.,
        double_sided=False)
    specular_contribution = tf.reshape(specular_contribution,
                                       [frame_height, frame_width, 3])

    # Final pixels are given by combining ambient, diffuse, and specular components
    pixels = diffuse_contribution + specular_contribution + ambient_contribution

    session = tf.Session()
    with session.as_default():

        pixels_eval = pixels.eval()
        cv2.imshow('deferred.py', pixels_eval[:, :, (2, 1, 0)])
        cv2.waitKey(0)
Example #10
    def __call__(self,
                 verts,
                 trans,
                 cam=None,
                 img=None,
                 do_alpha=False,
                 far=None,
                 near=None,
                 color_id=0,
                 img_size=None,
                 seg_parts=13774):
        """
        cam is 3D [f, px, py]
        """
        frame_width, frame_height = self.w, self.h
        cube_vertices_object = verts[0, :, :]
        cube_faces = tf.constant(self.faces, dtype=tf.int64)
        cube_vertices_object, cube_faces = lighting.split_vertices_by_face(
            cube_vertices_object, cube_faces)
        cube_vertex_colors = tf.constant(self.textura, dtype=tf.float32)

        # Convert vertices to homogeneous coordinates
        cube_vertices_object = tf.concat(
            [cube_vertices_object,
             tf.ones_like(cube_vertices_object[:, -1:])],
            axis=1)

        # Calculate face normals; pre_split implies that no faces share a vertex
        cube_normals_world = lighting.vertex_normals_pre_split(cube_vertices_object, cube_faces)

        # Transform vertices from object to world space by translating them
        view_matrix = matrices.translation(trans)
        cube_vertices_world = tf.matmul(cube_vertices_object, view_matrix)

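        # Transform from world to camera space: rotating by pi about the x-axis flips y and z,
        # presumably so the camera points along the negative-z axis as DIRT expects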
        cube_vertices_camera = tf.matmul(cube_vertices_world,
                                         matrices.rodrigues([np.pi, 0.0, 0.]))

        # Transform vertices from camera to clip space
        projection_matrix = matrices.perspective_projection(
            near=self.near, far=self.far, right=self.right,
            aspect=float(frame_height) / frame_width)
        cube_vertices_clip = tf.matmul(cube_vertices_camera, projection_matrix)

        # Calculate lighting, as combination of diffuse and ambient
        #vertex_colors_lit = lighting.diffuse_directional(
        #   cube_normals_world, cube_vertex_colors,
        #   light_direction=[1., 0., 0.], light_color=[1., 1., 1.]
        #) * 0.8 + cube_vertex_colors * 0.2

        # Rasterise only the first seg_parts faces
        pixels = dirt.rasterise(
            vertices=cube_vertices_clip,
            faces=cube_faces[:seg_parts, :],
            vertex_colors=cube_vertex_colors,
            background=tf.zeros([frame_height, frame_width, 3]),
            width=frame_width, height=frame_height, channels=3
        )
       
        return pixels  # (return cube_vertices_object and cube_faces as well to dump the colour data)