Example #1
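All examples below are shown without their imports. A typical preamble for them (an assumption, since each snippet comes from a different project; individual examples additionally use cv2, imageio, os, or PIL.Image, and rely on module-level constants such as frame_width and frame_height) would be:

import numpy as np
import tensorflow as tf
from tensorflow.python.framework import ops  # used by the name_scope-based renderers

import dirt
from dirt import matrices, lighting
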
def main():

    # Build the scene geometry, which is just an axis-aligned cube centred at the origin in world space
    # We replicate vertices that are shared, so normals are effectively per-face instead of smoothed
    cube_vertices_object, cube_faces = build_cube()
    cube_vertices_object = tf.constant(cube_vertices_object, dtype=tf.float32)
    cube_vertices_object, cube_faces = lighting.split_vertices_by_face(
        cube_vertices_object, cube_faces)
    cube_vertex_colors = tf.ones_like(cube_vertices_object)

    # Convert vertices to homogeneous coordinates
    cube_vertices_object = tf.concat(
        [cube_vertices_object,
         tf.ones_like(cube_vertices_object[:, -1:])],
        axis=1)

    # Transform vertices from object to world space, by rotating around the vertical axis
    cube_vertices_world = tf.matmul(cube_vertices_object,
                                    matrices.rodrigues([0., 0.5, 0.]))

    # Calculate face normals; pre_split implies that no faces share a vertex
    cube_normals_world = lighting.vertex_normals_pre_split(
        cube_vertices_world, cube_faces)

    # Transform vertices from world to camera space; note that the camera points along the negative-z axis in camera space
    view_matrix = matrices.compose(
        matrices.translation([0., -1.5, -3.5]),  # translate it away from the camera
        matrices.rodrigues([-0.3, 0., 0.])  # tilt the view downwards
    )
    cube_vertices_camera = tf.matmul(cube_vertices_world, view_matrix)

    # Transform vertices from camera to clip space
    projection_matrix = matrices.perspective_projection(
        near=0.1, far=20., right=0.1, aspect=float(frame_height) / frame_width)
    cube_vertices_clip = tf.matmul(cube_vertices_camera, projection_matrix)

    # Calculate lighting, as combination of diffuse and ambient
    vertex_colors_lit = lighting.diffuse_directional(
        cube_normals_world,
        cube_vertex_colors,
        light_direction=[1., 0., 0.],
        light_color=[1., 1., 1.]) * 0.8 + cube_vertex_colors * 0.2

    pixels = dirt.rasterise(vertices=cube_vertices_clip,
                            faces=cube_faces,
                            vertex_colors=vertex_colors_lit,
                            background=tf.zeros([frame_height, frame_width,
                                                 3]),
                            width=frame_width,
                            height=frame_height,
                            channels=3)

    session = tf.Session()
    with session.as_default():

        pixels_eval = pixels.eval()
        cv2.imshow('simple.py', pixels_eval[:, :, (2, 1, 0)])
        cv2.waitKey(0)
Example #2
def project_points_perspective(m_v,
                               camera_f,
                               camera_c,
                               camera_t,
                               camera_rt,
                               width,
                               height,
                               near=0.1,
                               far=10):
    projection_matrix = perspective_projection(camera_f, camera_c, width,
                                               height, near, far)

    view_matrix = matrices.compose(
        matrices.rodrigues(camera_rt.astype(np.float32)),
        matrices.translation(camera_t.astype(np.float32)),
    )
    m_v = tf.cast(m_v, tf.float32)
    m_v = tf.concat([m_v, tf.ones_like(m_v[:, :, -1:])], axis=2)
    m_v = tf.matmul(
        m_v, tf.tile(view_matrix[np.newaxis, ...], (tf.shape(m_v)[0], 1, 1)))
    m_v = tf.matmul(
        m_v,
        tf.tile(projection_matrix[np.newaxis, ...], (tf.shape(m_v)[0], 1, 1)))

    return m_v
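
A usage sketch (hypothetical values; note that perspective_projection here is this module's own intrinsics-based helper, not matrices.perspective_projection):

# Hypothetical inputs: a batch of two point sets with simple pinhole intrinsics
points = np.random.randn(2, 100, 3).astype(np.float32)
clip = project_points_perspective(points,
                                  camera_f=np.array([500., 500.]),
                                  camera_c=np.array([320., 240.]),
                                  camera_t=np.zeros(3, dtype=np.float32),
                                  camera_rt=np.zeros(3, dtype=np.float32),
                                  width=640, height=480)
ndc = clip[..., :2] / clip[..., 3:4]  # perspective divide: clip space -> NDC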
Example #3
def render_colored(m_v, m_f, m_vc, width, height, camera_f, camera_c, bgcolor=np.zeros(3, dtype=np.float32),
                   num_channels=3, camera_t=np.zeros(3, dtype=np.float32), camera_rt=np.zeros(3, dtype=np.float32),
                   name=None):
    with ops.name_scope(name, "render", [m_v]) as name:
        assert (num_channels == m_vc.shape[-1] == bgcolor.shape[0])

        projection_matrix = perspective_projection(
            camera_f, camera_c, width, height, .1, 10)
        # projection_matrix = matrices.perspective_projection(near=0.1, far=20., right=0.1, aspect=1.)

        view_matrix = matrices.compose(
            matrices.rodrigues(camera_rt.astype(np.float32)),
            matrices.translation(camera_t.astype(np.float32)),
        )

        bg = tf.tile(bgcolor.astype(np.float32)[
                     np.newaxis, np.newaxis, :], (height, width, 1))

        m_v = tf.cast(m_v, tf.float32)
        m_v = tf.concat([m_v, tf.ones_like(m_v[:, -1:])], axis=1)

        m_v = tf.matmul(m_v, view_matrix)
        m_v = tf.matmul(m_v, projection_matrix)

        return dirt.rasterise(bg, m_v, tf.cast(m_vc, tf.float32), tf.cast(m_f, tf.int32), name=name)
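
A minimal sketch of calling render_colored (hypothetical mesh and intrinsics; whether anything lands in view depends on the module's perspective_projection helper and the camera placement):

verts = (np.random.rand(100, 3) + [0., 0., 2.]).astype(np.float32)  # dummy mesh in front of the camera
faces = np.random.randint(0, 100, size=(50, 3))                     # dummy triangles
colors = np.ones((100, 3), dtype=np.float32)
image = render_colored(verts, faces, colors, width=256, height=256,
                       camera_f=np.array([250., 250.]),
                       camera_c=np.array([128., 128.]))
with tf.Session() as sess:
    image_np = sess.run(image)  # 256x256x3 float array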
Example #4
    def __call__(self, mesh, rot, trans, segColor, name=None):
        with tf.name_scope(name, "object_main", [rot, trans]):
            # get rotation matrix
            view_matrix_1 = tf.transpose(rodrigues(rot), perm=[0, 2, 1])  # Nx4x4

            # get translation matrix
            trans4 = tf.concat(
                [trans,
                 tf.ones((tf.shape(trans)[0], 1), dtype=tf.float32)],
                axis=1)
            trans4 = tf.expand_dims(trans4, 1)
            trans4 = tf.concat([
                np.tile(np.array([[[0., 0., 1., 0.]]]),
                        [trans.shape[0], 1, 1]), trans4
            ],
                               axis=1)
            trans4 = tf.concat([
                np.tile(np.array([[[0., 1., 0., 0.]]]),
                        [trans.shape[0], 1, 1]), trans4
            ],
                               axis=1)
            view_matrix_2 = tf.concat([
                np.tile(np.array([[[1., 0., 0., 0.]]]),
                        [trans.shape[0], 1, 1]), trans4
            ],
                                      axis=1)  # Nx4x4

            vertices = tf.concat(
                [mesh.v, tf.ones([tf.shape(mesh.v)[0], 1])], axis=1)  # Mx4
            vertices = tf.tile(tf.expand_dims(vertices, 0),
                               [trans.shape[0], 1, 1])

            self.objPoseMat = tf.matmul(view_matrix_1, view_matrix_2)  # Nx4x4

            verts = tf.matmul(vertices, self.objPoseMat)  # NxMx4

            class objMesh(object):
                pass

            objMesh.v = verts
            objMesh.f = mesh.f  #tf.tile(tf.expand_dims(self.f, 0), [tf.shape(verts_t)[0], 1, 1])
            objMesh.vcSeg = np.tile(np.expand_dims(segColor, 0),
                                    [mesh.v.shape[0], 1])
            if hasattr(mesh, 'vc'):
                objMesh.vc = mesh.vc
            else:
                objMesh.vc = objMesh.vcSeg

            if hasattr(mesh, 'vn'):
                vn = mesh.vn / np.expand_dims(
                    np.linalg.norm(mesh.vn, ord=2, axis=1),
                    1)  # normalize to unit vec
                vn = tf.concat([vn, tf.ones([tf.shape(vn)[0], 1])],
                               axis=1)  # Mx4
                vn = tf.tile(tf.expand_dims(vn, 0), [trans.shape[0], 1, 1])
                objMesh.vn = tf.matmul(vn, view_matrix_1)

            return objMesh
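
The trans4/view_matrix_2 block above assembles, row by row, a standard row-vector translation matrix (rows [1,0,0,0], [0,1,0,0], [0,0,1,0], [tx,ty,tz,1]). Assuming matrices.translation accepts batched inputs, as it is used in Example #11 below, the same matrix can be obtained in one call:

# Hypothetical one-line equivalent of the concat-based construction above
view_matrix_2_alt = matrices.translation(trans)  # Nx4x4, translation in the bottom row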
Example #5
def render_colored_batch(m_v,
                         m_f,
                         m_vc,
                         width,
                         height,
                         camera_f,
                         camera_c,
                         bgcolor=np.zeros(3, dtype=np.float32),
                         num_channels=3,
                         camera_t=np.zeros(3, dtype=np.float32),
                         camera_rt=np.zeros(3, dtype=np.float32),
                         name=None,
                         batch_size=None,
                         cam_pred=None):
    """ Render a batch of meshes with fixed BG. Supported projection types 1) Perspective, 2) Orthographic. """

    with ops.name_scope(name, "render_batch", [m_v]) as name:
        assert (num_channels == m_vc.shape[-1] == bgcolor.shape[0])

        #projection_matrix = perspective_projection(camera_f, camera_c, width, height, .1, 10)
        projection_matrix = orthgraphic_projection(
            width, height, -(width / 2), (width / 2))  # im_w x im_h x im_w cube

        ## Camera Extrinsics, rotate & trans
        view_matrix = matrices.compose(
            matrices.rodrigues(camera_rt.astype(np.float32)),
            matrices.translation(camera_t.astype(np.float32)),
        )
        ## Fixed-colour background; rasterise_batch expects (batch, height, width, channels)
        bg = tf.tile(bgcolor[tf.newaxis, tf.newaxis, tf.newaxis, ...],
                     [tf.shape(m_v)[0], height, width, 1])

        m_v = tf.cast(m_v, tf.float32)
        m_v = tf.concat([m_v, tf.ones_like(m_v[:, :, -1:])], axis=2)

        ## Extrinsic multiplication
        m_v = tf.matmul(
            m_v, tf.tile(view_matrix[np.newaxis, ...],
                         (tf.shape(m_v)[0], 1, 1)))

        ## Intrinsic Camera projection
        m_v = tf.matmul(
            m_v,
            tf.tile(projection_matrix[np.newaxis, ...],
                    (tf.shape(m_v)[0], 1, 1)))

        m_f = tf.tile(
            tf.cast(m_f, tf.int32)[tf.newaxis, ...], (tf.shape(m_v)[0], 1, 1))

        ## Rasterize
        return dirt.rasterise_batch(bg, m_v, m_vc, m_f, name=name)
Example #6
    def getObjPoseMat(self):
        # get rotation matrix
        view_matrix_1 = tf.transpose(rodrigues(self.rot), perm=[0, 2, 1])  # Nx4x4

        # get translation matrix
        trans4 = tf.concat([self.trans, tf.ones((tf.shape(self.trans)[0], 1), dtype=tf.float32)], axis=1)
        trans4 = tf.expand_dims(trans4, 1)
        trans4 = tf.concat([np.tile(np.array([[[0., 0., 1., 0.]]]), [self.trans.shape[0], 1, 1]), trans4], axis=1)
        trans4 = tf.concat([np.tile(np.array([[[0., 1., 0., 0.]]]), [self.trans.shape[0], 1, 1]), trans4], axis=1)
        view_matrix_2 = tf.concat([np.tile(np.array([[[1., 0., 0., 0.]]]), [self.trans.shape[0], 1, 1]), trans4],
                                  axis=1)  # Nx4x4

        objPoseMat = tf.matmul(view_matrix_1, view_matrix_2) # this is a right multiplication matrix of size Nx4x4

        return objPoseMat
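
As the comment says, objPoseMat right-multiplies homogeneous row vectors. A sketch of applying it (obj is a hypothetical instance of this class):

points = tf.zeros([2, 10, 3])                                            # N=2 sets of M=10 points
points_h = tf.concat([points, tf.ones_like(points[..., :1])], axis=-1)   # NxMx4 homogeneous
pose = obj.getObjPoseMat()                                               # Nx4x4
posed = tf.matmul(points_h, pose)                                        # rotate, then translate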
Example #7
def get_transformed_geometry(translation, rotation, scale):

    # Build a bent square in object space: three vertices on the z = 0 plane, one pulled out of plane
    vertices_object = tf.constant(
        [[-1, -1, 0.], [-1, 1, 0], [1, 1, 0], [1, -1, -1.3]],
        dtype=tf.float32) * square_size / 2
    faces = [[0, 1, 2], [0, 2, 3]]

    # ** we should add an occluding triangle!
    # ** also a non-planar meeting-of-faces

    vertices_object, faces = lighting.split_vertices_by_face(
        vertices_object, faces)

    # Convert vertices to homogeneous coordinates
    vertices_object = tf.concat(
        [vertices_object,
         tf.ones_like(vertices_object[:, -1:])], axis=1)

    # Transform vertices from object to world space, by rotating around the z-axis
    vertices_world = tf.matmul(
        vertices_object, matrices.rodrigues(
            [0., 0., rotation])) * scale + tf.concat([translation, [0.]],
                                                     axis=0)

    # Calculate face normals
    normals_world = lighting.vertex_normals(vertices_world, faces)

    # Transform vertices from world to camera space; note that the camera points along the negative-z axis in camera space
    view_matrix = matrices.translation([-0.5, 0., -3.5])  # translate it away from the camera
    vertices_camera = tf.matmul(vertices_world, view_matrix)

    # Transform vertices from camera to clip space
    projection_matrix = matrices.perspective_projection(
        near=0.1,
        far=20.,
        right=0.1,
        aspect=float(canvas_height) / canvas_width)
    vertices_clip = tf.matmul(vertices_camera, projection_matrix)

    vertex_colours = tf.concat(
        [tf.ones([3, 3]) * [0.8, 0.5, 0.],
         tf.ones([3, 3]) * [0.5, 0.8, 0.]],
        axis=0)

    return vertices_clip, faces, normals_world, vertex_colours
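
Everything in get_transformed_geometry is differentiable, so the clip-space vertices can be differentiated with respect to the pose parameters. A minimal sketch (assuming square_size, canvas_width and canvas_height are defined at module level, as the function requires):

translation = tf.Variable([0.1, -0.2])  # 2D translation in world space
rotation = tf.Variable(0.3)             # rotation angle about the z-axis
scale = tf.Variable(1.0)
vertices_clip, faces, normals, colours = get_transformed_geometry(translation, rotation, scale)
loss = tf.reduce_sum(vertices_clip)     # dummy scalar objective
grads = tf.gradients(loss, [translation, rotation, scale])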
Example #8
    def openpose_joints(self,
                 verts,
                 trans,
                 cam=None,
                 img=None,
                 do_alpha=False,
                 far=None,
                 near=None,
                 color_id=0,
                 img_size=None):

        frame_width, frame_height = self.w, self.h
        cube_vertices_object = verts[0, :, :]
        #cube_faces = tf.constant(self.faces, dtype=tf.int64)
        #cube_vertices_object = tf.constant(cube_vertices_object, dtype=tf.float32)
        #cube_vertices_object, cube_faces = lighting.split_vertices_by_face(cube_vertices_object, cube_faces)
        #cube_vertex_colors = tf.ones_like(cube_vertices_object)
        #cube_vertex_colors = tf.constant(self.textura, dtype=tf.float32)
        # Convert vertices to homogeneous coordinates
        cube_vertices_object = tf.concat(
            [cube_vertices_object, tf.ones_like(cube_vertices_object[:, -1:])], axis=1)

        # (object-to-world rotation is skipped in this renderer)

        # Calculate face normals; pre_split implies that no faces share a vertex
        #cube_normals_world = lighting.vertex_normals_pre_split(cube_vertices_object, cube_faces)

        # Transform vertices from world to camera space; note that the camera points along the negative-z axis in camera space
        view_matrix = matrices.translation(trans)

        cube_vertices_world = tf.matmul(cube_vertices_object, view_matrix)

        cube_vertices_camera = tf.matmul(cube_vertices_world, matrices.rodrigues([np.pi, 0.0, 0.]))

        # Transform vertices from camera to clip space
        projection_matrix = matrices.perspective_projection(near=self.near, far=self.far, right=self.right, aspect=float(frame_height) / frame_width)
        cube_vertices_clip = tf.matmul(cube_vertices_camera, projection_matrix)
        xs = tf.divide(cube_vertices_clip[:, 0], cube_vertices_clip[:, 3])
        ys = tf.divide(cube_vertices_clip[:, 1], cube_vertices_clip[:, 3])
        us = (self.w / 2.0) * xs + self.w / 2.0
        vs = (self.h / 2.0) * (tf.ones_like(ys) - ys)
        #vs = 480 - vs
        res = tf.stack([us, vs], axis=1)


        return res
Example #9
def render_colored_batch(m_v,
                         m_f,
                         m_vc,
                         width,
                         height,
                         camera_f,
                         camera_c,
                         bgcolor=np.zeros(3, dtype=np.float32),
                         num_channels=3,
                         camera_t=np.zeros(3, dtype=np.float32),
                         camera_rt=np.zeros(3, dtype=np.float32),
                         name=None):
    with ops.name_scope(name, "render_batch", [m_v]) as name:
        assert (num_channels == m_vc.shape[-1] == bgcolor.shape[0])

        projection_matrix = perspective_projection(camera_f, camera_c, width,
                                                   height, .1, 10)

        view_matrix = matrices.compose(
            matrices.rodrigues(camera_rt.astype(np.float32)),
            matrices.translation(camera_t.astype(np.float32)),
        )

        bg = tf.tile(
            bgcolor.astype(np.float32)[np.newaxis, np.newaxis, np.newaxis, :],
            (tf.shape(m_v)[0], height, width, 1))
        m_vc = tf.tile(
            tf.cast(m_vc, tf.float32)[np.newaxis, ...],
            (tf.shape(m_v)[0], 1, 1))

        m_v = tf.cast(m_v, tf.float32)
        m_v = tf.concat([m_v, tf.ones_like(m_v[:, :, -1:])], axis=2)
        m_v = tf.matmul(
            m_v, tf.tile(view_matrix[np.newaxis, ...],
                         (tf.shape(m_v)[0], 1, 1)))
        m_v = tf.matmul(
            m_v,
            tf.tile(projection_matrix[np.newaxis, ...],
                    (tf.shape(m_v)[0], 1, 1)))

        m_f = tf.tile(
            tf.cast(m_f, tf.int32)[np.newaxis, ...], (tf.shape(m_v)[0], 1, 1))

        return dirt.rasterise_batch(bg, m_v, m_vc, m_f, name=name)
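
A sketch of a batched call (hypothetical mesh; faces and colours are shared across the batch and tiled inside the function):

verts = (np.random.rand(100, 3) + [0., 0., 2.]).astype(np.float32)
batch_verts = np.tile(verts[np.newaxis], (8, 1, 1))  # 8xVx3: eight copies of one mesh
faces = np.random.randint(0, 100, size=(50, 3))
colors = np.ones((100, 3), dtype=np.float32)
images = render_colored_batch(batch_verts, faces, colors, width=256, height=256,
                              camera_f=np.array([250., 250.]),
                              camera_c=np.array([128., 128.]))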
Example #10
def main():

    # Build the scene geometry, which is just an axis-aligned cube centred at the origin in world space
    cube_vertices_object = []
    cube_uvs = []
    cube_faces = []

    def add_quad(vertices, uvs):
        index = len(cube_vertices_object)
        cube_faces.extend([[index + 2, index + 1, index],
                           [index, index + 3, index + 2]])
        cube_vertices_object.extend(vertices)
        cube_uvs.extend(uvs)

    add_quad(vertices=[[-1, -1, 1], [1, -1, 1], [1, 1, 1], [-1, 1, 1]],
             uvs=[[0.1, 0.9], [0.9, 0.9], [0.9, 0.1], [0.1, 0.1]])  # front
    add_quad(vertices=[[-1, -1, -1], [1, -1, -1], [1, 1, -1], [-1, 1, -1]],
             uvs=[[1, 1], [0, 1], [0, 0], [1, 0]])  # back

    add_quad(vertices=[[1, 1, 1], [1, 1, -1], [1, -1, -1], [1, -1, 1]],
             uvs=[[0.3, 0.25], [0.6, 0.25], [0.6, 0.55], [0.3, 0.55]])  # right
    add_quad(vertices=[[-1, 1, 1], [-1, 1, -1], [-1, -1, -1], [-1, -1, 1]],
             uvs=[[0.4, 0.4], [0.5, 0.4], [0.5, 0.5], [0.4, 0.5]])  # left

    add_quad(vertices=[[-1, 1, -1], [1, 1, -1], [1, 1, 1], [-1, 1, 1]],
             uvs=[[0, 0], [2, 0], [2, 2], [0, 2]])  # top
    add_quad(vertices=[[-1, -1, -1], [1, -1, -1], [1, -1, 1], [-1, -1, 1]],
             uvs=[[0, 0], [2, 0], [2, 2], [0, 2]])  # bottom

    cube_vertices_object = np.asarray(cube_vertices_object, np.float32)
    cube_uvs = np.asarray(cube_uvs, np.float32)

    # Load the texture image
    texture = tf.cast(
        tf.image.decode_jpeg(
            tf.read_file(os.path.dirname(__file__) + '/cat.jpg')),
        tf.float32) / 255.

    # Convert vertices to homogeneous coordinates
    cube_vertices_object = tf.concat(
        [cube_vertices_object,
         tf.ones_like(cube_vertices_object[:, -1:])],
        axis=1)

    # Transform vertices from object to world space, by rotating around the vertical axis
    cube_vertices_world = tf.matmul(cube_vertices_object,
                                    matrices.rodrigues([0., 0.6, 0.]))

    # Calculate face normals
    cube_normals_world = lighting.vertex_normals(cube_vertices_world,
                                                 cube_faces)

    # Transform vertices from world to camera space; note that the camera points along the negative-z axis in camera space
    view_matrix = matrices.compose(
        matrices.translation([0., -2., -3.2]),  # translate it away from the camera
        matrices.rodrigues([-0.5, 0., 0.])  # tilt the view downwards
    )
    cube_vertices_camera = tf.matmul(cube_vertices_world, view_matrix)

    # Transform vertices from camera to clip space
    projection_matrix = matrices.perspective_projection(
        near=0.1, far=20., right=0.1, aspect=float(frame_height) / frame_width)
    cube_vertices_clip = tf.matmul(cube_vertices_camera, projection_matrix)

    # The following function is applied to the G-buffer, which is a multi-channel image containing all the vertex attributes. It
    # uses this to calculate the shading (texture and lighting) at each pixel, hence their final intensities
    def shader_fn(gbuffer, texture, light_direction):

        # Unpack the different attributes from the G-buffer
        mask = gbuffer[:, :, :1]
        uvs = gbuffer[:, :, 1:3]
        normals = gbuffer[:, :, 3:]

        # Sample the texture at locations corresponding to each pixel; this defines the unlit material color at each point
        unlit_colors = sample_texture(
            texture, uvs_to_pixel_indices(uvs,
                                          tf.shape(texture)[:2]))

        # Calculate a simple grey ambient lighting component
        ambient_contribution = unlit_colors * [0.4, 0.4, 0.4]

        # Calculate a diffuse (Lambertian) lighting component
        diffuse_contribution = lighting.diffuse_directional(
            tf.reshape(normals, [-1, 3]),
            tf.reshape(unlit_colors, [-1, 3]),
            light_direction,
            light_color=[0.6, 0.6, 0.6],
            double_sided=True)
        diffuse_contribution = tf.reshape(diffuse_contribution,
                                          [frame_height, frame_width, 3])

        # The final pixel intensities inside the shape are given by combining the ambient and diffuse components;
        # outside the shape, they are set to a uniform background color
        pixels = (diffuse_contribution +
                  ambient_contribution) * mask + [0., 0., 0.3] * (1. - mask)

        return pixels

    # Render the G-buffer channels (mask, UVs, and normals at each pixel), then perform the deferred shading calculation
    # In general, any tensor required by shader_fn and wrt which we need derivatives should be included in shader_additional_inputs;
    # although in this example they are constant, we pass the texture and lighting direction through this route as an illustration
    light_direction = tf.linalg.l2_normalize([1., -0.3, -0.5])
    pixels = dirt.rasterise_deferred(
        vertices=cube_vertices_clip,
        vertex_attributes=tf.concat(
            [
                tf.ones_like(cube_vertices_object[:, :1]),  # mask
                cube_uvs,  # texture coordinates
                cube_normals_world  # normals
            ],
            axis=1),
        faces=cube_faces,
        background_attributes=tf.zeros([frame_height, frame_width, 6]),
        shader_fn=shader_fn,
        shader_additional_inputs=[texture, light_direction])

    save_pixels = tf.write_file(
        'textured.jpg', tf.image.encode_jpeg(tf.cast(pixels * 255, tf.uint8)))

    session = tf.Session(config=tf.ConfigProto(gpu_options=tf.GPUOptions(
        allow_growth=True)))
    with session.as_default():

        save_pixels.run()
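
shader_fn receives the rasterised G-buffer plus everything passed via shader_additional_inputs, and must return the final pixel image. A minimal alternative shader for the same G-buffer layout (mask, UVs, normals), which just paints a flat grey silhouette, might look like:

def flat_shader_fn(gbuffer, texture, light_direction):
    mask = gbuffer[:, :, :1]  # channel 0 is the coverage mask
    return mask * [0.7, 0.7, 0.7] + (1. - mask) * [0., 0., 0.3]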
Example #11
def dirt_rendering_orthographic( vertices, faces, reflectances, \
                    frame_width, frame_height, cx, cy, vertices_to_keep_track = None ):

    if vertices.shape[-1] == 2:
        vertices = tf.concat(
            [vertices, tf.zeros_like(vertices[..., -1:])], axis=-1)

    vertical_flip = False
    if frame_height < 0:
        frame_height = -frame_height
        vertical_flip = True

    if reflectances is None:
        reflectances = tf.ones_like(vertices)

    rendering_batch_size = tf.shape(vertices)[0]
    ones = tf.ones([rendering_batch_size], dtype=tf.float32)
    normals = lighting.vertex_normals(vertices, faces)

    bz_facemodel_vertices_object, bz_facemodel_vertex_colors, bz_facemodel_vertex_normals, bz_facemodel_faces = split_vertices_by_color_normal_face(
        vertices, reflectances, normals, faces)

    if vertices_to_keep_track is None:
        landmark_vertices = vertices
        landmark_normals = normals
    else:
        landmark_vertices, landmark_normals = get_landmarks(
            vertices, normals, vertices_to_keep_track)

    translation_parameters = np.array([[0, 0, 100]], dtype=np.float32)
    translation_parameters = tf.tile(
        tf.constant(translation_parameters, dtype=tf.float32),
        [rendering_batch_size, 1])
    rotation_parameters = np.array([[1e-10, 0, 0]], dtype=np.float32)
    rotation_parameters = tf.tile(
        tf.constant(rotation_parameters, dtype=tf.float32),
        [rendering_batch_size, 1])
    cx = tf.ones([rendering_batch_size], dtype=tf.float32) * cx
    cy = tf.ones([rendering_batch_size],
                 dtype=tf.float32) * (frame_height - cy)

    # Transform vertices from world to camera space; note that the camera points along the negative-z axis in camera space
    view_matrix = matrices.compose(
        matrices.translation(translation_parameters),  # fixed translation along +z
        matrices.rodrigues(rotation_parameters)  # (near-)identity rotation
    )
    """
    with tf.Session() as sess: 
        _batch = sess.run( rendering_batch_size )
        _a = sess.run( matrices.translation(translation_parameters) )
        _b = sess.run( matrices.rodrigues(rotation_parameters)  )
        _c = sess.run( view_matrix )

        print("translation", np.transpose(_a[0]))
        print("rotation",  np.transpose(_b[0]))
        print("camera",  np.transpose(_c[0]))
    """

    # Convert vertices to homogeneous coordinates
    bz_facemodel_vertices_object = tf.concat([
        bz_facemodel_vertices_object,
        tf.ones_like(bz_facemodel_vertices_object[..., -1:])
    ],
                                             axis=-1)

    landmark_vertices = tf.concat(
        [landmark_vertices,
         tf.ones_like(landmark_vertices[..., -1:])],
        axis=-1)

    # Viewpoint direction is fixed to -z (towards the scene in camera space)
    viewpoint_direction = np.array([0, 0, -1], np.float32)
    viewpoint_direction = np.expand_dims(viewpoint_direction, axis=0)
    viewpoint_direction = tf.constant(viewpoint_direction)
    viewpoint_direction = tf.tile(viewpoint_direction,
                                  [rendering_batch_size, 1])

    # Transform vertices from object to world space, by rotating around the vertical axis
    bz_facemodel_vertices_world = bz_facemodel_vertices_object  #  tf.matmul(cube_vertices_object, [matrices.rodrigues([0.0, 0.0, 0.0])] )
    landmark_vertices_world = landmark_vertices

    # Calculate face normals; pre_split implies that no faces share a vertex
    bz_facemodel_faces = tf.expand_dims(bz_facemodel_faces, axis=0)
    bz_facemodel_faces = tf.tile(bz_facemodel_faces,
                                 (rendering_batch_size, 1, 1))
    bz_facemodel_normals_world = bz_facemodel_vertex_normals  #  lighting.vertex_normals_pre_split(cube_vertices_world, cube_faces)
    bz_facemodel_vertices_camera = tf.matmul(bz_facemodel_vertices_world,
                                             view_matrix)

    # Transform vertices from camera to clip space
    near = 10.0 * ones
    far = 200.0 * ones

    projection_matrix = orthographic_projection(near=near,
                                                far=far,
                                                w=frame_width,
                                                ones=ones,
                                                h=frame_height,
                                                cx=cx,
                                                cy=cy)
    bz_facemodel_vertices_clip = tf.matmul(bz_facemodel_vertices_camera,
                                           projection_matrix)
    landmark_vertices_vertices_clip = tf.matmul(landmark_vertices_world,
                                                projection_matrix)
    """
    with tf.Session() as sess: 
        
        _v0 = sess.run( bz_facemodel_vertices_world )
        _v1 = sess.run( bz_facemodel_vertices_camera )
        _v2 = sess.run( bz_facemodel_vertices_clip )


        print("vertices\n", (_v0[0]))
        print("vertices\n", (_v1[0]))
        print("vertices\n", (_v2[0]))
    """

    full_model_pixels = dirt.rasterise_batch(
        vertices=bz_facemodel_vertices_clip,
        faces=bz_facemodel_faces,
        vertex_colors=bz_facemodel_vertex_colors,
        background=tf.zeros(
            [rendering_batch_size, frame_height, frame_width, 3]),
        width=frame_width,
        height=frame_height,
        channels=3)

    # landmarks
    denom = landmark_vertices_vertices_clip[:, :, 3]
    landmark_points = landmark_vertices_vertices_clip[:, :, 0:2] / (
        denom[..., tf.newaxis])
    landmark_xpoint = (1.0 + landmark_points[:, :, 0:1]) * frame_width * 0.5
    landmark_ypoint = (1.0 + landmark_points[:, :, 1:2]) * frame_height * 0.5
    landmark_points = tf.concat([landmark_xpoint, landmark_ypoint], axis=-1)

    output_dictionary = {}
    output_dictionary["rendering_results"] = full_model_pixels
    output_dictionary["vertices"] = landmark_points
    output_dictionary["landmark_normals"] = landmark_normals
    """
    with tf.Session() as sess:
        _vertices = sess.run( bz_facemodel_vertices_clip )
        print( _vertices.shape )
        print( _vertices )
    """

    if vertical_flip is True:
        output_dictionary["rendering_results"] = output_dictionary[
            "rendering_results"][:, ::-1, :, :]

    return output_dictionary
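
A sketch of invoking the orthographic renderer (hypothetical inputs; passing a negative frame_height requests the vertical flip handled above):

verts = tf.constant(np.random.rand(1, 500, 3) * 100., dtype=tf.float32)  # batch of one
faces = np.random.randint(0, 500, size=(900, 3))
out = dirt_rendering_orthographic(verts, faces, reflectances=None,
                                  frame_width=256, frame_height=256,
                                  cx=128., cy=128.)
rendering = out["rendering_results"]  # 1x256x256x3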
Example #12
def dirt_rendering( vertices, faces, reflectances, \
                    spherical_harmonics_parameters, translation_parameters, camera_rotation_parameters, base_rotation, rotation_parameters, \
                    frame_width, frame_height, focal_length, cx, cy, vertices_indices ):

    rendering_batch_size = tf.shape(vertices)[0]
    vertices = tf.matmul(vertices,
                         matrices.rodrigues(rotation_parameters)[..., :3, :3])

    vertex_normals = normals = lighting.vertex_normals(vertices, faces)

    landmark_vertices, landmark_normals = get_landmarks(
        vertices, normals, vertices_indices)
    bz_facemodel_vertices_object, bz_facemodel_vertex_colors, bz_facemodel_vertex_normals, bz_facemodel_faces = split_vertices_by_color_normal_face(
        vertices, reflectances, normals, faces)

    # Transform vertices from world to camera space; note that the camera points along the negative-z axis in camera space
    view_matrix = matrices.compose(
        #        matrices.rodrigues(rotation_parameters),
        matrices.translation(-translation_parameters),  # translate away from the camera
        matrices.rodrigues(camera_rotation_parameters),  # camera rotation
        matrices.rodrigues(base_rotation))
    """

    view_matrix = matrices.compose(
        matrices.translation(translation_parameters),  # translate it away from the camera                
        matrices.rodrigues(camera_rotation_parameters),        # tilt the view downwards
        matrices.translation(-translation_parameters)  # translate it away from the camera        
    )
    """

    # Convert vertices to homogeneous coordinates
    bz_facemodel_vertices_object = tf.concat([
        bz_facemodel_vertices_object,
        tf.ones_like(bz_facemodel_vertices_object[..., -1:])
    ],
                                             axis=-1)

    landmark_vertices = tf.concat(
        [landmark_vertices,
         tf.ones_like(landmark_vertices[..., -1:])],
        axis=-1)
    """
    with tf.Session() as sess:
        _view_matrix = sess.run( view_matrix )
        print( _view_matrix )
    """

    # Viewpoint direction is fixed to +z in this renderer's convention
    viewpoint_direction = np.array([0, 0, 1], np.float32)
    viewpoint_direction = np.expand_dims(viewpoint_direction, axis=0)
    viewpoint_direction = tf.constant(viewpoint_direction)
    viewpoint_direction = tf.tile(viewpoint_direction,
                                  [rendering_batch_size, 1])

    # Transform vertices from object to world space, by rotating around the vertical axis
    bz_facemodel_vertices_world = bz_facemodel_vertices_object  #  tf.matmul(cube_vertices_object, [matrices.rodrigues([0.0, 0.0, 0.0])] )
    landmark_vertices_world = landmark_vertices

    # Calculate face normals; pre_split implies that no faces share a vertex
    bz_facemodel_faces = tf.expand_dims(bz_facemodel_faces, axis=0)
    bz_facemodel_faces = tf.tile(bz_facemodel_faces,
                                 (rendering_batch_size, 1, 1))

    bz_facemodel_normals_world = bz_facemodel_vertex_normals  #  lighting.vertex_normals_pre_split(cube_vertices_world, cube_faces)
    landmark_vertices_normals = landmark_normals

    bz_facemodel_vertices_camera = tf.matmul(bz_facemodel_vertices_world,
                                             view_matrix)
    landmark_vertices_camera = tf.matmul(landmark_vertices_world, view_matrix)

    # Transform vertices from camera to clip space
    near = focal_length * 0.1  # heuristic near plane, scaled with the focal length
    far = focal_length * 100  # heuristic far plane
    #    right = frame_width * 0.5 * 0.01
    #    projection_matrix = matrices.perspective_projection(near=near, far=1000., right=right, aspect=float(frame_height) / frame_width)
    projection_matrix = matrices.perspective_projection( near=near, far=far, fx = focal_length, fy=focal_length, \
        w = frame_width, h = frame_height, cx = cx, cy = cy )

    #    projection_matrix = tf.expand_dims( projection_matrix, axis = 0)
    #    projection_matrix = tf.tile( projection_matrix, (rendering_batch_size,1,1))

    bz_facemodel_vertices_clip = tf.matmul(bz_facemodel_vertices_camera,
                                           projection_matrix)
    landmark_vertices_vertices_clip = tf.matmul(landmark_vertices_camera,
                                                projection_matrix)

    #K = tf.constant(
    #    np.array( [ [focal_length,0,frame_width*0.5],[0,focal_length,frame_height*0.5],[0.0,0.0,1.0]] ), dtype = tf.float32)
    """
    ### if you want check projection matrix 
    with tf.Session() as sess:
        _landmarks = sess.run( landmark_vertices )    
        _view_matrix = sess.run( view_matrix )
        _projection_matrix = sess.run( projection_matrix )
        #_K = sess.run(K )
        _T = sess.run( matrices.translation(translation_parameters) )
        _R = sess.run( matrices.rodrigues(rotation_parameters)  )

        print(np.array2string(_T, separator=', ')) 
        print(np.array2string(_R, separator=', ')) 
        print(np.array2string(_landmarks, separator=', ')) 
        print(np.array2string(_view_matrix, separator=', ')) 
        #print(np.array2string(_K, separator=', ')) 
        print(np.array2string(_projection_matrix, separator=', ')) 
    """
    """
    # Calculate lighting, as combination of diffuse and ambient
    vertex_colors_lit = diffuse_directional_lights(
        bz_facemodel_normals_world, bz_facemodel_vertex_colors,
        light_direction=light_directions, viewpoint_direction=viewpoint_direction, light_color=light_colors
    )  * 1.0 #+ bz_facemodel_vertex_colors * 0.2
    """

    # geometry
    use_spherical_harmonics = False

    if use_spherical_harmonics:
        vertex_colors_lit = spherical_harmonics(
            bz_facemodel_normals_world,
            bz_facemodel_vertex_colors,
            spherical_harmonics_parameters,
            viewpoint_direction=viewpoint_direction)

        geometry_visualization_spherical_harmonics_parameters = np.zeros(
            [27], dtype=np.float32)
        geometry_visualization_spherical_harmonics_parameters[3:3 + 9] = 1.0
        geometry_visualization_spherical_harmonics_parameters = tf.constant(
            geometry_visualization_spherical_harmonics_parameters,
            dtype=tf.float32)
        geometry_visualization_vertex_colors_lit = spherical_harmonics( bz_facemodel_normals_world, tf.ones_like(bz_facemodel_vertex_colors), \
                                geometry_visualization_spherical_harmonics_parameters, viewpoint_direction=viewpoint_direction )
    else:
        light_directions = np.array([[0, 0, 1]])
        norm = np.linalg.norm(light_directions, axis=-1)
        light_directions = light_directions / norm[:, np.newaxis]
        light_directions = tf.constant(light_directions, dtype=tf.float32)
        light_directions = tf.tile(light_directions, (rendering_batch_size, 1))

        light_colors = tf.constant([1., 1., 1.], dtype=tf.float32)
        light_colors = tf.expand_dims(light_colors, axis=0)
        light_colors = tf.tile(light_colors, (rendering_batch_size, 1))

        geometry_visualization_vertex_colors_lit = diffuse_directional_lights( bz_facemodel_normals_world,  tf.ones_like(bz_facemodel_vertex_colors), \
                                    light_direction=light_directions, viewpoint_direction=viewpoint_direction, light_color=light_colors, double_sided = False ) * 1.0 #+ bz_facemodel_vertex_colors * 0.2
        vertex_colors_lit = diffuse_directional_lights( bz_facemodel_normals_world,  bz_facemodel_vertex_colors, light_direction=light_directions, \
                                            viewpoint_direction=viewpoint_direction, light_color=light_colors ) * 1.0 #+ bz_facemodel_vertex_colors * 0.2

    # reflectance
    reflectance_visualization_spherical_harmonics_parameters = np.zeros(
        [27], dtype=np.float32)
    reflectance_visualization_spherical_harmonics_parameters[0:3] = 3.0
    reflectance_visualization_spherical_harmonics_parameters = tf.constant(
        reflectance_visualization_spherical_harmonics_parameters,
        dtype=tf.float32)
    reflectance_visualization_vertex_colors_lit = spherical_harmonics( bz_facemodel_normals_world, bz_facemodel_vertex_colors, \
                                reflectance_visualization_spherical_harmonics_parameters, viewpoint_direction=viewpoint_direction )

    # illumination
    illumination_visualization_vertex_colors_lit = spherical_harmonics( bz_facemodel_normals_world, tf.ones_like(bz_facemodel_vertex_colors), \
                            spherical_harmonics_parameters, viewpoint_direction=viewpoint_direction )

    # depth
    depth_vertex_colors_lit = tf.concat([
        geometry_visualization_vertex_colors_lit[:, :, 0:1],
        bz_facemodel_normals_world[:, :, 2:3],
        bz_facemodel_vertices_camera[:, :, 2:3]
    ],
                                        axis=-1)

    # landmarks
    denom = landmark_vertices_vertices_clip[:, :, 3]
    landmark_points = landmark_vertices_vertices_clip[:, :, 0:2] / (
        denom[..., tf.newaxis])
    landmark_xpoint = (1.0 + landmark_points[:, :, 0:1]) * frame_width * 0.5
    landmark_ypoint = (1.0 - landmark_points[:, :, 1:2]) * frame_height * 0.5
    landmark_points = tf.concat([landmark_xpoint, landmark_ypoint], axis=-1)
    """
    with tf.Session() as sess:
        _AAA = sess.run( landmark_points )
        print(_AAA)
    """

    # landmark visibility
    landmark_visibility = tf.matmul(landmark_vertices_normals,
                                    viewpoint_direction[..., tf.newaxis])
    #visibility = tf.minimum( landmark_visibility+0.9999, 1.0)
    #visibility = tf.cast( visibility, tf.int32 )

    full_model_pixels = dirt.rasterise_batch(
        vertices=bz_facemodel_vertices_clip,
        faces=bz_facemodel_faces,
        vertex_colors=vertex_colors_lit,
        background=tf.zeros(
            [rendering_batch_size, frame_height, frame_width, 3]),
        width=frame_width,
        height=frame_height,
        channels=3)

    geometry_model_pixels = dirt.rasterise_batch(
        vertices=bz_facemodel_vertices_clip,
        faces=bz_facemodel_faces,
        vertex_colors=geometry_visualization_vertex_colors_lit,
        background=tf.zeros(
            [rendering_batch_size, frame_height, frame_width, 3]),
        width=frame_width,
        height=frame_height,
        channels=3)

    reflectance_model_pixels = dirt.rasterise_batch(
        vertices=bz_facemodel_vertices_clip,
        faces=bz_facemodel_faces,
        vertex_colors=tf.ones_like(
            reflectance_visualization_vertex_colors_lit),
        background=tf.zeros(
            [rendering_batch_size, frame_height, frame_width, 3]),
        width=frame_width,
        height=frame_height,
        channels=3)

    illumination_model_pixels = dirt.rasterise_batch(
        vertices=bz_facemodel_vertices_clip,
        faces=bz_facemodel_faces,
        vertex_colors=illumination_visualization_vertex_colors_lit,
        background=tf.zeros(
            [rendering_batch_size, frame_height, frame_width, 3]),
        width=frame_width,
        height=frame_height,
        channels=3)

    depth_maps = -dirt.rasterise_batch(
        vertices=bz_facemodel_vertices_clip,
        faces=bz_facemodel_faces,
        vertex_colors=depth_vertex_colors_lit,
        background=tf.zeros(
            [rendering_batch_size, frame_height, frame_width, 3]),
        width=frame_width,
        height=frame_height,
        channels=3)

    output1_dict = {}
    output1_dict["full_model_pixels"] = full_model_pixels
    output1_dict["vertices_clip"] = bz_facemodel_vertices_clip
    output1_dict["landmark_points"] = landmark_points
    output1_dict["landmark_visibility"] = landmark_visibility
    #output1_dict["depth_masks"] =  tf.clip_by_value( 5*( -depth_maps[:,:,:,0] ), 0, 1 ) # tf.stop_gradient( ???
    #output1_dict["depth_masks"] =   tf.stop_gradient( tf.clip_by_value( 100*( -depth_maps[:,:,:,0] ), 0, 1 ) )
    output1_dict["depth_maps"] = depth_maps[:, :, :, 2]
    output1_dict["geometry_model_pixels"] = geometry_model_pixels
    output1_dict["reflectance_model_pixels"] = reflectance_model_pixels
    output1_dict["illumination_model_pixels"] = illumination_model_pixels
    output1_dict["surface_normals"] = (1 + depth_maps[:, :, :, 1]) / 2.0
    output1_dict["vertex_normals"] = vertex_normals

    return output1_dict
Example #13
def main():

    # Build the scene geometry, which is just an axis-aligned cube centred at the origin in world space
    cube_vertices_object = []
    cube_uvs = []
    cube_faces = []

    def add_quad(vertices, uvs):
        index = len(cube_vertices_object)
        cube_faces.extend([[index + 2, index + 1, index],
                           [index, index + 3, index + 2]])
        cube_vertices_object.extend(vertices)
        cube_uvs.extend(uvs)

    add_quad(vertices=[[-1, -1, 1], [1, -1, 1], [1, 1, 1], [-1, 1, 1]],
             uvs=[[0.1, 0.9], [0.9, 0.9], [0.9, 0.1], [0.1, 0.1]])  # front
    add_quad(vertices=[[-1, -1, -1], [1, -1, -1], [1, 1, -1], [-1, 1, -1]],
             uvs=[[1, 1], [0, 1], [0, 0], [1, 0]])  # back

    add_quad(vertices=[[1, 1, 1], [1, 1, -1], [1, -1, -1], [1, -1, 1]],
             uvs=[[0.4, 0.35], [0.5, 0.35], [0.5, 0.45], [0.4, 0.45]])  # right
    add_quad(vertices=[[-1, 1, 1], [-1, 1, -1], [-1, -1, -1], [-1, -1, 1]],
             uvs=[[0.4, 0.4], [0.5, 0.4], [0.5, 0.5], [0.4, 0.5]])  # left

    add_quad(vertices=[[-1, 1, -1], [1, 1, -1], [1, 1, 1], [-1, 1, 1]],
             uvs=[[0, 0], [2, 0], [2, 2], [0, 2]])  # top
    add_quad(vertices=[[-1, -1, -1], [1, -1, -1], [1, -1, 1], [-1, -1, 1]],
             uvs=[[0, 0], [2, 0], [2, 2], [0, 2]])  # bottom

    cube_vertices_object = np.asarray(cube_vertices_object, np.float32)
    cube_uvs = np.asarray(cube_uvs, np.float32)

    # Load the texture image
    texture = tf.constant(
        imageio.imread(os.path.dirname(__file__) + '/cat.jpg'),
        dtype=tf.float32) / 255.

    # Convert vertices to homogeneous coordinates
    cube_vertices_object = tf.concat(
        [cube_vertices_object,
         tf.ones_like(cube_vertices_object[:, -1:])],
        axis=1)

    # Transform vertices from object to world space, by rotating around the vertical axis
    cube_vertices_world = tf.matmul(cube_vertices_object,
                                    matrices.rodrigues([0., 0.6, 0.]))

    # Calculate face normals
    cube_normals_world = lighting.vertex_normals(cube_vertices_world,
                                                 cube_faces)

    # Transform vertices from world to camera space; note that the camera points along the negative-z axis in camera space
    view_matrix = matrices.compose(
        matrices.translation([0., -2., -3.2]),  # translate it away from the camera
        matrices.rodrigues([-0.5, 0., 0.])  # tilt the view downwards
    )
    cube_vertices_camera = tf.matmul(cube_vertices_world, view_matrix)

    # Transform vertices from camera to clip space
    projection_matrix = matrices.perspective_projection(
        near=0.1, far=20., right=0.1, aspect=float(frame_height) / frame_width)
    cube_vertices_clip = tf.matmul(cube_vertices_camera, projection_matrix)

    # Render the G-buffer channels (mask, UVs, and normals at each pixel) needed for deferred shading
    gbuffer_mask = dirt.rasterise(
        vertices=cube_vertices_clip,
        faces=cube_faces,
        vertex_colors=tf.ones_like(cube_vertices_object[:, :1]),
        background=tf.zeros([frame_height, frame_width, 1]),
        width=frame_width,
        height=frame_height,
        channels=1)[..., 0]
    background_value = -1.e4
    gbuffer_vertex_uvs = dirt.rasterise(
        vertices=cube_vertices_clip,
        faces=cube_faces,
        vertex_colors=tf.concat(
            [cube_uvs, tf.zeros_like(cube_uvs[:, :1])], axis=1),
        background=tf.ones([frame_height, frame_width, 3]) * background_value,
        width=frame_width,
        height=frame_height,
        channels=3)[..., :2]
    gbuffer_vertex_normals_world = dirt.rasterise(
        vertices=cube_vertices_clip,
        faces=cube_faces,
        vertex_colors=cube_normals_world,
        background=tf.ones([frame_height, frame_width, 3]) * background_value,
        width=frame_width,
        height=frame_height,
        channels=3)

    # Dilate the normals and UVs to ensure correct gradients on the silhouette
    gbuffer_mask = gbuffer_mask[:, :, None]
    gbuffer_vertex_normals_world_dilated = tf.nn.max_pool(
        gbuffer_vertex_normals_world[None, ...],
        ksize=[1, 3, 3, 1],
        strides=[1, 1, 1, 1],
        padding='SAME')[0]
    gbuffer_vertex_normals_world = gbuffer_vertex_normals_world * gbuffer_mask + gbuffer_vertex_normals_world_dilated * (
        1. - gbuffer_mask)
    gbuffer_vertex_uvs_dilated = tf.nn.max_pool(gbuffer_vertex_uvs[None, ...],
                                                ksize=[1, 3, 3, 1],
                                                strides=[1, 1, 1, 1],
                                                padding='SAME')[0]
    gbuffer_vertex_uvs = gbuffer_vertex_uvs * gbuffer_mask + gbuffer_vertex_uvs_dilated * (
        1. - gbuffer_mask)

    # Calculate the colour buffer, by sampling the texture according to the rasterised UVs
    gbuffer_colours = gbuffer_mask * sample_texture(
        texture, uvs_to_pixel_indices(gbuffer_vertex_uvs,
                                      tf.shape(texture)[:2]))

    # Calculate a simple grey ambient lighting component
    ambient_contribution = gbuffer_colours * [0.4, 0.4, 0.4]

    # Calculate a diffuse (Lambertian) lighting component
    light_direction = unit([1., -0.3, -0.5])
    diffuse_contribution = lighting.diffuse_directional(
        tf.reshape(gbuffer_vertex_normals_world, [-1, 3]),
        tf.reshape(gbuffer_colours, [-1, 3]),
        light_direction,
        light_color=[0.6, 0.6, 0.6],
        double_sided=True)
    diffuse_contribution = tf.reshape(diffuse_contribution,
                                      [frame_height, frame_width, 3])

    # Final pixels are given by combining the ambient and diffuse components
    pixels = diffuse_contribution + ambient_contribution

    session = tf.Session(config=tf.ConfigProto(gpu_options=tf.GPUOptions(
        allow_growth=True)))
    with session.as_default():

        pixels_eval = pixels.eval()
        imageio.imsave('textured.jpg', (pixels_eval * 255).astype(np.uint8))
Example #14
def main():

    # Build the scene geometry, which is just an axis-aligned cube centred at the origin in world space
    # We replicate vertices that are shared, so normals are effectively per-face instead of smoothed
    cube_vertices_object, cube_faces = build_cube()
    cube_vertices_object = tf.constant(cube_vertices_object, dtype=tf.float32)
    cube_vertices_object, cube_faces = lighting.split_vertices_by_face(cube_vertices_object, cube_faces)
    cube_vertex_colors = tf.ones_like(cube_vertices_object)

    # Convert vertices to homogeneous coordinates
    cube_vertices_object = tf.concat([
        cube_vertices_object,
        tf.ones_like(cube_vertices_object[:, -1:])
    ], axis=1)

    # Transform vertices from object to world space, by rotating around the vertical axis
    cube_vertices_world = tf.matmul(cube_vertices_object, matrices.rodrigues([0., 0.5, 0.]))

    # Calculate face normals; pre_split implies that no faces share a vertex
    cube_normals_world = lighting.vertex_normals_pre_split(cube_vertices_world, cube_faces)

    # Transform vertices from world to camera space; note that the camera points along the negative-z axis in camera space
    view_matrix = matrices.compose(
        matrices.translation([0., -1.5, -3.5]),  # translate it away from the camera
        matrices.rodrigues([-0.3, 0., 0.])  # tilt the view downwards
    )
    cube_vertices_camera = tf.matmul(cube_vertices_world, view_matrix)

    # Transform vertices from camera to clip space
    projection_matrix = matrices.perspective_projection(near=0.1, far=20., right=0.1, aspect=float(frame_height) / frame_width)
    cube_vertices_clip = tf.matmul(cube_vertices_camera, projection_matrix)

    # The following function is applied to the G-buffer, which is a multi-channel image containing all the vertex attributes.
    # It uses this to calculate the shading at each pixel, hence their final intensities
    def shader_fn(gbuffer, view_matrix, light_direction):

        # Unpack the different attributes from the G-buffer
        mask = gbuffer[:, :, :1]
        positions = gbuffer[:, :, 1:4]
        unlit_colors = gbuffer[:, :, 4:7]
        normals = gbuffer[:, :, 7:]

        # Calculate a simple grey ambient lighting component
        ambient_contribution = unlit_colors * [0.2, 0.2, 0.2]

        # Calculate a red diffuse (Lambertian) lighting component
        diffuse_contribution = lighting.diffuse_directional(
            tf.reshape(normals, [-1, 3]),
            tf.reshape(unlit_colors, [-1, 3]),
            light_direction, light_color=[1., 0., 0.], double_sided=False
        )
        diffuse_contribution = tf.reshape(diffuse_contribution, [frame_height, frame_width, 3])

        # Calculate a white specular (Phong) lighting component
        camera_position_world = tf.linalg.inv(view_matrix)[3, :3]
        specular_contribution = lighting.specular_directional(
            tf.reshape(positions, [-1, 3]),
            tf.reshape(normals, [-1, 3]),
            tf.reshape(unlit_colors, [-1, 3]),
            light_direction, light_color=[1., 1., 1.],
            camera_position=camera_position_world,
            shininess=6., double_sided=False
        )
        specular_contribution = tf.reshape(specular_contribution, [frame_height, frame_width, 3])

        # The final pixel intensities inside the shape are given by combining the three lighting components;
        # outside the shape, they are set to a uniform background color. We clip the final values as the specular
        # component saturates some pixels
        pixels = tf.clip_by_value(
            (diffuse_contribution + specular_contribution + ambient_contribution) * mask + [0., 0., 0.3] * (1. - mask),
            0., 1.
        )

        return pixels

    # Render the G-buffer channels (mask, vertex positions, vertex colours, and normals at each pixel), then perform
    # the deferred shading calculation. In general, any tensor required by shader_fn and wrt which we need derivatives
    # should be included in shader_additional_inputs; although in this example they are constant, we pass the view
    # matrix and lighting direction through this route as an illustration
    light_direction = tf.linalg.l2_normalize([1., -0.3, -0.5])
    pixels = dirt.rasterise_deferred(
        vertices=cube_vertices_clip,
        vertex_attributes=tf.concat([
            tf.ones_like(cube_vertices_object[:, :1]),  # mask
            cube_vertices_world[:, :3],  # vertex positions
            cube_vertex_colors,  # vertex colors
            cube_normals_world  # normals
        ], axis=1),
        faces=cube_faces,
        background_attributes=tf.zeros([frame_height, frame_width, 10]),
        shader_fn=shader_fn,
        shader_additional_inputs=[view_matrix, light_direction]
    )

    pixels = tf.cast(pixels * 255, tf.uint8)

    session = tf.compat.v1.Session(config=tf.compat.v1.ConfigProto(gpu_options=tf.compat.v1.GPUOptions(allow_growth=True)))
    with session.as_default():
        pixels_eval = pixels.eval()  # run the graph; Image.fromarray needs a numpy array, not a tensor
        img = Image.fromarray(pixels_eval)
        img.save("test_def.png")
Example #15
def main():

    # Build the scene geometry, which is just an axis-aligned cube centred at the origin in world space
    # We replicate vertices that are shared, so normals are effectively per-face instead of smoothed
    cube_vertices_object, cube_faces = build_cube()
    cube_vertices_object = tf.constant(cube_vertices_object, dtype=tf.float32)
    cube_vertices_object, cube_faces = lighting.split_vertices_by_face(
        cube_vertices_object, cube_faces)
    cube_vertex_colors = tf.ones_like(cube_vertices_object)

    # Convert vertices to homogeneous coordinates
    cube_vertices_object = tf.concat(
        [cube_vertices_object,
         tf.ones_like(cube_vertices_object[:, -1:])],
        axis=1)

    # Transform vertices from object to world space, by rotating around the vertical axis
    cube_vertices_world = tf.matmul(cube_vertices_object,
                                    matrices.rodrigues([0., 0.5, 0.]))

    # Calculate face normals; pre_split implies that no faces share a vertex
    cube_normals_world = lighting.vertex_normals_pre_split(
        cube_vertices_world, cube_faces)

    # Transform vertices from world to camera space; note that the camera points along the negative-z axis in camera space
    view_matrix = matrices.compose(
        matrices.translation([0., -1.5, -3.5]),  # translate it away from the camera
        matrices.rodrigues([-0.3, 0., 0.])  # tilt the view downwards
    )
    cube_vertices_camera = tf.matmul(cube_vertices_world, view_matrix)

    # Transform vertices from camera to clip space
    projection_matrix = matrices.perspective_projection(
        near=0.1, far=20., right=0.1, aspect=float(frame_height) / frame_width)
    cube_vertices_clip = tf.matmul(cube_vertices_camera, projection_matrix)

    # Render the G-buffer channels (vertex position, colour and normal at each pixel) needed for deferred shading
    gbuffer_vertex_positions_world = dirt.rasterise(
        vertices=cube_vertices_clip,
        faces=cube_faces,
        vertex_colors=cube_vertices_world[:, :3],
        background=tf.ones([frame_height, frame_width, 3]) * float('-inf'),
        width=frame_width,
        height=frame_height,
        channels=3)
    gbuffer_vertex_colours_world = dirt.rasterise(
        vertices=cube_vertices_clip,
        faces=cube_faces,
        vertex_colors=cube_vertex_colors,
        background=tf.zeros([frame_height, frame_width, 3]),
        width=frame_width,
        height=frame_height,
        channels=3)
    gbuffer_vertex_normals_world = dirt.rasterise(
        vertices=cube_vertices_clip,
        faces=cube_faces,
        vertex_colors=cube_normals_world,
        background=tf.ones([frame_height, frame_width, 3]) * float('-inf'),
        width=frame_width,
        height=frame_height,
        channels=3)

    # Dilate the position and normal channels at the silhouette boundary; this doesn't affect the image, but
    # ensures correct gradients for pixels just outside the silhouette
    background_mask = tf.cast(
        tf.equal(gbuffer_vertex_positions_world, float('-inf')), tf.float32)
    gbuffer_vertex_positions_world_dilated = tf.nn.max_pool(
        gbuffer_vertex_positions_world[None, ...],
        ksize=[1, 3, 3, 1],
        strides=[1, 1, 1, 1],
        padding='SAME')[0]
    gbuffer_vertex_positions_world = gbuffer_vertex_positions_world * (
        1. - background_mask
    ) + gbuffer_vertex_positions_world_dilated * background_mask
    gbuffer_vertex_normals_world_dilated = tf.nn.max_pool(
        gbuffer_vertex_normals_world[None, ...],
        ksize=[1, 3, 3, 1],
        strides=[1, 1, 1, 1],
        padding='SAME')[0]
    gbuffer_vertex_normals_world = gbuffer_vertex_normals_world * (
        1. - background_mask
    ) + gbuffer_vertex_normals_world_dilated * background_mask

    # Calculate a simple grey ambient lighting component
    ambient_contribution = gbuffer_vertex_colours_world * [0.2, 0.2, 0.2]

    # Calculate a red diffuse (Lambertian) lighting component
    light_direction = unit([1., -0.3, -0.5])
    diffuse_contribution = lighting.diffuse_directional(
        tf.reshape(gbuffer_vertex_normals_world, [-1, 3]),
        tf.reshape(gbuffer_vertex_colours_world, [-1, 3]),
        light_direction,
        light_color=[1., 0., 0.],
        double_sided=False)
    diffuse_contribution = tf.reshape(diffuse_contribution,
                                      [frame_height, frame_width, 3])

    # Calculate a white specular (Phong) lighting component
    camera_position_world = tf.matrix_inverse(view_matrix)[3, :3]
    specular_contribution = lighting.specular_directional(
        tf.reshape(gbuffer_vertex_positions_world, [-1, 3]),
        tf.reshape(gbuffer_vertex_normals_world, [-1, 3]),
        tf.reshape(gbuffer_vertex_colours_world, [-1, 3]),
        light_direction,
        light_color=[1., 1., 1.],
        camera_position=camera_position_world,
        shininess=6.,
        double_sided=False)
    specular_contribution = tf.reshape(specular_contribution,
                                       [frame_height, frame_width, 3])

    # Final pixels are given by combining ambient, diffuse, and specular components
    pixels = diffuse_contribution + specular_contribution + ambient_contribution

    session = tf.Session()
    with session.as_default():

        pixels_eval = pixels.eval()
        cv2.imshow('deferred.py', pixels_eval[:, :, (2, 1, 0)])
        cv2.waitKey(0)
Example #16
    def __call__(self,
                 verts,
                 trans,
                 cam=None,
                 img=None,
                 do_alpha=False,
                 far=None,
                 near=None,
                 color_id=0,
                 img_size=None,
                 seg_parts=13774):
        """
        cam is 3D [f, px, py]
        """
        '''if img is not None:
            h, w = img.shape[:2]
        elif img_size is not None:
            h = img_size[0]
            w = img_size[1]
        else:
            h = self.h
            w = self.w

        if cam is None:
            cam = [self.flength, w / 2., h / 2.]

        use_cam = ProjectPoints(
            f=cam[0] * np.ones(2),
            rt=np.zeros(3),
            t=np.zeros(3),
            k=np.zeros(5),
            c=cam[1:3])

        if near is None:
            near = np.maximum(np.min(verts[:, 2]) - 25, 0.1)
        if far is None:
            far = np.maximum(np.max(verts[:, 2]) + 25, 25)

        imtmp = render_model(
            verts,
            self.faces,
            w,
            h,
            use_cam,
            do_alpha=do_alpha,
            img=img,
            far=far,
            near=near,
            color_id=color_id)'''

        #print (self.textura.shape)
        
        frame_width, frame_height = self.w, self.h
        cube_vertices_object = verts[0, :, :]
        cube_faces = tf.constant(self.faces, dtype=tf.int64)
        #cube_vertices_object = tf.constant(cube_vertices_object, dtype=tf.float32)
        cube_vertices_object, cube_faces = lighting.split_vertices_by_face(cube_vertices_object, cube_faces)
        #cube_vertex_colors = tf.ones_like(cube_vertices_object)
        cube_vertex_colors = tf.constant(self.textura, dtype=tf.float32)
        # Convert vertices to homogeneous coordinates
        cube_vertices_object = tf.concat(
            [cube_vertices_object, tf.ones_like(cube_vertices_object[:, -1:])], axis=1)

        # (object-to-world rotation is skipped in this renderer)

        # Calculate face normals; pre_split implies that no faces share a vertex
        cube_normals_world = lighting.vertex_normals_pre_split(cube_vertices_object, cube_faces)

        # Transform vertices from world to camera space; note that the camera points along the negative-z axis in camera space
        view_matrix = matrices.translation(trans)

        cube_vertices_world = tf.matmul(cube_vertices_object, view_matrix)

        cube_vertices_camera = tf.matmul(cube_vertices_world, matrices.rodrigues([np.pi, 0.0, 0.]))

        # Transform vertices from camera to clip space
        projection_matrix = matrices.perspective_projection(near=self.near, far=self.far, right=self.right, aspect=float(frame_height) / frame_width)
        cube_vertices_clip = tf.matmul(cube_vertices_camera, projection_matrix)

        # Calculate lighting, as combination of diffuse and ambient
        #vertex_colors_lit = lighting.diffuse_directional(
        #   cube_normals_world, cube_vertex_colors,
        #   light_direction=[1., 0., 0.], light_color=[1., 1., 1.]
        #) * 0.8 + cube_vertex_colors * 0.2

        # seg_parts limits how many faces are rasterised (e.g. 2987 for a partial segmentation)
        pixels = dirt.rasterise(
            vertices=cube_vertices_clip,
            faces=cube_faces[:seg_parts,:],
            vertex_colors=cube_vertex_colors,
            background=tf.zeros([frame_height, frame_width, 3]),
            width=frame_width, height=frame_height, channels=3
        )
       
        return pixels  # (optionally also return cube_vertices_object and cube_faces to dump the colours)