Example #1
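The examples below all assume roughly the same TF1-style setup, which the original snippets do not show. A minimal sketch of that shared setup; the exact module paths and the frame dimensions are assumptions:

# Shared setup assumed by the examples (not shown in the original snippets)
import numpy as np
import tensorflow as tf
import cv2
from PIL import Image

import dirt
from dirt import matrices, lighting

# The snippets reference these as module-level constants; values assumed here
frame_width, frame_height = 640, 480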
    def transform_vertices(self, vertices, pose, faces):
        # Convert vertices to homogeneous coordinates
        cube_vertices_object = tf.concat(
            [vertices, tf.ones_like(vertices[:, -1:])], axis=1)
        cube_normals_world = lighting.vertex_normals_pre_split(
            cube_vertices_object, faces)

        # Negate the y- and z-axes to convert the pose from an OpenCV-style
        # camera convention to OpenGL's (camera looking along negative z)
        transform_gl = tf.constant([[1, 0, 0], [0, -1, 0], [0, 0, -1]],
                                   tf.float32)
        tensor_rot = tf.matmul(transform_gl, pose[:3, :3])

        # Rebuild the flattened 4x4 pose with the flipped rotation, negating
        # the y- and z-components of the translation to match
        rot_list = tf.unstack(tf.reshape(tensor_rot, [-1]))
        pose_list = tf.unstack(tf.reshape(pose, [-1]))
        pose_list[0:3] = rot_list[0:3]
        pose_list[4:7] = rot_list[3:6]
        pose_list[8:11] = rot_list[6:9]
        pose_list[7] = -pose_list[7]
        pose_list[11] = -pose_list[11]
        cam_pose = tf.reshape(tf.stack(pose_list), (4, 4))

        # DIRT right-multiplies row vectors, so transpose the view matrix
        view_matrix = tf.transpose(cam_pose)
        cube_vertices_camera = tf.matmul(cube_vertices_object, view_matrix)

        # 3-D points under the original (unflipped) pose: R @ v + t, shape N x 3
        cube_vertices_3d = tf.transpose(
            tf.matmul(pose[:3, :3], tf.transpose(cube_vertices_object[:, :3]))
        ) + tf.transpose(pose[:3, 3:4])

        # Transform vertices from camera to clip space
        cube_vertices_clip = tf.matmul(cube_vertices_camera,
                                       self.projection_matrix)
        return cube_vertices_object, cube_vertices_3d, cube_vertices_clip, cube_normals_world
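The unstack/stack block above is equivalent to left-multiplying the pose by a fixed axis-flip matrix: it negates the y- and z-rows of the rotation and translation together, then transposes for DIRT's row-vector convention. A more compact sketch, assuming pose is a row-major 4x4 matrix as in the snippet:

def opencv_pose_to_gl_view_matrix(pose):
    # Hypothetical equivalent of the unstack/stack block above: negate the
    # y- and z-rows of the pose, then transpose for row-vector multiplication
    flip_yz = tf.constant([[1., 0., 0., 0.],
                           [0., -1., 0., 0.],
                           [0., 0., -1., 0.],
                           [0., 0., 0., 1.]], tf.float32)
    return tf.transpose(tf.matmul(flip_yz, pose))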
Example #2
def main():

    # Build the scene geometry, which is just an axis-aligned cube centred at the origin in world space
    # We replicate vertices that are shared, so normals are effectively per-face instead of smoothed
    cube_vertices_object, cube_faces = build_cube()
    cube_vertices_object = tf.constant(cube_vertices_object, dtype=tf.float32)
    cube_vertices_object, cube_faces = lighting.split_vertices_by_face(
        cube_vertices_object, cube_faces)
    cube_vertex_colors = tf.ones_like(cube_vertices_object)

    # Convert vertices to homogeneous coordinates
    cube_vertices_object = tf.concat(
        [cube_vertices_object,
         tf.ones_like(cube_vertices_object[:, -1:])],
        axis=1)

    # Transform vertices from object to world space, by rotating around the vertical axis
    cube_vertices_world = tf.matmul(cube_vertices_object,
                                    matrices.rodrigues([0., 0.5, 0.]))

    # Calculate face normals; pre_split implies that no faces share a vertex
    cube_normals_world = lighting.vertex_normals_pre_split(
        cube_vertices_world, cube_faces)

    # Transform vertices from world to camera space; note that the camera points along the negative-z axis in camera space
    view_matrix = matrices.compose(
        matrices.translation([0., -1.5, -3.5]),  # translate it away from the camera
        matrices.rodrigues([-0.3, 0., 0.])  # tilt the view downwards
    )
    cube_vertices_camera = tf.matmul(cube_vertices_world, view_matrix)

    # Transform vertices from camera to clip space
    projection_matrix = matrices.perspective_projection(
        near=0.1, far=20., right=0.1, aspect=float(frame_height) / frame_width)
    cube_vertices_clip = tf.matmul(cube_vertices_camera, projection_matrix)

    # Calculate lighting, as combination of diffuse and ambient
    vertex_colors_lit = lighting.diffuse_directional(
        cube_normals_world,
        cube_vertex_colors,
        light_direction=[1., 0., 0.],
        light_color=[1., 1., 1.]) * 0.8 + cube_vertex_colors * 0.2

    pixels = dirt.rasterise(vertices=cube_vertices_clip,
                            faces=cube_faces,
                            vertex_colors=vertex_colors_lit,
                            background=tf.zeros([frame_height, frame_width, 3]),
                            width=frame_width,
                            height=frame_height,
                            channels=3)

    session = tf.Session()
    with session.as_default():

        pixels_eval = pixels.eval()
        cv2.imshow('simple.py', pixels_eval[:, :, (2, 1, 0)])  # RGB -> BGR for OpenCV
        cv2.waitKey(0)
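build_cube itself is not shown in these snippets. A plausible minimal sketch: eight corners of a side-2 cube centred at the origin, plus twelve triangles (two per face); the winding order here is illustrative, and the real helper may differ:

def build_cube():
    # Hypothetical stand-in for the build_cube used above
    vertices = np.array([[x, y, z]
                         for z in (-1., 1.)
                         for y in (-1., 1.)
                         for x in (-1., 1.)], dtype=np.float32)
    faces = [
        [0, 1, 3], [0, 3, 2],  # z = -1 face
        [4, 6, 7], [4, 7, 5],  # z = +1 face
        [0, 2, 6], [0, 6, 4],  # x = -1 face
        [1, 5, 7], [1, 7, 3],  # x = +1 face
        [0, 4, 5], [0, 5, 1],  # y = -1 face
        [2, 3, 7], [2, 7, 6],  # y = +1 face
    ]
    return vertices, faces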
Example #3
def main():

    # Build the scene geometry, which is just an axis-aligned cube centred at the origin in world space
    # We replicate vertices that are shared, so normals are effectively per-face instead of smoothed
    cube_vertices_object, cube_faces = build_cube()
    cube_vertices_object = tf.constant(cube_vertices_object, dtype=tf.float32)
    cube_vertices_object, cube_faces = lighting.split_vertices_by_face(cube_vertices_object, cube_faces)
    cube_vertex_colors = tf.ones_like(cube_vertices_object)

    # Convert vertices to homogeneous coordinates
    cube_vertices_object = tf.concat([
        cube_vertices_object,
        tf.ones_like(cube_vertices_object[:, -1:])
    ], axis=1)

    # Transform vertices from object to world space, by rotating around the vertical axis
    cube_vertices_world = tf.matmul(cube_vertices_object, matrices.rodrigues([0., 0.5, 0.]))

    # Calculate face normals; pre_split implies that no faces share a vertex
    cube_normals_world = lighting.vertex_normals_pre_split(cube_vertices_world, cube_faces)

    # Transform vertices from world to camera space; note that the camera points along the negative-z axis in camera space
    view_matrix = matrices.compose(
        matrices.translation([0., -1.5, -3.5]),  # translate it away from the camera
        matrices.rodrigues([-0.3, 0., 0.])  # tilt the view downwards
    )
    cube_vertices_camera = tf.matmul(cube_vertices_world, view_matrix)

    # Transform vertices from camera to clip space
    projection_matrix = matrices.perspective_projection(near=0.1, far=20., right=0.1, aspect=float(frame_height) / frame_width)
    cube_vertices_clip = tf.matmul(cube_vertices_camera, projection_matrix)

    # The following function is applied to the G-buffer, which is a multi-channel image containing all the vertex attributes.
    # It uses these attributes to calculate the shading, and hence the final intensity, at each pixel
    def shader_fn(gbuffer, view_matrix, light_direction):

        # Unpack the different attributes from the G-buffer
        mask = gbuffer[:, :, :1]
        positions = gbuffer[:, :, 1:4]
        unlit_colors = gbuffer[:, :, 4:7]
        normals = gbuffer[:, :, 7:]

        # Calculate a simple grey ambient lighting component
        ambient_contribution = unlit_colors * [0.2, 0.2, 0.2]

        # Calculate a red diffuse (Lambertian) lighting component
        diffuse_contribution = lighting.diffuse_directional(
            tf.reshape(normals, [-1, 3]),
            tf.reshape(unlit_colors, [-1, 3]),
            light_direction, light_color=[1., 0., 0.], double_sided=False
        )
        diffuse_contribution = tf.reshape(diffuse_contribution, [frame_height, frame_width, 3])

        # Calculate a white specular (Phong) lighting component
        camera_position_world = tf.linalg.inv(view_matrix)[3, :3]
        specular_contribution = lighting.specular_directional(
            tf.reshape(positions, [-1, 3]),
            tf.reshape(normals, [-1, 3]),
            tf.reshape(unlit_colors, [-1, 3]),
            light_direction, light_color=[1., 1., 1.],
            camera_position=camera_position_world,
            shininess=6., double_sided=False
        )
        specular_contribution = tf.reshape(specular_contribution, [frame_height, frame_width, 3])

        # The final pixel intensities inside the shape are given by combining the three lighting components;
        # outside the shape, they are set to a uniform background color. We clip the final values as the specular
        # component saturates some pixels
        pixels = tf.clip_by_value(
            (diffuse_contribution + specular_contribution + ambient_contribution) * mask + [0., 0., 0.3] * (1. - mask),
            0., 1.
        )

        return pixels

    # Render the G-buffer channels (mask, vertex positions, vertex colours, and normals at each pixel), then perform
    # the deferred shading calculation. In general, any tensor required by shader_fn and wrt which we need derivatives
    # should be included in shader_additional_inputs; although in this example they are constant, we pass the view
    # matrix and lighting direction through this route as an illustration
    light_direction = tf.linalg.l2_normalize([1., -0.3, -0.5])
    pixels = dirt.rasterise_deferred(
        vertices=cube_vertices_clip,
        vertex_attributes=tf.concat([
            tf.ones_like(cube_vertices_object[:, :1]),  # mask
            cube_vertices_world[:, :3],  # vertex positions
            cube_vertex_colors,  # vertex colors
            cube_normals_world  # normals
        ], axis=1),
        faces=cube_faces,
        background_attributes=tf.zeros([frame_height, frame_width, 10]),
        shader_fn=shader_fn,
        shader_additional_inputs=[view_matrix, light_direction]
    )

    pixels = tf.cast(pixels * 255, tf.uint8)

    session = tf.compat.v1.Session(config=tf.compat.v1.ConfigProto(
        gpu_options=tf.compat.v1.GPUOptions(allow_growth=True)))
    with session.as_default():
        pixels_eval = pixels.eval()  # evaluate the graph into a uint8 numpy array
        Image.fromarray(pixels_eval).save("test_def.png")
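The slicing at the top of shader_fn fixes the G-buffer layout: ten channels packed as mask (1), world-space position (3), unlit colour (3), and normal (3), matching both the vertex_attributes concat and the 10-channel background_attributes. A hypothetical helper that makes this layout explicit:

def unpack_gbuffer(gbuffer):
    # Hypothetical helper mirroring the 10-channel layout used above
    return {
        'mask': gbuffer[..., 0:1],      # 1. inside the rasterised shape, 0. outside
        'position': gbuffer[..., 1:4],  # world-space position at each pixel
        'color': gbuffer[..., 4:7],     # unlit colour at each pixel
        'normal': gbuffer[..., 7:10],   # world-space normal at each pixel
    }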
Example #4
def main():

    # Build the scene geometry, which is just an axis-aligned cube centred at the origin in world space
    # We replicate vertices that are shared, so normals are effectively per-face instead of smoothed
    cube_vertices_object, cube_faces = build_cube()
    cube_vertices_object = tf.constant(cube_vertices_object, dtype=tf.float32)
    cube_vertices_object, cube_faces = lighting.split_vertices_by_face(
        cube_vertices_object, cube_faces)
    cube_vertex_colors = tf.ones_like(cube_vertices_object)

    # Convert vertices to homogeneous coordinates
    cube_vertices_object = tf.concat(
        [cube_vertices_object,
         tf.ones_like(cube_vertices_object[:, -1:])],
        axis=1)

    # Transform vertices from object to world space, by rotating around the vertical axis
    cube_vertices_world = tf.matmul(cube_vertices_object,
                                    matrices.rodrigues([0., 0.5, 0.]))

    # Calculate face normals; pre_split implies that no faces share a vertex
    cube_normals_world = lighting.vertex_normals_pre_split(
        cube_vertices_world, cube_faces)

    # Transform vertices from world to camera space; note that the camera points along the negative-z axis in camera space
    view_matrix = matrices.compose(
        matrices.translation([0., -1.5, -3.5]),  # translate it away from the camera
        matrices.rodrigues([-0.3, 0., 0.])  # tilt the view downwards
    )
    cube_vertices_camera = tf.matmul(cube_vertices_world, view_matrix)

    # Transform vertices from camera to clip space
    projection_matrix = matrices.perspective_projection(
        near=0.1, far=20., right=0.1, aspect=float(frame_height) / frame_width)
    cube_vertices_clip = tf.matmul(cube_vertices_camera, projection_matrix)

    # Render the G-buffer channels (vertex position, colour and normal at each pixel) needed for deferred shading
    gbuffer_vertex_positions_world = dirt.rasterise(
        vertices=cube_vertices_clip,
        faces=cube_faces,
        vertex_colors=cube_vertices_world[:, :3],
        background=tf.ones([frame_height, frame_width, 3]) * float('-inf'),
        width=frame_width,
        height=frame_height,
        channels=3)
    gbuffer_vertex_colours_world = dirt.rasterise(
        vertices=cube_vertices_clip,
        faces=cube_faces,
        vertex_colors=cube_vertex_colors,
        background=tf.zeros([frame_height, frame_width, 3]),
        width=frame_width,
        height=frame_height,
        channels=3)
    gbuffer_vertex_normals_world = dirt.rasterise(
        vertices=cube_vertices_clip,
        faces=cube_faces,
        vertex_colors=cube_normals_world,
        background=tf.ones([frame_height, frame_width, 3]) * float('-inf'),
        width=frame_width,
        height=frame_height,
        channels=3)

    # Dilate the position and normal channels at the silhouette boundary; this doesn't affect the image, but
    # ensures correct gradients for pixels just outside the silhouette
    background_mask = tf.cast(
        tf.equal(gbuffer_vertex_positions_world, float('-inf')), tf.float32)
    gbuffer_vertex_positions_world_dilated = tf.nn.max_pool(
        gbuffer_vertex_positions_world[None, ...],
        ksize=[1, 3, 3, 1],
        strides=[1, 1, 1, 1],
        padding='SAME')[0]
    gbuffer_vertex_positions_world = gbuffer_vertex_positions_world * (
        1. - background_mask
    ) + gbuffer_vertex_positions_world_dilated * background_mask
    gbuffer_vertex_normals_world_dilated = tf.nn.max_pool(
        gbuffer_vertex_normals_world[None, ...],
        ksize=[1, 3, 3, 1],
        strides=[1, 1, 1, 1],
        padding='SAME')[0]
    gbuffer_vertex_normals_world = gbuffer_vertex_normals_world * (
        1. - background_mask
    ) + gbuffer_vertex_normals_world_dilated * background_mask

    # Calculate a simple grey ambient lighting component
    ambient_contribution = gbuffer_vertex_colours_world * [0.2, 0.2, 0.2]

    # Calculate a red diffuse (Lambertian) lighting component
    light_direction = unit([1., -0.3, -0.5])
    diffuse_contribution = lighting.diffuse_directional(
        tf.reshape(gbuffer_vertex_normals_world, [-1, 3]),
        tf.reshape(gbuffer_vertex_colours_world, [-1, 3]),
        light_direction,
        light_color=[1., 0., 0.],
        double_sided=False)
    diffuse_contribution = tf.reshape(diffuse_contribution,
                                      [frame_height, frame_width, 3])

    # Calculate a white specular (Phong) lighting component
    camera_position_world = tf.linalg.inv(view_matrix)[3, :3]
    specular_contribution = lighting.specular_directional(
        tf.reshape(gbuffer_vertex_positions_world, [-1, 3]),
        tf.reshape(gbuffer_vertex_normals_world, [-1, 3]),
        tf.reshape(gbuffer_vertex_colours_world, [-1, 3]),
        light_direction,
        light_color=[1., 1., 1.],
        camera_position=camera_position_world,
        shininess=6.,
        double_sided=False)
    specular_contribution = tf.reshape(specular_contribution,
                                       [frame_height, frame_width, 3])

    # Final pixels are given by combining ambient, diffuse, and specular components
    pixels = diffuse_contribution + specular_contribution + ambient_contribution

    session = tf.Session()
    with session.as_default():

        pixels_eval = pixels.eval()
        cv2.imshow('deferred.py', pixels_eval[:, :, (2, 1, 0)])  # RGB -> BGR for OpenCV
        cv2.waitKey(0)
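unit is not defined in this snippet; presumably it just normalises a vector to unit length, along these lines:

def unit(vector):
    # Assumed helper: scale a vector to unit length
    vector = np.asarray(vector, np.float32)
    return vector / np.linalg.norm(vector)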
Example #5
    def __call__(self,
                 verts,
                 trans,
                 cam=None,
                 img=None,
                 do_alpha=False,
                 far=None,
                 near=None,
                 color_id=0,
                 img_size=None,
                 seg_parts=13774):
        """
        cam is 3D [f, px, py]
        """
        frame_width, frame_height = self.w, self.h
        cube_vertices_object = verts[0, :, :]
        cube_faces = tf.constant(self.faces, dtype=tf.int64)
        cube_vertices_object, cube_faces = lighting.split_vertices_by_face(cube_vertices_object, cube_faces)
        cube_vertex_colors = tf.constant(self.textura, dtype=tf.float32)

        # Convert vertices to homogeneous coordinates
        cube_vertices_object = tf.concat([cube_vertices_object, tf.ones_like(cube_vertices_object[:, -1:])], axis=1)

        # Calculate face normals; pre_split implies that no faces share a vertex
        cube_normals_world = lighting.vertex_normals_pre_split(cube_vertices_object, cube_faces)

        # Transform vertices from object to camera space by translating; note that the camera points along the negative-z axis in camera space
        view_matrix = matrices.translation(trans)
        cube_vertices_world = tf.matmul(cube_vertices_object, view_matrix)

        # Rotate 180 degrees about the x-axis (negating y and z) to match OpenGL's camera convention
        cube_vertices_camera = tf.matmul(cube_vertices_world, matrices.rodrigues([np.pi, 0.0, 0.]))

        # Transform vertices from camera to clip space
        projection_matrix = matrices.perspective_projection(
            near=self.near, far=self.far, right=self.right,
            aspect=float(frame_height) / frame_width)
        cube_vertices_clip = tf.matmul(cube_vertices_camera, projection_matrix)

        # Lighting (a diffuse + ambient combination) is left disabled here; the texture colours are rasterised unlit

        # Rasterise only the first seg_parts faces (an earlier configuration used 2987)
        pixels = dirt.rasterise(
            vertices=cube_vertices_clip,
            faces=cube_faces[:seg_parts, :],
            vertex_colors=cube_vertex_colors,
            background=tf.zeros([frame_height, frame_width, 3]),
            width=frame_width, height=frame_height, channels=3
        )

        return pixels  # optionally also return cube_vertices_object and cube_faces to dump the colour data
def get_pre_split_vertex_normals(vertices, faces):
    """ Get Pre-split vertex normals, computationlly slightly more efficient. [Vtx, Face_def] -> [Vtx_normals] """
    norms_by_vertex = lighting.vertex_normals_pre_split(vertices=vertices,
                                                        faces=faces)

    return norms_by_vertex
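A brief usage sketch, assuming the build_cube helper and imports sketched under the earlier examples:

# Usage sketch: per-vertex normals for a split cube mesh
vertices, faces = build_cube()
vertices = tf.constant(vertices, dtype=tf.float32)
vertices, faces = lighting.split_vertices_by_face(vertices, faces)
normals = get_pre_split_vertex_normals(vertices, faces)  # shape [V, 3], unit length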