import numpy as np
import tensorflow as tf
import cv2

import dirt
import dirt.matrices as matrices
import dirt.lighting as lighting

frame_width, frame_height = 640, 480  # output canvas size assumed by the samples below


# --- simple.py: forward-shaded cube sample ---

def main():

    # Build the scene geometry, which is just an axis-aligned cube centred at the origin in world space
    # We replicate vertices that are shared, so normals are effectively per-face instead of smoothed
    cube_vertices_object, cube_faces = build_cube()
    cube_vertices_object = tf.constant(cube_vertices_object, dtype=tf.float32)
    cube_vertices_object, cube_faces = lighting.split_vertices_by_face(cube_vertices_object, cube_faces)
    cube_vertex_colors = tf.ones_like(cube_vertices_object)

    # Convert vertices to homogeneous coordinates
    cube_vertices_object = tf.concat([
        cube_vertices_object,
        tf.ones_like(cube_vertices_object[:, -1:])
    ], axis=1)

    # Transform vertices from object to world space, by rotating around the vertical axis
    cube_vertices_world = tf.matmul(cube_vertices_object, matrices.rodrigues([0., 0.5, 0.]))

    # Calculate face normals; pre_split implies that no faces share a vertex
    cube_normals_world = lighting.vertex_normals_pre_split(cube_vertices_world, cube_faces)

    # Transform vertices from world to camera space; note that the camera points along the negative-z axis in camera space
    view_matrix = matrices.compose(
        matrices.translation([0., -1.5, -3.5]),  # translate it away from the camera
        matrices.rodrigues([-0.3, 0., 0.])  # tilt the view downwards
    )
    cube_vertices_camera = tf.matmul(cube_vertices_world, view_matrix)

    # Transform vertices from camera to clip space
    projection_matrix = matrices.perspective_projection(near=0.1, far=20., right=0.1, aspect=float(frame_height) / frame_width)
    cube_vertices_clip = tf.matmul(cube_vertices_camera, projection_matrix)

    # Calculate lighting, as combination of diffuse and ambient
    vertex_colors_lit = lighting.diffuse_directional(
        cube_normals_world, cube_vertex_colors,
        light_direction=[1., 0., 0.], light_color=[1., 1., 1.]
    ) * 0.8 + cube_vertex_colors * 0.2

    pixels = dirt.rasterise(
        vertices=cube_vertices_clip,
        faces=cube_faces,
        vertex_colors=vertex_colors_lit,
        background=tf.zeros([frame_height, frame_width, 3]),
        width=frame_width, height=frame_height, channels=3
    )

    session = tf.Session()
    with session.as_default():
        pixels_eval = pixels.eval()
        cv2.imshow('simple.py', pixels_eval[:, :, (2, 1, 0)])  # OpenCV displays BGR, so reverse the channel order
        cv2.waitKey(0)
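# main() above calls build_cube(), which is defined elsewhere in the sample and not part of this
# excerpt. The sketch below is a minimal compatible implementation (using the numpy import above);
# the exact vertex ordering and winding of the original are assumptions, not taken from the source.

def build_cube():
    # Eight corners of an axis-aligned cube with side length 2, centred at the origin
    vertices = np.array([
        [x, y, z]
        for z in [-1., 1.] for y in [-1., 1.] for x in [-1., 1.]
    ], dtype=np.float32)
    # Two triangles per face, indexing into the eight corners above
    faces = [
        [0, 1, 3], [0, 3, 2],  # z = -1 face
        [4, 6, 7], [4, 7, 5],  # z = +1 face
        [0, 4, 5], [0, 5, 1],  # y = -1 face
        [2, 3, 7], [2, 7, 6],  # y = +1 face
        [0, 2, 6], [0, 6, 4],  # x = -1 face
        [1, 5, 7], [1, 7, 3],  # x = +1 face
    ]
    return vertices, faces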
# --- bent-square geometry helper ---

def get_transformed_geometry(translation, rotation, scale):

    # Build bent square in object space, on z = 0 plane
    vertices_object = tf.constant(
        [[-1, -1, 0.], [-1, 1, 0], [1, 1, 0], [1, -1, -1.3]],
        dtype=tf.float32) * square_size / 2
    faces = [[0, 1, 2], [0, 2, 3]]

    # ** we should add an occluding triangle!
    # ** also a non-planar meeting-of-faces

    vertices_object, faces = lighting.split_vertices_by_face(vertices_object, faces)

    # Convert vertices to homogeneous coordinates
    vertices_object = tf.concat([
        vertices_object,
        tf.ones_like(vertices_object[:, -1:])
    ], axis=1)

    # Transform vertices from object to world space, by rotating around the z-axis
    vertices_world = tf.matmul(vertices_object, matrices.rodrigues([0., 0., rotation])) * scale + tf.concat([translation, [0.]], axis=0)

    # Calculate face normals
    normals_world = lighting.vertex_normals(vertices_world, faces)

    # Transform vertices from world to camera space; note that the camera points along the negative-z axis in camera space
    view_matrix = matrices.translation([-0.5, 0., -3.5])  # translate it away from the camera
    vertices_camera = tf.matmul(vertices_world, view_matrix)

    # Transform vertices from camera to clip space
    projection_matrix = matrices.perspective_projection(near=0.1, far=20., right=0.1, aspect=float(canvas_height) / canvas_width)
    vertices_clip = tf.matmul(vertices_camera, projection_matrix)

    vertex_colours = tf.concat([
        tf.ones([3, 3]) * [0.8, 0.5, 0.],
        tf.ones([3, 3]) * [0.5, 0.8, 0.]
    ], axis=0)

    return vertices_clip, faces, normals_world, vertex_colours
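# get_transformed_geometry() relies on module-level constants square_size, canvas_width and
# canvas_height that are not part of this excerpt. The sketch below shows one plausible way to
# rasterise its output; the constant values and the render_square name are illustrative
# assumptions, not from the original sample. Passing tf.Variables instead of the plain floats
# would let an optimiser fit translation/rotation/scale through the rendered image.

square_size = 1.
canvas_width, canvas_height = 128, 128

def render_square():
    vertices_clip, faces, normals_world, vertex_colours = get_transformed_geometry(
        translation=[0., 0., 0.],  # 3D offset; a zero w-component is appended inside the helper
        rotation=0.2,
        scale=1.)
    return dirt.rasterise(
        vertices=vertices_clip,
        faces=faces,
        vertex_colors=vertex_colours,
        background=tf.zeros([canvas_height, canvas_width, 3]),
        width=canvas_width, height=canvas_height, channels=3)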
# --- deferred shading via dirt.rasterise_deferred and a shader_fn ---

from PIL import Image


def main():

    # Build the scene geometry, which is just an axis-aligned cube centred at the origin in world space
    # We replicate vertices that are shared, so normals are effectively per-face instead of smoothed
    cube_vertices_object, cube_faces = build_cube()
    cube_vertices_object = tf.constant(cube_vertices_object, dtype=tf.float32)
    cube_vertices_object, cube_faces = lighting.split_vertices_by_face(cube_vertices_object, cube_faces)
    cube_vertex_colors = tf.ones_like(cube_vertices_object)

    # Convert vertices to homogeneous coordinates
    cube_vertices_object = tf.concat([
        cube_vertices_object,
        tf.ones_like(cube_vertices_object[:, -1:])
    ], axis=1)

    # Transform vertices from object to world space, by rotating around the vertical axis
    cube_vertices_world = tf.matmul(cube_vertices_object, matrices.rodrigues([0., 0.5, 0.]))

    # Calculate face normals; pre_split implies that no faces share a vertex
    cube_normals_world = lighting.vertex_normals_pre_split(cube_vertices_world, cube_faces)

    # Transform vertices from world to camera space; note that the camera points along the negative-z axis in camera space
    view_matrix = matrices.compose(
        matrices.translation([0., -1.5, -3.5]),  # translate it away from the camera
        matrices.rodrigues([-0.3, 0., 0.])  # tilt the view downwards
    )
    cube_vertices_camera = tf.matmul(cube_vertices_world, view_matrix)

    # Transform vertices from camera to clip space
    projection_matrix = matrices.perspective_projection(near=0.1, far=20., right=0.1, aspect=float(frame_height) / frame_width)
    cube_vertices_clip = tf.matmul(cube_vertices_camera, projection_matrix)

    # The following function is applied to the G-buffer, which is a multi-channel image containing all the vertex attributes.
    # It uses this to calculate the shading at each pixel, hence their final intensities
    def shader_fn(gbuffer, view_matrix, light_direction):

        # Unpack the different attributes from the G-buffer
        mask = gbuffer[:, :, :1]
        positions = gbuffer[:, :, 1:4]
        unlit_colors = gbuffer[:, :, 4:7]
        normals = gbuffer[:, :, 7:]

        # Calculate a simple grey ambient lighting component
        ambient_contribution = unlit_colors * [0.2, 0.2, 0.2]

        # Calculate a red diffuse (Lambertian) lighting component
        diffuse_contribution = lighting.diffuse_directional(
            tf.reshape(normals, [-1, 3]),
            tf.reshape(unlit_colors, [-1, 3]),
            light_direction, light_color=[1., 0., 0.], double_sided=False
        )
        diffuse_contribution = tf.reshape(diffuse_contribution, [frame_height, frame_width, 3])

        # Calculate a white specular (Phong) lighting component
        camera_position_world = tf.linalg.inv(view_matrix)[3, :3]
        specular_contribution = lighting.specular_directional(
            tf.reshape(positions, [-1, 3]),
            tf.reshape(normals, [-1, 3]),
            tf.reshape(unlit_colors, [-1, 3]),
            light_direction, light_color=[1., 1., 1.],
            camera_position=camera_position_world,
            shininess=6., double_sided=False
        )
        specular_contribution = tf.reshape(specular_contribution, [frame_height, frame_width, 3])

        # The final pixel intensities inside the shape are given by combining the three lighting components;
        # outside the shape, they are set to a uniform background color. We clip the final values as the specular
        # component saturates some pixels
        pixels = tf.clip_by_value(
            (diffuse_contribution + specular_contribution + ambient_contribution) * mask + [0., 0., 0.3] * (1. - mask),
            0., 1.
        )

        return pixels

    # Render the G-buffer channels (mask, vertex positions, vertex colours, and normals at each pixel), then perform
    # the deferred shading calculation. In general, any tensor required by shader_fn and wrt which we need derivatives
    # should be included in shader_additional_inputs; although in this example they are constant, we pass the view
    # matrix and lighting direction through this route as an illustration
    light_direction = tf.linalg.l2_normalize([1., -0.3, -0.5])
    pixels = dirt.rasterise_deferred(
        vertices=cube_vertices_clip,
        vertex_attributes=tf.concat([
            tf.ones_like(cube_vertices_object[:, :1]),  # mask
            cube_vertices_world[:, :3],  # vertex positions
            cube_vertex_colors,  # vertex colors
            cube_normals_world  # normals
        ], axis=1),
        faces=cube_faces,
        background_attributes=tf.zeros([frame_height, frame_width, 10]),
        shader_fn=shader_fn,
        shader_additional_inputs=[view_matrix, light_direction]
    )

    pixels = tf.cast(pixels * 255, tf.uint8)

    session = tf.compat.v1.Session(config=tf.compat.v1.ConfigProto(gpu_options=tf.compat.v1.GPUOptions(allow_growth=True)))
    with session.as_default():
        image = pixels.eval()  # evaluate the tensor to a numpy array before handing it to PIL
        img = Image.fromarray(np.asarray(image))
        img.save('test_def.png')
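# Because dirt.rasterise_deferred is differentiable, a training graph can backpropagate from the
# rendered image to any tensor feeding into it, including those passed via shader_additional_inputs.
# Note that the tf.cast to uint8 above exists only for writing the PNG and blocks gradient flow, so
# a loss must be attached to the float-valued tensor returned by rasterise_deferred (before the
# cast). A minimal, hypothetical sketch, where target_image and the loss are illustrative and not
# part of the sample:
#
#     target_image = tf.zeros([frame_height, frame_width, 3])
#     loss = tf.reduce_mean(tf.square(float_pixels - target_image))  # float_pixels = pre-cast output
#     gradients = tf.gradients(loss, [view_matrix, light_direction])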
# --- deferred.py: manual deferred shading with separate G-buffer passes ---

def main():

    # Build the scene geometry, which is just an axis-aligned cube centred at the origin in world space
    # We replicate vertices that are shared, so normals are effectively per-face instead of smoothed
    cube_vertices_object, cube_faces = build_cube()
    cube_vertices_object = tf.constant(cube_vertices_object, dtype=tf.float32)
    cube_vertices_object, cube_faces = lighting.split_vertices_by_face(cube_vertices_object, cube_faces)
    cube_vertex_colors = tf.ones_like(cube_vertices_object)

    # Convert vertices to homogeneous coordinates
    cube_vertices_object = tf.concat([
        cube_vertices_object,
        tf.ones_like(cube_vertices_object[:, -1:])
    ], axis=1)

    # Transform vertices from object to world space, by rotating around the vertical axis
    cube_vertices_world = tf.matmul(cube_vertices_object, matrices.rodrigues([0., 0.5, 0.]))

    # Calculate face normals; pre_split implies that no faces share a vertex
    cube_normals_world = lighting.vertex_normals_pre_split(cube_vertices_world, cube_faces)

    # Transform vertices from world to camera space; note that the camera points along the negative-z axis in camera space
    view_matrix = matrices.compose(
        matrices.translation([0., -1.5, -3.5]),  # translate it away from the camera
        matrices.rodrigues([-0.3, 0., 0.])  # tilt the view downwards
    )
    cube_vertices_camera = tf.matmul(cube_vertices_world, view_matrix)

    # Transform vertices from camera to clip space
    projection_matrix = matrices.perspective_projection(near=0.1, far=20., right=0.1, aspect=float(frame_height) / frame_width)
    cube_vertices_clip = tf.matmul(cube_vertices_camera, projection_matrix)

    # Render the G-buffer channels (vertex position, colour and normal at each pixel) needed for deferred shading
    gbuffer_vertex_positions_world = dirt.rasterise(
        vertices=cube_vertices_clip,
        faces=cube_faces,
        vertex_colors=cube_vertices_world[:, :3],
        background=tf.ones([frame_height, frame_width, 3]) * float('-inf'),
        width=frame_width, height=frame_height, channels=3
    )
    gbuffer_vertex_colours_world = dirt.rasterise(
        vertices=cube_vertices_clip,
        faces=cube_faces,
        vertex_colors=cube_vertex_colors,
        background=tf.zeros([frame_height, frame_width, 3]),
        width=frame_width, height=frame_height, channels=3
    )
    gbuffer_vertex_normals_world = dirt.rasterise(
        vertices=cube_vertices_clip,
        faces=cube_faces,
        vertex_colors=cube_normals_world,
        background=tf.ones([frame_height, frame_width, 3]) * float('-inf'),
        width=frame_width, height=frame_height, channels=3
    )

    # Dilate the position and normal channels at the silhouette boundary; this doesn't affect the image, but
    # ensures correct gradients for pixels just outside the silhouette
    background_mask = tf.cast(tf.equal(gbuffer_vertex_positions_world, float('-inf')), tf.float32)
    gbuffer_vertex_positions_world_dilated = tf.nn.max_pool(
        gbuffer_vertex_positions_world[None, ...],
        ksize=[1, 3, 3, 1], strides=[1, 1, 1, 1], padding='SAME')[0]
    gbuffer_vertex_positions_world = gbuffer_vertex_positions_world * (1. - background_mask) + gbuffer_vertex_positions_world_dilated * background_mask
    gbuffer_vertex_normals_world_dilated = tf.nn.max_pool(
        gbuffer_vertex_normals_world[None, ...],
        ksize=[1, 3, 3, 1], strides=[1, 1, 1, 1], padding='SAME')[0]
    gbuffer_vertex_normals_world = gbuffer_vertex_normals_world * (1. - background_mask) + gbuffer_vertex_normals_world_dilated * background_mask

    # Calculate a simple grey ambient lighting component
    ambient_contribution = gbuffer_vertex_colours_world * [0.2, 0.2, 0.2]

    # Calculate a red diffuse (Lambertian) lighting component
    light_direction = unit([1., -0.3, -0.5])
    diffuse_contribution = lighting.diffuse_directional(
        tf.reshape(gbuffer_vertex_normals_world, [-1, 3]),
        tf.reshape(gbuffer_vertex_colours_world, [-1, 3]),
        light_direction, light_color=[1., 0., 0.], double_sided=False
    )
    diffuse_contribution = tf.reshape(diffuse_contribution, [frame_height, frame_width, 3])

    # Calculate a white specular (Phong) lighting component
    camera_position_world = tf.matrix_inverse(view_matrix)[3, :3]
    specular_contribution = lighting.specular_directional(
        tf.reshape(gbuffer_vertex_positions_world, [-1, 3]),
        tf.reshape(gbuffer_vertex_normals_world, [-1, 3]),
        tf.reshape(gbuffer_vertex_colours_world, [-1, 3]),
        light_direction, light_color=[1., 1., 1.],
        camera_position=camera_position_world,
        shininess=6., double_sided=False
    )
    specular_contribution = tf.reshape(specular_contribution, [frame_height, frame_width, 3])

    # Final pixels are given by combining ambient, diffuse, and specular components
    pixels = diffuse_contribution + specular_contribution + ambient_contribution

    session = tf.Session()
    with session.as_default():
        pixels_eval = pixels.eval()
        cv2.imshow('deferred.py', pixels_eval[:, :, (2, 1, 0)])
        cv2.waitKey(0)
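# This variant uses a small helper unit() that is defined elsewhere in the sample; a minimal
# sketch consistent with how it is used here (normalising a direction vector to unit length) is:

def unit(vector):
    vector = tf.convert_to_tensor(vector, dtype=tf.float32)
    return vector / tf.norm(vector)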
# --- textured mesh renderer method, rasterising SMPL-style vertices with dirt ---

def __call__(self,
             verts,
             trans,
             cam=None,
             img=None,
             do_alpha=False,
             far=None,
             near=None,
             color_id=0,
             img_size=None,
             seg_parts=13774):
    """cam is 3D [f, px, py]"""

    # Disabled legacy OpenDR rendering path, kept for reference:
    '''
    if img is not None:
        h, w = img.shape[:2]
    elif img_size is not None:
        h = img_size[0]
        w = img_size[1]
    else:
        h = self.h
        w = self.w

    if cam is None:
        cam = [self.flength, w / 2., h / 2.]

    use_cam = ProjectPoints(
        f=cam[0] * np.ones(2),
        rt=np.zeros(3),
        t=np.zeros(3),
        k=np.zeros(5),
        c=cam[1:3])

    if near is None:
        near = np.maximum(np.min(verts[:, 2]) - 25, 0.1)
    if far is None:
        far = np.maximum(np.max(verts[:, 2]) + 25, 25)

    imtmp = render_model(
        verts,
        self.faces,
        w,
        h,
        use_cam,
        do_alpha=do_alpha,
        img=img,
        far=far,
        near=near,
        color_id=color_id)
    '''

    frame_width, frame_height = self.w, self.h

    # Take the first mesh in the batch, and split shared vertices so normals are per-face
    cube_vertices_object = verts[0, :, :]
    cube_faces = tf.constant(self.faces, dtype=tf.int64)
    cube_vertices_object, cube_faces = lighting.split_vertices_by_face(cube_vertices_object, cube_faces)

    # Per-vertex colours come from the stored texture rather than uniform white
    cube_vertex_colors = tf.constant(self.textura, dtype=tf.float32)

    # Convert vertices to homogeneous coordinates
    cube_vertices_object = tf.concat([cube_vertices_object, tf.ones_like(cube_vertices_object[:, -1:])], axis=1)

    # Calculate face normals; pre_split implies that no faces share a vertex
    cube_normals_world = lighting.vertex_normals_pre_split(cube_vertices_object, cube_faces)

    # Transform vertices from world to camera space; note that the camera points along the negative-z axis
    # in camera space. The rotation by pi about the x-axis flips the y and z axes to match that convention
    view_matrix = matrices.translation(trans)
    cube_vertices_world = tf.matmul(cube_vertices_object, view_matrix)
    cube_vertices_camera = tf.matmul(cube_vertices_world, matrices.rodrigues([np.pi, 0., 0.]))

    # Transform vertices from camera to clip space
    projection_matrix = matrices.perspective_projection(near=self.near, far=self.far, right=self.right, aspect=float(frame_height) / frame_width)
    cube_vertices_clip = tf.matmul(cube_vertices_camera, projection_matrix)

    # Lighting is disabled here: the raw texture colours are rasterised directly
    # vertex_colors_lit = lighting.diffuse_directional(
    #     cube_normals_world, cube_vertex_colors,
    #     light_direction=[1., 0., 0.], light_color=[1., 1., 1.]
    # ) * 0.8 + cube_vertex_colors * 0.2

    # Rasterise only the first seg_parts faces (2987 appears in the source as an alternative value)
    pixels = dirt.rasterise(
        vertices=cube_vertices_clip,
        faces=cube_faces[:seg_parts, :],
        vertex_colors=cube_vertex_colors,
        background=tf.zeros([frame_height, frame_width, 3]),
        width=frame_width, height=frame_height, channels=3
    )

    return pixels
    # return pixels, cube_vertices_object, cube_faces  # use this to dump the colours/geometry instead
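# __call__ above is a method excerpt: it assumes the enclosing renderer class provides self.w,
# self.h, self.faces, self.textura and the frustum parameters self.near, self.far and self.right
# (plus self.flength for the disabled OpenDR path). A minimal, hypothetical constructor sketch
# follows; the class name and default values are illustrative assumptions, not from the source.

class DirtRenderer(object):

    def __init__(self, faces, textura, w=224, h=224, flength=500., near=0.1, far=20., right=0.1):
        self.faces = faces      # integer face indices, shape [num_faces, 3]
        self.textura = textura  # per-vertex colours after face-splitting, shape [num_faces * 3, 3]
        self.w, self.h = w, h
        self.flength = flength
        self.near, self.far, self.right = near, far, right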