# The last material is the teapot material, set it to a specular material
with tf.device(pyredner.get_device_name()):
    scene.materials[-1].diffuse_reflectance = \
        pyredner.Texture(tf.Variable([0.15, 0.2, 0.15], dtype=tf.float32))
    scene.materials[-1].specular_reflectance = \
        pyredner.Texture(tf.Variable([0.8, 0.8, 0.8], dtype=tf.float32))
    scene.materials[-1].roughness = \
        pyredner.Texture(tf.Variable([0.0001], dtype=tf.float32))
scene_args = pyredner.serialize_scene(scene=scene,
                                      num_samples=512,
                                      max_bounces=2)

# Render our target. The first argument is the seed for RNG in the renderer.
img = pyredner.render(0, *scene_args)
pyredner.imwrite(img, 'results/test_teapot_specular/target.exr')
pyredner.imwrite(img, 'results/test_teapot_specular/target.png')
target = pyredner.imread('results/test_teapot_specular/target.exr')

# Perturb the scene, this is our initial guess
# We perturb the last shape, which is the SIGGRAPH logo
ref_pos = scene.shapes[-1].vertices
with tf.device(pyredner.get_device_name()):
    translation = tf.Variable([20.0, 0.0, 2.0], trainable=True)
    scene.shapes[-1].vertices = ref_pos + translation
scene_args = pyredner.serialize_scene(scene=scene,
                                      num_samples=512,
                                      max_bounces=2)

# Render the initial guess
img = pyredner.render(1, *scene_args)
pyredner.imwrite(img, 'results/test_teapot_specular/init.png')

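# The original script goes on to optimize the translation so the rendering
# matches the target. The loop below is only a minimal sketch of that pattern;
# the optimizer choice, learning rate, sample count, and iteration count are
# assumptions, not values taken from the original file.
optimizer = tf.compat.v1.train.AdamOptimizer(0.5)
for t in range(200):
    with tf.GradientTape() as tape:
        # Rebuild the perturbed vertices and re-serialize so the renderer
        # sees the current translation.
        scene.shapes[-1].vertices = ref_pos + translation
        scene_args = pyredner.serialize_scene(scene=scene,
                                              num_samples=4,
                                              max_bounces=2)
        img = pyredner.render(t + 1, *scene_args)
        loss = tf.reduce_sum(tf.square(img - target))
    grads = tape.gradient(loss, [translation])
    optimizer.apply_gradients(zip(grads, [translation]))
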
light_indices = tf.constant([[0, 1, 2], [1, 3, 2]], dtype=tf.int32)
shape_light = pyredner.Shape(light_vertices, light_indices, 1)
shapes = [shape_plane, shape_light]
with tf.device('/device:cpu:' + str(pyredner.get_cpu_device_id())):
    light_intensity = tf.Variable([20.0, 20.0, 20.0], dtype=tf.float32)
# The first argument is the shape id of the light
light = pyredner.AreaLight(1, light_intensity)
area_lights = [light]
scene = pyredner.Scene(cam, shapes, materials, area_lights)
scene_args = pyredner.serialize_scene(scene=scene,
                                      num_samples=16,
                                      max_bounces=1)

# Render our target
img = pyredner.render(0, *scene_args)
pyredner.imwrite(img, 'results/test_svbrdf/target.exr')
pyredner.imwrite(img, 'results/test_svbrdf/target.png')
target = pyredner.imread('results/test_svbrdf/target.exr')

# Our initial guess is three gray textures
with tf.device(pyredner.get_device_name()):
    diffuse_tex = tf.Variable(
        tf.ones((256, 256, 3), dtype=np.float32) * 0.5,
        trainable=True)
    specular_tex = tf.Variable(
        tf.ones((256, 256, 3), dtype=np.float32) * 0.5,
        trainable=True)
    roughness_tex = tf.Variable(
        tf.ones((256, 256, 1), dtype=np.float32) * 0.5,
        trainable=True)
    mat_perlin.diffuse_reflectance = pyredner.Texture(diffuse_tex)
    mat_perlin.specular_reflectance = pyredner.Texture(specular_tex)
    mat_perlin.roughness = pyredner.Texture(roughness_tex)
scene_args = pyredner.serialize_scene(scene=scene,

shapes = [shape_sphere]
with tf.device(pyredner.get_device_name()):
    envmap = pyredner.imread('sunsky.exr')
    envmap = pyredner.EnvironmentMap(envmap)
scene = pyredner.Scene(camera=cam,
                       shapes=shapes,
                       materials=materials,
                       area_lights=[],
                       envmap=envmap)
scene_args = pyredner.serialize_scene(scene=scene,
                                      num_samples=256,
                                      max_bounces=1)
img = pyredner.render(0, *scene_args)
pyredner.imwrite(img, 'results/test_envmap/target.exr')
pyredner.imwrite(img, 'results/test_envmap/target.png')
target = pyredner.imread('results/test_envmap/target.exr')

with tf.device(pyredner.get_device_name()):
    envmap_texels = tf.Variable(0.5 * tf.ones([32, 64, 3], dtype=tf.float32),
                                trainable=True)
    envmap = pyredner.EnvironmentMap(tf.abs(envmap_texels))
scene = pyredner.Scene(camera=cam,
                       shapes=shapes,
                       materials=materials,
                       area_lights=[],
                       envmap=envmap)
scene_args = pyredner.serialize_scene(scene=scene,
                                      num_samples=256,
                                      max_bounces=1)

with tf.device('/device:cpu:' + str(pyredner.get_cpu_device_id())):
    light_intensity = tf.Variable([1000.0, 1000.0, 1000.0],
                                  dtype=tf.float32,
                                  use_resource=True)
# The first argument is the shape id of the light
light = pyredner.AreaLight(2, light_intensity)
area_lights = [light]
scene = pyredner.Scene(cam, shapes, materials, area_lights)
scene_args = pyredner.serialize_scene(scene=scene,
                                      num_samples=256,
                                      max_bounces=1)

# Render our target
img = pyredner.render(0, *scene_args)
pyredner.imwrite(img, 'results/test_shadow_blocker/target.exr')
pyredner.imwrite(img, 'results/test_shadow_blocker/target.png')
target = pyredner.imread('results/test_shadow_blocker/target.exr')

# Perturb the scene, this is our initial guess
with tf.device(pyredner.get_device_name()):
    shape_blocker.vertices = tf.Variable([[-0.2, 3.5, -0.8],
                                          [-0.8, 3.0, 0.3],
                                          [0.4, 2.8, -0.8],
                                          [0.3, 3.2, 1.0]],
                                         trainable=True,
                                         use_resource=True)
scene_args = pyredner.serialize_scene(scene=scene,
                                      num_samples=256,
                                      max_bounces=1)

# Render the initial guess
img = pyredner.render(1, *scene_args)
pyredner.imwrite(img, 'results/test_shadow_blocker/init.png')

import os
os.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = 'true'
import tensorflow as tf
tf.compat.v1.enable_eager_execution() # redner only supports eager mode
import pyredner_tensorflow as pyredner

objects = pyredner.load_obj('scenes/teapot.obj', return_objects=True)
camera = pyredner.automatic_camera_placement(objects, resolution=(512, 512))
scene = pyredner.Scene(camera=camera, objects=objects)
light = pyredner.PointLight(
    position=(camera.position + tf.constant((0.0, 0.0, 100.0))),
    intensity=tf.constant((20000.0, 30000.0, 20000.0)))

# Render before recomputing vertex normals.
img = pyredner.render_deferred(scene=scene, lights=[light])
pyredner.imwrite(img, 'results/test_compute_vertex_normals/no_vertex_normal.exr')

# Recompute vertex normals with 'max' weighting.
for obj in objects:
    obj.normals = pyredner.compute_vertex_normal(obj.vertices, obj.indices, 'max')
scene = pyredner.Scene(camera=camera, objects=objects)
img = pyredner.render_deferred(scene=scene, lights=[light])
pyredner.imwrite(img, 'results/test_compute_vertex_normals/max_vertex_normal.exr')

# Recompute vertex normals with 'cotangent' weighting.
for obj in objects:
    obj.normals = pyredner.compute_vertex_normal(obj.vertices, obj.indices, 'cotangent')
scene = pyredner.Scene(camera=camera, objects=objects)
img = pyredner.render_deferred(scene=scene, lights=[light])
pyredner.imwrite(

shape_light = pyredner.Shape(light_vertices, light_indices, 0)
shapes = [shape_triangle, shape_light]
with tf.device('/device:cpu:' + str(pyredner.get_cpu_device_id())):
    light_intensity = tf.Variable([30.0, 30.0, 30.0], dtype=tf.float32)
light = pyredner.AreaLight(1, light_intensity)
area_lights = [light]
scene = pyredner.Scene(cam, shapes, materials, area_lights)
scene_args = pyredner.serialize_scene(scene=scene,
                                      num_samples=16,
                                      max_bounces=1)

# Render our target
img = pyredner.render(0, *scene_args)
pyredner.imwrite(img, 'results/test_single_triangle_camera_fisheye/target.exr')
pyredner.imwrite(img, 'results/test_single_triangle_camera_fisheye/target.png')
target = pyredner.imread(
    'results/test_single_triangle_camera_fisheye/target.exr')

# Perturb the scene, this is our initial guess
with tf.device('/device:cpu:' + str(pyredner.get_cpu_device_id())):
    position = tf.Variable([0.5, -0.5, -3.0], trainable=True)
scene.camera = pyredner.Camera(position=position,
                               look_at=look_at,
                               up=up,
                               fov=fov,
                               clip_near=clip_near,
                               resolution=resolution,
                               fisheye=True)
scene_args = pyredner.serialize_scene(scene=scene,

    material_id_map[key] = count
    count += 1
    materials.append(value)

# Setup geometries
shapes = []
with tf.device(pyredner.get_device_name()):
    for mtl_name, mesh in mesh_list:
        shapes.append(pyredner.Shape(vertices=mesh.vertices,
                                     indices=mesh.indices,
                                     uvs=mesh.uvs,
                                     normals=mesh.normals,
                                     material_id=material_id_map[mtl_name]))

with tf.device(pyredner.get_device_name()):
    envmap = pyredner.imread('sunsky.exr')
    envmap = pyredner.EnvironmentMap(envmap)

# Construct the scene
scene = pyredner.Scene(cam, shapes, materials, area_lights=[], envmap=envmap)
# Serialize the scene
scene_args = pyredner.serialize_scene(scene=scene,
                                      num_samples=512,
                                      max_bounces=1)
img = pyredner.render(0, *scene_args)
pyredner.imwrite(img, 'results/test_compute_uvs/target.exr')
pyredner.imwrite(img, 'results/test_compute_uvs/target.png')
target = pyredner.imread('results/test_compute_uvs/target.exr')

    max_bounces=1,
    channels=[redner.channels.radiance, redner.channels.alpha])

# Render the scene as our target image.
# Render. The first argument is the seed for RNG in the renderer.
img = pyredner.render(0, *scene_args)
background = pyredner.imread('scenes/textures/siggraph.jpg')
background = tf.convert_to_tensor(
    skimage.transform.resize(background.numpy(), (256, 256, 3)),
    dtype=tf.float32)
img = img[:, :, :3] * img[:, :, 3:4] + background * (1 - img[:, :, 3:4])

# Save the images.
# The output image is in the GPU memory if you are using GPU.
pyredner.imwrite(img, 'results/test_single_triangle_background/target.exr')
pyredner.imwrite(img, 'results/test_single_triangle_background/target.png')
# Read the target image we just saved.
target = pyredner.imread('results/test_single_triangle_background/target.exr')

scene_args = pyredner.serialize_scene(
    scene=scene,
    num_samples=16,
    max_bounces=1,
    channels=[redner.channels.radiance, redner.channels.alpha])
# Render. The first argument is the seed for RNG in the renderer.
img = pyredner.render(0, *scene_args)
# Since we specified alpha as output channel, img has 4 channels now.
# We blend the image with a background image.
img = img[:, :, :3] * img[:, :, 3:4] + background * (1 - img[:, :, 3:4])

# Construct the scene
scene = pyredner.Scene(cam, shapes, materials, area_lights=[], envmap=None)
# Serialize the scene
# Here we specify the output channels as "depth", "shading_normal"
scene_args = pyredner.serialize_scene(
    scene=scene,
    num_samples=16,
    max_bounces=0,
    channels=[redner.channels.depth, redner.channels.shading_normal])

# Render. The first argument is the seed for RNG in the renderer.
img = pyredner.render(0, *scene_args)
# Save the images.
depth = img[:, :, 0]
normal = img[:, :, 1:4]
pyredner.imwrite(depth, 'results/test_g_buffer/target_depth.exr')
pyredner.imwrite(depth, 'results/test_g_buffer/target_depth.png', normalize=True)
pyredner.imwrite(normal, 'results/test_g_buffer/target_normal.exr')
pyredner.imwrite(normal, 'results/test_g_buffer/target_normal.png', normalize=True)
# Read the target images we just saved.
target_depth = pyredner.imread('results/test_g_buffer/target_depth.exr')
target_depth = target_depth[:, :, 0]
target_normal = pyredner.imread('results/test_g_buffer/target_normal.exr')

with tf.device(pyredner.get_device_name()):
    # Perturb the teapot with a translation and a rotation; this is our initial guess.
    translation_params = tf.Variable([0.1, -0.1, 0.1], trainable=True)

import os
os.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = 'true'
import tensorflow as tf
tf.compat.v1.enable_eager_execution() # redner only supports eager mode
import pyredner_tensorflow as pyredner

# Test the sample_pixel_center flag.
# sample_pixel_center=True places every sample at the pixel center, trading
# noise for aliasing; sample_pixel_center=False jitters samples within each
# pixel, which antialiases the result.
objects = pyredner.load_obj('scenes/teapot.obj', return_objects=True)
camera = pyredner.automatic_camera_placement(objects, resolution=(128, 128))
scene = pyredner.Scene(camera=camera, objects=objects)
img = pyredner.render_albedo(scene, sample_pixel_center=True)
pyredner.imwrite(img.cpu(), 'results/test_sample_pixel_center/img_no_aa.exr')
img = pyredner.render_albedo(scene, sample_pixel_center=False)
pyredner.imwrite(img.cpu(), 'results/test_sample_pixel_center/img_with_aa.exr')

# Tensorflow by default allocates all GPU memory, leaving very little for rendering.
# We set the environment variable TF_FORCE_GPU_ALLOW_GROWTH to true to enforce on-demand
# memory allocation to reduce page faults.
import os
os.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = 'true'
import tensorflow as tf
tf.compat.v1.enable_eager_execution()
import pyredner_tensorflow as pyredner

vertices, indices, uvs, normals = pyredner.generate_sphere(64, 128)
m = pyredner.Material(diffuse_reflectance=tf.constant((0.5, 0.5, 0.5)))
obj = pyredner.Object(vertices=vertices,
                      indices=indices,
                      uvs=uvs,
                      normals=normals,
                      material=m)
cam = pyredner.automatic_camera_placement([obj], resolution=(480, 640))
scene = pyredner.Scene(objects=[obj], camera=cam)
img = pyredner.render_g_buffer(
    scene, channels=[pyredner.channels.uv, pyredner.channels.shading_normal])
uv_img = tf.concat([img[:, :, :2], tf.zeros((480, 640, 1))], axis=2)
normal_img = img[:, :, 2:]
pyredner.imwrite(uv_img, 'results/test_sphere/uv.png')
pyredner.imwrite(normal_img, 'results/test_sphere/normal.png')

    diffuse_reflectance=checkerboard_texture)
mat_black = pyredner.Material(
    diffuse_reflectance=tf.Variable([0.0, 0.0, 0.0], dtype=tf.float32))
plane = pyredner.Object(vertices=tf.Variable([[-1.0, -1.0, 0.0],
                                              [-1.0, 1.0, 0.0],
                                              [1.0, -1.0, 0.0],
                                              [1.0, 1.0, 0.0]]),
                        indices=tf.constant([[0, 1, 2], [1, 3, 2]], dtype=tf.int32),
                        uvs=tf.Variable([[0.05, 0.05], [0.05, 0.95],
                                         [0.95, 0.05], [0.95, 0.95]]),
                        material=mat_checkerboard)
scene = pyredner.Scene(camera=cam, objects=[plane])
img = pyredner.render_albedo(scene=scene)
pyredner.imwrite(img, 'results/test_camera_distortion/target.exr')
pyredner.imwrite(img, 'results/test_camera_distortion/target.png')
# Read the target image we just saved.
target = pyredner.imread('results/test_camera_distortion/target.exr')

# Our initial guess: zero out the camera distortion parameters.
cam.distortion_params = tf.Variable(tf.zeros(8), trainable=True)
scene = pyredner.Scene(camera=cam, objects=[plane])
img = pyredner.render_albedo(scene=scene)
pyredner.imwrite(img, 'results/test_camera_distortion/init.exr')
pyredner.imwrite(img, 'results/test_camera_distortion/init.png')

# Optimize for the camera distortion parameters.
optimizer = tf.compat.v1.train.AdamOptimizer(1e-3)
for t in range(200):
    print('iteration:', t)
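    # The loop body below is only a minimal sketch of how such an optimization
    # typically continues (L2 loss, gradients on distortion_params); the exact
    # loss and bookkeeping in the original script may differ.
    with tf.GradientTape() as tape:
        scene = pyredner.Scene(camera=cam, objects=[plane])
        img = pyredner.render_albedo(scene=scene)
        loss = tf.reduce_sum(tf.square(img - target))
    grads = tape.gradient(loss, [cam.distortion_params])
    optimizer.apply_gradients(zip(grads, [cam.distortion_params]))
    print('loss:', loss.numpy())
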
envmap = pyredner.EnvironmentMap(envmap)
scene = pyredner.Scene(camera=cam,
                       shapes=shapes,
                       materials=materials,
                       area_lights=[],
                       envmap=envmap)
scene_args = pyredner.serialize_scene(
    scene=scene,
    num_samples=256,
    max_bounces=1,
    channels=[redner.channels.radiance, redner.channels.vertex_color])
img = pyredner.render(0, *scene_args)
img_radiance = img[:, :, :3]
img_vertex_color = img[:, :, 3:]
pyredner.imwrite(img_radiance, 'results/test_vertex_color/target.exr')
pyredner.imwrite(img_radiance, 'results/test_vertex_color/target.png')
pyredner.imwrite(img_vertex_color, 'results/test_vertex_color/target_color.png')
target_radiance = pyredner.imread('results/test_vertex_color/target.exr')

# Initial guess. Set to 0.5 for all vertices.
with tf.device(pyredner.get_device_name()):
    shape_sphere.colors = tf.Variable(tf.zeros_like(vertices) + 0.5)
scene_args = pyredner.serialize_scene(
    scene=scene,
    num_samples=256,
    max_bounces=1,
    channels=[redner.channels.radiance, redner.channels.vertex_color])
img = pyredner.render(1, *scene_args)
img_radiance = img[:, :, :3]