import pyredner import numpy as np import torch # Optimize for a textured plane in a specular reflection # Use GPU if available pyredner.set_use_gpu(torch.cuda.is_available()) # Load the scene from a Mitsuba scene file scene = pyredner.load_mitsuba('scenes/teapot_specular.xml') # The last material is the teapot material, set it to a specular material scene.materials[-1].diffuse_reflectance = \ torch.tensor([0.15, 0.2, 0.15], device = pyredner.get_device()) scene.materials[-1].specular_reflectance = \ torch.tensor([0.8, 0.8, 0.8], device = pyredner.get_device()) scene.materials[-1].roughness = \ torch.tensor([0.0001], device = pyredner.get_device()) args=pyredner.RenderFunction.serialize_scene(\ scene = scene, num_samples = 512, max_bounces = 2) render = pyredner.RenderFunction.apply # Render our target. The first argument is the seed for RNG in the renderer. img = render(0, *args) pyredner.imwrite(img.cpu(), 'results/test_teapot_specular/target.exr') pyredner.imwrite(img.cpu(), 'results/test_teapot_specular/target.png') target = pyredner.imread('results/test_teapot_specular/target.exr') if pyredner.get_use_gpu():
filedata = urllib.request.urlretrieve('https://benedikt-bitterli.me/resources/mitsuba/living-room-3.zip', 'living-room-3.zip') print('Unzipping living-room-3.zip') zip_ref = zipfile.ZipFile('living-room-3.zip', 'r') zip_ref.extractall('scenes/') print('Copying scene file') copyfile('scenes/living-room-3-scene.xml', 'scenes/living-room-3/scene.xml') print('Removing zip file') os.remove('living-room-3.zip') # Optimize for camera pose # Use GPU if available pyredner.set_use_gpu(torch.cuda.is_available()) # Load the scene from a Mitsuba scene file scene = pyredner.load_mitsuba('scenes/living-room-3/scene.xml') print('scene loaded') max_bounces = 6 args = pyredner.RenderFunction.serialize_scene(\ scene = scene, num_samples = 512, max_bounces = max_bounces) render = pyredner.RenderFunction.apply # Render our target. The first argument is the seed for RNG in the renderer. img = render(0, *args) pyredner.imwrite(img.cpu(), 'results/test_living_room/target.exr') pyredner.imwrite(img.cpu(), 'results/test_living_room/target.png') target = pyredner.imread('results/test_living_room/target.exr') if pyredner.get_use_gpu():
import pyredner
import numpy as np
import torch
import scipy
import scipy.ndimage

# Joint estimation of material parameters and camera pose.

# Run on the GPU when CUDA is present.
pyredner.set_use_gpu(torch.cuda.is_available())

# Parse the Mitsuba scene description.
scene = pyredner.load_mitsuba('scenes/teapot.xml')

# The teapot owns the last material slot; assign the target reflectance and
# roughness values to it.
device = pyredner.get_device()
teapot_material = scene.materials[-1]
teapot_material.diffuse_reflectance = pyredner.Texture(
    torch.tensor([0.3, 0.2, 0.2], device = device))
teapot_material.specular_reflectance = pyredner.Texture(
    torch.tensor([0.6, 0.6, 0.6], device = device))
teapot_material.roughness = pyredner.Texture(
    torch.tensor([0.05], device = device))

args = pyredner.RenderFunction.serialize_scene(\
    scene = scene, num_samples = 1024, max_bounces = 2)

# Alias of the render function
render = pyredner.RenderFunction.apply

# Render the target image; argument 0 seeds the renderer's RNG.
img = render(0, *args)
pyredner.imwrite(img.cpu(), 'results/test_teapot_reflectance/target.exr')
# Joint material & camera pose estimation with global illumination, using a
# coarse-to-fine strategy. This example demonstrates:
# - Loading a Mitsuba scene file
# - Global illumination with glossy/specular materials
# - Coarse-to-fine estimation via a Gaussian pyramid loss
# - Incorporating box constraints into the optimizer
#
# Besides Wavefront obj files, redner can also load Mitsuba scene files,
# though only a subset of features is supported. In particular only two
# material kinds are handled: diffuse and roughplastic. Note that the "alpha"
# value of roughplastic is the square root of the roughness. See cbox.xml for
# the expected layout of a Mitsuba scene file.

# Load the scene with the pyredner.load_mitsuba() utility and render it as usual.
scene = pyredner.load_mitsuba('cbox/cbox.xml')

# max_bounces = 5 enables global illumination.
scene_args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 512,
    max_bounces = 5)

render = pyredner.RenderFunction.apply
img = render(0, *scene_args)
pyredner.imwrite(img.cpu(), 'results/coarse_to_fine_estimation/target.exr')
pyredner.imwrite(img.cpu(), 'results/coarse_to_fine_estimation/target.png')

# Read the target back and move it to the GPU when one is in use.
target = pyredner.imread('results/coarse_to_fine_estimation/target.exr')
if pyredner.get_use_gpu():
    target = target.cuda()

# Next, build an initial guess by perturbing the reference: set every diffuse
# color to gray via material.diffuse_reflectance, and collect all material
# variables to optimize in a list.
import scipy.ndimage.filters
import pyredner
import numpy as np
import torch

# Use GPU if available
pyredner.set_use_gpu(torch.cuda.is_available())

scene = pyredner.load_mitsuba('scenes/bunny_box.xml')
# Nudge the bunny (the last shape) slightly upward for the target pose.
scene.shapes[-1].vertices += torch.tensor([0, 0.01, 0], device=pyredner.get_device())

args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 1024,
    max_bounces = 6)
render = pyredner.RenderFunction.apply
# Render our target. The first argument is the seed for RNG in the renderer.
img = render(0, *args)
pyredner.imwrite(img.cpu(), 'results/test_bunny_box/target.exr')
pyredner.imwrite(img.cpu(), 'results/test_bunny_box/target.png')
target = pyredner.imread('results/test_bunny_box/target.exr')
# Fix: the original unconditionally called target.cuda(pyredner.get_device()),
# which raises on CPU-only machines (and would pass a CPU device to .cuda).
# Guard the transfer like the other test scripts do.
if pyredner.get_use_gpu():
    target = target.cuda(device=pyredner.get_device())

# Keep the target vertex positions, then create perturbed, differentiable
# translation/rotation parameters as the initial guess.
bunny_vertices = scene.shapes[-1].vertices.clone()
bunny_translation = torch.tensor([0.1, 0.4, 0.1],
                                 device=pyredner.get_device(),
                                 requires_grad=True)
bunny_rotation = torch.tensor([-0.2, 0.1, -0.1],
                              device=pyredner.get_device(),
                              requires_grad=True)
print('Unzipping living-room-3.zip') zip_ref = zipfile.ZipFile('living-room-3.zip', 'r') zip_ref.extractall('scenes/') print('Copying scene file') copyfile('scenes/living-room-3-scene.xml', 'scenes/living-room-3/scene.xml') print('Removing zip file') os.remove('living-room-3.zip') # Optimize for camera pose # Use GPU if available pyredner.set_use_gpu(torch.cuda.is_available()) # Load the scene from a Mitsuba scene file scene = pyredner.load_mitsuba('scenes/living-room-3/scene.xml') print('scene loaded') max_bounces = 6 args = pyredner.RenderFunction.serialize_scene(\ scene = scene, num_samples = 512, max_bounces = max_bounces) render = pyredner.RenderFunction.apply # Render our target. The first argument is the seed for RNG in the renderer. img = render(0, *args) pyredner.imwrite(img.cpu(), 'results/test_living_room/target.exr') pyredner.imwrite(img.cpu(), 'results/test_living_room/target.png') target = pyredner.imread('results/test_living_room/target.exr') if pyredner.get_use_gpu():
import pyredner
import numpy as np
import torch
import scipy

# Joint estimation of material parameters and camera pose.

# Run on the GPU when CUDA is present.
pyredner.set_use_gpu(torch.cuda.is_available())

# Parse the Mitsuba scene description.
scene = pyredner.load_mitsuba('scenes/teapot.xml')

# The teapot owns the last material slot; assign the target reflectance and
# roughness values to it.
device = pyredner.get_device()
teapot_material = scene.materials[-1]
teapot_material.diffuse_reflectance = pyredner.Texture(
    torch.tensor([0.3, 0.2, 0.2], device = device))
teapot_material.specular_reflectance = pyredner.Texture(
    torch.tensor([0.6, 0.6, 0.6], device = device))
teapot_material.roughness = pyredner.Texture(
    torch.tensor([0.05], device = device))

args = pyredner.RenderFunction.serialize_scene(\
    scene = scene, num_samples = 1024, max_bounces = 2)

# Alias of the render function
render = pyredner.RenderFunction.apply

# Render the target image; argument 0 seeds the renderer's RNG.
img = render(0, *args)
pyredner.imwrite(img.cpu(), 'results/test_teapot_reflectance/target.exr')
pyredner.imwrite(img.cpu(), 'results/test_teapot_reflectance/target.png')
import pyredner import numpy as np import torch # Optimize for a textured plane in a specular reflection # Use GPU if available pyredner.set_use_gpu(torch.cuda.is_available()) # Load the scene from a Mitsuba scene file scene = pyredner.load_mitsuba('scenes/teapot_specular.xml') # The last material is the teapot material, set it to a specular material scene.materials[-1].diffuse_reflectance = \ pyredner.Texture(torch.tensor([0.15, 0.2, 0.15], device = pyredner.get_device())) scene.materials[-1].specular_reflectance = \ pyredner.Texture(torch.tensor([0.8, 0.8, 0.8], device = pyredner.get_device())) scene.materials[-1].roughness = \ pyredner.Texture(torch.tensor([0.0001], device = pyredner.get_device())) args=pyredner.RenderFunction.serialize_scene(\ scene = scene, num_samples = 512, max_bounces = 2) render = pyredner.RenderFunction.apply # Render our target. The first argument is the seed for RNG in the renderer. img = render(0, *args) pyredner.imwrite(img.cpu(), 'results/test_teapot_specular/target.exr') pyredner.imwrite(img.cpu(), 'results/test_teapot_specular/target.png') target = pyredner.imread('results/test_teapot_specular/target.exr') if pyredner.get_use_gpu():