Example #1
def parse_material(node, two_sided=False):
    node_id = None
    if 'id' in node.attrib:
        node_id = node.attrib['id']
    if node.attrib['type'] == 'diffuse':
        diffuse_reflectance = torch.tensor([0.5, 0.5, 0.5])
        diffuse_uv_scale = torch.tensor([1.0, 1.0])
        specular_reflectance = torch.tensor([0.0, 0.0, 0.0])
        specular_uv_scale = torch.tensor([1.0, 1.0])
        roughness = torch.tensor([1.0])
        for child in node:
            if child.attrib['name'] == 'reflectance':
                if child.tag == 'texture':
                    for grandchild in child:
                        if grandchild.attrib['name'] == 'filename':
                            diffuse_reflectance = pyredner.imread(
                                grandchild.attrib['value'])
                        elif grandchild.attrib['name'] == 'uscale':
                            diffuse_uv_scale[0] = float(
                                grandchild.attrib['value'])
                        elif grandchild.attrib['name'] == 'vscale':
                            diffuse_uv_scale[1] = float(
                                grandchild.attrib['value'])
                elif child.tag == 'rgb':
                    diffuse_reflectance = parse_vector(child.attrib['value'])
            elif child.attrib['name'] == 'specular':
                if child.tag == 'texture':
                    for grandchild in child:
                        if grandchild.attrib['name'] == 'filename':
                            specular_reflectance = pyredner.imread(
                                grandchild.attrib['value'])
                        elif grandchild.attrib['name'] == 'uscale':
                            specular_uv_scale[0] = float(
                                grandchild.attrib['value'])
                        elif grandchild.attrib['name'] == 'vscale':
                            specular_uv_scale[1] = float(
                                grandchild.attrib['value'])
                elif child.tag == 'rgb':
                    specular_reflectance = parse_vector(child.attrib['value'])
            elif child.attrib['name'] == 'roughness':
                roughness = torch.tensor(float(child.attrib['value']))
        if pyredner.get_use_gpu():
            # Copy to GPU
            diffuse_reflectance = diffuse_reflectance.cuda()
            specular_reflectance = specular_reflectance.cuda()
            roughness = roughness.cuda()
        return (node_id,
                pyredner.Material(diffuse_reflectance,
                                  diffuse_uv_scale=diffuse_uv_scale,
                                  specular_reflectance=specular_reflectance,
                                  specular_uv_scale=specular_uv_scale,
                                  roughness=roughness,
                                  two_sided=two_sided))
    elif node.attrib['type'] == 'twosided':
        ret = parse_material(node[0], True)
        return (node_id, ret[1])
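For reference, a minimal usage sketch (not from the original source): parse_material expects a Mitsuba-style <bsdf> XML element, built here with the standard-library ElementTree. It assumes the snippet's parse_vector helper is in scope to parse "r g b" value strings.

import xml.etree.ElementTree as etree

bsdf_xml = """
<bsdf type="diffuse" id="gray">
    <rgb name="reflectance" value="0.5 0.5 0.5"/>
</bsdf>
"""
node = etree.fromstring(bsdf_xml)
node_id, material = parse_material(node)  # -> ('gray', pyredner.Material(...))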
Example #2
def parse_scene(node):
    cam = None
    resolution = None
    materials = []
    material_dict = {}
    shapes = []
    lights = []
    shape_group_dict = {}
    envmap = None

    for child in node:
        if child.tag == 'sensor':
            cam = parse_camera(child)
        elif child.tag == 'bsdf':
            node_id, material = parse_material(child)
            if node_id is not None:
                material_dict[node_id] = len(materials)
                materials.append(material)
        # shapegroup for instancing
        elif child.tag == 'shape' and child.attrib['type'] == 'shapegroup':
            for child_s in child:
                if child_s.tag == 'shape':
                    shape_group_dict[child.attrib['id']] = parse_shape(
                        child_s, material_dict, None)[0]
        elif child.tag == 'shape':
            shape, light = parse_shape(
                child, material_dict, len(shapes), shape_group_dict
                if child.attrib['type'] == 'instance' else None)
            if isinstance(shape, list):
                shapes = shapes + shape
            else:
                shapes.append(shape)
            if light is not None:
                lights.append(light)
        # Add envmap loading support
        elif child.tag == 'emitter' and child.attrib['type'] == 'envmap':
            # read envmap params from xml
            scale = 1.0
            envmap_filename = None
            to_world = torch.eye(4)
            for child_s in child:
                if child_s.attrib['name'] == 'scale':
                    assert child_s.tag == 'float'
                    scale = float(child_s.attrib['value'])
                if child_s.attrib['name'] == 'filename':
                    assert child_s.tag == 'string'
                    envmap_filename = child_s.attrib['value']
                if child_s.attrib['name'] == 'toWorld':
                    to_world = parse_transform(child_s)
            # load envmap
            envmap = scale * pyredner.imread(envmap_filename)
            if pyredner.get_use_gpu():
                envmap = envmap.cuda()
            envmap = pyredner.EnvironmentMap(envmap, env_to_world=to_world)
    return pyredner.Scene(cam, shapes, materials, lights, envmap)
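A hedged usage sketch: parse_scene takes the root <scene> element of a Mitsuba XML file and relies on the helper parsers defined alongside it (parse_camera, parse_material, parse_shape, parse_transform); the file path below is a placeholder.

import xml.etree.ElementTree as etree

root = etree.parse('cbox/cbox.xml').getroot()  # hypothetical scene path
scene = parse_scene(root)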
Example #3
def parse_material_bitmap(node, scale=None):
    reflectance_texture = None
    uv_scale = torch.tensor([1.0, 1.0])
    for grandchild in node:
        if grandchild.attrib['name'] == 'filename':
            reflectance_texture = pyredner.imread(
                grandchild.attrib['value'])
            if scale:
                reflectance_texture = reflectance_texture * scale
        elif grandchild.attrib['name'] == 'uscale':
            uv_scale[0] = float(grandchild.attrib['value'])
        elif grandchild.attrib['name'] == 'vscale':
            uv_scale[1] = float(grandchild.attrib['value'])
    assert reflectance_texture is not None
    return reflectance_texture, uv_scale
Example #4
def load_3d(mesh_name):
    '''
    Loads a 3D model, computing vertex normals as needed
    '''
    dpath = os.path.join(mydir, mesh_name)
    fpath = os.path.join(dpath, "mesh.obj")
    if os.path.isfile(fpath):
        obj = pyr.load_obj(fpath, return_objects=True)[0]
        recompute_normals(obj)
        texpath = os.path.join(dpath, "texture.png")
        if os.path.isfile(texpath):
            tex_img = pyr.imread(texpath)
            obj.material.diffuse_reflectance = pyr.Texture(tex_img)
        return obj
    else:
        raise FileNotFoundError(f"Could not find {fpath}")
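A usage sketch under the snippet's own assumptions (mydir set, pyr aliasing pyredner, recompute_normals in scope); 'teapot' is a hypothetical mesh directory name:

obj = load_3d('teapot')  # loads mydir/teapot/mesh.obj and, if present, mydir/teapot/texture.png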
Example #5
shapes = [shape_triangle, shape_light]

light = pyredner.AreaLight(shape_id=1,
                           intensity=torch.tensor([20.0, 20.0, 20.0]))
area_lights = [light]
scene = pyredner.Scene(cam, shapes, materials, area_lights)
scene_args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 16,
    max_bounces = 1)

render = pyredner.RenderFunction.apply
img = render(0, *scene_args)
pyredner.imwrite(img.cpu(), 'results/test_viewport/target.exr')
pyredner.imwrite(img.cpu(), 'results/test_viewport/target.png')
target = pyredner.imread('results/test_viewport/target.exr')
if pyredner.get_use_gpu():
    target = target.cuda(device=pyredner.get_device())

# Perturb the scene, this is our initial guess.
shape_triangle.vertices = torch.tensor(\
    [[-2.0,1.5,0.3], [0.9,1.2,-0.3], [-0.4,-1.4,0.2]],
    device = pyredner.get_device(),
    requires_grad = True) # Set requires_grad to True since we want to optimize this
scene_args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 16,
    max_bounces = 1)
img = render(1, *scene_args)
pyredner.imwrite(img.cpu(), 'results/test_single_triangle/init.png')
diff = torch.abs(target - img)
Example #6

# Now we render the scene as our target image.
# To render the scene, we use our custom PyTorch function in pyredner/render_pytorch.py
# First setup the alias of the render function
render = pyredner.RenderFunction.apply
# Next we call the render function to render.
# The first argument is the seed for RNG in the renderer.
img = render(0, *scene_args)
# This generates a PyTorch tensor with size [width, height, 3]. 
# The output image is in the GPU memory if you are using GPU.
# Now we save the generated image to disk.
pyredner.imwrite(img.cpu(), 'results/optimize_single_triangle/target.exr')
pyredner.imwrite(img.cpu(), 'results/optimize_single_triangle/target.png')
# Now we read back the target image we just saved, and copy to GPU if necessary
target = pyredner.imread('results/optimize_single_triangle/target.exr')
if pyredner.get_use_gpu():
    target = target.cuda()

# Next we want to produce the initial guess. We do this by perturbing the scene.
shape_triangle.vertices = torch.tensor(\
    [[-2.0,1.5,0.3], [0.9,1.2,-0.3], [-0.4,-1.4,0.2]],
    device = pyredner.get_device(),
    requires_grad = True) # Set requires_grad to True since we want to optimize this
# We need to serialize the scene again to get the new arguments.
scene_args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 16,
    max_bounces = 1)
# Render the initial guess
img = render(1, *scene_args)
Example #7
mat_grey = pyredner.Material(\
    diffuse_reflectance = \
        torch.tensor([0.5, 0.5, 0.5], device = pyredner.get_device()),
    roughness = \
        torch.tensor([0.05], device = pyredner.get_device()))

materials = [mat_grey]

vertices, indices, uvs, normals = pyredner.generate_sphere(128, 64)
shape_sphere = pyredner.Shape(\
    vertices = vertices,
    indices = indices,
    uvs = uvs,
    normals = normals,
    material_id = 0)
shapes = [shape_sphere]

envmap = pyredner.imread('sunsky.exr')
if pyredner.get_use_gpu():
    envmap = envmap.cuda(device=pyredner.get_device())
envmap = pyredner.EnvironmentMap(envmap)
scene = pyredner.Scene(cam, shapes, materials, [], envmap)
scene_args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 256,
    max_bounces = 1)
render = pyredner.RenderFunction.apply
img = render(0, *scene_args)
pyredner.imwrite(img.cpu(), 'results/test_envmap/target.exr')
pyredner.imwrite(img.cpu(), 'results/test_envmap/target.png')
target = pyredner.imread('results/test_envmap/target.exr')
if pyredner.get_use_gpu():
    target = target.cuda()
Example #8
# Set up the scene using PyTorch tensors
position = torch.tensor([0.0, 0.0, -5.0])
look_at = torch.tensor([0.0, 0.0, 0.0])
up = torch.tensor([0.0, 1.0, 0.0])
fov = torch.tensor([45.0])
clip_near = 1e-2

resolution = (256, 256)
cam = pyredner.Camera(position=position,
                      look_at=look_at,
                      up=up,
                      fov=fov,
                      clip_near=clip_near,
                      resolution=resolution)

checkerboard_texture = pyredner.imread('checkerboard.exr')
if pyredner.get_use_gpu():
    checkerboard_texture = checkerboard_texture.cuda()

mat_checkerboard = pyredner.Material(\
    diffuse_reflectance = checkerboard_texture)
mat_black = pyredner.Material(\
    diffuse_reflectance = torch.tensor([0.0, 0.0, 0.0],
    device = pyredner.get_device()))
materials = [mat_checkerboard, mat_black]
vertices = torch.tensor(
    [[-1.0, -1.0, 0.0], [-1.0, 1.0, 0.0], [1.0, -1.0, 0.0], [1.0, 1.0, 0.0]],
    device=pyredner.get_device())
indices = torch.tensor([[0, 1, 2], [1, 3, 2]],
                       dtype=torch.int32,
                       device=pyredner.get_device())
envmap = pyredner.EnvironmentMap(envmap)
# Setup the scene
scene = pyredner.Scene(cam, shapes, materials, [], envmap)
# Serialize the scene
scene_args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 512,
    max_bounces = 1)
# Render the target
render = pyredner.RenderFunction.apply
img = render(0, *scene_args)
# Save the target image
pyredner.imwrite(img.cpu(), 'results/joint_material_envmap_sh/target.exr')
pyredner.imwrite(img.cpu(), 'results/joint_material_envmap_sh/target.png')
# Read the target image back
target = pyredner.imread('results/joint_material_envmap_sh/target.exr')
if pyredner.get_use_gpu():
    target = target.cuda()

# Reset the coefficients to a constant color; the 16 = 4^2 coefficients per
# channel cover spherical-harmonics bands 0-3, and only the constant (l = 0)
# term is nonzero. Repeat the same process as for the target envmap.
coeffs = torch.tensor([[ 0.5,
                         0.0, 0.0, 0.0,
                         0.0, 0.0, 0.0, 0.0, 0.0,
                         0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], # coeffs for red
                       [ 0.5,
                         0.0, 0.0, 0.0,
                         0.0, 0.0, 0.0, 0.0, 0.0,
                         0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], # coeffs for green
                       [ 0.5,
                         0.0, 0.0, 0.0,
                         0.0, 0.0, 0.0, 0.0, 0.0,
                         0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]], # coeffs for blue
                       device = pyredner.get_device(),
                       requires_grad = True)
# The first argument is the shape id of the light
light = pyredner.Light(2, light_intensity)
lights = [light]
scene = pyredner.Scene(cam, shapes, materials, lights)
args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 1024,
    max_bounces = 1)

# Alias of the render function
render = pyredner.RenderFunction.apply
# Render our target
img = render(0, *args)
pyredner.imwrite(img.cpu(), 'results/test_shadow_glossy/target.exr')
pyredner.imwrite(img.cpu(), 'results/test_shadow_glossy/target.png')
target = pyredner.imread('results/test_shadow_glossy/target.exr')
if pyredner.get_use_gpu():
    target = target.cuda()

# Perturb the scene, this is our initial guess
shape_blocker.vertices = torch.tensor(\
    [[-0.6,0.9,0.4],[-0.8,3.3,0.7],[0.2,1.1,0.6],[0.3,3.2,0.4]],
    device = pyredner.get_device(),
    requires_grad=True)
args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 1024,
    max_bounces = 1)
# Render the initial guess
img = render(1, *args)
pyredner.imwrite(img.cpu(), 'results/test_shadow_glossy/init.png')
scene = pyredner.Scene(cam, shapes, materials, area_lights)
# Serialize the scene
# Here we specify the output channels as "radiance" and "alpha"
scene_args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 16,
    max_bounces = 1,
    channels = [redner.channels.radiance, redner.channels.alpha])

# Render the scene as our target image.
render = pyredner.RenderFunction.apply
# Render. The first argument is the seed for RNG in the renderer.
img = render(0, *scene_args)
# Since we specified alpha as output channel, img has 4 channels now
# We blend the image with a background image
background = pyredner.imread('scenes/textures/siggraph.jpg')
background = torch.from_numpy(skimage.transform.resize(background.numpy(), (256, 256, 3)))
if pyredner.get_use_gpu():
    background = background.cuda(device = pyredner.get_device())
background = background.type_as(img)
img = img[:, :, :3] * img[:, :, 3:4] + background * (1 - img[:, :, 3:4])

# Save the images.
# The output image is in the GPU memory if you are using GPU.
pyredner.imwrite(img.cpu(), 'results/test_single_triangle_background/target.exr')
pyredner.imwrite(img.cpu(), 'results/test_single_triangle_background/target.png')
# Read the target image we just saved.
target = pyredner.imread('results/test_single_triangle_background/target.exr')
if pyredner.get_use_gpu():
    target = target.cuda(device = pyredner.get_device())
scene_args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 16,
    max_bounces = 1)

# Render the scene as our target image.
# To render the scene, we use our custom PyTorch function in pyredner/render_pytorch.py
# First setup the alias of the render function
render = pyredner.RenderFunction.apply
# Render. The first argument is the seed for RNG in the renderer.
img = render(0, *scene_args)
# Save the images.
# The output image is in the GPU memory if you are using GPU.
pyredner.imwrite(img.cpu(), 'results/test_single_triangle/target.exr')
pyredner.imwrite(img.cpu(), 'results/test_single_triangle/target.png')
# Read the target image we just saved.
target = pyredner.imread('results/test_single_triangle/target.exr')
if pyredner.get_use_gpu():
    target = target.cuda()

# Perturb the scene, this is our initial guess.
shape_triangle.vertices = torch.tensor(\
    [[-2.0,1.5,0.3], [0.9,1.2,-0.3], [-0.4,-1.4,0.2]],
    device = pyredner.get_device(),
    requires_grad = True) # Set requires_grad to True since we want to optimize this
# We need to serialize the scene again to get the new arguments.
scene_args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 16,
    max_bounces = 1)
# Render the initial guess.
img = render(1, *scene_args)
Example #13
# The first argument is the shape id of the light
light = pyredner.AreaLight(2, light_intensity)
area_lights = [light]
scene = pyredner.Scene(cam, shapes, materials, area_lights)
args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 1024,
    max_bounces = 1)

# Alias of the render function
render = pyredner.RenderFunction.apply
# Render our target
img = render(0, *args)
pyredner.imwrite(img.cpu(), 'results/test_shadow_glossy/target.exr')
pyredner.imwrite(img.cpu(), 'results/test_shadow_glossy/target.png')
target = pyredner.imread('results/test_shadow_glossy/target.exr')
if pyredner.get_use_gpu():
    target = target.cuda(device = pyredner.get_device())

# Perturb the scene, this is our initial guess
shape_blocker.vertices = torch.tensor(\
    [[-0.6,0.9,0.4],[-0.8,3.3,0.7],[0.2,1.1,0.6],[0.3,3.2,0.4]],
    device = pyredner.get_device(),
    requires_grad=True)
args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 1024,
    max_bounces = 1)
# Render the initial guess
img = render(1, *args)
pyredner.imwrite(img.cpu(), 'results/test_shadow_glossy/init.png')
Example #14
light_intensity = torch.tensor([30.0,30.0,30.0])
light = pyredner.AreaLight(1, light_intensity)
area_lights = [light]
scene = pyredner.Scene(cam, shapes, materials, area_lights)
args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 16,
    max_bounces = 1)

# Alias of the render function
render = pyredner.RenderFunction.apply
# Render our target
img = render(0, *args)
pyredner.imwrite(img.cpu(), 'results/test_single_triangle_camera/target.exr')
pyredner.imwrite(img.cpu(), 'results/test_single_triangle_camera/target.png')
target = pyredner.imread('results/test_single_triangle_camera/target.exr')
if pyredner.get_use_gpu():
    target = target.cuda(device = pyredner.get_device())

# Perturb the scene, this is our initial guess
position = torch.tensor([0.0,  0.0, -3.0], requires_grad = True)
look_at = torch.tensor([-0.5, -0.5,  0.0], requires_grad = True)
scene.camera = pyredner.Camera(position = position,
                               look_at = look_at,
                               up = up,
                               fov = fov,
                               clip_near = clip_near,
                               resolution = resolution)
args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 16,
    max_bounces = 1)
Example #15
# The first argument is the shape id of the light
light = pyredner.AreaLight(1, light_intensity)
area_lights = [light]
scene = pyredner.Scene(cam, shapes, materials, area_lights)
args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 16,
    max_bounces = 1)

# Alias of the render function
render = pyredner.RenderFunction.apply
# Render our target
img = render(0, *args)
pyredner.imwrite(img.cpu(), 'results/test_svbrdf/target.exr')
pyredner.imwrite(img.cpu(), 'results/test_svbrdf/target.png')
target = pyredner.imread('results/test_svbrdf/target.exr')
if pyredner.get_use_gpu():
    target = target.cuda(device = pyredner.get_device())

# Our initial guess is three gray textures 
diffuse_tex = torch.tensor(\
    np.ones((256, 256, 3), dtype=np.float32) * 0.5,
    requires_grad = True,
    device = pyredner.get_device())
specular_tex = torch.tensor(\
    np.ones((256, 256, 3), dtype=np.float32) * 0.5,
    requires_grad = True,
    device = pyredner.get_device())
roughness_tex = torch.tensor(\
    np.ones((256, 256, 1), dtype=np.float32) * 0.5,
    requires_grad = True,
    device = pyredner.get_device())
Example #16
scene.materials[-1].specular_reflectance = \
    pyredner.Texture(torch.tensor([0.6, 0.6, 0.6], device = pyredner.get_device()))
scene.materials[-1].roughness = \
    pyredner.Texture(torch.tensor([0.05], device = pyredner.get_device()))
args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 1024,
    max_bounces = 2)

# Alias of the render function
render = pyredner.RenderFunction.apply
# Render our target. The first argument is the seed for RNG in the renderer.
img = render(0, *args)
pyredner.imwrite(img.cpu(), 'results/test_teapot_reflectance/target.exr')
pyredner.imwrite(img.cpu(), 'results/test_teapot_reflectance/target.png')
target = pyredner.imread('results/test_teapot_reflectance/target.exr')
if pyredner.get_use_gpu():
    target = target.cuda(device = pyredner.get_device())

# Perturb the scene, this is our initial guess
cam = scene.camera
cam_position = cam.position
cam_translation = torch.tensor([-0.2, 0.2, -0.2], requires_grad = True)
diffuse_reflectance = torch.tensor([0.3, 0.3, 0.3],
    device = pyredner.get_device(), requires_grad = True)
specular_reflectance = torch.tensor([0.5, 0.5, 0.5],
    device = pyredner.get_device(), requires_grad = True)
roughness = torch.tensor([0.2],
    device = pyredner.get_device(), requires_grad = True)
scene.materials[-1].diffuse_reflectance = pyredner.Texture(diffuse_reflectance)
scene.materials[-1].specular_reflectance = pyredner.Texture(specular_reflectance)
Example #17
# The first argument is the shape id of the light
light = pyredner.AreaLight(2, light_intensity)
area_lights = [light]
scene = pyredner.Scene(cam, shapes, materials, area_lights)
args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 256,
    max_bounces = 1)

# Alias of the render function
render = pyredner.RenderFunction.apply
# Render our target
img = render(0, *args)
pyredner.imwrite(img.cpu(), 'results/test_shadow_camera/target.exr')
pyredner.imwrite(img.cpu(), 'results/test_shadow_camera/target.png')
target = pyredner.imread('results/test_shadow_camera/target.exr')
if pyredner.get_use_gpu():
    target = target.cuda(device = pyredner.get_device())

# Perturb the scene, this is our initial guess
position = torch.tensor([-2.0, 7.0, 2.0], requires_grad = True)
scene.camera = pyredner.Camera(position = position,
                               look_at = look_at,
                               up = up,
                               fov = fov,
                               clip_near = clip_near,
                               resolution = resolution)
args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 256,
    max_bounces = 1)
Example #18
def load_obj(filename, obj_group = True):
    """
        Load from a Wavefront obj file as PyTorch tensors.
        XXX: this is slow, maybe move to C++?
    """
    vertices_pool = []
    uvs_pool = []
    normals_pool = []
    indices = []
    vertices = []
    normals = []
    uvs = []
    vertices_map = {}
    material_map = {}
    current_mtllib = {}
    current_material_name = None

    def create_mesh(indices, vertices, normals, uvs):
        indices = torch.tensor(indices, dtype = torch.int32, device = pyredner.get_device())
        vertices = torch.tensor(vertices, device = pyredner.get_device())
        if len(uvs) == 0:
            uvs = None
        else:
            uvs = torch.tensor(uvs, device = pyredner.get_device())
        if len(normals) == 0:
            normals = None
        else:
            normals = torch.tensor(normals, device = pyredner.get_device())
        return TriangleMesh(vertices, indices, uvs, normals)

    mesh_list = []
    light_map = {}

    f = open(filename, 'r')
    d = os.path.dirname(filename)
    cwd = os.getcwd()
    if d != '':
        os.chdir(d)
    for line in f:
        line = line.strip()
        splitted = re.split(r'\ +', line)
        if splitted[0] == 'mtllib':
            current_mtllib = load_mtl(splitted[1])
        elif splitted[0] == 'usemtl':
            if len(indices) > 0 and obj_group is True:
                # Flush
                mesh_list.append((current_material_name, create_mesh(indices, vertices, normals, uvs)))
                indices = []
                vertices = []
                normals = []
                uvs = []
                vertices_map = {}
            mtl_name = splitted[1]
            current_material_name = mtl_name
            if mtl_name not in material_map:
                m = current_mtllib[mtl_name]
                if m.map_Kd is None:
                    diffuse_reflectance = torch.tensor(m.Kd,
                        dtype = torch.float32, device = pyredner.get_device())
                else:
                    diffuse_reflectance = pyredner.imread(m.map_Kd)
                    if pyredner.get_use_gpu():
                        diffuse_reflectance = diffuse_reflectance.cuda(device = pyredner.get_device())
                if m.map_Ks is None:
                    specular_reflectance = torch.tensor(m.Ks,
                        dtype = torch.float32, device = pyredner.get_device())
                else:
                    specular_reflectance = pyredner.imread(m.map_Ks)
                    if pyredner.get_use_gpu():
                        specular_reflectance = specular_reflectance.cuda(device = pyredner.get_device())
                if m.map_Ns is None:
                    roughness = torch.tensor([2.0 / (m.Ns + 2.0)],
                        dtype = torch.float32, device = pyredner.get_device())
                else:
                    # map_Ns stores a Phong exponent map; convert it to roughness
                    roughness = 2.0 / (pyredner.imread(m.map_Ns) + 2.0)
                    if pyredner.get_use_gpu():
                        roughness = roughness.cuda(device = pyredner.get_device())
                if m.Ke != (0.0, 0.0, 0.0):
                    light_map[mtl_name] = torch.tensor(m.Ke, dtype = torch.float32)
                material_map[mtl_name] = pyredner.Material(\
                    diffuse_reflectance, specular_reflectance, roughness)
        elif splitted[0] == 'v':
            vertices_pool.append([float(splitted[1]), float(splitted[2]), float(splitted[3])])
        elif splitted[0] == 'vt':
            uvs_pool.append([float(splitted[1]), float(splitted[2])])
        elif splitted[0] == 'vn':
            normals_pool.append([float(splitted[1]), float(splitted[2]), float(splitted[3])])
        elif splitted[0] == 'f':
            def num_indices(x):
                return len(re.split('/', x))
            def get_index(x, i):
                return int(re.split('/', x)[i])
            def parse_face_index(x, i):
                f = get_index(x, i)
                if f < 0:
                    if (i == 0):
                        f += len(vertices)
                    if (i == 1):
                        f += len(uvs)
                else:
                    f -= 1
                return f
            assert(len(splitted) <= 5)
            def get_vertex_id(indices):
                pi = parse_face_index(indices, 0)
                uvi = None
                if (num_indices(indices) > 1 and re.split('/', indices)[1] != ''):
                    uvi = parse_face_index(indices, 1)
                ni = None
                if (num_indices(indices) > 2 and re.split('/', indices)[2] != ''):
                    ni = parse_face_index(indices, 2)
                key = (pi, uvi, ni)
                if key in vertices_map:
                    return vertices_map[key]

                vertex_id = len(vertices)
                vertices_map[key] = vertex_id
                vertices.append(vertices_pool[pi])
                if uvi is not None:
                    uvs.append(uvs_pool[uvi])
                if ni is not None:
                    normals.append(normals_pool[ni])
                return vertex_id
            vid0 = get_vertex_id(splitted[1])
            vid1 = get_vertex_id(splitted[2])
            vid2 = get_vertex_id(splitted[3])

            indices.append([vid0, vid1, vid2])
            if (len(splitted) == 5):
                vid3 = get_vertex_id(splitted[4])
                indices.append([vid0, vid2, vid3])
    
    mesh_list.append((current_material_name,
        create_mesh(indices, vertices, normals, uvs)))
    if d != '':
        os.chdir(cwd)
    f.close()
    return material_map, mesh_list, light_map
Example #19
# The first argument is the shape id of the light
light = pyredner.Light(2, light_intensity)
lights = [light]
scene = pyredner.Scene(cam, shapes, materials, lights)
args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 256,
    max_bounces = 1)

# Alias of the render function
render = pyredner.RenderFunction.apply
# Render our target
img = render(0, *args)
pyredner.imwrite(img.cpu(), 'results/test_shadow_blocker/target.exr')
pyredner.imwrite(img.cpu(), 'results/test_shadow_blocker/target.png')
target = pyredner.imread('results/test_shadow_blocker/target.exr')
if pyredner.get_use_gpu():
    target = target.cuda()

# Perturb the scene, this is our initial guess
shape_blocker.vertices = torch.tensor(\
    [[-0.2,3.5,-0.8],[-0.8,3.0,0.3],[0.4,2.8,-0.8],[0.3,3.2,1.0]],
    device = pyredner.get_device(),
    requires_grad=True)
args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 256,
    max_bounces = 1)
# Render the initial guess
img = render(1, *args)
pyredner.imwrite(img.cpu(), 'results/test_shadow_blocker/init.png')
Example #20
scene.materials[-1].diffuse_reflectance = \
    pyredner.Texture(torch.tensor([0.15, 0.2, 0.15], device = pyredner.get_device()))
scene.materials[-1].specular_reflectance = \
    pyredner.Texture(torch.tensor([0.8, 0.8, 0.8], device = pyredner.get_device()))
scene.materials[-1].roughness = \
    pyredner.Texture(torch.tensor([0.0001], device = pyredner.get_device()))
args=pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 512,
    max_bounces = 2)

render = pyredner.RenderFunction.apply
# Render our target. The first argument is the seed for RNG in the renderer.
img = render(0, *args)
pyredner.imwrite(img.cpu(), 'results/test_teapot_specular/target.exr')
pyredner.imwrite(img.cpu(), 'results/test_teapot_specular/target.png')
target = pyredner.imread('results/test_teapot_specular/target.exr')
if pyredner.get_use_gpu():
    target = target.cuda(device = pyredner.get_device())

# Perturb the scene, this is our initial guess
# We perturb the last shape, which is the SIGGRAPH logo
ref_pos = scene.shapes[-1].vertices
translation = torch.tensor([20.0, 0.0, 2.0], device = pyredner.get_device(), requires_grad=True)
scene.shapes[-1].vertices = ref_pos + translation
args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 512,
    max_bounces = 2)
# Render the initial guess
img = render(1, *args)
pyredner.imwrite(img.cpu(), 'results/test_teapot_specular/init.png')
Example #21
def load_obj(filename,
             obj_group=True,
             flip_tex_coords=True,
             use_common_indices=False):
    """
        Load from a Wavefront obj file as PyTorch tensors.
        XXX: this is slow, maybe move to C++?

        Args: obj_group -- split the meshes based on materials
              flip_tex_coords -- flip the v coordinate of uv by applying v' = 1 - v
              use_common_indices -- use the same indices for position, uvs, normals.
                                    Not recommended since texture seams in the objects sharing
                                    the same positions would cause the optimization to "tear" the object.
    """
    vertices_pool = []
    uvs_pool = []
    normals_pool = []
    indices = []
    uv_indices = []
    normal_indices = []
    vertices = []
    uvs = []
    normals = []
    vertices_map = {}
    uvs_map = {}
    normals_map = {}
    material_map = {}
    current_mtllib = {}
    current_material_name = None

    def create_mesh(indices, uv_indices, normal_indices, vertices, uvs,
                    normals):
        indices = torch.tensor(indices,
                               dtype=torch.int32,
                               device=pyredner.get_device())
        if len(uv_indices) == 0:
            uv_indices = None
        else:
            uv_indices = torch.tensor(uv_indices,
                                      dtype=torch.int32,
                                      device=pyredner.get_device())
        if len(normal_indices) == 0:
            normal_indices = None
        else:
            normal_indices = torch.tensor(normal_indices,
                                          dtype=torch.int32,
                                          device=pyredner.get_device())
        vertices = torch.tensor(vertices, device=pyredner.get_device())
        if len(uvs) == 0:
            uvs = None
        else:
            uvs = torch.tensor(uvs, device=pyredner.get_device())
        if len(normals) == 0:
            normals = None
        else:
            normals = torch.tensor(normals, device=pyredner.get_device())
        return TriangleMesh(indices, uv_indices, normal_indices, vertices, uvs,
                            normals)

    mesh_list = []
    light_map = {}

    f = open(filename, 'r')
    d = os.path.dirname(filename)
    cwd = os.getcwd()
    if d != '':
        os.chdir(d)
    for line in f:
        line = line.strip()
        splitted = re.split(r'\ +', line)
        if splitted[0] == 'mtllib':
            current_mtllib = load_mtl(splitted[1])
        elif splitted[0] == 'usemtl':
            if len(indices) > 0 and obj_group is True:
                # Flush
                mesh_list.append(
                    (current_material_name,
                     create_mesh(indices, uv_indices, normal_indices, vertices,
                                 uvs, normals)))
                indices = []
                uv_indices = []
                normal_indices = []
                vertices = []
                normals = []
                uvs = []
                vertices_map = {}
                uvs_map = {}
                normals_map = {}

            mtl_name = splitted[1]
            current_material_name = mtl_name
            if mtl_name not in material_map:
                m = current_mtllib[mtl_name]
                if m.map_Kd is None:
                    diffuse_reflectance = torch.tensor(
                        m.Kd,
                        dtype=torch.float32,
                        device=pyredner.get_device())
                else:
                    diffuse_reflectance = pyredner.imread(m.map_Kd)
                    if pyredner.get_use_gpu():
                        diffuse_reflectance = diffuse_reflectance.cuda(
                            device=pyredner.get_device())
                if m.map_Ks is None:
                    specular_reflectance = torch.tensor(
                        m.Ks,
                        dtype=torch.float32,
                        device=pyredner.get_device())
                else:
                    specular_reflectance = pyredner.imread(m.map_Ks)
                    if pyredner.get_use_gpu():
                        specular_reflectance = specular_reflectance.cuda(
                            device=pyredner.get_device())
                if m.map_Ns is None:
                    roughness = torch.tensor([2.0 / (m.Ns + 2.0)],
                                             dtype=torch.float32,
                                             device=pyredner.get_device())
                else:
                    # map_Ns stores a Phong exponent map; convert it to roughness
                    roughness = 2.0 / (pyredner.imread(m.map_Ns) + 2.0)
                    if pyredner.get_use_gpu():
                        roughness = roughness.cuda(
                            device=pyredner.get_device())
                if m.Ke != (0.0, 0.0, 0.0):
                    light_map[mtl_name] = torch.tensor(m.Ke,
                                                       dtype=torch.float32)
                material_map[mtl_name] = pyredner.Material(\
                    diffuse_reflectance, specular_reflectance, roughness)
        elif splitted[0] == 'v':
            vertices_pool.append(
                [float(splitted[1]),
                 float(splitted[2]),
                 float(splitted[3])])
        elif splitted[0] == 'vt':
            u = float(splitted[1])
            v = float(splitted[2])
            if flip_tex_coords:
                v = 1 - v
            uvs_pool.append([u, v])
        elif splitted[0] == 'vn':
            normals_pool.append(
                [float(splitted[1]),
                 float(splitted[2]),
                 float(splitted[3])])
        elif splitted[0] == 'f':

            def num_indices(x):
                return len(re.split('/', x))

            def get_index(x, i):
                return int(re.split('/', x)[i])

            def parse_face_index(x, i):
                f = get_index(x, i)
                if f < 0:
                    if (i == 0):
                        f += len(vertices)
                    if (i == 1):
                        f += len(uvs)
                else:
                    f -= 1
                return f

            assert (len(splitted) <= 5)

            def get_vertex_id(indices):
                pi = parse_face_index(indices, 0)
                uvi = None
                if (num_indices(indices) > 1
                        and re.split('/', indices)[1] != ''):
                    uvi = parse_face_index(indices, 1)
                ni = None
                if (num_indices(indices) > 2
                        and re.split('/', indices)[2] != ''):
                    ni = parse_face_index(indices, 2)
                if use_common_indices:
                    # vertex, uv, normals share the same indexing
                    key = (pi, uvi, ni)
                    if key in vertices_map:
                        vertex_id = vertices_map[key]
                        return vertex_id, vertex_id, vertex_id

                    vertex_id = len(vertices)
                    vertices_map[key] = vertex_id
                    vertices.append(vertices_pool[pi])
                    if uvi is not None:
                        uvs.append(uvs_pool[uvi])
                    if ni is not None:
                        normals.append(normals_pool[ni])
                    return vertex_id, vertex_id, vertex_id
                else:
                    # vertex, uv, normals use separate indexing
                    vertex_id = None
                    uv_id = None
                    normal_id = None

                    if pi in vertices_map:
                        vertex_id = vertices_map[pi]
                    else:
                        vertex_id = len(vertices)
                        vertices.append(vertices_pool[pi])
                        vertices_map[pi] = vertex_id

                    if uvi is not None:
                        if uvi in uvs_map:
                            uv_id = uvs_map[uvi]
                        else:
                            uv_id = len(uvs)
                            uvs.append(uvs_pool[uvi])
                            uvs_map[uvi] = uv_id

                    if ni is not None:
                        if ni in normals_map:
                            normal_id = normals_map[ni]
                        else:
                            normal_id = len(normals)
                            normals.append(normals_pool[ni])
                            normals_map[ni] = normal_id
                    return vertex_id, uv_id, normal_id

            vid0, uv_id0, n_id0 = get_vertex_id(splitted[1])
            vid1, uv_id1, n_id1 = get_vertex_id(splitted[2])
            vid2, uv_id2, n_id2 = get_vertex_id(splitted[3])

            indices.append([vid0, vid1, vid2])
            if uv_id0 is not None:
                assert (uv_id1 is not None and uv_id2 is not None)
                uv_indices.append([uv_id0, uv_id1, uv_id2])
            if n_id0 is not None:
                assert (n_id1 is not None and n_id2 is not None)
                normal_indices.append([n_id0, n_id1, n_id2])
            if (len(splitted) == 5):
                vid3, uv_id3, n_id3 = get_vertex_id(splitted[4])
                indices.append([vid0, vid2, vid3])
                if uv_id0 is not None:
                    assert (uv_id3 is not None)
                    uv_indices.append([uv_id0, uv_id2, uv_id3])
                if n_id0 is not None:
                    assert (n_id3 is not None)
                    normal_indices.append([n_id0, n_id2, n_id3])

    mesh_list.append((current_material_name,
                      create_mesh(indices, uv_indices, normal_indices,
                                  vertices, uvs, normals)))
    if d != '':
        os.chdir(cwd)

    f.close()
    return material_map, mesh_list, light_map
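A short usage sketch (the path is a placeholder): each entry of mesh_list pairs a material name with a TriangleMesh, and light_map holds the emissive materials.

material_map, mesh_list, light_map = load_obj('teapot.obj', flip_tex_coords = True)
for mtl_name, mesh in mesh_list:
    print(mtl_name, mesh.vertices.shape, mesh.indices.shape)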
Example #22
    offset = 2./samples
    increment = math.pi * (3. - math.sqrt(5.))

    for i in range(samples):
        y = ((i * offset) - 1) + (offset / 2)
        r = math.sqrt(1 - pow(y,2))

        phi = ((i + rnd) % samples) * increment

        x = math.cos(phi) * r
        z = math.sin(phi) * r

        points.append(torch.tensor([x,y,z]))

    return points
envmap_cathedral = pyredner.imread('resources/grace-new.exr')
# Replace the loaded envmap with a constant white map of the same resolution
envmap_cathedral = torch.ones(envmap_cathedral.shape, device=pyredner.get_device())
if pyredner.get_use_gpu():
    envmap_cathedral = envmap_cathedral.cuda()
envmap_cathedral = pyredner.EnvironmentMap(envmap_cathedral)

def generate_scenes(camLocs,objects,envmap=None, lightLocs=None):
  scenes = []
  up = torch.tensor([0.0, 1.0, 0.0])
  offset_factor = 0.0
  light_intensity = 100.0            

  for ind, loc in enumerate(camLocs):
    camera = pyredner.Camera(position = loc,
                          look_at = torch.tensor([0.0, 0.0, 0.0]),
                          up = camera0.up,
Example #23
mat_grey = pyredner.Material(\
    diffuse_reflectance = \
        torch.tensor([0.5, 0.5, 0.5], device = pyredner.get_device()),
    roughness = \
        torch.tensor([0.05], device = pyredner.get_device()))

materials = [mat_grey]

vertices, indices, uvs, normals = pyredner.generate_sphere(128, 64)
shape_sphere = pyredner.Shape(\
    vertices = vertices,
    indices = indices,
    uvs = uvs,
    normals = normals,
    material_id = 0)
shapes = [shape_sphere]

envmap = pyredner.imread('sunsky.exr')
if pyredner.get_use_gpu():
    envmap = envmap.cuda(device = pyredner.get_device())
envmap = pyredner.EnvironmentMap(envmap)
scene = pyredner.Scene(cam, shapes, materials, [], envmap)
scene_args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 256,
    max_bounces = 1)
render = pyredner.RenderFunction.apply
img = render(0, *scene_args)
pyredner.imwrite(img.cpu(), 'results/test_envmap/target.exr')
pyredner.imwrite(img.cpu(), 'results/test_envmap/target.png')
target = pyredner.imread('results/test_envmap/target.exr')
if pyredner.get_use_gpu():
    target = target.cuda()
Example #24
scene.materials[-1].diffuse_reflectance = \
    torch.tensor([0.15, 0.2, 0.15], device = pyredner.get_device())
scene.materials[-1].specular_reflectance = \
    torch.tensor([0.8, 0.8, 0.8], device = pyredner.get_device())
scene.materials[-1].roughness = \
    torch.tensor([0.0001], device = pyredner.get_device())
args=pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 512,
    max_bounces = 2)

render = pyredner.RenderFunction.apply
# Render our target. The first argument is the seed for RNG in the renderer.
img = render(0, *args)
pyredner.imwrite(img.cpu(), 'results/test_teapot_specular/target.exr')
pyredner.imwrite(img.cpu(), 'results/test_teapot_specular/target.png')
target = pyredner.imread('results/test_teapot_specular/target.exr')
if pyredner.get_use_gpu():
    target = target.cuda()

# Perturb the scene, this is our initial guess
# We perturb the last shape, which is the SIGGRAPH logo
ref_pos = scene.shapes[-1].vertices
translation = torch.tensor([20.0, 0.0, 2.0],
                           device=pyredner.get_device(),
                           requires_grad=True)
scene.shapes[-1].vertices = ref_pos + translation
args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 512,
    max_bounces = 2)
# Render the initial guess
Example #25
# In addition to Wavefront obj files, redner also supports loading from a Mitsuba
# scene file. Currently we only support a limited set of features. In particular
# we only support two kinds of materials: diffuse and roughplastic. Note that the
# "alpha" value in roughplastic is the square root of the roughness. See cbox.xml
# for what a Mitsuba scene file should look like.
# We can load a scene using the pyredner.load_mitsuba() utility and render it as usual.
scene = pyredner.load_mitsuba('cbox/cbox.xml')
scene_args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 512,
    max_bounces = 5) # Set max_bounces = 5 for global illumination
render = pyredner.RenderFunction.apply
img = render(0, *scene_args)
pyredner.imwrite(img.cpu(), 'results/coarse_to_fine_estimation/target.exr')
pyredner.imwrite(img.cpu(), 'results/coarse_to_fine_estimation/target.png')
target = pyredner.imread('results/coarse_to_fine_estimation/target.exr')
if pyredner.get_use_gpu():
    target = target.cuda()

# Now let's generate an initial guess by perturbing the reference.
# Let's set all the diffuse color to gray by manipulating material.diffuse_reflectance.
# We also store all the material variables to optimize in a list.
material_vars = []
for mi, m in enumerate(scene.materials):
    var = torch.tensor([0.5, 0.5, 0.5],
                       device = pyredner.get_device(),
                       requires_grad = True)
    material_vars.append(var)
    m.diffuse_reflectance = pyredner.Texture(var)
        
# And let's also slightly perturb the camera up vector and field of view
Example #26
light_intensity = torch.tensor([30.0,30.0,30.0])
light = pyredner.AreaLight(1, light_intensity)
area_lights = [light]
scene = pyredner.Scene(cam, shapes, materials, area_lights)
args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 16,
    max_bounces = 1)

# Alias of the render function
render = pyredner.RenderFunction.apply
# Render our target
img = render(0, *args)
pyredner.imwrite(img.cpu(), 'results/test_single_triangle_camera_fisheye/target.exr')
pyredner.imwrite(img.cpu(), 'results/test_single_triangle_camera_fisheye/target.png')
target = pyredner.imread('results/test_single_triangle_camera_fisheye/target.exr')
if pyredner.get_use_gpu():
    target = target.cuda(device = pyredner.get_device())

# Perturb the scene, this is our initial guess
position = torch.tensor([0.5, -0.5, -3.0], requires_grad = True)
scene.camera = pyredner.Camera(position = position,
                               look_at = look_at,
                               up = up,
                               fov = fov,
                               clip_near = clip_near,
                               resolution = resolution,
                               fisheye = True)
args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 16,
    max_bounces = 1)
Example #27
# Load the scene from a Mitsuba scene file
scene = pyredner.load_mitsuba('scenes/living-room-3/scene.xml')
print('scene loaded')

max_bounces = 6
args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 512,
    max_bounces = max_bounces)

render = pyredner.RenderFunction.apply
# Render our target. The first argument is the seed for RNG in the renderer.
img = render(0, *args)
pyredner.imwrite(img.cpu(), 'results/test_living_room/target.exr')
pyredner.imwrite(img.cpu(), 'results/test_living_room/target.png')
target = pyredner.imread('results/test_living_room/target.exr')
if pyredner.get_use_gpu():
    target = target.cuda(device = pyredner.get_device())

scene.camera.look_at = torch.tensor([-0.556408, 0.951295, -3.98066], requires_grad=True)
scene.camera.position = torch.tensor([0.00419251, 0.973707, -4.80844], requires_grad=True)
scene.camera.up = torch.tensor([-0.00920347, 0.999741, 0.020835], requires_grad=True)

args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 512,
    max_bounces = max_bounces)

img = render(1, *args)
pyredner.imwrite(img.cpu(), 'results/test_living_room/init.exr')
pyredner.imwrite(img.cpu(), 'results/test_living_room/init.png')
Example #28
    fisheye=False)

# Get a list of shapes
shapes = []
for mtl_name, mesh in mesh_list:
    shapes.append(pyredner.Shape(\
        vertices = mesh.vertices,
        indices = mesh.indices,
        uvs = mesh.uvs,
        normals = mesh.normals,
        material_id = 0)) # Set all materials to the generic texture

render = pyredner.RenderFunction.apply

tex_path = '../tutorials/teapot.png'
tex_tensor = pyredner.imread(tex_path)
if pyredner.get_use_gpu():
    tex_tensor = tex_tensor.cuda(device=pyredner.get_device())

### TEST 1: regular 3-channel texture rasterization

generic_texture = tex_tensor

materials = [pyredner.Material(generic_texture=generic_texture)]

# Construct the scene.
# Don't setup any light sources, only use primary visibility.
scene = pyredner.Scene(cam, shapes, materials, area_lights=[], envmap=None)
# Serialize the scene.
scene_args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
Example #29
# Now we build a list of shapes using the list loaded from the Wavefront object file
shapes = []
for mtl_name, mesh in mesh_list:
    shapes.append(pyredner.Shape(\
        vertices = mesh.vertices,
        indices = mesh.indices,
        uvs = mesh.uvs,
        normals = mesh.normals,
        material_id = material_id_map[mtl_name]))

# The previous tutorial used a mesh area light for the scene lighting;
# here we use an environment light,
# which is a texture representing infinitely far away light sources in 
# spherical coordinates.
envmap = pyredner.imread('sunsky.exr')
if pyredner.get_use_gpu():
    envmap = envmap.cuda()
envmap = pyredner.EnvironmentMap(envmap)

# Finally we construct our scene using all the variables we setup previously.
scene = pyredner.Scene(cam, shapes, materials, area_lights = [], envmap = envmap)
# Like the previous tutorial, we serialize and render the scene, 
# save it as our target
scene_args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 512,
    max_bounces = 1)
render = pyredner.RenderFunction.apply
img = render(0, *scene_args)
pyredner.imwrite(img.cpu(), 'results/pose_estimation/target.exr')
Example #30
shapes = []
for mtl_name, mesh in mesh_list:
    assert (mesh.normal_indices is None)
    shapes.append(pyredner.Shape(\
        vertices = mesh.vertices,
        indices = mesh.indices,
        material_id = material_id_map[mtl_name],
        uvs = mesh.uvs,
        normals = mesh.normals,
        uv_indices = mesh.uv_indices))

# The previous tutorial used a mesh area light for the scene lighting;
# here we use an environment light,
# which is a texture representing infinitely far away light sources in
# spherical coordinates.
envmap = pyredner.imread('sunsky.exr')
if pyredner.get_use_gpu():
    envmap = envmap.cuda()
envmap = pyredner.EnvironmentMap(envmap)

# Finally we construct our scene using all the variables we setup previously.
scene = pyredner.Scene(cam, shapes, materials, area_lights=[], envmap=envmap)
# Like the previous tutorial, we serialize and render the scene,
# save it as our target
scene_args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 512,
    max_bounces = 1)
render = pyredner.RenderFunction.apply
img = render(0, *scene_args)
pyredner.imwrite(img.cpu(), 'results/pose_estimation/target.exr')
Example #31
# The first argument is the shape id of the light
light = pyredner.AreaLight(2, light_intensity)
area_lights = [light]
scene = pyredner.Scene(cam, shapes, materials, area_lights)
args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 256,
    max_bounces = 1)

# Alias of the render function
render = pyredner.RenderFunction.apply
# Render our target
img = render(0, *args)
pyredner.imwrite(img.cpu(), 'results/test_shadow_receiver/target.exr')
pyredner.imwrite(img.cpu(), 'results/test_shadow_receiver/target.png')
target = pyredner.imread('results/test_shadow_receiver/target.exr')
if pyredner.get_use_gpu():
    target = target.cuda(device = pyredner.get_device())

# Perturb the scene, this is our initial guess
shape_floor.vertices = torch.tensor(\
    [[-2.0,-0.2,-2.0],[-2.0,-0.2,2.0],[2.0,-0.2,-2.0],[2.0,-0.2,2.0]],
    device = pyredner.get_device(),
    requires_grad = True)
args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 256,
    max_bounces = 1)
# Render the initial guess
img = render(1, *args)
pyredner.imwrite(img.cpu(), 'results/test_shadow_receiver/init.png')
Example #32
# The first argument is the shape id of the light
light = pyredner.AreaLight(2, light_intensity)
area_lights = [light]
scene = pyredner.Scene(cam, shapes, materials, area_lights)
args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 256,
    max_bounces = 1)

# Alias of the render function
render = pyredner.RenderFunction.apply
# Render our target
img = render(0, *args)
pyredner.imwrite(img.cpu(), 'results/test_two_triangles/target.exr')
pyredner.imwrite(img.cpu(), 'results/test_two_triangles/target.png')
target = pyredner.imread('results/test_two_triangles/target.exr')
if pyredner.get_use_gpu():
    target = target.cuda(device = pyredner.get_device())

# Perturb the scene, this is our initial guess
shape_tri0.vertices = torch.tensor(\
    [[-1.3,1.5,0.1], [1.5,0.7,-0.2], [-0.8,-1.1,0.2]],
    device = pyredner.get_device(),
    requires_grad=True)
shape_tri1.vertices = torch.tensor(\
    [[-0.5,1.2,1.2], [0.3,1.7,1.0], [0.5,-1.8,1.3]],
    device = pyredner.get_device(),
    requires_grad=True)
args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 256,
    max_bounces = 1)
Example #33
scene.materials[-1].specular_reflectance = \
    pyredner.Texture(torch.tensor([0.6, 0.6, 0.6], device = pyredner.get_device()))
scene.materials[-1].roughness = \
    pyredner.Texture(torch.tensor([0.05], device = pyredner.get_device()))
args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 1024,
    max_bounces = 2)

# Alias of the render function
render = pyredner.RenderFunction.apply
# Render our target. The first argument is the seed for RNG in the renderer.
img = render(0, *args)
pyredner.imwrite(img.cpu(), 'results/test_teapot_reflectance/target.exr')
pyredner.imwrite(img.cpu(), 'results/test_teapot_reflectance/target.png')
target = pyredner.imread('results/test_teapot_reflectance/target.exr')
if pyredner.get_use_gpu():
    target = target.cuda(device=pyredner.get_device())

# Perturb the scene, this is our initial guess
cam = scene.camera
cam_position = cam.position
cam_translation = torch.tensor([-0.2, 0.2, -0.2], requires_grad=True)
diffuse_reflectance = torch.tensor([0.3, 0.3, 0.3],
                                   device=pyredner.get_device(),
                                   requires_grad=True)
specular_reflectance = torch.tensor([0.5, 0.5, 0.5],
                                    device=pyredner.get_device(),
                                    requires_grad=True)
roughness = torch.tensor([0.2],
                         device=pyredner.get_device(),
                         requires_grad=True)
Example #34
# Set up the scene using PyTorch tensors
position = torch.tensor([0.0, 0.0, -5.0])
look_at = torch.tensor([0.0, 0.0, 0.0])
up = torch.tensor([0.0, 1.0, 0.0])
fov = torch.tensor([45.0])
clip_near = 1e-2

resolution = (256, 256)
cam = pyredner.Camera(position = position,
                      look_at = look_at,
                      up = up,
                      fov = fov,
                      clip_near = clip_near,
                      resolution = resolution)

checkerboard_texture = pyredner.imread('checkerboard.exr')
if pyredner.get_use_gpu():
    checkerboard_texture = checkerboard_texture.cuda(device = pyredner.get_device())

mat_checkerboard = pyredner.Material(\
    diffuse_reflectance = checkerboard_texture)
mat_black = pyredner.Material(\
    diffuse_reflectance = torch.tensor([0.0, 0.0, 0.0],
    device = pyredner.get_device()))
materials = [mat_checkerboard, mat_black]
vertices = torch.tensor([[-1.0,-1.0,0.0], [-1.0,1.0,0.0], [1.0,-1.0,0.0], [1.0,1.0,0.0]],
                        device = pyredner.get_device())
indices = torch.tensor([[0, 1, 2], [1, 3, 2]], dtype = torch.int32,
                       device = pyredner.get_device())
uvs = torch.tensor([[0.05, 0.05], [0.05, 0.95], [0.95, 0.05], [0.95, 0.95]],
                   device = pyredner.get_device())
Example #35
# The first argument is the shape id of the light
light = pyredner.AreaLight(1, light_intensity)
area_lights = [light]
scene = pyredner.Scene(cam, shapes, materials, area_lights)
args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 16,
    max_bounces = 1)

# Alias of the render function
render = pyredner.RenderFunction.apply
# Render our target
img = render(0, *args)
pyredner.imwrite(img.cpu(), 'results/test_svbrdf/target.exr')
pyredner.imwrite(img.cpu(), 'results/test_svbrdf/target.png')
target = pyredner.imread('results/test_svbrdf/target.exr')
if pyredner.get_use_gpu():
    target = target.cuda()

# Our initial guess is three gray textures
diffuse_tex = torch.tensor(\
    np.ones((256, 256, 3), dtype=np.float32) * 0.5,
    requires_grad = True,
    device = pyredner.get_device())
specular_tex = torch.tensor(\
    np.ones((256, 256, 3), dtype=np.float32) * 0.5,
    requires_grad = True,
    device = pyredner.get_device())
roughness_tex = torch.tensor(\
    np.ones((256, 256, 1), dtype=np.float32) * 0.5,
    requires_grad = True,
    device = pyredner.get_device())
Example #36
# The first argument is the shape id of the light
light = pyredner.AreaLight(2, light_intensity)
area_lights = [light]
scene = pyredner.Scene(cam, shapes, materials, area_lights)
args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 256,
    max_bounces = 1)

# Alias of the render function
render = pyredner.RenderFunction.apply
# Render our target
img = render(0, *args)
pyredner.imwrite(img.cpu(), 'results/test_shadow_camera/target.exr')
pyredner.imwrite(img.cpu(), 'results/test_shadow_camera/target.png')
target = pyredner.imread('results/test_shadow_camera/target.exr')
if pyredner.get_use_gpu():
    target = target.cuda(device = pyredner.get_device())

# Perturb the scene, this is our initial guess
position = torch.tensor([-2.0, 7.0, 2.0], requires_grad = True)
scene.camera = pyredner.Camera(position = position,
                               look_at = look_at,
                               up = up,
                               fov = fov,
                               clip_near = clip_near,
                               resolution = resolution)
args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 256,
    max_bounces = 1)
Example #37
# The first argument is the shape id of the light
light = pyredner.AreaLight(2, light_intensity)
area_lights = [light]
scene = pyredner.Scene(cam, shapes, materials, area_lights)
args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 256,
    max_bounces = 1)

# Alias of the render function
render = pyredner.RenderFunction.apply
# Render our target
img = render(0, *args)
pyredner.imwrite(img.cpu(), 'results/test_shadow_receiver/target.exr')
pyredner.imwrite(img.cpu(), 'results/test_shadow_receiver/target.png')
target = pyredner.imread('results/test_shadow_receiver/target.exr')
if pyredner.get_use_gpu():
    target = target.cuda(device=pyredner.get_device())

# Perturb the scene, this is our initial guess
shape_floor.vertices = torch.tensor(\
    [[-2.0,-0.2,-2.0],[-2.0,-0.2,2.0],[2.0,-0.2,-2.0],[2.0,-0.2,2.0]],
    device = pyredner.get_device(),
    requires_grad = True)
args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 256,
    max_bounces = 1)
# Render the initial guess
img = render(1, *args)
pyredner.imwrite(img.cpu(), 'results/test_shadow_receiver/init.png')
Example #38
render = pyredner.RenderFunction.apply
# Render. The first argument is the seed for RNG in the renderer.
img = render(0, *scene_args)
# Save the images.
depth = img[:, :, 0]
normal = img[:, :, 1:4]
pyredner.imwrite(depth.cpu(), 'results/test_g_buffer/target_depth.exr')
pyredner.imwrite(depth.cpu(),
                 'results/test_g_buffer/target_depth.png',
                 normalize=True)
pyredner.imwrite(normal.cpu(), 'results/test_g_buffer/target_normal.exr')
pyredner.imwrite(normal.cpu(),
                 'results/test_g_buffer/target_normal.png',
                 normalize=True)
# Read the target image we just saved.
target_depth = pyredner.imread('results/test_g_buffer/target_depth.exr')
target_depth = target_depth[:, :, 0]
target_normal = pyredner.imread('results/test_g_buffer/target_normal.exr')
if pyredner.get_use_gpu():
    target_depth = target_depth.cuda()
    target_normal = target_normal.cuda()

# Perturb the teapot by applying a translation and a rotation to the object
translation_params = torch.tensor([0.1, -0.1, 0.1],
                                  device=pyredner.get_device(),
                                  requires_grad=True)
translation = translation_params * 100.0
euler_angles = torch.tensor([0.1, -0.1, 0.1], requires_grad=True)
# These are the vertices we want to apply the transformation to
shape0_vertices = shapes[0].vertices.clone()
shape1_vertices = shapes[1].vertices.clone()
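
The excerpt does not show the transformation being applied. A minimal sketch, assuming pyredner.gen_rotate_matrix is available as in recent pyredner versions:

# A hedged sketch, not part of the original excerpt: rotate both shapes
# around their common center, then translate.
rotation_matrix = pyredner.gen_rotate_matrix(euler_angles)
if pyredner.get_use_gpu():
    rotation_matrix = rotation_matrix.cuda()
center = torch.mean(torch.cat([shape0_vertices, shape1_vertices]), 0)
shapes[0].vertices = \
    (shape0_vertices - center) @ torch.t(rotation_matrix) + \
    center + translation
shapes[1].vertices = \
    (shape1_vertices - center) @ torch.t(rotation_matrix) + \
    center + translation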
#scene = pyredner.Scene(cam, shapes, materials, area_lights = [], envmap = envmap)

scene_args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 512,
    max_bounces = 1)

render = pyredner.RenderFunction.apply
img = render(1, *scene_args)

#%
pyredner.imwrite(img.cpu(), 'results/' + folder_name + '/init_guess.exr')
pyredner.imwrite(img.cpu(), 'results/' + folder_name + '/init_guess.png')

# Loading it again
target = pyredner.imread('results/' + folder_name + '/init_guess.exr')
target_p = pyredner.imread('results/' + folder_name + '/init_guess.png')
if pyredner.get_use_gpu():
    target = target.cuda()

#%%
im = Image.open("results/uranus.jpg")
#imm= transforms.CenterCrop((256,256))(im)
#im= im.resize((256,256))

im = transforms.Resize((256, 256), interpolation=Image.BILINEAR)(im)

target = to_tensor(im)
target_np = torch_to_np(target)

target_np = reverse_channels(target_np)
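
to_tensor, torch_to_np, and reverse_channels are not defined in this excerpt. A minimal sketch of plausible definitions, assuming the usual CHW/HWC and RGB/BGR conventions; these are stand-ins, not the original helpers:

from torchvision.transforms.functional import to_tensor  # PIL image -> CHW float tensor

def torch_to_np(img_torch):
    # Assumed helper: CHW torch tensor -> HWC numpy array.
    return img_torch.detach().cpu().numpy().transpose(1, 2, 0)

def reverse_channels(img_np):
    # Assumed helper: swap RGB <-> BGR along the last axis.
    return img_np[..., ::-1]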
Example #40

#cam_pos = torch.tensor([-80.2697, -55.7891, 373.9277])
#cam_look_at = torch.tensor([-0.2697, -5.7891, 54.7918])
#img = model(cam_pos, cam_look_at, torch.zeros(199, device=pyredner.get_device()), torch.zeros(199, device=pyredner.get_device()), torch.ones(3), torch.zeros(3))
#pyredner.imwrite(img.cpu(), 'img.png')
data_path = "generated/dataset2/"
c_p, cam_look_at, dir_light_intensity, dir_light_direction = np.load(
    data_path + "env_data.npy", allow_pickle=True)
cam_poses = torch.tensor(c_p, requires_grad=True)

#target = pyredner.imread('generated/img03.png').to(pyredner.get_device())
target = []
for i in range(len(cam_poses)):
    target.append(
        pyredner.imread(data_path + 'target_img{:0>2d}.png'.format(i)).to(
            pyredner.get_device()))
    # Also save each target for inspection.
    pyredner.imwrite(target[i].cpu(), 'process/target_img{:0>2d}.png'.format(i))

#cam_pos = torch.tensor([-0.2697, -5.7891, 373.9277], requires_grad=True)
#cam_look_at = torch.tensor([-0.2697, -5.7891, 54.7918], requires_grad=True)
#shape_coeffs = torch.zeros(199, device=pyredner.get_device(), requires_grad=True)
color_coeffs = torch.zeros(199,
                           device=pyredner.get_device(),
                           requires_grad=True)
ambient_color = torch.zeros(3,
                            device=pyredner.get_device(),
                            requires_grad=True)
#dir_light_intensity = torch.ones(3, device=pyredner.get_device(), requires_grad=True)
#dir_light_direction = torch.tensor([0.0, 0.0, -1.0], device=pyredner.get_device(), requires_grad=True)
vertices = (shape_mean +
            shape_basis @ torch.zeros(199, device=pyredner.get_device())).view(
                -1, 3)
Example #41
light_intensity = torch.tensor([20.0, 20.0, 20.0])
light = pyredner.AreaLight(1, light_intensity)
area_lights = [light]
scene = pyredner.Scene(cam, shapes, materials, area_lights)
args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 256,
    max_bounces = 1)

# Alias of the render function
render = pyredner.RenderFunction.apply
# Render our target
img = render(0, *args)
pyredner.imwrite(img.cpu(), 'results/test_single_triangle_clipped/target.exr')
pyredner.imwrite(img.cpu(), 'results/test_single_triangle_clipped/target.png')
target = pyredner.imread('results/test_single_triangle_clipped/target.exr')
if pyredner.get_use_gpu():
    target = target.cuda(device = pyredner.get_device())

# Perturb the scene, this is our initial guess
shape_triangle.vertices = torch.tensor(\
    [[-1.0,1.5,0.3], [0.9,1.2,-0.3], [0.0,-3.0,-6.5]],
    device = pyredner.get_device(),
    requires_grad=True)
args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 256,
    max_bounces = 1)
# Render the initial guess
img = render(1, *args)
pyredner.imwrite(img.cpu(), 'results/test_single_triangle_clipped/init.png')
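
The excerpt stops after rendering the initial guess. A minimal sketch of the gradient-based loop these tests typically continue with; the optimizer choice, learning rate, and iteration count are assumptions:

# A hedged sketch, not part of the original excerpt.
optimizer = torch.optim.Adam([shape_triangle.vertices], lr = 5e-2)
for t in range(200):
    optimizer.zero_grad()
    # Re-serialize since the vertices changed in the last step.
    args = pyredner.RenderFunction.serialize_scene(
        scene = scene,
        num_samples = 4,
        max_bounces = 1)
    # A fresh seed each iteration decorrelates the Monte Carlo noise.
    img = render(t + 1, *args)
    loss = (img - target).pow(2).sum()
    loss.backward()
    optimizer.step()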
Example #42
def load_obj(filename: str,
             obj_group: bool = True,
             flip_tex_coords: bool = True,
             use_common_indices: bool = False,
             return_objects: bool = False):
    """
        Load from a Wavefront obj file as PyTorch tensors.

        Args
        ====
        obj_group: bool
            split the meshes based on materials
        flip_tex_coords: bool
            flip the v coordinate of uv by applying v' = 1 - v
        use_common_indices: bool
            Use the same indices for positions, uvs, and normals.
            Not recommended: vertices on texture seams would then stop sharing
            positions, which lets the optimization "tear" the object apart.
        return_objects: bool
            Output a list of Object instead.
            If a shape has no corresponding material, a grey material is assigned.

        Returns
        =======
        if return_objects == True, return a list of Object
        if return_objects == False, return (material_map, mesh_list, light_map),
        material_map -> Map[mtl_name, WavefrontMaterial]
        mesh_list -> List[TriangleMesh]
        light_map -> Map[mtl_name, torch.Tensor]
    """
    vertices_pool = []
    uvs_pool = []
    normals_pool = []
    indices = []
    uv_indices = []
    normal_indices = []
    vertices = []
    uvs = []
    normals = []
    vertices_map = {}
    uvs_map = {}
    normals_map = {}
    material_map = {}
    current_mtllib = {}
    current_material_name = None

    def create_mesh(indices, uv_indices, normal_indices, vertices, uvs,
                    normals):
        indices = torch.tensor(indices,
                               dtype=torch.int32,
                               device=pyredner.get_device())
        if len(uv_indices) == 0:
            uv_indices = None
        else:
            uv_indices = torch.tensor(uv_indices,
                                      dtype=torch.int32,
                                      device=pyredner.get_device())
        if len(normal_indices) == 0:
            normal_indices = None
        else:
            normal_indices = torch.tensor(normal_indices,
                                          dtype=torch.int32,
                                          device=pyredner.get_device())
        vertices = torch.tensor(vertices, device=pyredner.get_device())
        if len(uvs) == 0:
            uvs = None
        else:
            uvs = torch.tensor(uvs, device=pyredner.get_device())
        if len(normals) == 0:
            normals = None
        else:
            normals = torch.tensor(normals, device=pyredner.get_device())
        return TriangleMesh(indices, uv_indices, normal_indices, vertices, uvs,
                            normals)

    mesh_list = []
    light_map = {}

    with open(filename, 'r') as f:
        d = os.path.dirname(filename)
        cwd = os.getcwd()
        if d != '':
            os.chdir(d)
        for line in f:
            line = line.strip()
            splitted = re.split(r' +', line)
            if splitted[0] == 'mtllib':
                current_mtllib = load_mtl(splitted[1])
            elif splitted[0] == 'usemtl':
                if len(indices) > 0 and obj_group is True:
                    # Flush
                    mesh_list.append(
                        (current_material_name,
                         create_mesh(indices, uv_indices, normal_indices,
                                     vertices, uvs, normals)))
                    indices = []
                    uv_indices = []
                    normal_indices = []
                    vertices = []
                    normals = []
                    uvs = []
                    vertices_map = {}
                    uvs_map = {}
                    normals_map = {}

                mtl_name = splitted[1]
                current_material_name = mtl_name
                if mtl_name not in material_map:
                    m = current_mtllib[mtl_name]
                    if m.map_Kd is None:
                        diffuse_reflectance = torch.tensor(
                            m.Kd,
                            dtype=torch.float32,
                            device=pyredner.get_device())
                    else:
                        diffuse_reflectance = pyredner.imread(m.map_Kd)
                        if pyredner.get_use_gpu():
                            diffuse_reflectance = diffuse_reflectance.cuda(
                                device=pyredner.get_device())
                    if m.map_Ks is None:
                        specular_reflectance = torch.tensor(
                            m.Ks,
                            dtype=torch.float32,
                            device=pyredner.get_device())
                    else:
                        specular_reflectance = pyredner.imread(m.map_Ks)
                        if pyredner.get_use_gpu():
                            specular_reflectance = specular_reflectance.cuda(
                                device=pyredner.get_device())
                    if m.map_Ns is None:
                        roughness = torch.tensor([2.0 / (m.Ns + 2.0)],
                                                 dtype=torch.float32,
                                                 device=pyredner.get_device())
                    else:
                        # Convert the Phong exponent map (map_Ns) to
                        # roughness via roughness = 2 / (Ns + 2).
                        roughness = 2.0 / (pyredner.imread(m.map_Ns) + 2.0)
                        if pyredner.get_use_gpu():
                            roughness = roughness.cuda(
                                device=pyredner.get_device())
                    if m.Ke != (0.0, 0.0, 0.0):
                        light_map[mtl_name] = torch.tensor(m.Ke,
                                                           dtype=torch.float32)
                    material_map[mtl_name] = pyredner.Material(\
                        diffuse_reflectance, specular_reflectance, roughness)
            elif splitted[0] == 'v':
                vertices_pool.append([
                    float(splitted[1]),
                    float(splitted[2]),
                    float(splitted[3])
                ])
            elif splitted[0] == 'vt':
                u = float(splitted[1])
                v = float(splitted[2])
                if flip_tex_coords:
                    v = 1 - v
                uvs_pool.append([u, v])
            elif splitted[0] == 'vn':
                normals_pool.append([
                    float(splitted[1]),
                    float(splitted[2]),
                    float(splitted[3])
                ])
            elif splitted[0] == 'f':

                def num_indices(x):
                    return len(re.split('/', x))

                def get_index(x, i):
                    return int(re.split('/', x)[i])

                def parse_face_index(x, i):
                    # Convert OBJ's 1-based indices to 0-based. Negative OBJ
                    # indices are kept as-is; they resolve through Python's
                    # negative indexing into the pools.
                    f = get_index(x, i)
                    if f > 0:
                        f -= 1
                    return f

                assert (len(splitted) <= 5)

                def get_vertex_id(indices):
                    pi = parse_face_index(indices, 0)
                    uvi = None
                    if (num_indices(indices) > 1
                            and re.split('/', indices)[1] != ''):
                        uvi = parse_face_index(indices, 1)
                    ni = None
                    if (num_indices(indices) > 2
                            and re.split('/', indices)[2] != ''):
                        ni = parse_face_index(indices, 2)
                    if use_common_indices:
                        # vertex, uv, normals share the same indexing
                        key = (pi, uvi, ni)
                        if key in vertices_map:
                            vertex_id = vertices_map[key]
                            return vertex_id, vertex_id, vertex_id

                        vertex_id = len(vertices)
                        vertices_map[key] = vertex_id
                        vertices.append(vertices_pool[pi])
                        if uvi is not None:
                            uvs.append(uvs_pool[uvi])
                        if ni is not None:
                            normals.append(normals_pool[ni])
                        return vertex_id, vertex_id, vertex_id
                    else:
                        # vertex, uv, normals use separate indexing
                        vertex_id = None
                        uv_id = None
                        normal_id = None

                        if pi in vertices_map:
                            vertex_id = vertices_map[pi]
                        else:
                            vertex_id = len(vertices)
                            vertices.append(vertices_pool[pi])
                            vertices_map[pi] = vertex_id

                        if uvi is not None:
                            if uvi in uvs_map:
                                uv_id = uvs_map[uvi]
                            else:
                                uv_id = len(uvs)
                                uvs.append(uvs_pool[uvi])
                                uvs_map[uvi] = uv_id

                        if ni is not None:
                            if ni in normals_map:
                                normal_id = normals_map[ni]
                            else:
                                normal_id = len(normals)
                                normals.append(normals_pool[ni])
                                normals_map[ni] = normal_id
                        return vertex_id, uv_id, normal_id

                vid0, uv_id0, n_id0 = get_vertex_id(splitted[1])
                vid1, uv_id1, n_id1 = get_vertex_id(splitted[2])
                vid2, uv_id2, n_id2 = get_vertex_id(splitted[3])

                indices.append([vid0, vid1, vid2])
                if uv_id0 is not None:
                    assert (uv_id1 is not None and uv_id2 is not None)
                    uv_indices.append([uv_id0, uv_id1, uv_id2])
                if n_id0 is not None:
                    assert (n_id1 is not None and n_id2 is not None)
                    normal_indices.append([n_id0, n_id1, n_id2])
                if (len(splitted) == 5):
                    vid3, uv_id3, n_id3 = get_vertex_id(splitted[4])
                    indices.append([vid0, vid2, vid3])
                    if uv_id0 is not None:
                        assert (uv_id3 is not None)
                        uv_indices.append([uv_id0, uv_id2, uv_id3])
                    if n_id0 is not None:
                        assert (n_id3 is not None)
                        normal_indices.append([n_id0, n_id2, n_id3])

    mesh_list.append((current_material_name,
                      create_mesh(indices, uv_indices, normal_indices,
                                  vertices, uvs, normals)))
    if d != '':
        os.chdir(cwd)

    if return_objects:
        objects = []
        for mtl_name, mesh in mesh_list:
            if mtl_name in material_map:
                m = material_map[mtl_name]
            else:
                m = pyredner.Material(diffuse_reflectance = \
                    torch.tensor((0.5, 0.5, 0.5),
                                 device = pyredner.get_device()))
            if mtl_name in light_map:
                l = light_map[mtl_name]
            else:
                l = None
            objects.append(pyredner.Object(\
                vertices = mesh.vertices,
                indices = mesh.indices,
                material = m,
                light_intensity = l,
                uvs = mesh.uvs,
                normals = mesh.normals,
                uv_indices = mesh.uv_indices,
                normal_indices = mesh.normal_indices))
        return objects
    else:
        return material_map, mesh_list, light_map
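
A minimal usage sketch following the documented return values; 'teapot.obj' is a placeholder path:

# A hedged usage sketch, not part of the original excerpt.
material_map, mesh_list, light_map = load_obj('teapot.obj')
for mtl_name, mesh in mesh_list:
    print(mtl_name, mesh.vertices.shape, mesh.indices.shape)

# Or get pyredner.Object instances directly:
objects = load_obj('teapot.obj', return_objects = True)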
Example #43
scene = pyredner.Scene(cam, shapes, materials, [])
# We output the shape id so that we can use it later
args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 16,
    # Set max bounces to 0, we don't need lighting.
    max_bounces = 0,
    # Use the diffuse color as the output
    channels = [redner.channels.diffuse_reflectance])
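
Other g-buffer style outputs follow the same pattern; for example, depth and shading normals (channel names from redner's channel enum, assuming a recent version):

# A hedged sketch, not part of the original excerpt.
g_args = pyredner.RenderFunction.serialize_scene(
    scene = scene,
    num_samples = 16,
    max_bounces = 0,
    channels = [redner.channels.depth, redner.channels.shading_normal])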

# Render the scene as our target image.
render = pyredner.RenderFunction.apply
img = render(0, *args)
pyredner.imwrite(img.cpu(), 'results/two_d_mesh/target.exr')
pyredner.imwrite(img.cpu(), 'results/two_d_mesh/target.png')
target = pyredner.imread('results/two_d_mesh/target.exr')
if pyredner.get_use_gpu():
    target = target.cuda(device=pyredner.get_device())

# Perturb the scene, this is our initial guess
quad_vertices_2d = torch.tensor(\
    [[-0.5, 0.3], [0.3, 0.4], [-0.7, -0.2], [0.4, -0.3]],
    device = pyredner.get_device(),
    requires_grad = True)
tri_vertices_2d = torch.tensor(\
    [[-0.5, 0.4], [0.4, 0.6], [-0.0, -0.3]],
    device = pyredner.get_device(),
    requires_grad = True)
# Need to redo the concatenation
shape_quad.vertices = torch.cat(
    (quad_vertices_2d,
Example #44
def load_obj(filename, obj_group=True):
    """
        Load from a Wavefront obj file as PyTorch tensors.
        XXX: this is slow, maybe move to C++?
    """
    vertices_pool = []
    uvs_pool = []
    normals_pool = []
    indices = []
    vertices = []
    normals = []
    uvs = []
    vertices_map = {}
    material_map = {}
    current_mtllib = {}
    current_material_name = None

    def create_mesh(indices, vertices, normals, uvs):
        indices = torch.tensor(indices,
                               dtype=torch.int32,
                               device=pyredner.get_device())
        vertices = torch.tensor(vertices, device=pyredner.get_device())
        if len(uvs) == 0:
            uvs = None
        else:
            uvs = torch.tensor(uvs, device=pyredner.get_device())
        if len(normals) == 0:
            normals = None
        else:
            normals = torch.tensor(normals, device=pyredner.get_device())
        return TriangleMesh(vertices, indices, uvs, normals)

    mesh_list = []
    light_map = {}

    f = open(filename, 'r')
    d = os.path.dirname(filename)
    cwd = os.getcwd()
    if d != '':
        os.chdir(d)
    for line in f:
        line = line.strip()
        splitted = re.split(r' +', line)
        if splitted[0] == 'mtllib':
            current_mtllib = load_mtl(splitted[1])
        elif splitted[0] == 'usemtl':
            if len(indices) > 0 and obj_group is True:
                # Flush
                mesh_list.append((current_material_name,
                                  create_mesh(indices, vertices, normals,
                                              uvs)))
                indices = []
                vertices = []
                normals = []
                uvs = []
                vertices_map = {}
            mtl_name = splitted[1]
            current_material_name = mtl_name
            if mtl_name not in material_map:
                m = current_mtllib[mtl_name]
                if m.map_Kd is None:
                    diffuse_reflectance = torch.tensor(
                        m.Kd,
                        dtype=torch.float32,
                        device=pyredner.get_device())
                else:
                    diffuse_reflectance = pyredner.imread(m.map_Kd)
                    if pyredner.get_use_gpu():
                        diffuse_reflectance = diffuse_reflectance.cuda()
                if m.map_Ks is None:
                    specular_reflectance = torch.tensor(
                        m.Ks,
                        dtype=torch.float32,
                        device=pyredner.get_device())
                else:
                    specular_reflectance = pyredner.imread(m.map_Ks)
                    if pyredner.get_use_gpu():
                        specular_reflectance = specular_reflectance.cuda()
                if m.map_Ns is None:
                    roughness = torch.tensor([2.0 / (m.Ns + 2.0)],
                                             dtype=torch.float32,
                                             device=pyredner.get_device())
                else:
                    # Convert the Phong exponent map (map_Ns) to
                    # roughness via roughness = 2 / (Ns + 2).
                    roughness = 2.0 / (pyredner.imread(m.map_Ns) + 2.0)
                    if pyredner.get_use_gpu():
                        roughness = roughness.cuda()
                if m.Ke != (0.0, 0.0, 0.0):
                    light_map[mtl_name] = torch.tensor(m.Ke,
                                                       dtype=torch.float32)
                material_map[mtl_name] = pyredner.Material(\
                    diffuse_reflectance, specular_reflectance, roughness)
        elif splitted[0] == 'v':
            vertices_pool.append(
                [float(splitted[1]),
                 float(splitted[2]),
                 float(splitted[3])])
        elif splitted[0] == 'vt':
            uvs_pool.append([float(splitted[1]), float(splitted[2])])
        elif splitted[0] == 'vn':
            normals_pool.append(
                [float(splitted[1]),
                 float(splitted[2]),
                 float(splitted[3])])
        elif splitted[0] == 'f':

            def num_indices(x):
                return len(re.split('/', x))

            def get_index(x, i):
                return int(re.split('/', x)[i])

            def parse_face_index(x, i):
                # Convert OBJ's 1-based indices to 0-based. Negative OBJ
                # indices are relative to the end of the pools declared so far.
                f = get_index(x, i)
                if f < 0:
                    if (i == 0):
                        f += len(vertices_pool)
                    if (i == 1):
                        f += len(uvs_pool)
                    if (i == 2):
                        f += len(normals_pool)
                else:
                    f -= 1
                return f

            assert (len(splitted) <= 5)

            def get_vertex_id(indices):
                pi = parse_face_index(indices, 0)
                uvi = None
                if (num_indices(indices) > 1
                        and re.split('/', indices)[1] != ''):
                    uvi = parse_face_index(indices, 1)
                ni = None
                if (num_indices(indices) > 2
                        and re.split('/', indices)[2] != ''):
                    ni = parse_face_index(indices, 2)
                key = (pi, uvi, ni)
                if key in vertices_map:
                    return vertices_map[key]

                vertex_id = len(vertices)
                vertices_map[key] = vertex_id
                vertices.append(vertices_pool[pi])
                if uvi is not None:
                    uvs.append(uvs_pool[uvi])
                if ni is not None:
                    normals.append(normals_pool[ni])
                return vertex_id

            vid0 = get_vertex_id(splitted[1])
            vid1 = get_vertex_id(splitted[2])
            vid2 = get_vertex_id(splitted[3])

            indices.append([vid0, vid1, vid2])
            if (len(splitted) == 5):
                vid3 = get_vertex_id(splitted[4])
                indices.append([vid0, vid2, vid3])

    mesh_list.append(
        (current_material_name, create_mesh(indices, vertices, normals, uvs)))
    f.close()
    if d != '':
        os.chdir(cwd)
    return material_map, mesh_list, light_map
Example #45
scene = pyredner.Scene(camera=cam,
                       shapes=shapes,
                       materials=materials,
                       envmap=envmap)
# Serialize the scene
scene_args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 512,
    max_bounces = 1)
# Render the target
render = pyredner.RenderFunction.apply
img = render(0, *scene_args)
# Save the target image
pyredner.imwrite(img.cpu(), 'results/joint_material_envmap_sh/target.exr')
pyredner.imwrite(img.cpu(), 'results/joint_material_envmap_sh/target.png')
# Read the target image back
target = pyredner.imread('results/joint_material_envmap_sh/target.exr')
if pyredner.get_use_gpu():
    target = target.cuda()

# Reset the coefficients to a constant color and repeat the same process as for the target envmap
coeffs = torch.tensor(
    [
        [
            0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
            0.0, 0.0, 0.0
        ],  # coeffs for red
        [
            0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
            0.0, 0.0, 0.0
        ],  # coeffs for green
        [
            0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
            0.0, 0.0, 0.0
        ]  # coeffs for blue
    ],
    device=pyredner.get_device(),
    requires_grad=True)
Example #46
# Here we use an environment light,
# which is a texture representing infinitely far away light sources in
# spherical coordinates.
#envmap = pyredner.imread('sunsky.exr')
#if pyredner.get_use_gpu():
#    envmap = envmap.cuda()
#envmap = pyredner.EnvironmentMap(envmap)
#
## Finally we construct our scene using all the variables we setup previously.
#scene = pyredner.Scene(cam, shapes, materials, area_lights = [], envmap = envmap)

scene_args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 512,
    max_bounces = 1)

render = pyredner.RenderFunction.apply
img = render(0, *scene_args)

#%%
pyredner.imwrite(img.cpu(), 'results/pose_estimation/target.exr')
pyredner.imwrite(img.cpu(), 'results/pose_estimation/target.png')

# Loading it again
target = pyredner.imread('results/pose_estimation/target.exr')

if pyredner.get_use_gpu():
    target = target.cuda()

#%%
Example #47
# The first argument is the shape id of the light
light = pyredner.AreaLight(2, light_intensity)
area_lights = [light]
scene = pyredner.Scene(cam, shapes, materials, area_lights)
args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 256,
    max_bounces = 1)

# Alias of the render function
render = pyredner.RenderFunction.apply
# Render our target
img = render(0, *args)
pyredner.imwrite(img.cpu(), 'results/test_shadow_light/target.exr')
pyredner.imwrite(img.cpu(), 'results/test_shadow_light/target.png')
target = pyredner.imread('results/test_shadow_light/target.exr')
if pyredner.get_use_gpu():
    target = target.cuda(device = pyredner.get_device())

# Perturb the scene, this is our initial guess
light_translation = torch.tensor([-0.4, -0.4, -0.4],
                                 device = pyredner.get_device(),
                                 requires_grad = True)
shape_light.vertices = light_vertices + light_translation
args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 256,
    max_bounces = 1)
# Render the initial guess
img = render(1, *args)
pyredner.imwrite(img.cpu(), 'results/test_shadow_light/init.png')
diff = torch.abs(target - img)
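
The tests typically end this setup phase by saving the difference image; a one-line sketch, assuming the naming convention of the other outputs:

# A hedged sketch, not part of the original excerpt.
pyredner.imwrite(diff.cpu(), 'results/test_shadow_light/init_diff.png')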