Example no. 1
def init_renderer(mesh):
    obj_fp = mesh.filename
    pyr.set_print_timing(False)
    objects = pyr.load_obj(obj_fp, return_objects=True)
    #camera = pyr.automatic_camera_placement(objects, (256, 256))
    camera = pyr.Camera(position=torch.tensor([1.2, 0, 0],
                                              dtype=torch.float32),
                        look_at=torch.tensor([0, 0, 0], dtype=torch.float32),
                        up=torch.tensor([0, 1, 0], dtype=torch.float32),
                        fov=torch.tensor([60], dtype=torch.float32),
                        resolution=(256, 256),
                        camera_type=pyr.camera_type.perspective)
    lights = [
        pyr.DirectionalLight(direction=torch.tensor([-1, 0, 0],
                                                    dtype=torch.float32,
                                                    device=pyr.get_device()),
                             intensity=torch.tensor([1, 1, 1],
                                                    dtype=torch.float32,
                                                    device=pyr.get_device())),
        pyr.DirectionalLight(direction=torch.tensor([1, 0, 0],
                                                    dtype=torch.float32,
                                                    device=pyr.get_device()),
                             intensity=torch.tensor([1, 1, 1],
                                                    dtype=torch.float32,
                                                    device=pyr.get_device()))
    ]
    return objects, camera, lights
Example no. 2
def generate_scenes(camLocs, objects, envmap=None, lightLocs=None):
    # Note: camera0 is assumed to be a pyredner.Camera defined at module level.
    scenes = []
    up = torch.tensor([0.0, 1.0, 0.0])
    offset_factor = 0.0
    light_intensity = 100.0

    for ind, loc in enumerate(camLocs):
        camera = pyredner.Camera(position=loc,
                                 look_at=torch.tensor([0.0, 0.0, 0.0]),
                                 up=camera0.up,
                                 fov=torch.tensor([90.0]),  # fov = camera0.fov,
                                 resolution=camera0.resolution)

        # Build an orthonormal frame around the view direction.
        normal = camera.position.div(torch.norm(camera.position))
        tangent = torch.cross(normal, up)
        tangent = tangent.div(torch.norm(tangent))
        bitangent = torch.cross(normal, tangent)
        bitangent = bitangent.div(torch.norm(bitangent))

        # Place one quad light, offset from the camera along the tangent.
        offsets = [offset_factor * tangent]
        lightLocs = [(camera.position + offset) for offset in offsets]

        lights = [pyredner.generate_quad_light(position=lightPos,
                                               look_at=camera0.look_at,
                                               size=torch.tensor([0.1, 0.1]),
                                               intensity=torch.tensor([light_intensity,
                                                                       light_intensity,
                                                                       light_intensity]))
                  for lightPos in lightLocs]

        scenes.append(pyredner.Scene(camera=camera,
                                     objects=[objects[0], objects[1],
                                              objects[2], lights[0]],
                                     envmap=None))
    return scenes
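
A minimal driver sketch tying the two helpers above together. It assumes mesh.filename points to an OBJ that loads into at least three sub-objects (generate_scenes indexes objects[0..2]) and that the module-level camera0 read by generate_scenes is bound to the camera returned by init_renderer; the name mesh and the output paths are illustrative.

import math
import torch
import pyredner

objects, camera0, lights = init_renderer(mesh)  # mesh as assumed above

# Eight camera positions on a circle of radius 1.2 in the XZ plane.
camLocs = [1.2 * torch.tensor([math.cos(a), 0.0, math.sin(a)])
           for a in [2.0 * math.pi * k / 8 for k in range(8)]]

scenes = generate_scenes(camLocs, objects)
# The object-based API accepts a list of scenes and returns a stacked image tensor.
imgs = pyredner.render_pathtracing(scenes, num_samples=(64, 4))
for i, img in enumerate(imgs):
    pyredner.imwrite(img.cpu(), 'out/view_{:02d}.png'.format(i))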
Example no. 3
def model(cam_pos, cam_look_at, vertices, indices, ambient_color,
          dir_light_intensity, dir_light_direction, normals, colors):
    #normals = pyredner.compute_vertex_normal(vertices, indices, normal_scheme)
    #m = pyredner.Material(diffuse_reflectance=torch.tensor([0.5, 0.5, 0.5]))
    m = pyredner.Material(use_vertex_color=True)
    obj = pyredner.Object(vertices=vertices,
                          indices=indices,
                          normals=normals,
                          material=m,
                          colors=colors)

    cam = pyredner.Camera(
        position=cam_pos,
        look_at=cam_look_at,  # Center of the vertices
        up=torch.tensor([0.0, 1.0, 0.0]),
        fov=torch.tensor([45.0]),
        resolution=(1000, 1000))
    scene = pyredner.Scene(camera=cam, objects=[obj])
    ambient_light = pyredner.AmbientLight(ambient_color)
    dir_light = pyredner.DirectionalLight(dir_light_direction,
                                          dir_light_intensity)
    img = pyredner.render_deferred(scene=scene,
                                   lights=[ambient_light, dir_light],
                                   aa_samples=1)
    return img
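
A hedged sketch of how a model function like this is typically driven in redner-style inverse rendering; target_img, the learning rate, and the choice of optimized parameters are illustrative assumptions, and the remaining arguments are assumed to be defined as in the example above.

ambient_color = torch.tensor([0.2, 0.2, 0.2], device=pyredner.get_device(),
                             requires_grad=True)
dir_light_intensity = torch.tensor([1.0, 1.0, 1.0], device=pyredner.get_device(),
                                   requires_grad=True)
optimizer = torch.optim.Adam([ambient_color, dir_light_intensity], lr=1e-2)
for t in range(200):
    optimizer.zero_grad()
    img = model(cam_pos, cam_look_at, vertices, indices, ambient_color,
                dir_light_intensity, dir_light_direction, normals, colors)
    loss = (img - target_img).pow(2).mean()  # target_img: a reference rendering
    loss.backward()
    optimizer.step()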
Example no. 4
def visual_vertex_grad(vertices: torch.Tensor,
                       indices: torch.Tensor,
                       cam: pyredner.Camera = None):
    if not hasattr(visual_vertex_grad, 'x'):
        visual_vertex_grad.x = 0
    else:
        visual_vertex_grad.x += 1
    if cam is None:
        # Fall back to a default camera; cam_pos and cam_look_at are assumed
        # to be module-level tensors.
        cam = pyredner.Camera(
            position=cam_pos,
            look_at=cam_look_at,  # Center of the vertices
            up=torch.tensor([0.0, 1.0, 0.0]),
            fov=torch.tensor([45.0]),
            resolution=(1000, 1000))
    m = pyredner.Material(use_vertex_color=True)
    obj = pyredner.Object(vertices=vertices, indices=indices, material=m)
    coe = 500000.
    # One (positive, negative) color pair per gradient axis.
    color_reps = torch.tensor([[[1., 0., 0.], [0., -1., -1.]],
                               [[0., 1., 0.], [-1., 0., -1.]],
                               [[0., 0., 1.], [-1., -1., 0.]]]).to(pyredner.get_device())
    grad_imgs = []
    for d in range(3):
        colors = torch.where(
            vertices.grad[:, d:d + 1].expand(-1, 3) > 0,
            vertices.grad[:, d:d + 1].expand(-1, 3) * color_reps[d, 0],
            vertices.grad[:, d:d + 1].expand(-1, 3) * color_reps[d, 1]) * coe

        obj.colors = colors
        scene = pyredner.Scene(camera=cam, objects=[obj])
        grad_imgs.append(pyredner.render_albedo(scene=scene))
    for d in range(3):
        pyredner.imwrite(
            grad_imgs[d].cpu(), output_path +
            '/grad_imgs/{:0>2d}{:0>2d}.png'.format(d, visual_vertex_grad.x))
    return grad_imgs
Example no. 5
def model(cam_pos, cam_look_at, vertices, color_coeffs, ambient_color,
          dir_light_intensity, dir_light_direction):
    #vertices = (shape_mean + shape_basis @ torch.zeros(199, device=pyredner.get_device())).view(-1, 3)
    normals = pyredner.compute_vertex_normal(vertices, indices)
    colors = (color_mean + color_basis @ color_coeffs).view(-1, 3)
    #m = pyredner.Material(use_vertex_color=True)
    m = pyredner.Material(diffuse_reflectance=torch.tensor([0.5, 0.5, 0.5]))
    obj = pyredner.Object(vertices=vertices,
                          indices=indices,
                          normals=normals,
                          material=m,
                          colors=colors)

    cam = pyredner.Camera(
        position=cam_pos,
        look_at=cam_look_at,  # Center of the vertices
        up=torch.tensor([0.0, 1.0, 0.0]),
        fov=torch.tensor([45.0]),
        resolution=(512, 512))
    scene = pyredner.Scene(camera=cam, objects=[obj])
    ambient_light = pyredner.AmbientLight(ambient_color)
    dir_light = pyredner.DirectionalLight(dir_light_direction,
                                          dir_light_intensity)
    img = pyredner.render_deferred(scene=scene,
                                   lights=[ambient_light, dir_light])
    return img, obj
Example no. 6
def model(cam_poses, cam_look_at, shape_coeffs, color_coeffs, lights_list,
          resolution):
    vertices = (shape_mean + shape_basis @ shape_coeffs).view(-1, 3)
    normals = pyredner.compute_vertex_normal(vertices, indices)
    colors = (color_mean + color_basis @ color_coeffs).view(-1, 3)
    #m = pyredner.Material(diffuse_reflectance = torch.tensor([0.5, 0.5, 0.5]))
    m = pyredner.Material(use_vertex_color=True)
    obj = pyredner.Object(vertices=vertices,
                          indices=indices,
                          normals=normals,
                          material=m,
                          colors=colors)

    imgs = []
    for i in range(cam_poses.size(0)):
        cam = pyredner.Camera(
            position=cam_poses[i],
            look_at=cam_look_at,  # Center of the vertices
            up=torch.tensor([0.0, 1.0, 0.0]),
            fov=torch.tensor([45.0]),
            resolution=resolution)
        scene = pyredner.Scene(camera=cam, objects=[obj])
        #ambient_light = pyredner.AmbientLight(ambient_color)
        #dir_light = pyredner.DirectionalLight(dir_light_direction, dir_light_intensity)
        img = pyredner.render_deferred(scene=scene,
                                       lights=lights_list[i %
                                                          len(lights_list)],
                                       aa_samples=1)
        imgs.append(img)
    return imgs, obj
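
The lights_list argument is forwarded unchanged to render_deferred, so each entry is itself a list of deferred-shading lights. A hedged example of what it might contain; the coefficients and camera positions are illustrative, and shape_coeffs / color_coeffs are assumed to match the morphable-model bases used above.

device = pyredner.get_device()
lights_list = [
    [pyredner.AmbientLight(torch.tensor([0.2, 0.2, 0.2], device=device)),
     pyredner.DirectionalLight(torch.tensor([0.0, 0.0, -1.0], device=device),
                               torch.tensor([2.0, 2.0, 2.0], device=device))],
    [pyredner.AmbientLight(torch.tensor([0.5, 0.5, 0.5], device=device))],
]
cam_poses = torch.tensor([[0.0, 0.0, 3.0],
                          [0.5, 0.0, 3.0]])
imgs, obj = model(cam_poses, torch.zeros(3), shape_coeffs, color_coeffs,
                  lights_list, (256, 256))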
Example no. 7
def parse_camera(node, param_dict):
    fov = torch.tensor([45.0])
    position = None
    look_at = None
    up = None
    clip_near = 1e-2
    resolution = [256, 256]
    viewport = None 
    crop_offset_x = None
    crop_offset_y = None
    crop_width = resolution[1]
    crop_height = resolution[0]

    for child in node:
        if 'name' in child.attrib:
            if child.attrib['name'] == 'fov':
                fov = torch.tensor([float(child.attrib['value'])])
            elif child.attrib['name'] == 'toWorld' or child.attrib['name'] == 'to_world':
                has_lookat = False
                for grandchild in child:
                    if grandchild.tag.lower() == 'lookat' or grandchild.tag.lower() == 'look_at':
                        has_lookat = True
                        position = parse_vector(grandchild.attrib['origin'])
                        look_at = parse_vector(grandchild.attrib['target'])
                        up = parse_vector(grandchild.attrib['up'])
                if not has_lookat:
                    print('Unsupported Mitsuba scene format: please use a look at transform')
                    assert(False)
        if child.tag == 'film':
            for grandchild in child:
                if 'name' in grandchild.attrib:
                    if grandchild.attrib['name'] == 'width':
                        resolution[1] = int(check_default(grandchild.attrib['value'], param_dict))
                    elif grandchild.attrib['name'] == 'height':
                        resolution[0] = int(check_default(grandchild.attrib['value'], param_dict))
                    elif grandchild.attrib['name'] == 'crop_offset_x':
                        crop_offset_x = int(check_default(grandchild.attrib['value'], param_dict))
                    elif grandchild.attrib['name'] == 'crop_offset_y':
                        crop_offset_y = int(check_default(grandchild.attrib['value'], param_dict))
                    elif grandchild.attrib['name'] == 'crop_width':
                        crop_width = int(check_default(grandchild.attrib['value'], param_dict))
                    elif grandchild.attrib['name'] == 'crop_height':
                        crop_height = int(check_default(grandchild.attrib['value'], param_dict))

    if crop_offset_y is None:
        viewport = [0, 0, resolution[0], resolution[1]]
    else:
        viewport = [crop_offset_y, crop_offset_x,
                    crop_offset_y + crop_height,
                    crop_offset_x + crop_width]

    return pyredner.Camera(position     = position,
                           look_at      = look_at,
                           up           = up,
                           fov          = fov,
                           clip_near    = clip_near,
                           resolution   = resolution,
                           viewport     = viewport)
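
For reference, a sketch of the kind of Mitsuba-style sensor node this parser accepts. parse_vector and check_default are assumed to be helper functions defined alongside parse_camera, with check_default passing literal values through when no parameter defaults apply; the concrete values below are illustrative.

import xml.etree.ElementTree as ET

sensor_xml = """
<sensor type="perspective">
  <float name="fov" value="60"/>
  <transform name="to_world">
    <look_at origin="0 0 -5" target="0 0 0" up="0 1 0"/>
  </transform>
  <film type="hdrfilm">
    <integer name="width" value="512"/>
    <integer name="height" value="512"/>
  </film>
</sensor>
"""
camera = parse_camera(ET.fromstring(sensor_xml.strip()), param_dict={})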
Example no. 8
def generate_scenes(camLocs, objects, envmap=None, lightLocs=None):
    scenes = []
    up = torch.tensor([0.0, 1.0, 0.0])
    offset_factor = 2.0
    light_intensity = 10000.0
    for ind, loc in enumerate(camLocs):
        multiplier = 1.0
        look_at_offset = torch.tensor([0.0, 0.0, 0.0])

        camera = pyredner.Camera(position=camera0.look_at +
                                 radius * loc * multiplier,
                                 look_at=camera0.look_at + look_at_offset,
                                 up=camera0.up,
                                 fov=camera0.fov,
                                 resolution=camera0.resolution)

        normal = camera.position.div(torch.norm(camera.position))
        tangent = torch.cross(normal, up)
        tangent = tangent.div(torch.norm(tangent))
        bitangent = torch.cross(normal, tangent)
        bitangent = bitangent.div(torch.norm(bitangent))

        offsets = [offset_factor * tangent]  #, offset_factor * normal]

        lightLocs = [(camera.position + offset) for offset in offsets]
        #else:
        #  lightPos = lightLocs[ind]
        lights = [
            pyredner.generate_quad_light(position=lightPos,
                                         look_at=camera0.look_at,
                                         size=torch.tensor([0.1, 0.1]),
                                         intensity=torch.tensor([
                                             light_intensity, light_intensity,
                                             light_intensity
                                         ])) for lightPos in lightLocs
        ]

        # Camera data for voxel carving
        print(
            str(ind) + " " + str(camera.position.data[0].item()) + " " +
            str(camera.position.data[1].item()) + " " +
            str(camera.position.data[2].item()) + " " +
            str(camera.look_at.data[0].item()) + " " +
            str(camera.look_at.data[1].item()) + " " +
            str(camera.look_at.data[2].item()))
        for light in lights:
            scenes.append(
                pyredner.Scene(camera=camera,
                               objects=[objects[0], light],
                               envmap=None))
    return scenes
Example no. 9
def model(cam_pos, cam_look_at, shape_coeffs, color_coeffs, resolution, center,
          all_euler_angles, all_translations):
    # First rotate around the center, then translate.

    imgs = []

    #obj = pyredner.load_obj('p_ones30/final.obj', return_objects=True)[0]
    vertices, indices, uvs, normals = pyredner.generate_sphere(128, 64)
    vertices *= 80
    m = pyredner.Material(
        diffuse_reflectance=torch.ones(2, 2, 3, dtype=torch.float32))
    obj = pyredner.Object(vertices=vertices,
                          indices=indices,
                          normals=normals,
                          uvs=uvs,
                          material=m)
    v = obj.vertices.clone()

    for i in range(len(all_translations)):
        rotation_matrix = pyredner.gen_rotate_matrix(all_euler_angles[i]).to(
            pyredner.get_device())
        center = center.to(pyredner.get_device())
        # vertices = ((shape_mean + shape_basis @ shape_coeffs).view(-1, 3) - center) @ torch.t(rotation_matrix) + center + all_translations[i].to(pyredner.get_device())
        obj.vertices = (v - center) @ torch.t(rotation_matrix) + center
        obj.normals = pyredner.compute_vertex_normal(obj.vertices, indices)
        # colors = (color_mean + color_basis @ color_coeffs).view(-1, 3)
        # m = pyredner.Material(diffuse_reflectance = torch.tensor([0.5, 0.5, 0.5]))
        m = pyredner.Material(use_vertex_color=True)
        # obj = pyredner.Object(vertices=vertices, indices=indices, normals=normals, material=m, colors=colors)

        if i == 0:
            pyredner.save_obj(obj,
                              "generated/env_dataset_" + name + '/tgt_obj.obj')

        cam = pyredner.Camera(
            position=cam_pos,
            look_at=cam_look_at,  # Center of the vertices
            up=torch.tensor([0.0, 1.0, 0.0]),
            fov=torch.tensor([45.0]),
            resolution=resolution)
        scene = pyredner.Scene(camera=cam, objects=[obj], envmap=envmap)

        img = pyredner.render_pathtracing(scene=scene, num_samples=(128, 4))
        imgs.append(img)
    return imgs
Example no. 10
def parse_camera(node):
    fov = torch.tensor([45.0])
    position = None
    look_at = None
    up = None
    clip_near = 1e-2
    resolution = [256, 256]
    for child in node:
        if 'name' in child.attrib:
            if child.attrib['name'] == 'fov':
                fov = torch.tensor([float(child.attrib['value'])])
            elif child.attrib['name'] == 'toWorld':
                has_lookat = False
                for grandchild in child:
                    if grandchild.tag.lower() == 'lookat':
                        has_lookat = True
                        position = parse_vector(grandchild.attrib['origin'])
                        look_at = parse_vector(grandchild.attrib['target'])
                        up = parse_vector(grandchild.attrib['up'])
                if not has_lookat:
                    print(
                        'Unsupported Mitsuba scene format: please use a look at transform'
                    )
                    assert (False)
        if child.tag == 'film':
            for grandchild in child:
                if 'name' in grandchild.attrib:
                    if grandchild.attrib['name'] == 'width':
                        resolution[1] = int(grandchild.attrib['value'])
                    elif grandchild.attrib['name'] == 'height':
                        resolution[0] = int(grandchild.attrib['value'])

    return pyredner.Camera(position=position,
                           look_at=look_at,
                           up=up,
                           fov=fov,
                           clip_near=clip_near,
                           resolution=resolution)
Example no. 11
def generate_scenes(camLocs, objects, envmap=None):
    scenes = []
    up = torch.tensor([0.0, 1.0, 0.0])
    offset_factor = 2.0
    light_intensity = 500.0

    for ind, loc in enumerate(camLocs):
        camera = pyredner.Camera(position=camera0.look_at + radius * loc,
                                 look_at=camera0.look_at,
                                 up=camera0.up,
                                 fov=camera0.fov,
                                 resolution=camera0.resolution)

        normal = camera.position.div(torch.norm(camera.position))
        tangent = torch.cross(normal, up)
        tangent = tangent.div(torch.norm(tangent))
        bitangent = torch.cross(normal, tangent)
        bitangent = bitangent.div(torch.norm(bitangent))

        lightPos = camera.position + offset_factor * tangent
        light = pyredner.generate_quad_light(position=lightPos,
                                             look_at=camera0.look_at,
                                             size=torch.tensor([0.1, 0.1]),
                                             intensity=torch.tensor([
                                                 light_intensity,
                                                 light_intensity,
                                                 light_intensity
                                             ]))

        # Camera data for voxel carving
        #print(str(ind) + " " + str(camera.position.data[0].item()) + " " + str(camera.position.data[1].item()) + " " + str(camera.position.data[2].item()) + " " + str(camera.look_at.data[0].item()) + " " + str(camera.look_at.data[1].item()) + " " + str(camera.look_at.data[2].item()))

        scenes.append(
            pyredner.Scene(camera=camera,
                           objects=[objects[0], light],
                           envmap=envmap))
    return scenes
Example no. 12
def model(cam_poses, cam_look_ats, shape_coeffs, color_coeffs, resolution):
    # First rotate around the center, then translate.

    imgs = []

    vertices = (shape_mean + shape_basis @ shape_coeffs).view(-1, 3)
    normals = pyredner.compute_vertex_normal(vertices, indices)
    colors = (color_mean + color_basis @ color_coeffs).view(-1, 3)
    m = pyredner.Material(use_vertex_color=False,
                          specular_reflectance=torch.tensor(
                              [1., 1., 1.], device=pyredner.get_device()),
                          roughness=torch.tensor([0.02]))
    obj = pyredner.Object(vertices=vertices,
                          indices=indices,
                          normals=normals,
                          material=m,
                          colors=colors)
    obj = pyredner.load_obj('generated/env_dataset_oness_n/tgt_obj.obj',
                            return_objects=True)[0]
    obj.material.specular_reflectance = pyredner.Texture(
        torch.tensor([0.05, 0.05, 0.05], device=pyredner.get_device()))
    obj.material.roughness = pyredner.Texture(torch.tensor([0.02]))
    pyredner.save_obj(obj, "generated/senv_dataset_" + name + '/tgt_obj.obj')

    for i in range(len(cam_poses)):
        cam = pyredner.Camera(
            position=cam_poses[i],
            look_at=cam_look_ats[i %
                                 len(cam_look_ats)],  # Center of the vertices
            up=torch.tensor([0.0, 1.0, 0.0]),
            fov=torch.tensor([45.0]),
            resolution=resolution)
        scene = pyredner.Scene(camera=cam, objects=[obj], envmap=envmap)

        img = pyredner.render_pathtracing(scene=scene, num_samples=(128, 4))
        imgs.append(img)
    return imgs
Example no. 13
def model(cam_poses, cam_look_at, vertices, lights_list, normals, material):

    # m = pyredner.Material(diffuse_reflectance=torch.tensor([0.5, 0.5, 0.5]))
    obj = pyredner.Object(vertices=vertices,
                          indices=indices,
                          normals=normals,
                          material=material,
                          uvs=uvs,
                          uv_indices=uv_indices)  # , colors=colors)
    imgs = []
    for i in range(cam_poses.size(0)):
        cam = pyredner.Camera(
            position=cam_poses[i],
            look_at=cam_look_at,  # Center of the vertices
            up=torch.tensor([0.0, 1.0, 0.0]),
            fov=torch.tensor([45.0]),
            resolution=(1000, 1000))
        scene = pyredner.Scene(camera=cam, objects=[obj])
        img = pyredner.render_deferred(scene=scene,
                                       lights=lights_list[i %
                                                          len(lights_list)],
                                       aa_samples=1)
        imgs.append(img)
    return imgs
Example no. 14
def load_settings(mesh_name):
    '''
    Read, parse and return (cameras, lights) for the given mesh.
    '''
    dpath = os.path.join(mydir, mesh_name)
    fpath = os.path.join(dpath, "camera.json")
    with open(fpath) as f:
        camera_settings = json.load(f)
 
    cameras = [
        pyr.Camera(
            position=cpu(camera_pos),
            look_at=cpu(camera_settings['look_at']),
            up=cpu(camera_settings['up']),
            fov=cpu(camera_settings['fov']),
            resolution=camera_settings['resolution'],
            camera_type=pyr.camera_type.perspective
        )
        for camera_pos in camera_settings['positions']
    ]

    def parse_one_light(dict_light):
        if dict_light['type'] == 'directional':
            return pyr.DirectionalLight(
                direction = gpu(dict_light['direction']),
                intensity = gpu(dict_light['intensity'])
            )
        else:
            return None
    
    lights = [
        parse_one_light(d) for d in camera_settings['lights']
    ]
    lights = [l for l in lights if l is not None]

    return cameras, lights
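
The camera.json layout implied by the reader above; the field names come from the code, while the values, and the cpu/gpu helpers that presumably turn these lists into tensors on the right device, are assumptions.

example_camera_json = {
    "positions": [[1.2, 0.0, 0.0], [0.0, 0.0, 1.2]],
    "look_at": [0.0, 0.0, 0.0],
    "up": [0.0, 1.0, 0.0],
    "fov": [60.0],
    "resolution": [256, 256],
    "lights": [
        {"type": "directional",
         "direction": [-1.0, 0.0, 0.0],
         "intensity": [1.0, 1.0, 1.0]}
    ]
}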
Example no. 15
# Optimize camera pose looking at shadow

# Use GPU if available
pyredner.set_use_gpu(torch.cuda.is_available())

# Set up the scene using Pytorch tensor
position = torch.tensor([0.0, 9.0, 0.0])
look_at = torch.tensor([0.0, 0.0, 0.0])
up = torch.tensor([0.0, 0.0, 1.0])
fov = torch.tensor([45.0])
clip_near = 1e-2

resolution = (256, 256)
cam = pyredner.Camera(position = position,
                     look_at = look_at,
                     up = up,
                     fov = fov,
                     clip_near = clip_near,
                     resolution = resolution)

mat_grey = pyredner.Material(\
    diffuse_reflectance = torch.tensor([0.5, 0.5, 0.5],
    device = pyredner.get_device()))
mat_black = pyredner.Material(\
    diffuse_reflectance = torch.tensor([0.0, 0.0, 0.0],
    device = pyredner.get_device()))
materials = [mat_grey, mat_black]

floor_vertices = torch.tensor([[-20.0,0.0,-20.0],[-20.0,0.0,20.0],[20.0,0.0,-20.0],[20.0,0.0,20.0]],
    device = pyredner.get_device())
floor_indices = torch.tensor([[0,1,2], [1,3,2]],
    device = pyredner.get_device(), dtype = torch.int32)
Example no. 16
import pyredner
import numpy as np
import torch

# From the test_single_triangle.py test case but with viewport

pyredner.set_use_gpu(torch.cuda.is_available())

cam = pyredner.Camera(
    position=torch.tensor([0.0, 0.0, -5.0]),
    look_at=torch.tensor([0.0, 0.0, 0.0]),
    up=torch.tensor([0.0, 1.0, 0.0]),
    fov=torch.tensor([45.0]),  # in degree
    clip_near=1e-2,  # needs to be > 0
    resolution=(1024, 1024),
    viewport=(200, 300, 700, 800))

mat_grey = pyredner.Material(\
    diffuse_reflectance = \
        torch.tensor([0.5, 0.5, 0.5], device = pyredner.get_device()))
materials = [mat_grey]

shape_triangle = pyredner.Shape(\
    vertices = torch.tensor([[-1.7, 1.0, 0.0], [1.0, 1.0, 0.0], [-0.5, -1.0, 0.0]],
        device = pyredner.get_device()),
    indices = torch.tensor([[0, 1, 2]], dtype = torch.int32,
        device = pyredner.get_device()),
    uvs = None,
    normals = None,
    material_id = 0)
shape_light = pyredner.Shape(\
Example no. 17
import torch
import pyredner
import redner

# Optimize vertices of 2D meshes

# Use GPU if available
pyredner.set_use_gpu(torch.cuda.is_available())

# Setup camera: We place the camera at (0, 0, -1), with look vector
#               (0, 0, 1). We also use an orthographic camera just to
#               make the projection more "2D": the depth is only used
#               for determining the order of the meshes.
cam = pyredner.Camera(
    position=torch.tensor([0.0, 0.0, -1.0]),
    look_at=torch.tensor([0.0, 0.0, 0.0]),
    up=torch.tensor([0.0, 1.0, 0.0]),
    fov=torch.tensor([45.0]),  # in degree
    clip_near=1e-2,  # needs to be > 0
    resolution=(256, 256),
    camera_type=redner.CameraType.orthographic)

# The materials:
mat_quad = pyredner.Material(\
    diffuse_reflectance = torch.tensor([0.75, 0.75, 0.25],
    device = pyredner.get_device()))
mat_tri = pyredner.Material(\
    diffuse_reflectance = torch.tensor([0.9, 0.35, 0.35],
    device = pyredner.get_device()))
materials = [mat_quad, mat_tri]

# We'll have a quad and a triangle as our meshes.
# First we define the 2D coordinates. The size of the screen is
Example no. 18
def generate_poses(model_path, output_path):
    # Init logger
    log = dict()

    # Load renderer configs
    material_map, mesh_list, light_map = pyredner.load_obj(model_path)
    material_id_map = {}
    materials = []
    count = 0
    for key, value in material_map.items():
        material_id_map[key] = count
        count += 1
        materials.append(value)

    shapes = []
    for mtl_name, mesh in mesh_list:
        shapes.append(
            pyredner.Shape(vertices=mesh.vertices,
                           indices=mesh.indices,
                           uvs=mesh.uvs,
                           normals=mesh.normals,
                           material_id=material_id_map[mtl_name]))

    envmap = pyredner.EnvironmentMap(
        torch.tensor(imread('./datasets/envmaps/one/sunsky.exr'),
                     dtype=torch.float32,
                     device=pyredner.get_device()))

    # Object pose parameters
    euler_angles = [0.0, 0.0, 0.0]
    translation = [0.0, -0.75, 0.0]
    up = [0.0, 1.0, 0.0]
    distance = 7.0

    # Setup base scene to modify during iterations
    cam_params = camera_parameters(euler_angles, translation, distance, up)

    camera = pyredner.Camera(position=torch.tensor(cam_params[0],
                                                   dtype=torch.float32),
                             look_at=torch.tensor(cam_params[1],
                                                  dtype=torch.float32),
                             up=torch.tensor(cam_params[2],
                                             dtype=torch.float32),
                             fov=torch.tensor([45.0]),
                             clip_near=1e-2,
                             resolution=(opt.resolution, opt.resolution),
                             fisheye=False)

    scene = pyredner.Scene(camera,
                           shapes,
                           materials,
                           area_lights=[],
                           envmap=envmap)

    # Generate alphamasks
    for i in range(opt.num_elev):
        # Set elevation angle
        elev_pc = i / opt.num_elev
        elevation = opt.max_elev * elev_pc + opt.min_elev * (1 - elev_pc)
        euler_angles[1] = elevation

        # Calculate number of azimuthal iterations
        num_azimuth = int(opt.num_elev * math.sin(math.pi / 2 - elevation))
        for j in range(num_azimuth):
            # Set azimuthal angle
            azimuth_pc = j / num_azimuth
            azimuth = math.pi * 2 * azimuth_pc

            euler_angles[0] = azimuth

            print('Params: Elevation - {:.4f}\tAzimuth - {:.4f}'\
                    .format(elevation, azimuth))

            # Set Camera params
            cam_params = camera_parameters(euler_angles, translation, distance,
                                           up)

            # Update scene params
            scene.camera = pyredner.Camera(
                position=torch.tensor(cam_params[0], dtype=torch.float32),
                look_at=torch.tensor(cam_params[1], dtype=torch.float32),
                up=torch.tensor(cam_params[2], dtype=torch.float32),
                fov=torch.tensor([45.0]),
                clip_near=1e-2,
                resolution=(opt.resolution, opt.resolution),
                fisheye=False)
            args = pyredner.RenderFunction.serialize_scene(
                scene=scene,
                num_samples=1,
                max_bounces=1,
                channels=[redner.channels.alpha])

            out = pyredner.RenderFunction.apply(1, *args)

            fn = gen_hash(6)
            imwrite(out, os.path.join(output_path, '{}.png'.format(fn)))
            log[fn] = {'elevation': elevation, 'azimuth': azimuth}
    return log
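
A hedged sketch of how generate_poses might be invoked and its log persisted. The paths are illustrative, and opt is assumed to be a parsed-options namespace providing resolution, num_elev, max_elev and min_elev as used above.

import json
import os

output_path = './output/chair_masks'
os.makedirs(output_path, exist_ok=True)
log = generate_poses('./datasets/meshes/chair/chair.obj', output_path)
with open(os.path.join(output_path, 'poses.json'), 'w') as f:
    json.dump(log, f, indent=2)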
Example no. 19
                                   device=pyredner.get_device(),
                                   requires_grad=True)
specular_reflectance = torch.tensor([0.5, 0.5, 0.5],
                                    device=pyredner.get_device(),
                                    requires_grad=True)
roughness = torch.tensor([0.2],
                         device=pyredner.get_device(),
                         requires_grad=True)
scene.materials[-1].diffuse_reflectance = pyredner.Texture(diffuse_reflectance)
scene.materials[-1].specular_reflectance = pyredner.Texture(
    specular_reflectance)
scene.materials[-1].roughness = pyredner.Texture(roughness)
scene.camera = pyredner.Camera(position=cam_position + cam_translation,
                               look_at=cam.look_at + cam_translation,
                               up=cam.up,
                               fov=cam.fov,
                               clip_near=cam.clip_near,
                               resolution=cam.resolution,
                               fisheye=False)
args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 1024,
    max_bounces = 2)
# Render the initial guess
img = render(1, *args)
pyredner.imwrite(img.cpu(), 'results/test_teapot_reflectance/init.png')
diff = torch.abs(target - img)
pyredner.imwrite(diff.cpu(), 'results/test_teapot_reflectance/init_diff.png')

lr_base = 1e-2
lr = lr_base
Example no. 20
for ind, pos in enumerate(camLocs):
    pos = torch.tensor([100, 0.0, -3.0]) 
    normal = pos.div(torch.norm(pos - torch.tensor([-3.0, 0.0, -3.0]) ))                                                   
    pos = normal * radius2   
    
    lights.append(pyredner.generate_quad_light(position = pos + torch.tensor([10.0, 0.0, -3.0]), \
                                     look_at = camera0.look_at, \
                                     size = torch.tensor([2.0, 2.0]), \
                                     intensity = torch.tensor([light_intensity, light_intensity, light_intensity])))
    
print("LIGHT TWO DONE")    

cam = pyredner.Camera(position = torch.tensor([0.0, 0.0, -5.5]),
                      look_at = torch.tensor([0.0, 0.0, -3.0]),
                      up = torch.tensor([0.0, 1.0, 0.0]),
                      camera_type = redner.CameraType.perspective,
                      fov = torch.tensor([45.0]),
                      clip_near = 1e-2, # needs to be > 0
                      resolution = (512, 512),
                      fisheye = False)

cam3 = pyredner.Camera(position =  torch.tensor( [2.5, 0.0, -3.0]) ,
                      look_at = torch.tensor([0.0, 0.0, -3.0]),
                      up = torch.tensor([0.0, 1.0, 0.0]),
                      camera_type = redner.CameraType.perspective,
                      fov = torch.tensor([45.0]),
                      clip_near = 1e-2, # needs to be > 0
                      resolution = (512, 512),
                      fisheye = False)

for obj in range(1, num_iters):
    target_obj1 = pyredner.load_obj('results/shadow_art/multitarget/' + step + '/iter_' + str(obj) + '.obj', return_objects=True)
Example no. 21
# </editor-fold>

# <editor-fold desc="CORRECTING POSITION">
euler_list, trans_list = [], []
euler = torch.tensor([0.0, 0., 0.],
                     device=pyredner.get_device(),
                     requires_grad=True)
trans = torch.tensor([-0., -0., -0.],
                     device=pyredner.get_device(),
                     requires_grad=True)
# eul_optimizer = torch.optim.SGD([euler], lr=2)
# tra_optimizer = torch.optim.SGD([trans], lr=2000)
# eul_optimizer = torch.optim.Adam([euler], lr=0.02)
cam = pyredner.Camera(
    position=cam_pos,
    look_at=cam_look_at,  # Center of the vertices
    up=torch.tensor([0.0, 1.0, 0.0]),
    fov=torch.tensor([20.0]),
    resolution=resolution)
for i in range(0):  # disabled here; was range(num_views)
    print("correcting position {:0>2d}".format(i))
    eul_optimizer = torch.optim.SGD([euler], lr=2)
    tra_optimizer = torch.optim.SGD([trans], lr=5000)
    for t in range(20):
        eul_optimizer.zero_grad()
        tra_optimizer.zero_grad()
        rotation_matrix = pyredner.gen_rotate_matrix(euler)
        obj.vertices = (vertices - center) @ torch.t(rotation_matrix) \
                       + center + trans * torch.tensor([1., 1., 3.], device=pyredner.get_device())
        scene = pyredner.Scene(objects=[obj], camera=cam, envmap=envmap)
        img = pyredner.render_pathtracing(scene=scene,
                                          num_samples=(64, 4),
Example no. 22
material_vars = []
for mi, m in enumerate(scene.materials):
    var = torch.tensor([0.5, 0.5, 0.5],
                       device = pyredner.get_device(),
                       requires_grad = True)
    material_vars.append(var)
    m.diffuse_reflectance = pyredner.Texture(var)
        
# And let's also slightly perturb the camera up vector and field of view
up = torch.tensor([0.2, 0.8, -0.2], requires_grad = True)
fov = torch.tensor([41.0], requires_grad = True)
cam_vars = [up, fov]
scene.camera = pyredner.Camera(\
    position = scene.camera.position,
    look_at = scene.camera.look_at,
    up = up,
    fov = fov,
    clip_near = scene.camera.clip_near,
    resolution = scene.camera.resolution)
# Serialize the scene and render the initial guess
scene_args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 512,
    max_bounces = 5)
img = render(1, *scene_args)
pyredner.imwrite(img.cpu(), 'results/coarse_to_fine_estimation/init.png')

# Optimize for parameters.
optimizer = torch.optim.Adam(material_vars + cam_vars, lr=5e-3)
# We run a coarse-to-fine estimation here to avoid getting trapped in a local minimum.
# The final resolution is 256x256, but we will start from a 64x64 image.
Example no. 23
    def __call__(self, input):
        ### Load active scene blob from config ###

        # Camera params
        cam_rotation = self.config('cam_rotation')
        cam_translation = self.config('cam_translation')
        cam_distance = self.config('cam_distance')
        cam_fov = self.config('cam_fov')
        cam_resolution = self.config('cam_resolution')
        # Geometry params
        geometry_path = self.config('geometry_path')
        # Texture params
        tex_diffuse_color = self.config('tex_diffuse_color')
        tex_specular_color = self.config('tex_specular_color')
        # Envmap params
        envmap_path = self.config('envmap_path')
        envmap_signal_mean = self.config('envmap_signal_mean')
        envmap_rotation = self.config('envmap_rotation')
        # Render option params
        opt_num_samples = self.config('opt_num_samples')
        opt_max_bounces = self.config('opt_max_bounces')
        opt_channels_str = self.config('opt_channels_str')
        opt_render_seed = self.config('opt_render_seed')

        # XXX: Temporary hack for training override
        if self.isTrain:
            opt_num_samples = (200, 1)
            opt_channels_str = ['radiance']

        ### Load configs as pyredner primitives ###

        # Convert Camera params for pyredner.Camera object
        position, look_at, up = camera_parameters(cam_rotation,
                                                  cam_translation,
                                                  cam_distance)
        camera = pyredner.Camera(
            position=torch.tensor(position, dtype=torch.float32),
            look_at=torch.tensor(look_at, dtype=torch.float32),
            up=torch.tensor(up, dtype=torch.float32),
            fov=torch.tensor(cam_fov),
            clip_near=1e-2,  # Hardcoded
            resolution=cam_resolution,
            fisheye=False)  # Hardcoded

        # Load geometry from specified path
        mesh = LearnMesh.load_pth(geometry_path, self.device)

        # Set Learnable material
        mesh.set_learn_material(input, tex_diffuse_color, tex_specular_color)

        # Load envmap from specified path
        envmap = load_envmap(envmap_path, envmap_signal_mean, envmap_rotation,
                             self.device)

        # Convert channels list into redner primitives
        opt_channels = list()
        for ch in opt_channels_str:
            opt_channels.append(getattr(redner.channels, ch))

        # Serialize Scene
        # IMPORTANT: saving the scene on the object prevents Python's
        # garbage collector from removing variables redner allocates.
        self.scene = pyredner.Scene(camera, mesh.shapes, mesh.materials, [],
                                    envmap)
        # XXX: Temporary hack
        args = pyredner.RenderFunction.serialize_scene(
            scene=self.scene,
            num_samples=opt_num_samples,
            max_bounces=opt_max_bounces,
            channels=opt_channels)
        render = pyredner.RenderFunction.apply(opt_render_seed, *args)

        #TODO: Add flag to ask whether to normalize
        #render /= torch.mean(render) * 2
        out = torch.clamp(render, 0, 1)

        return out
Example no. 24
    env_optimizer.zero_grad()
    #normals = pyredner.compute_vertex_normal(vertices, indices, normal_scheme)
    obj.material = pyredner.Material(
        diffuse_reflectance=texels,
        specular_reflectance=torch.tensor([0.05, 0.05, 0.05]),
        roughness=torch.tensor([0.02]))
    imgs = []
    deringed_coeffs = deringing(coeffs, 6.0)
    # envmap_img = pyredner.SH_reconstruct(deringed_coeffs, (64, 128))
    envmap_img = tgt_envmap_img
    envmap = pyredner.EnvironmentMap(envmap_img)
    pyredner.imwrite(envmap_img.cpu(), output_path + '/env{:0>2d}.png'.format(t))
    pyredner.imwrite(texels.cpu(), output_path + '/tex{:0>2d}.png'.format(t))
    obj.vertices = vertices
    obj.normals = pyredner.compute_vertex_normal(obj.vertices, obj.indices)
    for i in range(num_views):
        cam = pyredner.Camera(position=cam_poses[i],
                              look_at=cam_look_ats[i % len(cam_look_ats)],  # Center of the vertices
                              up=torch.tensor([0.0, 1.0, 0.0]),
                              fov=torch.tensor([45.0]),
                              resolution=resolution)
        scene = pyredner.Scene(objects=[obj], camera=cam, envmap=envmap)
        img = pyredner.render_pathtracing(scene=scene, num_samples=(32, 4), use_secondary_edge_sampling=False)
        imgs.append(img)

        pyredner.imwrite(img.cpu(), output_path + '/iter{:0>2d}_{:0>2d}.png'.format(t, i))

    all_imgs.append(imgs)
    # record all imgs
    losses = torch.stack([(imgs[i] - target[i]).pow(2).mean() for i in range(len(imgs))])
    # losses of all imgs in this single iteration
    all_losses.append(losses)
    # all_losses records the losses in all iterations
    img_loss = losses.sum()
Example no. 25
    def render(self, scene, svbrdf):
        imgs = []

        svbrdf = svbrdf.unsqueeze(0) if len(svbrdf.shape) == 3 else svbrdf

        sensor_size = (svbrdf.shape[-1], svbrdf.shape[-2])

        for svbrdf_single in torch.split(svbrdf, 1, dim=0):
            normals, diffuse, roughness, specular = utils.unpack_svbrdf(
                svbrdf_single.squeeze(0))
            # Redner expects the normal map to be in range [0, 1]
            normals = utils.encode_as_unit_interval(normals)
            # Redner expects the roughness to have one channel only.
            # We also need to convert from GGX roughness to Blinn-Phong power.
            # See: https://github.com/iondune/csc473/blob/master/lectures/07-cook-torrance.md
            roughness = torch.mean(torch.clamp(roughness, min=0.001),
                                   dim=0,
                                   keepdim=True)**4

            # Convert from [c,h,w] to [h,w,c] for redner
            normals = normals.permute(1, 2, 0)
            diffuse = diffuse.permute(1, 2, 0)
            roughness = roughness.permute(1, 2, 0)
            specular = specular.permute(1, 2, 0)

            material = pyredner.Material(
                diffuse_reflectance=pyredner.Texture(
                    diffuse.to(self.redner_device)),
                specular_reflectance=pyredner.Texture(
                    specular.to(self.redner_device)),
                roughness=pyredner.Texture(roughness.to(self.redner_device)),
                normal_map=pyredner.Texture(normals.to(self.redner_device)))

            material_patch = pyredner.Object(vertices=self.patch_vertices,
                                             uvs=self.patch_uvs,
                                             indices=self.patch_indices,
                                             material=material)

            # Define the camera parameters (focused at the middle of the patch) and make sure we always have a valid 'up' direction
            position = np.array(scene.camera.pos)
            lookat = np.array([0.0, 0.0, 0.0])
            cz = lookat - position  # Principal axis
            up = np.array([0.0, 0.0, 1.0])
            if np.linalg.norm(np.cross(cz, up)) == 0.0:
                up = np.array([0.0, 1.0, 0.0])

            camera = pyredner.Camera(
                position=torch.FloatTensor(position).to(self.redner_device),
                look_at=torch.FloatTensor(lookat).to(self.redner_device),
                up=torch.FloatTensor(up).to(self.redner_device),
                fov=torch.FloatTensor([90]),
                resolution=sensor_size,
                camera_type=self.camera_type)

            # # The deferred rendering path.
            # # It does not have a specular model and therefore is of limited usability for us
            # full_scene = pyredner.Scene(camera = camera, objects = [material_patch])
            # light = pyredner.PointLight(position = torch.tensor(scene.light.pos).to(self.redner_device),
            #                                    intensity = torch.tensor(scene.light.color).to(self.redner_device))
            # img = pyredner.render_deferred(scene = full_scene, lights = [light])

            light = pyredner.generate_quad_light(
                position=torch.Tensor(scene.light.pos).to(self.redner_device),
                look_at=torch.zeros(3).to(self.redner_device),
                size=torch.Tensor([0.6, 0.6]).to(self.redner_device),
                intensity=torch.Tensor(scene.light.color).to(
                    self.redner_device))
            full_scene = pyredner.Scene(camera=camera,
                                        objects=[material_patch, light])
            img = pyredner.render_pathtracing(full_scene, num_samples=(16, 8))

            # Transform the rendered image back to something torch can interpret
            imgs.append(img.permute(2, 0, 1).to(svbrdf.device))

        return torch.stack(imgs)
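
The patch geometry referenced above (self.patch_vertices, self.patch_uvs, self.patch_indices) is not shown in this snippet; a plausible construction, assuming a unit quad in the XY plane and using pyredner.get_device() in place of the class's self.redner_device, is sketched below.

patch_vertices = torch.tensor([[-1.0, -1.0, 0.0],
                               [ 1.0, -1.0, 0.0],
                               [ 1.0,  1.0, 0.0],
                               [-1.0,  1.0, 0.0]], device=pyredner.get_device())
patch_uvs = torch.tensor([[0.0, 0.0],
                          [1.0, 0.0],
                          [1.0, 1.0],
                          [0.0, 1.0]], device=pyredner.get_device())
patch_indices = torch.tensor([[0, 1, 2], [0, 2, 3]],
                             dtype=torch.int32, device=pyredner.get_device())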
Example no. 26
def reverse_channels(img):
    return np.moveaxis(img, 0, -1)  # source, dest


#pyredner.set_use_gpu(torch.cuda.is_available())
pyredner.set_use_gpu(False)

# First, we set up a camera by constructing a pyredner.Camera object.

# I think this is known and not optimized

cam = pyredner.Camera(
    position=torch.tensor([0.0, 0.0, -5.0]),
    look_at=torch.tensor([0.0, 0.0, 0.0]),
    up=torch.tensor([0.0, 1.0, 0.0]),
    fov=torch.tensor([45.0]),  # in degree
    clip_near=1e-2,  # needs to be > 0
    resolution=(256, 256),
    fisheye=False)

#%%
material_map, mesh_list, light_map = pyredner.load_obj('diamond.obj')
# The mesh we loaded is relatively low-poly and doesn't have vertex normals.
# Fortunately we can compute the vertex normal from the neighbor vertices.
# We can use pyredner.compute_vertex_normal for this task:
# (Try commenting out the following two lines to see the differences in target images!)
for _, mesh in mesh_list:
    mesh.normals = pyredner.compute_vertex_normal(mesh.vertices / 20,
                                                  mesh.indices)
    print(_)  # None
Example no. 27
pyredner.set_use_gpu(torch.cuda.is_available())

position = torch.tensor([1.0, 0.0, -3.0])
look_at = torch.tensor([1.0, 0.0, 0.0])
up = torch.tensor([0.0, 1.0, 0.0])
fov = torch.tensor([45.0])
clip_near = 1e-2

# randomly generate distortion parameters
torch.manual_seed(1234)
target_distort_params = (torch.rand(8) - 0.5) * 0.05
resolution = (256, 256)
cam = pyredner.Camera(position = position,
                      look_at = look_at,
                      up = up,
                      fov = fov,
                      clip_near = clip_near,
                      resolution = resolution,
                      distortion_params = target_distort_params)

checkerboard_texture = pyredner.imread('scenes/teapot.png')
if pyredner.get_use_gpu():
    checkerboard_texture = checkerboard_texture.cuda(device = pyredner.get_device())

mat_checkerboard = pyredner.Material(\
    diffuse_reflectance = checkerboard_texture)
mat_black = pyredner.Material(\
    diffuse_reflectance = torch.tensor([0.0, 0.0, 0.0], device = pyredner.get_device()))

plane = pyredner.Object(vertices = torch.tensor([[-1.0,-1.0, 0.0],
                                                 [-1.0, 1.0, 0.0],