Example #1
def model(cam_pos, cam_look_at, vertices, indices, ambient_color,
          dir_light_intensity, dir_light_direction, normals, colors):
    #normals = pyredner.compute_vertex_normal(vertices, indices, normal_scheme)
    #m = pyredner.Material(diffuse_reflectance=torch.tensor([0.5, 0.5, 0.5]))
    m = pyredner.Material(use_vertex_color=True)
    obj = pyredner.Object(vertices=vertices,
                          indices=indices,
                          normals=normals,
                          material=m,
                          colors=colors)

    cam = pyredner.Camera(
        position=cam_pos,
        look_at=cam_look_at,  # Center of the vertices
        up=torch.tensor([0.0, 1.0, 0.0]),
        fov=torch.tensor([45.0]),
        resolution=(1000, 1000))
    scene = pyredner.Scene(camera=cam, objects=[obj])
    ambient_light = pyredner.AmbientLight(ambient_color)
    dir_light = pyredner.DirectionalLight(dir_light_direction,
                                          dir_light_intensity)
    img = pyredner.render_deferred(scene=scene,
                                   lights=[ambient_light, dir_light],
                                   aa_samples=1)
    return img
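A minimal usage sketch for this model. Every tensor below is an illustrative placeholder, and vertices, indices, normals, and colors are assumed to come from a mesh prepared elsewhere:

cam_pos = torch.tensor([0.0, 0.0, 200.0])
cam_look_at = torch.tensor([0.0, 0.0, 0.0])
ambient_color = torch.tensor([0.3, 0.3, 0.3], device=pyredner.get_device())
dir_light_intensity = torch.tensor([1.0, 1.0, 1.0], device=pyredner.get_device())
dir_light_direction = torch.tensor([0.0, 0.0, -1.0], device=pyredner.get_device())
img = model(cam_pos, cam_look_at, vertices, indices, ambient_color,
            dir_light_intensity, dir_light_direction, normals, colors)
pyredner.imwrite(img.cpu(), 'results/example_01/render.png')  # placeholder path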
Example #2
def visual_vertex_grad(vertices: torch.Tensor,
                       indices: torch.Tensor,
                       cam: pyredner.Camera = None):
    if not hasattr(visual_vertex_grad, 'x'):
        visual_vertex_grad.x = 0
    else:
        visual_vertex_grad.x += 1
    # Use the provided camera if any; otherwise fall back to the globals
    # cam_pos / cam_look_at from the enclosing script.
    if cam is None:
        cam = pyredner.Camera(
            position=cam_pos,
            look_at=cam_look_at,  # Center of the vertices
            up=torch.tensor([0.0, 1.0, 0.0]),
            fov=torch.tensor([45.0]),
            resolution=(1000, 1000))
    m = pyredner.Material(use_vertex_color=True)
    obj = pyredner.Object(vertices=vertices, indices=indices, material=m)
    coe = 500000.
    color_reps = torch.tensor([[[1., 0., 0.], [0., -1., -1.]],
                               [[0., 1., 0.], [-1., 0., -1.]],
                               [[0., 0., 1.], [-1., -1., 0.]]]).to(pyredner.get_device())
    grad_imgs = []
    for d in range(3):
        colors = torch.where(
            vertices.grad[:, d:d + 1].expand(-1, 3) > 0,
            vertices.grad[:, d:d + 1].expand(-1, 3) * color_reps[d, 0],
            vertices.grad[:, d:d + 1].expand(-1, 3) * color_reps[d, 1]) * coe

        obj.colors = colors
        scene = pyredner.Scene(camera=cam, objects=[obj])
        grad_imgs.append(pyredner.render_albedo(scene=scene))
    for d in range(3):
        pyredner.imwrite(
            grad_imgs[d].cpu(), output_path +
            '/grad_imgs/{:0>2d}{:0>2d}.png'.format(d, visual_vertex_grad.x))
    return grad_imgs
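visual_vertex_grad reads vertices.grad, so it is only meaningful after a backward pass. A hedged sketch, assuming cam, indices, a target image, and output_path exist in the enclosing script:

vertices.requires_grad_(True)
m = pyredner.Material(diffuse_reflectance=torch.tensor(
    [0.5, 0.5, 0.5], device=pyredner.get_device()))
obj = pyredner.Object(vertices=vertices, indices=indices, material=m)
img = pyredner.render_albedo(pyredner.Scene(camera=cam, objects=[obj]))
loss = (img - target).pow(2).sum()
loss.backward()  # populates vertices.grad
grad_imgs = visual_vertex_grad(vertices, indices, cam)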
Example #3
def generate_scenes(camLocs, objects, envmap=None, lightLocs=None):
    # Assumes camera0 is defined in the enclosing script.
    scenes = []
    up = torch.tensor([0.0, 1.0, 0.0])
    offset_factor = 0.0
    light_intensity = 100.0

    for ind, loc in enumerate(camLocs):
        camera = pyredner.Camera(position=loc,
                                 look_at=torch.tensor([0.0, 0.0, 0.0]),
                                 up=camera0.up,
                                 fov=torch.tensor([90.0]),  # fov = camera0.fov
                                 resolution=camera0.resolution)

        # Build an orthonormal frame around the viewing direction.
        normal = camera.position.div(torch.norm(camera.position))
        tangent = torch.cross(normal, up)
        tangent = tangent.div(torch.norm(tangent))
        bitangent = torch.cross(normal, tangent)
        bitangent = bitangent.div(torch.norm(bitangent))

        # Use the provided light locations if given; otherwise offset one
        # light from the camera along the tangent.
        offsets = [offset_factor * tangent]
        if lightLocs is None:
            light_positions = [(camera.position + offset) for offset in offsets]
        else:
            light_positions = [lightLocs[ind]]

        lights = [pyredner.generate_quad_light(position=lightPos,
                                               look_at=camera0.look_at,
                                               size=torch.tensor([0.1, 0.1]),
                                               intensity=torch.tensor([light_intensity,
                                                                       light_intensity,
                                                                       light_intensity]))
                  for lightPos in light_positions]

        scenes.append(pyredner.Scene(camera=camera,
                                     objects=[objects[0], objects[1], objects[2], lights[0]],
                                     envmap=envmap))
    return scenes
Example #4
def model(cam_pos, cam_look_at, vertices, color_coeffs, ambient_color,
          dir_light_intensity, dir_light_direction):
    #vertices = (shape_mean + shape_basis @ torch.zeros(199, device=pyredner.get_device())).view(-1, 3)
    normals = pyredner.compute_vertex_normal(vertices, indices)
    colors = (color_mean + color_basis @ color_coeffs).view(-1, 3)
    #m = pyredner.Material(use_vertex_color=True)
    m = pyredner.Material(diffuse_reflectance=torch.tensor([0.5, 0.5, 0.5]))
    obj = pyredner.Object(vertices=vertices,
                          indices=indices,
                          normals=normals,
                          material=m,
                          colors=colors)

    cam = pyredner.Camera(
        position=cam_pos,
        look_at=cam_look_at,  # Center of the vertices
        up=torch.tensor([0.0, 1.0, 0.0]),
        fov=torch.tensor([45.0]),
        resolution=(512, 512))
    scene = pyredner.Scene(camera=cam, objects=[obj])
    ambient_light = pyredner.AmbientLight(ambient_color)
    dir_light = pyredner.DirectionalLight(dir_light_direction,
                                          dir_light_intensity)
    img = pyredner.render_deferred(scene=scene,
                                   lights=[ambient_light, dir_light])
    return img, obj
Example #5
def model(cam_poses, cam_look_at, shape_coeffs, color_coeffs, lights_list,
          resolution):
    vertices = (shape_mean + shape_basis @ shape_coeffs).view(-1, 3)
    normals = pyredner.compute_vertex_normal(vertices, indices)
    colors = (color_mean + color_basis @ color_coeffs).view(-1, 3)
    #m = pyredner.Material(diffuse_reflectance = torch.tensor([0.5, 0.5, 0.5]))
    m = pyredner.Material(use_vertex_color=True)
    obj = pyredner.Object(vertices=vertices,
                          indices=indices,
                          normals=normals,
                          material=m,
                          colors=colors)

    imgs = []
    for i in range(cam_poses.size(0)):
        cam = pyredner.Camera(
            position=cam_poses[i],
            look_at=cam_look_at,  # Center of the vertices
            up=torch.tensor([0.0, 1.0, 0.0]),
            fov=torch.tensor([45.0]),
            resolution=resolution)
        scene = pyredner.Scene(camera=cam, objects=[obj])
        #ambient_light = pyredner.AmbientLight(ambient_color)
        #dir_light = pyredner.DirectionalLight(dir_light_direction, dir_light_intensity)
        img = pyredner.render_deferred(scene=scene,
                                       lights=lights_list[i %
                                                          len(lights_list)],
                                       aa_samples=1)
        imgs.append(img)
    return imgs, obj
Example #6
def make_scenes(mesh, settings):
    '''
    Make redner scenes from the given mesh and the (cameras, lights) settings.
    '''
    cameras, lights = settings
    if not isinstance(mesh, list):
        mesh = [mesh]
    scenes = [pyr.Scene(camera=c, objects=mesh) for c in cameras]
    return scenes
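A possible invocation, assuming `import pyredner as pyr` and that the (cameras, lights) pair was prepared by the surrounding code; the OBJ path is a placeholder. render_albedo accepts a list of scenes and returns one image per camera:

mesh = pyr.load_obj('scenes/teapot.obj', return_objects=True)  # placeholder path
settings = (cameras, lights)
scenes = make_scenes(mesh, settings)
imgs = pyr.render_albedo(scenes)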
Example #7
    def grad_image(self):
        grads = torch.clone(self.mesh.vertices.grad).detach()
        vcolormat = pyr.Material(use_vertex_color=True)
        grads_mag = torch.abs(grads)
        # Normalize gradient magnitudes to [0, 1] so they can be used as vertex colors.
        vcolors = (grads_mag - grads_mag.min()) / (grads_mag.max() - grads_mag.min())
        gradobj = pyr.Object(self.mesh.vertices, self.mesh.indices,
                             material=vcolormat, normals=self.mesh.normals,
                             colors=vcolors)
        cameras = self.settings[0]
        gradscenes = [pyr.Scene(c, objects=[gradobj]) for c in cameras]
        grads_rendered = pyr.render_albedo(gradscenes)
        return grads_rendered
Example #8
def parse_scene(node):
    cam = None
    resolution = None
    materials = []
    material_dict = {}
    shapes = []
    lights = []
    shape_group_dict = {}
    envmap = None

    for child in node:
        if child.tag == 'sensor':
            cam = parse_camera(child)
        elif child.tag == 'bsdf':
            node_id, material = parse_material(child)
            if node_id is not None:
                material_dict[node_id] = len(materials)
                materials.append(material)
        # shapegroup for instancing
        elif child.tag == 'shape' and child.attrib['type'] == 'shapegroup':
            for child_s in child:
                if child_s.tag == 'shape':
                    shape_group_dict[child.attrib['id']] = parse_shape(
                        child_s, material_dict, None)[0]
        elif child.tag == 'shape':
            shape, light = parse_shape(
                child, material_dict, len(shapes), shape_group_dict
                if child.attrib['type'] == 'instance' else None)
            if isinstance(shape, list):
                shapes = shapes + shape
            else:
                shapes.append(shape)
            if light is not None:
                lights.append(light)
        # Add envmap loading support
        elif child.tag == 'emitter' and child.attrib['type'] == 'envmap':
            # read envmap params from xml
            scale = 1.0
            envmap_filename = None
            to_world = torch.eye(4)
            for child_s in child:
                if child_s.attrib['name'] == 'scale':
                    assert child_s.tag == 'float'
                    scale = float(child_s.attrib['value'])
                if child_s.attrib['name'] == 'filename':
                    assert child_s.tag == 'string'
                    envmap_filename = child_s.attrib['value']
                if child_s.attrib['name'] == 'toWorld':
                    to_world = parse_transform(child_s)
            # load envmap
            envmap = scale * pyredner.imread(envmap_filename)
            if pyredner.get_use_gpu():
                envmap = envmap.cuda()
            envmap = pyredner.EnvironmentMap(envmap, env_to_world=to_world)
    return pyredner.Scene(cam, shapes, materials, lights, envmap)
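parse_scene walks the children of a Mitsuba-style XML root node. A minimal sketch of driving it with ElementTree; the file name is a placeholder:

import xml.etree.ElementTree as ET

root = ET.parse('scene.xml').getroot()  # placeholder path
scene = parse_scene(root)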
Example #9
def model(translation, euler_angles):
    # Get the rotation matrix from Euler angles
    rotation_matrix = pyredner.gen_rotate_matrix(euler_angles)
    # Shift the vertices to the center, apply rotation matrix,
    # shift back to the original space, then apply the translation.
    for obj, v in zip(objects, vertices):
        obj.vertices = (
            v - center) @ torch.t(rotation_matrix) + center + translation
    # Assemble the 3D scene.
    scene = pyredner.Scene(camera=camera, objects=objects)
    # Render the scene.
    img = pyredner.render_albedo(scene)
    return img
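Because the renderer is differentiable, translation and euler_angles can be recovered by gradient descent. A sketch assuming a target image and the globals used by model (objects, vertices, center, camera) exist, with device placement handled as in the surrounding script:

translation = torch.zeros(3, device=pyredner.get_device(), requires_grad=True)
euler_angles = torch.zeros(3, requires_grad=True)
optimizer = torch.optim.Adam([translation, euler_angles], lr=0.05)
for t in range(200):
    optimizer.zero_grad()
    img = model(translation, euler_angles)
    loss = (img - target).pow(2).sum()
    loss.backward()
    optimizer.step()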
Example #10
def generate_scenes(camLocs, objects, envmap=None, lightLocs=None):
    scenes = []
    up = torch.tensor([0.0, 1.0, 0.0])
    offset_factor = 2.0
    light_intensity = 10000.0
    for ind, loc in enumerate(camLocs):
        multiplier = 1.0
        look_at_offset = torch.tensor([0.0, 0.0, 0.0])

        camera = pyredner.Camera(position=camera0.look_at +
                                 radius * loc * multiplier,
                                 look_at=camera0.look_at + look_at_offset,
                                 up=camera0.up,
                                 fov=camera0.fov,
                                 resolution=camera0.resolution)

        normal = camera.position.div(torch.norm(camera.position))
        tangent = torch.cross(normal, up)
        tangent = tangent.div(torch.norm(tangent))
        bitangent = torch.cross(normal, tangent)
        bitangent = bitangent.div(torch.norm(bitangent))

        offsets = [offset_factor * tangent]  #, offset_factor * normal]

        # Use the provided light locations if given; otherwise place one
        # light per offset relative to the camera.
        if lightLocs is None:
            light_positions = [(camera.position + offset) for offset in offsets]
        else:
            light_positions = [lightLocs[ind]]
        lights = [
            pyredner.generate_quad_light(position=lightPos,
                                         look_at=camera0.look_at,
                                         size=torch.tensor([0.1, 0.1]),
                                         intensity=torch.tensor([
                                             light_intensity, light_intensity,
                                             light_intensity
                                         ])) for lightPos in light_positions
        ]

        # Camera data for voxel carving
        print(ind,
              camera.position[0].item(), camera.position[1].item(),
              camera.position[2].item(), camera.look_at[0].item(),
              camera.look_at[1].item(), camera.look_at[2].item())
        for light in lights:
            scenes.append(
                pyredner.Scene(camera=camera,
                               objects=[objects[0], light],
                               envmap=envmap))
    return scenes
Example #11
def parse_scene(node, device, param_dict):
    cam = None
    # resolution = None
    materials = []
    material_dict = {}
    shapes = []
    lights = []
    shape_group_dict = {}
    shape_id = {}
    envmap = None

    (cam, materials, material_dict, shapes, shape_id, lights,
     shape_group_dict, envmap) = parse_xml(node, device, param_dict,
                                           cam, materials, material_dict,
                                           shapes, shape_id, lights,
                                           shape_group_dict, envmap)
    return pyredner.Scene(cam, shapes, shape_id, materials, lights, envmap)
Example #12
def model(cam_pos, cam_look_at, shape_coeffs, color_coeffs, resolution, center,
          all_euler_angles, all_translations):
    # First rotate around center, then translation

    imgs = []

    #obj = pyredner.load_obj('p_ones30/final.obj', return_objects=True)[0]
    vertices, indices, uvs, normals = pyredner.generate_sphere(128, 64)
    vertices *= 80
    m = pyredner.Material(
        diffuse_reflectance=torch.ones(2, 2, 3, dtype=torch.float32))
    obj = pyredner.Object(vertices=vertices,
                          indices=indices,
                          normals=normals,
                          uvs=uvs,
                          material=m)
    v = obj.vertices.clone()

    for i in range(len(all_translations)):
        rotation_matrix = pyredner.gen_rotate_matrix(all_euler_angles[i]).to(
            pyredner.get_device())
        center = center.to(pyredner.get_device())
        # vertices = ((shape_mean + shape_basis @ shape_coeffs).view(-1, 3) - center) @ torch.t(rotation_matrix) + center + all_translations[i].to(pyredner.get_device())
        obj.vertices = (v - center) @ torch.t(rotation_matrix) + center
        obj.normals = pyredner.compute_vertex_normal(obj.vertices, indices)
        # colors = (color_mean + color_basis @ color_coeffs).view(-1, 3)
        # m = pyredner.Material(diffuse_reflectance = torch.tensor([0.5, 0.5, 0.5]))
        m = pyredner.Material(use_vertex_color=True)
        # obj = pyredner.Object(vertices=vertices, indices=indices, normals=normals, material=m, colors=colors)

        if i == 0:
            pyredner.save_obj(obj,
                              "generated/env_dataset_" + name + '/tgt_obj.obj')

        cam = pyredner.Camera(
            position=cam_pos,
            look_at=cam_look_at,  # Center of the vertices
            up=torch.tensor([0.0, 1.0, 0.0]),
            fov=torch.tensor([45.0]),
            resolution=resolution)
        scene = pyredner.Scene(camera=cam, objects=[obj], envmap=envmap)

        img = pyredner.render_pathtracing(scene=scene, num_samples=(128, 4))
        imgs.append(img)
    return imgs
Example #13
def parse_scene(node):
    cam = None
    resolution = None
    materials = []
    material_dict = {}
    shapes = []
    lights = []
    for child in node:
        if child.tag == 'sensor':
            cam = parse_camera(child)
        elif child.tag == 'bsdf':
            node_id, material = parse_material(child)
            if node_id is not None:
                material_dict[node_id] = len(materials)
                materials.append(material)
        elif child.tag == 'shape':
            shape, light = parse_shape(child, material_dict, len(shapes))
            shapes.append(shape)
            if light is not None:
                lights.append(light)
    return pyredner.Scene(cam, shapes, materials, lights)
Example #14
    def _model(self):
        # Get the rotation matrix from Euler angles
        rotation_matrix = pyredner.gen_rotate_matrix(self.euler_angles)
        self.euler_angles.retain_grad()
        # Shift the vertices to the center, apply rotation matrix,
        # shift back to the original space, then apply the translation.
        vertices = []
        if self.attack_type == "CW":
            for m, shape in zip(self.modifiers, self.shapes):
                shape_v = tanh_rescale(
                    torch_arctanh(shape.vertices.clone().detach()) -
                    m.clone().detach() + m)
                shape.vertices = (shape_v - self.center) @ torch.t(
                    rotation_matrix) + self.center + self.translation
                shape.vertices.retain_grad()
                shape.vertices.register_hook(set_grad(shape.vertices))
                shape.normals = pyredner.compute_vertex_normal(
                    shape.vertices, shape.indices)
                vertices.append(shape.vertices.clone().detach())
        else:
            for shape in self.shapes:
                shape_v = shape.vertices.clone().detach()
                shape.vertices = (shape_v - self.center) @ torch.t(
                    rotation_matrix) + self.center + self.translation
                shape.vertices.retain_grad()
                shape.vertices.register_hook(set_grad(shape.vertices))
                shape.normals = pyredner.compute_vertex_normal(
                    shape.vertices, shape.indices)
                vertices.append(shape.vertices.clone().detach())
        self.center = torch.mean(torch.cat(vertices), 0)
        # Assemble the 3D scene.
        scene = pyredner.Scene(camera=self.camera,
                               shapes=self.shapes,
                               materials=self.materials)
        # Render the scene.
        img = pyredner.render_deferred(scene, lights=[self.light], alpha=True)
        return img
Example #15
File: sface.py  Project: yihang99/FaceRecon
def model(cam_poses, cam_look_ats, shape_coeffs, color_coeffs, resolution):
    # First rotate around center, then translation

    imgs = []

    vertices = (shape_mean + shape_basis @ shape_coeffs).view(-1, 3)
    normals = pyredner.compute_vertex_normal(vertices, indices)
    colors = (color_mean + color_basis @ color_coeffs).view(-1, 3)
    m = pyredner.Material(use_vertex_color=False,
                          specular_reflectance=torch.tensor(
                              [1., 1., 1.], device=pyredner.get_device()),
                          roughness=torch.tensor([0.02]))
    obj = pyredner.Object(vertices=vertices,
                          indices=indices,
                          normals=normals,
                          material=m,
                          colors=colors)
    obj = pyredner.load_obj('generated/env_dataset_oness_n/tgt_obj.obj',
                            return_objects=True)[0]
    obj.material.specular_reflectance = pyredner.Texture(
        torch.tensor([0.05, 0.05, 0.05], device=pyredner.get_device()))
    obj.material.roughness = pyredner.Texture(torch.tensor([0.02]))
    pyredner.save_obj(obj, "generated/senv_dataset_" + name + '/tgt_obj.obj')

    for i in range(len(cam_poses)):
        cam = pyredner.Camera(
            position=cam_poses[i],
            look_at=cam_look_ats[i %
                                 len(cam_look_ats)],  # Center of the vertices
            up=torch.tensor([0.0, 1.0, 0.0]),
            fov=torch.tensor([45.0]),
            resolution=resolution)
        scene = pyredner.Scene(camera=cam, objects=[obj], envmap=envmap)

        img = pyredner.render_pathtracing(scene=scene, num_samples=(128, 4))
        imgs.append(img)
    return imgs
Example #16
def generate_scenes(camLocs, objects, envmap=None):
    scenes = []
    up = torch.tensor([0.0, 1.0, 0.0])
    offset_factor = 2.0
    light_intensity = 500.0

    for ind, loc in enumerate(camLocs):
        camera = pyredner.Camera(position=camera0.look_at + radius * loc,
                                 look_at=camera0.look_at,
                                 up=camera0.up,
                                 fov=camera0.fov,
                                 resolution=camera0.resolution)

        normal = camera.position.div(torch.norm(camera.position))
        tangent = torch.cross(normal, up)
        tangent = tangent.div(torch.norm(tangent))
        bitangent = torch.cross(normal, tangent)
        bitangent = bitangent.div(torch.norm(bitangent))

        lightPos = camera.position + offset_factor * tangent
        light = pyredner.generate_quad_light(position=lightPos,
                                             look_at=camera0.look_at,
                                             size=torch.tensor([0.1, 0.1]),
                                             intensity=torch.tensor([
                                                 light_intensity,
                                                 light_intensity,
                                                 light_intensity
                                             ]))

        # Camera data for voxel carving
        #print(str(ind) + " " + str(camera.position.data[0].item()) + " " + str(camera.position.data[1].item()) + " " + str(camera.position.data[2].item()) + " " + str(camera.look_at.data[0].item()) + " " + str(camera.look_at.data[1].item()) + " " + str(camera.look_at.data[2].item()))

        scenes.append(
            pyredner.Scene(camera=camera,
                           objects=[objects[0], light],
                           envmap=envmap))
    return scenes
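A sketch of driving this helper with viewpoints sampled on a circle; camera0, radius, and objects are assumed to exist in the enclosing script:

import math

camLocs = [torch.tensor([math.cos(a), 0.3, math.sin(a)])
           for a in torch.linspace(0.0, 2.0 * math.pi, 8).tolist()]
scenes = generate_scenes(camLocs, objects)
imgs = pyredner.render_pathtracing(scenes, num_samples=(64, 4))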
Example #17
def model(cam_poses, cam_look_at, vertices, lights_list, normals, material):

    # m = pyredner.Material(diffuse_reflectance=torch.tensor([0.5, 0.5, 0.5]))
    obj = pyredner.Object(vertices=vertices,
                          indices=indices,
                          normals=normals,
                          material=material,
                          uvs=uvs,
                          uv_indices=uv_indices)  # , colors=colors)
    imgs = []
    for i in range(cam_poses.size(0)):
        cam = pyredner.Camera(
            position=cam_poses[i],
            look_at=cam_look_at,  # Center of the vertices
            up=torch.tensor([0.0, 1.0, 0.0]),
            fov=torch.tensor([45.0]),
            resolution=(1000, 1000))
        scene = pyredner.Scene(camera=cam, objects=[obj])
        img = pyredner.render_deferred(scene=scene,
                                       lights=lights_list[i %
                                                          len(lights_list)],
                                       aa_samples=1)
        imgs.append(img)
    return imgs
Example #18
    target_obj1[0].material = pyredner.Material(diffuse_reflectance=torch.tensor([1.0, 1.0, 1.0]), two_sided=True)

    target_obj1[0].normals = pyredner.compute_vertex_normal(target_obj1[0].vertices, target_obj1[0].indices)

    shapes = []
    shapes.append(target_obj1[0])

    numShapes = len(shapes)
    shapes.extend(lights)

    area_lights = []
    for i in range(numShapes, len(shapes)):
        area_lights.append(pyredner.AreaLight(shape_id = numShapes, intensity = torch.tensor([light_intensity, light_intensity, light_intensity])))
        area_lights.append(pyredner.AreaLight(shape_id = numShapes, intensity = torch.tensor([light_intensity*10, light_intensity*10, light_intensity*10])))

    scene = pyredner.Scene(cam, objects = [shapes[0], shapes[1]],area_lights = [area_lights[0]], envmap = None)
    scene_intense = pyredner.Scene(cam, objects = [shapes[0], shapes[1]], area_lights = [area_lights[1]], envmap = None)

    target = pyredner.render_pathtracing(scene = [scene], num_samples=(512, 0), max_bounces=1)[0]
    pyredner.imwrite(target.cpu(), 'results/shadow_art/high_res/' + step + '/' + str(obj) + '_0.png')

    area_lights = []
    for i in range(numShapes, len(shapes)):
        area_lights.append(pyredner.AreaLight(shape_id = numShapes, intensity = torch.tensor([light_intensity, light_intensity, light_intensity])))
        area_lights.append(pyredner.AreaLight(shape_id = numShapes, intensity = torch.tensor([light_intensity*10, light_intensity*10, light_intensity*10])))

    shape0_vertices = shapes[0].vertices.clone()
    shapes[0].vertices = shape0_vertices

    scene_3 = pyredner.Scene(cam3, objects=[shapes[0], shapes[2]], area_lights = [area_lights[2]], envmap = None)
Example #19
materials = [mat_grey]

vertices, indices, uvs, normals = pyredner.generate_sphere(128, 64)
shape_sphere = pyredner.Shape(\
    vertices = vertices,
    indices = indices,
    uvs = uvs,
    normals = normals,
    material_id = 0)
shapes = [shape_sphere]

envmap = pyredner.imread('sunsky.exr')
if pyredner.get_use_gpu():
    envmap = envmap.cuda(device=pyredner.get_device())
envmap = pyredner.EnvironmentMap(envmap)
scene = pyredner.Scene(cam, shapes, materials, [], envmap)
scene_args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 256,
    max_bounces = 1)
render = pyredner.RenderFunction.apply
img = render(0, *scene_args)
pyredner.imwrite(img.cpu(), 'results/test_envmap/target.exr')
pyredner.imwrite(img.cpu(), 'results/test_envmap/target.png')
target = pyredner.imread('results/test_envmap/target.exr')
if pyredner.get_use_gpu():
    target = target.cuda()

envmap_texels = torch.full([32, 64, 3], 0.5,
                           device=pyredner.get_device(),
                           requires_grad=True)
Example #20
# The source snippet begins mid-call; the enclosing append to area_lights is
# reconstructed here, following the pattern of Example #18.
area_lights.append(
    pyredner.AreaLight(shape_id=numShapes,
                       intensity=torch.tensor([
                           lightIntensity * 10, lightIntensity * 10,
                           lightIntensity * 10
                       ])))

# FOR SCENE DEBUGGING
envmap = pyredner.imread('sunsky.exr')
if pyredner.get_use_gpu():
    envmap = envmap.cuda()
envmap = pyredner.EnvironmentMap(envmap)
envmap = None

# ALL SCENES
scene = pyredner.Scene(cam, [shapes[0], shapes[1], shapes[2]],
                       materials,
                       area_lights=[area_lights[0]],
                       envmap=None)
scene_intense = pyredner.Scene(cam, [shapes[0], shapes[1], shapes[2]],
                               materials,
                               area_lights=[area_lights[1]],
                               envmap=None)

render = pyredner.RenderFunction.apply

# SET UP SCENES
maker_translation_params0 = torch.tensor([0.0, 0.0, -3.0],
                                         device=pyredner.get_device())

shape0_vertices = shapes[0].vertices.clone()
shape1_vertices = shapes[1].vertices.clone()
Example #21
import pyredner
import torch

# Test the sample pixel center flag

pyredner.set_use_gpu(torch.cuda.is_available())
objects = pyredner.load_obj('scenes/teapot.obj', return_objects=True)
camera = pyredner.automatic_camera_placement(objects, resolution=(128, 128))
scene = pyredner.Scene(camera=camera, objects=objects)
img = pyredner.render_albedo(scene, sample_pixel_center=True)
pyredner.imwrite(img.cpu(), 'results/test_sample_pixel_center/img_no_aa.exr')
img = pyredner.render_albedo(scene, sample_pixel_center=False)
pyredner.imwrite(img.cpu(), 'results/test_sample_pixel_center/img_with_aa.exr')
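A short follow-up one might add to quantify the difference between the two settings, reusing the images written above:

img_no_aa = pyredner.imread('results/test_sample_pixel_center/img_no_aa.exr')
img_with_aa = pyredner.imread('results/test_sample_pixel_center/img_with_aa.exr')
print('mean abs difference:', (img_no_aa - img_with_aa).abs().mean().item())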
Example #22
tri_vertices = torch.cat(
    (tri_vertices_2d,
     torch.zeros(tri_vertices_2d.shape[0], 1, device=pyredner.get_device())),
    dim=1).contiguous()
quad_indices = torch.tensor([[0, 1, 2], [1, 2, 3]],
                            dtype=torch.int32,
                            device=pyredner.get_device())
tri_indices = torch.tensor([[0, 1, 2]],
                           dtype=torch.int32,
                           device=pyredner.get_device())
shape_quad = pyredner.Shape(vertices=quad_vertices, indices=quad_indices,
                            material_id=0)
shape_tri = pyredner.Shape(vertices=tri_vertices, indices=tri_indices,
                           material_id=1)
shapes = [shape_quad, shape_tri]

# Setup the scene. We don't need lights.
scene = pyredner.Scene(cam, shapes, materials, [])
# We output the shape id, so that we can shape it later
args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 16,
    # Set max bounces to 0, we don't need lighting.
    max_bounces = 0,
    # Use the diffuse color as the output
    channels = [redner.channels.diffuse_reflectance])

# Render the scene as our target image.
render = pyredner.RenderFunction.apply
img = render(0, *args)
pyredner.imwrite(img.cpu(), 'results/two_d_mesh/target.exr')
pyredner.imwrite(img.cpu(), 'results/two_d_mesh/target.png')
target = pyredner.imread('results/two_d_mesh/target.exr')
Example #23
def deringing(coeffs, window):
    # Low-pass windowing of spherical-harmonics coefficients to suppress
    # ringing. The opening of this function is truncated in the source and
    # reconstructed here from redner's joint_material_envmap_sh tutorial.
    deringed_coeffs = torch.zeros_like(coeffs)
    deringed_coeffs[:, 0] += coeffs[:, 0]
    deringed_coeffs[:, 1:1 + 3] += \
        coeffs[:, 1:1 + 3] * math.pow(math.sin(math.pi * 1.0 / window) / (math.pi * 1.0 / window), 4.0)
    deringed_coeffs[:, 4:4 + 5] += \
        coeffs[:, 4:4 + 5] * math.pow(math.sin(math.pi * 2.0 / window) / (math.pi * 2.0 / window), 4.0)
    deringed_coeffs[:, 9:9 + 7] += \
        coeffs[:, 9:9 + 7] * math.pow(math.sin(math.pi * 3.0 / window) / (math.pi * 3.0 / window), 4.0)
    return deringed_coeffs


deringed_coeffs = deringing(coeffs, 6.0)
res = (128, 128)
# We call the utility function SH_reconstruct to rasterize the coefficients into an envmap
envmap = pyredner.SH_reconstruct(deringed_coeffs, res)
# Save the target envmap
pyredner.imwrite(envmap.cpu(),
                 'results/joint_material_envmap_sh/target_envmap.exr')
# Convert the PyTorch tensor into pyredner compatible envmap
envmap = pyredner.EnvironmentMap(envmap)
# Setup the scene
scene = pyredner.Scene(camera=cam,
                       shapes=shapes,
                       materials=materials,
                       envmap=envmap)
# Serialize the scene
scene_args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 512,
    max_bounces = 1)
# Render the target
render = pyredner.RenderFunction.apply
img = render(0, *scene_args)
# Save the target image
pyredner.imwrite(img.cpu(), 'results/joint_material_envmap_sh/target.exr')
pyredner.imwrite(img.cpu(), 'results/joint_material_envmap_sh/target.png')
# Read the target image back
target = pyredner.imread('results/joint_material_envmap_sh/target.exr')
if pyredner.get_use_gpu():
    target = target.cuda()
Example #24
# The first line of this snippet is truncated in the source; it is
# reconstructed here to match Example #22 above.
tri_vertices = torch.cat(
    (tri_vertices_2d,
     torch.zeros(tri_vertices_2d.shape[0], 1, device=pyredner.get_device())),
    dim=1).contiguous()
quad_indices = torch.tensor([[0, 1, 2], [1, 2, 3]], dtype = torch.int32, device = pyredner.get_device())
tri_indices = torch.tensor([[0, 1, 2]], dtype = torch.int32, device = pyredner.get_device())
shape_quad = pyredner.Shape(\
    vertices = quad_vertices,
    indices = quad_indices,
    material_id = 0)
shape_tri = pyredner.Shape(\
    vertices = tri_vertices,
    indices = tri_indices,
    material_id = 1)
shapes = [shape_quad, shape_tri]

# Setup the scene. We don't need lights.
scene = pyredner.Scene(camera = cam,
                       shapes = shapes,
                       materials = materials)
# We output the shape id, so that we can shape it later
args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 16,
    # Set max bounces to 0, we don't need lighting.
    max_bounces = 0,
    # Use the diffuse color as the output
    channels = [redner.channels.diffuse_reflectance])

# Render the scene as our target image.
render = pyredner.RenderFunction.apply
img = render(0, *args)
pyredner.imwrite(img.cpu(), 'results/two_d_mesh/target.exr')
pyredner.imwrite(img.cpu(), 'results/two_d_mesh/target.png')
Example #25
# The snippet begins mid-loop in the source; the surrounding loop over the
# material map returned by pyredner.load_obj is reconstructed here.
material_id_map = {}
materials = []
count = 0
for key, value in material_map.items():
    material_id_map[key] = count
    count += 1
    materials.append(value)

# Setup geometries
shapes = []
for mtl_name, mesh in mesh_list:
    shapes.append(pyredner.Shape(\
        vertices = mesh.vertices,
        indices = mesh.indices,
        uvs = mesh.uvs,
        normals = mesh.normals,
        material_id = material_id_map[mtl_name]))

# We don't setup any light source here

# Construct the scene
scene = pyredner.Scene(cam, shapes, materials, area_lights=[], envmap=None)
# Serialize the scene
# Here we specify the output channels as "depth", "shading_normal"
scene_args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 16,
    max_bounces = 0,
    channels = [redner.channels.depth, redner.channels.shading_normal])

# Render the scene as our target image.
render = pyredner.RenderFunction.apply
# Render. The first argument is the seed for RNG in the renderer.
img = render(0, *scene_args)
# Save the images.
depth = img[:, :, 0]
normal = img[:, :, 1:4]
Example #26
import torch
import pyredner

vertices, indices, uvs, normals = pyredner.generate_sphere(64, 128)
m = pyredner.Material(diffuse_reflectance=torch.tensor(
    (0.5, 0.5, 0.5), device=pyredner.get_device()))
obj = pyredner.Object(vertices=vertices,
                      indices=indices,
                      uvs=uvs,
                      normals=normals,
                      material=m)
cam = pyredner.automatic_camera_placement([obj], resolution=(480, 640))
scene = pyredner.Scene(objects=[obj], camera=cam)

img = pyredner.render_g_buffer(
    scene, channels=[pyredner.channels.uv, pyredner.channels.shading_normal])
uv_img = torch.cat([img[:, :, :2],
                    torch.zeros(480, 640, 1, device=pyredner.get_device())],
                   dim=2)
normal_img = img[:, :, 2:]
pyredner.imwrite(uv_img, 'results/test_sphere/uv.png')
pyredner.imwrite(normal_img, 'results/test_sphere/normal.png')
Example #27
# The snippet begins mid-call in the source; the Camera constructor opening
# is reconstructed from the parallel examples above.
cam = pyredner.Camera(
    position=cam_pos,
    look_at=cam_look_at,  # Center of the vertices
    up=torch.tensor([0.0, 1.0, 0.0]),
    fov=torch.tensor([20.0]),
    resolution=resolution)
for i in range(0):  # loop disabled in the source; originally range(num_views)
    print("correcting position {:0>2d}".format(i))
    eul_optimizer = torch.optim.SGD([euler], lr=2)
    tra_optimizer = torch.optim.SGD([trans], lr=5000)
    for t in range(20):
        eul_optimizer.zero_grad()
        tra_optimizer.zero_grad()
        rotation_matrix = pyredner.gen_rotate_matrix(euler)
        obj.vertices = (vertices - center) @ torch.t(rotation_matrix) \
                       + center + trans * torch.tensor([1., 1., 3.], device=pyredner.get_device())
        scene = pyredner.Scene(objects=[obj], camera=cam, envmap=envmap)
        img = pyredner.render_pathtracing(scene=scene,
                                          num_samples=(64, 4),
                                          use_secondary_edge_sampling=True)
        loss = (img - target[i]).pow(2).mean()
        loss.backward()
        eul_optimizer.step()
        tra_optimizer.step()
        if t % 2 == 1:
            print('    iteration', t, 'loss:{:.6f}'.format(loss),
                  euler.data.cpu(),
                  trans.data.cpu() * torch.tensor([1., 1., 3.]))

    euler_list.append(euler.data.clone())
    trans_list.append(trans.data.clone())
Example #28
    def render(self, scene, svbrdf):
        imgs = []

        svbrdf = svbrdf.unsqueeze(0) if len(svbrdf.shape) == 3 else svbrdf

        sensor_size = (svbrdf.shape[-1], svbrdf.shape[-2])

        for svbrdf_single in torch.split(svbrdf, 1, dim=0):
            normals, diffuse, roughness, specular = utils.unpack_svbrdf(
                svbrdf_single.squeeze(0))
            # Redner expects the normal map to be in range [0, 1]
            normals = utils.encode_as_unit_interval(normals)
            # Redner expects the roughness to have one channel only.
            # We also need to convert from GGX roughness to Blinn-Phong power.
            # See: https://github.com/iondune/csc473/blob/master/lectures/07-cook-torrance.md
            roughness = torch.mean(torch.clamp(roughness, min=0.001),
                                   dim=0,
                                   keepdim=True)**4

            # Convert from [c,h,w] to [h,w,c] for redner
            normals = normals.permute(1, 2, 0)
            diffuse = diffuse.permute(1, 2, 0)
            roughness = roughness.permute(1, 2, 0)
            specular = specular.permute(1, 2, 0)

            material = pyredner.Material(
                diffuse_reflectance=pyredner.Texture(
                    diffuse.to(self.redner_device)),
                specular_reflectance=pyredner.Texture(
                    specular.to(self.redner_device)),
                roughness=pyredner.Texture(roughness.to(self.redner_device)),
                normal_map=pyredner.Texture(normals.to(self.redner_device)))

            material_patch = pyredner.Object(vertices=self.patch_vertices,
                                             uvs=self.patch_uvs,
                                             indices=self.patch_indices,
                                             material=material)

            # Define the camera parameters (focused at the middle of the patch) and make sure we always have a valid 'up' direction
            position = np.array(scene.camera.pos)
            lookat = np.array([0.0, 0.0, 0.0])
            cz = lookat - position  # Principal axis
            up = np.array([0.0, 0.0, 1.0])
            if np.linalg.norm(np.cross(cz, up)) == 0.0:
                up = np.array([0.0, 1.0, 0.0])

            camera = pyredner.Camera(
                position=torch.FloatTensor(position).to(self.redner_device),
                look_at=torch.FloatTensor(lookat).to(self.redner_device),
                up=torch.FloatTensor(up).to(self.redner_device),
                fov=torch.FloatTensor([90]),
                resolution=sensor_size,
                camera_type=self.camera_type)

            # # The deferred rendering path.
            # # It does not have a specular model and therefore is of limited usability for us
            # full_scene = pyredner.Scene(camera = camera, objects = [material_patch])
            # light = pyredner.PointLight(position = torch.tensor(scene.light.pos).to(self.redner_device),
            #                                    intensity = torch.tensor(scene.light.color).to(self.redner_device))
            # img = pyredner.render_deferred(scene = full_scene, lights = [light])

            light = pyredner.generate_quad_light(
                position=torch.Tensor(scene.light.pos).to(self.redner_device),
                look_at=torch.zeros(3).to(self.redner_device),
                size=torch.Tensor([0.6, 0.6]).to(self.redner_device),
                intensity=torch.Tensor(scene.light.color).to(
                    self.redner_device))
            full_scene = pyredner.Scene(camera=camera,
                                        objects=[material_patch, light])
            img = pyredner.render_pathtracing(full_scene, num_samples=(16, 8))

            # Transform the rendered image back to something torch can interpret
            imgs.append(img.permute(2, 0, 1).to(svbrdf.device))

        return torch.stack(imgs)
Example #29
shape_light = pyredner.Shape(
    # The constructor opening and the first vertex rows are truncated in the
    # source; the two leading rows below are assumptions.
    vertices = torch.tensor([[-1.0, -1.0, -7.0],
                             [ 1.0, -1.0, -7.0],
                             [-1.0,  1.0, -7.0],
                             [ 2.0,  1.0, -7.0]], device = pyredner.get_device()),
    indices = torch.tensor([[0, 1, 2],[1, 3, 2]],
        dtype = torch.int32, device = pyredner.get_device()),
    uvs = None,
    normals = None,
    material_id = 0)

# The shape list of our scene contains two shapes:
shapes = [shape_diamond, shape_light]

light = pyredner.AreaLight(shape_id=1,
                           intensity=torch.tensor([200.0, 200.0, 200.0]))
area_lights = [light]
# Finally we construct our scene using all the variables we setup previously.
scene = pyredner.Scene(cam, shapes, materials, area_lights)

#shapes = []
#for _, mesh in mesh_list:
#    assert(mesh.normal_indices is None)
#    shapes.append(pyredner.Shape(\
#        vertices = mesh.vertices,
#        indices = mesh.indices,
#        uvs = None,
#        normals = mesh.normals,
#        material_id=0))

# The previous tutorial used a mesh area light for the scene lighting,
# here we use an environment light,
# which is a texture representing infinitely far away light sources in
# spherical coordinates.
# The Material constructor opening is truncated in the source; it is
# reconstructed here from the commented-out sphere below, which uses it as m.
m = pyredner.Material(diffuse_reflectance=torch.tensor(
    [0.5, 0.5, 0.5], device=pyredner.get_device()))

#sphere = pyredner.Object(vertices = mesh2.vertices/3, indices = mesh2.indices, uvs = None,
#                         normals = mesh2.normals, material = m)

#cam = pyredner.automatic_camera_placement(shapes=[sphere], resolution=(background.shape[0], background.shape[1]))
cam = pyredner.Camera(
    position=torch.tensor([0.0, -0.0, -5.0]),  # -8.5
    look_at=torch.tensor([0.0, 0.0, 0.0]),
    up=torch.tensor([0.0, 1.0, 0.0]),
    fov=torch.tensor([45.0]),  # in degree
    clip_near=1e-2,  # needs to > 0
    resolution=(1650, 2843),
    fisheye=False)

scene = pyredner.Scene(camera=cam, objects=objects)  #[sphere])
lights = [
    pyredner.PointLight(
        cam.position.to(pyredner.get_device()),
        torch.tensor([10.0, 10.0, 10.0], device=pyredner.get_device()))
]

img = pyredner.render_albedo(scene)
#img = pyredner.render_deferred(scene=scene, lights=lights, alpha=True)
imshow(torch.pow(img, 1.0 / 2.2).cpu())
pyredner.imwrite(
    torch.pow(img, 1.0 / 2.2).cpu(),
    'results/' + folder_name + '/init_load.png')
#%%
vertices = []
for obj in objects: