Example No. 1
def visual_vertex_grad(vertices: torch.Tensor,
                       indices: torch.Tensor,
                       cam: pyredner.Camera = None):
    if not hasattr(visual_vertex_grad, 'x'):
        visual_vertex_grad.x = 0
    else:
        visual_vertex_grad.x += 1
    if cam is None:  # fall back to the module-level camera parameters
        cam = pyredner.Camera(
            position=cam_pos,      # module-level global in the original script
            look_at=cam_look_at,   # center of the vertices
            up=torch.tensor([0.0, 1.0, 0.0]),
            fov=torch.tensor([45.0]),
            resolution=(1000, 1000))
    m = pyredner.Material(use_vertex_color=True)
    obj = pyredner.Object(vertices=vertices, indices=indices, material=m)
    coe = 500000.  # scale factor so small gradients remain visible
    # Per-axis color codes: positive gradient components map onto one
    # channel, negative components onto the complementary two.
    color_reps = torch.tensor([[[1., 0., 0.], [0., -1., -1.]],
                               [[0., 1., 0.], [-1., 0., -1.]],
                               [[0., 0., 1.], [-1., -1., 0.]]]).to(pyredner.get_device())
    grad_imgs = []
    for d in range(3):  # one image per spatial axis (x, y, z)
        grad = vertices.grad[:, d:d + 1].expand(-1, 3)
        colors = torch.where(grad > 0,
                             grad * color_reps[d, 0],
                             grad * color_reps[d, 1]) * coe

        obj.colors = colors
        scene = pyredner.Scene(camera=cam, objects=[obj])
        grad_imgs.append(pyredner.render_albedo(scene=scene))
    for d in range(3):
        pyredner.imwrite(
            grad_imgs[d].cpu(), output_path +
            '/grad_imgs/{:0>2d}{:0>2d}.png'.format(d, visual_vertex_grad.x))
    return grad_imgs
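A minimal driver sketch for this helper, assuming the module-level `cam_pos`, `cam_look_at`, and `output_path` globals it reads, plus a hypothetical camera `cam` and target image `target`:

# Hypothetical setup; any differentiable render/loss that populates
# vertices.grad works here.
vertices.requires_grad_(True)
obj = pyredner.Object(vertices=vertices, indices=indices,
                      material=pyredner.Material(use_vertex_color=True),
                      colors=torch.ones_like(vertices))
img = pyredner.render_albedo(scene=pyredner.Scene(camera=cam, objects=[obj]))
loss = (img - target).pow(2).mean()
loss.backward()                         # fills vertices.grad
visual_vertex_grad(vertices, indices)   # writes one image per x/y/z axis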
Example No. 2
def model(cam_pos, cam_look_at, vertices, color_coeffs, ambient_color,
          dir_light_intensity, dir_light_direction):
    #vertices = (shape_mean + shape_basis @ torch.zeros(199, device=pyredner.get_device())).view(-1, 3)
    normals = pyredner.compute_vertex_normal(vertices, indices)
    colors = (color_mean + color_basis @ color_coeffs).view(-1, 3)
    #m = pyredner.Material(use_vertex_color=True)
    # Grey diffuse material; the vertex colors below are stored on the object
    # but only used when use_vertex_color=True.
    m = pyredner.Material(diffuse_reflectance=torch.tensor([0.5, 0.5, 0.5]))
    obj = pyredner.Object(vertices=vertices,
                          indices=indices,
                          normals=normals,
                          material=m,
                          colors=colors)

    cam = pyredner.Camera(
        position=cam_pos,
        look_at=cam_look_at,  # Center of the vertices
        up=torch.tensor([0.0, 1.0, 0.0]),
        fov=torch.tensor([45.0]),
        resolution=(512, 512))
    scene = pyredner.Scene(camera=cam, objects=[obj])
    ambient_light = pyredner.AmbientLight(ambient_color)
    dir_light = pyredner.DirectionalLight(dir_light_direction,
                                          dir_light_intensity)
    img = pyredner.render_deferred(scene=scene,
                                   lights=[ambient_light, dir_light])
    return img, obj
Example No. 3
def model(cam_pos, cam_look_at, vertices, indices, ambient_color,
          dir_light_intensity, dir_light_direction, normals, colors):
    #normals = pyredner.compute_vertex_normal(vertices, indices, normal_scheme)
    #m = pyredner.Material(diffuse_reflectance=torch.tensor([0.5, 0.5, 0.5]))
    m = pyredner.Material(use_vertex_color=True)
    obj = pyredner.Object(vertices=vertices,
                          indices=indices,
                          normals=normals,
                          material=m,
                          colors=colors)

    cam = pyredner.Camera(
        position=cam_pos,
        look_at=cam_look_at,  # Center of the vertices
        up=torch.tensor([0.0, 1.0, 0.0]),
        fov=torch.tensor([45.0]),
        resolution=(1000, 1000))
    scene = pyredner.Scene(camera=cam, objects=[obj])
    ambient_light = pyredner.AmbientLight(ambient_color)
    dir_light = pyredner.DirectionalLight(dir_light_direction,
                                          dir_light_intensity)
    img = pyredner.render_deferred(scene=scene,
                                   lights=[ambient_light, dir_light],
                                   aa_samples=1)
    return img
Example No. 4
def model(cam_poses, cam_look_at, shape_coeffs, color_coeffs, lights_list,
          resolution):
    vertices = (shape_mean + shape_basis @ shape_coeffs).view(-1, 3)
    normals = pyredner.compute_vertex_normal(vertices, indices)
    colors = (color_mean + color_basis @ color_coeffs).view(-1, 3)
    #m = pyredner.Material(diffuse_reflectance = torch.tensor([0.5, 0.5, 0.5]))
    m = pyredner.Material(use_vertex_color=True)
    obj = pyredner.Object(vertices=vertices,
                          indices=indices,
                          normals=normals,
                          material=m,
                          colors=colors)

    imgs = []
    for i in range(cam_poses.size(0)):
        cam = pyredner.Camera(
            position=cam_poses[i],
            look_at=cam_look_at,  # Center of the vertices
            up=torch.tensor([0.0, 1.0, 0.0]),
            fov=torch.tensor([45.0]),
            resolution=resolution)
        scene = pyredner.Scene(camera=cam, objects=[obj])
        #ambient_light = pyredner.AmbientLight(ambient_color)
        #dir_light = pyredner.DirectionalLight(dir_light_direction, dir_light_intensity)
        img = pyredner.render_deferred(scene=scene,
                                       lights=lights_list[i %
                                                          len(lights_list)],
                                       aa_samples=1)
        imgs.append(img)
    return imgs, obj
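A hedged sketch of the optimization loop that typically drives this kind of `model` (the `targets` images, the 199-dimensional coefficient size, the learning rate, and the pre-existing `cam_poses`, `cam_look_at`, and `lights_list` are assumptions):

shape_coeffs = torch.zeros(199, device=pyredner.get_device(), requires_grad=True)
color_coeffs = torch.zeros(199, device=pyredner.get_device(), requires_grad=True)
optimizer = torch.optim.Adam([shape_coeffs, color_coeffs], lr=0.1)
for t in range(200):
    optimizer.zero_grad()
    imgs, _ = model(cam_poses, cam_look_at, shape_coeffs, color_coeffs,
                    lights_list, (256, 256))
    loss = sum((img - tgt).pow(2).mean() for img, tgt in zip(imgs, targets))
    loss.backward()   # gradients flow back through redner's differentiable renderer
    optimizer.step()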
Example No. 5
    def grad_image(self):
        # Visualize vertex gradients: take |grad|, normalize to [0, 1], and
        # render it as per-vertex albedo from each configured camera.
        grads = torch.clone(self.mesh.vertices.grad).detach()
        vcolormat = pyr.Material(use_vertex_color=True)
        grads_mag = torch.abs(grads)
        vcolors = (grads_mag - grads_mag.min()) / (grads_mag.max() - grads_mag.min())
        gradobj = pyr.Object(self.mesh.vertices, self.mesh.indices,
                             material=vcolormat, normals=self.mesh.normals,
                             colors=vcolors)
        cameras = self.settings[0]
        gradscenes = [pyr.Scene(c, objects=[gradobj]) for c in cameras]
        grads_rendered = pyr.render_albedo(gradscenes)
        return grads_rendered
Example No. 6
def generate_quad_light(position: torch.Tensor,
                        look_at: torch.Tensor,
                        size: torch.Tensor,
                        intensity: torch.Tensor,
                        directly_visible: Optional[bool] = None):
    """
        Generate a pyredner.Object that is a quad light source.

        Args
        ====
        position: torch.Tensor
            1-d tensor of size 3
        look_at: torch.Tensor
            1-d tensor of size 3
        size: torch.Tensor
            1-d tensor of size 2
        intensity: torch.Tensor
            1-d tensor of size 3
        directly_visible: Optional[bool]
            Can the camera see the light source directly?

        Returns
        =======
        pyredner.Object
            quad light source
    """
    d = look_at - position
    d = d / torch.norm(d)
    # ONB -- generate two axes that are orthogonal to d
    a = 1 / (1 + d[2])
    b = -d[0] * d[1] * a
    x = torch.where(d[2] < (-1 + 1e-6),
                    torch.tensor([0.0, -1.0, 0.0], device = d.device),
                    torch.stack([1 - d[0] * d[0] * a, b, -d[0]]))
    y = torch.where(d[2] < (-1 + 1e-6),
                    torch.tensor([-1.0, 0.0, 0.0], device = d.device),
                    torch.stack([b, 1 - d[1] * d[1] * a, -d[1]]))
    v0 = position - x * size[0] * 0.5 - y * size[1] * 0.5
    v1 = position + x * size[0] * 0.5 - y * size[1] * 0.5
    v2 = position - x * size[0] * 0.5 + y * size[1] * 0.5
    v3 = position + x * size[0] * 0.5 + y * size[1] * 0.5

    vertices = torch.stack((v0, v1, v2, v3), dim = 0).to(d.device)
    indices = torch.tensor([[0, 1, 2],[1, 3, 2]],
        dtype = torch.int32, device = d.device)
    m = pyredner.Material(diffuse_reflectance = torch.tensor([0.0, 0.0, 0.0], device = d.device))
    return pyredner.Object(vertices = vertices,
                           indices = indices,
                           material = m,
                           light_intensity = intensity,
                           directly_visible = directly_visible)
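A short usage sketch (the camera `cam` and object `obj` are assumed to exist): the returned light is an ordinary object, so it is added to the scene like any other geometry and rendered with path tracing:

light = generate_quad_light(
    position=torch.tensor([0.0, 4.0, 4.0], device=pyredner.get_device()),
    look_at=torch.zeros(3, device=pyredner.get_device()),
    size=torch.tensor([2.0, 2.0], device=pyredner.get_device()),
    intensity=torch.tensor([10.0, 10.0, 10.0], device=pyredner.get_device()))
scene = pyredner.Scene(camera=cam, objects=[obj, light])
img = pyredner.render_pathtracing(scene=scene, num_samples=(64, 4))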
Example No. 7
def model(cam_pos, cam_look_at, shape_coeffs, color_coeffs, resolution, center,
          all_euler_angles, all_translations):
    # First rotate around the center, then translate

    imgs = []

    #obj = pyredner.load_obj('p_ones30/final.obj', return_objects=True)[0]
    vertices, indices, uvs, normals = pyredner.generate_sphere(128, 64)
    vertices *= 80
    m = pyredner.Material(
        diffuse_reflectance=torch.ones(2, 2, 3, dtype=torch.float32))
    obj = pyredner.Object(vertices=vertices,
                          indices=indices,
                          normals=normals,
                          uvs=uvs,
                          material=m)
    v = obj.vertices.clone()

    for i in range(len(all_translations)):
        rotation_matrix = pyredner.gen_rotate_matrix(all_euler_angles[i]).to(
            pyredner.get_device())
        center = center.to(pyredner.get_device())
        # vertices = ((shape_mean + shape_basis @ shape_coeffs).view(-1, 3) - center) @ torch.t(rotation_matrix) + center + all_translations[i].to(pyredner.get_device())
        obj.vertices = (v - center) @ torch.t(rotation_matrix) + center
        obj.normals = pyredner.compute_vertex_normal(obj.vertices, indices)
        # colors = (color_mean + color_basis @ color_coeffs).view(-1, 3)
        # m = pyredner.Material(diffuse_reflectance = torch.tensor([0.5, 0.5, 0.5]))
        m = pyredner.Material(use_vertex_color=True)  # NOTE: never assigned to obj, which keeps its textured material
        # obj = pyredner.Object(vertices=vertices, indices=indices, normals=normals, material=m, colors=colors)

        if i == 0:
            pyredner.save_obj(obj,
                              "generated/env_dataset_" + name + '/tgt_obj.obj')

        cam = pyredner.Camera(
            position=cam_pos,
            look_at=cam_look_at,  # Center of the vertices
            up=torch.tensor([0.0, 1.0, 0.0]),
            fov=torch.tensor([45.0]),
            resolution=resolution)
        scene = pyredner.Scene(camera=cam, objects=[obj], envmap=envmap)

        img = pyredner.render_pathtracing(scene=scene, num_samples=(128, 4))
        imgs.append(img)
    return imgs
Example No. 8
def model(cam_poses, cam_look_ats, shape_coeffs, color_coeffs, resolution):
    # First rotate around the center, then translate

    imgs = []

    vertices = (shape_mean + shape_basis @ shape_coeffs).view(-1, 3)
    normals = pyredner.compute_vertex_normal(vertices, indices)
    colors = (color_mean + color_basis @ color_coeffs).view(-1, 3)
    m = pyredner.Material(use_vertex_color=False,
                          specular_reflectance=torch.tensor(
                              [1., 1., 1.], device=pyredner.get_device()),
                          roughness=torch.tensor([0.02]))
    obj = pyredner.Object(vertices=vertices,
                          indices=indices,
                          normals=normals,
                          material=m,
                          colors=colors)
    # NOTE: the object built above is immediately replaced by one loaded from disk.
    obj = pyredner.load_obj('generated/env_dataset_oness_n/tgt_obj.obj',
                            return_objects=True)[0]
    obj.material.specular_reflectance = pyredner.Texture(
        torch.tensor([0.05, 0.05, 0.05], device=pyredner.get_device()))
    obj.material.roughness = pyredner.Texture(torch.tensor([0.02]))
    pyredner.save_obj(obj, "generated/senv_dataset_" + name + '/tgt_obj.obj')

    for i in range(len(cam_poses)):
        cam = pyredner.Camera(
            position=cam_poses[i],
            look_at=cam_look_ats[i %
                                 len(cam_look_ats)],  # Center of the vertices
            up=torch.tensor([0.0, 1.0, 0.0]),
            fov=torch.tensor([45.0]),
            resolution=resolution)
        scene = pyredner.Scene(camera=cam, objects=[obj], envmap=envmap)

        img = pyredner.render_pathtracing(scene=scene, num_samples=(128, 4))
        imgs.append(img)
    return imgs
Example No. 9
def model(cam_poses, cam_look_at, vertices, lights_list, normals, material):

    # m = pyredner.Material(diffuse_reflectance=torch.tensor([0.5, 0.5, 0.5]))
    obj = pyredner.Object(vertices=vertices,
                          indices=indices,
                          normals=normals,
                          material=material,
                          uvs=uvs,
                          uv_indices=uv_indices)  # , colors=colors)
    imgs = []
    for i in range(cam_poses.size(0)):
        cam = pyredner.Camera(
            position=cam_poses[i],
            look_at=cam_look_at,  # Center of the vertices
            up=torch.tensor([0.0, 1.0, 0.0]),
            fov=torch.tensor([45.0]),
            resolution=(1000, 1000))
        scene = pyredner.Scene(camera=cam, objects=[obj])
        img = pyredner.render_deferred(scene=scene,
                                       lights=lights_list[i %
                                                          len(lights_list)],
                                       aa_samples=1)
        imgs.append(img)
    return imgs
Example No. 10
def load_obj(filename: str,
             obj_group: bool = True,
             flip_tex_coords: bool = True,
             use_common_indices: bool = False,
             return_objects: bool = False):
    """
        Load from a Wavefront obj file as PyTorch tensors.

        Args
        ====
        obj_group: bool
            split the meshes based on materials
        flip_tex_coords: bool
            flip the v coordinate of uv by applying v' = 1 - v
        use_common_indices: bool
            Use the same indices for position, uvs, normals.
            Not recommended, since texture seams in objects sharing the
            same positions would cause the optimization to "tear" the object.
        return_objects: bool
            Output list of Object instead.
            If there is no corresponding material for a shape, assign a grey material.

        Returns
        =======
        if return_objects == True, return a list of Object
        if return_objects == False, return (material_map, mesh_list, light_map),
        material_map -> Map[mtl_name, WavefrontMaterial]
        mesh_list -> List[TriangleMesh]
        light_map -> Map[mtl_name, torch.Tensor]
    """
    vertices_pool = []
    uvs_pool = []
    normals_pool = []
    indices = []
    uv_indices = []
    normal_indices = []
    vertices = []
    uvs = []
    normals = []
    vertices_map = {}
    uvs_map = {}
    normals_map = {}
    material_map = {}
    current_mtllib = {}
    current_material_name = None

    def create_mesh(indices, uv_indices, normal_indices, vertices, uvs,
                    normals):
        indices = torch.tensor(indices,
                               dtype=torch.int32,
                               device=pyredner.get_device())
        if len(uv_indices) == 0:
            uv_indices = None
        else:
            uv_indices = torch.tensor(uv_indices,
                                      dtype=torch.int32,
                                      device=pyredner.get_device())
        if len(normal_indices) == 0:
            normal_indices = None
        else:
            normal_indices = torch.tensor(normal_indices,
                                          dtype=torch.int32,
                                          device=pyredner.get_device())
        vertices = torch.tensor(vertices, device=pyredner.get_device())
        if len(uvs) == 0:
            uvs = None
        else:
            uvs = torch.tensor(uvs, device=pyredner.get_device())
        if len(normals) == 0:
            normals = None
        else:
            normals = torch.tensor(normals, device=pyredner.get_device())
        return TriangleMesh(indices, uv_indices, normal_indices, vertices, uvs,
                            normals)

    mesh_list = []
    light_map = {}

    with open(filename, 'r') as f:
        d = os.path.dirname(filename)
        cwd = os.getcwd()
        if d != '':
            os.chdir(d)
        for line in f:
            line = line.strip()
            splitted = re.split(r'\ +', line)
            if splitted[0] == 'mtllib':
                current_mtllib = load_mtl(splitted[1])
            elif splitted[0] == 'usemtl':
                if len(indices) > 0 and obj_group is True:
                    # Flush
                    mesh_list.append(
                        (current_material_name,
                         create_mesh(indices, uv_indices, normal_indices,
                                     vertices, uvs, normals)))
                    indices = []
                    uv_indices = []
                    normal_indices = []
                    vertices = []
                    normals = []
                    uvs = []
                    vertices_map = {}
                    uvs_map = {}
                    normals_map = {}

                mtl_name = splitted[1]
                current_material_name = mtl_name
                if mtl_name not in material_map:
                    m = current_mtllib[mtl_name]
                    if m.map_Kd is None:
                        diffuse_reflectance = torch.tensor(
                            m.Kd,
                            dtype=torch.float32,
                            device=pyredner.get_device())
                    else:
                        diffuse_reflectance = pyredner.imread(m.map_Kd)
                        if pyredner.get_use_gpu():
                            diffuse_reflectance = diffuse_reflectance.cuda(
                                device=pyredner.get_device())
                    if m.map_Ks is None:
                        specular_reflectance = torch.tensor(
                            m.Ks,
                            dtype=torch.float32,
                            device=pyredner.get_device())
                    else:
                        specular_reflectance = pyredner.imread(m.map_Ks)
                        if pyredner.get_use_gpu():
                            specular_reflectance = specular_reflectance.cuda(
                                device=pyredner.get_device())
                    if m.map_Ns is None:
                        roughness = torch.tensor([2.0 / (m.Ns + 2.0)],
                                                 dtype=torch.float32,
                                                 device=pyredner.get_device())
                    else:
                        roughness = 2.0 / (pyredner.imread(m.map_Ns) + 2.0)
                        if pyredner.get_use_gpu():
                            roughness = roughness.cuda(
                                device=pyredner.get_device())
                    if m.Ke != (0.0, 0.0, 0.0):
                        light_map[mtl_name] = torch.tensor(m.Ke,
                                                           dtype=torch.float32)
                    material_map[mtl_name] = pyredner.Material(\
                        diffuse_reflectance, specular_reflectance, roughness)
            elif splitted[0] == 'v':
                vertices_pool.append([
                    float(splitted[1]),
                    float(splitted[2]),
                    float(splitted[3])
                ])
            elif splitted[0] == 'vt':
                u = float(splitted[1])
                v = float(splitted[2])
                if flip_tex_coords:
                    v = 1 - v
                uvs_pool.append([u, v])
            elif splitted[0] == 'vn':
                normals_pool.append([
                    float(splitted[1]),
                    float(splitted[2]),
                    float(splitted[3])
                ])
            elif splitted[0] == 'f':

                def num_indices(x):
                    return len(re.split('/', x))

                def get_index(x, i):
                    return int(re.split('/', x)[i])

                def parse_face_index(x, i):
                    f = get_index(x, i)
                    if f > 0:
                        f -= 1
                    return f

                assert (len(splitted) <= 5)

                def get_vertex_id(indices):
                    pi = parse_face_index(indices, 0)
                    uvi = None
                    if (num_indices(indices) > 1
                            and re.split('/', indices)[1] != ''):
                        uvi = parse_face_index(indices, 1)
                    ni = None
                    if (num_indices(indices) > 2
                            and re.split('/', indices)[2] != ''):
                        ni = parse_face_index(indices, 2)
                    if use_common_indices:
                        # vertex, uv, normals share the same indexing
                        key = (pi, uvi, ni)
                        if key in vertices_map:
                            vertex_id = vertices_map[key]
                            return vertex_id, vertex_id, vertex_id

                        vertex_id = len(vertices)
                        vertices_map[key] = vertex_id
                        vertices.append(vertices_pool[pi])
                        if uvi is not None:
                            uvs.append(uvs_pool[uvi])
                        if ni is not None:
                            normals.append(normals_pool[ni])
                        return vertex_id, vertex_id, vertex_id
                    else:
                        # vertex, uv, normals use separate indexing
                        vertex_id = None
                        uv_id = None
                        normal_id = None

                        if pi in vertices_map:
                            vertex_id = vertices_map[pi]
                        else:
                            vertex_id = len(vertices)
                            vertices.append(vertices_pool[pi])
                            vertices_map[pi] = vertex_id

                        if uvi is not None:
                            if uvi in uvs_map:
                                uv_id = uvs_map[uvi]
                            else:
                                uv_id = len(uvs)
                                uvs.append(uvs_pool[uvi])
                                uvs_map[uvi] = uv_id

                        if ni is not None:
                            if ni in normals_map:
                                normal_id = normals_map[ni]
                            else:
                                normal_id = len(normals)
                                normals.append(normals_pool[ni])
                                normals_map[ni] = normal_id
                        return vertex_id, uv_id, normal_id

                vid0, uv_id0, n_id0 = get_vertex_id(splitted[1])
                vid1, uv_id1, n_id1 = get_vertex_id(splitted[2])
                vid2, uv_id2, n_id2 = get_vertex_id(splitted[3])

                indices.append([vid0, vid1, vid2])
                if uv_id0 is not None:
                    assert (uv_id1 is not None and uv_id2 is not None)
                    uv_indices.append([uv_id0, uv_id1, uv_id2])
                if n_id0 is not None:
                    assert (n_id1 is not None and n_id2 is not None)
                    normal_indices.append([n_id0, n_id1, n_id2])
                if (len(splitted) == 5):
                    vid3, uv_id3, n_id3 = get_vertex_id(splitted[4])
                    indices.append([vid0, vid2, vid3])
                    if uv_id0 is not None:
                        assert (uv_id3 is not None)
                        uv_indices.append([uv_id0, uv_id2, uv_id3])
                    if n_id0 is not None:
                        assert (n_id3 is not None)
                        normal_indices.append([n_id0, n_id2, n_id3])

    mesh_list.append((current_material_name,
                      create_mesh(indices, uv_indices, normal_indices,
                                  vertices, uvs, normals)))
    if d != '':
        os.chdir(cwd)

    if return_objects:
        objects = []
        for mtl_name, mesh in mesh_list:
            if mtl_name in material_map:
                m = material_map[mtl_name]
            else:
                m = pyredner.Material(diffuse_reflectance = \
                    torch.tensor((0.5, 0.5, 0.5),
                                 device = pyredner.get_device()))
            if mtl_name in light_map:
                l = light_map[mtl_name]
            else:
                l = None
            objects.append(pyredner.Object(\
                vertices = mesh.vertices,
                indices = mesh.indices,
                material = m,
                light_intensity = l,
                uvs = mesh.uvs,
                normals = mesh.normals,
                uv_indices = mesh.uv_indices,
                normal_indices = mesh.normal_indices))
        return objects
    else:
        return material_map, mesh_list, light_map
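Typical usage for both return modes (the .obj path and the camera `cam` are placeholders):

# Object mode: one pyredner.Object per material group, ready for a Scene.
objects = load_obj('model.obj', return_objects=True)
scene = pyredner.Scene(camera=cam, objects=objects)

# Raw mode: inspect the parsed materials, meshes, and emissive materials.
material_map, mesh_list, light_map = load_obj('model.obj')
for mtl_name, mesh in mesh_list:
    print(mtl_name, mesh.vertices.shape, mesh.indices.shape)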
Example No. 11
        print(t)
        # Coarse-to-fine: upsample the texture 2x, detach it, and restart
        # its optimizer at the new resolution.
        texels = pyredner.imresize(texels, scale=2.).detach()
        texels.requires_grad = True
        tex_optimizer = torch.optim.Adam([texels], lr=0.05)
        print(texels.size())

all_losses = torch.stack(all_losses).detach().cpu()
total_losses = torch.stack(total_losses).detach().cpu()
img_losses = torch.stack(img_losses).detach().cpu()
smooth_losses = torch.stack(smooth_losses).detach().cpu()

print()

obj = pyredner.Object(vertices=vertices,
                      indices=indices,
                      normals=normals,
                      material=m,
                      uvs=uvs,
                      uv_indices=uv_indices)  # , colors=colors)
pyredner.save_obj(obj, output_path + '/final.obj')
# pyredner.imwrite(texels.data.cpu(), output_path + '/texels.png')

import matplotlib.pyplot as plt

plt.figure()
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 6))
plt.suptitle(description + '\n' + str(vars(args))[1:-1].replace("'", ""),
             fontsize=13,
             color='blue')

plt.sca(ax1)
Example No. 12
import torch
import pyredner

vertices, indices, uvs, normals = pyredner.generate_sphere(64, 128)
m = pyredner.Material(diffuse_reflectance=torch.tensor(
    (0.5, 0.5, 0.5), device=pyredner.get_device()))
obj = pyredner.Object(vertices=vertices,
                      indices=indices,
                      uvs=uvs,
                      normals=normals,
                      material=m)
cam = pyredner.automatic_camera_placement([obj], resolution=(480, 640))
scene = pyredner.Scene(objects=[obj], camera=cam)

img = pyredner.render_g_buffer(
    scene, channels=[pyredner.channels.uv, pyredner.channels.shading_normal])
uv_img = torch.cat([img[:, :, :2],
                    torch.zeros(480, 640, 1, device=img.device)], dim=2)
normal_img = img[:, :, 2:]
pyredner.imwrite(uv_img.cpu(), 'results/test_sphere/uv.png')
pyredner.imwrite(normal_img.cpu(), 'results/test_sphere/normal.png')
Example No. 13
    total_loss.backward()
    ver_optimizer.step()
    vertices.data = vertices.data * bound.reshape(-1, 1).expand(-1, 3) + boundary
    # normals_optimizer.step()
    # normals.data = normals.data / normals.data.norm(dim=1).reshape(-1, 1).expand(-1, 3)
    if 1:  # disabled condition: smooth_scheme != 'none' and t > 20
        for num_of_smooth in range(2):
            # Smooth with +lmd then -lmd: a Taubin-style shrink/inflate pair
            # that smooths the mesh while limiting volume loss.
            pyredner.smooth(vertices, indices, smooth_lmd, smooth_scheme, bound)
            pyredner.smooth(vertices, indices, -smooth_lmd, smooth_scheme, bound)

    print("{:.^10}total_loss:{:.6f}...img_loss:{:.6f}...smooth_loss:{:.6f}".format(t, total_loss, img_loss, smooth_loss))

print()
m = pyredner.Material(diffuse_reflectance=torch.tensor([0.5, 0.5, 0.5]))
obj = pyredner.Object(vertices=vertices, indices=indices, normals=normals, material=m)  # , colors=colors)
pyredner.save_obj(obj, output_path + '/final.obj')

if num_iters_2 > 0:
    for t in range(num_iters_2):
        total_loss = 0
        normals = pyredner.compute_vertex_normal(vertices, indices, normal_scheme)

        for i in range(len(cam_poses)):
            cam_pos = torch.tensor(cam_poses[i])
            dir_light_direction = torch.tensor(dir_light_directions[i % len(dir_light_directions)])
            img = model(cam_pos, cam_look_at, vertices, ambient_color, dir_light_intensity, dir_light_direction, normals)
            loss = (img - target[i]).pow(2).mean()
            losses[i].append(loss.data.item())
            total_loss += loss
Example No. 14
    def render(self, scene, svbrdf):
        imgs = []

        svbrdf = svbrdf.unsqueeze(0) if len(svbrdf.shape) == 3 else svbrdf

        sensor_size = (svbrdf.shape[-1], svbrdf.shape[-2])

        for svbrdf_single in torch.split(svbrdf, 1, dim=0):
            normals, diffuse, roughness, specular = utils.unpack_svbrdf(
                svbrdf_single.squeeze(0))
            # Redner expects the normal map to be in range [0, 1]
            normals = utils.encode_as_unit_interval(normals)
            # Redner expects the roughness to have one channel only.
            # We also need to convert from GGX roughness to Blinn-Phong power.
            # See: https://github.com/iondune/csc473/blob/master/lectures/07-cook-torrance.md
            roughness = torch.mean(torch.clamp(roughness, min=0.001),
                                   dim=0,
                                   keepdim=True)**4

            # Convert from [c,h,w] to [h,w,c] for redner
            normals = normals.permute(1, 2, 0)
            diffuse = diffuse.permute(1, 2, 0)
            roughness = roughness.permute(1, 2, 0)
            specular = specular.permute(1, 2, 0)

            material = pyredner.Material(
                diffuse_reflectance=pyredner.Texture(
                    diffuse.to(self.redner_device)),
                specular_reflectance=pyredner.Texture(
                    specular.to(self.redner_device)),
                roughness=pyredner.Texture(roughness.to(self.redner_device)),
                normal_map=pyredner.Texture(normals.to(self.redner_device)))

            material_patch = pyredner.Object(vertices=self.patch_vertices,
                                             uvs=self.patch_uvs,
                                             indices=self.patch_indices,
                                             material=material)

            # Define the camera parameters (focused at the middle of the patch) and make sure we always have a valid 'up' direction
            position = np.array(scene.camera.pos)
            lookat = np.array([0.0, 0.0, 0.0])
            cz = lookat - position  # Principal axis
            up = np.array([0.0, 0.0, 1.0])
            if np.linalg.norm(np.cross(cz, up)) == 0.0:
                up = np.array([0.0, 1.0, 0.0])

            camera = pyredner.Camera(
                position=torch.FloatTensor(position).to(self.redner_device),
                look_at=torch.FloatTensor(lookat).to(self.redner_device),
                up=torch.FloatTensor(up).to(self.redner_device),
                fov=torch.FloatTensor([90]),
                resolution=sensor_size,
                camera_type=self.camera_type)

            # # The deferred rendering path.
            # # It does not have a specular model and therefore is of limited usability for us
            # full_scene = pyredner.Scene(camera = camera, objects = [material_patch])
            # light = pyredner.PointLight(position = torch.tensor(scene.light.pos).to(self.redner_device),
            #                                    intensity = torch.tensor(scene.light.color).to(self.redner_device))
            # img = pyredner.render_deferred(scene = full_scene, lights = [light])

            light = pyredner.generate_quad_light(
                position=torch.Tensor(scene.light.pos).to(self.redner_device),
                look_at=torch.zeros(3).to(self.redner_device),
                size=torch.Tensor([0.6, 0.6]).to(self.redner_device),
                intensity=torch.Tensor(scene.light.color).to(
                    self.redner_device))
            full_scene = pyredner.Scene(camera=camera,
                                        objects=[material_patch, light])
            img = pyredner.render_pathtracing(full_scene, num_samples=(16, 8))

            # Transform the rendered image back to something torch can interpret
            imgs.append(img.permute(2, 0, 1).to(svbrdf.device))

        return torch.stack(imgs)
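A rough call sketch, assuming the packed SVBRDF is a 12-channel [c, h, w] tensor (normals, diffuse, roughness, specular, in the order `utils.unpack_svbrdf` returns them) and that `scene_desc` carries the `camera.pos` and `light.pos`/`light.color` fields read above; `renderer` is a hypothetical instance of this class:

svbrdf = torch.rand(12, 256, 256)              # hypothetical packed maps
renders = renderer.render(scene_desc, svbrdf)  # -> tensor of shape [1, 3, 256, 256]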
Example No. 15
checkerboard_texture = pyredner.imread('scenes/teapot.png')
if pyredner.get_use_gpu():
    checkerboard_texture = checkerboard_texture.cuda(device = pyredner.get_device())

mat_checkerboard = pyredner.Material(\
    diffuse_reflectance = checkerboard_texture)
mat_black = pyredner.Material(\
    diffuse_reflectance = torch.tensor([0.0, 0.0, 0.0], device = pyredner.get_device()))

plane = pyredner.Object(vertices = torch.tensor([[-1.0,-1.0, 0.0],
                                                 [-1.0, 1.0, 0.0],
                                                 [ 1.0,-1.0, 0.0],
                                                 [ 1.0, 1.0, 0.0]],
                                                 device = pyredner.get_device()),
                        indices = torch.tensor([[0, 1, 2],
                                                [1, 3, 2]],
                                               dtype = torch.int32,
                                               device = pyredner.get_device()),
                        uvs = torch.tensor([[0.05, 0.05],
                                            [0.05, 0.95],
                                            [0.95, 0.05],
                                            [0.95, 0.95]], device = pyredner.get_device()),
                        material = mat_checkerboard)
scene = pyredner.Scene(camera=cam, objects=[plane])
img = pyredner.render_albedo(scene=scene)
pyredner.imwrite(img.cpu(), 'results/test_camera_distortion/target.exr')
pyredner.imwrite(img.cpu(), 'results/test_camera_distortion/target.png')
# Read the target image we just saved.
target = pyredner.imread('results/test_camera_distortion/target.exr')
if pyredner.get_use_gpu():
    target = target.cuda(device = pyredner.get_device())