def model(translation, euler_angles):
    # Get the rotation matrix from Euler angles
    rotation_matrix = pyredner.gen_rotate_matrix(euler_angles)
    # Shift the vertices to the center, apply rotation matrix,
    # shift back to the original space, then apply the translation.
    for obj, v in zip(objects, vertices):
        obj.vertices = (
            v - center) @ torch.t(rotation_matrix) + center + translation
    # Assemble the 3D scene.
    scene = pyredner.Scene(camera=camera, objects=objects)
    # Render the scene.
    img = pyredner.render_albedo(scene)
    return img
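Because pyredner.render_albedo is differentiable, model can be plugged straight into a PyTorch optimization loop. A minimal sketch, assuming a target image rendered at the true pose and the same objects, vertices, center, and camera globals used above:

# Hypothetical pose-fitting loop (not part of the original example).
translation = torch.zeros(3, device=pyredner.get_device(), requires_grad=True)
euler_angles = torch.zeros(3, requires_grad=True)
optimizer = torch.optim.Adam([translation, euler_angles], lr=1e-2)
for t in range(200):
    optimizer.zero_grad()
    img = model(translation, euler_angles)
    # L2 image loss against the target render.
    loss = (img - target).pow(2).mean()
    loss.backward()
    optimizer.step()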
Example #2
def model(cam_pos, cam_look_at, shape_coeffs, color_coeffs, resolution, center,
          all_euler_angles, all_translations):
    # First rotate around the center, then translate

    imgs = []

    #obj = pyredner.load_obj('p_ones30/final.obj', return_objects=True)[0]
    vertices, indices, uvs, normals = pyredner.generate_sphere(128, 64)
    vertices *= 80
    m = pyredner.Material(
        diffuse_reflectance=torch.ones(2, 2, 3, dtype=torch.float32))
    obj = pyredner.Object(vertices=vertices,
                          indices=indices,
                          normals=normals,
                          uvs=uvs,
                          material=m)
    v = obj.vertices.clone()

    for i in range(len(all_translations)):
        rotation_matrix = pyredner.gen_rotate_matrix(all_euler_angles[i]).to(
            pyredner.get_device())
        center = center.to(pyredner.get_device())
        # vertices = ((shape_mean + shape_basis @ shape_coeffs).view(-1, 3) - center) @ torch.t(rotation_matrix) + center + all_translations[i].to(pyredner.get_device())
        obj.vertices = (v - center) @ torch.t(rotation_matrix) + center
        obj.normals = pyredner.compute_vertex_normal(obj.vertices, indices)
        # colors = (color_mean + color_basis @ color_coeffs).view(-1, 3)
        # m = pyredner.Material(diffuse_reflectance = torch.tensor([0.5, 0.5, 0.5]))
        m = pyredner.Material(use_vertex_color=True)
        # obj = pyredner.Object(vertices=vertices, indices=indices, normals=normals, material=m, colors=colors)

        if i == 0:
            pyredner.save_obj(obj,
                              "generated/env_dataset_" + name + '/tgt_obj.obj')

        cam = pyredner.Camera(
            position=cam_pos,
            look_at=cam_look_at,  # Center of the vertices
            up=torch.tensor([0.0, 1.0, 0.0]),
            fov=torch.tensor([45.0]),
            resolution=resolution)
        scene = pyredner.Scene(camera=cam, objects=[obj], envmap=envmap)

        img = pyredner.render_pathtracing(scene=scene, num_samples=(128, 4))
        imgs.append(img)
    return imgs
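One plausible way to consume the returned list, assuming the camera pose, coefficient, and per-view pose arguments are already defined and the same name directory as the saved mesh (pyredner.imwrite applies gamma correction by default when writing .png):

# Hypothetical usage: render all views and save them next to the target mesh.
imgs = model(cam_pos, cam_look_at, shape_coeffs, color_coeffs, resolution,
             center, all_euler_angles, all_translations)
for i, img in enumerate(imgs):
    pyredner.imwrite(img.cpu(),
                     'generated/env_dataset_' + name + '/tgt_img{:02d}.png'.format(i))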
Example #3
def _model(self):
    # Get the rotation matrix from Euler angles
    rotation_matrix = pyredner.gen_rotate_matrix(self.euler_angles)
    self.euler_angles.retain_grad()
    # Shift the vertices to the center, apply rotation matrix,
    # shift back to the original space, then apply the translation.
    vertices = []
    if self.attack_type == "CW":
        for m, shape in zip(self.modifiers, self.shapes):
            shape_v = tanh_rescale(
                torch_arctanh(shape.vertices.clone().detach()) -
                m.clone().detach() + m)
            shape.vertices = (shape_v - self.center) @ torch.t(
                rotation_matrix) + self.center + self.translation
            shape.vertices.retain_grad()
            shape.vertices.register_hook(set_grad(shape.vertices))
            shape.normals = pyredner.compute_vertex_normal(
                shape.vertices, shape.indices)
            vertices.append(shape.vertices.clone().detach())
    else:
        for shape in self.shapes:
            shape_v = shape.vertices.clone().detach()
            shape.vertices = (shape_v - self.center) @ torch.t(
                rotation_matrix) + self.center + self.translation
            shape.vertices.retain_grad()
            shape.vertices.register_hook(set_grad(shape.vertices))
            shape.normals = pyredner.compute_vertex_normal(
                shape.vertices, shape.indices)
            vertices.append(shape.vertices.clone().detach())
    self.center = torch.mean(torch.cat(vertices), 0)
    # Assemble the 3D scene.
    scene = pyredner.Scene(camera=self.camera,
                           shapes=self.shapes,
                           materials=self.materials)
    # Render the scene.
    img = pyredner.render_deferred(scene, lights=[self.light], alpha=True)
    return img
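torch_arctanh, tanh_rescale, and set_grad are project helpers not shown here. A minimal sketch of plausible definitions, following the standard Carlini-Wagner change of variables (an assumption, not this project's actual code):

def torch_arctanh(x, eps=1e-6):
    # Inverse of tanh; eps keeps the argument strictly inside (-1, 1).
    x = x * (1.0 - eps)
    return 0.5 * torch.log((1.0 + x) / (1.0 - x))

def tanh_rescale(x, x_min=-1.0, x_max=1.0):
    # Map an unconstrained tensor back into [x_min, x_max] via tanh.
    return (torch.tanh(x) + 1.0) * 0.5 * (x_max - x_min) + x_min

def set_grad(var):
    # Hook factory: stores the gradient flowing into `var` for later inspection.
    def hook(grad):
        var.grad = grad
    return hook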
Example #4
target_normal = pyredner.imread('results/test_g_buffer/target_normal.exr')
if pyredner.get_use_gpu():
    target_depth = target_depth.cuda()
    target_normal = target_normal.cuda()

# Perturb the teapot by applying a translation and a rotation to the object
translation_params = torch.tensor([0.1, -0.1, 0.1],
                                  device=pyredner.get_device(),
                                  requires_grad=True)
translation = translation_params * 100.0
euler_angles = torch.tensor([0.1, -0.1, 0.1], requires_grad=True)
# These are the vertices we want to apply the transformation to
shape0_vertices = shapes[0].vertices.clone()
shape1_vertices = shapes[1].vertices.clone()
# We can use pyredner.gen_rotate_matrix to generate 3x3 rotation matrices
rotation_matrix = pyredner.gen_rotate_matrix(euler_angles)
if pyredner.get_use_gpu():
    rotation_matrix = rotation_matrix.cuda()
center = torch.mean(torch.cat([shape0_vertices, shape1_vertices]), 0)
# Shift the vertices to the center, apply rotation matrix,
# shift back to the original space
shapes[0].vertices = \
    (shape0_vertices - center) @ torch.t(rotation_matrix) + \
    center + translation
shapes[1].vertices = \
    (shape1_vertices - center) @ torch.t(rotation_matrix) + \
    center + translation
# Since we changed the vertices, we need to regenerate the shading normals
shapes[0].normals = pyredner.compute_vertex_normal(shapes[0].vertices,
                                                   shapes[0].indices)
shapes[1].normals = pyredner.compute_vertex_normal(shapes[1].vertices,
                                                   shapes[1].indices)
Example #5
# tra_optimizer = torch.optim.SGD([trans], lr=2000)
# eul_optimizer = torch.optim.Adam([euler], lr=0.02)
cam = pyredner.Camera(
    position=cam_pos,
    look_at=cam_look_at,  # Center of the vertices
    up=torch.tensor([0.0, 1.0, 0.0]),
    fov=torch.tensor([20.0]),
    resolution=resolution)
for i in range(num_views):
    print("correcting position {:0>2d}".format(i))
    eul_optimizer = torch.optim.SGD([euler], lr=2)
    tra_optimizer = torch.optim.SGD([trans], lr=5000)
    for t in range(20):
        eul_optimizer.zero_grad()
        tra_optimizer.zero_grad()
        rotation_matrix = pyredner.gen_rotate_matrix(euler)
        obj.vertices = (vertices - center) @ torch.t(rotation_matrix) \
                       + center + trans * torch.tensor([1., 1., 3.], device=pyredner.get_device())
        scene = pyredner.Scene(objects=[obj], camera=cam, envmap=envmap)
        img = pyredner.render_pathtracing(scene=scene,
                                          num_samples=(64, 4),
                                          use_secondary_edge_sampling=True)
        loss = (img - target[i]).pow(2).mean()
        loss.backward()
        eul_optimizer.step()
        tra_optimizer.step()
        if t % 2 == 1:
            print('    iteration', t, 'loss:{:.6f}'.format(loss),
                  euler.data.cpu(),
                  trans.data.cpu() * torch.tensor([1., 1., 3.]))
Example #6
# Now we want to generate the initial guess.
# We want to rotate and translate the teapot. We do this by declaring
# PyTorch tensors of translation and rotation parameters,
# then apply them to all teapot vertices.
# The translation and rotation parameters have very different ranges, so we normalize them
# by multiplying the translation parameters by 100 to map to the actual translation amounts.
translation_params = torch.tensor([0.1, -0.1, 0.1],
    device = pyredner.get_device(), requires_grad=True)
translation = translation_params * 100.0
euler_angles = torch.tensor([0.1, -0.1, 0.1], requires_grad=True)
# We obtain the teapot vertices we want to apply the transformation on.
shape0_vertices = shapes[0].vertices.clone()
shape1_vertices = shapes[1].vertices.clone()
# We can use pyredner.gen_rotate_matrix to generate 3x3 rotation matrices
rotation_matrix = pyredner.gen_rotate_matrix(euler_angles)
if pyredner.get_use_gpu():
    rotation_matrix = rotation_matrix.cuda()
center = torch.mean(torch.cat([shape0_vertices, shape1_vertices]), 0)
# We shift the vertices to the center, apply rotation matrix,
# then shift back to the original space.
shapes[0].vertices = \
    (shape0_vertices - center) @ torch.t(rotation_matrix) + \
    center + translation
shapes[1].vertices = \
    (shape1_vertices - center) @ torch.t(rotation_matrix) + \
    center + translation
# Since we changed the vertices, we need to regenerate the shading normals
shapes[0].normals = pyredner.compute_vertex_normal(shapes[0].vertices, shapes[0].indices)
shapes[1].normals = pyredner.compute_vertex_normal(shapes[1].vertices, shapes[1].indices)
# We need to serialize the scene again to get the new arguments.
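A sketch of that re-serialization, using the same pyredner.RenderFunction.serialize_scene call that appears in Example #7 (the sample counts here are illustrative):

args = pyredner.RenderFunction.serialize_scene(
    scene=scene,
    num_samples=512,
    max_bounces=1)
render = pyredner.RenderFunction.apply
# The first argument is the RNG seed; seed 0 is conventionally the target render.
img = render(1, *args)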
Example #7
render = pyredner.RenderFunction.apply
# Render our target. The first argument is the seed for RNG in the renderer.
img = render(0, *args)
pyredner.imwrite(img.cpu(), 'results/test_bunny_box/target.exr')
pyredner.imwrite(img.cpu(), 'results/test_bunny_box/target.png')
target = pyredner.imread('results/test_bunny_box/target.exr')
target = target.cuda(pyredner.get_device())

bunny_vertices = scene.shapes[-1].vertices.clone()
bunny_translation = torch.tensor([0.1, 0.4, 0.1],
                                 device=pyredner.get_device(),
                                 requires_grad=True)
bunny_rotation = torch.tensor([-0.2, 0.1, -0.1],
                              device=pyredner.get_device(),
                              requires_grad=True)
bunny_rotation_matrix = pyredner.gen_rotate_matrix(bunny_rotation)

scene.shapes[-1].vertices = \
    (bunny_vertices-torch.mean(bunny_vertices, 0))@torch.t(bunny_rotation_matrix) + \
    torch.mean(bunny_vertices, 0) + bunny_translation
args = pyredner.RenderFunction.serialize_scene(
    scene=scene,
    num_samples=1024,
    max_bounces=6)
img = render(1, *args)
pyredner.imwrite(img.cpu(), 'results/test_bunny_box/init.exr')
pyredner.imwrite(img.cpu(), 'results/test_bunny_box/init.png')

optimizer = torch.optim.Adam([bunny_translation, bunny_rotation], lr=1e-2)
for t in range(200):
    print('iteration:', t)
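The listing is cut off here; what follows is a sketch of how such an iteration typically continues, modeled on the loops in Examples #1 and #5 rather than the original continuation (bunny_center is an illustrative name):

    optimizer.zero_grad()
    # Re-apply the current pose estimate to the bunny vertices.
    bunny_rotation_matrix = pyredner.gen_rotate_matrix(bunny_rotation)
    bunny_center = torch.mean(bunny_vertices, 0)
    scene.shapes[-1].vertices = \
        (bunny_vertices - bunny_center) @ torch.t(bunny_rotation_matrix) + \
        bunny_center + bunny_translation
    args = pyredner.RenderFunction.serialize_scene(
        scene=scene, num_samples=4, max_bounces=6)
    img = render(t + 1, *args)  # fresh RNG seed each iteration
    loss = (img - target).pow(2).mean()
    loss.backward()
    optimizer.step()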