Example #1
def model(cam_pos, cam_look_at, vertices, color_coeffs, ambient_color,
          dir_light_intensity, dir_light_direction):
    #vertices = (shape_mean + shape_basis @ torch.zeros(199, device=pyredner.get_device())).view(-1, 3)
    normals = pyredner.compute_vertex_normal(vertices, indices)
    colors = (color_mean + color_basis @ color_coeffs).view(-1, 3)
    #m = pyredner.Material(use_vertex_color=True)
    m = pyredner.Material(diffuse_reflectance=torch.tensor([0.5, 0.5, 0.5]))
    obj = pyredner.Object(vertices=vertices,
                          indices=indices,
                          normals=normals,
                          material=m,
                          colors=colors)

    cam = pyredner.Camera(
        position=cam_pos,
        look_at=cam_look_at,  # Center of the vertices
        up=torch.tensor([0.0, 1.0, 0.0]),
        fov=torch.tensor([45.0]),
        resolution=(512, 512))
    scene = pyredner.Scene(camera=cam, objects=[obj])
    ambient_light = pyredner.AmbientLight(ambient_color)
    dir_light = pyredner.DirectionalLight(dir_light_direction,
                                          dir_light_intensity)
    img = pyredner.render_deferred(scene=scene,
                                   lights=[ambient_light, dir_light])
    return img, obj
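
A minimal driver for this model, as a sketch: it assumes the 3DMM globals (`indices`, `color_mean`, `color_basis`, `shape_mean`, `shape_basis`) are in scope, and the coefficient size 199 comes from the commented-out line above; all other values are illustrative.

vertices = (shape_mean + shape_basis @ torch.zeros(
    199, device=pyredner.get_device())).view(-1, 3)  # mean face
cam_pos = torch.tensor([0.0, 0.0, 500.0], requires_grad=True)
cam_look_at = torch.tensor([0.0, 0.0, 0.0])
color_coeffs = torch.zeros(199, device=pyredner.get_device(),
                           requires_grad=True)
ambient_color = 0.3 * torch.ones(3, device=pyredner.get_device())
dir_light_intensity = torch.ones(3, device=pyredner.get_device())
dir_light_direction = torch.tensor([0.0, 0.0, -1.0],
                                   device=pyredner.get_device())

img, obj = model(cam_pos, cam_look_at, vertices, color_coeffs, ambient_color,
                 dir_light_intensity, dir_light_direction)
pyredner.imwrite(img.cpu(), 'results/target.png')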
Example #2
def model(cam_poses, cam_look_at, shape_coeffs, color_coeffs, lights_list,
          resolution):
    vertices = (shape_mean + shape_basis @ shape_coeffs).view(-1, 3)
    normals = pyredner.compute_vertex_normal(vertices, indices)
    colors = (color_mean + color_basis @ color_coeffs).view(-1, 3)
    #m = pyredner.Material(diffuse_reflectance = torch.tensor([0.5, 0.5, 0.5]))
    m = pyredner.Material(use_vertex_color=True)
    obj = pyredner.Object(vertices=vertices,
                          indices=indices,
                          normals=normals,
                          material=m,
                          colors=colors)

    imgs = []
    for i in range(cam_poses.size(0)):
        cam = pyredner.Camera(
            position=cam_poses[i],
            look_at=cam_look_at,  # Center of the vertices
            up=torch.tensor([0.0, 1.0, 0.0]),
            fov=torch.tensor([45.0]),
            resolution=resolution)
        scene = pyredner.Scene(camera=cam, objects=[obj])
        #ambient_light = pyredner.AmbientLight(ambient_color)
        #dir_light = pyredner.DirectionalLight(dir_light_direction, dir_light_intensity)
        img = pyredner.render_deferred(scene=scene,
                                       lights=lights_list[i %
                                                          len(lights_list)],
                                       aa_samples=1)
        imgs.append(img)
    return imgs, obj
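
Driving the batched version could look like the sketch below (pose and light values are illustrative). Because the loop indexes `lights_list[i % len(lights_list)]`, it is fine to pass fewer light setups than camera poses; each entry is itself a list of lights.

cam_poses = torch.tensor([[0.0, 0.0, 500.0],
                          [150.0, 0.0, 480.0],
                          [-150.0, 0.0, 480.0]])
cam_look_at = torch.tensor([0.0, 0.0, 0.0])
shape_coeffs = torch.zeros(199, device=pyredner.get_device(),
                           requires_grad=True)
color_coeffs = torch.zeros(199, device=pyredner.get_device(),
                           requires_grad=True)
lights_list = [
    [pyredner.AmbientLight(0.3 * torch.ones(3))],
    [pyredner.DirectionalLight(
        torch.tensor([0.0, 0.0, -1.0], device=pyredner.get_device()),
        torch.ones(3, device=pyredner.get_device()))],
]
imgs, obj = model(cam_poses, cam_look_at, shape_coeffs, color_coeffs,
                  lights_list, (256, 256))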
Example #3
def recompute_normals(obj: pyr.Object):
    """
    Recomputes smooth shading vertex normals for obj, and sets them
    accordingly.

    :param obj: A PyRedner object
    """
    obj.normals = pyr.compute_vertex_normal(obj.vertices.detach(),
                                            obj.indices.detach(), 'cotangent')
    obj.normal_indices = None
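
A hypothetical usage, assuming `pyr` is the `import pyredner as pyr` alias implied by the type hint:

import pyredner as pyr

obj = pyr.load_obj('scenes/teapot.obj', return_objects=True)[0]
obj.vertices = obj.vertices * 1.2  # any edit that invalidates the old normals
recompute_normals(obj)

Detaching the vertices and indices keeps the normal computation out of the autograd graph, and setting normal_indices to None tells pyredner that the new normals are per-vertex, indexed by the same triangle indices as the positions.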
Example #4
    def _model(self):
        # Get the rotation matrix from Euler angles
        rotation_matrix = pyredner.gen_rotate_matrix(self.euler_angles)
        self.euler_angles.retain_grad()
        # Shift the vertices to the center, apply rotation matrix,
        # shift back to the original space, then apply the translation.
        vertices = []
        if self.attack_type == "CW":
            for m, shape in zip(self.modifiers, self.shapes):
                shape_v = tanh_rescale(
                    torch_arctanh(shape.vertices.clone().detach()) -
                    m.clone().detach() + m)
                shape.vertices = (shape_v - self.center) @ torch.t(
                    rotation_matrix) + self.center + self.translation
                shape.vertices.retain_grad()
                shape.vertices.register_hook(set_grad(shape.vertices))
                shape.normals = pyredner.compute_vertex_normal(
                    shape.vertices, shape.indices)
                vertices.append(shape.vertices.clone().detach())
        else:
            for shape in self.shapes:
                shape_v = shape.vertices.clone().detach()
                shape.vertices = (shape_v - self.center) @ torch.t(
                    rotation_matrix) + self.center + self.translation
                shape.vertices.retain_grad()
                shape.vertices.register_hook(set_grad(shape.vertices))
                shape.normals = pyredner.compute_vertex_normal(
                    shape.vertices, shape.indices)
                vertices.append(shape.vertices.clone().detach())
        self.center = torch.mean(torch.cat(vertices), 0)
        # Assemble the 3D scene.
        scene = pyredner.Scene(camera=self.camera,
                               shapes=self.shapes,
                               materials=self.materials)
        # Render the scene.
        img = pyredner.render_deferred(scene, lights=[self.light], alpha=True)
        return img
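
This snippet relies on three helpers defined elsewhere in its project. Common definitions, shown here as an assumption (the Carlini-Wagner change of variables plus a gradient-capture hook), look like:

import torch

def torch_arctanh(x, eps=1e-6):
    # Inverse tanh; shrink the argument so it stays strictly inside (-1, 1).
    x = x * (1.0 - eps)
    return 0.5 * torch.log((1.0 + x) / (1.0 - x))

def tanh_rescale(x, x_min=-1.0, x_max=1.0):
    # Squash an unconstrained tensor back into [x_min, x_max].
    return (torch.tanh(x) + 1.0) * 0.5 * (x_max - x_min) + x_min

def set_grad(var):
    # Return a hook that stores the incoming gradient on the given tensor.
    def hook(grad):
        var.grad = grad
    return hook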
Example #5
def model(cam_pos, cam_look_at, shape_coeffs, color_coeffs, resolution, center,
          all_euler_angles, all_translations):
    # First rotate around center, then translation

    imgs = []

    #obj = pyredner.load_obj('p_ones30/final.obj', return_objects=True)[0]
    vertices, indices, uvs, normals = pyredner.generate_sphere(128, 64)
    vertices *= 80
    m = pyredner.Material(
        diffuse_reflectance=torch.ones(2, 2, 3, dtype=torch.float32))
    obj = pyredner.Object(vertices=vertices,
                          indices=indices,
                          normals=normals,
                          uvs=uvs,
                          material=m)
    v = obj.vertices.clone()

    for i in range(len(all_translations)):
        rotation_matrix = pyredner.gen_rotate_matrix(all_euler_angles[i]).to(
            pyredner.get_device())
        center = center.to(pyredner.get_device())
        # vertices = ((shape_mean + shape_basis @ shape_coeffs).view(-1, 3) - center) @ torch.t(rotation_matrix) + center + all_translations[i].to(pyredner.get_device())
        obj.vertices = (v - center) @ torch.t(rotation_matrix) + center
        obj.normals = pyredner.compute_vertex_normal(obj.vertices, indices)
        # colors = (color_mean + color_basis @ color_coeffs).view(-1, 3)
        # m = pyredner.Material(diffuse_reflectance = torch.tensor([0.5, 0.5, 0.5]))
        m = pyredner.Material(use_vertex_color=True)
        # obj = pyredner.Object(vertices=vertices, indices=indices, normals=normals, material=m, colors=colors)

        if i == 0:
            pyredner.save_obj(obj,
                              "generated/env_dataset_" + name + '/tgt_obj.obj')

        cam = pyredner.Camera(
            position=cam_pos,
            look_at=cam_look_at,  # Center of the vertices
            up=torch.tensor([0.0, 1.0, 0.0]),
            fov=torch.tensor([45.0]),
            resolution=resolution)
        scene = pyredner.Scene(camera=cam, objects=[obj], envmap=envmap)

        img = pyredner.render_pathtracing(scene=scene, num_samples=(128, 4))
        imgs.append(img)
    return imgs
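
The update inside the loop is the usual rotate-about-a-pivot composition, spelled out below for reference (illustrative only; the snippet multiplies row-vector vertices from the left, hence the transpose):

# For a row-vector vertex v, pivot c and rotation matrix R, the loop computes
#   v' = (v - c) @ R.T + c
# which in column-vector convention is v' = R @ (v - c) + c, i.e. translate
# the pivot to the origin, rotate, then translate back (plus an optional
# translation t, as in the commented-out line above).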
Example #6
def model(cam_poses, cam_look_ats, shape_coeffs, color_coeffs, resolution):
    # First rotate around center, then translation

    imgs = []

    vertices = (shape_mean + shape_basis @ shape_coeffs).view(-1, 3)
    normals = pyredner.compute_vertex_normal(vertices, indices)
    colors = (color_mean + color_basis @ color_coeffs).view(-1, 3)
    m = pyredner.Material(use_vertex_color=False,
                          specular_reflectance=torch.tensor(
                              [1., 1., 1.], device=pyredner.get_device()),
                          roughness=torch.tensor([0.02]))
    obj = pyredner.Object(vertices=vertices,
                          indices=indices,
                          normals=normals,
                          material=m,
                          colors=colors)
    obj = pyredner.load_obj('generated/env_dataset_oness_n/tgt_obj.obj',
                            return_objects=True)[0]
    obj.material.specular_reflectance = pyredner.Texture(
        torch.tensor([0.05, 0.05, 0.05], device=pyredner.get_device()))
    obj.material.roughness = pyredner.Texture(torch.tensor([0.02]))
    pyredner.save_obj(obj, "generated/senv_dataset_" + name + '/tgt_obj.obj')

    for i in range(len(cam_poses)):
        cam = pyredner.Camera(
            position=cam_poses[i],
            look_at=cam_look_ats[i %
                                 len(cam_look_ats)],  # Center of the vertices
            up=torch.tensor([0.0, 1.0, 0.0]),
            fov=torch.tensor([45.0]),
            resolution=resolution)
        scene = pyredner.Scene(camera=cam, objects=[obj], envmap=envmap)

        img = pyredner.render_pathtracing(scene=scene, num_samples=(128, 4))
        imgs.append(img)
    return imgs
Example #7
cam = pyredner.Camera(
    position=torch.tensor([0.0, 0.0, -5.0]),
    look_at=torch.tensor([0.0, 0.0, 0.0]),
    up=torch.tensor([0.0, 1.0, 0.0]),
    fov=torch.tensor([45.0]),  # in degrees
    clip_near=1e-2,  # needs to be > 0
    resolution=(256, 256),
    fisheye=False)

#%%
material_map, mesh_list, light_map = pyredner.load_obj('diamond.obj')
# The mesh we loaded is relatively low-poly and doesn't have vertex normals.
# Fortunately we can compute the vertex normals from the neighboring vertices.
# We can use pyredner.compute_vertex_normal for this task:
# (Try commenting out the following two lines to see the differences in target images!)
for _, mesh in mesh_list:
    mesh.normals = pyredner.compute_vertex_normal(mesh.vertices / 20,
                                                  mesh.indices)
    print(_)  # None

diffuse_reflectance = torch.tensor([0.0, 1.0, 0.0],
                                   device=pyredner.get_device(),
                                   requires_grad=True)

mat_grey = pyredner.Material(diffuse_reflectance)

# The material list of the scene #
materials = [mat_grey]

# Now we build a list of shapes using the list loaded from the Wavefront object file.
# Meshes loaded from .obj files may have different indices for uvs and normals,
# we use mesh.uv_indices and mesh.normal_indices to access them.
# This mesh does not have normal_indices so the value is None.
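
The trailing comment describes the shape-building step without showing it. Example #16 later on this page performs the same conversion, so a sketch of the continuation (assuming `material_id_map` maps material names to indices into `materials`) is:

shapes = []
for mtl_name, mesh in mesh_list:
    shapes.append(
        pyredner.Shape(vertices=mesh.vertices,
                       indices=mesh.indices,
                       material_id=material_id_map[mtl_name],
                       uvs=mesh.uvs,
                       normals=mesh.normals,
                       uv_indices=mesh.uv_indices))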
Example #8
import pyredner
import redner
import numpy as np
import torch
import skimage.transform

# Optimize depth and normal of a teapot

# Use GPU if available
pyredner.set_use_gpu(torch.cuda.is_available())

# Set up the pyredner scene for rendering:
material_map, mesh_list, light_map = pyredner.load_obj('scenes/teapot.obj')
for _, mesh in mesh_list:
    mesh.normals = pyredner.compute_vertex_normal(mesh.vertices, mesh.indices)

# Setup camera
cam = pyredner.Camera(
    position=torch.tensor([0.0, 30.0, 200.0]),
    look_at=torch.tensor([0.0, 30.0, 0.0]),
    up=torch.tensor([0.0, 1.0, 0.0]),
    fov=torch.tensor([45.0]),  # in degrees
    clip_near=1e-2,  # needs to be > 0
    resolution=(256, 256),
    fisheye=False)

# Setup materials
material_id_map = {}
materials = []
count = 0
for key, value in material_map.items():
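The snippet is cut off mid-loop; Example #16 below registers materials with this exact pattern, so the body presumably continues as:

    material_id_map[key] = count
    count += 1
    materials.append(value)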
Example #9
                         [0, 2.8, 1.1],
                         [4.1, 6.8, 2.1]], dtype=torch.float32, device=pyredner.get_device(), requires_grad=True)
indices = torch.tensor([[0, 1, 2],
                        [2, 1, 3]], dtype=torch.int32, device=pyredner.get_device())
'''
obj = pyredner.load_obj('cube.obj', return_objects=True)[0]
vertices = obj.vertices * 8. - 4.
vertices.requires_grad = True
indices = obj.indices
tgt_vertices = vertices.detach() * 1.1 + 0.5 * torch.randn(
    vertices.shape, device=pyredner.get_device())
print(tgt_vertices)
cam_poses = torch.tensor([[0, 0, 20], [-12, 0, 16], [12, 0, 16]],
                         dtype=torch.float32,
                         device=pyredner.get_device(),
                         requires_grad=False)
cam_look_at = torch.tensor([0, 0, 0],
                           dtype=torch.float32,
                           device=pyredner.get_device(),
                           requires_grad=False)
dir_light_direction = torch.tensor([-0.0, -0.0, -1.0],
                                   device=pyredner.get_device(),
                                   requires_grad=False)
dir_light_intensity = torch.ones(3,
                                 device=pyredner.get_device(),
                                 requires_grad=False)
normals = pyredner.compute_vertex_normal(tgt_vertices, indices, normal_scheme)
print("finish loading")

def model(cam_pos, cam_look_at, vertices, ambient_color, dir_light_intensity,
          dir_light_direction, normals):
    # normals = pyredner.compute_vertex_normal(vertices, indices, normal_scheme)
    m = pyredner.Material(diffuse_reflectance=torch.tensor([0.5, 0.5, 0.5]))
    obj = pyredner.Object(vertices=vertices,
                          indices=indices,
                          normals=normals,
                          material=m)  # , colors=colors

    cam = pyredner.Camera(position=cam_pos,
                          look_at=cam_look_at,  # Center of the vertices
                          up=torch.tensor([0.0, 1.0, 0.0]),
                          fov=torch.tensor([45.0]),
                          resolution=(1000, 1000))
    scene = pyredner.Scene(camera=cam, objects=[obj])
    ambient_light = pyredner.AmbientLight(ambient_color)
    dir_light = pyredner.DirectionalLight(dir_light_direction, dir_light_intensity)
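
The function body is truncated here; by analogy with Example #1 it presumably finishes by rendering with both lights and returning the image:

    img = pyredner.render_deferred(scene=scene,
                                   lights=[ambient_light, dir_light])
    return img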
Example #10
cam3 = pyredner.Camera(position=torch.tensor([2.5, 0.0, -3.0]),
                       look_at=torch.tensor([0.0, 0.0, -3.0]),
                       up=torch.tensor([0.0, 1.0, 0.0]),
                       camera_type=redner.CameraType.perspective,
                       fov=torch.tensor([45.0]),
                       clip_near=1e-2,  # needs to be > 0
                       resolution=(512, 512),
                       fisheye=False)

for obj in range(1, num_iters):
    target_obj1 = pyredner.load_obj('results/shadow_art/multitarget/' + step +
                                    '/iter_' + str(obj) + '.obj',
                                    return_objects=True)

    target_obj1[0].material = pyredner.Material(
        diffuse_reflectance=torch.tensor([1.0, 1.0, 1.0]), two_sided=True)

    target_obj1[0].normals = pyredner.compute_vertex_normal(
        target_obj1[0].vertices, target_obj1[0].indices)

    shapes = []
    shapes.append(target_obj1[0])

    numShapes = len(shapes)
    shapes.extend(lights)

    area_lights = []
    for i in range(numShapes, len(shapes)):
        area_lights.append(
            pyredner.AreaLight(shape_id=numShapes,
                               intensity=torch.tensor([
                                   light_intensity, light_intensity,
                                   light_intensity
                               ])))
        area_lights.append(
            pyredner.AreaLight(shape_id=numShapes,
                               intensity=torch.tensor([
                                   light_intensity * 10, light_intensity * 10,
                                   light_intensity * 10
                               ])))

    scene = pyredner.Scene(cam,
                           objects=[shapes[0], shapes[1]],
                           area_lights=[area_lights[0]],
                           envmap=None)
    scene_intense = pyredner.Scene(cam,
                                   objects=[shapes[0], shapes[1]],
                                   area_lights=[area_lights[1]],
                                   envmap=None)
Example #11
#%%
# The teapot we loaded is relatively low-poly and doesn't have vertex normals.
# Fortunately we can compute the vertex normals from the neighboring vertices.
# We can use pyredner.compute_vertex_normal for this task:
# (Try commenting out the following two lines to see the differences in target images!)

#for _, mesh in mesh_list:
#    mesh.normals = pyredner.compute_vertex_normal(mesh.vertices/2.5, mesh.indices)
#    print (_) # None

#for _, mesh1 in mesh_list1:
#    mesh1.normals = pyredner.compute_vertex_normal(mesh1.vertices/1, mesh1.indices)
#    print (_)

for _, mesh2 in mesh_list2:
    mesh2.normals = pyredner.compute_vertex_normal(mesh2.vertices / 3,
                                                   mesh2.indices)

#for _, mesh3 in mesh_list3:
#    mesh3.normals = pyredner.compute_vertex_normal(mesh3.vertices/5, mesh3.indices)

#diffuse_reflectance_green =torch.tensor([0.0, 1.0, 0.0], device = pyredner.get_device())
diffuse_reflectance_green = torch.tensor([0.65, 0.32, 0.16],
                                         device=pyredner.get_device())
mat_green = pyredner.Material(diffuse_reflectance_green)

#diffuse_reflectance_red =torch.tensor([1.0, 0.0, 0.0], device = pyredner.get_device())
diffuse_reflectance_red = torch.tensor([0.65, 0.32, 0.16],
                                       device=pyredner.get_device())
mat_red = pyredner.Material(diffuse_reflectance_red)

#diffuse_reflectance_blue =torch.tensor([0.0, 0.0, 1.0], device = pyredner.get_device())
Example #12
boundary = vertices.data * (1.0 - bound).reshape(-1, 1).expand(-1, 3)

ver_optimizer = torch.optim.Adam([vertices], lr=0.2e0)
tex_optimizer = torch.optim.Adam([texels], lr=0.03)
eul_optimizer = torch.optim.SGD([euler_list], lr=1 * 3)
tra_optimizer = torch.optim.SGD([trans_list], lr=2000 * 3)
#eul_optimizer = torch.optim.Adam([euler_list], lr=0.005)
#tra_optimizer = torch.optim.Adam([trans_list], lr=0.2)
all_losses, img_losses, smooth_losses, total_losses, all_texels, all_imgs = [], [], [], [], [], []
print((vertices - v).pow(2).mean())
for t in range(num_iters_1):
    eul_optimizer.zero_grad()
    tra_optimizer.zero_grad()
    ver_optimizer.zero_grad()
    tex_optimizer.zero_grad()
    obj.normals = pyredner.compute_vertex_normal(vertices, indices,
                                                 normal_scheme)
    obj.material = pyredner.Material(diffuse_reflectance=texels)
    imgs = []

    for i in range(num_views):
        euler = euler_list[i]
        trans = trans_list[i]
        rotation_matrix = pyredner.gen_rotate_matrix(euler)
        obj.vertices = (vertices - center) @ torch.t(rotation_matrix) \
                       + center + trans * torch.tensor([1., 1., 3.], device=pyredner.get_device())
        scene = pyredner.Scene(objects=[obj], camera=cam, envmap=envmap)
        img = pyredner.render_pathtracing(scene=scene,
                                          num_samples=(32, 4),
                                          use_secondary_edge_sampling=False)
        imgs.append(img)
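
The iteration is cut off after rendering. A plausible continuation, back at the outer-loop level (a sketch: `target_imgs` is hypothetical and stands in for whatever reference images the optimization matches; the smoothness term hinted at by `smooth_losses` is omitted):

    loss = sum(((img - tgt)**2).mean() for img, tgt in zip(imgs, target_imgs))
    loss.backward()
    eul_optimizer.step()
    tra_optimizer.step()
    ver_optimizer.step()
    tex_optimizer.step()
    img_losses.append(loss.item())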
Example #13
# material_map is a dict containing all the materials used in the obj file,
# where the key is the material name and the value is a pyredner.Material.
#
# mesh_list is a list containing all the meshes in the obj file, grouped by
# use_mtl calls. Each element in the list is a tuple of length 2: the first
# item is the material name and the second is a pyredner.TriangleMesh with the
# mesh information.
#
# light_map is a Python dict where the keys are the material names with a
# non-zero Ke, and the values are the Ke values.
material_map, mesh_list, light_map = pyredner.load_obj('teapot.obj')
# The teapot we loaded is relatively low-poly and doesn't have vertex normals.
# Fortunately we can compute the vertex normals from the neighboring vertices.
# We can use pyredner.compute_vertex_normal for this task:
# (Try commenting out the following two lines to see the differences in target images!)
for _, mesh in mesh_list:
    mesh.normals = pyredner.compute_vertex_normal(mesh.vertices, mesh.indices)

# Setup camera
cam = pyredner.Camera(position = torch.tensor([0.0, 30.0, 200.0]),
                      look_at = torch.tensor([0.0, 30.0, 0.0]),
                      up = torch.tensor([0.0, 1.0, 0.0]),
                      fov = torch.tensor([45.0]), # in degrees
                      clip_near = 1e-2, # needs to be > 0
                      resolution = (256, 256),
                      fisheye = False)

# Next, we convert the materials loaded from the Wavefront object files to a
# Python list of material. At the same time we keep track of the id of the materials,
# so that we can assign them to the shapes later.
material_id_map = {}
materials = []
Example #14
objects = pyredner.load_obj('scenes/teapot.obj', return_objects=True)
camera = pyredner.automatic_camera_placement(objects, resolution=(512, 512))
scene = pyredner.Scene(camera=camera, objects=objects)

light = pyredner.PointLight(position=(camera.position + torch.tensor(
    (0.0, 0.0, 100.0))).to(pyredner.get_device()),
                            intensity=torch.tensor(
                                (20000.0, 30000.0, 20000.0),
                                device=pyredner.get_device()))

img = pyredner.render_deferred(scene=scene, lights=[light])
pyredner.imwrite(img.cpu(),
                 'results/test_compute_vertex_normals/no_vertex_normal.exr')

for obj in objects:
    obj.normals = pyredner.compute_vertex_normal(obj.vertices, obj.indices,
                                                 'max')
scene = pyredner.Scene(camera=camera, objects=objects)
img = pyredner.render_deferred(scene=scene, lights=[light])
pyredner.imwrite(img.cpu(),
                 'results/test_compute_vertex_normals/max_vertex_normal.exr')

for obj in objects:
    obj.normals = pyredner.compute_vertex_normal(obj.vertices, obj.indices,
                                                 'cotangent')
scene = pyredner.Scene(camera=camera, objects=objects)
img = pyredner.render_deferred(scene=scene, lights=[light])
pyredner.imwrite(
    img.cpu(),
    'results/test_compute_vertex_normals/cotangent_vertex_normal.exr')
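
To see how much the weighting scheme matters beyond the saved renders, the two normal sets can also be compared directly; a sketch on the same teapot:

import torch
import pyredner

obj = pyredner.load_obj('scenes/teapot.obj', return_objects=True)[0]
n_max = pyredner.compute_vertex_normal(obj.vertices, obj.indices, 'max')
n_cot = pyredner.compute_vertex_normal(obj.vertices, obj.indices, 'cotangent')
# Per-vertex cosine similarity; values well below 1 mark vertices whose
# shading will visibly differ between the two schemes.
cos = torch.nn.functional.cosine_similarity(n_max, n_cot, dim=1)
print('min cosine similarity:', cos.min().item())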
Example #15
    if loss > prev_loss:
      break

    loss.backward()
    optimizer.step()
    optim_objects[0].material.diffuse_reflectance.texels.data.clamp_(0.0, 1.0)
    optim_objects[0].material.specular_reflectance.texels.data.clamp_(0.0, 1.0)
    prev_loss = loss
    i += 1

  '''
    # Geometry Optimization
    print("Geometry optimization")

    initial_verts = optim_objects[0].vertices.clone()
    initial_normals = pyredner.compute_vertex_normal(optim_objects[0].vertices,
                                                     optim_objects[0].indices)

    if (use_vertex_offsets):  # Vertex optim
        offsets = torch.zeros(initial_verts.shape,
                              device=pyredner.get_device(),
                              requires_grad=True)
    else:  # Normal optim
        offsets = torch.zeros(initial_verts.shape[0],
                              device=pyredner.get_device(),
                              requires_grad=True)

    #lr = 0.0001 / 2^subdiv_level
    optimizer = torch.optim.Adam([offsets], lr=0.001)
    prev_loss = 10000000000
    while True:
        optimizer.zero_grad()
Example #16
    def __init__(self,
                 framework,
                 filename,
                 dims,
                 label_names,
                 normalize_params,
                 background,
                 pose,
                 num_classes,
                 attack_type="benign"):

        self.NUM_CLASSES = num_classes
        self.framework = framework.to(pyredner.get_device())
        self.image_dims = dims
        self.label_names = label_names
        self.framework_params = normalize_params

        # self.objects = pyredner.load_obj(filename, return_objects=True)
        self.material_map, mesh_list, self.light_map = pyredner.load_obj(
            filename)
        for _, mesh in mesh_list:
            mesh.normals = pyredner.compute_vertex_normal(
                mesh.vertices, mesh.indices)

        vertices = []

        self.modifiers = []
        self.input_adv_list = []
        self.input_orig_list = []
        self.targeted = False
        self.clamp_fn = "tanh"

        self.attack_type = attack_type

        if attack_type == "CW":
            for _, mesh in mesh_list:
                vertices.append(mesh.vertices)
                modifier = torch.zeros(mesh.vertices.size(),
                                       requires_grad=True,
                                       device=pyredner.get_device())
                self.modifiers.append(modifier)
                self.input_orig_list.append(
                    tanh_rescale(torch_arctanh(mesh.vertices)))
                mesh.vertices = tanh_rescale(
                    torch_arctanh(mesh.vertices) + modifier)

                self.input_adv_list.append(mesh.vertices)
                mesh.vertices.retain_grad()
        else:
            for _, mesh in mesh_list:
                vertices.append(mesh.vertices)
                mesh.vertices = Variable(mesh.vertices, requires_grad=True)
                mesh.vertices.retain_grad()

        material_id_map = {}
        self.materials = []
        count = 0
        for key, value in self.material_map.items():
            material_id_map[key] = count
            count += 1
            self.materials.append(value)

        self.shapes = []
        self.cw_shapes = []
        for mtl_name, mesh in mesh_list:
            # assert(mesh.normal_indices is None)
            self.shapes.append(
                pyredner.Shape(vertices=mesh.vertices,
                               indices=mesh.indices,
                               material_id=material_id_map[mtl_name],
                               uvs=mesh.uvs,
                               normals=mesh.normals,
                               uv_indices=mesh.uv_indices))

        self.camera = pyredner.automatic_camera_placement(self.shapes,
                                                          resolution=(512,
                                                                      512))
        # Compute the center of the teapot
        self.center = torch.mean(torch.cat(vertices), 0)
        self.translation = torch.tensor([0., 0., 0.],
                                        device=pyredner.get_device(),
                                        requires_grad=True)

        self.angle_input_adv_list = []
        self.angle_input_orig_list = []
        self.pose = pose
        if attack_type == "CW":
            self.euler_angles_modifier = torch.tensor(
                [0., 0., 0.], device=pyredner.get_device(), requires_grad=True)
            if pose == 'forward':
                self.euler_angles = tanh_rescale(
                    torch_arctanh(
                        torch.tensor([0., 0., 0.],
                                     device=pyredner.get_device())) +
                    self.euler_angles_modifier)
                self.angle_input_orig_list.append(
                    tanh_rescale(
                        torch_arctanh(
                            torch.tensor([0., 0., 0.],
                                         device=pyredner.get_device()))))
            elif pose == 'top':
                self.euler_angles = tanh_rescale(
                    torch_arctanh(
                        torch.tensor([0.35, 0., 0.],
                                     device=pyredner.get_device())) +
                    self.euler_angles_modifier)
                self.angle_input_orig_list.append(
                    tanh_rescale(
                        torch_arctanh(
                            torch.tensor([0., 0., 0.],
                                         device=pyredner.get_device()))))
            elif pose == 'left':
                self.euler_angles = tanh_rescale(
                    torch_arctanh(
                        torch.tensor([0., 0.50, 0.],
                                     device=pyredner.get_device())) +
                    self.euler_angles_modifier)
                self.angle_input_orig_list.append(
                    tanh_rescale(
                        torch_arctanh(
                            torch.tensor([0., 0., 0.],
                                         device=pyredner.get_device()))))
            elif pose == 'right':
                self.euler_angles = tanh_rescale(
                    torch_arctanh(
                        torch.tensor([0., -0.50, 0.],
                                     device=pyredner.get_device())) +
                    self.euler_angles_modifier)
                self.angle_input_orig_list.append(
                    tanh_rescale(
                        torch_arctanh(
                            torch.tensor([0., 0., 0.],
                                         device=pyredner.get_device()))))

            self.angle_input_adv_list.append(self.euler_angles)
        else:
            if pose == 'forward':
                self.euler_angles = torch.tensor([0., 0., 0.],
                                                 device=pyredner.get_device(),
                                                 requires_grad=True)
            elif pose == 'top':
                self.euler_angles = torch.tensor([0.35, 0., 0.],
                                                 device=pyredner.get_device(),
                                                 requires_grad=True)
            elif pose == 'left':
                self.euler_angles = torch.tensor([0., 0.50, 0.],
                                                 device=pyredner.get_device(),
                                                 requires_grad=True)
            elif pose == 'right':
                self.euler_angles = torch.tensor([0., -0.50, 0.],
                                                 device=pyredner.get_device(),
                                                 requires_grad=True)

        self.light_init_vals = torch.tensor([20000.0, 30000.0, 20000.0],
                                            device=pyredner.get_device())
        if attack_type == "CW":
            self.light_input_orig_list = []
            self.light_input_adv_list = []
            delta = 1e-6  # constant for stability
            self.light_modifier = torch.tensor([0., 0., 0.],
                                               device=pyredner.get_device(),
                                               requires_grad=True)
            # redner can't accept negative light intensities, so we have to be
            # a bit creative: work with lighting norms instead, then rescale
            # them afterwards.
            tanh_factor = tanh_rescale(
                torch_arctanh(self.light_init_vals /
                              torch.norm(self.light_init_vals)) +
                self.light_modifier / torch.norm(self.light_modifier + delta))
            self.light_intensity = torch.norm(
                self.light_init_vals) * torch.clamp(tanh_factor, 0, 1)

            self.light_input_orig_list.append(self.light_init_vals /
                                              torch.norm(self.light_init_vals))
            self.light_input_adv_list.append(self.light_intensity)
            self.light = pyredner.PointLight(
                position=(self.camera.position + torch.tensor(
                    (0.0, 0.0, 100.0))).to(pyredner.get_device()),
                intensity=self.light_intensity)
        else:
            self.light = pyredner.PointLight(
                position=(self.camera.position + torch.tensor(
                    (0.0, 0.0, 100.0))).to(pyredner.get_device()),
                intensity=Variable(torch.tensor((20000.0, 30000.0, 20000.0),
                                                device=pyredner.get_device()),
                                   requires_grad=True))

        background = pyredner.imread(background)
        self.background = background.to(pyredner.get_device())