roughness = torch.tensor([0.6])

# Give each of the three target objects its own diffuse colour; the
# specular reflectance, roughness and normal map are shared.
diffuse_colors = [
    torch.tensor([0.0, 0.0, 1.0]),   # object 0: blue
    torch.tensor([1.0, 0.0, 0.0]),   # object 1: red
    torch.tensor([0.7, 0.7, 0.7]),   # object 2: grey
]
for obj_idx, diffuse in enumerate(diffuse_colors):
    target_objects[obj_idx].material = pyredner.Material(
        diffuse_reflectance=diffuse,
        specular_reflectance=specular_target,
        roughness=roughness,
        normal_map=normal_map,
        two_sided=True)

resolution = (256, 256)
num_cameras = 2
radius = float(sys.argv[5])
lightLocs = None
camera0 = pyredner.automatic_camera_placement(target_objects, resolution)
camLocs = [torch.tensor([-0.1, 0.1, 0.1])]
target_scenes = generate_scenes(camLocs, target_objects, None, lightLocs)

max_bounces_targets = 4
max_bounces_optim = 4

# Render the reference ("target") images with path tracing and save them.
targets = pyredner.render_pathtracing(scene=target_scenes,
                                      num_samples=(512, 0),
                                      max_bounces=max_bounces_targets)

for ind, img in enumerate(targets):
    img = img.data.cpu()
    pyredner.imwrite(img, path + "targets/target_" + str(ind) + ".png")
# ======================================================================
# Example 2
# ======================================================================
import torch
import pyredner

# Build a UV sphere with a uniform mid-grey diffuse material.
vertices, indices, uvs, normals = pyredner.generate_sphere(64, 128)
material = pyredner.Material(diffuse_reflectance=torch.tensor(
    (0.5, 0.5, 0.5), device=pyredner.get_device()))
sphere = pyredner.Object(vertices=vertices,
                         indices=indices,
                         uvs=uvs,
                         normals=normals,
                         material=material)
camera = pyredner.automatic_camera_placement([sphere], resolution=(480, 640))
scene = pyredner.Scene(objects=[sphere], camera=camera)

# Render UVs and shading normals into one G-buffer: the first two
# channels are (u, v), the remaining three the shading normal.
gbuffer = pyredner.render_g_buffer(
    scene, channels=[pyredner.channels.uv, pyredner.channels.shading_normal])
uv_img = torch.cat([gbuffer[:, :, :2], torch.zeros(480, 640, 1)], dim=2)
normal_img = gbuffer[:, :, 2:]
pyredner.imwrite(uv_img, 'results/test_sphere/uv.png')
pyredner.imwrite(normal_img, 'results/test_sphere/normal.png')
# ======================================================================
# Example 3
# ======================================================================
import pyredner
import torch

# Render the teapot under a point light with three vertex-normal settings:
# the normals loaded from the OBJ, then recomputed with 'max' weighting,
# then recomputed with 'cotangent' weighting.
objects = pyredner.load_obj('scenes/teapot.obj', return_objects=True)
camera = pyredner.automatic_camera_placement(objects, resolution=(512, 512))
scene = pyredner.Scene(camera=camera, objects=objects)

# Point light placed 100 units in front of the camera.
light = pyredner.PointLight(position=(camera.position + torch.tensor(
    (0.0, 0.0, 100.0))).to(pyredner.get_device()),
                            intensity=torch.tensor(
                                (20000.0, 30000.0, 20000.0),
                                device=pyredner.get_device()))

img = pyredner.render_deferred(scene=scene, lights=[light])
pyredner.imwrite(img.cpu(),
                 'results/test_compute_vertex_normals/no_vertex_normal.exr')

for obj in objects:
    obj.normals = pyredner.compute_vertex_normal(obj.vertices, obj.indices,
                                                 'max')
scene = pyredner.Scene(camera=camera, objects=objects)
img = pyredner.render_deferred(scene=scene, lights=[light])
pyredner.imwrite(img.cpu(),
                 'results/test_compute_vertex_normals/max_vertex_normal.exr')

for obj in objects:
    obj.normals = pyredner.compute_vertex_normal(obj.vertices, obj.indices,
                                                 'cotangent')
scene = pyredner.Scene(camera=camera, objects=objects)
img = pyredner.render_deferred(scene=scene, lights=[light])
# FIX: this call was truncated mid-statement in the original source, which
# made the file unparseable. Completed to mirror the 'no'/'max' cases above;
# output filename inferred from that pattern — confirm against upstream.
pyredner.imwrite(
    img.cpu(),
    'results/test_compute_vertex_normals/cotangent_vertex_normal.exr')
import pyredner
import torch

# Compare renders with sample_pixel_center on (no anti-aliasing: every
# sample lands on the pixel center) and off (jittered samples, AA).

pyredner.set_use_gpu(torch.cuda.is_available())
objects = pyredner.load_obj('scenes/teapot.obj', return_objects=True)
camera = pyredner.automatic_camera_placement(objects, resolution=(128, 128))
scene = pyredner.Scene(camera=camera, objects=objects)

for center_only, out_path in (
        (True, 'results/test_sample_pixel_center/img_no_aa.exr'),
        (False, 'results/test_sample_pixel_center/img_with_aa.exr')):
    img = pyredner.render_albedo(scene, sample_pixel_center=center_only)
    pyredner.imwrite(img.cpu(), out_path)
# ======================================================================
# Example 5
# ======================================================================
    def __init__(self,
                 framework,
                 filename,
                 dims,
                 label_names,
                 normalize_params,
                 background,
                 pose,
                 num_classes,
                 attack_type="benign"):
        """Set up a differentiable-rendering attack around an OBJ mesh.

        Loads the mesh from ``filename``, makes its vertices, pose
        (Euler angles + translation) and light intensity differentiable,
        and stores the classifier under attack plus rendering state
        (shapes, materials, camera, background).

        Args:
            framework: classifier model; moved to redner's device.
            filename: path of the .obj file to load.
            dims: image dimensions, stored as ``self.image_dims``.
            label_names: class labels, stored as ``self.label_names``.
            normalize_params: classifier input-normalization parameters.
            background: path of the background image (read via
                ``pyredner.imread`` at the end of __init__).
            pose: one of 'forward', 'top', 'left', 'right'; selects the
                initial Euler angles.
            num_classes: number of classifier classes.
            attack_type: "CW" enables the Carlini-Wagner-style tanh
                reparameterization of vertices/angles/light; any other
                value (default "benign") uses plain requires_grad
                tensors instead.
        """

        self.NUM_CLASSES = num_classes
        self.framework = framework.to(pyredner.get_device())
        self.image_dims = dims
        self.label_names = label_names
        self.framework_params = normalize_params

        # Load materials, meshes and lights from the OBJ file, then
        # (re)compute smooth per-vertex normals for every mesh.
        # self.objects = pyredner.load_obj(filename, return_objects=True)
        self.material_map, mesh_list, self.light_map = pyredner.load_obj(
            filename)
        for _, mesh in mesh_list:
            mesh.normals = pyredner.compute_vertex_normal(
                mesh.vertices, mesh.indices)

        vertices = []

        self.modifiers = []
        self.input_adv_list = []
        self.input_orig_list = []
        self.targeted = False
        self.clamp_fn = "tanh"

        self.attack_type = attack_type

        # Vertex parameterization. For CW, vertices are expressed as
        # tanh_rescale(arctanh(v) + modifier) so the optimizer works on an
        # unconstrained additive modifier (tanh_rescale / torch_arctanh are
        # helpers defined elsewhere — presumably mapping to/from (-1, 1);
        # confirm their definitions before relying on value ranges).
        if attack_type == "CW":
            for _, mesh in mesh_list:
                vertices.append(mesh.vertices)
                # Zero-initialized, trainable per-vertex perturbation.
                modifier = torch.zeros(mesh.vertices.size(),
                                       requires_grad=True,
                                       device=pyredner.get_device())
                self.modifiers.append(modifier)
                self.input_orig_list.append(
                    tanh_rescale(torch_arctanh(mesh.vertices)))
                mesh.vertices = tanh_rescale(
                    torch_arctanh(mesh.vertices) + modifier)

                self.input_adv_list.append(mesh.vertices)
                # Keep gradients on this non-leaf tensor for inspection.
                mesh.vertices.retain_grad()
        else:
            # Benign / non-CW: optimize the vertices directly.
            for _, mesh in mesh_list:
                vertices.append(mesh.vertices)
                mesh.vertices = Variable(mesh.vertices, requires_grad=True)
                mesh.vertices.retain_grad()

        # Map OBJ material names to consecutive integer ids, preserving
        # the material_map iteration order.
        material_id_map = {}
        self.materials = []
        count = 0
        for key, value in self.material_map.items():
            material_id_map[key] = count
            count += 1
            self.materials.append(value)

        # Build one redner Shape per mesh, wired to its material id.
        self.shapes = []
        self.cw_shapes = []
        for mtl_name, mesh in mesh_list:
            # assert(mesh.normal_indices is None)
            self.shapes.append(
                pyredner.Shape(vertices=mesh.vertices,
                               indices=mesh.indices,
                               material_id=material_id_map[mtl_name],
                               uvs=mesh.uvs,
                               normals=mesh.normals,
                               uv_indices=mesh.uv_indices))

        self.camera = pyredner.automatic_camera_placement(self.shapes,
                                                          resolution=(512,
                                                                      512))
        # Compute the center of the teapot
        self.center = torch.mean(torch.cat(vertices), 0)
        # Trainable rigid translation, initialized to zero.
        self.translation = torch.tensor([0., 0., 0.],
                                        device=pyredner.get_device(),
                                        requires_grad=True)

        # Pose parameterization: per-pose initial Euler angles, optionally
        # tanh-reparameterized for CW (mirrors the vertex scheme above).
        # NOTE(review): for 'top'/'left'/'right' the "orig" list records
        # zeros rather than the pose's own initial angles (unlike the
        # 'forward' case, where both are zero anyway) — confirm intended.
        self.angle_input_adv_list = []
        self.angle_input_orig_list = []
        self.pose = pose
        if attack_type == "CW":
            self.euler_angles_modifier = torch.tensor(
                [0., 0., 0.], device=pyredner.get_device(), requires_grad=True)
            if pose == 'forward':
                self.euler_angles = tanh_rescale(
                    torch_arctanh(
                        torch.tensor([0., 0., 0.],
                                     device=pyredner.get_device())) +
                    self.euler_angles_modifier)
                self.angle_input_orig_list.append(
                    tanh_rescale(
                        torch_arctanh(
                            torch.tensor([0., 0., 0.],
                                         device=pyredner.get_device()))))
            elif pose == 'top':
                self.euler_angles = tanh_rescale(
                    torch_arctanh(
                        torch.tensor([0.35, 0., 0.],
                                     device=pyredner.get_device())) +
                    self.euler_angles_modifier)
                self.angle_input_orig_list.append(
                    tanh_rescale(
                        torch_arctanh(
                            torch.tensor([0., 0., 0.],
                                         device=pyredner.get_device()))))
            elif pose == 'left':
                self.euler_angles = tanh_rescale(
                    torch_arctanh(
                        torch.tensor([0., 0.50, 0.],
                                     device=pyredner.get_device())) +
                    self.euler_angles_modifier)
                self.angle_input_orig_list.append(
                    tanh_rescale(
                        torch_arctanh(
                            torch.tensor([0., 0., 0.],
                                         device=pyredner.get_device()))))
            elif pose == 'right':
                self.euler_angles = tanh_rescale(
                    torch_arctanh(
                        torch.tensor([0., -0.50, 0.],
                                     device=pyredner.get_device())) +
                    self.euler_angles_modifier)
                self.angle_input_orig_list.append(
                    tanh_rescale(
                        torch_arctanh(
                            torch.tensor([0., 0., 0.],
                                         device=pyredner.get_device()))))

            self.angle_input_adv_list.append(self.euler_angles)
        else:
            # Non-CW: the Euler angles themselves are the trainable leaf.
            if pose == 'forward':
                self.euler_angles = torch.tensor([0., 0., 0.],
                                                 device=pyredner.get_device(),
                                                 requires_grad=True)
            elif pose == 'top':
                self.euler_angles = torch.tensor([0.35, 0., 0.],
                                                 device=pyredner.get_device(),
                                                 requires_grad=True)
            elif pose == 'left':
                self.euler_angles = torch.tensor([0., 0.50, 0.],
                                                 device=pyredner.get_device(),
                                                 requires_grad=True)
            elif pose == 'right':
                self.euler_angles = torch.tensor([0., -0.50, 0.],
                                                 device=pyredner.get_device(),
                                                 requires_grad=True)

        # Lighting: base RGB intensity; for CW the *direction* (unit norm)
        # is perturbed and rescaled so intensities stay non-negative.
        self.light_init_vals = torch.tensor([20000.0, 30000.0, 20000.0],
                                            device=pyredner.get_device())
        if attack_type == "CW":
            self.light_input_orig_list = []
            self.light_input_adv_list = []
            delta = 1e-6  # constant for stability
            self.light_modifier = torch.tensor([0., 0., 0.],
                                               device=pyredner.get_device(),
                                               requires_grad=True)
            # redner can't accept negative light intensities, so we have to be a bit creative and work with lighting norms instead and then rescale them afterwards...
            # delta keeps the division defined while the modifier is all-zero.
            tanh_factor = tanh_rescale(
                torch_arctanh(self.light_init_vals /
                              torch.norm(self.light_init_vals)) +
                self.light_modifier / torch.norm(self.light_modifier + delta))
            # Clamp to [0, 1] before scaling back to the original magnitude.
            self.light_intensity = torch.norm(
                self.light_init_vals) * torch.clamp(tanh_factor, 0, 1)

            self.light_input_orig_list.append(self.light_init_vals /
                                              torch.norm(self.light_init_vals))
            self.light_input_adv_list.append(self.light_intensity)
            # Point light 100 units in front of the camera.
            self.light = pyredner.PointLight(
                position=(self.camera.position + torch.tensor(
                    (0.0, 0.0, 100.0))).to(pyredner.get_device()),
                intensity=self.light_intensity)
        else:
            self.light = pyredner.PointLight(
                position=(self.camera.position + torch.tensor(
                    (0.0, 0.0, 100.0))).to(pyredner.get_device()),
                intensity=Variable(torch.tensor((20000.0, 30000.0, 20000.0),
                                                device=pyredner.get_device()),
                                   requires_grad=True))

        # Load the compositing background and move it to the render device.
        background = pyredner.imread(background)
        self.background = background.to(pyredner.get_device())