import torch
import pyredner

def generate_scenes(camLocs, objects, envmap=None, lightLocs=None):
  scenes = []
  up = torch.tensor([0.0, 1.0, 0.0])
  offset_factor = 0.0
  light_intensity = 100.0

  for ind, loc in enumerate(camLocs):
    camera = pyredner.Camera(position = loc,
                          look_at = torch.tensor([0.0, 0.0, 0.0]),
                          up = camera0.up,
                          fov = torch.tensor([90.0]), #fov = camera0.fov,
                          resolution = camera0.resolution)
    
    # Build an orthonormal frame at the camera: 'normal' points from the
    # origin toward the camera; 'tangent' and 'bitangent' span the
    # perpendicular plane (used below to offset the light).
    normal = camera.position.div(torch.norm(camera.position))
    tangent = torch.cross(normal, up)
    tangent = tangent.div(torch.norm(tangent))
    bitangent = torch.cross(normal, tangent)
    bitangent = bitangent.div(torch.norm(bitangent))
    
    # Use explicit light positions if provided; otherwise place the light
    # at the camera position, offset along the tangent.
    if lightLocs is None:
      light_positions = [camera.position + offset_factor * tangent]
    else:
      light_positions = [lightLocs[ind]]

    lights = [pyredner.generate_quad_light(position = lightPos,
                                     look_at = camera0.look_at,
                                     size = torch.tensor([0.1, 0.1]),
                                     intensity = torch.tensor([light_intensity, light_intensity, light_intensity])) for lightPos in light_positions]

    scenes.append(pyredner.Scene(camera = camera,
                                 objects = [objects[0], objects[1], objects[2], lights[0]],
                                 envmap = envmap))
  return scenes
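
A minimal usage sketch for the function above, assuming fibonacci_sphere (the helper used later in this file); the camera count, resolution, and sample counts are illustrative. Note this variant indexes objects[0..2], so the mesh must load as at least three objects.

# Hypothetical driver; num_cameras and num_samples are illustrative.
target_objects = pyredner.load_obj('resources/monkey.obj', return_objects=True)
camera0 = pyredner.automatic_camera_placement(target_objects, (256, 256))
camLocs = fibonacci_sphere(8, False)  # helper defined elsewhere in this codebase
scenes = generate_scenes(camLocs, target_objects)
imgs = [pyredner.render_pathtracing(s, num_samples=(64, 4)) for s in scenes]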
Example #2
def generate_scenes(camLocs, objects, envmap=None, lightLocs=None):
    scenes = []
    up = torch.tensor([0.0, 1.0, 0.0])
    offset_factor = 2.0
    light_intensity = 10000.0
    for ind, loc in enumerate(camLocs):
        multiplier = 1.0
        look_at_offset = torch.tensor([0.0, 0.0, 0.0])

        camera = pyredner.Camera(position=camera0.look_at +
                                 radius * loc * multiplier,
                                 look_at=camera0.look_at + look_at_offset,
                                 up=camera0.up,
                                 fov=camera0.fov,
                                 resolution=camera0.resolution)

        normal = camera.position.div(torch.norm(camera.position))
        tangent = torch.cross(normal, up)
        tangent = tangent.div(torch.norm(tangent))
        bitangent = torch.cross(normal, tangent)
        bitangent = bitangent.div(torch.norm(bitangent))

        # Use the provided light positions if given; otherwise offset the
        # light from the camera along the tangent.
        if lightLocs is None:
            offsets = [offset_factor * tangent]  #, offset_factor * normal]
            light_positions = [camera.position + offset for offset in offsets]
        else:
            light_positions = [lightLocs[ind]]
        lights = [
            pyredner.generate_quad_light(position=lightPos,
                                         look_at=camera0.look_at,
                                         size=torch.tensor([0.1, 0.1]),
                                         intensity=torch.tensor([
                                             light_intensity, light_intensity,
                                             light_intensity
                                         ])) for lightPos in light_positions
        ]

        # Camera data for voxel carving
        print(' '.join(
            str(v)
            for v in [ind, *camera.position.tolist(), *camera.look_at.tolist()]))
        for light in lights:
            scenes.append(
                pyredner.Scene(camera=camera,
                               objects=[objects[0], light],
                               envmap=envmap))
    return scenes
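
The print in the loop above emits one "index px py pz lx ly lz" row per camera for voxel carving. A minimal sketch of capturing those rows in a file instead of stdout, assuming the scenes list returned above; the helper name and file path are illustrative.

# Hypothetical helper: write the voxel-carving camera rows to a file.
def dump_cameras(scenes, path='cameras.txt'):  # path is illustrative
    with open(path, 'w') as f:
        for ind, scene in enumerate(scenes):
            cam = scene.camera
            row = [ind, *cam.position.tolist(), *cam.look_at.tolist()]
            f.write(' '.join(str(v) for v in row) + '\n')

Example #3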
def generate_scenes(camLocs, objects, envmap=None):
    scenes = []
    up = torch.tensor([0.0, 1.0, 0.0])
    offset_factor = 2.0
    light_intensity = 500.0

    for ind, loc in enumerate(camLocs):
        camera = pyredner.Camera(position=camera0.look_at + radius * loc,
                                 look_at=camera0.look_at,
                                 up=camera0.up,
                                 fov=camera0.fov,
                                 resolution=camera0.resolution)

        normal = camera.position.div(torch.norm(camera.position))
        tangent = torch.cross(normal, up)
        tangent = tangent.div(torch.norm(tangent))
        bitangent = torch.cross(normal, tangent)
        bitangent = bitangent.div(torch.norm(bitangent))

        lightPos = camera.position + offset_factor * tangent
        light = pyredner.generate_quad_light(position=lightPos,
                                             look_at=camera0.look_at,
                                             size=torch.tensor([0.1, 0.1]),
                                             intensity=torch.tensor([
                                                 light_intensity,
                                                 light_intensity,
                                                 light_intensity
                                             ]))

        # Camera data for voxel carving
        #print(str(ind) + " " + str(camera.position.data[0].item()) + " " + str(camera.position.data[1].item()) + " " + str(camera.position.data[2].item()) + " " + str(camera.look_at.data[0].item()) + " " + str(camera.look_at.data[1].item()) + " " + str(camera.look_at.data[2].item()))

        scenes.append(
            pyredner.Scene(camera=camera,
                           objects=[objects[0], light],
                           envmap=envmap))
    return scenes
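
Unlike the first two variants, Example #3 passes envmap through to the scene. A minimal sketch of rendering its scenes under an environment map, reusing camLocs and target_objects from the surrounding snippets; the .exr path is illustrative.

# Hypothetical: render Example #3's scenes lit by an environment map.
envmap_img = pyredner.imread('resources/env.exr')  # illustrative path
envmap = pyredner.EnvironmentMap(envmap_img)
scenes = generate_scenes(camLocs, target_objects, envmap=envmap)
renders = pyredner.render_pathtracing(scenes, num_samples=(32, 4))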
fov = 45.0
resolution = (256, 256)  # (height, width) tuple expected by pyredner; value assumed
look_at = torch.tensor([0.0, 0.0, 0.0])
camLocs = fibonacci_sphere(num_cameras, False)


cams = []
lights = []
light_intensity = 500.0  # assumed value; used below without being defined in the snippet
radius2 = 5.0  # assumed light-placement radius; used below without being defined in the snippet
target_objects = pyredner.load_obj('resources/monkey.obj', return_objects=True)
camera0 = pyredner.automatic_camera_placement(target_objects, resolution)
for ind, pos in enumerate(camLocs):
    pos = torch.tensor([0.5, 0.0, 100.0])  # fixed anchor; overrides the fibonacci sample
    normal = pos.div(torch.norm(pos))
    pos = normal * radius2
    lights.append(pyredner.generate_quad_light(position = pos + torch.tensor([0.0, 0.0, -15.0]),
                                     look_at = camera0.look_at,
                                     size = torch.tensor([2.0, 2.0]),
                                     intensity = torch.tensor([light_intensity, light_intensity, light_intensity])))
print("LIGHT ONE DONE")
for ind, pos in enumerate(camLocs):
    pos = torch.tensor([100.0, 0.0, -3.0])
    normal = pos.div(torch.norm(pos - torch.tensor([-3.0, 0.0, -3.0])))
    pos = normal * radius2

    lights.append(pyredner.generate_quad_light(position = pos + torch.tensor([10.0, 0.0, -3.0]),
                                     look_at = camera0.look_at,
                                     size = torch.tensor([2.0, 2.0]),
                                     intensity = torch.tensor([light_intensity, light_intensity, light_intensity])))

print("LIGHT TWO DONE")    

cam = pyredner.Camera(position = torch.tensor([0.0, 0.0, -5.5]),
                      look_at = look_at,
                      up = torch.tensor([0.0, 1.0, 0.0]),
                      fov = torch.tensor([fov]),
                      resolution = resolution)

    # Method of an SVBRDF renderer class: relies on self.redner_device,
    # self.patch_vertices / self.patch_uvs / self.patch_indices, and
    # self.camera_type being set elsewhere (e.g. in __init__).
    def render(self, scene, svbrdf):
        imgs = []

        svbrdf = svbrdf.unsqueeze(0) if len(svbrdf.shape) == 3 else svbrdf

        sensor_size = (svbrdf.shape[-1], svbrdf.shape[-2])

        for svbrdf_single in torch.split(svbrdf, 1, dim=0):
            normals, diffuse, roughness, specular = utils.unpack_svbrdf(
                svbrdf_single.squeeze(0))
            # Redner expects the normal map to be in range [0, 1]
            normals = utils.encode_as_unit_interval(normals)
            # Redner expects the roughness to have one channel only.
            # We also need to convert from GGX roughness to Blinn-Phong power.
            # See: https://github.com/iondune/csc473/blob/master/lectures/07-cook-torrance.md
            roughness = torch.mean(torch.clamp(roughness, min=0.001),
                                   dim=0,
                                   keepdim=True)**4

            # Convert from [c,h,w] to [h,w,c] for redner
            normals = normals.permute(1, 2, 0)
            diffuse = diffuse.permute(1, 2, 0)
            roughness = roughness.permute(1, 2, 0)
            specular = specular.permute(1, 2, 0)

            material = pyredner.Material(
                diffuse_reflectance=pyredner.Texture(
                    diffuse.to(self.redner_device)),
                specular_reflectance=pyredner.Texture(
                    specular.to(self.redner_device)),
                roughness=pyredner.Texture(roughness.to(self.redner_device)),
                normal_map=pyredner.Texture(normals.to(self.redner_device)))

            material_patch = pyredner.Object(vertices=self.patch_vertices,
                                             uvs=self.patch_uvs,
                                             indices=self.patch_indices,
                                             material=material)

            # Define the camera parameters (focused at the middle of the patch) and make sure we always have a valid 'up' direction
            position = np.array(scene.camera.pos)
            lookat = np.array([0.0, 0.0, 0.0])
            cz = lookat - position  # Principal axis
            up = np.array([0.0, 0.0, 1.0])
            if np.linalg.norm(np.cross(cz, up)) == 0.0:
                up = np.array([0.0, 1.0, 0.0])

            camera = pyredner.Camera(
                position=torch.FloatTensor(position).to(self.redner_device),
                look_at=torch.FloatTensor(lookat).to(self.redner_device),
                up=torch.FloatTensor(up).to(self.redner_device),
                fov=torch.FloatTensor([90]),
                resolution=sensor_size,
                camera_type=self.camera_type)

            # # The deferred rendering path.
            # # It does not have a specular model and therefore is of limited usability for us
            # full_scene = pyredner.Scene(camera = camera, objects = [material_patch])
            # light = pyredner.PointLight(position = torch.tensor(scene.light.pos).to(self.redner_device),
            #                                    intensity = torch.tensor(scene.light.color).to(self.redner_device))
            # img = pyredner.render_deferred(scene = full_scene, lights = [light])

            light = pyredner.generate_quad_light(
                position=torch.Tensor(scene.light.pos).to(self.redner_device),
                look_at=torch.zeros(3).to(self.redner_device),
                size=torch.Tensor([0.6, 0.6]).to(self.redner_device),
                intensity=torch.Tensor(scene.light.color).to(
                    self.redner_device))
            full_scene = pyredner.Scene(camera=camera,
                                        objects=[material_patch, light])
            img = pyredner.render_pathtracing(full_scene, num_samples=(16, 8))

            # Transform the rendered image back to something torch can interpret
            imgs.append(img.permute(2, 0, 1).to(svbrdf.device))

        return torch.stack(imgs)
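
A sketch of driving the render() method above. SVBRDFRenderer is a hypothetical name for the class that owns it, and the scene object must expose the camera.pos and light.pos / light.color fields the method reads; the SVBRDF channel packing shown is an assumption about utils.unpack_svbrdf.

# Hypothetical usage; SVBRDFRenderer and scene are assumptions, not part of the snippet.
renderer = SVBRDFRenderer()
svbrdf = torch.rand(12, 256, 256)  # assumed packing: normals(3) + diffuse(3) + roughness(3) + specular(3)
imgs = renderer.render(scene, svbrdf)  # scene provides camera.pos and light.pos / light.color
print(imgs.shape)  # e.g. torch.Size([1, 3, 256, 256]) for a single square SVBRDF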