Example #1
def tex_model(optim_scenes, deferred, num_samples=(64, 64), max_bounces=1):
    # Render either with the fast deferred-shading path or with full path
    # tracing (which honors num_samples and max_bounces).
    if deferred:
        renders = pyredner.render_deferred(scene=optim_scenes)
    else:
        renders = pyredner.render_pathtracing(scene=optim_scenes,
                                              num_samples=num_samples,
                                              max_bounces=max_bounces)
    return renders
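
# A minimal usage sketch of tex_model (hypothetical; the scene list and the
# sample counts are assumptions, not from the excerpt): the deferred branch is
# a fast G-buffer pass, the other branch does full path tracing.
previews = tex_model(optim_scenes, deferred=True)
finals = tex_model(optim_scenes, deferred=False,
                   num_samples=(256, 4), max_bounces=2)
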
def model(initial_verts, initial_normals, offsets, optim_objects,
          use_vertex_offsets):

    if use_vertex_offsets:
        # Vertex optimization: offsets is a per-vertex [n, 3] displacement.
        optim_objects[0].vertices = initial_verts + offsets
    else:
        # Normal optimization: offsets is a per-vertex scalar displacement
        # applied along the vertex normal.
        off = torch.stack([offsets, offsets, offsets], dim=1)
        optim_objects[0].vertices = initial_verts + off * initial_normals

    optim_scenes = generate_scenes(camLocs, optim_objects)
    renders = pyredner.render_pathtracing(scene=optim_scenes,
                                          num_samples=(64, 64),
                                          max_bounces=1)
    return renders
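
# generate_scenes is not defined in these excerpts. A minimal hypothetical
# sketch, assuming it builds one pyredner.Scene per camera location (the fov,
# up vector, and look-at origin are assumptions; light_locs is unused here):
def generate_scenes(cam_locs, objects, envmap=None, light_locs=None):
    scenes = []
    for loc in cam_locs:
        cam = pyredner.Camera(position=loc,
                              look_at=torch.zeros(3),
                              up=torch.tensor([0.0, 1.0, 0.0]),
                              fov=torch.tensor([45.0]),
                              resolution=resolution)
        scenes.append(pyredner.Scene(camera=cam, objects=objects,
                                     envmap=envmap))
    return scenes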
Example #3
def model(cam_pos, cam_look_at, shape_coeffs, color_coeffs, resolution, center,
          all_euler_angles, all_translations):
    # First rotate around center, then translation

    imgs = []

    #obj = pyredner.load_obj('p_ones30/final.obj', return_objects=True)[0]
    vertices, indices, uvs, normals = pyredner.generate_sphere(128, 64)
    vertices *= 80
    m = pyredner.Material(
        diffuse_reflectance=torch.ones(2, 2, 3, dtype=torch.float32))
    obj = pyredner.Object(vertices=vertices,
                          indices=indices,
                          normals=normals,
                          uvs=uvs,
                          material=m)
    v = obj.vertices.clone()

    for i in range(len(all_translations)):
        rotation_matrix = pyredner.gen_rotate_matrix(all_euler_angles[i]).to(
            pyredner.get_device())
        center = center.to(pyredner.get_device())
        # vertices = ((shape_mean + shape_basis @ shape_coeffs).view(-1, 3) - center) @ torch.t(rotation_matrix) + center + all_translations[i].to(pyredner.get_device())
        # NOTE: all_translations[i] is accepted but not applied here; the
        # commented line above shows the variant that also translates.
        obj.vertices = (v - center) @ torch.t(rotation_matrix) + center
        obj.normals = pyredner.compute_vertex_normal(obj.vertices, indices)
        # colors = (color_mean + color_basis @ color_coeffs).view(-1, 3)
        # m = pyredner.Material(diffuse_reflectance=torch.tensor([0.5, 0.5, 0.5]))
        # m = pyredner.Material(use_vertex_color=True)  # unused: obj already carries its material
        # obj = pyredner.Object(vertices=vertices, indices=indices, normals=normals, material=m, colors=colors)

        if i == 0:
            pyredner.save_obj(obj,
                              "generated/env_dataset_" + name + '/tgt_obj.obj')

        cam = pyredner.Camera(
            position=cam_pos,
            look_at=cam_look_at,  # Center of the vertices
            up=torch.tensor([0.0, 1.0, 0.0]),
            fov=torch.tensor([45.0]),
            resolution=resolution)
        scene = pyredner.Scene(camera=cam, objects=[obj], envmap=envmap)

        img = pyredner.render_pathtracing(scene=scene, num_samples=(128, 4))
        imgs.append(img)
    return imgs
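
# Hypothetical driver for the model above: render all views and save them to
# disk (the resolution and the file naming are assumptions):
imgs = model(cam_pos, cam_look_at, shape_coeffs, color_coeffs, (256, 256),
             center, all_euler_angles, all_translations)
for i, img in enumerate(imgs):
    pyredner.imwrite(img.data.cpu(),
                     'generated/env_dataset_' + name + '/view_{:0>2d}.png'.format(i))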
Example #4
def model(cam_poses, cam_look_ats, shape_coeffs, color_coeffs, resolution):
    # First rotate around center, then translation

    imgs = []

    vertices = (shape_mean + shape_basis @ shape_coeffs).view(-1, 3)
    normals = pyredner.compute_vertex_normal(vertices, indices)
    colors = (color_mean + color_basis @ color_coeffs).view(-1, 3)
    m = pyredner.Material(use_vertex_color=False,
                          specular_reflectance=torch.tensor(
                              [1., 1., 1.], device=pyredner.get_device()),
                          roughness=torch.tensor([0.02]))
    obj = pyredner.Object(vertices=vertices,
                          indices=indices,
                          normals=normals,
                          material=m,
                          colors=colors)
    # The morphable-model object built above is discarded; load the previously
    # saved target mesh instead.
    obj = pyredner.load_obj('generated/env_dataset_oness_n/tgt_obj.obj',
                            return_objects=True)[0]
    obj.material.specular_reflectance = pyredner.Texture(
        torch.tensor([0.05, 0.05, 0.05], device=pyredner.get_device()))
    obj.material.roughness = pyredner.Texture(torch.tensor([0.02]))
    pyredner.save_obj(obj, "generated/env_dataset_" + name + '/tgt_obj.obj')

    for i in range(len(cam_poses)):
        cam = pyredner.Camera(
            position=cam_poses[i],
            look_at=cam_look_ats[i % len(cam_look_ats)],  # Center of the vertices
            up=torch.tensor([0.0, 1.0, 0.0]),
            fov=torch.tensor([45.0]),
            resolution=resolution)
        scene = pyredner.Scene(camera=cam, objects=[obj], envmap=envmap)

        img = pyredner.render_pathtracing(scene=scene, num_samples=(128, 4))
        imgs.append(img)
    return imgs
def tex_model(optim_scenes, num_samples=(64, 64), max_bounces=1):
    return pyredner.render_pathtracing(scene=optim_scenes,
                                       num_samples=num_samples,
                                       max_bounces=max_bounces)
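
# Hypothetical optimization loop driving tex_model above: fit a diffuse
# texture to the rendered targets (the texture size, Adam settings, and loss
# are assumptions, not from the excerpt):
texels = torch.rand(256, 256, 3, device=pyredner.get_device(),
                    requires_grad=True)
optimizer = torch.optim.Adam([texels], lr=1e-2)
for it in range(200):
    optimizer.zero_grad()
    optim_objects[0].material.diffuse_reflectance = pyredner.Texture(texels)
    optim_scenes = generate_scenes(camLocs, optim_objects)
    renders = tex_model(optim_scenes)
    loss = sum((render - target).pow(2).mean()
               for render, target in zip(renders, targets))
    loss.backward()
    optimizer.step()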
target_objects[2].material = pyredner.Material(diffuse_reflectance=diffuse,
                                               specular_reflectance=specular_target,
                                               roughness=roughness,
                                               normal_map=normal_map,
                                               two_sided=True)

resolution = (256, 256)
num_cameras = 2
radius = float(sys.argv[5])
lightLocs = None
camera0 = pyredner.automatic_camera_placement(target_objects, resolution)
#camLocs = fibonacci_sphere(num_cameras, False)
camLocs = [torch.tensor([-0.1, 0.1, 0.1])]
target_scenes = generate_scenes(camLocs, target_objects, None, lightLocs)

max_bounces_targets = 4
max_bounces_optim = 4

# Render Targets
targets = pyredner.render_pathtracing(scene=target_scenes,
                                      num_samples=(512, 0),
                                      max_bounces=max_bounces_targets)

for ind, img in enumerate(targets):
    img = img.data.cpu()
    pyredner.imwrite(img, path + "targets/target_" + str(ind) + ".png")
    #target_data = pyredner.imread(path + "targets/target_" + str(ind) + ".png")
    #targets[ind] = target_data


target_texture = pyredner.render_albedo(scene=target_scenes, num_samples=(512, 0))

for ind, img in enumerate(target_texture):
    # Build a binary texel mask: average the channels and keep only texels
    # whose mean albedo is at least 0.8.
    mask = img.clone()
    mask = mask.sum(2) / 3
    mask[mask < 0.8] = 0.0
    mask = torch.stack([mask, mask, mask], dim=2)
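    # The excerpt cuts off here; a hypothetical continuation would apply the
    # mask to the albedo render, e.g.:
    masked = img * mask
    pyredner.imwrite(masked.data.cpu(),
                     path + "targets/albedo_" + str(ind) + ".png")
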
shapes = []
shapes.append(target_obj1[0])

numShapes = len(shapes)
shapes.extend(lights)

area_lights = []
for i in range(numShapes, len(shapes)):
    # Each scene below contains [mesh, one light], so within a scene the light
    # shape is always at index numShapes; build a base- and a 10x-intensity light.
    area_lights.append(pyredner.AreaLight(
        shape_id=numShapes,
        intensity=torch.tensor([light_intensity] * 3)))
    area_lights.append(pyredner.AreaLight(
        shape_id=numShapes,
        intensity=torch.tensor([light_intensity * 10] * 3)))

scene = pyredner.Scene(cam,
                       objects=[shapes[0], shapes[1]],
                       area_lights=[area_lights[0]],
                       envmap=None)
scene_intense = pyredner.Scene(cam,
                               objects=[shapes[0], shapes[1]],
                               area_lights=[area_lights[1]],
                               envmap=None)

target = pyredner.render_pathtracing(scene=[scene],
                                     num_samples=(512, 0),
                                     max_bounces=1)[0]
pyredner.imwrite(target.cpu(),
                 'results/shadow_art/high_res/' + step + '/' + str(obj) + '_0.png')

area_lights = []  # rebuilt identically for the second target
for i in range(numShapes, len(shapes)):
    area_lights.append(pyredner.AreaLight(
        shape_id=numShapes,
        intensity=torch.tensor([light_intensity] * 3)))
    area_lights.append(pyredner.AreaLight(
        shape_id=numShapes,
        intensity=torch.tensor([light_intensity * 10] * 3)))

shape0_vertices = shapes[0].vertices.clone()
# Identity assignment; a vertex transform was presumably edited out here.
shapes[0].vertices = shape0_vertices

scene_3 = pyredner.Scene(cam3,
                         objects=[shapes[0], shapes[2]],
                         area_lights=[area_lights[2]],
                         envmap=None)

target2 = pyredner.render_pathtracing(scene=[scene_3],
                                      num_samples=(512, 0),
                                      max_bounces=1)[0]
pyredner.imwrite(target2.cpu(),
                 'results/shadow_art/high_res/' + step + '/' + str(obj) + '_1.png')
Example #8
# (Head of this excerpt truncated; these are the trailing arguments of a
# pyredner.Camera(...) call.)
    up=torch.tensor([0.0, 1.0, 0.0]),
    fov=torch.tensor([20.0]),
    resolution=resolution)
for i in range(0):  # disabled here; originally range(num_views)
    print("correcting position {:0>2d}".format(i))
    eul_optimizer = torch.optim.SGD([euler], lr=2)
    tra_optimizer = torch.optim.SGD([trans], lr=5000)
    for t in range(20):
        eul_optimizer.zero_grad()
        tra_optimizer.zero_grad()
        rotation_matrix = pyredner.gen_rotate_matrix(euler)
        obj.vertices = (vertices - center) @ torch.t(rotation_matrix) \
                       + center + trans * torch.tensor([1., 1., 3.], device=pyredner.get_device())
        scene = pyredner.Scene(objects=[obj], camera=cam, envmap=envmap)
        img = pyredner.render_pathtracing(scene=scene,
                                          num_samples=(64, 4),
                                          use_secondary_edge_sampling=True)
        loss = (img - target[i]).pow(2).mean()
        loss.backward()
        eul_optimizer.step()
        tra_optimizer.step()
        if t % 2 == 1:
            print('    iteration', t, 'loss:{:.6f}'.format(loss),
                  euler.data.cpu(),
                  trans.data.cpu() * torch.tensor([1., 1., 3.]))

    euler_list.append(euler.data.clone())
    trans_list.append(trans.data.clone())

    #pyredner.imwrite(img.cpu(), output_path + '/view_positions/{:0>2d}.png'.format(i))
    def render(self, scene, svbrdf):
        imgs = []

        svbrdf = svbrdf.unsqueeze(0) if len(svbrdf.shape) == 3 else svbrdf

        sensor_size = (svbrdf.shape[-1], svbrdf.shape[-2])

        for svbrdf_single in torch.split(svbrdf, 1, dim=0):
            normals, diffuse, roughness, specular = utils.unpack_svbrdf(
                svbrdf_single.squeeze(0))
            # Redner expects the normal map to be in range [0, 1]
            normals = utils.encode_as_unit_interval(normals)
            # Redner expects the roughness to have one channel only.
            # We also need to convert from GGX roughness to Blinn-Phong power.
            # See: https://github.com/iondune/csc473/blob/master/lectures/07-cook-torrance.md
            roughness = torch.mean(torch.clamp(roughness, min=0.001),
                                   dim=0,
                                   keepdim=True)**4

            # Convert from [c,h,w] to [h,w,c] for redner
            normals = normals.permute(1, 2, 0)
            diffuse = diffuse.permute(1, 2, 0)
            roughness = roughness.permute(1, 2, 0)
            specular = specular.permute(1, 2, 0)

            material = pyredner.Material(
                diffuse_reflectance=pyredner.Texture(
                    diffuse.to(self.redner_device)),
                specular_reflectance=pyredner.Texture(
                    specular.to(self.redner_device)),
                roughness=pyredner.Texture(roughness.to(self.redner_device)),
                normal_map=pyredner.Texture(normals.to(self.redner_device)))

            material_patch = pyredner.Object(vertices=self.patch_vertices,
                                             uvs=self.patch_uvs,
                                             indices=self.patch_indices,
                                             material=material)

            # Define the camera parameters (focused at the middle of the patch) and make sure we always have a valid 'up' direction
            position = np.array(scene.camera.pos)
            lookat = np.array([0.0, 0.0, 0.0])
            cz = lookat - position  # Principal axis
            up = np.array([0.0, 0.0, 1.0])
            if np.linalg.norm(np.cross(cz, up)) == 0.0:
                up = np.array([0.0, 1.0, 0.0])

            camera = pyredner.Camera(
                position=torch.FloatTensor(position).to(self.redner_device),
                look_at=torch.FloatTensor(lookat).to(self.redner_device),
                up=torch.FloatTensor(up).to(self.redner_device),
                fov=torch.FloatTensor([90]),
                resolution=sensor_size,
                camera_type=self.camera_type)

            # # The deferred rendering path.
            # # It does not have a specular model and therefore is of limited usability for us
            # full_scene = pyredner.Scene(camera = camera, objects = [material_patch])
            # light = pyredner.PointLight(position = torch.tensor(scene.light.pos).to(self.redner_device),
            #                                    intensity = torch.tensor(scene.light.color).to(self.redner_device))
            # img = pyredner.render_deferred(scene = full_scene, lights = [light])

            light = pyredner.generate_quad_light(
                position=torch.Tensor(scene.light.pos).to(self.redner_device),
                look_at=torch.zeros(3).to(self.redner_device),
                size=torch.Tensor([0.6, 0.6]).to(self.redner_device),
                intensity=torch.Tensor(scene.light.color).to(
                    self.redner_device))
            full_scene = pyredner.Scene(camera=camera,
                                        objects=[material_patch, light])
            img = pyredner.render_pathtracing(full_scene, num_samples=(16, 8))

            # Transform the rendered image back to something torch can interpret
            imgs.append(img.permute(2, 0, 1).to(svbrdf.device))

        return torch.stack(imgs)
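
# Hypothetical usage of render() above (the class name, scene object, and
# target photos are assumptions):
renderer = RednerRenderer()                  # whichever class defines render()
rendered = renderer.render(scene, svbrdf)    # [n, 3, h, w] on svbrdf.device
loss = torch.nn.functional.l1_loss(rendered, target_photos)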