Example #1
    def __init__(self,
                 camera: pyredner.Camera,
                 shapes: List[pyredner.Shape] = [],
                 shape_id=dict(),
                 materials: List[pyredner.Material] = [],
                 area_lights: List[pyredner.AreaLight] = [],
                 objects: Optional[List[pyredner.Object]] = None,
                 envmap: Optional[pyredner.EnvironmentMap] = None):
        self.camera = camera
        self.envmap = envmap
        self.shape_id = shape_id
        if objects is None:
            self.shapes = shapes
            self.materials = materials
            self.area_lights = area_lights
        else:
            # Convert objects to shapes/materials/lights
            shapes = []
            materials = []
            area_lights = []
            material_dict = {}
            current_material_id = 0
            for obj in objects:
                mid = -1
                if obj.material in material_dict:
                    mid = material_dict[obj.material]
                else:
                    mid = current_material_id
                    material_dict[obj.material] = current_material_id
                    materials.append(obj.material)
                    current_material_id += 1
                if obj.light_intensity is not None:
                    current_shape_id = len(shapes)
                    area_light = pyredner.AreaLight(
                        shape_id=current_shape_id,
                        intensity=obj.light_intensity,
                        two_sided=obj.light_two_sided)
                    area_lights.append(area_light)
                shape = pyredner.Shape(vertices=obj.vertices,
                                       indices=obj.indices,
                                       material_id=mid,
                                       uvs=obj.uvs,
                                       normals=obj.normals,
                                       uv_indices=obj.uv_indices,
                                       normal_indices=obj.normal_indices,
                                       colors=obj.colors)
                shapes.append(shape)
            self.shapes = shapes
            #self.shape_id = shape_id
            self.materials = materials
            self.area_lights = area_lights
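# Usage sketch for the constructor above (an illustration, not part of the
# original example; it assumes the standard pyredner Camera/Material/Object
# constructors). A Scene can be built either from explicit shape/material/light
# lists or, as here, from a list of pyredner.Object instances.
import torch
import pyredner

cam = pyredner.Camera(position=torch.tensor([0.0, 0.0, -5.0]),
                      look_at=torch.tensor([0.0, 0.0, 0.0]),
                      up=torch.tensor([0.0, 1.0, 0.0]),
                      fov=torch.tensor([45.0]),
                      resolution=(256, 256))
mat = pyredner.Material(diffuse_reflectance=torch.tensor([0.5, 0.5, 0.5]))
tri = pyredner.Object(
    vertices=torch.tensor([[-1.0, -1.0, 0.0],
                           [ 1.0, -1.0, 0.0],
                           [ 0.0,  1.0, 0.0]], device=pyredner.get_device()),
    indices=torch.tensor([[0, 1, 2]], dtype=torch.int32,
                         device=pyredner.get_device()),
    material=mat)
# Object-based path: shapes, materials and area lights are derived automatically.
scene = pyredner.Scene(camera=cam, objects=[tri])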
Example #2
shape_light = pyredner.Shape(\
    vertices = torch.tensor([[-1.0, -1.0, -7.0],
                             [ 2.0, -1.0, -7.0],
                             [-1.0,  1.0, -7.0],
                             [ 2.0,  1.0, -7.0]], device = pyredner.get_device()),
    indices = torch.tensor([[0, 1, 2],[1, 3, 2]],
        dtype = torch.int32, device = pyredner.get_device()),
    uvs = None,
    normals = None,
    material_id = 0)

# The shape list of our scene contains two shapes:
shapes = [shape_diamond, shape_light]

light = pyredner.AreaLight(shape_id=1,
                           intensity=torch.tensor([200.0, 200.0, 200.0]))
area_lights = [light]
# Finally we construct our scene using all the variables we set up previously.
scene = pyredner.Scene(cam, shapes, materials, area_lights)
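# A rendering sketch for the scene above (not in the original snippet; it
# mirrors the serialize_scene / RenderFunction pattern shown in Example #5,
# and the output path is illustrative).
args = pyredner.RenderFunction.serialize_scene(
    scene = scene,
    num_samples = 256,
    max_bounces = 1)
render = pyredner.RenderFunction.apply
img = render(0, *args)  # the first argument is the random seed
pyredner.imwrite(img.cpu(), 'results/diamond_scene/render.png')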

#shapes = []
#for _, mesh in mesh_list:
#    assert(mesh.normal_indices is None)
#    shapes.append(pyredner.Shape(\
#        vertices = mesh.vertices,
#        indices = mesh.indices,
#        uvs = None,
#        normals = mesh.normals,
#        material_id=0))

# The previous tutorial used a mesh area light for the scene lighting,
Example #3
def parse_shape(node, material_dict, shape_id, shape_group_dict=None):
    if node.attrib['type'] == 'obj' or node.attrib['type'] == 'serialized':
        print(node.attrib['id'])
        to_world = torch.eye(4)
        serialized_shape_id = 0
        mat_id = -1
        light_intensity = None
        filename = ''
        mat_name2id = {}
        for child in node:
            if 'name' in child.attrib:
                if child.attrib['name'] == 'filename':
                    filename = child.attrib['value']
                elif child.attrib['name'] == 'toWorld':
                    to_world = parse_transform(child)
                elif child.attrib['name'] == 'shapeIndex':
                    serialized_shape_id = int(child.attrib['value'])
            if child.tag == 'ref':
                mat_id = material_dict[child.attrib['id']]
                if 'name' in child.attrib and child.attrib['name'] != 'bsdf':
                    mat_name2id[child.attrib['name']] = child.attrib['id']
            elif child.tag == 'bsdf':
                # TODO: hack! fall back to the default diffuse material when
                # encountering an inline bsdf declaration.
                mat_id = 0
                # node_id, material = parse_material(child)
                # if node_id is not None:
                #     material_dict[node_id] = len(materials)
                #     materials.append(material)
            elif child.tag == 'emitter':
                for grandchild in child:
                    if grandchild.attrib['name'] == 'radiance':
                        light_intensity = parse_vector(
                            grandchild.attrib['value'])
                        if light_intensity.shape[0] == 1:
                            light_intensity = torch.tensor(\
                                         [light_intensity[0],
                                          light_intensity[0],
                                          light_intensity[0]])

        if node.attrib['type'] == 'obj':
            _, mesh_list, _ = pyredner.load_obj.load_obj_fast(
                filename, is_load_mtl=False)
            shape_list = []
            # Create the area light once, outside the per-mesh loop
            # (this also avoids an undefined lgt when mesh_list is empty).
            lgt = None
            if light_intensity is not None:
                lgt = pyredner.AreaLight(shape_id, light_intensity)
            for mesh in mesh_list:
                mat_name = mesh[0]
                vertices = mesh[1].vertices.cpu()
                indices = mesh[1].indices.cpu()
                uvs = mesh[1].uvs
                normals = mesh[1].normals
                if uvs is not None:
                    uvs = uvs.cpu()
                if normals is not None:
                    normals = normals.cpu()

                # Transform the vertices and normals
                vertices = torch.cat(
                    (vertices, torch.ones(vertices.shape[0], 1)), dim=1)
                vertices = vertices @ torch.transpose(to_world, 0, 1)
                vertices = vertices / vertices[:, 3:4]
                vertices = vertices[:, 0:3].contiguous()
                if normals is not None:
                    normals = normals @ (torch.inverse(
                        torch.transpose(to_world, 0, 1))[:3, :3])
                    normals = normals.contiguous()
                assert (vertices is not None)
                assert (indices is not None)

                if pyredner.get_use_gpu():
                    # Copy to GPU
                    vertices = vertices.cuda()
                    indices = indices.cuda()
                    if uvs is not None:
                        uvs = uvs.cuda()
                    if normals is not None:
                        normals = normals.cuda()
                # Assign material
                if mat_name != '' and mat_name is not None:  # skip meshes with no material
                    mat_id = material_dict[mat_name2id[mat_name]]
                shape_list.append(
                    pyredner.Shape(vertices, indices, uvs, normals, mat_id))
            return shape_list, lgt
        else:
            assert (node.attrib['type'] == 'serialized')
            mitsuba_tri_mesh = redner.load_serialized(filename,
                                                      serialized_shape_id)
            vertices = torch.from_numpy(mitsuba_tri_mesh.vertices)
            indices = torch.from_numpy(mitsuba_tri_mesh.indices)
            uvs = torch.from_numpy(mitsuba_tri_mesh.uvs)
            normals = torch.from_numpy(mitsuba_tri_mesh.normals)
            if uvs.shape[0] == 0:
                uvs = None
            if normals.shape[0] == 0:
                normals = None

            # Transform the vertices and normals
            vertices = torch.cat((vertices, torch.ones(vertices.shape[0], 1)),
                                 dim=1)
            vertices = vertices @ torch.transpose(to_world, 0, 1)
            vertices = vertices / vertices[:, 3:4]
            vertices = vertices[:, 0:3].contiguous()
            if normals is not None:
                normals = normals @ (torch.inverse(
                    torch.transpose(to_world, 0, 1))[:3, :3])
                normals = normals.contiguous()
            assert (vertices is not None)
            assert (indices is not None)
            lgt = None
            if light_intensity is not None:
                lgt = pyredner.AreaLight(shape_id, light_intensity)

            if pyredner.get_use_gpu():
                # Copy to GPU
                vertices = vertices.cuda()
                indices = indices.cuda()
                if uvs is not None:
                    uvs = uvs.cuda()
                if normals is not None:
                    normals = normals.cuda()
            return pyredner.Shape(vertices, indices, uvs, normals, mat_id), lgt
    elif node.attrib['type'] == 'rectangle':
        indices = torch.tensor([[0, 2, 1], [1, 2, 3]], dtype=torch.int32)
        vertices = torch.tensor([[-1.0, -1.0, 0.0], [-1.0, 1.0, 0.0],
                                 [1.0, -1.0, 0.0], [1.0, 1.0, 0.0]])
        uvs = None
        normals = None
        to_world = torch.eye(4)
        mat_id = -1
        light_intensity = None
        for child in node:
            if 'name' in child.attrib:
                if child.attrib['name'] == 'toWorld':
                    to_world = parse_transform(child)
            if child.tag == 'ref':
                mat_id = material_dict[child.attrib['id']]
            elif child.tag == 'emitter':
                for grandchild in child:
                    if grandchild.attrib['name'] == 'radiance':
                        light_intensity = parse_vector(
                            grandchild.attrib['value'])
                        if light_intensity.shape[0] == 1:
                            light_intensity = torch.tensor(\
                                         [light_intensity[0],
                                          light_intensity[0],
                                          light_intensity[0]])
        # Transform the vertices and normals
        vertices = torch.cat((vertices, torch.ones(vertices.shape[0], 1)),
                             dim=1)
        vertices = vertices @ torch.transpose(to_world, 0, 1)
        vertices = vertices / vertices[:, 3:4]
        vertices = vertices[:, 0:3].contiguous()
        if normals is not None:
            normals = normals @ (torch.inverse(torch.transpose(to_world, 0,
                                                               1))[:3, :3])
            normals = normals.contiguous()
        assert (vertices is not None)
        assert (indices is not None)
        lgt = None
        if light_intensity is not None:
            lgt = pyredner.AreaLight(shape_id, light_intensity)

        if pyredner.get_use_gpu():
            # Copy to GPU
            vertices = vertices.cuda()
            indices = indices.cuda()
            if uvs is not None:
                uvs = uvs.cuda()
            if normals is not None:
                normals = normals.cuda()
        return pyredner.Shape(vertices, indices, uvs, normals, mat_id), lgt
    # Add instance support
    # TODO (simply transform & create a new shape now)
    elif node.attrib['type'] == 'instance':
        shape = None
        for child in node:
            if 'name' in child.attrib:
                if child.attrib['name'] == 'toWorld':
                    to_world = parse_transform(child)
                    if pyredner.get_use_gpu():
                        to_world = to_world.cuda()
            if child.tag == 'ref':
                shape_ = shape_group_dict[child.attrib['id']]
        shape_list = []
        for shape in list(shape_):
            # transform instance
            vertices = shape.vertices
            normals = shape.normals
            vector1 = torch.ones(vertices.shape[0], 1)
            vertices = torch.cat(
                (vertices,
                 vector1.cuda() if pyredner.get_use_gpu() else vector1),
                dim=1)
            vertices = vertices @ torch.transpose(to_world, 0, 1)
            vertices = vertices / vertices[:, 3:4]
            vertices = vertices[:, 0:3].contiguous()
            if normals is not None:
                normals = normals @ (torch.inverse(
                    torch.transpose(to_world, 0, 1))[:3, :3])
                normals = normals.contiguous()
            # assert(vertices is not None)
            # assert(indices is not None)
            # lgt = None
            # if light_intensity is not None:
            #     lgt = pyredner.AreaLight(shape_id, light_intensity)
            shape_list.append(
                pyredner.Shape(vertices, shape.indices, shape.uvs, normals,
                               shape.material_id))

        return shape_list, None
    else:
        print('Shape type {} is not supported!'.format(node.attrib['type']))
        assert (False)
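# Usage sketch (not part of the original parser): parse_shape takes an
# ElementTree node from a Mitsuba-style scene file plus a material_dict that
# the surrounding code fills while parsing <bsdf> nodes; parse_transform and
# parse_vector are assumed to live in the same module, and 'scene.xml' is an
# illustrative filename.
import xml.etree.ElementTree as etree

root = etree.parse('scene.xml').getroot()
material_dict = {}  # material id -> index, normally filled elsewhere
shapes = []
area_lights = []
for node in root:
    if node.tag == 'shape':
        shape, light = parse_shape(node, material_dict, len(shapes))
        # 'obj' nodes may return a list of shapes, the other types a single Shape
        if isinstance(shape, list):
            shapes.extend(shape)
        else:
            shapes.append(shape)
        if light is not None:
            area_lights.append(light)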
Example #4
def parse_shape(node, material_dict, shape_id, device, shape_group_dict=None):
    if node.attrib['type'] == 'obj' or node.attrib['type'] == 'serialized':
        to_world = torch.eye(4)
        serialized_shape_id = 0
        mat_id = -1
        light_intensity = None
        filename = ''
        max_smooth_angle = -1
        for child in node:
            if 'name' in child.attrib:
                if child.attrib['name'] == 'filename':
                    filename = child.attrib['value']
                elif child.attrib['name'] == 'toWorld':
                    to_world = parse_transform(child)
                elif child.attrib['name'] == 'shapeIndex':
                    serialized_shape_id = int(child.attrib['value'])
                elif child.attrib['name'] == 'maxSmoothAngle':
                    max_smooth_angle = float(child.attrib['value'])
            if child.tag == 'ref':
                mat_id = material_dict[child.attrib['id']]
            elif child.tag == 'emitter':
                for grandchild in child:
                    if grandchild.attrib['name'] == 'radiance':
                        light_intensity = parse_vector(
                            grandchild.attrib['value'])
                        if light_intensity.shape[0] == 1:
                            light_intensity = torch.tensor(\
                                         [light_intensity[0],
                                          light_intensity[0],
                                          light_intensity[0]])

        if node.attrib['type'] == 'obj':
            # Load in CPU for rebuild_topology
            _, mesh_list, _ = pyredner.load_obj(filename,
                                                obj_group=False,
                                                device=torch.device('cpu'))
            vertices = mesh_list[0][1].vertices
            indices = mesh_list[0][1].indices
            uvs = mesh_list[0][1].uvs
            normals = mesh_list[0][1].normals
            uv_indices = mesh_list[0][1].uv_indices
            normal_indices = mesh_list[0][1].normal_indices
        else:
            assert (node.attrib['type'] == 'serialized')
            mitsuba_tri_mesh = redner.load_serialized(filename,
                                                      serialized_shape_id)
            vertices = torch.from_numpy(mitsuba_tri_mesh.vertices)
            indices = torch.from_numpy(mitsuba_tri_mesh.indices)
            uvs = torch.from_numpy(mitsuba_tri_mesh.uvs)
            normals = torch.from_numpy(mitsuba_tri_mesh.normals)
            if uvs.shape[0] == 0:
                uvs = None
            if normals.shape[0] == 0:
                normals = None
            uv_indices = None  # Serialized doesn't use different indices for UV & normal
            normal_indices = None

        # Transform the vertices and normals
        vertices = torch.cat((vertices, torch.ones(vertices.shape[0], 1)),
                             dim=1)
        vertices = vertices @ torch.transpose(to_world, 0, 1)
        vertices = vertices / vertices[:, 3:4]
        vertices = vertices[:, 0:3].contiguous()
        if normals is not None:
            normals = normals @ (torch.inverse(torch.transpose(to_world, 0,
                                                               1))[:3, :3])
            normals = normals.contiguous()
        assert (vertices is not None)
        assert (indices is not None)
        if max_smooth_angle >= 0:
            if normals is None:
                normals = torch.zeros_like(vertices)
            new_num_vertices = redner.rebuild_topology(\
                redner.float_ptr(vertices.data_ptr()),
                redner.int_ptr(indices.data_ptr()),
                redner.float_ptr(uvs.data_ptr() if uvs is not None else 0),
                redner.float_ptr(normals.data_ptr() if normals is not None else 0),
                redner.int_ptr(uv_indices.data_ptr() if uv_indices is not None else 0),
                int(vertices.shape[0]),
                int(indices.shape[0]),
                max_smooth_angle)
            print('Rebuilt topology, original vertices size: {}, new vertices size: {}'.format(\
                int(vertices.shape[0]), new_num_vertices))
            vertices.resize_(new_num_vertices, 3)
            if uvs is not None:
                uvs.resize_(new_num_vertices, 2)
            if normals is not None:
                normals.resize_(new_num_vertices, 3)

        lgt = None
        if light_intensity is not None:
            lgt = pyredner.AreaLight(shape_id, light_intensity)

        vertices = vertices.to(device)
        indices = indices.to(device)
        if uvs is not None:
            uvs = uvs.to(device)
        if normals is not None:
            normals = normals.to(device)
        if uv_indices is not None:
            uv_indices = uv_indices.to(device)
        if normal_indices is not None:
            normal_indices = normal_indices.to(device)
        return pyredner.Shape(vertices,
                              indices,
                              uvs=uvs,
                              normals=normals,
                              uv_indices=uv_indices,
                              normal_indices=normal_indices,
                              material_id=mat_id), lgt
    elif node.attrib['type'] == 'rectangle':
        indices = torch.tensor([[0, 2, 1], [1, 2, 3]], dtype=torch.int32)
        vertices = torch.tensor([[-1.0, -1.0, 0.0], [-1.0, 1.0, 0.0],
                                 [1.0, -1.0, 0.0], [1.0, 1.0, 0.0]])
        uvs = None
        normals = None
        to_world = torch.eye(4)
        mat_id = -1
        light_intensity = None
        for child in node:
            if 'name' in child.attrib:
                if child.attrib['name'] == 'toWorld':
                    to_world = parse_transform(child)
            if child.tag == 'ref':
                mat_id = material_dict[child.attrib['id']]
            elif child.tag == 'emitter':
                for grandchild in child:
                    if grandchild.attrib['name'] == 'radiance':
                        light_intensity = parse_vector(
                            grandchild.attrib['value'])
                        if light_intensity.shape[0] == 1:
                            light_intensity = torch.tensor(\
                                         [light_intensity[0],
                                          light_intensity[0],
                                          light_intensity[0]])
        # Transform the vertices and normals
        vertices = torch.cat((vertices, torch.ones(vertices.shape[0], 1)),
                             dim=1)
        vertices = vertices @ torch.transpose(to_world, 0, 1)
        vertices = vertices / vertices[:, 3:4]
        vertices = vertices[:, 0:3].contiguous()
        if normals is not None:
            normals = normals @ (torch.inverse(torch.transpose(to_world, 0,
                                                               1))[:3, :3])
            normals = normals.contiguous()
        assert (vertices is not None)
        assert (indices is not None)
        lgt = None
        if light_intensity is not None:
            lgt = pyredner.AreaLight(shape_id, light_intensity)

        vertices = vertices.to(device)
        indices = indices.to(device)
        if uvs is not None:
            uvs = uvs.to(device)
        if normals is not None:
            normals = normals.to(device)
        return pyredner.Shape(vertices,
                              indices,
                              uvs=uvs,
                              normals=normals,
                              material_id=mat_id), lgt
    # Add instance support
    # TODO (simply transform & create a new shape now)
    elif node.attrib['type'] == 'instance':
        shape = None
        for child in node:
            if 'name' in child.attrib:
                if child.attrib['name'] == 'toWorld':
                    to_world = parse_transform(child)
            if child.tag == 'ref':
                shape = shape_group_dict[child.attrib['id']]
        # transform instance
        vertices = shape.vertices
        normals = shape.normals
        vector1 = torch.ones(vertices.shape[0], 1, device=vertices.device)
        to_world = to_world.to(vertices.device)
        vertices = torch.cat((vertices, vector1), dim=1)
        vertices = vertices @ torch.transpose(to_world, 0, 1)
        vertices = vertices / vertices[:, 3:4]
        vertices = vertices[:, 0:3].contiguous()
        if normals is not None:
            normals = normals @ (torch.inverse(torch.transpose(to_world, 0,
                                                               1))[:3, :3])
            normals = normals.contiguous()
        # assert(vertices is not None)
        # assert(indices is not None)
        # lgt = None
        # if light_intensity is not None:
        #     lgt = pyredner.AreaLight(shape_id, light_intensity)

        return pyredner.Shape(vertices,
                              shape.indices,
                              uvs=shape.uvs,
                              normals=normals,
                              material_id=shape.material_id), None
    else:
        print('Shape type {} is not supported!'.format(node.attrib['type']))
        assert (False)
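# Calling the device-aware variant above (a sketch; node, material_dict and the
# parse_transform/parse_vector helpers come from the surrounding parser module).
import torch

device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
shape, light = parse_shape(node, material_dict, shape_id=0, device=device)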
Example #5
blocker_vertices = torch.tensor(\
    [[-0.5,10.0,-0.5],[-0.5,10.0,0.5],[0.5,10.0,-0.5],[0.5,10.0,0.5]],
    device = pyredner.get_device())
blocker_indices = torch.tensor([[0,1,2], [1,3,2]],
    device = pyredner.get_device(), dtype = torch.int32)
shape_blocker = pyredner.Shape(blocker_vertices, blocker_indices, 0)
light_vertices = torch.tensor(\
    [[-0.1,15,-0.1],[-0.1,15,0.1],[0.1,15,-0.1],[0.1,15,0.1]],
    device = pyredner.get_device())
light_indices = torch.tensor([[0,2,1], [1,2,3]],
    device = pyredner.get_device(), dtype = torch.int32)
shape_light = pyredner.Shape(light_vertices, light_indices, 1)
shapes = [shape_floor, shape_blocker, shape_light]
light_intensity = torch.tensor([5000.0, 5000.0, 5000.0])
# The first argument is the shape id of the light
light = pyredner.AreaLight(2, light_intensity)
area_lights = [light]
scene = pyredner.Scene(cam, shapes, materials, area_lights)
args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 256,
    max_bounces = 1)

# Alias of the render function
render = pyredner.RenderFunction.apply
# Render our target
img = render(0, *args)
pyredner.imwrite(img.cpu(), 'results/test_shadow_camera/target.exr')
pyredner.imwrite(img.cpu(), 'results/test_shadow_camera/target.png')
target = pyredner.imread('results/test_shadow_camera/target.exr')
if pyredner.get_use_gpu():
Example #6
def parse_shape(node, material_dict, shape_id, shape_group_dict=None):
    if node.attrib['type'] == 'obj' or node.attrib['type'] == 'serialized':
        to_world = torch.eye(4)
        serialized_shape_id = 0
        mat_id = -1
        light_intensity = None
        filename = ''
        for child in node:
            if 'name' in child.attrib:
                if child.attrib['name'] == 'filename':
                    filename = child.attrib['value']
                elif child.attrib['name'] == 'toWorld':
                    to_world = parse_transform(child)
                elif child.attrib['name'] == 'shapeIndex':
                    serialized_shape_id = int(child.attrib['value'])
            if child.tag == 'ref':
                mat_id = material_dict[child.attrib['id']]
            elif child.tag == 'emitter':
                for grandchild in child:
                    if grandchild.attrib['name'] == 'radiance':
                        light_intensity = parse_vector(
                            grandchild.attrib['value'])
                        if light_intensity.shape[0] == 1:
                            light_intensity = torch.tensor(\
                                         [light_intensity[0],
                                          light_intensity[0],
                                          light_intensity[0]])

        if node.attrib['type'] == 'obj':
            _, mesh_list, _ = pyredner.load_obj(filename)
            vertices = mesh_list[0][1].vertices.cpu()
            indices = mesh_list[0][1].indices.cpu()
            uvs = mesh_list[0][1].uvs
            normals = mesh_list[0][1].normals
            if uvs is not None:
                uvs = uvs.cpu()
            if normals is not None:
                normals = normals.cpu()
        else:
            assert (node.attrib['type'] == 'serialized')
            mitsuba_tri_mesh = redner.load_serialized(filename,
                                                      serialized_shape_id)
            vertices = torch.from_numpy(mitsuba_tri_mesh.vertices)
            indices = torch.from_numpy(mitsuba_tri_mesh.indices)
            uvs = torch.from_numpy(mitsuba_tri_mesh.uvs)
            normals = torch.from_numpy(mitsuba_tri_mesh.normals)
            if uvs.shape[0] == 0:
                uvs = None
            if normals.shape[0] == 0:
                normals = None

        # Transform the vertices and normals
        vertices = torch.cat((vertices, torch.ones(vertices.shape[0], 1)),
                             dim=1)
        vertices = vertices @ torch.transpose(to_world, 0, 1)
        vertices = vertices / vertices[:, 3:4]
        vertices = vertices[:, 0:3].contiguous()
        if normals is not None:
            normals = normals @ (torch.inverse(torch.transpose(to_world, 0,
                                                               1))[:3, :3])
            normals = normals.contiguous()
        assert (vertices is not None)
        assert (indices is not None)
        lgt = None
        if light_intensity is not None:
            lgt = pyredner.AreaLight(shape_id, light_intensity)

        if pyredner.get_use_gpu():
            # Copy to GPU
            vertices = vertices.cuda()
            indices = indices.cuda()
            if uvs is not None:
                uvs = uvs.cuda()
            if normals is not None:
                normals = normals.cuda()
        return pyredner.Shape(vertices, indices, uvs, normals, mat_id), lgt
    elif node.attrib['type'] == 'rectangle':
        indices = torch.tensor([[0, 2, 1], [1, 2, 3]], dtype=torch.int32)
        vertices = torch.tensor([[-1.0, -1.0, 0.0], [-1.0, 1.0, 0.0],
                                 [1.0, -1.0, 0.0], [1.0, 1.0, 0.0]])
        uvs = None
        normals = None
        to_world = torch.eye(4)
        mat_id = -1
        light_intensity = None
        for child in node:
            if 'name' in child.attrib:
                if child.attrib['name'] == 'toWorld':
                    to_world = parse_transform(child)
            if child.tag == 'ref':
                mat_id = material_dict[child.attrib['id']]
            elif child.tag == 'emitter':
                for grandchild in child:
                    if grandchild.attrib['name'] == 'radiance':
                        light_intensity = parse_vector(
                            grandchild.attrib['value'])
                        if light_intensity.shape[0] == 1:
                            light_intensity = torch.tensor(\
                                         [light_intensity[0],
                                          light_intensity[0],
                                          light_intensity[0]])
        # Transform the vertices and normals
        vertices = torch.cat((vertices, torch.ones(vertices.shape[0], 1)),
                             dim=1)
        vertices = vertices @ torch.transpose(to_world, 0, 1)
        vertices = vertices / vertices[:, 3:4]
        vertices = vertices[:, 0:3].contiguous()
        if normals is not None:
            normals = normals @ (torch.inverse(torch.transpose(to_world, 0,
                                                               1))[:3, :3])
            normals = normals.contiguous()
        assert (vertices is not None)
        assert (indices is not None)
        lgt = None
        if light_intensity is not None:
            lgt = pyredner.AreaLight(shape_id, light_intensity)

        if pyredner.get_use_gpu():
            # Copy to GPU
            vertices = vertices.cuda()
            indices = indices.cuda()
            if uvs is not None:
                uvs = uvs.cuda()
            if normals is not None:
                normals = normals.cuda()
        return pyredner.Shape(vertices, indices, uvs, normals, mat_id), lgt
    # Add instance support
    # TODO (simply transform & create a new shape now)
    elif node.attrib['type'] == 'instance':
        shape = None
        for child in node:
            if 'name' in child.attrib:
                if child.attrib['name'] == 'toWorld':
                    to_world = parse_transform(child)
                    if pyredner.get_use_gpu():
                        to_world = to_world.cuda()
            if child.tag == 'ref':
                shape = shape_group_dict[child.attrib['id']]
        # transform instance
        vertices = shape.vertices
        normals = shape.normals
        vector1 = torch.ones(vertices.shape[0], 1)
        vertices = torch.cat(
            (vertices, vector1.cuda() if pyredner.get_use_gpu() else vector1),
            dim=1)
        vertices = vertices @ torch.transpose(to_world, 0, 1)
        vertices = vertices / vertices[:, 3:4]
        vertices = vertices[:, 0:3].contiguous()
        if normals is not None:
            normals = normals @ (torch.inverse(torch.transpose(to_world, 0,
                                                               1))[:3, :3])
            normals = normals.contiguous()
        # assert(vertices is not None)
        # assert(indices is not None)
        # lgt = None
        # if light_intensity is not None:
        #     lgt = pyredner.AreaLight(shape_id, light_intensity)

        return pyredner.Shape(vertices, shape.indices, shape.uvs, normals,
                              shape.material_id), None
    else:
        print('Shape type {} is not supported!'.format(node.attrib['type']))
        assert (False)
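# Sketch of how the shape_group_dict used by the 'instance' branch above could
# be built (an assumption about the surrounding parser, not shown in the
# original: here each <shapegroup> id maps to a single pre-parsed Shape, and
# 'scene.xml' is an illustrative filename).
import xml.etree.ElementTree as etree

root = etree.parse('scene.xml').getroot()
material_dict = {}  # filled while parsing <bsdf> nodes
shape_group_dict = {}
for node in root:
    if node.tag == 'shape' and node.attrib.get('type') == 'shapegroup':
        inner = node.find('shape')
        parsed, _ = parse_shape(inner, material_dict, shape_id=0)
        shape_group_dict[node.attrib['id']] = parsed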
Example #7
for obj in range(1, num_iters):
    target_obj1 = pyredner.load_obj('results/shadow_art/multitarget/' + step + '/iter_' + str(obj) + '.obj', return_objects=True)

    target_obj1[0].material = pyredner.Material(diffuse_reflectance=torch.tensor([1.0, 1.0, 1.0]), two_sided=True)

    target_obj1[0].normals = pyredner.compute_vertex_normal(target_obj1[0].vertices, target_obj1[0].indices)

    shapes = []
    shapes.append(target_obj1[0])

    numShapes = len(shapes)
    shapes.extend(lights)

    area_lights = []
    for i in range(numShapes, len(shapes)):
        area_lights.append(pyredner.AreaLight(shape_id = numShapes, intensity = torch.tensor([light_intensity, light_intensity, light_intensity])))
        area_lights.append(pyredner.AreaLight(shape_id = numShapes, intensity = torch.tensor([light_intensity*10, light_intensity*10, light_intensity*10])))

    scene = pyredner.Scene(cam, objects = [shapes[0], shapes[1]], area_lights = [area_lights[0]], envmap = None)
    scene_intense = pyredner.Scene(cam, objects = [shapes[0], shapes[1]], area_lights = [area_lights[1]], envmap = None)

    target = pyredner.render_pathtracing(scene = [scene], num_samples=(512, 0), max_bounces=1)[0]
    pyredner.imwrite(target.cpu(), 'results/shadow_art/high_res/' + step + '/' + str(obj) + '_0.png')

    area_lights = []
    for i in range(numShapes, len(shapes)):
        area_lights.append(pyredner.AreaLight(shape_id = numShapes, intensity = torch.tensor([light_intensity, light_intensity, light_intensity])))
        area_lights.append(pyredner.AreaLight(shape_id = numShapes, intensity = torch.tensor([light_intensity*10, light_intensity*10, light_intensity*10])))

    shape0_vertices = shapes[0].vertices.clone()
    shapes[0].vertices = \
Example #8
        vertices = mesh.vertices,
        indices = mesh.indices,
        material_id = 0,
        uvs = mesh.uvs,
        normals = mesh.normals,
        uv_indices = mesh.uv_indices))

numShapes = len(shapes)
print("NUM SHAPES", numShapes)
shapes.extend(lights)

area_lights = []
for i in range(numShapes, len(shapes)):
    area_lights.append(
        pyredner.AreaLight(shape_id=numShapes,
                           intensity=torch.tensor([
                               lightIntensity, lightIntensity, lightIntensity
                           ])))
    area_lights.append(
        pyredner.AreaLight(shape_id=numShapes,
                           intensity=torch.tensor([
                               lightIntensity * 10, lightIntensity * 10,
                               lightIntensity * 10
                           ])))

# FOR SCENE DEBUGGING
envmap = pyredner.imread('sunsky.exr')
if pyredner.get_use_gpu():
    envmap = envmap.cuda()
envmap = pyredner.EnvironmentMap(envmap)
envmap = None
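# If the environment map were kept (i.e. envmap were not reset to None above),
# it would be passed to the scene like this (a sketch; cam, shapes, materials
# and area_lights are defined earlier in the script):
# scene = pyredner.Scene(cam, shapes, materials, area_lights, envmap = envmap)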