Example #1
import torch
import pyredner
import redner


# parse_transform and parse_vector are helper functions defined elsewhere in
# this loader.
def parse_shape(node, material_dict, shape_id):
    if node.attrib['type'] == 'obj' or node.attrib['type'] == 'serialized':
        to_world = torch.eye(4)
        serialized_shape_id = 0
        mat_id = -1
        light_intensity = None
        filename = ''
        for child in node:
            if 'name' in child.attrib:
                if child.attrib['name'] == 'filename':
                    filename = child.attrib['value']
                elif child.attrib['name'] == 'toWorld':
                    to_world = parse_transform(child)
                elif child.attrib['name'] == 'shapeIndex':
                    serialized_shape_id = int(child.attrib['value'])
            if child.tag == 'ref':
                mat_id = material_dict[child.attrib['id']]
            elif child.tag == 'emitter':
                for grandchild in child:
                    if grandchild.attrib['name'] == 'radiance':
                        light_intensity = parse_vector(
                            grandchild.attrib['value'])
                        if light_intensity.shape[0] == 1:
                            light_intensity = torch.tensor(\
                                         [light_intensity[0],
                                          light_intensity[0],
                                          light_intensity[0]])

        if node.attrib['type'] == 'obj':
            vertices, indices, uvs, normals = pyredner.load_obj(filename)
        else:
            assert (node.attrib['type'] == 'serialized')
            mitsuba_tri_mesh = redner.load_serialized(filename,
                                                      serialized_shape_id)
            vertices = torch.from_numpy(mitsuba_tri_mesh.vertices)
            indices = torch.from_numpy(mitsuba_tri_mesh.indices)
            uvs = torch.from_numpy(mitsuba_tri_mesh.uvs)
            normals = torch.from_numpy(mitsuba_tri_mesh.normals)
            if uvs.shape[0] == 0:
                uvs = None
            if normals.shape[0] == 0:
                normals = None

        # Transform the vertices and normals
        vertices = torch.cat((vertices, torch.ones(vertices.shape[0], 1)),
                             dim=1)
        vertices = vertices @ torch.transpose(to_world, 0, 1)
        vertices = vertices / vertices[:, 3:4]
        vertices = vertices[:, 0:3].contiguous()
        if normals is not None:
            # Normals transform with the inverse transpose of the linear part of
            # to_world; with row-vector normals that is a right-multiplication by
            # the upper-left 3x3 block of the inverse of to_world.
            normals = normals @ torch.inverse(to_world)[:3, :3]
            normals = normals.contiguous()
        assert (vertices is not None)
        assert (indices is not None)
        lgt = None
        if light_intensity is not None:
            lgt = pyredner.Light(shape_id, light_intensity)

        if pyredner.get_use_gpu():
            # Copy to GPU
            vertices = vertices.cuda()
            indices = indices.cuda()
            if uvs is not None:
                uvs = uvs.cuda()
            if normals is not None:
                normals = normals.cuda()
        return pyredner.Shape(vertices, indices, uvs, normals, mat_id), lgt
    elif node.attrib['type'] == 'rectangle':
        indices = torch.tensor([[0, 2, 1], [1, 2, 3]], dtype=torch.int32)
        vertices = torch.tensor([[-1.0, -1.0, 0.0], [-1.0, 1.0, 0.0],
                                 [1.0, -1.0, 0.0], [1.0, 1.0, 0.0]])
        uvs = None
        normals = None
        to_world = torch.eye(4)
        mat_id = -1
        light_intensity = None
        for child in node:
            if 'name' in child.attrib:
                if child.attrib['name'] == 'toWorld':
                    to_world = parse_transform(child)
            if child.tag == 'ref':
                mat_id = material_dict[child.attrib['id']]
            elif child.tag == 'emitter':
                for grandchild in child:
                    if grandchild.attrib['name'] == 'radiance':
                        light_intensity = parse_vector(
                            grandchild.attrib['value'])
                        if light_intensity.shape[0] == 1:
                            light_intensity = torch.tensor(\
                                         [light_intensity[0],
                                          light_intensity[0],
                                          light_intensity[0]])
        # Transform the vertices
        vertices = torch.cat((vertices, torch.ones(vertices.shape[0], 1)),
                             dim=1)
        vertices = vertices @ torch.transpose(to_world, 0, 1)
        vertices = vertices / vertices[:, 3:4]
        vertices = vertices[:, 0:3].contiguous()
        if normals is not None:
            # Same inverse-transpose normal transform as in the branch above.
            normals = normals @ torch.inverse(to_world)[:3, :3]
            normals = normals.contiguous()
        assert (vertices is not None)
        assert (indices is not None)
        lgt = None
        if light_intensity is not None:
            lgt = pyredner.Light(shape_id, light_intensity)

        if pyredner.get_use_gpu():
            # Copy to GPU
            vertices = vertices.cuda()
            indices = indices.cuda()
            if uvs is not None:
                uvs = uvs.cuda()
            if normals is not None:
                normals = normals.cuda()
        return pyredner.Shape(vertices, indices, uvs, normals, mat_id), lgt
    else:
        assert (False)
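
# A minimal usage sketch (not part of the original loader): parse_shape expects
# an xml.etree.ElementTree element for a Mitsuba <shape> node, a dict mapping
# material ids to indices in the material list, and the index this shape will
# get in the scene's shape list. The XML snippet and the 'mat_grey' id below
# are made up for illustration.
import xml.etree.ElementTree as etree

example_shape_xml = """
<shape type="rectangle">
    <ref id="mat_grey"/>
</shape>
"""
example_node = etree.fromstring(example_shape_xml)
example_shape, example_light = parse_shape(example_node,
                                           material_dict = {'mat_grey': 0},
                                           shape_id = 0)
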
blocker_indices = torch.tensor([[0, 1, 2], [1, 3, 2]],
                               device=pyredner.get_device(),
                               dtype=torch.int32)
shape_blocker = pyredner.Shape(blocker_vertices, blocker_indices, None, None,
                               0)
light_vertices = torch.tensor(\
    [[-0.1,15,-0.1],[-0.1,15,0.1],[0.1,15,-0.1],[0.1,15,0.1]],
    device = pyredner.get_device())
light_indices = torch.tensor([[0, 2, 1], [1, 2, 3]],
                             device=pyredner.get_device(),
                             dtype=torch.int32)
shape_light = pyredner.Shape(light_vertices, light_indices, None, None, 1)
shapes = [shape_floor, shape_blocker, shape_light]
light_intensity = torch.tensor([5000.0, 5000.0, 5000.0])
# The first argument is the shape id of the light
light = pyredner.Light(2, light_intensity)
lights = [light]
scene = pyredner.Scene(cam, shapes, materials, lights)
args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 256,
    max_bounces = 1)

# Alias of the render function
render = pyredner.RenderFunction.apply
# Render our target
img = render(0, *args)
pyredner.imwrite(img.cpu(), 'results/test_shadow_camera/target.exr')
pyredner.imwrite(img.cpu(), 'results/test_shadow_camera/target.png')
target = pyredner.imread('results/test_shadow_camera/target.exr')
if pyredner.get_use_gpu():
    target = target.cuda()

shape_light = pyredner.Shape(\
    vertices = torch.tensor([[-1.0, -1.0, -7.0],
                             [ 1.0, -1.0, -7.0],
                             [-1.0,  1.0, -7.0],
                             [ 1.0,  1.0, -7.0]], device = pyredner.get_device()),
    indices = torch.tensor([[0, 1, 2],[1, 3, 2]],
        dtype = torch.int32, device = pyredner.get_device()),
    uvs = None,
    normals = None,
    material_id = 0)
# The shape list of the scene
shapes = [shape_triangle, shape_light]

# Now we assign some of the shapes in the scene as light sources.
# Again, all light sources in the scene are stored in a Python list.
# Each light is attached to a shape by its shape id. In addition, we need to
# specify the intensity of the light, which is a length-3 float tensor on the CPU.
light = pyredner.Light(shape_id = 1, 
                       intensity = torch.tensor([20.0,20.0,20.0]))
lights = [light]
# Finally we construct our scene using all the variables we setup previously.
scene = pyredner.Scene(cam, shapes, materials, lights)
# The render function is a custom PyTorch function that takes a flat list of
# PyTorch tensors as input, so we need to serialize the scene into such a list;
# the following call does this. We also specify how many Monte Carlo samples we
# want per pixel and the number of bounces for indirect illumination here
# (one bounce means only direct illumination).
scene_args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 16,
    max_bounces = 1)

# Render the scene as our target image.
# To render the scene, we use our custom PyTorch function in pyredner/render_pytorch.py
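# A minimal sketch of that call, mirroring the earlier render in this section;
# the output path below is illustrative rather than taken from the original file.
render = pyredner.RenderFunction.apply
# The first argument is the seed for the random number generator.
img = render(0, *scene_args)
pyredner.imwrite(img.cpu(), 'results/target.exr')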