Example #1
    def __init__(self, vertices, indices, uvs, normals, material_id):
        assert (vertices.dtype == torch.float32)
        assert (indices.dtype == torch.int32)
        assert (vertices.is_contiguous())
        assert (indices.is_contiguous())
        if (uvs is not None):
            assert (uvs.dtype == torch.float32)
            assert (uvs.is_contiguous())
        if (normals is not None):
            assert (normals.dtype == torch.float32)
            assert (normals.is_contiguous())
        if pyredner.get_use_gpu():
            assert (vertices.is_cuda)
            assert (indices.is_cuda)
            assert (uvs is None or uvs.is_cuda)
            assert (normals is None or normals.is_cuda)
        else:
            assert (not vertices.is_cuda)
            assert (not indices.is_cuda)
            assert (uvs is None or not uvs.is_cuda)
            assert (normals is None or not normals.is_cuda)

        self.vertices = vertices
        self.indices = indices
        self.uvs = uvs
        self.normals = normals
        if material_id < 0:
            raise ValueError('material is not specified')
        self.material_id = material_id
        self.light_id = -1
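
A minimal construction sketch for the initializer above, assuming it belongs to pyredner.Shape and that pyredner has already been configured for CPU or GPU use; the tensors follow the dtype, contiguity and device requirements asserted in the code:

# Hypothetical usage sketch (not part of the original example).
import torch
import pyredner

device = pyredner.get_device()
vertices = torch.tensor([[0.0, 0.0, 0.0],
                         [1.0, 0.0, 0.0],
                         [0.0, 1.0, 0.0]], dtype=torch.float32, device=device)
indices = torch.tensor([[0, 1, 2]], dtype=torch.int32, device=device)
# uvs and normals are optional; material_id must be non-negative or a ValueError is raised.
shape = pyredner.Shape(vertices, indices, None, None, material_id=0)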
Example #2
def compute_uvs(vertices, indices, print_progress = True):
    """
        Args: vertices -- N x 3 float tensor
              indices -- M x 3 int tensor
        Return: uvs & uv_indices
    """
    vertices = vertices.cpu()
    indices = indices.cpu()

    uv_trimesh = redner.UVTriMesh(redner.float_ptr(vertices.data_ptr()),
                                  redner.int_ptr(indices.data_ptr()),
                                  redner.float_ptr(0),
                                  redner.int_ptr(0),
                                  int(vertices.shape[0]),
                                  0,
                                  int(indices.shape[0]))

    atlas = redner.TextureAtlas()
    num_uv_vertices = redner.automatic_uv_map([uv_trimesh], atlas, print_progress)[0]

    uvs = torch.zeros(num_uv_vertices, 2, dtype=torch.float32)
    uv_indices = torch.zeros_like(indices)
    uv_trimesh.uvs = redner.float_ptr(uvs.data_ptr())
    uv_trimesh.uv_indices = redner.int_ptr(uv_indices.data_ptr())
    uv_trimesh.num_uv_vertices = num_uv_vertices

    redner.copy_texture_atlas(atlas, [uv_trimesh])

    if pyredner.get_use_gpu():
        vertices = vertices.cuda(device = pyredner.get_device())
        indices = indices.cuda(device = pyredner.get_device())
        uvs = uvs.cuda(device = pyredner.get_device())
        uv_indices = uv_indices.cuda(device = pyredner.get_device())
    return uvs, uv_indices
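
A possible call pattern for the UV unwrapping helper above, assuming it is exposed as pyredner.compute_uvs:

# Hypothetical usage sketch (assumes the function above is available as pyredner.compute_uvs).
import torch
import pyredner

vertices = torch.tensor([[0.0, 0.0, 0.0],
                         [1.0, 0.0, 0.0],
                         [1.0, 1.0, 0.0],
                         [0.0, 1.0, 0.0]], dtype=torch.float32)
indices = torch.tensor([[0, 1, 2], [0, 2, 3]], dtype=torch.int32)
uvs, uv_indices = pyredner.compute_uvs(vertices, indices, print_progress=False)
# uvs is an N_uv x 2 float tensor and uv_indices an M x 3 int tensor aligned with indices;
# both end up on pyredner's device when the GPU is enabled.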
Example #3
def SH_reconstruct(coeffs, res):
    uv = np.mgrid[0:res[1], 0:res[0]].astype(np.float32)
    theta = torch.from_numpy((math.pi / res[1]) * (uv[1, :, :] + 0.5))
    phi = torch.from_numpy((2 * math.pi / res[0]) * (uv[0, :, :] + 0.5))
    if pyredner.get_use_gpu():
        theta = theta.cuda()
        phi = phi.cuda()
    result = torch.zeros(res[1],
                         res[0],
                         coeffs.shape[0],
                         device=pyredner.get_device())
    num_order = int(math.sqrt(coeffs.shape[1]))
    i = 0
    for l in range(num_order):
        for m in range(-l, l + 1):
            sh_factor = SH(l, m, theta, phi)
            result = result + sh_factor.view(
                sh_factor.shape[0], sh_factor.shape[1], 1) * coeffs[:, i]
            i += 1
    result = torch.max(
        result,
        torch.zeros(res[1],
                    res[0],
                    coeffs.shape[0],
                    device=pyredner.get_device()))
    return result
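
A small invocation sketch for SH_reconstruct: coeffs is indexed as [channel, coefficient] and the number of coefficients must be a perfect square (num_order squared), a convention inferred from the loop above; the SH(l, m, theta, phi) basis function is assumed to be defined in the same module:

# Hypothetical usage sketch for SH_reconstruct.
import torch
import pyredner

coeffs = torch.zeros(3, 4, device=pyredner.get_device())  # 3 channels, order 2 (l = 0, 1)
coeffs[:, 0] = 1.0                                        # keep only the constant l = 0 term
envmap_img = SH_reconstruct(coeffs, res=(64, 64))
# envmap_img has shape (res[1], res[0], 3) and is clamped to be non-negative.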
Example #4
    def __init__(self,
                 diffuse_reflectance,
                 specular_reflectance=None,
                 roughness=None,
                 diffuse_uv_scale=torch.tensor([1.0, 1.0]),
                 specular_uv_scale=torch.tensor([1.0, 1.0]),
                 roughness_uv_scale=torch.tensor([1.0, 1.0]),
                 two_sided=False):
        assert (diffuse_reflectance.is_contiguous())
        if specular_reflectance is None:
            specular_reflectance = torch.tensor([0.0, 0.0, 0.0],
                                                device=pyredner.get_device())
        else:
            assert (specular_reflectance.is_contiguous())
        if roughness is None:
            roughness = torch.tensor([1.0], device=pyredner.get_device())
        else:
            assert (roughness.is_contiguous())
        if pyredner.get_use_gpu():
            assert (diffuse_reflectance.is_cuda)
            assert (specular_reflectance.is_cuda)
            assert (roughness.is_cuda)
        else:
            assert (not diffuse_reflectance.is_cuda)
            assert (not specular_reflectance.is_cuda)
            assert (not roughness.is_cuda)

        self.diffuse_reflectance = diffuse_reflectance
        self.specular_reflectance = specular_reflectance
        self.roughness = roughness
        self.diffuse_uv_scale = diffuse_uv_scale
        self.specular_uv_scale = specular_uv_scale
        self.roughness_uv_scale = roughness_uv_scale
        self.two_sided = two_sided
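
A construction sketch for the material initializer above, under the assumption that it is pyredner.Material and that constant reflectances are passed as small tensors on pyredner's device:

# Hypothetical usage sketch for the constructor above.
import torch
import pyredner

device = pyredner.get_device()
diffuse = torch.tensor([0.8, 0.2, 0.2], device=device)   # constant RGB albedo
roughness = torch.tensor([0.05], device=device)          # single-channel roughness
mat = pyredner.Material(diffuse,
                        roughness=roughness,
                        diffuse_uv_scale=torch.tensor([2.0, 2.0]),
                        two_sided=True)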
Example #5
def parse_material(node, two_sided=False):
    node_id = None
    if 'id' in node.attrib:
        node_id = node.attrib['id']
    if node.attrib['type'] == 'diffuse':
        diffuse_reflectance = torch.tensor([0.5, 0.5, 0.5])
        diffuse_uv_scale = torch.tensor([1.0, 1.0])
        specular_reflectance = torch.tensor([0.0, 0.0, 0.0])
        specular_uv_scale = torch.tensor([1.0, 1.0])
        roughness = torch.tensor([1.0])
        for child in node:
            if child.attrib['name'] == 'reflectance':
                if child.tag == 'texture':
                    for grandchild in child:
                        if grandchild.attrib['name'] == 'filename':
                            diffuse_reflectance = pyredner.imread(
                                grandchild.attrib['value'])
                        elif grandchild.attrib['name'] == 'uscale':
                            diffuse_uv_scale[0] = float(
                                grandchild.attrib['value'])
                        elif grandchild.attrib['name'] == 'vscale':
                            diffuse_uv_scale[1] = float(
                                grandchild.attrib['value'])
                elif child.tag == 'rgb':
                    diffuse_reflectance = parse_vector(child.attrib['value'])
            elif child.attrib['name'] == 'specular':
                if child.tag == 'texture':
                    for grandchild in child:
                        if grandchild.attrib['name'] == 'filename':
                            specular_reflectance = image.imread(
                                grandchild.attrib['value'])
                        elif grandchild.attrib['name'] == 'uscale':
                            specular_uv_scale[0] = float(
                                grandchild.attrib['value'])
                        elif grandchild.attrib['name'] == 'vscale':
                            specular_uv_scale[1] = float(
                                grandchild.attrib['value'])
                elif child.tag == 'rgb':
                    specular_reflectance = parse_vector(child.attrib['value'])
            elif child.attrib['name'] == 'roughness':
                roughness = torch.tensor([float(child.attrib['value'])])
        if pyredner.get_use_gpu():
            # Copy to GPU
            diffuse_reflectance = diffuse_reflectance.cuda()
            specular_reflectance = specular_reflectance.cuda()
            roughness = roughness.cuda()
        return (node_id,
                pyredner.Material(diffuse_reflectance,
                                  diffuse_uv_scale=diffuse_uv_scale,
                                  specular_reflectance=specular_reflectance,
                                  specular_uv_scale=specular_uv_scale,
                                  roughness=roughness,
                                  two_sided=two_sided))
    elif node.attrib['type'] == 'twosided':
        ret = parse_material(node[0], True)
        return (node_id, ret[1])
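
A driving sketch for this parser, feeding it a minimal diffuse bsdf node built in memory; the XML snippet is an assumption consistent with the attributes the code reads, and parse_vector is assumed to be defined elsewhere in the module:

# Hypothetical usage sketch: parse a 'diffuse' bsdf node from an XML string.
import xml.etree.ElementTree as ET

bsdf_xml = """
<bsdf type="diffuse" id="gray_wall">
    <rgb name="reflectance" value="0.5 0.5 0.5"/>
</bsdf>
"""
node_id, material = parse_material(ET.fromstring(bsdf_xml))
# node_id == 'gray_wall'; material is a pyredner.Material with a constant diffuse reflectance.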
Example #6
def parse_scene(node):
    cam = None
    resolution = None
    materials = []
    material_dict = {}
    shapes = []
    lights = []
    shape_group_dict = {}
    envmap = None

    for child in node:
        if child.tag == 'sensor':
            cam = parse_camera(child)
        elif child.tag == 'bsdf':
            node_id, material = parse_material(child)
            if node_id is not None:
                material_dict[node_id] = len(materials)
                materials.append(material)
        # shapegroup for instancing
        elif child.tag == 'shape' and child.attrib['type'] == 'shapegroup':
            for child_s in child:
                if child_s.tag == 'shape':
                    shape_group_dict[child.attrib['id']] = parse_shape(
                        child_s, material_dict, None)[0]
        elif child.tag == 'shape':
            shape, light = parse_shape(
                child, material_dict, len(shapes), shape_group_dict
                if child.attrib['type'] == 'instance' else None)
            if isinstance(shape, list):
                shapes = shapes + shape
            else:
                shapes.append(shape)
            if light is not None:
                lights.append(light)
        # Add envmap loading support
        elif child.tag == 'emitter' and child.attrib['type'] == 'envmap':
            # read envmap params from xml
            scale = 1.0
            envmap_filename = None
            to_world = torch.eye(4)
            for child_s in child:
                if child_s.attrib['name'] == 'scale':
                    assert child_s.tag == 'float'
                    scale = float(child_s.attrib['value'])
                if child_s.attrib['name'] == 'filename':
                    assert child_s.tag == 'string'
                    envmap_filename = child_s.attrib['value']
                if child_s.attrib['name'] == 'toWorld':
                    to_world = parse_transform(child_s)
            # load envmap
            envmap = scale * pyredner.imread(envmap_filename)
            if pyredner.get_use_gpu():
                envmap = envmap.cuda()
            envmap = pyredner.EnvironmentMap(envmap, env_to_world=to_world)
    return pyredner.Scene(cam, shapes, materials, lights, envmap)
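
A sketch of how this scene parser might be driven from a Mitsuba-style XML file; the file name is a placeholder and the parse_* helpers are assumed to be defined in the same module:

# Hypothetical usage sketch: build a pyredner.Scene from a scene description on disk.
import xml.etree.ElementTree as ET

root = ET.parse('scene.xml').getroot()   # 'scene.xml' is a placeholder path
scene = parse_scene(root)
# The returned Scene bundles the camera, shapes, materials, area lights and environment map.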
Example #7
    def __init__(self,
                 vertices,
                 indices,
                 material_id,
                 uvs = None,
                 normals = None,
                 uv_indices = None,
                 normal_indices = None):
        assert(vertices.dtype == torch.float32)
        assert(indices.dtype == torch.int32)
        assert(vertices.is_contiguous())
        assert(indices.is_contiguous())
        if (uvs is not None):
            assert(uvs.dtype == torch.float32)
            assert(uvs.is_contiguous())
        if (normals is not None):
            assert(normals.dtype == torch.float32)
            assert(normals.is_contiguous())
        if (uv_indices is not None):
            assert(uv_indices.dtype == torch.int32)
            assert(uv_indices.is_contiguous())
        if (normal_indices is not None):
            assert(normal_indices.dtype == torch.int32)
            assert(normal_indices.is_contiguous())

        if pyredner.get_use_gpu():
            assert(vertices.is_cuda)
            assert(indices.is_cuda)
            assert(uvs is None or uvs.is_cuda)
            assert(normals is None or normals.is_cuda)
            assert(uv_indices is None or uv_indices.is_cuda)
            assert(normal_indices is None or normal_indices.is_cuda)
        else:
            assert(not vertices.is_cuda)
            assert(not indices.is_cuda)        
            assert(uvs is None or not uvs.is_cuda)
            assert(normals is None or not normals.is_cuda)
            assert(uv_indices is None or not uv_indices.is_cuda)
            assert(normal_indices is None or not normal_indices.is_cuda)

        self.vertices = vertices
        self.indices = indices
        self.material_id = material_id
        self.uvs = uvs
        self.normals = normals
        self.uv_indices = uv_indices
        self.normal_indices = normal_indices
        self.light_id = -1
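
Relative to Example #1, this variant also accepts per-face uv_indices and normal_indices. A sketch that combines it with the UV unwrapping helper from Example #2, assuming both are exposed through pyredner:

# Hypothetical usage sketch: generate UVs for a quad and build a shape with explicit uv indices.
import torch
import pyredner

device = pyredner.get_device()
vertices = torch.tensor([[0.0, 0.0, 0.0],
                         [1.0, 0.0, 0.0],
                         [1.0, 1.0, 0.0],
                         [0.0, 1.0, 0.0]], dtype=torch.float32, device=device)
indices = torch.tensor([[0, 1, 2], [0, 2, 3]], dtype=torch.int32, device=device)
uvs, uv_indices = pyredner.compute_uvs(vertices, indices, print_progress=False)
shape = pyredner.Shape(vertices, indices, material_id=0,
                       uvs=uvs, uv_indices=uv_indices)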
Example #8
def parse_material_bitmap(node, scale=None):
    reflectance_texture = None
    uv_scale = torch.tensor([1.0, 1.0])
    for grandchild in node:
        if grandchild.attrib['name'] == 'filename':
            reflectance_texture = pyredner.imread(
                grandchild.attrib['value'])
            if scale:
                reflectance_texture = reflectance_texture * scale
        elif grandchild.attrib['name'] == 'uscale':
            uv_scale[0] = float(grandchild.attrib['value'])
        elif grandchild.attrib['name'] == 'vscale':
            uv_scale[1] = float(grandchild.attrib['value'])
    assert reflectance_texture is not None
    if pyredner.get_use_gpu():
        uv_scale = uv_scale.cuda(device=pyredner.get_device())
    return reflectance_texture, uv_scale
Example #9
    def __init__(self, values, env_to_world=tf.eye(4, 4)):
        # Convert to constant texture if necessary
        if isinstance(values, tf.Tensor):
            values = pyredner.Texture(values)

        assert (values.texels.is_contiguous())
        assert (values.texels.dtype == tf.float32)
        if pyredner.get_use_gpu():
            assert (values.texels.is_cuda)
        else:
            assert (not values.texels.is_cuda)

        # Build sampling table
        luminance = 0.212671 * values.texels[:, :, 0] + \
                    0.715160 * values.texels[:, :, 1] + \
                    0.072169 * values.texels[:, :, 2]
        # For each y, compute CDF over x
        sample_cdf_xs_ = tf.cumsum(luminance, axis=1)
        y_weight = tf.sin(\
         math.pi * (tf.range(luminance.shape[0],
                dtype = tf.float32, name='EnvironmentMap.y_weight') + 0.5) \
             / float(luminance.shape[0]))
        # Compute CDF for x
        sample_cdf_ys_ = tf.cumsum(sample_cdf_xs_[:, -1] * y_weight, axis=0)
        pdf_norm = (luminance.shape[0] * luminance.shape[1]) / \
         (sample_cdf_ys_[-1].item() * (2 * math.pi * math.pi))
        # Normalize to [0, 1)
        sample_cdf_xs = (sample_cdf_xs_ - sample_cdf_xs_[:, 0:1]) / \
            tf.math.maximum(sample_cdf_xs_[:, (luminance.shape[1] - 1):luminance.shape[1]],
                1e-8 * tf.ones(
                    (sample_cdf_xs_.shape[0], 1)
                    )
                )
        sample_cdf_ys = (sample_cdf_ys_ - sample_cdf_ys_[0]) / \
            tf.math.maximum(sample_cdf_ys_[-1], tf.constant([1e-8]))

        self.values = values
        self.env_to_world = env_to_world
        self.world_to_env = tf.linalg.inv(env_to_world).contiguous()
        self.sample_cdf_ys = sample_cdf_ys.contiguous()
        self.sample_cdf_xs = sample_cdf_xs.contiguous()
        self.pdf_norm = pdf_norm
Example #10
    def __init__(self, values, env_to_world = torch.eye(4, 4)):
        # Convert to constant texture if necessary
        if isinstance(values, torch.Tensor):
            values = pyredner.Texture(values)

        assert(values.texels.is_contiguous())
        assert(values.texels.dtype == torch.float32)
        if pyredner.get_use_gpu():
            assert(values.texels.is_cuda)
        else:
            assert(not values.texels.is_cuda)

        assert(env_to_world.dtype == torch.float32)

        # Build sampling table
        luminance = 0.212671 * values.texels[:, :, 0] + \
                    0.715160 * values.texels[:, :, 1] + \
                    0.072169 * values.texels[:, :, 2]
        # For each y, compute CDF over x
        sample_cdf_xs_ = torch.cumsum(luminance, dim = 1)
        y_weight = torch.sin(\
            math.pi * (torch.arange(luminance.shape[0],
                dtype = torch.float32, device = luminance.device) + 0.5) \
             / float(luminance.shape[0]))
        # Compute CDF for x
        sample_cdf_ys_ = torch.cumsum(sample_cdf_xs_[:, -1] * y_weight, dim = 0)
        pdf_norm = (luminance.shape[0] * luminance.shape[1]) / \
            (sample_cdf_ys_[-1].item() * (2 * math.pi * math.pi))
        # Normalize to [0, 1)
        sample_cdf_xs = (sample_cdf_xs_ - sample_cdf_xs_[:, 0:1]) / \
            torch.max(sample_cdf_xs_[:, (luminance.shape[1] - 1):luminance.shape[1]],
                1e-8 * torch.ones(sample_cdf_xs_.shape[0], 1, device = sample_cdf_ys_.device))
        sample_cdf_ys = (sample_cdf_ys_ - sample_cdf_ys_[0]) / \
            torch.max(sample_cdf_ys_[-1], torch.tensor([1e-8], device = sample_cdf_ys_.device))

        self.values = values
        self.env_to_world = env_to_world
        self.world_to_env = torch.inverse(env_to_world).contiguous()
        self.sample_cdf_ys = sample_cdf_ys.contiguous()
        self.sample_cdf_xs = sample_cdf_xs.contiguous()
        self.pdf_norm = pdf_norm
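
A minimal construction sketch, assuming the initializer above is pyredner.EnvironmentMap and that a latitude-longitude HDR image is loaded with pyredner.imread:

# Hypothetical usage sketch for the environment map constructor above.
import pyredner

texels = pyredner.imread('env.exr')        # placeholder path; H x W x 3 float32 tensor
if pyredner.get_use_gpu():
    texels = texels.cuda(device=pyredner.get_device())
envmap = pyredner.EnvironmentMap(texels)   # a raw tensor is wrapped into a pyredner.Texture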
Example #11
    def __init__(self, vertices, indices, uvs, normals, mat_id):
        assert(vertices.is_contiguous())
        assert(indices.is_contiguous())
        assert(uvs is None or uvs.is_contiguous())
        assert(normals is None or normals.is_contiguous())
        if pyredner.get_use_gpu():
            assert(vertices.is_cuda)
            assert(indices.is_cuda)        
            assert(uvs is None or uvs.is_cuda)
            assert(normals is None or normals.is_cuda)
        else:
            assert(not vertices.is_cuda)
            assert(not indices.is_cuda)        
            assert(uvs is None or not uvs.is_cuda)
            assert(normals is None or not normals.is_cuda)

        self.vertices = vertices
        self.indices = indices
        self.uvs = uvs
        self.normals = normals
        self.mat_id = mat_id
        self.light_id = -1
Example #12
    def __init__(self,
                 diffuse_reflectance,
                 specular_reflectance = None,
                 roughness = None,
                 two_sided = False):
        if specular_reflectance is None:
            specular_reflectance = pyredner.Texture(\
                torch.tensor([0.0,0.0,0.0], device = pyredner.get_device()))
        if roughness is None:
            roughness = pyredner.Texture(\
                torch.tensor([1.0], device = pyredner.get_device()))

        # Convert to constant texture if necessary
        if isinstance(diffuse_reflectance, torch.Tensor):
            diffuse_reflectance = pyredner.Texture(diffuse_reflectance)
        if isinstance(specular_reflectance, torch.Tensor):
            specular_reflectance = pyredner.Texture(specular_reflectance)
        if isinstance(roughness, torch.Tensor):
            roughness = pyredner.Texture(roughness)

        assert(diffuse_reflectance.texels.is_contiguous())
        assert(diffuse_reflectance.texels.dtype == torch.float32)
        assert(specular_reflectance.texels.is_contiguous())
        assert(specular_reflectance.texels.dtype == torch.float32)
        assert(roughness.texels.is_contiguous())
        assert(roughness.texels.dtype == torch.float32)
        if pyredner.get_use_gpu():
            assert(diffuse_reflectance.texels.is_cuda)
            assert(specular_reflectance.texels.is_cuda)
            assert(roughness.texels.is_cuda)
        else:
            assert(not diffuse_reflectance.texels.is_cuda)
            assert(not specular_reflectance.texels.is_cuda)
            assert(not roughness.texels.is_cuda)

        self.diffuse_reflectance = diffuse_reflectance
        self.specular_reflectance = specular_reflectance
        self.roughness = roughness
        self.two_sided = two_sided
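
Because the constructor converts raw tensors into pyredner.Texture objects automatically, both a constant color and a full texture image can be passed directly. A sketch of both forms, assuming this initializer is pyredner.Material:

# Hypothetical usage sketch: constant-color and textured materials.
import torch
import pyredner

device = pyredner.get_device()
flat_red = pyredner.Material(
    diffuse_reflectance=torch.tensor([0.9, 0.1, 0.1], device=device))

albedo = pyredner.imread('albedo.png')     # placeholder path; H x W x 3 float32 tensor
if pyredner.get_use_gpu():
    albedo = albedo.cuda(device=device)
textured = pyredner.Material(
    diffuse_reflectance=pyredner.Texture(albedo),
    roughness=torch.tensor([0.2], device=device),
    two_sided=True)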
Example #13
    def __init__(self,
                 diffuse_reflectance,
                 specular_reflectance=None,
                 roughness=None,
                 two_sided=False):
        if specular_reflectance is None:
            specular_reflectance = pyredner.Texture(\
                torch.tensor([0.0,0.0,0.0], device = pyredner.get_device()))
        if roughness is None:
            roughness = pyredner.Texture(\
                torch.tensor([1.0], device = pyredner.get_device()))

        # Convert to constant texture if necessary
        if isinstance(diffuse_reflectance, torch.Tensor):
            diffuse_reflectance = pyredner.Texture(diffuse_reflectance)
        if isinstance(specular_reflectance, torch.Tensor):
            specular_reflectance = pyredner.Texture(specular_reflectance)
        if isinstance(roughness, torch.Tensor):
            roughness = pyredner.Texture(roughness)

        assert (diffuse_reflectance.texels.is_contiguous())
        assert (diffuse_reflectance.texels.dtype == torch.float32)
        assert (specular_reflectance.texels.is_contiguous())
        assert (specular_reflectance.texels.dtype == torch.float32)
        assert (roughness.texels.is_contiguous())
        assert (roughness.texels.dtype == torch.float32)
        if pyredner.get_use_gpu():
            assert (diffuse_reflectance.texels.is_cuda)
            assert (specular_reflectance.texels.is_cuda)
            assert (roughness.texels.is_cuda)
        else:
            assert (not diffuse_reflectance.texels.is_cuda)
            assert (not specular_reflectance.texels.is_cuda)
            assert (not roughness.texels.is_cuda)

        self.diffuse_reflectance = diffuse_reflectance
        self.specular_reflectance = specular_reflectance
        self.roughness = roughness
        self.two_sided = two_sided
Example #14
def parse_material(node, two_sided=False):
    def parse_material_bitmap(node, scale=None):
        reflectance_texture = None
        uv_scale = torch.tensor([1.0, 1.0])
        for grandchild in node:
            if grandchild.attrib['name'] == 'filename':
                reflectance_texture = pyredner.imread(
                    grandchild.attrib['value'])
                if scale:
                    reflectance_texture = reflectance_texture * scale
            elif grandchild.attrib['name'] == 'uscale':
                uv_scale[0] = float(grandchild.attrib['value'])
            elif grandchild.attrib['name'] == 'vscale':
                uv_scale[1] = float(grandchild.attrib['value'])
        assert reflectance_texture is not None
        return reflectance_texture, uv_scale

    # Support the Mitsuba 'scale' texture plugin
    def parse_texture(node):
        if node.attrib['type'] == 'scale':
            scale_value = None
            for grandchild in node:
                if grandchild.attrib[
                        'name'] == 'scale' and grandchild.tag == 'float':
                    scale_value = float(grandchild.attrib['value'])
                elif grandchild.attrib[
                        'type'] == 'bitmap' and grandchild.tag == 'texture':
                    assert scale_value is not None  # the 'scale' float must be declared before the 'bitmap' texture
                    return parse_material_bitmap(grandchild, scale_value)
                else:
                    raise NotImplementedError(
                        'Unsupported scale param type {}'.format(
                            grandchild.attrib['type']))
        elif node.attrib['type'] == 'bitmap':
            return parse_material_bitmap(node)
        else:
            raise NotImplementedError('Unsupported Texture type {}'.format(
                node.attrib['type']))

    node_id = None
    if 'id' in node.attrib:
        node_id = node.attrib['id']
    if node.attrib['type'] == 'diffuse':
        diffuse_reflectance = torch.tensor([0.5, 0.5, 0.5])
        diffuse_uv_scale = torch.tensor([1.0, 1.0])
        specular_reflectance = torch.tensor([0.0, 0.0, 0.0])
        specular_uv_scale = torch.tensor([1.0, 1.0])
        roughness = torch.tensor([1.0])
        for child in node:
            if child.attrib['name'] == 'reflectance':
                if child.tag == 'texture':
                    diffuse_reflectance, diffuse_uv_scale = parse_texture(
                        child)
                elif child.tag == 'rgb' or child.tag == 'spectrum':
                    diffuse_reflectance = parse_vector(child.attrib['value'])
            elif child.attrib['name'] == 'specular':
                if child.tag == 'texture':
                    specular_reflectance, specular_uv_scale = parse_texture(
                        child)
                elif child.tag == 'rgb' or child.tag == 'spectrum':
                    specular_reflectance = parse_vector(child.attrib['value'])
            elif child.attrib['name'] == 'roughness':
                roughness = torch.tensor([float(child.attrib['value'])])
        if pyredner.get_use_gpu():
            # Copy to GPU
            diffuse_reflectance = diffuse_reflectance.cuda()
            specular_reflectance = specular_reflectance.cuda()
            roughness = roughness.cuda()
        return (node_id, pyredner.Material(\
                diffuse_reflectance = pyredner.Texture(diffuse_reflectance, diffuse_uv_scale),
                specular_reflectance = pyredner.Texture(specular_reflectance, specular_uv_scale),
                roughness = pyredner.Texture(roughness),
                two_sided = two_sided))
    elif node.attrib['type'] == 'roughplastic':
        diffuse_reflectance = torch.tensor([0.5, 0.5, 0.5])
        diffuse_uv_scale = torch.tensor([1.0, 1.0])
        specular_reflectance = torch.tensor([0.0, 0.0, 0.0])
        specular_uv_scale = torch.tensor([1.0, 1.0])
        roughness = torch.tensor([1.0])
        roughness_uv_scale = torch.tensor([1.0, 1.0])

        for child in node:
            if child.attrib['name'] == 'diffuseReflectance':
                if child.tag == 'texture':
                    diffuse_reflectance, diffuse_uv_scale = parse_texture(
                        child)
                elif child.tag == 'rgb' or child.tag == 'spectrum':
                    diffuse_reflectance = parse_vector(child.attrib['value'])
            elif child.attrib['name'] == 'specularReflectance':
                if child.tag == 'texture':
                    specular_reflectance, specular_uv_scale = parse_texture(
                        child)
                elif child.tag == 'rgb' or child.tag == 'spectrum':
                    specular_reflectance = parse_vector(child.attrib['value'])
            elif child.attrib['name'] == 'alpha':
                # Add 'alpha texture' support
                if child.tag == 'texture':
                    # TODO: unclear whether the texture values should be squared
                    # here, as is done for the scalar alpha below
                    roughness, roughness_uv_scale = parse_texture(child)
                elif child.tag == 'float':
                    alpha = float(child.attrib['value'])
                    roughness = torch.tensor([alpha * alpha])
        if pyredner.get_use_gpu():
            # Copy to GPU
            diffuse_reflectance = diffuse_reflectance.cuda()
            specular_reflectance = specular_reflectance.cuda()
            roughness = roughness.cuda()
        return (node_id, pyredner.Material(\
                diffuse_reflectance = pyredner.Texture(diffuse_reflectance, diffuse_uv_scale),
                specular_reflectance = pyredner.Texture(specular_reflectance, specular_uv_scale),
                roughness = pyredner.Texture(roughness, roughness_uv_scale),
                two_sided = two_sided))
    elif node.attrib['type'] == 'twosided':
        ret = parse_material(node[0], True)
        return (node_id, ret[1])
    # Simply bypass the mask's opacity
    elif node.attrib['type'] == 'mask':  # TODO: add opacity support
        ret = parse_material(node[0])
        return (node_id, ret[1])
    else:
        print('Unsupported material type:', node.attrib['type'])
        assert (False)
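
A driving sketch for this parser, feeding it a small roughplastic bsdf node built in memory; the XML snippet is an assumption consistent with the attributes the code reads, and parse_vector is assumed to be defined elsewhere in the module:

# Hypothetical usage sketch: parse a 'roughplastic' bsdf node from an XML string.
import xml.etree.ElementTree as ET

bsdf_xml = """
<bsdf type="roughplastic" id="red_plastic">
    <rgb name="diffuseReflectance" value="0.8 0.1 0.1"/>
    <rgb name="specularReflectance" value="1.0 1.0 1.0"/>
    <float name="alpha" value="0.1"/>
</bsdf>
"""
node_id, material = parse_material(ET.fromstring(bsdf_xml))
# node_id == 'red_plastic'; roughness is alpha squared (0.01) wrapped in a pyredner.Texture.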
Example #15
    def backward(ctx, grad_img):
        if not grad_img.is_contiguous():
            grad_img = grad_img.contiguous()
        scene = ctx.scene
        options = ctx.options
        camera = ctx.camera

        if camera.use_look_at:
            d_cam_position = torch.zeros(3, device=pyredner.get_device())
            d_cam_look = torch.zeros(3, device=pyredner.get_device())
            d_cam_up = torch.zeros(3, device=pyredner.get_device())
            d_cam_to_world = None
            d_world_to_cam = None
        else:
            d_cam_position = None
            d_cam_look = None
            d_cam_up = None
            d_cam_to_world = torch.zeros(4, 4, device=pyredner.get_device())
            d_world_to_cam = torch.zeros(4, 4, device=pyredner.get_device())
        d_intrinsic_mat_inv = torch.zeros(3, 3, device=pyredner.get_device())
        d_intrinsic_mat = torch.zeros(3, 3, device=pyredner.get_device())
        if camera.use_look_at:
            d_camera = redner.DCamera(
                redner.float_ptr(d_cam_position.data_ptr()),
                redner.float_ptr(d_cam_look.data_ptr()),
                redner.float_ptr(d_cam_up.data_ptr()),
                redner.float_ptr(0),  # cam_to_world
                redner.float_ptr(0),  # world_to_cam
                redner.float_ptr(d_intrinsic_mat_inv.data_ptr()),
                redner.float_ptr(d_intrinsic_mat.data_ptr()))
        else:
            d_camera = redner.DCamera(
                redner.float_ptr(0),  # pos
                redner.float_ptr(0),  # look
                redner.float_ptr(0),  # up
                redner.float_ptr(d_cam_to_world.data_ptr()),
                redner.float_ptr(d_world_to_cam.data_ptr()),
                redner.float_ptr(d_intrinsic_mat_inv.data_ptr()),
                redner.float_ptr(d_intrinsic_mat.data_ptr()))
        d_vertices_list = []
        d_uvs_list = []
        d_normals_list = []
        d_colors_list = []
        d_shapes = []
        for shape in ctx.shapes:
            num_vertices = shape.num_vertices
            num_uv_vertices = shape.num_uv_vertices
            num_normal_vertices = shape.num_normal_vertices
            d_vertices = torch.zeros(num_vertices,
                                     3,
                                     device=pyredner.get_device())
            d_uvs = torch.zeros(
                num_uv_vertices, 2,
                device=pyredner.get_device()) if shape.has_uvs() else None
            d_normals = torch.zeros(
                num_normal_vertices, 3,
                device=pyredner.get_device()) if shape.has_normals() else None
            d_colors = torch.zeros(
                num_vertices, 3,
                device=pyredner.get_device()) if shape.has_colors() else None
            d_vertices_list.append(d_vertices)
            d_uvs_list.append(d_uvs)
            d_normals_list.append(d_normals)
            d_colors_list.append(d_colors)
            d_shapes.append(redner.DShape(\
                redner.float_ptr(d_vertices.data_ptr()),
                redner.float_ptr(d_uvs.data_ptr() if d_uvs is not None else 0),
                redner.float_ptr(d_normals.data_ptr() if d_normals is not None else 0),
                redner.float_ptr(d_colors.data_ptr() if d_colors is not None else 0)))

        d_diffuse_list = []
        d_diffuse_uv_scale_list = []
        d_specular_list = []
        d_specular_uv_scale_list = []
        d_roughness_list = []
        d_roughness_uv_scale_list = []
        d_generic_list = []
        d_generic_uv_scale_list = []
        d_normal_map_list = []
        d_normal_map_uv_scale_list = []
        d_materials = []
        for material in ctx.materials:
            if material.get_diffuse_size(0)[0] == 0:
                d_diffuse = [torch.zeros(3, device=pyredner.get_device())]
            else:
                d_diffuse = []
                for l in range(material.get_diffuse_levels()):
                    diffuse_size = material.get_diffuse_size(l)
                    d_diffuse.append(\
                        torch.zeros(diffuse_size[1],
                                    diffuse_size[0],
                                    3, device = pyredner.get_device()))

            if material.get_specular_size(0)[0] == 0:
                d_specular = [torch.zeros(3, device=pyredner.get_device())]
            else:
                d_specular = []
                for l in range(material.get_specular_levels()):
                    specular_size = material.get_specular_size(l)
                    d_specular.append(\
                        torch.zeros(specular_size[1],
                                    specular_size[0],
                                    3, device = pyredner.get_device()))

            if material.get_roughness_size(0)[0] == 0:
                d_roughness = [torch.zeros(1, device=pyredner.get_device())]
            else:
                d_roughness = []
                for l in range(material.get_roughness_levels()):
                    roughness_size = material.get_roughness_size(l)
                    d_roughness.append(\
                        torch.zeros(roughness_size[1],
                                    roughness_size[0],
                                    1, device = pyredner.get_device()))

            if material.get_generic_levels() == 0:
                d_generic = None
            else:
                d_generic = []
                for l in range(material.get_generic_levels()):
                    generic_size = material.get_generic_size(l)
                    d_generic.append(\
                        torch.zeros(generic_size[2],
                                    generic_size[1],
                                    generic_size[0], device = pyredner.get_device()))

            if material.get_normal_map_levels() == 0:
                d_normal_map = None
            else:
                d_normal_map = []
                for l in range(material.get_normal_map_levels()):
                    normal_map_size = material.get_normal_map_size(l)
                    d_normal_map.append(\
                        torch.zeros(normal_map_size[1],
                                    normal_map_size[0],
                                    3, device = pyredner.get_device()))

            d_diffuse_list.append(d_diffuse)
            d_specular_list.append(d_specular)
            d_roughness_list.append(d_roughness)
            d_generic_list.append(d_generic)
            d_normal_map_list.append(d_normal_map)
            d_diffuse_uv_scale = torch.zeros(2, device=pyredner.get_device())
            d_specular_uv_scale = torch.zeros(2, device=pyredner.get_device())
            d_roughness_uv_scale = torch.zeros(2, device=pyredner.get_device())
            d_diffuse_uv_scale_list.append(d_diffuse_uv_scale)
            d_specular_uv_scale_list.append(d_specular_uv_scale)
            d_roughness_uv_scale_list.append(d_roughness_uv_scale)
            if d_generic is None:
                d_generic_uv_scale = None
            else:
                d_generic_uv_scale = torch.zeros(2,
                                                 device=pyredner.get_device())
            if d_normal_map is None:
                d_normal_map_uv_scale = None
            else:
                d_normal_map_uv_scale = torch.zeros(
                    2, device=pyredner.get_device())

            d_generic_uv_scale_list.append(d_generic_uv_scale)
            d_normal_map_uv_scale_list.append(d_normal_map_uv_scale)
            if d_diffuse[0].dim() == 1:
                d_diffuse_tex = redner.Texture3(\
                    [redner.float_ptr(d_diffuse[0].data_ptr())],
                    [0],
                    [0],
                    3,
                    redner.float_ptr(d_diffuse_uv_scale.data_ptr()))
            else:
                d_diffuse_tex = redner.Texture3(\
                    [redner.float_ptr(x.data_ptr()) for x in d_diffuse],
                    [x.shape[1] for x in d_diffuse],
                    [x.shape[0] for x in d_diffuse],
                    3,
                    redner.float_ptr(d_diffuse_uv_scale.data_ptr()))

            if d_specular[0].dim() == 1:
                d_specular_tex = redner.Texture3(\
                    [redner.float_ptr(d_specular[0].data_ptr())],
                    [0],
                    [0],
                    3,
                    redner.float_ptr(d_specular_uv_scale.data_ptr()))
            else:
                d_specular_tex = redner.Texture3(\
                    [redner.float_ptr(x.data_ptr()) for x in d_specular],
                    [x.shape[1] for x in d_specular],
                    [x.shape[0] for x in d_specular],
                    3,
                    redner.float_ptr(d_specular_uv_scale.data_ptr()))

            if d_roughness[0].dim() == 1:
                d_roughness_tex = redner.Texture1(\
                    [redner.float_ptr(d_roughness[0].data_ptr())],
                    [0],
                    [0],
                    1,
                    redner.float_ptr(d_roughness_uv_scale.data_ptr()))
            else:
                d_roughness_tex = redner.Texture1(\
                    [redner.float_ptr(x.data_ptr()) for x in d_roughness],
                    [x.shape[1] for x in d_roughness],
                    [x.shape[0] for x in d_roughness],
                    1,
                    redner.float_ptr(d_roughness_uv_scale.data_ptr()))

            if d_generic is None:
                d_generic_tex = redner.TextureN(\
                    [], [], [], 0, redner.float_ptr(0))
            else:
                d_generic_tex = redner.TextureN(\
                    [redner.float_ptr(x.data_ptr()) for x in d_generic],
                    [x.shape[1] for x in d_generic],
                    [x.shape[0] for x in d_generic],
                    d_generic[0].shape[2],
                    redner.float_ptr(d_generic_uv_scale.data_ptr()))

            if d_normal_map is None:
                d_normal_map = redner.Texture3(\
                    [], [], [], 0, redner.float_ptr(0))
            else:
                d_normal_map = redner.Texture3(\
                    [redner.float_ptr(x.data_ptr()) for x in d_normal_map],
                    [x.shape[1] for x in d_normal_map],
                    [x.shape[0] for x in d_normal_map],
                    3,
                    redner.float_ptr(d_normal_map_uv_scale.data_ptr()))
            d_materials.append(redner.DMaterial(\
                d_diffuse_tex, d_specular_tex, d_roughness_tex,
                d_generic_tex, d_normal_map))

        d_intensity_list = []
        d_area_lights = []
        for light in ctx.area_lights:
            d_intensity = torch.zeros(3, device=pyredner.get_device())
            d_intensity_list.append(d_intensity)
            d_area_lights.append(\
                redner.DAreaLight(redner.float_ptr(d_intensity.data_ptr())))

        d_envmap = None
        if ctx.envmap is not None:
            envmap = ctx.envmap
            d_envmap_values = []
            for l in range(envmap.get_levels()):
                size = envmap.get_size(l)
                d_envmap_values.append(\
                    torch.zeros(size[1],
                                size[0],
                                3, device = pyredner.get_device()))
            d_envmap_uv_scale = torch.zeros(2, device=pyredner.get_device())
            d_envmap_tex = redner.Texture3(\
                [redner.float_ptr(x.data_ptr()) for x in d_envmap_values],
                [x.shape[1] for x in d_envmap_values],
                [x.shape[0] for x in d_envmap_values],
                3,
                redner.float_ptr(d_envmap_uv_scale.data_ptr()))
            d_world_to_env = torch.zeros(4, 4, device=pyredner.get_device())
            d_envmap = redner.DEnvironmentMap(\
                d_envmap_tex,
                redner.float_ptr(d_world_to_env.data_ptr()))

        d_scene = redner.DScene(
            d_camera, d_shapes, d_materials, d_area_lights, d_envmap,
            pyredner.get_use_gpu(),
            pyredner.get_device().index
            if pyredner.get_device().index is not None else -1)
        if not get_use_correlated_random_number():
            # Decouple the forward/backward random numbers by adding a big prime number
            options.seed += 1000003

        options.num_samples = ctx.num_samples[1]
        start = time.time()
        redner.render(scene, options, redner.float_ptr(0),
                      redner.float_ptr(grad_img.data_ptr()), d_scene,
                      redner.float_ptr(0))
        time_elapsed = time.time() - start
        if print_timing:
            print('Backward pass, time: %.5f s' % time_elapsed)

        # For debugging
        # pyredner.imwrite(grad_img, 'grad_img.exr')
        # grad_img = torch.ones(256, 256, 3, device = pyredner.get_device())
        # debug_img = torch.zeros(256, 256, 3)
        # start = time.time()
        # redner.render(scene, options,
        #               redner.float_ptr(0),
        #               redner.float_ptr(grad_img.data_ptr()),
        #               d_scene,
        #               redner.float_ptr(debug_img.data_ptr()))
        # time_elapsed = time.time() - start
        # if print_timing:
        #     print('Backward pass, time: %.5f s' % time_elapsed)
        # debug_img = debug_img[:, :, 0]
        # pyredner.imwrite(debug_img, 'debug.exr')
        # pyredner.imwrite(-debug_img, 'debug_.exr')
        # debug_img = debug_img.numpy()
        # print(np.max(debug_img))
        # print(np.unravel_index(np.argmax(debug_img), debug_img.shape))
        # print(np.min(debug_img))
        # print(np.unravel_index(np.argmin(debug_img), debug_img.shape))
        # print(np.sum(debug_img) / 3)
        # debug_max = 0.5
        # debug_min = -0.5
        # debug_img = np.clip((debug_img - debug_min) / (debug_max - debug_min), 0, 1)
        # # debug_img = debug_img[:, :, 0]
        # import matplotlib.cm as cm
        # debug_img = cm.viridis(debug_img)
        # skimage.io.imsave('debug.png', np.power(debug_img, 1/2.2))
        # exit()

        ret_list = []
        ret_list.append(None)  # seed
        ret_list.append(None)  # num_shapes
        ret_list.append(None)  # num_materials
        ret_list.append(None)  # num_lights
        if camera.use_look_at:
            ret_list.append(d_cam_position.cpu())
            ret_list.append(d_cam_look.cpu())
            ret_list.append(d_cam_up.cpu())
            ret_list.append(None)  # cam_to_world
            ret_list.append(None)  # world_to_cam
        else:
            ret_list.append(None)  # pos
            ret_list.append(None)  # look
            ret_list.append(None)  # up
            ret_list.append(d_cam_to_world.cpu())
            ret_list.append(d_world_to_cam.cpu())
        ret_list.append(d_intrinsic_mat_inv.cpu())
        ret_list.append(d_intrinsic_mat.cpu())
        ret_list.append(None)  # clip near
        ret_list.append(None)  # resolution
        ret_list.append(None)  # camera_type

        num_shapes = len(ctx.shapes)
        for i in range(num_shapes):
            ret_list.append(d_vertices_list[i])
            ret_list.append(None)  # indices
            ret_list.append(d_uvs_list[i])
            ret_list.append(d_normals_list[i])
            ret_list.append(None)  # uv_indices
            ret_list.append(None)  # normal_indices
            ret_list.append(d_colors_list[i])
            ret_list.append(None)  # material id
            ret_list.append(None)  # light id

        num_materials = len(ctx.materials)
        for i in range(num_materials):
            ret_list.append(None)  # num_levels
            for d_diffuse in d_diffuse_list[i]:
                ret_list.append(d_diffuse)
            ret_list.append(d_diffuse_uv_scale_list[i])
            ret_list.append(None)  # num_levels
            for d_specular in d_specular_list[i]:
                ret_list.append(d_specular)
            ret_list.append(d_specular_uv_scale_list[i])
            ret_list.append(None)  # num_levels
            for d_roughness in d_roughness_list[i]:
                ret_list.append(d_roughness)
            ret_list.append(d_roughness_uv_scale_list[i])
            if d_generic_list[i] is None:
                ret_list.append(None)  # num_levels
            else:
                ret_list.append(None)  # num_levels
                for d_generic in d_generic_list[i]:
                    ret_list.append(d_generic)
                ret_list.append(d_generic_uv_scale_list[i])
            if d_normal_map_list[i] is None:
                ret_list.append(None)  # num_levels
            else:
                ret_list.append(None)  # num_levels
                for d_normal_map in d_normal_map_list[i]:
                    ret_list.append(d_normal_map)
                ret_list.append(d_normal_map_uv_scale_list[i])
            ret_list.append(None)  # compute_specular_lighting
            ret_list.append(None)  # two sided
            ret_list.append(None)  # use_vertex_color

        num_area_lights = len(ctx.area_lights)
        for i in range(num_area_lights):
            ret_list.append(None)  # shape id
            ret_list.append(d_intensity_list[i].cpu())
            ret_list.append(None)  # two sided

        if ctx.envmap is not None:
            ret_list.append(None)  # num_levels
            for d_values in d_envmap_values:
                ret_list.append(d_values)
            ret_list.append(d_envmap_uv_scale)
            ret_list.append(None)  # env_to_world
            ret_list.append(d_world_to_env.cpu())
            ret_list.append(None)  # sample_cdf_ys
            ret_list.append(None)  # sample_cdf_xs
            ret_list.append(None)  # pdf_norm
        else:
            ret_list.append(None)

        ret_list.append(None)  # num samples
        ret_list.append(None)  # num bounces
        ret_list.append(None)  # channels
        ret_list.append(None)  # sampler type
        ret_list.append(None)  # use_primary_edge_sampling
        ret_list.append(None)  # use_secondary_edge_sampling
        ret_list.append(None)  # sample_pixel_center

        return tuple(ret_list)
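
For orientation, the backward pass above belongs to a torch.autograd.Function, so it is never called directly: the tuple it returns must supply one gradient (or None) per argument that the matching forward unpacked. A calling sketch, where RenderFunction and serialize_scene are names assumed from pyredner's public API and scene is an existing pyredner.Scene:

# Hypothetical usage sketch: gradients flow into backward() through the usual autograd entry point.
import pyredner

scene_args = pyredner.RenderFunction.serialize_scene(scene=scene,
                                                     num_samples=4,
                                                     max_bounces=1)
img = pyredner.RenderFunction.apply(0, *scene_args)   # seed = 0, then the serialized arguments
loss = img.sum()
loss.backward()   # invokes backward(ctx, grad_img) and scatters the returned gradients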
Example #16
def parse_shape(node, material_dict, shape_id, shape_group_dict=None):
    if node.attrib['type'] == 'obj' or node.attrib['type'] == 'serialized':
        print(node.attrib['id'])
        to_world = torch.eye(4)
        serialized_shape_id = 0
        mat_id = -1
        light_intensity = None
        filename = ''
        mat_name2id = {}
        for child in node:
            if 'name' in child.attrib:
                if child.attrib['name'] == 'filename':
                    filename = child.attrib['value']
                elif child.attrib['name'] == 'toWorld':
                    to_world = parse_transform(child)
                elif child.attrib['name'] == 'shapeIndex':
                    serialized_shape_id = int(child.attrib['value'])
            if child.tag == 'ref':
                mat_id = material_dict[child.attrib['id']]
                if 'name' in child.attrib and child.attrib['name'] != 'bsdf':
                    mat_name2id[child.attrib['name']] = child.attrib['id']
            elif child.tag == 'bsdf':
                # TODO: hack! fall back to the default diffuse material when encountering an inline bsdf declaration
                mat_id = 0
                # node_id, material = parse_material(child)
                # if node_id is not None:
                #     material_dict[node_id] = len(materials)
                #     materials.append(material)
            elif child.tag == 'emitter':
                for grandchild in child:
                    if grandchild.attrib['name'] == 'radiance':
                        light_intensity = parse_vector(
                            grandchild.attrib['value'])
                        if light_intensity.shape[0] == 1:
                            light_intensity = torch.tensor(\
                                         [light_intensity[0],
                                          light_intensity[0],
                                          light_intensity[0]])

        if node.attrib['type'] == 'obj':
            _, mesh_list, _ = pyredner.load_obj.load_obj_fast(
                filename, is_load_mtl=False)
            shape_list = []
            for mesh in mesh_list:
                mat_name = mesh[0]
                vertices = mesh[1].vertices.cpu()
                indices = mesh[1].indices.cpu()
                uvs = mesh[1].uvs
                normals = mesh[1].normals
                if uvs is not None:
                    uvs = uvs.cpu()
                if normals is not None:
                    normals = normals.cpu()

                # Transform the vertices and normals
                vertices = torch.cat(
                    (vertices, torch.ones(vertices.shape[0], 1)), dim=1)
                vertices = vertices @ torch.transpose(to_world, 0, 1)
                vertices = vertices / vertices[:, 3:4]
                vertices = vertices[:, 0:3].contiguous()
                if normals is not None:
                    normals = normals @ (torch.inverse(
                        torch.transpose(to_world, 0, 1))[:3, :3])
                    normals = normals.contiguous()
                assert (vertices is not None)
                assert (indices is not None)
                lgt = None
                if light_intensity is not None:
                    lgt = pyredner.AreaLight(shape_id, light_intensity)

                if pyredner.get_use_gpu():
                    # Copy to GPU
                    vertices = vertices.cuda()
                    indices = indices.cuda()
                    if uvs is not None:
                        uvs = uvs.cuda()
                    if normals is not None:
                        normals = normals.cuda()
                # Assign material
                if mat_name != '' and mat_name is not None:  # skip no material mesh
                    mat_id = material_dict[mat_name2id[mat_name]]
                shape_list.append(
                    pyredner.Shape(vertices, indices, uvs, normals, mat_id))
            return shape_list, lgt
        else:
            assert (node.attrib['type'] == 'serialized')
            mitsuba_tri_mesh = redner.load_serialized(filename,
                                                      serialized_shape_id)
            vertices = torch.from_numpy(mitsuba_tri_mesh.vertices)
            indices = torch.from_numpy(mitsuba_tri_mesh.indices)
            uvs = torch.from_numpy(mitsuba_tri_mesh.uvs)
            normals = torch.from_numpy(mitsuba_tri_mesh.normals)
            if uvs.shape[0] == 0:
                uvs = None
            if normals.shape[0] == 0:
                normals = None

            # Transform the vertices and normals
            vertices = torch.cat((vertices, torch.ones(vertices.shape[0], 1)),
                                 dim=1)
            vertices = vertices @ torch.transpose(to_world, 0, 1)
            vertices = vertices / vertices[:, 3:4]
            vertices = vertices[:, 0:3].contiguous()
            if normals is not None:
                normals = normals @ (torch.inverse(
                    torch.transpose(to_world, 0, 1))[:3, :3])
                normals = normals.contiguous()
            assert (vertices is not None)
            assert (indices is not None)
            lgt = None
            if light_intensity is not None:
                lgt = pyredner.AreaLight(shape_id, light_intensity)

            if pyredner.get_use_gpu():
                # Copy to GPU
                vertices = vertices.cuda()
                indices = indices.cuda()
                if uvs is not None:
                    uvs = uvs.cuda()
                if normals is not None:
                    normals = normals.cuda()
            return pyredner.Shape(vertices, indices, uvs, normals, mat_id), lgt
    elif node.attrib['type'] == 'rectangle':
        indices = torch.tensor([[0, 2, 1], [1, 2, 3]], dtype=torch.int32)
        vertices = torch.tensor([[-1.0, -1.0, 0.0], [-1.0, 1.0, 0.0],
                                 [1.0, -1.0, 0.0], [1.0, 1.0, 0.0]])
        uvs = None
        normals = None
        to_world = torch.eye(4)
        mat_id = -1
        light_intensity = None
        for child in node:
            if 'name' in child.attrib:
                if child.attrib['name'] == 'toWorld':
                    to_world = parse_transform(child)
            if child.tag == 'ref':
                mat_id = material_dict[child.attrib['id']]
            elif child.tag == 'emitter':
                for grandchild in child:
                    if grandchild.attrib['name'] == 'radiance':
                        light_intensity = parse_vector(
                            grandchild.attrib['value'])
                        if light_intensity.shape[0] == 1:
                            light_intensity = torch.tensor(\
                                         [light_intensity[0],
                                          light_intensity[0],
                                          light_intensity[0]])
        # Transform the vertices
        # Transform the vertices and normals
        vertices = torch.cat((vertices, torch.ones(vertices.shape[0], 1)),
                             dim=1)
        vertices = vertices @ torch.transpose(to_world, 0, 1)
        vertices = vertices / vertices[:, 3:4]
        vertices = vertices[:, 0:3].contiguous()
        if normals is not None:
            normals = normals @ (torch.inverse(torch.transpose(to_world, 0,
                                                               1))[:3, :3])
            normals = normals.contiguous()
        assert (vertices is not None)
        assert (indices is not None)
        lgt = None
        if light_intensity is not None:
            lgt = pyredner.AreaLight(shape_id, light_intensity)

        if pyredner.get_use_gpu():
            # Copy to GPU
            vertices = vertices.cuda()
            indices = indices.cuda()
            if uvs is not None:
                uvs = uvs.cuda()
            if normals is not None:
                normals = normals.cuda()
        return pyredner.Shape(vertices, indices, uvs, normals, mat_id), lgt
    # Add instance support
    # TODO (simply transform & create a new shape now)
    elif node.attrib['type'] == 'instance':
        shape = None
        for child in node:
            if 'name' in child.attrib:
                if child.attrib['name'] == 'toWorld':
                    to_world = parse_transform(child)
                    if pyredner.get_use_gpu():
                        to_world = to_world.cuda()
            if child.tag == 'ref':
                shape_ = shape_group_dict[child.attrib['id']]
        shape_list = []
        for shape in list(shape_):
            # transform instance
            vertices = shape.vertices
            normals = shape.normals
            vector1 = torch.ones(vertices.shape[0], 1)
            vertices = torch.cat(
                (vertices,
                 vector1.cuda() if pyredner.get_use_gpu() else vector1),
                dim=1)
            vertices = vertices @ torch.transpose(to_world, 0, 1)
            vertices = vertices / vertices[:, 3:4]
            vertices = vertices[:, 0:3].contiguous()
            if normals is not None:
                normals = normals @ (torch.inverse(
                    torch.transpose(to_world, 0, 1))[:3, :3])
                normals = normals.contiguous()
            # assert(vertices is not None)
            # assert(indices is not None)
            # lgt = None
            # if light_intensity is not None:
            #     lgt = pyredner.AreaLight(shape_id, light_intensity)
            shape_list.append(
                pyredner.Shape(vertices, shape.indices, shape.uvs, normals,
                               shape.material_id))

        return shape_list, None
    else:
        print('Shape type {} is not supported!'.format(node.attrib['type']))
        assert (False)
    def forward(ctx, seed, *args):
        """
            Forward rendering pass: given a serialized scene, output an image.
        """
        # Unpack arguments
        current_index = 0
        num_shapes = args[current_index]
        current_index += 1
        num_materials = args[current_index]
        current_index += 1
        num_lights = args[current_index]
        current_index += 1

        cam_position = args[current_index]
        current_index += 1
        cam_look_at = args[current_index]
        current_index += 1
        cam_up = args[current_index]
        current_index += 1
        cam_to_world = args[current_index]
        current_index += 1
        world_to_cam = args[current_index]
        current_index += 1
        intrinsic_mat_inv = args[current_index]
        current_index += 1
        intrinsic_mat = args[current_index]
        current_index += 1
        clip_near = args[current_index]
        current_index += 1
        resolution = args[current_index]
        current_index += 1
        camera_type = args[current_index]
        current_index += 1
        if cam_to_world is None:
            camera = redner.Camera(
                resolution[1],
                resolution[0],
                redner.float_ptr(cam_position.data_ptr()),
                redner.float_ptr(cam_look_at.data_ptr()),
                redner.float_ptr(cam_up.data_ptr()),
                redner.float_ptr(0),  # cam_to_world
                redner.float_ptr(0),  # world_to_cam
                redner.float_ptr(intrinsic_mat_inv.data_ptr()),
                redner.float_ptr(intrinsic_mat.data_ptr()),
                clip_near,
                camera_type)
        else:
            camera = redner.Camera(
                resolution[1],
                resolution[0],
                redner.float_ptr(0),  # cam_position
                redner.float_ptr(0),  # cam_look_at
                redner.float_ptr(0),  # cam_up
                redner.float_ptr(cam_to_world.data_ptr()),
                redner.float_ptr(world_to_cam.data_ptr()),
                redner.float_ptr(intrinsic_mat_inv.data_ptr()),
                redner.float_ptr(intrinsic_mat.data_ptr()),
                clip_near,
                camera_type)
        shapes = []
        for i in range(num_shapes):
            vertices = args[current_index]
            current_index += 1
            indices = args[current_index]
            current_index += 1
            uvs = args[current_index]
            current_index += 1
            normals = args[current_index]
            current_index += 1
            uv_indices = args[current_index]
            current_index += 1
            normal_indices = args[current_index]
            current_index += 1
            colors = args[current_index]
            current_index += 1
            material_id = args[current_index]
            current_index += 1
            light_id = args[current_index]
            current_index += 1
            assert (vertices.is_contiguous())
            assert (indices.is_contiguous())
            if uvs is not None:
                assert (uvs.is_contiguous())
            if normals is not None:
                assert (normals.is_contiguous())
            if uv_indices is not None:
                assert (uv_indices.is_contiguous())
            if normal_indices is not None:
                assert (normal_indices.is_contiguous())
            shapes.append(redner.Shape(\
                redner.float_ptr(vertices.data_ptr()),
                redner.int_ptr(indices.data_ptr()),
                redner.float_ptr(uvs.data_ptr() if uvs is not None else 0),
                redner.float_ptr(normals.data_ptr() if normals is not None else 0),
                redner.int_ptr(uv_indices.data_ptr() if uv_indices is not None else 0),
                redner.int_ptr(normal_indices.data_ptr() if normal_indices is not None else 0),
                redner.float_ptr(colors.data_ptr() if colors is not None else 0),
                int(vertices.shape[0]),
                int(uvs.shape[0]) if uvs is not None else 0,
                int(normals.shape[0]) if normals is not None else 0,
                int(indices.shape[0]),
                material_id,
                light_id))
        materials = []
        for i in range(num_materials):
            diffuse_reflectance = args[current_index]
            current_index += 1
            diffuse_uv_scale = args[current_index]
            current_index += 1
            diffuse_mesh_colors_resolution = args[current_index]
            current_index += 1
            specular_reflectance = args[current_index]
            current_index += 1
            specular_uv_scale = args[current_index]
            current_index += 1
            specular_mesh_colors_resolution = args[current_index]
            current_index += 1
            roughness = args[current_index]
            current_index += 1
            roughness_uv_scale = args[current_index]
            current_index += 1
            roughness_mesh_colors_resolution = args[current_index]
            current_index += 1
            generic_texture = args[current_index]
            current_index += 1
            generic_uv_scale = args[current_index]
            current_index += 1
            generic_mesh_colors_resolution = args[current_index]
            current_index += 1
            normal_map = args[current_index]
            current_index += 1
            normal_map_uv_scale = args[current_index]
            current_index += 1
            normal_map_mesh_colors_resolution = args[current_index]
            current_index += 1
            compute_specular_lighting = args[current_index]
            current_index += 1
            two_sided = args[current_index]
            current_index += 1
            use_vertex_color = args[current_index]
            current_index += 1

            assert (diffuse_reflectance.is_contiguous())
            if diffuse_reflectance.dim() == 1:
                num_levels = 0
                height = 0
                if diffuse_mesh_colors_resolution > 0:
                    num_levels = 1
                    height = int(diffuse_reflectance.size()[0] / 3 / int(
                        ((diffuse_mesh_colors_resolution + 1) *
                         (diffuse_mesh_colors_resolution + 2)) / 2))
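                    # (R + 1) * (R + 2) / 2 is the number of color samples a
                    # mesh colors texture stores per triangle at resolution R,
                    # each with 3 floats, so 'height' presumably works out to
                    # the number of triangles in the mesh.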

                diffuse_reflectance = redner.Texture3(\
                    redner.float_ptr(diffuse_reflectance.data_ptr()),
                    0,
                    height,
                    3,
                    num_levels,
                    diffuse_mesh_colors_resolution,
                    redner.float_ptr(diffuse_uv_scale.data_ptr()))
            else:
                diffuse_reflectance = redner.Texture3(\
                    redner.float_ptr(diffuse_reflectance.data_ptr()),
                    int(diffuse_reflectance.shape[2]), # width
                    int(diffuse_reflectance.shape[1]), # height
                    int(diffuse_reflectance.shape[3]), # channels
                    int(diffuse_reflectance.shape[0]), # num levels
                    0, # mesh_colors_resolution
                    redner.float_ptr(diffuse_uv_scale.data_ptr()))

            assert (specular_reflectance.is_contiguous())
            if specular_reflectance.dim() == 1:
                num_levels = 0
                height = 0
                if specular_mesh_colors_resolution > 0:
                    num_levels = 1
                    height = int(specular_reflectance.size()[0] / 3 / int(
                        ((specular_mesh_colors_resolution + 1) *
                         (specular_mesh_colors_resolution + 2)) / 2))

                specular_reflectance = redner.Texture3(\
                    redner.float_ptr(specular_reflectance.data_ptr()),
                    0,
                    height,
                    3,
                    num_levels,
                    specular_mesh_colors_resolution,
                    redner.float_ptr(specular_uv_scale.data_ptr()))
            else:
                specular_reflectance = redner.Texture3(\
                    redner.float_ptr(specular_reflectance.data_ptr()),
                    int(specular_reflectance.shape[2]), # width
                    int(specular_reflectance.shape[1]), # height
                    int(specular_reflectance.shape[3]), # channels
                    int(specular_reflectance.shape[0]), # num levels
                    0, # mesh_colors_resolution
                    redner.float_ptr(specular_uv_scale.data_ptr()))

            assert (roughness.is_contiguous())
            if roughness.dim() == 1:
                num_levels = 0
                height = 0
                if roughness_mesh_colors_resolution > 0:
                    num_levels = 1
                    height = int(roughness.size()[0] / int(
                        ((roughness_mesh_colors_resolution + 1) *
                         (roughness_mesh_colors_resolution + 2)) / 2))

                roughness = redner.Texture1(\
                    redner.float_ptr(roughness.data_ptr()),
                    0,
                    height,
                    1,
                    num_levels,
                    roughness_mesh_colors_resolution,
                    redner.float_ptr(roughness_uv_scale.data_ptr()))
            else:
                assert (roughness.dim() == 4)
                roughness = redner.Texture1(\
                    redner.float_ptr(roughness.data_ptr()),
                    int(roughness.shape[2]), # width
                    int(roughness.shape[1]), # height
                    int(roughness.shape[3]), # channels
                    int(roughness.shape[0]), # num levels
                    0, # mesh_colors_resolution
                    redner.float_ptr(roughness_uv_scale.data_ptr()))

            if generic_texture is not None:
                if generic_texture.dim() == 1:
                    num_levels = 0
                    height = 0
                    if generic_mesh_colors_resolution > 0:
                        num_levels = 1
                        height = int(
                            generic_texture.size()[0] /
                            int(generic_texture.shape[3]) / int(
                                ((generic_mesh_colors_resolution + 1) *
                                 (generic_mesh_colors_resolution + 2)) / 2))

                    generic_texture = redner.TextureN(\
                        redner.float_ptr(generic_texture.data_ptr()),
                        0,
                        height,
                        int(generic_texture.shape[3]),
                        num_levels,
                        generic_mesh_colors_resolution,
                        redner.float_ptr(generic_uv_scale.data_ptr()))
                else:
                    assert (generic_texture.dim() == 4)
                    generic_texture = redner.TextureN(\
                        redner.float_ptr(generic_texture.data_ptr()),
                        int(generic_texture.shape[2]), # width
                        int(generic_texture.shape[1]), # height
                        int(generic_texture.shape[3]), # channels
                        int(generic_texture.shape[0]), # num levels
                        0, # mesh_colors_resolution
                        redner.float_ptr(generic_uv_scale.data_ptr()))
            else:
                generic_texture = redner.TextureN(\
                    redner.float_ptr(0), 0, 0, 0, 0, 0, redner.float_ptr(0))

            if normal_map is not None:
                if normal_map.dim() == 1:
                    num_levels = 0
                    height = 0
                    if normal_map_mesh_colors_resolution > 0:
                        num_levels = 1
                        height = int(normal_map.size()[0] / 3 / int(
                            ((normal_map_mesh_colors_resolution + 1) *
                             (normal_map_mesh_colors_resolution + 2)) / 2))

                    normal_map = redner.Texture3(\
                        redner.float_ptr(normal_map.data_ptr()),
                        0,
                        height,
                        3,
                        num_levels,
                        normal_map_mesh_colors_resolution,
                        redner.float_ptr(normal_map_uv_scale.data_ptr()))
                else:
                    assert (normal_map.dim() == 4)
                    normal_map = redner.Texture3(\
                        redner.float_ptr(normal_map.data_ptr()),
                        int(normal_map.shape[2]), # width
                        int(normal_map.shape[1]), # height
                        int(normal_map.shape[3]), # channels
                        int(normal_map.shape[0]), # num levels
                        0, # mesh_colors_resolution
                        redner.float_ptr(normal_map_uv_scale.data_ptr()))
            else:
                normal_map = redner.Texture3(\
                    redner.float_ptr(0), 0, 0, 0, 0, 0, redner.float_ptr(0))

            materials.append(redner.Material(\
                diffuse_reflectance,
                specular_reflectance,
                roughness,
                generic_texture,
                normal_map,
                compute_specular_lighting,
                two_sided,
                use_vertex_color))

        area_lights = []
        for i in range(num_lights):
            shape_id = args[current_index]
            current_index += 1
            intensity = args[current_index]
            current_index += 1
            two_sided = args[current_index]
            current_index += 1

            area_lights.append(redner.AreaLight(\
                shape_id,
                redner.float_ptr(intensity.data_ptr()),
                two_sided))

        envmap = None
        if args[current_index] is not None:
            values = args[current_index]
            current_index += 1
            envmap_uv_scale = args[current_index]
            current_index += 1
            env_to_world = args[current_index]
            current_index += 1
            world_to_env = args[current_index]
            current_index += 1
            sample_cdf_ys = args[current_index]
            current_index += 1
            sample_cdf_xs = args[current_index]
            current_index += 1
            pdf_norm = args[current_index]
            current_index += 1
            values = redner.Texture3(\
                redner.float_ptr(values.data_ptr()),
                int(values.shape[2]), # width
                int(values.shape[1]), # height
                0, # channels
                int(values.shape[0]), # num levels
                0, # mesh_colors_resolution
                redner.float_ptr(envmap_uv_scale.data_ptr()))
            envmap = redner.EnvironmentMap(\
                values,
                redner.float_ptr(env_to_world.data_ptr()),
                redner.float_ptr(world_to_env.data_ptr()),
                redner.float_ptr(sample_cdf_ys.data_ptr()),
                redner.float_ptr(sample_cdf_xs.data_ptr()),
                pdf_norm)
        else:
            current_index += 7

        # Options
        num_samples = args[current_index]
        current_index += 1
        max_bounces = args[current_index]
        current_index += 1
        channels = args[current_index]
        current_index += 1
        sampler_type = args[current_index]
        current_index += 1
        use_primary_edge_sampling = args[current_index]
        current_index += 1
        use_secondary_edge_sampling = args[current_index]
        current_index += 1

        start = time.time()
        scene = redner.Scene(
            camera, shapes, materials, area_lights, envmap,
            pyredner.get_use_gpu(),
            pyredner.get_device().index
            if pyredner.get_device().index is not None else -1,
            use_primary_edge_sampling, use_secondary_edge_sampling)
        time_elapsed = time.time() - start
        if print_timing:
            print('Scene construction, time: %.5f s' % time_elapsed)

        # Make sure num_samples is a (forward, backward) tuple of sample counts
        if isinstance(num_samples, int):
            num_samples = (num_samples, num_samples)

        options = redner.RenderOptions(seed, num_samples[0], max_bounces,
                                       channels, sampler_type)
        num_channels = redner.compute_num_channels(
            channels, scene.max_generic_texture_dimension)
        rendered_image = torch.zeros(resolution[0],
                                     resolution[1],
                                     num_channels,
                                     device=pyredner.get_device())
        start = time.time()
        redner.render(scene, options,
                      redner.float_ptr(rendered_image.data_ptr()),
                      redner.float_ptr(0), None, redner.float_ptr(0))
        time_elapsed = time.time() - start
        if print_timing:
            print('Forward pass, time: %.5f s' % time_elapsed)

        # # For debugging
        # debug_img = torch.zeros(256, 256, 3)
        # redner.render(scene,
        #               options,
        #               redner.float_ptr(rendered_image.data_ptr()),
        #               redner.float_ptr(0),
        #               None,
        #               redner.float_ptr(debug_img.data_ptr()))
        # pyredner.imwrite(debug_img, 'debug.exr')
        # exit()

        ctx.camera = camera
        ctx.shapes = shapes
        ctx.materials = materials
        ctx.area_lights = area_lights
        ctx.envmap = envmap
        ctx.scene = scene
        ctx.options = options
        ctx.num_samples = num_samples
        ctx.args = args  # Important to prevent GC from deallocating the tensors
        return rendered_image
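# A minimal usage sketch (not part of the example above): this forward() is
# assumed to live on pyredner's RenderFunction autograd Function, with
# serialize_scene() producing the flat *args list that is unpacked above.
# The sample counts and seed are made up.
#
# import pyredner
# scene = pyredner.Scene(camera, shapes, materials, area_lights, envmap)
# scene_args = pyredner.RenderFunction.serialize_scene(
#     scene = scene, num_samples = 16, max_bounces = 1)
# img = pyredner.RenderFunction.apply(0, *scene_args)   # seed = 0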
Example #18
0
def parse_material(node, two_sided = False):
    node_id = None
    if 'id' in node.attrib:
        node_id = node.attrib['id']
    if node.attrib['type'] == 'diffuse':
        diffuse_reflectance = torch.tensor([0.5, 0.5, 0.5])
        diffuse_uv_scale = torch.tensor([1.0, 1.0])
        specular_reflectance = torch.tensor([0.0, 0.0, 0.0])
        specular_uv_scale = torch.tensor([1.0, 1.0])
        roughness = torch.tensor([1.0])
        for child in node:
            if child.attrib['name'] == 'reflectance':
                if child.tag == 'texture':
                    for grandchild in child:
                        if grandchild.attrib['name'] == 'filename':
                            diffuse_reflectance = pyredner.imread(grandchild.attrib['value'])
                        elif grandchild.attrib['name'] == 'uscale':
                            diffuse_uv_scale[0] = float(grandchild.attrib['value'])
                        elif grandchild.attrib['name'] == 'vscale':
                            diffuse_uv_scale[1] = float(grandchild.attrib['value'])
                elif child.tag == 'rgb' or child.tag == 'spectrum':
                    diffuse_reflectance = parse_vector(child.attrib['value'])
            elif child.attrib['name'] == 'specular':
                if child.tag == 'texture':
                    for grandchild in child:
                        if grandchild.attrib['name'] == 'filename':
                            specular_reflectance = pyredner.imread(grandchild.attrib['value'])
                        elif grandchild.attrib['name'] == 'uscale':
                            specular_uv_scale[0] = float(grandchild.attrib['value'])
                        elif grandchild.attrib['name'] == 'vscale':
                            specular_uv_scale[1] = float(grandchild.attrib['value'])
                elif child.tag == 'rgb' or child.tag == 'spectrum':
                    specular_reflectance = parse_vector(child.attrib['value'])
            elif child.attrib['name'] == 'roughness':
                roughness = torch.tensor([float(child.attrib['value'])])
        if pyredner.get_use_gpu():
            # Copy to GPU
            diffuse_reflectance = diffuse_reflectance.cuda()
            specular_reflectance = specular_reflectance.cuda()
            roughness = roughness.cuda()
        return (node_id, pyredner.Material(\
                diffuse_reflectance = pyredner.Texture(diffuse_reflectance, diffuse_uv_scale),
                specular_reflectance = pyredner.Texture(specular_reflectance, specular_uv_scale),
                roughness = pyredner.Texture(roughness),
                two_sided = two_sided))
    elif node.attrib['type'] == 'roughplastic':
        diffuse_reflectance = torch.tensor([0.5, 0.5, 0.5])
        diffuse_uv_scale = torch.tensor([1.0, 1.0])
        specular_reflectance = torch.tensor([0.0, 0.0, 0.0])
        specular_uv_scale = torch.tensor([1.0, 1.0])
        roughness = torch.tensor([1.0])
        for child in node:
            if child.attrib['name'] == 'diffuseReflectance':
                if child.tag == 'texture':
                    for grandchild in child:
                        if grandchild.attrib['name'] == 'filename':
                            diffuse_reflectance = pyredner.imread(grandchild.attrib['value'])
                        elif grandchild.attrib['name'] == 'uscale':
                            diffuse_uv_scale[0] = float(grandchild.attrib['value'])
                        elif grandchild.attrib['name'] == 'vscale':
                            diffuse_uv_scale[1] = float(grandchild.attrib['value'])
                elif child.tag == 'rgb' or child.tag == 'spectrum':
                    diffuse_reflectance = parse_vector(child.attrib['value'])
            elif child.attrib['name'] == 'specularReflectance':
                if child.tag == 'texture':
                    for grandchild in child:
                        if grandchild.attrib['name'] == 'filename':
                            specular_reflectance = pyredner.imread(grandchild.attrib['value'])
                        elif grandchild.attrib['name'] == 'uscale':
                            specular_uv_scale[0] = float(grandchild.attrib['value'])
                        elif grandchild.attrib['name'] == 'vscale':
                            specular_uv_scale[1] = float(grandchild.attrib['value'])
                elif child.tag == 'rgb' or child.tag == 'spectrum':
                    specular_reflectance = parse_vector(child.attrib['value'])
            elif child.attrib['name'] == 'alpha':
                alpha = float(child.attrib['value'])
                roughness = torch.tensor([alpha * alpha])
        if pyredner.get_use_gpu():
            # Copy to GPU
            diffuse_reflectance = diffuse_reflectance.cuda()
            specular_reflectance = specular_reflectance.cuda()
            roughness = roughness.cuda()
        return (node_id, pyredner.Material(\
                diffuse_reflectance = pyredner.Texture(diffuse_reflectance, diffuse_uv_scale),
                specular_reflectance = pyredner.Texture(specular_reflectance, specular_uv_scale),
                roughness = pyredner.Texture(roughness),
                two_sided = two_sided))
    elif node.attrib['type'] == 'twosided':
        ret = parse_material(node[0], True)
        return (node_id, ret[1])
    else:
        print('Unsupported material type:', node.attrib['type'])
        assert(False)
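# A minimal sketch of the Mitsuba-style <bsdf> node parse_material() expects;
# the id and reflectance value are made up, and parse_vector() is assumed to
# be the helper used elsewhere in this parser.
import xml.etree.ElementTree as ET

bsdf_xml = """
<bsdf type="diffuse" id="gray">
    <rgb name="reflectance" value="0.5 0.5 0.5"/>
</bsdf>
"""
node = ET.fromstring(bsdf_xml)
# node_id, material = parse_material(node)   # -> ('gray', pyredner.Material)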
Example #19
0
def load_obj(filename, obj_group=True):
    """
        Load a Wavefront .obj file into PyTorch tensors.
        XXX: this is slow, maybe move to C++?
    """
    vertices_pool = []
    uvs_pool = []
    normals_pool = []
    indices = []
    vertices = []
    normals = []
    uvs = []
    vertices_map = {}
    material_map = {}
    current_mtllib = {}
    current_material_name = None

    def create_mesh(indices, vertices, normals, uvs):
        indices = torch.tensor(indices,
                               dtype=torch.int32,
                               device=pyredner.get_device())
        vertices = torch.tensor(vertices, device=pyredner.get_device())
        if len(uvs) == 0:
            uvs = None
        else:
            uvs = torch.tensor(uvs, device=pyredner.get_device())
        if len(normals) == 0:
            normals = None
        else:
            normals = torch.tensor(normals, device=pyredner.get_device())
        return TriangleMesh(vertices, indices, uvs, normals)

    mesh_list = []
    light_map = {}

    f = open(filename, 'r')
    d = os.path.dirname(filename)
    cwd = os.getcwd()
    if d != '':
        os.chdir(d)
    for line in f:
        line = line.strip()
        splitted = re.split(r'\ +', line)
        if splitted[0] == 'mtllib':
            current_mtllib = load_mtl(splitted[1])
        elif splitted[0] == 'usemtl':
            if len(indices) > 0 and obj_group is True:
                # Flush
                mesh_list.append((current_material_name,
                                  create_mesh(indices, vertices, normals,
                                              uvs)))
                indices = []
                vertices = []
                normals = []
                uvs = []
                vertices_map = {}
            mtl_name = splitted[1]
            current_material_name = mtl_name
            if mtl_name not in material_map:
                m = current_mtllib[mtl_name]
                if m.map_Kd is None:
                    diffuse_reflectance = torch.tensor(
                        m.Kd,
                        dtype=torch.float32,
                        device=pyredner.get_device())
                else:
                    diffuse_reflectance = pyredner.imread(m.map_Kd)
                    if pyredner.get_use_gpu():
                        diffuse_reflectance = diffuse_reflectance.cuda()
                if m.map_Ks is None:
                    specular_reflectance = torch.tensor(
                        m.Ks,
                        dtype=torch.float32,
                        device=pyredner.get_device())
                else:
                    specular_reflectance = pyredner.imread(m.map_Ks)
                    if pyredner.get_use_gpu():
                        specular_reflectance = specular_reflectance.cuda()
                if m.map_Ns is None:
                    roughness = torch.tensor([2.0 / (m.Ns + 2.0)],
                                             dtype=torch.float32,
                                             device=pyredner.get_device())
                else:
                    roughness = 2.0 / (pyredner.imread(m.map_Ns) + 2.0)
                    if pyredner.get_use_gpu():
                        roughness = roughness.cuda()
                if m.Ke != (0.0, 0.0, 0.0):
                    light_map[mtl_name] = torch.tensor(m.Ke,
                                                       dtype=torch.float32)
                material_map[mtl_name] = pyredner.Material(\
                    diffuse_reflectance, specular_reflectance, roughness)
        elif splitted[0] == 'v':
            vertices_pool.append(
                [float(splitted[1]),
                 float(splitted[2]),
                 float(splitted[3])])
        elif splitted[0] == 'vt':
            uvs_pool.append([float(splitted[1]), float(splitted[2])])
        elif splitted[0] == 'vn':
            normals_pool.append(
                [float(splitted[1]),
                 float(splitted[2]),
                 float(splitted[3])])
        elif splitted[0] == 'f':

            def num_indices(x):
                return len(re.split('/', x))

            def get_index(x, i):
                return int(re.split('/', x)[i])

            def parse_face_index(x, i):
                f = get_index(x, i)
                if f < 0:
                    if (i == 0):
                        f += len(vertices)
                    if (i == 1):
                        f += len(uvs)
                else:
                    f -= 1
                return f

            assert (len(splitted) <= 5)

            def get_vertex_id(indices):
                pi = parse_face_index(indices, 0)
                uvi = None
                if (num_indices(indices) > 1
                        and re.split('/', indices)[1] != ''):
                    uvi = parse_face_index(indices, 1)
                ni = None
                if (num_indices(indices) > 2
                        and re.split('/', indices)[2] != ''):
                    ni = parse_face_index(indices, 2)
                key = (pi, uvi, ni)
                if key in vertices_map:
                    return vertices_map[key]

                vertex_id = len(vertices)
                vertices_map[key] = vertex_id
                vertices.append(vertices_pool[pi])
                if uvi is not None:
                    uvs.append(uvs_pool[uvi])
                if ni is not None:
                    normals.append(normals_pool[ni])
                return vertex_id

            vid0 = get_vertex_id(splitted[1])
            vid1 = get_vertex_id(splitted[2])
            vid2 = get_vertex_id(splitted[3])

            indices.append([vid0, vid1, vid2])
            if (len(splitted) == 5):
                vid3 = get_vertex_id(splitted[4])
                indices.append([vid0, vid2, vid3])

    mesh_list.append(
        (current_material_name, create_mesh(indices, vertices, normals, uvs)))
    if d != '':
        os.chdir(cwd)
    return material_map, mesh_list, light_map
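# A minimal usage sketch; 'bunny.obj' is a hypothetical file whose mtllib
# entry defines the materials referenced by its usemtl statements.
# material_map, mesh_list, light_map = load_obj('bunny.obj')
# for material_name, mesh in mesh_list:
#     print(material_name, mesh.vertices.shape, mesh.indices.shape)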
Example #20
0
    def backward(ctx, grad_img):
        if not grad_img.is_contiguous():
            grad_img = grad_img.contiguous()
        scene = ctx.scene
        options = ctx.options

        d_cam_position = torch.zeros(3)
        d_cam_look = torch.zeros(3)
        d_cam_up = torch.zeros(3)
        d_ndc_to_cam = torch.zeros(3, 3)
        d_cam_to_ndc = torch.zeros(3, 3)
        d_camera = redner.DCamera(redner.float_ptr(d_cam_position.data_ptr()),
                                  redner.float_ptr(d_cam_look.data_ptr()),
                                  redner.float_ptr(d_cam_up.data_ptr()),
                                  redner.float_ptr(d_ndc_to_cam.data_ptr()),
                                  redner.float_ptr(d_cam_to_ndc.data_ptr()))
        d_vertices_list = []
        d_uvs_list = []
        d_normals_list = []
        d_shapes = []
        for shape in ctx.shapes:
            num_vertices = shape.num_vertices
            d_vertices = torch.zeros(num_vertices,
                                     3,
                                     device=pyredner.get_device())
            d_uvs = torch.zeros(
                num_vertices, 2,
                device=pyredner.get_device()) if shape.has_uvs() else None
            d_normals = torch.zeros(
                num_vertices, 3,
                device=pyredner.get_device()) if shape.has_normals() else None
            d_vertices_list.append(d_vertices)
            d_uvs_list.append(d_uvs)
            d_normals_list.append(d_normals)
            d_shapes.append(redner.DShape(\
                redner.float_ptr(d_vertices.data_ptr()),
                redner.float_ptr(d_uvs.data_ptr() if d_uvs is not None else 0),
                redner.float_ptr(d_normals.data_ptr() if d_normals is not None else 0)))

        d_diffuse_list = []
        d_specular_list = []
        d_roughness_list = []
        d_materials = []
        for material in ctx.materials:
            diffuse_size = material.get_diffuse_size()
            specular_size = material.get_specular_size()
            roughness_size = material.get_roughness_size()
            if diffuse_size[0] == 0:
                d_diffuse = torch.zeros(3, device=pyredner.get_device())
            else:
                d_diffuse = torch.zeros(diffuse_size[2],
                                        diffuse_size[1],
                                        diffuse_size[0],
                                        3,
                                        device=pyredner.get_device())
            if specular_size[0] == 0:
                d_specular = torch.zeros(3, device=pyredner.get_device())
            else:
                d_specular = torch.zeros(specular_size[2],
                                         specular_size[1],
                                         specular_size[0],
                                         3,
                                         device=pyredner.get_device())
            if roughness_size[0] == 0:
                d_roughness = torch.zeros(1, device=pyredner.get_device())
            else:
                d_roughness = torch.zeros(roughness_size[2],
                                          roughness_size[1],
                                          roughness_size[0],
                                          1,
                                          device=pyredner.get_device())
            d_diffuse_list.append(d_diffuse)
            d_specular_list.append(d_specular)
            d_roughness_list.append(d_roughness)
            d_diffuse_uv_scale = torch.zeros(2)
            d_specular_uv_scale = torch.zeros(2)
            d_roughness_uv_scale = torch.zeros(2)
            d_diffuse_tex = redner.Texture3(\
                redner.float_ptr(d_diffuse.data_ptr()),
                diffuse_size[0], diffuse_size[1], diffuse_size[2],
                redner.float_ptr(d_diffuse_uv_scale.data_ptr()))
            d_specular_tex = redner.Texture3(\
                redner.float_ptr(d_specular.data_ptr()),
                specular_size[0], specular_size[1], specular_size[2],
                redner.float_ptr(d_specular_uv_scale.data_ptr()))
            d_roughness_tex = redner.Texture1(\
                redner.float_ptr(d_roughness.data_ptr()),
                roughness_size[0], roughness_size[1], roughness_size[2],
                redner.float_ptr(d_roughness_uv_scale.data_ptr()))
            d_materials.append(redner.DMaterial(\
                d_diffuse_tex, d_specular_tex, d_roughness_tex))

        d_intensity_list = []
        d_area_lights = []
        for light in ctx.area_lights:
            d_intensity = torch.zeros(3, device=pyredner.get_device())
            d_intensity_list.append(d_intensity)
            d_area_lights.append(\
                redner.DAreaLight(redner.float_ptr(d_intensity.data_ptr())))

        d_envmap = None
        if ctx.envmap is not None:
            envmap = ctx.envmap
            size = envmap.get_size()
            d_envmap_values = \
                torch.zeros(size[2],
                            size[1],
                            size[0],
                            3,
                            device = pyredner.get_device())
            d_envmap_uv_scale = torch.zeros(2)
            d_envmap_tex = redner.Texture3(\
                redner.float_ptr(d_envmap_values.data_ptr()),
                size[0], size[1], size[2],
                redner.float_ptr(d_envmap_uv_scale.data_ptr()))
            d_world_to_env = torch.zeros(4, 4)
            d_envmap = redner.DEnvironmentMap(\
                d_envmap_tex,
                redner.float_ptr(d_world_to_env.data_ptr()))

        d_scene = redner.DScene(
            d_camera, d_shapes, d_materials, d_area_lights, d_envmap,
            pyredner.get_use_gpu(),
            pyredner.get_device().index
            if pyredner.get_device().index is not None else -1)
        if not get_use_correlated_random_number():
            # Decouple the forward/backward random numbers by adding a big prime number
            options.seed += 1000003

        options.num_samples = ctx.num_samples[1]
        start = time.time()
        redner.render(scene, options, redner.float_ptr(0),
                      redner.float_ptr(grad_img.data_ptr()), d_scene,
                      redner.float_ptr(0))
        time_elapsed = time.time() - start
        if print_timing:
            print('Backward pass, time: %.5f s' % time_elapsed)

        # # For debugging
        # # pyredner.imwrite(grad_img, 'grad_img.exr')
        # # grad_img = torch.ones(256, 256, 3, device = pyredner.get_device())
        # debug_img = torch.zeros(256, 256, 3)
        # start = time.time()
        # redner.render(scene, options,
        #               redner.float_ptr(0),
        #               redner.float_ptr(grad_img.data_ptr()),
        #               d_scene,
        #               redner.float_ptr(debug_img.data_ptr()))
        # time_elapsed = time.time() - start
        # if print_timing:
        #     print('Backward pass, time: %.5f s' % time_elapsed)
        # pyredner.imwrite(debug_img, 'debug.exr')
        # pyredner.imwrite(-debug_img, 'debug_.exr')
        # debug_img = debug_img.numpy()
        # print(np.max(debug_img))
        # print(np.unravel_index(np.argmax(debug_img), debug_img.shape))
        # print(np.min(debug_img))
        # print(np.unravel_index(np.argmin(debug_img), debug_img.shape))
        # print(np.sum(debug_img) / 3)
        # debug_max = 0.5
        # debug_min = -0.5
        # debug_img = np.clip((debug_img - debug_min) / (debug_max - debug_min), 0, 1)
        # debug_img = debug_img[:, :, 0]
        # import matplotlib.cm as cm
        # debug_img = cm.viridis(debug_img)
        # skimage.io.imsave('debug.png', np.power(debug_img, 1/2.2))
        # exit()

        ret_list = []
        ret_list.append(None)  # seed
        ret_list.append(None)  # num_shapes
        ret_list.append(None)  # num_materials
        ret_list.append(None)  # num_lights
        ret_list.append(d_cam_position)
        ret_list.append(d_cam_look)
        ret_list.append(d_cam_up)
        ret_list.append(d_ndc_to_cam)
        ret_list.append(d_cam_to_ndc)
        ret_list.append(None)  # clip near
        ret_list.append(None)  # resolution
        ret_list.append(None)  # fisheye

        num_shapes = len(ctx.shapes)
        for i in range(num_shapes):
            ret_list.append(d_vertices_list[i])
            ret_list.append(None)  # indices
            ret_list.append(d_uvs_list[i])
            ret_list.append(d_normals_list[i])
            ret_list.append(None)  # material id
            ret_list.append(None)  # light id

        num_materials = len(ctx.materials)
        for i in range(num_materials):
            ret_list.append(d_diffuse_list[i])
            ret_list.append(None)  # diffuse_uv_scale
            ret_list.append(d_specular_list[i])
            ret_list.append(None)  # specular_uv_scale
            ret_list.append(d_roughness_list[i])
            ret_list.append(None)  # roughness_uv_scale
            ret_list.append(None)  # two sided

        num_area_lights = len(ctx.area_lights)
        for i in range(num_area_lights):
            ret_list.append(None)  # shape id
            ret_list.append(d_intensity_list[i].cpu())
            ret_list.append(None)  # two sided

        if ctx.envmap is not None:
            ret_list.append(d_envmap_values)
            ret_list.append(None)  # uv_scale
            ret_list.append(None)  # env_to_world
            ret_list.append(d_world_to_env)
            ret_list.append(None)  # sample_cdf_ys
            ret_list.append(None)  # sample_cdf_xs
            ret_list.append(None)  # pdf_norm
        else:
            ret_list.append(None)
            ret_list.append(None)
            ret_list.append(None)
            ret_list.append(None)
            ret_list.append(None)
            ret_list.append(None)
            ret_list.append(None)

        ret_list.append(None)  # num samples
        ret_list.append(None)  # num bounces
        ret_list.append(None)  # channels
        ret_list.append(None)  # sampler type
        ret_list.append(None)  # use_primary_edge_sampling
        ret_list.append(None)  # use_secondary_edge_sampling

        return tuple(ret_list)
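# A minimal sketch of how gradients flow through this backward(): mark a scene
# tensor as requiring gradients, render it through the autograd Function, and
# call .backward() on a scalar loss. 'shape', 'scene_args' and 'target' are
# placeholders standing in for a real scene setup.
#
# shape.vertices.requires_grad_()
# img = pyredner.RenderFunction.apply(0, *scene_args)
# loss = (img - target).pow(2).sum()
# loss.backward()                 # runs the backward() above
# print(shape.vertices.grad)      # d(loss) / d(vertices)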
Example #21
0
def parse_shape(node, material_dict, shape_id, shape_group_dict=None):
    if node.attrib['type'] == 'obj' or node.attrib['type'] == 'serialized':
        to_world = torch.eye(4)
        serialized_shape_id = 0
        mat_id = -1
        light_intensity = None
        filename = ''
        max_smooth_angle = -1
        for child in node:
            if 'name' in child.attrib:
                if child.attrib['name'] == 'filename':
                    filename = child.attrib['value']
                elif child.attrib['name'] == 'toWorld':
                    to_world = parse_transform(child)
                elif child.attrib['name'] == 'shapeIndex':
                    serialized_shape_id = int(child.attrib['value'])
                elif child.attrib['name'] == 'maxSmoothAngle':
                    max_smooth_angle = float(child.attrib['value'])
            if child.tag == 'ref':
                mat_id = material_dict[child.attrib['id']]
            elif child.tag == 'emitter':
                for grandchild in child:
                    if grandchild.attrib['name'] == 'radiance':
                        light_intensity = parse_vector(
                            grandchild.attrib['value'])
                        if light_intensity.shape[0] == 1:
                            light_intensity = torch.tensor(\
                                         [light_intensity[0],
                                          light_intensity[0],
                                          light_intensity[0]])

        if node.attrib['type'] == 'obj':
            _, mesh_list, _ = pyredner.load_obj(filename)
            # Convert to CPU for rebuild_topology
            vertices = mesh_list[0][1].vertices.cpu()
            indices = mesh_list[0][1].indices.cpu()
            uvs = mesh_list[0][1].uvs
            normals = mesh_list[0][1].normals
            uv_indices = mesh_list[0][1].uv_indices
            normal_indices = mesh_list[0][1].normal_indices
            if uvs is not None:
                uvs = uvs.cpu()
            if normals is not None:
                normals = normals.cpu()
            if uv_indices is not None:
                uv_indices = uv_indices.cpu()
        else:
            assert (node.attrib['type'] == 'serialized')
            mitsuba_tri_mesh = redner.load_serialized(filename,
                                                      serialized_shape_id)
            vertices = torch.from_numpy(mitsuba_tri_mesh.vertices)
            indices = torch.from_numpy(mitsuba_tri_mesh.indices)
            uvs = torch.from_numpy(mitsuba_tri_mesh.uvs)
            normals = torch.from_numpy(mitsuba_tri_mesh.normals)
            if uvs.shape[0] == 0:
                uvs = None
            if normals.shape[0] == 0:
                normals = None
            uv_indices = None  # Serialized doesn't use different indices for UV & normal
            normal_indices = None

        # Transform the vertices and normals
        vertices = torch.cat((vertices, torch.ones(vertices.shape[0], 1)),
                             dim=1)
        vertices = vertices @ torch.transpose(to_world, 0, 1)
        vertices = vertices / vertices[:, 3:4]
        vertices = vertices[:, 0:3].contiguous()
        if normals is not None:
            normals = normals @ (torch.inverse(torch.transpose(to_world, 0,
                                                               1))[:3, :3])
            normals = normals.contiguous()
        assert (vertices is not None)
        assert (indices is not None)
        if max_smooth_angle >= 0:
            if normals is None:
                normals = torch.zeros_like(vertices)
            new_num_vertices = redner.rebuild_topology(\
                redner.float_ptr(vertices.data_ptr()),
                redner.int_ptr(indices.data_ptr()),
                redner.float_ptr(uvs.data_ptr() if uvs is not None else 0),
                redner.float_ptr(normals.data_ptr() if normals is not None else 0),
                redner.int_ptr(uv_indices.data_ptr() if uv_indices is not None else 0),
                int(vertices.shape[0]),
                int(indices.shape[0]),
                max_smooth_angle)
            print('Rebuilt topology, original vertices size: {}, new vertices size: {}'.format(\
                int(vertices.shape[0]), new_num_vertices))
            vertices.resize_(new_num_vertices, 3)
            if uvs is not None:
                uvs.resize_(new_num_vertices, 2)
            if normals is not None:
                normals.resize_(new_num_vertices, 3)

        lgt = None
        if light_intensity is not None:
            lgt = pyredner.AreaLight(shape_id, light_intensity)

        if pyredner.get_use_gpu():
            # Copy to GPU
            vertices = vertices.cuda(device=pyredner.get_device())
            indices = indices.cuda(device=pyredner.get_device())
            if uvs is not None:
                uvs = uvs.cuda(device=pyredner.get_device())
            if normals is not None:
                normals = normals.cuda(device=pyredner.get_device())
            if uv_indices is not None:
                uv_indices = uv_indices.cuda(device=pyredner.get_device())
            if normal_indices is not None:
                normal_indices = normal_indices.cuda(
                    device=pyredner.get_device())
        return pyredner.Shape(vertices,
                              indices,
                              uvs=uvs,
                              normals=normals,
                              uv_indices=uv_indices,
                              normal_indices=normal_indices,
                              material_id=mat_id), lgt
    elif node.attrib['type'] == 'rectangle':
        indices = torch.tensor([[0, 2, 1], [1, 2, 3]], dtype=torch.int32)
        vertices = torch.tensor([[-1.0, -1.0, 0.0], [-1.0, 1.0, 0.0],
                                 [1.0, -1.0, 0.0], [1.0, 1.0, 0.0]])
        uvs = None
        normals = None
        to_world = torch.eye(4)
        mat_id = -1
        light_intensity = None
        for child in node:
            if 'name' in child.attrib:
                if child.attrib['name'] == 'toWorld':
                    to_world = parse_transform(child)
            if child.tag == 'ref':
                mat_id = material_dict[child.attrib['id']]
            elif child.tag == 'emitter':
                for grandchild in child:
                    if grandchild.attrib['name'] == 'radiance':
                        light_intensity = parse_vector(
                            grandchild.attrib['value'])
                        if light_intensity.shape[0] == 1:
                            light_intensity = torch.tensor(\
                                         [light_intensity[0],
                                          light_intensity[0],
                                          light_intensity[0]])
        # Transform the vertices and normals
        vertices = torch.cat((vertices, torch.ones(vertices.shape[0], 1)),
                             dim=1)
        vertices = vertices @ torch.transpose(to_world, 0, 1)
        vertices = vertices / vertices[:, 3:4]
        vertices = vertices[:, 0:3].contiguous()
        if normals is not None:
            normals = normals @ (torch.inverse(torch.transpose(to_world, 0,
                                                               1))[:3, :3])
            normals = normals.contiguous()
        assert (vertices is not None)
        assert (indices is not None)
        lgt = None
        if light_intensity is not None:
            lgt = pyredner.AreaLight(shape_id, light_intensity)

        if pyredner.get_use_gpu():
            # Copy to GPU
            vertices = vertices.cuda(device=pyredner.get_device())
            indices = indices.cuda(device=pyredner.get_device())
            if uvs is not None:
                uvs = uvs.cuda(device=pyredner.get_device())
            if normals is not None:
                normals = normals.cuda(device=pyredner.get_device())
        return pyredner.Shape(vertices,
                              indices,
                              uvs=uvs,
                              normals=normals,
                              material_id=mat_id), lgt
    # Add instance support.
    # TODO: currently we simply transform the geometry and create a new shape.
    elif node.attrib['type'] == 'instance':
        shape = None
        for child in node:
            if 'name' in child.attrib:
                if child.attrib['name'] == 'toWorld':
                    to_world = parse_transform(child)
                    if pyredner.get_use_gpu():
                        to_world = to_world.cuda()
            if child.tag == 'ref':
                shape = shape_group_dict[child.attrib['id']]
        # transform instance
        vertices = shape.vertices
        normals = shape.normals
        vector1 = torch.ones(vertices.shape[0], 1)
        vertices = torch.cat(
            (vertices, vector1.cuda() if pyredner.get_use_gpu() else vector1),
            dim=1)
        vertices = vertices @ torch.transpose(to_world, 0, 1)
        vertices = vertices / vertices[:, 3:4]
        vertices = vertices[:, 0:3].contiguous()
        if normals is not None:
            normals = normals @ (torch.inverse(torch.transpose(to_world, 0,
                                                               1))[:3, :3])
            normals = normals.contiguous()
        # assert(vertices is not None)
        # assert(indices is not None)
        # lgt = None
        # if light_intensity is not None:
        #     lgt = pyredner.AreaLight(shape_id, light_intensity)

        return pyredner.Shape(vertices,
                              shape.indices,
                              uvs=shape.uvs,
                              normals=normals,
                              material_id=shape.material_id), None
    else:
        print('Shape type {} is not supported!'.format(node.attrib['type']))
        assert (False)
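# A minimal sketch of a Mitsuba-style <shape type="obj"> node handled by the
# parser above; the filename, material id, and radiance values are made up.
import xml.etree.ElementTree as ET

shape_xml = """
<shape type="obj">
    <string name="filename" value="mesh.obj"/>
    <ref id="gray"/>
    <emitter type="area">
        <rgb name="radiance" value="10 10 10"/>
    </emitter>
</shape>
"""
node = ET.fromstring(shape_xml)
# material_dict maps material ids to indices into the scene's material list.
# shape, light = parse_shape(node, material_dict = {'gray': 0}, shape_id = 0)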
Example #22
0
position = torch.tensor([0.0, 0.0, -5.0])
look_at = torch.tensor([0.0, 0.0, 0.0])
up = torch.tensor([0.0, 1.0, 0.0])
fov = torch.tensor([45.0])
clip_near = 1e-2

resolution = (256, 256)
cam = pyredner.Camera(position = position,
                     look_at = look_at,
                     up = up,
                     fov = fov,
                     clip_near = clip_near,
                     resolution = resolution)

checkerboard_texture = pyredner.imread('checkerboard.exr')
if pyredner.get_use_gpu():
    checkerboard_texture = checkerboard_texture.cuda(device = pyredner.get_device())

mat_checkerboard = pyredner.Material(\
    diffuse_reflectance = checkerboard_texture)
mat_black = pyredner.Material(\
    diffuse_reflectance = torch.tensor([0.0, 0.0, 0.0],
    device = pyredner.get_device()))
materials = [mat_checkerboard, mat_black]
vertices = torch.tensor([[-1.0,-1.0,0.0], [-1.0,1.0,0.0], [1.0,-1.0,0.0], [1.0,1.0,0.0]],
                        device = pyredner.get_device())
indices = torch.tensor([[0, 1, 2], [1, 3, 2]], dtype = torch.int32,
                       device = pyredner.get_device())
uvs = torch.tensor([[0.05, 0.05], [0.05, 0.95], [0.95, 0.05], [0.95, 0.95]],
                   device = pyredner.get_device())
shape_plane = pyredner.Shape(vertices, indices, uvs, None, 0)
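# A minimal continuation sketch: add a small emissive quad and render the
# textured plane. The light placement and intensity are made up, and the
# Scene / serialize_scene / RenderFunction.apply calls are assumed to follow
# the same pyredner API used in the other examples.
light_vertices = torch.tensor([[-0.5, -0.5, -3.0], [-0.5, 0.5, -3.0],
                               [0.5, -0.5, -3.0], [0.5, 0.5, -3.0]],
                              device = pyredner.get_device())
light_indices = torch.tensor([[0, 1, 2], [1, 3, 2]], dtype = torch.int32,
                             device = pyredner.get_device())
shape_light = pyredner.Shape(light_vertices, light_indices, None, None, 1)
light = pyredner.AreaLight(1, torch.tensor([20.0, 20.0, 20.0]))
scene = pyredner.Scene(cam, [shape_plane, shape_light], materials, [light])
scene_args = pyredner.RenderFunction.serialize_scene(
    scene = scene, num_samples = 16, max_bounces = 1)
img = pyredner.RenderFunction.apply(0, *scene_args)   # seed = 0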
Example #23
0
    def forward(ctx,
                seed,
                *args):
        """
            Forward rendering pass: given a scene, output an image.
        """
        # Unpack arguments
        current_index = 0
        num_shapes = args[current_index]
        current_index += 1
        num_materials = args[current_index]
        current_index += 1
        num_lights = args[current_index]
        current_index += 1
        cam_position = args[current_index]
        current_index += 1
        cam_look_at = args[current_index]
        current_index += 1
        cam_up = args[current_index]
        current_index += 1
        ndc_to_cam = args[current_index]
        current_index += 1
        cam_to_ndc = args[current_index]
        current_index += 1
        clip_near = args[current_index]
        current_index += 1
        resolution = args[current_index]
        current_index += 1
        fisheye = args[current_index]
        current_index += 1
        camera = redner.Camera(resolution[1],
                               resolution[0],
                               redner.float_ptr(cam_position.data_ptr()),
                               redner.float_ptr(cam_look_at.data_ptr()),
                               redner.float_ptr(cam_up.data_ptr()),
                               redner.float_ptr(ndc_to_cam.data_ptr()),
                               redner.float_ptr(cam_to_ndc.data_ptr()),
                               clip_near,
                               fisheye)
        shapes = []
        for i in range(num_shapes):
            vertices = args[current_index]
            current_index += 1
            indices = args[current_index]
            current_index += 1
            uvs = args[current_index]
            current_index += 1
            normals = args[current_index]
            current_index += 1
            material_id = args[current_index]
            current_index += 1
            light_id = args[current_index]
            current_index += 1
            assert(vertices.is_contiguous())
            assert(indices.is_contiguous())
            if uvs is not None:
                assert(uvs.is_contiguous())
            if normals is not None:
                assert(normals.is_contiguous())
            shapes.append(redner.Shape(\
                redner.float_ptr(vertices.data_ptr()),
                redner.int_ptr(indices.data_ptr()),
                redner.float_ptr(uvs.data_ptr() if uvs is not None else 0),
                redner.float_ptr(normals.data_ptr() if normals is not None else 0),
                int(vertices.shape[0]),
                int(indices.shape[0]),
                material_id,
                light_id))
        materials = []
        for i in range(num_materials):
            diffuse_reflectance = args[current_index]
            current_index += 1
            diffuse_uv_scale = args[current_index]
            current_index += 1
            specular_reflectance = args[current_index]
            current_index += 1
            specular_uv_scale = args[current_index]
            current_index += 1
            roughness = args[current_index]
            current_index += 1
            roughness_uv_scale = args[current_index]
            current_index += 1
            two_sided = args[current_index]
            current_index += 1
            assert(diffuse_reflectance.is_contiguous())
            if diffuse_reflectance.dim() == 1:
                diffuse_reflectance = redner.Texture3(\
                    redner.float_ptr(diffuse_reflectance.data_ptr()), 0, 0, 0,
                    redner.float_ptr(diffuse_uv_scale.data_ptr()))
            else:
                diffuse_reflectance = redner.Texture3(\
                    redner.float_ptr(diffuse_reflectance.data_ptr()),
                    int(diffuse_reflectance.shape[2]), # width
                    int(diffuse_reflectance.shape[1]), # height
                    int(diffuse_reflectance.shape[0]), # num levels
                    redner.float_ptr(diffuse_uv_scale.data_ptr()))
            assert(specular_reflectance.is_contiguous())
            if specular_reflectance.dim() == 1:
                specular_reflectance = redner.Texture3(\
                    redner.float_ptr(specular_reflectance.data_ptr()), 0, 0, 0,
                    redner.float_ptr(specular_uv_scale.data_ptr()))
            else:
                specular_reflectance = redner.Texture3(\
                    redner.float_ptr(specular_reflectance.data_ptr()),
                    int(specular_reflectance.shape[2]), # width
                    int(specular_reflectance.shape[1]), # height
                    int(specular_reflectance.shape[0]), # num levels
                    redner.float_ptr(specular_uv_scale.data_ptr()))
            assert(roughness.is_contiguous())
            if roughness.dim() == 1:
                roughness = redner.Texture1(\
                    redner.float_ptr(roughness.data_ptr()), 0, 0, 0,
                    redner.float_ptr(roughness_uv_scale.data_ptr()))
            else:
                assert(roughness.dim() == 4)
                roughness = redner.Texture1(\
                    redner.float_ptr(roughness.data_ptr()),
                    int(roughness.shape[2]), # width
                    int(roughness.shape[1]), # height
                    int(roughness.shape[0]), # num levels
                    redner.float_ptr(roughness_uv_scale.data_ptr()))
            materials.append(redner.Material(\
                diffuse_reflectance,
                specular_reflectance,
                roughness,
                two_sided))

        area_lights = []
        for i in range(num_lights):
            shape_id = args[current_index]
            current_index += 1
            intensity = args[current_index]
            current_index += 1
            two_sided = args[current_index]
            current_index += 1

            area_lights.append(redner.AreaLight(\
                shape_id,
                redner.float_ptr(intensity.data_ptr()),
                two_sided))

        envmap = None
        if args[current_index] is not None:
            values = args[current_index]
            current_index += 1
            envmap_uv_scale = args[current_index]
            current_index += 1
            env_to_world = args[current_index]
            current_index += 1
            world_to_env = args[current_index]
            current_index += 1
            sample_cdf_ys = args[current_index]
            current_index += 1
            sample_cdf_xs = args[current_index]
            current_index += 1
            pdf_norm = args[current_index]
            current_index += 1
            values = redner.Texture3(\
                redner.float_ptr(values.data_ptr()),
                int(values.shape[2]), # width
                int(values.shape[1]), # height
                int(values.shape[0]), # num levels
                redner.float_ptr(envmap_uv_scale.data_ptr()))
            envmap = redner.EnvironmentMap(\
                values,
                redner.float_ptr(env_to_world.data_ptr()),
                redner.float_ptr(world_to_env.data_ptr()),
                redner.float_ptr(sample_cdf_ys.data_ptr()),
                redner.float_ptr(sample_cdf_xs.data_ptr()),
                pdf_norm)
        else:
            current_index += 7

        start = time.time()
        scene = redner.Scene(camera,
                             shapes,
                             materials,
                             area_lights,
                             envmap,
                             pyredner.get_use_gpu(),
                             pyredner.get_device().index if pyredner.get_device().index is not None else -1)
        time_elapsed = time.time() - start
        if print_timing:
            print('Scene construction, time: %.5f s' % time_elapsed)

        num_samples = args[current_index]
        current_index += 1
        max_bounces = args[current_index]
        current_index += 1
        channels = args[current_index]
        current_index += 1
        sampler_type = args[current_index]
        current_index += 1

        # num_samples may be a single int; convert it to a tuple
        if isinstance(num_samples, int):
            num_samples = (num_samples, num_samples)

        options = redner.RenderOptions(seed, num_samples[0], max_bounces, channels, sampler_type)
        num_channels = redner.compute_num_channels(channels)
        rendered_image = torch.zeros(resolution[0], resolution[1], num_channels,
            device = pyredner.get_device())
        start = time.time()
        redner.render(scene,
                      options,
                      redner.float_ptr(rendered_image.data_ptr()),
                      redner.float_ptr(0),
                      None,
                      redner.float_ptr(0))
        time_elapsed = time.time() - start
        if print_timing:
            print('Forward pass, time: %.5f s' % time_elapsed)

        # # For debugging
        # debug_img = torch.zeros(256, 256, 3)
        # redner.render(scene,
        #               options,
        #               redner.float_ptr(rendered_image.data_ptr()),
        #               redner.float_ptr(0),
        #               None,
        #               redner.float_ptr(debug_img.data_ptr()))
        # pyredner.imwrite(debug_img, 'debug.exr')
        # exit()

        ctx.shapes = shapes
        ctx.materials = materials
        ctx.area_lights = area_lights
        ctx.envmap = envmap
        ctx.scene = scene
        ctx.options = options
        ctx.num_samples = num_samples
        return rendered_image
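
A minimal usage sketch for this autograd Function; the serialize_scene helper and its keyword names are assumptions about this pyredner version, and the scene itself is assumed to be built elsewhere:

import pyredner

scene_args = pyredner.RenderFunction.serialize_scene(
    scene = scene,          # a pyredner.Scene with camera, shapes, materials, lights
    num_samples = 16,
    max_bounces = 1)
render = pyredner.RenderFunction.apply
img = render(0, *scene_args)   # seed 0; returns an H x W x num_channels float tensor
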
Example #24
0
    def backward(ctx, grad_img):
        if not grad_img.is_contiguous():
            grad_img = grad_img.contiguous()
        scene = ctx.scene
        options = ctx.options

        d_fov_factor = torch.zeros(1)
        d_cam_to_world = torch.zeros(4, 4)
        d_world_to_cam = torch.zeros(4, 4)
        d_camera = redner.DCamera(redner.float_ptr(d_cam_to_world.data_ptr()),
                                  redner.float_ptr(d_world_to_cam.data_ptr()),
                                  redner.float_ptr(d_fov_factor.data_ptr()))
        d_vertices_list = []
        d_uvs_list = []
        d_normals_list = []
        d_shapes = []
        for shape in ctx.shapes:
            num_vertices = shape.num_vertices
            d_vertices = torch.zeros(num_vertices,
                                     3,
                                     device=pyredner.get_device())
            d_uvs = torch.zeros(
                num_vertices, 2,
                device=pyredner.get_device()) if shape.has_uvs() else None
            d_normals = torch.zeros(
                num_vertices, 3,
                device=pyredner.get_device()) if shape.has_normals() else None
            d_vertices_list.append(d_vertices)
            d_uvs_list.append(d_uvs)
            d_normals_list.append(d_normals)
            d_shapes.append(redner.DShape(\
                redner.float_ptr(d_vertices.data_ptr()),
                redner.float_ptr(d_uvs.data_ptr() if d_uvs is not None else 0),
                redner.float_ptr(d_normals.data_ptr() if d_normals is not None else 0)))

        d_diffuse_list = []
        d_specular_list = []
        d_roughness_list = []
        d_materials = []
        for material in ctx.materials:
            diffuse_size = material.get_diffuse_size()
            specular_size = material.get_specular_size()
            roughness_size = material.get_roughness_size()
            if diffuse_size[0] == 0:
                d_diffuse = torch.zeros(3, device=pyredner.get_device())
            else:
                d_diffuse = torch.zeros(diffuse_size[2],
                                        diffuse_size[1],
                                        diffuse_size[0],
                                        3,
                                        device=pyredner.get_device())
            if specular_size[0] == 0:
                d_specular = torch.zeros(3, device=pyredner.get_device())
            else:
                d_specular = torch.zeros(specular_size[2],
                                         specular_size[1],
                                         specular_size[0],
                                         3,
                                         device=pyredner.get_device())
            if roughness_size[0] == 0:
                d_roughness = torch.zeros(1, device=pyredner.get_device())
            else:
                d_roughness = torch.zeros(roughness_size[2],
                                          roughness_size[1],
                                          roughness_size[0],
                                          device=pyredner.get_device())
            d_diffuse_list.append(d_diffuse)
            d_specular_list.append(d_specular)
            d_roughness_list.append(d_roughness)
            d_diffuse_uv_scale = torch.zeros(2)
            d_specular_uv_scale = torch.zeros(2)
            d_roughness_uv_scale = torch.zeros(2)
            d_diffuse_tex = redner.Texture3(\
                redner.float_ptr(d_diffuse.data_ptr()),
                diffuse_size[0], diffuse_size[1], diffuse_size[2],
                redner.float_ptr(d_diffuse_uv_scale.data_ptr()))
            d_specular_tex = redner.Texture3(\
                redner.float_ptr(d_specular.data_ptr()),
                specular_size[0], specular_size[1], specular_size[2],
                redner.float_ptr(d_specular_uv_scale.data_ptr()))
            d_roughness_tex = redner.Texture1(\
                redner.float_ptr(d_roughness.data_ptr()),
                roughness_size[0], roughness_size[1], roughness_size[2],
                redner.float_ptr(d_roughness_uv_scale.data_ptr()))
            d_materials.append(redner.DMaterial(\
                d_diffuse_tex, d_specular_tex, d_roughness_tex))

        d_intensity_list = []
        d_lights = []
        for light in ctx.lights:
            d_intensity = torch.zeros(3, device=pyredner.get_device())
            d_intensity_list.append(d_intensity)
            d_lights.append(
                redner.DLight(redner.float_ptr(d_intensity.data_ptr())))

        d_scene = redner.DScene(d_camera, d_shapes, d_materials, d_lights,
                                pyredner.get_use_gpu())
        if not get_use_correlated_random_number():
            # Decouple the forward/backward random numbers by adding a big prime number
            options.seed += 1000003
        redner.render(scene, options, redner.float_ptr(0),
                      redner.float_ptr(grad_img.data_ptr()), d_scene,
                      redner.float_ptr(0))

        # # For debugging
        # grad_img = torch.ones(256, 256, 3)
        # debug_img = torch.zeros(256, 256, 3)
        # redner.render(scene, options,
        #               redner.float_ptr(0),
        #               redner.float_ptr(grad_img.data_ptr()),
        #               d_scene,
        #               redner.float_ptr(debug_img.data_ptr()))
        # pyredner.imwrite(debug_img, 'debug.exr')
        # exit()

        ret_list = []
        ret_list.append(None)  # seed
        ret_list.append(None)  # num_shapes
        ret_list.append(None)  # num_materials
        ret_list.append(None)  # num_lights
        ret_list.append(d_cam_to_world)
        ret_list.append(d_world_to_cam)
        ret_list.append(d_fov_factor)
        ret_list.append(None)  # clip near
        ret_list.append(None)  # resolution
        ret_list.append(None)  # fisheye

        num_shapes = len(ctx.shapes)
        for i in range(num_shapes):
            ret_list.append(d_vertices_list[i])
            ret_list.append(None)  # indices
            ret_list.append(d_uvs_list[i])
            ret_list.append(d_normals_list[i])
            ret_list.append(None)  # material id
            ret_list.append(None)  # light id

        num_materials = len(ctx.materials)
        for i in range(num_materials):
            ret_list.append(d_diffuse_list[i])
            ret_list.append(None)  # diffuse_uv_scale
            ret_list.append(d_specular_list[i])
            ret_list.append(None)  # specular_uv_scale
            ret_list.append(d_roughness_list[i])
            ret_list.append(None)  # roughness_uv_scale
            ret_list.append(None)  # two sided

        num_lights = len(ctx.lights)
        for i in range(num_lights):
            ret_list.append(None)  # shape id
            ret_list.append(d_intensity_list[i].cpu())
            ret_list.append(None)  # two sided

        ret_list.append(None)  # num samples
        ret_list.append(None)  # num bounces

        return tuple(ret_list)
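
Because the gradients above are routed back onto the serialized tensors, optimizing scene parameters is plain PyTorch autograd. A hedged sketch, with scene_args and target assumed to have been built elsewhere:

import pyredner

render = pyredner.RenderFunction.apply
img = render(1, *scene_args)            # scene_args: output of serialize_scene
loss = (img - target).pow(2).sum()      # target: reference image tensor
loss.backward()
# Gradients now sit on whichever serialized leaf tensors had requires_grad = True,
# e.g. shape.vertices.grad or a camera matrix that was passed in as a leaf tensor.
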
Example #25
0
def parse_shape(node, material_dict, shape_id, shape_group_dict=None):
    if node.attrib['type'] == 'obj' or node.attrib['type'] == 'serialized':
        to_world = torch.eye(4)
        serialized_shape_id = 0
        mat_id = -1
        light_intensity = None
        filename = ''
        for child in node:
            if 'name' in child.attrib:
                if child.attrib['name'] == 'filename':
                    filename = child.attrib['value']
                elif child.attrib['name'] == 'toWorld':
                    to_world = parse_transform(child)
                elif child.attrib['name'] == 'shapeIndex':
                    serialized_shape_id = int(child.attrib['value'])
            if child.tag == 'ref':
                mat_id = material_dict[child.attrib['id']]
            elif child.tag == 'emitter':
                for grandchild in child:
                    if grandchild.attrib['name'] == 'radiance':
                        light_intensity = parse_vector(
                            grandchild.attrib['value'])
                        if light_intensity.shape[0] == 1:
                            light_intensity = torch.tensor(\
                                         [light_intensity[0],
                                          light_intensity[0],
                                          light_intensity[0]])

        if node.attrib['type'] == 'obj':
            _, mesh_list, _ = pyredner.load_obj(filename)
            vertices = mesh_list[0][1].vertices.cpu()
            indices = mesh_list[0][1].indices.cpu()
            uvs = mesh_list[0][1].uvs
            normals = mesh_list[0][1].normals
            if uvs is not None:
                uvs = uvs.cpu()
            if normals is not None:
                normals = normals.cpu()
        else:
            assert (node.attrib['type'] == 'serialized')
            mitsuba_tri_mesh = redner.load_serialized(filename,
                                                      serialized_shape_id)
            vertices = torch.from_numpy(mitsuba_tri_mesh.vertices)
            indices = torch.from_numpy(mitsuba_tri_mesh.indices)
            uvs = torch.from_numpy(mitsuba_tri_mesh.uvs)
            normals = torch.from_numpy(mitsuba_tri_mesh.normals)
            if uvs.shape[0] == 0:
                uvs = None
            if normals.shape[0] == 0:
                normals = None

        # Transform the vertices and normals
        vertices = torch.cat((vertices, torch.ones(vertices.shape[0], 1)),
                             dim=1)
        vertices = vertices @ torch.transpose(to_world, 0, 1)
        vertices = vertices / vertices[:, 3:4]
        vertices = vertices[:, 0:3].contiguous()
        if normals is not None:
            normals = normals @ (torch.inverse(torch.transpose(to_world, 0,
                                                               1))[:3, :3])
            normals = normals.contiguous()
        assert (vertices is not None)
        assert (indices is not None)
        lgt = None
        if light_intensity is not None:
            lgt = pyredner.AreaLight(shape_id, light_intensity)

        if pyredner.get_use_gpu():
            # Copy to GPU
            vertices = vertices.cuda()
            indices = indices.cuda()
            if uvs is not None:
                uvs = uvs.cuda()
            if normals is not None:
                normals = normals.cuda()
        return pyredner.Shape(vertices, indices, uvs, normals, mat_id), lgt
    elif node.attrib['type'] == 'rectangle':
        indices = torch.tensor([[0, 2, 1], [1, 2, 3]], dtype=torch.int32)
        vertices = torch.tensor([[-1.0, -1.0, 0.0], [-1.0, 1.0, 0.0],
                                 [1.0, -1.0, 0.0], [1.0, 1.0, 0.0]])
        uvs = None
        normals = None
        to_world = torch.eye(4)
        mat_id = -1
        light_intensity = None
        for child in node:
            if 'name' in child.attrib:
                if child.attrib['name'] == 'toWorld':
                    to_world = parse_transform(child)
            if child.tag == 'ref':
                mat_id = material_dict[child.attrib['id']]
            elif child.tag == 'emitter':
                for grandchild in child:
                    if grandchild.attrib['name'] == 'radiance':
                        light_intensity = parse_vector(
                            grandchild.attrib['value'])
                        if light_intensity.shape[0] == 1:
                            light_intensity = torch.tensor(\
                                         [light_intensity[0],
                                          light_intensity[0],
                                          light_intensity[0]])
        # Transform the vertices and normals
        vertices = torch.cat((vertices, torch.ones(vertices.shape[0], 1)),
                             dim=1)
        vertices = vertices @ torch.transpose(to_world, 0, 1)
        vertices = vertices / vertices[:, 3:4]
        vertices = vertices[:, 0:3].contiguous()
        if normals is not None:
            normals = normals @ (torch.inverse(torch.transpose(to_world, 0,
                                                               1))[:3, :3])
            normals = normals.contiguous()
        assert (vertices is not None)
        assert (indices is not None)
        lgt = None
        if light_intensity is not None:
            lgt = pyredner.AreaLight(shape_id, light_intensity)

        if pyredner.get_use_gpu():
            # Copy to GPU
            vertices = vertices.cuda()
            indices = indices.cuda()
            if uvs is not None:
                uvs = uvs.cuda()
            if normals is not None:
                normals = normals.cuda()
        return pyredner.Shape(vertices, indices, uvs, normals, mat_id), lgt
    # Instance support.
    # TODO: for now we simply apply the transform and create a new shape.
    elif node.attrib['type'] == 'instance':
        shape = None
        # Default to the identity transform in case the instance has no toWorld
        # child; otherwise to_world below would be referenced before assignment.
        to_world = torch.eye(4)
        if pyredner.get_use_gpu():
            to_world = to_world.cuda()
        for child in node:
            if 'name' in child.attrib:
                if child.attrib['name'] == 'toWorld':
                    to_world = parse_transform(child)
                    if pyredner.get_use_gpu():
                        to_world = to_world.cuda()
            if child.tag == 'ref':
                shape = shape_group_dict[child.attrib['id']]
        # transform instance
        vertices = shape.vertices
        normals = shape.normals
        vector1 = torch.ones(vertices.shape[0], 1)
        vertices = torch.cat(
            (vertices, vector1.cuda() if pyredner.get_use_gpu() else vector1),
            dim=1)
        vertices = vertices @ torch.transpose(to_world, 0, 1)
        vertices = vertices / vertices[:, 3:4]
        vertices = vertices[:, 0:3].contiguous()
        if normals is not None:
            normals = normals @ (torch.inverse(torch.transpose(to_world, 0,
                                                               1))[:3, :3])
            normals = normals.contiguous()
        # assert(vertices is not None)
        # assert(indices is not None)
        # lgt = None
        # if light_intensity is not None:
        #     lgt = pyredner.AreaLight(shape_id, light_intensity)

        return pyredner.Shape(vertices, shape.indices, shape.uvs, normals,
                              shape.material_id), None
    else:
        raise NotImplementedError(
            'Shape type {} is not supported!'.format(node.attrib['type']))
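
An illustrative driver for parse_shape; the XML layout, the scene file name, and the empty material_dict are assumptions, and only the parse_shape call itself comes from the code above:

import xml.etree.ElementTree as etree

root = etree.parse('scene.xml').getroot()    # hypothetical Mitsuba-style scene file
material_dict = {}                           # would be filled by a material-parsing pass
shapes, area_lights = [], []
for node in root:
    if node.tag == 'shape':
        shape, lgt = parse_shape(node, material_dict, shape_id = len(shapes))
        shapes.append(shape)
        if lgt is not None:
            area_lights.append(lgt)
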
Example #26
0
    def backward(ctx,
                 grad_img):
        if not grad_img.is_contiguous():
            grad_img = grad_img.contiguous()
        scene = ctx.scene
        options = ctx.options

        d_cam_position = torch.zeros(3)
        d_cam_look = torch.zeros(3)
        d_cam_up = torch.zeros(3)
        d_ndc_to_cam = torch.zeros(3, 3)
        d_cam_to_ndc = torch.zeros(3, 3)
        d_camera = redner.DCamera(redner.float_ptr(d_cam_position.data_ptr()),
                                  redner.float_ptr(d_cam_look.data_ptr()),
                                  redner.float_ptr(d_cam_up.data_ptr()),
                                  redner.float_ptr(d_ndc_to_cam.data_ptr()),
                                  redner.float_ptr(d_cam_to_ndc.data_ptr()))
        d_vertices_list = []
        d_uvs_list = []
        d_normals_list = []
        d_shapes = []
        for shape in ctx.shapes:
            num_vertices = shape.num_vertices
            d_vertices = torch.zeros(num_vertices, 3,
                device = pyredner.get_device())
            d_uvs = torch.zeros(num_vertices, 2,
                device = pyredner.get_device()) if shape.has_uvs() else None
            d_normals = torch.zeros(num_vertices, 3,
                device = pyredner.get_device()) if shape.has_normals() else None
            d_vertices_list.append(d_vertices)
            d_uvs_list.append(d_uvs)
            d_normals_list.append(d_normals)
            d_shapes.append(redner.DShape(\
                redner.float_ptr(d_vertices.data_ptr()),
                redner.float_ptr(d_uvs.data_ptr() if d_uvs is not None else 0),
                redner.float_ptr(d_normals.data_ptr() if d_normals is not None else 0)))

        d_diffuse_list = []
        d_specular_list = []
        d_roughness_list = []
        d_materials = []
        for material in ctx.materials:
            diffuse_size = material.get_diffuse_size()
            specular_size = material.get_specular_size()
            roughness_size = material.get_roughness_size()
            if diffuse_size[0] == 0:
                d_diffuse = torch.zeros(3, device = pyredner.get_device())
            else:
                d_diffuse = torch.zeros(diffuse_size[2],
                                        diffuse_size[1],
                                        diffuse_size[0],
                                        3, device = pyredner.get_device())
            if specular_size[0] == 0:
                d_specular = torch.zeros(3, device = pyredner.get_device())
            else:
                d_specular = torch.zeros(specular_size[2],
                                         specular_size[1],
                                         specular_size[0],
                                         3, device = pyredner.get_device())
            if roughness_size[0] == 0:
                d_roughness = torch.zeros(1, device = pyredner.get_device())
            else:
                d_roughness = torch.zeros(roughness_size[2],
                                          roughness_size[1],
                                          roughness_size[0],
                                          1, device = pyredner.get_device())
            d_diffuse_list.append(d_diffuse)
            d_specular_list.append(d_specular)
            d_roughness_list.append(d_roughness)
            d_diffuse_uv_scale = torch.zeros(2)
            d_specular_uv_scale = torch.zeros(2)
            d_roughness_uv_scale = torch.zeros(2)
            d_diffuse_tex = redner.Texture3(\
                redner.float_ptr(d_diffuse.data_ptr()),
                diffuse_size[0], diffuse_size[1], diffuse_size[2],
                redner.float_ptr(d_diffuse_uv_scale.data_ptr()))
            d_specular_tex = redner.Texture3(\
                redner.float_ptr(d_specular.data_ptr()),
                specular_size[0], specular_size[1], specular_size[2],
                redner.float_ptr(d_specular_uv_scale.data_ptr()))
            d_roughness_tex = redner.Texture1(\
                redner.float_ptr(d_roughness.data_ptr()),
                roughness_size[0], roughness_size[1], roughness_size[2],
                redner.float_ptr(d_roughness_uv_scale.data_ptr()))
            d_materials.append(redner.DMaterial(\
                d_diffuse_tex, d_specular_tex, d_roughness_tex))

        d_intensity_list = []
        d_area_lights = []
        for light in ctx.area_lights:
            d_intensity = torch.zeros(3, device = pyredner.get_device())
            d_intensity_list.append(d_intensity)
            d_area_lights.append(\
                redner.DAreaLight(redner.float_ptr(d_intensity.data_ptr())))

        d_envmap = None
        if ctx.envmap is not None:
            envmap = ctx.envmap
            size = envmap.get_size()
            d_envmap_values = \
                torch.zeros(size[2],
                            size[1],
                            size[0],
                            3,
                            device = pyredner.get_device())
            d_envmap_uv_scale = torch.zeros(2)
            d_envmap_tex = redner.Texture3(\
                redner.float_ptr(d_envmap_values.data_ptr()),
                size[0], size[1], size[2],
                redner.float_ptr(d_envmap_uv_scale.data_ptr()))
            d_world_to_env = torch.zeros(4, 4)
            d_envmap = redner.DEnvironmentMap(\
                d_envmap_tex,
                redner.float_ptr(d_world_to_env.data_ptr()))

        d_scene = redner.DScene(d_camera,
                                d_shapes,
                                d_materials,
                                d_area_lights,
                                d_envmap,
                                pyredner.get_use_gpu(),
                                pyredner.get_device().index if pyredner.get_device().index is not None else -1)
        if not get_use_correlated_random_number():
            # Decouple the forward/backward random numbers by adding a big prime number
            options.seed += 1000003

        options.num_samples = ctx.num_samples[1]
        start = time.time()
        redner.render(scene, options,
                      redner.float_ptr(0),
                      redner.float_ptr(grad_img.data_ptr()),
                      d_scene,
                      redner.float_ptr(0))
        time_elapsed = time.time() - start
        if print_timing:
            print('Backward pass, time: %.5f s' % time_elapsed)

        # # For debugging
        # # pyredner.imwrite(grad_img, 'grad_img.exr')
        # # grad_img = torch.ones(256, 256, 3, device = pyredner.get_device())
        # debug_img = torch.zeros(256, 256, 3)
        # start = time.time()
        # redner.render(scene, options,
        #               redner.float_ptr(0),
        #               redner.float_ptr(grad_img.data_ptr()),
        #               d_scene,
        #               redner.float_ptr(debug_img.data_ptr()))
        # time_elapsed = time.time() - start
        # if print_timing:
        #     print('Backward pass, time: %.5f s' % time_elapsed)
        # pyredner.imwrite(debug_img, 'debug.exr')
        # pyredner.imwrite(-debug_img, 'debug_.exr')
        # debug_img = debug_img.numpy()
        # print(np.max(debug_img))
        # print(np.unravel_index(np.argmax(debug_img), debug_img.shape))
        # print(np.min(debug_img))
        # print(np.unravel_index(np.argmin(debug_img), debug_img.shape))
        # print(np.sum(debug_img) / 3)
        # debug_max = 0.5
        # debug_min = -0.5
        # debug_img = np.clip((debug_img - debug_min) / (debug_max - debug_min), 0, 1)
        # debug_img = debug_img[:, :, 0]
        # import matplotlib.cm as cm
        # debug_img = cm.viridis(debug_img)
        # skimage.io.imsave('debug.png', np.power(debug_img, 1/2.2))
        # exit()

        ret_list = []
        ret_list.append(None) # seed
        ret_list.append(None) # num_shapes
        ret_list.append(None) # num_materials
        ret_list.append(None) # num_lights
        ret_list.append(d_cam_position)
        ret_list.append(d_cam_look)
        ret_list.append(d_cam_up)
        ret_list.append(d_ndc_to_cam)
        ret_list.append(d_cam_to_ndc)
        ret_list.append(None) # clip near
        ret_list.append(None) # resolution
        ret_list.append(None) # fisheye

        num_shapes = len(ctx.shapes)
        for i in range(num_shapes):
            ret_list.append(d_vertices_list[i])
            ret_list.append(None) # indices
            ret_list.append(d_uvs_list[i])
            ret_list.append(d_normals_list[i])
            ret_list.append(None) # material id
            ret_list.append(None) # light id

        num_materials = len(ctx.materials)
        for i in range(num_materials):
            ret_list.append(d_diffuse_list[i])
            ret_list.append(None) # diffuse_uv_scale
            ret_list.append(d_specular_list[i])
            ret_list.append(None) # specular_uv_scale
            ret_list.append(d_roughness_list[i])
            ret_list.append(None) # roughness_uv_scale
            ret_list.append(None) # two sided

        num_area_lights = len(ctx.area_lights)
        for i in range(num_area_lights):
            ret_list.append(None) # shape id
            ret_list.append(d_intensity_list[i].cpu())
            ret_list.append(None) # two sided

        if ctx.envmap is not None:
            ret_list.append(d_envmap_values)
            ret_list.append(None) # uv_scale
            ret_list.append(None) # env_to_world
            ret_list.append(d_world_to_env)
            ret_list.append(None) # sample_cdf_ys
            ret_list.append(None) # sample_cdf_xs
            ret_list.append(None) # pdf_norm
        else:
            ret_list.append(None)
            ret_list.append(None)
            ret_list.append(None)
            ret_list.append(None)
            ret_list.append(None)
            ret_list.append(None)
            ret_list.append(None)
        
        ret_list.append(None) # num samples
        ret_list.append(None) # num bounces
        ret_list.append(None) # channels
        ret_list.append(None) # sampler type

        return tuple(ret_list)
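
The long ret_list exists because torch.autograd.Function requires backward to return one gradient (or None for non-differentiable inputs) per forward argument, in exactly the order they were passed in. A toy illustration of that contract:

import torch

class Scale(torch.autograd.Function):
    @staticmethod
    def forward(ctx, x, factor, label):
        ctx.factor = factor
        return x * factor

    @staticmethod
    def backward(ctx, grad_out):
        # One slot per forward input: gradient w.r.t. x, then None for the
        # non-tensor factor and label, mirroring the None placeholders above.
        return grad_out * ctx.factor, None, None

x = torch.ones(3, requires_grad = True)
Scale.apply(x, 2.0, 'demo').sum().backward()
print(x.grad)   # tensor([2., 2., 2.])
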
Example #27
0
    def unpack_args(seed,
                    args,
                    use_primary_edge_sampling = None,
                    use_secondary_edge_sampling = None):
        """
            Given a list of serialized scene arguments, unpack
            all information into a Context.
        """

        current_index = 0
        num_shapes = args[current_index]
        current_index += 1
        num_materials = args[current_index]
        current_index += 1
        num_lights = args[current_index]
        current_index += 1

        cam_position = args[current_index]
        current_index += 1
        cam_look_at = args[current_index]
        current_index += 1
        cam_up = args[current_index]
        current_index += 1
        cam_to_world = args[current_index]
        current_index += 1
        world_to_cam = args[current_index]
        current_index += 1
        intrinsic_mat_inv = args[current_index]
        current_index += 1
        intrinsic_mat = args[current_index]
        current_index += 1
        clip_near = args[current_index]
        current_index += 1
        resolution = args[current_index]
        current_index += 1
        viewport = args[current_index]
        current_index += 1
        camera_type = args[current_index]
        current_index += 1
        if cam_to_world is None:
            camera = redner.Camera(resolution[1],
                                   resolution[0],
                                   redner.float_ptr(cam_position.data_ptr()),
                                   redner.float_ptr(cam_look_at.data_ptr()),
                                   redner.float_ptr(cam_up.data_ptr()),
                                   redner.float_ptr(0), # cam_to_world
                                   redner.float_ptr(0), # world_to_cam
                                   redner.float_ptr(intrinsic_mat_inv.data_ptr()),
                                   redner.float_ptr(intrinsic_mat.data_ptr()),
                                   clip_near,
                                   camera_type,
                                   redner.Vector2i(viewport[1], viewport[0]),
                                   redner.Vector2i(viewport[3], viewport[2]))
        else:
            camera = redner.Camera(resolution[1],
                                   resolution[0],
                                   redner.float_ptr(0), # cam_position
                                   redner.float_ptr(0), # cam_look_at
                                   redner.float_ptr(0), # cam_up
                                   redner.float_ptr(cam_to_world.data_ptr()),
                                   redner.float_ptr(world_to_cam.data_ptr()),
                                   redner.float_ptr(intrinsic_mat_inv.data_ptr()),
                                   redner.float_ptr(intrinsic_mat.data_ptr()),
                                   clip_near,
                                   camera_type,
                                   redner.Vector2i(viewport[1], viewport[0]),
                                   redner.Vector2i(viewport[3], viewport[2]))
        shapes = []
        for i in range(num_shapes):
            vertices = args[current_index]
            current_index += 1
            indices = args[current_index]
            current_index += 1
            uvs = args[current_index]
            current_index += 1
            normals = args[current_index]
            current_index += 1
            uv_indices = args[current_index]
            current_index += 1
            normal_indices = args[current_index]
            current_index += 1
            colors = args[current_index]
            current_index += 1
            material_id = args[current_index]
            current_index += 1
            light_id = args[current_index]
            current_index += 1
            assert(vertices.is_contiguous())
            assert(indices.is_contiguous())
            if uvs is not None:
                assert(uvs.is_contiguous())
            if normals is not None:
                assert(normals.is_contiguous())
            if uv_indices is not None:
                assert(uv_indices.is_contiguous())
            if normal_indices is not None:
                assert(normal_indices.is_contiguous())
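            # The raw data_ptr()s below hand redner direct views into these tensors,
            # which is why everything must be contiguous and should stay alive while
            # the scene is in use.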
            shapes.append(redner.Shape(\
                redner.float_ptr(vertices.data_ptr()),
                redner.int_ptr(indices.data_ptr()),
                redner.float_ptr(uvs.data_ptr() if uvs is not None else 0),
                redner.float_ptr(normals.data_ptr() if normals is not None else 0),
                redner.int_ptr(uv_indices.data_ptr() if uv_indices is not None else 0),
                redner.int_ptr(normal_indices.data_ptr() if normal_indices is not None else 0),
                redner.float_ptr(colors.data_ptr() if colors is not None else 0),
                int(vertices.shape[0]),
                int(uvs.shape[0]) if uvs is not None else 0,
                int(normals.shape[0]) if normals is not None else 0,
                int(indices.shape[0]),
                material_id,
                light_id))

        materials = []
        for i in range(num_materials):
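            # Each texture below is unpacked as: a mipmap-level count, one tensor per
            # level, then its uv_scale (the optional generic and normal-map textures
            # skip the uv_scale when their level count is zero).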
            num_levels = args[current_index]
            current_index += 1
            diffuse_reflectance = []
            for j in range(num_levels):
                diffuse_reflectance.append(args[current_index])
                current_index += 1
            diffuse_uv_scale = args[current_index]
            current_index += 1
            
            num_levels = args[current_index]
            current_index += 1
            specular_reflectance = []
            for j in range(num_levels):
                specular_reflectance.append(args[current_index])
                current_index += 1
            specular_uv_scale = args[current_index]
            current_index += 1
            
            num_levels = args[current_index]
            current_index += 1
            roughness = []
            for j in range(num_levels):
                roughness.append(args[current_index])
                current_index += 1
            roughness_uv_scale = args[current_index]
            current_index += 1

            num_levels = args[current_index]
            current_index += 1
            generic_texture = []
            if num_levels > 0:
                for j in range(num_levels):
                    generic_texture.append(args[current_index])
                    current_index += 1
                generic_uv_scale = args[current_index]
                current_index += 1
            else:
                generic_uv_scale = None

            num_levels = args[current_index]
            current_index += 1
            normal_map = []
            if num_levels > 0:
                for j in range(num_levels):
                    normal_map.append(args[current_index])
                    current_index += 1
                normal_map_uv_scale = args[current_index]
                current_index += 1
            else:
                normal_map_uv_scale = None

            compute_specular_lighting = args[current_index]
            current_index += 1
            two_sided = args[current_index]
            current_index += 1
            use_vertex_color = args[current_index]
            current_index += 1

            if diffuse_reflectance[0].dim() == 1:
                # Constant texture
                diffuse_reflectance = redner.Texture3(\
                    [redner.float_ptr(diffuse_reflectance[0].data_ptr())],
                    [0], [0], 3,
                    redner.float_ptr(diffuse_uv_scale.data_ptr()))
            else:
                assert(diffuse_reflectance[0].dim() == 3)
                diffuse_reflectance = redner.Texture3(\
                    [redner.float_ptr(x.data_ptr()) for x in diffuse_reflectance],
                    [x.shape[1] for x in diffuse_reflectance],
                    [x.shape[0] for x in diffuse_reflectance],
                    3,
                    redner.float_ptr(diffuse_uv_scale.data_ptr()))

            if specular_reflectance[0].dim() == 1:
                # Constant texture
                specular_reflectance = redner.Texture3(\
                    [redner.float_ptr(specular_reflectance[0].data_ptr())],
                    [0], [0], 3,
                    redner.float_ptr(specular_uv_scale.data_ptr()))
            else:
                assert(specular_reflectance[0].dim() == 3)
                specular_reflectance = redner.Texture3(\
                    [redner.float_ptr(x.data_ptr()) for x in specular_reflectance],
                    [x.shape[1] for x in specular_reflectance],
                    [x.shape[0] for x in specular_reflectance],
                    3,
                    redner.float_ptr(specular_uv_scale.data_ptr()))

            if roughness[0].dim() == 1:
                # Constant texture
                roughness = redner.Texture1(\
                    [redner.float_ptr(roughness[0].data_ptr())],
                    [0], [0], 1,
                    redner.float_ptr(roughness_uv_scale.data_ptr()))
            else:
                assert(roughness[0].dim() == 3)
                roughness = redner.Texture1(\
                    [redner.float_ptr(x.data_ptr()) for x in roughness],
                    [x.shape[1] for x in roughness],
                    [x.shape[0] for x in roughness],
                    1,
                    redner.float_ptr(roughness_uv_scale.data_ptr()))

            if len(generic_texture) > 0:
                assert(generic_texture[0].dim() == 3)
                generic_texture = redner.TextureN(\
                    [redner.float_ptr(x.data_ptr()) for x in generic_texture],
                    [x.shape[1] for x in generic_texture],
                    [x.shape[0] for x in generic_texture],
                    generic_texture[0].shape[2],
                    redner.float_ptr(generic_uv_scale.data_ptr()))
            else:
                generic_texture = redner.TextureN(\
                    [], [], [], 0, redner.float_ptr(0))

            if len(normal_map) > 0:
                assert(normal_map[0].dim() == 3)
                normal_map = redner.Texture3(\
                    [redner.float_ptr(x.data_ptr()) for x in normal_map],
                    [x.shape[1] for x in normal_map],
                    [x.shape[0] for x in normal_map],
                    3,
                    redner.float_ptr(normal_map_uv_scale.data_ptr()))
            else:
                normal_map = redner.Texture3(\
                    [], [], [], 0, redner.float_ptr(0))
            materials.append(redner.Material(\
                diffuse_reflectance,
                specular_reflectance,
                roughness,
                generic_texture,
                normal_map,
                compute_specular_lighting,
                two_sided,
                use_vertex_color))

        area_lights = []
        for i in range(num_lights):
            shape_id = args[current_index]
            current_index += 1
            intensity = args[current_index]
            current_index += 1
            two_sided = args[current_index]
            current_index += 1
            directly_visible = args[current_index]
            current_index += 1

            area_lights.append(redner.AreaLight(\
                shape_id,
                redner.float_ptr(intensity.data_ptr()),
                two_sided,
                directly_visible))

        envmap = None
        if args[current_index] is not None:
            num_levels = args[current_index]
            current_index += 1
            values = []
            for j in range(num_levels):
                values.append(args[current_index])
                current_index += 1
            envmap_uv_scale = args[current_index]
            current_index += 1
            env_to_world = args[current_index]
            current_index += 1
            world_to_env = args[current_index]
            current_index += 1
            sample_cdf_ys = args[current_index]
            current_index += 1
            sample_cdf_xs = args[current_index]
            current_index += 1
            pdf_norm = args[current_index]
            current_index += 1
            directly_visible = args[current_index]
            current_index += 1
            values = redner.Texture3(\
                [redner.float_ptr(x.data_ptr()) for x in values],
                [x.shape[1] for x in values], # width
                [x.shape[0] for x in values], # height
                3, # channels
                redner.float_ptr(envmap_uv_scale.data_ptr()))
            envmap = redner.EnvironmentMap(\
                values,
                redner.float_ptr(env_to_world.data_ptr()),
                redner.float_ptr(world_to_env.data_ptr()),
                redner.float_ptr(sample_cdf_ys.data_ptr()),
                redner.float_ptr(sample_cdf_xs.data_ptr()),
                pdf_norm,
                directly_visible)
        else:
            current_index += 1

        # Options
        num_samples = args[current_index]
        current_index += 1
        max_bounces = args[current_index]
        current_index += 1
        channels = args[current_index]
        current_index += 1
        sampler_type = args[current_index]
        current_index += 1
        use_primary_edge_sampling_ = args[current_index]
        current_index += 1
        use_secondary_edge_sampling_ = args[current_index]
        current_index += 1
        sample_pixel_center = args[current_index]
        current_index += 1

        if use_primary_edge_sampling is None:
            use_primary_edge_sampling = use_primary_edge_sampling_
        if use_secondary_edge_sampling is None:
            use_secondary_edge_sampling = use_secondary_edge_sampling_

        start = time.time()
        scene = redner.Scene(camera,
                             shapes,
                             materials,
                             area_lights,
                             envmap,
                             pyredner.get_use_gpu(),
                             pyredner.get_device().index if pyredner.get_device().index is not None else -1,
                             use_primary_edge_sampling,
                             use_secondary_edge_sampling)
        time_elapsed = time.time() - start
        if get_print_timing():
            print('Scene construction, time: %.5f s' % time_elapsed)

        # num_samples may be a single int; convert it to a tuple
        if isinstance(num_samples, int):
            num_samples = (num_samples, num_samples)

        options = redner.RenderOptions(seed,
                                       num_samples[0],
                                       max_bounces,
                                       channels,
                                       sampler_type,
                                       sample_pixel_center)

        ctx = Context()
        ctx.channels = channels
        ctx.options = options
        ctx.resolution = resolution
        ctx.viewport = viewport
        ctx.scene = scene
        ctx.camera = camera
        ctx.shapes = shapes
        ctx.materials = materials
        ctx.area_lights = area_lights
        ctx.envmap = envmap
        ctx.num_samples = num_samples

        return ctx
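
A sketch of building a Context directly from serialized arguments; this assumes unpack_args is exposed as a staticmethod of RenderFunction and that serialize_scene accepts a (forward, backward) sample-count pair:

scene_args = pyredner.RenderFunction.serialize_scene(
    scene = scene,              # a pyredner.Scene built elsewhere
    num_samples = (64, 4),      # forward spp, backward spp
    max_bounces = 1)
ctx = pyredner.RenderFunction.unpack_args(seed = 0, args = scene_args)
# ctx.scene, ctx.options, ctx.num_samples, ... are now ready to hand to redner.render.
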
Example #28
0
    def forward(ctx, seed, *args):
        """
            Forward rendering pass: given a serialized scene, render and return an image.
        """
        # Unpack arguments
        current_index = 0
        num_shapes = args[current_index]
        current_index += 1
        num_materials = args[current_index]
        current_index += 1
        num_lights = args[current_index]
        current_index += 1
        cam_position = args[current_index]
        current_index += 1
        cam_look_at = args[current_index]
        current_index += 1
        cam_up = args[current_index]
        current_index += 1
        ndc_to_cam = args[current_index]
        current_index += 1
        cam_to_ndc = args[current_index]
        current_index += 1
        clip_near = args[current_index]
        current_index += 1
        resolution = args[current_index]
        current_index += 1
        fisheye = args[current_index]
        current_index += 1
        camera = redner.Camera(resolution[1], resolution[0],
                               redner.float_ptr(cam_position.data_ptr()),
                               redner.float_ptr(cam_look_at.data_ptr()),
                               redner.float_ptr(cam_up.data_ptr()),
                               redner.float_ptr(ndc_to_cam.data_ptr()),
                               redner.float_ptr(cam_to_ndc.data_ptr()),
                               clip_near, fisheye)
        shapes = []
        for i in range(num_shapes):
            vertices = args[current_index]
            current_index += 1
            indices = args[current_index]
            current_index += 1
            uvs = args[current_index]
            current_index += 1
            normals = args[current_index]
            current_index += 1
            material_id = args[current_index]
            current_index += 1
            light_id = args[current_index]
            current_index += 1
            assert (vertices.is_contiguous())
            assert (indices.is_contiguous())
            if uvs is not None:
                assert (uvs.is_contiguous())
            if normals is not None:
                assert (normals.is_contiguous())
            shapes.append(redner.Shape(\
                redner.float_ptr(vertices.data_ptr()),
                redner.int_ptr(indices.data_ptr()),
                redner.float_ptr(uvs.data_ptr() if uvs is not None else 0),
                redner.float_ptr(normals.data_ptr() if normals is not None else 0),
                int(vertices.shape[0]),
                int(indices.shape[0]),
                material_id,
                light_id))
        materials = []
        for i in range(num_materials):
            diffuse_reflectance = args[current_index]
            current_index += 1
            diffuse_uv_scale = args[current_index]
            current_index += 1
            specular_reflectance = args[current_index]
            current_index += 1
            specular_uv_scale = args[current_index]
            current_index += 1
            roughness = args[current_index]
            current_index += 1
            roughness_uv_scale = args[current_index]
            current_index += 1
            two_sided = args[current_index]
            current_index += 1
            assert (diffuse_reflectance.is_contiguous())
            if diffuse_reflectance.dim() == 1:
                diffuse_reflectance = redner.Texture3(\
                    redner.float_ptr(diffuse_reflectance.data_ptr()), 0, 0, 0,
                    redner.float_ptr(diffuse_uv_scale.data_ptr()))
            else:
                diffuse_reflectance = redner.Texture3(\
                    redner.float_ptr(diffuse_reflectance.data_ptr()),
                    int(diffuse_reflectance.shape[2]), # width
                    int(diffuse_reflectance.shape[1]), # height
                    int(diffuse_reflectance.shape[0]), # num levels
                    redner.float_ptr(diffuse_uv_scale.data_ptr()))
            assert (specular_reflectance.is_contiguous())
            if specular_reflectance.dim() == 1:
                specular_reflectance = redner.Texture3(\
                    redner.float_ptr(specular_reflectance.data_ptr()), 0, 0, 0,
                    redner.float_ptr(specular_uv_scale.data_ptr()))
            else:
                specular_reflectance = redner.Texture3(\
                    redner.float_ptr(specular_reflectance.data_ptr()),
                    int(specular_reflectance.shape[2]), # width
                    int(specular_reflectance.shape[1]), # height
                    int(specular_reflectance.shape[0]), # num levels
                    redner.float_ptr(specular_uv_scale.data_ptr()))
            assert (roughness.is_contiguous())
            if roughness.dim() == 1:
                roughness = redner.Texture1(\
                    redner.float_ptr(roughness.data_ptr()), 0, 0, 0,
                    redner.float_ptr(roughness_uv_scale.data_ptr()))
            else:
                assert (roughness.dim() == 4)
                roughness = redner.Texture1(\
                    redner.float_ptr(roughness.data_ptr()),
                    int(roughness.shape[2]), # width
                    int(roughness.shape[1]), # height
                    int(roughness.shape[0]), # num levels
                    redner.float_ptr(roughness_uv_scale.data_ptr()))
            materials.append(redner.Material(\
                diffuse_reflectance,
                specular_reflectance,
                roughness,
                two_sided))

        area_lights = []
        for i in range(num_lights):
            shape_id = args[current_index]
            current_index += 1
            intensity = args[current_index]
            current_index += 1
            two_sided = args[current_index]
            current_index += 1

            area_lights.append(redner.AreaLight(\
                shape_id,
                redner.float_ptr(intensity.data_ptr()),
                two_sided))

        envmap = None
        if args[current_index] is not None:
            values = args[current_index]
            current_index += 1
            envmap_uv_scale = args[current_index]
            current_index += 1
            env_to_world = args[current_index]
            current_index += 1
            world_to_env = args[current_index]
            current_index += 1
            sample_cdf_ys = args[current_index]
            current_index += 1
            sample_cdf_xs = args[current_index]
            current_index += 1
            pdf_norm = args[current_index]
            current_index += 1
            values = redner.Texture3(\
                redner.float_ptr(values.data_ptr()),
                int(values.shape[2]), # width
                int(values.shape[1]), # height
                int(values.shape[0]), # num levels
                redner.float_ptr(envmap_uv_scale.data_ptr()))
            envmap = redner.EnvironmentMap(\
                values,
                redner.float_ptr(env_to_world.data_ptr()),
                redner.float_ptr(world_to_env.data_ptr()),
                redner.float_ptr(sample_cdf_ys.data_ptr()),
                redner.float_ptr(sample_cdf_xs.data_ptr()),
                pdf_norm)
        else:
            current_index += 7

        # Options
        num_samples = args[current_index]
        current_index += 1
        max_bounces = args[current_index]
        current_index += 1
        channels = args[current_index]
        current_index += 1
        sampler_type = args[current_index]
        current_index += 1
        use_primary_edge_sampling = args[current_index]
        current_index += 1
        use_secondary_edge_sampling = args[current_index]
        current_index += 1

        start = time.time()
        scene = redner.Scene(
            camera, shapes, materials, area_lights, envmap,
            pyredner.get_use_gpu(),
            pyredner.get_device().index
            if pyredner.get_device().index is not None else -1,
            use_primary_edge_sampling, use_secondary_edge_sampling)
        time_elapsed = time.time() - start
        if print_timing:
            print('Scene construction, time: %.5f s' % time_elapsed)

        # num_samples may be a single int; convert it to a tuple
        if isinstance(num_samples, int):
            num_samples = (num_samples, num_samples)

        options = redner.RenderOptions(seed, num_samples[0], max_bounces,
                                       channels, sampler_type)
        num_channels = redner.compute_num_channels(channels)
        rendered_image = torch.zeros(resolution[0],
                                     resolution[1],
                                     num_channels,
                                     device=pyredner.get_device())
        start = time.time()
        redner.render(scene, options,
                      redner.float_ptr(rendered_image.data_ptr()),
                      redner.float_ptr(0), None, redner.float_ptr(0))
        time_elapsed = time.time() - start
        if print_timing:
            print('Forward pass, time: %.5f s' % time_elapsed)

        # # For debugging
        # debug_img = torch.zeros(256, 256, 3)
        # redner.render(scene,
        #               options,
        #               redner.float_ptr(rendered_image.data_ptr()),
        #               redner.float_ptr(0),
        #               None,
        #               redner.float_ptr(debug_img.data_ptr()))
        # pyredner.imwrite(debug_img, 'debug.exr')
        # exit()

        ctx.shapes = shapes
        ctx.materials = materials
        ctx.area_lights = area_lights
        ctx.envmap = envmap
        ctx.scene = scene
        ctx.options = options
        ctx.num_samples = num_samples
        return rendered_image
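
The channel list decides the width of the output image buffer; a small sketch requesting radiance plus alpha (the redner.channels names are assumed to be available in this build):

import torch
import redner
import pyredner

channels = [redner.channels.radiance, redner.channels.alpha]
num_channels = redner.compute_num_channels(channels)   # 3 (radiance) + 1 (alpha)
img = torch.zeros(256, 256, num_channels, device = pyredner.get_device())
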
Example #29
0
    def backward(ctx,
                 grad_img):
        if not grad_img.is_contiguous():
            grad_img = grad_img.contiguous()
        scene = ctx.scene
        options = ctx.options

        d_cam_to_world = torch.zeros(4, 4)
        d_world_to_cam = torch.zeros(4, 4)
        d_ndc_to_cam = torch.zeros(3, 3)
        d_cam_to_ndc = torch.zeros(3, 3)
        d_camera = redner.DCamera(redner.float_ptr(d_cam_to_world.data_ptr()),
                                  redner.float_ptr(d_world_to_cam.data_ptr()),
                                  redner.float_ptr(d_ndc_to_cam.data_ptr()),
                                  redner.float_ptr(d_cam_to_ndc.data_ptr()))
        d_vertices_list = []
        d_uvs_list = []
        d_normals_list = []
        d_shapes = []
        for shape in ctx.shapes:
            num_vertices = shape.num_vertices
            d_vertices = torch.zeros(num_vertices, 3,
                device = pyredner.get_device())
            d_uvs = torch.zeros(num_vertices, 2,
                device = pyredner.get_device()) if shape.has_uvs() else None
            d_normals = torch.zeros(num_vertices, 3,
                device = pyredner.get_device()) if shape.has_normals() else None
            d_vertices_list.append(d_vertices)
            d_uvs_list.append(d_uvs)
            d_normals_list.append(d_normals)
            d_shapes.append(redner.DShape(\
                redner.float_ptr(d_vertices.data_ptr()),
                redner.float_ptr(d_uvs.data_ptr() if d_uvs is not None else 0),
                redner.float_ptr(d_normals.data_ptr() if d_normals is not None else 0)))

        d_diffuse_list = []
        d_specular_list = []
        d_roughness_list = []
        d_materials = []
        for material in ctx.materials:
            diffuse_size = material.get_diffuse_size()
            specular_size = material.get_specular_size()
            roughness_size = material.get_roughness_size()
            if diffuse_size[0] == 0:
                d_diffuse = torch.zeros(3, device = pyredner.get_device())
            else:
                d_diffuse = torch.zeros(diffuse_size[2],
                                        diffuse_size[1],
                                        diffuse_size[0],
                                        3, device = pyredner.get_device())
            if specular_size[0] == 0:
                d_specular = torch.zeros(3, device = pyredner.get_device())
            else:
                d_specular = torch.zeros(specular_size[2],
                                         specular_size[1],
                                         specular_size[0],
                                         3, device = pyredner.get_device())
            if roughness_size[0] == 0:
                d_roughness = torch.zeros(1, device = pyredner.get_device())
            else:
                d_roughness = torch.zeros(roughness_size[2],
                                          roughness_size[1],
                                          roughness_size[0],
                                          1, device = pyredner.get_device())
            d_diffuse_list.append(d_diffuse)
            d_specular_list.append(d_specular)
            d_roughness_list.append(d_roughness)
            d_diffuse_uv_scale = torch.zeros(2)
            d_specular_uv_scale = torch.zeros(2)
            d_roughness_uv_scale = torch.zeros(2)
            d_diffuse_tex = redner.Texture3(\
                redner.float_ptr(d_diffuse.data_ptr()),
                diffuse_size[0], diffuse_size[1], diffuse_size[2],
                redner.float_ptr(d_diffuse_uv_scale.data_ptr()))
            d_specular_tex = redner.Texture3(\
                redner.float_ptr(d_specular.data_ptr()),
                specular_size[0], specular_size[1], specular_size[2],
                redner.float_ptr(d_specular_uv_scale.data_ptr()))
            d_roughness_tex = redner.Texture1(\
                redner.float_ptr(d_roughness.data_ptr()),
                roughness_size[0], roughness_size[1], roughness_size[2],
                redner.float_ptr(d_roughness_uv_scale.data_ptr()))
            d_materials.append(redner.DMaterial(\
                d_diffuse_tex, d_specular_tex, d_roughness_tex))

        d_intensity_list = []
        d_area_lights = []
        for light in ctx.area_lights:
            d_intensity = torch.zeros(3, device = pyredner.get_device())
            d_intensity_list.append(d_intensity)
            d_area_lights.append(\
                redner.DAreaLight(redner.float_ptr(d_intensity.data_ptr())))

        d_envmap = None
        if ctx.envmap is not None:
            envmap = ctx.envmap
            size = envmap.get_size()
            d_envmap_values = \
                torch.zeros(size[2],
                            size[1],
                            size[0],
                            3,
                            device = pyredner.get_device())
            d_envmap_uv_scale = torch.zeros(2)
            d_envmap_tex = redner.Texture3(\
                redner.float_ptr(d_envmap_values.data_ptr()),
                size[0], size[1], size[2],
                redner.float_ptr(d_envmap_uv_scale.data_ptr()))
            d_world_to_env = torch.zeros(4, 4)
            d_envmap = redner.DEnvironmentMap(\
                d_envmap_tex,
                redner.float_ptr(d_world_to_env.data_ptr()))

        d_scene = redner.DScene(d_camera,
                                d_shapes,
                                d_materials,
                                d_area_lights,
                                d_envmap,
                                pyredner.get_use_gpu())
        if not get_use_correlated_random_number():
            # Decouple the forward/backward random numbers by adding a big prime number
            options.seed += 1000003
        start = time.time()
        redner.render(scene, options,
                      redner.float_ptr(0),
                      redner.float_ptr(grad_img.data_ptr()),
                      d_scene,
                      redner.float_ptr(0))
        time_elapsed = time.time() - start
        if print_timing:
            print('Backward pass, time: %.5f s' % time_elapsed)

        # # For debugging
        # pyredner.imwrite(grad_img, 'grad_img.exr')
        # grad_img = torch.ones(256, 256, 3)
        # debug_img = torch.zeros(256, 256, 3)
        # redner.render(scene, options,
        #               redner.float_ptr(0),
        #               redner.float_ptr(grad_img.data_ptr()),
        #               d_scene,
        #               redner.float_ptr(debug_img.data_ptr()))
        # pyredner.imwrite(debug_img, 'debug.exr')
        # pyredner.imwrite(-debug_img, 'debug_.exr')
        # exit()

        ret_list = []
        ret_list.append(None) # seed
        ret_list.append(None) # num_shapes
        ret_list.append(None) # num_materials
        ret_list.append(None) # num_lights
        ret_list.append(d_cam_to_world)
        ret_list.append(d_world_to_cam)
        ret_list.append(d_ndc_to_cam)
        ret_list.append(d_cam_to_ndc)
        ret_list.append(None) # clip near
        ret_list.append(None) # resolution
        ret_list.append(None) # fisheye

        num_shapes = len(ctx.shapes)
        for i in range(num_shapes):
            ret_list.append(d_vertices_list[i])
            ret_list.append(None) # indices
            ret_list.append(d_uvs_list[i])
            ret_list.append(d_normals_list[i])
            ret_list.append(None) # material id
            ret_list.append(None) # light id

        num_materials = len(ctx.materials)
        for i in range(num_materials):
            ret_list.append(d_diffuse_list[i])
            ret_list.append(None) # diffuse_uv_scale
            ret_list.append(d_specular_list[i])
            ret_list.append(None) # specular_uv_scale
            ret_list.append(d_roughness_list[i])
            ret_list.append(None) # roughness_uv_scale
            ret_list.append(None) # two sided

        num_area_lights = len(ctx.area_lights)
        for i in range(num_area_lights):
            ret_list.append(None) # shape id
            ret_list.append(d_intensity_list[i].cpu())
            ret_list.append(None) # two sided
            ret_list.append(None)

        if ctx.envmap is not None:
            ret_list.append(d_envmap_values)
            ret_list.append(None) # uv_scale
            ret_list.append(None) # env_to_world
            ret_list.append(d_world_to_env)
            ret_list.append(None) # sample_cdf_ys
            ret_list.append(None) # sample_cdf_xs
            ret_list.append(None) # pdf_norm
        else:
            ret_list.append(None)
            ret_list.append(None)
            ret_list.append(None)
            ret_list.append(None)
            ret_list.append(None)
            ret_list.append(None)
            ret_list.append(None)
        
        ret_list.append(None) # num samples
        ret_list.append(None) # num bounces
        ret_list.append(None) # channels

        return tuple(ret_list)
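
# How the gradient buffers above are used in practice: a minimal sketch,
# assuming `scene` is an existing pyredner.Scene whose differentiable leaf
# tensors (e.g. shape vertices) were created with requires_grad=True, and that
# 'target.exr' is a hypothetical reference image.
import torch
import pyredner

scene_args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 4,
    max_bounces = 1)
render = pyredner.RenderFunction.apply
img = render(0, *scene_args)             # forward pass
target = pyredner.imread('target.exr')
if pyredner.get_use_gpu():
    target = target.cuda()
loss = (img - target).pow(2).sum()
loss.backward()                          # triggers the backward pass above
# Gradients are now accumulated on the leaf tensors,
# e.g. scene.shapes[0].vertices.grad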
Example #30
0
def load_obj(filename: str,
             obj_group: bool = True,
             flip_tex_coords: bool = True,
             use_common_indices: bool = False,
             return_objects: bool = False):
    """
        Load from a Wavefront obj file as PyTorch tensors.

        Args
        ====
        obj_group: bool
            Split the meshes based on materials.
        flip_tex_coords: bool
            Flip the v coordinate of the uvs by applying v' = 1 - v.
        use_common_indices: bool
            Use the same indices for positions, uvs, and normals.
            Not recommended: positions at texture seams get duplicated,
            so the optimization can "tear" the object apart at those seams.
        return_objects: bool
            Output a list of Object instead of the raw maps.
            If a shape has no corresponding material, a grey material is assigned.

        Returns
        =======
        if return_objects == True, return a list of Object
        if return_objects == False, return (material_map, mesh_list, light_map)
        material_map -> Map[mtl_name, WavefrontMaterial]
        mesh_list -> List[TriangleMesh]
        light_map -> Map[mtl_name, torch.Tensor]

        A brief usage sketch follows this function.
    """
    vertices_pool = []
    uvs_pool = []
    normals_pool = []
    indices = []
    uv_indices = []
    normal_indices = []
    vertices = []
    uvs = []
    normals = []
    vertices_map = {}
    uvs_map = {}
    normals_map = {}
    material_map = {}
    current_mtllib = {}
    current_material_name = None

    def create_mesh(indices, uv_indices, normal_indices, vertices, uvs,
                    normals):
        indices = torch.tensor(indices,
                               dtype=torch.int32,
                               device=pyredner.get_device())
        if len(uv_indices) == 0:
            uv_indices = None
        else:
            uv_indices = torch.tensor(uv_indices,
                                      dtype=torch.int32,
                                      device=pyredner.get_device())
        if len(normal_indices) == 0:
            normal_indices = None
        else:
            normal_indices = torch.tensor(normal_indices,
                                          dtype=torch.int32,
                                          device=pyredner.get_device())
        vertices = torch.tensor(vertices, device=pyredner.get_device())
        if len(uvs) == 0:
            uvs = None
        else:
            uvs = torch.tensor(uvs, device=pyredner.get_device())
        if len(normals) == 0:
            normals = None
        else:
            normals = torch.tensor(normals, device=pyredner.get_device())
        return TriangleMesh(indices, uv_indices, normal_indices, vertices, uvs,
                            normals)

    mesh_list = []
    light_map = {}

    with open(filename, 'r') as f:
        d = os.path.dirname(filename)
        cwd = os.getcwd()
        if d != '':
            os.chdir(d)
        for line in f:
            line = line.strip()
            splitted = re.split(r'\ +', line)
            if splitted[0] == 'mtllib':
                current_mtllib = load_mtl(splitted[1])
            elif splitted[0] == 'usemtl':
                if len(indices) > 0 and obj_group is True:
                    # Flush
                    mesh_list.append(
                        (current_material_name,
                         create_mesh(indices, uv_indices, normal_indices,
                                     vertices, uvs, normals)))
                    indices = []
                    uv_indices = []
                    normal_indices = []
                    vertices = []
                    normals = []
                    uvs = []
                    vertices_map = {}
                    uvs_map = {}
                    normals_map = {}

                mtl_name = splitted[1]
                current_material_name = mtl_name
                if mtl_name not in material_map:
                    m = current_mtllib[mtl_name]
                    if m.map_Kd is None:
                        diffuse_reflectance = torch.tensor(
                            m.Kd,
                            dtype=torch.float32,
                            device=pyredner.get_device())
                    else:
                        diffuse_reflectance = pyredner.imread(m.map_Kd)
                        if pyredner.get_use_gpu():
                            diffuse_reflectance = diffuse_reflectance.cuda(
                                device=pyredner.get_device())
                    if m.map_Ks is None:
                        specular_reflectance = torch.tensor(
                            m.Ks,
                            dtype=torch.float32,
                            device=pyredner.get_device())
                    else:
                        specular_reflectance = pyredner.imread(m.map_Ks)
                        if pyredner.get_use_gpu():
                            specular_reflectance = specular_reflectance.cuda(
                                device=pyredner.get_device())
                    # Convert the Phong exponent Ns (scalar or texture)
                    # to roughness via roughness = 2 / (Ns + 2).
                    if m.map_Ns is None:
                        roughness = torch.tensor([2.0 / (m.Ns + 2.0)],
                                                 dtype=torch.float32,
                                                 device=pyredner.get_device())
                    else:
                        roughness = 2.0 / (pyredner.imread(m.map_Ns) + 2.0)
                        if pyredner.get_use_gpu():
                            roughness = roughness.cuda(
                                device=pyredner.get_device())
                    if m.Ke != (0.0, 0.0, 0.0):
                        light_map[mtl_name] = torch.tensor(m.Ke,
                                                           dtype=torch.float32)
                    material_map[mtl_name] = pyredner.Material(\
                        diffuse_reflectance, specular_reflectance, roughness)
            elif splitted[0] == 'v':
                vertices_pool.append([
                    float(splitted[1]),
                    float(splitted[2]),
                    float(splitted[3])
                ])
            elif splitted[0] == 'vt':
                u = float(splitted[1])
                v = float(splitted[2])
                if flip_tex_coords:
                    v = 1 - v
                uvs_pool.append([u, v])
            elif splitted[0] == 'vn':
                normals_pool.append([
                    float(splitted[1]),
                    float(splitted[2]),
                    float(splitted[3])
                ])
            elif splitted[0] == 'f':

                def num_indices(x):
                    return len(re.split('/', x))

                def get_index(x, i):
                    return int(re.split('/', x)[i])

                def parse_face_index(x, i):
                    f = get_index(x, i)
                    if f > 0:
                        f -= 1
                    return f

                assert (len(splitted) <= 5)

                def get_vertex_id(indices):
                    pi = parse_face_index(indices, 0)
                    uvi = None
                    if (num_indices(indices) > 1
                            and re.split('/', indices)[1] != ''):
                        uvi = parse_face_index(indices, 1)
                    ni = None
                    if (num_indices(indices) > 2
                            and re.split('/', indices)[2] != ''):
                        ni = parse_face_index(indices, 2)
                    if use_common_indices:
                        # vertex, uv, normals share the same indexing
                        key = (pi, uvi, ni)
                        if key in vertices_map:
                            vertex_id = vertices_map[key]
                            return vertex_id, vertex_id, vertex_id

                        vertex_id = len(vertices)
                        vertices_map[key] = vertex_id
                        vertices.append(vertices_pool[pi])
                        if uvi is not None:
                            uvs.append(uvs_pool[uvi])
                        if ni is not None:
                            normals.append(normals_pool[ni])
                        return vertex_id, vertex_id, vertex_id
                    else:
                        # vertex, uv, normals use separate indexing
                        vertex_id = None
                        uv_id = None
                        normal_id = None

                        if pi in vertices_map:
                            vertex_id = vertices_map[pi]
                        else:
                            vertex_id = len(vertices)
                            vertices.append(vertices_pool[pi])
                            vertices_map[pi] = vertex_id

                        if uvi is not None:
                            if uvi in uvs_map:
                                uv_id = uvs_map[uvi]
                            else:
                                uv_id = len(uvs)
                                uvs.append(uvs_pool[uvi])
                                uvs_map[uvi] = uv_id

                        if ni is not None:
                            if ni in normals_map:
                                normal_id = normals_map[ni]
                            else:
                                normal_id = len(normals)
                                normals.append(normals_pool[ni])
                                normals_map[ni] = normal_id
                        return vertex_id, uv_id, normal_id

                vid0, uv_id0, n_id0 = get_vertex_id(splitted[1])
                vid1, uv_id1, n_id1 = get_vertex_id(splitted[2])
                vid2, uv_id2, n_id2 = get_vertex_id(splitted[3])

                indices.append([vid0, vid1, vid2])
                if uv_id0 is not None:
                    assert (uv_id1 is not None and uv_id2 is not None)
                    uv_indices.append([uv_id0, uv_id1, uv_id2])
                if n_id0 is not None:
                    assert (n_id1 is not None and n_id2 is not None)
                    normal_indices.append([n_id0, n_id1, n_id2])
                if (len(splitted) == 5):
                    vid3, uv_id3, n_id3 = get_vertex_id(splitted[4])
                    indices.append([vid0, vid2, vid3])
                    if uv_id0 is not None:
                        assert (uv_id3 is not None)
                        uv_indices.append([uv_id0, uv_id2, uv_id3])
                    if n_id0 is not None:
                        assert (n_id3 is not None)
                        normal_indices.append([n_id0, n_id2, n_id3])

    mesh_list.append((current_material_name,
                      create_mesh(indices, uv_indices, normal_indices,
                                  vertices, uvs, normals)))
    if d != '':
        os.chdir(cwd)

    if return_objects:
        objects = []
        for mtl_name, mesh in mesh_list:
            if mtl_name in material_map:
                m = material_map[mtl_name]
            else:
                m = pyredner.Material(diffuse_reflectance = \
                    torch.tensor((0.5, 0.5, 0.5),
                                 device = pyredner.get_device()))
            if mtl_name in light_map:
                l = light_map[mtl_name]
            else:
                l = None
            objects.append(pyredner.Object(\
                vertices = mesh.vertices,
                indices = mesh.indices,
                material = m,
                light_intensity = l,
                uvs = mesh.uvs,
                normals = mesh.normals,
                uv_indices = mesh.uv_indices,
                normal_indices = mesh.normal_indices))
        return objects
    else:
        return material_map, mesh_list, light_map
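
# A minimal usage sketch for the loader above. 'teapot.obj' is a hypothetical
# path; pyredner and torch are assumed to be imported at the top of this file.
# With return_objects=True we directly obtain pyredner.Object instances;
# otherwise we get the raw (material_map, mesh_list, light_map) triple.
objects = load_obj('teapot.obj', return_objects = True)
print(len(objects), 'objects loaded')

material_map, mesh_list, light_map = load_obj('teapot.obj', return_objects = False)
for mtl_name, mesh in mesh_list:
    print(mtl_name,
          mesh.vertices.shape[0], 'vertices,',
          mesh.indices.shape[0], 'triangles')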
Example #31
0
def load_obj(filename, obj_group = True):
    """
        Load from a Wavefront obj file as PyTorch tensors.
        XXX: this is slow, maybe move to C++?
    """
    vertices_pool = []
    uvs_pool = []
    normals_pool = []
    indices = []
    vertices = []
    normals = []
    uvs = []
    vertices_map = {}
    material_map = {}
    current_mtllib = {}
    current_material_name = None

    def create_mesh(indices, vertices, normals, uvs):
        indices = torch.tensor(indices, dtype = torch.int32, device = pyredner.get_device())
        vertices = torch.tensor(vertices, device = pyredner.get_device())
        if len(uvs) == 0:
            uvs = None
        else:
            uvs = torch.tensor(uvs, device = pyredner.get_device())
        if len(normals) == 0:
            normals = None
        else:
            normals = torch.tensor(normals, device = pyredner.get_device())
        return TriangleMesh(vertices, indices, uvs, normals)

    mesh_list = []
    light_map = {}

    f = open(filename, 'r')
    d = os.path.dirname(filename)
    cwd = os.getcwd()
    if d != '':
        os.chdir(d)
    for line in f:
        line = line.strip()
        splitted = re.split(r'\ +', line)
        if splitted[0] == 'mtllib':
            current_mtllib = load_mtl(splitted[1])
        elif splitted[0] == 'usemtl':
            if len(indices) > 0 and obj_group is True:
                # Flush
                mesh_list.append((current_material_name, create_mesh(indices, vertices, normals, uvs)))
                indices = []
                vertices = []
                normals = []
                uvs = []
                vertices_map = {}
            mtl_name = splitted[1]
            current_material_name = mtl_name
            if mtl_name not in material_map:
                m = current_mtllib[mtl_name]
                if m.map_Kd is None:
                    diffuse_reflectance = torch.tensor(m.Kd,
                        dtype = torch.float32, device = pyredner.get_device())
                else:
                    diffuse_reflectance = pyredner.imread(m.map_Kd)
                    if pyredner.get_use_gpu():
                        diffuse_reflectance = diffuse_reflectance.cuda(device = pyredner.get_device())
                if m.map_Ks is None:
                    specular_reflectance = torch.tensor(m.Ks,
                        dtype = torch.float32, device = pyredner.get_device())
                else:
                    specular_reflectance = pyredner.imread(m.map_Ks)
                    if pyredner.get_use_gpu():
                        specular_reflectance = specular_reflectance.cuda(device = pyredner.get_device())
                if m.map_Ns is None:
                    roughness = torch.tensor([2.0 / (m.Ns + 2.0)],
                        dtype = torch.float32, device = pyredner.get_device())
                else:
                    roughness = 2.0 / (pyredner.imread(m.map_Ns) + 2.0)
                    if pyredner.get_use_gpu():
                        roughness = roughness.cuda(device = pyredner.get_device())
                if m.Ke != (0.0, 0.0, 0.0):
                    light_map[mtl_name] = torch.tensor(m.Ke, dtype = torch.float32)
                material_map[mtl_name] = pyredner.Material(\
                    diffuse_reflectance, specular_reflectance, roughness)
        elif splitted[0] == 'v':
            vertices_pool.append([float(splitted[1]), float(splitted[2]), float(splitted[3])])
        elif splitted[0] == 'vt':
            uvs_pool.append([float(splitted[1]), float(splitted[2])])
        elif splitted[0] == 'vn':
            normals_pool.append([float(splitted[1]), float(splitted[2]), float(splitted[3])])
        elif splitted[0] == 'f':
            def num_indices(x):
                return len(re.split('/', x))
            def get_index(x, i):
                return int(re.split('/', x)[i])
            def parse_face_index(x, i):
                f = get_index(x, i)
                if f < 0:
                    if (i == 0):
                        f += len(vertices)
                    if (i == 1):
                        f += len(uvs)
                else:
                    f -= 1
                return f
            assert(len(splitted) <= 5)
            def get_vertex_id(indices):
                pi = parse_face_index(indices, 0)
                uvi = None
                if (num_indices(indices) > 1 and re.split('/', indices)[1] != ''):
                    uvi = parse_face_index(indices, 1)
                ni = None
                if (num_indices(indices) > 2 and re.split('/', indices)[2] != ''):
                    ni = parse_face_index(indices, 2)
                key = (pi, uvi, ni)
                if key in vertices_map:
                    return vertices_map[key]

                vertex_id = len(vertices)
                vertices_map[key] = vertex_id
                vertices.append(vertices_pool[pi])
                if uvi is not None:
                    uvs.append(uvs_pool[uvi])
                if ni is not None:
                    normals.append(normals_pool[ni])
                return vertex_id
            vid0 = get_vertex_id(splitted[1])
            vid1 = get_vertex_id(splitted[2])
            vid2 = get_vertex_id(splitted[3])

            indices.append([vid0, vid1, vid2])
            if (len(splitted) == 5):
                vid3 = get_vertex_id(splitted[4])
                indices.append([vid0, vid2, vid3])
    
    mesh_list.append((current_material_name,
        create_mesh(indices, vertices, normals, uvs)))
    if d != '':
        os.chdir(cwd)
    f.close()
    return material_map, mesh_list, light_map
Example #32
0
# Here we use an environment light,
# which is a texture representing infinitely far away light sources in
# spherical coordinates.
#envmap = pyredner.imread('sunsky.exr')
#if pyredner.get_use_gpu():
#    envmap = envmap.cuda()
#envmap = pyredner.EnvironmentMap(envmap)
#
## Finally we construct our scene using all the variables we setup previously.
#scene = pyredner.Scene(cam, shapes, materials, area_lights = [], envmap = envmap)
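#
# A minimal sketch (hypothetical values) of building such an environment map
# procedurally instead of loading 'sunsky.exr': any H x W x 3 float tensor laid
# out in spherical coordinates works, with rows indexing the polar angle and
# columns the azimuth. Here a simple vertical gradient stands in for a sky.
#res = (32, 64)  # (height, width) of the latitude-longitude map
#sky = torch.linspace(1.0, 0.0, res[0]).view(res[0], 1, 1)
#sky = sky.expand(res[0], res[1], 3).contiguous()
#if pyredner.get_use_gpu():
#    sky = sky.cuda()
#envmap = pyredner.EnvironmentMap(sky)
#scene = pyredner.Scene(cam, shapes, materials, area_lights = [], envmap = envmap)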

scene_args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 512,
    max_bounces = 1)

render = pyredner.RenderFunction.apply
img = render(0, *scene_args)

#%%
pyredner.imwrite(img.cpu(), 'results/pose_estimation/target.exr')
pyredner.imwrite(img.cpu(), 'results/pose_estimation/target.png')

# Load the target image back from disk
target = pyredner.imread('results/pose_estimation/target.exr')

if pyredner.get_use_gpu():
    target = target.cuda()

#%%
Example #33
0
    def create_gradient_buffers(ctx):
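        # Allocate zero-filled gradient buffers for every differentiable scene
        # quantity (camera, per-shape geometry and vertex colors, material
        # textures and uv scales, area-light intensities, environment map) and
        # wrap their pointers in redner's D* structs so the native backward
        # pass can accumulate gradients into them.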
        scene = ctx.scene
        options = ctx.options
        camera = ctx.camera

        buffers = Context()

        if camera.use_look_at:
            buffers.d_cam_position = torch.zeros(3, device = pyredner.get_device())
            buffers.d_cam_look = torch.zeros(3, device = pyredner.get_device())
            buffers.d_cam_up = torch.zeros(3, device = pyredner.get_device())
            buffers.d_cam_to_world = None
            buffers.d_world_to_cam = None
        else:
            buffers.d_cam_position = None
            buffers.d_cam_look = None
            buffers.d_cam_up = None
            buffers.d_cam_to_world = torch.zeros(4, 4, device = pyredner.get_device())
            buffers.d_world_to_cam = torch.zeros(4, 4, device = pyredner.get_device())
        buffers.d_intrinsic_mat_inv = torch.zeros(3, 3, device = pyredner.get_device())
        buffers.d_intrinsic_mat = torch.zeros(3, 3, device = pyredner.get_device())
        if camera.use_look_at:
            buffers.d_camera = redner.DCamera(\
                redner.float_ptr(buffers.d_cam_position.data_ptr()),
                redner.float_ptr(buffers.d_cam_look.data_ptr()),
                redner.float_ptr(buffers.d_cam_up.data_ptr()),
                redner.float_ptr(0), # cam_to_world
                redner.float_ptr(0), # world_to_cam
                redner.float_ptr(buffers.d_intrinsic_mat_inv.data_ptr()),
                redner.float_ptr(buffers.d_intrinsic_mat.data_ptr()))
        else:
            buffers.d_camera = redner.DCamera(\
                redner.float_ptr(0), # pos
                redner.float_ptr(0), # look
                redner.float_ptr(0), # up
                redner.float_ptr(buffers.d_cam_to_world.data_ptr()),
                redner.float_ptr(buffers.d_world_to_cam.data_ptr()),
                redner.float_ptr(buffers.d_intrinsic_mat_inv.data_ptr()),
                redner.float_ptr(buffers.d_intrinsic_mat.data_ptr()))
        buffers.d_vertices_list = []
        buffers.d_uvs_list = []
        buffers.d_normals_list = []
        buffers.d_colors_list = []
        buffers.d_shapes = []
        for shape in ctx.shapes:
            num_vertices = shape.num_vertices
            num_uv_vertices = shape.num_uv_vertices
            num_normal_vertices = shape.num_normal_vertices
            d_vertices = torch.zeros(num_vertices, 3,
                device = pyredner.get_device())
            d_uvs = torch.zeros(num_uv_vertices, 2,
                device = pyredner.get_device()) if shape.has_uvs() else None
            d_normals = torch.zeros(num_normal_vertices, 3,
                device = pyredner.get_device()) if shape.has_normals() else None
            d_colors = torch.zeros(num_vertices, 3,
                device = pyredner.get_device()) if shape.has_colors() else None
            buffers.d_vertices_list.append(d_vertices)
            buffers.d_uvs_list.append(d_uvs)
            buffers.d_normals_list.append(d_normals)
            buffers.d_colors_list.append(d_colors)
            buffers.d_shapes.append(redner.DShape(\
                redner.float_ptr(d_vertices.data_ptr()),
                redner.float_ptr(d_uvs.data_ptr() if d_uvs is not None else 0),
                redner.float_ptr(d_normals.data_ptr() if d_normals is not None else 0),
                redner.float_ptr(d_colors.data_ptr() if d_colors is not None else 0)))

        buffers.d_diffuse_list = []
        buffers.d_diffuse_uv_scale_list = []
        buffers.d_specular_list = []
        buffers.d_specular_uv_scale_list = []
        buffers.d_roughness_list = []
        buffers.d_roughness_uv_scale_list = []
        buffers.d_generic_list = []
        buffers.d_generic_uv_scale_list = []
        buffers.d_normal_map_list = []
        buffers.d_normal_map_uv_scale_list = []
        buffers.d_materials = []
        for material in ctx.materials:
            if material.get_diffuse_size(0)[0] == 0:
                d_diffuse = [torch.zeros(3, device = pyredner.get_device())]
            else:
                d_diffuse = []
                for l in range(material.get_diffuse_levels()):
                    diffuse_size = material.get_diffuse_size(l)
                    d_diffuse.append(\
                        torch.zeros(diffuse_size[1],
                                    diffuse_size[0],
                                    3, device = pyredner.get_device()))

            if material.get_specular_size(0)[0] == 0:
                d_specular = [torch.zeros(3, device = pyredner.get_device())]
            else:
                d_specular = []
                for l in range(material.get_specular_levels()):
                    specular_size = material.get_specular_size(l)
                    d_specular.append(\
                        torch.zeros(specular_size[1],
                                    specular_size[0],
                                    3, device = pyredner.get_device()))

            if material.get_roughness_size(0)[0] == 0:
                d_roughness = [torch.zeros(1, device = pyredner.get_device())]
            else:
                d_roughness = []
                for l in range(material.get_roughness_levels()):
                    roughness_size = material.get_roughness_size(l)
                    d_roughness.append(\
                        torch.zeros(roughness_size[1],
                                    roughness_size[0],
                                    1, device = pyredner.get_device()))

            if material.get_generic_levels() == 0:
                d_generic = None
            else:
                d_generic = []
                for l in range(material.get_generic_levels()):
                    generic_size = material.get_generic_size(l)
                    d_generic.append(\
                        torch.zeros(generic_size[2],
                                    generic_size[1],
                                    generic_size[0], device = pyredner.get_device()))

            if material.get_normal_map_levels() == 0:
                d_normal_map = None
            else:
                d_normal_map = []
                for l in range(material.get_normal_map_levels()):
                    normal_map_size = material.get_normal_map_size(l)
                    d_normal_map.append(\
                        torch.zeros(normal_map_size[1],
                                    normal_map_size[0],
                                    3, device = pyredner.get_device()))

            buffers.d_diffuse_list.append(d_diffuse)
            buffers.d_specular_list.append(d_specular)
            buffers.d_roughness_list.append(d_roughness)
            buffers.d_generic_list.append(d_generic)
            buffers.d_normal_map_list.append(d_normal_map)
            d_diffuse_uv_scale = torch.zeros(2, device = pyredner.get_device())
            d_specular_uv_scale = torch.zeros(2, device = pyredner.get_device())
            d_roughness_uv_scale = torch.zeros(2, device = pyredner.get_device())
            buffers.d_diffuse_uv_scale_list.append(d_diffuse_uv_scale)
            buffers.d_specular_uv_scale_list.append(d_specular_uv_scale)
            buffers.d_roughness_uv_scale_list.append(d_roughness_uv_scale)
            if d_generic is None:
                d_generic_uv_scale = None
            else:
                d_generic_uv_scale = torch.zeros(2, device = pyredner.get_device())
            if d_normal_map is None:
                d_normal_map_uv_scale = None
            else:
                d_normal_map_uv_scale = torch.zeros(2, device = pyredner.get_device())

            buffers.d_generic_uv_scale_list.append(d_generic_uv_scale)
            buffers.d_normal_map_uv_scale_list.append(d_normal_map_uv_scale)
            if d_diffuse[0].dim() == 1:
                d_diffuse_tex = redner.Texture3(\
                    [redner.float_ptr(d_diffuse[0].data_ptr())],
                    [0],
                    [0],
                    3,
                    redner.float_ptr(d_diffuse_uv_scale.data_ptr()))
            else:
                d_diffuse_tex = redner.Texture3(\
                    [redner.float_ptr(x.data_ptr()) for x in d_diffuse],
                    [x.shape[1] for x in d_diffuse],
                    [x.shape[0] for x in d_diffuse],
                    3,
                    redner.float_ptr(d_diffuse_uv_scale.data_ptr()))

            if d_specular[0].dim() == 1:
                d_specular_tex = redner.Texture3(\
                    [redner.float_ptr(d_specular[0].data_ptr())],
                    [0],
                    [0],
                    3,
                    redner.float_ptr(d_specular_uv_scale.data_ptr()))
            else:
                d_specular_tex = redner.Texture3(\
                    [redner.float_ptr(x.data_ptr()) for x in d_specular],
                    [x.shape[1] for x in d_specular],
                    [x.shape[0] for x in d_specular],
                    3,
                    redner.float_ptr(d_specular_uv_scale.data_ptr()))

            if d_roughness[0].dim() == 1:
                d_roughness_tex = redner.Texture1(\
                    [redner.float_ptr(d_roughness[0].data_ptr())],
                    [0],
                    [0],
                    1,
                    redner.float_ptr(d_roughness_uv_scale.data_ptr()))
            else:
                d_roughness_tex = redner.Texture1(\
                    [redner.float_ptr(x.data_ptr()) for x in d_roughness],
                    [x.shape[1] for x in d_roughness],
                    [x.shape[0] for x in d_roughness],
                    1,
                    redner.float_ptr(d_roughness_uv_scale.data_ptr()))

            if d_generic is None:
                d_generic_tex = redner.TextureN(\
                    [], [], [], 0, redner.float_ptr(0))
            else:
                d_generic_tex = redner.TextureN(\
                    [redner.float_ptr(x.data_ptr()) for x in d_generic],
                    [x.shape[1] for x in d_generic],
                    [x.shape[0] for x in d_generic],
                    d_generic[0].shape[2],
                    redner.float_ptr(d_generic_uv_scale.data_ptr()))

            if d_normal_map is None:
                d_normal_map = redner.Texture3(\
                    [], [], [], 0, redner.float_ptr(0))
            else:
                d_normal_map = redner.Texture3(\
                    [redner.float_ptr(x.data_ptr()) for x in d_normal_map],
                    [x.shape[1] for x in d_normal_map],
                    [x.shape[0] for x in d_normal_map],
                    3,
                    redner.float_ptr(d_normal_map_uv_scale.data_ptr()))
            buffers.d_materials.append(redner.DMaterial(\
                d_diffuse_tex, d_specular_tex, d_roughness_tex,
                d_generic_tex, d_normal_map))

        buffers.d_intensity_list = []
        buffers.d_area_lights = []
        for light in ctx.area_lights:
            d_intensity = torch.zeros(3, device = pyredner.get_device())
            buffers.d_intensity_list.append(d_intensity)
            buffers.d_area_lights.append(\
                redner.DAreaLight(redner.float_ptr(d_intensity.data_ptr())))

        buffers.d_envmap = None
        if ctx.envmap is not None:
            envmap = ctx.envmap
            buffers.d_envmap_values = []
            for l in range(envmap.get_levels()):
                size = envmap.get_size(l)
                buffers.d_envmap_values.append(\
                    torch.zeros(size[1],
                                size[0],
                                3, device = pyredner.get_device()))
            buffers.d_envmap_uv_scale = torch.zeros(2, device = pyredner.get_device())
            d_envmap_tex = redner.Texture3(\
                [redner.float_ptr(x.data_ptr()) for x in buffers.d_envmap_values],
                [x.shape[1] for x in buffers.d_envmap_values],
                [x.shape[0] for x in buffers.d_envmap_values],
                3,
                redner.float_ptr(buffers.d_envmap_uv_scale.data_ptr()))
            buffers.d_world_to_env = torch.zeros(4, 4, device = pyredner.get_device())
            buffers.d_envmap = redner.DEnvironmentMap(\
                d_envmap_tex,
                redner.float_ptr(buffers.d_world_to_env.data_ptr()))

        buffers.d_scene = redner.DScene(buffers.d_camera,
                                        buffers.d_shapes,
                                        buffers.d_materials,
                                        buffers.d_area_lights,
                                        buffers.d_envmap,
                                        pyredner.get_use_gpu(),
                                        pyredner.get_device().index if pyredner.get_device().index is not None else -1)
        return buffers
Example #34
0
    def forward(ctx, seed, *args):
        """
            Forward rendering pass: given a serialized scene, render and return an image.
        """
        # Unpack arguments
        current_index = 0
        num_shapes = args[current_index]
        current_index += 1
        num_materials = args[current_index]
        current_index += 1
        num_lights = args[current_index]
        current_index += 1
        cam_to_world = args[current_index]
        current_index += 1
        world_to_cam = args[current_index]
        current_index += 1
        fov_factor = args[current_index]
        current_index += 1
        clip_near = args[current_index]
        current_index += 1
        resolution = args[current_index]
        current_index += 1
        fisheye = args[current_index]
        current_index += 1
        assert (cam_to_world.is_contiguous())
        assert (world_to_cam.is_contiguous())
        camera = redner.Camera(resolution[1], resolution[0],
                               redner.float_ptr(cam_to_world.data_ptr()),
                               redner.float_ptr(world_to_cam.data_ptr()),
                               fov_factor.item(), clip_near, fisheye)
        shapes = []
        for i in range(num_shapes):
            vertices = args[current_index]
            current_index += 1
            indices = args[current_index]
            current_index += 1
            uvs = args[current_index]
            current_index += 1
            normals = args[current_index]
            current_index += 1
            material_id = args[current_index]
            current_index += 1
            light_id = args[current_index]
            current_index += 1
            assert (vertices.is_contiguous())
            assert (indices.is_contiguous())
            if uvs is not None:
                assert (uvs.is_contiguous())
            if normals is not None:
                assert (normals.is_contiguous())
            shapes.append(redner.Shape(\
                redner.float_ptr(vertices.data_ptr()),
                redner.int_ptr(indices.data_ptr()),
                redner.float_ptr(uvs.data_ptr() if uvs is not None else 0),
                redner.float_ptr(normals.data_ptr() if normals is not None else 0),
                int(vertices.shape[0]),
                int(indices.shape[0]),
                material_id,
                light_id))
        materials = []
        for i in range(num_materials):
            diffuse_reflectance = args[current_index]
            current_index += 1
            diffuse_uv_scale = args[current_index]
            current_index += 1
            specular_reflectance = args[current_index]
            current_index += 1
            specular_uv_scale = args[current_index]
            current_index += 1
            roughness = args[current_index]
            current_index += 1
            roughness_uv_scale = args[current_index]
            current_index += 1
            two_sided = args[current_index]
            current_index += 1
            assert (diffuse_reflectance.is_contiguous())
            if diffuse_reflectance.dim() == 1:
                diffuse_reflectance = redner.Texture3(\
                    redner.float_ptr(diffuse_reflectance.data_ptr()), 0, 0, 0,
                    redner.float_ptr(diffuse_uv_scale.data_ptr()))
            else:
                diffuse_reflectance = redner.Texture3(\
                    redner.float_ptr(diffuse_reflectance.data_ptr()),
                    int(diffuse_reflectance.shape[2]), # width
                    int(diffuse_reflectance.shape[1]), # height
                    int(diffuse_reflectance.shape[0]), # num levels
                    redner.float_ptr(diffuse_uv_scale.data_ptr()))
            assert (specular_reflectance.is_contiguous())
            if specular_reflectance.dim() == 1:
                specular_reflectance = redner.Texture3(\
                    redner.float_ptr(specular_reflectance.data_ptr()), 0, 0, 0,
                    redner.float_ptr(specular_uv_scale.data_ptr()))
            else:
                specular_reflectance = redner.Texture3(\
                    redner.float_ptr(specular_reflectance.data_ptr()),
                    int(specular_reflectance.shape[2]), # width
                    int(specular_reflectance.shape[1]), # height
                    int(specular_reflectance.shape[0]), # num levels
                    redner.float_ptr(specular_uv_scale.data_ptr()))
            assert (roughness.is_contiguous())
            if roughness.dim() == 1:
                roughness = redner.Texture1(\
                    redner.float_ptr(roughness.data_ptr()), 0, 0, 0,
                    redner.float_ptr(roughness_uv_scale.data_ptr()))
            else:
                roughness = redner.Texture1(\
                    redner.float_ptr(roughness.data_ptr()),
                    int(roughness.shape[2]), # width
                    int(roughness.shape[1]), # height
                    int(roughness.shape[0]), # num levels
                    redner.float_ptr(roughness_uv_scale.data_ptr()))
            materials.append(redner.Material(\
                diffuse_reflectance,
                specular_reflectance,
                roughness,
                two_sided))

        lights = []
        for i in range(num_lights):
            shape_id = args[current_index]
            current_index += 1
            intensity = args[current_index]
            current_index += 1
            two_sided = args[current_index]
            current_index += 1

            lights.append(
                redner.Light(shape_id, redner.float_ptr(intensity.data_ptr()),
                             two_sided))

        scene = redner.Scene(camera, shapes, materials, lights,
                             pyredner.get_use_gpu())
        num_samples = args[current_index]
        current_index += 1
        max_bounces = args[current_index]
        current_index += 1
        options = redner.RenderOptions(seed, num_samples, max_bounces)
        rendered_image = torch.zeros(resolution[0],
                                     resolution[1],
                                     3,
                                     device=pyredner.get_device())
        redner.render(scene, options,
                      redner.float_ptr(rendered_image.data_ptr()),
                      redner.float_ptr(0), None, redner.float_ptr(0))

        # # For debugging
        # debug_img = torch.zeros(256, 256, 3)
        # redner.render(scene,
        #               options,
        #               redner.float_ptr(rendered_image.data_ptr()),
        #               redner.float_ptr(0),
        #               None,
        #               redner.float_ptr(debug_img.data_ptr()))
        # pyredner.imwrite(debug_img, 'debug.exr')
        # exit()

        ctx.shapes = shapes
        ctx.materials = materials
        ctx.lights = lights
        ctx.scene = scene
        ctx.options = options
        return rendered_image
Example #35
0
def load_obj(filename,
             obj_group=True,
             flip_tex_coords=True,
             use_common_indices=False):
    """
        Load from a Wavefront obj file as PyTorch tensors.
        XXX: this is slow, maybe move to C++?

        Args: obj_group -- split the meshes based on materials
              flip_tex_coords -- flip the v coordinate of uv by applying v' = 1 - v
              use_common_indices -- use the same indices for positions, uvs, normals.
                                    Not recommended: positions at texture seams get duplicated,
                                    so the optimization can "tear" the object apart at those seams.
    """
    vertices_pool = []
    uvs_pool = []
    normals_pool = []
    indices = []
    uv_indices = []
    normal_indices = []
    vertices = []
    uvs = []
    normals = []
    vertices_map = {}
    uvs_map = {}
    normals_map = {}
    material_map = {}
    current_mtllib = {}
    current_material_name = None

    def create_mesh(indices, uv_indices, normal_indices, vertices, uvs,
                    normals):
        indices = torch.tensor(indices,
                               dtype=torch.int32,
                               device=pyredner.get_device())
        if len(uv_indices) == 0:
            uv_indices = None
        else:
            uv_indices = torch.tensor(uv_indices,
                                      dtype=torch.int32,
                                      device=pyredner.get_device())
        if len(normal_indices) == 0:
            normal_indices = None
        else:
            normal_indices = torch.tensor(normal_indices,
                                          dtype=torch.int32,
                                          device=pyredner.get_device())
        vertices = torch.tensor(vertices, device=pyredner.get_device())
        if len(uvs) == 0:
            uvs = None
        else:
            uvs = torch.tensor(uvs, device=pyredner.get_device())
        if len(normals) == 0:
            normals = None
        else:
            normals = torch.tensor(normals, device=pyredner.get_device())
        return TriangleMesh(indices, uv_indices, normal_indices, vertices, uvs,
                            normals)

    mesh_list = []
    light_map = {}

    f = open(filename, 'r')
    d = os.path.dirname(filename)
    cwd = os.getcwd()
    if d != '':
        os.chdir(d)
    for line in f:
        line = line.strip()
        splitted = re.split(r'\ +', line)
        if splitted[0] == 'mtllib':
            current_mtllib = load_mtl(splitted[1])
        elif splitted[0] == 'usemtl':
            if len(indices) > 0 and obj_group is True:
                # Flush
                mesh_list.append(
                    (current_material_name,
                     create_mesh(indices, uv_indices, normal_indices, vertices,
                                 uvs, normals)))
                indices = []
                uv_indices = []
                normal_indices = []
                vertices = []
                normals = []
                uvs = []
                vertices_map = {}
                uvs_map = {}
                normals_map = {}

            mtl_name = splitted[1]
            current_material_name = mtl_name
            if mtl_name not in material_map:
                m = current_mtllib[mtl_name]
                if m.map_Kd is None:
                    diffuse_reflectance = torch.tensor(
                        m.Kd,
                        dtype=torch.float32,
                        device=pyredner.get_device())
                else:
                    diffuse_reflectance = pyredner.imread(m.map_Kd)
                    if pyredner.get_use_gpu():
                        diffuse_reflectance = diffuse_reflectance.cuda(
                            device=pyredner.get_device())
                if m.map_Ks is None:
                    specular_reflectance = torch.tensor(
                        m.Ks,
                        dtype=torch.float32,
                        device=pyredner.get_device())
                else:
                    specular_reflectance = pyredner.imread(m.map_Ks)
                    if pyredner.get_use_gpu():
                        specular_reflectance = specular_reflectance.cuda(
                            device=pyredner.get_device())
                if m.map_Ns is None:
                    roughness = torch.tensor([2.0 / (m.Ns + 2.0)],
                                             dtype=torch.float32,
                                             device=pyredner.get_device())
                else:
                    roughness = 2.0 / (pyredner.imread(m.map_Ns) + 2.0)
                    if pyredner.get_use_gpu():
                        roughness = roughness.cuda(
                            device=pyredner.get_device())
                if m.Ke != (0.0, 0.0, 0.0):
                    light_map[mtl_name] = torch.tensor(m.Ke,
                                                       dtype=torch.float32)
                material_map[mtl_name] = pyredner.Material(\
                    diffuse_reflectance, specular_reflectance, roughness)
        elif splitted[0] == 'v':
            vertices_pool.append(
                [float(splitted[1]),
                 float(splitted[2]),
                 float(splitted[3])])
        elif splitted[0] == 'vt':
            u = float(splitted[1])
            v = float(splitted[2])
            if flip_tex_coords:
                v = 1 - v
            uvs_pool.append([u, v])
        elif splitted[0] == 'vn':
            normals_pool.append(
                [float(splitted[1]),
                 float(splitted[2]),
                 float(splitted[3])])
        elif splitted[0] == 'f':

            def num_indices(x):
                return len(re.split('/', x))

            def get_index(x, i):
                return int(re.split('/', x)[i])

            def parse_face_index(x, i):
                f = get_index(x, i)
                if f < 0:
                    if (i == 0):
                        f += len(vertices)
                    if (i == 1):
                        f += len(uvs)
                else:
                    f -= 1
                return f

            assert (len(splitted) <= 5)

            def get_vertex_id(indices):
                pi = parse_face_index(indices, 0)
                uvi = None
                if (num_indices(indices) > 1
                        and re.split('/', indices)[1] != ''):
                    uvi = parse_face_index(indices, 1)
                ni = None
                if (num_indices(indices) > 2
                        and re.split('/', indices)[2] != ''):
                    ni = parse_face_index(indices, 2)
                if use_common_indices:
                    # vertex, uv, normals share the same indexing
                    key = (pi, uvi, ni)
                    if key in vertices_map:
                        vertex_id = vertices_map[key]
                        return vertex_id, vertex_id, vertex_id

                    vertex_id = len(vertices)
                    vertices_map[key] = vertex_id
                    vertices.append(vertices_pool[pi])
                    if uvi is not None:
                        uvs.append(uvs_pool[uvi])
                    if ni is not None:
                        normals.append(normals_pool[ni])
                    return vertex_id, vertex_id, vertex_id
                else:
                    # vertex, uv, normals use separate indexing
                    vertex_id = None
                    uv_id = None
                    normal_id = None

                    if pi in vertices_map:
                        vertex_id = vertices_map[pi]
                    else:
                        vertex_id = len(vertices)
                        vertices.append(vertices_pool[pi])
                        vertices_map[pi] = vertex_id

                    if uvi is not None:
                        if uvi in uvs_map:
                            uv_id = uvs_map[uvi]
                        else:
                            uv_id = len(uvs)
                            uvs.append(uvs_pool[uvi])
                            uvs_map[uvi] = uv_id

                    if ni is not None:
                        if ni in normals_map:
                            normal_id = normals_map[ni]
                        else:
                            normal_id = len(normals)
                            normals.append(normals_pool[ni])
                            normals_map[ni] = normal_id
                    return vertex_id, uv_id, normal_id

            vid0, uv_id0, n_id0 = get_vertex_id(splitted[1])
            vid1, uv_id1, n_id1 = get_vertex_id(splitted[2])
            vid2, uv_id2, n_id2 = get_vertex_id(splitted[3])

            indices.append([vid0, vid1, vid2])
            if uv_id0 is not None:
                assert (uv_id1 is not None and uv_id2 is not None)
                uv_indices.append([uv_id0, uv_id1, uv_id2])
            if n_id0 is not None:
                assert (n_id1 is not None and n_id2 is not None)
                normal_indices.append([n_id0, n_id1, n_id2])
            if (len(splitted) == 5):
                vid3, uv_id3, n_id3 = get_vertex_id(splitted[4])
                indices.append([vid0, vid2, vid3])
                if uv_id0 is not None:
                    assert (uv_id3 is not None)
                    uv_indices.append([uv_id0, uv_id2, uv_id3])
                if n_id0 is not None:
                    assert (n_id3 is not None)
                    normal_indices.append([n_id0, n_id2, n_id3])

    mesh_list.append((current_material_name,
                      create_mesh(indices, uv_indices, normal_indices,
                                  vertices, uvs, normals)))
    if d != '':
        os.chdir(cwd)

    f.close()
    return material_map, mesh_list, light_map