Example #1
def render_g_buffer(scene: pyredner.Scene,
                    channels: List[redner.channels],
                    num_samples: Union[int, Tuple[int, int]] = (1, 1),
                    seed: Optional[int] = None):
    """
        Render a G buffer from the scene.

        Args
        ====
        scene: pyredner.Scene
            pyredner Scene containing camera, geometry, material, and lighting
        channels: List[pyredner.channels]
            | A list of the following channels\:
            | pyredner.channels.alpha
            | pyredner.channels.depth
            | pyredner.channels.position
            | pyredner.channels.geometry_normal
            | pyredner.channels.shading_normal
            | pyredner.channels.uv
            | pyredner.channels.diffuse_reflectance
            | pyredner.channels.specular_reflectance
            | pyredner.channels.roughness
            | pyredner.channels.generic_texture
            | pyredner.channels.vertex_color
            | pyredner.channels.shape_id
            | pyredner.channels.material_id
        num_samples: Union[int, Tuple[int, int]]
            Number of samples for forward and backward passes, respectively.
            If a single integer is provided, use the same number of samples
            for both.
        seed: Optional[int]
            Random seed used for sampling. Randomly assigned if set to None.

        Returns
        =======
        tf.Tensor
            a tensor with size [H, W, C]
    """
    if seed is None:
        seed = random.randint(0, 16777216)
    scene_args = pyredner.serialize_scene(
        scene = scene,
        num_samples = num_samples,
        max_bounces = 0,
        sampler_type = redner.SamplerType.sobol,
        channels = channels,
        use_secondary_edge_sampling = False)
    return pyredner.render(seed, *scene_args)
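A minimal usage sketch (assuming scene is an already-built pyredner.Scene; the channel choice and slicing below are illustrative):

# Sketch only: channels are packed into the output in the order requested;
# depth contributes 1 channel, shading_normal contributes 3.
g_buffer = render_g_buffer(scene,
                           channels=[redner.channels.depth,
                                     redner.channels.shading_normal])
depth = g_buffer[:, :, 0:1]
normal = g_buffer[:, :, 1:4]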
Example #2
def render_pathtracing(scene: pyredner.Scene,
                       alpha: bool = False,
                       max_bounces: int = 1,
                       sampler_type: redner.SamplerType = pyredner.sampler_type.sobol,
                       num_samples: Union[int, Tuple[int, int]] = (4, 4),
                       seed: Optional[int] = None):
    """
        Render a pyredner scene using pathtracing.

        Args
        ====
        scene: pyredner.Scene
            pyredner Scene containing camera, geometry, material, and lighting
        alpha: bool
            If set to False, generates a 3-channel image,
            otherwise generates a 4-channel image where the
            fourth channel is alpha.
        max_bounces: int
            Number of bounces for global illumination, 1 means direct lighting only.
        sampler_type: pyredner.sampler_type
            | Which sampling pattern to use? See
              `Chapter 7 of the PBRT book <http://www.pbr-book.org/3ed-2018/Sampling_and_Reconstruction.html>`_
              for an explanation of the difference between different samplers.
            | Following samplers are supported\:
            | pyredner.sampler_type.independent
            | pyredner.sampler_type.sobol
        num_samples: Union[int, Tuple[int, int]]
            Number of samples per pixel for the forward and backward passes.
            If a single integer is provided, use the same number of samples
            for both.
        seed: Optional[int]
            Random seed used for sampling. Randomly assigned if set to None.

        Returns
        =======
        tf.Tensor
            if alpha == True, a tensor with size [H, W, 4],
            else, a tensor with size [H, W, 3]
    """
    if seed is None:
        seed = random.randint(0, 16777216)
    channels = [redner.channels.radiance]
    if alpha:
        channels.append(redner.channels.alpha)
    scene_args = pyredner.serialize_scene(
        scene = scene,
        num_samples = num_samples,
        max_bounces = max_bounces,
        sampler_type = sampler_type,
        channels = channels)
    return pyredner.render(seed, *scene_args)
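A hedged usage sketch (assuming scene is a pre-built pyredner.Scene; the sample counts and output path are illustrative):

# Sketch only: two-bounce path tracing with the independent sampler.
img = render_pathtracing(scene,
                         max_bounces=2,
                         sampler_type=pyredner.sampler_type.independent,
                         num_samples=(64, 4))
pyredner.imwrite(img, 'results/pathtracing.png')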
Example #3
def render_albedo(scene: pyredner.Scene,
                  alpha: bool = False,
                  num_samples: Union[int, Tuple[int, int]] = (16, 4),
                  seed: Optional[int] = None):
    """
        Render the diffuse albedo color of the scene.

        Args
        ====
        scene: pyredner.Scene
            pyredner Scene containing camera, geometry and material.
        alpha: bool
            If set to False, generates a 3-channel image,
            otherwise generates a 4-channel image where the
            fourth channel is alpha.
        num_samples: Union[int, Tuple[int, int]]
            Number of samples for forward and backward passes, respectively.
            If a single integer is provided, use the same number of samples
            for both.
        seed: Optional[int]
            Random seed used for sampling. Randomly assigned if set to None.

        Returns
        =======
        tf.Tensor
            if alpha == True, a tensor with size [H, W, 4],
            else, a tensor with size [H, W, 3]
    """
    if seed is None:
        seed = random.randint(0, 16777216)
    channels = [redner.channels.diffuse_reflectance]
    if alpha:
        channels.append(redner.channels.alpha)
    scene_args = pyredner.serialize_scene(
        scene = scene,
        num_samples = num_samples,
        max_bounces = 0,
        sampler_type = redner.SamplerType.sobol,
        channels = channels,
        use_secondary_edge_sampling = False)
    return pyredner.render(seed, *scene_args)
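Usage sketch (assuming scene is a pre-built pyredner.Scene):

# Sketch only: render albedo plus alpha and split the channels.
out = render_albedo(scene, alpha=True, num_samples=(32, 4))
rgb, a = out[:, :, :3], out[:, :, 3:4]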
Example #4
                                 dtype=tf.float32)
    light_indices = tf.constant([[0, 1, 2], [1, 3, 2]], dtype=tf.int32)
    shape_light = pyredner.Shape(light_vertices, light_indices, 1)
shapes = [shape_plane, shape_light]
with tf.device('/device:cpu:' + str(pyredner.get_cpu_device_id())):
    light_intensity = tf.Variable([20.0, 20.0, 20.0], dtype=tf.float32)
# The first argument is the shape id of the light
light = pyredner.AreaLight(1, light_intensity)
area_lights = [light]
scene = pyredner.Scene(cam, shapes, materials, area_lights)
scene_args = pyredner.serialize_scene(scene=scene,
                                      num_samples=16,
                                      max_bounces=1)

# Render our target
img = pyredner.render(0, *scene_args)
pyredner.imwrite(img, 'results/test_svbrdf/target.exr')
pyredner.imwrite(img, 'results/test_svbrdf/target.png')
target = pyredner.imread('results/test_svbrdf/target.exr')

# Our initial guess is three gray textures
with tf.device(pyredner.get_device_name()):
    diffuse_tex = tf.Variable(tf.ones((256, 256, 3), dtype=np.float32) * 0.5,
                              trainable=True)
    specular_tex = tf.Variable(tf.ones((256, 256, 3), dtype=np.float32) * 0.5,
                               trainable=True)
    roughness_tex = tf.Variable(tf.ones((256, 256, 1), dtype=np.float32) * 0.5,
                                trainable=True)
mat_perlin.diffuse_reflectance = pyredner.Texture(diffuse_tex)
mat_perlin.specular_reflectance = pyredner.Texture(specular_tex)
mat_perlin.roughness = pyredner.Texture(roughness_tex)
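This excerpt stops where the optimization would normally begin. A hedged sketch of the loop that typically follows (the optimizer, learning rate, and iteration count are assumptions, not part of the snippet):

# Illustrative optimization loop: render with the current textures,
# compare against the target, and update the trainable variables.
optimizer = tf.keras.optimizers.Adam(learning_rate=1e-2)
for t in range(200):
    with tf.GradientTape() as tape:
        scene_args = pyredner.serialize_scene(scene=scene,
                                              num_samples=4,
                                              max_bounces=1)
        img = pyredner.render(t + 1, *scene_args)  # vary the seed each iteration
        loss = tf.reduce_sum(tf.square(img - target))
    grads = tape.gradient(loss, [diffuse_tex, specular_tex, roughness_tex])
    optimizer.apply_gradients(zip(grads, [diffuse_tex, specular_tex, roughness_tex]))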
Example #5
# The last material is the teapot material, set it to a specular material
with tf.device(pyredner.get_device_name()):
    scene.materials[-1].diffuse_reflectance = \
        pyredner.Texture(tf.Variable([0.15, 0.2, 0.15], dtype=tf.float32))
    scene.materials[-1].specular_reflectance = \
        pyredner.Texture(tf.Variable([0.8, 0.8, 0.8], dtype=tf.float32))
    scene.materials[-1].roughness = \
        pyredner.Texture(tf.Variable([0.0001], dtype=tf.float32))

scene_args = pyredner.serialize_scene(scene=scene,
                                      num_samples=512,
                                      max_bounces=2)

# Render our target. The first argument is the seed for RNG in the renderer.
img = pyredner.render(0, *scene_args)
pyredner.imwrite(img, 'results/test_teapot_specular/target.exr')
pyredner.imwrite(img, 'results/test_teapot_specular/target.png')
target = pyredner.imread('results/test_teapot_specular/target.exr')

# Perturb the scene, this is our initial guess
# We perturb the last shape, which is the SIGGRAPH logo
ref_pos = scene.shapes[-1].vertices
with tf.device(pyredner.get_device_name()):
    translation = tf.Variable([20.0, 0.0, 2.0], trainable=True)
    scene.shapes[-1].vertices = ref_pos + translation
scene_args = pyredner.serialize_scene(scene=scene,
                                      num_samples=512,
                                      max_bounces=2)
# Render the initial guess
img = pyredner.render(1, *scene_args)
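A hedged sketch of the comparison and optimization that typically follows (the loss form and optimizer settings are assumptions):

# Illustrative: recover the translation by minimizing the difference
# between the rendered image and the target.
optimizer = tf.keras.optimizers.Adam(learning_rate=0.5)
for t in range(100):
    with tf.GradientTape() as tape:
        scene.shapes[-1].vertices = ref_pos + translation
        scene_args = pyredner.serialize_scene(scene=scene,
                                              num_samples=4,
                                              max_bounces=2)
        img = pyredner.render(t + 2, *scene_args)
        loss = tf.reduce_sum(tf.square(img - target))
    grads = tape.gradient(loss, [translation])
    optimizer.apply_gradients(zip(grads, [translation]))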
Example #6
def render_deferred(scene: Union[pyredner.Scene, List[pyredner.Scene]],
                    lights: Union[List[DeferredLight],
                                  List[List[DeferredLight]]],
                    alpha: bool = False,
                    aa_samples: int = 2,
                    seed: Optional[Union[int, List[int]]] = None,
                    sample_pixel_center: bool = False):
    """
        Render the scenes using `deferred rendering <https://en.wikipedia.org/wiki/Deferred_shading>`_.
        We generate G-buffer images containing world-space position,
        normal, and albedo using redner, then shade the G-buffer
        using TensorFlow code. We assume Lambertian shading and do not
        compute shadows.

        Args
        ====
        scene: Union[pyredner.Scene, List[pyredner.Scene]]
            pyredner Scene containing camera, geometry and material.
            Can be a single scene or a list for batch render.
            For batch rendering all scenes need to have the same resolution.
        lights: Union[List[DeferredLight], List[List[DeferredLight]]]
            Lights for deferred rendering. If the scene is a list, and only
            a single list of lights is provided, the same lights are applied
            to all scenes. If a list of lists of lights is provided, each scene
            is lit by the corresponding lights.
        alpha: bool
            If set to False, generates a 3-channel image,
            otherwise generates a 4-channel image where the
            fourth channel is alpha.
        aa_samples: int
            Number of samples used for anti-aliasing in both the x and y
            dimensions (e.g. if aa_samples=2, 4 samples are used).
        seed: Optional[Union[int, List[int]]]
            Random seed used for sampling. Randomly assigned if set to None.
            For batch rendering, if seed is not None, a list of seeds
            must be provided.
        sample_pixel_center: bool
            Always sample at the pixel center when rendering.
            This trades noise for aliasing.
            If this option is activated, the rendering becomes non-differentiable
            (since there is no antialiasing integral),
            and redner's edge sampling becomes an approximation to the gradients of the aliased rendering.

        Returns
        =======
        tf.Tensor or List[tf.Tensor]
            | if input scene is a list: a tensor with size [N, H, W, C], N is the list size
            | else: a tensor with size [H, W, C]
            | if alpha == True, C = 4.
            | else, C = 3.
    """
    channels = [
        redner.channels.position, redner.channels.shading_normal,
        redner.channels.diffuse_reflectance
    ]
    if alpha:
        channels.append(redner.channels.alpha)
    if isinstance(scene, pyredner.Scene):
        if seed is None:
            seed = random.randint(0, 16777216)
        # We do full-screen anti-aliasing: increase the rendering resolution
        # and downsample it after lighting
        org_res = scene.camera.resolution
        scene.camera.resolution = (org_res[0] * aa_samples,
                                   org_res[1] * aa_samples)
        scene_args = pyredner.serialize_scene(
            scene = scene,
            num_samples = (1, 1),
            max_bounces = 0,
            sampler_type = redner.SamplerType.sobol,
            channels = channels,
            use_secondary_edge_sampling = False,
            sample_pixel_center = sample_pixel_center)
        # Need to revert the resolution back
        scene.camera.resolution = org_res
        g_buffer = pyredner.render(seed, *scene_args)
        pos = g_buffer[:, :, :3]
        normal = g_buffer[:, :, 3:6]
        albedo = g_buffer[:, :, 6:9]
        img = tf.zeros((g_buffer.shape[0], g_buffer.shape[1], 3))
        for light in lights:
            img = img + light.render(pos, normal, albedo)
        if alpha:
            # alpha is in the last channel
            img = tf.concat((img, g_buffer[:, :, 9:10]), axis=2)
        if aa_samples > 1:
            # Downsample
            img = tf.expand_dims(img, 0)  # HWC -> NHWC
            # TODO: switch to method = 'area' when tensorflow implements the gradients...
            img = tf.image.resize(img,
                                  size=org_res,
                                  method='bilinear',
                                  antialias=True)
            img = tf.squeeze(img, axis=0)  # NHWC -> HWC
        return img
    else:
        assert (isinstance(scene, list))
        if seed is None:
            # Randomly generate a list of seeds
            seed = []
            for i in range(len(scene)):
                seed.append(random.randint(0, 16777216))
        assert (len(seed) == len(scene))
        if len(lights) > 0 and not isinstance(lights[0], list):
            # Specialized version: stack G-buffers and light all images in parallel
            g_buffers = []
            # Render each scene in the batch and stack them together
            for sc, se in zip(scene, seed):
                # We do full-screen anti-aliasing: increase the rendering resolution
                # and downsample it after lighting
                org_res = sc.camera.resolution
                sc.camera.resolution = (org_res[0] * aa_samples,
                                        org_res[1] * aa_samples)
                scene_args = pyredner.serialize_scene(
                    scene = sc,
                    num_samples = (1, 1),
                    max_bounces = 0,
                    sampler_type = redner.SamplerType.sobol,
                    channels = channels,
                    use_secondary_edge_sampling = False,
                    sample_pixel_center = sample_pixel_center)
                # Need to revert the resolution back
                sc.camera.resolution = org_res
                g_buffers.append(pyredner.render(se, *scene_args))
            g_buffers = tf.stack(g_buffers)
            pos = g_buffers[:, :, :, :3]
            normal = g_buffers[:, :, :, 3:6]
            albedo = g_buffers[:, :, :, 6:9]
            imgs = tf.zeros((g_buffers.shape[0], g_buffers.shape[1],
                             g_buffers.shape[2], 3))
            for light in lights:
                imgs = imgs + light.render(pos, normal, albedo)
            if alpha:
                imgs = tf.concat((imgs, g_buffers[:, :, :, 9:10]), axis=-1)
        else:
            # If each scene has a different lighting: light them in the loop
            imgs = []
            # Render each scene in the batch and stack them together
            for sc, se, lgts in zip(scene, seed, lights):
                # We do full-screen anti-aliasing: increase the rendering resolution
                # and downsample it after lighting
                org_res = sc.camera.resolution
                sc.camera.resolution = (org_res[0] * aa_samples,
                                        org_res[1] * aa_samples)
                scene_args = pyredner.serialize_scene(
                    scene = sc,
                    num_samples = (1, 1),
                    max_bounces = 0,
                    sampler_type = redner.SamplerType.sobol,
                    channels = channels,
                    use_secondary_edge_sampling = False,
                    sample_pixel_center = sample_pixel_center)
                # Need to revert the resolution back
                sc.camera.resolution = org_res
                g_buffer = pyredner.render(se, *scene_args)
                pos = g_buffer[:, :, :3]
                normal = g_buffer[:, :, 3:6]
                albedo = g_buffer[:, :, 6:9]
                img = tf.zeros((g_buffer.shape[0], g_buffer.shape[1], 3))
                for light in lgts:
                    img = img + light.render(pos, normal, albedo)
                if alpha:
                    # alpha is in the last channel
                    img = tf.concat((img, g_buffer[:, :, 9:10]), axis=-1)
                imgs.append(img)
            imgs = tf.stack(imgs)
        if aa_samples > 1:
            # Downsample
            # TODO: switch to method = 'area' when tensorflow implements the gradients...
            imgs = tf.image.resize(imgs,
                                   size=org_res,
                                   method='bilinear',
                                   antialias=True)
        return imgs
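Usage sketch for the single-scene path (assuming scene is a pre-built pyredner.Scene, and that this pyredner build provides a PointLight subclass of DeferredLight; position and intensity are illustrative):

# Sketch only: one point light, 2x2 anti-aliasing.
light = pyredner.PointLight(position=tf.constant([0.0, 0.0, -5.0]),
                            intensity=tf.constant([10.0, 10.0, 10.0]))
img = render_deferred(scene, lights=[light], alpha=False, aa_samples=2)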
Example #7
def render_generic(scene: pyredner.Scene,
                   channels: List,
                   max_bounces: int = 1,
                   sampler_type=pyredner.sampler_type.sobol,
                   num_samples: Union[int, Tuple[int, int]] = (4, 4),
                   seed: Optional[int] = None,
                   sample_pixel_center: bool = False):
    """
        A generic rendering function that can be either pathtracing or
        g-buffer rendering or both.

        Args
        ====
        scene: Union[pyredner.Scene, List[pyredner.Scene]]
            pyredner Scene containing camera, geometry and material.
            Can be a single scene or a list for batch render.
            For batch rendering all scenes need to have the same resolution.
        channels: List[pyredner.channels]
            | A list of the following channels\:
            | pyredner.channels.alpha
            | pyredner.channels.depth
            | pyredner.channels.position
            | pyredner.channels.geometry_normal
            | pyredner.channels.shading_normal
            | pyredner.channels.uv
            | pyredner.channels.diffuse_reflectance
            | pyredner.channels.specular_reflectance
            | pyredner.channels.roughness
            | pyredner.channels.generic_texture
            | pyredner.channels.vertex_color
            | pyredner.channels.shape_id
            | pyredner.channels.triangle_id
            | pyredner.channels.material_id
        max_bounces: int
            Number of bounces for global illumination, 1 means direct lighting only.
        sampler_type: pyredner.sampler_type
            | Which sampling pattern to use? See 
              `Chapter 7 of the PBRT book <http://www.pbr-book.org/3ed-2018/Sampling_and_Reconstruction.html>`_
              for an explanation of the difference between different samplers.
            | Following samplers are supported\:
            | pyredner.sampler_type.independent
            | pyredner.sampler_type.sobol
        num_samples: Union[int, Tuple[int, int]]
            Number of samples per pixel for the forward and backward passes.
            Can be a single integer or a tuple of two integers.
        seed: Optional[Union[int, List[int]]]
            Random seed used for sampling. Randomly assigned if set to None.
            For batch rendering, if seed is not None, a list of seeds
            must be provided.
        sample_pixel_center: bool
            Always sample at the pixel center when rendering.
            This trades noise for aliasing.
            If this option is activated, the rendering becomes non-differentiable
            (since there is no antialiasing integral),
            and redner's edge sampling becomes an approximation to the gradients of the aliased rendering.

        Returns
        =======
        tf.Tensor or List[tf.Tensor]
            | if input scene is a list: a tensor with size [N, H, W, C], N is the list size
            | else: a tensor with size [H, W, C]
    """
    if isinstance(scene, pyredner.Scene):
        if seed is None:
            seed = random.randint(0, 16777216)
        scene_args = pyredner.serialize_scene(
            scene = scene,
            num_samples = num_samples,
            max_bounces = max_bounces,
            sampler_type = sampler_type,
            channels = channels,
            sample_pixel_center = sample_pixel_center)
        return pyredner.render(seed, *scene_args)
    else:
        assert (isinstance(scene, list))
        if seed is None:
            # Randomly generate a list of seeds
            seed = []
            for i in range(len(scene)):
                seed.append(random.randint(0, 16777216))
        assert (len(seed) == len(scene))
        # Render each scene in the batch and stack them together
        imgs = []
        for sc, se in zip(scene, seed):
            scene_args = pyredner.serialize_scene(
                scene = sc,
                num_samples = num_samples,
                max_bounces = max_bounces,
                sampler_type = sampler_type,
                channels = channels,
                sample_pixel_center = sample_pixel_center)
            imgs.append(pyredner.render(se, *scene_args))
        imgs = tf.stack(imgs)
        return imgs
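For example, a single call can return both radiance and depth (sketch, assuming scene is a pre-built pyredner.Scene):

# Sketch only: radiance occupies 3 channels and depth 1, in the order requested.
out = render_generic(scene,
                     channels=[redner.channels.radiance,
                               redner.channels.depth],
                     max_bounces=1,
                     num_samples=(16, 4))
radiance = out[:, :, 0:3]
depth = out[:, :, 3:4]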
Example #8
    material_id_map[key] = count
    count += 1
    materials.append(value)

# Setup geometries
shapes = []
with tf.device(pyredner.get_device_name()):
    for mtl_name, mesh in mesh_list:
        shapes.append(
            pyredner.Shape(vertices=mesh.vertices,
                           indices=mesh.indices,
                           uvs=mesh.uvs,
                           normals=mesh.normals,
                           material_id=material_id_map[mtl_name]))

with tf.device(pyredner.get_device_name()):
    envmap = pyredner.imread('sunsky.exr')
    envmap = pyredner.EnvironmentMap(envmap)

# Construct the scene
scene = pyredner.Scene(cam, shapes, materials, area_lights=[], envmap=envmap)
# Serialize the scene
scene_args = pyredner.serialize_scene(scene=scene,
                                      num_samples=512,
                                      max_bounces=1)
img = pyredner.render(0, *scene_args)
pyredner.imwrite(img, 'results/test_compute_uvs/target.exr')
pyredner.imwrite(img, 'results/test_compute_uvs/target.png')
target = pyredner.imread('results/test_compute_uvs/target.exr')
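The material_id_map/mesh_list pair at the top of this excerpt typically comes from pyredner.load_obj; a hedged reconstruction of the omitted setup (the .obj file name is illustrative):

# Assumed setup: load_obj returns a dict of materials keyed by material
# name and a list of (material_name, mesh) pairs, which the loop at the
# top of the excerpt folds into flat lists indexed by material id.
material_map, mesh_list, _ = pyredner.load_obj('scene.obj')
material_id_map = {}
materials = []
count = 0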
Example #9
def render_deferred(scene: pyredner.Scene,
                    lights: List[DeferredLight],
                    alpha: bool = False,
                    aa_samples: int = 2,
                    seed: Optional[int] = None):
    """
        Render the scene using deferred rendering.
        (https://en.wikipedia.org/wiki/Deferred_shading)
        We generate a G-buffer image containing world-space position,
        normal, and albedo using redner, then shade the G-buffer
        using TensorFlow code. We assume Lambertian shading and do not
        compute shadows.

        Args
        ====
        scene: pyredner.Scene
            pyredner Scene containing camera, geometry and material.
        lights: List[DeferredLight]
            Lights for deferred rendering; each light must provide a
            render(position, normal, albedo) method (see the sketch
            after this function).
        alpha: bool
            If set to False, generates a 3-channel image,
            otherwise generates a 4-channel image where the
            fourth channel is alpha.
        aa_samples: int
            Number of samples used for anti-aliasing in both the x and y
            dimensions (e.g. if aa_samples=2, 4 samples are used).
        seed: Optional[int]
            Random seed used for sampling. Randomly assigned if set to None.

        Returns
        =======
        tf.Tensor
            if alpha == True, a tensor with size [H, W, 4],
            else, a tensor with size [H, W, 3]
    """
    if seed is None:
        seed = random.randint(0, 16777216)

    org_res = scene.camera.resolution
    scene.camera.resolution = (org_res[0] * aa_samples,
                               org_res[1] * aa_samples)
    channels = [redner.channels.position,
                redner.channels.shading_normal,
                redner.channels.diffuse_reflectance]
    if alpha:
        channels.append(redner.channels.alpha)
    scene_args = pyredner.serialize_scene(
        scene = scene,
        num_samples = (1, 1),
        max_bounces = 0,
        sampler_type = redner.SamplerType.sobol,
        channels = channels,
        use_secondary_edge_sampling = False)
    scene.camera.resolution = org_res
    g_buffer = pyredner.render(seed, *scene_args)
    pos = g_buffer[:, :, :3]
    normal = g_buffer[:, :, 3:6]
    albedo = g_buffer[:, :, 6:9]
    img = tf.zeros((g_buffer.shape[0], g_buffer.shape[1], 3))
    for light in lights:
        img = img + light.render(pos, normal, albedo)
    if alpha:
        # Alpha is the last G-buffer channel; concatenate it before
        # downsampling so the spatial resolutions match.
        img = tf.concat((img, g_buffer[:, :, 9:10]), axis = 2)
    if aa_samples > 1:
        img = tf.expand_dims(img, 0) # HWC -> NHWC
        img = tf.image.resize(img, size = org_res, method = 'area', antialias = True)
        img = tf.squeeze(img, axis = 0) # NHWC -> HWC
    return img
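Any object exposing a render(position, normal, albedo) method works as a light here, since that is all this function calls. A minimal, purely illustrative ambient light (the class name and intensity are hypothetical, and scene is assumed to be a pre-built pyredner.Scene):

# Hypothetical light class, shown only to illustrate the interface
# render_deferred expects from its lights argument.
class AmbientSketchLight:
    def __init__(self, intensity):
        self.intensity = intensity  # [3] RGB tensor

    def render(self, position, normal, albedo):
        # Constant ambient term: ignores position and normal.
        return albedo * self.intensity

img = render_deferred(scene, lights=[AmbientSketchLight(tf.constant([0.3, 0.3, 0.3]))])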