Example #1
def render_g_buffer(scene: pyredner.Scene,
                    channels: List[redner.channels],
                    num_samples: Union[int, Tuple[int, int]] = (1, 1),
                    seed: Optional[int] = None):
    """
        Render a G buffer from the scene.

        Args
        ====
        scene: pyredner.Scene
            pyredner Scene containing camera, geometry, material, and lighting
        channels: List[pyredner.channels]
            | A list of the following channels\:
            | pyredner.channels.alpha
            | pyredner.channels.depth
            | pyredner.channels.position
            | pyredner.channels.geometry_normal
            | pyredner.channels.shading_normal
            | pyredner.channels.uv
            | pyredner.channels.diffuse_reflectance
            | pyredner.channels.specular_reflectance
            | pyredner.channels.roughness
            | pyredner.channels.generic_texture
            | pyredner.channels.vertex_color
            | pyredner.channels.shape_id
            | pyredner.channels.material_id
        num_samples: Union[int, Tuple[int, int]]
            Number of samples for forward and backward passes, respectively.
            If a single integer is provided, use the same number of samples
            for both.
        seed: Optional[int]
            Random seed used for sampling. Randomly assigned if set to None.

        Returns
        =======
        tf.Tensor
            a tensor with size [H, W, C]
    """
    if seed is None:
        seed = random.randint(0, 16777216)
    scene_args = pyredner.serialize_scene(
        scene = scene,
        num_samples = num_samples,
        max_bounces = 0,
        sampler_type = redner.SamplerType.sobol,
        channels = channels,
        use_secondary_edge_sampling = False)
    return pyredner.render(seed, *scene_args)
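
# A minimal usage sketch, assuming `scene` is an already-constructed pyredner
# scene. The output packs the requested channels in the order given; compare
# the G-buffer script in Example #8.
g_buffer = render_g_buffer(scene,
                           channels=[redner.channels.depth,
                                     redner.channels.shading_normal],
                           num_samples=16)
depth = g_buffer[:, :, 0]     # depth occupies 1 channel
normal = g_buffer[:, :, 1:4]  # shading normal occupies the next 3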
Example #2
def render_pathtracing(scene: pyredner.Scene,
                       alpha: bool = False,
                       max_bounces: int = 1,
                       sampler_type: redner.SamplerType = pyredner.sampler_type.sobol,
                       num_samples: Union[int, Tuple[int, int]] = (4, 4),
                       seed: Optional[int] = None):
    """
        Render a pyredner scene using pathtracing.

        Args
        ====
        scene: pyredner.Scene
            pyredner Scene containing camera, geometry, material, and lighting
        alpha: bool
            If set to False, generates a 3-channel image,
            otherwise generates a 4-channel image where the
            fourth channel is alpha.
        max_bounces: int
            Number of bounces for global illumination, 1 means direct lighting only.
        num_samples: Union[int, Tuple[int, int]]
            Number of samples per pixel for forward and backward passes.
            Can be an integer or a tuple of 2 integers.
        sampler_type: pyredner.sampler_type
            | Which sampling pattern to use. See
              `Chapter 7 of the PBRT book <http://www.pbr-book.org/3ed-2018/Sampling_and_Reconstruction.html>`_
              for an explanation of the difference between different samplers.
            | The following samplers are supported\:
            | pyredner.sampler_type.independent
            | pyredner.sampler_type.sobol
        seed: Optional[int]
            Random seed used for sampling. Randomly assigned if set to None.

        Returns
        =======
        tf.Tensor
            if alpha == True, a tensor with size [H, W, 4],
            else, a tensor with size [H, W, 3]
    """
    if seed is None:
        seed = random.randint(0, 16777216)
    channels = [redner.channels.radiance]
    if alpha:
        channels.append(redner.channels.alpha)
    scene_args = pyredner.serialize_scene(
        scene = scene,
        num_samples = num_samples,
        max_bounces = max_bounces,
        sampler_type = sampler_type,
        channels = channels)
    return pyredner.render(seed, *scene_args)
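
# A minimal usage sketch, assuming `scene` is an existing pyredner scene.
# alpha=True appends a fourth (alpha) channel to the radiance image.
img = render_pathtracing(scene, alpha=True, max_bounces=2, num_samples=(64, 4))
rgb = img[:, :, :3]
a = img[:, :, 3:4]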
Example #3
def render_albedo(scene: pyredner.Scene,
                  alpha: bool = False,
                  num_samples: Union[int, Tuple[int, int]] = (16, 4),
                  seed: Optional[int] = None):
    """
        Render the diffuse albedo color of the scene.

        Args
        ====
        scene: pyredner.Scene
            pyredner Scene containing camera, geometry and material.
        alpha: bool
            If set to False, generates a 3-channel image,
            otherwise generates a 4-channel image where the
            fourth channel is alpha.
        num_samples: Union[int, Tuple[int, int]]
            Number of samples for forward and backward passes, respectively.
            If a single integer is provided, use the same number of samples
            for both.
        seed: Optional[int]
            Random seed used for sampling. Randomly assigned if set to None.

        Returns
        =======
        tf.Tensor
            if alpha == True, a tensor with size [H, W, 4],
            else, a tensor with size [H, W, 3]
    """
    if seed is None:
        seed = random.randint(0, 16777216)
    channels = [redner.channels.diffuse_reflectance]
    if alpha:
        channels.append(redner.channels.alpha)
    scene_args = pyredner.serialize_scene(
        scene = scene,
        num_samples = num_samples,
        max_bounces = 0,
        sampler_type = redner.SamplerType.sobol,
        channels = channels,
        use_secondary_edge_sampling = False)
    return pyredner.render(seed, *scene_args)
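
# A minimal usage sketch, assuming `scene` is an existing pyredner scene.
# render_albedo fixes max_bounces to 0, so this is a flat, unlit albedo image.
img = render_albedo(scene, alpha=True)
albedo = img[:, :, :3]
a = img[:, :, 3:4]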
Example #4
                      dtype=tf.float32)
    shape_plane = pyredner.Shape(vertices, indices, 0, uvs)
    light_vertices = tf.Variable([[-1.0, -1.0, -7.0], [1.0, -1.0, -7.0],
                                  [-1.0, 1.0, -7.0], [1.0, 1.0, -7.0]],
                                 dtype=tf.float32)
    light_indices = tf.constant([[0, 1, 2], [1, 3, 2]], dtype=tf.int32)
    shape_light = pyredner.Shape(light_vertices, light_indices, 1)
shapes = [shape_plane, shape_light]
with tf.device('/device:cpu:' + str(pyredner.get_cpu_device_id())):
    light_intensity = tf.Variable([20.0, 20.0, 20.0], dtype=tf.float32)
# The first argument is the shape id of the light
light = pyredner.AreaLight(1, light_intensity)
area_lights = [light]
scene = pyredner.Scene(cam, shapes, materials, area_lights)
scene_args = pyredner.serialize_scene(scene=scene,
                                      num_samples=16,
                                      max_bounces=1)

# Render our target
img = pyredner.render(0, *scene_args)
pyredner.imwrite(img, 'results/test_svbrdf/target.exr')
pyredner.imwrite(img, 'results/test_svbrdf/target.png')
target = pyredner.imread('results/test_svbrdf/target.exr')

# Our initial guess is three gray textures
with tf.device(pyredner.get_device_name()):
    diffuse_tex = tf.Variable(tf.ones((256, 256, 3), dtype=np.float32) * 0.5,
                              trainable=True)
    specular_tex = tf.Variable(tf.ones((256, 256, 3), dtype=np.float32) * 0.5,
                               trainable=True)
    roughness_tex = tf.Variable(tf.ones((256, 256, 1), dtype=np.float32) * 0.5,
                                trainable=True)
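
# A sketch of the optimization loop that would follow. The Texture wrapping
# mirrors redner's SVBRDF test; exact attribute names may differ by version.
# `materials`, `scene`, and `target` are the objects set up above.
optimizer = tf.keras.optimizers.Adam(learning_rate=1e-2)
trainables = [diffuse_tex, specular_tex, roughness_tex]
for t in range(200):
    with tf.GradientTape() as tape:
        # Rebuild the plane's material from the current texture values.
        materials[0].diffuse_reflectance = pyredner.Texture(diffuse_tex)
        materials[0].specular_reflectance = pyredner.Texture(specular_tex)
        materials[0].roughness = pyredner.Texture(roughness_tex)
        scene_args = pyredner.serialize_scene(scene=scene,
                                              num_samples=4,
                                              max_bounces=1)
        # Use a fresh seed each iteration to decorrelate Monte Carlo noise.
        img = pyredner.render(t + 1, *scene_args)
        loss = tf.reduce_sum(tf.square(img - target))
    grads = tape.gradient(loss, trainables)
    optimizer.apply_gradients(zip(grads, trainables))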
Example #5
def render_deferred(scene: Union[pyredner.Scene, List[pyredner.Scene]],
                    lights: Union[List[DeferredLight],
                                  List[List[DeferredLight]]],
                    alpha: bool = False,
                    aa_samples: int = 2,
                    seed: Optional[Union[int, List[int]]] = None,
                    sample_pixel_center: bool = False):
    """
        Render the scenes using `deferred rendering <https://en.wikipedia.org/wiki/Deferred_shading>`_.
        We generate G-buffer images containing world-space position,
        normal, and albedo using redner, then shade the G-buffer
        using TensorFlow code. Lambertian shading is assumed, and shadows
        are not computed.

        Args
        ====
        scene: Union[pyredner.Scene, List[pyredner.Scene]]
            pyredner Scene containing camera, geometry and material.
            Can be a single scene or a list for batch render.
            For batch rendering all scenes need to have the same resolution.
        lights: Union[List[DeferredLight], List[List[DeferredLight]]]
            Lights for deferred rendering. If the scene is a list, and only
            a single list of lights is provided, the same lights are applied
            to all scenes. If a list of lists of lights is provided, each scene
            is lit by the corresponding lights.
        alpha: bool
            If set to False, generates a 3-channel image,
            otherwise generates a 4-channel image where the
            fourth channel is alpha.
        aa_samples: int
            Number of samples used for anti-aliasing at both x, y dimensions
            (e.g. if aa_samples=2, 4 samples are used).
        seed: Optional[Union[int, List[int]]]
            Random seed used for sampling. Randomly assigned if set to None.
            For batch rendering, if seed is not None, a list of seeds
            must be provided.
        sample_pixel_center: bool
            Always sample at the pixel center when rendering.
            This trades noise for aliasing.
            If this option is activated, the rendering becomes non-differentiable
            (since there is no antialiasing integral),
            and redner's edge sampling becomes an approximation to the gradients of the aliased rendering.

        Returns
        =======
        tf.Tensor or List[tf.Tensor]
            | if input scene is a list: a tensor with size [N, H, W, C], N is the list size
            | else: a tensor with size [H, W, C]
            | if alpha == True, C = 4.
            | else, C = 3.
    """
    channels = [
        redner.channels.position, redner.channels.shading_normal,
        redner.channels.diffuse_reflectance
    ]
    if alpha:
        channels.append(redner.channels.alpha)
    if isinstance(scene, pyredner.Scene):
        if seed is None:
            seed = random.randint(0, 16777216)
        # We do full-screen anti-aliasing: increase the rendering resolution
        # and downsample it after lighting
        org_res = scene.camera.resolution
        scene.camera.resolution = (org_res[0] * aa_samples,
                                   org_res[1] * aa_samples)
        scene_args = pyredner.serialize_scene(
            scene = scene,
            num_samples = (1, 1),
            max_bounces = 0,
            sampler_type = redner.SamplerType.sobol,
            channels = channels,
            use_secondary_edge_sampling = False,
            sample_pixel_center = sample_pixel_center)
        # Need to revert the resolution back
        scene.camera.resolution = org_res
        g_buffer = pyredner.render(seed, *scene_args)
        pos = g_buffer[:, :, :3]
        normal = g_buffer[:, :, 3:6]
        albedo = g_buffer[:, :, 6:9]
        img = tf.zeros((g_buffer.shape[0], g_buffer.shape[1], 3))
        for light in lights:
            img = img + light.render(pos, normal, albedo)
        if alpha:
            # alpha is in the last channel
            img = tf.concat((img, g_buffer[:, :, 9:10]), axis=2)
        if aa_samples > 1:
            # Downsample
            img = tf.expand_dims(img, 0)  # HWC -> NHWC
            # TODO: switch to method = 'area' when tensorflow implements the gradients...
            img = tf.image.resize(img,
                                  size=org_res,
                                  method='bilinear',
                                  antialias=True)
            img = tf.squeeze(img, axis=0)  # NHWC -> HWC
        return img
    else:
        assert (isinstance(scene, list))
        if seed is None:
            # Randomly generate a list of seeds
            seed = []
            for i in range(len(scene)):
                seed.append(random.randint(0, 16777216))
        assert (len(seed) == len(scene))
        if len(lights) > 0 and not isinstance(lights[0], list):
            # Specialized version: stack G-buffers and light all images in parallel
            g_buffers = []
            # Render each scene in the batch and stack them together
            for sc, se in zip(scene, seed):
                # We do full-screen anti-aliasing: increase the rendering resolution
                # and downsample it after lighting
                org_res = sc.camera.resolution
                sc.camera.resolution = (org_res[0] * aa_samples,
                                        org_res[1] * aa_samples)
                scene_args = pyredner.serialize_scene(
                    scene = sc,
                    num_samples = (1, 1),
                    max_bounces = 0,
                    sampler_type = redner.SamplerType.sobol,
                    channels = channels,
                    use_secondary_edge_sampling = False,
                    sample_pixel_center = sample_pixel_center)
                # Need to revert the resolution back
                sc.camera.resolution = org_res
                g_buffers.append(pyredner.render(se, *scene_args))
            g_buffers = tf.stack(g_buffers)
            pos = g_buffers[:, :, :, :3]
            normal = g_buffers[:, :, :, 3:6]
            albedo = g_buffers[:, :, :, 6:9]
            imgs = tf.zeros((g_buffers.shape[0], g_buffers.shape[1],
                             g_buffers.shape[2], 3))
            for light in lights:
                imgs = imgs + light.render(pos, normal, albedo)
            if alpha:
                imgs = tf.concat((imgs, g_buffers[:, :, :, 9:10]), axis=-1)
        else:
            # If each scene has a different lighting: light them in the loop
            imgs = []
            # Render each scene in the batch and stack them together
            for sc, se, lgts in zip(scene, seed, lights):
                # We do full-screen anti-aliasing: increase the rendering resolution
                # and downsample it after lighting
                org_res = sc.camera.resolution
                sc.camera.resolution = (org_res[0] * aa_samples,
                                        org_res[1] * aa_samples)
                scene_args = pyredner.serialize_scene(
                    scene = sc,
                    num_samples = (1, 1),
                    max_bounces = 0,
                    sampler_type = redner.SamplerType.sobol,
                    channels = channels,
                    use_secondary_edge_sampling = False,
                    sample_pixel_center = sample_pixel_center)
                # Need to revert the resolution back
                sc.camera.resolution = org_res
                g_buffer = pyredner.render(se, *scene_args)
                pos = g_buffer[:, :, :3]
                normal = g_buffer[:, :, 3:6]
                albedo = g_buffer[:, :, 6:9]
                img = tf.zeros((g_buffer.shape[0], g_buffer.shape[1], 3))
                for light in lgts:
                    img = img + light.render(pos, normal, albedo)
                if alpha:
                    # alpha is in the last channel
                    img = tf.concat((img, g_buffer[:, :, 9:10]), axis=-1)
                imgs.append(img)
            imgs = tf.stack(imgs)
        if aa_samples > 1:
            # Downsample
            # TODO: switch to method = 'area' when tensorflow implements the gradients...
            imgs = tf.image.resize(imgs,
                                   size=org_res,
                                   method='bilinear',
                                   antialias=True)
        return imgs
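
# A minimal batch-usage sketch. `scene0` and `scene1` are assumed to be
# pyredner scenes with identical resolution; pyredner.PointLight stands in for
# a DeferredLight implementation (its constructor arguments are an assumption).
light = pyredner.PointLight(position=tf.constant([0.0, 0.0, -5.0]),
                            intensity=tf.constant([10.0, 10.0, 10.0]))
imgs = render_deferred([scene0, scene1], lights=[light])
# imgs has shape [2, H, W, 3]; to light each scene differently, pass
# lights=[[light_a], [light_b]] instead.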
Example #6
def render_generic(scene: pyredner.Scene,
                   channels: List,
                   max_bounces: int = 1,
                   sampler_type=pyredner.sampler_type.sobol,
                   num_samples: Union[int, Tuple[int, int]] = (4, 4),
                   seed: Optional[int] = None,
                   sample_pixel_center: bool = False):
    """
        A generic rendering function that can perform path tracing,
        G-buffer rendering, or both.

        Args
        ====
        scene: Union[pyredner.Scene, List[pyredner.Scene]]
            pyredner Scene containing camera, geometry and material.
            Can be a single scene or a list for batch render.
            For batch rendering all scenes need to have the same resolution.
        channels: List[pyredner.channels]
            | A list of the following channels\:
            | pyredner.channels.alpha
            | pyredner.channels.depth
            | pyredner.channels.position
            | pyredner.channels.geometry_normal
            | pyredner.channels.shading_normal
            | pyredner.channels.uv
            | pyredner.channels.diffuse_reflectance
            | pyredner.channels.specular_reflectance
            | pyredner.channels.roughness
            | pyredner.channels.generic_texture
            | pyredner.channels.vertex_color
            | pyredner.channels.shape_id
            | pyredner.channels.triangle_id
            | pyredner.channels.material_id
        max_bounces: int
            Number of bounces for global illumination, 1 means direct lighting only.
        sampler_type: pyredner.sampler_type
            | Which sampling pattern to use. See
              `Chapter 7 of the PBRT book <http://www.pbr-book.org/3ed-2018/Sampling_and_Reconstruction.html>`_
              for an explanation of the difference between different samplers.
            | The following samplers are supported\:
            | pyredner.sampler_type.independent
            | pyredner.sampler_type.sobol
        num_samples: Union[int, Tuple[int, int]]
            Number of samples per pixel for forward and backward passes.
            Can be an integer or a tuple of 2 integers.
        seed: Optional[Union[int, List[int]]]
            Random seed used for sampling. Randomly assigned if set to None.
            For batch rendering, if seed is not None, a list of seeds
            must be provided.
        sample_pixel_center: bool
            Always sample at the pixel center when rendering.
            This trades noise for aliasing.
            If this option is activated, the rendering becomes non-differentiable
            (since there is no antialiasing integral),
            and redner's edge sampling becomes an approximation to the gradients of the aliased rendering.

        Returns
        =======
        tf.Tensor or List[tf.Tensor]
            | if input scene is a list: a tensor with size [N, H, W, C], N is the list size
            | else: a tensor with size [H, W, C]
    """
    if isinstance(scene, pyredner.Scene):
        if seed is None:
            seed = random.randint(0, 16777216)
        scene_args = pyredner.serialize_scene(
            scene = scene,
            num_samples = num_samples,
            max_bounces = max_bounces,
            sampler_type = sampler_type,
            channels = channels,
            sample_pixel_center = sample_pixel_center)
        return pyredner.render(seed, *scene_args)
    else:
        assert (isinstance(scene, list))
        if seed is None:
            # Randomly generate a list of seeds
            seed = []
            for i in range(len(scene)):
                seed.append(random.randint(0, 16777216))
        assert (len(seed) == len(scene))
        # Render each scene in the batch and stack them together
        imgs = []
        for sc, se in zip(scene, seed):
            scene_args = pyredner.serialize_scene(
                scene = sc,
                num_samples = num_samples,
                max_bounces = max_bounces,
                sampler_type = sampler_type,
                channels = channels,
                sample_pixel_center = sample_pixel_center)
            imgs.append(pyredner.render(se, *scene_args))
        imgs = tf.stack(imgs)
        return imgs
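
# A minimal usage sketch, assuming `scene` exists. The output packs the
# requested channels in order: 3 channels of radiance, then 1 of depth.
img = render_generic(scene,
                     channels=[redner.channels.radiance,
                               redner.channels.depth],
                     max_bounces=1,
                     num_samples=16)
radiance = img[:, :, :3]
depth = img[:, :, 3]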
Example #7
# Setup light source
with tf.device('/device:cpu:' + str(pyredner.get_cpu_device_id())):
    light = pyredner.AreaLight(shape_id=1,
                               intensity=tf.Variable([20.0, 20.0, 20.0],
                                                     dtype=tf.float32,
                                                     use_resource=True))
area_lights = [light]

# Construct the scene
scene = pyredner.Scene(cam, shapes, materials, area_lights)
# Serialize the scene
# Here we specify the output channels as "radiance" and "alpha"
scene_args = pyredner.serialize_scene(
    scene = scene,
    num_samples = 16,
    max_bounces = 1,
    channels = [redner.channels.radiance, redner.channels.alpha])

# Render the scene as our target image. The first argument is the seed for
# the RNG in the renderer.
img = pyredner.render(0, *scene_args)

background = pyredner.imread('scenes/textures/siggraph.jpg')
background = tf.convert_to_tensor(skimage.transform.resize(
    background.numpy(), (256, 256, 3)),
                                  dtype=tf.float32)
# Composite the rendered image over the background using the alpha channel.
img = img[:, :, :3] * img[:, :, 3:4] + background * (1 - img[:, :, 3:4])

# Save the images.
# The output image is in the GPU memory if you are using GPU.
Example #8
        shapes.append(
            pyredner.Shape(vertices=mesh.vertices,
                           indices=mesh.indices,
                           uvs=mesh.uvs,
                           normals=mesh.normals,
                           material_id=material_id_map[mtl_name]))

# We don't setup any light source here

# Construct the scene
scene = pyredner.Scene(cam, shapes, materials, area_lights=[], envmap=None)
# Serialize the scene
# Here we specify the output channels as "depth", "shading_normal"
scene_args = pyredner.serialize_scene(
    scene=scene,
    num_samples=16,
    max_bounces=0,
    channels=[redner.channels.depth, redner.channels.shading_normal])

# Render. The first argument is the seed for RNG in the renderer.
img = pyredner.render(0, *scene_args)
# Save the images.
depth = img[:, :, 0]
normal = img[:, :, 1:4]
pyredner.imwrite(depth, 'results/test_g_buffer/target_depth.exr')
pyredner.imwrite(depth,
                 'results/test_g_buffer/target_depth.png',
                 normalize=True)
pyredner.imwrite(normal, 'results/test_g_buffer/target_normal.exr')
pyredner.imwrite(normal,
                 'results/test_g_buffer/target_normal.png',
                 normalize=True)
Example #9
# assign the intensity of the light, which is a length 3 float tensor in CPU.
with tf.device('/device:cpu:' + str(pyredner.get_cpu_device_id())):
    light = pyredner.AreaLight(shape_id=1,
                               intensity=tf.Variable([20.0, 20.0, 20.0],
                                                     dtype=tf.float32))
area_lights = [light]

# Finally we construct our scene using all the variables we setup previously.
scene = pyredner.Scene(cam, shapes, materials, area_lights)
# All TensorFlow functions take a flat array of TensorFlow tensors as input,
# therefore we need to serialize the scene into an array. The following
# function is doing this. We also specify how many Monte Carlo samples we want to
# use per pixel and the number of bounces for indirect illumination here
# (one bounce means only direct illumination).
scene_args = pyredner.serialize_scene(scene=scene,
                                      num_samples=16,
                                      max_bounces=1)

# Render the scene as our target image using pyredner's custom render
# function. The first argument is the seed for the RNG in the renderer.
# Redner automatically maps the devices in the render function, so there is
# no need to specify tf.device here.
img = pyredner.render(0, *scene_args)
# Save the images.
pyredner.imwrite(img, 'results/test_single_triangle/target.exr')
pyredner.imwrite(img, 'results/test_single_triangle/target.png')
# Read the target image we just saved.
target = pyredner.imread('results/test_single_triangle/target.exr')
if pyredner.get_use_gpu():
Example #10
                                  normals=normals,
                                  colors=vertex_color,
                                  material_id=0)
shapes = [shape_sphere]

with tf.device(pyredner.get_device_name()):
    envmap = pyredner.imread('sunsky.exr')
    envmap = pyredner.EnvironmentMap(envmap)
scene = pyredner.Scene(camera=cam,
                       shapes=shapes,
                       materials=materials,
                       area_lights=[],
                       envmap=envmap)
scene_args = pyredner.serialize_scene(
    scene=scene,
    num_samples=256,
    max_bounces=1,
    channels=[redner.channels.radiance, redner.channels.vertex_color])

img = pyredner.render(0, *scene_args)
img_radiance = img[:, :, :3]
img_vertex_color = img[:, :, 3:]
pyredner.imwrite(img_radiance, 'results/test_vertex_color/target.exr')
pyredner.imwrite(img_radiance, 'results/test_vertex_color/target.png')
pyredner.imwrite(img_vertex_color,
                 'results/test_vertex_color/target_color.png')
target_radiance = pyredner.imread('results/test_vertex_color/target.exr')

# Initial guess. Set to 0.5 for all vertices.
with tf.device(pyredner.get_device_name()):
    shape_sphere.colors = tf.Variable(tf.zeros_like(vertices) + 0.5)
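
# A sketch of the vertex-color optimization loop that would follow, modeled on
# the other redner examples rather than taken from this exact file.
optimizer = tf.keras.optimizers.Adam(learning_rate=1e-2)
for t in range(200):
    with tf.GradientTape() as tape:
        scene_args = pyredner.serialize_scene(
            scene=scene,
            num_samples=4,
            max_bounces=1,
            channels=[redner.channels.radiance, redner.channels.vertex_color])
        img = pyredner.render(t + 1, *scene_args)  # fresh seed per iteration
        loss = tf.reduce_sum(tf.square(img[:, :, :3] - target_radiance))
    grads = tape.gradient(loss, [shape_sphere.colors])
    optimizer.apply_gradients(zip(grads, [shape_sphere.colors]))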
Example #11
def render_deferred(scene: pyredner.Scene,
                    lights: List[DeferredLight],
                    alpha: bool = False,
                    aa_samples: int = 2,
                    seed: Optional[int] = None):
    """
        Render the scene using `deferred rendering <https://en.wikipedia.org/wiki/Deferred_shading>`_.
        We generate a G-buffer image containing world-space position,
        normal, and albedo using redner, then shade the G-buffer
        using TensorFlow code. Assuming Lambertian shading and does not
        compute shadow.

        Args
        ====
        scene: pyredner.Scene
            pyredner Scene containing camera, geometry and material.
        lights: List[DeferredLight]
            Lights for deferred rendering.
        alpha: bool
            If set to False, generates a 3-channel image,
            otherwise generates a 4-channel image where the
            fourth channel is alpha.
        aa_samples: int
            Number of samples used for anti-aliasing at both x, y dimensions
            (e.g. if aa_samples=2, 4 samples are used).
        seed: Optional[int]
            Random seed used for sampling. Randomly assigned if set to None.

        Returns
        =======
        tf.Tensor
            if alpha == True, a tensor with size [H, W, 4],
            else, a tensor with size [H, W, 3]
    """
    if seed is None:
        seed = random.randint(0, 16777216)

    org_res = scene.camera.resolution
    scene.camera.resolution = (org_res[0] * aa_samples,
                               org_res[1] * aa_samples)
    channels = [redner.channels.position,
                redner.channels.shading_normal,
                redner.channels.diffuse_reflectance]
    if alpha:
        channels.append(redner.channels.alpha)
    scene_args = pyredner.serialize_scene(
        scene = scene,
        num_samples = (1, 1),
        max_bounces = 0,
        sampler_type = redner.SamplerType.sobol,
        channels = channels,
        use_secondary_edge_sampling = False)
    scene.camera.resolution = org_res
    g_buffer = pyredner.render(seed, *scene_args)
    pos = g_buffer[:, :, :3]
    normal = g_buffer[:, :, 3:6]
    albedo = g_buffer[:, :, 6:9]
    img = tf.zeros((g_buffer.shape[0], g_buffer.shape[1], 3))
    for light in lights:
        img = img + light.render(pos, normal, albedo)
    if alpha:
        # alpha is in the last channel; concatenate it before downsampling so
        # the image and the G-buffer still have matching resolutions.
        img = tf.concat((img, g_buffer[:, :, 9:10]), axis=2)
    if aa_samples > 1:
        # Downsample back to the original resolution.
        img = tf.expand_dims(img, 0)  # HWC -> NHWC
        img = tf.image.resize(img, size=org_res, method='area', antialias=True)
        img = tf.squeeze(img, axis=0)  # NHWC -> HWC
    return img
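
# A minimal usage sketch, assuming `scene` exists. pyredner.PointLight stands
# in for a DeferredLight implementation (its constructor arguments are an
# assumption).
light = pyredner.PointLight(position=tf.constant([0.0, 0.0, -5.0]),
                            intensity=tf.constant([10.0, 10.0, 10.0]))
img = render_deferred(scene, lights=[light], alpha=False, aa_samples=2)
pyredner.imwrite(img, 'results/deferred.png')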