Example #1
def automatic_camera_placement(shapes: List,
                               resolution: Tuple[int, int]):
    """
        Given a list of shapes, generates camera parameters automatically
        using the bounding boxes of the shapes. Places the camera at
        some distance from the shapes, so that it can see all of them.
        Inspired by https://github.com/mitsuba-renderer/mitsuba/blob/master/src/librender/scene.cpp#L286
    """
    assert(tf.executing_eagerly())
    aabb_min = tf.constant((float('inf'), float('inf'), float('inf')))
    aabb_max = -tf.constant((float('inf'), float('inf'), float('inf')))
    for shape in shapes:
        v = shape.vertices
        v_min = tf.reduce_min(v, 0)
        v_max = tf.reduce_max(v, 0)
        with tf.device('/device:cpu:' + str(pyredner.get_cpu_device_id())):
            v_min = tf.identity(v_min)
            v_max = tf.identity(v_max)
        aabb_min = tf.minimum(aabb_min, v_min)
        aabb_max = tf.maximum(aabb_max, v_max)
    assert(tf.reduce_all(tf.math.is_finite(aabb_min)) and tf.reduce_all(tf.math.is_finite(aabb_max)))
    center = (aabb_max + aabb_min) * 0.5
    extents = aabb_max - aabb_min
    max_extents_xy = tf.maximum(extents[0], extents[1])
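    # The camera sees every shape when half the bounding box's XY extent
    # subtends half the field of view:
    #   tan(fov / 2) = (max_extents_xy / 2) / distance
    # with fov = 45 degrees below.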
    distance = max_extents_xy / (2 * math.tan(45 * 0.5 * math.pi / 180.0))
    max_extents_xyz = tf.maximum(extents[2], max_extents_xy)
    return Camera(position = tf.stack((center[0], center[1], aabb_min[2] - distance)),
                  look_at = center,
                  up = tf.constant((0.0, 1.0, 0.0)),
                  fov = tf.constant([45.0]),
                  clip_near = 0.001 * float(distance),
                  resolution = resolution)
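A minimal usage sketch for the helper above. The sphere generator and the Shape constructor follow the pyredner API as I understand it; treat the exact names and arguments as assumptions rather than part of the original example:

import tensorflow as tf
import pyredner_tensorflow as pyredner

# Hypothetical geometry; generate_sphere is assumed to mirror the
# PyTorch pyredner helper of the same name.
vertices, indices, uvs, normals = pyredner.generate_sphere(theta_steps = 64,
                                                           phi_steps = 128)
shape = pyredner.Shape(vertices = vertices, indices = indices,
                       uvs = uvs, normals = normals, material_id = 0)
camera = automatic_camera_placement([shape], resolution = (256, 256))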
Example #2
 def intrinsic_mat(self, value):
     if value is not None:
         self._intrinsic_mat = value
         with tf.device('/device:cpu:' + str(pyredner.get_cpu_device_id())):
             self.intrinsic_mat_inv = tf.linalg.inv(self._intrinsic_mat)
     else:
         assert(self.fov is not None)
         # Re-run the fov setter so the intrinsic matrix is rebuilt from fov.
         self.fov = self._fov
Example #3
 def cam_to_world(self, value):
     if value is not None:
         self._cam_to_world = value
         with tf.device('/device:cpu:' + str(pyredner.get_cpu_device_id())):
             self.world_to_cam = tf.linalg.inv(self.cam_to_world)
     else:
         self._cam_to_world = None
         self.world_to_cam = None
Example #4
 def fov(self, value):
     with tf.device('/device:cpu:' + str(pyredner.get_cpu_device_id())):
         self._fov = tf.identity(value).cpu()
         fov_factor = 1.0 / tf.tan(transform.radians(0.5 * self._fov))
         o = tf.convert_to_tensor(np.ones([1], dtype=np.float32),
                                  dtype=tf.float32)
         diag = tf.concat([fov_factor, fov_factor, o], 0)
         self._cam_to_ndc = tf.linalg.tensor_diag(diag)
         self.ndc_to_cam = tf.linalg.inv(self._cam_to_ndc)
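For a field of view θ the setter builds cam_to_ndc = diag(1/tan(θ/2), 1/tan(θ/2), 1). A standalone sketch of that relationship in plain TensorFlow (no pyredner required):

import math
import tensorflow as tf

fov = tf.constant([45.0])                           # degrees
fov_factor = 1.0 / tf.tan(fov * math.pi / 360.0)    # 1 / tan(fov / 2)
diag = tf.concat([fov_factor, fov_factor, tf.ones([1])], 0)
cam_to_ndc = tf.linalg.tensor_diag(diag)
# For 45 degrees, 1 / tan(22.5 deg) ~= 2.4142 on the x and y diagonal.
ndc_to_cam = tf.linalg.inv(cam_to_ndc)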
Example #5
 def fov(self, value):
     if value is not None:
         self._fov = value
         with tf.device('/device:cpu:' + str(pyredner.get_cpu_device_id())):
             fov_factor = 1.0 / tf.tan(transform.radians(0.5 * self._fov))
             o = tf.ones([1], dtype=tf.float32)
             diag = tf.concat([fov_factor, fov_factor, o], 0)
             self._intrinsic_mat = tf.linalg.tensor_diag(diag)
             self.intrinsic_mat_inv = tf.linalg.inv(self._intrinsic_mat)
     else:
         self._fov = None
Example #6
 def __init__(self,
              position: Optional[tf.Tensor] = None,
              look_at: Optional[tf.Tensor] = None,
              up: Optional[tf.Tensor] = None,
              fov: Optional[tf.Tensor] = None,
              clip_near: float = 1e-4,
              resolution: Tuple[int] = (256, 256),
              cam_to_world: Optional[tf.Tensor] = None,
              intrinsic_mat: Optional[tf.Tensor] = None,
              camera_type = pyredner.camera_type.perspective,
              fisheye: bool = False):
     assert(tf.executing_eagerly())
     if position is not None:
         assert(position.dtype == tf.float32)
         assert(len(position.shape) == 1 and position.shape[0] == 3)
     if look_at is not None:
         assert(look_at.dtype == tf.float32)
         assert(len(look_at.shape) == 1 and look_at.shape[0] == 3)
     if up is not None:
         assert(up.dtype == tf.float32)
         assert(len(up.shape) == 1 and up.shape[0] == 3)
     if fov is not None:
         assert(fov.dtype == tf.float32)
         assert(len(fov.shape) == 1 and fov.shape[0] == 1)
     assert(isinstance(clip_near, float))
     if position is None and look_at is None and up is None:
          assert(cam_to_world is not None)
     
     self.position = position
     self.look_at = look_at
     self.up = up
     self.fov = fov
     with tf.device('/device:cpu:' + str(pyredner.get_cpu_device_id())):
         if cam_to_world is not None:
             self.cam_to_world = cam_to_world
         else:
             self.cam_to_world = None
         if intrinsic_mat is None:
             if camera_type == redner.CameraType.perspective:
                 fov_factor = 1.0 / tf.tan(transform.radians(0.5 * fov))
                 o = tf.ones([1], dtype=tf.float32)
                 diag = tf.concat([fov_factor, fov_factor, o], 0)
                 self._intrinsic_mat = tf.linalg.tensor_diag(diag)
             else:
                  self._intrinsic_mat = tf.eye(3, dtype=tf.float32)
         else:
             self._intrinsic_mat = intrinsic_mat
         self.intrinsic_mat_inv = tf.linalg.inv(self._intrinsic_mat)
     self.clip_near = clip_near
     self.resolution = resolution
     self.camera_type = camera_type
     if fisheye:
         self.camera_type = redner.CameraType.fisheye
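Two ways to drive the constructor above, sketched under the assumption that it is exposed as pyredner.Camera; either the look-at triple or a full cam_to_world matrix must be supplied:

import tensorflow as tf
import pyredner_tensorflow as pyredner

# Look-at parameterization.
cam = pyredner.Camera(position = tf.constant([0.0, 0.0, -5.0]),
                      look_at = tf.constant([0.0, 0.0, 0.0]),
                      up = tf.constant([0.0, 1.0, 0.0]),
                      fov = tf.constant([45.0]),
                      resolution = (256, 256))

# Matrix parameterization; position/look_at/up may then be omitted,
# since the assertion in __init__ only requires cam_to_world instead.
cam = pyredner.Camera(cam_to_world = tf.eye(4),
                      fov = tf.constant([45.0]),
                      resolution = (256, 256))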
Example #7
def compute_uvs(vertices, indices, print_progress = True):
    """
        Compute UV coordinates of a given mesh using a charting algorithm
        with least squares conformal mapping. This calls the `xatlas <https://github.com/jpcy/xatlas>`_ library.

        Args
        ====
        vertices: tf.Tensor
            3D position of vertices
            float32 tensor with size num_vertices x 3
        indices: tf.Tensor
            vertex indices of triangle faces.
            int32 tensor with size num_triangles x 3

        Returns
        =======
        tf.Tensor
            uv vertices pool, float32 Tensor with size num_uv_vertices x 2
        tf.Tensor
            uv indices, int32 Tensor with size num_triangles x 3
    """
    with tf.device('/device:cpu:' + str(pyredner.get_cpu_device_id())):
        vertices = tf.identity(vertices)
        indices = tf.identity(indices)
        uv_trimesh = redner.UVTriMesh(redner.float_ptr(pyredner.data_ptr(vertices)),
                                      redner.int_ptr(pyredner.data_ptr(indices)),
                                      redner.float_ptr(0),
                                      redner.int_ptr(0),
                                      int(vertices.shape[0]),
                                      0,
                                      int(indices.shape[0]))

        atlas = redner.TextureAtlas()
        num_uv_vertices = redner.automatic_uv_map([uv_trimesh], atlas, print_progress)[0]

        uvs = tf.zeros([num_uv_vertices, 2], dtype=tf.float32)
        uv_indices = tf.zeros_like(indices)
        uv_trimesh.uvs = redner.float_ptr(pyredner.data_ptr(uvs))
        uv_trimesh.uv_indices = redner.int_ptr(pyredner.data_ptr(uv_indices))
        uv_trimesh.num_uv_vertices = num_uv_vertices

        redner.copy_texture_atlas(atlas, [uv_trimesh])

    with tf.device(pyredner.get_device_name()):
        vertices = tf.identity(vertices)
        indices = tf.identity(indices)
        uvs = tf.identity(uvs)
        uv_indices = tf.identity(uv_indices)
    return uvs, uv_indices
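A small sketch of calling compute_uvs on a one-triangle mesh, assuming the redner native module is available; the returned shapes match the docstring above:

import tensorflow as tf

vertices = tf.constant([[0.0, 0.0, 0.0],
                        [1.0, 0.0, 0.0],
                        [0.0, 1.0, 0.0]], dtype = tf.float32)
indices = tf.constant([[0, 1, 2]], dtype = tf.int32)
uvs, uv_indices = compute_uvs(vertices, indices, print_progress = False)
# uvs:        float32, num_uv_vertices x 2
# uv_indices: int32,   num_triangles x 3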
Example #8
    def __init__(self, values, env_to_world=tf.eye(4, 4)):
        assert (tf.executing_eagerly())
        # Convert to constant texture if necessary
        if tf.is_tensor(values):
            values = pyredner.Texture(values)

        # assert(values.texels.is_contiguous())
        assert (values.texels.dtype == tf.float32)

        with tf.device(pyredner.get_device_name()):
            # Build sampling table
            luminance = 0.212671 * values.texels[:, :, 0] + \
                        0.715160 * values.texels[:, :, 1] + \
                        0.072169 * values.texels[:, :, 2]
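            # (0.212671, 0.715160, 0.072169) are the Rec. 709 / sRGB
            # RGB-to-luminance weights.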
            # For each y, compute CDF over x
            sample_cdf_xs_ = tf.cumsum(luminance, axis=1)

            y_weight = tf.sin(
                math.pi *
                (tf.cast(tf.range(luminance.shape[0].value), tf.float32) + 0.5)
                / float(luminance.shape[0].value))
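            # Weight each row by sin(theta): in a lat-long environment map,
            # rows near the poles cover less solid angle on the sphere.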

            # Compute CDF for y
            sample_cdf_ys_ = tf.cumsum(sample_cdf_xs_[:, -1] * y_weight,
                                       axis=0)
            pdf_norm = (luminance.shape[0].value * luminance.shape[1].value) / \
                 (sample_cdf_ys_[-1] * (2 * math.pi * math.pi))
            # Normalize to [0, 1)
            sample_cdf_xs = (sample_cdf_xs_ - sample_cdf_xs_[:, 0:1]) / \
                tf.math.maximum(
                    sample_cdf_xs_[
                        :,
                        (luminance.shape[1].value - 1):luminance.shape[1].value],
                        1e-8 * tf.convert_to_tensor(np.ones((sample_cdf_xs_.shape[0], 1)), dtype=tf.float32)
                    )
            sample_cdf_ys = (sample_cdf_ys_ - sample_cdf_ys_[0]) / \
                tf.math.maximum(sample_cdf_ys_[-1], tf.constant([1e-8]))

            self.values = values
            self.sample_cdf_ys = sample_cdf_ys
            self.sample_cdf_xs = sample_cdf_xs
        with tf.device('/device:cpu:' + str(pyredner.get_cpu_device_id())):
            self.pdf_norm = pdf_norm.cpu()
            env_to_world = tf.identity(env_to_world).cpu()
            self.env_to_world = env_to_world
            self.world_to_env = tf.linalg.inv(env_to_world)
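A hedged construction sketch; note the constructor above uses the TF 1.x `shape[i].value` API, so this is written against that era's TensorFlow:

import tensorflow as tf
import pyredner_tensorflow as pyredner

# A tiny constant lat-long environment map; a real one would be
# loaded from an HDR image instead.
texels = tf.ones([16, 32, 3], dtype = tf.float32)
envmap = pyredner.EnvironmentMap(texels)   # builds the sampling CDFs above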
Example #9
    def __init__(self,
                 position: tf.Tensor,
                 look_at: tf.Tensor,
                 up: tf.Tensor,
                 fov: tf.Tensor,
                 clip_near: float,
                 resolution: Tuple[int],
                 cam_to_ndc: tf.Tensor = None,
                 camera_type=redner.CameraType.perspective,
                 fisheye: bool = False):
        assert (tf.executing_eagerly())
        assert (position.dtype == tf.float32)
        assert (len(position.shape) == 1 and position.shape[0] == 3)
        assert (look_at.dtype == tf.float32)
        assert (len(look_at.shape) == 1 and look_at.shape[0] == 3)
        assert (up.dtype == tf.float32)
        assert (len(up.shape) == 1 and up.shape[0] == 3)
        if fov is not None:
            assert (fov.dtype == tf.float32)
            assert (len(fov.shape) == 1 and fov.shape[0] == 1)
        assert (isinstance(clip_near, float))

        with tf.device('/device:cpu:' + str(pyredner.get_cpu_device_id())):
            self.position = tf.identity(position).cpu()
            self.look_at = tf.identity(look_at).cpu()
            self.up = tf.identity(up).cpu()
            self.fov = tf.identity(fov).cpu() if fov is not None else None
            if cam_to_ndc is None:
                if camera_type == redner.CameraType.perspective:
                    fov_factor = 1.0 / tf.tan(transform.radians(0.5 * fov))
                    o = tf.convert_to_tensor(np.ones([1], dtype=np.float32),
                                             dtype=tf.float32)
                    diag = tf.concat([fov_factor, fov_factor, o], 0)
                    self._cam_to_ndc = tf.linalg.tensor_diag(diag)
                else:
                    self._cam_to_ndc = tf.eye(3, dtype=tf.float32)
            else:
                self._cam_to_ndc = tf.identity(cam_to_ndc).cpu()
            self.ndc_to_cam = tf.linalg.inv(self.cam_to_ndc)
            self.clip_near = clip_near
            self.resolution = resolution
            self.camera_type = camera_type
            if fisheye:
                self.camera_type = redner.CameraType.fisheye
Example #10
def unpack_args(seed,
                args,
                use_primary_edge_sampling=None,
                use_secondary_edge_sampling=None):
    """
        Given a list of serialized scene arguments, unpack
        all information into a Context.
    """
    # Unpack arguments
    current_index = 0
    num_shapes = int(args[current_index])
    current_index += 1
    num_materials = int(args[current_index])
    current_index += 1
    num_lights = int(args[current_index])
    current_index += 1

    # Camera arguments
    cam_position = args[current_index]
    current_index += 1
    cam_look_at = args[current_index]
    current_index += 1
    cam_up = args[current_index]
    current_index += 1
    cam_to_world = args[current_index]
    current_index += 1
    world_to_cam = args[current_index]
    current_index += 1
    intrinsic_mat_inv = args[current_index]
    current_index += 1
    intrinsic_mat = args[current_index]
    current_index += 1
    clip_near = float(args[current_index])
    current_index += 1
    resolution = args[current_index].numpy()  # Tuple[int, int]
    current_index += 1
    viewport = args[current_index].numpy()  # Tuple[int, int, int, int]
    current_index += 1
    camera_type = RednerCameraType.asCameraType(
        args[current_index])  # FIXME: Map to custom type
    current_index += 1

    with tf.device('/device:cpu:' + str(pyredner.get_cpu_device_id())):
        if is_empty_tensor(cam_to_world):
            camera = redner.Camera(
                resolution[1],
                resolution[0],
                redner.float_ptr(pyredner.data_ptr(cam_position)),
                redner.float_ptr(pyredner.data_ptr(cam_look_at)),
                redner.float_ptr(pyredner.data_ptr(cam_up)),
                redner.float_ptr(0),  # cam_to_world
                redner.float_ptr(0),  # world_to_cam
                redner.float_ptr(pyredner.data_ptr(intrinsic_mat_inv)),
                redner.float_ptr(pyredner.data_ptr(intrinsic_mat)),
                clip_near,
                camera_type,
                redner.Vector2i(viewport[1], viewport[0]),
                redner.Vector2i(viewport[3], viewport[2]))
        else:
            camera = redner.Camera(
                resolution[1], resolution[0], redner.float_ptr(0),
                redner.float_ptr(0), redner.float_ptr(0),
                redner.float_ptr(pyredner.data_ptr(cam_to_world)),
                redner.float_ptr(pyredner.data_ptr(world_to_cam)),
                redner.float_ptr(pyredner.data_ptr(intrinsic_mat_inv)),
                redner.float_ptr(pyredner.data_ptr(intrinsic_mat)), clip_near,
                camera_type, redner.Vector2i(viewport[1], viewport[0]),
                redner.Vector2i(viewport[3], viewport[2]))

    with tf.device(pyredner.get_device_name()):
        shapes = []
        for i in range(num_shapes):
            vertices = args[current_index]
            current_index += 1
            indices = args[current_index]
            current_index += 1
            uvs = args[current_index]
            current_index += 1
            normals = args[current_index]
            current_index += 1
            uv_indices = args[current_index]
            current_index += 1
            normal_indices = args[current_index]
            current_index += 1
            colors = args[current_index]
            current_index += 1
            material_id = int(args[current_index])
            current_index += 1
            light_id = int(args[current_index])
            current_index += 1

            shapes.append(redner.Shape(\
                redner.float_ptr(pyredner.data_ptr(vertices)),
                redner.int_ptr(pyredner.data_ptr(indices)),
                redner.float_ptr(pyredner.data_ptr(uvs) if not is_empty_tensor(uvs) else 0),
                redner.float_ptr(pyredner.data_ptr(normals) if not is_empty_tensor(normals) else 0),
                redner.int_ptr(pyredner.data_ptr(uv_indices) if not is_empty_tensor(uv_indices) else 0),
                redner.int_ptr(pyredner.data_ptr(normal_indices) if not is_empty_tensor(normal_indices) else 0),
                redner.float_ptr(pyredner.data_ptr(colors) if not is_empty_tensor(colors) else 0),
                int(vertices.shape[0]),
                int(uvs.shape[0]) if not is_empty_tensor(uvs) else 0,
                int(normals.shape[0]) if not is_empty_tensor(normals) else 0,
                int(indices.shape[0]),
                material_id,
                light_id))

    materials = []
    with tf.device(pyredner.get_device_name()):
        for i in range(num_materials):
            num_levels = int(args[current_index])
            current_index += 1
            diffuse_reflectance = []
            for j in range(num_levels):
                diffuse_reflectance.append(args[current_index])
                current_index += 1
            diffuse_uv_scale = args[current_index]
            current_index += 1

            num_levels = int(args[current_index])
            current_index += 1
            specular_reflectance = []
            for j in range(num_levels):
                specular_reflectance.append(args[current_index])
                current_index += 1
            specular_uv_scale = args[current_index]
            current_index += 1

            num_levels = int(args[current_index])
            current_index += 1
            roughness = []
            for j in range(num_levels):
                roughness.append(args[current_index])
                current_index += 1
            roughness_uv_scale = args[current_index]
            current_index += 1

            num_levels = int(args[current_index])
            current_index += 1
            generic_texture = []
            if num_levels > 0:
                for j in range(num_levels):
                    generic_texture.append(args[current_index])
                    current_index += 1
                generic_uv_scale = args[current_index]
                current_index += 1
            else:
                generic_uv_scale = None

            num_levels = int(args[current_index])
            current_index += 1
            normal_map = []
            if num_levels > 0:
                for j in range(num_levels):
                    normal_map.append(args[current_index])
                    current_index += 1
                normal_map_uv_scale = args[current_index]
                current_index += 1
            else:
                normal_map_uv_scale = None

            compute_specular_lighting = bool(args[current_index])
            current_index += 1
            two_sided = bool(args[current_index])
            current_index += 1
            use_vertex_color = bool(args[current_index])
            current_index += 1

            if get_tensor_dimension(diffuse_reflectance[0]) == 1:
                diffuse_reflectance = redner.Texture3(\
                    [redner.float_ptr(pyredner.data_ptr(diffuse_reflectance[0]))],
                    [0],
                    [0],
                    3, redner.float_ptr(pyredner.data_ptr(diffuse_uv_scale)))
            else:
                assert (get_tensor_dimension(diffuse_reflectance[0]) == 3)
                diffuse_reflectance = redner.Texture3(\
                    [redner.float_ptr(pyredner.data_ptr(x)) for x in diffuse_reflectance],
                    [x.shape[1] for x in diffuse_reflectance],
                    [x.shape[0] for x in diffuse_reflectance],
                    3,
                    redner.float_ptr(pyredner.data_ptr(diffuse_uv_scale)))

            if get_tensor_dimension(specular_reflectance[0]) == 1:
                specular_reflectance = redner.Texture3(\
                    [redner.float_ptr(pyredner.data_ptr(specular_reflectance[0]))],
                    [0],
                    [0],
                    3, redner.float_ptr(pyredner.data_ptr(specular_uv_scale)))
            else:
                assert (get_tensor_dimension(specular_reflectance[0]) == 3)
                specular_reflectance = redner.Texture3(\
                    [redner.float_ptr(pyredner.data_ptr(x)) for x in specular_reflectance],
                    [x.shape[1] for x in specular_reflectance],
                    [x.shape[0] for x in specular_reflectance],
                    3,
                    redner.float_ptr(pyredner.data_ptr(specular_uv_scale)))

            if get_tensor_dimension(roughness[0]) == 1:
                roughness = redner.Texture1(\
                    [redner.float_ptr(pyredner.data_ptr(roughness[0]))],
                    [0],
                    [0],
                    1, redner.float_ptr(pyredner.data_ptr(roughness_uv_scale)))
            else:
                assert (get_tensor_dimension(roughness[0]) == 3)
                roughness = redner.Texture1(\
                    [redner.float_ptr(pyredner.data_ptr(x)) for x in roughness],
                    [x.shape[1] for x in roughness],
                    [x.shape[0] for x in roughness],
                    1, # Texture1 is single-channel
                    redner.float_ptr(pyredner.data_ptr(roughness_uv_scale)))

            if len(generic_texture) > 0:
                generic_texture = redner.TextureN(\
                    [redner.float_ptr(pyredner.data_ptr(x)) for x in generic_texture],
                    [x.shape[1] for x in generic_texture],
                    [x.shape[0] for x in generic_texture],
                    generic_texture[0].shape[2],
                    redner.float_ptr(pyredner.data_ptr(generic_uv_scale)))
            else:
                generic_texture = redner.TextureN(\
                    [], [], [], 0, redner.float_ptr(0))

            if len(normal_map) > 0:
                normal_map = redner.Texture3(\
                    [redner.float_ptr(pyredner.data_ptr(x)) for x in normal_map],
                    [x.shape[1] for x in normal_map],
                    [x.shape[0] for x in normal_map],
                    normal_map[0].shape[2],
                    redner.float_ptr(pyredner.data_ptr(normal_map_uv_scale)))
            else:
                normal_map = redner.Texture3(\
                    [], [], [], 0, redner.float_ptr(0))

            materials.append(redner.Material(\
                diffuse_reflectance,
                specular_reflectance,
                roughness,
                generic_texture,
                normal_map,
                compute_specular_lighting,
                two_sided,
                use_vertex_color))

    with tf.device('/device:cpu:' + str(pyredner.get_cpu_device_id())):
        area_lights = []
        for i in range(num_lights):
            shape_id = int(args[current_index])
            current_index += 1
            intensity = args[current_index]
            current_index += 1
            two_sided = bool(args[current_index])
            current_index += 1
            directly_visible = bool(args[current_index])
            current_index += 1

            area_lights.append(
                redner.AreaLight(
                    shape_id, redner.float_ptr(pyredner.data_ptr(intensity)),
                    two_sided, directly_visible))

    envmap = None
    if not is_empty_tensor(args[current_index]):
        num_levels = int(args[current_index])
        current_index += 1
        values = []
        for j in range(num_levels):
            values.append(args[current_index])
            current_index += 1
        envmap_uv_scale = args[current_index]
        current_index += 1
        env_to_world = args[current_index]
        current_index += 1
        world_to_env = args[current_index]
        current_index += 1
        sample_cdf_ys = args[current_index]
        current_index += 1
        sample_cdf_xs = args[current_index]
        current_index += 1
        pdf_norm = float(args[current_index])
        current_index += 1
        directly_visible = bool(args[current_index])
        current_index += 1

        assert isinstance(pdf_norm, float)
        with tf.device(pyredner.get_device_name()):
            sample_cdf_ys = redner.float_ptr(pyredner.data_ptr(sample_cdf_ys))
            sample_cdf_xs = redner.float_ptr(pyredner.data_ptr(sample_cdf_xs))
        with tf.device('/device:cpu:' + str(pyredner.get_cpu_device_id())):
            env_to_world = redner.float_ptr(pyredner.data_ptr(env_to_world))
            world_to_env = redner.float_ptr(pyredner.data_ptr(world_to_env))
        with tf.device(pyredner.get_device_name()):
            values = redner.Texture3(\
                [redner.float_ptr(pyredner.data_ptr(x)) for x in values],
                [x.shape[1] for x in values], # width
                [x.shape[0] for x in values], # height
                3, # channels
                redner.float_ptr(pyredner.data_ptr(envmap_uv_scale)))
        envmap = redner.EnvironmentMap(\
            values,
            env_to_world,
            world_to_env,
            sample_cdf_ys,
            sample_cdf_xs,
            pdf_norm,
            directly_visible)
    else:
        current_index += 1

    # Options
    num_samples = args[current_index]
    current_index += 1
    if len(num_samples.shape) == 0 or num_samples.shape[0] == 1:
        num_samples = int(num_samples)
    else:
        assert (num_samples.shape[0] == 2)
        num_samples = (int(num_samples[0]), int(num_samples[1]))
    max_bounces = int(args[current_index])
    current_index += 1

    num_channel_args = int(args[current_index])
    current_index += 1

    channels = []
    for _ in range(num_channel_args):
        ch = args[current_index]
        ch = RednerChannels.asChannel(ch)
        channels.append(ch)
        current_index += 1

    sampler_type = args[current_index]
    sampler_type = RednerSamplerType.asSamplerType(sampler_type)
    current_index += 1

    use_primary_edge_sampling = args[current_index]
    current_index += 1
    use_secondary_edge_sampling = args[current_index]
    current_index += 1
    sample_pixel_center = args[current_index]
    current_index += 1

    start = time.time()
    scene = redner.Scene(camera, shapes, materials, area_lights, envmap,
                         pyredner.get_use_gpu(), pyredner.get_gpu_device_id(),
                         use_primary_edge_sampling,
                         use_secondary_edge_sampling)
    time_elapsed = time.time() - start
    if get_print_timing():
        print('Scene construction, time: %.5f s' % time_elapsed)

    # Ensure num_samples is a tuple.
    if isinstance(num_samples, int):
        num_samples = (num_samples, num_samples)

    options = redner.RenderOptions(seed, num_samples[0], max_bounces, channels,
                                   sampler_type, sample_pixel_center)

    ctx = Context()
    ctx.channels = channels
    ctx.options = options
    ctx.resolution = resolution
    ctx.viewport = viewport
    ctx.scene = scene
    ctx.camera = camera
    ctx.shapes = shapes
    ctx.materials = materials
    ctx.area_lights = area_lights
    ctx.envmap = envmap
    ctx.num_samples = num_samples
    ctx.num_channel_args = num_channel_args

    return ctx
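unpack_args consumes the flat tensor list produced by serialize_scene (the Example #16 variant pairs with this version of the protocol) and rebuilds redner's C++ objects on the other side of the custom-op boundary. A hedged round-trip sketch:

# scene is a pyredner.Scene built elsewhere.
scene_args = pyredner.serialize_scene(scene = scene,
                                      num_samples = 4,
                                      max_bounces = 1)
ctx = unpack_args(0, scene_args)   # seed = 0
# ctx.scene, ctx.camera, ctx.options, ... now hold redner C++ objects.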
Example #11
def forward(seed: int, *args):
    """
        Forward rendering pass: given a scene, output an image.
    """
    global __ctx
    ctx = __ctx

    # Unpack arguments
    current_index = 0
    num_shapes = int(args[current_index])
    current_index += 1
    num_materials = int(args[current_index])
    current_index += 1
    num_lights = int(args[current_index])
    current_index += 1

    # Camera arguments
    cam_position = args[current_index]
    current_index += 1
    cam_look_at = args[current_index]
    current_index += 1
    cam_up = args[current_index]
    current_index += 1
    ndc_to_cam = args[current_index]
    current_index += 1
    cam_to_ndc = args[current_index]
    current_index += 1
    clip_near = float(args[current_index])
    current_index += 1
    resolution = args[current_index].numpy()  # Tuple[int, int]
    current_index += 1
    camera_type = pyredner.RednerCameraType.asCameraType(
        args[current_index])  # FIXME: Map to custom type
    current_index += 1

    with tf.device('/device:cpu:' + str(pyredner.get_cpu_device_id())):
        camera = redner.Camera(
            resolution[1], resolution[0],
            redner.float_ptr(pyredner.data_ptr(cam_position)),
            redner.float_ptr(pyredner.data_ptr(cam_look_at)),
            redner.float_ptr(pyredner.data_ptr(cam_up)),
            redner.float_ptr(pyredner.data_ptr(ndc_to_cam)),
            redner.float_ptr(pyredner.data_ptr(cam_to_ndc)), clip_near,
            camera_type)

    with tf.device(pyredner.get_device_name()):
        shapes = []
        for i in range(num_shapes):
            vertices = args[current_index]
            current_index += 1
            indices = args[current_index]
            current_index += 1
            uvs = args[current_index]
            current_index += 1
            normals = args[current_index]
            current_index += 1
            material_id = int(args[current_index])
            current_index += 1
            light_id = int(args[current_index])
            current_index += 1
            shapes.append(redner.Shape(\
                redner.float_ptr(pyredner.data_ptr(vertices)),
                redner.int_ptr(pyredner.data_ptr(indices)),
                redner.float_ptr(pyredner.data_ptr(uvs) if uvs is not None else 0),
                redner.float_ptr(pyredner.data_ptr(normals) if normals is not None else 0),
                int(vertices.shape[0]),
                int(indices.shape[0]),
                material_id,
                light_id))

    materials = []
    with tf.device(pyredner.get_device_name()):
        for i in range(num_materials):
            diffuse_reflectance = args[current_index]
            current_index += 1
            diffuse_uv_scale = args[current_index]
            current_index += 1
            specular_reflectance = args[current_index]
            current_index += 1
            specular_uv_scale = args[current_index]
            current_index += 1
            roughness = args[current_index]
            current_index += 1
            roughness_uv_scale = args[current_index]
            current_index += 1
            normal_map = args[current_index]
            current_index += 1
            normal_map_uv_scale = args[current_index]
            current_index += 1
            two_sided = bool(args[current_index])
            current_index += 1

            diffuse_reflectance_ptr = redner.float_ptr(
                pyredner.data_ptr(diffuse_reflectance))
            specular_reflectance_ptr = redner.float_ptr(
                pyredner.data_ptr(specular_reflectance))
            roughness_ptr = redner.float_ptr(pyredner.data_ptr(roughness))
            if normal_map.shape[0] > 0:
                normal_map_ptr = redner.float_ptr(
                    pyredner.data_ptr(normal_map))
            diffuse_uv_scale_ptr = redner.float_ptr(
                pyredner.data_ptr(diffuse_uv_scale))
            specular_uv_scale_ptr = redner.float_ptr(
                pyredner.data_ptr(specular_uv_scale))
            roughness_uv_scale_ptr = redner.float_ptr(
                pyredner.data_ptr(roughness_uv_scale))
            if normal_map.shape[0] > 0:
                normal_map_uv_scale_ptr = redner.float_ptr(
                    pyredner.data_ptr(normal_map_uv_scale))
            if get_tensor_dimension(diffuse_reflectance) == 1:
                diffuse_reflectance = redner.Texture3(diffuse_reflectance_ptr,
                                                      0, 0, 0,
                                                      diffuse_uv_scale_ptr)
            else:
                diffuse_reflectance = redner.Texture3(\
                    diffuse_reflectance_ptr,
                    int(diffuse_reflectance.shape[2]), # width
                    int(diffuse_reflectance.shape[1]), # height
                    int(diffuse_reflectance.shape[0]), # num levels
                    diffuse_uv_scale_ptr)
            if get_tensor_dimension(specular_reflectance) == 1:
                specular_reflectance = redner.Texture3(
                    specular_reflectance_ptr, 0, 0, 0, specular_uv_scale_ptr)
            else:
                specular_reflectance = redner.Texture3(\
                    specular_reflectance_ptr,
                    int(specular_reflectance.shape[2]), # width
                    int(specular_reflectance.shape[1]), # height
                    int(specular_reflectance.shape[0]), # num levels
                    specular_uv_scale_ptr)
            if get_tensor_dimension(roughness) == 1:
                roughness = redner.Texture1(roughness_ptr, 0, 0, 0,
                                            roughness_uv_scale_ptr)
            else:
                assert (get_tensor_dimension(roughness) == 4)
                roughness = redner.Texture1(\
                    roughness_ptr,
                    int(roughness.shape[2]), # width
                    int(roughness.shape[1]), # height
                    int(roughness.shape[0]), # num levels
                    roughness_uv_scale_ptr)
            if normal_map.shape[0] > 0:
                normal_map = redner.Texture3(\
                    normal_map_ptr,
                    int(normal_map.shape[2]),
                    int(normal_map.shape[1]),
                    int(normal_map.shape[0]),
                    normal_map_uv_scale_ptr)
            else:
                normal_map = redner.Texture3(\
                    redner.float_ptr(0), 0, 0, 0, redner.float_ptr(0))
            materials.append(redner.Material(\
                diffuse_reflectance,
                specular_reflectance,
                roughness,
                normal_map,
                two_sided))

    with tf.device('/device:cpu:' + str(pyredner.get_cpu_device_id())):
        area_lights = []
        for i in range(num_lights):
            shape_id = int(args[current_index])
            current_index += 1
            intensity = args[current_index]
            current_index += 1
            two_sided = bool(args[current_index])
            current_index += 1

            area_lights.append(
                redner.AreaLight(
                    shape_id, redner.float_ptr(pyredner.data_ptr(intensity)),
                    two_sided))

    envmap = None
    if not is_empty_tensor(args[current_index]):
        values = args[current_index]
        current_index += 1
        envmap_uv_scale = args[current_index]
        current_index += 1
        env_to_world = args[current_index]
        current_index += 1
        world_to_env = args[current_index]
        current_index += 1
        sample_cdf_ys = args[current_index]
        current_index += 1
        sample_cdf_xs = args[current_index]
        current_index += 1
        pdf_norm = float(args[current_index])
        current_index += 1

        assert isinstance(pdf_norm, float)
        with tf.device(pyredner.get_device_name()):
            values_ptr = redner.float_ptr(pyredner.data_ptr(values))
            sample_cdf_ys = redner.float_ptr(pyredner.data_ptr(sample_cdf_ys))
            sample_cdf_xs = redner.float_ptr(pyredner.data_ptr(sample_cdf_xs))
            envmap_uv_scale = redner.float_ptr(
                pyredner.data_ptr(envmap_uv_scale))
        with tf.device('/device:cpu:' + str(pyredner.get_cpu_device_id())):
            env_to_world = redner.float_ptr(pyredner.data_ptr(env_to_world))
            world_to_env = redner.float_ptr(pyredner.data_ptr(world_to_env))
        values = redner.Texture3(
            values_ptr,
            int(values.shape[2]),  # width
            int(values.shape[1]),  # height
            int(values.shape[0]),  # num levels
            envmap_uv_scale)
        envmap = redner.EnvironmentMap(\
            values,
            env_to_world,
            world_to_env,
            sample_cdf_ys,
            sample_cdf_xs,
            pdf_norm)
    else:
        current_index += 7

    # Options
    num_samples = int(args[current_index])
    current_index += 1
    max_bounces = int(args[current_index])
    current_index += 1

    __num_channels = int(args[current_index])
    current_index += 1

    channels = []
    for _ in range(__num_channels):
        ch = args[current_index]
        ch = pyredner.RednerChannels.asChannel(ch)
        channels.append(ch)
        current_index += 1

    sampler_type = args[current_index]
    sampler_type = pyredner.RednerSamplerType.asSamplerType(sampler_type)
    current_index += 1

    use_primary_edge_sampling = args[current_index]
    current_index += 1
    use_secondary_edge_sampling = args[current_index]
    current_index += 1

    scene = redner.Scene(camera, shapes, materials, area_lights, envmap,
                         pyredner.get_use_gpu(), pyredner.get_gpu_device_id(),
                         use_primary_edge_sampling,
                         use_secondary_edge_sampling)

    # Ensure num_samples is a tuple.
    if isinstance(num_samples, int):
        num_samples = (num_samples, num_samples)

    options = redner.RenderOptions(seed, num_samples[0], max_bounces, channels,
                                   sampler_type)
    num_channels = redner.compute_num_channels(channels)

    with tf.device(pyredner.get_device_name()):
        rendered_image = tf.zeros(
            shape=[resolution[0], resolution[1], num_channels],
            dtype=tf.float32)

        start = time.time()

        # pdb.set_trace()
        redner.render(scene, options,
                      redner.float_ptr(pyredner.data_ptr(rendered_image)),
                      redner.float_ptr(0), None, redner.float_ptr(0))
        time_elapsed = time.time() - start
        if print_timing:
            print('Forward pass, time: %.5f s' % time_elapsed)

        # # For debugging
        # debug_img = tf.zeros((256, 256, 3), dtype=tf.float32)
        # redner.render(scene,
        #               options,
        #               redner.float_ptr(pyredner.data_ptr(rendered_image)),
        #               redner.float_ptr(0),
        #               None,
        #               redner.float_ptr(pyredner.data_ptr(debug_img)))
        # pyredner.imwrite(debug_img, 'debug.png')
        # exit()

        # import pdb; pdb.set_trace()

    ctx.shapes = shapes
    ctx.materials = materials
    ctx.area_lights = area_lights
    ctx.envmap = envmap
    ctx.scene = scene
    ctx.options = options
    ctx.num_samples = num_samples
    ctx.num_channels = __num_channels
    return rendered_image
Example #12
specular = (specular - np.min(specular) + 1e-3) / (np.max(specular) -
                                                   np.min(specular))
specular = tf.convert_to_tensor(np.tile(np.reshape(specular, (256, 256, 1)),
                                        (1, 1, 3)),
                                dtype=tf.float32)
roughness = perlin(x, y, seed=2)
roughness = (roughness - np.min(roughness) + 1e-3) / (np.max(roughness) -
                                                      np.min(roughness))
roughness = tf.convert_to_tensor(np.reshape(roughness, (256, 256, 1)),
                                 dtype=tf.float32)

# Use GPU if available
pyredner.set_use_gpu(
    tf.test.is_gpu_available(cuda_only=True, min_cuda_compute_capability=None))

# Set up the scene using TensorFlow tensors
with tf.device('/device:cpu:' + str(pyredner.get_cpu_device_id())):
    position = tf.Variable([0.0, 0.0, -5.0], dtype=tf.float32)
    look_at = tf.Variable([0.0, 0.0, 0.0], dtype=tf.float32)
    up = tf.Variable([0.0, 1.0, 0.0], dtype=tf.float32)
    fov = tf.Variable([45.0], dtype=tf.float32)
    clip_near = 1e-2
    resolution = (256, 256)
    cam = pyredner.Camera(position=position,
                          look_at=look_at,
                          up=up,
                          fov=fov,
                          clip_near=clip_near,
                          resolution=resolution)

mat_perlin = pyredner.Material(diffuse_reflectance=diffuse,
                               specular_reflectance=specular,
                               roughness=roughness)
Example #13
 def env_to_world(self, value):
     self._env_to_world = value
     with tf.device('/device:cpu:' + str(pyredner.get_cpu_device_id())):
         self.world_to_env = tf.linalg.inv(self._env_to_world)
Example #14
 def cam_to_ndc(self, value):
     with tf.device('/device:cpu:' + str(pyredner.get_cpu_device_id())):
         self._cam_to_ndc = tf.identity(value).cpu()
         self.ndc_to_cam = tf.linalg.inv(self._cam_to_ndc)
Example #15
def serialize_scene(scene: pyredner.Scene,
                    num_samples: Union[int, Tuple[int, int]],
                    max_bounces: int,
                    channels=[redner.channels.radiance],
                    sampler_type=redner.SamplerType.independent,
                    use_primary_edge_sampling=True,
                    use_secondary_edge_sampling=True) -> List:
    """
        Given a pyredner scene & rendering options, convert them to a linear list of arguments,
        so that we can use it in TensorFlow.

        Args
        ====
        scene: pyredner.Scene
        num_samples: int
            number of samples per pixel for forward and backward passes
            can be an integer or a tuple of 2 integers
            if a single integer is provided, use the same number of samples
            for both
        max_bounces: int
            number of bounces for global illumination
            1 means direct lighting only
        channels: List[redner.channels]
            | A list of channels that should be present in the output image
            | the following channels are supported\:
            | redner.channels.radiance,
            | redner.channels.alpha,
            | redner.channels.depth,
            | redner.channels.position,
            | redner.channels.geometry_normal,
            | redner.channels.shading_normal,
            | redner.channels.uv,
            | redner.channels.diffuse_reflectance,
            | redner.channels.specular_reflectance,
            | redner.channels.vertex_color,
            | redner.channels.roughness,
            | redner.channels.generic_texture,
            | redner.channels.shape_id,
            | redner.channels.material_id
            | all channels, except for shape id and material id, are differentiable
        sampler_type: redner.SamplerType
            | Which sampling pattern to use?
            | see `Chapter 7 of the PBRT book <http://www.pbr-book.org/3ed-2018/Sampling_and_Reconstruction.html>`
              for an explanation of the difference between different samplers.
            | The following samplers are supported:
            | redner.SamplerType.independent
            | redner.SamplerType.sobol
        use_primary_edge_sampling: bool

        use_secondary_edge_sampling: bool

    """
    cam = scene.camera
    num_shapes = len(scene.shapes)
    num_materials = len(scene.materials)
    num_lights = len(scene.area_lights)
    num_channels = len(channels)

    for light_id, light in enumerate(scene.area_lights):
        scene.shapes[light.shape_id].light_id = light_id

    args = []
    args.append(tf.constant(num_shapes))
    args.append(tf.constant(num_materials))
    args.append(tf.constant(num_lights))
    with tf.device('/device:cpu:' + str(pyredner.get_cpu_device_id())):
        if cam.position is None:
            args.append(__EMPTY_TENSOR)
            args.append(__EMPTY_TENSOR)
            args.append(__EMPTY_TENSOR)
        else:
            args.append(tf.identity(cam.position))
            args.append(tf.identity(cam.look_at))
            args.append(tf.identity(cam.up))
        if cam.cam_to_world is None:
            args.append(__EMPTY_TENSOR)
            args.append(__EMPTY_TENSOR)
        else:
            args.append(tf.identity(cam.cam_to_world))
            args.append(tf.identity(cam.world_to_cam))
        args.append(tf.identity(cam.intrinsic_mat_inv))
        args.append(tf.identity(cam.intrinsic_mat))
    args.append(tf.constant(cam.clip_near))
    args.append(tf.constant(cam.resolution))
    args.append(RednerCameraType.asTensor(cam.camera_type))
    for shape in scene.shapes:
        with tf.device(pyredner.get_device_name()):
            args.append(tf.identity(shape.vertices))
            args.append(tf.identity(shape.indices))
            if shape.uvs is None:
                args.append(__EMPTY_TENSOR)
            else:
                args.append(tf.identity(shape.uvs))
            if shape.normals is None:
                args.append(__EMPTY_TENSOR)
            else:
                args.append(tf.identity(shape.normals))
            if shape.uv_indices is None:
                args.append(__EMPTY_TENSOR)
            else:
                args.append(tf.identity(shape.uv_indices))
            if shape.normal_indices is None:
                args.append(__EMPTY_TENSOR)
            else:
                args.append(tf.identity(shape.normal_indices))
            if shape.colors is None:
                args.append(__EMPTY_TENSOR)
            else:
                args.append(tf.identity(shape.colors))
        args.append(tf.constant(shape.material_id))
        args.append(tf.constant(shape.light_id))
    for material in scene.materials:
        with tf.device(pyredner.get_device_name()):
            args.append(tf.identity(material.diffuse_reflectance.mipmap))
            args.append(tf.identity(material.diffuse_reflectance.uv_scale))
            args.append(tf.identity(material.specular_reflectance.mipmap))
            args.append(tf.identity(material.specular_reflectance.uv_scale))
            args.append(tf.identity(material.roughness.mipmap))
            args.append(tf.identity(material.roughness.uv_scale))
            if material.generic_texture is not None:
                args.append(tf.identity(material.generic_texture.mipmap))
                args.append(tf.identity(material.generic_texture.uv_scale))
            else:
                args.append(__EMPTY_TENSOR)
                args.append(__EMPTY_TENSOR)
            if material.normal_map is not None:
                args.append(tf.identity(material.normal_map.mipmap))
                args.append(tf.identity(material.normal_map.uv_scale))
            else:
                args.append(__EMPTY_TENSOR)
                args.append(__EMPTY_TENSOR)
        args.append(tf.constant(material.compute_specular_lighting))
        args.append(tf.constant(material.two_sided))
        args.append(tf.constant(material.use_vertex_color))
    with tf.device('/device:cpu:' + str(pyredner.get_cpu_device_id())):
        for light in scene.area_lights:
            args.append(tf.constant(light.shape_id))
            args.append(tf.identity(light.intensity))
            args.append(tf.constant(light.two_sided))
    if scene.envmap is not None:
        with tf.device(pyredner.get_device_name()):
            args.append(tf.identity(scene.envmap.values.mipmap))
            args.append(tf.identity(scene.envmap.values.uv_scale))
        with tf.device('/device:cpu:' + str(pyredner.get_cpu_device_id())):
            args.append(tf.identity(scene.envmap.env_to_world))
            args.append(tf.identity(scene.envmap.world_to_env))
        with tf.device(pyredner.get_device_name()):
            args.append(tf.identity(scene.envmap.sample_cdf_ys))
            args.append(tf.identity(scene.envmap.sample_cdf_xs))
        args.append(scene.envmap.pdf_norm)
    else:
        args.append(__EMPTY_TENSOR)
        args.append(__EMPTY_TENSOR)
        args.append(__EMPTY_TENSOR)
        args.append(__EMPTY_TENSOR)
        args.append(__EMPTY_TENSOR)
        args.append(__EMPTY_TENSOR)
        args.append(__EMPTY_TENSOR)

    args.append(tf.constant(num_samples))
    args.append(tf.constant(max_bounces))
    args.append(tf.constant(num_channels))
    for ch in channels:
        args.append(RednerChannels.asTensor(ch))

    args.append(RednerSamplerType.asTensor(sampler_type))
    args.append(tf.constant(use_primary_edge_sampling))
    args.append(tf.constant(use_secondary_edge_sampling))
    return args
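A hedged end-to-end sketch: the argument list produced above is normally splatted into pyredner's render custom op (the render/imwrite names follow the pyredner API as I understand it):

import pyredner_tensorflow as pyredner

# scene is a pyredner.Scene built elsewhere.
scene_args = pyredner.serialize_scene(scene = scene,
                                      num_samples = 16,
                                      max_bounces = 1)
img = pyredner.render(0, *scene_args)   # seed first, then the flat args
pyredner.imwrite(img, 'out.png')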
Example #16
def serialize_scene(scene: pyredner.Scene,
                    num_samples: Union[int, Tuple[int, int]],
                    max_bounces: int,
                    channels=[redner.channels.radiance],
                    sampler_type=redner.SamplerType.independent,
                    use_primary_edge_sampling=True,
                    use_secondary_edge_sampling=True,
                    sample_pixel_center: bool = False) -> List:
    """
        Given a pyredner scene & rendering options, convert them to a linear list of arguments,
        so that we can use it in TensorFlow.

        Args
        ====
        scene: pyredner.Scene
        num_samples: int
            number of samples per pixel for forward and backward passes
            can be an integer or a tuple of 2 integers
            if a single integer is provided, use the same number of samples
            for both
        max_bounces: int
            number of bounces for global illumination
            1 means direct lighting only
        channels: List[redner.channels]
            | A list of channels that should be present in the output image
            | the following channels are supported\:
            | redner.channels.radiance,
            | redner.channels.alpha,
            | redner.channels.depth,
            | redner.channels.position,
            | redner.channels.geometry_normal,
            | redner.channels.shading_normal,
            | redner.channels.uv,
            | redner.channels.diffuse_reflectance,
            | redner.channels.specular_reflectance,
            | redner.channels.vertex_color,
            | redner.channels.roughness,
            | redner.channels.generic_texture,
            | redner.channels.shape_id,
            | redner.channels.triangle_id,
            | redner.channels.material_id
            | all channels, except for shape id, triangle id and material id, are differentiable
        sampler_type: redner.SamplerType
            | Which sampling pattern to use?
            | see `Chapter 7 of the PBRT book <http://www.pbr-book.org/3ed-2018/Sampling_and_Reconstruction.html>`
              for an explanation of the difference between different samplers.
            | The following samplers are supported:
            | redner.SamplerType.independent
            | redner.SamplerType.sobol
        use_primary_edge_sampling: bool

        use_secondary_edge_sampling: bool

        sample_pixel_center: bool
            Always sample at the pixel center when rendering.
            This trades noise with aliasing.
            If this option is activated, the rendering becomes non-differentiable
            (since there is no antialiasing integral),
            and redner's edge sampling becomes an approximation to the gradients of the aliased rendering.
    """
    # TODO: figure out a way to determine whether a TF tensor requires gradient or not
    cam = scene.camera
    num_shapes = len(scene.shapes)
    num_materials = len(scene.materials)
    num_lights = len(scene.area_lights)
    num_channels = len(channels)

    for light_id, light in enumerate(scene.area_lights):
        scene.shapes[light.shape_id].light_id = light_id

    if max_bounces == 0:
        use_secondary_edge_sampling = False

    args = []
    args.append(tf.constant(num_shapes))
    args.append(tf.constant(num_materials))
    args.append(tf.constant(num_lights))
    with tf.device('/device:cpu:' + str(pyredner.get_cpu_device_id())):
        if cam.position is None:
            args.append(__EMPTY_TENSOR)
            args.append(__EMPTY_TENSOR)
            args.append(__EMPTY_TENSOR)
        else:
            args.append(tf.identity(cam.position))
            args.append(tf.identity(cam.look_at))
            args.append(tf.identity(cam.up))
        if cam.cam_to_world is None:
            args.append(__EMPTY_TENSOR)
            args.append(__EMPTY_TENSOR)
        else:
            args.append(tf.identity(cam.cam_to_world))
            args.append(tf.identity(cam.world_to_cam))
        args.append(tf.identity(cam.intrinsic_mat_inv))
        args.append(tf.identity(cam.intrinsic_mat))
    args.append(tf.constant(cam.clip_near))
    args.append(tf.constant(cam.resolution))
    viewport = cam.viewport
    if viewport is None:
        viewport = (0, 0, cam.resolution[0], cam.resolution[1])
    # Clamp the viewport if necessary
    viewport = (max(viewport[0], 0), max(viewport[1], 0),
                min(viewport[2],
                    cam.resolution[0]), min(viewport[3], cam.resolution[1]))
    args.append(tf.constant(viewport))
    args.append(RednerCameraType.asTensor(cam.camera_type))
    for shape in scene.shapes:
        with tf.device(pyredner.get_device_name()):
            args.append(tf.identity(shape.vertices))
            # HACK: tf.bitcast forces TensorFlow to copy int32 to GPU memory.
            # tf.identity stopped working as of TF 2.1 (printing the device
            # says it is on GPU, but the address returned by data_ptr is wrong).
            # Hopefully the TF team will fix this in the future.
            args.append(tf.bitcast(shape.indices, type=tf.int32))
            if shape.uvs is None:
                args.append(__EMPTY_TENSOR)
            else:
                args.append(tf.identity(shape.uvs))
            if shape.normals is None:
                args.append(__EMPTY_TENSOR)
            else:
                args.append(tf.identity(shape.normals))
            if shape.uv_indices is None:
                args.append(__EMPTY_TENSOR)
            else:
                args.append(tf.bitcast(shape.uv_indices, type=tf.int32))
            if shape.normal_indices is None:
                args.append(__EMPTY_TENSOR)
            else:
                args.append(tf.bitcast(shape.normal_indices, type=tf.int32))
            if shape.colors is None:
                args.append(__EMPTY_TENSOR)
            else:
                args.append(tf.identity(shape.colors))
        args.append(tf.constant(shape.material_id))
        args.append(tf.constant(shape.light_id))
    for material in scene.materials:
        serialize_texture(material.diffuse_reflectance, args)
        serialize_texture(material.specular_reflectance, args)
        serialize_texture(material.roughness, args)
        serialize_texture(material.generic_texture, args)
        serialize_texture(material.normal_map, args)
        args.append(tf.constant(material.compute_specular_lighting))
        args.append(tf.constant(material.two_sided))
        args.append(tf.constant(material.use_vertex_color))
    with tf.device('/device:cpu:' + str(pyredner.get_cpu_device_id())):
        for light in scene.area_lights:
            args.append(tf.constant(light.shape_id))
            args.append(tf.identity(light.intensity))
            args.append(tf.constant(light.two_sided))
            args.append(tf.constant(light.directly_visible))
    if scene.envmap is not None:
        serialize_texture(scene.envmap.values, args)
        with tf.device('/device:cpu:' + str(pyredner.get_cpu_device_id())):
            args.append(tf.identity(scene.envmap.env_to_world))
            args.append(tf.identity(scene.envmap.world_to_env))
        with tf.device(pyredner.get_device_name()):
            args.append(tf.identity(scene.envmap.sample_cdf_ys))
            args.append(tf.identity(scene.envmap.sample_cdf_xs))
        args.append(scene.envmap.pdf_norm)
        args.append(scene.envmap.directly_visible)
    else:
        args.append(__EMPTY_TENSOR)

    args.append(tf.constant(num_samples))
    args.append(tf.constant(max_bounces))
    args.append(tf.constant(num_channels))
    for ch in channels:
        args.append(RednerChannels.asTensor(ch))

    args.append(RednerSamplerType.asTensor(sampler_type))
    args.append(tf.constant(use_primary_edge_sampling))
    args.append(tf.constant(use_secondary_edge_sampling))
    args.append(tf.constant(sample_pixel_center))
    return args
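The viewport clamp above keeps (x_min, y_min, x_max, y_max) inside the image bounds; a quick standalone illustration of the arithmetic:

resolution = (256, 256)
viewport = (-10, 0, 300, 128)    # deliberately out of range
viewport = (max(viewport[0], 0), max(viewport[1], 0),
            min(viewport[2], resolution[0]),
            min(viewport[3], resolution[1]))
print(viewport)                  # (0, 0, 256, 128)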
Example #17
def parse_shape(node, material_dict, shape_id):
    if node.attrib['type'] == 'obj' or node.attrib['type'] == 'serialized':
        to_world = tf.eye(4)
        serialized_shape_id = 0
        mat_id = -1
        light_intensity = None
        filename = ''
        max_smooth_angle = -1
        for child in node:
            if 'name' in child.attrib:
                if child.attrib['name'] == 'filename':
                    filename = child.attrib['value']
                elif child.attrib['name'] == 'toWorld':
                    to_world = parse_transform(child)
                elif child.attrib['name'] == 'shapeIndex':
                    serialized_shape_id = int(child.attrib['value'])
                elif child.attrib['name'] == 'maxSmoothAngle':
                    max_smooth_angle = float(child.attrib['value'])
            if child.tag == 'ref':
                mat_id = material_dict[child.attrib['id']]
            elif child.tag == 'emitter':
                for grandchild in child:
                    if grandchild.attrib['name'] == 'radiance':
                        light_intensity = parse_vector(
                            grandchild.attrib['value'])
                        if light_intensity.shape[0] == 1:
                            light_intensity = tf.constant([
                                light_intensity[0], light_intensity[0],
                                light_intensity[0]
                            ])

        if node.attrib['type'] == 'obj':
            _, mesh_list, _ = pyredner.load_obj(filename, obj_group=False)
            # Convert to CPU: rebuild_topology runs on the host and
            # reads/writes these tensors through raw pointers
            with tf.device('/device:cpu:' + str(pyredner.get_cpu_device_id())):
                vertices = tf.identity(mesh_list[0][1].vertices)
                indices = tf.identity(mesh_list[0][1].indices)
                uvs = mesh_list[0][1].uvs
                normals = mesh_list[0][1].normals
                uv_indices = mesh_list[0][1].uv_indices
                normal_indices = mesh_list[0][1].normal_indices
                if uvs is not None:
                    uvs = tf.identity(uvs)
                if normals is not None:
                    normals = tf.identity(normals)
                if uv_indices is not None:
                    uv_indices = tf.identity(uv_indices)
                if normal_indices is not None:
                    normal_indices = tf.identity(normal_indices)
        else:
            assert (node.attrib['type'] == 'serialized')
            mitsuba_tri_mesh = redner.load_serialized(filename,
                                                      serialized_shape_id)
            vertices = tf.convert_to_tensor(mitsuba_tri_mesh.vertices)
            indices = tf.convert_to_tensor(mitsuba_tri_mesh.indices)
            uvs = tf.convert_to_tensor(mitsuba_tri_mesh.uvs)
            normals = tf.convert_to_tensor(mitsuba_tri_mesh.normals)
            if uvs.shape[0] == 0:
                uvs = None
            if normals.shape[0] == 0:
                normals = None
            uv_indices = None  # Serialized doesn't use different indices for UV & normal
            normal_indices = None

        # Transform the vertices and normals
        vertices = tf.concat(
            (vertices, tf.ones([vertices.shape[0], 1], dtype=tf.float32)),
            axis=1)
        vertices = vertices @ tf.transpose(to_world, [1, 0])
        vertices = vertices / vertices[:, 3:4]
        vertices = vertices[:, 0:3]

        if normals is not None:
            # [1, 0] is the actual transpose; the permutation [0, 1] is a no-op
            normals = normals @ (tf.linalg.inv(tf.transpose(to_world,
                                                            [1, 0]))[:3, :3])
        assert (vertices is not None)
        assert (indices is not None)
        if max_smooth_angle >= 0:
            if normals is None:
                normals = tf.zeros_like(vertices)
            new_num_vertices = redner.rebuild_topology(\
                redner.float_ptr(pyredner.data_ptr(vertices)),
                redner.int_ptr(pyredner.data_ptr(indices)),
                redner.float_ptr(pyredner.data_ptr(uvs) if uvs is not None else 0),
                redner.float_ptr(pyredner.data_ptr(normals) if normals is not None else 0),
                redner.int_ptr(pyredner.data_ptr(uv_indices) if uv_indices is not None else 0),
                int(vertices.shape[0]),
                int(indices.shape[0]),
                max_smooth_angle)
            print('Rebuilt topology, original vertices size: {}, new vertices size: {}'.format(\
                int(vertices.shape[0]), new_num_vertices))
            # rebuild_topology writes the merged mesh back through the raw
            # pointers above. TF tensors have no in-place resize_ (that is a
            # PyTorch method), so shrink to the new vertex count by slicing.
            vertices = vertices[:new_num_vertices, :]
            if uvs is not None:
                uvs = uvs[:new_num_vertices, :]
            if normals is not None:
                normals = normals[:new_num_vertices, :]

        lgt = None
        if light_intensity is not None:
            lgt = pyredner.AreaLight(shape_id, light_intensity)

        return pyredner.Shape(vertices=vertices,
                              indices=indices,
                              uvs=uvs,
                              normals=normals,
                              uv_indices=uv_indices,
                              normal_indices=normal_indices,
                              material_id=mat_id), lgt
    elif node.attrib['type'] == 'rectangle':
        indices = tf.constant([[0, 2, 1], [1, 2, 3]], dtype=tf.int32)
        vertices = tf.constant([[-1.0, -1.0, 0.0], [-1.0, 1.0, 0.0],
                                [1.0, -1.0, 0.0], [1.0, 1.0, 0.0]])
        uvs = None
        normals = None
        to_world = tf.eye(4)
        mat_id = -1
        light_intensity = None
        for child in node:
            if 'name' in child.attrib:
                if child.attrib['name'] == 'toWorld':
                    to_world = parse_transform(child)
            if child.tag == 'ref':
                mat_id = material_dict[child.attrib['id']]
            elif child.tag == 'emitter':
                for grandchild in child:
                    if grandchild.attrib['name'] == 'radiance':
                        light_intensity = parse_vector(
                            grandchild.attrib['value'])
                        if light_intensity.shape[0] == 1:
                            light_intensity = tf.constant([
                                light_intensity[0], light_intensity[0],
                                light_intensity[0]
                            ])
        # Transform the vertices and normals
        vertices = tf.concat(
            (vertices, tf.ones([vertices.shape[0], 1], dtype=tf.float32)),
            axis=1)
        vertices = vertices @ tf.transpose(to_world, [1, 0])
        vertices = vertices / vertices[:, 3:4]
        vertices = vertices[:, 0:3]
        if normals is not None:
            normals = normals @ (tf.linalg.inv(tf.transpose(to_world,
                                                            [1, 0]))[:3, :3])
        assert (vertices is not None)
        assert (indices is not None)
        lgt = None
        if light_intensity is not None:
            lgt = pyredner.AreaLight(shape_id, light_intensity)

        return pyredner.Shape(vertices=vertices,
                              indices=indices,
                              uvs=uvs,
                              normals=normals,
                              material_id=mat_id), lgt
    else:
        assert False, 'Unsupported shape type: ' + node.attrib['type']
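
Both branches of parse_shape apply to_world with points laid out as row vectors: positions are lifted to homogeneous coordinates, multiplied by the transposed 4x4, and dehomogenized, while normals are multiplied by the inverse of the transposed matrix. A standalone sketch of that convention follows, assuming only plain TensorFlow (the helper names are illustrative):

import tensorflow as tf

def transform_points(to_world, points):
    # Row-vector convention: lift to homogeneous coordinates, multiply by
    # the transposed 4x4, then divide by w and drop it.
    ones = tf.ones([points.shape[0], 1], dtype=tf.float32)
    p = tf.concat((points, ones), axis=1) @ tf.transpose(to_world, [1, 0])
    return (p / p[:, 3:4])[:, 0:3]

def transform_normals(to_world, normals):
    # Normals take the inverse of the transposed matrix, mirroring parse_shape.
    return normals @ tf.linalg.inv(tf.transpose(to_world, [1, 0]))[:3, :3]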
Example #18
    def backward(grad_img):
        scene = ctx.scene
        options = ctx.options

        buffers = create_gradient_buffers(ctx)

        if not get_use_correlated_random_number():
            # Decouple the forward/backward random numbers by adding a big prime number
            options.seed += 1000003
        start = time.time()

        options.num_samples = ctx.num_samples[1]
        with tf.device(pyredner.get_device_name()):
            grad_img = tf.identity(grad_img)
            redner.render(
                scene,
                options,
                redner.float_ptr(0),  # rendered_image
                redner.float_ptr(pyredner.data_ptr(grad_img)),
                buffers.d_scene,
                redner.float_ptr(0),  # translational_gradient_image
                redner.float_ptr(0))  # debug_image
        time_elapsed = time.time() - start

        if get_print_timing():
            print('Backward pass, time: %.5f s' % time_elapsed)

        ret_list = []
        ret_list.append(None)  # seed
        ret_list.append(None)  # num_shapes
        ret_list.append(None)  # num_materials
        ret_list.append(None)  # num_lights
        if ctx.camera.use_look_at:
            ret_list.append(buffers.d_position)
            ret_list.append(buffers.d_look_at)
            ret_list.append(buffers.d_up)
            ret_list.append(None)  # cam_to_world
            ret_list.append(None)  # world_to_cam
        else:
            ret_list.append(None)  # pos
            ret_list.append(None)  # look
            ret_list.append(None)  # up
            ret_list.append(buffers.d_cam_to_world)
            ret_list.append(buffers.d_world_to_cam)
        ret_list.append(buffers.d_intrinsic_mat_inv)
        ret_list.append(buffers.d_intrinsic_mat)
        ret_list.append(None)  # clip near
        ret_list.append(None)  # resolution
        ret_list.append(None)  # viewport
        ret_list.append(None)  # camera_type

        num_shapes = len(ctx.shapes)
        for i in range(num_shapes):
            ret_list.append(buffers.d_vertices_list[i])
            ret_list.append(None)  # indices
            ret_list.append(buffers.d_uvs_list[i])
            ret_list.append(buffers.d_normals_list[i])
            ret_list.append(None)  # uv_indices
            ret_list.append(None)  # normal_indices
            ret_list.append(buffers.d_colors_list[i])
            ret_list.append(None)  # material id
            ret_list.append(None)  # light id

        num_materials = len(ctx.materials)
        for i in range(num_materials):
            ret_list.append(None)  # num_levels
            for d_diffuse in buffers.d_diffuse_list[i]:
                ret_list.append(d_diffuse)
            ret_list.append(buffers.d_diffuse_uv_scale_list[i])
            ret_list.append(None)  # num_levels
            for d_specular in buffers.d_specular_list[i]:
                ret_list.append(d_specular)
            ret_list.append(buffers.d_specular_uv_scale_list[i])
            ret_list.append(None)  # num_levels
            for d_roughness in buffers.d_roughness_list[i]:
                ret_list.append(d_roughness)
            ret_list.append(buffers.d_roughness_uv_scale_list[i])
            if buffers.d_generic_list[i] is None:
                ret_list.append(None)  # num_levels
            else:
                ret_list.append(None)  # num_levels
                for d_generic in buffers.d_generic_list[i]:
                    ret_list.append(d_generic)
                ret_list.append(buffers.d_generic_uv_scale_list[i])
            if buffers.d_normal_map_list[i] is None:
                ret_list.append(None)  # num_levels
            else:
                ret_list.append(None)  # num_levels
                for d_normal_map in buffers.d_normal_map_list[i]:
                    ret_list.append(d_normal_map)
                ret_list.append(buffers.d_normal_map_uv_scale_list[i])
            ret_list.append(None)  # compute_specular_lighting
            ret_list.append(None)  # two sided
            ret_list.append(None)  # use_vertex_color

        num_area_lights = len(ctx.area_lights)
        for i in range(num_area_lights):
            ret_list.append(None)  # shape id
            with tf.device('/device:cpu:' + str(pyredner.get_cpu_device_id())):
                ret_list.append(tf.identity(buffers.d_intensity_list[i]))
            ret_list.append(None)  # two_sided
            ret_list.append(None)  # directly_visible

        if ctx.envmap is not None:
            ret_list.append(None)  # num_levels
            for d_values in buffers.d_envmap_values:
                ret_list.append(d_values)
            ret_list.append(buffers.d_envmap_uv_scale)
            ret_list.append(None)  # env_to_world
            with tf.device('/device:cpu:' + str(pyredner.get_cpu_device_id())):
                ret_list.append(tf.identity(buffers.d_world_to_env))
            ret_list.append(None)  # sample_cdf_ys
            ret_list.append(None)  # sample_cdf_xs
            ret_list.append(None)  # pdf_norm
            ret_list.append(None)  # directly_visible
        else:
            ret_list.append(None)

        ret_list.append(None)  # num samples
        ret_list.append(None)  # num bounces
        ret_list.append(None)  # num channels
        for _ in range(ctx.num_channel_args):
            ret_list.append(None)  # channel

        ret_list.append(None)  # sampler type
        ret_list.append(None)  # use_primary_edge_sampling
        ret_list.append(None)  # use_secondary_edge_sampling
        ret_list.append(None)  # sample_pixel_center

        return ret_list
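
ret_list above must contain exactly one entry per tensor the forward pass consumed, in the same order, with None marking the non-differentiable slots (seed, indices, ids, flags); this is the contract tf.custom_gradient imposes on the gradient function. A toy sketch of the same contract (toy_render is illustrative, not the pyredner op):

import tensorflow as tf

@tf.custom_gradient
def toy_render(vertices, material_id):
    image = tf.reduce_sum(vertices)  # stand-in for the real renderer
    def backward(grad_img):
        # One slot per forward input, in order; None marks the
        # non-differentiable integer argument, just like ret_list.
        return grad_img * tf.ones_like(vertices), None
    return image, backward

vertices = tf.ones([4, 3])
with tf.GradientTape() as tape:
    tape.watch(vertices)
    loss = toy_render(vertices, tf.constant(0))
print(tape.gradient(loss, vertices))  # gradient flows only to vertices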
Example #19
    def backward(grad_img):
        camera = ctx.camera
        scene = ctx.scene
        options = ctx.options

        with tf.device(pyredner.get_device_name()):
            if camera.use_look_at:
                d_position = tf.zeros(3, dtype=tf.float32)
                d_look_at = tf.zeros(3, dtype=tf.float32)
                d_up = tf.zeros(3, dtype=tf.float32)
                d_cam_to_world = None
                d_world_to_cam = None
            else:
                d_position = None
                d_look_at = None
                d_up = None
                d_cam_to_world = tf.zeros([4, 4], dtype=tf.float32)
                d_world_to_cam = tf.zeros([4, 4], dtype=tf.float32)
            d_intrinsic_mat_inv = tf.zeros([3, 3], dtype=tf.float32)
            d_intrinsic_mat = tf.zeros([3, 3], dtype=tf.float32)
            if camera.use_look_at:
                d_camera = redner.DCamera(
                    redner.float_ptr(pyredner.data_ptr(d_position)),
                    redner.float_ptr(pyredner.data_ptr(d_look_at)),
                    redner.float_ptr(pyredner.data_ptr(d_up)),
                    redner.float_ptr(0),  # cam_to_world
                    redner.float_ptr(0),  # world_to_cam
                    redner.float_ptr(pyredner.data_ptr(d_intrinsic_mat_inv)),
                    redner.float_ptr(pyredner.data_ptr(d_intrinsic_mat)))
            else:
                d_camera = redner.DCamera(
                    redner.float_ptr(0), redner.float_ptr(0),
                    redner.float_ptr(0),
                    redner.float_ptr(pyredner.data_ptr(d_cam_to_world)),
                    redner.float_ptr(pyredner.data_ptr(d_world_to_cam)),
                    redner.float_ptr(pyredner.data_ptr(d_intrinsic_mat_inv)),
                    redner.float_ptr(pyredner.data_ptr(d_intrinsic_mat)))

        d_vertices_list = []
        d_uvs_list = []
        d_normals_list = []
        d_colors_list = []
        d_shapes = []
        with tf.device(pyredner.get_device_name()):
            for i, shape in enumerate(ctx.shapes):
                num_vertices = shape.num_vertices
                d_vertices = tf.zeros([num_vertices, 3], dtype=tf.float32)
                d_uvs = tf.zeros([num_vertices, 2],
                                 dtype=tf.float32) if shape.has_uvs() else None
                d_normals = tf.zeros(
                    [num_vertices, 3],
                    dtype=tf.float32) if shape.has_normals() else None
                d_colors = tf.zeros(
                    [num_vertices, 3],
                    dtype=tf.float32) if shape.has_colors() else None
                d_vertices_list.append(d_vertices)
                d_uvs_list.append(d_uvs)
                d_normals_list.append(d_normals)
                d_colors_list.append(d_colors)
                d_shapes.append(redner.DShape(\
                    redner.float_ptr(pyredner.data_ptr(d_vertices)),
                    redner.float_ptr(pyredner.data_ptr(d_uvs) if d_uvs is not None else 0),
                    redner.float_ptr(pyredner.data_ptr(d_normals) if d_normals is not None else 0),
                    redner.float_ptr(pyredner.data_ptr(d_colors) if d_colors is not None else 0)))

        d_diffuse_list = []
        d_specular_list = []
        d_roughness_list = []
        d_normal_map_list = []
        d_diffuse_uv_scale_list = []
        d_specular_uv_scale_list = []
        d_roughness_uv_scale_list = []
        d_generic_list = []
        d_generic_uv_scale_list = []
        d_normal_map_uv_scale_list = []
        d_materials = []
        with tf.device(pyredner.get_device_name()):
            for material in ctx.materials:
                if material.get_diffuse_size(0)[0] == 0:
                    d_diffuse = [tf.zeros(3, dtype=tf.float32)]
                else:
                    d_diffuse = []
                    for l in range(material.get_diffuse_levels()):
                        diffuse_size = material.get_diffuse_size(l)
                        d_diffuse.append(\
                            tf.zeros([diffuse_size[1],
                                      diffuse_size[0],
                                      3], dtype=tf.float32))

                if material.get_specular_size(0)[0] == 0:
                    d_specular = [tf.zeros(3, dtype=tf.float32)]
                else:
                    d_specular = []
                    for l in range(material.get_specular_levels()):
                        specular_size = material.get_specular_size(l)
                        d_specular.append(\
                            tf.zeros([specular_size[1],
                                      specular_size[0],
                                      3], dtype=tf.float32))

                if material.get_roughness_size(0)[0] == 0:
                    d_roughness = [tf.zeros(1, dtype=tf.float32)]
                else:
                    d_roughness = []
                    for l in range(material.get_roughness_levels()):
                        roughness_size = material.get_roughness_size(l)
                        d_roughness.append(\
                            tf.zeros([roughness_size[1],
                                      roughness_size[0],
                                      1], dtype=tf.float32))
                # HACK: tensorflow's eager mode uses a cache to store scalar
                #       constants to avoid memory copies. If we passed scalar
                #       tensors into the C++ code and modified them, we would
                #       corrupt the cache, causing incorrect results in future
                #       scalar constant creations. Thus we force tensorflow to
                #       copy by adding a zero.
                # (also see https://github.com/tensorflow/tensorflow/issues/11186
                #  for more discussion regarding copying tensors)
                if d_roughness[0].shape.num_elements() == 1:
                    d_roughness[0] = d_roughness[0] + 0

                if material.get_generic_levels() == 0:
                    d_generic = None
                else:
                    d_generic = []
                    for l in range(material.get_generic_levels()):
                        generic_size = material.get_generic_size(l)
                        d_generic.append(\
                            tf.zeros([generic_size[2],
                                      generic_size[1],
                                      generic_size[0]], dtype=tf.float32))

                if material.get_normal_map_levels() == 0:
                    d_normal_map = None
                else:
                    d_normal_map = []
                    for l in range(material.get_normal_map_levels()):
                        normal_map_size = material.get_normal_map_size(l)
                        d_normal_map.append(\
                            tf.zeros([normal_map_size[1],
                                      normal_map_size[0],
                                      3], dtype=tf.float32))

                d_diffuse_list.append(d_diffuse)
                d_specular_list.append(d_specular)
                d_roughness_list.append(d_roughness)
                d_generic_list.append(d_generic)
                d_normal_map_list.append(d_normal_map)

                d_diffuse_uv_scale = tf.zeros([2], dtype=tf.float32)
                d_specular_uv_scale = tf.zeros([2], dtype=tf.float32)
                d_roughness_uv_scale = tf.zeros([2], dtype=tf.float32)
                if d_generic is None:
                    d_generic_uv_scale = None
                else:
                    d_generic_uv_scale = tf.zeros([2], dtype=tf.float32)
                if d_normal_map is None:
                    d_normal_map_uv_scale = None
                else:
                    d_normal_map_uv_scale = tf.zeros([2], dtype=tf.float32)
                d_diffuse_uv_scale_list.append(d_diffuse_uv_scale)
                d_specular_uv_scale_list.append(d_specular_uv_scale)
                d_roughness_uv_scale_list.append(d_roughness_uv_scale)
                d_generic_uv_scale_list.append(d_generic_uv_scale)
                d_normal_map_uv_scale_list.append(d_normal_map_uv_scale)

                if len(d_diffuse[0].shape) == 1:
                    d_diffuse_tex = redner.Texture3(\
                        [redner.float_ptr(pyredner.data_ptr(d_diffuse[0]))],
                        [0],
                        [0],
                        3,
                        redner.float_ptr(pyredner.data_ptr(d_diffuse_uv_scale)))
                else:
                    d_diffuse_tex = redner.Texture3(\
                        [redner.float_ptr(pyredner.data_ptr(x)) for x in d_diffuse],
                        [x.shape[1] for x in d_diffuse],
                        [x.shape[0] for x in d_diffuse],
                        3,
                        redner.float_ptr(pyredner.data_ptr(d_diffuse_uv_scale)))

                if len(d_specular[0].shape) == 1:
                    d_specular_tex = redner.Texture3(\
                        [redner.float_ptr(pyredner.data_ptr(d_specular[0]))],
                        [0],
                        [0],
                        3,
                        redner.float_ptr(pyredner.data_ptr(d_specular_uv_scale)))
                else:
                    d_specular_tex = redner.Texture3(\
                        [redner.float_ptr(pyredner.data_ptr(x)) for x in d_specular],
                        [x.shape[1] for x in d_specular],
                        [x.shape[0] for x in d_specular],
                        3,
                        redner.float_ptr(pyredner.data_ptr(d_specular_uv_scale)))

                if len(d_roughness[0].shape) == 1:
                    d_roughness_tex = redner.Texture1(\
                        [redner.float_ptr(pyredner.data_ptr(d_roughness[0]))],
                        [0],
                        [0],
                        1,
                        redner.float_ptr(pyredner.data_ptr(d_roughness_uv_scale)))
                else:
                    d_roughness_tex = redner.Texture1(\
                        [redner.float_ptr(pyredner.data_ptr(x)) for x in d_roughness],
                        [x.shape[1] for x in d_roughness],
                        [x.shape[0] for x in d_roughness],
                        1,
                        redner.float_ptr(pyredner.data_ptr(d_roughness_uv_scale)))

                if d_generic is None:
                    d_generic_tex = redner.TextureN(\
                        [], [], [], 0, redner.float_ptr(0))
                else:
                    d_generic_tex = redner.TextureN(\
                        [redner.float_ptr(pyredner.data_ptr(x)) for x in d_generic],
                        [x.shape[1] for x in d_generic],
                        [x.shape[0] for x in d_generic],
                        d_generic[0].shape[2],
                        redner.float_ptr(pyredner.data_ptr(d_generic_uv_scale)))

                if d_normal_map is None:
                    d_normal_map_tex = redner.Texture3(\
                        [], [], [], 0, redner.float_ptr(0))
                else:
                    d_normal_map_tex = redner.Texture3(\
                        [redner.float_ptr(pyredner.data_ptr(x)) for x in d_normal_map],
                        [x.shape[1] for x in d_normal_map],
                        [x.shape[0] for x in d_normal_map],
                        3,
                        redner.float_ptr(pyredner.data_ptr(d_normal_map_uv_scale)))

                d_materials.append(redner.DMaterial(\
                    d_diffuse_tex, d_specular_tex, d_roughness_tex,
                    d_generic_tex, d_normal_map_tex))

        d_intensity_list = []
        d_area_lights = []
        with tf.device(pyredner.get_device_name()):
            for light in ctx.area_lights:
                d_intensity = tf.zeros(3, dtype=tf.float32)
                d_intensity_list.append(d_intensity)
                d_area_lights.append(\
                    redner.DAreaLight(redner.float_ptr(pyredner.data_ptr(d_intensity))))

        d_envmap = None
        if ctx.envmap is not None:
            envmap = ctx.envmap
            with tf.device(pyredner.get_device_name()):
                d_envmap_values = []
                for l in range(envmap.get_levels()):
                    size = envmap.get_size(l)
                    d_envmap_values.append(\
                        tf.zeros([size[1],
                                  size[0],
                                  3], dtype=tf.float32))
                d_envmap_uv_scale = tf.zeros([2], dtype=tf.float32)
                d_world_to_env = tf.zeros([4, 4], dtype=tf.float32)
                d_envmap_tex = redner.Texture3(\
                    [redner.float_ptr(pyredner.data_ptr(x)) for x in d_envmap_values],
                    [x.shape[1] for x in d_envmap_values],
                    [x.shape[0] for x in d_envmap_values],
                    3,
                    redner.float_ptr(pyredner.data_ptr(d_envmap_uv_scale)))
                d_envmap = redner.DEnvironmentMap(
                    d_envmap_tex,
                    redner.float_ptr(pyredner.data_ptr(d_world_to_env)))

        d_scene = redner.DScene(d_camera, d_shapes, d_materials, d_area_lights,
                                d_envmap, pyredner.get_use_gpu(),
                                pyredner.get_gpu_device_id())
        if not get_use_correlated_random_number():
            # Decouple the forward/backward random numbers by adding a big prime number
            options.seed += 1000003
        start = time.time()

        options.num_samples = ctx.num_samples[1]
        with tf.device(pyredner.get_device_name()):
            grad_img = tf.identity(grad_img)
            redner.render(
                scene,
                options,
                redner.float_ptr(0),  # rendered_image
                redner.float_ptr(pyredner.data_ptr(grad_img)),
                d_scene,
                redner.float_ptr(0))  # debug_image
        time_elapsed = time.time() - start

        if get_print_timing():
            print('Backward pass, time: %.5f s' % time_elapsed)

        # # For debugging
        # pyredner.imwrite(grad_img, 'grad_img.exr')
        # grad_img = tf.ones([256, 256, 3], dtype=tf.float32)
        # debug_img = tf.zeros([256, 256, 3], dtype=tf.float32)
        # redner.render(scene, options,
        #               redner.float_ptr(0),
        #               redner.float_ptr(pyredner.data_ptr(grad_img)),
        #               d_scene,
        #               redner.float_ptr(pyredner.data_ptr(debug_img)))
        # pyredner.imwrite(debug_img, 'debug.exr')
        # pyredner.imwrite(-debug_img, 'debug_.exr')
        # exit()

        ret_list = []
        ret_list.append(None)  # seed
        ret_list.append(None)  # num_shapes
        ret_list.append(None)  # num_materials
        ret_list.append(None)  # num_lights
        if camera.use_look_at:
            ret_list.append(d_position)
            ret_list.append(d_look_at)
            ret_list.append(d_up)
            ret_list.append(None)  # cam_to_world
            ret_list.append(None)  # world_to_cam
        else:
            ret_list.append(None)  # pos
            ret_list.append(None)  # look
            ret_list.append(None)  # up
            ret_list.append(d_cam_to_world)
            ret_list.append(d_world_to_cam)
        ret_list.append(d_intrinsic_mat_inv)
        ret_list.append(d_intrinsic_mat)
        ret_list.append(None)  # clip near
        ret_list.append(None)  # resolution
        ret_list.append(None)  # camera_type

        num_shapes = len(ctx.shapes)
        for i in range(num_shapes):
            ret_list.append(d_vertices_list[i])
            ret_list.append(None)  # indices
            ret_list.append(d_uvs_list[i])
            ret_list.append(d_normals_list[i])
            ret_list.append(None)  # uv_indices
            ret_list.append(None)  # normal_indices
            ret_list.append(d_colors_list[i])
            ret_list.append(None)  # material id
            ret_list.append(None)  # light id

        num_materials = len(ctx.materials)
        for i in range(num_materials):
            ret_list.append(None)  # num_levels
            for d_diffuse in d_diffuse_list[i]:
                ret_list.append(d_diffuse)
            ret_list.append(d_diffuse_uv_scale_list[i])
            ret_list.append(None)  # num_levels
            for d_specular in d_specular_list[i]:
                ret_list.append(d_specular)
            ret_list.append(d_specular_uv_scale_list[i])
            ret_list.append(None)  # num_levels
            for d_roughness in d_roughness_list[i]:
                ret_list.append(d_roughness)
            ret_list.append(d_roughness_uv_scale_list[i])
            if d_generic_list[i] is None:
                ret_list.append(None)  # num_levels
            else:
                ret_list.append(None)  # num_levels
                for d_generic in d_generic_list[i]:
                    ret_list.append(d_generic)
                ret_list.append(d_generic_uv_scale_list[i])
            if d_normal_map_list[i] is None:
                ret_list.append(None)  # num_levels
            else:
                ret_list.append(None)  # num_levels
                for d_normal_map in d_normal_map_list[i]:
                    ret_list.append(d_normal_map)
                ret_list.append(d_normal_map_uv_scale_list[i])
            ret_list.append(None)  # compute_specular_lighting
            ret_list.append(None)  # two sided
            ret_list.append(None)  # use_vertex_color

        num_area_lights = len(ctx.area_lights)
        for i in range(num_area_lights):
            ret_list.append(None)  # shape id
            with tf.device('/device:cpu:' + str(pyredner.get_cpu_device_id())):
                ret_list.append(tf.identity(d_intensity_list[i]))
            ret_list.append(None)  # two sided

        if ctx.envmap is not None:
            ret_list.append(None)  # num_levels
            for d_values in d_envmap_values:
                ret_list.append(d_values)
            ret_list.append(d_envmap_uv_scale)
            ret_list.append(None)  # env_to_world
            with tf.device('/device:cpu:' + str(pyredner.get_cpu_device_id())):
                ret_list.append(tf.identity(d_world_to_env))
            ret_list.append(None)  # sample_cdf_ys
            ret_list.append(None)  # sample_cdf_xs
            ret_list.append(None)  # pdf_norm
        else:
            ret_list.append(None)

        ret_list.append(None)  # num samples
        ret_list.append(None)  # num bounces
        ret_list.append(None)  # num channels
        for _ in range(ctx.num_channels):
            ret_list.append(None)  # channel

        ret_list.append(None)  # sampler type
        ret_list.append(None)  # use_primary_edge_sampling
        ret_list.append(None)  # use_secondary_edge_sampling
        ret_list.append(None)  # sample_pixel_center

        return ret_list
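
The scalar-copy workaround flagged in the HACK comment above (d_roughness[0] + 0) can be reproduced in isolation: adding zero makes eager TensorFlow allocate a fresh buffer, so the C++ core's in-place writes through redner.float_ptr cannot corrupt the cached scalar constant. A minimal sketch, with force_copy as an illustrative name:

import tensorflow as tf

def force_copy(t):
    # Eager TF caches scalar constants (see the HACK comment above); a
    # zeroed scalar about to be written in place through a raw pointer
    # must therefore own its memory. Adding zero forces the copy.
    return t + 0

d_roughness = tf.zeros(1, dtype=tf.float32)
if d_roughness.shape.num_elements() == 1:
    d_roughness = force_copy(d_roughness)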