Example #1
    def forward(ctx, seed, *args):
        """
            Forward rendering pass: given a serialized scene, render and return an image.
        """
        # Unpack arguments
        current_index = 0
        num_shapes = args[current_index]
        current_index += 1
        num_materials = args[current_index]
        current_index += 1
        num_lights = args[current_index]
        current_index += 1
        cam_to_world = args[current_index]
        current_index += 1
        world_to_cam = args[current_index]
        current_index += 1
        fov_factor = args[current_index]
        current_index += 1
        clip_near = args[current_index]
        current_index += 1
        resolution = args[current_index]
        current_index += 1
        fisheye = args[current_index]
        current_index += 1
        assert (cam_to_world.is_contiguous())
        assert (world_to_cam.is_contiguous())
        camera = redner.Camera(resolution[1], resolution[0],
                               redner.float_ptr(cam_to_world.data_ptr()),
                               redner.float_ptr(world_to_cam.data_ptr()),
                               fov_factor.item(), clip_near, fisheye)
        shapes = []
        for i in range(num_shapes):
            vertices = args[current_index]
            current_index += 1
            indices = args[current_index]
            current_index += 1
            uvs = args[current_index]
            current_index += 1
            normals = args[current_index]
            current_index += 1
            material_id = args[current_index]
            current_index += 1
            light_id = args[current_index]
            current_index += 1
            assert (vertices.is_contiguous())
            assert (indices.is_contiguous())
            if uvs is not None:
                assert (uvs.is_contiguous())
            if normals is not None:
                assert (normals.is_contiguous())
            shapes.append(redner.Shape(\
                redner.float_ptr(vertices.data_ptr()),
                redner.int_ptr(indices.data_ptr()),
                redner.float_ptr(uvs.data_ptr() if uvs is not None else 0),
                redner.float_ptr(normals.data_ptr() if normals is not None else 0),
                int(vertices.shape[0]),
                int(indices.shape[0]),
                material_id,
                light_id))
        materials = []
        for i in range(num_materials):
            diffuse_reflectance = args[current_index]
            current_index += 1
            diffuse_uv_scale = args[current_index]
            current_index += 1
            specular_reflectance = args[current_index]
            current_index += 1
            specular_uv_scale = args[current_index]
            current_index += 1
            roughness = args[current_index]
            current_index += 1
            roughness_uv_scale = args[current_index]
            current_index += 1
            two_sided = args[current_index]
            current_index += 1
            assert (diffuse_reflectance.is_contiguous())
            if diffuse_reflectance.dim() == 1:
                diffuse_reflectance = redner.Texture3(\
                    redner.float_ptr(diffuse_reflectance.data_ptr()), 0, 0, 0,
                    redner.float_ptr(diffuse_uv_scale.data_ptr()))
            else:
                diffuse_reflectance = redner.Texture3(\
                    redner.float_ptr(diffuse_reflectance.data_ptr()),
                    int(diffuse_reflectance.shape[2]), # width
                    int(diffuse_reflectance.shape[1]), # height
                    int(diffuse_reflectance.shape[0]), # num levels
                    redner.float_ptr(diffuse_uv_scale.data_ptr()))
            assert (specular_reflectance.is_contiguous())
            if specular_reflectance.dim() == 1:
                specular_reflectance = redner.Texture3(\
                    redner.float_ptr(specular_reflectance.data_ptr()), 0, 0, 0,
                    redner.float_ptr(specular_uv_scale.data_ptr()))
            else:
                specular_reflectance = redner.Texture3(\
                    redner.float_ptr(specular_reflectance.data_ptr()),
                    int(specular_reflectance.shape[2]), # width
                    int(specular_reflectance.shape[1]), # height
                    int(specular_reflectance.shape[0]), # num levels
                    redner.float_ptr(specular_uv_scale.data_ptr()))
            assert (roughness.is_contiguous())
            if roughness.dim() == 1:
                roughness = redner.Texture1(\
                    redner.float_ptr(roughness.data_ptr()), 0, 0, 0,
                    redner.float_ptr(roughness_uv_scale.data_ptr()))
            else:
                roughness = redner.Texture1(\
                    redner.float_ptr(roughness.data_ptr()),
                    int(roughness.shape[2]), # width
                    int(roughness.shape[1]), # height
                    int(roughness.shape[0]), # num levels
                    redner.float_ptr(roughness_uv_scale.data_ptr()))
            materials.append(redner.Material(\
                diffuse_reflectance,
                specular_reflectance,
                roughness,
                two_sided))

        lights = []
        for i in range(num_lights):
            shape_id = args[current_index]
            current_index += 1
            intensity = args[current_index]
            current_index += 1
            two_sided = args[current_index]
            current_index += 1

            lights.append(
                redner.Light(shape_id, redner.float_ptr(intensity.data_ptr()),
                             two_sided))

        scene = redner.Scene(camera, shapes, materials, lights,
                             pyredner.get_use_gpu())
        num_samples = args[current_index]
        current_index += 1
        max_bounces = args[current_index]
        current_index += 1
        options = redner.RenderOptions(seed, num_samples, max_bounces)
        rendered_image = torch.zeros(resolution[0],
                                     resolution[1],
                                     3,
                                     device=pyredner.get_device())
        redner.render(scene, options,
                      redner.float_ptr(rendered_image.data_ptr()),
                      redner.float_ptr(0), None, redner.float_ptr(0))

        # # For debugging
        # debug_img = torch.zeros(256, 256, 3)
        # redner.render(scene,
        #               options,
        #               redner.float_ptr(rendered_image.data_ptr()),
        #               redner.float_ptr(0),
        #               None,
        #               redner.float_ptr(debug_img.data_ptr()))
        # pyredner.imwrite(debug_img, 'debug.exr')
        # exit()

        ctx.shapes = shapes
        ctx.materials = materials
        ctx.lights = lights
        ctx.scene = scene
        ctx.options = options
        return rendered_image
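This forward is written in the shape of the static forward of a torch.autograd.Function subclass. Below is a minimal usage sketch; the RenderFunction class name and the serialize_scene helper that flattens a scene into the positional argument list are assumptions for illustration, not part of the snippet above.

import torch

class RenderFunction(torch.autograd.Function):
    # Hypothetical wrapper around the forward/backward shown in these examples.
    @staticmethod
    def forward(ctx, seed, *args):
        ...  # body as in Example #1

    @staticmethod
    def backward(ctx, grad_img):
        ...  # body as in Example #3

# The argument list must follow the exact unpacking order of forward:
# num_shapes, num_materials, num_lights, camera tensors, per-shape tensors,
# per-material tensors, per-light tensors, num_samples, max_bounces.
# scene_args = serialize_scene(scene, num_samples=4, max_bounces=1)  # assumed helper
# img = RenderFunction.apply(seed, *scene_args)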
Example #2
def create_gradient_buffers(ctx):
    """
        Allocate zero-initialized gradient buffers for all differentiable
        scene quantities stored in the context and assemble them into a
        redner.DScene for the backward rendering pass.
    """
    scene = ctx.scene
    options = ctx.options
    camera = ctx.camera

    buffers = Context()

    with tf.device(pyredner.get_device_name()):
        if camera.use_look_at:
            buffers.d_position = tf.zeros(3, dtype=tf.float32)
            buffers.d_look_at = tf.zeros(3, dtype=tf.float32)
            buffers.d_up = tf.zeros(3, dtype=tf.float32)
            buffers.d_cam_to_world = None
            buffers.d_world_to_cam = None
        else:
            buffers.d_position = None
            buffers.d_look_at = None
            buffers.d_up = None
            buffers.d_cam_to_world = tf.zeros([4, 4], dtype=tf.float32)
            buffers.d_world_to_cam = tf.zeros([4, 4], dtype=tf.float32)
        buffers.d_intrinsic_mat_inv = tf.zeros([3, 3], dtype=tf.float32)
        buffers.d_intrinsic_mat = tf.zeros([3, 3], dtype=tf.float32)
        if camera.use_look_at:
            buffers.d_camera = redner.DCamera(\
                redner.float_ptr(pyredner.data_ptr(buffers.d_position)),
                redner.float_ptr(pyredner.data_ptr(buffers.d_look_at)),
                redner.float_ptr(pyredner.data_ptr(buffers.d_up)),
                redner.float_ptr(0), # cam_to_world
                redner.float_ptr(0), # world_to_cam
                redner.float_ptr(pyredner.data_ptr(buffers.d_intrinsic_mat_inv)),
                redner.float_ptr(pyredner.data_ptr(buffers.d_intrinsic_mat)))
        else:
            buffers.d_camera = redner.DCamera(\
                redner.float_ptr(0),
                redner.float_ptr(0),
                redner.float_ptr(0),
                redner.float_ptr(pyredner.data_ptr(buffers.d_cam_to_world)),
                redner.float_ptr(pyredner.data_ptr(buffers.d_world_to_cam)),
                redner.float_ptr(pyredner.data_ptr(buffers.d_intrinsic_mat_inv)),
                redner.float_ptr(pyredner.data_ptr(buffers.d_intrinsic_mat)))

    buffers.d_vertices_list = []
    buffers.d_uvs_list = []
    buffers.d_normals_list = []
    buffers.d_colors_list = []
    buffers.d_shapes = []
    with tf.device(pyredner.get_device_name()):
        for i, shape in enumerate(ctx.shapes):
            num_vertices = shape.num_vertices
            d_vertices = tf.zeros([num_vertices, 3], dtype=tf.float32)
            d_uvs = tf.zeros([num_vertices, 2],
                             dtype=tf.float32) if shape.has_uvs() else None
            d_normals = tf.zeros(
                [num_vertices, 3],
                dtype=tf.float32) if shape.has_normals() else None
            d_colors = tf.zeros(
                [num_vertices, 3],
                dtype=tf.float32) if shape.has_colors() else None
            buffers.d_vertices_list.append(d_vertices)
            buffers.d_uvs_list.append(d_uvs)
            buffers.d_normals_list.append(d_normals)
            buffers.d_colors_list.append(d_colors)
            buffers.d_shapes.append(redner.DShape(\
                redner.float_ptr(pyredner.data_ptr(d_vertices)),
                redner.float_ptr(pyredner.data_ptr(d_uvs) if d_uvs is not None else 0),
                redner.float_ptr(pyredner.data_ptr(d_normals) if d_normals is not None else 0),
                redner.float_ptr(pyredner.data_ptr(d_colors) if d_colors is not None else 0)))

    buffers.d_diffuse_list = []
    buffers.d_specular_list = []
    buffers.d_roughness_list = []
    buffers.d_normal_map_list = []
    buffers.d_diffuse_uv_scale_list = []
    buffers.d_specular_uv_scale_list = []
    buffers.d_roughness_uv_scale_list = []
    buffers.d_generic_list = []
    buffers.d_generic_uv_scale_list = []
    buffers.d_normal_map_uv_scale_list = []
    buffers.d_materials = []
    with tf.device(pyredner.get_device_name()):
        for material in ctx.materials:
            if material.get_diffuse_size(0)[0] == 0:
                d_diffuse = [tf.zeros(3, dtype=tf.float32)]
            else:
                d_diffuse = []
                for l in range(material.get_diffuse_levels()):
                    diffuse_size = material.get_diffuse_size(l)
                    d_diffuse.append(\
                        tf.zeros([diffuse_size[1],
                                  diffuse_size[0],
                                  3], dtype=tf.float32))

            if material.get_specular_size(0)[0] == 0:
                d_specular = [tf.zeros(3, dtype=tf.float32)]
            else:
                d_specular = []
                for l in range(material.get_specular_levels()):
                    specular_size = material.get_specular_size(l)
                    d_specular.append(\
                        tf.zeros([specular_size[1],
                                  specular_size[0],
                                  3], dtype=tf.float32))

            if material.get_roughness_size(0)[0] == 0:
                d_roughness = [tf.zeros(1, dtype=tf.float32)]
            else:
                d_roughness = []
                for l in range(material.get_roughness_levels()):
                    roughness_size = material.get_roughness_size(l)
                    d_roughness.append(\
                        tf.zeros([roughness_size[1],
                                  roughness_size[0],
                                  1], dtype=tf.float32))
            # HACK: tensorflow's eager mode uses a cache to store scalar
            #       constants to avoid memory copy. If we pass scalar tensors
            #       into the C++ code and modify them, we would corrupt the
            #       cache, causing incorrect results in future scalar constant
            #       creations. Thus we force tensorflow to copy by adding a zero.
            # (also see https://github.com/tensorflow/tensorflow/issues/11186
            #  for more discussion regarding copying tensors)
            if d_roughness[0].shape.num_elements() == 1:
                d_roughness[0] = d_roughness[0] + 0

            if material.get_generic_levels() == 0:
                d_generic = None
            else:
                d_generic = []
                for l in range(material.get_generic_levels()):
                    generic_size = material.get_generic_size(l)
                    d_generic.append(\
                        tf.zeros([generic_size[2],
                                  generic_size[1],
                                  generic_size[0]], dtype=tf.float32))

            if material.get_normal_map_levels() == 0:
                d_normal_map = None
            else:
                d_normal_map = []
                for l in range(material.get_normal_map_levels()):
                    normal_map_size = material.get_normal_map_size(l)
                    d_normal_map.append(\
                        tf.zeros([normal_map_size[1],
                                  normal_map_size[0],
                                  3], dtype=tf.float32))

            buffers.d_diffuse_list.append(d_diffuse)
            buffers.d_specular_list.append(d_specular)
            buffers.d_roughness_list.append(d_roughness)
            buffers.d_generic_list.append(d_generic)
            buffers.d_normal_map_list.append(d_normal_map)

            d_diffuse_uv_scale = tf.zeros([2], dtype=tf.float32)
            d_specular_uv_scale = tf.zeros([2], dtype=tf.float32)
            d_roughness_uv_scale = tf.zeros([2], dtype=tf.float32)
            if d_generic is None:
                d_generic_uv_scale = None
            else:
                d_generic_uv_scale = tf.zeros([2], dtype=tf.float32)
            if d_normal_map is None:
                d_normal_map_uv_scale = None
            else:
                d_normal_map_uv_scale = tf.zeros([2], dtype=tf.float32)
            buffers.d_diffuse_uv_scale_list.append(d_diffuse_uv_scale)
            buffers.d_specular_uv_scale_list.append(d_specular_uv_scale)
            buffers.d_roughness_uv_scale_list.append(d_roughness_uv_scale)
            buffers.d_generic_uv_scale_list.append(d_generic_uv_scale)
            buffers.d_normal_map_uv_scale_list.append(d_normal_map_uv_scale)

            if len(d_diffuse[0].shape) == 1:
                d_diffuse_tex = redner.Texture3(\
                    [redner.float_ptr(pyredner.data_ptr(d_diffuse[0]))],
                    [0],
                    [0],
                    3,
                    redner.float_ptr(pyredner.data_ptr(d_diffuse_uv_scale)))
            else:
                d_diffuse_tex = redner.Texture3(\
                    [redner.float_ptr(pyredner.data_ptr(x)) for x in d_diffuse],
                    [x.shape[1] for x in d_diffuse],
                    [x.shape[0] for x in d_diffuse],
                    3,
                    redner.float_ptr(pyredner.data_ptr(d_diffuse_uv_scale)))

            if len(d_specular[0].shape) == 1:
                d_specular_tex = redner.Texture3(\
                    [redner.float_ptr(pyredner.data_ptr(d_specular[0]))],
                    [0],
                    [0],
                    3,
                    redner.float_ptr(pyredner.data_ptr(d_specular_uv_scale)))
            else:
                d_specular_tex = redner.Texture3(\
                    [redner.float_ptr(pyredner.data_ptr(x)) for x in d_specular],
                    [x.shape[1] for x in d_specular],
                    [x.shape[0] for x in d_specular],
                    3,
                    redner.float_ptr(pyredner.data_ptr(d_specular_uv_scale)))

            if len(d_roughness[0].shape) == 1:
                d_roughness_tex = redner.Texture1(\
                    [redner.float_ptr(pyredner.data_ptr(d_roughness[0]))],
                    [0],
                    [0],
                    1,
                    redner.float_ptr(pyredner.data_ptr(d_roughness_uv_scale)))
            else:
                d_roughness_tex = redner.Texture1(\
                    [redner.float_ptr(pyredner.data_ptr(x)) for x in d_roughness],
                    [x.shape[1] for x in d_roughness],
                    [x.shape[0] for x in d_roughness],
                    1,
                    redner.float_ptr(pyredner.data_ptr(d_roughness_uv_scale)))

            if d_generic is None:
                d_generic_tex = redner.TextureN(\
                    [], [], [], 0, redner.float_ptr(0))
            else:
                d_generic_tex = redner.TextureN(\
                    [redner.float_ptr(pyredner.data_ptr(x)) for x in d_generic],
                    [x.shape[1] for x in d_generic],
                    [x.shape[0] for x in d_generic],
                    d_generic[0].shape[2],
                    redner.float_ptr(pyredner.data_ptr(d_generic_uv_scale)))

            if d_normal_map is None:
                d_normal_map = redner.Texture3(\
                    [], [], [], 0, redner.float_ptr(0))
            else:
                d_normal_map = redner.Texture3(\
                    [redner.float_ptr(pyredner.data_ptr(x)) for x in d_normal_map],
                    [x.shape[1] for x in d_normal_map],
                    [x.shape[0] for x in d_normal_map],
                    3,
                    redner.float_ptr(pyredner.data_ptr(d_normal_map_uv_scale)))

            buffers.d_materials.append(redner.DMaterial(\
                d_diffuse_tex, d_specular_tex, d_roughness_tex,
                d_generic_tex, d_normal_map))

    buffers.d_intensity_list = []
    buffers.d_area_lights = []
    with tf.device(pyredner.get_device_name()):
        for light in ctx.area_lights:
            d_intensity = tf.zeros(3, dtype=tf.float32)
            buffers.d_intensity_list.append(d_intensity)
            buffers.d_area_lights.append(\
                redner.DAreaLight(redner.float_ptr(pyredner.data_ptr(d_intensity))))

    buffers.d_envmap = None
    if ctx.envmap is not None:
        envmap = ctx.envmap
        with tf.device(pyredner.get_device_name()):
            buffers.d_envmap_values = []
            for l in range(envmap.get_levels()):
                size = envmap.get_size(l)
                buffers.d_envmap_values.append(\
                    tf.zeros([size[1],
                              size[0],
                              3], dtype=tf.float32))
            buffers.d_envmap_uv_scale = tf.zeros([2], dtype=tf.float32)
            buffers.d_world_to_env = tf.zeros([4, 4], dtype=tf.float32)
            d_envmap_tex = redner.Texture3(\
                [redner.float_ptr(pyredner.data_ptr(x)) for x in buffers.d_envmap_values],
                [x.shape[1] for x in buffers.d_envmap_values],
                [x.shape[0] for x in buffers.d_envmap_values],
                3,
                redner.float_ptr(pyredner.data_ptr(buffers.d_envmap_uv_scale)))
            buffers.d_envmap = redner.DEnvironmentMap(
                d_envmap_tex,
                redner.float_ptr(pyredner.data_ptr(buffers.d_world_to_env)))

    buffers.d_scene = redner.DScene(buffers.d_camera, buffers.d_shapes,
                                    buffers.d_materials, buffers.d_area_lights,
                                    buffers.d_envmap, pyredner.get_use_gpu(),
                                    pyredner.get_gpu_device_id())
    return buffers
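The Context object used above behaves as a plain attribute container; the backward pass then points redner.render at buffers.d_scene so the C++ side writes gradients directly into these zero-filled tensors. A small sketch under that assumption (the Context definition and the commented call are illustrative, mirroring the backward render call in Example #4):

class Context:
    # Plain attribute bag shared between the forward and backward passes.
    pass

# Sketch of how the buffers are consumed; grad_img is the upstream gradient
# with respect to the rendered image.
# buffers = create_gradient_buffers(ctx)
# redner.render(ctx.scene,
#               ctx.options,
#               redner.float_ptr(0),                            # rendered_image (unused)
#               redner.float_ptr(pyredner.data_ptr(grad_img)),  # d_rendered_image
#               buffers.d_scene,                                # gradients are written here
#               redner.float_ptr(0))                            # debug_image (unused)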
Example #3
    def backward(ctx, grad_img):
        """
            Backward rendering pass: propagate the image gradient grad_img
            back to the differentiable scene parameters passed to forward.
        """
        if not grad_img.is_contiguous():
            grad_img = grad_img.contiguous()
        scene = ctx.scene
        options = ctx.options

        d_fov_factor = torch.zeros(1)
        d_cam_to_world = torch.zeros(4, 4)
        d_world_to_cam = torch.zeros(4, 4)
        d_camera = redner.DCamera(redner.float_ptr(d_cam_to_world.data_ptr()),
                                  redner.float_ptr(d_world_to_cam.data_ptr()),
                                  redner.float_ptr(d_fov_factor.data_ptr()))
        d_vertices_list = []
        d_uvs_list = []
        d_normals_list = []
        d_shapes = []
        for shape in ctx.shapes:
            num_vertices = shape.num_vertices
            d_vertices = torch.zeros(num_vertices,
                                     3,
                                     device=pyredner.get_device())
            d_uvs = torch.zeros(
                num_vertices, 2,
                device=pyredner.get_device()) if shape.has_uvs() else None
            d_normals = torch.zeros(
                num_vertices, 3,
                device=pyredner.get_device()) if shape.has_normals() else None
            d_vertices_list.append(d_vertices)
            d_uvs_list.append(d_uvs)
            d_normals_list.append(d_normals)
            d_shapes.append(redner.DShape(\
                redner.float_ptr(d_vertices.data_ptr()),
                redner.float_ptr(d_uvs.data_ptr() if d_uvs is not None else 0),
                redner.float_ptr(d_normals.data_ptr() if d_normals is not None else 0)))

        d_diffuse_list = []
        d_specular_list = []
        d_roughness_list = []
        d_materials = []
        for material in ctx.materials:
            diffuse_size = material.get_diffuse_size()
            specular_size = material.get_specular_size()
            roughness_size = material.get_roughness_size()
            if diffuse_size[0] == 0:
                d_diffuse = torch.zeros(3, device=pyredner.get_device())
            else:
                d_diffuse = torch.zeros(diffuse_size[2],
                                        diffuse_size[1],
                                        diffuse_size[0],
                                        3,
                                        device=pyredner.get_device())
            if specular_size[0] == 0:
                d_specular = torch.zeros(3, device=pyredner.get_device())
            else:
                d_specular = torch.zeros(specular_size[2],
                                         specular_size[1],
                                         specular_size[0],
                                         3,
                                         device=pyredner.get_device())
            if roughness_size[0] == 0:
                d_roughness = torch.zeros(1, device=pyredner.get_device())
            else:
                d_roughness = torch.zeros(roughness_size[2],
                                          roughness_size[1],
                                          roughness_size[0],
                                          device=pyredner.get_device())
            d_diffuse_list.append(d_diffuse)
            d_specular_list.append(d_specular)
            d_roughness_list.append(d_roughness)
            d_diffuse_uv_scale = torch.zeros(2)
            d_specular_uv_scale = torch.zeros(2)
            d_roughness_uv_scale = torch.zeros(2)
            d_diffuse_tex = redner.Texture3(\
                redner.float_ptr(d_diffuse.data_ptr()),
                diffuse_size[0], diffuse_size[1], diffuse_size[2],
                redner.float_ptr(d_diffuse_uv_scale.data_ptr()))
            d_specular_tex = redner.Texture3(\
                redner.float_ptr(d_specular.data_ptr()),
                specular_size[0], specular_size[1], specular_size[2],
                redner.float_ptr(d_specular_uv_scale.data_ptr()))
            d_roughness_tex = redner.Texture1(\
                redner.float_ptr(d_roughness.data_ptr()),
                roughness_size[0], roughness_size[1], roughness_size[2],
                redner.float_ptr(d_roughness_uv_scale.data_ptr()))
            d_materials.append(redner.DMaterial(\
                d_diffuse_tex, d_specular_tex, d_roughness_tex))

        d_intensity_list = []
        d_lights = []
        for light in ctx.lights:
            d_intensity = torch.zeros(3, device=pyredner.get_device())
            d_intensity_list.append(d_intensity)
            d_lights.append(
                redner.DLight(redner.float_ptr(d_intensity.data_ptr())))

        d_scene = redner.DScene(d_camera, d_shapes, d_materials, d_lights,
                                pyredner.get_use_gpu())
        if not get_use_correlated_random_number():
            # Decouple the forward/backward random numbers by adding a big prime number
            options.seed += 1000003
        redner.render(scene, options, redner.float_ptr(0),
                      redner.float_ptr(grad_img.data_ptr()), d_scene,
                      redner.float_ptr(0))

        # # For debugging
        # grad_img = torch.ones(256, 256, 3)
        # debug_img = torch.zeros(256, 256, 3)
        # redner.render(scene, options,
        #               redner.float_ptr(0),
        #               redner.float_ptr(grad_img.data_ptr()),
        #               d_scene,
        #               redner.float_ptr(debug_img.data_ptr()))
        # pyredner.imwrite(debug_img, 'debug.exr')
        # exit()

        ret_list = []
        ret_list.append(None)  # seed
        ret_list.append(None)  # num_shapes
        ret_list.append(None)  # num_materials
        ret_list.append(None)  # num_lights
        ret_list.append(d_cam_to_world)
        ret_list.append(d_world_to_cam)
        ret_list.append(d_fov_factor)
        ret_list.append(None)  # clip near
        ret_list.append(None)  # resolution
        ret_list.append(None)  # fisheye

        num_shapes = len(ctx.shapes)
        for i in range(num_shapes):
            ret_list.append(d_vertices_list[i])
            ret_list.append(None)  # indices
            ret_list.append(d_uvs_list[i])
            ret_list.append(d_normals_list[i])
            ret_list.append(None)  # material id
            ret_list.append(None)  # light id

        num_materials = len(ctx.materials)
        for i in range(num_materials):
            ret_list.append(d_diffuse_list[i])
            ret_list.append(None)  # diffuse_uv_scale
            ret_list.append(d_specular_list[i])
            ret_list.append(None)  # specular_uv_scale
            ret_list.append(d_roughness_list[i])
            ret_list.append(None)  # roughness_uv_scale
            ret_list.append(None)  # two sided

        num_lights = len(ctx.lights)
        for i in range(num_lights):
            ret_list.append(None)  # shape id
            ret_list.append(d_intensity_list[i].cpu())
            ret_list.append(None)  # two sided

        ret_list.append(None)  # num samples
        ret_list.append(None)  # num bounces

        return tuple(ret_list)
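The long run of None entries exists because torch.autograd.Function requires backward to return exactly one gradient per forward input, in the same order, with None for non-differentiable arguments (seed, counts, indices, flags). A tiny self-contained illustration of that contract, unrelated to redner and with illustrative names:

import torch

class Scale(torch.autograd.Function):
    @staticmethod
    def forward(ctx, factor, x):            # two inputs -> two gradients expected
        ctx.factor = factor
        return factor * x

    @staticmethod
    def backward(ctx, grad_out):
        # One entry per forward input: None for the plain-float 'factor',
        # a tensor gradient for 'x'.
        return None, ctx.factor * grad_out

x = torch.ones(3, requires_grad=True)
Scale.apply(2.0, x).sum().backward()
print(x.grad)  # tensor([2., 2., 2.])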
Example #4
    def backward(grad_img):
        """
            Backward rendering pass: propagate the image gradient grad_img
            back to the scene parameters captured in the global context.
        """
        global __ctx
        ctx = __ctx
        scene = ctx.scene
        options = ctx.options
        with tf.device(pyredner.get_device_name()):
            d_position = tf.zeros(3, dtype=tf.float32)
            d_look_at = tf.zeros(3, dtype=tf.float32)
            d_up = tf.zeros(3, dtype=tf.float32)
            d_ndc_to_cam = tf.zeros([3, 3], dtype=tf.float32)
            d_cam_to_ndc = tf.zeros([3, 3], dtype=tf.float32)
            d_camera = redner.DCamera(
                redner.float_ptr(pyredner.data_ptr(d_position)),
                redner.float_ptr(pyredner.data_ptr(d_look_at)),
                redner.float_ptr(pyredner.data_ptr(d_up)),
                redner.float_ptr(pyredner.data_ptr(d_ndc_to_cam)),
                redner.float_ptr(pyredner.data_ptr(d_cam_to_ndc)))
        d_vertices_list = []
        d_uvs_list = []
        d_normals_list = []
        d_shapes = []
        with tf.device(pyredner.get_device_name()):
            for i, shape in enumerate(ctx.shapes):
                num_vertices = shape.num_vertices
                d_vertices = tf.zeros([num_vertices, 3], dtype=tf.float32)
                d_uvs = tf.zeros([num_vertices, 2],
                                 dtype=tf.float32) if shape.has_uvs() else None
                d_normals = tf.zeros(
                    [num_vertices, 3],
                    dtype=tf.float32) if shape.has_normals() else None
                d_vertices_list.append(d_vertices)
                d_uvs_list.append(d_uvs)
                d_normals_list.append(d_normals)
                d_shapes.append(redner.DShape(\
                    redner.float_ptr(pyredner.data_ptr(d_vertices)),
                    redner.float_ptr(pyredner.data_ptr(d_uvs) if d_uvs is not None else 0),
                    redner.float_ptr(pyredner.data_ptr(d_normals) if d_normals is not None else 0)))

        d_diffuse_list = []
        d_specular_list = []
        d_roughness_list = []
        d_normal_map_list = []
        d_diffuse_uv_scale_list = []
        d_specular_uv_scale_list = []
        d_roughness_uv_scale_list = []
        d_normal_map_uv_scale_list = []
        d_materials = []
        with tf.device(pyredner.get_device_name()):
            for material in ctx.materials:
                diffuse_size = material.get_diffuse_size()
                specular_size = material.get_specular_size()
                roughness_size = material.get_roughness_size()
                normal_map_size = material.get_normal_map_size()
                if diffuse_size[0] == 0:
                    d_diffuse = tf.zeros(3, dtype=tf.float32)
                else:
                    d_diffuse = tf.zeros(
                        [diffuse_size[2], diffuse_size[1], diffuse_size[0], 3],
                        dtype=tf.float32)
                if specular_size[0] == 0:
                    d_specular = tf.zeros(3, dtype=tf.float32)
                else:
                    d_specular = tf.zeros([
                        specular_size[2], specular_size[1], specular_size[0], 3
                    ],
                                          dtype=tf.float32)
                if roughness_size[0] == 0:
                    d_roughness = tf.zeros(1, dtype=tf.float32)
                else:
                    d_roughness = tf.zeros([
                        roughness_size[2], roughness_size[1],
                        roughness_size[0], 1
                    ],
                                           dtype=tf.float32)
                # HACK: tensorflow's eager mode uses a cache to store scalar
                #       constants to avoid memory copy. If we pass scalar tensors
                #       into the C++ code and modify them, we would corrupt the
                #       cache, causing incorrect results in future scalar constant
                #       creations. Thus we force tensorflow to copy by adding a zero.
                # (also see https://github.com/tensorflow/tensorflow/issues/11186
                #  for more discussion regarding copying tensors)
                if d_roughness.shape.num_elements() == 1:
                    d_roughness = d_roughness + 0
                if normal_map_size[0] == 0:
                    d_normal_map = None
                else:
                    d_normal_map = tf.zeros([
                        normal_map_size[2], normal_map_size[1],
                        normal_map_size[0], 3
                    ],
                                            dtype=tf.float32)

                d_diffuse_list.append(d_diffuse)
                d_specular_list.append(d_specular)
                d_roughness_list.append(d_roughness)
                d_normal_map_list.append(d_normal_map)
                d_diffuse = redner.float_ptr(pyredner.data_ptr(d_diffuse))
                d_specular = redner.float_ptr(pyredner.data_ptr(d_specular))
                d_roughness = redner.float_ptr(pyredner.data_ptr(d_roughness))
                if normal_map_size[0] > 0:
                    d_normal_map = redner.float_ptr(
                        pyredner.data_ptr(d_normal_map))
                d_diffuse_uv_scale = tf.zeros([2], dtype=tf.float32)
                d_specular_uv_scale = tf.zeros([2], dtype=tf.float32)
                d_roughness_uv_scale = tf.zeros([2], dtype=tf.float32)
                if normal_map_size[0] > 0:
                    d_normal_map_uv_scale = tf.zeros([2], dtype=tf.float32)
                else:
                    d_normal_map_uv_scale = None
                d_diffuse_uv_scale_list.append(d_diffuse_uv_scale)
                d_specular_uv_scale_list.append(d_specular_uv_scale)
                d_roughness_uv_scale_list.append(d_roughness_uv_scale)
                d_normal_map_uv_scale_list.append(d_normal_map_uv_scale)
                d_diffuse_uv_scale = redner.float_ptr(
                    pyredner.data_ptr(d_diffuse_uv_scale))
                d_specular_uv_scale = redner.float_ptr(
                    pyredner.data_ptr(d_specular_uv_scale))
                d_roughness_uv_scale = redner.float_ptr(
                    pyredner.data_ptr(d_roughness_uv_scale))
                if normal_map_size[0] > 0:
                    d_normal_map_uv_scale = redner.float_ptr(
                        pyredner.data_ptr(d_normal_map_uv_scale))
                d_diffuse_tex = redner.Texture3(\
                    d_diffuse, diffuse_size[0], diffuse_size[1], diffuse_size[2], d_diffuse_uv_scale)
                d_specular_tex = redner.Texture3(\
                    d_specular, specular_size[0], specular_size[1], specular_size[2], d_specular_uv_scale)
                d_roughness_tex = redner.Texture1(\
                    d_roughness, roughness_size[0], roughness_size[1], roughness_size[2],  d_roughness_uv_scale)
                if normal_map_size[0] > 0:
                    d_normal_map_tex = redner.Texture3(\
                        d_normal_map, normal_map_size[0], normal_map_size[1], normal_map_size[2], d_normal_map_uv_scale)
                else:
                    d_normal_map_tex = redner.Texture3(\
                        redner.float_ptr(0), 0, 0, 0, redner.float_ptr(0))
                d_materials.append(
                    redner.DMaterial(d_diffuse_tex, d_specular_tex,
                                     d_roughness_tex, d_normal_map_tex))

        d_intensity_list = []
        d_area_lights = []
        with tf.device(pyredner.get_device_name()):
            for light in ctx.area_lights:
                d_intensity = tf.zeros(3, dtype=tf.float32)
                d_intensity_list.append(d_intensity)
                d_area_lights.append(\
                    redner.DAreaLight(redner.float_ptr(pyredner.data_ptr(d_intensity))))

        d_envmap = None
        if ctx.envmap is not None:
            envmap = ctx.envmap
            size = envmap.get_size()
            with tf.device(pyredner.get_device_name()):
                d_envmap_values = tf.zeros([size[2], size[1], size[0], 3],
                                           dtype=tf.float32)
                d_envmap_values_ptr = redner.float_ptr(
                    pyredner.data_ptr(d_envmap_values))
                d_envmap_uv_scale = tf.zeros([2], dtype=tf.float32)
                d_envmap_uv_scale_ptr = redner.float_ptr(
                    pyredner.data_ptr(d_envmap_uv_scale))
                d_world_to_env = tf.zeros([4, 4], dtype=tf.float32)
                d_world_to_env_ptr = redner.float_ptr(
                    pyredner.data_ptr(d_world_to_env))
            d_envmap_tex = redner.Texture3(\
                d_envmap_values_ptr, size[0], size[1], size[2], d_envmap_uv_scale_ptr)
            d_envmap = redner.DEnvironmentMap(d_envmap_tex, d_world_to_env_ptr)

        d_scene = redner.DScene(d_camera, d_shapes, d_materials, d_area_lights,
                                d_envmap, pyredner.get_use_gpu(), -1)
        if not get_use_correlated_random_number():
            # Decouple the forward/backward random numbers by adding a big prime number
            options.seed += 1000003
        start = time.time()

        options.num_samples = ctx.num_samples[1]
        with tf.device(pyredner.get_device_name()):
            if pyredner.get_use_gpu():
                grad_img = grad_img.gpu(pyredner.get_gpu_device_id())
            else:
                grad_img = grad_img.cpu()
            redner.render(
                scene,
                options,
                redner.float_ptr(0),  # rendered_image
                redner.float_ptr(pyredner.data_ptr(grad_img)),
                d_scene,
                redner.float_ptr(0))  # debug_image
        time_elapsed = time.time() - start

        if print_timing:
            print('Backward pass, time: %.5f s' % time_elapsed)

        # # For debugging
        # pyredner.imwrite(grad_img, 'grad_img.exr')
        # grad_img = tf.ones([256, 256, 3], dtype=tf.float32)
        # debug_img = tf.zeros([256, 256, 3], dtype=tf.float32)
        # redner.render(scene, options,
        #               redner.float_ptr(0),
        #               redner.float_ptr(pyredner.data_ptr(grad_img)),
        #               d_scene,
        #               redner.float_ptr(pyredner.data_ptr(debug_img)))
        # pyredner.imwrite(debug_img, 'debug.exr')
        # pyredner.imwrite(-debug_img, 'debug_.exr')
        # exit()

        ret_list = []
        ret_list.append(None)  # seed
        ret_list.append(None)  # num_shapes
        ret_list.append(None)  # num_materials
        ret_list.append(None)  # num_lights
        ret_list.append(d_position)
        ret_list.append(d_look_at)
        ret_list.append(d_up)
        ret_list.append(d_ndc_to_cam)
        ret_list.append(d_cam_to_ndc)
        ret_list.append(None)  # clip near
        ret_list.append(None)  # resolution
        ret_list.append(None)  # camera_type

        num_shapes = len(ctx.shapes)
        for i in range(num_shapes):
            ret_list.append(d_vertices_list[i])
            ret_list.append(None)  # indices
            ret_list.append(d_uvs_list[i])
            ret_list.append(d_normals_list[i])
            ret_list.append(None)  # material id
            ret_list.append(None)  # light id

        num_materials = len(ctx.materials)
        for i in range(num_materials):
            ret_list.append(d_diffuse_list[i])
            ret_list.append(d_diffuse_uv_scale_list[i])
            ret_list.append(d_specular_list[i])
            ret_list.append(d_specular_uv_scale_list[i])
            ret_list.append(d_roughness_list[i])
            ret_list.append(d_roughness_uv_scale_list[i])
            ret_list.append(d_normal_map_list[i])
            ret_list.append(d_normal_map_uv_scale_list[i])
            ret_list.append(None)  # two sided

        num_area_lights = len(ctx.area_lights)
        for i in range(num_area_lights):
            ret_list.append(None)  # shape id
            ret_list.append(d_intensity_list[i].cpu())
            ret_list.append(None)  # two sided

        if ctx.envmap is not None:
            ret_list.append(d_envmap_values)
            ret_list.append(d_envmap_uv_scale)
            ret_list.append(None)  # env_to_world
            ret_list.append(d_world_to_env.cpu())
            ret_list.append(None)  # sample_cdf_ys
            ret_list.append(None)  # sample_cdf_xs
            ret_list.append(None)  # pdf_norm
        else:
            ret_list.append(None)
            ret_list.append(None)
            ret_list.append(None)
            ret_list.append(None)
            ret_list.append(None)
            ret_list.append(None)
            ret_list.append(None)

        ret_list.append(None)  # num samples
        ret_list.append(None)  # num bounces
        ret_list.append(None)  # num channels
        for _ in range(ctx.num_channels):
            ret_list.append(None)  # channel

        ret_list.append(None)  # sampler type
        ret_list.append(None)  # use_primary_edge_sampling
        ret_list.append(None)  # use_secondary_edge_sampling

        # pdb.set_trace()
        return ret_list
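This is the TensorFlow counterpart of the PyTorch backward above; such a closure is typically returned from a function decorated with tf.custom_gradient, which pairs the rendered output with a gradient function returning one entry per input. A minimal, redner-independent sketch of that pattern (illustrative names only):

import tensorflow as tf

@tf.custom_gradient
def scale_by_two(x):
    y = 2.0 * x
    def backward(grad_img):
        # One gradient per input of scale_by_two.
        return 2.0 * grad_img
    return y, backward

x = tf.ones([3])
with tf.GradientTape() as tape:
    tape.watch(x)
    loss = tf.reduce_sum(scale_by_two(x))
print(tape.gradient(loss, x))  # tf.Tensor([2. 2. 2.], ...)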
Example #5
def unpack_args(seed,
                args,
                use_primary_edge_sampling=None,
                use_secondary_edge_sampling=None):
    """
        Given a list of serialized scene arguments, unpack
        all information into a Context.
    """
    # Unpack arguments
    current_index = 0
    num_shapes = int(args[current_index])
    current_index += 1
    num_materials = int(args[current_index])
    current_index += 1
    num_lights = int(args[current_index])
    current_index += 1

    # Camera arguments
    cam_position = args[current_index]
    current_index += 1
    cam_look_at = args[current_index]
    current_index += 1
    cam_up = args[current_index]
    current_index += 1
    cam_to_world = args[current_index]
    current_index += 1
    world_to_cam = args[current_index]
    current_index += 1
    intrinsic_mat_inv = args[current_index]
    current_index += 1
    intrinsic_mat = args[current_index]
    current_index += 1
    clip_near = float(args[current_index])
    current_index += 1
    resolution = args[current_index].numpy()  # Tuple[int, int]
    current_index += 1
    viewport = args[current_index].numpy()  # Tuple[int, int, int, int]
    current_index += 1
    camera_type = RednerCameraType.asCameraType(
        args[current_index])  # FIXME: Map to custom type
    current_index += 1

    with tf.device('/device:cpu:' + str(pyredner.get_cpu_device_id())):
        if is_empty_tensor(cam_to_world):
            camera = redner.Camera(
                resolution[1],
                resolution[0],
                redner.float_ptr(pyredner.data_ptr(cam_position)),
                redner.float_ptr(pyredner.data_ptr(cam_look_at)),
                redner.float_ptr(pyredner.data_ptr(cam_up)),
                redner.float_ptr(0),  # cam_to_world
                redner.float_ptr(0),  # world_to_cam
                redner.float_ptr(pyredner.data_ptr(intrinsic_mat_inv)),
                redner.float_ptr(pyredner.data_ptr(intrinsic_mat)),
                clip_near,
                camera_type,
                redner.Vector2i(viewport[1], viewport[0]),
                redner.Vector2i(viewport[3], viewport[2]))
        else:
            camera = redner.Camera(
                resolution[1], resolution[0], redner.float_ptr(0),
                redner.float_ptr(0), redner.float_ptr(0),
                redner.float_ptr(pyredner.data_ptr(cam_to_world)),
                redner.float_ptr(pyredner.data_ptr(world_to_cam)),
                redner.float_ptr(pyredner.data_ptr(intrinsic_mat_inv)),
                redner.float_ptr(pyredner.data_ptr(intrinsic_mat)), clip_near,
                camera_type, redner.Vector2i(viewport[1], viewport[0]),
                redner.Vector2i(viewport[3], viewport[2]))

    with tf.device(pyredner.get_device_name()):
        shapes = []
        for i in range(num_shapes):
            vertices = args[current_index]
            current_index += 1
            indices = args[current_index]
            current_index += 1
            uvs = args[current_index]
            current_index += 1
            normals = args[current_index]
            current_index += 1
            uv_indices = args[current_index]
            current_index += 1
            normal_indices = args[current_index]
            current_index += 1
            colors = args[current_index]
            current_index += 1
            material_id = int(args[current_index])
            current_index += 1
            light_id = int(args[current_index])
            current_index += 1

            shapes.append(redner.Shape(\
                redner.float_ptr(pyredner.data_ptr(vertices)),
                redner.int_ptr(pyredner.data_ptr(indices)),
                redner.float_ptr(pyredner.data_ptr(uvs) if not is_empty_tensor(uvs) else 0),
                redner.float_ptr(pyredner.data_ptr(normals) if not is_empty_tensor(normals) else 0),
                redner.int_ptr(pyredner.data_ptr(uv_indices) if not is_empty_tensor(uv_indices) else 0),
                redner.int_ptr(pyredner.data_ptr(normal_indices) if not is_empty_tensor(normal_indices) else 0),
                redner.float_ptr(pyredner.data_ptr(colors) if not is_empty_tensor(colors) else 0),
                int(vertices.shape[0]),
                int(uvs.shape[0]) if not is_empty_tensor(uvs) else 0,
                int(normals.shape[0]) if not is_empty_tensor(normals) else 0,
                int(indices.shape[0]),
                material_id,
                light_id))

    materials = []
    with tf.device(pyredner.get_device_name()):
        for i in range(num_materials):
            num_levels = int(args[current_index])
            current_index += 1
            diffuse_reflectance = []
            for j in range(num_levels):
                diffuse_reflectance.append(args[current_index])
                current_index += 1
            diffuse_uv_scale = args[current_index]
            current_index += 1

            num_levels = int(args[current_index])
            current_index += 1
            specular_reflectance = []
            for j in range(num_levels):
                specular_reflectance.append(args[current_index])
                current_index += 1
            specular_uv_scale = args[current_index]
            current_index += 1

            num_levels = int(args[current_index])
            current_index += 1
            roughness = []
            for j in range(num_levels):
                roughness.append(args[current_index])
                current_index += 1
            roughness_uv_scale = args[current_index]
            current_index += 1

            num_levels = int(args[current_index])
            current_index += 1
            generic_texture = []
            if num_levels > 0:
                for j in range(num_levels):
                    generic_texture.append(args[current_index])
                    current_index += 1
                generic_uv_scale = args[current_index]
                current_index += 1
            else:
                generic_uv_scale = None

            num_levels = int(args[current_index])
            current_index += 1
            normal_map = []
            if num_levels > 0:
                for j in range(num_levels):
                    normal_map.append(args[current_index])
                    current_index += 1
                normal_map_uv_scale = args[current_index]
                current_index += 1
            else:
                normal_map_uv_scale = None

            compute_specular_lighting = bool(args[current_index])
            current_index += 1
            two_sided = bool(args[current_index])
            current_index += 1
            use_vertex_color = bool(args[current_index])
            current_index += 1

            if get_tensor_dimension(diffuse_reflectance[0]) == 1:
                diffuse_reflectance = redner.Texture3(\
                    [redner.float_ptr(pyredner.data_ptr(diffuse_reflectance[0]))],
                    [0],
                    [0],
                    3, redner.float_ptr(pyredner.data_ptr(diffuse_uv_scale)))
            else:
                assert (get_tensor_dimension(diffuse_reflectance[0]) == 3)
                diffuse_reflectance = redner.Texture3(\
                    [redner.float_ptr(pyredner.data_ptr(x)) for x in diffuse_reflectance],
                    [x.shape[1] for x in diffuse_reflectance],
                    [x.shape[0] for x in diffuse_reflectance],
                    3,
                    redner.float_ptr(pyredner.data_ptr(diffuse_uv_scale)))

            if get_tensor_dimension(specular_reflectance[0]) == 1:
                specular_reflectance = redner.Texture3(\
                    [redner.float_ptr(pyredner.data_ptr(specular_reflectance[0]))],
                    [0],
                    [0],
                    3, redner.float_ptr(pyredner.data_ptr(specular_uv_scale)))
            else:
                assert (get_tensor_dimension(specular_reflectance[0]) == 3)
                specular_reflectance = redner.Texture3(\
                    [redner.float_ptr(pyredner.data_ptr(x)) for x in specular_reflectance],
                    [x.shape[1] for x in specular_reflectance],
                    [x.shape[0] for x in specular_reflectance],
                    3,
                    redner.float_ptr(pyredner.data_ptr(specular_uv_scale)))

            if get_tensor_dimension(roughness[0]) == 1:
                roughness = redner.Texture1(\
                    [redner.float_ptr(pyredner.data_ptr(roughness[0]))],
                    [0],
                    [0],
                    1, redner.float_ptr(pyredner.data_ptr(roughness_uv_scale)))
            else:
                assert (get_tensor_dimension(roughness[0]) == 3)
                roughness = redner.Texture1(\
                    [redner.float_ptr(pyredner.data_ptr(x)) for x in roughness],
                    [x.shape[1] for x in roughness],
                    [x.shape[0] for x in roughness],
                    3,
                    redner.float_ptr(pyredner.data_ptr(roughness_uv_scale)))

            if len(generic_texture) > 0:
                generic_texture = redner.TextureN(\
                    [redner.float_ptr(pyredner.data_ptr(x)) for x in generic_texture],
                    [x.shape[1] for x in generic_texture],
                    [x.shape[0] for x in generic_texture],
                    generic_texture[0].shape[2],
                    redner.float_ptr(pyredner.data_ptr(generic_uv_scale)))
            else:
                generic_texture = redner.TextureN(\
                    [], [], [], 0, redner.float_ptr(0))

            if len(normal_map) > 0:
                normal_map = redner.Texture3(\
                    [redner.float_ptr(pyredner.data_ptr(x)) for x in normal_map],
                    [x.shape[1] for x in normal_map],
                    [x.shape[0] for x in normal_map],
                    normal_map[0].shape[2],
                    redner.float_ptr(pyredner.data_ptr(normal_map_uv_scale)))
            else:
                normal_map = redner.Texture3(\
                    [], [], [], 0, redner.float_ptr(0))

            materials.append(redner.Material(\
                diffuse_reflectance,
                specular_reflectance,
                roughness,
                generic_texture,
                normal_map,
                compute_specular_lighting,
                two_sided,
                use_vertex_color))

    with tf.device('/device:cpu:' + str(pyredner.get_cpu_device_id())):
        area_lights = []
        for i in range(num_lights):
            shape_id = int(args[current_index])
            current_index += 1
            intensity = args[current_index]
            current_index += 1
            two_sided = bool(args[current_index])
            current_index += 1
            directly_visible = bool(args[current_index])
            current_index += 1

            area_lights.append(
                redner.AreaLight(
                    shape_id, redner.float_ptr(pyredner.data_ptr(intensity)),
                    two_sided, directly_visible))

    envmap = None
    if not is_empty_tensor(args[current_index]):
        num_levels = int(args[current_index])
        current_index += 1
        values = []
        for j in range(num_levels):
            values.append(args[current_index])
            current_index += 1
        envmap_uv_scale = args[current_index]
        current_index += 1
        env_to_world = args[current_index]
        current_index += 1
        world_to_env = args[current_index]
        current_index += 1
        sample_cdf_ys = args[current_index]
        current_index += 1
        sample_cdf_xs = args[current_index]
        current_index += 1
        pdf_norm = float(args[current_index])
        current_index += 1
        directly_visible = bool(args[current_index])
        current_index += 1

        assert isinstance(pdf_norm, float)
        with tf.device(pyredner.get_device_name()):
            sample_cdf_ys = redner.float_ptr(pyredner.data_ptr(sample_cdf_ys))
            sample_cdf_xs = redner.float_ptr(pyredner.data_ptr(sample_cdf_xs))
        with tf.device('/device:cpu:' + str(pyredner.get_cpu_device_id())):
            env_to_world = redner.float_ptr(pyredner.data_ptr(env_to_world))
            world_to_env = redner.float_ptr(pyredner.data_ptr(world_to_env))
        with tf.device(pyredner.get_device_name()):
            values = redner.Texture3(\
                [redner.float_ptr(pyredner.data_ptr(x)) for x in values],
                [x.shape[1] for x in values], # width
                [x.shape[0] for x in values], # height
                3, # channels
                redner.float_ptr(pyredner.data_ptr(envmap_uv_scale)))
        envmap = redner.EnvironmentMap(\
            values,
            env_to_world,
            world_to_env,
            sample_cdf_ys,
            sample_cdf_xs,
            pdf_norm,
            directly_visible)
    else:
        current_index += 1

    # Options
    num_samples = args[current_index]
    current_index += 1
    if len(num_samples.shape) == 0 or num_samples.shape[0] == 1:
        num_samples = int(num_samples)
    else:
        assert (num_samples.shape[0] == 2)
        num_samples = (int(num_samples[0]), int(num_samples[1]))
    max_bounces = int(args[current_index])
    current_index += 1

    num_channel_args = int(args[current_index])
    current_index += 1

    channels = []
    for _ in range(num_channel_args):
        ch = args[current_index]
        ch = RednerChannels.asChannel(ch)
        channels.append(ch)
        current_index += 1

    sampler_type = args[current_index]
    sampler_type = RednerSamplerType.asSamplerType(sampler_type)
    current_index += 1

    use_primary_edge_sampling = args[current_index]
    current_index += 1
    use_secondary_edge_sampling = args[current_index]
    current_index += 1
    sample_pixel_center = args[current_index]
    current_index += 1

    start = time.time()
    scene = redner.Scene(camera, shapes, materials, area_lights, envmap,
                         pyredner.get_use_gpu(), pyredner.get_gpu_device_id(),
                         use_primary_edge_sampling,
                         use_secondary_edge_sampling)
    time_elapsed = time.time() - start
    if get_print_timing():
        print('Scene construction, time: %.5f s' % time_elapsed)

    # Make sure num_samples is a tuple
    if isinstance(num_samples, int):
        num_samples = (num_samples, num_samples)

    options = redner.RenderOptions(seed, num_samples[0], max_bounces, channels,
                                   sampler_type, sample_pixel_center)

    ctx = Context()
    ctx.channels = channels
    ctx.options = options
    ctx.resolution = resolution
    ctx.viewport = viewport
    ctx.scene = scene
    ctx.camera = camera
    ctx.shapes = shapes
    ctx.materials = materials
    ctx.area_lights = area_lights
    ctx.envmap = envmap
    ctx.num_samples = num_samples
    ctx.num_channel_args = num_channel_args

    return ctx
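A sketch of how a forward pass might consume this Context; it mirrors the render call in Example #1 translated to the TensorFlow style of this snippet, and the forward name, the fixed three output channels, and the overall wiring are assumptions for illustration only.

def forward(seed, *args):
    # Illustrative only: unpack the serialized scene, then let redner write
    # the rendered image into a freshly allocated tensor.
    ctx = unpack_args(seed, args)
    with tf.device(pyredner.get_device_name()):
        # The real channel count depends on ctx.channels; 3 is assumed here.
        img = tf.zeros([int(ctx.resolution[0]), int(ctx.resolution[1]), 3],
                       dtype=tf.float32)
        redner.render(ctx.scene,
                      ctx.options,
                      redner.float_ptr(pyredner.data_ptr(img)),  # rendered_image
                      redner.float_ptr(0),                       # d_rendered_image
                      None,                                      # d_scene
                      redner.float_ptr(0))                       # debug_image
    return img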
Example #6
    def backward(ctx, grad_img):
        """
            Backward rendering pass: propagate the image gradient grad_img
            back to the differentiable scene parameters passed to forward.
        """
        if not grad_img.is_contiguous():
            grad_img = grad_img.contiguous()
        scene = ctx.scene
        options = ctx.options

        d_cam_position = torch.zeros(3)
        d_cam_look = torch.zeros(3)
        d_cam_up = torch.zeros(3)
        d_ndc_to_cam = torch.zeros(3, 3)
        d_cam_to_ndc = torch.zeros(3, 3)
        d_camera = redner.DCamera(redner.float_ptr(d_cam_position.data_ptr()),
                                  redner.float_ptr(d_cam_look.data_ptr()),
                                  redner.float_ptr(d_cam_up.data_ptr()),
                                  redner.float_ptr(d_ndc_to_cam.data_ptr()),
                                  redner.float_ptr(d_cam_to_ndc.data_ptr()))
        d_vertices_list = []
        d_uvs_list = []
        d_normals_list = []
        d_shapes = []
        for shape in ctx.shapes:
            num_vertices = shape.num_vertices
            d_vertices = torch.zeros(num_vertices,
                                     3,
                                     device=pyredner.get_device())
            d_uvs = torch.zeros(
                num_vertices, 2,
                device=pyredner.get_device()) if shape.has_uvs() else None
            d_normals = torch.zeros(
                num_vertices, 3,
                device=pyredner.get_device()) if shape.has_normals() else None
            d_vertices_list.append(d_vertices)
            d_uvs_list.append(d_uvs)
            d_normals_list.append(d_normals)
            d_shapes.append(redner.DShape(\
                redner.float_ptr(d_vertices.data_ptr()),
                redner.float_ptr(d_uvs.data_ptr() if d_uvs is not None else 0),
                redner.float_ptr(d_normals.data_ptr() if d_normals is not None else 0)))

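        # Per-material gradient buffers: textured parameters get a full-resolution buffer, constant ones a single RGB (or scalar roughness) value.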
        d_diffuse_list = []
        d_specular_list = []
        d_roughness_list = []
        d_materials = []
        for material in ctx.materials:
            diffuse_size = material.get_diffuse_size()
            specular_size = material.get_specular_size()
            roughness_size = material.get_roughness_size()
            if diffuse_size[0] == 0:
                d_diffuse = torch.zeros(3, device=pyredner.get_device())
            else:
                d_diffuse = torch.zeros(diffuse_size[2],
                                        diffuse_size[1],
                                        diffuse_size[0],
                                        3,
                                        device=pyredner.get_device())
            if specular_size[0] == 0:
                d_specular = torch.zeros(3, device=pyredner.get_device())
            else:
                d_specular = torch.zeros(specular_size[2],
                                         specular_size[1],
                                         specular_size[0],
                                         3,
                                         device=pyredner.get_device())
            if roughness_size[0] == 0:
                d_roughness = torch.zeros(1, device=pyredner.get_device())
            else:
                d_roughness = torch.zeros(roughness_size[2],
                                          roughness_size[1],
                                          roughness_size[0],
                                          1,
                                          device=pyredner.get_device())
            d_diffuse_list.append(d_diffuse)
            d_specular_list.append(d_specular)
            d_roughness_list.append(d_roughness)
            d_diffuse_uv_scale = torch.zeros(2)
            d_specular_uv_scale = torch.zeros(2)
            d_roughness_uv_scale = torch.zeros(2)
            d_diffuse_tex = redner.Texture3(\
                redner.float_ptr(d_diffuse.data_ptr()),
                diffuse_size[0], diffuse_size[1], diffuse_size[2],
                redner.float_ptr(d_diffuse_uv_scale.data_ptr()))
            d_specular_tex = redner.Texture3(\
                redner.float_ptr(d_specular.data_ptr()),
                specular_size[0], specular_size[1], specular_size[2],
                redner.float_ptr(d_specular_uv_scale.data_ptr()))
            d_roughness_tex = redner.Texture1(\
                redner.float_ptr(d_roughness.data_ptr()),
                roughness_size[0], roughness_size[1], roughness_size[2],
                redner.float_ptr(d_roughness_uv_scale.data_ptr()))
            d_materials.append(redner.DMaterial(\
                d_diffuse_tex, d_specular_tex, d_roughness_tex))

        d_intensity_list = []
        d_area_lights = []
        for light in ctx.area_lights:
            d_intensity = torch.zeros(3, device=pyredner.get_device())
            d_intensity_list.append(d_intensity)
            d_area_lights.append(\
                redner.DAreaLight(redner.float_ptr(d_intensity.data_ptr())))

        d_envmap = None
        if ctx.envmap is not None:
            envmap = ctx.envmap
            size = envmap.get_size()
            d_envmap_values = \
                torch.zeros(size[2],
                            size[1],
                            size[0],
                            3,
                            device = pyredner.get_device())
            d_envmap_uv_scale = torch.zeros(2)
            d_envmap_tex = redner.Texture3(\
                redner.float_ptr(d_envmap_values.data_ptr()),
                size[0], size[1], size[2],
                redner.float_ptr(d_envmap_uv_scale.data_ptr()))
            d_world_to_env = torch.zeros(4, 4)
            d_envmap = redner.DEnvironmentMap(\
                d_envmap_tex,
                redner.float_ptr(d_world_to_env.data_ptr()))

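        # Gather every gradient buffer into a DScene and run the backward render, which scatters dL/d(input) into the buffers allocated above.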
        d_scene = redner.DScene(
            d_camera, d_shapes, d_materials, d_area_lights, d_envmap,
            pyredner.get_use_gpu(),
            pyredner.get_device().index
            if pyredner.get_device().index is not None else -1)
        if not get_use_correlated_random_number():
            # Decouple the forward/backward random numbers by adding a big prime number
            options.seed += 1000003

        options.num_samples = ctx.num_samples[1]
        start = time.time()
        redner.render(scene, options, redner.float_ptr(0),
                      redner.float_ptr(grad_img.data_ptr()), d_scene,
                      redner.float_ptr(0))
        time_elapsed = time.time() - start
        if print_timing:
            print('Backward pass, time: %.5f s' % time_elapsed)

        # # For debugging
        # # pyredner.imwrite(grad_img, 'grad_img.exr')
        # # grad_img = torch.ones(256, 256, 3, device = pyredner.get_device())
        # debug_img = torch.zeros(256, 256, 3)
        # start = time.time()
        # redner.render(scene, options,
        #               redner.float_ptr(0),
        #               redner.float_ptr(grad_img.data_ptr()),
        #               d_scene,
        #               redner.float_ptr(debug_img.data_ptr()))
        # time_elapsed = time.time() - start
        # if print_timing:
        #     print('Backward pass, time: %.5f s' % time_elapsed)
        # pyredner.imwrite(debug_img, 'debug.exr')
        # pyredner.imwrite(-debug_img, 'debug_.exr')
        # debug_img = debug_img.numpy()
        # print(np.max(debug_img))
        # print(np.unravel_index(np.argmax(debug_img), debug_img.shape))
        # print(np.min(debug_img))
        # print(np.unravel_index(np.argmin(debug_img), debug_img.shape))
        # print(np.sum(debug_img) / 3)
        # debug_max = 0.5
        # debug_min = -0.5
        # debug_img = np.clip((debug_img - debug_min) / (debug_max - debug_min), 0, 1)
        # debug_img = debug_img[:, :, 0]
        # import matplotlib.cm as cm
        # debug_img = cm.viridis(debug_img)
        # skimage.io.imsave('debug.png', np.power(debug_img, 1/2.2))
        # exit()

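        # One gradient per argument of forward(), in the same order; non-differentiable arguments (counts, ids, flags) get None.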
        ret_list = []
        ret_list.append(None)  # seed
        ret_list.append(None)  # num_shapes
        ret_list.append(None)  # num_materials
        ret_list.append(None)  # num_lights
        ret_list.append(d_cam_position)
        ret_list.append(d_cam_look)
        ret_list.append(d_cam_up)
        ret_list.append(d_ndc_to_cam)
        ret_list.append(d_cam_to_ndc)
        ret_list.append(None)  # clip near
        ret_list.append(None)  # resolution
        ret_list.append(None)  # fisheye

        num_shapes = len(ctx.shapes)
        for i in range(num_shapes):
            ret_list.append(d_vertices_list[i])
            ret_list.append(None)  # indices
            ret_list.append(d_uvs_list[i])
            ret_list.append(d_normals_list[i])
            ret_list.append(None)  # material id
            ret_list.append(None)  # light id

        num_materials = len(ctx.materials)
        for i in range(num_materials):
            ret_list.append(d_diffuse_list[i])
            ret_list.append(None)  # diffuse_uv_scale
            ret_list.append(d_specular_list[i])
            ret_list.append(None)  # specular_uv_scale
            ret_list.append(d_roughness_list[i])
            ret_list.append(None)  # roughness_uv_scale
            ret_list.append(None)  # two sided

        num_area_lights = len(ctx.area_lights)
        for i in range(num_area_lights):
            ret_list.append(None)  # shape id
            ret_list.append(d_intensity_list[i].cpu())
            ret_list.append(None)  # two sided

        if ctx.envmap is not None:
            ret_list.append(d_envmap_values)
            ret_list.append(None)  # uv_scale
            ret_list.append(None)  # env_to_world
            ret_list.append(d_world_to_env)
            ret_list.append(None)  # sample_cdf_ys
            ret_list.append(None)  # sample_cdf_xs
            ret_list.append(None)  # pdf_norm
        else:
            ret_list.append(None)
            ret_list.append(None)
            ret_list.append(None)
            ret_list.append(None)
            ret_list.append(None)
            ret_list.append(None)
            ret_list.append(None)

        ret_list.append(None)  # num samples
        ret_list.append(None)  # num bounces
        ret_list.append(None)  # channels
        ret_list.append(None)  # sampler type
        ret_list.append(None)  # use_primary_edge_sampling
        ret_list.append(None)  # use_secondary_edge_sampling

        return tuple(ret_list)
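
The PyTorch snippets here appear to be the forward/backward halves of a torch.autograd.Function subclass (pyredner calls this RenderFunction): forward() flattens the scene into raw pointers and renders, while backward() allocates gradient buffers and re-renders against them. A minimal, hypothetical sketch of how such a pair is wired up and invoked; the class name and toy arithmetic are illustrative only, not part of redner:

import torch

class ToyRender(torch.autograd.Function):
    # Stand-in for a renderer: forward stores what backward needs on ctx,
    # backward returns one gradient per forward argument.
    @staticmethod
    def forward(ctx, seed, x):
        ctx.save_for_backward(x)
        ctx.seed = seed           # non-tensor state can be stashed as attributes
        return x * x              # pretend this is the rendered image

    @staticmethod
    def backward(ctx, grad_img):
        (x,) = ctx.saved_tensors
        # None for the non-differentiable seed, d(x*x)/dx * grad_img for x.
        return None, 2.0 * x * grad_img

# Usage sketch:
# img = ToyRender.apply(42, torch.randn(4, requires_grad=True))
# img.sum().backward()
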
Example No. 7
0
def forward(seed: int, *args):
    """
        Forward rendering pass: given a scene, output an image.
    """
    global __ctx
    ctx = __ctx

    # Unpack arguments
    current_index = 0
    num_shapes = int(args[current_index])
    current_index += 1
    num_materials = int(args[current_index])
    current_index += 1
    num_lights = int(args[current_index])
    current_index += 1

    # Camera arguments
    cam_position = args[current_index]
    current_index += 1
    cam_look_at = args[current_index]
    current_index += 1
    cam_up = args[current_index]
    current_index += 1
    ndc_to_cam = args[current_index]
    current_index += 1
    cam_to_ndc = args[current_index]
    current_index += 1
    clip_near = float(args[current_index])
    current_index += 1
    resolution = args[current_index].numpy()  # Tuple[int, int]
    current_index += 1
    camera_type = pyredner.RednerCameraType.asCameraType(
        args[current_index])  # FIXME: Map to custom type
    current_index += 1

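    # Build the redner camera on the CPU; the C++ core reads the tensors through raw pointers.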
    with tf.device('/device:cpu:' + str(pyredner.get_cpu_device_id())):
        camera = redner.Camera(
            resolution[1], resolution[0],
            redner.float_ptr(pyredner.data_ptr(cam_position)),
            redner.float_ptr(pyredner.data_ptr(cam_look_at)),
            redner.float_ptr(pyredner.data_ptr(cam_up)),
            redner.float_ptr(pyredner.data_ptr(ndc_to_cam)),
            redner.float_ptr(pyredner.data_ptr(cam_to_ndc)), clip_near,
            camera_type)

    with tf.device(pyredner.get_device_name()):
        shapes = []
        for i in range(num_shapes):
            vertices = args[current_index]
            current_index += 1
            indices = args[current_index]
            current_index += 1
            uvs = args[current_index]
            current_index += 1
            normals = args[current_index]
            current_index += 1
            material_id = int(args[current_index])
            current_index += 1
            light_id = int(args[current_index])
            current_index += 1
            shapes.append(redner.Shape(\
                redner.float_ptr(pyredner.data_ptr(vertices)),
                redner.int_ptr(pyredner.data_ptr(indices)),
                redner.float_ptr(pyredner.data_ptr(uvs) if uvs is not None else 0),
                redner.float_ptr(pyredner.data_ptr(normals) if normals is not None else 0),
                int(vertices.shape[0]),
                int(indices.shape[0]),
                material_id,
                light_id))

    materials = []
    with tf.device(pyredner.get_device_name()):
        for i in range(num_materials):
            diffuse_reflectance = args[current_index]
            current_index += 1
            diffuse_uv_scale = args[current_index]
            current_index += 1
            specular_reflectance = args[current_index]
            current_index += 1
            specular_uv_scale = args[current_index]
            current_index += 1
            roughness = args[current_index]
            current_index += 1
            roughness_uv_scale = args[current_index]
            current_index += 1
            normal_map = args[current_index]
            current_index += 1
            normal_map_uv_scale = args[current_index]
            current_index += 1
            two_sided = bool(args[current_index])
            current_index += 1

            diffuse_reflectance_ptr = redner.float_ptr(
                pyredner.data_ptr(diffuse_reflectance))
            specular_reflectance_ptr = redner.float_ptr(
                pyredner.data_ptr(specular_reflectance))
            roughness_ptr = redner.float_ptr(pyredner.data_ptr(roughness))
            if normal_map.shape[0] > 0:
                normal_map_ptr = redner.float_ptr(
                    pyredner.data_ptr(normal_map))
            diffuse_uv_scale_ptr = redner.float_ptr(
                pyredner.data_ptr(diffuse_uv_scale))
            specular_uv_scale_ptr = redner.float_ptr(
                pyredner.data_ptr(specular_uv_scale))
            roughness_uv_scale_ptr = redner.float_ptr(
                pyredner.data_ptr(roughness_uv_scale))
            if normal_map.shape[0] > 0:
                normal_map_uv_scale_ptr = redner.float_ptr(
                    pyredner.data_ptr(normal_map_uv_scale))
            if get_tensor_dimension(diffuse_reflectance) == 1:
                diffuse_reflectance = redner.Texture3(diffuse_reflectance_ptr,
                                                      0, 0, 0,
                                                      diffuse_uv_scale_ptr)
            else:
                diffuse_reflectance = redner.Texture3(\
                    diffuse_reflectance_ptr,
                    int(diffuse_reflectance.shape[2]), # width
                    int(diffuse_reflectance.shape[1]), # height
                    int(diffuse_reflectance.shape[0]), # num levels
                    diffuse_uv_scale_ptr)
            if get_tensor_dimension(specular_reflectance) == 1:
                specular_reflectance = redner.Texture3(
                    specular_reflectance_ptr, 0, 0, 0, specular_uv_scale_ptr)
            else:
                specular_reflectance = redner.Texture3(\
                    specular_reflectance_ptr,
                    int(specular_reflectance.shape[2]), # width
                    int(specular_reflectance.shape[1]), # height
                    int(specular_reflectance.shape[0]), # num levels
                    specular_uv_scale_ptr)
            if get_tensor_dimension(roughness) == 1:
                roughness = redner.Texture1(roughness_ptr, 0, 0, 0,
                                            roughness_uv_scale_ptr)
            else:
                assert (get_tensor_dimension(roughness) == 4)
                roughness = redner.Texture1(\
                    roughness_ptr,
                    int(roughness.shape[2]), # width
                    int(roughness.shape[1]), # height
                    int(roughness.shape[0]), # num levels
                    roughness_uv_scale_ptr)
            if normal_map.shape[0] > 0:
                normal_map = redner.Texture3(\
                    normal_map_ptr,
                    int(normal_map.shape[2]),
                    int(normal_map.shape[1]),
                    int(normal_map.shape[0]),
                    normal_map_uv_scale_ptr)
            else:
                normal_map = redner.Texture3(\
                    redner.float_ptr(0), 0, 0, 0, redner.float_ptr(0))
            materials.append(redner.Material(\
                diffuse_reflectance,
                specular_reflectance,
                roughness,
                normal_map,
                two_sided))

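    # Area lights are unpacked on the CPU; each one references its emitting shape by id.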
    with tf.device('/device:cpu:' + str(pyredner.get_cpu_device_id())):
        area_lights = []
        for i in range(num_lights):
            shape_id = int(args[current_index])
            current_index += 1
            intensity = args[current_index]
            current_index += 1
            two_sided = bool(args[current_index])
            current_index += 1

            area_lights.append(
                redner.AreaLight(
                    shape_id, redner.float_ptr(pyredner.data_ptr(intensity)),
                    two_sided))

    envmap = None
    if not is_empty_tensor(args[current_index]):
        values = args[current_index]
        current_index += 1
        envmap_uv_scale = args[current_index]
        current_index += 1
        env_to_world = args[current_index]
        current_index += 1
        world_to_env = args[current_index]
        current_index += 1
        sample_cdf_ys = args[current_index]
        current_index += 1
        sample_cdf_xs = args[current_index]
        current_index += 1
        pdf_norm = float(args[current_index])
        current_index += 1

        assert isinstance(pdf_norm, float)
        with tf.device(pyredner.get_device_name()):
            values_ptr = redner.float_ptr(pyredner.data_ptr(values))
            sample_cdf_ys = redner.float_ptr(pyredner.data_ptr(sample_cdf_ys))
            sample_cdf_xs = redner.float_ptr(pyredner.data_ptr(sample_cdf_xs))
            envmap_uv_scale = redner.float_ptr(
                pyredner.data_ptr(envmap_uv_scale))
        with tf.device('/device:cpu:' + str(pyredner.get_cpu_device_id())):
            env_to_world = redner.float_ptr(pyredner.data_ptr(env_to_world))
            world_to_env = redner.float_ptr(pyredner.data_ptr(world_to_env))
        values = redner.Texture3(
            values_ptr,
            int(values.shape[2]),  # width
            int(values.shape[1]),  # height
            int(values.shape[0]),  # num levels
            envmap_uv_scale)
        envmap = redner.EnvironmentMap(\
            values,
            env_to_world,
            world_to_env,
            sample_cdf_ys,
            sample_cdf_xs,
            pdf_norm)
    else:
        current_index += 7

    # Options
    num_samples = int(args[current_index])
    current_index += 1
    max_bounces = int(args[current_index])
    current_index += 1

    __num_channels = int(args[current_index])
    current_index += 1

    channels = []
    for _ in range(__num_channels):
        ch = args[current_index]
        ch = pyredner.RednerChannels.asChannel(ch)
        channels.append(ch)
        current_index += 1

    sampler_type = args[current_index]
    sampler_type = pyredner.RednerSamplerType.asSamplerType(sampler_type)
    current_index += 1

    use_primary_edge_sampling = args[current_index]
    current_index += 1
    use_secondary_edge_sampling = args[current_index]
    current_index += 1

    scene = redner.Scene(camera, shapes, materials, area_lights, envmap,
                         pyredner.get_use_gpu(), pyredner.get_gpu_device_id(),
                         use_primary_edge_sampling,
                         use_secondary_edge_sampling)

    # Make sure num_samples is a pair: (forward-pass samples, backward-pass samples)
    if isinstance(num_samples, int):
        num_samples = (num_samples, num_samples)

    options = redner.RenderOptions(seed, num_samples[0], max_bounces, channels,
                                   sampler_type)
    num_channels = redner.compute_num_channels(channels)

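    # Allocate the output image on the rendering device and launch the forward render (no gradient or debug outputs).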
    with tf.device(pyredner.get_device_name()):
        rendered_image = tf.zeros(
            shape=[resolution[0], resolution[1], num_channels],
            dtype=tf.float32)

        start = time.time()

        # pdb.set_trace()
        redner.render(scene, options,
                      redner.float_ptr(pyredner.data_ptr(rendered_image)),
                      redner.float_ptr(0), None, redner.float_ptr(0))
        time_elapsed = time.time() - start
        if print_timing:
            print('Forward pass, time: %.5f s' % time_elapsed)

        # # For debugging
        # debug_img = tf.zeros((256, 256, 3), dtype=tf.float32)
        # redner.render(scene,
        #               options,
        #               redner.float_ptr(pyredner.data_ptr(rendered_image)),
        #               redner.float_ptr(0),
        #               None,
        #               redner.float_ptr(pyredner.data_ptr(debug_img)))
        # pyredner.imwrite(debug_img, 'debug.png')
        # exit()

        # import pdb; pdb.set_trace()

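    # Cache the constructed scene and options on the module-level context so backward() can reuse them.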
    ctx.shapes = shapes
    ctx.materials = materials
    ctx.area_lights = area_lights
    ctx.envmap = envmap
    ctx.scene = scene
    ctx.options = options
    ctx.num_samples = num_samples
    ctx.num_channels = __num_channels
    return rendered_image
Example No. 8
0
    def backward(grad_img):
        camera = ctx.camera
        scene = ctx.scene
        options = ctx.options

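        # Camera gradient buffers: position/look_at/up for the look-at parameterization, otherwise the raw 4x4 matrices, plus the intrinsic matrix and its inverse.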
        with tf.device(pyredner.get_device_name()):
            if camera.use_look_at:
                d_position = tf.zeros(3, dtype=tf.float32)
                d_look_at = tf.zeros(3, dtype=tf.float32)
                d_up = tf.zeros(3, dtype=tf.float32)
                d_cam_to_world = None
                d_world_to_cam = None
            else:
                d_position = None
                d_look_at = None
                d_up = None
                d_cam_to_world = tf.zeros([4, 4], dtype=tf.float32)
                d_world_to_cam = tf.zeros([4, 4], dtype=tf.float32)
            d_intrinsic_mat_inv = tf.zeros([3, 3], dtype=tf.float32)
            d_intrinsic_mat = tf.zeros([3, 3], dtype=tf.float32)
            if camera.use_look_at:
                d_camera = redner.DCamera(
                    redner.float_ptr(pyredner.data_ptr(d_position)),
                    redner.float_ptr(pyredner.data_ptr(d_look_at)),
                    redner.float_ptr(pyredner.data_ptr(d_up)),
                    redner.float_ptr(0),  # cam_to_world
                    redner.float_ptr(0),  # world_to_cam
                    redner.float_ptr(pyredner.data_ptr(d_intrinsic_mat_inv)),
                    redner.float_ptr(pyredner.data_ptr(d_intrinsic_mat)))
            else:
                d_camera = redner.DCamera(
                    redner.float_ptr(0), redner.float_ptr(0),
                    redner.float_ptr(0),
                    redner.float_ptr(pyredner.data_ptr(d_cam_to_world)),
                    redner.float_ptr(pyredner.data_ptr(d_world_to_cam)),
                    redner.float_ptr(pyredner.data_ptr(d_intrinsic_mat_inv)),
                    redner.float_ptr(pyredner.data_ptr(d_intrinsic_mat)))

        d_vertices_list = []
        d_uvs_list = []
        d_normals_list = []
        d_colors_list = []
        d_shapes = []
        with tf.device(pyredner.get_device_name()):
            for i, shape in enumerate(ctx.shapes):
                num_vertices = shape.num_vertices
                d_vertices = tf.zeros([num_vertices, 3], dtype=tf.float32)
                d_uvs = tf.zeros([num_vertices, 2],
                                 dtype=tf.float32) if shape.has_uvs() else None
                d_normals = tf.zeros(
                    [num_vertices, 3],
                    dtype=tf.float32) if shape.has_normals() else None
                d_colors = tf.zeros(
                    [num_vertices, 3],
                    dtype=tf.float32) if shape.has_colors() else None
                d_vertices_list.append(d_vertices)
                d_uvs_list.append(d_uvs)
                d_normals_list.append(d_normals)
                d_colors_list.append(d_colors)
                d_shapes.append(redner.DShape(\
                    redner.float_ptr(pyredner.data_ptr(d_vertices)),
                    redner.float_ptr(pyredner.data_ptr(d_uvs) if d_uvs is not None else 0),
                    redner.float_ptr(pyredner.data_ptr(d_normals) if d_normals is not None else 0),
                    redner.float_ptr(pyredner.data_ptr(d_colors) if d_colors is not None else 0)))

        d_diffuse_list = []
        d_specular_list = []
        d_roughness_list = []
        d_normal_map_list = []
        d_diffuse_uv_scale_list = []
        d_specular_uv_scale_list = []
        d_roughness_uv_scale_list = []
        d_generic_list = []
        d_generic_uv_scale_list = []
        d_normal_map_uv_scale_list = []
        d_materials = []
        with tf.device(pyredner.get_device_name()):
            for material in ctx.materials:
                if material.get_diffuse_size(0)[0] == 0:
                    d_diffuse = [tf.zeros(3, dtype=tf.float32)]
                else:
                    d_diffuse = []
                    for l in range(material.get_diffuse_levels()):
                        diffuse_size = material.get_diffuse_size(l)
                        d_diffuse.append(\
                            tf.zeros([diffuse_size[1],
                                      diffuse_size[0],
                                      3], dtype=tf.float32))

                if material.get_specular_size(0)[0] == 0:
                    d_specular = [tf.zeros(3, dtype=tf.float32)]
                else:
                    d_specular = []
                    for l in range(material.get_specular_levels()):
                        specular_size = material.get_specular_size(l)
                        d_specular.append(\
                            tf.zeros([specular_size[1],
                                      specular_size[0],
                                      3], dtype=tf.float32))

                if material.get_roughness_size(0)[0] == 0:
                    d_roughness = [tf.zeros(1, dtype=tf.float32)]
                else:
                    d_roughness = []
                    for l in range(material.get_roughness_levels()):
                        roughness_size = material.get_roughness_size(l)
                        d_roughness.append(\
                            tf.zeros([roughness_size[1],
                                      roughness_size[0],
                                      1], dtype=tf.float32))
                # HACK: tensorflow's eager mode uses a cache to store scalar
                #       constants to avoid memory copy. If we pass scalar tensors
                #       into the C++ code and modify them, we would corrupt the
                #       cache, causing incorrect results in future scalar constant
                #       creations. Thus we force tensorflow to copy by adding a zero.
                # (also see https://github.com/tensorflow/tensorflow/issues/11186
                #  for more discussion regarding copying tensors)
                if d_roughness[0].shape.num_elements() == 1:
                    d_roughness[0] = d_roughness[0] + 0

                if material.get_generic_levels() == 0:
                    d_generic = None
                else:
                    d_generic = []
                    for l in range(material.get_generic_levels()):
                        generic_size = material.get_generic_size(l)
                        d_generic.append(\
                            tf.zeros([generic_size[2],
                                      generic_size[1],
                                      generic_size[0]], dtype=tf.float32))

                if material.get_normal_map_levels() == 0:
                    d_normal_map = None
                else:
                    d_normal_map = []
                    for l in range(material.get_normal_map_levels()):
                        normal_map_size = material.get_normal_map_size(l)
                        d_normal_map.append(\
                            tf.zeros([normal_map_size[1],
                                      normal_map_size[0],
                                      3], dtype=tf.float32))

                d_diffuse_list.append(d_diffuse)
                d_specular_list.append(d_specular)
                d_roughness_list.append(d_roughness)
                d_generic_list.append(d_generic)
                d_normal_map_list.append(d_normal_map)

                d_diffuse_uv_scale = tf.zeros([2], dtype=tf.float32)
                d_specular_uv_scale = tf.zeros([2], dtype=tf.float32)
                d_roughness_uv_scale = tf.zeros([2], dtype=tf.float32)
                if d_generic is None:
                    d_generic_uv_scale = None
                else:
                    d_generic_uv_scale = tf.zeros([2], dtype=tf.float32)
                if d_normal_map is None:
                    d_normal_map_uv_scale = None
                else:
                    d_normal_map_uv_scale = tf.zeros([2], dtype=tf.float32)
                d_diffuse_uv_scale_list.append(d_diffuse_uv_scale)
                d_specular_uv_scale_list.append(d_specular_uv_scale)
                d_roughness_uv_scale_list.append(d_roughness_uv_scale)
                d_generic_uv_scale_list.append(d_generic_uv_scale)
                d_normal_map_uv_scale_list.append(d_normal_map_uv_scale)

                if len(d_diffuse[0].shape) == 1:
                    d_diffuse_tex = redner.Texture3(\
                        [redner.float_ptr(pyredner.data_ptr(d_diffuse[0]))],
                        [0],
                        [0],
                        3,
                        redner.float_ptr(pyredner.data_ptr(d_diffuse_uv_scale)))
                else:
                    d_diffuse_tex = redner.Texture3(\
                        [redner.float_ptr(pyredner.data_ptr(x)) for x in d_diffuse],
                        [x.shape[1] for x in d_diffuse],
                        [x.shape[0] for x in d_diffuse],
                        3,
                        redner.float_ptr(pyredner.data_ptr(d_diffuse_uv_scale)))

                if len(d_specular[0].shape) == 1:
                    d_specular_tex = redner.Texture3(\
                        [redner.float_ptr(pyredner.data_ptr(d_specular[0]))],
                        [0],
                        [0],
                        3,
                        redner.float_ptr(pyredner.data_ptr(d_specular_uv_scale)))
                else:
                    d_specular_tex = redner.Texture3(\
                        [redner.float_ptr(pyredner.data_ptr(x)) for x in d_specular],
                        [x.shape[1] for x in d_specular],
                        [x.shape[0] for x in d_specular],
                        3,
                        redner.float_ptr(pyredner.data_ptr(d_specular_uv_scale)))

                if len(d_roughness[0].shape) == 1:
                    d_roughness_tex = redner.Texture1(\
                        [redner.float_ptr(pyredner.data_ptr(d_roughness[0]))],
                        [0],
                        [0],
                        1,
                        redner.float_ptr(pyredner.data_ptr(d_roughness_uv_scale)))
                else:
                    d_roughness_tex = redner.Texture1(\
                        [redner.float_ptr(pyredner.data_ptr(x)) for x in d_roughness],
                        [x.shape[1] for x in d_roughness],
                        [x.shape[0] for x in d_roughness],
                        1,
                        redner.float_ptr(pyredner.data_ptr(d_roughness_uv_scale)))

                if d_generic is None:
                    d_generic_tex = redner.TextureN(\
                        [], [], [], 0, redner.float_ptr(0))
                else:
                    d_generic_tex = redner.TextureN(\
                        [redner.float_ptr(pyredner.data_ptr(x)) for x in d_generic],
                        [x.shape[1] for x in d_generic],
                        [x.shape[0] for x in d_generic],
                        d_generic[0].shape[2],
                        redner.float_ptr(pyredner.data_ptr(d_generic_uv_scale)))

                if d_normal_map is None:
                    d_normal_map = redner.Texture3(\
                        [], [], [], 0, redner.float_ptr(0))
                else:
                    d_normal_map = redner.Texture3(\
                        [redner.float_ptr(pyredner.data_ptr(x)) for x in d_normal_map],
                        [x.shape[1] for x in d_normal_map],
                        [x.shape[0] for x in d_normal_map],
                        3,
                        redner.float_ptr(pyredner.data_ptr(d_normal_map_uv_scale)))

                d_materials.append(redner.DMaterial(\
                    d_diffuse_tex, d_specular_tex, d_roughness_tex,
                    d_generic_tex, d_normal_map))

        d_intensity_list = []
        d_area_lights = []
        with tf.device(pyredner.get_device_name()):
            for light in ctx.area_lights:
                d_intensity = tf.zeros(3, dtype=tf.float32)
                d_intensity_list.append(d_intensity)
                d_area_lights.append(\
                    redner.DAreaLight(redner.float_ptr(pyredner.data_ptr(d_intensity))))

        d_envmap = None
        if ctx.envmap is not None:
            envmap = ctx.envmap
            with tf.device(pyredner.get_device_name()):
                d_envmap_values = []
                for l in range(envmap.get_levels()):
                    size = envmap.get_size(l)
                    d_envmap_values.append(\
                        tf.zeros([size[1],
                                  size[0],
                                  3], dtype=tf.float32))
                d_envmap_uv_scale = tf.zeros([2], dtype=tf.float32)
                d_world_to_env = tf.zeros([4, 4], dtype=tf.float32)
                d_envmap_tex = redner.Texture3(\
                    [redner.float_ptr(pyredner.data_ptr(x)) for x in d_envmap_values],
                    [x.shape[1] for x in d_envmap_values],
                    [x.shape[0] for x in d_envmap_values],
                    3,
                    redner.float_ptr(pyredner.data_ptr(d_envmap_uv_scale)))
                d_envmap = redner.DEnvironmentMap(
                    d_envmap_tex,
                    redner.float_ptr(pyredner.data_ptr(d_world_to_env)))

        d_scene = redner.DScene(d_camera, d_shapes, d_materials, d_area_lights,
                                d_envmap, pyredner.get_use_gpu(),
                                pyredner.get_gpu_device_id())
        if not get_use_correlated_random_number():
            # Decouple the forward/backward random numbers by adding a big prime number
            options.seed += 1000003
        start = time.time()

        options.num_samples = ctx.num_samples[1]
        with tf.device(pyredner.get_device_name()):
            grad_img = tf.identity(grad_img)
            redner.render(
                scene,
                options,
                redner.float_ptr(0),  # rendered_image
                redner.float_ptr(pyredner.data_ptr(grad_img)),
                d_scene,
                redner.float_ptr(0))  # debug_image
        time_elapsed = time.time() - start

        if print_timing:
            print('Backward pass, time: %.5f s' % time_elapsed)

        # # For debugging
        # pyredner.imwrite(grad_img, 'grad_img.exr')
        # grad_img = tf.ones([256, 256, 3], dtype=tf.float32)
        # debug_img = tf.zeros([256, 256, 3], dtype=tf.float32)
        # redner.render(scene, options,
        #               redner.float_ptr(0),
        #               redner.float_ptr(pyredner.data_ptr(grad_img)),
        #               d_scene,
        #               redner.float_ptr(pyredner.data_ptr(debug_img)))
        # pyredner.imwrite(debug_img, 'debug.exr')
        # pyredner.imwrite(-debug_img, 'debug_.exr')
        # exit()

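        # As in the PyTorch version, return one gradient per forward argument, with None for the non-differentiable ones.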
        ret_list = []
        ret_list.append(None)  # seed
        ret_list.append(None)  # num_shapes
        ret_list.append(None)  # num_materials
        ret_list.append(None)  # num_lights
        if camera.use_look_at:
            ret_list.append(d_position)
            ret_list.append(d_look_at)
            ret_list.append(d_up)
            ret_list.append(None)  # cam_to_world
            ret_list.append(None)  # world_to_cam
        else:
            ret_list.append(None)  # pos
            ret_list.append(None)  # look
            ret_list.append(None)  # up
            ret_list.append(d_cam_to_world)
            ret_list.append(d_world_to_cam)
        ret_list.append(d_intrinsic_mat_inv)
        ret_list.append(d_intrinsic_mat)
        ret_list.append(None)  # clip near
        ret_list.append(None)  # resolution
        ret_list.append(None)  # camera_type

        num_shapes = len(ctx.shapes)
        for i in range(num_shapes):
            ret_list.append(d_vertices_list[i])
            ret_list.append(None)  # indices
            ret_list.append(d_uvs_list[i])
            ret_list.append(d_normals_list[i])
            ret_list.append(None)  # uv_indices
            ret_list.append(None)  # normal_indices
            ret_list.append(d_colors_list[i])
            ret_list.append(None)  # material id
            ret_list.append(None)  # light id

        num_materials = len(ctx.materials)
        for i in range(num_materials):
            ret_list.append(None)  # num_levels
            for d_diffuse in d_diffuse_list[i]:
                ret_list.append(d_diffuse)
            ret_list.append(d_diffuse_uv_scale_list[i])
            ret_list.append(None)  # num_levels
            for d_specular in d_specular_list[i]:
                ret_list.append(d_specular)
            ret_list.append(d_specular_uv_scale_list[i])
            ret_list.append(None)  # num_levels
            for d_roughness in d_roughness_list[i]:
                ret_list.append(d_roughness)
            ret_list.append(d_roughness_uv_scale_list[i])
            if d_generic_list[i] is None:
                ret_list.append(None)  # num_levels
            else:
                ret_list.append(None)  # num_levels
                for d_generic in d_generic_list[i]:
                    ret_list.append(d_generic)
                ret_list.append(d_generic_uv_scale_list[i])
            if d_normal_map_list[i] is None:
                ret_list.append(None)  # num_levels
            else:
                ret_list.append(None)  # num_levels
                for d_normal_map in d_normal_map_list[i]:
                    ret_list.append(d_normal_map)
                ret_list.append(d_normal_map_uv_scale_list[i])
            ret_list.append(None)  # compute_specular_lighting
            ret_list.append(None)  # two sided
            ret_list.append(None)  # use_vertex_color

        num_area_lights = len(ctx.area_lights)
        for i in range(num_area_lights):
            ret_list.append(None)  # shape id
            with tf.device('/device:cpu:' + str(pyredner.get_cpu_device_id())):
                ret_list.append(tf.identity(d_intensity_list[i]))
            ret_list.append(None)  # two sided

        if ctx.envmap is not None:
            ret_list.append(None)  # num_levels
            for d_values in d_envmap_values:
                ret_list.append(d_values)
            ret_list.append(d_envmap_uv_scale)
            ret_list.append(None)  # env_to_world
            with tf.device('/device:cpu:' + str(pyredner.get_cpu_device_id())):
                ret_list.append(tf.identity(d_world_to_env))
            ret_list.append(None)  # sample_cdf_ys
            ret_list.append(None)  # sample_cdf_xs
            ret_list.append(None)  # pdf_norm
        else:
            ret_list.append(None)

        ret_list.append(None)  # num samples
        ret_list.append(None)  # num bounces
        ret_list.append(None)  # num channels
        for _ in range(ctx.num_channels):
            ret_list.append(None)  # channel

        ret_list.append(None)  # sampler type
        ret_list.append(None)  # use_primary_edge_sampling
        ret_list.append(None)  # use_secondary_edge_sampling
        ret_list.append(None)  # sample_pixel_center

        return ret_list
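
The TensorFlow examples (a module-level forward(seed, *args) plus a nested backward(grad_img) reading from a shared context) look like pieces of a tf.custom_gradient-wrapped render call, where the forward function returns the image together with a gradient closure. A minimal, hypothetical sketch of that wiring; the names and math are illustrative, not pyredner's:

import tensorflow as tf

@tf.custom_gradient
def toy_render(x):
    img = x * x  # stand-in for the forward render

    def backward(grad_img):
        # One gradient per input of toy_render, mirroring how the real
        # backward() assembles ret_list in the order of forward's arguments.
        return 2.0 * x * grad_img

    return img, backward

# Usage sketch:
# x = tf.Variable([1.0, 2.0, 3.0])
# with tf.GradientTape() as tape:
#     loss = tf.reduce_sum(toy_render(x))
# print(tape.gradient(loss, x))  # -> 2 * x
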
Example No. 9
0
    def forward(ctx, seed, *args):
        """
            Forward rendering pass: given a scene, output an image.
        """
        # Unpack arguments
        current_index = 0
        num_shapes = args[current_index]
        current_index += 1
        num_materials = args[current_index]
        current_index += 1
        num_lights = args[current_index]
        current_index += 1
        cam_position = args[current_index]
        current_index += 1
        cam_look_at = args[current_index]
        current_index += 1
        cam_up = args[current_index]
        current_index += 1
        ndc_to_cam = args[current_index]
        current_index += 1
        cam_to_ndc = args[current_index]
        current_index += 1
        clip_near = args[current_index]
        current_index += 1
        resolution = args[current_index]
        current_index += 1
        fisheye = args[current_index]
        current_index += 1
        camera = redner.Camera(resolution[1], resolution[0],
                               redner.float_ptr(cam_position.data_ptr()),
                               redner.float_ptr(cam_look_at.data_ptr()),
                               redner.float_ptr(cam_up.data_ptr()),
                               redner.float_ptr(ndc_to_cam.data_ptr()),
                               redner.float_ptr(cam_to_ndc.data_ptr()),
                               clip_near, fisheye)
        shapes = []
        for i in range(num_shapes):
            vertices = args[current_index]
            current_index += 1
            indices = args[current_index]
            current_index += 1
            uvs = args[current_index]
            current_index += 1
            normals = args[current_index]
            current_index += 1
            material_id = args[current_index]
            current_index += 1
            light_id = args[current_index]
            current_index += 1
            assert (vertices.is_contiguous())
            assert (indices.is_contiguous())
            if uvs is not None:
                assert (uvs.is_contiguous())
            if normals is not None:
                assert (normals.is_contiguous())
            shapes.append(redner.Shape(\
                redner.float_ptr(vertices.data_ptr()),
                redner.int_ptr(indices.data_ptr()),
                redner.float_ptr(uvs.data_ptr() if uvs is not None else 0),
                redner.float_ptr(normals.data_ptr() if normals is not None else 0),
                int(vertices.shape[0]),
                int(indices.shape[0]),
                material_id,
                light_id))
        materials = []
        for i in range(num_materials):
            diffuse_reflectance = args[current_index]
            current_index += 1
            diffuse_uv_scale = args[current_index]
            current_index += 1
            specular_reflectance = args[current_index]
            current_index += 1
            specular_uv_scale = args[current_index]
            current_index += 1
            roughness = args[current_index]
            current_index += 1
            roughness_uv_scale = args[current_index]
            current_index += 1
            two_sided = args[current_index]
            current_index += 1
            assert (diffuse_reflectance.is_contiguous())
            if diffuse_reflectance.dim() == 1:
                diffuse_reflectance = redner.Texture3(\
                    redner.float_ptr(diffuse_reflectance.data_ptr()), 0, 0, 0,
                    redner.float_ptr(diffuse_uv_scale.data_ptr()))
            else:
                diffuse_reflectance = redner.Texture3(\
                    redner.float_ptr(diffuse_reflectance.data_ptr()),
                    int(diffuse_reflectance.shape[2]), # width
                    int(diffuse_reflectance.shape[1]), # height
                    int(diffuse_reflectance.shape[0]), # num levels
                    redner.float_ptr(diffuse_uv_scale.data_ptr()))
            assert (specular_reflectance.is_contiguous())
            if specular_reflectance.dim() == 1:
                specular_reflectance = redner.Texture3(\
                    redner.float_ptr(specular_reflectance.data_ptr()), 0, 0, 0,
                    redner.float_ptr(specular_uv_scale.data_ptr()))
            else:
                specular_reflectance = redner.Texture3(\
                    redner.float_ptr(specular_reflectance.data_ptr()),
                    int(specular_reflectance.shape[2]), # width
                    int(specular_reflectance.shape[1]), # height
                    int(specular_reflectance.shape[0]), # num levels
                    redner.float_ptr(specular_uv_scale.data_ptr()))
            assert (roughness.is_contiguous())
            if roughness.dim() == 1:
                roughness = redner.Texture1(\
                    redner.float_ptr(roughness.data_ptr()), 0, 0, 0,
                    redner.float_ptr(roughness_uv_scale.data_ptr()))
            else:
                assert (roughness.dim() == 4)
                roughness = redner.Texture1(\
                    redner.float_ptr(roughness.data_ptr()),
                    int(roughness.shape[2]), # width
                    int(roughness.shape[1]), # height
                    int(roughness.shape[0]), # num levels
                    redner.float_ptr(roughness_uv_scale.data_ptr()))
            materials.append(redner.Material(\
                diffuse_reflectance,
                specular_reflectance,
                roughness,
                two_sided))

        area_lights = []
        for i in range(num_lights):
            shape_id = args[current_index]
            current_index += 1
            intensity = args[current_index]
            current_index += 1
            two_sided = args[current_index]
            current_index += 1

            area_lights.append(redner.AreaLight(\
                shape_id,
                redner.float_ptr(intensity.data_ptr()),
                two_sided))

        envmap = None
        if args[current_index] is not None:
            values = args[current_index]
            current_index += 1
            envmap_uv_scale = args[current_index]
            current_index += 1
            env_to_world = args[current_index]
            current_index += 1
            world_to_env = args[current_index]
            current_index += 1
            sample_cdf_ys = args[current_index]
            current_index += 1
            sample_cdf_xs = args[current_index]
            current_index += 1
            pdf_norm = args[current_index]
            current_index += 1
            values = redner.Texture3(\
                redner.float_ptr(values.data_ptr()),
                int(values.shape[2]), # width
                int(values.shape[1]), # height
                int(values.shape[0]), # num levels
                redner.float_ptr(envmap_uv_scale.data_ptr()))
            envmap = redner.EnvironmentMap(\
                values,
                redner.float_ptr(env_to_world.data_ptr()),
                redner.float_ptr(world_to_env.data_ptr()),
                redner.float_ptr(sample_cdf_ys.data_ptr()),
                redner.float_ptr(sample_cdf_xs.data_ptr()),
                pdf_norm)
        else:
            current_index += 7

        # Options
        num_samples = args[current_index]
        current_index += 1
        max_bounces = args[current_index]
        current_index += 1
        channels = args[current_index]
        current_index += 1
        sampler_type = args[current_index]
        current_index += 1
        use_primary_edge_sampling = args[current_index]
        current_index += 1
        use_secondary_edge_sampling = args[current_index]
        current_index += 1

        start = time.time()
        scene = redner.Scene(
            camera, shapes, materials, area_lights, envmap,
            pyredner.get_use_gpu(),
            pyredner.get_device().index
            if pyredner.get_device().index is not None else -1,
            use_primary_edge_sampling, use_secondary_edge_sampling)
        time_elapsed = time.time() - start
        if print_timing:
            print('Scene construction, time: %.5f s' % time_elapsed)

        # Make sure num_samples is a pair: (forward-pass samples, backward-pass samples)
        if isinstance(num_samples, int):
            num_samples = (num_samples, num_samples)

        options = redner.RenderOptions(seed, num_samples[0], max_bounces,
                                       channels, sampler_type)
        num_channels = redner.compute_num_channels(channels)
        rendered_image = torch.zeros(resolution[0],
                                     resolution[1],
                                     num_channels,
                                     device=pyredner.get_device())
        start = time.time()
        redner.render(scene, options,
                      redner.float_ptr(rendered_image.data_ptr()),
                      redner.float_ptr(0), None, redner.float_ptr(0))
        time_elapsed = time.time() - start
        if print_timing:
            print('Forward pass, time: %.5f s' % time_elapsed)

        # # For debugging
        # debug_img = torch.zeros(256, 256, 3)
        # redner.render(scene,
        #               options,
        #               redner.float_ptr(rendered_image.data_ptr()),
        #               redner.float_ptr(0),
        #               None,
        #               redner.float_ptr(debug_img.data_ptr()))
        # pyredner.imwrite(debug_img, 'debug.exr')
        # exit()

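        # Save the scene on the autograd context for the backward pass and return the rendered image.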
        ctx.shapes = shapes
        ctx.materials = materials
        ctx.area_lights = area_lights
        ctx.envmap = envmap
        ctx.scene = scene
        ctx.options = options
        ctx.num_samples = num_samples
        return rendered_image
Example No. 10
0
    def create_gradient_buffers(ctx):
        scene = ctx.scene
        options = ctx.options
        camera = ctx.camera

        buffers = Context()

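        # Camera gradient buffers mirror the camera parameterization: look-at vectors when use_look_at is set, otherwise the explicit 4x4 matrices.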
        if camera.use_look_at:
            buffers.d_cam_position = torch.zeros(3, device = pyredner.get_device())
            buffers.d_cam_look = torch.zeros(3, device = pyredner.get_device())
            buffers.d_cam_up = torch.zeros(3, device = pyredner.get_device())
            buffers.d_cam_to_world = None
            buffers.d_world_to_cam = None
        else:
            buffers.d_cam_position = None
            buffers.d_cam_look = None
            buffers.d_cam_up = None
            buffers.d_cam_to_world = torch.zeros(4, 4, device = pyredner.get_device())
            buffers.d_world_to_cam = torch.zeros(4, 4, device = pyredner.get_device())
        buffers.d_intrinsic_mat_inv = torch.zeros(3, 3, device = pyredner.get_device())
        buffers.d_intrinsic_mat = torch.zeros(3, 3, device = pyredner.get_device())
        if camera.use_look_at:
            buffers.d_camera = redner.DCamera(\
                redner.float_ptr(buffers.d_cam_position.data_ptr()),
                redner.float_ptr(buffers.d_cam_look.data_ptr()),
                redner.float_ptr(buffers.d_cam_up.data_ptr()),
                redner.float_ptr(0), # cam_to_world
                redner.float_ptr(0), # world_to_cam
                redner.float_ptr(buffers.d_intrinsic_mat_inv.data_ptr()),
                redner.float_ptr(buffers.d_intrinsic_mat.data_ptr()))
        else:
            buffers.d_camera = redner.DCamera(\
                redner.float_ptr(0), # pos
                redner.float_ptr(0), # look
                redner.float_ptr(0), # up
                redner.float_ptr(buffers.d_cam_to_world.data_ptr()),
                redner.float_ptr(buffers.d_world_to_cam.data_ptr()),
                redner.float_ptr(buffers.d_intrinsic_mat_inv.data_ptr()),
                redner.float_ptr(buffers.d_intrinsic_mat.data_ptr()))
        buffers.d_vertices_list = []
        buffers.d_uvs_list = []
        buffers.d_normals_list = []
        buffers.d_colors_list = []
        buffers.d_shapes = []
        for shape in ctx.shapes:
            num_vertices = shape.num_vertices
            num_uv_vertices = shape.num_uv_vertices
            num_normal_vertices = shape.num_normal_vertices
            d_vertices = torch.zeros(num_vertices, 3,
                device = pyredner.get_device())
            d_uvs = torch.zeros(num_uv_vertices, 2,
                device = pyredner.get_device()) if shape.has_uvs() else None
            d_normals = torch.zeros(num_normal_vertices, 3,
                device = pyredner.get_device()) if shape.has_normals() else None
            d_colors = torch.zeros(num_vertices, 3,
                device = pyredner.get_device()) if shape.has_colors() else None
            buffers.d_vertices_list.append(d_vertices)
            buffers.d_uvs_list.append(d_uvs)
            buffers.d_normals_list.append(d_normals)
            buffers.d_colors_list.append(d_colors)
            buffers.d_shapes.append(redner.DShape(\
                redner.float_ptr(d_vertices.data_ptr()),
                redner.float_ptr(d_uvs.data_ptr() if d_uvs is not None else 0),
                redner.float_ptr(d_normals.data_ptr() if d_normals is not None else 0),
                redner.float_ptr(d_colors.data_ptr() if d_colors is not None else 0)))

        buffers.d_diffuse_list = []
        buffers.d_diffuse_uv_scale_list = []
        buffers.d_specular_list = []
        buffers.d_specular_uv_scale_list = []
        buffers.d_roughness_list = []
        buffers.d_roughness_uv_scale_list = []
        buffers.d_generic_list = []
        buffers.d_generic_uv_scale_list = []
        buffers.d_normal_map_list = []
        buffers.d_normal_map_uv_scale_list = []
        buffers.d_materials = []
        for material in ctx.materials:
            if material.get_diffuse_size(0)[0] == 0:
                d_diffuse = [torch.zeros(3, device = pyredner.get_device())]
            else:
                d_diffuse = []
                for l in range(material.get_diffuse_levels()):
                    diffuse_size = material.get_diffuse_size(l)
                    d_diffuse.append(\
                        torch.zeros(diffuse_size[1],
                                    diffuse_size[0],
                                    3, device = pyredner.get_device()))

            if material.get_specular_size(0)[0] == 0:
                d_specular = [torch.zeros(3, device = pyredner.get_device())]
            else:
                d_specular = []
                for l in range(material.get_specular_levels()):
                    specular_size = material.get_specular_size(l)
                    d_specular.append(\
                        torch.zeros(specular_size[1],
                                    specular_size[0],
                                    3, device = pyredner.get_device()))

            if material.get_roughness_size(0)[0] == 0:
                d_roughness = [torch.zeros(1, device = pyredner.get_device())]
            else:
                d_roughness = []
                for l in range(material.get_roughness_levels()):
                    roughness_size = material.get_roughness_size(l)
                    d_roughness.append(\
                        torch.zeros(roughness_size[1],
                                    roughness_size[0],
                                    1, device = pyredner.get_device()))

            if material.get_generic_levels() == 0:
                d_generic = None
            else:
                d_generic = []
                for l in range(material.get_generic_levels()):
                    generic_size = material.get_generic_size(l)
                    d_generic.append(\
                        torch.zeros(generic_size[2],
                                    generic_size[1],
                                    generic_size[0], device = pyredner.get_device()))

            if material.get_normal_map_levels() == 0:
                d_normal_map = None
            else:
                d_normal_map = []
                for l in range(material.get_normal_map_levels()):
                    normal_map_size = material.get_normal_map_size(l)
                    d_normal_map.append(\
                        torch.zeros(normal_map_size[1],
                                    normal_map_size[0],
                                    3, device = pyredner.get_device()))

            buffers.d_diffuse_list.append(d_diffuse)
            buffers.d_specular_list.append(d_specular)
            buffers.d_roughness_list.append(d_roughness)
            buffers.d_generic_list.append(d_generic)
            buffers.d_normal_map_list.append(d_normal_map)
            d_diffuse_uv_scale = torch.zeros(2, device = pyredner.get_device())
            d_specular_uv_scale = torch.zeros(2, device = pyredner.get_device())
            d_roughness_uv_scale = torch.zeros(2, device = pyredner.get_device())
            buffers.d_diffuse_uv_scale_list.append(d_diffuse_uv_scale)
            buffers.d_specular_uv_scale_list.append(d_specular_uv_scale)
            buffers.d_roughness_uv_scale_list.append(d_roughness_uv_scale)
            if d_generic is None:
                d_generic_uv_scale = None
            else:
                d_generic_uv_scale = torch.zeros(2, device = pyredner.get_device())
            if d_normal_map is None:
                d_normal_map_uv_scale = None
            else:
                d_normal_map_uv_scale = torch.zeros(2, device = pyredner.get_device())

            buffers.d_generic_uv_scale_list.append(d_generic_uv_scale)
            buffers.d_normal_map_uv_scale_list.append(d_normal_map_uv_scale)
            if d_diffuse[0].dim() == 1:
                d_diffuse_tex = redner.Texture3(\
                    [redner.float_ptr(d_diffuse[0].data_ptr())],
                    [0],
                    [0],
                    3,
                    redner.float_ptr(d_diffuse_uv_scale.data_ptr()))
            else:
                d_diffuse_tex = redner.Texture3(\
                    [redner.float_ptr(x.data_ptr()) for x in d_diffuse],
                    [x.shape[1] for x in d_diffuse],
                    [x.shape[0] for x in d_diffuse],
                    3,
                    redner.float_ptr(d_diffuse_uv_scale.data_ptr()))

            if d_specular[0].dim() == 1:
                d_specular_tex = redner.Texture3(\
                    [redner.float_ptr(d_specular[0].data_ptr())],
                    [0],
                    [0],
                    3,
                    redner.float_ptr(d_specular_uv_scale.data_ptr()))
            else:
                d_specular_tex = redner.Texture3(\
                    [redner.float_ptr(x.data_ptr()) for x in d_specular],
                    [x.shape[1] for x in d_specular],
                    [x.shape[0] for x in d_specular],
                    3,
                    redner.float_ptr(d_specular_uv_scale.data_ptr()))

            if d_roughness[0].dim() == 1:
                d_roughness_tex = redner.Texture1(\
                    [redner.float_ptr(d_roughness[0].data_ptr())],
                    [0],
                    [0],
                    1,
                    redner.float_ptr(d_roughness_uv_scale.data_ptr()))
            else:
                d_roughness_tex = redner.Texture1(\
                    [redner.float_ptr(x.data_ptr()) for x in d_roughness],
                    [x.shape[1] for x in d_roughness],
                    [x.shape[0] for x in d_roughness],
                    1,
                    redner.float_ptr(d_roughness_uv_scale.data_ptr()))

            if d_generic is None:
                d_generic_tex = redner.TextureN(\
                    [], [], [], 0, redner.float_ptr(0))
            else:
                d_generic_tex = redner.TextureN(\
                    [redner.float_ptr(x.data_ptr()) for x in d_generic],
                    [x.shape[1] for x in d_generic],
                    [x.shape[0] for x in d_generic],
                    d_generic[0].shape[2],
                    redner.float_ptr(d_generic_uv_scale.data_ptr()))

            if d_normal_map is None:
                d_normal_map = redner.Texture3(\
                    [], [], [], 0, redner.float_ptr(0))
            else:
                d_normal_map = redner.Texture3(\
                    [redner.float_ptr(x.data_ptr()) for x in d_normal_map],
                    [x.shape[1] for x in d_normal_map],
                    [x.shape[0] for x in d_normal_map],
                    3,
                    redner.float_ptr(d_normal_map_uv_scale.data_ptr()))
            buffers.d_materials.append(redner.DMaterial(\
                d_diffuse_tex, d_specular_tex, d_roughness_tex,
                d_generic_tex, d_normal_map))

        buffers.d_intensity_list = []
        buffers.d_area_lights = []
        for light in ctx.area_lights:
            d_intensity = torch.zeros(3, device = pyredner.get_device())
            buffers.d_intensity_list.append(d_intensity)
            buffers.d_area_lights.append(\
                redner.DAreaLight(redner.float_ptr(d_intensity.data_ptr())))

        buffers.d_envmap = None
        if ctx.envmap is not None:
            envmap = ctx.envmap
            buffers.d_envmap_values = []
            for l in range(envmap.get_levels()):
                size = envmap.get_size(l)
                buffers.d_envmap_values.append(\
                    torch.zeros(size[1],
                                size[0],
                                3, device = pyredner.get_device()))
            buffers.d_envmap_uv_scale = torch.zeros(2, device = pyredner.get_device())
            d_envmap_tex = redner.Texture3(\
                [redner.float_ptr(x.data_ptr()) for x in buffers.d_envmap_values],
                [x.shape[1] for x in buffers.d_envmap_values],
                [x.shape[0] for x in buffers.d_envmap_values],
                3,
                redner.float_ptr(buffers.d_envmap_uv_scale.data_ptr()))
            buffers.d_world_to_env = torch.zeros(4, 4, device = pyredner.get_device())
            buffers.d_envmap = redner.DEnvironmentMap(\
                d_envmap_tex,
                redner.float_ptr(buffers.d_world_to_env.data_ptr()))

        buffers.d_scene = redner.DScene(buffers.d_camera,
                                        buffers.d_shapes,
                                        buffers.d_materials,
                                        buffers.d_area_lights,
                                        buffers.d_envmap,
                                        pyredner.get_use_gpu(),
                                        pyredner.get_device().index if pyredner.get_device().index is not None else -1)
        return buffers
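
The zero-filled tensors above stay referenced on `buffers` so redner can write derivatives directly into their storage. A minimal sketch of how such a buffer set is consumed, mirroring the backward passes later in this listing (`scene`, `options` and `grad_img` are assumed to come from the enclosing autograd context; the render signature follows the examples below and may differ in other versions):

    # Sketch only: accumulate d(loss)/d(scene) into the zero-filled buffers above.
    redner.render(scene, options,
                  redner.float_ptr(0),                    # no forward image requested
                  redner.float_ptr(grad_img.data_ptr()),  # upstream image gradient
                  buffers.d_scene,                        # derivative targets built above
                  redner.float_ptr(0))                    # no debug image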
Example No. 11
    def unpack_args(seed,
                    args,
                    use_primary_edge_sampling = None,
                    use_secondary_edge_sampling = None):
        """
            Given a list of serialized scene arguments, unpack
            all information into a Context.
        """

        current_index = 0
        num_shapes = args[current_index]
        current_index += 1
        num_materials = args[current_index]
        current_index += 1
        num_lights = args[current_index]
        current_index += 1

        cam_position = args[current_index]
        current_index += 1
        cam_look_at = args[current_index]
        current_index += 1
        cam_up = args[current_index]
        current_index += 1
        cam_to_world = args[current_index]
        current_index += 1
        world_to_cam = args[current_index]
        current_index += 1
        intrinsic_mat_inv = args[current_index]
        current_index += 1
        intrinsic_mat = args[current_index]
        current_index += 1
        clip_near = args[current_index]
        current_index += 1
        resolution = args[current_index]
        current_index += 1
        viewport = args[current_index]
        current_index += 1
        camera_type = args[current_index]
        current_index += 1
        if cam_to_world is None:
            camera = redner.Camera(resolution[1],
                                   resolution[0],
                                   redner.float_ptr(cam_position.data_ptr()),
                                   redner.float_ptr(cam_look_at.data_ptr()),
                                   redner.float_ptr(cam_up.data_ptr()),
                                   redner.float_ptr(0), # cam_to_world
                                   redner.float_ptr(0), # world_to_cam
                                   redner.float_ptr(intrinsic_mat_inv.data_ptr()),
                                   redner.float_ptr(intrinsic_mat.data_ptr()),
                                   clip_near,
                                   camera_type,
                                   redner.Vector2i(viewport[1], viewport[0]),
                                   redner.Vector2i(viewport[3], viewport[2]))
        else:
            camera = redner.Camera(resolution[1],
                                   resolution[0],
                                   redner.float_ptr(0), # cam_position
                                   redner.float_ptr(0), # cam_look_at
                                   redner.float_ptr(0), # cam_up
                                   redner.float_ptr(cam_to_world.data_ptr()),
                                   redner.float_ptr(world_to_cam.data_ptr()),
                                   redner.float_ptr(intrinsic_mat_inv.data_ptr()),
                                   redner.float_ptr(intrinsic_mat.data_ptr()),
                                   clip_near,
                                   camera_type,
                                   redner.Vector2i(viewport[1], viewport[0]),
                                   redner.Vector2i(viewport[3], viewport[2]))
        shapes = []
        for i in range(num_shapes):
            vertices = args[current_index]
            current_index += 1
            indices = args[current_index]
            current_index += 1
            uvs = args[current_index]
            current_index += 1
            normals = args[current_index]
            current_index += 1
            uv_indices = args[current_index]
            current_index += 1
            normal_indices = args[current_index]
            current_index += 1
            colors = args[current_index]
            current_index += 1
            material_id = args[current_index]
            current_index += 1
            light_id = args[current_index]
            current_index += 1
            assert(vertices.is_contiguous())
            assert(indices.is_contiguous())
            if uvs is not None:
                assert(uvs.is_contiguous())
            if normals is not None:
                assert(normals.is_contiguous())
            if uv_indices is not None:
                assert(uv_indices.is_contiguous())
            if normal_indices is not None:
                assert(normal_indices.is_contiguous())
            shapes.append(redner.Shape(\
                redner.float_ptr(vertices.data_ptr()),
                redner.int_ptr(indices.data_ptr()),
                redner.float_ptr(uvs.data_ptr() if uvs is not None else 0),
                redner.float_ptr(normals.data_ptr() if normals is not None else 0),
                redner.int_ptr(uv_indices.data_ptr() if uv_indices is not None else 0),
                redner.int_ptr(normal_indices.data_ptr() if normal_indices is not None else 0),
                redner.float_ptr(colors.data_ptr() if colors is not None else 0),
                int(vertices.shape[0]),
                int(uvs.shape[0]) if uvs is not None else 0,
                int(normals.shape[0]) if normals is not None else 0,
                int(indices.shape[0]),
                material_id,
                light_id))

        materials = []
        for i in range(num_materials):
            num_levels = args[current_index]
            current_index += 1
            diffuse_reflectance = []
            for j in range(num_levels):
                diffuse_reflectance.append(args[current_index])
                current_index += 1
            diffuse_uv_scale = args[current_index]
            current_index += 1
            
            num_levels = args[current_index]
            current_index += 1
            specular_reflectance = []
            for j in range(num_levels):
                specular_reflectance.append(args[current_index])
                current_index += 1
            specular_uv_scale = args[current_index]
            current_index += 1
            
            num_levels = args[current_index]
            current_index += 1
            roughness = []
            for j in range(num_levels):
                roughness.append(args[current_index])
                current_index += 1
            roughness_uv_scale = args[current_index]
            current_index += 1

            num_levels = args[current_index]
            current_index += 1
            generic_texture = []
            if num_levels > 0:
                for j in range(num_levels):
                    generic_texture.append(args[current_index])
                    current_index += 1
                generic_uv_scale = args[current_index]
                current_index += 1
            else:
                generic_uv_scale = None

            num_levels = args[current_index]
            current_index += 1
            normal_map = []
            if num_levels > 0:
                for j in range(num_levels):
                    normal_map.append(args[current_index])
                    current_index += 1
                normal_map_uv_scale = args[current_index]
                current_index += 1
            else:
                normal_map_uv_scale = None

            compute_specular_lighting = args[current_index]
            current_index += 1
            two_sided = args[current_index]
            current_index += 1
            use_vertex_color = args[current_index]
            current_index += 1

            if diffuse_reflectance[0].dim() == 1:
                # Constant texture
                diffuse_reflectance = redner.Texture3(\
                    [redner.float_ptr(diffuse_reflectance[0].data_ptr())],
                    [0], [0], 3,
                    redner.float_ptr(diffuse_uv_scale.data_ptr()))
            else:
                assert(diffuse_reflectance[0].dim() == 3)
                diffuse_reflectance = redner.Texture3(\
                    [redner.float_ptr(x.data_ptr()) for x in diffuse_reflectance],
                    [x.shape[1] for x in diffuse_reflectance],
                    [x.shape[0] for x in diffuse_reflectance],
                    3,
                    redner.float_ptr(diffuse_uv_scale.data_ptr()))

            if specular_reflectance[0].dim() == 1:
                # Constant texture
                specular_reflectance = redner.Texture3(\
                    [redner.float_ptr(specular_reflectance[0].data_ptr())],
                    [0], [0], 3,
                    redner.float_ptr(specular_uv_scale.data_ptr()))
            else:
                assert(specular_reflectance[0].dim() == 3)
                specular_reflectance = redner.Texture3(\
                    [redner.float_ptr(x.data_ptr()) for x in specular_reflectance],
                    [x.shape[1] for x in specular_reflectance],
                    [x.shape[0] for x in specular_reflectance],
                    3,
                    redner.float_ptr(specular_uv_scale.data_ptr()))

            if roughness[0].dim() == 1:
                # Constant texture
                roughness = redner.Texture1(\
                    [redner.float_ptr(roughness[0].data_ptr())],
                    [0], [0], 1,
                    redner.float_ptr(roughness_uv_scale.data_ptr()))
            else:
                assert(roughness[0].dim() == 3)
                roughness = redner.Texture1(\
                    [redner.float_ptr(x.data_ptr()) for x in roughness],
                    [x.shape[1] for x in roughness],
                    [x.shape[0] for x in roughness],
                    1,
                    redner.float_ptr(roughness_uv_scale.data_ptr()))

            if len(generic_texture) > 0:
                assert(generic_texture[0].dim() == 3)
                generic_texture = redner.TextureN(\
                    [redner.float_ptr(x.data_ptr()) for x in generic_texture],
                    [x.shape[1] for x in generic_texture],
                    [x.shape[0] for x in generic_texture],
                    generic_texture[0].shape[2],
                    redner.float_ptr(generic_uv_scale.data_ptr()))
            else:
                generic_texture = redner.TextureN(\
                    [], [], [], 0, redner.float_ptr(0))

            if len(normal_map) > 0:
                assert(normal_map[0].dim() == 3)
                normal_map = redner.Texture3(\
                    [redner.float_ptr(x.data_ptr()) for x in normal_map],
                    [x.shape[1] for x in normal_map],
                    [x.shape[0] for x in normal_map],
                    3,
                    redner.float_ptr(normal_map_uv_scale.data_ptr()))
            else:
                normal_map = redner.Texture3(\
                    [], [], [], 0, redner.float_ptr(0))
            materials.append(redner.Material(\
                diffuse_reflectance,
                specular_reflectance,
                roughness,
                generic_texture,
                normal_map,
                compute_specular_lighting,
                two_sided,
                use_vertex_color))

        area_lights = []
        for i in range(num_lights):
            shape_id = args[current_index]
            current_index += 1
            intensity = args[current_index]
            current_index += 1
            two_sided = args[current_index]
            current_index += 1
            directly_visible = args[current_index]
            current_index += 1

            area_lights.append(redner.AreaLight(\
                shape_id,
                redner.float_ptr(intensity.data_ptr()),
                two_sided,
                directly_visible))

        envmap = None
        if args[current_index] is not None:
            num_levels = args[current_index]
            current_index += 1
            values = []
            for j in range(num_levels):
                values.append(args[current_index])
                current_index += 1
            envmap_uv_scale = args[current_index]
            current_index += 1
            env_to_world = args[current_index]
            current_index += 1
            world_to_env = args[current_index]
            current_index += 1
            sample_cdf_ys = args[current_index]
            current_index += 1
            sample_cdf_xs = args[current_index]
            current_index += 1
            pdf_norm = args[current_index]
            current_index += 1
            directly_visible = args[current_index]
            current_index += 1
            values = redner.Texture3(\
                [redner.float_ptr(x.data_ptr()) for x in values],
                [x.shape[1] for x in values], # width
                [x.shape[0] for x in values], # height
                3, # channels
                redner.float_ptr(envmap_uv_scale.data_ptr()))
            envmap = redner.EnvironmentMap(\
                values,
                redner.float_ptr(env_to_world.data_ptr()),
                redner.float_ptr(world_to_env.data_ptr()),
                redner.float_ptr(sample_cdf_ys.data_ptr()),
                redner.float_ptr(sample_cdf_xs.data_ptr()),
                pdf_norm,
                directly_visible)
        else:
            current_index += 1

        # Options
        num_samples = args[current_index]
        current_index += 1
        max_bounces = args[current_index]
        current_index += 1
        channels = args[current_index]
        current_index += 1
        sampler_type = args[current_index]
        current_index += 1
        use_primary_edge_sampling_ = args[current_index]
        current_index += 1
        use_secondary_edge_sampling_ = args[current_index]
        current_index += 1
        sample_pixel_center = args[current_index]
        current_index += 1

        if use_primary_edge_sampling is None:
            use_primary_edge_sampling = use_primary_edge_sampling_
        if use_secondary_edge_sampling is None:
            use_secondary_edge_sampling = use_secondary_edge_sampling_

        start = time.time()
        scene = redner.Scene(camera,
                             shapes,
                             materials,
                             area_lights,
                             envmap,
                             pyredner.get_use_gpu(),
                             pyredner.get_device().index if pyredner.get_device().index is not None else -1,
                             use_primary_edge_sampling,
                             use_secondary_edge_sampling)
        time_elapsed = time.time() - start
        if get_print_timing():
            print('Scene construction, time: %.5f s' % time_elapsed)

        # check that num_samples is a tuple
        if isinstance(num_samples, int):
            num_samples = (num_samples, num_samples)

        options = redner.RenderOptions(seed,
                                       num_samples[0],
                                       max_bounces,
                                       channels,
                                       sampler_type,
                                       sample_pixel_center)

        ctx = Context()
        ctx.channels = channels
        ctx.options = options
        ctx.resolution = resolution
        ctx.viewport = viewport
        ctx.scene = scene
        ctx.camera = camera
        ctx.shapes = shapes
        ctx.materials = materials
        ctx.area_lights = area_lights
        ctx.envmap = envmap
        ctx.num_samples = num_samples

        return ctx
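
A hedged usage sketch for `unpack_args`: it expects the flat argument list produced by pyredner's scene serialization. `serialize_scene` below follows the common pyredner API; exact names and signatures can differ between versions and forks.

    # Assumed setup: `scene` is a pyredner.Scene assembled from Camera/Shape/Material objects.
    scene_args = pyredner.RenderFunction.serialize_scene(
        scene = scene,
        num_samples = 16,
        max_bounces = 1)
    ctx = RenderFunction.unpack_args(seed = 0, args = scene_args)
    # ctx now carries the native redner.Scene, camera, shapes, materials and RenderOptions.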
Example No. 12
    def backward(ctx,
                 grad_img):
        if not grad_img.is_contiguous():
            grad_img = grad_img.contiguous()
        scene = ctx.scene
        options = ctx.options

        d_cam_to_world = torch.zeros(4, 4)
        d_world_to_cam = torch.zeros(4, 4)
        d_ndc_to_cam = torch.zeros(3, 3)
        d_cam_to_ndc = torch.zeros(3, 3)
        d_camera = redner.DCamera(redner.float_ptr(d_cam_to_world.data_ptr()),
                                  redner.float_ptr(d_world_to_cam.data_ptr()),
                                  redner.float_ptr(d_ndc_to_cam.data_ptr()),
                                  redner.float_ptr(d_cam_to_ndc.data_ptr()))
        d_vertices_list = []
        d_uvs_list = []
        d_normals_list = []
        d_shapes = []
        for shape in ctx.shapes:
            num_vertices = shape.num_vertices
            d_vertices = torch.zeros(num_vertices, 3,
                device = pyredner.get_device())
            d_uvs = torch.zeros(num_vertices, 2,
                device = pyredner.get_device()) if shape.has_uvs() else None
            d_normals = torch.zeros(num_vertices, 3,
                device = pyredner.get_device()) if shape.has_normals() else None
            d_vertices_list.append(d_vertices)
            d_uvs_list.append(d_uvs)
            d_normals_list.append(d_normals)
            d_shapes.append(redner.DShape(\
                redner.float_ptr(d_vertices.data_ptr()),
                redner.float_ptr(d_uvs.data_ptr() if d_uvs is not None else 0),
                redner.float_ptr(d_normals.data_ptr() if d_normals is not None else 0)))

        d_diffuse_list = []
        d_specular_list = []
        d_roughness_list = []
        d_materials = []
        for material in ctx.materials:
            diffuse_size = material.get_diffuse_size()
            specular_size = material.get_specular_size()
            roughness_size = material.get_roughness_size()
            if diffuse_size[0] == 0:
                d_diffuse = torch.zeros(3, device = pyredner.get_device())
            else:
                d_diffuse = torch.zeros(diffuse_size[2],
                                        diffuse_size[1],
                                        diffuse_size[0],
                                        3, device = pyredner.get_device())
            if specular_size[0] == 0:
                d_specular = torch.zeros(3, device = pyredner.get_device())
            else:
                d_specular = torch.zeros(specular_size[2],
                                         specular_size[1],
                                         specular_size[0],
                                         3, device = pyredner.get_device())
            if roughness_size[0] == 0:
                d_roughness = torch.zeros(1, device = pyredner.get_device())
            else:
                d_roughness = torch.zeros(roughness_size[2],
                                          roughness_size[1],
                                          roughness_size[0],
                                          1, device = pyredner.get_device())
            d_diffuse_list.append(d_diffuse)
            d_specular_list.append(d_specular)
            d_roughness_list.append(d_roughness)
            d_diffuse_uv_scale = torch.zeros(2)
            d_specular_uv_scale = torch.zeros(2)
            d_roughness_uv_scale = torch.zeros(2)
            d_diffuse_tex = redner.Texture3(\
                redner.float_ptr(d_diffuse.data_ptr()),
                diffuse_size[0], diffuse_size[1], diffuse_size[2],
                redner.float_ptr(d_diffuse_uv_scale.data_ptr()))
            d_specular_tex = redner.Texture3(\
                redner.float_ptr(d_specular.data_ptr()),
                specular_size[0], specular_size[1], specular_size[2],
                redner.float_ptr(d_specular_uv_scale.data_ptr()))
            d_roughness_tex = redner.Texture1(\
                redner.float_ptr(d_roughness.data_ptr()),
                roughness_size[0], roughness_size[1], roughness_size[2],
                redner.float_ptr(d_roughness_uv_scale.data_ptr()))
            d_materials.append(redner.DMaterial(\
                d_diffuse_tex, d_specular_tex, d_roughness_tex))

        d_intensity_list = []
        d_area_lights = []
        for light in ctx.area_lights:
            d_intensity = torch.zeros(3, device = pyredner.get_device())
            d_intensity_list.append(d_intensity)
            d_area_lights.append(\
                redner.DAreaLight(redner.float_ptr(d_intensity.data_ptr())))

        d_envmap = None
        if ctx.envmap is not None:
            envmap = ctx.envmap
            size = envmap.get_size()
            d_envmap_values = \
                torch.zeros(size[2],
                            size[1],
                            size[0],
                            3,
                            device = pyredner.get_device())
            d_envmap_uv_scale = torch.zeros(2)
            d_envmap_tex = redner.Texture3(\
                redner.float_ptr(d_envmap_values.data_ptr()),
                size[0], size[1], size[2],
                redner.float_ptr(d_envmap_uv_scale.data_ptr()))
            d_world_to_env = torch.zeros(4, 4)
            d_envmap = redner.DEnvironmentMap(\
                d_envmap_tex,
                redner.float_ptr(d_world_to_env.data_ptr()))

        d_scene = redner.DScene(d_camera,
                                d_shapes,
                                d_materials,
                                d_area_lights,
                                d_envmap,
                                pyredner.get_use_gpu())
        if not get_use_correlated_random_number():
            # Decouple the forward/backward random numbers by adding a big prime number
            options.seed += 1000003
        start = time.time()
        redner.render(scene, options,
                      redner.float_ptr(0),
                      redner.float_ptr(grad_img.data_ptr()),
                      d_scene,
                      redner.float_ptr(0))
        time_elapsed = time.time() - start
        if print_timing:
            print('Backward pass, time: %.5f s' % time_elapsed)

        # # For debugging
        # pyredner.imwrite(grad_img, 'grad_img.exr')
        # grad_img = torch.ones(256, 256, 3)
        # debug_img = torch.zeros(256, 256, 3)
        # redner.render(scene, options,
        #               redner.float_ptr(0),
        #               redner.float_ptr(grad_img.data_ptr()),
        #               d_scene,
        #               redner.float_ptr(debug_img.data_ptr()))
        # pyredner.imwrite(debug_img, 'debug.exr')
        # pyredner.imwrite(-debug_img, 'debug_.exr')
        # exit()

        ret_list = []
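        # One return slot per forward() input, in the order they were unpacked;
        # None means no gradient flows back to that argument.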
        ret_list.append(None) # seed
        ret_list.append(None) # num_shapes
        ret_list.append(None) # num_materials
        ret_list.append(None) # num_lights
        ret_list.append(d_cam_to_world)
        ret_list.append(d_world_to_cam)
        ret_list.append(d_ndc_to_cam)
        ret_list.append(d_cam_to_ndc)
        ret_list.append(None) # clip near
        ret_list.append(None) # resolution
        ret_list.append(None) # fisheye

        num_shapes = len(ctx.shapes)
        for i in range(num_shapes):
            ret_list.append(d_vertices_list[i])
            ret_list.append(None) # indices
            ret_list.append(d_uvs_list[i])
            ret_list.append(d_normals_list[i])
            ret_list.append(None) # material id
            ret_list.append(None) # light id

        num_materials = len(ctx.materials)
        for i in range(num_materials):
            ret_list.append(d_diffuse_list[i])
            ret_list.append(None) # diffuse_uv_scale
            ret_list.append(d_specular_list[i])
            ret_list.append(None) # specular_uv_scale
            ret_list.append(d_roughness_list[i])
            ret_list.append(None) # roughness_uv_scale
            ret_list.append(None) # two sided

        num_area_lights = len(ctx.area_lights)
        for i in range(num_area_lights):
            ret_list.append(None) # shape id
            ret_list.append(d_intensity_list[i].cpu())
            ret_list.append(None) # two sided
            ret_list.append(None)

        if ctx.envmap is not None:
            ret_list.append(d_envmap_values)
            ret_list.append(None) # uv_scale
            ret_list.append(None) # env_to_world
            ret_list.append(d_world_to_env)
            ret_list.append(None) # sample_cdf_ys
            ret_list.append(None) # sample_cdf_xs
            ret_list.append(None) # pdf_norm
        else:
            ret_list.append(None)
            ret_list.append(None)
            ret_list.append(None)
            ret_list.append(None)
            ret_list.append(None)
            ret_list.append(None)
            ret_list.append(None)
        
        ret_list.append(None) # num samples
        ret_list.append(None) # num bounces
        ret_list.append(None) # channels

        return tuple(ret_list)
Example No. 13
    def forward(ctx, seed, *args):
        """
            Forward rendering pass: given a serialized scene, render and return an image.
        """
        # Unpack arguments
        current_index = 0
        num_shapes = args[current_index]
        current_index += 1
        num_materials = args[current_index]
        current_index += 1
        num_lights = args[current_index]
        current_index += 1

        cam_position = args[current_index]
        current_index += 1
        cam_look_at = args[current_index]
        current_index += 1
        cam_up = args[current_index]
        current_index += 1
        cam_to_world = args[current_index]
        current_index += 1
        world_to_cam = args[current_index]
        current_index += 1
        intrinsic_mat_inv = args[current_index]
        current_index += 1
        intrinsic_mat = args[current_index]
        current_index += 1
        clip_near = args[current_index]
        current_index += 1
        resolution = args[current_index]
        current_index += 1
        camera_type = args[current_index]
        current_index += 1
        if cam_to_world is None:
            camera = redner.Camera(
                resolution[1],
                resolution[0],
                redner.float_ptr(cam_position.data_ptr()),
                redner.float_ptr(cam_look_at.data_ptr()),
                redner.float_ptr(cam_up.data_ptr()),
                redner.float_ptr(0),  # cam_to_world
                redner.float_ptr(0),  # world_to_cam
                redner.float_ptr(intrinsic_mat_inv.data_ptr()),
                redner.float_ptr(intrinsic_mat.data_ptr()),
                clip_near,
                camera_type)
        else:
            camera = redner.Camera(
                resolution[1],
                resolution[0],
                redner.float_ptr(0),  # cam_position
                redner.float_ptr(0),  # cam_look_at
                redner.float_ptr(0),  # cam_up
                redner.float_ptr(cam_to_world.data_ptr()),
                redner.float_ptr(world_to_cam.data_ptr()),
                redner.float_ptr(intrinsic_mat_inv.data_ptr()),
                redner.float_ptr(intrinsic_mat.data_ptr()),
                clip_near,
                camera_type)
        shapes = []
        for i in range(num_shapes):
            vertices = args[current_index]
            current_index += 1
            indices = args[current_index]
            current_index += 1
            uvs = args[current_index]
            current_index += 1
            normals = args[current_index]
            current_index += 1
            uv_indices = args[current_index]
            current_index += 1
            normal_indices = args[current_index]
            current_index += 1
            colors = args[current_index]
            current_index += 1
            material_id = args[current_index]
            current_index += 1
            light_id = args[current_index]
            current_index += 1
            assert (vertices.is_contiguous())
            assert (indices.is_contiguous())
            if uvs is not None:
                assert (uvs.is_contiguous())
            if normals is not None:
                assert (normals.is_contiguous())
            if uv_indices is not None:
                assert (uv_indices.is_contiguous())
            if normal_indices is not None:
                assert (normal_indices.is_contiguous())
            shapes.append(redner.Shape(\
                redner.float_ptr(vertices.data_ptr()),
                redner.int_ptr(indices.data_ptr()),
                redner.float_ptr(uvs.data_ptr() if uvs is not None else 0),
                redner.float_ptr(normals.data_ptr() if normals is not None else 0),
                redner.int_ptr(uv_indices.data_ptr() if uv_indices is not None else 0),
                redner.int_ptr(normal_indices.data_ptr() if normal_indices is not None else 0),
                redner.float_ptr(colors.data_ptr() if colors is not None else 0),
                int(vertices.shape[0]),
                int(uvs.shape[0]) if uvs is not None else 0,
                int(normals.shape[0]) if normals is not None else 0,
                int(indices.shape[0]),
                material_id,
                light_id))
        materials = []
        for i in range(num_materials):
            diffuse_reflectance = args[current_index]
            current_index += 1
            diffuse_uv_scale = args[current_index]
            current_index += 1
            diffuse_mesh_colors_resolution = args[current_index]
            current_index += 1
            specular_reflectance = args[current_index]
            current_index += 1
            specular_uv_scale = args[current_index]
            current_index += 1
            specular_mesh_colors_resolution = args[current_index]
            current_index += 1
            roughness = args[current_index]
            current_index += 1
            roughness_uv_scale = args[current_index]
            current_index += 1
            roughness_mesh_colors_resolution = args[current_index]
            current_index += 1
            generic_texture = args[current_index]
            current_index += 1
            generic_uv_scale = args[current_index]
            current_index += 1
            generic_mesh_colors_resolution = args[current_index]
            current_index += 1
            normal_map = args[current_index]
            current_index += 1
            normal_map_uv_scale = args[current_index]
            current_index += 1
            normal_map_mesh_colors_resolution = args[current_index]
            current_index += 1
            compute_specular_lighting = args[current_index]
            current_index += 1
            two_sided = args[current_index]
            current_index += 1
            use_vertex_color = args[current_index]
            current_index += 1

            assert (diffuse_reflectance.is_contiguous())
            if diffuse_reflectance.dim() == 1:
                num_levels = 0
                height = 0
                if diffuse_mesh_colors_resolution > 0:
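                    # Mesh-colors layout: each triangle stores
                    # (res + 1) * (res + 2) / 2 samples of 3 floats, so the flat
                    # tensor length decodes into a per-triangle "height" below.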
                    num_levels = 1
                    height = int(diffuse_reflectance.size()[0] / 3 / int(
                        ((diffuse_mesh_colors_resolution + 1) *
                         (diffuse_mesh_colors_resolution + 2)) / 2))

                diffuse_reflectance = redner.Texture3(\
                    redner.float_ptr(diffuse_reflectance.data_ptr()),
                    0,
                    height,
                    3,
                    num_levels,
                    diffuse_mesh_colors_resolution,
                    redner.float_ptr(diffuse_uv_scale.data_ptr()))
            else:
                diffuse_reflectance = redner.Texture3(\
                    redner.float_ptr(diffuse_reflectance.data_ptr()),
                    int(diffuse_reflectance.shape[2]), # width
                    int(diffuse_reflectance.shape[1]), # height
                    int(diffuse_reflectance.shape[3]), # channels
                    int(diffuse_reflectance.shape[0]), # num levels
                    0, # mesh_colors_resolution
                    redner.float_ptr(diffuse_uv_scale.data_ptr()))

            assert (specular_reflectance.is_contiguous())
            if specular_reflectance.dim() == 1:
                num_levels = 0
                height = 0
                if specular_mesh_colors_resolution > 0:
                    num_levels = 1
                    height = int(specular_reflectance.size()[0] / 3 / int(
                        ((specular_mesh_colors_resolution + 1) *
                         (specular_mesh_colors_resolution + 2)) / 2))

                specular_reflectance = redner.Texture3(\
                    redner.float_ptr(specular_reflectance.data_ptr()),
                    0,
                    height,
                    3,
                    num_levels,
                    specular_mesh_colors_resolution,
                    redner.float_ptr(specular_uv_scale.data_ptr()))
            else:
                specular_reflectance = redner.Texture3(\
                    redner.float_ptr(specular_reflectance.data_ptr()),
                    int(specular_reflectance.shape[2]), # width
                    int(specular_reflectance.shape[1]), # height
                    int(specular_reflectance.shape[3]), # channels
                    int(specular_reflectance.shape[0]), # num levels
                    0, # mesh_colors_resolution
                    redner.float_ptr(specular_uv_scale.data_ptr()))

            assert (roughness.is_contiguous())
            if roughness.dim() == 1:
                num_levels = 0
                height = 0
                if roughness_mesh_colors_resolution > 0:
                    num_levels = 1
                    height = int(roughness.size()[0] / int(
                        ((roughness_mesh_colors_resolution + 1) *
                         (roughness_mesh_colors_resolution + 2)) / 2))

                roughness = redner.Texture1(\
                    redner.float_ptr(roughness.data_ptr()),
                    0,
                    height,
                    1,
                    num_levels,
                    roughness_mesh_colors_resolution,
                    redner.float_ptr(roughness_uv_scale.data_ptr()))
            else:
                assert (roughness.dim() == 4)
                roughness = redner.Texture1(\
                    redner.float_ptr(roughness.data_ptr()),
                    int(roughness.shape[2]), # width
                    int(roughness.shape[1]), # height
                    int(roughness.shape[3]), # channels
                    int(roughness.shape[0]), # num levels
                    0, # mesh_colors_resolution
                    redner.float_ptr(roughness_uv_scale.data_ptr()))

            if generic_texture is not None:
                if generic_texture.dim() == 1:
                    num_levels = 0
                    height = 0
                    if generic_mesh_colors_resolution > 0:
                        num_levels = 1
                        height = int(
                            generic_texture.size()[0] /
                            int(generic_texture.shape[3]) / int(
                                ((generic_mesh_colors_resolution + 1) *
                                 (generic_mesh_colors_resolution + 2)) / 2))

                    generic_texture = redner.TextureN(\
                        redner.float_ptr(generic_texture.data_ptr()),
                        0,
                        height,
                        int(generic_texture.shape[3]),
                        num_levels,
                        generic_mesh_colors_resolution,
                        redner.float_ptr(generic_uv_scale.data_ptr()))
                else:
                    assert (generic_texture.dim() == 4)
                    generic_texture = redner.TextureN(\
                        redner.float_ptr(generic_texture.data_ptr()),
                        int(generic_texture.shape[2]), # width
                        int(generic_texture.shape[1]), # height
                        int(generic_texture.shape[3]), # channels
                        int(generic_texture.shape[0]), # num levels
                        0, # mesh_colors_resolution
                        redner.float_ptr(generic_uv_scale.data_ptr()))
            else:
                generic_texture = redner.TextureN(\
                    redner.float_ptr(0), 0, 0, 0, 0, 0, redner.float_ptr(0))

            if normal_map is not None:
                if normal_map.dim() == 1:
                    num_levels = 0
                    height = 0
                    if normal_map_mesh_colors_resolution > 0:
                        num_levels = 1
                        height = int(normal_map.size()[0] / 3 / int(
                            ((normal_map_mesh_colors_resolution + 1) *
                             (normal_map_mesh_colors_resolution + 2)) / 2))

                    normal_map = redner.Texture3(\
                        redner.float_ptr(normal_map.data_ptr()),
                        0,
                        height,
                        3,
                        num_levels,
                        normal_map_mesh_colors_resolution,
                        redner.float_ptr(normal_map_uv_scale.data_ptr()))
                else:
                    assert (normal_map.dim() == 4)
                    normal_map = redner.Texture3(\
                        redner.float_ptr(normal_map.data_ptr()),
                        int(normal_map.shape[2]), # width
                        int(normal_map.shape[1]), # height
                        int(normal_map.shape[3]), # channels
                        int(normal_map.shape[0]), # num levels
                        0, # mesh_colors_resolution
                        redner.float_ptr(normal_map_uv_scale.data_ptr()))
            else:
                normal_map = redner.Texture3(\
                    redner.float_ptr(0), 0, 0, 0, 0, 0, redner.float_ptr(0))

            materials.append(redner.Material(\
                diffuse_reflectance,
                specular_reflectance,
                roughness,
                generic_texture,
                normal_map,
                compute_specular_lighting,
                two_sided,
                use_vertex_color))

        area_lights = []
        for i in range(num_lights):
            shape_id = args[current_index]
            current_index += 1
            intensity = args[current_index]
            current_index += 1
            two_sided = args[current_index]
            current_index += 1

            area_lights.append(redner.AreaLight(\
                shape_id,
                redner.float_ptr(intensity.data_ptr()),
                two_sided))

        envmap = None
        if args[current_index] is not None:
            values = args[current_index]
            current_index += 1
            envmap_uv_scale = args[current_index]
            current_index += 1
            env_to_world = args[current_index]
            current_index += 1
            world_to_env = args[current_index]
            current_index += 1
            sample_cdf_ys = args[current_index]
            current_index += 1
            sample_cdf_xs = args[current_index]
            current_index += 1
            pdf_norm = args[current_index]
            current_index += 1
            values = redner.Texture3(\
                redner.float_ptr(values.data_ptr()),
                int(values.shape[2]), # width
                int(values.shape[1]), # height
                0, # channels
                int(values.shape[0]), # num levels
                0, # mesh_colors_resolution
                redner.float_ptr(envmap_uv_scale.data_ptr()))
            envmap = redner.EnvironmentMap(\
                values,
                redner.float_ptr(env_to_world.data_ptr()),
                redner.float_ptr(world_to_env.data_ptr()),
                redner.float_ptr(sample_cdf_ys.data_ptr()),
                redner.float_ptr(sample_cdf_xs.data_ptr()),
                pdf_norm)
        else:
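            # No environment map: skip the seven serialized slots it would occupy.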
            current_index += 7

        # Options
        num_samples = args[current_index]
        current_index += 1
        max_bounces = args[current_index]
        current_index += 1
        channels = args[current_index]
        current_index += 1
        sampler_type = args[current_index]
        current_index += 1
        use_primary_edge_sampling = args[current_index]
        current_index += 1
        use_secondary_edge_sampling = args[current_index]
        current_index += 1

        start = time.time()
        scene = redner.Scene(
            camera, shapes, materials, area_lights, envmap,
            pyredner.get_use_gpu(),
            pyredner.get_device().index
            if pyredner.get_device().index is not None else -1,
            use_primary_edge_sampling, use_secondary_edge_sampling)
        time_elapsed = time.time() - start
        if print_timing:
            print('Scene construction, time: %.5f s' % time_elapsed)

        # check that num_samples is a tuple
        if isinstance(num_samples, int):
            num_samples = (num_samples, num_samples)

        options = redner.RenderOptions(seed, num_samples[0], max_bounces,
                                       channels, sampler_type)
        num_channels = redner.compute_num_channels(
            channels, scene.max_generic_texture_dimension)
        rendered_image = torch.zeros(resolution[0],
                                     resolution[1],
                                     num_channels,
                                     device=pyredner.get_device())
        start = time.time()
        redner.render(scene, options,
                      redner.float_ptr(rendered_image.data_ptr()),
                      redner.float_ptr(0), None, redner.float_ptr(0))
        time_elapsed = time.time() - start
        if print_timing:
            print('Forward pass, time: %.5f s' % time_elapsed)

        # # For debugging
        # debug_img = torch.zeros(256, 256, 3)
        # redner.render(scene,
        #               options,
        #               redner.float_ptr(rendered_image.data_ptr()),
        #               redner.float_ptr(0),
        #               None,
        #               redner.float_ptr(debug_img.data_ptr()))
        # pyredner.imwrite(debug_img, 'debug.exr')
        # exit()

        ctx.camera = camera
        ctx.shapes = shapes
        ctx.materials = materials
        ctx.area_lights = area_lights
        ctx.envmap = envmap
        ctx.scene = scene
        ctx.options = options
        ctx.num_samples = num_samples
        ctx.args = args  # Important to prevent GC from deallocating the tensors
        return rendered_image
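
A hedged end-to-end sketch showing how this forward pass pairs with the backward pass in the next example; the `RenderFunction` wrapper and the `serialize_scene` helper are assumed from the standard pyredner API and may differ in this fork.

    # Gradients flow back into the tensors referenced by scene_args
    # (vertices, reflectances, camera matrices, light intensities, ...).
    scene_args = pyredner.RenderFunction.serialize_scene(
        scene = scene, num_samples = 4, max_bounces = 1)
    img = pyredner.RenderFunction.apply(0, *scene_args)   # seed = 0, returns an HxWxC image
    loss = (img - target).pow(2).sum()
    loss.backward()                                        # runs the backward pass below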
Example No. 14
    def backward(ctx, grad_img):
        if not grad_img.is_contiguous():
            grad_img = grad_img.contiguous()
        scene = ctx.scene
        options = ctx.options
        camera = ctx.camera

        if camera.use_look_at:
            d_cam_position = torch.zeros(3, device=pyredner.get_device())
            d_cam_look = torch.zeros(3, device=pyredner.get_device())
            d_cam_up = torch.zeros(3, device=pyredner.get_device())
            d_cam_to_world = None
            d_world_to_cam = None
        else:
            d_cam_position = None
            d_cam_look = None
            d_cam_up = None
            d_cam_to_world = torch.zeros(4, 4, device=pyredner.get_device())
            d_world_to_cam = torch.zeros(4, 4, device=pyredner.get_device())
        d_intrinsic_mat_inv = torch.zeros(3, 3, device=pyredner.get_device())
        d_intrinsic_mat = torch.zeros(3, 3, device=pyredner.get_device())
        if camera.use_look_at:
            d_camera = redner.DCamera(
                redner.float_ptr(d_cam_position.data_ptr()),
                redner.float_ptr(d_cam_look.data_ptr()),
                redner.float_ptr(d_cam_up.data_ptr()),
                redner.float_ptr(0),  # cam_to_world
                redner.float_ptr(0),  # world_to_cam
                redner.float_ptr(d_intrinsic_mat_inv.data_ptr()),
                redner.float_ptr(d_intrinsic_mat.data_ptr()))
        else:
            d_camera = redner.DCamera(
                redner.float_ptr(0),  # pos
                redner.float_ptr(0),  # look
                redner.float_ptr(0),  # up
                redner.float_ptr(d_cam_to_world.data_ptr()),
                redner.float_ptr(d_world_to_cam.data_ptr()),
                redner.float_ptr(d_intrinsic_mat_inv.data_ptr()),
                redner.float_ptr(d_intrinsic_mat.data_ptr()))
        d_vertices_list = []
        d_uvs_list = []
        d_normals_list = []
        d_colors_list = []
        d_shapes = []
        for shape in ctx.shapes:
            num_vertices = shape.num_vertices
            num_uv_vertices = shape.num_uv_vertices
            num_normal_vertices = shape.num_normal_vertices
            d_vertices = torch.zeros(num_vertices,
                                     3,
                                     device=pyredner.get_device())
            d_uvs = torch.zeros(
                num_uv_vertices, 2,
                device=pyredner.get_device()) if shape.has_uvs() else None
            d_normals = torch.zeros(
                num_normal_vertices, 3,
                device=pyredner.get_device()) if shape.has_normals() else None
            d_colors = torch.zeros(
                num_vertices, 3,
                device=pyredner.get_device()) if shape.has_colors() else None
            d_vertices_list.append(d_vertices)
            d_uvs_list.append(d_uvs)
            d_normals_list.append(d_normals)
            d_colors_list.append(d_colors)
            d_shapes.append(redner.DShape(\
                redner.float_ptr(d_vertices.data_ptr()),
                redner.float_ptr(d_uvs.data_ptr() if d_uvs is not None else 0),
                redner.float_ptr(d_normals.data_ptr() if d_normals is not None else 0),
                redner.float_ptr(d_colors.data_ptr() if d_colors is not None else 0)))

        d_diffuse_list = []
        d_diffuse_uv_scale_list = []
        d_specular_list = []
        d_specular_uv_scale_list = []
        d_roughness_list = []
        d_roughness_uv_scale_list = []
        d_generic_list = []
        d_generic_uv_scale_list = []
        d_normal_map_list = []
        d_normal_map_uv_scale_list = []
        d_materials = []
        for material in ctx.materials:
            if material.get_diffuse_size(0)[0] == 0:
                d_diffuse = [torch.zeros(3, device=pyredner.get_device())]
            else:
                d_diffuse = []
                for l in range(material.get_diffuse_levels()):
                    diffuse_size = material.get_diffuse_size(l)
                    d_diffuse.append(\
                        torch.zeros(diffuse_size[1],
                                    diffuse_size[0],
                                    3, device = pyredner.get_device()))

            if material.get_specular_size(0)[0] == 0:
                d_specular = [torch.zeros(3, device=pyredner.get_device())]
            else:
                d_specular = []
                for l in range(material.get_specular_levels()):
                    specular_size = material.get_specular_size(l)
                    d_specular.append(\
                        torch.zeros(specular_size[1],
                                    specular_size[0],
                                    3, device = pyredner.get_device()))

            if material.get_roughness_size(0)[0] == 0:
                d_roughness = [torch.zeros(1, device=pyredner.get_device())]
            else:
                d_roughness = []
                for l in range(material.get_roughness_levels()):
                    roughness_size = material.get_roughness_size(l)
                    d_roughness.append(\
                        torch.zeros(roughness_size[1],
                                    roughness_size[0],
                                    1, device = pyredner.get_device()))

            if material.get_generic_levels() == 0:
                d_generic = None
            else:
                d_generic = []
                for l in range(material.get_generic_levels()):
                    generic_size = material.get_generic_size(l)
                    d_generic.append(\
                        torch.zeros(generic_size[2],
                                    generic_size[1],
                                    generic_size[0], device = pyredner.get_device()))

            if material.get_normal_map_levels() == 0:
                d_normal_map = None
            else:
                d_normal_map = []
                for l in range(material.get_normal_map_levels()):
                    normal_map_size = material.get_normal_map_size(l)
                    d_normal_map.append(\
                        torch.zeros(normal_map_size[1],
                                    normal_map_size[0],
                                    3, device = pyredner.get_device()))

            d_diffuse_list.append(d_diffuse)
            d_specular_list.append(d_specular)
            d_roughness_list.append(d_roughness)
            d_generic_list.append(d_generic)
            d_normal_map_list.append(d_normal_map)
            d_diffuse_uv_scale = torch.zeros(2, device=pyredner.get_device())
            d_specular_uv_scale = torch.zeros(2, device=pyredner.get_device())
            d_roughness_uv_scale = torch.zeros(2, device=pyredner.get_device())
            d_diffuse_uv_scale_list.append(d_diffuse_uv_scale)
            d_specular_uv_scale_list.append(d_specular_uv_scale)
            d_roughness_uv_scale_list.append(d_roughness_uv_scale)
            if d_generic is None:
                d_generic_uv_scale = None
            else:
                d_generic_uv_scale = torch.zeros(2,
                                                 device=pyredner.get_device())
            if d_normal_map is None:
                d_normal_map_uv_scale = None
            else:
                d_normal_map_uv_scale = torch.zeros(
                    2, device=pyredner.get_device())

            d_generic_uv_scale_list.append(d_generic_uv_scale)
            d_normal_map_uv_scale_list.append(d_normal_map_uv_scale)
            if d_diffuse[0].dim() == 1:
                d_diffuse_tex = redner.Texture3(\
                    [redner.float_ptr(d_diffuse[0].data_ptr())],
                    [0],
                    [0],
                    3,
                    redner.float_ptr(d_diffuse_uv_scale.data_ptr()))
            else:
                d_diffuse_tex = redner.Texture3(\
                    [redner.float_ptr(x.data_ptr()) for x in d_diffuse],
                    [x.shape[1] for x in d_diffuse],
                    [x.shape[0] for x in d_diffuse],
                    3,
                    redner.float_ptr(d_diffuse_uv_scale.data_ptr()))

            if d_specular[0].dim() == 1:
                d_specular_tex = redner.Texture3(\
                    [redner.float_ptr(d_specular[0].data_ptr())],
                    [0],
                    [0],
                    3,
                    redner.float_ptr(d_specular_uv_scale.data_ptr()))
            else:
                d_specular_tex = redner.Texture3(\
                    [redner.float_ptr(x.data_ptr()) for x in d_specular],
                    [x.shape[1] for x in d_specular],
                    [x.shape[0] for x in d_specular],
                    3,
                    redner.float_ptr(d_specular_uv_scale.data_ptr()))

            if d_roughness[0].dim() == 1:
                d_roughness_tex = redner.Texture1(\
                    [redner.float_ptr(d_roughness[0].data_ptr())],
                    [0],
                    [0],
                    1,
                    redner.float_ptr(d_roughness_uv_scale.data_ptr()))
            else:
                d_roughness_tex = redner.Texture1(\
                    [redner.float_ptr(x.data_ptr()) for x in d_roughness],
                    [x.shape[1] for x in d_roughness],
                    [x.shape[0] for x in d_roughness],
                    1,
                    redner.float_ptr(d_roughness_uv_scale.data_ptr()))

            if d_generic is None:
                d_generic_tex = redner.TextureN(\
                    [], [], [], 0, redner.float_ptr(0))
            else:
                d_generic_tex = redner.TextureN(\
                    [redner.float_ptr(x.data_ptr()) for x in d_generic],
                    [x.shape[1] for x in d_generic],
                    [x.shape[0] for x in d_generic],
                    d_generic[0].shape[2],
                    redner.float_ptr(d_generic_uv_scale.data_ptr()))

            if d_normal_map is None:
                d_normal_map_tex = redner.Texture3(\
                    [], [], [], 0, redner.float_ptr(0))
            else:
                d_normal_map_tex = redner.Texture3(\
                    [redner.float_ptr(x.data_ptr()) for x in d_normal_map],
                    [x.shape[1] for x in d_normal_map],
                    [x.shape[0] for x in d_normal_map],
                    3,
                    redner.float_ptr(d_normal_map_uv_scale.data_ptr()))
            d_materials.append(redner.DMaterial(\
                d_diffuse_tex, d_specular_tex, d_roughness_tex,
                d_generic_tex, d_normal_map_tex))

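        # Allocate per-light intensity gradient buffers and wrap them as
        # DAreaLight objects so the backward render can write into them.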
        d_intensity_list = []
        d_area_lights = []
        for light in ctx.area_lights:
            d_intensity = torch.zeros(3, device=pyredner.get_device())
            d_intensity_list.append(d_intensity)
            d_area_lights.append(\
                redner.DAreaLight(redner.float_ptr(d_intensity.data_ptr())))

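        # If the scene has an environment map, allocate one gradient buffer
        # per mipmap level, plus buffers for its UV scale and the
        # world-to-environment matrix, and wrap everything in a
        # DEnvironmentMap.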
        d_envmap = None
        if ctx.envmap is not None:
            envmap = ctx.envmap
            d_envmap_values = []
            for l in range(envmap.get_levels()):
                size = envmap.get_size(l)
                d_envmap_values.append(\
                    torch.zeros(size[1],
                                size[0],
                                3, device = pyredner.get_device()))
            d_envmap_uv_scale = torch.zeros(2, device=pyredner.get_device())
            d_envmap_tex = redner.Texture3(\
                [redner.float_ptr(x.data_ptr()) for x in d_envmap_values],
                [x.shape[1] for x in d_envmap_values],
                [x.shape[0] for x in d_envmap_values],
                3,
                redner.float_ptr(d_envmap_uv_scale.data_ptr()))
            d_world_to_env = torch.zeros(4, 4, device=pyredner.get_device())
            d_envmap = redner.DEnvironmentMap(\
                d_envmap_tex,
                redner.float_ptr(d_world_to_env.data_ptr()))

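        # Gather all gradient buffers into a DScene; the backward render
        # accumulates the scene derivatives directly into these buffers.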
        d_scene = redner.DScene(
            d_camera, d_shapes, d_materials, d_area_lights, d_envmap,
            pyredner.get_use_gpu(),
            pyredner.get_device().index
            if pyredner.get_device().index is not None else -1)
        if not get_use_correlated_random_number():
            # Decouple the forward/backward random numbers by adding a big prime number
            options.seed += 1000003

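        # Run the adjoint render with the backward sample count: grad_img is
        # the incoming gradient w.r.t. the rendered image, and the resulting
        # scene gradients are written into d_scene.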
        options.num_samples = ctx.num_samples[1]
        start = time.time()
        redner.render(scene, options, redner.float_ptr(0),
                      redner.float_ptr(grad_img.data_ptr()), d_scene,
                      redner.float_ptr(0))
        time_elapsed = time.time() - start
        if print_timing:
            print('Backward pass, time: %.5f s' % time_elapsed)

        # For debugging
        # pyredner.imwrite(grad_img, 'grad_img.exr')
        # grad_img = torch.ones(256, 256, 3, device = pyredner.get_device())
        # debug_img = torch.zeros(256, 256, 3)
        # start = time.time()
        # redner.render(scene, options,
        #               redner.float_ptr(0),
        #               redner.float_ptr(grad_img.data_ptr()),
        #               d_scene,
        #               redner.float_ptr(debug_img.data_ptr()))
        # time_elapsed = time.time() - start
        # if print_timing:
        #     print('Backward pass, time: %.5f s' % time_elapsed)
        # debug_img = debug_img[:, :, 0]
        # pyredner.imwrite(debug_img, 'debug.exr')
        # pyredner.imwrite(-debug_img, 'debug_.exr')
        # debug_img = debug_img.numpy()
        # print(np.max(debug_img))
        # print(np.unravel_index(np.argmax(debug_img), debug_img.shape))
        # print(np.min(debug_img))
        # print(np.unravel_index(np.argmin(debug_img), debug_img.shape))
        # print(np.sum(debug_img) / 3)
        # debug_max = 0.5
        # debug_min = -0.5
        # debug_img = np.clip((debug_img - debug_min) / (debug_max - debug_min), 0, 1)
        # # debug_img = debug_img[:, :, 0]
        # import matplotlib.cm as cm
        # debug_img = cm.viridis(debug_img)
        # skimage.io.imsave('debug.png', np.power(debug_img, 1/2.2))
        # exit()

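        # Assemble the gradients returned by backward(). The tuple must match
        # the arguments of forward() one-to-one (seed followed by the packed
        # scene arguments); non-differentiable entries are returned as None.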
        ret_list = []
        ret_list.append(None)  # seed
        ret_list.append(None)  # num_shapes
        ret_list.append(None)  # num_materials
        ret_list.append(None)  # num_lights
        if camera.use_look_at:
            ret_list.append(d_cam_position.cpu())
            ret_list.append(d_cam_look.cpu())
            ret_list.append(d_cam_up.cpu())
            ret_list.append(None)  # cam_to_world
            ret_list.append(None)  # world_to_cam
        else:
            ret_list.append(None)  # pos
            ret_list.append(None)  # look
            ret_list.append(None)  # up
            ret_list.append(d_cam_to_world.cpu())
            ret_list.append(d_world_to_cam.cpu())
        ret_list.append(d_intrinsic_mat_inv.cpu())
        ret_list.append(d_intrinsic_mat.cpu())
        ret_list.append(None)  # clip near
        ret_list.append(None)  # resolution
        ret_list.append(None)  # camera_type

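        # Per-shape gradients, in the order the shape arguments were packed;
        # index buffers and ids are not differentiable.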
        num_shapes = len(ctx.shapes)
        for i in range(num_shapes):
            ret_list.append(d_vertices_list[i])
            ret_list.append(None)  # indices
            ret_list.append(d_uvs_list[i])
            ret_list.append(d_normals_list[i])
            ret_list.append(None)  # uv_indices
            ret_list.append(None)  # normal_indices
            ret_list.append(d_colors_list[i])
            ret_list.append(None)  # material id
            ret_list.append(None)  # light id

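        # Per-material gradients: each texture contributes one entry per
        # mipmap level followed by its UV-scale gradient.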
        num_materials = len(ctx.materials)
        for i in range(num_materials):
            ret_list.append(None)  # num_levels
            for d_diffuse in d_diffuse_list[i]:
                ret_list.append(d_diffuse)
            ret_list.append(d_diffuse_uv_scale_list[i])
            ret_list.append(None)  # num_levels
            for d_specular in d_specular_list[i]:
                ret_list.append(d_specular)
            ret_list.append(d_specular_uv_scale_list[i])
            ret_list.append(None)  # num_levels
            for d_roughness in d_roughness_list[i]:
                ret_list.append(d_roughness)
            ret_list.append(d_roughness_uv_scale_list[i])
            if d_generic_list[i] is None:
                ret_list.append(None)  # num_levels
            else:
                ret_list.append(None)  # num_levels
                for d_generic in d_generic_list[i]:
                    ret_list.append(d_generic)
                ret_list.append(d_generic_uv_scale_list[i])
            if d_normal_map_list[i] is None:
                ret_list.append(None)  # num_levels
            else:
                ret_list.append(None)  # num_levels
                for d_normal_map in d_normal_map_list[i]:
                    ret_list.append(d_normal_map)
                ret_list.append(d_normal_map_uv_scale_list[i])
            ret_list.append(None)  # compute_specular_lighting
            ret_list.append(None)  # two sided
            ret_list.append(None)  # use_vertex_color

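        # Per-area-light gradients: only the intensity is differentiable.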
        num_area_lights = len(ctx.area_lights)
        for i in range(num_area_lights):
            ret_list.append(None)  # shape id
            ret_list.append(d_intensity_list[i].cpu())
            ret_list.append(None)  # two sided

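        # Environment map gradients mirror its forward packing; a single None
        # is returned when no environment map was used.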
        if ctx.envmap is not None:
            ret_list.append(None)  # num_levels
            for d_values in d_envmap_values:
                ret_list.append(d_values)
            ret_list.append(d_envmap_uv_scale)
            ret_list.append(None)  # env_to_world
            ret_list.append(d_world_to_env.cpu())
            ret_list.append(None)  # sample_cdf_ys
            ret_list.append(None)  # sample_cdf_xs
            ret_list.append(None)  # pdf_norm
        else:
            ret_list.append(None)

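        # The remaining render options are not differentiable.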
        ret_list.append(None)  # num samples
        ret_list.append(None)  # num bounces
        ret_list.append(None)  # channels
        ret_list.append(None)  # sampler type
        ret_list.append(None)  # use_primary_edge_sampling
        ret_list.append(None)  # use_secondary_edge_sampling
        ret_list.append(None)  # sample_pixel_center

        return tuple(ret_list)