Example #1
def compute_uvs(vertices, indices, print_progress = True):
    """
        Compute UV coordinates of a given mesh using a charting algorithm
        with least square conformal mapping. This calls the `xatlas <https://github.com/jpcy/xatlas>`_ library.

        Args
        ====
        vertices: tf.Tensor
            3D position of vertices
            float32 tensor with size num_vertices x 3
        indices: tf.Tensor
            vertex indices of triangle faces.
            int32 tensor with size num_triangles x 3

        Returns
        =======
        tf.Tensor
            uv vertices pool, float32 Tensor with size num_uv_vertices x 2
        tf.Tensor
            uv indices, int32 Tensor with size num_triangles x 3
    """
    with tf.device('/device:cpu:' + str(pyredner.get_cpu_device_id())):
        vertices = tf.identity(vertices)
        indices = tf.identity(indices)
        uv_trimesh = redner.UVTriMesh(redner.float_ptr(pyredner.data_ptr(vertices)),
                                      redner.int_ptr(pyredner.data_ptr(indices)),
                                      redner.float_ptr(0),
                                      redner.int_ptr(0),
                                      int(vertices.shape[0]),
                                      0,
                                      int(indices.shape[0]))

        atlas = redner.TextureAtlas()
        num_uv_vertices = redner.automatic_uv_map([uv_trimesh], atlas, print_progress)[0]

        uvs = tf.zeros([num_uv_vertices, 2], dtype=tf.float32)
        uv_indices = tf.zeros_like(indices)
        uv_trimesh.uvs = redner.float_ptr(pyredner.data_ptr(uvs))
        uv_trimesh.uv_indices = redner.int_ptr(pyredner.data_ptr(uv_indices))
        uv_trimesh.num_uv_vertices = num_uv_vertices

        redner.copy_texture_atlas(atlas, [uv_trimesh])

    with tf.device(pyredner.get_device_name()):
        vertices = tf.identity(vertices)
        indices = tf.identity(indices)
        uvs = tf.identity(uvs)
        uv_indices = tf.identity(uv_indices)
    return uvs, uv_indices
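
A minimal usage sketch for the function above, assuming the package is installed as pyredner_tensorflow and re-exports compute_uvs; the tiny triangle mesh here is purely illustrative:

import tensorflow as tf
import pyredner_tensorflow as pyredner

# A single triangle; any mesh loaded through pyredner would work the same way.
vertices = tf.constant([[0.0, 0.0, 0.0],
                        [1.0, 0.0, 0.0],
                        [0.0, 1.0, 0.0]], dtype=tf.float32)
indices = tf.constant([[0, 1, 2]], dtype=tf.int32)

# xatlas charts the mesh and returns a new UV vertex pool plus per-face UV indices.
uvs, uv_indices = pyredner.compute_uvs(vertices, indices, print_progress=False)
print(uvs.shape, uv_indices.shape)  # (num_uv_vertices, 2), (num_triangles, 3)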
Example #2
def forward(seed: int, *args):
    """
        Forward rendering pass: given a serialized scene, output an image.
    """

    args_ctx = unpack_args(seed, args)
    area_lights = args_ctx.area_lights
    camera = args_ctx.camera
    channels = args_ctx.channels
    envmap = args_ctx.envmap
    materials = args_ctx.materials
    num_samples = args_ctx.num_samples
    options = args_ctx.options
    resolution = args_ctx.resolution
    viewport = args_ctx.viewport
    scene = args_ctx.scene
    shapes = args_ctx.shapes
    num_channel_args = args_ctx.num_channel_args
    num_channels = redner.compute_num_channels(
        channels, scene.max_generic_texture_dimension)

    with tf.device(pyredner.get_device_name()):
        img_height = viewport[2] - viewport[0]
        img_width = viewport[3] - viewport[1]
        rendered_image = tf.zeros(shape=[img_height, img_width, num_channels],
                                  dtype=tf.float32)

        start = time.time()
        redner.render(
            scene,
            options,
            redner.float_ptr(pyredner.data_ptr(rendered_image)),
            redner.float_ptr(0),  # d_rendered_image
            None,  # d_scene
            redner.float_ptr(0),  # translational_gradient_image
            redner.float_ptr(0))  # debug_image
        time_elapsed = time.time() - start
        if get_print_timing():
            print('Forward pass, time: %.5f s' % time_elapsed)

    ctx = Context()
    ctx.camera = camera
    ctx.shapes = shapes
    ctx.materials = materials
    ctx.area_lights = area_lights
    ctx.envmap = envmap
    ctx.scene = scene
    ctx.options = options
    ctx.num_samples = num_samples
    ctx.num_channel_args = num_channel_args
    ctx.args = args  # important to avoid GC on tf tensors
    return rendered_image, ctx
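
For context, a hedged sketch of how this forward pass is typically reached from user code, assuming the package exposes serialize_scene and a public render entry point that wraps the forward/backward pair in a tf.custom_gradient; scene construction is elided:

import pyredner_tensorflow as pyredner

# scene: a pyredner.Scene built elsewhere (camera, shapes, materials, lights).
scene_args = pyredner.serialize_scene(scene=scene,
                                      num_samples=64,
                                      max_bounces=1)
# The public render call flattens the scene into scene_args and dispatches
# to forward() above, registering backward() for gradient computation.
img = pyredner.render(0, *scene_args)  # seed = 0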
Example #3
    def backward(grad_img):
        scene = ctx.scene
        options = ctx.options

        buffers = create_gradient_buffers(ctx)

        if not get_use_correlated_random_number():
            # Decouple the forward/backward random numbers by adding a big prime number
            options.seed += 1000003
        start = time.time()

        options.num_samples = ctx.num_samples[1]
        with tf.device(pyredner.get_device_name()):
            grad_img = tf.identity(grad_img)
            redner.render(
                scene,
                options,
                redner.float_ptr(0),  # rendered_image
                redner.float_ptr(pyredner.data_ptr(grad_img)),
                buffers.d_scene,
                redner.float_ptr(0),  # translational_gradient_image
                redner.float_ptr(0))  # debug_image
        time_elapsed = time.time() - start

        if get_print_timing():
            print('Backward pass, time: %.5f s' % time_elapsed)

        ret_list = []
        ret_list.append(None)  # seed
        ret_list.append(None)  # num_shapes
        ret_list.append(None)  # num_materials
        ret_list.append(None)  # num_lights
        if ctx.camera.use_look_at:
            ret_list.append(buffers.d_position)
            ret_list.append(buffers.d_look_at)
            ret_list.append(buffers.d_up)
            ret_list.append(None)  # cam_to_world
            ret_list.append(None)  # world_to_cam
        else:
            ret_list.append(None)  # pos
            ret_list.append(None)  # look
            ret_list.append(None)  # up
            ret_list.append(buffers.d_cam_to_world)
            ret_list.append(buffers.d_world_to_cam)
        ret_list.append(buffers.d_intrinsic_mat_inv)
        ret_list.append(buffers.d_intrinsic_mat)
        ret_list.append(None)  # clip near
        ret_list.append(None)  # resolution
        ret_list.append(None)  # viewport
        ret_list.append(None)  # camera_type

        num_shapes = len(ctx.shapes)
        for i in range(num_shapes):
            ret_list.append(buffers.d_vertices_list[i])
            ret_list.append(None)  # indices
            ret_list.append(buffers.d_uvs_list[i])
            ret_list.append(buffers.d_normals_list[i])
            ret_list.append(None)  # uv_indices
            ret_list.append(None)  # normal_indices
            ret_list.append(buffers.d_colors_list[i])
            ret_list.append(None)  # material id
            ret_list.append(None)  # light id

        num_materials = len(ctx.materials)
        for i in range(num_materials):
            ret_list.append(None)  # num_levels
            for d_diffuse in buffers.d_diffuse_list[i]:
                ret_list.append(d_diffuse)
            ret_list.append(buffers.d_diffuse_uv_scale_list[i])
            ret_list.append(None)  # num_levels
            for d_specular in buffers.d_specular_list[i]:
                ret_list.append(d_specular)
            ret_list.append(buffers.d_specular_uv_scale_list[i])
            ret_list.append(None)  # num_levels
            for d_roughness in buffers.d_roughness_list[i]:
                ret_list.append(d_roughness)
            ret_list.append(buffers.d_roughness_uv_scale_list[i])
            if buffers.d_generic_list[i] is None:
                ret_list.append(None)  # num_levels
            else:
                ret_list.append(None)  # num_levels
                for d_generic in buffers.d_generic_list[i]:
                    ret_list.append(d_generic)
                ret_list.append(buffers.d_generic_uv_scale_list[i])
            if buffers.d_normal_map_list[i] is None:
                ret_list.append(None)  # num_levels
            else:
                ret_list.append(None)  # num_levels
                for d_normal_map in buffers.d_normal_map_list[i]:
                    ret_list.append(d_normal_map)
                ret_list.append(buffers.d_normal_map_uv_scale_list[i])
            ret_list.append(None)  # compute_specular_lighting
            ret_list.append(None)  # two sided
            ret_list.append(None)  # use_vertex_color

        num_area_lights = len(ctx.area_lights)
        for i in range(num_area_lights):
            ret_list.append(None)  # shape id
            with tf.device('/device:cpu:' + str(pyredner.get_cpu_device_id())):
                ret_list.append(tf.identity(buffers.d_intensity_list[i]))
            ret_list.append(None)  # two_sided
            ret_list.append(None)  # directly_visible

        if ctx.envmap is not None:
            ret_list.append(None)  # num_levels
            for d_values in buffers.d_envmap_values:
                ret_list.append(d_values)
            ret_list.append(buffers.d_envmap_uv_scale)
            ret_list.append(None)  # env_to_world
            with tf.device('/device:cpu:' + str(pyredner.get_cpu_device_id())):
                ret_list.append(tf.identity(buffers.d_world_to_env))
            ret_list.append(None)  # sample_cdf_ys
            ret_list.append(None)  # sample_cdf_xs
            ret_list.append(None)  # pdf_norm
            ret_list.append(None)  # directly_visible
        else:
            ret_list.append(None)

        ret_list.append(None)  # num samples
        ret_list.append(None)  # num bounces
        ret_list.append(None)  # num channels
        for _ in range(ctx.num_channel_args):
            ret_list.append(None)  # channel

        ret_list.append(None)  # sampler type
        ret_list.append(None)  # use_primary_edge_sampling
        ret_list.append(None)  # use_secondary_edge_sampling
        ret_list.append(None)  # sample_pixel_center

        return ret_list
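
The None entries in ret_list mirror, one-to-one, the serialized arguments consumed by unpack_args, so TensorFlow can match each gradient to its input. A minimal sketch of how such a forward/backward pair is usually glued together with tf.custom_gradient; the wrapper name is illustrative, not the library's actual function:

import tensorflow as tf

@tf.custom_gradient
def render_custom_grad(*x):
    # x is the flat serialized argument list: (seed, num_shapes, num_materials, ...).
    seed, scene_args = int(x[0]), x[1:]
    img, ctx = forward(seed, *scene_args)   # forward() from Example #2

    def grad_fn(grad_img):
        # Must return one gradient (or None) per element of x, in order.
        # That is exactly the ret_list built by backward() above, which starts
        # with None for the seed and the scene counts. In the library, backward
        # is defined inside this wrapper so it can capture ctx.
        return backward(grad_img)

    return img, grad_fn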
Example #4
def create_gradient_buffers(ctx):
    scene = ctx.scene
    options = ctx.options
    camera = ctx.camera

    buffers = Context()

    with tf.device(pyredner.get_device_name()):
        if camera.use_look_at:
            buffers.d_position = tf.zeros(3, dtype=tf.float32)
            buffers.d_look_at = tf.zeros(3, dtype=tf.float32)
            buffers.d_up = tf.zeros(3, dtype=tf.float32)
            buffers.d_cam_to_world = None
            buffers.d_world_to_cam = None
        else:
            buffers.d_position = None
            buffers.d_look_at = None
            buffers.d_up = None
            buffers.d_cam_to_world = tf.zeros([4, 4], dtype=tf.float32)
            buffers.d_world_to_cam = tf.zeros([4, 4], dtype=tf.float32)
        buffers.d_intrinsic_mat_inv = tf.zeros([3, 3], dtype=tf.float32)
        buffers.d_intrinsic_mat = tf.zeros([3, 3], dtype=tf.float32)
        if camera.use_look_at:
            buffers.d_camera = redner.DCamera(\
                redner.float_ptr(pyredner.data_ptr(buffers.d_position)),
                redner.float_ptr(pyredner.data_ptr(buffers.d_look_at)),
                redner.float_ptr(pyredner.data_ptr(buffers.d_up)),
                redner.float_ptr(0), # cam_to_world
                redner.float_ptr(0), # world_to_cam
                redner.float_ptr(pyredner.data_ptr(buffers.d_intrinsic_mat_inv)),
                redner.float_ptr(pyredner.data_ptr(buffers.d_intrinsic_mat)))
        else:
            buffers.d_camera = redner.DCamera(\
                redner.float_ptr(0),
                redner.float_ptr(0),
                redner.float_ptr(0),
                redner.float_ptr(pyredner.data_ptr(buffers.d_cam_to_world)),
                redner.float_ptr(pyredner.data_ptr(buffers.d_world_to_cam)),
                redner.float_ptr(pyredner.data_ptr(buffers.d_intrinsic_mat_inv)),
                redner.float_ptr(pyredner.data_ptr(buffers.d_intrinsic_mat)))

    buffers.d_vertices_list = []
    buffers.d_uvs_list = []
    buffers.d_normals_list = []
    buffers.d_colors_list = []
    buffers.d_shapes = []
    with tf.device(pyredner.get_device_name()):
        for i, shape in enumerate(ctx.shapes):
            num_vertices = shape.num_vertices
            d_vertices = tf.zeros([num_vertices, 3], dtype=tf.float32)
            d_uvs = tf.zeros([num_vertices, 2],
                             dtype=tf.float32) if shape.has_uvs() else None
            d_normals = tf.zeros(
                [num_vertices, 3],
                dtype=tf.float32) if shape.has_normals() else None
            d_colors = tf.zeros(
                [num_vertices, 3],
                dtype=tf.float32) if shape.has_colors() else None
            buffers.d_vertices_list.append(d_vertices)
            buffers.d_uvs_list.append(d_uvs)
            buffers.d_normals_list.append(d_normals)
            buffers.d_colors_list.append(d_colors)
            buffers.d_shapes.append(redner.DShape(\
                redner.float_ptr(pyredner.data_ptr(d_vertices)),
                redner.float_ptr(pyredner.data_ptr(d_uvs) if d_uvs is not None else 0),
                redner.float_ptr(pyredner.data_ptr(d_normals) if d_normals is not None else 0),
                redner.float_ptr(pyredner.data_ptr(d_colors) if d_colors is not None else 0)))

    buffers.d_diffuse_list = []
    buffers.d_specular_list = []
    buffers.d_roughness_list = []
    buffers.d_normal_map_list = []
    buffers.d_diffuse_uv_scale_list = []
    buffers.d_specular_uv_scale_list = []
    buffers.d_roughness_uv_scale_list = []
    buffers.d_generic_list = []
    buffers.d_generic_uv_scale_list = []
    buffers.d_normal_map_uv_scale_list = []
    buffers.d_materials = []
    with tf.device(pyredner.get_device_name()):
        for material in ctx.materials:
            if material.get_diffuse_size(0)[0] == 0:
                d_diffuse = [tf.zeros(3, dtype=tf.float32)]
            else:
                d_diffuse = []
                for l in range(material.get_diffuse_levels()):
                    diffuse_size = material.get_diffuse_size(l)
                    d_diffuse.append(\
                        tf.zeros([diffuse_size[1],
                                  diffuse_size[0],
                                  3], dtype=tf.float32))

            if material.get_specular_size(0)[0] == 0:
                d_specular = [tf.zeros(3, dtype=tf.float32)]
            else:
                d_specular = []
                for l in range(material.get_specular_levels()):
                    specular_size = material.get_specular_size(l)
                    d_specular.append(\
                        tf.zeros([specular_size[1],
                                  specular_size[0],
                                  3], dtype=tf.float32))

            if material.get_roughness_size(0)[0] == 0:
                d_roughness = [tf.zeros(1, dtype=tf.float32)]
            else:
                d_roughness = []
                for l in range(material.get_roughness_levels()):
                    roughness_size = material.get_roughness_size(l)
                    d_roughness.append(\
                        tf.zeros([roughness_size[1],
                                  roughness_size[0],
                                  1], dtype=tf.float32))
            # HACK: tensorflow's eager mode uses a cache to store scalar
            #       constants to avoid memory copy. If we pass scalar tensors
            #       into the C++ code and modify them, we would corrupt the
            #       cache, causing incorrect results in future scalar constant
            #       creations. Thus we force tensorflow to copy by adding a zero.
            # (also see https://github.com/tensorflow/tensorflow/issues/11186
            #  for more discussion regarding copying tensors)
            if d_roughness[0].shape.num_elements() == 1:
                d_roughness[0] = d_roughness[0] + 0

            if material.get_generic_levels() == 0:
                d_generic = None
            else:
                d_generic = []
                for l in range(material.get_generic_levels()):
                    generic_size = material.get_generic_size(l)
                    d_generic.append(\
                        tf.zeros([generic_size[2],
                                  generic_size[1],
                                  generic_size[0]], dtype=tf.float32))

            if material.get_normal_map_levels() == 0:
                d_normal_map = None
            else:
                d_normal_map = []
                for l in range(material.get_normal_map_levels()):
                    normal_map_size = material.get_normal_map_size(l)
                    d_normal_map.append(\
                        tf.zeros([normal_map_size[1],
                                  normal_map_size[0],
                                  3], dtype=tf.float32))

            buffers.d_diffuse_list.append(d_diffuse)
            buffers.d_specular_list.append(d_specular)
            buffers.d_roughness_list.append(d_roughness)
            buffers.d_generic_list.append(d_generic)
            buffers.d_normal_map_list.append(d_normal_map)

            d_diffuse_uv_scale = tf.zeros([2], dtype=tf.float32)
            d_specular_uv_scale = tf.zeros([2], dtype=tf.float32)
            d_roughness_uv_scale = tf.zeros([2], dtype=tf.float32)
            if d_generic is None:
                d_generic_uv_scale = None
            else:
                d_generic_uv_scale = tf.zeros([2], dtype=tf.float32)
            if d_normal_map is None:
                d_normal_map_uv_scale = None
            else:
                d_normal_map_uv_scale = tf.zeros([2], dtype=tf.float32)
            buffers.d_diffuse_uv_scale_list.append(d_diffuse_uv_scale)
            buffers.d_specular_uv_scale_list.append(d_specular_uv_scale)
            buffers.d_roughness_uv_scale_list.append(d_roughness_uv_scale)
            buffers.d_generic_uv_scale_list.append(d_generic_uv_scale)
            buffers.d_normal_map_uv_scale_list.append(d_normal_map_uv_scale)

            if len(d_diffuse[0].shape) == 1:
                d_diffuse_tex = redner.Texture3(\
                    [redner.float_ptr(pyredner.data_ptr(d_diffuse[0]))],
                    [0],
                    [0],
                    3,
                    redner.float_ptr(pyredner.data_ptr(d_diffuse_uv_scale)))
            else:
                d_diffuse_tex = redner.Texture3(\
                    [redner.float_ptr(pyredner.data_ptr(x)) for x in d_diffuse],
                    [x.shape[1] for x in d_diffuse],
                    [x.shape[0] for x in d_diffuse],
                    3,
                    redner.float_ptr(pyredner.data_ptr(d_diffuse_uv_scale)))

            if len(d_specular[0].shape) == 1:
                d_specular_tex = redner.Texture3(\
                    [redner.float_ptr(pyredner.data_ptr(d_specular[0]))],
                    [0],
                    [0],
                    3,
                    redner.float_ptr(pyredner.data_ptr(d_specular_uv_scale)))
            else:
                d_specular_tex = redner.Texture3(\
                    [redner.float_ptr(pyredner.data_ptr(x)) for x in d_specular],
                    [x.shape[1] for x in d_specular],
                    [x.shape[0] for x in d_specular],
                    3,
                    redner.float_ptr(pyredner.data_ptr(d_specular_uv_scale)))

            if len(d_roughness[0].shape) == 1:
                d_roughness_tex = redner.Texture1(\
                    [redner.float_ptr(pyredner.data_ptr(d_roughness[0]))],
                    [0],
                    [0],
                    1,
                    redner.float_ptr(pyredner.data_ptr(d_roughness_uv_scale)))
            else:
                d_roughness_tex = redner.Texture1(\
                    [redner.float_ptr(pyredner.data_ptr(x)) for x in d_roughness],
                    [x.shape[1] for x in d_roughness],
                    [x.shape[0] for x in d_roughness],
                    1,
                    redner.float_ptr(pyredner.data_ptr(d_roughness_uv_scale)))

            if d_generic is None:
                d_generic_tex = redner.TextureN(\
                    [], [], [], 0, redner.float_ptr(0))
            else:
                d_generic_tex = redner.TextureN(\
                    [redner.float_ptr(pyredner.data_ptr(x)) for x in d_generic],
                    [x.shape[1] for x in d_generic],
                    [x.shape[0] for x in d_generic],
                    d_generic[0].shape[2],
                    redner.float_ptr(pyredner.data_ptr(d_generic_uv_scale)))

            if d_normal_map is None:
                d_normal_map = redner.Texture3(\
                    [], [], [], 0, redner.float_ptr(0))
            else:
                d_normal_map = redner.Texture3(\
                    [redner.float_ptr(pyredner.data_ptr(x)) for x in d_normal_map],
                    [x.shape[1] for x in d_normal_map],
                    [x.shape[0] for x in d_normal_map],
                    3,
                    redner.float_ptr(pyredner.data_ptr(d_normal_map_uv_scale)))

            buffers.d_materials.append(redner.DMaterial(\
                d_diffuse_tex, d_specular_tex, d_roughness_tex,
                d_generic_tex, d_normal_map))

    buffers.d_intensity_list = []
    buffers.d_area_lights = []
    with tf.device(pyredner.get_device_name()):
        for light in ctx.area_lights:
            d_intensity = tf.zeros(3, dtype=tf.float32)
            buffers.d_intensity_list.append(d_intensity)
            buffers.d_area_lights.append(\
                redner.DAreaLight(redner.float_ptr(pyredner.data_ptr(d_intensity))))

    buffers.d_envmap = None
    if ctx.envmap is not None:
        envmap = ctx.envmap
        with tf.device(pyredner.get_device_name()):
            buffers.d_envmap_values = []
            for l in range(envmap.get_levels()):
                size = envmap.get_size(l)
                buffers.d_envmap_values.append(\
                    tf.zeros([size[1],
                              size[0],
                              3], dtype=tf.float32))
            buffers.d_envmap_uv_scale = tf.zeros([2], dtype=tf.float32)
            buffers.d_world_to_env = tf.zeros([4, 4], dtype=tf.float32)
            d_envmap_tex = redner.Texture3(\
                [redner.float_ptr(pyredner.data_ptr(x)) for x in buffers.d_envmap_values],
                [x.shape[1] for x in buffers.d_envmap_values],
                [x.shape[0] for x in buffers.d_envmap_values],
                3,
                redner.float_ptr(pyredner.data_ptr(buffers.d_envmap_uv_scale)))
            buffers.d_envmap = redner.DEnvironmentMap(
                d_envmap_tex,
                redner.float_ptr(pyredner.data_ptr(buffers.d_world_to_env)))

    buffers.d_scene = redner.DScene(buffers.d_camera, buffers.d_shapes,
                                    buffers.d_materials, buffers.d_area_lights,
                                    buffers.d_envmap, pyredner.get_use_gpu(),
                                    pyredner.get_gpu_device_id())
    return buffers
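
Context here is used as a plain attribute bag; a minimal stand-in, assuming nothing beyond dynamic attribute assignment is required, could look like this:

class Context:
    """Empty namespace object; fields such as d_scene, d_camera,
    d_vertices_list, d_materials, ... are attached dynamically by
    create_gradient_buffers."""
    pass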
Example #5
def unpack_args(seed,
                args,
                use_primary_edge_sampling=None,
                use_secondary_edge_sampling=None):
    """
        Given a list of serialized scene arguments, unpack
        all information into a Context.
    """
    # Unpack arguments
    current_index = 0
    num_shapes = int(args[current_index])
    current_index += 1
    num_materials = int(args[current_index])
    current_index += 1
    num_lights = int(args[current_index])
    current_index += 1

    # Camera arguments
    cam_position = args[current_index]
    current_index += 1
    cam_look_at = args[current_index]
    current_index += 1
    cam_up = args[current_index]
    current_index += 1
    cam_to_world = args[current_index]
    current_index += 1
    world_to_cam = args[current_index]
    current_index += 1
    intrinsic_mat_inv = args[current_index]
    current_index += 1
    intrinsic_mat = args[current_index]
    current_index += 1
    clip_near = float(args[current_index])
    current_index += 1
    resolution = args[current_index].numpy()  # Tuple[int, int]
    current_index += 1
    viewport = args[current_index].numpy()  # Tuple[int, int, int, int]
    current_index += 1
    camera_type = RednerCameraType.asCameraType(
        args[current_index])  # FIXME: Map to custom type
    current_index += 1

    with tf.device('/device:cpu:' + str(pyredner.get_cpu_device_id())):
        if is_empty_tensor(cam_to_world):
            camera = redner.Camera(
                resolution[1],
                resolution[0],
                redner.float_ptr(pyredner.data_ptr(cam_position)),
                redner.float_ptr(pyredner.data_ptr(cam_look_at)),
                redner.float_ptr(pyredner.data_ptr(cam_up)),
                redner.float_ptr(0),  # cam_to_world
                redner.float_ptr(0),  # world_to_cam
                redner.float_ptr(pyredner.data_ptr(intrinsic_mat_inv)),
                redner.float_ptr(pyredner.data_ptr(intrinsic_mat)),
                clip_near,
                camera_type,
                redner.Vector2i(viewport[1], viewport[0]),
                redner.Vector2i(viewport[3], viewport[2]))
        else:
            camera = redner.Camera(
                resolution[1], resolution[0], redner.float_ptr(0),
                redner.float_ptr(0), redner.float_ptr(0),
                redner.float_ptr(pyredner.data_ptr(cam_to_world)),
                redner.float_ptr(pyredner.data_ptr(world_to_cam)),
                redner.float_ptr(pyredner.data_ptr(intrinsic_mat_inv)),
                redner.float_ptr(pyredner.data_ptr(intrinsic_mat)), clip_near,
                camera_type, redner.Vector2i(viewport[1], viewport[0]),
                redner.Vector2i(viewport[3], viewport[2]))

    with tf.device(pyredner.get_device_name()):
        shapes = []
        for i in range(num_shapes):
            vertices = args[current_index]
            current_index += 1
            indices = args[current_index]
            current_index += 1
            uvs = args[current_index]
            current_index += 1
            normals = args[current_index]
            current_index += 1
            uv_indices = args[current_index]
            current_index += 1
            normal_indices = args[current_index]
            current_index += 1
            colors = args[current_index]
            current_index += 1
            material_id = int(args[current_index])
            current_index += 1
            light_id = int(args[current_index])
            current_index += 1

            shapes.append(redner.Shape(\
                redner.float_ptr(pyredner.data_ptr(vertices)),
                redner.int_ptr(pyredner.data_ptr(indices)),
                redner.float_ptr(pyredner.data_ptr(uvs) if not is_empty_tensor(uvs) else 0),
                redner.float_ptr(pyredner.data_ptr(normals) if not is_empty_tensor(normals) else 0),
                redner.int_ptr(pyredner.data_ptr(uv_indices) if not is_empty_tensor(uv_indices) else 0),
                redner.int_ptr(pyredner.data_ptr(normal_indices) if not is_empty_tensor(normal_indices) else 0),
                redner.float_ptr(pyredner.data_ptr(colors) if not is_empty_tensor(colors) else 0),
                int(vertices.shape[0]),
                int(uvs.shape[0]) if not is_empty_tensor(uvs) else 0,
                int(normals.shape[0]) if not is_empty_tensor(normals) else 0,
                int(indices.shape[0]),
                material_id,
                light_id))

    materials = []
    with tf.device(pyredner.get_device_name()):
        for i in range(num_materials):
            num_levels = int(args[current_index])
            current_index += 1
            diffuse_reflectance = []
            for j in range(num_levels):
                diffuse_reflectance.append(args[current_index])
                current_index += 1
            diffuse_uv_scale = args[current_index]
            current_index += 1

            num_levels = int(args[current_index])
            current_index += 1
            specular_reflectance = []
            for j in range(num_levels):
                specular_reflectance.append(args[current_index])
                current_index += 1
            specular_uv_scale = args[current_index]
            current_index += 1

            num_levels = int(args[current_index])
            current_index += 1
            roughness = []
            for j in range(num_levels):
                roughness.append(args[current_index])
                current_index += 1
            roughness_uv_scale = args[current_index]
            current_index += 1

            num_levels = int(args[current_index])
            current_index += 1
            generic_texture = []
            if num_levels > 0:
                for j in range(num_levels):
                    generic_texture.append(args[current_index])
                    current_index += 1
                generic_uv_scale = args[current_index]
                current_index += 1
            else:
                generic_uv_scale = None

            num_levels = int(args[current_index])
            current_index += 1
            normal_map = []
            if num_levels > 0:
                for j in range(num_levels):
                    normal_map.append(args[current_index])
                    current_index += 1
                normal_map_uv_scale = args[current_index]
                current_index += 1
            else:
                normal_map_uv_scale = None

            compute_specular_lighting = bool(args[current_index])
            current_index += 1
            two_sided = bool(args[current_index])
            current_index += 1
            use_vertex_color = bool(args[current_index])
            current_index += 1

            if get_tensor_dimension(diffuse_reflectance[0]) == 1:
                diffuse_reflectance = redner.Texture3(\
                    [redner.float_ptr(pyredner.data_ptr(diffuse_reflectance[0]))],
                    [0],
                    [0],
                    3, redner.float_ptr(pyredner.data_ptr(diffuse_uv_scale)))
            else:
                assert (get_tensor_dimension(diffuse_reflectance[0]) == 3)
                diffuse_reflectance = redner.Texture3(\
                    [redner.float_ptr(pyredner.data_ptr(x)) for x in diffuse_reflectance],
                    [x.shape[1] for x in diffuse_reflectance],
                    [x.shape[0] for x in diffuse_reflectance],
                    3,
                    redner.float_ptr(pyredner.data_ptr(diffuse_uv_scale)))

            if get_tensor_dimension(specular_reflectance[0]) == 1:
                specular_reflectance = redner.Texture3(\
                    [redner.float_ptr(pyredner.data_ptr(specular_reflectance[0]))],
                    [0],
                    [0],
                    3, redner.float_ptr(pyredner.data_ptr(specular_uv_scale)))
            else:
                assert (get_tensor_dimension(specular_reflectance[0]) == 3)
                specular_reflectance = redner.Texture3(\
                    [redner.float_ptr(pyredner.data_ptr(x)) for x in specular_reflectance],
                    [x.shape[1] for x in specular_reflectance],
                    [x.shape[0] for x in specular_reflectance],
                    3,
                    redner.float_ptr(pyredner.data_ptr(specular_uv_scale)))

            if get_tensor_dimension(roughness[0]) == 1:
                roughness = redner.Texture1(\
                    [redner.float_ptr(pyredner.data_ptr(roughness[0]))],
                    [0],
                    [0],
                    1, redner.float_ptr(pyredner.data_ptr(roughness_uv_scale)))
            else:
                assert (get_tensor_dimension(roughness[0]) == 3)
                roughness = redner.Texture1(\
                    [redner.float_ptr(pyredner.data_ptr(x)) for x in roughness],
                    [x.shape[1] for x in roughness],
                    [x.shape[0] for x in roughness],
                    1, # channels (roughness is a single-channel texture)
                    redner.float_ptr(pyredner.data_ptr(roughness_uv_scale)))

            if len(generic_texture) > 0:
                generic_texture = redner.TextureN(\
                    [redner.float_ptr(pyredner.data_ptr(x)) for x in generic_texture],
                    [x.shape[1] for x in generic_texture],
                    [x.shape[0] for x in generic_texture],
                    generic_texture[0].shape[2],
                    redner.float_ptr(pyredner.data_ptr(generic_uv_scale)))
            else:
                generic_texture = redner.TextureN(\
                    [], [], [], 0, redner.float_ptr(0))

            if len(normal_map) > 0:
                normal_map = redner.Texture3(\
                    [redner.float_ptr(pyredner.data_ptr(x)) for x in normal_map],
                    [x.shape[1] for x in normal_map],
                    [x.shape[0] for x in normal_map],
                    normal_map[0].shape[2],
                    redner.float_ptr(pyredner.data_ptr(normal_map_uv_scale)))
            else:
                normal_map = redner.Texture3(\
                    [], [], [], 0, redner.float_ptr(0))

            materials.append(redner.Material(\
                diffuse_reflectance,
                specular_reflectance,
                roughness,
                generic_texture,
                normal_map,
                compute_specular_lighting,
                two_sided,
                use_vertex_color))

    with tf.device('/device:cpu:' + str(pyredner.get_cpu_device_id())):
        area_lights = []
        for i in range(num_lights):
            shape_id = int(args[current_index])
            current_index += 1
            intensity = args[current_index]
            current_index += 1
            two_sided = bool(args[current_index])
            current_index += 1
            directly_visible = bool(args[current_index])
            current_index += 1

            area_lights.append(
                redner.AreaLight(
                    shape_id, redner.float_ptr(pyredner.data_ptr(intensity)),
                    two_sided, directly_visible))

    envmap = None
    if not is_empty_tensor(args[current_index]):
        num_levels = int(args[current_index])
        current_index += 1
        values = []
        for j in range(num_levels):
            values.append(args[current_index])
            current_index += 1
        envmap_uv_scale = args[current_index]
        current_index += 1
        env_to_world = args[current_index]
        current_index += 1
        world_to_env = args[current_index]
        current_index += 1
        sample_cdf_ys = args[current_index]
        current_index += 1
        sample_cdf_xs = args[current_index]
        current_index += 1
        pdf_norm = float(args[current_index])
        current_index += 1
        directly_visible = bool(args[current_index])
        current_index += 1

        assert isinstance(pdf_norm, float)
        with tf.device(pyredner.get_device_name()):
            sample_cdf_ys = redner.float_ptr(pyredner.data_ptr(sample_cdf_ys))
            sample_cdf_xs = redner.float_ptr(pyredner.data_ptr(sample_cdf_xs))
        with tf.device('/device:cpu:' + str(pyredner.get_cpu_device_id())):
            env_to_world = redner.float_ptr(pyredner.data_ptr(env_to_world))
            world_to_env = redner.float_ptr(pyredner.data_ptr(world_to_env))
        with tf.device(pyredner.get_device_name()):
            values = redner.Texture3(\
                [redner.float_ptr(pyredner.data_ptr(x)) for x in values],
                [x.shape[1] for x in values], # width
                [x.shape[0] for x in values], # height
                3, # channels
                redner.float_ptr(pyredner.data_ptr(envmap_uv_scale)))
        envmap = redner.EnvironmentMap(\
            values,
            env_to_world,
            world_to_env,
            sample_cdf_ys,
            sample_cdf_xs,
            pdf_norm,
            directly_visible)
    else:
        current_index += 1

    # Options
    num_samples = args[current_index]
    current_index += 1
    if len(num_samples.shape) == 0 or num_samples.shape[0] == 1:
        num_samples = int(num_samples)
    else:
        assert (num_samples.shape[0] == 2)
        num_samples = (int(num_samples[0]), int(num_samples[1]))
    max_bounces = int(args[current_index])
    current_index += 1

    num_channel_args = int(args[current_index])
    current_index += 1

    channels = []
    for _ in range(num_channel_args):
        ch = args[current_index]
        ch = RednerChannels.asChannel(ch)
        channels.append(ch)
        current_index += 1

    sampler_type = args[current_index]
    sampler_type = RednerSamplerType.asSamplerType(sampler_type)
    current_index += 1

    use_primary_edge_sampling = args[current_index]
    current_index += 1
    use_secondary_edge_sampling = args[current_index]
    current_index += 1
    sample_pixel_center = args[current_index]
    current_index += 1

    start = time.time()
    scene = redner.Scene(camera, shapes, materials, area_lights, envmap,
                         pyredner.get_use_gpu(), pyredner.get_gpu_device_id(),
                         use_primary_edge_sampling,
                         use_secondary_edge_sampling)
    time_elapsed = time.time() - start
    if get_print_timing():
        print('Scene construction, time: %.5f s' % time_elapsed)

    # Make sure num_samples is a tuple (forward/backward sample counts)
    if isinstance(num_samples, int):
        num_samples = (num_samples, num_samples)

    options = redner.RenderOptions(seed, num_samples[0], max_bounces, channels,
                                   sampler_type, sample_pixel_center)

    ctx = Context()
    ctx.channels = channels
    ctx.options = options
    ctx.resolution = resolution
    ctx.viewport = viewport
    ctx.scene = scene
    ctx.camera = camera
    ctx.shapes = shapes
    ctx.materials = materials
    ctx.area_lights = area_lights
    ctx.envmap = envmap
    ctx.num_samples = num_samples
    ctx.num_channel_args = num_channel_args

    return ctx
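
A hedged sketch of how unpack_args pairs with serialize_scene in the forward pass above, assuming serialize_scene produces the flat argument list in exactly the order consumed here:

import pyredner_tensorflow as pyredner

# scene: a pyredner.Scene built by user code.
scene_args = pyredner.serialize_scene(scene=scene,
                                      num_samples=(64, 4),  # forward / backward samples
                                      max_bounces=1)
ctx = unpack_args(seed=0, args=scene_args)
# ctx.scene, ctx.options, ctx.camera, ... now hold the low-level redner handles.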
Example #6
def visualize_screen_gradient(grad_img: tf.Tensor,
                              seed: int,
                              scene: pyredner.Scene,
                              num_samples: Union[int, Tuple[int, int]],
                              max_bounces: int,
                              channels: List = [redner.channels.radiance],
                              sampler_type=redner.SamplerType.independent,
                              use_primary_edge_sampling: bool = True,
                              use_secondary_edge_sampling: bool = True,
                              sample_pixel_center: bool = False):
    """
        Given a serialized scene, output a 2-channel image that
        visualizes the derivatives of pixel color with respect to
        the screen-space coordinates.

        Args
        ====
        grad_img: Optional[tf.Tensor]
            The "adjoint" of the backpropagation gradient. If you don't know
            what this means just give None
        seed: int
            seed for the Monte Carlo random samplers
        See serialize_scene for the explanation of the rest of the arguments.
    """

    args = serialize_scene(\
        scene = scene,
        num_samples = num_samples,
        max_bounces = max_bounces,
        sampler_type = sampler_type,
        channels = channels,
        sample_pixel_center = sample_pixel_center)
    args_ctx = unpack_args(\
        seed, args, use_primary_edge_sampling, use_secondary_edge_sampling)
    channels = args_ctx.channels
    options = args_ctx.options
    resolution = args_ctx.resolution
    viewport = args_ctx.viewport
    scene = args_ctx.scene

    buffers = create_gradient_buffers(args_ctx)
    num_channels = redner.compute_num_channels(
        channels, scene.max_generic_texture_dimension)
    with tf.device(pyredner.get_device_name()):
        img_height = viewport[2] - viewport[0]
        img_width = viewport[3] - viewport[1]
        screen_gradient_image = tf.zeros(\
            shape = [img_height, img_width, 2],
            dtype = tf.float32)
        if grad_img is not None:
            assert (grad_img.shape[0] == resolution[0])
            assert (grad_img.shape[1] == resolution[1])
            assert (grad_img.shape[2] == num_channels)
        else:
            grad_img = tf.ones(\
                shape = [img_height, img_width, num_channels],
                dtype = tf.float32)
        start = time.time()
        redner.render(
            scene,
            options,
            redner.float_ptr(0),  # rendered_image
            redner.float_ptr(pyredner.data_ptr(grad_img)),  # d_rendered_image
            buffers.d_scene,
            redner.float_ptr(pyredner.data_ptr(screen_gradient_image)),
            redner.float_ptr(0))  # debug_image
        time_elapsed = time.time() - start
    if get_print_timing():
        print('Visualize gradient, time: %.5f s' % time_elapsed)

    return screen_gradient_image
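
A minimal usage sketch, assuming a pyredner.Scene named scene has already been built and that the package's imwrite helper is available; the output file name is illustrative:

import tensorflow as tf
import pyredner_tensorflow as pyredner

screen_grad = visualize_screen_gradient(
    grad_img=None,      # None means an all-ones adjoint is used
    seed=0,
    scene=scene,
    num_samples=64,
    max_bounces=1)
# screen_grad has shape [height, width, 2]: derivatives w.r.t. screen x and y.
# Pad a zero channel so the result can be written out as an RGB image.
pyredner.imwrite(
    tf.concat([screen_grad, tf.zeros_like(screen_grad[..., :1])], axis=-1),
    'screen_gradient.exr')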
Example #7
    def backward(grad_img):
        global __ctx
        ctx = __ctx
        scene = ctx.scene
        options = ctx.options
        with tf.device(pyredner.get_device_name()):
            d_position = tf.zeros(3, dtype=tf.float32)
            d_look_at = tf.zeros(3, dtype=tf.float32)
            d_up = tf.zeros(3, dtype=tf.float32)
            d_ndc_to_cam = tf.zeros([3, 3], dtype=tf.float32)
            d_cam_to_ndc = tf.zeros([3, 3], dtype=tf.float32)
            d_camera = redner.DCamera(
                redner.float_ptr(pyredner.data_ptr(d_position)),
                redner.float_ptr(pyredner.data_ptr(d_look_at)),
                redner.float_ptr(pyredner.data_ptr(d_up)),
                redner.float_ptr(pyredner.data_ptr(d_ndc_to_cam)),
                redner.float_ptr(pyredner.data_ptr(d_cam_to_ndc)))
        d_vertices_list = []
        d_uvs_list = []
        d_normals_list = []
        d_shapes = []
        with tf.device(pyredner.get_device_name()):
            for i, shape in enumerate(ctx.shapes):
                num_vertices = shape.num_vertices
                d_vertices = tf.zeros([num_vertices, 3], dtype=tf.float32)
                d_uvs = tf.zeros([num_vertices, 2],
                                 dtype=tf.float32) if shape.has_uvs() else None
                d_normals = tf.zeros(
                    [num_vertices, 3],
                    dtype=tf.float32) if shape.has_normals() else None
                d_vertices_list.append(d_vertices)
                d_uvs_list.append(d_uvs)
                d_normals_list.append(d_normals)
                d_shapes.append(redner.DShape(\
                    redner.float_ptr(pyredner.data_ptr(d_vertices)),
                    redner.float_ptr(pyredner.data_ptr(d_uvs) if d_uvs is not None else 0),
                    redner.float_ptr(pyredner.data_ptr(d_normals) if d_normals is not None else 0)))

        d_diffuse_list = []
        d_specular_list = []
        d_roughness_list = []
        d_normal_map_list = []
        d_diffuse_uv_scale_list = []
        d_specular_uv_scale_list = []
        d_roughness_uv_scale_list = []
        d_normal_map_uv_scale_list = []
        d_materials = []
        with tf.device(pyredner.get_device_name()):
            for material in ctx.materials:
                diffuse_size = material.get_diffuse_size()
                specular_size = material.get_specular_size()
                roughness_size = material.get_roughness_size()
                normal_map_size = material.get_normal_map_size()
                if diffuse_size[0] == 0:
                    d_diffuse = tf.zeros(3, dtype=tf.float32)
                else:
                    d_diffuse = tf.zeros(
                        [diffuse_size[2], diffuse_size[1], diffuse_size[0], 3],
                        dtype=tf.float32)
                if specular_size[0] == 0:
                    d_specular = tf.zeros(3, dtype=tf.float32)
                else:
                    d_specular = tf.zeros([
                        specular_size[2], specular_size[1], specular_size[0], 3
                    ],
                                          dtype=tf.float32)
                if roughness_size[0] == 0:
                    d_roughness = tf.zeros(1, dtype=tf.float32)
                else:
                    d_roughness = tf.zeros([
                        roughness_size[2], roughness_size[1],
                        roughness_size[0], 1
                    ],
                                           dtype=tf.float32)
                # HACK: tensorflow's eager mode uses a cache to store scalar
                #       constants to avoid memory copy. If we pass scalar tensors
                #       into the C++ code and modify them, we would corrupt the
                #       cache, causing incorrect results in future scalar constant
                #       creations. Thus we force tensorflow to copy by adding a zero
                # (also see https://github.com/tensorflow/tensorflow/issues/11186
                #  for more discussion regarding copying tensors)
                if d_roughness.shape.num_elements() == 1:
                    d_roughness = d_roughness + 0
                if normal_map_size[0] == 0:
                    d_normal_map = None
                else:
                    d_normal_map = tf.zeros([
                        normal_map_size[2], normal_map_size[1],
                        normal_map_size[0], 3
                    ],
                                            dtype=tf.float32)

                d_diffuse_list.append(d_diffuse)
                d_specular_list.append(d_specular)
                d_roughness_list.append(d_roughness)
                d_normal_map_list.append(d_normal_map)
                d_diffuse = redner.float_ptr(pyredner.data_ptr(d_diffuse))
                d_specular = redner.float_ptr(pyredner.data_ptr(d_specular))
                d_roughness = redner.float_ptr(pyredner.data_ptr(d_roughness))
                if normal_map_size[0] > 0:
                    d_normal_map = redner.float_ptr(
                        pyredner.data_ptr(d_normal_map))
                d_diffuse_uv_scale = tf.zeros([2], dtype=tf.float32)
                d_specular_uv_scale = tf.zeros([2], dtype=tf.float32)
                d_roughness_uv_scale = tf.zeros([2], dtype=tf.float32)
                if normal_map_size[0] > 0:
                    d_normal_map_uv_scale = tf.zeros([2], dtype=tf.float32)
                else:
                    d_normal_map_uv_scale = None
                d_diffuse_uv_scale_list.append(d_diffuse_uv_scale)
                d_specular_uv_scale_list.append(d_specular_uv_scale)
                d_roughness_uv_scale_list.append(d_roughness_uv_scale)
                d_normal_map_uv_scale_list.append(d_normal_map_uv_scale)
                d_diffuse_uv_scale = redner.float_ptr(
                    pyredner.data_ptr(d_diffuse_uv_scale))
                d_specular_uv_scale = redner.float_ptr(
                    pyredner.data_ptr(d_specular_uv_scale))
                d_roughness_uv_scale = redner.float_ptr(
                    pyredner.data_ptr(d_roughness_uv_scale))
                if normal_map_size[0] > 0:
                    d_normal_map_uv_scale = redner.float_ptr(
                        pyredner.data_ptr(d_normal_map_uv_scale))
                d_diffuse_tex = redner.Texture3(\
                    d_diffuse, diffuse_size[0], diffuse_size[1], diffuse_size[2], d_diffuse_uv_scale)
                d_specular_tex = redner.Texture3(\
                    d_specular, specular_size[0], specular_size[1], specular_size[2], d_specular_uv_scale)
                d_roughness_tex = redner.Texture1(\
                    d_roughness, roughness_size[0], roughness_size[1], roughness_size[2],  d_roughness_uv_scale)
                if normal_map_size[0] > 0:
                    d_normal_map_tex = redner.Texture3(\
                        d_normal_map, normal_map_size[0], normal_map_size[1], normal_map_size[2], d_normal_map_uv_scale)
                else:
                    d_normal_map_tex = redner.Texture3(\
                        redner.float_ptr(0), 0, 0, 0, redner.float_ptr(0))
                d_materials.append(
                    redner.DMaterial(d_diffuse_tex, d_specular_tex,
                                     d_roughness_tex, d_normal_map_tex))

        d_intensity_list = []
        d_area_lights = []
        with tf.device(pyredner.get_device_name()):
            for light in ctx.area_lights:
                d_intensity = tf.zeros(3, dtype=tf.float32)
                d_intensity_list.append(d_intensity)
                d_area_lights.append(\
                    redner.DAreaLight(redner.float_ptr(pyredner.data_ptr(d_intensity))))

        d_envmap = None
        if ctx.envmap is not None:
            envmap = ctx.envmap
            size = envmap.get_size()
            with tf.device(pyredner.get_device_name()):
                d_envmap_values = tf.zeros([size[2], size[1], size[0], 3],
                                           dtype=tf.float32)
                d_envmap_values_ptr = redner.float_ptr(
                    pyredner.data_ptr(d_envmap_values))
                d_envmap_uv_scale = tf.zeros([2], dtype=tf.float32)
                d_envmap_uv_scale_ptr = redner.float_ptr(
                    pyredner.data_ptr(d_envmap_uv_scale))
                d_world_to_env = tf.zeros([4, 4], dtype=tf.float32)
                d_world_to_env_ptr = redner.float_ptr(
                    pyredner.data_ptr(d_world_to_env))
            d_envmap_tex = redner.Texture3(\
                d_envmap_values_ptr, size[0], size[1], size[2], d_envmap_uv_scale_ptr)
            d_envmap = redner.DEnvironmentMap(d_envmap_tex, d_world_to_env_ptr)

        d_scene = redner.DScene(d_camera, d_shapes, d_materials, d_area_lights,
                                d_envmap, pyredner.get_use_gpu(), -1)
        if not get_use_correlated_random_number():
            # Decouple the forward/backward random numbers by adding a big prime number
            options.seed += 1000003
        start = time.time()

        options.num_samples = ctx.num_samples[1]
        with tf.device(pyredner.get_device_name()):
            if pyredner.get_use_gpu():
                grad_img = grad_img.gpu(pyredner.get_gpu_device_id())
            else:
                grad_img = grad_img.cpu()
            redner.render(
                scene,
                options,
                redner.float_ptr(0),  # rendered_image
                redner.float_ptr(pyredner.data_ptr(grad_img)),
                d_scene,
                redner.float_ptr(0))  # debug_image
        time_elapsed = time.time() - start

        if print_timing:
            print('Backward pass, time: %.5f s' % time_elapsed)

        # # For debugging
        # pyredner.imwrite(grad_img, 'grad_img.exr')
        # grad_img = tf.ones([256, 256, 3], dtype=tf.float32)
        # debug_img = tf.zeros([256, 256, 3], dtype=tf.float32)
        # redner.render(scene, options,
        #               redner.float_ptr(0),
        #               redner.float_ptr(pyredner.data_ptr(grad_img)),
        #               d_scene,
        #               redner.float_ptr(pyredner.data_ptr(debug_img)))
        # pyredner.imwrite(debug_img, 'debug.exr')
        # pyredner.imwrite(-debug_img, 'debug_.exr')
        # exit()

        ret_list = []
        ret_list.append(None)  # seed
        ret_list.append(None)  # num_shapes
        ret_list.append(None)  # num_materials
        ret_list.append(None)  # num_lights
        ret_list.append(d_position)
        ret_list.append(d_look_at)
        ret_list.append(d_up)
        ret_list.append(d_ndc_to_cam)
        ret_list.append(d_cam_to_ndc)
        ret_list.append(None)  # clip near
        ret_list.append(None)  # resolution
        ret_list.append(None)  # camera_type

        num_shapes = len(ctx.shapes)
        for i in range(num_shapes):
            ret_list.append(d_vertices_list[i])
            ret_list.append(None)  # indices
            ret_list.append(d_uvs_list[i])
            ret_list.append(d_normals_list[i])
            ret_list.append(None)  # material id
            ret_list.append(None)  # light id

        num_materials = len(ctx.materials)
        for i in range(num_materials):
            ret_list.append(d_diffuse_list[i])
            ret_list.append(d_diffuse_uv_scale_list[i])
            ret_list.append(d_specular_list[i])
            ret_list.append(d_specular_uv_scale_list[i])
            ret_list.append(d_roughness_list[i])
            ret_list.append(d_roughness_uv_scale_list[i])
            ret_list.append(d_normal_map_list[i])
            ret_list.append(d_normal_map_uv_scale_list[i])
            ret_list.append(None)  # two sided

        num_area_lights = len(ctx.area_lights)
        for i in range(num_area_lights):
            ret_list.append(None)  # shape id
            ret_list.append(d_intensity_list[i].cpu())
            ret_list.append(None)  # two sided

        if ctx.envmap is not None:
            ret_list.append(d_envmap_values)
            ret_list.append(d_envmap_uv_scale)
            ret_list.append(None)  # env_to_world
            ret_list.append(d_world_to_env.cpu())
            ret_list.append(None)  # sample_cdf_ys
            ret_list.append(None)  # sample_cdf_xs
            ret_list.append(None)  # pdf_norm
        else:
            ret_list.append(None)
            ret_list.append(None)
            ret_list.append(None)
            ret_list.append(None)
            ret_list.append(None)
            ret_list.append(None)
            ret_list.append(None)

        ret_list.append(None)  # num samples
        ret_list.append(None)  # num bounces
        ret_list.append(None)  # num channels
        for _ in range(ctx.num_channels):
            ret_list.append(None)  # channel

        ret_list.append(None)  # sampler type
        ret_list.append(None)  # use_primary_edge_sampling
        ret_list.append(None)  # use_secondary_edge_sampling

        # pdb.set_trace()
        return ret_list
Example #8
def forward(seed: int, *args):
    """
        Forward rendering pass: given a scene, output an image.
    """
    global __ctx
    ctx = __ctx

    # Unpack arguments
    current_index = 0
    num_shapes = int(args[current_index])
    current_index += 1
    num_materials = int(args[current_index])
    current_index += 1
    num_lights = int(args[current_index])
    current_index += 1

    # Camera arguments
    cam_position = args[current_index]
    current_index += 1
    cam_look_at = args[current_index]
    current_index += 1
    cam_up = args[current_index]
    current_index += 1
    ndc_to_cam = args[current_index]
    current_index += 1
    cam_to_ndc = args[current_index]
    current_index += 1
    clip_near = float(args[current_index])
    current_index += 1
    resolution = args[current_index].numpy()  # Tuple[int, int]
    current_index += 1
    camera_type = pyredner.RednerCameraType.asCameraType(
        args[current_index])  # FIXME: Map to custom type
    current_index += 1

    with tf.device('/device:cpu:' + str(pyredner.get_cpu_device_id())):
        camera = redner.Camera(
            resolution[1], resolution[0],
            redner.float_ptr(pyredner.data_ptr(cam_position)),
            redner.float_ptr(pyredner.data_ptr(cam_look_at)),
            redner.float_ptr(pyredner.data_ptr(cam_up)),
            redner.float_ptr(pyredner.data_ptr(ndc_to_cam)),
            redner.float_ptr(pyredner.data_ptr(cam_to_ndc)), clip_near,
            camera_type)

    with tf.device(pyredner.get_device_name()):
        shapes = []
        for i in range(num_shapes):
            vertices = args[current_index]
            current_index += 1
            indices = args[current_index]
            current_index += 1
            uvs = args[current_index]
            current_index += 1
            normals = args[current_index]
            current_index += 1
            material_id = int(args[current_index])
            current_index += 1
            light_id = int(args[current_index])
            current_index += 1
            shapes.append(redner.Shape(\
                redner.float_ptr(pyredner.data_ptr(vertices)),
                redner.int_ptr(pyredner.data_ptr(indices)),
                redner.float_ptr(pyredner.data_ptr(uvs) if uvs is not None else 0),
                redner.float_ptr(pyredner.data_ptr(normals) if normals is not None else 0),
                int(vertices.shape[0]),
                int(indices.shape[0]),
                material_id,
                light_id))

    materials = []
    with tf.device(pyredner.get_device_name()):
        for i in range(num_materials):
            diffuse_reflectance = args[current_index]
            current_index += 1
            diffuse_uv_scale = args[current_index]
            current_index += 1
            specular_reflectance = args[current_index]
            current_index += 1
            specular_uv_scale = args[current_index]
            current_index += 1
            roughness = args[current_index]
            current_index += 1
            roughness_uv_scale = args[current_index]
            current_index += 1
            normal_map = args[current_index]
            current_index += 1
            normal_map_uv_scale = args[current_index]
            current_index += 1
            two_sided = bool(args[current_index])
            current_index += 1

            diffuse_reflectance_ptr = redner.float_ptr(
                pyredner.data_ptr(diffuse_reflectance))
            specular_reflectance_ptr = redner.float_ptr(
                pyredner.data_ptr(specular_reflectance))
            roughness_ptr = redner.float_ptr(pyredner.data_ptr(roughness))
            if normal_map.shape[0] > 0:
                normal_map_ptr = redner.float_ptr(
                    pyredner.data_ptr(normal_map))
            diffuse_uv_scale_ptr = redner.float_ptr(
                pyredner.data_ptr(diffuse_uv_scale))
            specular_uv_scale_ptr = redner.float_ptr(
                pyredner.data_ptr(specular_uv_scale))
            roughness_uv_scale_ptr = redner.float_ptr(
                pyredner.data_ptr(roughness_uv_scale))
            if normal_map.shape[0] > 0:
                normal_map_uv_scale_ptr = redner.float_ptr(
                    pyredner.data_ptr(normal_map_uv_scale))
            if get_tensor_dimension(diffuse_reflectance) == 1:
                diffuse_reflectance = redner.Texture3(diffuse_reflectance_ptr,
                                                      0, 0, 0,
                                                      diffuse_uv_scale_ptr)
            else:
                diffuse_reflectance = redner.Texture3(\
                    diffuse_reflectance_ptr,
                    int(diffuse_reflectance.shape[2]), # width
                    int(diffuse_reflectance.shape[1]), # height
                    int(diffuse_reflectance.shape[0]), # num levels
                    diffuse_uv_scale_ptr)
            if get_tensor_dimension(specular_reflectance) == 1:
                specular_reflectance = redner.Texture3(
                    specular_reflectance_ptr, 0, 0, 0, specular_uv_scale_ptr)
            else:
                specular_reflectance = redner.Texture3(\
                    specular_reflectance_ptr,
                    int(specular_reflectance.shape[2]), # width
                    int(specular_reflectance.shape[1]), # height
                    int(specular_reflectance.shape[0]), # num levels
                    specular_uv_scale_ptr)
            if get_tensor_dimension(roughness) == 1:
                roughness = redner.Texture1(roughness_ptr, 0, 0, 0,
                                            roughness_uv_scale_ptr)
            else:
                assert (get_tensor_dimension(roughness) == 4)
                roughness = redner.Texture1(\
                    roughness_ptr,
                    int(roughness.shape[2]), # width
                    int(roughness.shape[1]), # height
                    int(roughness.shape[0]), # num levels
                    roughness_uv_scale_ptr)
            if normal_map.shape[0] > 0:
                normal_map = redner.Texture3(\
                    normal_map_ptr,
                    int(normal_map.shape[2]),
                    int(normal_map.shape[1]),
                    int(normal_map.shape[0]),
                    normal_map_uv_scale_ptr)
            else:
                normal_map = redner.Texture3(\
                    redner.float_ptr(0), 0, 0, 0, redner.float_ptr(0))
            materials.append(redner.Material(\
                diffuse_reflectance,
                specular_reflectance,
                roughness,
                normal_map,
                two_sided))

    with tf.device('/device:cpu:' + str(pyredner.get_cpu_device_id())):
        area_lights = []
        for i in range(num_lights):
            shape_id = int(args[current_index])
            current_index += 1
            intensity = args[current_index]
            current_index += 1
            two_sided = bool(args[current_index])
            current_index += 1

            area_lights.append(
                redner.AreaLight(
                    shape_id, redner.float_ptr(pyredner.data_ptr(intensity)),
                    two_sided))

    envmap = None
    if not is_empty_tensor(args[current_index]):
        values = args[current_index]
        current_index += 1
        envmap_uv_scale = args[current_index]
        current_index += 1
        env_to_world = args[current_index]
        current_index += 1
        world_to_env = args[current_index]
        current_index += 1
        sample_cdf_ys = args[current_index]
        current_index += 1
        sample_cdf_xs = args[current_index]
        current_index += 1
        pdf_norm = float(args[current_index])
        current_index += 1

        assert isinstance(pdf_norm, float)
        with tf.device(pyredner.get_device_name()):
            values_ptr = redner.float_ptr(pyredner.data_ptr(values))
            sample_cdf_ys = redner.float_ptr(pyredner.data_ptr(sample_cdf_ys))
            sample_cdf_xs = redner.float_ptr(pyredner.data_ptr(sample_cdf_xs))
            envmap_uv_scale = redner.float_ptr(
                pyredner.data_ptr(envmap_uv_scale))
        with tf.device('/device:cpu:' + str(pyredner.get_cpu_device_id())):
            env_to_world = redner.float_ptr(pyredner.data_ptr(env_to_world))
            world_to_env = redner.float_ptr(pyredner.data_ptr(world_to_env))
        values = redner.Texture3(
            values_ptr,
            int(values.shape[2]),  # width
            int(values.shape[1]),  # height
            int(values.shape[0]),  # num levels
            envmap_uv_scale)
        envmap = redner.EnvironmentMap(\
            values,
            env_to_world,
            world_to_env,
            sample_cdf_ys,
            sample_cdf_xs,
            pdf_norm)
    else:
        current_index += 7

    # Options
    num_samples = int(args[current_index])
    current_index += 1
    max_bounces = int(args[current_index])
    current_index += 1

    __num_channels = int(args[current_index])
    current_index += 1

    channels = []
    for _ in range(__num_channels):
        ch = args[current_index]
        ch = pyredner.RednerChannels.asChannel(ch)
        channels.append(ch)
        current_index += 1

    sampler_type = args[current_index]
    sampler_type = pyredner.RednerSamplerType.asSamplerType(sampler_type)
    current_index += 1

    use_primary_edge_sampling = args[current_index]
    current_index += 1
    use_secondary_edge_sampling = args[current_index]
    current_index += 1

    scene = redner.Scene(camera, shapes, materials, area_lights, envmap,
                         pyredner.get_use_gpu(), pyredner.get_gpu_device_id(),
                         use_primary_edge_sampling,
                         use_secondary_edge_sampling)

    # check that num_samples is a tuple
    if isinstance(num_samples, int):
        num_samples = (num_samples, num_samples)

    options = redner.RenderOptions(seed, num_samples[0], max_bounces, channels,
                                   sampler_type)
    num_channels = redner.compute_num_channels(channels)

    with tf.device(pyredner.get_device_name()):
        rendered_image = tf.zeros(
            shape=[resolution[0], resolution[1], num_channels],
            dtype=tf.float32)

        start = time.time()

        # pdb.set_trace()
        redner.render(scene, options,
                      redner.float_ptr(pyredner.data_ptr(rendered_image)),
                      redner.float_ptr(0), None, redner.float_ptr(0))
        time_elapsed = time.time() - start
        if print_timing:
            print('Forward pass, time: %.5f s' % time_elapsed)

        # # For debugging
        # debug_img = tf.zeros((256, 256, 3), dtype=tf.float32)
        # redner.render(scene,
        #               options,
        #               redner.float_ptr(pyredner.data_ptr(rendered_image)),
        #               redner.float_ptr(0),
        #               None,
        #               redner.float_ptr(pyredner.data_ptr(debug_img)))
        # pyredner.imwrite(debug_img, 'debug.png')
        # exit()

        # import pdb; pdb.set_trace()

    ctx.shapes = shapes
    ctx.materials = materials
    ctx.area_lights = area_lights
    ctx.envmap = envmap
    ctx.scene = scene
    ctx.options = options
    ctx.num_samples = num_samples
    ctx.num_channels = __num_channels
    return rendered_image
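
The forward pass above only produces the rendered image; gradients flow through a matching backward pass (like the one in the next example) via TensorFlow's custom-gradient mechanism. Below is a minimal sketch of how the two could be wired together. It is not taken from pyredner itself: `forward` and `backward` refer to the functions shown in these examples, and the exact module-level glue in pyredner may differ.

import tensorflow as tf

# Minimal sketch: expose the renderer as a differentiable TensorFlow op by
# pairing forward() with backward() through tf.custom_gradient.
@tf.custom_gradient
def render(*x):
    # x[0] is the random seed, the remaining elements are the serialized scene tensors.
    seed = int(x[0])
    img = forward(seed, *x[1:])

    def grad(d_img):
        # backward returns one gradient (or None) per element of x; its leading
        # None stands in for the non-differentiable seed.
        return backward(d_img)

    return img, grad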
Example #9
0
    def backward(grad_img):
        camera = ctx.camera
        scene = ctx.scene
        options = ctx.options

        with tf.device(pyredner.get_device_name()):
            if camera.use_look_at:
                d_position = tf.zeros(3, dtype=tf.float32)
                d_look_at = tf.zeros(3, dtype=tf.float32)
                d_up = tf.zeros(3, dtype=tf.float32)
                d_cam_to_world = None
                d_world_to_cam = None
            else:
                d_position = None
                d_look_at = None
                d_up = None
                d_cam_to_world = tf.zeros([4, 4], dtype=tf.float32)
                d_world_to_cam = tf.zeros([4, 4], dtype=tf.float32)
            d_intrinsic_mat_inv = tf.zeros([3, 3], dtype=tf.float32)
            d_intrinsic_mat = tf.zeros([3, 3], dtype=tf.float32)
            if camera.use_look_at:
                d_camera = redner.DCamera(
                    redner.float_ptr(pyredner.data_ptr(d_position)),
                    redner.float_ptr(pyredner.data_ptr(d_look_at)),
                    redner.float_ptr(pyredner.data_ptr(d_up)),
                    redner.float_ptr(0),  # cam_to_world
                    redner.float_ptr(0),  # world_to_cam
                    redner.float_ptr(pyredner.data_ptr(d_intrinsic_mat_inv)),
                    redner.float_ptr(pyredner.data_ptr(d_intrinsic_mat)))
            else:
                d_camera = redner.DCamera(
                    redner.float_ptr(0), redner.float_ptr(0),
                    redner.float_ptr(0),
                    redner.float_ptr(pyredner.data_ptr(d_cam_to_world)),
                    redner.float_ptr(pyredner.data_ptr(d_world_to_cam)),
                    redner.float_ptr(pyredner.data_ptr(d_intrinsic_mat_inv)),
                    redner.float_ptr(pyredner.data_ptr(d_intrinsic_mat)))

        d_vertices_list = []
        d_uvs_list = []
        d_normals_list = []
        d_colors_list = []
        d_shapes = []
        with tf.device(pyredner.get_device_name()):
            for i, shape in enumerate(ctx.shapes):
                num_vertices = shape.num_vertices
                d_vertices = tf.zeros([num_vertices, 3], dtype=tf.float32)
                d_uvs = tf.zeros([num_vertices, 2],
                                 dtype=tf.float32) if shape.has_uvs() else None
                d_normals = tf.zeros(
                    [num_vertices, 3],
                    dtype=tf.float32) if shape.has_normals() else None
                d_colors = tf.zeros(
                    [num_vertices, 3],
                    dtype=tf.float32) if shape.has_colors() else None
                d_vertices_list.append(d_vertices)
                d_uvs_list.append(d_uvs)
                d_normals_list.append(d_normals)
                d_colors_list.append(d_colors)
                d_shapes.append(redner.DShape(\
                    redner.float_ptr(pyredner.data_ptr(d_vertices)),
                    redner.float_ptr(pyredner.data_ptr(d_uvs) if d_uvs is not None else 0),
                    redner.float_ptr(pyredner.data_ptr(d_normals) if d_normals is not None else 0),
                    redner.float_ptr(pyredner.data_ptr(d_colors) if d_colors is not None else 0)))

        d_diffuse_list = []
        d_specular_list = []
        d_roughness_list = []
        d_normal_map_list = []
        d_diffuse_uv_scale_list = []
        d_specular_uv_scale_list = []
        d_roughness_uv_scale_list = []
        d_generic_list = []
        d_generic_uv_scale_list = []
        d_normal_map_uv_scale_list = []
        d_materials = []
        with tf.device(pyredner.get_device_name()):
            for material in ctx.materials:
                if material.get_diffuse_size(0)[0] == 0:
                    d_diffuse = [tf.zeros(3, dtype=tf.float32)]
                else:
                    d_diffuse = []
                    for l in range(material.get_diffuse_levels()):
                        diffuse_size = material.get_diffuse_size(l)
                        d_diffuse.append(\
                            tf.zeros([diffuse_size[1],
                                      diffuse_size[0],
                                      3], dtype=tf.float32))

                if material.get_specular_size(0)[0] == 0:
                    d_specular = [tf.zeros(3, dtype=tf.float32)]
                else:
                    d_specular = []
                    for l in range(material.get_specular_levels()):
                        specular_size = material.get_specular_size(l)
                        d_specular.append(\
                            tf.zeros([specular_size[1],
                                      specular_size[0],
                                      3], dtype=tf.float32))

                if material.get_roughness_size(0)[0] == 0:
                    d_roughness = [tf.zeros(1, dtype=tf.float32)]
                else:
                    d_roughness = []
                    for l in range(material.get_roughness_levels()):
                        roughness_size = material.get_roughness_size(l)
                        d_roughness.append(\
                            tf.zeros([roughness_size[1],
                                      roughness_size[0],
                                      1], dtype=tf.float32))
                # HACK: TensorFlow's eager mode caches scalar constants to avoid
                #       memory copies. If we passed a scalar tensor into the C++
                #       code and modified it in place, we would corrupt that cache
                #       and get incorrect results in future scalar constant
                #       creations. We therefore force TensorFlow to make a copy by
                #       adding zero (a standalone illustration of this trick
                #       follows after this function).
                # (also see https://github.com/tensorflow/tensorflow/issues/11186
                #  for more discussion regarding copying tensors)
                if d_roughness[0].shape.num_elements() == 1:
                    d_roughness[0] = d_roughness[0] + 0

                if material.get_generic_levels() == 0:
                    d_generic = None
                else:
                    d_generic = []
                    for l in range(material.get_generic_levels()):
                        generic_size = material.get_generic_size(l)
                        d_generic.append(\
                            tf.zeros([generic_size[2],
                                      generic_size[1],
                                      generic_size[0]], dtype=tf.float32))

                if material.get_normal_map_levels() == 0:
                    d_normal_map = None
                else:
                    d_normal_map = []
                    for l in range(material.get_normal_map_levels()):
                        normal_map_size = material.get_normal_map_size(l)
                        d_normal_map.append(\
                            tf.zeros([normal_map_size[1],
                                      normal_map_size[0],
                                      3], dtype=tf.float32))

                d_diffuse_list.append(d_diffuse)
                d_specular_list.append(d_specular)
                d_roughness_list.append(d_roughness)
                d_generic_list.append(d_generic)
                d_normal_map_list.append(d_normal_map)

                d_diffuse_uv_scale = tf.zeros([2], dtype=tf.float32)
                d_specular_uv_scale = tf.zeros([2], dtype=tf.float32)
                d_roughness_uv_scale = tf.zeros([2], dtype=tf.float32)
                if d_generic is None:
                    d_generic_uv_scale = None
                else:
                    d_generic_uv_scale = tf.zeros([2], dtype=tf.float32)
                if d_normal_map is None:
                    d_normal_map_uv_scale = None
                else:
                    d_normal_map_uv_scale = tf.zeros([2], dtype=tf.float32)
                d_diffuse_uv_scale_list.append(d_diffuse_uv_scale)
                d_specular_uv_scale_list.append(d_specular_uv_scale)
                d_roughness_uv_scale_list.append(d_roughness_uv_scale)
                d_generic_uv_scale_list.append(d_generic_uv_scale)
                d_normal_map_uv_scale_list.append(d_normal_map_uv_scale)

                if len(d_diffuse[0].shape) == 1:
                    d_diffuse_tex = redner.Texture3(\
                        [redner.float_ptr(pyredner.data_ptr(d_diffuse[0]))],
                        [0],
                        [0],
                        3,
                        redner.float_ptr(pyredner.data_ptr(d_diffuse_uv_scale)))
                else:
                    d_diffuse_tex = redner.Texture3(\
                        [redner.float_ptr(pyredner.data_ptr(x)) for x in d_diffuse],
                        [x.shape[1] for x in d_diffuse],
                        [x.shape[0] for x in d_diffuse],
                        3,
                        redner.float_ptr(pyredner.data_ptr(d_diffuse_uv_scale)))

                if len(d_specular[0].shape) == 1:
                    d_specular_tex = redner.Texture3(\
                        [redner.float_ptr(pyredner.data_ptr(d_specular[0]))],
                        [0],
                        [0],
                        3,
                        redner.float_ptr(pyredner.data_ptr(d_specular_uv_scale)))
                else:
                    d_specular_tex = redner.Texture3(\
                        [redner.float_ptr(pyredner.data_ptr(x)) for x in d_specular],
                        [x.shape[1] for x in d_specular],
                        [x.shape[0] for x in d_specular],
                        3,
                        redner.float_ptr(pyredner.data_ptr(d_specular_uv_scale)))

                if len(d_roughness[0].shape) == 1:
                    d_roughness_tex = redner.Texture1(\
                        [redner.float_ptr(pyredner.data_ptr(d_roughness[0]))],
                        [0],
                        [0],
                        1,
                        redner.float_ptr(pyredner.data_ptr(d_roughness_uv_scale)))
                else:
                    d_roughness_tex = redner.Texture1(\
                        [redner.float_ptr(pyredner.data_ptr(x)) for x in d_roughness],
                        [x.shape[1] for x in d_roughness],
                        [x.shape[0] for x in d_roughness],
                        1,
                        redner.float_ptr(pyredner.data_ptr(d_roughness_uv_scale)))

                if d_generic is None:
                    d_generic_tex = redner.TextureN(\
                        [], [], [], 0, redner.float_ptr(0))
                else:
                    d_generic_tex = redner.TextureN(\
                        [redner.float_ptr(pyredner.data_ptr(x)) for x in d_generic],
                        [x.shape[1] for x in d_generic],
                        [x.shape[0] for x in d_generic],
                        d_generic[0].shape[2],
                        redner.float_ptr(pyredner.data_ptr(d_generic_uv_scale)))

                if d_normal_map is None:
                    d_normal_map = redner.Texture3(\
                        [], [], [], 0, redner.float_ptr(0))
                else:
                    d_normal_map = redner.Texture3(\
                        [redner.float_ptr(pyredner.data_ptr(x)) for x in d_normal_map],
                        [x.shape[1] for x in d_normal_map],
                        [x.shape[0] for x in d_normal_map],
                        3,
                        redner.float_ptr(pyredner.data_ptr(d_normal_map_uv_scale)))

                d_materials.append(redner.DMaterial(\
                    d_diffuse_tex, d_specular_tex, d_roughness_tex,
                    d_generic_tex, d_normal_map))

        d_intensity_list = []
        d_area_lights = []
        with tf.device(pyredner.get_device_name()):
            for light in ctx.area_lights:
                d_intensity = tf.zeros(3, dtype=tf.float32)
                d_intensity_list.append(d_intensity)
                d_area_lights.append(\
                    redner.DAreaLight(redner.float_ptr(pyredner.data_ptr(d_intensity))))

        d_envmap = None
        if ctx.envmap is not None:
            envmap = ctx.envmap
            with tf.device(pyredner.get_device_name()):
                d_envmap_values = []
                for l in range(envmap.get_levels()):
                    size = envmap.get_size(l)
                    d_envmap_values.append(\
                        tf.zeros([size[1],
                                  size[0],
                                  3], dtype=tf.float32))
                d_envmap_uv_scale = tf.zeros([2], dtype=tf.float32)
                d_world_to_env = tf.zeros([4, 4], dtype=tf.float32)
                d_envmap_tex = redner.Texture3(\
                    [redner.float_ptr(pyredner.data_ptr(x)) for x in d_envmap_values],
                    [x.shape[1] for x in d_envmap_values],
                    [x.shape[0] for x in d_envmap_values],
                    3,
                    redner.float_ptr(pyredner.data_ptr(d_envmap_uv_scale)))
                d_envmap = redner.DEnvironmentMap(
                    d_envmap_tex,
                    redner.float_ptr(pyredner.data_ptr(d_world_to_env)))

        d_scene = redner.DScene(d_camera, d_shapes, d_materials, d_area_lights,
                                d_envmap, pyredner.get_use_gpu(),
                                pyredner.get_gpu_device_id())
        if not get_use_correlated_random_number():
            # Decouple the forward/backward random numbers by adding a big prime number
            options.seed += 1000003
        start = time.time()

        options.num_samples = ctx.num_samples[1]
        with tf.device(pyredner.get_device_name()):
            grad_img = tf.identity(grad_img)
            redner.render(
                scene,
                options,
                redner.float_ptr(0),  # rendered_image
                redner.float_ptr(pyredner.data_ptr(grad_img)),
                d_scene,
                redner.float_ptr(0))  # debug_image
        time_elapsed = time.time() - start

        if print_timing:
            print('Backward pass, time: %.5f s' % time_elapsed)

        # # For debugging
        # pyredner.imwrite(grad_img, 'grad_img.exr')
        # grad_img = tf.ones([256, 256, 3], dtype=tf.float32)
        # debug_img = tf.zeros([256, 256, 3], dtype=tf.float32)
        # redner.render(scene, options,
        #               redner.float_ptr(0),
        #               redner.float_ptr(pyredner.data_ptr(grad_img)),
        #               d_scene,
        #               redner.float_ptr(pyredner.data_ptr(debug_img)))
        # pyredner.imwrite(debug_img, 'debug.exr')
        # pyredner.imwrite(-debug_img, 'debug_.exr')
        # exit()

        ret_list = []
        ret_list.append(None)  # seed
        ret_list.append(None)  # num_shapes
        ret_list.append(None)  # num_materials
        ret_list.append(None)  # num_lights
        if camera.use_look_at:
            ret_list.append(d_position)
            ret_list.append(d_look_at)
            ret_list.append(d_up)
            ret_list.append(None)  # cam_to_world
            ret_list.append(None)  # world_to_cam
        else:
            ret_list.append(None)  # pos
            ret_list.append(None)  # look
            ret_list.append(None)  # up
            ret_list.append(d_cam_to_world)
            ret_list.append(d_world_to_cam)
        ret_list.append(d_intrinsic_mat_inv)
        ret_list.append(d_intrinsic_mat)
        ret_list.append(None)  # clip near
        ret_list.append(None)  # resolution
        ret_list.append(None)  # camera_type

        num_shapes = len(ctx.shapes)
        for i in range(num_shapes):
            ret_list.append(d_vertices_list[i])
            ret_list.append(None)  # indices
            ret_list.append(d_uvs_list[i])
            ret_list.append(d_normals_list[i])
            ret_list.append(None)  # uv_indices
            ret_list.append(None)  # normal_indices
            ret_list.append(d_colors_list[i])
            ret_list.append(None)  # material id
            ret_list.append(None)  # light id

        num_materials = len(ctx.materials)
        for i in range(num_materials):
            ret_list.append(None)  # num_levels
            for d_diffuse in d_diffuse_list[i]:
                ret_list.append(d_diffuse)
            ret_list.append(d_diffuse_uv_scale_list[i])
            ret_list.append(None)  # num_levels
            for d_specular in d_specular_list[i]:
                ret_list.append(d_specular)
            ret_list.append(d_specular_uv_scale_list[i])
            ret_list.append(None)  # num_levels
            for d_roughness in d_roughness_list[i]:
                ret_list.append(d_roughness)
            ret_list.append(d_roughness_uv_scale_list[i])
            if d_generic_list[i] is None:
                ret_list.append(None)  # num_levels
            else:
                ret_list.append(None)  # num_levels
                for d_generic in d_generic_list[i]:
                    ret_list.append(d_generic)
                ret_list.append(d_generic_uv_scale_list[i])
            if d_normal_map_list[i] is None:
                ret_list.append(None)  # num_levels
            else:
                ret_list.append(None)  # num_levels
                for d_normal_map in d_normal_map_list[i]:
                    ret_list.append(d_normal_map)
                ret_list.append(d_normal_map_uv_scale_list[i])
            ret_list.append(None)  # compute_specular_lighting
            ret_list.append(None)  # two sided
            ret_list.append(None)  # use_vertex_color

        num_area_lights = len(ctx.area_lights)
        for i in range(num_area_lights):
            ret_list.append(None)  # shape id
            with tf.device('/device:cpu:' + str(pyredner.get_cpu_device_id())):
                ret_list.append(tf.identity(d_intensity_list[i]))
            ret_list.append(None)  # two sided

        if ctx.envmap is not None:
            ret_list.append(None)  # num_levels
            for d_values in d_envmap_values:
                ret_list.append(d_values)
            ret_list.append(d_envmap_uv_scale)
            ret_list.append(None)  # env_to_world
            with tf.device('/device:cpu:' + str(pyredner.get_cpu_device_id())):
                ret_list.append(tf.identity(d_world_to_env))
            ret_list.append(None)  # sample_cdf_ys
            ret_list.append(None)  # sample_cdf_xs
            ret_list.append(None)  # pdf_norm
        else:
            ret_list.append(None)

        ret_list.append(None)  # num samples
        ret_list.append(None)  # num bounces
        ret_list.append(None)  # num channels
        for _ in range(ctx.num_channels):
            ret_list.append(None)  # channel

        ret_list.append(None)  # sampler type
        ret_list.append(None)  # use_primary_edge_sampling
        ret_list.append(None)  # use_secondary_edge_sampling
        ret_list.append(None)  # sample_pixel_center

        return ret_list
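
The HACK note inside this backward pass is worth a standalone illustration. The snippet below is a sketch, not pyredner code: a 1-element gradient tensor may alias TensorFlow's eager constant cache, so its buffer is made private by adding zero before a raw data pointer to it is handed to the C++ renderer (the `write_through_pointer` call is hypothetical and only marks where that in-place write would happen).

import tensorflow as tf

# Sketch of the copy-on-add workaround used for scalar gradient buffers above.
d_roughness = tf.zeros(1, dtype=tf.float32)  # may alias a cached constant
d_roughness = d_roughness + 0                # '+ 0' materializes a private buffer
# write_through_pointer(pyredner.data_ptr(d_roughness))  # now safe to mutate in place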
Example #10
0
def parse_shape(node, material_dict, shape_id):
    if node.attrib['type'] == 'obj' or node.attrib['type'] == 'serialized':
        to_world = tf.eye(4)
        serialized_shape_id = 0
        mat_id = -1
        light_intensity = None
        filename = ''
        max_smooth_angle = -1
        for child in node:
            if 'name' in child.attrib:
                if child.attrib['name'] == 'filename':
                    filename = child.attrib['value']
                elif child.attrib['name'] == 'toWorld':
                    to_world = parse_transform(child)
                elif child.attrib['name'] == 'shapeIndex':
                    serialized_shape_id = int(child.attrib['value'])
                elif child.attrib['name'] == 'maxSmoothAngle':
                    max_smooth_angle = float(child.attrib['value'])
            if child.tag == 'ref':
                mat_id = material_dict[child.attrib['id']]
            elif child.tag == 'emitter':
                for grandchild in child:
                    if grandchild.attrib['name'] == 'radiance':
                        light_intensity = parse_vector(
                            grandchild.attrib['value'])
                        if light_intensity.shape[0] == 1:
                            light_intensity = tf.constant([
                                light_intensity[0], light_intensity[0],
                                light_intensity[0]
                            ])

        if node.attrib['type'] == 'obj':
            _, mesh_list, _ = pyredner.load_obj(filename)
            # Convert to CPU for rebuild_topology
            vertices = mesh_list[0][1].vertices.cpu()
            indices = mesh_list[0][1].indices.cpu()
            uvs = mesh_list[0][1].uvs
            normals = mesh_list[0][1].normals
            uv_indices = mesh_list[0][1].uv_indices
            normal_indices = mesh_list[0][1].normal_indices
            if uvs is not None:
                uvs = uvs.cpu()
            if normals is not None:
                normals = normals.cpu()
            if uv_indices is not None:
                uv_indices = uv_indices.cpu()
        else:
            assert (node.attrib['type'] == 'serialized')
            mitsuba_tri_mesh = redner.load_serialized(filename,
                                                      serialized_shape_id)
            vertices = tf.convert_to_tensor(mitsuba_tri_mesh.vertices)
            indices = tf.convert_to_tensor(mitsuba_tri_mesh.indices)
            uvs = tf.convert_to_tensor(mitsuba_tri_mesh.uvs)
            normals = tf.convert_to_tensor(mitsuba_tri_mesh.normals)
            if uvs.shape[0] == 0:
                uvs = None
            if normals.shape[0] == 0:
                normals = None

        # Transform the vertices and normals
        vertices = tf.concat(
            (vertices, tf.ones([vertices.shape[0], 1], dtype=tf.float32)),
            axis=1)
        # Note: perm=[1, 0] actually transposes to_world; perm=[0, 1] would be a no-op.
        vertices = vertices @ tf.transpose(to_world, [1, 0])
        vertices = vertices / vertices[:, 3:4]
        vertices = vertices[:, 0:3]
        if normals is not None:
            normals = normals @ (tf.linalg.inv(tf.transpose(to_world,
                                                            [1, 0]))[:3, :3])
        assert (vertices is not None)
        assert (indices is not None)
        if max_smooth_angle >= 0:
            if normals is None:
                normals = tf.zeros_like(vertices)
            new_num_vertices = redner.rebuild_topology(\
                redner.float_ptr(pyredner.data_ptr(vertices)),
                redner.int_ptr(pyredner.data_ptr(indices)),
                redner.float_ptr(pyredner.data_ptr(uvs) if uvs is not None else 0),
                redner.float_ptr(pyredner.data_ptr(normals) if normals is not None else 0),
                redner.int_ptr(pyredner.data_ptr(uv_indices) if uv_indices is not None else 0),
                int(vertices.shape[0]),
                int(indices.shape[0]),
                max_smooth_angle)
            print('Rebuilt topology, original vertices size: {}, new vertices size: {}'.format(\
                int(vertices.shape[0]), new_num_vertices))
            # tf.Tensor has no in-place resize_; truncate to the rebuilt vertex
            # count instead (this assumes rebuild_topology does not grow the
            # buffers it was given).
            vertices = vertices[:new_num_vertices, :]
            if uvs is not None:
                uvs = uvs[:new_num_vertices, :]
            if normals is not None:
                normals = normals[:new_num_vertices, :]

        lgt = None
        if light_intensity is not None:
            lgt = pyredner.AreaLight(shape_id, light_intensity)

        return pyredner.Shape(vertices=vertices,
                              indices=indices,
                              uvs=uvs,
                              normals=normals,
                              material_id=mat_id), lgt
    elif node.attrib['type'] == 'rectangle':
        indices = tf.constant([[0, 2, 1], [1, 2, 3]], dtype=tf.int32)
        vertices = tf.constant([[-1.0, -1.0, 0.0], [-1.0, 1.0, 0.0],
                                [1.0, -1.0, 0.0], [1.0, 1.0, 0.0]])
        uvs = None
        normals = None
        to_world = tf.eye(4)
        mat_id = -1
        light_intensity = None
        for child in node:
            if 'name' in child.attrib:
                if child.attrib['name'] == 'toWorld':
                    to_world = parse_transform(child)
            if child.tag == 'ref':
                mat_id = material_dict[child.attrib['id']]
            elif child.tag == 'emitter':
                for grandchild in child:
                    if grandchild.attrib['name'] == 'radiance':
                        light_intensity = parse_vector(
                            grandchild.attrib['value'])
                        if light_intensity.shape[0] == 1:
                            light_intensity = tf.constant([
                                light_intensity[0], light_intensity[0],
                                light_intensity[0]
                            ])
        # Transform the vertices and normals
        vertices = tf.concat(
            (vertices, tf.ones([vertices.shape[0], 1], dtype=tf.float32)),
            axis=1)
        # As above, perm=[1, 0] is the actual transpose of to_world.
        vertices = vertices @ tf.transpose(to_world, [1, 0])
        vertices = vertices / vertices[:, 3:4]
        vertices = vertices[:, 0:3]
        if normals is not None:
            normals = normals @ (tf.linalg.inv(tf.transpose(to_world,
                                                            [1, 0]))[:3, :3])
        assert (vertices is not None)
        assert (indices is not None)
        lgt = None
        if light_intensity is not None:
            lgt = pyredner.AreaLight(shape_id, light_intensity)

        return pyredner.Shape(vertices=vertices,
                              indices=indices,
                              uvs=uvs,
                              normals=normals,
                              material_id=mat_id), lgt
    else:
        assert (False)
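
A brief usage sketch for parse_shape: it expects an xml.etree.ElementTree element from a Mitsuba scene description, a mapping from material ids to indices, and the shape's index in the scene. In the sketch below the XML snippet and the material_dict contents are hypothetical, and helpers such as parse_transform and parse_vector are assumed to be defined elsewhere in the same module.

import xml.etree.ElementTree as ET

# Hypothetical usage: parse a single Mitsuba 'rectangle' shape that references
# a material named 'white'. material_dict would normally be built while
# parsing the <bsdf> nodes of the same scene file.
node = ET.fromstring("""
<shape type="rectangle">
    <ref id="white"/>
</shape>
""")
material_dict = {'white': 0}
shape, light = parse_shape(node, material_dict, shape_id=0)
# 'light' is None here because the shape has no <emitter> child.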