Example #1
    def __init__(self,
                 vertices: tf.Tensor,
                 indices: tf.Tensor,
                 uvs: Optional[tf.Tensor] = None,
                 normals: Optional[tf.Tensor] = None,
                 material_id: int = 0):
        assert (tf.executing_eagerly())
        assert (vertices.dtype == tf.float32)
        assert (indices.dtype == tf.int32)
        if uvs is not None:
            assert (uvs.dtype == tf.float32)
        if normals is not None:
            assert (normals.dtype == tf.float32)
        if pyredner.get_use_gpu():
            # Automatically copy all tensors to GPU
            # tf.Variable doesn't support .gpu(), so we'll wrap it with an identity().
            vertices = tf.identity(vertices).gpu(pyredner.get_gpu_device_id())
            indices = tf.identity(indices).gpu(pyredner.get_gpu_device_id())
            if uvs is not None:
                uvs = tf.identity(uvs).gpu(pyredner.get_gpu_device_id())
            if normals is not None:
                normals = tf.identity(normals).gpu(
                    pyredner.get_gpu_device_id())
        else:
            # Automatically copy to CPU
            vertices = tf.identity(vertices).cpu()
            indices = tf.identity(indices).cpu()
            if uvs is not None:
                uvs = tf.identity(uvs).cpu()
            if normals is not None:
                normals = tf.identity(normals).cpu()

        self.vertices = vertices
        self.indices = indices
        self.uvs = uvs
        self.normals = normals
        self.material_id = material_id
        self.light_id = -1
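
A minimal usage sketch of the constructor above, assuming it belongs to the `Shape` class of redner's TensorFlow interface (imported here as `pyredner_tensorflow`); the triangle data is made up for illustration:

import tensorflow as tf
import pyredner_tensorflow as pyredner

# A single triangle; dtypes must match the asserts in __init__.
vertices = tf.constant([[0.0, 0.0, 0.0],
                        [1.0, 0.0, 0.0],
                        [0.0, 1.0, 0.0]], dtype=tf.float32)
indices = tf.constant([[0, 1, 2]], dtype=tf.int32)

shape = pyredner.Shape(vertices=vertices, indices=indices, material_id=0)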
Example #2
def render(*x):
    """
        The main TensorFlow interface of C++ redner.
    """
    assert (tf.executing_eagerly())
    if pyredner.get_use_gpu() and os.environ.get(
            'TF_FORCE_GPU_ALLOW_GROWTH', 'false') != 'true':
        print('******************** WARNING ********************')
        print('TensorFlow by default allocates all GPU memory,')
        print('causing a huge number of page faults when rendering.')
        print(
            'Please set the environment variable TF_FORCE_GPU_ALLOW_GROWTH to true,'
        )
        print('so that TensorFlow allocates memory on demand.')
        print('*************************************************')
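        # Note: TF_FORCE_GPU_ALLOW_GROWTH has to be set in the environment
        # before TensorFlow initializes its GPU devices (e.g. exported in the
        # shell, or os.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = 'true' at the top
        # of the entry script); setting it at this point is likely too late to
        # take effect for the current process.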

    seed, args = int(x[0]), x[1:]
    img, ctx = forward(seed, *args)

    def backward(grad_img):
        scene = ctx.scene
        options = ctx.options

        buffers = create_gradient_buffers(ctx)

        if not get_use_correlated_random_number():
            # Decouple the forward/backward random numbers by adding a big prime number
            options.seed += 1000003
        start = time.time()

        options.num_samples = ctx.num_samples[1]
        with tf.device(pyredner.get_device_name()):
            grad_img = tf.identity(grad_img)
            redner.render(
                scene,
                options,
                redner.float_ptr(0),  # rendered_image
                redner.float_ptr(pyredner.data_ptr(grad_img)),
                buffers.d_scene,
                redner.float_ptr(0),  # translational_gradient_image
                redner.float_ptr(0))  # debug_image
        time_elapsed = time.time() - start

        if get_print_timing():
            print('Backward pass, time: %.5f s' % time_elapsed)

        ret_list = []
        ret_list.append(None)  # seed
        ret_list.append(None)  # num_shapes
        ret_list.append(None)  # num_materials
        ret_list.append(None)  # num_lights
        if ctx.camera.use_look_at:
            ret_list.append(buffers.d_position)
            ret_list.append(buffers.d_look_at)
            ret_list.append(buffers.d_up)
            ret_list.append(None)  # cam_to_world
            ret_list.append(None)  # world_to_cam
        else:
            ret_list.append(None)  # pos
            ret_list.append(None)  # look
            ret_list.append(None)  # up
            ret_list.append(buffers.d_cam_to_world)
            ret_list.append(buffers.d_world_to_cam)
        ret_list.append(buffers.d_intrinsic_mat_inv)
        ret_list.append(buffers.d_intrinsic_mat)
        ret_list.append(None)  # clip near
        ret_list.append(None)  # resolution
        ret_list.append(None)  # viewport
        ret_list.append(None)  # camera_type

        num_shapes = len(ctx.shapes)
        for i in range(num_shapes):
            ret_list.append(buffers.d_vertices_list[i])
            ret_list.append(None)  # indices
            ret_list.append(buffers.d_uvs_list[i])
            ret_list.append(buffers.d_normals_list[i])
            ret_list.append(None)  # uv_indices
            ret_list.append(None)  # normal_indices
            ret_list.append(buffers.d_colors_list[i])
            ret_list.append(None)  # material id
            ret_list.append(None)  # light id

        num_materials = len(ctx.materials)
        for i in range(num_materials):
            ret_list.append(None)  # num_levels
            for d_diffuse in buffers.d_diffuse_list[i]:
                ret_list.append(d_diffuse)
            ret_list.append(buffers.d_diffuse_uv_scale_list[i])
            ret_list.append(None)  # num_levels
            for d_specular in buffers.d_specular_list[i]:
                ret_list.append(d_specular)
            ret_list.append(buffers.d_specular_uv_scale_list[i])
            ret_list.append(None)  # num_levels
            for d_roughness in buffers.d_roughness_list[i]:
                ret_list.append(d_roughness)
            ret_list.append(buffers.d_roughness_uv_scale_list[i])
            if buffers.d_generic_list[i] is None:
                ret_list.append(None)  # num_levels
            else:
                ret_list.append(None)  # num_levels
                for d_generic in buffers.d_generic_list[i]:
                    ret_list.append(d_generic)
                ret_list.append(buffers.d_generic_uv_scale_list[i])
            if buffers.d_normal_map_list[i] is None:
                ret_list.append(None)  # num_levels
            else:
                ret_list.append(None)  # num_levels
                for d_normal_map in buffers.d_normal_map_list[i]:
                    ret_list.append(d_normal_map)
                ret_list.append(buffers.d_normal_map_uv_scale_list[i])
            ret_list.append(None)  # compute_specular_lighting
            ret_list.append(None)  # two sided
            ret_list.append(None)  # use_vertex_color

        num_area_lights = len(ctx.area_lights)
        for i in range(num_area_lights):
            ret_list.append(None)  # shape id
            with tf.device('/device:cpu:' + str(pyredner.get_cpu_device_id())):
                ret_list.append(tf.identity(buffers.d_intensity_list[i]))
            ret_list.append(None)  # two_sided
            ret_list.append(None)  # directly_visible

        if ctx.envmap is not None:
            ret_list.append(None)  # num_levels
            for d_values in buffers.d_envmap_values:
                ret_list.append(d_values)
            ret_list.append(buffers.d_envmap_uv_scale)
            ret_list.append(None)  # env_to_world
            with tf.device('/device:cpu:' + str(pyredner.get_cpu_device_id())):
                ret_list.append(tf.identity(buffers.d_world_to_env))
            ret_list.append(None)  # sample_cdf_ys
            ret_list.append(None)  # sample_cdf_xs
            ret_list.append(None)  # pdf_norm
            ret_list.append(None)  # directly_visible
        else:
            ret_list.append(None)

        ret_list.append(None)  # num samples
        ret_list.append(None)  # num bounces
        ret_list.append(None)  # num channels
        for _ in range(ctx.num_channel_args):
            ret_list.append(None)  # channel

        ret_list.append(None)  # sampler type
        ret_list.append(None)  # use_primary_edge_sampling
        ret_list.append(None)  # use_secondary_edge_sampling
        ret_list.append(None)  # sample_pixel_center

        return ret_list

    return img, backward
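
The `(img, backward)` pair returned above follows the `tf.custom_gradient` contract: the forward function returns its output together with a gradient function, and that gradient function must return one entry per forward input, using `None` for non-differentiable arguments (which is why `ret_list` is padded with `None` for ids, flags, and resolutions). A minimal, redner-independent sketch of the contract:

import tensorflow as tf

@tf.custom_gradient
def masked_scale(x, mask):
    y = x * mask
    def backward(dy):
        # One gradient entry per forward input: a real gradient for x,
        # None for the non-differentiable mask.
        return dy * mask, None
    return y, backward

x = tf.constant([1.0, 2.0])
mask = tf.constant([1.0, 0.0])
with tf.GradientTape() as tape:
    tape.watch(x)
    y = masked_scale(x, mask)
print(tape.gradient(y, x))  # [1. 0.]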
Example #3
def create_gradient_buffers(ctx):
    scene = ctx.scene
    options = ctx.options
    camera = ctx.camera

    buffers = Context()

    with tf.device(pyredner.get_device_name()):
        if camera.use_look_at:
            buffers.d_position = tf.zeros(3, dtype=tf.float32)
            buffers.d_look_at = tf.zeros(3, dtype=tf.float32)
            buffers.d_up = tf.zeros(3, dtype=tf.float32)
            buffers.d_cam_to_world = None
            buffers.d_world_to_cam = None
        else:
            buffers.d_position = None
            buffers.d_look_at = None
            buffers.d_up = None
            buffers.d_cam_to_world = tf.zeros([4, 4], dtype=tf.float32)
            buffers.d_world_to_cam = tf.zeros([4, 4], dtype=tf.float32)
        buffers.d_intrinsic_mat_inv = tf.zeros([3, 3], dtype=tf.float32)
        buffers.d_intrinsic_mat = tf.zeros([3, 3], dtype=tf.float32)
        if camera.use_look_at:
            buffers.d_camera = redner.DCamera(\
                redner.float_ptr(pyredner.data_ptr(buffers.d_position)),
                redner.float_ptr(pyredner.data_ptr(buffers.d_look_at)),
                redner.float_ptr(pyredner.data_ptr(buffers.d_up)),
                redner.float_ptr(0), # cam_to_world
                redner.float_ptr(0), # world_to_cam
                redner.float_ptr(pyredner.data_ptr(buffers.d_intrinsic_mat_inv)),
                redner.float_ptr(pyredner.data_ptr(buffers.d_intrinsic_mat)))
        else:
            buffers.d_camera = redner.DCamera(\
                redner.float_ptr(0),
                redner.float_ptr(0),
                redner.float_ptr(0),
                redner.float_ptr(pyredner.data_ptr(buffers.d_cam_to_world)),
                redner.float_ptr(pyredner.data_ptr(buffers.d_world_to_cam)),
                redner.float_ptr(pyredner.data_ptr(buffers.d_intrinsic_mat_inv)),
                redner.float_ptr(pyredner.data_ptr(buffers.d_intrinsic_mat)))

    buffers.d_vertices_list = []
    buffers.d_uvs_list = []
    buffers.d_normals_list = []
    buffers.d_colors_list = []
    buffers.d_shapes = []
    with tf.device(pyredner.get_device_name()):
        for i, shape in enumerate(ctx.shapes):
            num_vertices = shape.num_vertices
            d_vertices = tf.zeros([num_vertices, 3], dtype=tf.float32)
            d_uvs = tf.zeros([num_vertices, 2],
                             dtype=tf.float32) if shape.has_uvs() else None
            d_normals = tf.zeros(
                [num_vertices, 3],
                dtype=tf.float32) if shape.has_normals() else None
            d_colors = tf.zeros(
                [num_vertices, 3],
                dtype=tf.float32) if shape.has_colors() else None
            buffers.d_vertices_list.append(d_vertices)
            buffers.d_uvs_list.append(d_uvs)
            buffers.d_normals_list.append(d_normals)
            buffers.d_colors_list.append(d_colors)
            buffers.d_shapes.append(redner.DShape(\
                redner.float_ptr(pyredner.data_ptr(d_vertices)),
                redner.float_ptr(pyredner.data_ptr(d_uvs) if d_uvs is not None else 0),
                redner.float_ptr(pyredner.data_ptr(d_normals) if d_normals is not None else 0),
                redner.float_ptr(pyredner.data_ptr(d_colors) if d_colors is not None else 0)))

    buffers.d_diffuse_list = []
    buffers.d_specular_list = []
    buffers.d_roughness_list = []
    buffers.d_normal_map_list = []
    buffers.d_diffuse_uv_scale_list = []
    buffers.d_specular_uv_scale_list = []
    buffers.d_roughness_uv_scale_list = []
    buffers.d_generic_list = []
    buffers.d_generic_uv_scale_list = []
    buffers.d_normal_map_uv_scale_list = []
    buffers.d_materials = []
    with tf.device(pyredner.get_device_name()):
        for material in ctx.materials:
            if material.get_diffuse_size(0)[0] == 0:
                d_diffuse = [tf.zeros(3, dtype=tf.float32)]
            else:
                d_diffuse = []
                for l in range(material.get_diffuse_levels()):
                    diffuse_size = material.get_diffuse_size(l)
                    d_diffuse.append(\
                        tf.zeros([diffuse_size[1],
                                  diffuse_size[0],
                                  3], dtype=tf.float32))

            if material.get_specular_size(0)[0] == 0:
                d_specular = [tf.zeros(3, dtype=tf.float32)]
            else:
                d_specular = []
                for l in range(material.get_specular_levels()):
                    specular_size = material.get_specular_size(l)
                    d_specular.append(\
                        tf.zeros([specular_size[1],
                                  specular_size[0],
                                  3], dtype=tf.float32))

            if material.get_roughness_size(0)[0] == 0:
                d_roughness = [tf.zeros(1, dtype=tf.float32)]
            else:
                d_roughness = []
                for l in range(material.get_roughness_levels()):
                    roughness_size = material.get_roughness_size(l)
                    d_roughness.append(\
                        tf.zeros([roughness_size[1],
                                  roughness_size[0],
                                  1], dtype=tf.float32))
            # HACK: tensorflow's eager mode uses a cache to store scalar
            #       constants to avoid memory copy. If we pass scalar tensors
            #       into the C++ code and modify them, we would corrupt the
            #       cache, causing incorrect results in future scalar constant
            #       creations. Thus we force tensorflow to copy by adding a zero.
            # (also see https://github.com/tensorflow/tensorflow/issues/11186
            #  for more discussion regarding copying tensors)
            if d_roughness[0].shape.num_elements() == 1:
                d_roughness[0] = d_roughness[0] + 0

            if material.get_generic_levels() == 0:
                d_generic = None
            else:
                d_generic = []
                for l in range(material.get_generic_levels()):
                    generic_size = material.get_generic_size(l)
                    d_generic.append(\
                        tf.zeros([generic_size[2],
                                  generic_size[1],
                                  generic_size[0]], dtype=tf.float32))

            if material.get_normal_map_levels() == 0:
                d_normal_map = None
            else:
                d_normal_map = []
                for l in range(material.get_normal_map_levels()):
                    normal_map_size = material.get_normal_map_size(l)
                    d_normal_map.append(\
                        tf.zeros([normal_map_size[1],
                                  normal_map_size[0],
                                  3], dtype=tf.float32))

            buffers.d_diffuse_list.append(d_diffuse)
            buffers.d_specular_list.append(d_specular)
            buffers.d_roughness_list.append(d_roughness)
            buffers.d_generic_list.append(d_generic)
            buffers.d_normal_map_list.append(d_normal_map)

            d_diffuse_uv_scale = tf.zeros([2], dtype=tf.float32)
            d_specular_uv_scale = tf.zeros([2], dtype=tf.float32)
            d_roughness_uv_scale = tf.zeros([2], dtype=tf.float32)
            if d_generic is None:
                d_generic_uv_scale = None
            else:
                d_generic_uv_scale = tf.zeros([2], dtype=tf.float32)
            if d_normal_map is None:
                d_normal_map_uv_scale = None
            else:
                d_normal_map_uv_scale = tf.zeros([2], dtype=tf.float32)
            buffers.d_diffuse_uv_scale_list.append(d_diffuse_uv_scale)
            buffers.d_specular_uv_scale_list.append(d_specular_uv_scale)
            buffers.d_roughness_uv_scale_list.append(d_roughness_uv_scale)
            buffers.d_generic_uv_scale_list.append(d_generic_uv_scale)
            buffers.d_normal_map_uv_scale_list.append(d_normal_map_uv_scale)

            if len(d_diffuse[0].shape) == 1:
                d_diffuse_tex = redner.Texture3(\
                    [redner.float_ptr(pyredner.data_ptr(d_diffuse[0]))],
                    [0],
                    [0],
                    3,
                    redner.float_ptr(pyredner.data_ptr(d_diffuse_uv_scale)))
            else:
                d_diffuse_tex = redner.Texture3(\
                    [redner.float_ptr(pyredner.data_ptr(x)) for x in d_diffuse],
                    [x.shape[1] for x in d_diffuse],
                    [x.shape[0] for x in d_diffuse],
                    3,
                    redner.float_ptr(pyredner.data_ptr(d_diffuse_uv_scale)))

            if len(d_specular[0].shape) == 1:
                d_specular_tex = redner.Texture3(\
                    [redner.float_ptr(pyredner.data_ptr(d_specular[0]))],
                    [0],
                    [0],
                    3,
                    redner.float_ptr(pyredner.data_ptr(d_specular_uv_scale)))
            else:
                d_specular_tex = redner.Texture3(\
                    [redner.float_ptr(pyredner.data_ptr(x)) for x in d_specular],
                    [x.shape[1] for x in d_specular],
                    [x.shape[0] for x in d_specular],
                    3,
                    redner.float_ptr(pyredner.data_ptr(d_specular_uv_scale)))

            if len(d_roughness[0].shape) == 1:
                d_roughness_tex = redner.Texture1(\
                    [redner.float_ptr(pyredner.data_ptr(d_roughness[0]))],
                    [0],
                    [0],
                    1,
                    redner.float_ptr(pyredner.data_ptr(d_roughness_uv_scale)))
            else:
                d_roughness_tex = redner.Texture1(\
                    [redner.float_ptr(pyredner.data_ptr(x)) for x in d_roughness],
                    [x.shape[1] for x in d_roughness],
                    [x.shape[0] for x in d_roughness],
                    1,
                    redner.float_ptr(pyredner.data_ptr(d_roughness_uv_scale)))

            if d_generic is None:
                d_generic_tex = redner.TextureN(\
                    [], [], [], 0, redner.float_ptr(0))
            else:
                d_generic_tex = redner.TextureN(\
                    [redner.float_ptr(pyredner.data_ptr(x)) for x in d_generic],
                    [x.shape[1] for x in d_generic],
                    [x.shape[0] for x in d_generic],
                    d_generic[0].shape[2],
                    redner.float_ptr(pyredner.data_ptr(d_generic_uv_scale)))

            if d_normal_map is None:
                d_normal_map = redner.Texture3(\
                    [], [], [], 0, redner.float_ptr(0))
            else:
                d_normal_map = redner.Texture3(\
                    [redner.float_ptr(pyredner.data_ptr(x)) for x in d_normal_map],
                    [x.shape[1] for x in d_normal_map],
                    [x.shape[0] for x in d_normal_map],
                    3,
                    redner.float_ptr(pyredner.data_ptr(d_normal_map_uv_scale)))

            buffers.d_materials.append(redner.DMaterial(\
                d_diffuse_tex, d_specular_tex, d_roughness_tex,
                d_generic_tex, d_normal_map))

    buffers.d_intensity_list = []
    buffers.d_area_lights = []
    with tf.device(pyredner.get_device_name()):
        for light in ctx.area_lights:
            d_intensity = tf.zeros(3, dtype=tf.float32)
            buffers.d_intensity_list.append(d_intensity)
            buffers.d_area_lights.append(\
                redner.DAreaLight(redner.float_ptr(pyredner.data_ptr(d_intensity))))

    buffers.d_envmap = None
    if ctx.envmap is not None:
        envmap = ctx.envmap
        with tf.device(pyredner.get_device_name()):
            buffers.d_envmap_values = []
            for l in range(envmap.get_levels()):
                size = envmap.get_size(l)
                buffers.d_envmap_values.append(\
                    tf.zeros([size[1],
                              size[0],
                              3], dtype=tf.float32))
            buffers.d_envmap_uv_scale = tf.zeros([2], dtype=tf.float32)
            buffers.d_world_to_env = tf.zeros([4, 4], dtype=tf.float32)
            d_envmap_tex = redner.Texture3(\
                [redner.float_ptr(pyredner.data_ptr(x)) for x in buffers.d_envmap_values],
                [x.shape[1] for x in buffers.d_envmap_values],
                [x.shape[0] for x in buffers.d_envmap_values],
                3,
                redner.float_ptr(pyredner.data_ptr(buffers.d_envmap_uv_scale)))
            buffers.d_envmap = redner.DEnvironmentMap(
                d_envmap_tex,
                redner.float_ptr(pyredner.data_ptr(buffers.d_world_to_env)))

    buffers.d_scene = redner.DScene(buffers.d_camera, buffers.d_shapes,
                                    buffers.d_materials, buffers.d_area_lights,
                                    buffers.d_envmap, pyredner.get_use_gpu(),
                                    pyredner.get_gpu_device_id())
    return buffers
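
Throughout the function above the same pattern repeats: allocate a zero-filled tensor on the render device, wrap its raw buffer with `redner.float_ptr(pyredner.data_ptr(...))`, and let the C++ side accumulate gradients into that memory in place (the `+ 0` on one-element roughness tensors only forces a fresh, writable buffer, as the HACK comment explains). A rough standalone illustration of the idea, using NumPy and ctypes instead of redner's helpers:

import ctypes
import numpy as np

num_vertices = 4
d_vertices = np.zeros((num_vertices, 3), dtype=np.float32)

# Hand the raw buffer address to native code, which writes gradients into it.
raw_ptr = d_vertices.ctypes.data_as(ctypes.POINTER(ctypes.c_float))

raw_ptr[0] = 1.0  # stand-in for the C++ side accumulating a gradient
assert d_vertices[0, 0] == 1.0  # the Python-side array sees the in-place write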
Example #4
def unpack_args(seed,
                args,
                use_primary_edge_sampling=None,
                use_secondary_edge_sampling=None):
    """
        Given a list of serialized scene arguments, unpack
        all information into a Context.
    """
    # Unpack arguments
    current_index = 0
    num_shapes = int(args[current_index])
    current_index += 1
    num_materials = int(args[current_index])
    current_index += 1
    num_lights = int(args[current_index])
    current_index += 1

    # Camera arguments
    cam_position = args[current_index]
    current_index += 1
    cam_look_at = args[current_index]
    current_index += 1
    cam_up = args[current_index]
    current_index += 1
    cam_to_world = args[current_index]
    current_index += 1
    world_to_cam = args[current_index]
    current_index += 1
    intrinsic_mat_inv = args[current_index]
    current_index += 1
    intrinsic_mat = args[current_index]
    current_index += 1
    clip_near = float(args[current_index])
    current_index += 1
    resolution = args[current_index].numpy()  # Tuple[int, int]
    current_index += 1
    viewport = args[current_index].numpy()  # Tuple[int, int, int, int]
    current_index += 1
    camera_type = RednerCameraType.asCameraType(
        args[current_index])  # FIXME: Map to custom type
    current_index += 1

    with tf.device('/device:cpu:' + str(pyredner.get_cpu_device_id())):
        if is_empty_tensor(cam_to_world):
            camera = redner.Camera(
                resolution[1],
                resolution[0],
                redner.float_ptr(pyredner.data_ptr(cam_position)),
                redner.float_ptr(pyredner.data_ptr(cam_look_at)),
                redner.float_ptr(pyredner.data_ptr(cam_up)),
                redner.float_ptr(0),  # cam_to_world
                redner.float_ptr(0),  # world_to_cam
                redner.float_ptr(pyredner.data_ptr(intrinsic_mat_inv)),
                redner.float_ptr(pyredner.data_ptr(intrinsic_mat)),
                clip_near,
                camera_type,
                redner.Vector2i(viewport[1], viewport[0]),
                redner.Vector2i(viewport[3], viewport[2]))
        else:
            camera = redner.Camera(
                resolution[1], resolution[0], redner.float_ptr(0),
                redner.float_ptr(0), redner.float_ptr(0),
                redner.float_ptr(pyredner.data_ptr(cam_to_world)),
                redner.float_ptr(pyredner.data_ptr(world_to_cam)),
                redner.float_ptr(pyredner.data_ptr(intrinsic_mat_inv)),
                redner.float_ptr(pyredner.data_ptr(intrinsic_mat)), clip_near,
                camera_type, redner.Vector2i(viewport[1], viewport[0]),
                redner.Vector2i(viewport[3], viewport[2]))

    with tf.device(pyredner.get_device_name()):
        shapes = []
        for i in range(num_shapes):
            vertices = args[current_index]
            current_index += 1
            indices = args[current_index]
            current_index += 1
            uvs = args[current_index]
            current_index += 1
            normals = args[current_index]
            current_index += 1
            uv_indices = args[current_index]
            current_index += 1
            normal_indices = args[current_index]
            current_index += 1
            colors = args[current_index]
            current_index += 1
            material_id = int(args[current_index])
            current_index += 1
            light_id = int(args[current_index])
            current_index += 1

            shapes.append(redner.Shape(\
                redner.float_ptr(pyredner.data_ptr(vertices)),
                redner.int_ptr(pyredner.data_ptr(indices)),
                redner.float_ptr(pyredner.data_ptr(uvs) if not is_empty_tensor(uvs) else 0),
                redner.float_ptr(pyredner.data_ptr(normals) if not is_empty_tensor(normals) else 0),
                redner.int_ptr(pyredner.data_ptr(uv_indices) if not is_empty_tensor(uv_indices) else 0),
                redner.int_ptr(pyredner.data_ptr(normal_indices) if not is_empty_tensor(normal_indices) else 0),
                redner.float_ptr(pyredner.data_ptr(colors) if not is_empty_tensor(colors) else 0),
                int(vertices.shape[0]),
                int(uvs.shape[0]) if not is_empty_tensor(uvs) else 0,
                int(normals.shape[0]) if not is_empty_tensor(normals) else 0,
                int(indices.shape[0]),
                material_id,
                light_id))

    materials = []
    with tf.device(pyredner.get_device_name()):
        for i in range(num_materials):
            num_levels = int(args[current_index])
            current_index += 1
            diffuse_reflectance = []
            for j in range(num_levels):
                diffuse_reflectance.append(args[current_index])
                current_index += 1
            diffuse_uv_scale = args[current_index]
            current_index += 1

            num_levels = int(args[current_index])
            current_index += 1
            specular_reflectance = []
            for j in range(num_levels):
                specular_reflectance.append(args[current_index])
                current_index += 1
            specular_uv_scale = args[current_index]
            current_index += 1

            num_levels = int(args[current_index])
            current_index += 1
            roughness = []
            for j in range(num_levels):
                roughness.append(args[current_index])
                current_index += 1
            roughness_uv_scale = args[current_index]
            current_index += 1

            num_levels = int(args[current_index])
            current_index += 1
            generic_texture = []
            if num_levels > 0:
                for j in range(num_levels):
                    generic_texture.append(args[current_index])
                    current_index += 1
                generic_uv_scale = args[current_index]
                current_index += 1
            else:
                generic_uv_scale = None

            num_levels = int(args[current_index])
            current_index += 1
            normal_map = []
            if num_levels > 0:
                for j in range(num_levels):
                    normal_map.append(args[current_index])
                    current_index += 1
                normal_map_uv_scale = args[current_index]
                current_index += 1
            else:
                normal_map_uv_scale = None

            compute_specular_lighting = bool(args[current_index])
            current_index += 1
            two_sided = bool(args[current_index])
            current_index += 1
            use_vertex_color = bool(args[current_index])
            current_index += 1

            if get_tensor_dimension(diffuse_reflectance[0]) == 1:
                diffuse_reflectance = redner.Texture3(\
                    [redner.float_ptr(pyredner.data_ptr(diffuse_reflectance[0]))],
                    [0],
                    [0],
                    3, redner.float_ptr(pyredner.data_ptr(diffuse_uv_scale)))
            else:
                assert (get_tensor_dimension(diffuse_reflectance[0]) == 3)
                diffuse_reflectance = redner.Texture3(\
                    [redner.float_ptr(pyredner.data_ptr(x)) for x in diffuse_reflectance],
                    [x.shape[1] for x in diffuse_reflectance],
                    [x.shape[0] for x in diffuse_reflectance],
                    3,
                    redner.float_ptr(pyredner.data_ptr(diffuse_uv_scale)))

            if get_tensor_dimension(specular_reflectance[0]) == 1:
                specular_reflectance = redner.Texture3(\
                    [redner.float_ptr(pyredner.data_ptr(specular_reflectance[0]))],
                    [0],
                    [0],
                    3, redner.float_ptr(pyredner.data_ptr(specular_uv_scale)))
            else:
                assert (get_tensor_dimension(specular_reflectance[0]) == 3)
                specular_reflectance = redner.Texture3(\
                    [redner.float_ptr(pyredner.data_ptr(x)) for x in specular_reflectance],
                    [x.shape[1] for x in specular_reflectance],
                    [x.shape[0] for x in specular_reflectance],
                    3,
                    redner.float_ptr(pyredner.data_ptr(specular_uv_scale)))

            if get_tensor_dimension(roughness[0]) == 1:
                roughness = redner.Texture1(\
                    [redner.float_ptr(pyredner.data_ptr(roughness[0]))],
                    [0],
                    [0],
                    1, redner.float_ptr(pyredner.data_ptr(roughness_uv_scale)))
            else:
                assert (get_tensor_dimension(roughness[0]) == 3)
                roughness = redner.Texture1(\
                    [redner.float_ptr(pyredner.data_ptr(x)) for x in roughness],
                    [x.shape[1] for x in roughness],
                    [x.shape[0] for x in roughness],
                    3,
                    redner.float_ptr(pyredner.data_ptr(roughness_uv_scale)))

            if len(generic_texture) > 0:
                generic_texture = redner.TextureN(\
                    [redner.float_ptr(pyredner.data_ptr(x)) for x in generic_texture],
                    [x.shape[1] for x in generic_texture],
                    [x.shape[0] for x in generic_texture],
                    generic_texture[0].shape[2],
                    redner.float_ptr(pyredner.data_ptr(generic_uv_scale)))
            else:
                generic_texture = redner.TextureN(\
                    [], [], [], 0, redner.float_ptr(0))

            if len(normal_map) > 0:
                normal_map = redner.Texture3(\
                    [redner.float_ptr(pyredner.data_ptr(x)) for x in normal_map],
                    [x.shape[1] for x in normal_map],
                    [x.shape[0] for x in normal_map],
                    normal_map[0].shape[2],
                    redner.float_ptr(pyredner.data_ptr(normal_map_uv_scale)))
            else:
                normal_map = redner.Texture3(\
                    [], [], [], 0, redner.float_ptr(0))

            materials.append(redner.Material(\
                diffuse_reflectance,
                specular_reflectance,
                roughness,
                generic_texture,
                normal_map,
                compute_specular_lighting,
                two_sided,
                use_vertex_color))

    with tf.device('/device:cpu:' + str(pyredner.get_cpu_device_id())):
        area_lights = []
        for i in range(num_lights):
            shape_id = int(args[current_index])
            current_index += 1
            intensity = args[current_index]
            current_index += 1
            two_sided = bool(args[current_index])
            current_index += 1
            directly_visible = bool(args[current_index])
            current_index += 1

            area_lights.append(
                redner.AreaLight(
                    shape_id, redner.float_ptr(pyredner.data_ptr(intensity)),
                    two_sided, directly_visible))

    envmap = None
    if not is_empty_tensor(args[current_index]):
        num_levels = int(args[current_index])
        current_index += 1
        values = []
        for j in range(num_levels):
            values.append(args[current_index])
            current_index += 1
        envmap_uv_scale = args[current_index]
        current_index += 1
        env_to_world = args[current_index]
        current_index += 1
        world_to_env = args[current_index]
        current_index += 1
        sample_cdf_ys = args[current_index]
        current_index += 1
        sample_cdf_xs = args[current_index]
        current_index += 1
        pdf_norm = float(args[current_index])
        current_index += 1
        directly_visible = bool(args[current_index])
        current_index += 1

        assert isinstance(pdf_norm, float)
        with tf.device(pyredner.get_device_name()):
            sample_cdf_ys = redner.float_ptr(pyredner.data_ptr(sample_cdf_ys))
            sample_cdf_xs = redner.float_ptr(pyredner.data_ptr(sample_cdf_xs))
        with tf.device('/device:cpu:' + str(pyredner.get_cpu_device_id())):
            env_to_world = redner.float_ptr(pyredner.data_ptr(env_to_world))
            world_to_env = redner.float_ptr(pyredner.data_ptr(world_to_env))
        with tf.device(pyredner.get_device_name()):
            values = redner.Texture3(\
                [redner.float_ptr(pyredner.data_ptr(x)) for x in values],
                [x.shape[1] for x in values], # width
                [x.shape[0] for x in values], # height
                3, # channels
                redner.float_ptr(pyredner.data_ptr(envmap_uv_scale)))
        envmap = redner.EnvironmentMap(\
            values,
            env_to_world,
            world_to_env,
            sample_cdf_ys,
            sample_cdf_xs,
            pdf_norm,
            directly_visible)
    else:
        current_index += 1

    # Options
    num_samples = args[current_index]
    current_index += 1
    if len(num_samples.shape) == 0 or num_samples.shape[0] == 1:
        num_samples = int(num_samples)
    else:
        assert (num_samples.shape[0] == 2)
        num_samples = (int(num_samples[0]), int(num_samples[1]))
    max_bounces = int(args[current_index])
    current_index += 1

    num_channel_args = int(args[current_index])
    current_index += 1

    channels = []
    for _ in range(num_channel_args):
        ch = args[current_index]
        ch = RednerChannels.asChannel(ch)
        channels.append(ch)
        current_index += 1

    sampler_type = args[current_index]
    sampler_type = RednerSamplerType.asSamplerType(sampler_type)
    current_index += 1

    use_primary_edge_sampling = args[current_index]
    current_index += 1
    use_secondary_edge_sampling = args[current_index]
    current_index += 1
    sample_pixel_center = args[current_index]
    current_index += 1

    start = time.time()
    scene = redner.Scene(camera, shapes, materials, area_lights, envmap,
                         pyredner.get_use_gpu(), pyredner.get_gpu_device_id(),
                         use_primary_edge_sampling,
                         use_secondary_edge_sampling)
    time_elapsed = time.time() - start
    if get_print_timing():
        print('Scene construction, time: %.5f s' % time_elapsed)

    # check that num_samples is a tuple
    if isinstance(num_samples, int):
        num_samples = (num_samples, num_samples)

    options = redner.RenderOptions(seed, num_samples[0], max_bounces, channels,
                                   sampler_type, sample_pixel_center)

    ctx = Context()
    ctx.channels = channels
    ctx.options = options
    ctx.resolution = resolution
    ctx.viewport = viewport
    ctx.scene = scene
    ctx.camera = camera
    ctx.shapes = shapes
    ctx.materials = materials
    ctx.area_lights = area_lights
    ctx.envmap = envmap
    ctx.num_samples = num_samples
    ctx.num_channel_args = num_channel_args

    return ctx
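
unpack_args walks the flattened argument list with a single cursor, reading each field at `current_index` and advancing by one, so the read order here has to mirror exactly the order in which the scene was serialized on the forward side. A toy illustration of the cursor pattern (not redner's actual serializer):

def read_int(args, cursor):
    """Read one integer field and advance the cursor."""
    return int(args[cursor]), cursor + 1

args = [3, 2, 1]  # e.g. num_shapes, num_materials, num_lights
cursor = 0
num_shapes, cursor = read_int(args, cursor)
num_materials, cursor = read_int(args, cursor)
num_lights, cursor = read_int(args, cursor)
assert (num_shapes, num_materials, num_lights) == (3, 2, 1)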
Example #5
    def backward(grad_img):
        global __ctx
        ctx = __ctx
        scene = ctx.scene
        options = ctx.options
        with tf.device(pyredner.get_device_name()):
            d_position = tf.zeros(3, dtype=tf.float32)
            d_look_at = tf.zeros(3, dtype=tf.float32)
            d_up = tf.zeros(3, dtype=tf.float32)
            d_ndc_to_cam = tf.zeros([3, 3], dtype=tf.float32)
            d_cam_to_ndc = tf.zeros([3, 3], dtype=tf.float32)
            d_camera = redner.DCamera(
                redner.float_ptr(pyredner.data_ptr(d_position)),
                redner.float_ptr(pyredner.data_ptr(d_look_at)),
                redner.float_ptr(pyredner.data_ptr(d_up)),
                redner.float_ptr(pyredner.data_ptr(d_ndc_to_cam)),
                redner.float_ptr(pyredner.data_ptr(d_cam_to_ndc)))
        d_vertices_list = []
        d_uvs_list = []
        d_normals_list = []
        d_shapes = []
        with tf.device(pyredner.get_device_name()):
            for i, shape in enumerate(ctx.shapes):
                num_vertices = shape.num_vertices
                d_vertices = tf.zeros([num_vertices, 3], dtype=tf.float32)
                d_uvs = tf.zeros([num_vertices, 2],
                                 dtype=tf.float32) if shape.has_uvs() else None
                d_normals = tf.zeros(
                    [num_vertices, 3],
                    dtype=tf.float32) if shape.has_normals() else None
                d_vertices_list.append(d_vertices)
                d_uvs_list.append(d_uvs)
                d_normals_list.append(d_normals)
                d_shapes.append(redner.DShape(\
                    redner.float_ptr(pyredner.data_ptr(d_vertices)),
                    redner.float_ptr(pyredner.data_ptr(d_uvs) if d_uvs is not None else 0),
                    redner.float_ptr(pyredner.data_ptr(d_normals) if d_normals is not None else 0)))

        d_diffuse_list = []
        d_specular_list = []
        d_roughness_list = []
        d_normal_map_list = []
        d_diffuse_uv_scale_list = []
        d_specular_uv_scale_list = []
        d_roughness_uv_scale_list = []
        d_normal_map_uv_scale_list = []
        d_materials = []
        with tf.device(pyredner.get_device_name()):
            for material in ctx.materials:
                diffuse_size = material.get_diffuse_size()
                specular_size = material.get_specular_size()
                roughness_size = material.get_roughness_size()
                normal_map_size = material.get_normal_map_size()
                if diffuse_size[0] == 0:
                    d_diffuse = tf.zeros(3, dtype=tf.float32)
                else:
                    d_diffuse = tf.zeros(
                        [diffuse_size[2], diffuse_size[1], diffuse_size[0], 3],
                        dtype=tf.float32)
                if specular_size[0] == 0:
                    d_specular = tf.zeros(3, dtype=tf.float32)
                else:
                    d_specular = tf.zeros([
                        specular_size[2], specular_size[1], specular_size[0], 3
                    ],
                                          dtype=tf.float32)
                if roughness_size[0] == 0:
                    d_roughness = tf.zeros(1, dtype=tf.float32)
                else:
                    d_roughness = tf.zeros([
                        roughness_size[2], roughness_size[1],
                        roughness_size[0], 1
                    ],
                                           dtype=tf.float32)
                # HACK: tensorflow's eager mode uses a cache to store scalar
                #       constants to avoid memory copy. If we pass scalar tensors
                #       into the C++ code and modify them, we would corrupt the
                #       cache, causing incorrect results in future scalar constant
                #       creations. Thus we force tensorflow to copy by adding a zero.
                # (also see https://github.com/tensorflow/tensorflow/issues/11186
                #  for more discussion regarding copying tensors)
                if d_roughness.shape.num_elements() == 1:
                    d_roughness = d_roughness + 0
                if normal_map_size[0] == 0:
                    d_normal_map = None
                else:
                    d_normal_map = tf.zeros([
                        normal_map_size[2], normal_map_size[1],
                        normal_map_size[0], 3
                    ],
                                            dtype=tf.float32)

                d_diffuse_list.append(d_diffuse)
                d_specular_list.append(d_specular)
                d_roughness_list.append(d_roughness)
                d_normal_map_list.append(d_normal_map)
                d_diffuse = redner.float_ptr(pyredner.data_ptr(d_diffuse))
                d_specular = redner.float_ptr(pyredner.data_ptr(d_specular))
                d_roughness = redner.float_ptr(pyredner.data_ptr(d_roughness))
                if normal_map_size[0] > 0:
                    d_normal_map = redner.float_ptr(
                        pyredner.data_ptr(d_normal_map))
                d_diffuse_uv_scale = tf.zeros([2], dtype=tf.float32)
                d_specular_uv_scale = tf.zeros([2], dtype=tf.float32)
                d_roughness_uv_scale = tf.zeros([2], dtype=tf.float32)
                if normal_map_size[0] > 0:
                    d_normal_map_uv_scale = tf.zeros([2], dtype=tf.float32)
                else:
                    d_normal_map_uv_scale = None
                d_diffuse_uv_scale_list.append(d_diffuse_uv_scale)
                d_specular_uv_scale_list.append(d_specular_uv_scale)
                d_roughness_uv_scale_list.append(d_roughness_uv_scale)
                d_normal_map_uv_scale_list.append(d_normal_map_uv_scale)
                d_diffuse_uv_scale = redner.float_ptr(
                    pyredner.data_ptr(d_diffuse_uv_scale))
                d_specular_uv_scale = redner.float_ptr(
                    pyredner.data_ptr(d_specular_uv_scale))
                d_roughness_uv_scale = redner.float_ptr(
                    pyredner.data_ptr(d_roughness_uv_scale))
                if normal_map_size[0] > 0:
                    d_normal_map_uv_scale = redner.float_ptr(
                        pyredner.data_ptr(d_normal_map_uv_scale))
                d_diffuse_tex = redner.Texture3(\
                    d_diffuse, diffuse_size[0], diffuse_size[1], diffuse_size[2], d_diffuse_uv_scale)
                d_specular_tex = redner.Texture3(\
                    d_specular, specular_size[0], specular_size[1], specular_size[2], d_specular_uv_scale)
                d_roughness_tex = redner.Texture1(\
                    d_roughness, roughness_size[0], roughness_size[1], roughness_size[2],  d_roughness_uv_scale)
                if normal_map_size[0] > 0:
                    d_normal_map_tex = redner.Texture3(\
                        d_normal_map, normal_map_size[0], normal_map_size[1], normal_map_size[2], d_normal_map_uv_scale)
                else:
                    d_normal_map_tex = redner.Texture3(\
                        redner.float_ptr(0), 0, 0, 0, redner.float_ptr(0))
                d_materials.append(
                    redner.DMaterial(d_diffuse_tex, d_specular_tex,
                                     d_roughness_tex, d_normal_map_tex))

        d_intensity_list = []
        d_area_lights = []
        with tf.device(pyredner.get_device_name()):
            for light in ctx.area_lights:
                d_intensity = tf.zeros(3, dtype=tf.float32)
                d_intensity_list.append(d_intensity)
                d_area_lights.append(\
                    redner.DAreaLight(redner.float_ptr(pyredner.data_ptr(d_intensity))))

        d_envmap = None
        if ctx.envmap is not None:
            envmap = ctx.envmap
            size = envmap.get_size()
            with tf.device(pyredner.get_device_name()):
                d_envmap_values = tf.zeros([size[2], size[1], size[0], 3],
                                           dtype=tf.float32)
                d_envmap_values_ptr = redner.float_ptr(
                    pyredner.data_ptr(d_envmap_values))
                d_envmap_uv_scale = tf.zeros([2], dtype=tf.float32)
                d_envmap_uv_scale_ptr = redner.float_ptr(
                    pyredner.data_ptr(d_envmap_uv_scale))
                d_world_to_env = tf.zeros([4, 4], dtype=tf.float32)
                d_world_to_env_ptr = redner.float_ptr(
                    pyredner.data_ptr(d_world_to_env))
            d_envmap_tex = redner.Texture3(\
                d_envmap_values_ptr, size[0], size[1], size[2], d_envmap_uv_scale_ptr)
            d_envmap = redner.DEnvironmentMap(d_envmap_tex, d_world_to_env_ptr)

        d_scene = redner.DScene(d_camera, d_shapes, d_materials, d_area_lights,
                                d_envmap, pyredner.get_use_gpu(), -1)
        if not get_use_correlated_random_number():
            # Decouple the forward/backward random numbers by adding a big prime number
            options.seed += 1000003
        start = time.time()

        options.num_samples = ctx.num_samples[1]
        with tf.device(pyredner.get_device_name()):
            if pyredner.get_use_gpu():
                grad_img = grad_img.gpu(pyredner.get_gpu_device_id())
            else:
                grad_img = grad_img.cpu()
            redner.render(
                scene,
                options,
                redner.float_ptr(0),  # rendered_image
                redner.float_ptr(pyredner.data_ptr(grad_img)),
                d_scene,
                redner.float_ptr(0))  # debug_image
        time_elapsed = time.time() - start

        if print_timing:
            print('Backward pass, time: %.5f s' % time_elapsed)

        # # For debugging
        # pyredner.imwrite(grad_img, 'grad_img.exr')
        # grad_img = tf.ones([256, 256, 3], dtype=tf.float32)
        # debug_img = tf.zeros([256, 256, 3], dtype=tf.float32)
        # redner.render(scene, options,
        #               redner.float_ptr(0),
        #               redner.float_ptr(pyredner.data_ptr(grad_img)),
        #               d_scene,
        #               redner.float_ptr(pyredner.data_ptr(debug_img)))
        # pyredner.imwrite(debug_img, 'debug.exr')
        # pyredner.imwrite(-debug_img, 'debug_.exr')
        # exit()

        ret_list = []
        ret_list.append(None)  # seed
        ret_list.append(None)  # num_shapes
        ret_list.append(None)  # num_materials
        ret_list.append(None)  # num_lights
        ret_list.append(d_position)
        ret_list.append(d_look_at)
        ret_list.append(d_up)
        ret_list.append(d_ndc_to_cam)
        ret_list.append(d_cam_to_ndc)
        ret_list.append(None)  # clip near
        ret_list.append(None)  # resolution
        ret_list.append(None)  # camera_type

        num_shapes = len(ctx.shapes)
        for i in range(num_shapes):
            ret_list.append(d_vertices_list[i])
            ret_list.append(None)  # indices
            ret_list.append(d_uvs_list[i])
            ret_list.append(d_normals_list[i])
            ret_list.append(None)  # material id
            ret_list.append(None)  # light id

        num_materials = len(ctx.materials)
        for i in range(num_materials):
            ret_list.append(d_diffuse_list[i])
            ret_list.append(d_diffuse_uv_scale_list[i])
            ret_list.append(d_specular_list[i])
            ret_list.append(d_specular_uv_scale_list[i])
            ret_list.append(d_roughness_list[i])
            ret_list.append(d_roughness_uv_scale_list[i])
            ret_list.append(d_normal_map_list[i])
            ret_list.append(d_normal_map_uv_scale_list[i])
            ret_list.append(None)  # two sided

        num_area_lights = len(ctx.area_lights)
        for i in range(num_area_lights):
            ret_list.append(None)  # shape id
            ret_list.append(d_intensity_list[i].cpu())
            ret_list.append(None)  # two sided

        if ctx.envmap is not None:
            ret_list.append(d_envmap_values)
            ret_list.append(d_envmap_uv_scale)
            ret_list.append(None)  # env_to_world
            ret_list.append(d_world_to_env.cpu())
            ret_list.append(None)  # sample_cdf_ys
            ret_list.append(None)  # sample_cdf_xs
            ret_list.append(None)  # pdf_norm
        else:
            ret_list.append(None)
            ret_list.append(None)
            ret_list.append(None)
            ret_list.append(None)
            ret_list.append(None)
            ret_list.append(None)
            ret_list.append(None)

        ret_list.append(None)  # num samples
        ret_list.append(None)  # num bounces
        ret_list.append(None)  # num channels
        for _ in range(ctx.num_channels):
            ret_list.append(None)  # channel

        ret_list.append(None)  # sampler type
        ret_list.append(None)  # use_primary_edge_sampling
        ret_list.append(None)  # use_secondary_edge_sampling

        # pdb.set_trace()
        return ret_list
Example #6
def forward(seed: int, *args):
    """
        Forward rendering pass: given a scene, output an image.
    """
    global __ctx
    ctx = __ctx

    # Unpack arguments
    current_index = 0
    num_shapes = int(args[current_index])
    current_index += 1
    num_materials = int(args[current_index])
    current_index += 1
    num_lights = int(args[current_index])
    current_index += 1

    # Camera arguments
    cam_position = args[current_index]
    current_index += 1
    cam_look_at = args[current_index]
    current_index += 1
    cam_up = args[current_index]
    current_index += 1
    ndc_to_cam = args[current_index]
    current_index += 1
    cam_to_ndc = args[current_index]
    current_index += 1
    clip_near = float(args[current_index])
    current_index += 1
    resolution = args[current_index].numpy()  # Tuple[int, int]
    current_index += 1
    camera_type = pyredner.RednerCameraType.asCameraType(
        args[current_index])  # FIXME: Map to custom type
    current_index += 1

    with tf.device('/device:cpu:' + str(pyredner.get_cpu_device_id())):
        camera = redner.Camera(
            resolution[1], resolution[0],
            redner.float_ptr(pyredner.data_ptr(cam_position)),
            redner.float_ptr(pyredner.data_ptr(cam_look_at)),
            redner.float_ptr(pyredner.data_ptr(cam_up)),
            redner.float_ptr(pyredner.data_ptr(ndc_to_cam)),
            redner.float_ptr(pyredner.data_ptr(cam_to_ndc)), clip_near,
            camera_type)

    with tf.device(pyredner.get_device_name()):
        shapes = []
        for i in range(num_shapes):
            vertices = args[current_index]
            current_index += 1
            indices = args[current_index]
            current_index += 1
            uvs = args[current_index]
            current_index += 1
            normals = args[current_index]
            current_index += 1
            material_id = int(args[current_index])
            current_index += 1
            light_id = int(args[current_index])
            current_index += 1
            shapes.append(redner.Shape(\
                redner.float_ptr(pyredner.data_ptr(vertices)),
                redner.int_ptr(pyredner.data_ptr(indices)),
                redner.float_ptr(pyredner.data_ptr(uvs) if uvs is not None else 0),
                redner.float_ptr(pyredner.data_ptr(normals) if normals is not None else 0),
                int(vertices.shape[0]),
                int(indices.shape[0]),
                material_id,
                light_id))

    materials = []
    with tf.device(pyredner.get_device_name()):
        for i in range(num_materials):
            diffuse_reflectance = args[current_index]
            current_index += 1
            diffuse_uv_scale = args[current_index]
            current_index += 1
            specular_reflectance = args[current_index]
            current_index += 1
            specular_uv_scale = args[current_index]
            current_index += 1
            roughness = args[current_index]
            current_index += 1
            roughness_uv_scale = args[current_index]
            current_index += 1
            normal_map = args[current_index]
            current_index += 1
            normal_map_uv_scale = args[current_index]
            current_index += 1
            two_sided = bool(args[current_index])
            current_index += 1

            diffuse_reflectance_ptr = redner.float_ptr(
                pyredner.data_ptr(diffuse_reflectance))
            specular_reflectance_ptr = redner.float_ptr(
                pyredner.data_ptr(specular_reflectance))
            roughness_ptr = redner.float_ptr(pyredner.data_ptr(roughness))
            if normal_map.shape[0] > 0:
                normal_map_ptr = redner.float_ptr(
                    pyredner.data_ptr(normal_map))
            diffuse_uv_scale_ptr = redner.float_ptr(
                pyredner.data_ptr(diffuse_uv_scale))
            specular_uv_scale_ptr = redner.float_ptr(
                pyredner.data_ptr(specular_uv_scale))
            roughness_uv_scale_ptr = redner.float_ptr(
                pyredner.data_ptr(roughness_uv_scale))
            if normal_map.shape[0] > 0:
                normal_map_uv_scale_ptr = redner.float_ptr(
                    pyredner.data_ptr(normal_map_uv_scale))
            if get_tensor_dimension(diffuse_reflectance) == 1:
                diffuse_reflectance = redner.Texture3(diffuse_reflectance_ptr,
                                                      0, 0, 0,
                                                      diffuse_uv_scale_ptr)
            else:
                diffuse_reflectance = redner.Texture3(\
                    diffuse_reflectance_ptr,
                    int(diffuse_reflectance.shape[2]), # width
                    int(diffuse_reflectance.shape[1]), # height
                    int(diffuse_reflectance.shape[0]), # num levels
                    diffuse_uv_scale_ptr)
            if get_tensor_dimension(specular_reflectance) == 1:
                specular_reflectance = redner.Texture3(
                    specular_reflectance_ptr, 0, 0, 0, specular_uv_scale_ptr)
            else:
                specular_reflectance = redner.Texture3(\
                    specular_reflectance_ptr,
                    int(specular_reflectance.shape[2]), # width
                    int(specular_reflectance.shape[1]), # height
                    int(specular_reflectance.shape[0]), # num levels
                    specular_uv_scale_ptr)
            if get_tensor_dimension(roughness) == 1:
                roughness = redner.Texture1(roughness_ptr, 0, 0, 0,
                                            roughness_uv_scale_ptr)
            else:
                assert (get_tensor_dimension(roughness) == 4)
                roughness = redner.Texture1(\
                    roughness_ptr,
                    int(roughness.shape[2]), # width
                    int(roughness.shape[1]), # height
                    int(roughness.shape[0]), # num levels
                    roughness_uv_scale_ptr)
            if normal_map.shape[0] > 0:
                normal_map = redner.Texture3(\
                    normal_map_ptr,
                    int(normal_map.shape[2]),
                    int(normal_map.shape[1]),
                    int(normal_map.shape[0]),
                    normal_map_uv_scale_ptr)
            else:
                normal_map = redner.Texture3(\
                    redner.float_ptr(0), 0, 0, 0, redner.float_ptr(0))
            materials.append(redner.Material(\
                diffuse_reflectance,
                specular_reflectance,
                roughness,
                normal_map,
                two_sided))

    with tf.device('/device:cpu:' + str(pyredner.get_cpu_device_id())):
        area_lights = []
        for i in range(num_lights):
            shape_id = int(args[current_index])
            current_index += 1
            intensity = args[current_index]
            current_index += 1
            two_sided = bool(args[current_index])
            current_index += 1

            area_lights.append(
                redner.AreaLight(
                    shape_id, redner.float_ptr(pyredner.data_ptr(intensity)),
                    two_sided))

    envmap = None
    if not is_empty_tensor(args[current_index]):
        values = args[current_index]
        current_index += 1
        envmap_uv_scale = args[current_index]
        current_index += 1
        env_to_world = args[current_index]
        current_index += 1
        world_to_env = args[current_index]
        current_index += 1
        sample_cdf_ys = args[current_index]
        current_index += 1
        sample_cdf_xs = args[current_index]
        current_index += 1
        pdf_norm = float(args[current_index])
        current_index += 1

        assert isinstance(pdf_norm, float)
        with tf.device(pyredner.get_device_name()):
            values_ptr = redner.float_ptr(pyredner.data_ptr(values))
            sample_cdf_ys = redner.float_ptr(pyredner.data_ptr(sample_cdf_ys))
            sample_cdf_xs = redner.float_ptr(pyredner.data_ptr(sample_cdf_xs))
            envmap_uv_scale = redner.float_ptr(
                pyredner.data_ptr(envmap_uv_scale))
        with tf.device('/device:cpu:' + str(pyredner.get_cpu_device_id())):
            env_to_world = redner.float_ptr(pyredner.data_ptr(env_to_world))
            world_to_env = redner.float_ptr(pyredner.data_ptr(world_to_env))
        values = redner.Texture3(
            values_ptr,
            int(values.shape[2]),  # width
            int(values.shape[1]),  # height
            int(values.shape[0]),  # num levels
            envmap_uv_scale)
        envmap = redner.EnvironmentMap(\
            values,
            env_to_world,
            world_to_env,
            sample_cdf_ys,
            sample_cdf_xs,
            pdf_norm)
    else:
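        # No environment map was serialized: skip its seven slots (values,
        # uv_scale, env_to_world, world_to_env, sample_cdf_ys, sample_cdf_xs,
        # pdf_norm).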
        current_index += 7

    # Options
    num_samples = int(args[current_index])
    current_index += 1
    max_bounces = int(args[current_index])
    current_index += 1

    __num_channels = int(args[current_index])
    current_index += 1

    channels = []
    for _ in range(__num_channels):
        ch = args[current_index]
        ch = pyredner.RednerChannels.asChannel(ch)
        channels.append(ch)
        current_index += 1

    sampler_type = args[current_index]
    sampler_type = pyredner.RednerSamplerType.asSamplerType(sampler_type)
    current_index += 1

    use_primary_edge_sampling = args[current_index]
    current_index += 1
    use_secondary_edge_sampling = args[current_index]
    current_index += 1

    scene = redner.Scene(camera, shapes, materials, area_lights, envmap,
                         pyredner.get_use_gpu(), pyredner.get_gpu_device_id(),
                         use_primary_edge_sampling,
                         use_secondary_edge_sampling)

    # num_samples may be an int or a (forward, backward) pair; promote an int to a pair
    if isinstance(num_samples, int):
        num_samples = (num_samples, num_samples)

    options = redner.RenderOptions(seed, num_samples[0], max_bounces, channels,
                                   sampler_type)
    num_channels = redner.compute_num_channels(channels)

    with tf.device(pyredner.get_device_name()):
        rendered_image = tf.zeros(
            shape=[resolution[0], resolution[1], num_channels],
            dtype=tf.float32)

        start = time.time()

        # pdb.set_trace()
        redner.render(scene, options,
                      redner.float_ptr(pyredner.data_ptr(rendered_image)),
                      redner.float_ptr(0), None, redner.float_ptr(0))
        time_elapsed = time.time() - start
        if print_timing:
            print('Forward pass, time: %.5f s' % time_elapsed)

        # # For debugging
        # debug_img = tf.zeros((256, 256, 3), dtype=tf.float32)
        # redner.render(scene,
        #               options,
        #               redner.float_ptr(pyredner.data_ptr(rendered_image)),
        #               redner.float_ptr(0),
        #               None,
        #               redner.float_ptr(pyredner.data_ptr(debug_img)))
        # pyredner.imwrite(debug_img, 'debug.png')
        # exit()

        # import pdb; pdb.set_trace()

    ctx.shapes = shapes
    ctx.materials = materials
    ctx.area_lights = area_lights
    ctx.envmap = envmap
    ctx.scene = scene
    ctx.options = options
    ctx.num_samples = num_samples
    ctx.num_channels = __num_channels
    return rendered_image
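
The shape of the output buffer above is driven entirely by the requested channels. A hedged illustration of that relationship follows, assuming the imports already present in this file (tf, pyredner, redner); redner.compute_num_channels is the call used above, while the exact spelling of the channel constants (written here as pyredner.channels.radiance and pyredner.channels.alpha) is an assumption about the pyredner enum rather than something taken from this file.

# Illustrative sketch only: the channel constant names are assumed, not taken
# from this file; the buffer shape mirrors the forward pass above.
channels = [pyredner.channels.radiance, pyredner.channels.alpha]
num_channels = redner.compute_num_channels(channels)  # 3 (RGB radiance) + 1 (alpha)
resolution = (256, 256)                               # (height, width), as indexed above
rendered_image = tf.zeros(
    shape=[resolution[0], resolution[1], num_channels],  # -> [256, 256, 4]
    dtype=tf.float32)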
Example #7
    def backward(grad_img):
        camera = ctx.camera
        scene = ctx.scene
        options = ctx.options

        with tf.device(pyredner.get_device_name()):
            if camera.use_look_at:
                d_position = tf.zeros(3, dtype=tf.float32)
                d_look_at = tf.zeros(3, dtype=tf.float32)
                d_up = tf.zeros(3, dtype=tf.float32)
                d_cam_to_world = None
                d_world_to_cam = None
            else:
                d_position = None
                d_look_at = None
                d_up = None
                d_cam_to_world = tf.zeros([4, 4], dtype=tf.float32)
                d_world_to_cam = tf.zeros([4, 4], dtype=tf.float32)
            d_intrinsic_mat_inv = tf.zeros([3, 3], dtype=tf.float32)
            d_intrinsic_mat = tf.zeros([3, 3], dtype=tf.float32)
            if camera.use_look_at:
                d_camera = redner.DCamera(
                    redner.float_ptr(pyredner.data_ptr(d_position)),
                    redner.float_ptr(pyredner.data_ptr(d_look_at)),
                    redner.float_ptr(pyredner.data_ptr(d_up)),
                    redner.float_ptr(0),  # cam_to_world
                    redner.float_ptr(0),  # world_to_cam
                    redner.float_ptr(pyredner.data_ptr(d_intrinsic_mat_inv)),
                    redner.float_ptr(pyredner.data_ptr(d_intrinsic_mat)))
            else:
                d_camera = redner.DCamera(
                    redner.float_ptr(0), redner.float_ptr(0),
                    redner.float_ptr(0),
                    redner.float_ptr(pyredner.data_ptr(d_cam_to_world)),
                    redner.float_ptr(pyredner.data_ptr(d_world_to_cam)),
                    redner.float_ptr(pyredner.data_ptr(d_intrinsic_mat_inv)),
                    redner.float_ptr(pyredner.data_ptr(d_intrinsic_mat)))

        d_vertices_list = []
        d_uvs_list = []
        d_normals_list = []
        d_colors_list = []
        d_shapes = []
        with tf.device(pyredner.get_device_name()):
            for i, shape in enumerate(ctx.shapes):
                num_vertices = shape.num_vertices
                d_vertices = tf.zeros([num_vertices, 3], dtype=tf.float32)
                d_uvs = tf.zeros([num_vertices, 2],
                                 dtype=tf.float32) if shape.has_uvs() else None
                d_normals = tf.zeros(
                    [num_vertices, 3],
                    dtype=tf.float32) if shape.has_normals() else None
                d_colors = tf.zeros(
                    [num_vertices, 3],
                    dtype=tf.float32) if shape.has_colors() else None
                d_vertices_list.append(d_vertices)
                d_uvs_list.append(d_uvs)
                d_normals_list.append(d_normals)
                d_colors_list.append(d_colors)
                d_shapes.append(redner.DShape(\
                    redner.float_ptr(pyredner.data_ptr(d_vertices)),
                    redner.float_ptr(pyredner.data_ptr(d_uvs) if d_uvs is not None else 0),
                    redner.float_ptr(pyredner.data_ptr(d_normals) if d_normals is not None else 0),
                    redner.float_ptr(pyredner.data_ptr(d_colors) if d_colors is not None else 0)))

        d_diffuse_list = []
        d_specular_list = []
        d_roughness_list = []
        d_normal_map_list = []
        d_diffuse_uv_scale_list = []
        d_specular_uv_scale_list = []
        d_roughness_uv_scale_list = []
        d_generic_list = []
        d_generic_uv_scale_list = []
        d_normal_map_uv_scale_list = []
        d_materials = []
        with tf.device(pyredner.get_device_name()):
            for material in ctx.materials:
                if material.get_diffuse_size(0)[0] == 0:
                    d_diffuse = [tf.zeros(3, dtype=tf.float32)]
                else:
                    d_diffuse = []
                    for l in range(material.get_diffuse_levels()):
                        diffuse_size = material.get_diffuse_size(l)
                        d_diffuse.append(\
                            tf.zeros([diffuse_size[1],
                                      diffuse_size[0],
                                      3], dtype=tf.float32))

                if material.get_specular_size(0)[0] == 0:
                    d_specular = [tf.zeros(3, dtype=tf.float32)]
                else:
                    d_specular = []
                    for l in range(material.get_specular_levels()):
                        specular_size = material.get_specular_size(l)
                        d_specular.append(\
                            tf.zeros([specular_size[1],
                                      specular_size[0],
                                      3], dtype=tf.float32))

                if material.get_roughness_size(0)[0] == 0:
                    d_roughness = [tf.zeros(1, dtype=tf.float32)]
                else:
                    d_roughness = []
                    for l in range(material.get_roughness_levels()):
                        roughness_size = material.get_roughness_size(l)
                        d_roughness.append(\
                            tf.zeros([roughness_size[1],
                                      roughness_size[0],
                                      1], dtype=tf.float32))
                # HACK: tensorflow's eager mode uses a cache to store scalar
                #       constants to avoid memory copy. If we pass scalar tensors
                #       into the C++ code and modify them, we would corrupt the
                #       cache, causing incorrect results in future scalar constant
                #       creations. Thus we force TensorFlow to copy by adding zero.
                # (also see https://github.com/tensorflow/tensorflow/issues/11186
                #  for more discussion regarding copying tensors)
                if d_roughness[0].shape.num_elements() == 1:
                    d_roughness[0] = d_roughness[0] + 0

                if material.get_generic_levels() == 0:
                    d_generic = None
                else:
                    d_generic = []
                    for l in range(material.get_generic_levels()):
                        generic_size = material.get_generic_size(l)
                        d_generic.append(\
                            tf.zeros([generic_size[2],
                                      generic_size[1],
                                      generic_size[0]], dtype=tf.float32))

                if material.get_normal_map_levels() == 0:
                    d_normal_map = None
                else:
                    d_normal_map = []
                    for l in range(material.get_normal_map_levels()):
                        normal_map_size = material.get_normal_map_size(l)
                        d_normal_map.append(\
                            tf.zeros([normal_map_size[1],
                                      normal_map_size[0],
                                      3], dtype=tf.float32))

                d_diffuse_list.append(d_diffuse)
                d_specular_list.append(d_specular)
                d_roughness_list.append(d_roughness)
                d_generic_list.append(d_generic)
                d_normal_map_list.append(d_normal_map)

                d_diffuse_uv_scale = tf.zeros([2], dtype=tf.float32)
                d_specular_uv_scale = tf.zeros([2], dtype=tf.float32)
                d_roughness_uv_scale = tf.zeros([2], dtype=tf.float32)
                if d_generic is None:
                    d_generic_uv_scale = None
                else:
                    d_generic_uv_scale = tf.zeros([2], dtype=tf.float32)
                if d_normal_map is None:
                    d_normal_map_uv_scale = None
                else:
                    d_normal_map_uv_scale = tf.zeros([2], dtype=tf.float32)
                d_diffuse_uv_scale_list.append(d_diffuse_uv_scale)
                d_specular_uv_scale_list.append(d_specular_uv_scale)
                d_roughness_uv_scale_list.append(d_roughness_uv_scale)
                d_generic_uv_scale_list.append(d_generic_uv_scale)
                d_normal_map_uv_scale_list.append(d_normal_map_uv_scale)

                if len(d_diffuse[0].shape) == 1:
                    d_diffuse_tex = redner.Texture3(\
                        [redner.float_ptr(pyredner.data_ptr(d_diffuse[0]))],
                        [0],
                        [0],
                        3,
                        redner.float_ptr(pyredner.data_ptr(d_diffuse_uv_scale)))
                else:
                    d_diffuse_tex = redner.Texture3(\
                        [redner.float_ptr(pyredner.data_ptr(x)) for x in d_diffuse],
                        [x.shape[1] for x in d_diffuse],
                        [x.shape[0] for x in d_diffuse],
                        3,
                        redner.float_ptr(pyredner.data_ptr(d_diffuse_uv_scale)))

                if len(d_specular[0].shape) == 1:
                    d_specular_tex = redner.Texture3(\
                        [redner.float_ptr(pyredner.data_ptr(d_specular[0]))],
                        [0],
                        [0],
                        3,
                        redner.float_ptr(pyredner.data_ptr(d_specular_uv_scale)))
                else:
                    d_specular_tex = redner.Texture3(\
                        [redner.float_ptr(pyredner.data_ptr(x)) for x in d_specular],
                        [x.shape[1] for x in d_specular],
                        [x.shape[0] for x in d_specular],
                        3,
                        redner.float_ptr(pyredner.data_ptr(d_specular_uv_scale)))

                if len(d_roughness[0].shape) == 1:
                    d_roughness_tex = redner.Texture1(\
                        [redner.float_ptr(pyredner.data_ptr(d_roughness[0]))],
                        [0],
                        [0],
                        1,
                        redner.float_ptr(pyredner.data_ptr(d_roughness_uv_scale)))
                else:
                    d_roughness_tex = redner.Texture1(\
                        [redner.float_ptr(pyredner.data_ptr(x)) for x in d_roughness],
                        [x.shape[1] for x in d_roughness],
                        [x.shape[0] for x in d_roughness],
                        1,
                        redner.float_ptr(pyredner.data_ptr(d_roughness_uv_scale)))

                if d_generic is None:
                    d_generic_tex = redner.TextureN(\
                        [], [], [], 0, redner.float_ptr(0))
                else:
                    d_generic_tex = redner.TextureN(\
                        [redner.float_ptr(pyredner.data_ptr(x)) for x in d_generic],
                        [x.shape[1] for x in d_generic],
                        [x.shape[0] for x in d_generic],
                        d_generic[0].shape[2],
                        redner.float_ptr(pyredner.data_ptr(d_generic_uv_scale)))

                if d_normal_map is None:
                    d_normal_map = redner.Texture3(\
                        [], [], [], 0, redner.float_ptr(0))
                else:
                    d_normal_map = redner.Texture3(\
                        [redner.float_ptr(pyredner.data_ptr(x)) for x in d_normal_map],
                        [x.shape[1] for x in d_normal_map],
                        [x.shape[0] for x in d_normal_map],
                        3,
                        redner.float_ptr(pyredner.data_ptr(d_normal_map_uv_scale)))

                d_materials.append(redner.DMaterial(\
                    d_diffuse_tex, d_specular_tex, d_roughness_tex,
                    d_generic_tex, d_normal_map))

        d_intensity_list = []
        d_area_lights = []
        with tf.device(pyredner.get_device_name()):
            for light in ctx.area_lights:
                d_intensity = tf.zeros(3, dtype=tf.float32)
                d_intensity_list.append(d_intensity)
                d_area_lights.append(\
                    redner.DAreaLight(redner.float_ptr(pyredner.data_ptr(d_intensity))))

        d_envmap = None
        if ctx.envmap is not None:
            envmap = ctx.envmap
            with tf.device(pyredner.get_device_name()):
                d_envmap_values = []
                for l in range(envmap.get_levels()):
                    size = envmap.get_size(l)
                    d_envmap_values.append(\
                        tf.zeros([size[1],
                                  size[0],
                                  3], dtype=tf.float32))
                d_envmap_uv_scale = tf.zeros([2], dtype=tf.float32)
                d_world_to_env = tf.zeros([4, 4], dtype=tf.float32)
                d_envmap_tex = redner.Texture3(\
                    [redner.float_ptr(pyredner.data_ptr(x)) for x in d_envmap_values],
                    [x.shape[1] for x in d_envmap_values],
                    [x.shape[0] for x in d_envmap_values],
                    3,
                    redner.float_ptr(pyredner.data_ptr(d_envmap_uv_scale)))
                d_envmap = redner.DEnvironmentMap(
                    d_envmap_tex,
                    redner.float_ptr(pyredner.data_ptr(d_world_to_env)))

        d_scene = redner.DScene(d_camera, d_shapes, d_materials, d_area_lights,
                                d_envmap, pyredner.get_use_gpu(),
                                pyredner.get_gpu_device_id())
        if not get_use_correlated_random_number():
            # Decouple the forward/backward random numbers by adding a big prime number
            options.seed += 1000003
        start = time.time()

        options.num_samples = ctx.num_samples[1]
        with tf.device(pyredner.get_device_name()):
            grad_img = tf.identity(grad_img)
            redner.render(
                scene,
                options,
                redner.float_ptr(0),  # rendered_image
                redner.float_ptr(pyredner.data_ptr(grad_img)),
                d_scene,
                redner.float_ptr(0))  # debug_image
        time_elapsed = time.time() - start

        if print_timing:
            print('Backward pass, time: %.5f s' % time_elapsed)

        # # For debugging
        # pyredner.imwrite(grad_img, 'grad_img.exr')
        # grad_img = tf.ones([256, 256, 3], dtype=tf.float32)
        # debug_img = tf.zeros([256, 256, 3], dtype=tf.float32)
        # redner.render(scene, options,
        #               redner.float_ptr(0),
        #               redner.float_ptr(pyredner.data_ptr(grad_img)),
        #               d_scene,
        #               redner.float_ptr(pyredner.data_ptr(debug_img)))
        # pyredner.imwrite(debug_img, 'debug.exr')
        # pyredner.imwrite(-debug_img, 'debug_.exr')
        # exit()

        ret_list = []
        ret_list.append(None)  # seed
        ret_list.append(None)  # num_shapes
        ret_list.append(None)  # num_materials
        ret_list.append(None)  # num_lights
        if camera.use_look_at:
            ret_list.append(d_position)
            ret_list.append(d_look_at)
            ret_list.append(d_up)
            ret_list.append(None)  # cam_to_world
            ret_list.append(None)  # world_to_cam
        else:
            ret_list.append(None)  # pos
            ret_list.append(None)  # look
            ret_list.append(None)  # up
            ret_list.append(d_cam_to_world)
            ret_list.append(d_world_to_cam)
        ret_list.append(d_intrinsic_mat_inv)
        ret_list.append(d_intrinsic_mat)
        ret_list.append(None)  # clip near
        ret_list.append(None)  # resolution
        ret_list.append(None)  # camera_type

        num_shapes = len(ctx.shapes)
        for i in range(num_shapes):
            ret_list.append(d_vertices_list[i])
            ret_list.append(None)  # indices
            ret_list.append(d_uvs_list[i])
            ret_list.append(d_normals_list[i])
            ret_list.append(None)  # uv_indices
            ret_list.append(None)  # normal_indices
            ret_list.append(d_colors_list[i])
            ret_list.append(None)  # material id
            ret_list.append(None)  # light id

        num_materials = len(ctx.materials)
        for i in range(num_materials):
            ret_list.append(None)  # num_levels
            for d_diffuse in d_diffuse_list[i]:
                ret_list.append(d_diffuse)
            ret_list.append(d_diffuse_uv_scale_list[i])
            ret_list.append(None)  # num_levels
            for d_specular in d_specular_list[i]:
                ret_list.append(d_specular)
            ret_list.append(d_specular_uv_scale_list[i])
            ret_list.append(None)  # num_levels
            for d_roughness in d_roughness_list[i]:
                ret_list.append(d_roughness)
            ret_list.append(d_roughness_uv_scale_list[i])
            if d_generic_list[i] is None:
                ret_list.append(None)  # num_levels
            else:
                ret_list.append(None)  # num_levels
                for d_generic in d_generic_list[i]:
                    ret_list.append(d_generic)
                ret_list.append(d_generic_uv_scale_list[i])
            if d_normal_map_list[i] is None:
                ret_list.append(None)  # num_levels
            else:
                ret_list.append(None)  # num_levels
                for d_normal_map in d_normal_map_list[i]:
                    ret_list.append(d_normal_map)
                ret_list.append(d_normal_map_uv_scale_list[i])
            ret_list.append(None)  # compute_specular_lighting
            ret_list.append(None)  # two sided
            ret_list.append(None)  # use_vertex_color

        num_area_lights = len(ctx.area_lights)
        for i in range(num_area_lights):
            ret_list.append(None)  # shape id
            with tf.device('/device:cpu:' + str(pyredner.get_cpu_device_id())):
                ret_list.append(tf.identity(d_intensity_list[i]))
            ret_list.append(None)  # two sided

        if ctx.envmap is not None:
            ret_list.append(None)  # num_levels
            for d_values in d_envmap_values:
                ret_list.append(d_values)
            ret_list.append(d_envmap_uv_scale)
            ret_list.append(None)  # env_to_world
            with tf.device('/device:cpu:' + str(pyredner.get_cpu_device_id())):
                ret_list.append(tf.identity(d_world_to_env))
            ret_list.append(None)  # sample_cdf_ys
            ret_list.append(None)  # sample_cdf_xs
            ret_list.append(None)  # pdf_norm
        else:
            ret_list.append(None)

        ret_list.append(None)  # num samples
        ret_list.append(None)  # num bounces
        ret_list.append(None)  # num channels
        for _ in range(ctx.num_channels):
            ret_list.append(None)  # channel

        ret_list.append(None)  # sampler type
        ret_list.append(None)  # use_primary_edge_sampling
        ret_list.append(None)  # use_secondary_edge_sampling
        ret_list.append(None)  # sample_pixel_center

        return ret_list
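
The ret_list returned above must line up entry-for-entry with the serialized arguments consumed by the forward pass, with None in every non-differentiable slot. A minimal standalone sketch of this tf.custom_gradient pattern, using generic names (weighted_sum is not part of redner):

import tensorflow as tf

@tf.custom_gradient
def weighted_sum(*args):
    # args[0] plays the role of a non-differentiable slot (like the seed above);
    # the remaining arguments are the differentiable inputs.
    out = tf.add_n([2.0 * a for a in args[1:]])

    def backward(grad_out):
        ret = [None]                               # no gradient for the seed slot
        ret += [2.0 * grad_out for _ in args[1:]]  # one gradient per remaining input
        return ret

    return out, backward

a = tf.Variable(1.0)
b = tf.Variable(3.0)
with tf.GradientTape() as tape:
    y = weighted_sum(tf.constant(0.0), a, b)
print(tape.gradient(y, [a, b]))  # both gradients are 2.0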
Example #8
                                      num_samples=16,
                                      max_bounces=1)

# Render the scene as our target image.
# To render the scene, we use redner's custom render function (this example uses
# the TensorFlow interface; the PyTorch counterpart lives in pyredner/render_pytorch.py).
# First set up an alias for the render function.

# Render. The first argument is the seed for RNG in the renderer.
# Redner automatically maps the devices in the render function, so no need to specify tf.device here.
img = pyredner.render(0, *scene_args)
# Save the images.
pyredner.imwrite(img, 'results/test_single_triangle/target.exr')
pyredner.imwrite(img, 'results/test_single_triangle/target.png')
# Read the target image we just saved.
target = pyredner.imread('results/test_single_triangle/target.exr')
if pyredner.get_use_gpu():
    target = target.gpu()

# Perturb the scene; this is our initial guess.
with tf.device(pyredner.get_device_name()):
    shape_triangle.vertices = tf.Variable(
        [[-2.0, 1.5, 0.3], [0.9, 1.2, -0.3], [-0.4, -1.4, 0.2]],
        dtype=tf.float32,
        trainable=True)  # Set trainable to True since we want to optimize this
# We need to serialize the scene again to get the new arguments.
scene_args = pyredner.serialize_scene(scene=scene,
                                      num_samples=16,
                                      max_bounces=1)
# Render the initial guess.
img = pyredner.render(1, *scene_args)
# Save the images.
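
The snippet is cut off here; in redner's test scripts the usual continuation is a gradient-descent loop on the perturbed vertices. A hedged sketch of that loop, reusing scene, shape_triangle, and target from above (the optimizer and its hyperparameters are illustrative, not taken from the original file):

optimizer = tf.keras.optimizers.Adam(learning_rate=5e-2)
for t in range(200):
    with tf.GradientTape() as tape:
        # Re-serialize inside the tape so the updated vertices are recorded.
        scene_args = pyredner.serialize_scene(scene=scene,
                                              num_samples=4,
                                              max_bounces=1)
        # Use a fresh seed each iteration to decorrelate the Monte Carlo noise.
        img = pyredner.render(t + 1, *scene_args)
        loss = tf.reduce_sum(tf.square(img - target))
    grads = tape.gradient(loss, [shape_triangle.vertices])
    optimizer.apply_gradients(zip(grads, [shape_triangle.vertices]))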
Example #9
def render(*x):
    """
        The main TensorFlow interface of C++ redner.
    """
    assert (tf.executing_eagerly())
    if pyredner.get_use_gpu(
    ) and os.environ['TF_FORCE_GPU_ALLOW_GROWTH'] != 'true':
        print('******************** WARNING ********************')
        print('Tensorflow by default allocates all GPU memory,')
        print('causing huge amount of page faults when rendering.')
        print(
            'Please set the environment variable TF_FORCE_GPU_ALLOW_GROWTH to true,'
        )
        print('so that Tensorflow allocates memory on demand.')
        print('*************************************************')

    seed, args = int(x[0]), x[1:]
    img, ctx = forward(seed, *args)

    def backward(grad_img):
        camera = ctx.camera
        scene = ctx.scene
        options = ctx.options

        with tf.device(pyredner.get_device_name()):
            if camera.use_look_at:
                d_position = tf.zeros(3, dtype=tf.float32)
                d_look_at = tf.zeros(3, dtype=tf.float32)
                d_up = tf.zeros(3, dtype=tf.float32)
                d_cam_to_world = None
                d_world_to_cam = None
            else:
                d_position = None
                d_look_at = None
                d_up = None
                d_cam_to_world = tf.zeros([4, 4], dtype=tf.float32)
                d_world_to_cam = tf.zeros([4, 4], dtype=tf.float32)
            d_intrinsic_mat_inv = tf.zeros([3, 3], dtype=tf.float32)
            d_intrinsic_mat = tf.zeros([3, 3], dtype=tf.float32)
            if camera.use_look_at:
                d_camera = redner.DCamera(
                    redner.float_ptr(pyredner.data_ptr(d_position)),
                    redner.float_ptr(pyredner.data_ptr(d_look_at)),
                    redner.float_ptr(pyredner.data_ptr(d_up)),
                    redner.float_ptr(0),  # cam_to_world
                    redner.float_ptr(0),  # world_to_cam
                    redner.float_ptr(pyredner.data_ptr(d_intrinsic_mat_inv)),
                    redner.float_ptr(pyredner.data_ptr(d_intrinsic_mat)))
            else:
                d_camera = redner.DCamera(
                    redner.float_ptr(0), redner.float_ptr(0),
                    redner.float_ptr(0),
                    redner.float_ptr(pyredner.data_ptr(d_cam_to_world)),
                    redner.float_ptr(pyredner.data_ptr(d_world_to_cam)),
                    redner.float_ptr(pyredner.data_ptr(d_intrinsic_mat_inv)),
                    redner.float_ptr(pyredner.data_ptr(d_intrinsic_mat)))

        d_vertices_list = []
        d_uvs_list = []
        d_normals_list = []
        d_colors_list = []
        d_shapes = []
        with tf.device(pyredner.get_device_name()):
            for i, shape in enumerate(ctx.shapes):
                num_vertices = shape.num_vertices
                d_vertices = tf.zeros([num_vertices, 3], dtype=tf.float32)
                d_uvs = tf.zeros([num_vertices, 2],
                                 dtype=tf.float32) if shape.has_uvs() else None
                d_normals = tf.zeros(
                    [num_vertices, 3],
                    dtype=tf.float32) if shape.has_normals() else None
                d_colors = tf.zeros(
                    [num_vertices, 3],
                    dtype=tf.float32) if shape.has_colors() else None
                d_vertices_list.append(d_vertices)
                d_uvs_list.append(d_uvs)
                d_normals_list.append(d_normals)
                d_colors_list.append(d_colors)
                d_shapes.append(redner.DShape(\
                    redner.float_ptr(pyredner.data_ptr(d_vertices)),
                    redner.float_ptr(pyredner.data_ptr(d_uvs) if d_uvs is not None else 0),
                    redner.float_ptr(pyredner.data_ptr(d_normals) if d_normals is not None else 0),
                    redner.float_ptr(pyredner.data_ptr(d_colors) if d_colors is not None else 0)))

        d_diffuse_list = []
        d_specular_list = []
        d_roughness_list = []
        d_normal_map_list = []
        d_diffuse_uv_scale_list = []
        d_specular_uv_scale_list = []
        d_roughness_uv_scale_list = []
        d_generic_list = []
        d_generic_uv_scale_list = []
        d_normal_map_uv_scale_list = []
        d_materials = []
        with tf.device(pyredner.get_device_name()):
            for material in ctx.materials:
                diffuse_size = material.get_diffuse_size()
                specular_size = material.get_specular_size()
                roughness_size = material.get_roughness_size()
                generic_size = material.get_generic_size()
                normal_map_size = material.get_normal_map_size()
                if diffuse_size[0] == 0:
                    d_diffuse = tf.zeros(3, dtype=tf.float32)
                else:
                    d_diffuse = tf.zeros(
                        [diffuse_size[2], diffuse_size[1], diffuse_size[0], 3],
                        dtype=tf.float32)
                if specular_size[0] == 0:
                    d_specular = tf.zeros(3, dtype=tf.float32)
                else:
                    d_specular = tf.zeros([
                        specular_size[2], specular_size[1], specular_size[0], 3
                    ],
                                          dtype=tf.float32)
                if roughness_size[0] == 0:
                    d_roughness = tf.zeros(1, dtype=tf.float32)
                else:
                    d_roughness = tf.zeros([
                        roughness_size[2], roughness_size[1],
                        roughness_size[0], 1
                    ],
                                           dtype=tf.float32)
                # HACK: tensorflow's eager mode uses a cache to store scalar
                #       constants to avoid memory copy. If we pass scalar tensors
                #       into the C++ code and modify them, we would corrupt the
                #       cache, causing incorrect results in future scalar constant
                #       creations. Thus we force TensorFlow to copy by adding zero.
                # (also see https://github.com/tensorflow/tensorflow/issues/11186
                #  for more discussion regarding copying tensors)
                if d_roughness.shape.num_elements() == 1:
                    d_roughness = d_roughness + 0
                if generic_size[0] == 0:
                    d_generic = None
                else:
                    d_generic = tf.zeros(
                        [generic_size[2], generic_size[1], generic_size[0], 3],
                        dtype=tf.float32)
                if normal_map_size[0] == 0:
                    d_normal_map = None
                else:
                    d_normal_map = tf.zeros([
                        normal_map_size[2], normal_map_size[1],
                        normal_map_size[0], 3
                    ],
                                            dtype=tf.float32)

                d_diffuse_list.append(d_diffuse)
                d_specular_list.append(d_specular)
                d_roughness_list.append(d_roughness)
                d_generic_list.append(d_generic)
                d_normal_map_list.append(d_normal_map)
                d_diffuse = redner.float_ptr(pyredner.data_ptr(d_diffuse))
                d_specular = redner.float_ptr(pyredner.data_ptr(d_specular))
                d_roughness = redner.float_ptr(pyredner.data_ptr(d_roughness))
                if generic_size[0] > 0:
                    d_generic = redner.float_ptr(pyredner.data_ptr(d_generic))
                if normal_map_size[0] > 0:
                    d_normal_map = redner.float_ptr(
                        pyredner.data_ptr(d_normal_map))
                d_diffuse_uv_scale = tf.zeros([2], dtype=tf.float32)
                d_specular_uv_scale = tf.zeros([2], dtype=tf.float32)
                d_roughness_uv_scale = tf.zeros([2], dtype=tf.float32)
                if generic_size[0] > 0:
                    d_generic_uv_scale = tf.zeros([2], dtype=tf.float32)
                else:
                    d_generic_uv_scale = None
                if normal_map_size[0] > 0:
                    d_normal_map_uv_scale = tf.zeros([2], dtype=tf.float32)
                else:
                    d_normal_map_uv_scale = None
                d_diffuse_uv_scale_list.append(d_diffuse_uv_scale)
                d_specular_uv_scale_list.append(d_specular_uv_scale)
                d_roughness_uv_scale_list.append(d_roughness_uv_scale)
                d_generic_uv_scale_list.append(d_generic_uv_scale)
                d_normal_map_uv_scale_list.append(d_normal_map_uv_scale)
                d_diffuse_uv_scale = redner.float_ptr(
                    pyredner.data_ptr(d_diffuse_uv_scale))
                d_specular_uv_scale = redner.float_ptr(
                    pyredner.data_ptr(d_specular_uv_scale))
                d_roughness_uv_scale = redner.float_ptr(
                    pyredner.data_ptr(d_roughness_uv_scale))
                if generic_size[0] > 0:
                    d_generic_uv_scale = redner.float_ptr(
                        pyredner.data_ptr(d_generic_uv_scale))
                if normal_map_size[0] > 0:
                    d_normal_map_uv_scale = redner.float_ptr(
                        pyredner.data_ptr(d_normal_map_uv_scale))
                d_diffuse_tex = redner.Texture3(\
                    d_diffuse, diffuse_size[0], diffuse_size[1], 3, diffuse_size[2], d_diffuse_uv_scale)
                d_specular_tex = redner.Texture3(\
                    d_specular, specular_size[0], specular_size[1], 3, specular_size[2], d_specular_uv_scale)
                d_roughness_tex = redner.Texture1(\
                    d_roughness, roughness_size[0], roughness_size[1], 1, roughness_size[2],  d_roughness_uv_scale)
                if generic_size[0] > 0:
                    d_generic_tex = redner.TextureN(\
                        d_generic,
                        generic_size[1], # width
                        generic_size[2], # height
                        generic_size[0], # channels
                        generic_size[3], # num_levels
                        d_generic_uv_scale)
                else:
                    d_generic_tex = redner.TextureN(\
                        redner.float_ptr(0), 0, 0, 0, 0, redner.float_ptr(0))
                if normal_map_size[0] > 0:
                    d_normal_map_tex = redner.Texture3(\
                        d_normal_map, normal_map_size[0], normal_map_size[1], 3, normal_map_size[2], d_normal_map_uv_scale)
                else:
                    d_normal_map_tex = redner.Texture3(\
                        redner.float_ptr(0), 0, 0, 0, 0, redner.float_ptr(0))
                d_materials.append(
                    redner.DMaterial(d_diffuse_tex, d_specular_tex,
                                     d_roughness_tex, d_generic_tex,
                                     d_normal_map_tex))

        d_intensity_list = []
        d_area_lights = []
        with tf.device(pyredner.get_device_name()):
            for light in ctx.area_lights:
                d_intensity = tf.zeros(3, dtype=tf.float32)
                d_intensity_list.append(d_intensity)
                d_area_lights.append(\
                    redner.DAreaLight(redner.float_ptr(pyredner.data_ptr(d_intensity))))

        d_envmap = None
        if ctx.envmap is not None:
            envmap = ctx.envmap
            size = envmap.get_size()
            with tf.device(pyredner.get_device_name()):
                d_envmap_values = tf.zeros([size[2], size[1], size[0], 3],
                                           dtype=tf.float32)
                d_envmap_values_ptr = redner.float_ptr(
                    pyredner.data_ptr(d_envmap_values))
                d_envmap_uv_scale = tf.zeros([2], dtype=tf.float32)
                d_envmap_uv_scale_ptr = redner.float_ptr(
                    pyredner.data_ptr(d_envmap_uv_scale))
                d_world_to_env = tf.zeros([4, 4], dtype=tf.float32)
                d_world_to_env_ptr = redner.float_ptr(
                    pyredner.data_ptr(d_world_to_env))
            d_envmap_tex = redner.Texture3(\
                d_envmap_values_ptr, size[0], size[1], 3, size[2], d_envmap_uv_scale_ptr)
            d_envmap = redner.DEnvironmentMap(d_envmap_tex, d_world_to_env_ptr)

        d_scene = redner.DScene(d_camera, d_shapes, d_materials, d_area_lights,
                                d_envmap, pyredner.get_use_gpu(), -1)
        if not get_use_correlated_random_number():
            # Decouple the forward/backward random numbers by adding a big prime number
            options.seed += 1000003
        start = time.time()

        options.num_samples = ctx.num_samples[1]
        with tf.device(pyredner.get_device_name()):
            grad_img = tf.identity(grad_img)
            redner.render(
                scene,
                options,
                redner.float_ptr(0),  # rendered_image
                redner.float_ptr(pyredner.data_ptr(grad_img)),
                d_scene,
                redner.float_ptr(0))  # debug_image
        time_elapsed = time.time() - start

        if print_timing:
            print('Backward pass, time: %.5f s' % time_elapsed)

        # # For debugging
        # pyredner.imwrite(grad_img, 'grad_img.exr')
        # grad_img = tf.ones([256, 256, 3], dtype=tf.float32)
        # debug_img = tf.zeros([256, 256, 3], dtype=tf.float32)
        # redner.render(scene, options,
        #               redner.float_ptr(0),
        #               redner.float_ptr(pyredner.data_ptr(grad_img)),
        #               d_scene,
        #               redner.float_ptr(pyredner.data_ptr(debug_img)))
        # pyredner.imwrite(debug_img, 'debug.exr')
        # pyredner.imwrite(-debug_img, 'debug_.exr')
        # exit()

        ret_list = []
        ret_list.append(None)  # seed
        ret_list.append(None)  # num_shapes
        ret_list.append(None)  # num_materials
        ret_list.append(None)  # num_lights
        if camera.use_look_at:
            ret_list.append(d_position)
            ret_list.append(d_look_at)
            ret_list.append(d_up)
            ret_list.append(None)  # cam_to_world
            ret_list.append(None)  # world_to_cam
        else:
            ret_list.append(None)  # pos
            ret_list.append(None)  # look
            ret_list.append(None)  # up
            ret_list.append(d_cam_to_world)
            ret_list.append(d_world_to_cam)
        ret_list.append(d_intrinsic_mat_inv)
        ret_list.append(d_intrinsic_mat)
        ret_list.append(None)  # clip near
        ret_list.append(None)  # resolution
        ret_list.append(None)  # camera_type

        num_shapes = len(ctx.shapes)
        for i in range(num_shapes):
            ret_list.append(d_vertices_list[i])
            ret_list.append(None)  # indices
            ret_list.append(d_uvs_list[i])
            ret_list.append(d_normals_list[i])
            ret_list.append(None)  # uv_indices
            ret_list.append(None)  # normal_indices
            ret_list.append(d_colors_list[i])
            ret_list.append(None)  # material id
            ret_list.append(None)  # light id

        num_materials = len(ctx.materials)
        for i in range(num_materials):
            ret_list.append(d_diffuse_list[i])
            ret_list.append(d_diffuse_uv_scale_list[i])
            ret_list.append(d_specular_list[i])
            ret_list.append(d_specular_uv_scale_list[i])
            ret_list.append(d_roughness_list[i])
            ret_list.append(d_roughness_uv_scale_list[i])
            ret_list.append(d_generic_list[i])
            ret_list.append(d_generic_uv_scale_list[i])
            ret_list.append(d_normal_map_list[i])
            ret_list.append(d_normal_map_uv_scale_list[i])
            ret_list.append(None)  # compute_specular_lighting
            ret_list.append(None)  # two sided
            ret_list.append(None)  # use_vertex_color

        num_area_lights = len(ctx.area_lights)
        for i in range(num_area_lights):
            ret_list.append(None)  # shape id
            with tf.device('/device:cpu:' + str(pyredner.get_cpu_device_id())):
                ret_list.append(tf.identity(d_intensity_list[i]))
            ret_list.append(None)  # two sided

        if ctx.envmap is not None:
            ret_list.append(d_envmap_values)
            ret_list.append(d_envmap_uv_scale)
            ret_list.append(None)  # env_to_world
            with tf.device('/device:cpu:' + str(pyredner.get_cpu_device_id())):
                ret_list.append(tf.identity(d_world_to_env))
            ret_list.append(None)  # sample_cdf_ys
            ret_list.append(None)  # sample_cdf_xs
            ret_list.append(None)  # pdf_norm
        else:
            ret_list.append(None)
            ret_list.append(None)
            ret_list.append(None)
            ret_list.append(None)
            ret_list.append(None)
            ret_list.append(None)
            ret_list.append(None)

        ret_list.append(None)  # num samples
        ret_list.append(None)  # num bounces
        ret_list.append(None)  # num channels
        for _ in range(ctx.num_channels):
            ret_list.append(None)  # channel

        ret_list.append(None)  # sampler type
        ret_list.append(None)  # use_primary_edge_sampling
        ret_list.append(None)  # use_secondary_edge_sampling

        return ret_list

    return img, backward
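
The warning printed at the top of render() asks for TF_FORCE_GPU_ALLOW_GROWTH to be set. A minimal sketch of that setup; the flag must be set before TensorFlow initializes the GPU, so put it at the very top of the script:

import os
# Make TensorFlow allocate GPU memory on demand instead of grabbing all of it.
os.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = 'true'

import tensorflow as tf  # imported after the flag so it takes effect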
Example #10
    def __init__(self, texels, uv_scale=tf.constant([1.0, 1.0])):
        assert (tf.executing_eagerly())
        if pyredner.get_use_gpu():
            texels = tf.identity(texels).gpu(pyredner.get_gpu_device_id())
            uv_scale = tf.identity(uv_scale).gpu(pyredner.get_gpu_device_id())
        else:
            texels = tf.identity(texels).cpu()
            uv_scale = tf.identity(uv_scale).cpu()
        self.texels = texels
        if len(texels.shape) >= 2:
            with tf.device(pyredner.get_device_name()):
                # Build a mipmap for texels
                width = max(
                    texels.shape[0], texels.shape[1]
                ).value  # Without .value, this would be a `Dimension` rather than an int
                num_levels = math.ceil(math.log(width, 2) + 1)
                mipmap = tf.broadcast_to(texels, [num_levels, *texels.shape])
                if len(mipmap.shape) == 3:
                    mipmap = tf.expand_dims(mipmap, axis=-1)
                num_channels = mipmap.shape[-1]
                """NOTE: conv2d kernel axes
    
                torch: (outchannels,   in_channels / groups, kH,          kW)
                tf:    [filter_height, filter_width,         in_channels, out_channels]
                """
                box_filter = tf.ones([2, 2, num_channels, 1],
                                     dtype=tf.float32) / 4.0

                # (TF) [batch, in_height, in_width, in_channels], i.e. NHWC
                base_level = tf.transpose(tf.expand_dims(texels, axis=0),
                                          perm=[0, 3, 1, 2])

                mipmap = [base_level]
                prev_lvl = base_level
                for l in range(1, num_levels):
                    dilation_size = 2**(l - 1)
                    # Pad for circular boundary condition
                    # This is slow. The hope is that at some point TensorFlow will
                    # support a circular boundary condition for conv2d natively.
                    desired_height = prev_lvl.shape[2] + dilation_size
                    while prev_lvl.shape[2] < desired_height:
                        prev_lvl = tf.concat([
                            prev_lvl, prev_lvl[:, :, 0:(desired_height -
                                                        prev_lvl.shape[2])]
                        ], 2)

                    desired_width = prev_lvl.shape[3] + dilation_size
                    while prev_lvl.shape[3] < desired_width:
                        prev_lvl = tf.concat(
                            [prev_lvl, prev_lvl[:, :, :, 0:dilation_size]], 3)
                    """NOTE: Torch conv data_format is NCHW. In Tensorflow, GPU supports
                    NCHW but CPU supports only NHWC. Hence, we need to convert between
                    NCHW and NHwC when we use CPU.
                    """
                    """NOTE: Current libxsmm and customized CPU implementations do 
                    not yet support dilation rates larger than 1, i.e. we cannot use
                    TF Conv2DCustomBackpropInputOp
    
                    https://github.com/tensorflow/tensorflow/blob/7bc1c3c37ce4e591012f4325ab7a25ae387773c7/tensorflow/core/kernels/conv_grad_input_ops.cc#L300
                    """
                    # if pyredner.use_gpu:
                    #     current_lvl = tf.nn.depthwise_conv2d(
                    #         prev_lvl,
                    #         box_filter,  # [filter_height, filter_width, in_channels, out_channels]
                    #         dilations=[dilation_size,dilation_size],
                    #         strides=[1,1,1,1],
                    #         padding="VALID",   # No padding
                    #         data_format="NCHW"
                    #     )

                    # else:
                    prev_lvl = tf.transpose(prev_lvl, perm=[0, 2, 3, 1])
                    current_lvl = tf.nn.depthwise_conv2d(
                        prev_lvl,
                        box_filter,  # [filter_height, filter_width, in_channels, out_channels]
                        dilations=[dilation_size, dilation_size],
                        strides=[1, 1, 1, 1],
                        padding="VALID",  # No padding
                        data_format="NHWC")
                    current_lvl = tf.transpose(current_lvl, [0, 3, 1, 2])

                    mipmap.append(current_lvl)
                    prev_lvl = current_lvl

                mipmap = tf.concat(mipmap, 0)
                # Convert from NCHW to NHWC
                mipmap = tf.transpose(mipmap, perm=[0, 2, 3, 1])
                texels = mipmap

        self.mipmap = texels
        self.uv_scale = uv_scale
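
The loop above does not downsample: every level keeps the full texture resolution and is blurred by the 2x2 box filter with dilation 2**(l - 1) after circular padding. A standalone sketch (not from the original file) of a single such step with dilation 1, in NHWC layout:

import tensorflow as tf

texels = tf.random.uniform([1, 8, 8, 3])               # NHWC batch of one texture
box = tf.ones([2, 2, 3, 1], dtype=tf.float32) / 4.0    # per-channel 2x2 box filter

# Circular padding: wrap one row and one column around before the VALID conv.
padded = tf.concat([texels, texels[:, :1, :, :]], axis=1)
padded = tf.concat([padded, padded[:, :, :1, :]], axis=2)

level1 = tf.nn.depthwise_conv2d(padded, box,
                                strides=[1, 1, 1, 1],
                                padding='VALID')        # shape [1, 8, 8, 3]

Higher levels in the constructor reuse the same filter with dilations=[2**(l - 1), 2**(l - 1)] and correspondingly wider circular padding; the levels are then concatenated and transposed back to NHWC.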