Code Example #1
File: render_pytorch.py  Project: Gogumee/redner
    def backward(ctx, grad_img):
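        # Adjoint pass: allocate zero-filled gradient buffers for every scene
        # parameter, expose their raw pointers to the C++ core via redner.DScene,
        # let redner.render() accumulate gradients into them, then return them
        # in forward()'s argument order.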
        if not grad_img.is_contiguous():
            grad_img = grad_img.contiguous()
        scene = ctx.scene
        options = ctx.options

        d_fov_factor = torch.zeros(1)
        d_cam_to_world = torch.zeros(4, 4)
        d_world_to_cam = torch.zeros(4, 4)
        d_camera = redner.DCamera(redner.float_ptr(d_cam_to_world.data_ptr()),
                                  redner.float_ptr(d_world_to_cam.data_ptr()),
                                  redner.float_ptr(d_fov_factor.data_ptr()))
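        # Per-shape gradient buffers; missing uv/normal buffers are passed to
        # the C++ core as null pointers (float_ptr(0)).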
        d_vertices_list = []
        d_uvs_list = []
        d_normals_list = []
        d_shapes = []
        for shape in ctx.shapes:
            num_vertices = shape.num_vertices
            d_vertices = torch.zeros(num_vertices,
                                     3,
                                     device=pyredner.get_device())
            d_uvs = torch.zeros(
                num_vertices, 2,
                device=pyredner.get_device()) if shape.has_uvs() else None
            d_normals = torch.zeros(
                num_vertices, 3,
                device=pyredner.get_device()) if shape.has_normals() else None
            d_vertices_list.append(d_vertices)
            d_uvs_list.append(d_uvs)
            d_normals_list.append(d_normals)
            d_shapes.append(redner.DShape(\
                redner.float_ptr(d_vertices.data_ptr()),
                redner.float_ptr(d_uvs.data_ptr() if d_uvs is not None else 0),
                redner.float_ptr(d_normals.data_ptr() if d_normals is not None else 0)))

        d_diffuse_list = []
        d_specular_list = []
        d_roughness_list = []
        d_materials = []
        for material in ctx.materials:
            diffuse_size = material.get_diffuse_size()
            specular_size = material.get_specular_size()
            roughness_size = material.get_roughness_size()
            if diffuse_size[0] == 0:
                d_diffuse = torch.zeros(3, device=pyredner.get_device())
            else:
                d_diffuse = torch.zeros(diffuse_size[2],
                                        diffuse_size[1],
                                        diffuse_size[0],
                                        3,
                                        device=pyredner.get_device())
            if specular_size[0] == 0:
                d_specular = torch.zeros(3, device=pyredner.get_device())
            else:
                d_specular = torch.zeros(specular_size[2],
                                         specular_size[1],
                                         specular_size[0],
                                         3,
                                         device=pyredner.get_device())
            if roughness_size[0] == 0:
                d_roughness = torch.zeros(1, device=pyredner.get_device())
            else:
                d_roughness = torch.zeros(roughness_size[2],
                                          roughness_size[1],
                                          roughness_size[0],
                                          device=pyredner.get_device())
            d_diffuse_list.append(d_diffuse)
            d_specular_list.append(d_specular)
            d_roughness_list.append(d_roughness)
            d_diffuse_uv_scale = torch.zeros(2)
            d_specular_uv_scale = torch.zeros(2)
            d_roughness_uv_scale = torch.zeros(2)
            d_diffuse_tex = redner.Texture3(\
                redner.float_ptr(d_diffuse.data_ptr()),
                diffuse_size[0], diffuse_size[1], diffuse_size[2],
                redner.float_ptr(d_diffuse_uv_scale.data_ptr()))
            d_specular_tex = redner.Texture3(\
                redner.float_ptr(d_specular.data_ptr()),
                specular_size[0], specular_size[1], specular_size[2],
                redner.float_ptr(d_specular_uv_scale.data_ptr()))
            d_roughness_tex = redner.Texture1(\
                redner.float_ptr(d_roughness.data_ptr()),
                roughness_size[0], roughness_size[1], roughness_size[2],
                redner.float_ptr(d_roughness_uv_scale.data_ptr()))
            d_materials.append(redner.DMaterial(\
                d_diffuse_tex, d_specular_tex, d_roughness_tex))

        d_intensity_list = []
        d_lights = []
        for light in ctx.lights:
            d_intensity = torch.zeros(3, device=pyredner.get_device())
            d_intensity_list.append(d_intensity)
            d_lights.append(
                redner.DLight(redner.float_ptr(d_intensity.data_ptr())))

        d_scene = redner.DScene(d_camera, d_shapes, d_materials, d_lights,
                                pyredner.get_use_gpu())
        if not get_use_correlated_random_number():
            # Decouple the forward/backward random numbers by adding a big prime number
            options.seed += 1000003
        redner.render(scene, options, redner.float_ptr(0),
                      redner.float_ptr(grad_img.data_ptr()), d_scene,
                      redner.float_ptr(0))
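        # The two float_ptr(0) arguments are the unused rendered-image and
        # debug-image outputs; this call only accumulates gradients into the
        # buffers referenced by d_scene.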

        # # For debugging
        # grad_img = torch.ones(256, 256, 3)
        # debug_img = torch.zeros(256, 256, 3)
        # redner.render(scene, options,
        #               redner.float_ptr(0),
        #               redner.float_ptr(grad_img.data_ptr()),
        #               d_scene,
        #               redner.float_ptr(debug_img.data_ptr()))
        # pyredner.imwrite(debug_img, 'debug.exr')
        # exit()

        ret_list = []
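        # Return one gradient per forward() input, in the same order the inputs
        # were passed in; None marks inputs that have no gradient.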
        ret_list.append(None)  # seed
        ret_list.append(None)  # num_shapes
        ret_list.append(None)  # num_materials
        ret_list.append(None)  # num_lights
        ret_list.append(d_cam_to_world)
        ret_list.append(d_world_to_cam)
        ret_list.append(d_fov_factor)
        ret_list.append(None)  # clip near
        ret_list.append(None)  # resolution
        ret_list.append(None)  # fisheye

        num_shapes = len(ctx.shapes)
        for i in range(num_shapes):
            ret_list.append(d_vertices_list[i])
            ret_list.append(None)  # indices
            ret_list.append(d_uvs_list[i])
            ret_list.append(d_normals_list[i])
            ret_list.append(None)  # material id
            ret_list.append(None)  # light id

        num_materials = len(ctx.materials)
        for i in range(num_materials):
            ret_list.append(d_diffuse_list[i])
            ret_list.append(None)  # diffuse_uv_scale
            ret_list.append(d_specular_list[i])
            ret_list.append(None)  # specular_uv_scale
            ret_list.append(d_roughness_list[i])
            ret_list.append(None)  # roughness_uv_scale
            ret_list.append(None)  # two sided

        num_lights = len(ctx.lights)
        for i in range(num_lights):
            ret_list.append(None)  # shape id
            ret_list.append(d_intensity_list[i].cpu())
            ret_list.append(None)  # two sided

        ret_list.append(None)  # num samples
        ret_list.append(None)  # num bounces

        return tuple(ret_list)
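All of these backward implementations follow the standard torch.autograd.Function contract (and its tf.custom_gradient counterpart in the TensorFlow examples): forward() stashes the scene description on ctx, and backward() returns exactly one gradient slot per forward() input, with None for non-differentiable arguments such as seeds, counts, ids, and flags. Below is a minimal, self-contained PyTorch sketch of that contract, using illustrative names unrelated to the redner API:

import torch

class ScaleExample(torch.autograd.Function):
    # Toy Function mirroring the pattern above: forward() stores whatever the
    # gradient computation needs on ctx; backward() returns one entry per
    # forward() argument, using None for non-differentiable ones.
    @staticmethod
    def forward(ctx, x, scale, num_repeats):
        ctx.save_for_backward(x, scale)
        ctx.num_repeats = num_repeats  # plain attribute, like ctx.scene / ctx.options above
        return x * scale * num_repeats

    @staticmethod
    def backward(ctx, grad_out):
        x, scale = ctx.saved_tensors
        grad_x = grad_out * scale * ctx.num_repeats
        grad_scale = (grad_out * x * ctx.num_repeats).sum()
        # One slot per forward() input: x, scale, num_repeats.
        return grad_x, grad_scale, None

x = torch.randn(4, requires_grad=True)
scale = torch.tensor(2.0, requires_grad=True)
ScaleExample.apply(x, scale, 3).sum().backward()
print(x.grad, scale.grad)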
Code Example #2
    def backward(grad_img):
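        # TensorFlow variant of the same adjoint pass: gradient buffers are
        # tf.zeros tensors placed on pyredner.get_device_name(), and raw
        # pointers are obtained through pyredner.data_ptr() rather than
        # Tensor.data_ptr().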
        global __ctx
        ctx = __ctx
        scene = ctx.scene
        options = ctx.options
        with tf.device(pyredner.get_device_name()):
            d_position = tf.zeros(3, dtype=tf.float32)
            d_look_at = tf.zeros(3, dtype=tf.float32)
            d_up = tf.zeros(3, dtype=tf.float32)
            d_ndc_to_cam = tf.zeros([3, 3], dtype=tf.float32)
            d_cam_to_ndc = tf.zeros([3, 3], dtype=tf.float32)
            d_camera = redner.DCamera(
                redner.float_ptr(pyredner.data_ptr(d_position)),
                redner.float_ptr(pyredner.data_ptr(d_look_at)),
                redner.float_ptr(pyredner.data_ptr(d_up)),
                redner.float_ptr(pyredner.data_ptr(d_ndc_to_cam)),
                redner.float_ptr(pyredner.data_ptr(d_cam_to_ndc)))
        d_vertices_list = []
        d_uvs_list = []
        d_normals_list = []
        d_shapes = []
        with tf.device(pyredner.get_device_name()):
            for i, shape in enumerate(ctx.shapes):
                num_vertices = shape.num_vertices
                d_vertices = tf.zeros([num_vertices, 3], dtype=tf.float32)
                d_uvs = tf.zeros([num_vertices, 2],
                                 dtype=tf.float32) if shape.has_uvs() else None
                d_normals = tf.zeros(
                    [num_vertices, 3],
                    dtype=tf.float32) if shape.has_normals() else None
                d_vertices_list.append(d_vertices)
                d_uvs_list.append(d_uvs)
                d_normals_list.append(d_normals)
                d_shapes.append(redner.DShape(\
                    redner.float_ptr(pyredner.data_ptr(d_vertices)),
                    redner.float_ptr(pyredner.data_ptr(d_uvs) if d_uvs is not None else 0),
                    redner.float_ptr(pyredner.data_ptr(d_normals) if d_normals is not None else 0)))

        d_diffuse_list = []
        d_specular_list = []
        d_roughness_list = []
        d_normal_map_list = []
        d_diffuse_uv_scale_list = []
        d_specular_uv_scale_list = []
        d_roughness_uv_scale_list = []
        d_normal_map_uv_scale_list = []
        d_materials = []
        with tf.device(pyredner.get_device_name()):
            for material in ctx.materials:
                diffuse_size = material.get_diffuse_size()
                specular_size = material.get_specular_size()
                roughness_size = material.get_roughness_size()
                normal_map_size = material.get_normal_map_size()
                if diffuse_size[0] == 0:
                    d_diffuse = tf.zeros(3, dtype=tf.float32)
                else:
                    d_diffuse = tf.zeros(
                        [diffuse_size[2], diffuse_size[1], diffuse_size[0], 3],
                        dtype=tf.float32)
                if specular_size[0] == 0:
                    d_specular = tf.zeros(3, dtype=tf.float32)
                else:
                    d_specular = tf.zeros([
                        specular_size[2], specular_size[1], specular_size[0], 3
                    ],
                                          dtype=tf.float32)
                if roughness_size[0] == 0:
                    d_roughness = tf.zeros(1, dtype=tf.float32)
                else:
                    d_roughness = tf.zeros([
                        roughness_size[2], roughness_size[1],
                        roughness_size[0], 1
                    ],
                                           dtype=tf.float32)
                # HACK: tensorflow's eager mode uses a cache to store scalar
                #       constants to avoid memory copy. If we pass scalar tensors
                #       into the C++ code and modify them, we would corrupt the
                #       cache, causing incorrect results in future scalar constant
                #       creations. Thus we force tensorflow to copy by adding a zero
                # (also see https://github.com/tensorflow/tensorflow/issues/11186
                #  for more discussion regarding copying tensors)
                if d_roughness.shape.num_elements() == 1:
                    d_roughness = d_roughness + 0
                if normal_map_size[0] == 0:
                    d_normal_map = None
                else:
                    d_normal_map = tf.zeros([
                        normal_map_size[2], normal_map_size[1],
                        normal_map_size[0], 3
                    ],
                                            dtype=tf.float32)

                d_diffuse_list.append(d_diffuse)
                d_specular_list.append(d_specular)
                d_roughness_list.append(d_roughness)
                d_normal_map_list.append(d_normal_map)
                d_diffuse = redner.float_ptr(pyredner.data_ptr(d_diffuse))
                d_specular = redner.float_ptr(pyredner.data_ptr(d_specular))
                d_roughness = redner.float_ptr(pyredner.data_ptr(d_roughness))
                if normal_map_size[0] > 0:
                    d_normal_map = redner.float_ptr(
                        pyredner.data_ptr(d_normal_map))
                d_diffuse_uv_scale = tf.zeros([2], dtype=tf.float32)
                d_specular_uv_scale = tf.zeros([2], dtype=tf.float32)
                d_roughness_uv_scale = tf.zeros([2], dtype=tf.float32)
                if normal_map_size[0] > 0:
                    d_normal_map_uv_scale = tf.zeros([2], dtype=tf.float32)
                else:
                    d_normal_map_uv_scale = None
                d_diffuse_uv_scale_list.append(d_diffuse_uv_scale)
                d_specular_uv_scale_list.append(d_specular_uv_scale)
                d_roughness_uv_scale_list.append(d_roughness_uv_scale)
                d_normal_map_uv_scale_list.append(d_normal_map_uv_scale)
                d_diffuse_uv_scale = redner.float_ptr(
                    pyredner.data_ptr(d_diffuse_uv_scale))
                d_specular_uv_scale = redner.float_ptr(
                    pyredner.data_ptr(d_specular_uv_scale))
                d_roughness_uv_scale = redner.float_ptr(
                    pyredner.data_ptr(d_roughness_uv_scale))
                if normal_map_size[0] > 0:
                    d_normal_map_uv_scale = redner.float_ptr(
                        pyredner.data_ptr(d_normal_map_uv_scale))
                d_diffuse_tex = redner.Texture3(\
                    d_diffuse, diffuse_size[0], diffuse_size[1], diffuse_size[2], d_diffuse_uv_scale)
                d_specular_tex = redner.Texture3(\
                    d_specular, specular_size[0], specular_size[1], specular_size[2], d_specular_uv_scale)
                d_roughness_tex = redner.Texture1(\
                    d_roughness, roughness_size[0], roughness_size[1], roughness_size[2],  d_roughness_uv_scale)
                if normal_map_size[0] > 0:
                    d_normal_map_tex = redner.Texture3(\
                        d_normal_map, normal_map_size[0], normal_map_size[1], normal_map_size[2], d_normal_map_uv_scale)
                else:
                    d_normal_map_tex = redner.Texture3(\
                        redner.float_ptr(0), 0, 0, 0, redner.float_ptr(0))
                d_materials.append(
                    redner.DMaterial(d_diffuse_tex, d_specular_tex,
                                     d_roughness_tex, d_normal_map_tex))

        d_intensity_list = []
        d_area_lights = []
        with tf.device(pyredner.get_device_name()):
            for light in ctx.area_lights:
                d_intensity = tf.zeros(3, dtype=tf.float32)
                d_intensity_list.append(d_intensity)
                d_area_lights.append(\
                    redner.DAreaLight(redner.float_ptr(pyredner.data_ptr(d_intensity))))

        d_envmap = None
        if ctx.envmap is not None:
            envmap = ctx.envmap
            size = envmap.get_size()
            with tf.device(pyredner.get_device_name()):
                d_envmap_values = tf.zeros([size[2], size[1], size[0], 3],
                                           dtype=tf.float32)
                d_envmap_values_ptr = redner.float_ptr(
                    pyredner.data_ptr(d_envmap_values))
                d_envmap_uv_scale = tf.zeros([2], dtype=tf.float32)
                d_envmap_uv_scale_ptr = redner.float_ptr(
                    pyredner.data_ptr(d_envmap_uv_scale))
                d_world_to_env = tf.zeros([4, 4], dtype=tf.float32)
                d_world_to_env_ptr = redner.float_ptr(
                    pyredner.data_ptr(d_world_to_env))
            d_envmap_tex = redner.Texture3(\
                d_envmap_values_ptr, size[0], size[1], size[2], d_envmap_uv_scale_ptr)
            d_envmap = redner.DEnvironmentMap(d_envmap_tex, d_world_to_env_ptr)

        d_scene = redner.DScene(d_camera, d_shapes, d_materials, d_area_lights,
                                d_envmap, pyredner.get_use_gpu(), -1)
        if not get_use_correlated_random_number():
            # Decouple the forward/backward random numbers by adding a big prime number
            options.seed += 1000003
        start = time.time()

        options.num_samples = ctx.num_samples[1]
        with tf.device(pyredner.get_device_name()):
            if pyredner.get_use_gpu():
                grad_img = grad_img.gpu(pyredner.get_gpu_device_id())
            else:
                grad_img = grad_img.cpu()
            redner.render(
                scene,
                options,
                redner.float_ptr(0),  # rendered_image
                redner.float_ptr(pyredner.data_ptr(grad_img)),
                d_scene,
                redner.float_ptr(0))  # debug_image
        time_elapsed = time.time() - start

        if print_timing:
            print('Backward pass, time: %.5f s' % time_elapsed)

        # # For debugging
        # pyredner.imwrite(grad_img, 'grad_img.exr')
        # grad_img = tf.ones([256, 256, 3], dtype=tf.float32)
        # debug_img = tf.zeros([256, 256, 3], dtype=tf.float32)
        # redner.render(scene, options,
        #               redner.float_ptr(0),
        #               redner.float_ptr(pyredner.data_ptr(grad_img)),
        #               d_scene,
        #               redner.float_ptr(pyredner.data_ptr(debug_img)))
        # pyredner.imwrite(debug_img, 'debug.exr')
        # pyredner.imwrite(-debug_img, 'debug_.exr')
        # exit()

        ret_list = []
        ret_list.append(None)  # seed
        ret_list.append(None)  # num_shapes
        ret_list.append(None)  # num_materials
        ret_list.append(None)  # num_lights
        ret_list.append(d_position)
        ret_list.append(d_look_at)
        ret_list.append(d_up)
        ret_list.append(d_ndc_to_cam)
        ret_list.append(d_cam_to_ndc)
        ret_list.append(None)  # clip near
        ret_list.append(None)  # resolution
        ret_list.append(None)  # camera_type

        num_shapes = len(ctx.shapes)
        for i in range(num_shapes):
            ret_list.append(d_vertices_list[i])
            ret_list.append(None)  # indices
            ret_list.append(d_uvs_list[i])
            ret_list.append(d_normals_list[i])
            ret_list.append(None)  # material id
            ret_list.append(None)  # light id

        num_materials = len(ctx.materials)
        for i in range(num_materials):
            ret_list.append(d_diffuse_list[i])
            ret_list.append(d_diffuse_uv_scale_list[i])
            ret_list.append(d_specular_list[i])
            ret_list.append(d_specular_uv_scale_list[i])
            ret_list.append(d_roughness_list[i])
            ret_list.append(d_roughness_uv_scale_list[i])
            ret_list.append(d_normal_map_list[i])
            ret_list.append(d_normal_map_uv_scale_list[i])
            ret_list.append(None)  # two sided

        num_area_lights = len(ctx.area_lights)
        for i in range(num_area_lights):
            ret_list.append(None)  # shape id
            ret_list.append(d_intensity_list[i].cpu())
            ret_list.append(None)  # two sided

        if ctx.envmap is not None:
            ret_list.append(d_envmap_values)
            ret_list.append(d_envmap_uv_scale)
            ret_list.append(None)  # env_to_world
            ret_list.append(d_world_to_env.cpu())
            ret_list.append(None)  # sample_cdf_ys
            ret_list.append(None)  # sample_cdf_xs
            ret_list.append(None)  # pdf_norm
        else:
            ret_list.append(None)
            ret_list.append(None)
            ret_list.append(None)
            ret_list.append(None)
            ret_list.append(None)
            ret_list.append(None)
            ret_list.append(None)

        ret_list.append(None)  # num samples
        ret_list.append(None)  # num bounces
        ret_list.append(None)  # num channels
        for _ in range(ctx.num_channels):
            ret_list.append(None)  # channel

        ret_list.append(None)  # sampler type
        ret_list.append(None)  # use_primary_edge_sampling
        ret_list.append(None)  # use_secondary_edge_sampling

        # pdb.set_trace()
        return ret_list
Code Example #3
File: render_tensorflow.py  Project: nkyriazis/redner
def create_gradient_buffers(ctx):
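    # Collect zero-initialized gradient buffers for the camera, shapes,
    # materials, area lights and environment map into a single Context object,
    # together with the redner.DScene that references them; the backward pass
    # renders gradients directly into these buffers.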
    scene = ctx.scene
    options = ctx.options
    camera = ctx.camera

    buffers = Context()

    with tf.device(pyredner.get_device_name()):
        if camera.use_look_at:
            buffers.d_position = tf.zeros(3, dtype=tf.float32)
            buffers.d_look_at = tf.zeros(3, dtype=tf.float32)
            buffers.d_up = tf.zeros(3, dtype=tf.float32)
            buffers.d_cam_to_world = None
            buffers.d_world_to_cam = None
        else:
            buffers.d_position = None
            buffers.d_look_at = None
            buffers.d_up = None
            buffers.d_cam_to_world = tf.zeros([4, 4], dtype=tf.float32)
            buffers.d_world_to_cam = tf.zeros([4, 4], dtype=tf.float32)
        buffers.d_intrinsic_mat_inv = tf.zeros([3, 3], dtype=tf.float32)
        buffers.d_intrinsic_mat = tf.zeros([3, 3], dtype=tf.float32)
        if camera.use_look_at:
            buffers.d_camera = redner.DCamera(\
                redner.float_ptr(pyredner.data_ptr(buffers.d_position)),
                redner.float_ptr(pyredner.data_ptr(buffers.d_look_at)),
                redner.float_ptr(pyredner.data_ptr(buffers.d_up)),
                redner.float_ptr(0), # cam_to_world
                redner.float_ptr(0), # world_to_cam
                redner.float_ptr(pyredner.data_ptr(buffers.d_intrinsic_mat_inv)),
                redner.float_ptr(pyredner.data_ptr(buffers.d_intrinsic_mat)))
        else:
            buffers.d_camera = redner.DCamera(\
                redner.float_ptr(0),
                redner.float_ptr(0),
                redner.float_ptr(0),
                redner.float_ptr(pyredner.data_ptr(buffers.d_cam_to_world)),
                redner.float_ptr(pyredner.data_ptr(buffers.d_world_to_cam)),
                redner.float_ptr(pyredner.data_ptr(buffers.d_intrinsic_mat_inv)),
                redner.float_ptr(pyredner.data_ptr(buffers.d_intrinsic_mat)))

    buffers.d_vertices_list = []
    buffers.d_uvs_list = []
    buffers.d_normals_list = []
    buffers.d_colors_list = []
    buffers.d_shapes = []
    with tf.device(pyredner.get_device_name()):
        for i, shape in enumerate(ctx.shapes):
            num_vertices = shape.num_vertices
            d_vertices = tf.zeros([num_vertices, 3], dtype=tf.float32)
            d_uvs = tf.zeros([num_vertices, 2],
                             dtype=tf.float32) if shape.has_uvs() else None
            d_normals = tf.zeros(
                [num_vertices, 3],
                dtype=tf.float32) if shape.has_normals() else None
            d_colors = tf.zeros(
                [num_vertices, 3],
                dtype=tf.float32) if shape.has_colors() else None
            buffers.d_vertices_list.append(d_vertices)
            buffers.d_uvs_list.append(d_uvs)
            buffers.d_normals_list.append(d_normals)
            buffers.d_colors_list.append(d_colors)
            buffers.d_shapes.append(redner.DShape(\
                redner.float_ptr(pyredner.data_ptr(d_vertices)),
                redner.float_ptr(pyredner.data_ptr(d_uvs) if d_uvs is not None else 0),
                redner.float_ptr(pyredner.data_ptr(d_normals) if d_normals is not None else 0),
                redner.float_ptr(pyredner.data_ptr(d_colors) if d_colors is not None else 0)))

    buffers.d_diffuse_list = []
    buffers.d_specular_list = []
    buffers.d_roughness_list = []
    buffers.d_normal_map_list = []
    buffers.d_diffuse_uv_scale_list = []
    buffers.d_specular_uv_scale_list = []
    buffers.d_roughness_uv_scale_list = []
    buffers.d_generic_list = []
    buffers.d_generic_uv_scale_list = []
    buffers.d_normal_map_uv_scale_list = []
    buffers.d_materials = []
    with tf.device(pyredner.get_device_name()):
        for material in ctx.materials:
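            # Textures here are multi-level (get_*_levels()), so one zero
            # gradient buffer is allocated per level.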
            if material.get_diffuse_size(0)[0] == 0:
                d_diffuse = [tf.zeros(3, dtype=tf.float32)]
            else:
                d_diffuse = []
                for l in range(material.get_diffuse_levels()):
                    diffuse_size = material.get_diffuse_size(l)
                    d_diffuse.append(\
                        tf.zeros([diffuse_size[1],
                                  diffuse_size[0],
                                  3], dtype=tf.float32))

            if material.get_specular_size(0)[0] == 0:
                d_specular = [tf.zeros(3, dtype=tf.float32)]
            else:
                d_specular = []
                for l in range(material.get_specular_levels()):
                    specular_size = material.get_specular_size(l)
                    d_specular.append(\
                        tf.zeros([specular_size[1],
                                  specular_size[0],
                                  3], dtype=tf.float32))

            if material.get_roughness_size(0)[0] == 0:
                d_roughness = [tf.zeros(1, dtype=tf.float32)]
            else:
                d_roughness = []
                for l in range(material.get_roughness_levels()):
                    roughness_size = material.get_roughness_size(l)
                    d_roughness.append(\
                        tf.zeros([roughness_size[1],
                                  roughness_size[0],
                                  1], dtype=tf.float32))
            # HACK: tensorflow's eager mode uses a cache to store scalar
            #       constants to avoid memory copy. If we pass scalar tensors
            #       into the C++ code and modify them, we would corrupt the
            #       cache, causing incorrect results in future scalar constant
            #       creations. Thus we force tensorflow to copy by adding a zero.
            # (also see https://github.com/tensorflow/tensorflow/issues/11186
            #  for more discussion regarding copying tensors)
            if d_roughness[0].shape.num_elements() == 1:
                d_roughness[0] = d_roughness[0] + 0

            if material.get_generic_levels() == 0:
                d_generic = None
            else:
                d_generic = []
                for l in range(material.get_generic_levels()):
                    generic_size = material.get_generic_size(l)
                    d_generic.append(\
                        tf.zeros([generic_size[2],
                                  generic_size[1],
                                  generic_size[0]], dtype=tf.float32))

            if material.get_normal_map_levels() == 0:
                d_normal_map = None
            else:
                d_normal_map = []
                for l in range(material.get_normal_map_levels()):
                    normal_map_size = material.get_normal_map_size(l)
                    d_normal_map.append(\
                        tf.zeros([normal_map_size[1],
                                  normal_map_size[0],
                                  3], dtype=tf.float32))

            buffers.d_diffuse_list.append(d_diffuse)
            buffers.d_specular_list.append(d_specular)
            buffers.d_roughness_list.append(d_roughness)
            buffers.d_generic_list.append(d_generic)
            buffers.d_normal_map_list.append(d_normal_map)

            d_diffuse_uv_scale = tf.zeros([2], dtype=tf.float32)
            d_specular_uv_scale = tf.zeros([2], dtype=tf.float32)
            d_roughness_uv_scale = tf.zeros([2], dtype=tf.float32)
            if d_generic is None:
                d_generic_uv_scale = None
            else:
                d_generic_uv_scale = tf.zeros([2], dtype=tf.float32)
            if d_normal_map is None:
                d_normal_map_uv_scale = None
            else:
                d_normal_map_uv_scale = tf.zeros([2], dtype=tf.float32)
            buffers.d_diffuse_uv_scale_list.append(d_diffuse_uv_scale)
            buffers.d_specular_uv_scale_list.append(d_specular_uv_scale)
            buffers.d_roughness_uv_scale_list.append(d_roughness_uv_scale)
            buffers.d_generic_uv_scale_list.append(d_generic_uv_scale)
            buffers.d_normal_map_uv_scale_list.append(d_normal_map_uv_scale)

            if len(d_diffuse[0].shape) == 1:
                d_diffuse_tex = redner.Texture3(\
                    [redner.float_ptr(pyredner.data_ptr(d_diffuse[0]))],
                    [0],
                    [0],
                    3,
                    redner.float_ptr(pyredner.data_ptr(d_diffuse_uv_scale)))
            else:
                d_diffuse_tex = redner.Texture3(\
                    [redner.float_ptr(pyredner.data_ptr(x)) for x in d_diffuse],
                    [x.shape[1] for x in d_diffuse],
                    [x.shape[0] for x in d_diffuse],
                    3,
                    redner.float_ptr(pyredner.data_ptr(d_diffuse_uv_scale)))

            if len(d_specular[0].shape) == 1:
                d_specular_tex = redner.Texture3(\
                    [redner.float_ptr(pyredner.data_ptr(d_specular[0]))],
                    [0],
                    [0],
                    3,
                    redner.float_ptr(pyredner.data_ptr(d_specular_uv_scale)))
            else:
                d_specular_tex = redner.Texture3(\
                    [redner.float_ptr(pyredner.data_ptr(x)) for x in d_specular],
                    [x.shape[1] for x in d_specular],
                    [x.shape[0] for x in d_specular],
                    3,
                    redner.float_ptr(pyredner.data_ptr(d_specular_uv_scale)))

            if len(d_roughness[0].shape) == 1:
                d_roughness_tex = redner.Texture1(\
                    [redner.float_ptr(pyredner.data_ptr(d_roughness[0]))],
                    [0],
                    [0],
                    1,
                    redner.float_ptr(pyredner.data_ptr(d_roughness_uv_scale)))
            else:
                d_roughness_tex = redner.Texture1(\
                    [redner.float_ptr(pyredner.data_ptr(x)) for x in d_roughness],
                    [x.shape[1] for x in d_roughness],
                    [x.shape[0] for x in d_roughness],
                    1,
                    redner.float_ptr(pyredner.data_ptr(d_roughness_uv_scale)))

            if d_generic is None:
                d_generic_tex = redner.TextureN(\
                    [], [], [], 0, redner.float_ptr(0))
            else:
                d_generic_tex = redner.TextureN(\
                    [redner.float_ptr(pyredner.data_ptr(x)) for x in d_generic],
                    [x.shape[1] for x in d_generic],
                    [x.shape[0] for x in d_generic],
                    d_generic[0].shape[2],
                    redner.float_ptr(pyredner.data_ptr(d_generic_uv_scale)))

            if d_normal_map is None:
                d_normal_map = redner.Texture3(\
                    [], [], [], 0, redner.float_ptr(0))
            else:
                d_normal_map = redner.Texture3(\
                    [redner.float_ptr(pyredner.data_ptr(x)) for x in d_normal_map],
                    [x.shape[1] for x in d_normal_map],
                    [x.shape[0] for x in d_normal_map],
                    3,
                    redner.float_ptr(pyredner.data_ptr(d_normal_map_uv_scale)))

            buffers.d_materials.append(redner.DMaterial(\
                d_diffuse_tex, d_specular_tex, d_roughness_tex,
                d_generic_tex, d_normal_map))

    buffers.d_intensity_list = []
    buffers.d_area_lights = []
    with tf.device(pyredner.get_device_name()):
        for light in ctx.area_lights:
            d_intensity = tf.zeros(3, dtype=tf.float32)
            buffers.d_intensity_list.append(d_intensity)
            buffers.d_area_lights.append(\
                redner.DAreaLight(redner.float_ptr(pyredner.data_ptr(d_intensity))))

    buffers.d_envmap = None
    if ctx.envmap is not None:
        envmap = ctx.envmap
        with tf.device(pyredner.get_device_name()):
            buffers.d_envmap_values = []
            for l in range(envmap.get_levels()):
                size = envmap.get_size(l)
                buffers.d_envmap_values.append(\
                    tf.zeros([size[1],
                              size[0],
                              3], dtype=tf.float32))
            buffers.d_envmap_uv_scale = tf.zeros([2], dtype=tf.float32)
            buffers.d_world_to_env = tf.zeros([4, 4], dtype=tf.float32)
            d_envmap_tex = redner.Texture3(\
                [redner.float_ptr(pyredner.data_ptr(x)) for x in buffers.d_envmap_values],
                [x.shape[1] for x in buffers.d_envmap_values],
                [x.shape[0] for x in buffers.d_envmap_values],
                3,
                redner.float_ptr(pyredner.data_ptr(buffers.d_envmap_uv_scale)))
            buffers.d_envmap = redner.DEnvironmentMap(
                d_envmap_tex,
                redner.float_ptr(pyredner.data_ptr(buffers.d_world_to_env)))

    buffers.d_scene = redner.DScene(buffers.d_camera, buffers.d_shapes,
                                    buffers.d_materials, buffers.d_area_lights,
                                    buffers.d_envmap, pyredner.get_use_gpu(),
                                    pyredner.get_gpu_device_id())
    return buffers
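This version factors the buffer allocation out of backward(). A rough sketch of how a caller would use it, assuming grad_img is the incoming gradient image already placed on the right device (names follow the code above):

buffers = create_gradient_buffers(ctx)
redner.render(ctx.scene, ctx.options,
              redner.float_ptr(0),                            # rendered_image (unused here)
              redner.float_ptr(pyredner.data_ptr(grad_img)),  # gradient w.r.t. the rendered image
              buffers.d_scene,
              redner.float_ptr(0))                            # debug_image (unused here)
# gradients now live in buffers.d_vertices_list, buffers.d_diffuse_list, ...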
Code Example #4
File: render_pytorch.py  Project: mszarski/redner
    def backward(ctx, grad_img):
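        # Same overall structure as Code Example #1, extended with area lights
        # and an optional environment map; gradients are again returned as a
        # tuple in forward()'s argument order.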
        if not grad_img.is_contiguous():
            grad_img = grad_img.contiguous()
        scene = ctx.scene
        options = ctx.options

        d_cam_position = torch.zeros(3)
        d_cam_look = torch.zeros(3)
        d_cam_up = torch.zeros(3)
        d_ndc_to_cam = torch.zeros(3, 3)
        d_cam_to_ndc = torch.zeros(3, 3)
        d_camera = redner.DCamera(redner.float_ptr(d_cam_position.data_ptr()),
                                  redner.float_ptr(d_cam_look.data_ptr()),
                                  redner.float_ptr(d_cam_up.data_ptr()),
                                  redner.float_ptr(d_ndc_to_cam.data_ptr()),
                                  redner.float_ptr(d_cam_to_ndc.data_ptr()))
        d_vertices_list = []
        d_uvs_list = []
        d_normals_list = []
        d_shapes = []
        for shape in ctx.shapes:
            num_vertices = shape.num_vertices
            d_vertices = torch.zeros(num_vertices,
                                     3,
                                     device=pyredner.get_device())
            d_uvs = torch.zeros(
                num_vertices, 2,
                device=pyredner.get_device()) if shape.has_uvs() else None
            d_normals = torch.zeros(
                num_vertices, 3,
                device=pyredner.get_device()) if shape.has_normals() else None
            d_vertices_list.append(d_vertices)
            d_uvs_list.append(d_uvs)
            d_normals_list.append(d_normals)
            d_shapes.append(redner.DShape(\
                redner.float_ptr(d_vertices.data_ptr()),
                redner.float_ptr(d_uvs.data_ptr() if d_uvs is not None else 0),
                redner.float_ptr(d_normals.data_ptr() if d_normals is not None else 0)))

        d_diffuse_list = []
        d_specular_list = []
        d_roughness_list = []
        d_materials = []
        for material in ctx.materials:
            diffuse_size = material.get_diffuse_size()
            specular_size = material.get_specular_size()
            roughness_size = material.get_roughness_size()
            if diffuse_size[0] == 0:
                d_diffuse = torch.zeros(3, device=pyredner.get_device())
            else:
                d_diffuse = torch.zeros(diffuse_size[2],
                                        diffuse_size[1],
                                        diffuse_size[0],
                                        3,
                                        device=pyredner.get_device())
            if specular_size[0] == 0:
                d_specular = torch.zeros(3, device=pyredner.get_device())
            else:
                d_specular = torch.zeros(specular_size[2],
                                         specular_size[1],
                                         specular_size[0],
                                         3,
                                         device=pyredner.get_device())
            if roughness_size[0] == 0:
                d_roughness = torch.zeros(1, device=pyredner.get_device())
            else:
                d_roughness = torch.zeros(roughness_size[2],
                                          roughness_size[1],
                                          roughness_size[0],
                                          1,
                                          device=pyredner.get_device())
            d_diffuse_list.append(d_diffuse)
            d_specular_list.append(d_specular)
            d_roughness_list.append(d_roughness)
            d_diffuse_uv_scale = torch.zeros(2)
            d_specular_uv_scale = torch.zeros(2)
            d_roughness_uv_scale = torch.zeros(2)
            d_diffuse_tex = redner.Texture3(\
                redner.float_ptr(d_diffuse.data_ptr()),
                diffuse_size[0], diffuse_size[1], diffuse_size[2],
                redner.float_ptr(d_diffuse_uv_scale.data_ptr()))
            d_specular_tex = redner.Texture3(\
                redner.float_ptr(d_specular.data_ptr()),
                specular_size[0], specular_size[1], specular_size[2],
                redner.float_ptr(d_specular_uv_scale.data_ptr()))
            d_roughness_tex = redner.Texture1(\
                redner.float_ptr(d_roughness.data_ptr()),
                roughness_size[0], roughness_size[1], roughness_size[2],
                redner.float_ptr(d_roughness_uv_scale.data_ptr()))
            d_materials.append(redner.DMaterial(\
                d_diffuse_tex, d_specular_tex, d_roughness_tex))

        d_intensity_list = []
        d_area_lights = []
        for light in ctx.area_lights:
            d_intensity = torch.zeros(3, device=pyredner.get_device())
            d_intensity_list.append(d_intensity)
            d_area_lights.append(\
                redner.DAreaLight(redner.float_ptr(d_intensity.data_ptr())))

        d_envmap = None
        if ctx.envmap is not None:
            envmap = ctx.envmap
            size = envmap.get_size()
            d_envmap_values = \
                torch.zeros(size[2],
                            size[1],
                            size[0],
                            3,
                            device = pyredner.get_device())
            d_envmap_uv_scale = torch.zeros(2)
            d_envmap_tex = redner.Texture3(\
                redner.float_ptr(d_envmap_values.data_ptr()),
                size[0], size[1], size[2],
                redner.float_ptr(d_envmap_uv_scale.data_ptr()))
            d_world_to_env = torch.zeros(4, 4)
            d_envmap = redner.DEnvironmentMap(\
                d_envmap_tex,
                redner.float_ptr(d_world_to_env.data_ptr()))

        d_scene = redner.DScene(
            d_camera, d_shapes, d_materials, d_area_lights, d_envmap,
            pyredner.get_use_gpu(),
            pyredner.get_device().index
            if pyredner.get_device().index is not None else -1)
        if not get_use_correlated_random_number():
            # Decouple the forward/backward random numbers by adding a big prime number
            options.seed += 1000003

        options.num_samples = ctx.num_samples[1]
        start = time.time()
        redner.render(scene, options, redner.float_ptr(0),
                      redner.float_ptr(grad_img.data_ptr()), d_scene,
                      redner.float_ptr(0))
        time_elapsed = time.time() - start
        if print_timing:
            print('Backward pass, time: %.5f s' % time_elapsed)

        # # For debugging
        # # pyredner.imwrite(grad_img, 'grad_img.exr')
        # # grad_img = torch.ones(256, 256, 3, device = pyredner.get_device())
        # debug_img = torch.zeros(256, 256, 3)
        # start = time.time()
        # redner.render(scene, options,
        #               redner.float_ptr(0),
        #               redner.float_ptr(grad_img.data_ptr()),
        #               d_scene,
        #               redner.float_ptr(debug_img.data_ptr()))
        # time_elapsed = time.time() - start
        # if print_timing:
        #     print('Backward pass, time: %.5f s' % time_elapsed)
        # pyredner.imwrite(debug_img, 'debug.exr')
        # pyredner.imwrite(-debug_img, 'debug_.exr')
        # debug_img = debug_img.numpy()
        # print(np.max(debug_img))
        # print(np.unravel_index(np.argmax(debug_img), debug_img.shape))
        # print(np.min(debug_img))
        # print(np.unravel_index(np.argmin(debug_img), debug_img.shape))
        # print(np.sum(debug_img) / 3)
        # debug_max = 0.5
        # debug_min = -0.5
        # debug_img = np.clip((debug_img - debug_min) / (debug_max - debug_min), 0, 1)
        # debug_img = debug_img[:, :, 0]
        # import matplotlib.cm as cm
        # debug_img = cm.viridis(debug_img)
        # skimage.io.imsave('debug.png', np.power(debug_img, 1/2.2))
        # exit()

        ret_list = []
        ret_list.append(None)  # seed
        ret_list.append(None)  # num_shapes
        ret_list.append(None)  # num_materials
        ret_list.append(None)  # num_lights
        ret_list.append(d_cam_position)
        ret_list.append(d_cam_look)
        ret_list.append(d_cam_up)
        ret_list.append(d_ndc_to_cam)
        ret_list.append(d_cam_to_ndc)
        ret_list.append(None)  # clip near
        ret_list.append(None)  # resolution
        ret_list.append(None)  # fisheye

        num_shapes = len(ctx.shapes)
        for i in range(num_shapes):
            ret_list.append(d_vertices_list[i])
            ret_list.append(None)  # indices
            ret_list.append(d_uvs_list[i])
            ret_list.append(d_normals_list[i])
            ret_list.append(None)  # material id
            ret_list.append(None)  # light id

        num_materials = len(ctx.materials)
        for i in range(num_materials):
            ret_list.append(d_diffuse_list[i])
            ret_list.append(None)  # diffuse_uv_scale
            ret_list.append(d_specular_list[i])
            ret_list.append(None)  # specular_uv_scale
            ret_list.append(d_roughness_list[i])
            ret_list.append(None)  # roughness_uv_scale
            ret_list.append(None)  # two sided

        num_area_lights = len(ctx.area_lights)
        for i in range(num_area_lights):
            ret_list.append(None)  # shape id
            ret_list.append(d_intensity_list[i].cpu())
            ret_list.append(None)  # two sided

        if ctx.envmap is not None:
            ret_list.append(d_envmap_values)
            ret_list.append(None)  # uv_scale
            ret_list.append(None)  # env_to_world
            ret_list.append(d_world_to_env)
            ret_list.append(None)  # sample_cdf_ys
            ret_list.append(None)  # sample_cdf_xs
            ret_list.append(None)  # pdf_norm
        else:
            ret_list.append(None)
            ret_list.append(None)
            ret_list.append(None)
            ret_list.append(None)
            ret_list.append(None)
            ret_list.append(None)
            ret_list.append(None)

        ret_list.append(None)  # num samples
        ret_list.append(None)  # num bounces
        ret_list.append(None)  # channels
        ret_list.append(None)  # sampler type
        ret_list.append(None)  # use_primary_edge_sampling
        ret_list.append(None)  # use_secondary_edge_sampling

        return tuple(ret_list)
Code Example #5
File: render_tensorflow.py  Project: tstullich/redner
    def backward(grad_img):
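        # TensorFlow variant supporting both look-at and matrix cameras,
        # optional vertex colors, multi-level textures, generic and normal-map
        # channels, and an optional environment map.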
        camera = ctx.camera
        scene = ctx.scene
        options = ctx.options

        with tf.device(pyredner.get_device_name()):
            if camera.use_look_at:
                d_position = tf.zeros(3, dtype=tf.float32)
                d_look_at = tf.zeros(3, dtype=tf.float32)
                d_up = tf.zeros(3, dtype=tf.float32)
                d_cam_to_world = None
                d_world_to_cam = None
            else:
                d_position = None
                d_look_at = None
                d_up = None
                d_cam_to_world = tf.zeros([4, 4], dtype=tf.float32)
                d_world_to_cam = tf.zeros([4, 4], dtype=tf.float32)
            d_intrinsic_mat_inv = tf.zeros([3, 3], dtype=tf.float32)
            d_intrinsic_mat = tf.zeros([3, 3], dtype=tf.float32)
            if camera.use_look_at:
                d_camera = redner.DCamera(
                    redner.float_ptr(pyredner.data_ptr(d_position)),
                    redner.float_ptr(pyredner.data_ptr(d_look_at)),
                    redner.float_ptr(pyredner.data_ptr(d_up)),
                    redner.float_ptr(0),  # cam_to_world
                    redner.float_ptr(0),  # world_to_cam
                    redner.float_ptr(pyredner.data_ptr(d_intrinsic_mat_inv)),
                    redner.float_ptr(pyredner.data_ptr(d_intrinsic_mat)))
            else:
                d_camera = redner.DCamera(
                    redner.float_ptr(0), redner.float_ptr(0),
                    redner.float_ptr(0),
                    redner.float_ptr(pyredner.data_ptr(d_cam_to_world)),
                    redner.float_ptr(pyredner.data_ptr(d_world_to_cam)),
                    redner.float_ptr(pyredner.data_ptr(d_intrinsic_mat_inv)),
                    redner.float_ptr(pyredner.data_ptr(d_intrinsic_mat)))

        d_vertices_list = []
        d_uvs_list = []
        d_normals_list = []
        d_colors_list = []
        d_shapes = []
        with tf.device(pyredner.get_device_name()):
            for i, shape in enumerate(ctx.shapes):
                num_vertices = shape.num_vertices
                d_vertices = tf.zeros([num_vertices, 3], dtype=tf.float32)
                d_uvs = tf.zeros([num_vertices, 2],
                                 dtype=tf.float32) if shape.has_uvs() else None
                d_normals = tf.zeros(
                    [num_vertices, 3],
                    dtype=tf.float32) if shape.has_normals() else None
                d_colors = tf.zeros(
                    [num_vertices, 3],
                    dtype=tf.float32) if shape.has_colors() else None
                d_vertices_list.append(d_vertices)
                d_uvs_list.append(d_uvs)
                d_normals_list.append(d_normals)
                d_colors_list.append(d_colors)
                d_shapes.append(redner.DShape(\
                    redner.float_ptr(pyredner.data_ptr(d_vertices)),
                    redner.float_ptr(pyredner.data_ptr(d_uvs) if d_uvs is not None else 0),
                    redner.float_ptr(pyredner.data_ptr(d_normals) if d_normals is not None else 0),
                    redner.float_ptr(pyredner.data_ptr(d_colors) if d_colors is not None else 0)))

        d_diffuse_list = []
        d_specular_list = []
        d_roughness_list = []
        d_normal_map_list = []
        d_diffuse_uv_scale_list = []
        d_specular_uv_scale_list = []
        d_roughness_uv_scale_list = []
        d_generic_list = []
        d_generic_uv_scale_list = []
        d_normal_map_uv_scale_list = []
        d_materials = []
        with tf.device(pyredner.get_device_name()):
            for material in ctx.materials:
                if material.get_diffuse_size(0)[0] == 0:
                    d_diffuse = [tf.zeros(3, dtype=tf.float32)]
                else:
                    d_diffuse = []
                    for l in range(material.get_diffuse_levels()):
                        diffuse_size = material.get_diffuse_size(l)
                        d_diffuse.append(\
                            tf.zeros([diffuse_size[1],
                                      diffuse_size[0],
                                      3], dtype=tf.float32))

                if material.get_specular_size(0)[0] == 0:
                    d_specular = [tf.zeros(3, dtype=tf.float32)]
                else:
                    d_specular = []
                    for l in range(material.get_specular_levels()):
                        specular_size = material.get_specular_size(l)
                        d_specular.append(\
                            tf.zeros([specular_size[1],
                                      specular_size[0],
                                      3], dtype=tf.float32))

                if material.get_roughness_size(0)[0] == 0:
                    d_roughness = [tf.zeros(1, dtype=tf.float32)]
                else:
                    d_roughness = []
                    for l in range(material.get_roughness_levels()):
                        roughness_size = material.get_roughness_size(l)
                        d_roughness.append(\
                            tf.zeros([roughness_size[1],
                                      roughness_size[0],
                                      1], dtype=tf.float32))
                # HACK: tensorflow's eager mode uses a cache to store scalar
                #       constants to avoid memory copy. If we pass scalar tensors
                #       into the C++ code and modify them, we would corrupt the
                #       cache, causing incorrect results in future scalar constant
                #       creations. Thus we force tensorflow to copy by adding a zero.
                # (also see https://github.com/tensorflow/tensorflow/issues/11186
                #  for more discussion regarding copying tensors)
                if d_roughness[0].shape.num_elements() == 1:
                    d_roughness[0] = d_roughness[0] + 0

                if material.get_generic_levels() == 0:
                    d_generic = None
                else:
                    d_generic = []
                    for l in range(material.get_generic_levels()):
                        generic_size = material.get_generic_size(l)
                        d_generic.append(\
                            tf.zeros([generic_size[2],
                                      generic_size[1],
                                      generic_size[0]], dtype=tf.float32))

                if material.get_normal_map_levels() == 0:
                    d_normal_map = None
                else:
                    d_normal_map = []
                    for l in range(material.get_normal_map_levels()):
                        normal_map_size = material.get_normal_map_size(l)
                        d_normal_map.append(\
                            tf.zeros([normal_map_size[1],
                                      normal_map_size[0],
                                      3], dtype=tf.float32))

                d_diffuse_list.append(d_diffuse)
                d_specular_list.append(d_specular)
                d_roughness_list.append(d_roughness)
                d_generic_list.append(d_generic)
                d_normal_map_list.append(d_normal_map)

                d_diffuse_uv_scale = tf.zeros([2], dtype=tf.float32)
                d_specular_uv_scale = tf.zeros([2], dtype=tf.float32)
                d_roughness_uv_scale = tf.zeros([2], dtype=tf.float32)
                if d_generic is None:
                    d_generic_uv_scale = None
                else:
                    d_generic_uv_scale = tf.zeros([2], dtype=tf.float32)
                if d_normal_map is None:
                    d_normal_map_uv_scale = None
                else:
                    d_normal_map_uv_scale = tf.zeros([2], dtype=tf.float32)
                d_diffuse_uv_scale_list.append(d_diffuse_uv_scale)
                d_specular_uv_scale_list.append(d_specular_uv_scale)
                d_roughness_uv_scale_list.append(d_roughness_uv_scale)
                d_generic_uv_scale_list.append(d_generic_uv_scale)
                d_normal_map_uv_scale_list.append(d_normal_map_uv_scale)

                if len(d_diffuse[0].shape) == 1:
                    d_diffuse_tex = redner.Texture3(\
                        [redner.float_ptr(pyredner.data_ptr(d_diffuse[0]))],
                        [0],
                        [0],
                        3,
                        redner.float_ptr(pyredner.data_ptr(d_diffuse_uv_scale)))
                else:
                    d_diffuse_tex = redner.Texture3(\
                        [redner.float_ptr(pyredner.data_ptr(x)) for x in d_diffuse],
                        [x.shape[1] for x in d_diffuse],
                        [x.shape[0] for x in d_diffuse],
                        3,
                        redner.float_ptr(pyredner.data_ptr(d_diffuse_uv_scale)))

                if len(d_specular[0].shape) == 1:
                    d_specular_tex = redner.Texture3(\
                        [redner.float_ptr(pyredner.data_ptr(d_specular[0]))],
                        [0],
                        [0],
                        3,
                        redner.float_ptr(pyredner.data_ptr(d_specular_uv_scale)))
                else:
                    d_specular_tex = redner.Texture3(\
                        [redner.float_ptr(pyredner.data_ptr(x)) for x in d_specular],
                        [x.shape[1] for x in d_specular],
                        [x.shape[0] for x in d_specular],
                        3,
                        redner.float_ptr(pyredner.data_ptr(d_specular_uv_scale)))

                if len(d_roughness[0].shape) == 1:
                    d_roughness_tex = redner.Texture1(\
                        [redner.float_ptr(pyredner.data_ptr(d_roughness[0]))],
                        [0],
                        [0],
                        1,
                        redner.float_ptr(pyredner.data_ptr(d_roughness_uv_scale)))
                else:
                    d_roughness_tex = redner.Texture1(\
                        [redner.float_ptr(pyredner.data_ptr(x)) for x in d_roughness],
                        [x.shape[1] for x in d_roughness],
                        [x.shape[0] for x in d_roughness],
                        1,
                        redner.float_ptr(pyredner.data_ptr(d_roughness_uv_scale)))

                if d_generic is None:
                    d_generic_tex = redner.TextureN(\
                        [], [], [], 0, redner.float_ptr(0))
                else:
                    d_generic_tex = redner.TextureN(\
                        [redner.float_ptr(pyredner.data_ptr(x)) for x in d_generic],
                        [x.shape[1] for x in d_generic],
                        [x.shape[0] for x in d_generic],
                        d_generic[0].shape[2],
                        redner.float_ptr(pyredner.data_ptr(d_generic_uv_scale)))

                if d_normal_map is None:
                    d_normal_map = redner.Texture3(\
                        [], [], [], 0, redner.float_ptr(0))
                else:
                    d_normal_map = redner.Texture3(\
                        [redner.float_ptr(pyredner.data_ptr(x)) for x in d_normal_map],
                        [x.shape[1] for x in d_normal_map],
                        [x.shape[0] for x in d_normal_map],
                        3,
                        redner.float_ptr(pyredner.data_ptr(d_normal_map_uv_scale)))

                d_materials.append(redner.DMaterial(\
                    d_diffuse_tex, d_specular_tex, d_roughness_tex,
                    d_generic_tex, d_normal_map))

        d_intensity_list = []
        d_area_lights = []
        with tf.device(pyredner.get_device_name()):
            for light in ctx.area_lights:
                d_intensity = tf.zeros(3, dtype=tf.float32)
                d_intensity_list.append(d_intensity)
                d_area_lights.append(\
                    redner.DAreaLight(redner.float_ptr(pyredner.data_ptr(d_intensity))))

        d_envmap = None
        if ctx.envmap is not None:
            envmap = ctx.envmap
            with tf.device(pyredner.get_device_name()):
                d_envmap_values = []
                for l in range(envmap.get_levels()):
                    size = envmap.get_size(l)
                    d_envmap_values.append(\
                        tf.zeros([size[1],
                                  size[0],
                                  3], dtype=tf.float32))
                d_envmap_uv_scale = tf.zeros([2], dtype=tf.float32)
                d_world_to_env = tf.zeros([4, 4], dtype=tf.float32)
                d_envmap_tex = redner.Texture3(\
                    [redner.float_ptr(pyredner.data_ptr(x)) for x in d_envmap_values],
                    [x.shape[1] for x in d_envmap_values],
                    [x.shape[0] for x in d_envmap_values],
                    3,
                    redner.float_ptr(pyredner.data_ptr(d_envmap_uv_scale)))
                d_envmap = redner.DEnvironmentMap(
                    d_envmap_tex,
                    redner.float_ptr(pyredner.data_ptr(d_world_to_env)))

        d_scene = redner.DScene(d_camera, d_shapes, d_materials, d_area_lights,
                                d_envmap, pyredner.get_use_gpu(),
                                pyredner.get_gpu_device_id())
        if not get_use_correlated_random_number():
            # Decouple the forward/backward random numbers by adding a big prime number
            options.seed += 1000003
        start = time.time()

        options.num_samples = ctx.num_samples[1]
        with tf.device(pyredner.get_device_name()):
            grad_img = tf.identity(grad_img)
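            # Adjoint pass: the rendered_image pointer is null here; grad_img is fed
            # in as the adjoint image and redner accumulates gradients into the
            # buffers referenced by d_scene.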
            redner.render(
                scene,
                options,
                redner.float_ptr(0),  # rendered_image
                redner.float_ptr(pyredner.data_ptr(grad_img)),
                d_scene,
                redner.float_ptr(0))  # debug_image
        time_elapsed = time.time() - start

        if print_timing:
            print('Backward pass, time: %.5f s' % time_elapsed)

        # # For debugging
        # pyredner.imwrite(grad_img, 'grad_img.exr')
        # grad_img = tf.ones([256, 256, 3], dtype=tf.float32)
        # debug_img = tf.zeros([256, 256, 3], dtype=tf.float32)
        # redner.render(scene, options,
        #               redner.float_ptr(0),
        #               redner.float_ptr(pyredner.data_ptr(grad_img)),
        #               d_scene,
        #               redner.float_ptr(pyredner.data_ptr(debug_img)))
        # pyredner.imwrite(debug_img, 'debug.exr')
        # pyredner.imwrite(-debug_img, 'debug_.exr')
        # exit()

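        # Build the gradient list: one entry per forward-pass input, in the same
        # order, with None for inputs that do not receive gradients.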
        ret_list = []
        ret_list.append(None)  # seed
        ret_list.append(None)  # num_shapes
        ret_list.append(None)  # num_materials
        ret_list.append(None)  # num_lights
        if camera.use_look_at:
            ret_list.append(d_position)
            ret_list.append(d_look_at)
            ret_list.append(d_up)
            ret_list.append(None)  # cam_to_world
            ret_list.append(None)  # world_to_cam
        else:
            ret_list.append(None)  # pos
            ret_list.append(None)  # look
            ret_list.append(None)  # up
            ret_list.append(d_cam_to_world)
            ret_list.append(d_world_to_cam)
        ret_list.append(d_intrinsic_mat_inv)
        ret_list.append(d_intrinsic_mat)
        ret_list.append(None)  # clip near
        ret_list.append(None)  # resolution
        ret_list.append(None)  # camera_type

        num_shapes = len(ctx.shapes)
        for i in range(num_shapes):
            ret_list.append(d_vertices_list[i])
            ret_list.append(None)  # indices
            ret_list.append(d_uvs_list[i])
            ret_list.append(d_normals_list[i])
            ret_list.append(None)  # uv_indices
            ret_list.append(None)  # normal_indices
            ret_list.append(d_colors_list[i])
            ret_list.append(None)  # material id
            ret_list.append(None)  # light id

        num_materials = len(ctx.materials)
        for i in range(num_materials):
            ret_list.append(None)  # num_levels
            for d_diffuse in d_diffuse_list[i]:
                ret_list.append(d_diffuse)
            ret_list.append(d_diffuse_uv_scale_list[i])
            ret_list.append(None)  # num_levels
            for d_specular in d_specular_list[i]:
                ret_list.append(d_specular)
            ret_list.append(d_specular_uv_scale_list[i])
            ret_list.append(None)  # num_levels
            for d_roughness in d_roughness_list[i]:
                ret_list.append(d_roughness)
            ret_list.append(d_roughness_uv_scale_list[i])
            if d_generic_list[i] is None:
                ret_list.append(None)  # num_levels
            else:
                ret_list.append(None)  # num_levels
                for d_generic in d_generic_list[i]:
                    ret_list.append(d_generic)
                ret_list.append(d_generic_uv_scale_list[i])
            if d_normal_map_list[i] is None:
                ret_list.append(None)  # num_levels
            else:
                ret_list.append(None)  # num_levels
                for d_normal_map in d_normal_map_list[i]:
                    ret_list.append(d_normal_map)
                ret_list.append(d_normal_map_uv_scale_list[i])
            ret_list.append(None)  # compute_specular_lighting
            ret_list.append(None)  # two sided
            ret_list.append(None)  # use_vertex_color

        num_area_lights = len(ctx.area_lights)
        for i in range(num_area_lights):
            ret_list.append(None)  # shape id
            with tf.device('/device:cpu:' + str(pyredner.get_cpu_device_id())):
                ret_list.append(tf.identity(d_intensity_list[i]))
            ret_list.append(None)  # two sided

        if ctx.envmap is not None:
            ret_list.append(None)  # num_levels
            for d_values in d_envmap_values:
                ret_list.append(d_values)
            ret_list.append(d_envmap_uv_scale)
            ret_list.append(None)  # env_to_world
            with tf.device('/device:cpu:' + str(pyredner.get_cpu_device_id())):
                ret_list.append(tf.identity(d_world_to_env))
            ret_list.append(None)  # sample_cdf_ys
            ret_list.append(None)  # sample_cdf_xs
            ret_list.append(None)  # pdf_norm
        else:
            ret_list.append(None)

        ret_list.append(None)  # num samples
        ret_list.append(None)  # num bounces
        ret_list.append(None)  # num channels
        for _ in range(ctx.num_channels):
            ret_list.append(None)  # channel

        ret_list.append(None)  # sampler type
        ret_list.append(None)  # use_primary_edge_sampling
        ret_list.append(None)  # use_secondary_edge_sampling
        ret_list.append(None)  # sample_pixel_center

        return ret_list
Code example #6
    def create_gradient_buffers(ctx):
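        # Allocate zero-filled gradient buffers for every differentiable scene
        # quantity (camera, shapes, materials, area lights, environment map) and
        # wrap their data pointers in the corresponding redner derivative structs.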
        scene = ctx.scene
        options = ctx.options
        camera = ctx.camera

        buffers = Context()

        if camera.use_look_at:
            buffers.d_cam_position = torch.zeros(3, device = pyredner.get_device())
            buffers.d_cam_look = torch.zeros(3, device = pyredner.get_device())
            buffers.d_cam_up = torch.zeros(3, device = pyredner.get_device())
            buffers.d_cam_to_world = None
            buffers.d_world_to_cam = None
        else:
            buffers.d_cam_position = None
            buffers.d_cam_look = None
            buffers.d_cam_up = None
            buffers.d_cam_to_world = torch.zeros(4, 4, device = pyredner.get_device())
            buffers.d_world_to_cam = torch.zeros(4, 4, device = pyredner.get_device())
        buffers.d_intrinsic_mat_inv = torch.zeros(3, 3, device = pyredner.get_device())
        buffers.d_intrinsic_mat = torch.zeros(3, 3, device = pyredner.get_device())
        if camera.use_look_at:
            buffers.d_camera = redner.DCamera(\
                redner.float_ptr(buffers.d_cam_position.data_ptr()),
                redner.float_ptr(buffers.d_cam_look.data_ptr()),
                redner.float_ptr(buffers.d_cam_up.data_ptr()),
                redner.float_ptr(0), # cam_to_world
                redner.float_ptr(0), # world_to_cam
                redner.float_ptr(buffers.d_intrinsic_mat_inv.data_ptr()),
                redner.float_ptr(buffers.d_intrinsic_mat.data_ptr()))
        else:
            buffers.d_camera = redner.DCamera(\
                redner.float_ptr(0), # pos
                redner.float_ptr(0), # look
                redner.float_ptr(0), # up
                redner.float_ptr(buffers.d_cam_to_world.data_ptr()),
                redner.float_ptr(buffers.d_world_to_cam.data_ptr()),
                redner.float_ptr(buffers.d_intrinsic_mat_inv.data_ptr()),
                redner.float_ptr(buffers.d_intrinsic_mat.data_ptr()))
        buffers.d_vertices_list = []
        buffers.d_uvs_list = []
        buffers.d_normals_list = []
        buffers.d_colors_list = []
        buffers.d_shapes = []
        for shape in ctx.shapes:
            num_vertices = shape.num_vertices
            num_uv_vertices = shape.num_uv_vertices
            num_normal_vertices = shape.num_normal_vertices
            d_vertices = torch.zeros(num_vertices, 3,
                device = pyredner.get_device())
            d_uvs = torch.zeros(num_uv_vertices, 2,
                device = pyredner.get_device()) if shape.has_uvs() else None
            d_normals = torch.zeros(num_normal_vertices, 3,
                device = pyredner.get_device()) if shape.has_normals() else None
            d_colors = torch.zeros(num_vertices, 3,
                device = pyredner.get_device()) if shape.has_colors() else None
            buffers.d_vertices_list.append(d_vertices)
            buffers.d_uvs_list.append(d_uvs)
            buffers.d_normals_list.append(d_normals)
            buffers.d_colors_list.append(d_colors)
            buffers.d_shapes.append(redner.DShape(\
                redner.float_ptr(d_vertices.data_ptr()),
                redner.float_ptr(d_uvs.data_ptr() if d_uvs is not None else 0),
                redner.float_ptr(d_normals.data_ptr() if d_normals is not None else 0),
                redner.float_ptr(d_colors.data_ptr() if d_colors is not None else 0)))

        buffers.d_diffuse_list = []
        buffers.d_diffuse_uv_scale_list = []
        buffers.d_specular_list = []
        buffers.d_specular_uv_scale_list = []
        buffers.d_roughness_list = []
        buffers.d_roughness_uv_scale_list = []
        buffers.d_generic_list = []
        buffers.d_generic_uv_scale_list = []
        buffers.d_normal_map_list = []
        buffers.d_normal_map_uv_scale_list = []
        buffers.d_materials = []
        for material in ctx.materials:
            if material.get_diffuse_size(0)[0] == 0:
                d_diffuse = [torch.zeros(3, device = pyredner.get_device())]
            else:
                d_diffuse = []
                for l in range(material.get_diffuse_levels()):
                    diffuse_size = material.get_diffuse_size(l)
                    d_diffuse.append(\
                        torch.zeros(diffuse_size[1],
                                    diffuse_size[0],
                                    3, device = pyredner.get_device()))

            if material.get_specular_size(0)[0] == 0:
                d_specular = [torch.zeros(3, device = pyredner.get_device())]
            else:
                d_specular = []
                for l in range(material.get_specular_levels()):
                    specular_size = material.get_specular_size(l)
                    d_specular.append(\
                        torch.zeros(specular_size[1],
                                    specular_size[0],
                                    3, device = pyredner.get_device()))

            if material.get_roughness_size(0)[0] == 0:
                d_roughness = [torch.zeros(1, device = pyredner.get_device())]
            else:
                d_roughness = []
                for l in range(material.get_roughness_levels()):
                    roughness_size = material.get_roughness_size(l)
                    d_roughness.append(\
                        torch.zeros(roughness_size[1],
                                    roughness_size[0],
                                    1, device = pyredner.get_device()))

            if material.get_generic_levels() == 0:
                d_generic = None
            else:
                d_generic = []
                for l in range(material.get_generic_levels()):
                    generic_size = material.get_generic_size(l)
                    d_generic.append(\
                        torch.zeros(generic_size[2],
                                    generic_size[1],
                                    generic_size[0], device = pyredner.get_device()))

            if material.get_normal_map_levels() == 0:
                d_normal_map = None
            else:
                d_normal_map = []
                for l in range(material.get_normal_map_levels()):
                    normal_map_size = material.get_normal_map_size(l)
                    d_normal_map.append(\
                        torch.zeros(normal_map_size[1],
                                    normal_map_size[0],
                                    3, device = pyredner.get_device()))

            buffers.d_diffuse_list.append(d_diffuse)
            buffers.d_specular_list.append(d_specular)
            buffers.d_roughness_list.append(d_roughness)
            buffers.d_generic_list.append(d_generic)
            buffers.d_normal_map_list.append(d_normal_map)
            d_diffuse_uv_scale = torch.zeros(2, device = pyredner.get_device())
            d_specular_uv_scale = torch.zeros(2, device = pyredner.get_device())
            d_roughness_uv_scale = torch.zeros(2, device = pyredner.get_device())
            buffers.d_diffuse_uv_scale_list.append(d_diffuse_uv_scale)
            buffers.d_specular_uv_scale_list.append(d_specular_uv_scale)
            buffers.d_roughness_uv_scale_list.append(d_roughness_uv_scale)
            if d_generic is None:
                d_generic_uv_scale = None
            else:
                d_generic_uv_scale = torch.zeros(2, device = pyredner.get_device())
            if d_normal_map is None:
                d_normal_map_uv_scale = None
            else:
                d_normal_map_uv_scale = torch.zeros(2, device = pyredner.get_device())

            buffers.d_generic_uv_scale_list.append(d_generic_uv_scale)
            buffers.d_normal_map_uv_scale_list.append(d_normal_map_uv_scale)
            if d_diffuse[0].dim() == 1:
                d_diffuse_tex = redner.Texture3(\
                    [redner.float_ptr(d_diffuse[0].data_ptr())],
                    [0],
                    [0],
                    3,
                    redner.float_ptr(d_diffuse_uv_scale.data_ptr()))
            else:
                d_diffuse_tex = redner.Texture3(\
                    [redner.float_ptr(x.data_ptr()) for x in d_diffuse],
                    [x.shape[1] for x in d_diffuse],
                    [x.shape[0] for x in d_diffuse],
                    3,
                    redner.float_ptr(d_diffuse_uv_scale.data_ptr()))

            if d_specular[0].dim() == 1:
                d_specular_tex = redner.Texture3(\
                    [redner.float_ptr(d_specular[0].data_ptr())],
                    [0],
                    [0],
                    3,
                    redner.float_ptr(d_specular_uv_scale.data_ptr()))
            else:
                d_specular_tex = redner.Texture3(\
                    [redner.float_ptr(x.data_ptr()) for x in d_specular],
                    [x.shape[1] for x in d_specular],
                    [x.shape[0] for x in d_specular],
                    3,
                    redner.float_ptr(d_specular_uv_scale.data_ptr()))

            if d_roughness[0].dim() == 1:
                d_roughness_tex = redner.Texture1(\
                    [redner.float_ptr(d_roughness[0].data_ptr())],
                    [0],
                    [0],
                    1,
                    redner.float_ptr(d_roughness_uv_scale.data_ptr()))
            else:
                d_roughness_tex = redner.Texture1(\
                    [redner.float_ptr(x.data_ptr()) for x in d_roughness],
                    [x.shape[1] for x in d_roughness],
                    [x.shape[0] for x in d_roughness],
                    1,
                    redner.float_ptr(d_roughness_uv_scale.data_ptr()))

            if d_generic is None:
                d_generic_tex = redner.TextureN(\
                    [], [], [], 0, redner.float_ptr(0))
            else:
                d_generic_tex = redner.TextureN(\
                    [redner.float_ptr(x.data_ptr()) for x in d_generic],
                    [x.shape[1] for x in d_generic],
                    [x.shape[0] for x in d_generic],
                    d_generic[0].shape[2],
                    redner.float_ptr(d_generic_uv_scale.data_ptr()))

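            # Note: d_normal_map is rebound below; the list of gradient tensors
            # (or None) is replaced by the redner.Texture3 wrapper that is passed
            # to DMaterial.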
            if d_normal_map is None:
                d_normal_map = redner.Texture3(\
                    [], [], [], 0, redner.float_ptr(0))
            else:
                d_normal_map = redner.Texture3(\
                    [redner.float_ptr(x.data_ptr()) for x in d_normal_map],
                    [x.shape[1] for x in d_normal_map],
                    [x.shape[0] for x in d_normal_map],
                    3,
                    redner.float_ptr(d_normal_map_uv_scale.data_ptr()))
            buffers.d_materials.append(redner.DMaterial(\
                d_diffuse_tex, d_specular_tex, d_roughness_tex,
                d_generic_tex, d_normal_map))

        buffers.d_intensity_list = []
        buffers.d_area_lights = []
        for light in ctx.area_lights:
            d_intensity = torch.zeros(3, device = pyredner.get_device())
            buffers.d_intensity_list.append(d_intensity)
            buffers.d_area_lights.append(\
                redner.DAreaLight(redner.float_ptr(d_intensity.data_ptr())))

        buffers.d_envmap = None
        if ctx.envmap is not None:
            envmap = ctx.envmap
            buffers.d_envmap_values = []
            for l in range(envmap.get_levels()):
                size = envmap.get_size(l)
                buffers.d_envmap_values.append(\
                    torch.zeros(size[1],
                                size[0],
                                3, device = pyredner.get_device()))
            buffers.d_envmap_uv_scale = torch.zeros(2, device = pyredner.get_device())
            d_envmap_tex = redner.Texture3(\
                [redner.float_ptr(x.data_ptr()) for x in buffers.d_envmap_values],
                [x.shape[1] for x in buffers.d_envmap_values],
                [x.shape[0] for x in buffers.d_envmap_values],
                3,
                redner.float_ptr(buffers.d_envmap_uv_scale.data_ptr()))
            buffers.d_world_to_env = torch.zeros(4, 4, device = pyredner.get_device())
            buffers.d_envmap = redner.DEnvironmentMap(\
                d_envmap_tex,
                redner.float_ptr(buffers.d_world_to_env.data_ptr()))

        buffers.d_scene = redner.DScene(buffers.d_camera,
                                        buffers.d_shapes,
                                        buffers.d_materials,
                                        buffers.d_area_lights,
                                        buffers.d_envmap,
                                        pyredner.get_use_gpu(),
                                        pyredner.get_device().index if pyredner.get_device().index is not None else -1)
        return buffers
Code example #7
    def backward(ctx,
                 grad_img):
        if not grad_img.is_contiguous():
            grad_img = grad_img.contiguous()
        scene = ctx.scene
        options = ctx.options

        d_cam_to_world = torch.zeros(4, 4)
        d_world_to_cam = torch.zeros(4, 4)
        d_ndc_to_cam = torch.zeros(3, 3)
        d_cam_to_ndc = torch.zeros(3, 3)
        d_camera = redner.DCamera(redner.float_ptr(d_cam_to_world.data_ptr()),
                                  redner.float_ptr(d_world_to_cam.data_ptr()),
                                  redner.float_ptr(d_ndc_to_cam.data_ptr()),
                                  redner.float_ptr(d_cam_to_ndc.data_ptr()))
        d_vertices_list = []
        d_uvs_list = []
        d_normals_list = []
        d_shapes = []
        for shape in ctx.shapes:
            num_vertices = shape.num_vertices
            d_vertices = torch.zeros(num_vertices, 3,
                device = pyredner.get_device())
            d_uvs = torch.zeros(num_vertices, 2,
                device = pyredner.get_device()) if shape.has_uvs() else None
            d_normals = torch.zeros(num_vertices, 3,
                device = pyredner.get_device()) if shape.has_normals() else None
            d_vertices_list.append(d_vertices)
            d_uvs_list.append(d_uvs)
            d_normals_list.append(d_normals)
            d_shapes.append(redner.DShape(\
                redner.float_ptr(d_vertices.data_ptr()),
                redner.float_ptr(d_uvs.data_ptr() if d_uvs is not None else 0),
                redner.float_ptr(d_normals.data_ptr() if d_normals is not None else 0)))

        d_diffuse_list = []
        d_specular_list = []
        d_roughness_list = []
        d_materials = []
        for material in ctx.materials:
            diffuse_size = material.get_diffuse_size()
            specular_size = material.get_specular_size()
            roughness_size = material.get_roughness_size()
            if diffuse_size[0] == 0:
                d_diffuse = torch.zeros(3, device = pyredner.get_device())
            else:
                d_diffuse = torch.zeros(diffuse_size[2],
                                        diffuse_size[1],
                                        diffuse_size[0],
                                        3, device = pyredner.get_device())
            if specular_size[0] == 0:
                d_specular = torch.zeros(3, device = pyredner.get_device())
            else:
                d_specular = torch.zeros(specular_size[2],
                                         specular_size[1],
                                         specular_size[0],
                                         3, device = pyredner.get_device())
            if roughness_size[0] == 0:
                d_roughness = torch.zeros(1, device = pyredner.get_device())
            else:
                d_roughness = torch.zeros(roughness_size[2],
                                          roughness_size[1],
                                          roughness_size[0],
                                          1, device = pyredner.get_device())
            d_diffuse_list.append(d_diffuse)
            d_specular_list.append(d_specular)
            d_roughness_list.append(d_roughness)
            d_diffuse_uv_scale = torch.zeros(2)
            d_specular_uv_scale = torch.zeros(2)
            d_roughness_uv_scale = torch.zeros(2)
            d_diffuse_tex = redner.Texture3(\
                redner.float_ptr(d_diffuse.data_ptr()),
                diffuse_size[0], diffuse_size[1], diffuse_size[2],
                redner.float_ptr(d_diffuse_uv_scale.data_ptr()))
            d_specular_tex = redner.Texture3(\
                redner.float_ptr(d_specular.data_ptr()),
                specular_size[0], specular_size[1], specular_size[2],
                redner.float_ptr(d_specular_uv_scale.data_ptr()))
            d_roughness_tex = redner.Texture1(\
                redner.float_ptr(d_roughness.data_ptr()),
                roughness_size[0], roughness_size[1], roughness_size[2],
                redner.float_ptr(d_roughness_uv_scale.data_ptr()))
            d_materials.append(redner.DMaterial(\
                d_diffuse_tex, d_specular_tex, d_roughness_tex))

        d_intensity_list = []
        d_area_lights = []
        for light in ctx.area_lights:
            d_intensity = torch.zeros(3, device = pyredner.get_device())
            d_intensity_list.append(d_intensity)
            d_area_lights.append(\
                redner.DAreaLight(redner.float_ptr(d_intensity.data_ptr())))

        d_envmap = None
        if ctx.envmap is not None:
            envmap = ctx.envmap
            size = envmap.get_size()
            d_envmap_values = \
                torch.zeros(size[2],
                            size[1],
                            size[0],
                            3,
                            device = pyredner.get_device())
            d_envmap_uv_scale = torch.zeros(2)
            d_envmap_tex = redner.Texture3(\
                redner.float_ptr(d_envmap_values.data_ptr()),
                size[0], size[1], size[2],
                redner.float_ptr(d_envmap_uv_scale.data_ptr()))
            d_world_to_env = torch.zeros(4, 4)
            d_envmap = redner.DEnvironmentMap(\
                d_envmap_tex,
                redner.float_ptr(d_world_to_env.data_ptr()))

        d_scene = redner.DScene(d_camera,
                                d_shapes,
                                d_materials,
                                d_area_lights,
                                d_envmap,
                                pyredner.get_use_gpu())
        if not get_use_correlated_random_number():
            # Decouple the forward/backward random numbers by adding a big prime number
            options.seed += 1000003
        start = time.time()
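        # Positional arguments: rendered_image (unused, null), d_rendered_image
        # (the incoming grad_img), d_scene (gradient outputs), debug_image (unused).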
        redner.render(scene, options,
                      redner.float_ptr(0),
                      redner.float_ptr(grad_img.data_ptr()),
                      d_scene,
                      redner.float_ptr(0))
        time_elapsed = time.time() - start
        if print_timing:
            print('Backward pass, time: %.5f s' % time_elapsed)

        # # For debugging
        # pyredner.imwrite(grad_img, 'grad_img.exr')
        # grad_img = torch.ones(256, 256, 3)
        # debug_img = torch.zeros(256, 256, 3)
        # redner.render(scene, options,
        #               redner.float_ptr(0),
        #               redner.float_ptr(grad_img.data_ptr()),
        #               d_scene,
        #               redner.float_ptr(debug_img.data_ptr()))
        # pyredner.imwrite(debug_img, 'debug.exr')
        # pyredner.imwrite(-debug_img, 'debug_.exr')
        # exit()

        ret_list = []
        ret_list.append(None) # seed
        ret_list.append(None) # num_shapes
        ret_list.append(None) # num_materials
        ret_list.append(None) # num_lights
        ret_list.append(d_cam_to_world)
        ret_list.append(d_world_to_cam)
        ret_list.append(d_ndc_to_cam)
        ret_list.append(d_cam_to_ndc)
        ret_list.append(None) # clip near
        ret_list.append(None) # resolution
        ret_list.append(None) # fisheye

        num_shapes = len(ctx.shapes)
        for i in range(num_shapes):
            ret_list.append(d_vertices_list[i])
            ret_list.append(None) # indices
            ret_list.append(d_uvs_list[i])
            ret_list.append(d_normals_list[i])
            ret_list.append(None) # material id
            ret_list.append(None) # light id

        num_materials = len(ctx.materials)
        for i in range(num_materials):
            ret_list.append(d_diffuse_list[i])
            ret_list.append(None) # diffuse_uv_scale
            ret_list.append(d_specular_list[i])
            ret_list.append(None) # specular_uv_scale
            ret_list.append(d_roughness_list[i])
            ret_list.append(None) # roughness_uv_scale
            ret_list.append(None) # two sided

        num_area_lights = len(ctx.area_lights)
        for i in range(num_area_lights):
            ret_list.append(None) # shape id
            ret_list.append(d_intensity_list[i].cpu())
            ret_list.append(None) # two sided
            ret_list.append(None)

        if ctx.envmap is not None:
            ret_list.append(d_envmap_values)
            ret_list.append(None) # uv_scale
            ret_list.append(None) # env_to_world
            ret_list.append(d_world_to_env)
            ret_list.append(None) # sample_cdf_ys
            ret_list.append(None) # sample_cdf_xs
            ret_list.append(None) # pdf_norm
        else:
            ret_list.append(None)
            ret_list.append(None)
            ret_list.append(None)
            ret_list.append(None)
            ret_list.append(None)
            ret_list.append(None)
            ret_list.append(None)
        
        ret_list.append(None) # num samples
        ret_list.append(None) # num bounces
        ret_list.append(None) # channels

        return tuple(ret_list)
Code example #8
File: render_pytorch.py  Project: tstullich/redner
    def backward(ctx, grad_img):
        if not grad_img.is_contiguous():
            grad_img = grad_img.contiguous()
        scene = ctx.scene
        options = ctx.options
        camera = ctx.camera

        if camera.use_look_at:
            d_cam_position = torch.zeros(3, device=pyredner.get_device())
            d_cam_look = torch.zeros(3, device=pyredner.get_device())
            d_cam_up = torch.zeros(3, device=pyredner.get_device())
            d_cam_to_world = None
            d_world_to_cam = None
        else:
            d_cam_position = None
            d_cam_look = None
            d_cam_up = None
            d_cam_to_world = torch.zeros(4, 4, device=pyredner.get_device())
            d_world_to_cam = torch.zeros(4, 4, device=pyredner.get_device())
        d_intrinsic_mat_inv = torch.zeros(3, 3, device=pyredner.get_device())
        d_intrinsic_mat = torch.zeros(3, 3, device=pyredner.get_device())
        if camera.use_look_at:
            d_camera = redner.DCamera(
                redner.float_ptr(d_cam_position.data_ptr()),
                redner.float_ptr(d_cam_look.data_ptr()),
                redner.float_ptr(d_cam_up.data_ptr()),
                redner.float_ptr(0),  # cam_to_world
                redner.float_ptr(0),  # world_to_cam
                redner.float_ptr(d_intrinsic_mat_inv.data_ptr()),
                redner.float_ptr(d_intrinsic_mat.data_ptr()))
        else:
            d_camera = redner.DCamera(
                redner.float_ptr(0),  # pos
                redner.float_ptr(0),  # look
                redner.float_ptr(0),  # up
                redner.float_ptr(d_cam_to_world.data_ptr()),
                redner.float_ptr(d_world_to_cam.data_ptr()),
                redner.float_ptr(d_intrinsic_mat_inv.data_ptr()),
                redner.float_ptr(d_intrinsic_mat.data_ptr()))
        d_vertices_list = []
        d_uvs_list = []
        d_normals_list = []
        d_colors_list = []
        d_shapes = []
        for shape in ctx.shapes:
            num_vertices = shape.num_vertices
            num_uv_vertices = shape.num_uv_vertices
            num_normal_vertices = shape.num_normal_vertices
            d_vertices = torch.zeros(num_vertices,
                                     3,
                                     device=pyredner.get_device())
            d_uvs = torch.zeros(
                num_uv_vertices, 2,
                device=pyredner.get_device()) if shape.has_uvs() else None
            d_normals = torch.zeros(
                num_normal_vertices, 3,
                device=pyredner.get_device()) if shape.has_normals() else None
            d_colors = torch.zeros(
                num_vertices, 3,
                device=pyredner.get_device()) if shape.has_colors() else None
            d_vertices_list.append(d_vertices)
            d_uvs_list.append(d_uvs)
            d_normals_list.append(d_normals)
            d_colors_list.append(d_colors)
            d_shapes.append(redner.DShape(\
                redner.float_ptr(d_vertices.data_ptr()),
                redner.float_ptr(d_uvs.data_ptr() if d_uvs is not None else 0),
                redner.float_ptr(d_normals.data_ptr() if d_normals is not None else 0),
                redner.float_ptr(d_colors.data_ptr() if d_colors is not None else 0)))

        d_diffuse_list = []
        d_diffuse_uv_scale_list = []
        d_specular_list = []
        d_specular_uv_scale_list = []
        d_roughness_list = []
        d_roughness_uv_scale_list = []
        d_generic_list = []
        d_generic_uv_scale_list = []
        d_normal_map_list = []
        d_normal_map_uv_scale_list = []
        d_materials = []
        for material in ctx.materials:
            if material.get_diffuse_size(0)[0] == 0:
                d_diffuse = [torch.zeros(3, device=pyredner.get_device())]
            else:
                d_diffuse = []
                for l in range(material.get_diffuse_levels()):
                    diffuse_size = material.get_diffuse_size(l)
                    d_diffuse.append(\
                        torch.zeros(diffuse_size[1],
                                    diffuse_size[0],
                                    3, device = pyredner.get_device()))

            if material.get_specular_size(0)[0] == 0:
                d_specular = [torch.zeros(3, device=pyredner.get_device())]
            else:
                d_specular = []
                for l in range(material.get_specular_levels()):
                    specular_size = material.get_specular_size(l)
                    d_specular.append(\
                        torch.zeros(specular_size[1],
                                    specular_size[0],
                                    3, device = pyredner.get_device()))

            if material.get_roughness_size(0)[0] == 0:
                d_roughness = [torch.zeros(1, device=pyredner.get_device())]
            else:
                d_roughness = []
                for l in range(material.get_roughness_levels()):
                    roughness_size = material.get_roughness_size(l)
                    d_roughness.append(\
                        torch.zeros(roughness_size[1],
                                    roughness_size[0],
                                    1, device = pyredner.get_device()))

            if material.get_generic_levels() == 0:
                d_generic = None
            else:
                d_generic = []
                for l in range(material.get_generic_levels()):
                    generic_size = material.get_generic_size(l)
                    d_generic.append(\
                        torch.zeros(generic_size[2],
                                    generic_size[1],
                                    generic_size[0], device = pyredner.get_device()))

            if material.get_normal_map_levels() == 0:
                d_normal_map = None
            else:
                d_normal_map = []
                for l in range(material.get_normal_map_levels()):
                    normal_map_size = material.get_normal_map_size(l)
                    d_normal_map.append(\
                        torch.zeros(normal_map_size[1],
                                    normal_map_size[0],
                                    3, device = pyredner.get_device()))

            d_diffuse_list.append(d_diffuse)
            d_specular_list.append(d_specular)
            d_roughness_list.append(d_roughness)
            d_generic_list.append(d_generic)
            d_normal_map_list.append(d_normal_map)
            d_diffuse_uv_scale = torch.zeros(2, device=pyredner.get_device())
            d_specular_uv_scale = torch.zeros(2, device=pyredner.get_device())
            d_roughness_uv_scale = torch.zeros(2, device=pyredner.get_device())
            d_diffuse_uv_scale_list.append(d_diffuse_uv_scale)
            d_specular_uv_scale_list.append(d_specular_uv_scale)
            d_roughness_uv_scale_list.append(d_roughness_uv_scale)
            if d_generic is None:
                d_generic_uv_scale = None
            else:
                d_generic_uv_scale = torch.zeros(2,
                                                 device=pyredner.get_device())
            if d_normal_map is None:
                d_normal_map_uv_scale = None
            else:
                d_normal_map_uv_scale = torch.zeros(
                    2, device=pyredner.get_device())

            d_generic_uv_scale_list.append(d_generic_uv_scale)
            d_normal_map_uv_scale_list.append(d_normal_map_uv_scale)
            if d_diffuse[0].dim() == 1:
                d_diffuse_tex = redner.Texture3(\
                    [redner.float_ptr(d_diffuse[0].data_ptr())],
                    [0],
                    [0],
                    3,
                    redner.float_ptr(d_diffuse_uv_scale.data_ptr()))
            else:
                d_diffuse_tex = redner.Texture3(\
                    [redner.float_ptr(x.data_ptr()) for x in d_diffuse],
                    [x.shape[1] for x in d_diffuse],
                    [x.shape[0] for x in d_diffuse],
                    3,
                    redner.float_ptr(d_diffuse_uv_scale.data_ptr()))

            if d_specular[0].dim() == 1:
                d_specular_tex = redner.Texture3(\
                    [redner.float_ptr(d_specular[0].data_ptr())],
                    [0],
                    [0],
                    3,
                    redner.float_ptr(d_specular_uv_scale.data_ptr()))
            else:
                d_specular_tex = redner.Texture3(\
                    [redner.float_ptr(x.data_ptr()) for x in d_specular],
                    [x.shape[1] for x in d_specular],
                    [x.shape[0] for x in d_specular],
                    3,
                    redner.float_ptr(d_specular_uv_scale.data_ptr()))

            if d_roughness[0].dim() == 1:
                d_roughness_tex = redner.Texture1(\
                    [redner.float_ptr(d_roughness[0].data_ptr())],
                    [0],
                    [0],
                    1,
                    redner.float_ptr(d_roughness_uv_scale.data_ptr()))
            else:
                d_roughness_tex = redner.Texture1(\
                    [redner.float_ptr(x.data_ptr()) for x in d_roughness],
                    [x.shape[1] for x in d_roughness],
                    [x.shape[0] for x in d_roughness],
                    1,
                    redner.float_ptr(d_roughness_uv_scale.data_ptr()))

            if d_generic is None:
                d_generic_tex = redner.TextureN(\
                    [], [], [], 0, redner.float_ptr(0))
            else:
                d_generic_tex = redner.TextureN(\
                    [redner.float_ptr(x.data_ptr()) for x in d_generic],
                    [x.shape[1] for x in d_generic],
                    [x.shape[0] for x in d_generic],
                    d_generic[0].shape[2],
                    redner.float_ptr(d_generic_uv_scale.data_ptr()))

            if d_normal_map is None:
                d_normal_map = redner.Texture3(\
                    [], [], [], 0, redner.float_ptr(0))
            else:
                d_normal_map = redner.Texture3(\
                    [redner.float_ptr(x.data_ptr()) for x in d_normal_map],
                    [x.shape[1] for x in d_normal_map],
                    [x.shape[0] for x in d_normal_map],
                    3,
                    redner.float_ptr(d_normal_map_uv_scale.data_ptr()))
            d_materials.append(redner.DMaterial(\
                d_diffuse_tex, d_specular_tex, d_roughness_tex,
                d_generic_tex, d_normal_map))

        d_intensity_list = []
        d_area_lights = []
        for light in ctx.area_lights:
            d_intensity = torch.zeros(3, device=pyredner.get_device())
            d_intensity_list.append(d_intensity)
            d_area_lights.append(\
                redner.DAreaLight(redner.float_ptr(d_intensity.data_ptr())))

        d_envmap = None
        if ctx.envmap is not None:
            envmap = ctx.envmap
            d_envmap_values = []
            for l in range(envmap.get_levels()):
                size = envmap.get_size(l)
                d_envmap_values.append(\
                    torch.zeros(size[1],
                                size[0],
                                3, device = pyredner.get_device()))
            d_envmap_uv_scale = torch.zeros(2, device=pyredner.get_device())
            d_envmap_tex = redner.Texture3(\
                [redner.float_ptr(x.data_ptr()) for x in d_envmap_values],
                [x.shape[1] for x in d_envmap_values],
                [x.shape[0] for x in d_envmap_values],
                3,
                redner.float_ptr(d_envmap_uv_scale.data_ptr()))
            d_world_to_env = torch.zeros(4, 4, device=pyredner.get_device())
            d_envmap = redner.DEnvironmentMap(\
                d_envmap_tex,
                redner.float_ptr(d_world_to_env.data_ptr()))

        d_scene = redner.DScene(
            d_camera, d_shapes, d_materials, d_area_lights, d_envmap,
            pyredner.get_use_gpu(),
            pyredner.get_device().index
            if pyredner.get_device().index is not None else -1)
        if not get_use_correlated_random_number():
            # Decouple the forward/backward random numbers by adding a big prime number
            options.seed += 1000003

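        # ctx.num_samples holds (forward, backward) sample counts; use the
        # backward count for the adjoint pass.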
        options.num_samples = ctx.num_samples[1]
        start = time.time()
        redner.render(scene, options, redner.float_ptr(0),
                      redner.float_ptr(grad_img.data_ptr()), d_scene,
                      redner.float_ptr(0))
        time_elapsed = time.time() - start
        if print_timing:
            print('Backward pass, time: %.5f s' % time_elapsed)

        # For debugging
        # pyredner.imwrite(grad_img, 'grad_img.exr')
        # grad_img = torch.ones(256, 256, 3, device = pyredner.get_device())
        # debug_img = torch.zeros(256, 256, 3)
        # start = time.time()
        # redner.render(scene, options,
        #               redner.float_ptr(0),
        #               redner.float_ptr(grad_img.data_ptr()),
        #               d_scene,
        #               redner.float_ptr(debug_img.data_ptr()))
        # time_elapsed = time.time() - start
        # if print_timing:
        #     print('Backward pass, time: %.5f s' % time_elapsed)
        # debug_img = debug_img[:, :, 0]
        # pyredner.imwrite(debug_img, 'debug.exr')
        # pyredner.imwrite(-debug_img, 'debug_.exr')
        # debug_img = debug_img.numpy()
        # print(np.max(debug_img))
        # print(np.unravel_index(np.argmax(debug_img), debug_img.shape))
        # print(np.min(debug_img))
        # print(np.unravel_index(np.argmin(debug_img), debug_img.shape))
        # print(np.sum(debug_img) / 3)
        # debug_max = 0.5
        # debug_min = -0.5
        # debug_img = np.clip((debug_img - debug_min) / (debug_max - debug_min), 0, 1)
        # # debug_img = debug_img[:, :, 0]
        # import matplotlib.cm as cm
        # debug_img = cm.viridis(debug_img)
        # skimage.io.imsave('debug.png', np.power(debug_img, 1/2.2))
        # exit()

        ret_list = []
        ret_list.append(None)  # seed
        ret_list.append(None)  # num_shapes
        ret_list.append(None)  # num_materials
        ret_list.append(None)  # num_lights
        if camera.use_look_at:
            ret_list.append(d_cam_position.cpu())
            ret_list.append(d_cam_look.cpu())
            ret_list.append(d_cam_up.cpu())
            ret_list.append(None)  # cam_to_world
            ret_list.append(None)  # world_to_cam
        else:
            ret_list.append(None)  # pos
            ret_list.append(None)  # look
            ret_list.append(None)  # up
            ret_list.append(d_cam_to_world.cpu())
            ret_list.append(d_world_to_cam.cpu())
        ret_list.append(d_intrinsic_mat_inv.cpu())
        ret_list.append(d_intrinsic_mat.cpu())
        ret_list.append(None)  # clip near
        ret_list.append(None)  # resolution
        ret_list.append(None)  # camera_type

        num_shapes = len(ctx.shapes)
        for i in range(num_shapes):
            ret_list.append(d_vertices_list[i])
            ret_list.append(None)  # indices
            ret_list.append(d_uvs_list[i])
            ret_list.append(d_normals_list[i])
            ret_list.append(None)  # uv_indices
            ret_list.append(None)  # normal_indices
            ret_list.append(d_colors_list[i])
            ret_list.append(None)  # material id
            ret_list.append(None)  # light id

        num_materials = len(ctx.materials)
        for i in range(num_materials):
            ret_list.append(None)  # num_levels
            for d_diffuse in d_diffuse_list[i]:
                ret_list.append(d_diffuse)
            ret_list.append(d_diffuse_uv_scale_list[i])
            ret_list.append(None)  # num_levels
            for d_specular in d_specular_list[i]:
                ret_list.append(d_specular)
            ret_list.append(d_specular_uv_scale_list[i])
            ret_list.append(None)  # num_levels
            for d_roughness in d_roughness_list[i]:
                ret_list.append(d_roughness)
            ret_list.append(d_roughness_uv_scale_list[i])
            if d_generic_list[i] is None:
                ret_list.append(None)  # num_levels
            else:
                ret_list.append(None)  # num_levels
                for d_generic in d_generic_list[i]:
                    ret_list.append(d_generic)
                ret_list.append(d_generic_uv_scale_list[i])
            if d_normal_map_list[i] is None:
                ret_list.append(None)  # num_levels
            else:
                ret_list.append(None)  # num_levels
                for d_normal_map in d_normal_map_list[i]:
                    ret_list.append(d_normal_map)
                ret_list.append(d_normal_map_uv_scale_list[i])
            ret_list.append(None)  # compute_specular_lighting
            ret_list.append(None)  # two sided
            ret_list.append(None)  # use_vertex_color

        num_area_lights = len(ctx.area_lights)
        for i in range(num_area_lights):
            ret_list.append(None)  # shape id
            ret_list.append(d_intensity_list[i].cpu())
            ret_list.append(None)  # two sided

        if ctx.envmap is not None:
            ret_list.append(None)  # num_levels
            for d_values in d_envmap_values:
                ret_list.append(d_values)
            ret_list.append(d_envmap_uv_scale)
            ret_list.append(None)  # env_to_world
            ret_list.append(d_world_to_env.cpu())
            ret_list.append(None)  # sample_cdf_ys
            ret_list.append(None)  # sample_cdf_xs
            ret_list.append(None)  # pdf_norm
        else:
            ret_list.append(None)

        ret_list.append(None)  # num samples
        ret_list.append(None)  # num bounces
        ret_list.append(None)  # channels
        ret_list.append(None)  # sampler type
        ret_list.append(None)  # use_primary_edge_sampling
        ret_list.append(None)  # use_secondary_edge_sampling
        ret_list.append(None)  # sample_pixel_center

        return tuple(ret_list)
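
A minimal usage sketch, for context: the backward passes above are not called directly; PyTorch's autograd invokes them when a loss on the rendered image is backpropagated. The sketch below assumes the surrounding RenderFunction autograd class and pyredner's serialize_scene helper; scene, target, and the seed value are placeholders, and exact argument names may differ between redner versions.

    # Hypothetical driver code (not part of the examples above).
    import pyredner

    # Flatten the scene into the positional arguments RenderFunction expects.
    scene_args = pyredner.RenderFunction.serialize_scene(
        scene=scene, num_samples=16, max_bounces=1)
    render = pyredner.RenderFunction.apply
    img = render(0, *scene_args)           # forward pass (seed = 0)
    loss = (img - target).pow(2).sum()     # any scalar image loss
    loss.backward()                        # triggers backward(ctx, grad_img)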