    def test_top_p_dist_warper(self):
        input_ids = None
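        # the top-p warper never reads input_ids (this call would fail otherwise), so None is fine here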
        vocab_size = 10
        batch_size = 2

        # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
        dist = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]]))

        top_p_warp = FlaxTopPLogitsWarper(0.7)
        filtered_dist = np.exp(top_p_warp(input_ids, dist, cur_len=None))

        # dist should be filtered to keep the minimum number of highest-probability tokens
        # whose cumulative sum is >= top_p (0.7); exp(-inf) => 0
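        # row 0: sorted probs 0.5 + 0.3 = 0.8 >= 0.7, so only 0.5 and 0.3 survive
        # row 1: sorted probs 0.3 + 0.3 = 0.6 < 0.7, so the third token (0.25) is also kept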
        EXPECTED_FILTERED_DIST = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]])
        self.assertTrue(np.allclose(filtered_dist, EXPECTED_FILTERED_DIST, atol=1e-3))

        # check edge cases with negative and extreme logits
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy() - (
            vocab_size // 2
        )
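        # with vocab_size=10, each row of ramp_logits is now [-5, -4, ..., 4]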

        # make ramp_logits more extreme
        ramp_logits[1] = ramp_logits[1] * 100.0

        # make sure at least 2 tokens are kept
        top_p_warp = FlaxTopPLogitsWarper(0.9, min_tokens_to_keep=2, filter_value=0.0)
        filtered_dist = top_p_warp(input_ids, ramp_logits, cur_len=None)

        # first batch should keep three tokens; the second would keep only 1, but `min_tokens_to_keep=2` forces it to keep 2
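        # (softmax over the unit ramp assigns roughly 0.63, 0.23 and 0.09 to the top three
        # tokens, so the cumulative mass only reaches 0.9 once three tokens are kept)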
        self.assertListEqual((filtered_dist != 0.0).sum(axis=-1).tolist(), [3, 2])

    def test_processor_list(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15

        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
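        # ids_tensor is assumed to be the shared test helper that returns a random integer
        # tensor of the given shape with values in [0, vocab_size)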
        input_ids_comp = input_ids.copy()

        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()

        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)
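        # temperature=0.5 sharpens the logits by dividing them by 0.5, top-k keeps only the 3
        # highest-scoring tokens, and top-p keeps the smallest nucleus with mass >= 0.8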

        cur_len = 10

        # no processor list
        scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
        scores = top_k_warp(input_ids, scores, cur_len=cur_len)
        scores = top_p_warp(input_ids, scores, cur_len=cur_len)

        # with processor list
        processor = FlaxLogitsProcessorList([temp_dist_warp, top_k_warp, top_p_warp])
        scores_comp = processor(input_ids, scores_comp, cur_len=cur_len)

        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())

    def test_processor_list_jitted(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15

        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()

        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()

        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)

        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        cur_len = 10
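        # cur_len=10 exercises a mid-generation step: BOS is only forced on the first generated
        # token and EOS only when max_length is reached, so neither forced processor fires here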

        # no processor list
        def run_no_processor_list(input_ids, scores, cur_len):
            scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
            scores = top_k_warp(input_ids, scores, cur_len=cur_len)
            scores = top_p_warp(input_ids, scores, cur_len=cur_len)
            scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)
            return scores

        # with processor list
        def run_processor_list(input_ids, scores, cur_len):
            processor = FlaxLogitsProcessorList(
                [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
            )
            scores = processor(input_ids, scores, cur_len=cur_len)
            return scores

        jitted_run_no_processor_list = jax.jit(run_no_processor_list)
        jitted_run_processor_list = jax.jit(run_processor_list)
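        # both pipelines are jit-compiled end-to-end, so every processor must be traceable by
        # JAX (pure jnp ops, no Python-level branching on traced values)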

        scores = jitted_run_no_processor_list(input_ids, scores, cur_len)
        scores_comp = jitted_run_processor_list(input_ids, scores_comp, cur_len)

        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())