    def test_temperature_dist_warper(self):
        input_ids = None
        length = 20

        scores = self._get_uniform_logits(batch_size=2, length=length)

        # tweak scores to not be uniform anymore
        scores = scores.at[1, 5].set((1 / length) + 0.1)  # peak, 2nd batch item
        scores = scores.at[1, 10].set((1 / length) - 0.4)  # valley, 2nd batch item

        # compute softmax
        probs = jax.nn.softmax(scores, axis=-1)

        temp_dist_warper_sharper = FlaxTemperatureLogitsWarper(temperature=0.5)
        temp_dist_warper_smoother = FlaxTemperatureLogitsWarper(temperature=1.3)
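        # the temperature warper divides the logits by `temperature`:
        # T < 1 sharpens the distribution, T > 1 flattens it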

        warped_prob_sharp = jax.nn.softmax(temp_dist_warper_sharper(input_ids, scores.copy(), cur_len=None), axis=-1)
        warped_prob_smooth = jax.nn.softmax(temp_dist_warper_smoother(input_ids, scores.copy(), cur_len=None), axis=-1)

        # uniform distribution stays uniform
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_sharp[0, :], atol=1e-3))
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_smooth[0, :], atol=1e-3))

        # sharp peaks get higher, valleys get lower
        self.assertLess(probs[1, :].max(), warped_prob_sharp[1, :].max())
        self.assertGreater(probs[1, :].min(), warped_prob_sharp[1, :].min())

        # smooth peaks get lower, valleys get higher
        self.assertGreater(probs[1, :].max(), warped_prob_smooth[1, :].max())
        self.assertLess(probs[1, :].min(), warped_prob_smooth[1, :].min())
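
    # NOTE (assumption): `_get_uniform_logits` is a helper defined elsewhere in
    # this test class; the checks above only rely on it returning uniform logits
    # of shape (batch_size, length). A minimal sketch:
    #
    #     def _get_uniform_logits(self, batch_size, length):
    #         return jnp.ones((batch_size, length)) / length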

    def test_processor_list(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15

        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()

        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()

        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)
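        # top_k_warp keeps only the 3 highest-scoring tokens; top_p_warp keeps the
        # smallest set of tokens whose cumulative probability exceeds 0.8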

        # no processor list
        scores = temp_dist_warp(input_ids, scores, cur_len=None)
        scores = top_k_warp(input_ids, scores, cur_len=None)
        scores = top_p_warp(input_ids, scores, cur_len=None)

        # with processor list
        processor = FlaxLogitsProcessorList([temp_dist_warp, top_k_warp, top_p_warp])
        scores_comp = processor(input_ids, scores_comp, cur_len=None)

        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
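
    # NOTE (assumption): `ids_tensor` is a shared test helper; these tests only
    # need it to return random token ids in [0, vocab_size). A minimal sketch:
    #
    #     def ids_tensor(shape, vocab_size, key=jax.random.PRNGKey(0)):
    #         return jax.random.randint(key, shape, 0, vocab_size)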

    def test_processor_list_jitted(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15

        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()

        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()

        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)

        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)
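        # min_dist_proc masks EOS until `min_length` tokens have been generated,
        # bos_dist_proc forces BOS as the first generated token, and
        # eos_dist_proc forces EOS at the final step allowed by `max_length`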

        cur_len = 10

        # no processor list
        def run_no_processor_list(input_ids, scores, cur_len):
            scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
            scores = top_k_warp(input_ids, scores, cur_len=cur_len)
            scores = top_p_warp(input_ids, scores, cur_len=cur_len)
            scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)
            return scores

        # with processor list
        def run_processor_list(input_ids, scores, cur_len):
            processor = FlaxLogitsProcessorList(
                [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
            )
            scores = processor(input_ids, scores, cur_len=cur_len)
            return scores

        jitted_run_no_processor_list = jax.jit(run_no_processor_list)
        jitted_run_processor_list = jax.jit(run_processor_list)
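        # under jax.jit, cur_len is traced rather than treated as a constant, so
        # the processors must use jnp/lax ops instead of Python control flow on it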

        scores = jitted_run_no_processor_list(input_ids, scores, cur_len)
        scores_comp = jitted_run_processor_list(input_ids, scores_comp, cur_len)

        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
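
    # NOTE (assumption): the equality checks above rely on
    # `FlaxLogitsProcessorList` behaving as a left-to-right fold over its
    # processors, roughly:
    #
    #     for processor in processor_list:
    #         scores = processor(input_ids, scores, cur_len)
    #
    # so applying the processors one by one must produce identical scores.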