import unittest

import tensorflow as tf

from transformers import AutoTokenizer, TFGPTJForCausalLM
from transformers.tf_utils import shape_list


class TFGPTJModelLanguageGenerationTest(unittest.TestCase):
    def test_lm_generate_gptj(self):
        # Marked as @tooslow due to GPU OOM
        model = TFGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B", from_pt=True)
        input_ids = tf.convert_to_tensor([[464, 3290]], dtype=tf.int32)  # "The dog"
        # fmt: off
        # The dog is a man's best friend. It is a loyal companion, and it is a friend
        expected_output_ids = [
            464, 3290, 318, 257, 582, 338, 1266, 1545, 13, 632, 318, 257, 9112,
            15185, 11, 290, 340, 318, 257, 1545
        ]
        # fmt: on
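        # Greedy decoding (do_sample=False) is deterministic, so the generated ids
        # can be compared token-for-token against the expected list.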
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)

    def _get_beam_search_test_objects(self):
        model = TFGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B", revision="float16", from_pt=True)
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-j-6B", revision="float16")

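        # Decoder-only models are padded on the left so that generated tokens are
        # appended directly after the real prompt tokens, not after padding.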
        tokenizer.padding_side = "left"

        # Define PAD Token = EOS Token = 50256
        tokenizer.pad_token = tokenizer.eos_token
        model.config.pad_token_id = model.config.eos_token_id

        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]
        expected_output_sentences = [
            "Hello, my dog is a little over a year old and has been diagnosed with hip dysplasia",
            "Today, I’m going to be talking about a topic that’",
        ]
        return model, tokenizer, sentences, expected_output_sentences
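    # A minimal consumer sketch for the helper above. This method is not part of
    # the excerpt, and do_sample=False with num_beams=2 is an assumption about
    # how the expected sentences would be reproduced.
    def test_batch_beam_search(self):
        model, tokenizer, sentences, expected_output_sentences = self._get_beam_search_test_objects()
        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        outputs = model.generate(**inputs, do_sample=False, num_beams=2)
        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        self.assertListEqual(expected_output_sentences, batch_out_sentence)
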
    def test_gptj_sample(self):
        # Marked as @tooslow due to GPU OOM (issue #13676)
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-j-6B",
                                                  revision="float16")
        model = TFGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B",
                                                  revision="float16",
                                                  from_pt=True)

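        # Fix the global TF seed so the sampled output below is reproducible.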
        tf.random.set_seed(0)
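        # GPT-J does not need token_type_ids, but the tokenizer can return them;
        # they are requested here so the token_type_ids code path gets exercised.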
        tokenized = tokenizer("Today is a nice day and",
                              return_tensors="tf",
                              return_token_type_ids=True)
        input_ids, token_type_ids = tokenized.input_ids, tokenized.token_type_ids
        output_ids = model.generate(input_ids, do_sample=True)
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

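        # Sample five continuations with and without token_type_ids; the extra
        # type embeddings should shift the logits and hence the sampled text.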
        output_seq = model.generate(input_ids=input_ids,
                                    do_sample=True,
                                    num_return_sequences=5)
        output_seq_tt = model.generate(input_ids=input_ids,
                                       token_type_ids=token_type_ids,
                                       do_sample=True,
                                       num_return_sequences=5)
        output_seq_strs = tokenizer.batch_decode(output_seq,
                                                 skip_special_tokens=True)
        output_seq_tt_strs = tokenizer.batch_decode(output_seq_tt,
                                                    skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = "Today is a nice day and I am taking an hour to sit in the hammock and just enjoy"

        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)
        self.assertTrue(
            all(out != out_tt for out, out_tt in zip(output_seq_strs, output_seq_tt_strs))
        )  # token_type_ids should change output

    def test_batch_generation(self):
        # Marked as @tooslow due to GPU OOM
        model = TFGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B",
                                                  revision="float16",
                                                  from_pt=True)
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-j-6B",
                                                  revision="float16")

        tokenizer.padding_side = "left"

        # Define PAD Token = EOS Token = 50256
        tokenizer.pad_token = tokenizer.eos_token
        model.config.pad_token_id = model.config.eos_token_id

        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]

        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]
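        # Synthetic token_type_ids: zeros everywhere except the final position,
        # which gets the arbitrary type id 500 so the outputs visibly diverge.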
        token_type_ids = tf.concat(
            [
                tf.zeros((input_ids.shape[0], input_ids.shape[1] - 1),
                         dtype=tf.int64),
                500 * tf.ones((input_ids.shape[0], 1), dtype=tf.int64),
            ],
            axis=-1,
        )

        outputs = model.generate(input_ids=input_ids,
                                 attention_mask=inputs["attention_mask"])
        outputs_tt = model.generate(
            input_ids=input_ids,
            attention_mask=inputs["attention_mask"],
            token_type_ids=token_type_ids,
        )

        inputs_non_padded = tokenizer(sentences[0],
                                      return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded)

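        # num_paddings = pad tokens added to the short prompt's row: the longest
        # (non-padded) prompt length minus the real-token count in the last
        # attention-mask row. Shrinking max_length by this amount makes the
        # standalone run produce the same number of new tokens as the batched run.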
        num_paddings = (shape_list(inputs_non_padded)[-1] - tf.reduce_sum(
            tf.cast(inputs["attention_mask"][-1], tf.int64)).numpy())
        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded,
                                       max_length=model.config.max_length -
                                       num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs,
                                                    skip_special_tokens=True)
        batch_out_sentence_tt = tokenizer.batch_decode(
            outputs_tt, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0],
                                               skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0],
                                           skip_special_tokens=True)

        expected_output_sentence = [
            "Hello, my dog is a little over a year old and has been diagnosed with a heart murmur",
            "Today, I’m going to share with you a few of my favorite",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertTrue(
            batch_out_sentence_tt !=
            batch_out_sentence)  # token_type_ids should change output
        self.assertListEqual(expected_output_sentence,
                             [non_padded_sentence, padded_sentence])
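    # Run sketch (assumption): tests marked @tooslow are skipped on CI, so they
    # would be run manually, e.g. after removing the decorator; the file path
    # below is hypothetical:
    #     RUN_SLOW=1 python -m pytest path/to/test_modeling_tf_gptj.py -k generate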