Example #1
    def test_find_labels(self):
        if is_torch_available():
            from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification

            self.assertEqual(find_labels(BertForSequenceClassification),
                             ["labels"])
            self.assertEqual(find_labels(BertForPreTraining),
                             ["labels", "next_sentence_label"])
            self.assertEqual(find_labels(BertForQuestionAnswering),
                             ["start_positions", "end_positions"])

        if is_tf_available():
            from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification

            self.assertEqual(find_labels(TFBertForSequenceClassification),
                             ["labels"])
            self.assertEqual(find_labels(TFBertForPreTraining),
                             ["labels", "next_sentence_label"])
            self.assertEqual(find_labels(TFBertForQuestionAnswering),
                             ["start_positions", "end_positions"])

        if is_flax_available():
            # Flax models don't have labels
            from transformers import (
                FlaxBertForPreTraining,
                FlaxBertForQuestionAnswering,
                FlaxBertForSequenceClassification,
            )

            self.assertEqual(find_labels(FlaxBertForSequenceClassification),
                             [])
            self.assertEqual(find_labels(FlaxBertForPreTraining), [])
            self.assertEqual(find_labels(FlaxBertForQuestionAnswering), [])
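
# A minimal usage sketch of the find_labels utility exercised above: it reads the
# label argument names off a model class's signature, which a training loop can use
# to split a batch into model inputs and labels. The import path and the helper name
# split_labels are illustrative assumptions, not part of the test above.
from transformers import BertForQuestionAnswering
from transformers.utils import find_labels


def split_labels(model_class, batch):
    # Keep only the batch entries that the model class treats as labels.
    label_names = find_labels(model_class)
    labels = {key: value for key, value in batch.items() if key in label_names}
    inputs = {key: value for key, value in batch.items() if key not in label_names}
    return inputs, labels


batch = {"input_ids": [[101, 102]], "start_positions": [0], "end_positions": [1]}
inputs, labels = split_labels(BertForQuestionAnswering, batch)
# labels == {"start_positions": [0], "end_positions": [1]}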
Example #2
class FlaxBeitModelTest(FlaxModelTesterMixin, unittest.TestCase):

    all_model_classes = ((FlaxBeitModel, FlaxBeitForImageClassification,
                          FlaxBeitForMaskedImageModeling)
                         if is_flax_available() else ())

    def setUp(self) -> None:
        self.model_tester = FlaxBeitModelTester(self)
        self.config_tester = ConfigTester(self,
                                          config_class=BeitConfig,
                                          has_text_modality=False,
                                          hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    # We need to override this test because in Beit the sequence length equals the
    # number of patches + 1 (for the [CLS] token); we compute it below.
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        num_patches = (config.image_size // config.patch_size)**2
        seq_length = num_patches + 1

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            model = model_class(config)
            outputs = model(
                **self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions),
                             self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(
                **self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions),
                             self.model_tester.num_hidden_layers)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [
                    self.model_tester.num_attention_heads, seq_length,
                    seq_length
                ],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            outputs = model(
                **self._prepare_for_class(inputs_dict, model_class))

            added_hidden_states = 1
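            # Enabling output_hidden_states adds exactly one extra field
            # (hidden_states) to the model output, hence out_len + 1 here.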
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self.assertEqual(len(attentions),
                             self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [
                    self.model_tester.num_attention_heads, seq_length,
                    seq_length
                ],
            )

    # We need to override this test because Beit's forward signature is different from that of text models.
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    # We need to override this test because Beit expects pixel_values instead of input_ids.
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(
                    inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(
                        **prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(
                            **prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    # We need to override this test because in Beit the sequence length equals the
    # number of patches + 1 (for the [CLS] token); we compute it below.
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            num_patches = (config.image_size // config.patch_size)**2
            seq_length = num_patches + 1  # we add 1 for the [CLS] token

            outputs = model(
                **self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
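            # hidden_states holds the embedding output plus the output of each of the
            # num_hidden_layers transformer layers, hence the + 1 in the check below.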

            self.assertEqual(len(hidden_states),
                             self.model_tester.num_hidden_layers + 1)

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(
            *config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained(
                "microsoft/beit-base-patch16-224")
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)
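
# Worked example of the sequence-length computation the overridden attention and
# hidden-state tests above rely on, using the geometry implied by the
# microsoft/beit-base-patch16-224 checkpoint (224x224 images, 16x16 patches).
image_size, patch_size = 224, 16
num_patches = (image_size // patch_size) ** 2  # 14 * 14 = 196 patches
seq_length = num_patches + 1                   # + 1 for the [CLS] token -> 197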
Example #3
from transformers.utils import is_flax_available, is_torch_available, is_vision_available

from ..vit.test_modeling_vit import ViTModelTester

if is_torch_available():
    import torch

    from transformers import (
        BertModel,
        CLIPVisionModel,
        DeiTModel,
        RobertaModel,
        VisionTextDualEncoderConfig,
        VisionTextDualEncoderModel,
        ViTModel,
    )

if is_flax_available():
    from transformers import FlaxVisionTextDualEncoderModel
    from transformers.modeling_flax_pytorch_utils import (
        convert_pytorch_state_dict_to_flax,
        load_flax_weights_in_pytorch_model,
    )

if is_vision_available():
    from PIL import Image

    from transformers import VisionTextDualEncoderProcessor


# Inspired by
# https://github.com/rwightman/pytorch-image-models/blob/b9bd960a032c75ca6b808ddeed76bee5f3ed4972/timm/models/layers/helpers.py
# From PyTorch internals
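
# The original definition that follows this comment is cut off in this excerpt; in
# this family of vision test files the comment introduces a small tuple-normalizing
# helper. A minimal sketch of such a helper, given here as an assumption:
import collections.abc


def to_2tuple(x):
    # Leave iterables (e.g. (224, 224)) unchanged; duplicate scalars into a pair.
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)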
Example #4
class FlaxBeitModelTest(FlaxModelTesterMixin, unittest.TestCase):

    all_model_classes = ((FlaxBeitModel, FlaxBeitForImageClassification,
                          FlaxBeitForMaskedImageModeling)
                         if is_flax_available() else ())

    def setUp(self) -> None:
        self.model_tester = FlaxBeitModelTester(self)
        self.config_tester = ConfigTester(self,
                                          config_class=BeitConfig,
                                          has_text_modality=False,
                                          hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    # We need to override this test because Beit's forward signature is different from that of text models.
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    # We need to override this test because Beit expects pixel_values instead of input_ids
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(
                    inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(
                        **prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(
                            **prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(
            *config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained(
                "microsoft/beit-base-patch16-224")
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)
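
# The JIT test above follows a general JAX pattern: call the jitted function once,
# rerun it eagerly under jax.disable_jit(), and compare the output shapes. A
# self-contained sketch of that pattern with a toy function (not part of the suite):
import jax
import jax.numpy as jnp


@jax.jit
def scaled_sum(pixel_values):
    # Stand-in for a model forward pass: scale and reduce over the last axis.
    return (2.0 * pixel_values).sum(axis=-1)


x = jnp.ones((1, 3, 224, 224))
jitted_out = scaled_sum(x)
with jax.disable_jit():
    eager_out = scaled_sum(x)
assert jitted_out.shape == eager_out.shape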