Example #1
    def test_small_model_tf(self):
        question_answerer = pipeline(
            "question-answering",
            model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
            framework="tf")
        outputs = question_answerer(
            question="Where was HuggingFace founded ?",
            context="HuggingFace was founded in Paris.")

        self.assertEqual(nested_simplify(outputs), {
            "score": 0.011,
            "start": 0,
            "end": 11,
            "answer": "HuggingFace"
        })
    def test_accepts_torch_device(self):
        import torch

        text_classifier = pipeline(
            task="text-classification",
            model="hf-internal-testing/tiny-random-distilbert",
            framework="pt",
            device=torch.device("cpu"),
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{
            "label": "LABEL_0",
            "score": 0.504
        }])
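Outside the unittest harness, the first call above reduces to a short standalone script. A minimal sketch, assuming the same tiny Hub checkpoint and a TensorFlow install:

from transformers import pipeline

question_answerer = pipeline(
    "question-answering",
    model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
    framework="tf",
)
result = question_answerer(
    question="Where was HuggingFace founded ?",
    context="HuggingFace was founded in Paris.",
)
print(result)  # e.g. {"score": ..., "start": 0, "end": 11, "answer": "HuggingFace"}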
Example #3
 def test_large_model_issue(self):
     qa_pipeline = pipeline(
         "question-answering",
         model="mrm8488/bert-multi-cased-finetuned-xquadv1",
     )
     outputs = qa_pipeline(
         {
             "context": "Yes Bank founder Rana Kapoor has approached the Bombay High Court, challenging a special court's order from August this year that had remanded him in police custody for a week in a multi-crore loan fraud case. Kapoor, who is currently lodged in Taloja Jail, is an accused in the loan fraud case and some related matters being probed by the CBI and Enforcement Directorate. A single bench presided over by Justice S K Shinde on Tuesday posted the plea for further hearing on October 14. In his plea filed through advocate Vijay Agarwal, Kapoor claimed that the special court's order permitting the CBI's request for police custody on August 14 was illegal and in breach of the due process of law. Therefore, his police custody and subsequent judicial custody in the case were all illegal. Kapoor has urged the High Court to quash and set aside the special court's order dated August 14. As per his plea, in August this year, the CBI had moved two applications before the special court, one seeking permission to arrest Kapoor, who was already in judicial custody at the time in another case, and the other, seeking his police custody. While the special court refused to grant permission to the CBI to arrest Kapoor, it granted the central agency's plea for his custody. Kapoor, however, said in his plea that before filing an application for his arrest, the CBI had not followed the process of issuing him a notice under Section 41 of the CrPC for appearance before it. He further said that the CBI had not taken prior sanction as mandated under section 17 A of the Prevention of Corruption Act for prosecuting him. The special court, however, had said in its order at the time that as Kapoor was already in judicial custody in another case and was not a free man the procedure mandated under Section 41 of the CrPC need not have been adhered to as far as issuing a prior notice of appearance was concerned. ADVERTISING It had also said that case records showed that the investigating officer had taken an approval from a managing director of Yes Bank before beginning the proceedings against Kapoor and such a permission was a valid sanction. However, Kapoor in his plea said that the above order was bad in law and sought that it be quashed and set aside. The law mandated that if initial action was not in consonance with legal procedures, then all subsequent actions must be held as illegal, he said, urging the High Court to declare the CBI remand and custody and all subsequent proceedings including the further custody as illegal and void ab-initio. In a separate plea before the High Court, Kapoor's daughter Rakhee Kapoor-Tandon has sought exemption from in-person appearance before a special PMLA court. Rakhee has stated that she is a resident of the United Kingdom and is unable to travel to India owing to restrictions imposed due to the COVID-19 pandemic. According to the CBI, in the present case, Kapoor had obtained a gratification or pecuniary advantage of ₹ 307 crore, and thereby caused Yes Bank a loss of ₹ 1,800 crore by extending credit facilities to Avantha Group, when it was not eligible for the same",
             "question": "Is this person invovled in fraud?",
         }
     )
     self.assertEqual(
         nested_simplify(outputs),
         {"answer": "an accused in the loan fraud case", "end": 294, "score": 0.001, "start": 261},
     )
Example #4
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs("A test",
                                                          src_lang="en_XX",
                                                          tgt_lang="ar_AR")

        self.assertEqual(
            nested_simplify(inputs),
            {
                # en_XX, A, test, EOS
                "input_ids": [[250004, 62, 3034, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 250001,
            },
        )
    def test_small_model_pt(self):
        small_model = "lysandre/tiny-vit-random"
        image_classifier = pipeline("image-classification", model=small_model)

        outputs = image_classifier("http://images.cocodataset.org/val2017/000000039769.jpg")
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.0015, "label": "chambered nautilus, pearly nautilus, nautilus"},
                {"score": 0.0015, "label": "pajama, pyjama, pj's, jammies"},
                {"score": 0.0014, "label": "trench coat"},
                {"score": 0.0014, "label": "handkerchief, hankie, hanky, hankey"},
                {"score": 0.0014, "label": "baboon"},
            ],
        )

        outputs = image_classifier(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ],
            top_k=2,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.0015, "label": "chambered nautilus, pearly nautilus, nautilus"},
                    {"score": 0.0015, "label": "pajama, pyjama, pj's, jammies"},
                ],
                [
                    {"score": 0.0015, "label": "chambered nautilus, pearly nautilus, nautilus"},
                    {"score": 0.0015, "label": "pajama, pyjama, pj's, jammies"},
                ],
            ],
        )
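The same image-classification pipeline works as a plain script on an image URL. A minimal sketch with the tiny random checkpoint used above:

from transformers import pipeline

image_classifier = pipeline("image-classification", model="lysandre/tiny-vit-random")
predictions = image_classifier(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    top_k=2,  # keep only the two highest-scoring labels
)
print(predictions)  # list of {"score": float, "label": str} dicts, best first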
Example #6
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs("A test",
                                                          src_lang="en",
                                                          tgt_lang="ar")

        self.assertEqual(
            nested_simplify(inputs),
            {
                # en, A, test, EOS
                "input_ids": [[128022, 58, 4183, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar
                "forced_bos_token_id": 128006,
            },
        )
    def test_gather_pre_entities(self):
        model_name = "sshleifer/tiny-dbmdz-bert-large-cased-finetuned-conll03-english"
        tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=True)
        token_classifier = pipeline(task="ner",
                                    model=model_name,
                                    tokenizer=tokenizer,
                                    framework="pt")

        sentence = "Hello there"

        tokens = tokenizer(
            sentence,
            return_attention_mask=False,
            return_tensors="pt",
            truncation=True,
            return_special_tokens_mask=True,
            return_offsets_mapping=True,
        )
        offset_mapping = tokens.pop("offset_mapping").cpu().numpy()[0]
        special_tokens_mask = tokens.pop(
            "special_tokens_mask").cpu().numpy()[0]
        input_ids = tokens["input_ids"].numpy()[0]
        # First element is [CLS]
        scores = np.array([[1, 0, 0], [0.1, 0.3, 0.6], [0.8, 0.1, 0.1]])

        pre_entities = token_classifier.gather_pre_entities(
            sentence, input_ids, scores, offset_mapping, special_tokens_mask)
        self.assertEqual(
            nested_simplify(pre_entities),
            [
                {
                    "word": "Hello",
                    "scores": [0.1, 0.3, 0.6],
                    "start": 0,
                    "end": 5,
                    "is_subword": False,
                    "index": 1
                },
                {
                    "word": "there",
                    "scores": [0.8, 0.1, 0.1],
                    "index": 2,
                    "start": 6,
                    "end": 11,
                    "is_subword": False,
                },
            ],
        )
 def test_small_model_tf(self):
     feature_extractor = pipeline(
         task="feature-extraction",
         model="hf-internal-testing/tiny-random-distilbert",
         framework="tf")
     outputs = feature_extractor("This is a test")
     self.assertEqual(
         nested_simplify(outputs), [[
             [
                 -0.454, 0.966, 0.619, 0.262, 0.669, -0.661, -0.066, -0.513,
                 -0.768, -0.177, 1.771, -0.665, -0.649, 0.219, 0.236,
                 -0.375, 1.155, -1.07, 0.208, -0.799, 1.065, -1.223, 0.554,
                 1.274, 0.458, 2.292, -0.481, -0.928, -2.469, -1.692, 0.182,
                 1.06
             ],
             [
                 -0.187, -1.277, 0.849, -0.439, -0.967, -1.347, 1.063,
                 0.469, 1.086, -1.253, 0.349, 0.057, 1.031, -1.903, -0.432,
                 -1.377, 0.379, 0.733, -1.043, 1.307, 0.865, 0.229, 1.373,
                 1.671, -0.285, 0.599, -1.418, -1.179, -0.369, 1.039,
                 -0.705, 1.082
             ],
             [
                 -1.735, 1.102, 0.398, -0.245, 1.452, 0.46, -1.734, -0.746,
                 1.831, 0.562, 1.464, -0.342, -0.619, -0.455, 0.127, -1.209,
                 -0.686, -0.395, -0.316, 2.467, -0.379, 0.328, 0.639, 0.4,
                 -1.097, -0.096, 0.397, -0.806, -1.621, 1.127, -0.345, 0.074
             ],
             [
                 0.296, -0.638, 1.938, -0.151, -1.19, 1.445, 1.318, 0.711,
                 -0.125, 0.127, -2.179, 0.481, -1.019, 1.178, 0.318, 1.858,
                 -1.646, 0.185, -0.072, -0.979, 0.82, -1.374, 0.836, -1.019,
                 0.043, -0.156, -0.095, 0.641, -0.195, -0.076, -1.554, 0.275
             ],
             [
                 -0.266, 0.971, 0.745, -0.37, 1.42, -0.5, -0.53, 0.061,
                 1.311, -0.1, 1.796, 0.53, -0.739, -0.325, 0.28, -1.72,
                 0.382, -1.118, 0.442, 1.84, -2.497, 1.003, -0.788, -0.224,
                 -0.604, -1.259, -0.475, 1.18, -1.356, 0.695, 0.201, 0.016
             ],
             [
                 -0.618, -1.495, -0.67, -0.106, -1.265, -0.51, -1.752,
                 1.018, 0.674, 0.181, 0.297, 0.479, -0.185, 0.081, -2.44,
                 -0.239, 1.081, -1.38, 0.679, 0.878, 1.336, -1.347, 0.969,
                 -0.847, 0.293, 0.476, 1.647, -0.641, 0.66, 1.236, 0.761,
                 0.751
             ]
         ]])  # fmt: skip
Example #9
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs("A test",
                                                          return_tensors="pt",
                                                          src_lang="en_XX",
                                                          tgt_lang="java")

        self.assertEqual(
            nested_simplify(inputs),
            {
                # A, test, EOS, en_XX
                "input_ids": [[150, 242, 2, 50003]],
                "attention_mask": [[1, 1, 1, 1]],
                # java
                "forced_bos_token_id": 50001,
            },
        )
    def test_iterator_data_tf(self):
        def data(n: int):
            for _ in range(n):
                yield "This is a test"

        pipe = pipeline(model="hf-internal-testing/tiny-random-distilbert",
                        framework="tf")
        out = pipe("This is a test")
        results = []
        for out in pipe(data(10)):
            self.assertEqual(nested_simplify(out), {
                "label": "LABEL_0",
                "score": 0.504
            })
            results.append(out)
        self.assertEqual(len(results), 10)
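The generator above is the point of the test: a pipeline accepts any iterable and yields one result per input, so the data never has to be materialised as a list. A minimal standalone sketch with the same tiny checkpoint:

from transformers import pipeline

def data(n):
    for _ in range(n):
        yield "This is a test"

pipe = pipeline(model="hf-internal-testing/tiny-random-distilbert", framework="tf")
for prediction in pipe(data(10)):
    print(prediction)  # {"label": ..., "score": ...}, one dict per yielded string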
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs("A test",
                                                          return_tensors="pt",
                                                          src_lang="eng_Latn",
                                                          tgt_lang="fra_Latn")

        self.assertEqual(
            nested_simplify(inputs),
            {
                # A, test, EOS, eng_Latn
                "input_ids": [[70, 7356, 2, 256047]],
                "attention_mask": [[1, 1, 1, 1]],
                # fra_Latn
                "forced_bos_token_id": 256057,
            },
        )
Example #12
    def test_small_model_pt(self):
        model = "anton-l/wav2vec2-random-tiny-classifier"

        audio_classifier = pipeline("audio-classification", model=model)

        audio = np.ones((8000,))
        output = audio_classifier(audio, top_k=4)
        self.assertEqual(
            nested_simplify(output, decimals=4),
            [
                {"score": 0.0842, "label": "no"},
                {"score": 0.0838, "label": "up"},
                {"score": 0.0837, "label": "go"},
                {"score": 0.0834, "label": "right"},
            ],
        )
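Raw waveforms can be fed to the audio-classification pipeline as plain NumPy arrays. A hedged sketch reusing the tiny checkpoint and dummy signal from the test:

import numpy as np
from transformers import pipeline

audio_classifier = pipeline(
    "audio-classification", model="anton-l/wav2vec2-random-tiny-classifier"
)
audio = np.ones((8000,))  # flat dummy waveform, 8000 samples
print(audio_classifier(audio, top_k=4))  # four best {"score", "label"} pairs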
Example #13
    def test_iterator_data_tf(self):
        def data(n: int):
            for _ in range(n):
                yield "This is a test"

        pipe = pipeline(model="Narsil/tiny-distilbert-sequence-classification",
                        framework="tf")
        out = pipe("This is a test")
        results = []
        for out in pipe(data(10)):
            self.assertEqual(nested_simplify(out), {
                "label": "LABEL_1",
                "score": 0.502
            })
            results.append(out)
        self.assertEqual(len(results), 10)
Example #14
    def test_pipeline_batch_unbatch_iterator_tensors(self):
        import torch

        from transformers.pipelines.pt_utils import PipelineIterator

        dummy_dataset = [{"id": torch.LongTensor([[10, 20], [0, 1], [0, 2]])}, {"id": torch.LongTensor([[3]])}]

        def add(number, extra=0):
            return {"id": number["id"] + extra}

        dataset = PipelineIterator(dummy_dataset, add, {"extra": 2}, loader_batch_size=3)

        outputs = [item for item in dataset]
        self.assertEqual(
            nested_simplify(outputs), [{"id": [[12, 22]]}, {"id": [[2, 3]]}, {"id": [[2, 4]]}, {"id": [[5]]}]
        )
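PipelineIterator is the internal helper that re-batches inputs for the forward pass and then un-batches the results; the test exercises it directly. A minimal sketch of the same call (internal API, so the signature may change between versions):

import torch
from transformers.pipelines.pt_utils import PipelineIterator

dummy_dataset = [
    {"id": torch.LongTensor([[10, 20], [0, 1], [0, 2]])},
    {"id": torch.LongTensor([[3]])},
]

def add(item, extra=0):
    return {"id": item["id"] + extra}

# loader_batch_size=3 splits the three-row tensor back into one item per row
for item in PipelineIterator(dummy_dataset, add, {"extra": 2}, loader_batch_size=3):
    print(item["id"])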
    def test_large_model_course(self):
        question_answerer = pipeline("question-answering")
        long_context = """
🤗 Transformers: State of the Art NLP

🤗 Transformers provides thousands of pretrained models to perform tasks on texts such as classification, information extraction,
question answering, summarization, translation, text generation and more in over 100 languages.
Its aim is to make cutting-edge NLP easier to use for everyone.

🤗 Transformers provides APIs to quickly download and use those pretrained models on a given text, fine-tune them on your own datasets and
then share them with the community on our model hub. At the same time, each python module defining an architecture is fully standalone and
can be modified to enable quick research experiments.

Why should I use transformers?

1. Easy-to-use state-of-the-art models:
  - High performance on NLU and NLG tasks.
  - Low barrier to entry for educators and practitioners.
  - Few user-facing abstractions with just three classes to learn.
  - A unified API for using all our pretrained models.
  - Lower compute costs, smaller carbon footprint:

2. Researchers can share trained models instead of always retraining.
  - Practitioners can reduce compute time and production costs.
  - Dozens of architectures with over 10,000 pretrained models, some in more than 100 languages.

3. Choose the right framework for every part of a model's lifetime:
  - Train state-of-the-art models in 3 lines of code.
  - Move a single model between TF2.0/PyTorch frameworks at will.
  - Seamlessly pick the right framework for training, evaluation and production.

4. Easily customize a model or an example to your needs:
  - We provide examples for each architecture to reproduce the results published by its original authors.
  - Model internals are exposed as consistently as possible.
  - Model files can be used independently of the library for quick experiments.

🤗 Transformers is backed by the three most popular deep learning libraries — Jax, PyTorch and TensorFlow — with a seamless integration
between them. It's straightforward to train your models with one before loading them for inference with the other.
"""
        question = "Which deep learning libraries back 🤗 Transformers?"
        outputs = question_answerer(question=question, context=long_context)

        self.assertEqual(
            nested_simplify(outputs),
            {"answer": "Jax, PyTorch and TensorFlow", "end": 1919, "score": 0.971, "start": 1892},
        )
Example #16
    def test_threshold(self):
        threshold = 0.999
        model_id = "facebook/detr-resnet-50-panoptic"

        image_segmenter = pipeline("image-segmentation", model=model_id)

        outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=threshold)

        for o in outputs:
            o["mask"] = hashimage(o["mask"])

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9995, "label": "remote", "mask": "bd726918f10fed3efaef0091e11f923b"},
                {"score": 0.9994, "label": "cat", "mask": "fa5d8d5c329546ba5339f3095641ef56"},
            ],
        )
    def test_small_model_tf(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification",
            model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
            framework="tf",
        )
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?",
            candidate_labels=["politics", "public health", "science"])

        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["science", "public health", "politics"],
                "scores": [0.333, 0.333, 0.333],
            },
        )
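A hedged standalone sketch of the same zero-shot call; with a tiny random checkpoint the three labels come back with near-uniform scores, which is exactly what the assertion above checks:

from transformers import pipeline

zero_shot_classifier = pipeline(
    "zero-shot-classification",
    model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
    framework="tf",
)
result = zero_shot_classifier(
    "Who are you voting for in 2020?",
    candidate_labels=["politics", "public health", "science"],
)
print(result["labels"], result["scores"])  # labels sorted by descending score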
Example #18
 def test_small_model_long_context_cls_slow(self):
     question_answerer = pipeline(
         "question-answering",
         model="deepset/roberta-base-squad2",
         handle_impossible_answer=True,
         max_seq_length=512,
     )
     outputs = question_answerer(
         question="What country is Paris the capital of?",
         context=
         """London is the capital and largest city of England and the United Kingdom. It stands on the River Thames in south-east England at the head of a 50-mile (80 km) estuary down to the North Sea, and has been a major settlement for two millennia. The City of London, its ancient core and financial centre, was founded by the Romans as Londinium and retains boundaries close to its medieval ones. Since the 19th century, \"London\" has also referred to the metropolis around this core, historically split between the counties of Middlesex, Essex, Surrey, Kent, and Hertfordshire, which largely comprises Greater London, governed by the Greater London Authority. The City of Westminster, to the west of the City of London, has for centuries held the national government and parliament. As one of the world's global cities, London exerts strong influence on its arts, commerce, education, entertainment, fashion, finance, health care, media, tourism, and communications, and has sometimes been called the capital of the world. Its GDP (€801.66 billion in 2017) makes it the biggest urban economy in Europe, and it is one of the major financial centres in the world. In 2019 it had the second-highest number of ultra high-net-worth individuals in Europe after Paris and the second-highest number of billionaires in Europe after Moscow. As of 2021, London has the most millionaires of any city. With Europe's largest concentration of higher education institutions, it includes Imperial College London in natural and applied sciences, the London School of Economics in social sciences, and the comprehensive University College London. The city is home to the most 5-star hotels of any city in the world. In 2012, London became the first city to host three Summer Olympic Games. London is the capital and largest city of England and the United Kingdom. It stands on the River Thames in south-east England at the head of a 50-mile (80 km) estuary down to the North Sea, and has been a major settlement for two millennia. The City of London, its ancient core and financial centre, was founded by the Romans as Londinium and retains boundaries close to its medieval ones. Since the 19th century, \"London\" has also referred to the metropolis around this core, historically split between the counties of Middlesex, Essex, Surrey, Kent, and Hertfordshire, which largely comprises Greater London, governed by the Greater London Authority. The City of Westminster, to the west of the City of London, has for centuries held the national government and parliament. As one of the world's global cities, London exerts strong influence on its arts, commerce, education, entertainment, fashion, finance, health care, media, tourism, and communications, and has sometimes been called the capital of the world. Its GDP (€801.66 billion in 2017) makes it the biggest urban economy in Europe, and it is one of the major financial centres in the world. In 2019 it had the second-highest number of ultra high-net-worth individuals in Europe after Paris and the second-highest number of billionaires in Europe after Moscow. As of 2021, London has the most millionaires of any city. With Europe's largest concentration of higher education institutions, it includes Imperial College London in natural and applied sciences, the London School of Economics in social sciences, and the comprehensive University College London. The city is home to the most 5-star hotels of any city in the world. In 2012, London became the first city to host three Summer Olympic Games.""",
     )
     self.assertEqual(nested_simplify(outputs), {
         "score": 0.988,
         "start": 0,
         "end": 0,
         "answer": ""
     })
Example #19
    def test_threshold(self):
        threshold = 0.999
        model_id = "facebook/detr-resnet-50-panoptic"

        image_segmenter = pipeline("image-segmentation", model=model_id)

        outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=threshold)

        for o in outputs:
            o["mask"] = hashlib.sha1(o["mask"].encode("UTF-8")).hexdigest()

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9995, "label": "remote", "mask": "39dc07a07238048a06b0c2474de01ba3c09cc44f"},
                {"score": 0.9994, "label": "cat", "mask": "88b37bd2202c750cc9dd191518050a9b0ca5228c"},
            ],
        )
Example #20
    def test_large_model_pt(self):
        import datasets

        model = "superb/wav2vec2-base-superb-ks"

        audio_classifier = pipeline("audio-classification", model=model)
        dataset = datasets.load_dataset("anton-l/superb_dummy", "ks", split="test")

        audio = np.array(dataset[3]["speech"], dtype=np.float32)
        output = audio_classifier(audio, top_k=4)
        self.assertEqual(
            nested_simplify(output, decimals=4),
            [
                {"score": 0.9809, "label": "go"},
                {"score": 0.0073, "label": "up"},
                {"score": 0.0064, "label": "_unknown_"},
                {"score": 0.0015, "label": "down"},
            ],
        )
    def test_small_model_pt(self):
        model_name = "hf-internal-testing/tiny-bert-for-token-classification"
        token_classifier = pipeline(task="token-classification", model=model_name, framework="pt")
        outputs = token_classifier("This is a test !")
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"entity": "I-MISC", "score": 0.115, "index": 1, "word": "this", "start": 0, "end": 4},
                {"entity": "I-MISC", "score": 0.115, "index": 2, "word": "is", "start": 5, "end": 7},
            ],
        )

        token_classifier = pipeline(
            task="token-classification", model=model_name, framework="pt", ignore_labels=["O", "I-MISC"]
        )
        outputs = token_classifier("This is a test !")
        self.assertEqual(
            nested_simplify(outputs),
            [],
        )

        token_classifier = pipeline(task="token-classification", model=model_name, framework="pt")
        # Override offset_mapping
        outputs = token_classifier(
            "This is a test !", offset_mapping=[(0, 0), (0, 1), (0, 2), (0, 0), (0, 0), (0, 0), (0, 0)]
        )
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"entity": "I-MISC", "score": 0.115, "index": 1, "word": "this", "start": 0, "end": 1},
                {"entity": "I-MISC", "score": 0.115, "index": 2, "word": "is", "start": 0, "end": 2},
            ],
        )

        # Batch size does not affect outputs (attention_mask is required)
        sentences = ["This is a test !", "Another test this is with longer sentence"]
        outputs = token_classifier(sentences)
        outputs_batched = token_classifier(sentences, batch_size=2)
        # Batching does not make a difference in predictions
        self.assertEqual(nested_simplify(outputs_batched), nested_simplify(outputs))
        self.assertEqual(
            nested_simplify(outputs_batched),
            [
                [
                    {"entity": "I-MISC", "score": 0.115, "index": 1, "word": "this", "start": 0, "end": 4},
                    {"entity": "I-MISC", "score": 0.115, "index": 2, "word": "is", "start": 5, "end": 7},
                ],
                [],
            ],
        )
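The batching check above is the practical takeaway: batch_size only controls how inputs are grouped for the forward pass, not what the pipeline predicts. A minimal sketch with the same tiny checkpoint:

from transformers import pipeline

token_classifier = pipeline(
    task="token-classification",
    model="hf-internal-testing/tiny-bert-for-token-classification",
    framework="pt",
)
sentences = ["This is a test !", "Another test this is with longer sentence"]
print(token_classifier(sentences, batch_size=2))
print(token_classifier(sentences))  # same entities either way, up to float rounding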
Example #22
    def test_small_model_pt_semantic(self):
        model_id = "hf-internal-testing/tiny-random-beit-pipeline"
        image_segmenter = pipeline(model=model_id)
        outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg")
        for o in outputs:
            # shortening by hashing
            o["mask"] = hashimage(o["mask"])

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {
                    "score": None,
                    "label": "LABEL_0",
                    "mask": "6225140faf502d272af076222776d7e4",
                },
                {
                    "score": None,
                    "label": "LABEL_1",
                    "mask": "8297c9f8eb43ddd3f32a6dae21e015a1",
                },
            ],
        )
Example #23
    def test_small_model_pt_softmax_trick(self):
        question_answerer = pipeline(
            "question-answering",
            model="sshleifer/tiny-distilbert-base-cased-distilled-squad")

        real_postprocess = question_answerer.postprocess

        # Tweak start and stop to make sure we encounter the softmax logits
        # bug.
        def ensure_large_logits_postprocess(
            model_outputs,
            top_k=1,
            handle_impossible_answer=False,
            max_answer_len=15,
        ):
            for output in model_outputs:
                output["start"] = output["start"] * 1e6
                output["end"] = output["end"] * 1e6
            return real_postprocess(
                model_outputs,
                top_k=top_k,
                handle_impossible_answer=handle_impossible_answer,
                max_answer_len=max_answer_len,
            )

        question_answerer.postprocess = ensure_large_logits_postprocess

        outputs = question_answerer(
            question="Where was HuggingFace founded ?",
            context="HuggingFace was founded in Paris.")

        self.assertEqual(nested_simplify(outputs), {
            "score": 0.028,
            "start": 0,
            "end": 11,
            "answer": "HuggingFace"
        })
 def test_aggregation_strategy_byte_level_tokenizer(self):
     sentence = "Groenlinks praat over Schiphol."
     ner = pipeline("ner",
                    model="xlm-roberta-large-finetuned-conll02-dutch",
                    aggregation_strategy="max")
     self.assertEqual(
         nested_simplify(ner(sentence)),
         [
             {
                 "end": 10,
                 "entity_group": "ORG",
                 "score": 0.994,
                 "start": 0,
                 "word": "Groenlinks"
             },
             {
                 "entity_group": "LOC",
                 "score": 1.0,
                 "word": "Schiphol.",
                 "start": 22,
                 "end": 31
             },
         ],
     )
    def test_spanish_bert(self):
        # https://github.com/huggingface/transformers/pull/4987
        NER_MODEL = "mrm8488/bert-spanish-cased-finetuned-ner"
        model = AutoModelForTokenClassification.from_pretrained(NER_MODEL)
        tokenizer = AutoTokenizer.from_pretrained(NER_MODEL, use_fast=True)
        sentence = """Consuelo Araújo Noguera, ministra de cultura del presidente Andrés Pastrana (1998.2002) fue asesinada por las Farc luego de haber permanecido secuestrada por algunos meses."""

        token_classifier = pipeline("ner", model=model, tokenizer=tokenizer)
        output = token_classifier(sentence)
        self.assertEqual(
            nested_simplify(output[:3]),
            [
                {"entity": "B-PER", "score": 0.999, "word": "Cons", "start": 0, "end": 4, "index": 1},
                {"entity": "B-PER", "score": 0.803, "word": "##uelo", "start": 4, "end": 8, "index": 2},
                {"entity": "I-PER", "score": 0.999, "word": "Ara", "start": 9, "end": 12, "index": 3},
            ],
        )

        token_classifier = pipeline("ner", model=model, tokenizer=tokenizer, aggregation_strategy="simple")
        output = token_classifier(sentence)
        self.assertEqual(
            nested_simplify(output[:3]),
            [
                {"entity_group": "PER", "score": 0.999, "word": "Cons", "start": 0, "end": 4},
                {"entity_group": "PER", "score": 0.966, "word": "##uelo Araújo Noguera", "start": 4, "end": 23},
                {"entity_group": "PER", "score": 1.0, "word": "Andrés Pastrana", "start": 60, "end": 75},
            ],
        )

        token_classifier = pipeline("ner", model=model, tokenizer=tokenizer, aggregation_strategy="first")
        output = token_classifier(sentence)
        self.assertEqual(
            nested_simplify(output[:3]),
            [
                {"entity_group": "PER", "score": 0.999, "word": "Consuelo Araújo Noguera", "start": 0, "end": 23},
                {"entity_group": "PER", "score": 1.0, "word": "Andrés Pastrana", "start": 60, "end": 75},
                {"entity_group": "ORG", "score": 0.999, "word": "Farc", "start": 110, "end": 114},
            ],
        )

        token_classifier = pipeline("ner", model=model, tokenizer=tokenizer, aggregation_strategy="max")
        output = token_classifier(sentence)
        self.assertEqual(
            nested_simplify(output[:3]),
            [
                {"entity_group": "PER", "score": 0.999, "word": "Consuelo Araújo Noguera", "start": 0, "end": 23},
                {"entity_group": "PER", "score": 1.0, "word": "Andrés Pastrana", "start": 60, "end": 75},
                {"entity_group": "ORG", "score": 0.999, "word": "Farc", "start": 110, "end": 114},
            ],
        )

        token_classifier = pipeline("ner", model=model, tokenizer=tokenizer, aggregation_strategy="average")
        output = token_classifier(sentence)
        self.assertEqual(
            nested_simplify(output[:3]),
            [
                {"entity_group": "PER", "score": 0.966, "word": "Consuelo Araújo Noguera", "start": 0, "end": 23},
                {"entity_group": "PER", "score": 1.0, "word": "Andrés Pastrana", "start": 60, "end": 75},
                {"entity_group": "ORG", "score": 0.542, "word": "Farc", "start": 110, "end": 114},
            ],
        )
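A hedged standalone sketch of the same Spanish NER model with one aggregation strategy; swapping the aggregation_strategy string between "simple", "first", "max" and "average" reproduces the variants asserted above:

from transformers import pipeline

ner = pipeline(
    "ner",
    model="mrm8488/bert-spanish-cased-finetuned-ner",
    aggregation_strategy="simple",  # or "first", "max", "average"; omit for per-token output
)
print(ner("Consuelo Araújo Noguera, ministra de cultura del presidente Andrés Pastrana"))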
 def test_aggregation_strategy(self):
     model_name = self.small_models[0]
     tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=True)
     token_classifier = pipeline(task="ner", model=model_name, tokenizer=tokenizer, framework="pt")
     # Just to understand scores indexes in this test
     self.assertEqual(
         token_classifier.model.config.id2label,
         {0: "O", 1: "B-MISC", 2: "I-MISC", 3: "B-PER", 4: "I-PER", 5: "B-ORG", 6: "I-ORG", 7: "B-LOC", 8: "I-LOC"},
     )
     example = [
         {
              # fmt: off
              "scores": np.array([0, 0, 0, 0, 0.9968166351318359, 0, 0, 0]),
              # fmt: on
             "index": 1,
             "is_subword": False,
             "word": "En",
             "start": 0,
             "end": 2,
         },
         {
              # fmt: off
              "scores": np.array([0, 0, 0, 0, 0.9957635998725891, 0, 0, 0]),
              # fmt: on
             "index": 2,
             "is_subword": True,
             "word": "##zo",
             "start": 2,
             "end": 4,
         },
         {
             # fmt: off
             "scores": np.array([0, 0, 0, 0, 0, 0.9986497163772583, 0, 0, ]),
             # fmt: on
             "index": 7,
             "word": "UN",
             "is_subword": False,
             "start": 11,
             "end": 13,
         },
     ]
     self.assertEqual(
         nested_simplify(token_classifier.aggregate(example, AggregationStrategy.NONE)),
         [
             {"end": 2, "entity": "I-PER", "score": 0.997, "start": 0, "word": "En", "index": 1},
             {"end": 4, "entity": "I-PER", "score": 0.996, "start": 2, "word": "##zo", "index": 2},
             {"end": 13, "entity": "B-ORG", "score": 0.999, "start": 11, "word": "UN", "index": 7},
         ],
     )
     self.assertEqual(
         nested_simplify(token_classifier.aggregate(example, AggregationStrategy.SIMPLE)),
         [
             {"entity_group": "PER", "score": 0.996, "word": "Enzo", "start": 0, "end": 4},
             {"entity_group": "ORG", "score": 0.999, "word": "UN", "start": 11, "end": 13},
         ],
     )
     self.assertEqual(
         nested_simplify(token_classifier.aggregate(example, AggregationStrategy.FIRST)),
         [
             {"entity_group": "PER", "score": 0.997, "word": "Enzo", "start": 0, "end": 4},
             {"entity_group": "ORG", "score": 0.999, "word": "UN", "start": 11, "end": 13},
         ],
     )
     self.assertEqual(
         nested_simplify(token_classifier.aggregate(example, AggregationStrategy.MAX)),
         [
             {"entity_group": "PER", "score": 0.997, "word": "Enzo", "start": 0, "end": 4},
             {"entity_group": "ORG", "score": 0.999, "word": "UN", "start": 11, "end": 13},
         ],
     )
     self.assertEqual(
         nested_simplify(token_classifier.aggregate(example, AggregationStrategy.AVERAGE)),
         [
             {"entity_group": "PER", "score": 0.996, "word": "Enzo", "start": 0, "end": 4},
             {"entity_group": "ORG", "score": 0.999, "word": "UN", "start": 11, "end": 13},
         ],
     )
    def test_small_model_tf(self):
        unmasker = pipeline(task="fill-mask",
                            model="sshleifer/tiny-distilroberta-base",
                            top_k=2,
                            framework="tf")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {
                    "sequence": "My name is grouped",
                    "score": 2.1e-05,
                    "token": 38015,
                    "token_str": " grouped"
                },
                {
                    "sequence": "My name is accuser",
                    "score": 2.1e-05,
                    "token": 25506,
                    "token_str": " accuser"
                },
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {
                    "sequence": "The largest city in France is grouped",
                    "score": 2.1e-05,
                    "token": 38015,
                    "token_str": " grouped",
                },
                {
                    "sequence": "The largest city in France is accuser",
                    "score": 2.1e-05,
                    "token": 25506,
                    "token_str": " accuser",
                },
            ],
        )

        outputs = unmasker("My name is <mask>",
                           targets=[" Patrick", " Clara", " Teven"],
                           top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {
                    "sequence": "My name is Clara",
                    "score": 2e-05,
                    "token": 13606,
                    "token_str": " Clara"
                },
                {
                    "sequence": "My name is Patrick",
                    "score": 2e-05,
                    "token": 3499,
                    "token_str": " Patrick"
                },
                {
                    "sequence": "My name is Te",
                    "score": 1.9e-05,
                    "token": 2941,
                    "token_str": " Te"
                },
            ],
        )
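A hedged standalone sketch of the same fill-mask calls, with and without a restricted target set:

from transformers import pipeline

unmasker = pipeline(
    task="fill-mask",
    model="sshleifer/tiny-distilroberta-base",
    top_k=2,
    framework="tf",
)
print(unmasker("My name is <mask>"))
print(unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3))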
    def test_integration_torch_image_segmentation(self):
        model_id = "facebook/detr-resnet-50-panoptic"

        image_segmenter = pipeline("image-segmentation", model=model_id)

        outputs = image_segmenter(
            "http://images.cocodataset.org/val2017/000000039769.jpg")
        for o in outputs:
            o["mask"] = hashimage(o["mask"])

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {
                    "score": 0.9094,
                    "label": "blanket",
                    "mask": "85144e4bf8d624c2c6175f7faf57eb30"
                },
                {
                    "score": 0.9941,
                    "label": "cat",
                    "mask": "f3a7f80220788acc0245ebc084df6afc"
                },
                {
                    "score": 0.9987,
                    "label": "remote",
                    "mask": "7703408f54da1d0ebda47841da875e48"
                },
                {
                    "score": 0.9995,
                    "label": "remote",
                    "mask": "bd726918f10fed3efaef0091e11f923b"
                },
                {
                    "score": 0.9722,
                    "label": "couch",
                    "mask": "226d6dcb98bebc3fbc208abdc0c83196"
                },
                {
                    "score": 0.9994,
                    "label": "cat",
                    "mask": "fa5d8d5c329546ba5339f3095641ef56"
                },
            ],
        )

        outputs = image_segmenter(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ],
            threshold=0.0,
        )
        for output in outputs:
            for o in output:
                o["mask"] = hashimage(o["mask"])

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {
                        "score": 0.9094,
                        "label": "blanket",
                        "mask": "85144e4bf8d624c2c6175f7faf57eb30"
                    },
                    {
                        "score": 0.9941,
                        "label": "cat",
                        "mask": "f3a7f80220788acc0245ebc084df6afc"
                    },
                    {
                        "score": 0.9987,
                        "label": "remote",
                        "mask": "7703408f54da1d0ebda47841da875e48"
                    },
                    {
                        "score": 0.9995,
                        "label": "remote",
                        "mask": "bd726918f10fed3efaef0091e11f923b"
                    },
                    {
                        "score": 0.9722,
                        "label": "couch",
                        "mask": "226d6dcb98bebc3fbc208abdc0c83196"
                    },
                    {
                        "score": 0.9994,
                        "label": "cat",
                        "mask": "fa5d8d5c329546ba5339f3095641ef56"
                    },
                ],
                [
                    {
                        "score": 0.9094,
                        "label": "blanket",
                        "mask": "85144e4bf8d624c2c6175f7faf57eb30"
                    },
                    {
                        "score": 0.9941,
                        "label": "cat",
                        "mask": "f3a7f80220788acc0245ebc084df6afc"
                    },
                    {
                        "score": 0.9987,
                        "label": "remote",
                        "mask": "7703408f54da1d0ebda47841da875e48"
                    },
                    {
                        "score": 0.9995,
                        "label": "remote",
                        "mask": "bd726918f10fed3efaef0091e11f923b"
                    },
                    {
                        "score": 0.9722,
                        "label": "couch",
                        "mask": "226d6dcb98bebc3fbc208abdc0c83196"
                    },
                    {
                        "score": 0.9994,
                        "label": "cat",
                        "mask": "fa5d8d5c329546ba5339f3095641ef56"
                    },
                ],
            ],
        )
    def test_small_model_pt(self):
        model_id = "hf-internal-testing/tiny-detr-mobilenetsv3-panoptic"

        model = AutoModelForImageSegmentation.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        image_segmenter = ImageSegmentationPipeline(
            model=model, feature_extractor=feature_extractor)

        outputs = image_segmenter(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            threshold=0.0)
        for o in outputs:
            # shortening by hashing
            o["mask"] = hashimage(o["mask"])

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {
                    "score": 0.004,
                    "label": "LABEL_0",
                    "mask": "34eecd16bbfb0f476083ef947d81bf66",
                },
                {
                    "score": 0.004,
                    "label": "LABEL_0",
                    "mask": "34eecd16bbfb0f476083ef947d81bf66",
                },
            ],
        )

        outputs = image_segmenter(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ],
            threshold=0.0,
        )
        for output in outputs:
            for o in output:
                o["mask"] = hashimage(o["mask"])

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {
                        "score": 0.004,
                        "label": "LABEL_0",
                        "mask": "34eecd16bbfb0f476083ef947d81bf66",
                    },
                    {
                        "score": 0.004,
                        "label": "LABEL_0",
                        "mask": "34eecd16bbfb0f476083ef947d81bf66",
                    },
                ],
                [
                    {
                        "score": 0.004,
                        "label": "LABEL_0",
                        "mask": "34eecd16bbfb0f476083ef947d81bf66",
                    },
                    {
                        "score": 0.004,
                        "label": "LABEL_0",
                        "mask": "34eecd16bbfb0f476083ef947d81bf66",
                    },
                ],
            ],
        )
    def run_test_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        targets = list(sorted(vocab.keys()))[:2]
        # Pipeline argument
        fill_masker = FillMaskPipeline(model=model,
                                       tokenizer=tokenizer,
                                       targets=targets)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {
                    "sequence": ANY(str),
                    "score": ANY(float),
                    "token": ANY(int),
                    "token_str": ANY(str)
                },
                {
                    "sequence": ANY(str),
                    "score": ANY(float),
                    "token": ANY(int),
                    "token_str": ANY(str)
                },
            ],
        )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual(set(el["token"] for el in outputs), target_ids)
        self.assertEqual(set(el["token_str"] for el in outputs), set(targets))

        # Call argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}",
                              targets=targets)
        self.assertEqual(
            outputs,
            [
                {
                    "sequence": ANY(str),
                    "score": ANY(float),
                    "token": ANY(int),
                    "token_str": ANY(str)
                },
                {
                    "sequence": ANY(str),
                    "score": ANY(float),
                    "token": ANY(int),
                    "token_str": ANY(str)
                },
            ],
        )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual(set(el["token"] for el in outputs), target_ids)
        self.assertEqual(set(el["token_str"] for el in outputs), set(targets))

        # Score equivalence
        outputs = fill_masker(f"This is a {tokenizer.mask_token}",
                              targets=targets)
        tokens = [top_mask["token_str"] for top_mask in outputs]
        scores = [top_mask["score"] for top_mask in outputs]

        unmasked_targets = fill_masker(f"This is a {tokenizer.mask_token}",
                                       targets=tokens)
        target_scores = [top_mask["score"] for top_mask in unmasked_targets]
        self.assertEqual(nested_simplify(scores),
                         nested_simplify(target_scores))

        # Raises with invalid
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}",
                                  targets=[""])
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}",
                                  targets=[])
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}",
                                  targets="")