Example #1: rotating the subscription key
    def test_rotate_subscription_key(self, resource_group, location,
                                     text_analytics_account,
                                     text_analytics_account_key):

        credential = AzureKeyCredential(text_analytics_account_key)
        client = TextAnalyticsClient(text_analytics_account, credential)

        docs = [{
            "id": "1",
            "text": "I will go to the park."
        }, {
            "id": "2",
            "text": "I did not like the hotel we stayed at."
        }, {
            "id": "3",
            "text": "The restaurant had really good food."
        }]

        response = client.begin_analyze(
            docs,
            entities_recognition_tasks=[EntitiesRecognitionTask()],
            key_phrase_extraction_tasks=[KeyPhraseExtractionTask()],
            pii_entities_recognition_tasks=[PiiEntitiesRecognitionTask()],
            polling_interval=self._interval(),
        ).result()

        self.assertIsNotNone(response)

        credential.update("xxx")  # Make authentication fail
        with self.assertRaises(ClientAuthenticationError):
            response = client.begin_analyze(
                docs,
                entities_recognition_tasks=[EntitiesRecognitionTask()],
                key_phrase_extraction_tasks=[KeyPhraseExtractionTask()],
                pii_entities_recognition_tasks=[PiiEntitiesRecognitionTask()],
                polling_interval=self._interval(),
            ).result()

        credential.update(text_analytics_account_key)  # Authenticate successfully again
        response = client.begin_analyze(
            docs,
            entities_recognition_tasks=[EntitiesRecognitionTask()],
            key_phrase_extraction_tasks=[KeyPhraseExtractionTask()],
            pii_entities_recognition_tasks=[PiiEntitiesRecognitionTask()],
            polling_interval=self._interval(),
        ).result()
        self.assertIsNotNone(response)
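
The snippets in this listing all assume the same imports and client setup, which only Example #26 shows in full. Below is a minimal sketch of that shared setup for the synchronous client, assuming the same environment variable names as Example #26; the `_interval()` polling helper used throughout is a method on the test class and is not reproduced here.

import os

from azure.core.credentials import AzureKeyCredential
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError
from azure.ai.textanalytics import (
    TextAnalyticsClient,
    EntitiesRecognitionTask,
    KeyPhraseExtractionTask,
    PiiEntitiesRecognitionTask,
)

# Sketch only: the tests above receive a preconfigured `client` from their test framework.
endpoint = os.environ["AZURE_TEXT_ANALYTICS_ENDPOINT"]
key = os.environ["AZURE_TEXT_ANALYTICS_KEY"]
client = TextAnalyticsClient(endpoint, AzureKeyCredential(key))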
Example #2: verifying the User-Agent header
    def test_user_agent(self, client):
        def callback(resp):
            self.assertIn(
                "azsdk-python-ai-textanalytics/{} Python/{} ({})".format(
                    VERSION, platform.python_version(), platform.platform()),
                resp.http_request.headers["User-Agent"])

        docs = [{
            "id": "1",
            "text": "I will go to the park."
        }, {
            "id": "2",
            "text": "I did not like the hotel we stayed at."
        }, {
            "id": "3",
            "text": "The restaurant had really good food."
        }]

        poller = client.begin_analyze(
            docs,
            entities_recognition_tasks=[EntitiesRecognitionTask()],
            key_phrase_extraction_tasks=[KeyPhraseExtractionTask()],
            pii_entities_recognition_tasks=[PiiEntitiesRecognitionTask()],
            polling_interval=self._interval(),
        )

        self.assertIn(
            "azsdk-python-ai-textanalytics/{} Python/{} ({})".format(
                VERSION, platform.python_version(), platform.platform()),
            poller._polling_method._initial_response.http_request.headers["User-Agent"])

        # Call result() before tearDown runs, even though the response itself is not needed for this test.
        poller.result()
Example #3: passing plain strings with a single entities recognition task
    def test_passing_only_string_entities_task(self, client):
        docs = [
            u"Microsoft was founded by Bill Gates and Paul Allen on April 4, 1975.",
            u"Microsoft fue fundado por Bill Gates y Paul Allen el 4 de abril de 1975.",
            u"Microsoft wurde am 4. April 1975 von Bill Gates und Paul Allen gegründet."
        ]

        response = client.begin_analyze(
            docs,
            entities_recognition_tasks=[EntitiesRecognitionTask()],
            polling_interval=self._interval(),
        ).result()

        results_pages = list(response)
        self.assertEqual(len(results_pages), 1)

        task_results = results_pages[0].entities_recognition_results
        self.assertEqual(len(task_results), 1)

        results = task_results[0].results
        self.assertEqual(len(results), 3)

        self.assertEqual(len(results[0].entities), 4)
        self.assertIsNotNone(results[0].id)
        for entity in results[0].entities:
            self.assertIsNotNone(entity.text)
            self.assertIsNotNone(entity.category)
            self.assertIsNotNone(entity.offset)
            self.assertIsNotNone(entity.confidence_score)
Example #4: invalid per-document language hint
    def test_invalid_language_hint_docs(self, client):
        response = list(
            client.begin_analyze(
                [{
                    "id": "1",
                    "language": "notalanguage",
                    "text": "This should fail because we're passing in an invalid language hint"
                }],
                entities_recognition_tasks=[EntitiesRecognitionTask()],
                key_phrase_extraction_tasks=[KeyPhraseExtractionTask()],
                pii_entities_recognition_tasks=[PiiEntitiesRecognitionTask()],
                polling_interval=self._interval(),
            ).result())

        task_types = [
            "entities_recognition_results", "key_phrase_extraction_results",
            "pii_entities_recognition_results"
        ]

        for task_type in task_types:
            tasks = getattr(
                response[0],
                task_type)  # only expecting a single page of results here
            self.assertEqual(len(tasks), 1)

            for r in tasks[0].results:
                self.assertTrue(r.is_error)
Example #5: disabling the batch-level language hint with an empty string
    def test_whole_batch_dont_use_language_hint(self, client):
        docs = [
            u"This was the best day of my life.",
            u"I did not like the hotel we stayed at. It was too expensive.",
            u"The restaurant was not as good as I hoped."
        ]

        response = list(
            client.begin_analyze(
                docs,
                entities_recognition_tasks=[EntitiesRecognitionTask()],
                key_phrase_extraction_tasks=[KeyPhraseExtractionTask()],
                pii_entities_recognition_tasks=[PiiEntitiesRecognitionTask()],
                language="",
                polling_interval=self._interval(),
            ).result())

        task_types = [
            "entities_recognition_results", "key_phrase_extraction_results",
            "pii_entities_recognition_results"
        ]

        for task_type in task_types:
            task_results = getattr(response[0], task_type)
            self.assertEqual(len(task_results), 1)

            results = task_results[0].results
            for r in results:
                self.assertFalse(r.is_error)
Example #6: relying on the client's default language hint
    def test_client_passed_default_language_hint(self, client):
        docs = [{
            "id": "1",
            "text": "I will go to the park."
        }, {
            "id": "2",
            "text": "I did not like the hotel we stayed at."
        }, {
            "id": "3",
            "text": "The restaurant had really good food."
        }]

        response = list(
            client.begin_analyze(
                docs,
                entities_recognition_tasks=[EntitiesRecognitionTask()],
                key_phrase_extraction_tasks=[KeyPhraseExtractionTask()],
                pii_entities_recognition_tasks=[PiiEntitiesRecognitionTask()],
                polling_interval=self._interval(),
            ).result())

        task_types = [
            "entities_recognition_results", "key_phrase_extraction_results",
            "pii_entities_recognition_results"
        ]

        for task_type in task_types:
            tasks = getattr(
                response[0],
                task_type)  # only expecting a single page of results here
            self.assertEqual(len(tasks), 1)
            self.assertEqual(len(tasks[0].results), 3)

            for r in tasks[0].results:
                self.assertFalse(r.is_error)
Example #7: output order matches input order across multiple tasks
    def test_output_same_order_as_input_multiple_tasks(self, client):
        docs = [
            TextDocumentInput(id="1", text="one"),
            TextDocumentInput(id="2", text="two"),
            TextDocumentInput(id="3", text="three"),
            TextDocumentInput(id="4", text="four"),
            TextDocumentInput(id="5", text="five")
        ]

        response = client.begin_analyze(
            docs,
            entities_recognition_tasks=[EntitiesRecognitionTask()],
            key_phrase_extraction_tasks=[KeyPhraseExtractionTask()],
            pii_entities_recognition_tasks=[PiiEntitiesRecognitionTask()],
            polling_interval=self._interval(),
        ).result()

        results_pages = list(response)
        self.assertEqual(len(results_pages), 1)

        task_types = [
            "entities_recognition_results", "key_phrase_extraction_results",
            "pii_entities_recognition_results"
        ]

        for task_type in task_types:
            task_results = getattr(results_pages[0], task_type)
            self.assertEqual(len(task_results), 1)

            results = task_results[0].results
            self.assertEqual(len(results), 5)

            for idx, doc in enumerate(results):
                self.assertEqual(str(idx + 1), doc.id)
Example #8: batch-level language hint combined with per-item hints
    def test_whole_batch_language_hint_and_obj_per_item_hints(self, client):
        docs = [
            TextDocumentInput(id="1",
                              text="I should take my cat to the veterinarian.",
                              language="en"),
            TextDocumentInput(id="2",
                              text="Este es un document escrito en Español.",
                              language="en"),
            TextDocumentInput(id="3", text="猫は幸せ"),
        ]

        response = list(
            client.begin_analyze(
                docs,
                entities_recognition_tasks=[EntitiesRecognitionTask()],
                key_phrase_extraction_tasks=[KeyPhraseExtractionTask()],
                pii_entities_recognition_tasks=[PiiEntitiesRecognitionTask()],
                language="en",
                polling_interval=self._interval(),
            ).result())

        task_types = [
            "entities_recognition_results", "key_phrase_extraction_results",
            "pii_entities_recognition_results"
        ]

        for task_type in task_types:
            task_results = getattr(response[0], task_type)
            self.assertEqual(len(task_results), 1)

            results = task_results[0].results
            for r in results:
                self.assertFalse(r.is_error)
Example #9: an empty document raises HttpResponseError
    def test_empty_document_failure(self, client):
        docs = [{"id": "1", "text": ""}]

        with self.assertRaises(HttpResponseError):
            response = client.begin_analyze(
                docs,
                entities_recognition_tasks=[EntitiesRecognitionTask()],
                polling_interval=self._interval(),
            )
Example #10: bad credentials raise ClientAuthenticationError
    def test_bad_credentials(self, client):
        with self.assertRaises(ClientAuthenticationError):
            response = client.begin_analyze(
                ["This is written in English."],
                entities_recognition_tasks=[EntitiesRecognitionTask()],
                key_phrase_extraction_tasks=[KeyPhraseExtractionTask()],
                pii_entities_recognition_tasks=[PiiEntitiesRecognitionTask()],
                polling_interval=self._interval(),
            )
Example #11: an empty input list raises ValueError
    def test_missing_input_records_error(self, client):
        docs = []
        with pytest.raises(ValueError) as excinfo:
            client.begin_analyze(
                docs,
                entities_recognition_tasks=[EntitiesRecognitionTask()],
                key_phrase_extraction_tasks=[KeyPhraseExtractionTask()],
                pii_entities_recognition_tasks=[PiiEntitiesRecognitionTask()],
                polling_interval=self._interval(),
            )
        assert "Input documents can not be empty or None" in str(excinfo.value)
Example #12: passing a dict instead of a list raises TypeError
    def test_not_passing_list_for_docs(self, client):
        docs = {"id": "1", "text": "hello world"}
        with pytest.raises(TypeError) as excinfo:
            client.begin_analyze(
                docs,
                entities_recognition_tasks=[EntitiesRecognitionTask()],
                key_phrase_extraction_tasks=[KeyPhraseExtractionTask()],
                pii_entities_recognition_tasks=[PiiEntitiesRecognitionTask()],
                polling_interval=self._interval(),
            )
        assert "Input documents cannot be a dict" in str(excinfo.value)
Example #13: passing a bare string raises TypeError
    def test_bad_document_input(self, client):
        docs = "This is the wrong type"

        with self.assertRaises(TypeError):
            response = client.begin_analyze(
                docs,
                entities_recognition_tasks=[EntitiesRecognitionTask()],
                key_phrase_extraction_tasks=[KeyPhraseExtractionTask()],
                pii_entities_recognition_tasks=[PiiEntitiesRecognitionTask()],
                polling_interval=self._interval(),
            )
Example #14: passing a custom cls callback
    def test_pass_cls(self, client):
        def callback(pipeline_response, deserialized, _):
            return "cls result"

        res = client.begin_analyze(
            documents=["Test passing cls to endpoint"],
            entities_recognition_tasks=[EntitiesRecognitionTask()],
            key_phrase_extraction_tasks=[KeyPhraseExtractionTask()],
            pii_entities_recognition_tasks=[PiiEntitiesRecognitionTask()],
            cls=callback,
            polling_interval=self._interval(),
        ).result()
        assert res == "cls result"
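
The `cls` hook receives the pipeline response, the deserialized result, and the response headers, and whatever it returns becomes the poller's result. Here is a sketch of a callback that keeps the deserialized result while also capturing the raw status code; the function name and usage are illustrative, not from the original test.

def keep_raw(pipeline_response, deserialized, response_headers):
    # Return both the normal result and the underlying HTTP status code.
    return deserialized, pipeline_response.http_response.status_code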
Example #15: exceeding the 25-document limit
    def test_too_many_documents(self, client):
        # Maximum number of documents per request is 25
        docs = list(itertools.repeat("input document", 26))

        with pytest.raises(HttpResponseError) as excinfo:
            client.begin_analyze(
                docs,
                entities_recognition_tasks=[EntitiesRecognitionTask()],
                key_phrase_extraction_tasks=[KeyPhraseExtractionTask()],
                pii_entities_recognition_tasks=[PiiEntitiesRecognitionTask()],
                polling_interval=self._interval(),
            )
        assert excinfo.value.status_code == 400
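
Outside of a test, the 400 response exercised above would normally be caught rather than asserted on. Below is a brief sketch of that handling, reusing the client and task classes shown in these examples; the surrounding code is hypothetical.

from azure.core.exceptions import HttpResponseError

try:
    poller = client.begin_analyze(
        docs,
        entities_recognition_tasks=[EntitiesRecognitionTask()],
    )
    result = poller.result()
except HttpResponseError as error:
    # The service rejects batches that exceed the per-request document limit with a 400 response.
    print("Analyze request failed ({}): {}".format(error.status_code, error.message))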
Example #16: dict inputs with a single entities recognition task
    def test_all_successful_passing_dict_entities_task(self, client):
        docs = [{
            "id": "1",
            "language": "en",
            "text": "Microsoft was founded by Bill Gates and Paul Allen on April 4, 1975."
        }, {
            "id": "2",
            "language": "es",
            "text": "Microsoft fue fundado por Bill Gates y Paul Allen el 4 de abril de 1975."
        }, {
            "id": "3",
            "language": "de",
            "text": "Microsoft wurde am 4. April 1975 von Bill Gates und Paul Allen gegründet."
        }]

        response = client.begin_analyze(
            docs,
            entities_recognition_tasks=[EntitiesRecognitionTask()],
            show_stats=True,
            polling_interval=self._interval(),
        ).result()

        results_pages = list(response)
        self.assertEqual(len(results_pages), 1)

        task_results = results_pages[0].entities_recognition_results
        self.assertEqual(len(task_results), 1)

        results = task_results[0].results
        self.assertEqual(len(results), 3)

        for doc in results:
            self.assertEqual(len(doc.entities), 4)
            self.assertIsNotNone(doc.id)
            # self.assertIsNotNone(doc.statistics)
            for entity in doc.entities:
                self.assertIsNotNone(entity.text)
                self.assertIsNotNone(entity.category)
                self.assertIsNotNone(entity.offset)
                self.assertIsNotNone(entity.confidence_score)
Example #17: bad model version on a single task
    def test_bad_model_version_error_single_task(
            self, client):  # TODO: verify behavior of service
        docs = [{
            "id": "1",
            "language": "english",
            "text": "I did not like the hotel we stayed at."
        }]

        with self.assertRaises(HttpResponseError):
            result = client.begin_analyze(
                docs,
                entities_recognition_tasks=[
                    EntitiesRecognitionTask(model_version="bad")
                ],
                polling_interval=self._interval(),
            ).result()
Example #18: show_stats and model_version across multiple tasks
    def test_show_stats_and_model_version_multiple_tasks(self, client):
        docs = [{
            "id": "56",
            "text": ":)"
        }, {
            "id": "0",
            "text": ":("
        }, {
            "id": "19",
            "text": ":P"
        }, {
            "id": "1",
            "text": ":D"
        }]

        response = client.begin_analyze(
            docs,
            entities_recognition_tasks=[
                EntitiesRecognitionTask(model_version="latest")
            ],
            key_phrase_extraction_tasks=[
                KeyPhraseExtractionTask(model_version="latest")
            ],
            pii_entities_recognition_tasks=[
                PiiEntitiesRecognitionTask(model_version="latest")
            ],
            show_stats=True,
            polling_interval=self._interval(),
        ).result()

        results_pages = list(response)
        self.assertEqual(len(results_pages), 1)

        task_types = [
            "entities_recognition_results", "key_phrase_extraction_results",
            "pii_entities_recognition_results"
        ]

        for task_type in task_types:
            task_results = getattr(results_pages[0], task_type)
            self.assertEqual(len(task_results), 1)

            results = task_results[0].results
            self.assertEqual(len(results), len(docs))
Example #19: out-of-order document IDs across multiple tasks
    def test_out_of_order_ids_multiple_tasks(self, client):
        docs = [{
            "id": "56",
            "text": ":)"
        }, {
            "id": "0",
            "text": ":("
        }, {
            "id": "19",
            "text": ":P"
        }, {
            "id": "1",
            "text": ":D"
        }]

        response = client.begin_analyze(
            docs,
            entities_recognition_tasks=[
                EntitiesRecognitionTask(model_version="bad")
            ],  # at this moment this should cause all documents to be errors, which isn't correct behavior but I'm using it here to test document ordering with errors.  :)
            key_phrase_extraction_tasks=[KeyPhraseExtractionTask()],
            pii_entities_recognition_tasks=[PiiEntitiesRecognitionTask()],
            polling_interval=self._interval(),
        ).result()

        results_pages = list(response)
        self.assertEqual(len(results_pages), 1)

        task_types = [
            "entities_recognition_results", "key_phrase_extraction_results",
            "pii_entities_recognition_results"
        ]

        in_order = ["56", "0", "19", "1"]

        for task_type in task_types:
            task_results = getattr(results_pages[0], task_type)
            self.assertEqual(len(task_results), 1)

            results = task_results[0].results
            self.assertEqual(len(results), len(docs))

            for idx, resp in enumerate(results):
                self.assertEqual(resp.id, in_order[idx])
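
Because the service echoes each document's id, results can also be joined back to the inputs by id regardless of ordering. A sketch under the result shape shown above; the `docs_by_id` mapping is illustrative, not part of the original test.

docs_by_id = {doc["id"]: doc for doc in docs}

for task_type in task_types:
    for resp in getattr(results_pages[0], task_type)[0].results:
        original = docs_by_id[resp.id]  # each result carries the id of the document it was produced for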
Example #20: duplicate document IDs raise HttpResponseError
    def test_duplicate_ids_error(self, client):  # TODO: verify behavior of service
        # Duplicate Ids
        docs = [{
            "id": "1",
            "text": "hello world"
        }, {
            "id": "1",
            "text": "I did not like the hotel we stayed at."
        }]

        with self.assertRaises(HttpResponseError):
            result = client.begin_analyze(
                docs,
                entities_recognition_tasks=[EntitiesRecognitionTask()],
                key_phrase_extraction_tasks=[KeyPhraseExtractionTask()],
                pii_entities_recognition_tasks=[PiiEntitiesRecognitionTask()],
                polling_interval=self._interval(),
            ).result()
Example #21: TextDocumentInput objects with a single entities recognition task
    def test_all_successful_passing_text_document_input_entities_task(
            self, client):
        docs = [
            TextDocumentInput(
                id="1",
                text="Microsoft was founded by Bill Gates and Paul Allen on April 4, 1975.",
                language="en"),
            TextDocumentInput(
                id="2",
                text="Microsoft fue fundado por Bill Gates y Paul Allen el 4 de abril de 1975.",
                language="es"),
            TextDocumentInput(
                id="3",
                text="Microsoft wurde am 4. April 1975 von Bill Gates und Paul Allen gegründet.",
                language="de")
        ]

        response = client.begin_analyze(
            docs,
            entities_recognition_tasks=[EntitiesRecognitionTask()],
            polling_interval=self._interval(),
        ).result()

        results_pages = list(response)
        self.assertEqual(len(results_pages), 1)

        task_results = results_pages[0].entities_recognition_results
        self.assertEqual(len(task_results), 1)

        results = task_results[0].results
        self.assertEqual(len(results), 3)

        for doc in results:
            self.assertEqual(len(doc.entities), 4)
            self.assertIsNotNone(doc.id)
            for entity in doc.entities:
                self.assertIsNotNone(entity.text)
                self.assertIsNotNone(entity.category)
                self.assertIsNotNone(entity.offset)
                self.assertIsNotNone(entity.confidence_score)
Example #22: mixing input types raises TypeError
    def test_mixing_inputs(self, client):
        docs = [
            {
                "id": "1",
                "text": "Microsoft was founded by Bill Gates and Paul Allen."
            },
            TextDocumentInput(
                id="2",
                text="I did not like the hotel we stayed at. It was too expensive."),
            u"You cannot mix string input with the above inputs"
        ]
        with self.assertRaises(TypeError):
            response = client.begin_analyze(
                docs,
                entities_recognition_tasks=[EntitiesRecognitionTask()],
                key_phrase_extraction_tasks=[KeyPhraseExtractionTask()],
                pii_entities_recognition_tasks=[PiiEntitiesRecognitionTask()],
                polling_interval=self._interval(),
            ).result()
Example #23: bad model version across multiple tasks
    def test_bad_model_version_error_multiple_tasks(
            self, client):  # TODO: verify behavior of service
        docs = [{
            "id": "1",
            "language": "english",
            "text": "I did not like the hotel we stayed at."
        }]

        response = client.begin_analyze(
            docs,
            entities_recognition_tasks=[
                EntitiesRecognitionTask(model_version="latest")
            ],
            key_phrase_extraction_tasks=[
                KeyPhraseExtractionTask(model_version="bad")
            ],
            pii_entities_recognition_tasks=[
                PiiEntitiesRecognitionTask(model_version="bad")
            ],
            polling_interval=self._interval(),
        ).result()

        results_pages = list(response)
        self.assertEqual(len(results_pages), 1)

        task_types = [
            "entities_recognition_results", "key_phrase_extraction_results",
            "pii_entities_recognition_results"
        ]

        for task_type in task_types:
            tasks = getattr(
                results_pages[0],
                task_type)  # only expecting a single page of results here
            self.assertEqual(len(tasks), 1)

            for r in tasks[0].results:
                self.assertTrue(
                    r.is_error
                )  # This is not the optimal way to represent this failure.  We are discussing a solution with the service team.
Example #24: multiple pages of results
    def test_multiple_pages_of_results_returned_successfully(self, client):
        single_doc = "hello world"
        docs = [
            {"id": str(idx), "text": val}
            for idx, val in enumerate(itertools.repeat(single_doc, 25))
        ]  # maximum number of documents per request is 25

        result = client.begin_analyze(
            docs,
            entities_recognition_tasks=[EntitiesRecognitionTask()],
            key_phrase_extraction_tasks=[KeyPhraseExtractionTask()],
            pii_entities_recognition_tasks=[PiiEntitiesRecognitionTask()],
            show_stats=True,
            polling_interval=self._interval(),
        ).result()

        pages = list(result)
        self.assertEqual(len(pages), 2)  # default page size is 20

        # self.assertIsNotNone(result.statistics)  # statistics not working at the moment, but a bug has been filed on the service to correct this.

        task_types = [
            "entities_recognition_results", "key_phrase_extraction_results",
            "pii_entities_recognition_results"
        ]

        expected_results_per_page = [20, 5]

        for idx, page in enumerate(pages):
            for task_type in task_types:
                task_results = getattr(page, task_type)
                self.assertEqual(len(task_results), 1)

                results = task_results[0].results
                self.assertEqual(len(results), expected_results_per_page[idx])

                for doc in results:
                    self.assertFalse(doc.is_error)
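
The per-page, per-task, per-document walk used in Example #24 recurs in most of these tests, so it can be flattened with a small generator. This is a sketch under the same result shape shown above; the helper name is hypothetical.

def iter_task_documents(analyze_result, task_attr):
    # `analyze_result` is the iterable of result pages returned by poller.result();
    # `task_attr` is one of the *_results attributes, e.g. "entities_recognition_results".
    for page in analyze_result:
        for task_result in getattr(page, task_attr):
            for doc in task_result.results:
                yield doc

# Hypothetical usage:
# for doc in iter_task_documents(result, "key_phrase_extraction_results"):
#     print(doc.id, doc.is_error)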
Example #25: batch-level language hint combined with per-dict hints
    def test_whole_batch_language_hint_and_dict_per_item_hints(self, client):
        docs = [{
            "id": "1",
            "language": "en",
            "text": "I will go to the park."
        }, {
            "id": "2",
            "language": "en",
            "text": "I did not like the hotel we stayed at."
        }, {
            "id": "3",
            "text": "The restaurant had really good food."
        }]

        response = list(
            client.begin_analyze(
                docs,
                entities_recognition_tasks=[EntitiesRecognitionTask()],
                key_phrase_extraction_tasks=[KeyPhraseExtractionTask()],
                pii_entities_recognition_tasks=[PiiEntitiesRecognitionTask()],
                language="en",
                polling_interval=self._interval(),
            ).result())

        task_types = [
            "entities_recognition_results", "key_phrase_extraction_results",
            "pii_entities_recognition_results"
        ]

        for task_type in task_types:
            task_results = getattr(response[0], task_type)
            self.assertEqual(len(task_results), 1)

            results = task_results[0].results
            for r in results:
                self.assertFalse(r.is_error)
Example #26: async analyze sample with multiple tasks
    async def analyze_async(self):
        # [START analyze_async]
        from azure.core.credentials import AzureKeyCredential
        from azure.ai.textanalytics.aio import TextAnalyticsClient
        from azure.ai.textanalytics import EntitiesRecognitionTask, \
            PiiEntitiesRecognitionTask, \
            KeyPhraseExtractionTask

        endpoint = os.environ["AZURE_TEXT_ANALYTICS_ENDPOINT"]
        key = os.environ["AZURE_TEXT_ANALYTICS_KEY"]

        text_analytics_client = TextAnalyticsClient(
            endpoint=endpoint,
            credential=AzureKeyCredential(key),
        )

        documents = [
            "We went to Contoso Steakhouse located at midtown NYC last week for a dinner party, and we adore the spot! \
            They provide marvelous food and they have a great menu. The chief cook happens to be the owner (I think his name is John Doe) \
            and he is super nice, coming out of the kitchen and greeted us all. We enjoyed very much dining in the place! \
            The Sirloin steak I ordered was tender and juicy, and the place was impeccably clean. You can even pre-order from their \
            online menu at www.contososteakhouse.com, call 312-555-0176 or send email to [email protected]! \
            The only complaint I have is the food didn't come fast enough. Overall I highly recommend it!"
        ]

        async with text_analytics_client:
            poller = await text_analytics_client.begin_analyze(
                documents,
                display_name="Sample Text Analysis",
                entities_recognition_tasks=[EntitiesRecognitionTask()],
                pii_entities_recognition_tasks=[PiiEntitiesRecognitionTask()],
                key_phrase_extraction_tasks=[KeyPhraseExtractionTask()])

            result = await poller.result()

            async for page in result:
                for task in page.entities_recognition_results:
                    print("Results of Entities Recognition task:")

                    docs = [doc for doc in task.results if not doc.is_error]
                    for idx, doc in enumerate(docs):
                        print("\nDocument text: {}".format(documents[idx]))
                        for entity in doc.entities:
                            print("Entity: {}".format(entity.text))
                            print("...Category: {}".format(entity.category))
                            print("...Confidence Score: {}".format(
                                entity.confidence_score))
                            print("...Offset: {}".format(entity.offset))
                        print("------------------------------------------")

                for task in page.pii_entities_recognition_results:
                    print("Results of PII Entities Recognition task:")

                    docs = [doc for doc in task.results if not doc.is_error]
                    for idx, doc in enumerate(docs):
                        print("Document text: {}".format(documents[idx]))
                        for entity in doc.entities:
                            print("Entity: {}".format(entity.text))
                            print("Category: {}".format(entity.category))
                            print("Confidence Score: {}\n".format(
                                entity.confidence_score))
                        print("------------------------------------------")

                for task in page.key_phrase_extraction_results:
                    print("Results of Key Phrase Extraction task:")

                    docs = [doc for doc in task.results if not doc.is_error]
                    for idx, doc in enumerate(docs):
                        print("Document text: {}\n".format(documents[idx]))
                        print("Key Phrases: {}\n".format(doc.key_phrases))
                        print("------------------------------------------")