    async def recognize_entities_async(self):
        # [START batch_recognize_entities_async]
        from azure.ai.textanalytics.aio import TextAnalyticsClient
        from azure.ai.textanalytics import TextAnalyticsApiKeyCredential
        text_analytics_client = TextAnalyticsClient(
            endpoint=self.endpoint,
            credential=TextAnalyticsApiKeyCredential(self.key))
        documents = [
            "Microsoft was founded by Bill Gates and Paul Allen.",
            "I had a wonderful trip to Seattle last week.",
            "I visited the Space Needle 2 times.",
        ]

        async with text_analytics_client:
            result = await text_analytics_client.recognize_entities(documents)

        docs = [doc for doc in result if not doc.is_error]

        for idx, doc in enumerate(docs):
            print("\nDocument text: {}".format(documents[idx]))
            for entity in doc.entities:
                print("Entity: \t", entity.text, "\tCategory: \t",
                      entity.category, "\tConfidence Score: \t",
                      round(entity.score, 3))
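# As a usage note: these method-style samples need an event loop to run. Below
# is a minimal, self-contained driver sketch, not part of the original sample;
# it assumes the environment variables used elsewhere in these snippets, and
# uses AzureKeyCredential, the GA-era replacement for TextAnalyticsApiKeyCredential.
import asyncio
import os

from azure.core.credentials import AzureKeyCredential
from azure.ai.textanalytics.aio import TextAnalyticsClient

async def run_recognize_entities_demo():
    # Build the client from the standard sample environment variables.
    client = TextAnalyticsClient(
        endpoint=os.environ["AZURE_TEXT_ANALYTICS_ENDPOINT"],
        credential=AzureKeyCredential(os.environ["AZURE_TEXT_ANALYTICS_KEY"]))
    async with client:
        results = await client.recognize_entities(
            ["Microsoft was founded by Bill Gates and Paul Allen."])
    for doc in results:
        if not doc.is_error:
            for entity in doc.entities:
                print(entity.text, entity.category)

if __name__ == "__main__":
    asyncio.run(run_recognize_entities_demo())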
    async def health_with_cancellation_async(self):
        # [START health_with_cancellation_async]
        from azure.core.credentials import AzureKeyCredential
        from azure.ai.textanalytics.aio import TextAnalyticsClient

        endpoint = os.environ["AZURE_TEXT_ANALYTICS_ENDPOINT"]
        key = os.environ["AZURE_TEXT_ANALYTICS_KEY"]

        text_analytics_client = TextAnalyticsClient(
            endpoint=endpoint,
            credential=AzureKeyCredential(key),
            api_version="v3.1-preview.3")

        documents = [
            "RECORD #333582770390100 | MH | 85986313 | | 054351 | 2/14/2001 12:00:00 AM | \
            CORONARY ARTERY DISEASE | Signed | DIS | Admission Date: 5/22/2001 \
            Report Status: Signed Discharge Date: 4/24/2001 ADMISSION DIAGNOSIS: \
            CORONARY ARTERY DISEASE. HISTORY OF PRESENT ILLNESS: \
            The patient is a 54-year-old gentleman with a history of progressive angina over the past several months. \
            The patient had a cardiac catheterization in July of this year revealing total occlusion of the RCA and \
            50% left main disease , with a strong family history of coronary artery disease with a brother dying at \
            the age of 52 from a myocardial infarction and another brother who is status post coronary artery bypass grafting. \
            The patient had a stress echocardiogram done on July , 2001 , which showed no wall motion abnormalities ,\
            but this was a difficult study due to body habitus. The patient went for six minutes with minimal ST depressions \
            in the anterior lateral leads , thought due to fatigue and wrist pain , his anginal equivalent. Due to the patient's \
            increased symptoms and family history and history left main disease with total occasional of his RCA was referred \
            for revascularization with open heart surgery."
        ]

        async with text_analytics_client:
            poller = await text_analytics_client.begin_analyze_healthcare(
                documents)
            cancellation_poller = await text_analytics_client.begin_cancel_analyze_healthcare(
                poller)
            # Wait inside the client context so the transport is still open.
            await cancellation_poller.wait()
        # [END health_with_cancellation_async]
    async def test_all_successful_passing_dict(self, resource_group, location,
                                               text_analytics_account,
                                               text_analytics_account_key):
        text_analytics = TextAnalyticsClient(
            text_analytics_account,
            TextAnalyticsApiKeyCredential(text_analytics_account_key))

        docs = [{
            "id": "1",
            "text": "I should take my cat to the veterinarian."
        }, {
            "id": "2",
            "text": "Este es un document escrito en Español."
        }, {
            "id": "3",
            "text": "猫は幸せ"
        }, {
            "id": "4",
            "text": "Fahrt nach Stuttgart und dann zum Hotel zu Fu."
        }]

        response = await text_analytics.detect_language(docs, show_stats=True)

        self.assertEqual(response[0].primary_language.name, "English")
        self.assertEqual(response[1].primary_language.name, "Spanish")
        self.assertEqual(response[2].primary_language.name, "Japanese")
        self.assertEqual(response[3].primary_language.name, "German")
        self.assertEqual(response[0].primary_language.iso6391_name, "en")
        self.assertEqual(response[1].primary_language.iso6391_name, "es")
        self.assertEqual(response[2].primary_language.iso6391_name, "ja")
        self.assertEqual(response[3].primary_language.iso6391_name, "de")

        for doc in response:
            self.assertIsNotNone(doc.id)
            self.assertIsNotNone(doc.statistics)
            self.assertIsNotNone(doc.primary_language.score)
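        # Hedged illustration, continuing in the same async context: dict
        # inputs to detect_language can also carry a per-document
        # "country_hint" (the service accepts "none" to disable hinting).
        # The hint values below are assumptions for demonstration.
        docs_with_hints = [
            {"id": "1", "country_hint": "US",
             "text": "I should take my cat to the veterinarian."},
            {"id": "2", "country_hint": "none",
             "text": "Este es un document escrito en Español."},
        ]
        hinted = await text_analytics.detect_language(docs_with_hints)
        for doc in hinted:
            if not doc.is_error:
                print(doc.id, doc.primary_language.name)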
# Example 4
    async def test_rotate_subscription_key(self, resource_group, location,
                                           text_analytics_account,
                                           text_analytics_account_key):

        credential = AzureKeyCredential(text_analytics_account_key)
        client = TextAnalyticsClient(
            text_analytics_account,
            credential,
            api_version=TextAnalyticsApiVersion.V3_1_PREVIEW_3)

        docs = [{
            "id": "1",
            "text": "I will go to the park."
        }, {
            "id": "2",
            "text": "I did not like the hotel we stayed at."
        }, {
            "id": "3",
            "text": "The restaurant had really good food."
        }]

        async with client:
            response = await (await client.begin_analyze_healthcare(
                docs, polling_interval=self._interval())).result()
            self.assertIsNotNone(response)

            credential.update("xxx")  # Make authentication fail
            with self.assertRaises(ClientAuthenticationError):
                response = await (await client.begin_analyze_healthcare(
                    docs, polling_interval=self._interval())).result()

            credential.update(
                text_analytics_account_key)  # Authenticate successfully again
            response = await (await client.begin_analyze_healthcare(
                docs, polling_interval=self._interval())).result()
            self.assertIsNotNone(response)
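# The rotation pattern above is not test-specific: azure-core's
# AzureKeyCredential.update() swaps the key in place, and every client holding
# the credential picks up the new value on its next request. A minimal sketch;
# the second environment variable is a hypothetical placeholder for the
# rotated key, not a name the SDK defines.
import os
from azure.core.credentials import AzureKeyCredential
from azure.ai.textanalytics.aio import TextAnalyticsClient

credential = AzureKeyCredential(os.environ["AZURE_TEXT_ANALYTICS_KEY"])
client = TextAnalyticsClient(
    os.environ["AZURE_TEXT_ANALYTICS_ENDPOINT"], credential)

# ...later, after the service-side key has been regenerated:
credential.update(os.environ["AZURE_TEXT_ANALYTICS_KEY_2"])  # hypothetical second key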
    async def test_input_with_all_errors(self, resource_group, location,
                                         text_analytics_account,
                                         text_analytics_account_key):
        text_analytics = TextAnalyticsClient(
            text_analytics_account,
            TextAnalyticsApiKeyCredential(text_analytics_account_key))

        docs = [{
            "id": "1",
            "text": ""
        }, {
            "id": "2",
            "language": "Spanish",
            "text": "Hola"
        }, {
            "id": "3",
            "language": "de",
            "text": ""
        }]

        response = await text_analytics.recognize_entities(docs)
        self.assertTrue(response[0].is_error)
        self.assertTrue(response[1].is_error)
        self.assertTrue(response[2].is_error)
    async def test_all_successful_passing_text_document_input(
            self, resource_group, location, text_analytics_account,
            text_analytics_account_key):
        text_analytics = TextAnalyticsClient(
            text_analytics_account,
            TextAnalyticsApiKeyCredential(text_analytics_account_key))

        docs = [
            TextDocumentInput(id="1", text="My SSN is 555-55-5555."),
            TextDocumentInput(
                id="2",
                text=
                "Your ABA number - 111000025 - is the first 9 digits in the lower left hand corner of your personal check."
            ),
            TextDocumentInput(
                id="3", text="Is 998.214.865-68 your Brazilian CPF number?")
        ]

        response = await text_analytics.recognize_pii_entities(docs,
                                                               show_stats=True)
        self.assertEqual(response[0].entities[0].text, "555-55-5555")
        self.assertEqual(response[0].entities[0].category,
                         "U.S. Social Security Number (SSN)")
        self.assertEqual(response[1].entities[0].text, "111000025")
        # self.assertEqual(response[1].entities[0].category, "ABA Routing Number")  # Service is currently returning PhoneNumber here
        self.assertEqual(response[2].entities[0].text, "998.214.865-68")
        self.assertEqual(response[2].entities[0].category, "Brazil CPF Number")
        for doc in response:
            self.assertIsNotNone(doc.id)
            self.assertIsNotNone(doc.statistics)
            for entity in doc.entities:
                self.assertIsNotNone(entity.text)
                self.assertIsNotNone(entity.category)
                self.assertIsNotNone(entity.grapheme_offset)
                self.assertIsNotNone(entity.grapheme_length)
                self.assertIsNotNone(entity.confidence_score)
    async def test_whole_batch_country_hint_and_dict_input_async(
            self, resource_group, location, text_analytics_account,
            text_analytics_account_key):
        text_analytics = TextAnalyticsClient(text_analytics_account,
                                             text_analytics_account_key)

        def callback(resp):
            country_str = "\"countryHint\": \"CA\""
            country = resp.http_request.body.count(country_str)
            self.assertEqual(country, 3)

        docs = [{
            "id": "1",
            "text": "I will go to the park."
        }, {
            "id": "2",
            "text": "I did not like the hotel we stayed it."
        }, {
            "id": "3",
            "text": "The restaurant had really good food."
        }]

        response = await text_analytics.detect_languages(
            docs, country_hint="CA", response_hook=callback)
    async def analyze_sentiment_async(self):
        # [START batch_analyze_sentiment_async]
        from azure.ai.textanalytics.aio import TextAnalyticsClient
        from azure.ai.textanalytics import TextAnalyticsApiKeyCredential
        text_analytics_client = TextAnalyticsClient(endpoint=self.endpoint, credential=TextAnalyticsApiKeyCredential(self.key))
        documents = [
            "I had the best day of my life.",
            "This was a waste of my time. The speaker put me to sleep.",
            "No tengo dinero ni nada que dar...",
            "L'hôtel n'était pas très confortable. L'éclairage était trop sombre."
        ]

        async with text_analytics_client:
            result = await text_analytics_client.analyze_sentiment(documents)

        docs = [doc for doc in result if not doc.is_error]

        for idx, doc in enumerate(docs):
            print("Document text: {}".format(documents[idx]))
            print("Overall sentiment: {}".format(doc.sentiment))
        # [END batch_analyze_sentiment_async]
            print("Overall scores: positive={0:.3f}; neutral={1:.3f}; negative={2:.3f} \n".format(
                doc.document_scores.positive,
                doc.document_scores.neutral,
                doc.document_scores.negative,
            ))
            for idx, sentence in enumerate(doc.sentences):
                print("Sentence {} sentiment: {}".format(idx+1, sentence.sentiment))
                print("Sentence score: positive={0:.3f}; neutral={1:.3f}; negative={2:.3f}".format(
                    sentence.sentence_scores.positive,
                    sentence.sentence_scores.neutral,
                    sentence.sentence_scores.negative,
                ))
                print("Offset: {}".format(sentence.offset))
                print("Length: {}\n".format(sentence.length))
            print("------------------------------------")
    async def test_document_attribute_error_no_result_attribute(
            self, resource_group, location, text_analytics_account,
            text_analytics_account_key):
        text_analytics = TextAnalyticsClient(
            text_analytics_account,
            TextAnalyticsApiKeyCredential(text_analytics_account_key))

        docs = [{"id": "1", "text": ""}]
        response = await text_analytics.extract_key_phrases(docs)

        # Attributes on DocumentError
        self.assertTrue(response[0].is_error)
        self.assertEqual(response[0].id, "1")
        self.assertIsNotNone(response[0].error)

        # Result attribute not on DocumentError, custom error message
        try:
            key_phrases = response[0].key_phrases
        except AttributeError as custom_error:
            self.assertEqual(
                custom_error.args[0],
                '\'DocumentError\' object has no attribute \'key_phrases\'. '
                'The service was unable to process this document:\nDocument Id: 1\nError: '
                'invalidDocument - Document text is empty.\n')
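        # Hedged sketch of the intended calling pattern: branch on is_error
        # before touching result-only attributes, instead of relying on the
        # AttributeError safety net exercised above. The second document is an
        # illustrative addition.
        response = await text_analytics.extract_key_phrases(
            [{"id": "1", "text": ""}, {"id": "2", "text": "The sun is shining."}])
        for doc in response:
            if doc.is_error:
                print(doc.id, doc.error.code, doc.error.message)
            else:
                print(doc.id, doc.key_phrases)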
    async def test_whole_batch_language_hint_and_dict_input_async(
            self, resource_group, location, text_analytics_account,
            text_analytics_account_key):
        text_analytics = TextAnalyticsClient(text_analytics_account,
                                             text_analytics_account_key)

        def callback(resp):
            language_str = "\"language\": \"es\""
            language = resp.http_request.body.count(language_str)
            self.assertEqual(language, 3)

        docs = [{
            "id": "1",
            "text": "I will go to the park."
        }, {
            "id": "2",
            "text": "I did not like the hotel we stayed it."
        }, {
            "id": "3",
            "text": "The restaurant had really good food."
        }]

        response = await text_analytics.analyze_sentiment(
            docs, language="es", response_hook=callback)
    async def test_show_stats_and_model_version_async(
            self, resource_group, location, text_analytics_account,
            text_analytics_account_key):
        text_analytics = TextAnalyticsClient(text_analytics_account,
                                             text_analytics_account_key)

        def callback(response):
            self.assertIsNotNone(response.model_version)
            self.assertIsNotNone(response.raw_response)
            self.assertEqual(response.statistics.document_count, 5)
            self.assertEqual(response.statistics.transaction_count, 4)
            self.assertEqual(response.statistics.valid_document_count, 4)
            self.assertEqual(response.statistics.erroneous_document_count, 1)

        docs = [{
            "id": "56",
            "text": ":)"
        }, {
            "id": "0",
            "text": ":("
        }, {
            "id": "22",
            "text": ""
        }, {
            "id": "19",
            "text": ":P"
        }, {
            "id": "1",
            "text": ":D"
        }]

        response = await text_analytics.analyze_sentiment(
            docs,
            show_stats=True,
            model_version="latest",
            response_hook=callback)
    async def test_batch_size_over_limit_async(self, resource_group, location, cognitiveservices_account, cognitiveservices_account_key):
        text_analytics = TextAnalyticsClient(cognitiveservices_account, cognitiveservices_account_key)

        docs = ["hello world"] * 1050
        with self.assertRaises(HttpResponseError):
            response = await text_analytics.detect_languages(docs)
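# The error above comes from exceeding the service's per-request document
# limit. A client-side chunking sketch follows; the limit constant is an
# assumption (check the service limits for your endpoint and API version),
# and detect_languages mirrors the older method name used in this test.
MAX_DOCS_PER_REQUEST = 1000  # assumed limit, not an SDK constant

async def detect_languages_chunked(client, docs):
    # Issue one request per chunk and flatten the per-document results.
    results = []
    for start in range(0, len(docs), MAX_DOCS_PER_REQUEST):
        results.extend(
            await client.detect_languages(docs[start:start + MAX_DOCS_PER_REQUEST]))
    return results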
# Example 13
    async def analyze_async(self):
        # [START analyze_async]
        from azure.core.credentials import AzureKeyCredential
        from azure.ai.textanalytics.aio import TextAnalyticsClient
        from azure.ai.textanalytics import EntitiesRecognitionTask, \
            PiiEntitiesRecognitionTask, \
            KeyPhraseExtractionTask

        endpoint = os.environ["AZURE_TEXT_ANALYTICS_ENDPOINT"]
        key = os.environ["AZURE_TEXT_ANALYTICS_KEY"]

        text_analytics_client = TextAnalyticsClient(
            endpoint=endpoint,
            credential=AzureKeyCredential(key),
        )

        documents = [
            "We went to Contoso Steakhouse located at midtown NYC last week for a dinner party, and we adore the spot! \
            They provide marvelous food and they have a great menu. The chief cook happens to be the owner (I think his name is John Doe) \
            and he is super nice, coming out of the kitchen and greeted us all. We enjoyed very much dining in the place! \
            The Sirloin steak I ordered was tender and juicy, and the place was impeccably clean. You can even pre-order from their \
            online menu at www.contososteakhouse.com, call 312-555-0176 or send email to [email protected]! \
            The only complaint I have is the food didn't come fast enough. Overall I highly recommend it!"
        ]

        async with text_analytics_client:
            poller = await text_analytics_client.begin_analyze(
                documents,
                display_name="Sample Text Analysis",
                entities_recognition_tasks=[EntitiesRecognitionTask()],
                pii_entities_recognition_tasks=[PiiEntitiesRecognitionTask()],
                key_phrase_extraction_tasks=[KeyPhraseExtractionTask()])

            result = await poller.result()

            async for page in result:
                for task in page.entities_recognition_results:
                    print("Results of Entities Recognition task:")

                    docs = [doc for doc in task.results if not doc.is_error]
                    for idx, doc in enumerate(docs):
                        print("\nDocument text: {}".format(documents[idx]))
                        for entity in doc.entities:
                            print("Entity: {}".format(entity.text))
                            print("...Category: {}".format(entity.category))
                            print("...Confidence Score: {}".format(
                                entity.confidence_score))
                            print("...Offset: {}".format(entity.offset))
                        print("------------------------------------------")

                for task in page.pii_entities_recognition_results:
                    print("Results of PII Entities Recognition task:")

                    docs = [doc for doc in task.results if not doc.is_error]
                    for idx, doc in enumerate(docs):
                        print("Document text: {}".format(documents[idx]))
                        for entity in doc.entities:
                            print("Entity: {}".format(entity.text))
                            print("Category: {}".format(entity.category))
                            print("Confidence Score: {}\n".format(
                                entity.confidence_score))
                        print("------------------------------------------")

                for task in page.key_phrase_extraction_results:
                    print("Results of Key Phrase Extraction task:")

                    docs = [doc for doc in task.results if not doc.is_error]
                    for idx, doc in enumerate(docs):
                        print("Document text: {}\n".format(documents[idx]))
                        print("Key Phrases: {}\n".format(doc.key_phrases))
                        print("------------------------------------------")
# Example 14
    def test_none_endpoint(self, **kwargs):
        textanalytics_test_api_key = kwargs.pop("textanalytics_test_api_key")
        with pytest.raises(ValueError):
            text_analytics = TextAnalyticsClient(
                None, AzureKeyCredential(textanalytics_test_api_key))
# Example 15
    def test_none_credentials(self, **kwargs):
        textanalytics_test_endpoint = kwargs.pop("textanalytics_test_endpoint")
        with pytest.raises(ValueError):
            text_analytics = TextAnalyticsClient(textanalytics_test_endpoint,
                                                 None)
# Example 16
    async def test_no_single_input(self, resource_group, location, text_analytics_account, text_analytics_account_key):
        text_analytics = TextAnalyticsClient(text_analytics_account, AzureKeyCredential(text_analytics_account_key))
        with self.assertRaises(TypeError):
            response = await text_analytics.recognize_entities("hello world")
# Example 17
    async def test_bad_credentials(self, resource_group, location, text_analytics_account, text_analytics_account_key):
        text_analytics = TextAnalyticsClient(text_analytics_account, AzureKeyCredential("xxxxxxxxxxxx"))
        with self.assertRaises(ClientAuthenticationError):
            response = await text_analytics.recognize_entities(
                ["This is written in English."]
            )
    async def analyze_async(self):
        # [START analyze_async]
        from azure.core.credentials import AzureKeyCredential
        from azure.ai.textanalytics.aio import TextAnalyticsClient
        from azure.ai.textanalytics import (
            RecognizeEntitiesAction,
            RecognizeLinkedEntitiesAction,
            RecognizePiiEntitiesAction,
            ExtractKeyPhrasesAction,
            AnalyzeSentimentAction,
        )

        endpoint = os.environ["AZURE_TEXT_ANALYTICS_ENDPOINT"]
        key = os.environ["AZURE_TEXT_ANALYTICS_KEY"]

        text_analytics_client = TextAnalyticsClient(
            endpoint=endpoint,
            credential=AzureKeyCredential(key),
        )

        documents = [
            'We went to Contoso Steakhouse located at midtown NYC last week for a dinner party, and we adore the spot! '
            'They provide marvelous food and they have a great menu. The chief cook happens to be the owner (I think his name is John Doe) '
            'and he is super nice, coming out of the kitchen and greeted us all.',

            'We enjoyed very much dining in the place! '
            'The Sirloin steak I ordered was tender and juicy, and the place was impeccably clean. You can even pre-order from their '
            'online menu at www.contososteakhouse.com, call 312-555-0176 or send email to [email protected]! '
            'The only complaint I have is the food didn\'t come fast enough. Overall I highly recommend it!'
        ]

        async with text_analytics_client:
            poller = await text_analytics_client.begin_analyze_actions(
                documents,
                display_name="Sample Text Analysis",
                actions=[
                    RecognizeEntitiesAction(),
                    RecognizePiiEntitiesAction(),
                    ExtractKeyPhrasesAction(),
                    RecognizeLinkedEntitiesAction(),
                    AnalyzeSentimentAction()
                ])

            pages = await poller.result()

            # To enumerate / zip for async, unless you install a third-party library,
            # you have to read all of the elements into memory first.
            # If you're not looking to enumerate / zip, we recommend you just asynchronously
            # loop over it immediately, without going through this step of reading them into memory.
            document_results = []
            async for page in pages:
                document_results.append(page)

            for doc, action_results in zip(documents, document_results):
                print("\nDocument text: {}".format(doc))
                recognize_entities_result = action_results[0]
                print("...Results of Recognize Entities Action:")
                if recognize_entities_result.is_error:
                    print("...Is an error with code '{}' and message '{}'".
                          format(recognize_entities_result.code,
                                 recognize_entities_result.message))
                else:
                    for entity in recognize_entities_result.entities:
                        print("......Entity: {}".format(entity.text))
                        print(".........Category: {}".format(entity.category))
                        print(".........Confidence Score: {}".format(
                            entity.confidence_score))
                        print(".........Offset: {}".format(entity.offset))

                recognize_pii_entities_result = action_results[1]
                print("...Results of Recognize PII Entities action:")
                if recognize_pii_entities_result.is_error:
                    print("...Is an error with code '{}' and message '{}'".
                          format(recognize_pii_entities_result.code,
                                 recognize_pii_entities_result.message))
                else:
                    for entity in recognize_pii_entities_result.entities:
                        print("......Entity: {}".format(entity.text))
                        print(".........Category: {}".format(entity.category))
                        print(".........Confidence Score: {}".format(
                            entity.confidence_score))

                extract_key_phrases_result = action_results[2]
                print("...Results of Extract Key Phrases action:")
                if extract_key_phrases_result.is_error:
                    print("...Is an error with code '{}' and message '{}'".
                          format(extract_key_phrases_result.code,
                                 extract_key_phrases_result.message))
                else:
                    print("......Key Phrases: {}".format(
                        extract_key_phrases_result.key_phrases))

                recognize_linked_entities_result = action_results[3]
                print("...Results of Recognize Linked Entities action:")
                if recognize_linked_entities_result.is_error:
                    print("...Is an error with code '{}' and message '{}'".
                          format(recognize_linked_entities_result.code,
                                 recognize_linked_entities_result.message))
                else:
                    for linked_entity in recognize_linked_entities_result.entities:
                        print("......Entity name: {}".format(
                            linked_entity.name))
                        print(".........Data source: {}".format(
                            linked_entity.data_source))
                        print(".........Data source language: {}".format(
                            linked_entity.language))
                        print(".........Data source entity ID: {}".format(
                            linked_entity.data_source_entity_id))
                        print(".........Data source URL: {}".format(
                            linked_entity.url))
                        print(".........Document matches:")
                        for match in linked_entity.matches:
                            print("............Match text: {}".format(
                                match.text))
                            print("............Confidence Score: {}".format(
                                match.confidence_score))
                            print("............Offset: {}".format(
                                match.offset))
                            print("............Length: {}".format(
                                match.length))

                analyze_sentiment_result = action_results[4]
                print("...Results of Analyze Sentiment action:")
                if analyze_sentiment_result.is_error:
                    print("...Is an error with code '{}' and message '{}'".
                          format(analyze_sentiment_result.code,
                                 analyze_sentiment_result.message))
                else:
                    print("......Overall sentiment: {}".format(
                        analyze_sentiment_result.sentiment))
                    print(
                        "......Scores: positive={}; neutral={}; negative={} \n"
                        .format(
                            analyze_sentiment_result.confidence_scores.
                            positive,
                            analyze_sentiment_result.confidence_scores.neutral,
                            analyze_sentiment_result.confidence_scores.
                            negative,
                        ))
                print("------------------------------------------")
    async def test_length_with_emoji(self, resource_group, location, text_analytics_account, text_analytics_account_key):
        text_analytics = TextAnalyticsClient(text_analytics_account, TextAnalyticsApiKeyCredential(text_analytics_account_key))

        result = await text_analytics.recognize_pii_entities(["👩 SSN: 123-12-1234"])
        self.assertEqual(result[0].entities[0].grapheme_offset, 7)
        self.assertEqual(result[0].entities[0].grapheme_length, 11)
# Example 20
async def sample_analyze_healthcare_entities_async():

    print(
        "In this sample we will be combing through the prescriptions our pharmacy has fulfilled "
        "so we can catalog how much inventory we have")
    print("We start out with a list of prescription documents.")

    # [START analyze_healthcare_entities_async]
    import os
    from azure.core.credentials import AzureKeyCredential
    from azure.ai.textanalytics import HealthcareEntityRelation
    from azure.ai.textanalytics.aio import TextAnalyticsClient

    endpoint = os.environ["AZURE_TEXT_ANALYTICS_ENDPOINT"]
    key = os.environ["AZURE_TEXT_ANALYTICS_KEY"]

    text_analytics_client = TextAnalyticsClient(
        endpoint=endpoint,
        credential=AzureKeyCredential(key),
    )

    documents = [
        """
        Patient needs to take 100 mg of ibuprofen, and 3 mg of potassium. Also needs to take
        10 mg of Zocor.
        """, """
        Patient needs to take 50 mg of ibuprofen, and 2 mg of Coumadin.
        """
    ]

    async with text_analytics_client:
        poller = await text_analytics_client.begin_analyze_healthcare_entities(
            documents)
        result = await poller.result()
        docs = [doc async for doc in result if not doc.is_error]

    print("Let's first visualize the outputted healthcare result:")
    for idx, doc in enumerate(docs):
        for entity in doc.entities:
            print("Entity: {}".format(entity.text))
            print("...Normalized Text: {}".format(entity.normalized_text))
            print("...Category: {}".format(entity.category))
            print("...Subcategory: {}".format(entity.subcategory))
            print("...Offset: {}".format(entity.offset))
            print("...Confidence score: {}".format(entity.confidence_score))
            if entity.data_sources is not None:
                print("...Data Sources:")
                for data_source in entity.data_sources:
                    print("......Entity ID: {}".format(data_source.entity_id))
                    print("......Name: {}".format(data_source.name))
            if entity.assertion is not None:
                print("...Assertion:")
                print("......Conditionality: {}".format(
                    entity.assertion.conditionality))
                print("......Certainty: {}".format(entity.assertion.certainty))
                print("......Association: {}".format(
                    entity.assertion.association))
        for relation in doc.entity_relations:
            print("Relation of type: {} has the following roles".format(
                relation.relation_type))
            for role in relation.roles:
                print("...Role '{}' with entity '{}'".format(
                    role.name, role.entity.text))
        print("------------------------------------------")

    print(
        "Now, let's get all of the medication dosage relations from the documents")
    dosage_of_medication_relations = [
        entity_relation for doc in docs
        for entity_relation in doc.entity_relations
        if entity_relation.relation_type ==
        HealthcareEntityRelation.DOSAGE_OF_MEDICATION
    ]
    # [END analyze_healthcare_entities_async]

    print(
        "Now, I will create a dictionary of medication to total dosage. "
        "I will use a regex to extract the dosage amount. For simplicity's sake, I will assume "
        "all dosages are represented with numbers and are in mg.")
    import re
    from collections import defaultdict

    medication_to_dosage = defaultdict(int)

    for relation in dosage_of_medication_relations:
        # The DosageOfMedication relation should only contain the dosage and medication roles

        dosage_role = next(filter(lambda x: x.name == "Dosage",
                                  relation.roles))
        medication_role = next(
            filter(lambda x: x.name == "Medication", relation.roles))

        try:
            # find the numbers in the dosage text, e.g. "100 mg" -> 100
            dosage_value = int(re.findall(r"\d+", dosage_role.entity.text)[0])
            medication_to_dosage[medication_role.entity.text] += dosage_value
        except IndexError:
            # skip dosages that are not expressed with a number
            pass

    for medication, dosage in medication_to_dosage.items():
        print("We have fulfilled '{}' total mg of '{}'".format(
            dosage, medication))
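    # Hedged standalone check of the dosage-parsing step above, run against
    # illustrative inputs (not part of the original sample):
    for sample_text in ["100 mg", "2 mg", "a tablet"]:
        numbers = re.findall(r"\d+", sample_text)
        if numbers:
            print("'{}' -> {} mg".format(sample_text, int(numbers[0])))
        else:
            print("'{}' -> no numeric dosage found".format(sample_text))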
    async def analyze_async(self):
        # [START analyze_async]
        from azure.core.credentials import AzureKeyCredential
        from azure.ai.textanalytics.aio import TextAnalyticsClient
        from azure.ai.textanalytics import (
            RecognizeEntitiesAction,
            RecognizePiiEntitiesAction,
            ExtractKeyPhrasesAction,
            AnalyzeBatchActionsType
        )

        endpoint = os.environ["AZURE_TEXT_ANALYTICS_ENDPOINT"]
        key = os.environ["AZURE_TEXT_ANALYTICS_KEY"]

        text_analytics_client = TextAnalyticsClient(
            endpoint=endpoint,
            credential=AzureKeyCredential(key),
        )

        documents = [
            "We went to Contoso Steakhouse located at midtown NYC last week for a dinner party, and we adore the spot! \
            They provide marvelous food and they have a great menu. The chief cook happens to be the owner (I think his name is John Doe) \
            and he is super nice, coming out of the kitchen and greeted us all. We enjoyed very much dining in the place! \
            The Sirloin steak I ordered was tender and juicy, and the place was impeccably clean. You can even pre-order from their \
            online menu at www.contososteakhouse.com, call 312-555-0176 or send email to [email protected]! \
            The only complaint I have is the food didn't come fast enough. Overall I highly recommend it!"
        ]

        async with text_analytics_client:
            poller = await text_analytics_client.begin_analyze_batch_actions(
                documents,
                display_name="Sample Text Analysis",
                actions=[
                    RecognizeEntitiesAction(),
                    RecognizePiiEntitiesAction(),
                    ExtractKeyPhrasesAction()
                ]
            )

            result = await poller.result()

            async for action_result in result:
                if action_result.is_error:
                    raise ValueError(
                        "Action has failed with message: {}".format(
                            action_result.error.message
                        )
                    )
                if action_result.action_type == AnalyzeBatchActionsType.RECOGNIZE_ENTITIES:
                    print("Results of Entities Recognition action:")
                    for idx, doc in enumerate(action_result.document_results):
                        print("\nDocument text: {}".format(documents[idx]))
                        for entity in doc.entities:
                            print("Entity: {}".format(entity.text))
                            print("...Category: {}".format(entity.category))
                            print("...Confidence Score: {}".format(entity.confidence_score))
                            print("...Offset: {}".format(entity.offset))
                        print("------------------------------------------")

                if action_result.action_type == AnalyzeBatchActionsType.RECOGNIZE_PII_ENTITIES:
                    print("Results of PII Entities Recognition action:")
                    for idx, doc in enumerate(action_result.document_results):
                        print("Document text: {}".format(documents[idx]))
                        for entity in doc.entities:
                            print("Entity: {}".format(entity.text))
                            print("Category: {}".format(entity.category))
                            print("Confidence Score: {}\n".format(entity.confidence_score))
                        print("------------------------------------------")

                if action_result.action_type == AnalyzeBatchActionsType.EXTRACT_KEY_PHRASES:
                    print("Results of Key Phrase Extraction action:")
                    for idx, doc in enumerate(action_result.document_results):
                        print("Document text: {}\n".format(documents[idx]))
                        print("Key Phrases: {}\n".format(doc.key_phrases))
                        print("------------------------------------------")
    def test_none_endpoint(self, resource_group, location,
                           text_analytics_account, text_analytics_account_key):
        with self.assertRaises(ValueError):
            text_analytics = TextAnalyticsClient(
                None, AzureKeyCredential(text_analytics_account_key))
# Example 23
async def sample_analyze_sentiment_with_opinion_mining():
    from azure.core.credentials import AzureKeyCredential
    from azure.ai.textanalytics.aio import TextAnalyticsClient

    endpoint = os.environ["AZURE_TEXT_ANALYTICS_ENDPOINT"]
    key = os.environ["AZURE_TEXT_ANALYTICS_KEY"]

    text_analytics_client = TextAnalyticsClient(
        endpoint=endpoint, credential=AzureKeyCredential(key))

    print(
        "In this sample we will act as a hotel owner going through reviews of their hotel to find complaints."
    )

    print(
        "I first found a handful of reviews for my hotel. Let's see what we have to improve."
    )

    documents = [
        """
        The food and service were unacceptable, but the concierge were nice.
        After talking to them about the quality of the food and the process to get room service they refunded
        the money we spent at the restaurant and gave us a voucher for nearby restaurants.
        """, """
        The rooms were beautiful. The AC was good and quiet, which was key for us as outside it was 100F and our baby
        was getting uncomfortable because of the heat. The breakfast was good too with good options and good servicing times.
        The thing we didn't like was that the toilet in our bathroom was smelly. It could have been that the toilet was broken before we arrived.
        Either way it was very uncomfortable. Once we notified the staff, they came and cleaned it and left candles.
        """, """
        Nice rooms! I had a great unobstructed view of the Microsoft campus but bathrooms were old and the toilet was dirty when we arrived.
        It was close to bus stops and groceries stores. If you want to be close to campus I will recommend it, otherwise, might be better to stay in a cleaner one
        """
    ]

    async with text_analytics_client:
        # show_opinion_mining is required for sentence.mined_opinions below to be populated
        result = await text_analytics_client.analyze_sentiment(documents, show_opinion_mining=True)
    doc_result = [doc for doc in result if not doc.is_error]

    print("\nLet's first see the general sentiment of each of these reviews")
    positive_reviews = [
        doc for doc in doc_result if doc.sentiment == "positive"
    ]
    mixed_reviews = [doc for doc in doc_result if doc.sentiment == "mixed"]
    negative_reviews = [
        doc for doc in doc_result if doc.sentiment == "negative"
    ]
    print(
        "...We have {} positive reviews, {} mixed reviews, and {} negative reviews. "
        .format(len(positive_reviews), len(mixed_reviews),
                len(negative_reviews)))
    print(
        "\nSince these reviews seem so mixed, and since I'm interested in finding exactly what it is about my hotel that should be improved, "
        "let's find the complaints users have about individual aspects of this hotel"
    )

    print(
        "\nIn order to do that, I'm going to extract the targets of a negative sentiment. "
        "I'm going to map each of these targets to the mined opinion object we get back to aggregate the reviews by target. "
    )
    target_to_complaints = {}

    for document in doc_result:
        for sentence in document.sentences:
            for mined_opinion in sentence.mined_opinions:
                target = mined_opinion.target
                if target.sentiment == 'negative':
                    target_to_complaints.setdefault(target.text, [])
                    target_to_complaints[target.text].append(mined_opinion)

    print(
        "\nLet's now go through the aspects of our hotel people have complained about and see what users have specifically said"
    )

    for target, complaints in target_to_complaints.items():
        print(
            "Users have made {} complaint(s) about '{}', specifically saying that it's '{}'"
            .format(
                len(complaints), target, "', '".join([
                    assessment.text for complaint in complaints
                    for assessment in complaint.assessments
                ])))

    print(
        "\n\nLooking at the breakdown, I can see what aspects of my hotel need improvement, and based off of both the number and "
        "content of the complaints users have made about my toilets, I need to get that fixed ASAP."
    )
# Example 24
    async def test_empty_credential_class(self, resource_group, location, text_analytics_account, text_analytics_account_key):
        text_analytics = TextAnalyticsClient(text_analytics_account, TextAnalyticsApiKeyCredential(""))
        with self.assertRaises(ClientAuthenticationError):
            response = await text_analytics.detect_language(
                ["This is written in English."]
            )
# Example 25
    async def analyze_sentiment_with_opinion_mining(self):
        from azure.core.credentials import AzureKeyCredential
        from azure.ai.textanalytics.aio import TextAnalyticsClient

        endpoint = os.environ["AZURE_TEXT_ANALYTICS_ENDPOINT"]
        key = os.environ["AZURE_TEXT_ANALYTICS_KEY"]

        text_analytics_client = TextAnalyticsClient(
            endpoint=endpoint,
            credential=AzureKeyCredential(key)
        )

        print("In this sample we will be combing through the reviews of a potential hotel to stay at: Hotel Foo.")

        print(
            "I first found a handful of reviews for Hotel Foo. Let's see if I want to stay here."
        )

        documents = [
            "The food and service were unacceptable, but the concierge were nice",
            "The rooms were beautiful but dirty. The AC was good and quiet, but the elevator was broken",
            "The breakfast was good, but the toilet was smelly",
            "Loved this hotel - good breakfast - nice shuttle service.",
            "I had a great unobstructed view of the Microsoft campus"
        ]

        async with text_analytics_client:
            result = await text_analytics_client.analyze_sentiment(documents, show_opinion_mining=True)
        doc_result = [doc for doc in result if not doc.is_error]

        print("\n\nLet's see how many positive and negative reviews of this hotel I have right now")
        positive_reviews = [doc for doc in doc_result if doc.sentiment == "positive"]
        negative_reviews = [doc for doc in doc_result if doc.sentiment == "negative"]
        print("...We have {} positive reviews and {} negative reviews. ".format(len(positive_reviews), len(negative_reviews)))
        print("\nLooks more positive than negative, but still pretty mixed, so I'm going to drill deeper into the opinions of individual aspects of this hotel")

        print("\nIn order to do that, I'm going to sort them based on whether these opinions are positive, mixed, or negative")
        positive_mined_opinions = []
        mixed_mined_opinions = []
        negative_mined_opinions = []

        for document in doc_result:
            for sentence in document.sentences:
                for mined_opinion in sentence.mined_opinions:
                    aspect = mined_opinion.aspect
                    if aspect.sentiment == "positive":
                        positive_mined_opinions.append(mined_opinion)
                    elif aspect.sentiment == "mixed":
                        mixed_mined_opinions.append(mined_opinion)
                    else:
                        negative_mined_opinions.append(mined_opinion)

        print("\n\nLet's look at the {} positive opinions users have expressed for aspects of this hotel".format(len(positive_mined_opinions)))
        for mined_opinion in positive_mined_opinions:
            print("...Reviewers have the following opinions for the overall positive '{}' aspect of the hotel".format(mined_opinion.aspect.text))
            for opinion in mined_opinion.opinions:
                print("......'{}' opinion '{}'".format(opinion.sentiment, opinion.text))

        print("\n\nNow let's look at the {} mixed opinions users have expressed for aspects of this hotel".format(len(mixed_mined_opinions)))
        for mined_opinion in mixed_mined_opinions:
            print("...Reviewers have the following opinions for the overall mixed '{}' aspect of the hotel".format(mined_opinion.aspect.text))
            for opinion in mined_opinion.opinions:
                print("......'{}' opinion '{}'".format(opinion.sentiment, opinion.text))

        print("\n\nFinally, let's see the {} negative opinions users have expressed for aspects of this hotel".format(len(negative_mined_opinions)))
        for mined_opinion in negative_mined_opinions:
            print("...Reviewers have the following opinions for the overall negative '{}' aspect of the hotel".format(mined_opinion.aspect.text))
            for opinion in mined_opinion.opinions:
                print("......'{}' opinion '{}'".format(opinion.sentiment, opinion.text))

        print(
            "\n\nLooking at the breakdown, even though there were more positive opinions of this hotel, "
            "I care the most about the food and the toilets in a hotel, so I will be staying elsewhere"
        )
# Example 26
    async def test_no_single_input(self, resource_group, location, text_analytics_account, text_analytics_account_key):
        text_analytics = TextAnalyticsClient(text_analytics_account, TextAnalyticsApiKeyCredential(text_analytics_account_key))
        with self.assertRaises(TypeError):
            response = await text_analytics.detect_language("hello world")
# Example 27
    async def test_batch_size_over_limit(self, resource_group, location, text_analytics_account, text_analytics_account_key):
        text_analytics = TextAnalyticsClient(text_analytics_account, AzureKeyCredential(text_analytics_account_key))

        docs = [u"hello world"] * 1050
        with self.assertRaises(HttpResponseError):
            response = await text_analytics.recognize_entities(docs)
# Example 28
    async def test_empty_credentials(self, **kwargs):
        textanalytics_test_endpoint = kwargs.pop("textanalytics_test_endpoint")
        with pytest.raises(TypeError):
            text_analytics = TextAnalyticsClient(textanalytics_test_endpoint, "")
async def sample_analyze_sentiment_async():
    print(
        "In this sample we will be combing through reviews customers have left about their "
        "experience using our skydiving company, Contoso.")
    print(
        "We start out with a list of reviews. Let us extract the reviews we are sure are "
        "positive, so we can display them on our website and get even more customers!"
    )
    # [START analyze_sentiment_async]
    from azure.core.credentials import AzureKeyCredential
    from azure.ai.textanalytics.aio import TextAnalyticsClient

    endpoint = os.environ["AZURE_TEXT_ANALYTICS_ENDPOINT"]
    key = os.environ["AZURE_TEXT_ANALYTICS_KEY"]

    text_analytics_client = TextAnalyticsClient(
        endpoint=endpoint, credential=AzureKeyCredential(key))

    documents = [
        """I had the best day of my life. I decided to go sky-diving and it made me appreciate my whole life so much more.
        I developed a deep-connection with my instructor as well, and I feel as if I've made a life-long friend in her.""",
        """This was a waste of my time. All of the views on this drop are extremely boring, all I saw was grass. 0/10 would
        not recommend to any divers, even first timers.""",
        """This was pretty good! The sights were ok, and I had fun with my instructors! Can't complain too much about my experience""",
        """I only have one word for my experience: WOW!!! I can't believe I have had such a wonderful skydiving company right
        in my backyard this whole time! I will definitely be a repeat customer, and I want to take my grandmother skydiving too,
        I know she'll love it!"""
    ]

    async with text_analytics_client:
        result = await text_analytics_client.analyze_sentiment(documents)

    docs = [doc for doc in result if not doc.is_error]

    print("Let's visualize the sentiment of each of these documents")
    for idx, doc in enumerate(docs):
        print(f"Document text: {documents[idx]}")
        print(f"Overall sentiment: {doc.sentiment}")
    # [END analyze_sentiment_async]

    print("Now, let us extract all of the positive reviews")
    positive_reviews = [doc for doc in docs if doc.sentiment == 'positive']

    print(
        "We want to be very confident that our reviews are positive since we'll be posting them on our website."
    )
    print(
        "We're going to confirm our chosen reviews are positive using two different tests"
    )

    print(
        "First, we are going to check how confident the sentiment analysis model is that a document is positive. "
        "Let's go with a 90% confidence.")
    positive_reviews = [
        review for review in positive_reviews
        if review.confidence_scores.positive >= 0.9
    ]

    print(
        "Finally, we also want to make sure every sentence is positive so we only showcase our best selves!"
    )
    positive_reviews_final = []
    for idx, review in enumerate(positive_reviews):
        print(f"Looking at positive review #{idx + 1}")
        any_sentence_not_positive = False
        for sentence in review.sentences:
            print(
                "...Sentence '{}' has sentiment '{}' with confidence scores '{}'"
                .format(sentence.text, sentence.sentiment,
                        sentence.confidence_scores))
            if sentence.sentiment != 'positive':
                any_sentence_not_positive = True
        if not any_sentence_not_positive:
            positive_reviews_final.append(review)

    print(
        "We now have the final list of positive reviews we are going to display on our website!"
    )
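    # An equivalent, more compact form of the per-sentence check above using
    # all(): keep a review only when every sentence is itself positive.
    positive_reviews_final = [
        review for review in positive_reviews
        if all(sentence.sentiment == "positive" for sentence in review.sentences)
    ]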
# Example 30
    def test_bad_type_for_credentials(self, **kwargs):
        textanalytics_test_endpoint = kwargs.pop("textanalytics_test_endpoint")
        with pytest.raises(TypeError):
            text_analytics = TextAnalyticsClient(textanalytics_test_endpoint, [])