async def test_all_errors_recognize_linked_entities_async(
            self, resource_group, location, text_analytics_account,
            text_analytics_account_key):
        text_analytics = TextAnalyticsClient(text_analytics_account,
                                             text_analytics_account_key)

        docs = [{
            "id": "1",
            "text": ""
        }, {
            "id": "2",
            "language": "Spanish",
            "text": "Microsoft fue fundado por Bill Gates y Paul Allen"
        }]

        response = await text_analytics.recognize_linked_entities(docs)
        self.assertTrue(response[0].is_error)
        self.assertTrue(response[1].is_error)
    async def test_passing_only_string(self, resource_group, location, text_analytics_account, text_analytics_account_key):
        text_analytics = TextAnalyticsClient(text_analytics_account, TextAnalyticsApiKeyCredential(text_analytics_account_key))

        docs = [
            u"My SSN is 555-55-5555.",
            u"Your ABA number - 111000025 - is the first 9 digits in the lower left hand corner of your personal check.",
            u"Is 998.214.865-68 your Brazilian CPF number?",
            u""
        ]

        response = await text_analytics.recognize_pii_entities(docs, show_stats=True)
        self.assertEqual(response[0].entities[0].text, "555-55-5555")
        self.assertEqual(response[0].entities[0].category, "U.S. Social Security Number (SSN)")
        self.assertEqual(response[1].entities[0].text, "111000025")
        # self.assertEqual(response[1].entities[0].category, "ABA Routing Number")  # Service is currently returning PhoneNumber here
        self.assertEqual(response[2].entities[0].text, "998.214.865-68")
        self.assertEqual(response[2].entities[0].category, "Brazil CPF Number")
        self.assertTrue(response[3].is_error)
    async def test_whole_batch_language_hint_and_obj_per_item_hints(self, resource_group, location, text_analytics_account, text_analytics_account_key):
        text_analytics = TextAnalyticsClient(text_analytics_account, AzureKeyCredential(text_analytics_account_key))

        def callback(resp):
            language_str = "\"language\": \"es\""
            language = resp.http_request.body.count(language_str)
            self.assertEqual(language, 2)
            language_str = "\"language\": \"en\""
            language = resp.http_request.body.count(language_str)
            self.assertEqual(language, 1)

        docs = [
            TextDocumentInput(id="1", text="I should take my cat to the veterinarian.", language="es"),
            TextDocumentInput(id="2", text="Este es un document escrito en Español.", language="es"),
            TextDocumentInput(id="3", text="猫は幸せ"),
        ]

        response = await text_analytics.recognize_linked_entities(docs, language="en", raw_response_hook=callback)
    async def test_all_successful_passing_dict(self, resource_group, location, text_analytics_account, text_analytics_account_key):
        text_analytics = TextAnalyticsClient(text_analytics_account, AzureKeyCredential(text_analytics_account_key))

        docs = [{"id": "1", "language": "en", "text": "Microsoft was founded by Bill Gates and Paul Allen"},
                {"id": "2", "language": "es", "text": "Microsoft fue fundado por Bill Gates y Paul Allen"}]

        response = await text_analytics.recognize_linked_entities(docs, show_stats=True)
        for doc in response:
            self.assertEqual(len(doc.entities), 3)
            self.assertIsNotNone(doc.id)
            self.assertIsNotNone(doc.statistics)
            for entity in doc.entities:
                self.assertIsNotNone(entity.name)
                self.assertIsNotNone(entity.matches)
                self.assertIsNotNone(entity.language)
                self.assertIsNotNone(entity.data_source_entity_id)
                self.assertIsNotNone(entity.url)
                self.assertIsNotNone(entity.data_source)
    async def authentication_with_api_key_credential_async(self):
        print("\n.. authentication_with_api_key_credential_async")
        # [START create_ta_client_with_key_async]
        from azure.ai.textanalytics.aio import TextAnalyticsClient
        from azure.ai.textanalytics import TextAnalyticsApiKeyCredential
        endpoint = os.getenv("AZURE_TEXT_ANALYTICS_ENDPOINT")
        key = os.getenv("AZURE_TEXT_ANALYTICS_KEY")

        text_analytics_client = TextAnalyticsClient(
            endpoint, TextAnalyticsApiKeyCredential(key))
        # [END create_ta_client_with_key_async]

        doc = ["I need to take my cat to the veterinarian."]
        async with text_analytics_client:
            result = await text_analytics_client.detect_language(doc)

        print("Language detected: {}".format(result[0].primary_language.name))
        print("Confidence score: {}".format(result[0].primary_language.score))
    async def test_document_attribute_error_nonexistent_attribute(
            self, resource_group, location, text_analytics_account,
            text_analytics_account_key):
        text_analytics = TextAnalyticsClient(
            text_analytics_account,
            TextAnalyticsApiKeyCredential(text_analytics_account_key))

        docs = [{"id": "1", "text": ""}]
        response = await text_analytics.recognize_entities(docs)

        # Attribute not found on DocumentError or result obj, default behavior/message
        try:
            entities = response[0].attribute_not_on_result_or_error
        except AttributeError as default_behavior:
            self.assertEqual(
                default_behavior.args[0],
                '\'DocumentError\' object has no attribute \'attribute_not_on_result_or_error\''
            )
    async def test_all_successful_passing_text_document_input(self, resource_group, location, text_analytics_account, text_analytics_account_key):
        text_analytics = TextAnalyticsClient(text_analytics_account, AzureKeyCredential(text_analytics_account_key))

        docs = [
            TextDocumentInput(id="1", text="Microsoft was founded by Bill Gates and Paul Allen"),
            TextDocumentInput(id="2", text="Microsoft fue fundado por Bill Gates y Paul Allen")
        ]

        response = await text_analytics.recognize_linked_entities(docs)
        for doc in response:
            self.assertEqual(len(doc.entities), 3)
            for entity in doc.entities:
                self.assertIsNotNone(entity.name)
                self.assertIsNotNone(entity.matches)
                self.assertIsNotNone(entity.language)
                self.assertIsNotNone(entity.data_source_entity_id)
                self.assertIsNotNone(entity.url)
                self.assertIsNotNone(entity.data_source)
    async def test_rotate_subscription_key(self, textanalytics_test_endpoint, textanalytics_test_api_key):
        credential = AzureKeyCredential(textanalytics_test_api_key)
        client = TextAnalyticsClient(textanalytics_test_endpoint, credential)

        docs = [{"id": "1", "text": "I will go to the park."},
                {"id": "2", "text": "I did not like the hotel we stayed at."},
                {"id": "3", "text": "The restaurant had really good food."}]

        response = await client.recognize_pii_entities(docs)
        assert response is not None

        credential.update("xxx")  # Make authentication fail
        with pytest.raises(ClientAuthenticationError):
            response = await client.recognize_pii_entities(docs)

        credential.update(textanalytics_test_api_key)  # Authenticate successfully again
        response = await client.recognize_pii_entities(docs)
        assert response is not None
    async def test_bad_model_version_error(self, resource_group, location,
                                           text_analytics_account,
                                           text_analytics_account_key):
        text_analytics = TextAnalyticsClient(
            text_analytics_account,
            TextAnalyticsApiKeyCredential(text_analytics_account_key))
        docs = [{
            "id": "1",
            "language": "english",
            "text": "I did not like the hotel we stayed at."
        }]

        try:
            result = await text_analytics.recognize_entities(
                docs, model_version="bad")
        except HttpResponseError as err:
            self.assertEqual(err.error.code, "InvalidRequest")
            self.assertIsNotNone(err.error.message)
    async def test_rotate_subscription_key(self, resource_group, location, text_analytics_account, text_analytics_account_key):
        credential = AzureKeyCredential(text_analytics_account_key)
        client = TextAnalyticsClient(text_analytics_account, credential)

        docs = [{"id": "1", "text": "I will go to the park."},
                {"id": "2", "text": "I did not like the hotel we stayed at."},
                {"id": "3", "text": "The restaurant had really good food."}]

        response = await client.analyze_sentiment(docs)
        self.assertIsNotNone(response)

        credential.update("xxx")  # Make authentication fail
        with self.assertRaises(ClientAuthenticationError):
            response = await client.analyze_sentiment(docs)

        credential.update(text_analytics_account_key)  # Authenticate successfully again
        response = await client.analyze_sentiment(docs)
        self.assertIsNotNone(response)
    async def test_mixing_inputs_async(self, resource_group, location,
                                       text_analytics_account,
                                       text_analytics_account_key):
        text_analytics = TextAnalyticsClient(text_analytics_account,
                                             text_analytics_account_key)
        docs = [
            {
                "id": "1",
                "text": "Microsoft was founded by Bill Gates and Paul Allen."
            },
            TextDocumentInput(
                id="2",
                text="I did not like the hotel we stayed it. It was too expensive."
            ),
            "You cannot mix string input with the above inputs"
        ]
        with self.assertRaises(TypeError):
            response = await text_analytics.analyze_sentiment(docs)
    async def test_validate_input_string_async(self, resource_group, location,
                                               text_analytics_account,
                                               text_analytics_account_key):
        text_analytics = TextAnalyticsClient(text_analytics_account,
                                             text_analytics_account_key)

        docs = [
            "I should take my cat to the veterinarian.",
            "Este es un document escrito en Español.", "猫は幸せ",
            "Fahrt nach Stuttgart und dann zum Hotel zu Fu.", ""
        ]

        response = await text_analytics.detect_languages(docs)
        self.assertEqual(response[0].primary_language.name, "English")
        self.assertEqual(response[1].primary_language.name, "Spanish")
        self.assertEqual(response[2].primary_language.name, "Japanese")
        self.assertEqual(response[3].primary_language.name, "German")
        self.assertTrue(response[4].is_error)
    async def test_whole_batch_country_hint_and_obj_per_item_hints_async(self, resource_group, location, cognitiveservices_account, cognitiveservices_account_key):
        text_analytics = TextAnalyticsClient(cognitiveservices_account, cognitiveservices_account_key)

        def callback(resp):
            country_str = "\"countryHint\": \"CA\""
            country = resp.http_request.body.count(country_str)
            self.assertEqual(country, 2)
            country_str = "\"countryHint\": \"US\""
            country = resp.http_request.body.count(country_str)
            self.assertEqual(country, 1)

        docs = [
            DetectLanguageInput(id="1", text="I should take my cat to the veterinarian.", country_hint="CA"),
            DetectLanguageInput(id="2", text="Este es un document escrito en Español.", country_hint="CA"),
            DetectLanguageInput(id="3", text="猫は幸せ"),
        ]

        response = await text_analytics.detect_languages(docs, country_hint="US", response_hook=callback)
    async def test_all_errors_recognize_pii_entities_async(
            self, resource_group, location, text_analytics_account,
            text_analytics_account_key):
        text_analytics = TextAnalyticsClient(text_analytics_account,
                                             text_analytics_account_key)

        docs = [{
            "id": "1",
            "language": "es",
            "text": "hola"
        }, {
            "id": "2",
            "text": ""
        }]

        response = await text_analytics.recognize_pii_entities(docs)
        self.assertTrue(response[0].is_error)
        self.assertTrue(response[1].is_error)
    async def analyze_healthcare_entities_with_cancellation_async(self):
        # [START analyze_healthcare_entities_with_cancellation_async]
        from azure.core.credentials import AzureKeyCredential
        from azure.ai.textanalytics.aio import TextAnalyticsClient

        endpoint = os.environ["AZURE_TEXT_ANALYTICS_ENDPOINT"]
        key = os.environ["AZURE_TEXT_ANALYTICS_KEY"]

        text_analytics_client = TextAnalyticsClient(
            endpoint=endpoint,
            credential=AzureKeyCredential(key),
        )

        documents = [
            "RECORD #333582770390100 | MH | 85986313 | | 054351 | 2/14/2001 12:00:00 AM | \
            CORONARY ARTERY DISEASE | Signed | DIS | Admission Date: 5/22/2001 \
            Report Status: Signed Discharge Date: 4/24/2001 ADMISSION DIAGNOSIS: \
            CORONARY ARTERY DISEASE. HISTORY OF PRESENT ILLNESS: \
            The patient is a 54-year-old gentleman with a history of progressive angina over the past several months. \
            The patient had a cardiac catheterization in July of this year revealing total occlusion of the RCA and \
            50% left main disease , with a strong family history of coronary artery disease with a brother dying at \
            the age of 52 from a myocardial infarction and another brother who is status post coronary artery bypass grafting. \
            The patient had a stress echocardiogram done on July , 2001 , which showed no wall motion abnormalities ,\
            but this was a difficult study due to body habitus. The patient went for six minutes with minimal ST depressions \
            in the anterior lateral leads , thought due to fatigue and wrist pain , his anginal equivalent. Due to the patient's \
            increased symptoms and family history and history left main disease with total occasional of his RCA was referred \
            for revascularization with open heart surgery."
        ]

        async with text_analytics_client:
            poller = await text_analytics_client.begin_analyze_healthcare_entities(
                documents)

            try:
                cancellation_poller = await poller.cancel()
                await cancellation_poller.wait()

            except HttpResponseError as e:
                # If the operation has already reached a terminal state it cannot be cancelled.
                print(e)

            else:
                print(
                    "Healthcare entities analysis was successfully cancelled.")
    async def alternative_document_input(self):
        from azure.ai.textanalytics.aio import TextAnalyticsClient
        from azure.ai.textanalytics import TextAnalyticsApiKeyCredential
        text_analytics_client = TextAnalyticsClient(
            endpoint=self.endpoint,
            credential=TextAnalyticsApiKeyCredential(self.key))

        documents = [{
            "id": "0",
            "language": "en",
            "text": "I had the best day of my life."
        }, {
            "id": "1",
            "language": "en",
            "text": "This was a waste of my time. The speaker put me to sleep."
        }, {
            "id": "2",
            "language": "es",
            "text": "No tengo dinero ni nada que dar..."
        }, {
            "id": "3",
            "language": "fr",
            "text": "L'hôtel n'était pas très confortable. L'éclairage était trop sombre."
        }]
        async with text_analytics_client:
            result = await text_analytics_client.detect_language(documents)

        for idx, doc in enumerate(result):
            if not doc.is_error:
                print("Document text: {}".format(documents[idx]))
                print("Language detected: {}".format(
                    doc.primary_language.name))
                print("ISO6391 name: {}".format(
                    doc.primary_language.iso6391_name))
                print("Confidence score: {}\n".format(
                    doc.primary_language.score))
            if doc.is_error:
                print(doc.id, doc.error)
    async def analyze_sentiment_async(self):
        # [START batch_analyze_sentiment_async]
        from azure.core.credentials import AzureKeyCredential
        from azure.ai.textanalytics.aio import TextAnalyticsClient

        endpoint = os.environ["AZURE_TEXT_ANALYTICS_ENDPOINT"]
        key = os.environ["AZURE_TEXT_ANALYTICS_KEY"]

        text_analytics_client = TextAnalyticsClient(
            endpoint=endpoint, credential=AzureKeyCredential(key))
        documents = [
            "I had the best day of my life.",
            "This was a waste of my time. The speaker put me to sleep.",
            "No tengo dinero ni nada que dar...",
            "L'hôtel n'était pas très confortable. L'éclairage était trop sombre."
        ]

        async with text_analytics_client:
            result = await text_analytics_client.analyze_sentiment(documents)

        docs = [doc for doc in result if not doc.is_error]

        for idx, doc in enumerate(docs):
            print("Document text: {}".format(documents[idx]))
            print("Overall sentiment: {}".format(doc.sentiment))
            # [END batch_analyze_sentiment_async]
            print(
                "Overall confidence scores: positive={}; neutral={}; negative={} \n"
                .format(
                    doc.confidence_scores.positive,
                    doc.confidence_scores.neutral,
                    doc.confidence_scores.negative,
                ))
            for sentence in doc.sentences:
                print("Sentence '{}' has sentiment: {}".format(
                    sentence.text, sentence.sentiment))
                print(
                    "Sentence confidence scores: positive={}; neutral={}; negative={}"
                    .format(
                        sentence.confidence_scores.positive,
                        sentence.confidence_scores.neutral,
                        sentence.confidence_scores.negative,
                    ))
            print("------------------------------------")
async def sample_detect_language_async():
    print(
        "In this sample we own a hotel with customers from all around the globe. We want to eventually "
        "translate these reviews into English so our manager can read them. However, we first need to know which language "
        "they are in for more accurate translation. This is the step we will be covering in this sample\n"
    )
    # [START detect_language_async]
    from azure.core.credentials import AzureKeyCredential
    from azure.ai.textanalytics.aio import TextAnalyticsClient

    endpoint = os.environ["AZURE_TEXT_ANALYTICS_ENDPOINT"]
    key = os.environ["AZURE_TEXT_ANALYTICS_KEY"]

    text_analytics_client = TextAnalyticsClient(
        endpoint=endpoint, credential=AzureKeyCredential(key))
    documents = [
        """
        The concierge Paulette was extremely helpful. Sadly when we arrived the elevator was broken, but with Paulette's help we barely noticed this inconvenience.
        She arranged for our baggage to be brought up to our room with no extra charge and gave us a free meal to refurbish all of the calories we lost from
        walking up the stairs :). Can't say enough good things about my experience!
        """, """
        最近由于工作压力太大,我们决定去富酒店度假。那儿的温泉实在太舒服了,我跟我丈夫都完全恢复了工作前的青春精神!加油!
        """
    ]
    async with text_analytics_client:
        result = await text_analytics_client.detect_language(documents)

    reviewed_docs = [doc for doc in result if not doc.is_error]

    print("Let's see what language each review is in!")

    for idx, doc in enumerate(reviewed_docs):
        print("Review #{} is in '{}', which has ISO639-1 name '{}'\n".format(
            idx, doc.primary_language.name, doc.primary_language.iso6391_name))
        if doc.is_error:
            print(doc.id, doc.error)
    # [END detect_language_async]
    print(
        "When actually storing the reviews, we want to map the review to their ISO639-1 name "
        "so everything is more standardized")

    review_to_language = {}
    for idx, doc in enumerate(reviewed_docs):
        review_to_language[documents[idx]] = doc.primary_language.iso6391_name
    async def alternative_scenario_recognize_pii_entities_async(self):
        """This sample demonstrates how to retrieve batch statistics, the
        model version used, and the raw response returned from the service.

        It additionally shows an alternative way to pass in the input documents
        using a list[TextDocumentInput] and supplying your own IDs and language hints along
        with the text.
        """
        from azure.ai.textanalytics.aio import TextAnalyticsClient
        from azure.ai.textanalytics import TextAnalyticsApiKeyCredential
        text_analytics_client = TextAnalyticsClient(
            endpoint=self.endpoint,
            credential=TextAnalyticsApiKeyCredential(self.key))

        documents = [{
            "id": "0",
            "language": "en",
            "text": "The employee's SSN is 555-55-5555."
        }, {
            "id": "1",
            "language": "en",
            "text": "Your ABA number - 111000025 - is the first 9 digits in the lower left hand corner of your personal check."
        }, {
            "id": "2",
            "language": "en",
            "text": "Is 998.214.865-68 your Brazilian CPF number?"
        }]

        extras = []

        def callback(resp):
            extras.append(resp.statistics)
            extras.append(resp.model_version)
            extras.append(resp.raw_response)

        async with text_analytics_client:
            result = await text_analytics_client.recognize_pii_entities(
                documents,
                show_stats=True,
                model_version="latest",
                response_hook=callback)
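
The docstring above mentions passing the documents as a list[TextDocumentInput], while the body uses plain dicts. A minimal sketch of the object form, assuming the same three PII documents (ids, texts, and language hints copied from the dicts above); either form can be handed to recognize_pii_entities unchanged:

from azure.ai.textanalytics import TextDocumentInput

# Hypothetical rewrite of the documents list using TextDocumentInput objects
documents = [
    TextDocumentInput(id="0", text="The employee's SSN is 555-55-5555.", language="en"),
    TextDocumentInput(id="1", text="Your ABA number - 111000025 - is the first 9 digits in the lower left hand corner of your personal check.", language="en"),
    TextDocumentInput(id="2", text="Is 998.214.865-68 your Brazilian CPF number?", language="en"),
]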
    async def analyze_healthcare_async(self):
        # [START analyze_healthcare_async]
        from azure.core.credentials import AzureKeyCredential
        from azure.ai.textanalytics.aio import TextAnalyticsClient

        endpoint = os.environ["AZURE_TEXT_ANALYTICS_ENDPOINT"]
        key = os.environ["AZURE_TEXT_ANALYTICS_KEY"]

        text_analytics_client = TextAnalyticsClient(
            endpoint=endpoint,
            credential=AzureKeyCredential(key),
            api_version="v3.1-preview.3")

        documents = ["Subject is taking 100mg of ibuprofen twice daily"]

        async with text_analytics_client:
            poller = await text_analytics_client.begin_analyze_healthcare(
                documents)
            result = await poller.result()
            docs = [doc async for doc in result if not doc.is_error]

        print("Results of Healthcare Analysis:")
        for idx, doc in enumerate(docs):
            print("Document text: {}\n".format(documents[idx]))
            for entity in doc.entities:
                print("Entity: {}".format(entity.text))
                print("...Category: {}".format(entity.category))
                print("...Subcategory: {}".format(entity.subcategory))
                print("...Offset: {}".format(entity.offset))
                print("...Confidence score: {}".format(
                    entity.confidence_score))
                if entity.links is not None:
                    print("...Links:")
                    for link in entity.links:
                        print("......ID: {}".format(link.id))
                        print("......Data source: {}".format(link.data_source))
            for relation in doc.relations:
                print("Relation:")
                print("...Source: {}".format(relation.source.text))
                print("...Target: {}".format(relation.target.text))
                print("...Type: {}".format(relation.relation_type))
                print("...Bidirectional: {}".format(relation.is_bidirectional))
            print("------------------------------------------")
    async def test_language_kwarg_spanish(self, resource_group, location,
                                          text_analytics_account,
                                          text_analytics_account_key):
        text_analytics = TextAnalyticsClient(
            text_analytics_account,
            TextAnalyticsApiKeyCredential(text_analytics_account_key))

        def callback(response):
            language_str = "\"language\": \"es\""
            self.assertEqual(response.http_request.body.count(language_str), 1)
            self.assertIsNotNone(response.model_version)
            self.assertIsNotNone(response.statistics)

        res = await text_analytics.recognize_entities(
            inputs=["Bill Gates is the CEO of Microsoft."],
            model_version="latest",
            show_stats=True,
            language="es",
            raw_response_hook=callback)
    async def test_duplicate_ids_error(self, resource_group, location,
                                       text_analytics_account,
                                       text_analytics_account_key):
        text_analytics = TextAnalyticsClient(
            text_analytics_account,
            TextAnalyticsApiKeyCredential(text_analytics_account_key))
        # Duplicate Ids
        docs = [{
            "id": "1",
            "text": "hello world"
        }, {
            "id": "1",
            "text": "I did not like the hotel we stayed at."
        }]
        try:
            result = await text_analytics.recognize_entities(docs)
        except HttpResponseError as err:
            self.assertEqual(err.error.code, "InvalidDocument")
            self.assertIsNotNone(err.error.message)
    async def extract_key_phrases_async(self):
        # [START batch_extract_key_phrases_async]
        from azure.ai.textanalytics.aio import TextAnalyticsClient
        text_analytics_client = TextAnalyticsClient(endpoint=self.endpoint,
                                                    credential=self.key)
        documents = [
            "Redmond is a city in King County, Washington, United States, located 15 miles east of Seattle.",
            "I need to take my cat to the veterinarian.",
            "I will travel to South America in the summer.",
        ]

        async with text_analytics_client:
            result = await text_analytics_client.extract_key_phrases(documents)

        for doc in result:
            if not doc.is_error:
                print(doc.key_phrases)
            if doc.is_error:
                print(doc.id, doc.error)
    async def alternative_scenario_recognize_entities_async(self):
        """This sample demonstrates how to retrieve batch statistics, the
        model version used, and the raw response returned from the service.

        It additionally shows an alternative way to pass in the input documents
        using a list[TextDocumentInput] and supplying your own IDs and language hints along
        with the text.
        """
        from azure.ai.textanalytics.aio import TextAnalyticsClient
        text_analytics_client = TextAnalyticsClient(endpoint=self.endpoint,
                                                    credential=self.key)

        documents = [
            {
                "id": "0",
                "language": "en",
                "text": "Microsoft was founded by Bill Gates and Paul Allen."
            },
            {
                "id": "1",
                "language": "de",
                "text": "I had a wonderful trip to Seattle last week."
            },
            {
                "id": "2",
                "language": "es",
                "text": "I visited the Space Needle 2 times."
            },
        ]

        extras = []

        def callback(resp):
            extras.append(resp.statistics)
            extras.append(resp.model_version)
            extras.append(resp.raw_response)

        async with text_analytics_client:
            result = await text_analytics_client.recognize_entities(
                documents,
                show_stats=True,
                model_version="latest",
                response_hook=callback)
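
Per the docstring, the extras list populated by the callback ends up holding the batch statistics, the model version, and the raw response, in the order they were appended. A small sketch of reading them back after the call, assuming a single service request so the callback ran exactly once (the statistics object is printed whole here because its exact shape varies by SDK version):

# Unpack the three items appended by the response hook above (assumption: one call, one hook invocation)
batch_statistics, model_version, raw_response = extras
print("Batch statistics: {}".format(batch_statistics))
print("Model version used: {}".format(model_version))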
    async def test_whole_batch_dont_use_language_hint_async(
            self, resource_group, location, text_analytics_account,
            text_analytics_account_key):
        text_analytics = TextAnalyticsClient(text_analytics_account,
                                             text_analytics_account_key)

        def callback(resp):
            language_str = "\"language\": \"\""
            language = resp.http_request.body.count(language_str)
            self.assertEqual(language, 3)

        docs = [
            u"This was the best day of my life.",
            u"I did not like the hotel we stayed it. It was too expensive.",
            u"The restaurant was not as good as I hoped."
        ]

        response = await text_analytics.analyze_sentiment(
            docs, language="", response_hook=callback)
    async def test_passing_only_string(self, resource_group, location,
                                       text_analytics_account,
                                       text_analytics_account_key):
        text_analytics = TextAnalyticsClient(
            text_analytics_account,
            TextAnalyticsApiKeyCredential(text_analytics_account_key))

        docs = [
            u"Microsoft was founded by Bill Gates and Paul Allen.",
            u"I did not like the hotel we stayed at. It was too expensive.",
            u"The restaurant had really good food. I recommend you try it.",
            u""
        ]

        response = await text_analytics.analyze_sentiment(docs)
        self.assertEqual(response[0].sentiment, "neutral")
        self.assertEqual(response[1].sentiment, "negative")
        self.assertEqual(response[2].sentiment, "positive")
        self.assertTrue(response[3].is_error)
    async def test_country_hint_kwarg(self, resource_group, location,
                                      text_analytics_account,
                                      text_analytics_account_key):
        text_analytics = TextAnalyticsClient(
            text_analytics_account,
            TextAnalyticsApiKeyCredential(text_analytics_account_key))

        def callback(response):
            country_str = "\"countryHint\": \"ES\""
            self.assertEqual(response.http_request.body.count(country_str), 1)
            self.assertIsNotNone(response.model_version)
            self.assertIsNotNone(response.statistics)

        res = await text_analytics.detect_language(
            inputs=["this is written in english"],
            model_version="latest",
            show_stats=True,
            country_hint="ES",
            raw_response_hook=callback)
    async def test_input_with_some_errors(self, resource_group, location,
                                          text_analytics_account,
                                          text_analytics_account_key):
        text_analytics = TextAnalyticsClient(
            text_analytics_account,
            TextAnalyticsApiKeyCredential(text_analytics_account_key))

        docs = [{
            "id": "1",
            "text": ""
        }, {
            "id": "2",
            "language": "es",
            "text": "Microsoft fue fundado por Bill Gates y Paul Allen"
        }]

        response = await text_analytics.recognize_linked_entities(docs)
        self.assertTrue(response[0].is_error)
        self.assertFalse(response[1].is_error)
    async def alternative_scenario_extract_key_phrases_async(self):
        """This sample demonstrates how to retrieve batch statistics, the
        model version used, and the raw response returned from the service.

        It additionally shows an alternative way to pass in the input documents
        using a list[TextDocumentInput] and supplying your own IDs and language hints along
        with the text.
        """
        from azure.ai.textanalytics.aio import TextAnalyticsClient
        from azure.ai.textanalytics import TextAnalyticsApiKeyCredential
        text_analytics_client = TextAnalyticsClient(
            endpoint=self.endpoint,
            credential=TextAnalyticsApiKeyCredential(self.key))

        documents = [{
            "id": "0",
            "language": "en",
            "text": "Redmond is a city in King County, Washington, United States, located 15 miles east of Seattle."
        }, {
            "id": "1",
            "language": "en",
            "text": "I need to take my cat to the veterinarian."
        }, {
            "id": "2",
            "language": "en",
            "text": "I will travel to South America in the summer."
        }]
        extras = []

        def callback(resp):
            extras.append(resp.statistics)
            extras.append(resp.model_version)
            extras.append(resp.raw_response)

        async with text_analytics_client:
            result = await text_analytics_client.extract_key_phrases(
                documents,
                show_stats=True,
                model_version="latest",
                response_hook=callback)
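
Because show_stats=True is passed, each non-error document in the result also carries per-document statistics alongside its key phrases. A brief sketch of reading them, assuming result has already been awaited inside the async with block above (the statistics object is printed whole since its field names differ across SDK versions):

# Iterate the awaited result; skip documents the service flagged as errors
for doc in result:
    if not doc.is_error:
        print("Document id: {}".format(doc.id))
        print("Key phrases: {}".format(doc.key_phrases))
        print("Statistics: {}".format(doc.statistics))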
    async def analyze_sentiment_async(self):
        # [START batch_analyze_sentiment_async]
        from azure.ai.textanalytics.aio import TextAnalyticsClient
        from azure.ai.textanalytics import TextAnalyticsApiKeyCredential
        text_analytics_client = TextAnalyticsClient(
            endpoint=self.endpoint,
            credential=TextAnalyticsApiKeyCredential(self.key))
        documents = [
            "I had the best day of my life.",
            "This was a waste of my time. The speaker put me to sleep.",
            "No tengo dinero ni nada que dar...",
            "L'hôtel n'était pas très confortable. L'éclairage était trop sombre."
        ]

        async with text_analytics_client:
            result = await text_analytics_client.analyze_sentiment(documents)

        docs = [doc for doc in result if not doc.is_error]

        for idx, doc in enumerate(docs):
            print("Document text: {}".format(documents[idx]))
            print("Overall sentiment: {}".format(doc.sentiment))
            # [END batch_analyze_sentiment_async]
            print(
                "Overall scores: positive={0:.3f}; neutral={1:.3f}; negative={2:.3f} \n"
                .format(
                    doc.document_scores.positive,
                    doc.document_scores.neutral,
                    doc.document_scores.negative,
                ))
            for idx, sentence in enumerate(doc.sentences):
                print("Sentence {} sentiment: {}".format(
                    idx + 1, sentence.sentiment))
                print(
                    "Sentence score: positive={0:.3f}; neutral={1:.3f}; negative={2:.3f}"
                    .format(
                        sentence.sentence_scores.positive,
                        sentence.sentence_scores.neutral,
                        sentence.sentence_scores.negative,
                    ))
                print("Offset: {}".format(sentence.offset))
                print("Length: {}\n".format(sentence.length))
            print("------------------------------------")