def test_create_request_method_is_called(self, patched_verify_request):
        """Invoking create_request() should also trigger verify_request()
        with the endpoint path and the query parameters.
        """
        client = ExpertAiClient()
        client.create_request(
            endpoint_path="resource_urlpath",
            params={"language": "en"},
            body={"text": "text"},
        )

        # The endpoint path is forwarded positionally, the params by keyword.
        patched_verify_request.assert_called_with(
            "resource_urlpath", params={"language": "en"},
        )
    def test_a_request_is_created(self, patched_get_method_name_for_endpoint):
        """Creating a request should resolve and attach the proper HTTP
        method for the endpoint.
        """

        def fake_get_method(endpoint):
            # The argument is ignored; the closed-over `url` is looked up
            # instead, so this always resolves to "GET".
            return {url: "GET"}.get(url)

        url = self.endpoint_path
        client = ExpertAiClient()
        patched_get_method_name_for_endpoint.side_effect = fake_get_method
        request = client.create_request(self.endpoint_path)

        self.assertEqual(request.string_method, "GET")
        patched_get_method_name_for_endpoint.assert_called_once_with(
            self.endpoint_path
        )
    def test_a_bad_request_is_received(self, patched_object_mapper):
        """An error payload, even with a successful HTTP status, must not
        be handed to the ObjectMapper.
        """

        def response_json():
            return {
                "errors": [
                    {
                        "code": "PREPARE_DOCUMENT_FAILED",
                        "message": "missing layout key in json",
                    }
                ],
                "success": False,
            }

        # Status says success, but the body flags a failure.
        fake_response = MagicMock(
            json=response_json, status_code=constants.HTTP_SUCCESSFUL
        )
        client = ExpertAiClient()
        client.process_response(fake_response)
        patched_object_mapper.assert_not_called()
    def test_a_full_analysis_request_is_executed(self):
        """
        ...then verify that whole flow works as expected
        """
        # Canned API payload: a successful full-analysis response with one
        # knowledge entry, one phrase, one token and one sentence.
        response_json = {
            "success": True,
            "data": {
                "content": "Facebook is looking at buying U.S. startup for $6 million",
                "language": "en",
                "version": "sensei: 3.1.0; disambiguator: 15.0-QNTX-2016",
                "knowledge": [
                    {
                        "label": "organization.company",
                        "properties": [
                            {"type": "WikiDataId", "value": "Q380"}
                        ],
                        "syncon": 288110,
                    }
                ],
                "phrases": [
                    {"tokens": [0], "type": "PP", "start": 54, "end": 65},
                ],
                "tokens": [
                    {
                        "syncon": 62653,
                        "start": 74,
                        "end": 83,
                        "type": "NOU",
                        "lemma": "long time",
                        "pos": "NOUN",
                        "dependency": {"id": 11, "head": 7, "label": "nmod"},
                        "morphology": "Number=Sing",
                        "paragraph": 0,
                        "sentence": 0,
                        "phrase": 4,
                        "atoms": [
                            {
                                "start": 74,
                                "end": 78,
                                "type": "ADJ",
                                "lemma": "long",
                            },
                        ],
                    }
                ],
                "mainSentences": [],
                "mainPhrases": [],
                "mainLemmas": [],
                "mainSyncons": [],
                "entities": [],
                "topics": [],
                "sentences": [{"phrases": [0], "start": 0, "end": 100}],
                "paragraphs": [],
            },
        }

        # Every POST (token fetch and analysis alike) returns this mock.
        response = MagicMock(text="e@i")
        response.status_code = 200
        response.json.return_value = response_json
        self.patched_post.return_value = response

        client = ExpertAiClient()
        request_body = {"document": {"text": "text"}}
        data_model = client.full_analysis(
            body=request_body, params={"language": "es"}
        )

        # two POST requests are made, one for the token and one for analysis
        self.assertEqual(self.patched_post.call_count, 2)
        # The JSON payload must have been mapped onto the data model.
        self.assertEqual(data_model.sentences[0].phrases[0].type_, "PP")
Example no. 5
0
# Demonstrates the use of the self-documentation resource 'taxonomies' of the expert.ai (Cloud based) Natural Language API

from expertai.nlapi.cloud.client import ExpertAiClient

# Create the API client and fetch the list of available taxonomies.
client = ExpertAiClient()

output = client.taxonomies()

print("Taxonomies:\n")

# Show each taxonomy's name followed by its supported language codes.
for tax in output.taxonomies:
    print(tax.name)
    print("\tLanguages:")
    for lang in tax.languages:
        print("\t\t{0}".format(lang.code))
# Demonstrates the full document analysis of the expert.ai (Cloud based)
# Natural Language API and prints the size of each output array.
from expertai.nlapi.cloud.client import ExpertAiClient
client = ExpertAiClient()

# NOTE(review): the stray apostrophe before the closing quote looks like a
# typo in the sample text; left untouched as it only affects the demo input.
text = "Michael Jordan was one of the best basketball players of all time. Scoring was Jordan's stand-out skill, but he still holds a defensive NBA record, with eight steals in a half.'"
language = 'en'

output = client.full_analysis(body={"document": {
    "text": text
}},
                              params={'language': language})

# Output arrays size

print("Output arrays size:")

print("knowledge: ", len(output.knowledge))
print("paragraphs: ", len(output.paragraphs))
print("sentences: ", len(output.sentences))
print("phrases: ", len(output.phrases))
print("tokens: ", len(output.tokens))
print("mainSentences: ", len(output.main_sentences))
print("mainPhrases: ", len(output.main_phrases))
print("mainLemmas: ", len(output.main_lemmas))
print("mainSyncons: ", len(output.main_syncons))
print("topics: ", len(output.topics))
print("entities: ", len(output.entities))
# Fixed label: this line prints len(output.relations) but was labelled
# "entities: ", duplicating the label of the line above.
print("relations: ", len(output.relations))
print("sentiment.items: ", len(output.sentiment.items))
Example no. 7
0
# Demonstrates the IPTC Media Topics document classification capability of the (Cloud based) expert.ai Natural Language API

from expertai.nlapi.cloud.client import ExpertAiClient

client = ExpertAiClient()

text = "Michael Jordan was one of the best basketball players of all time. Scoring was Jordan's stand-out skill, but he still holds a defensive NBA record, with eight steals in a half."
taxonomy = 'iptc'
language = 'en'

# Classify the document against the requested taxonomy.
output = client.classification(
    body={"document": {"text": text}},
    params={'taxonomy': taxonomy, 'language': language},
)

print("Tab separated list of categories:")

# One line per category: its id and hierarchy, tab separated.
for cat in output.categories:
    print(cat.id_, cat.hierarchy, sep="\t")
Example no. 8
0
# Demonstrates the Temporal information detection capability of the expert.ai (Cloud based) Natural Language API

import json
from expertai.nlapi.cloud.client import ExpertAiClient

client = ExpertAiClient()

text = "I went to Australia for the first time in 1998. Ten years later I returned to stay there, but from 2011 to 2013 I lived in New Zealand."

detector = 'temporal-information'
language = 'en'

# Run the temporal-information detector over the document.
output = client.detection(
    body={"document": {"text": text}},
    params={'detector': detector, 'language': language},
)

# Output extra data containing the JSON-LD object

print("extra_data: ", json.dumps(output.extra_data, indent=4, sort_keys=True))
class ExpertAiClientTestCase(ExpertAiTestCase):
    """Unit tests for ExpertAiClient: request creation and verification,
    parameter validation, URL keyword extraction and response processing.
    Collaborators are replaced with mocks via @patch."""

    def setUp(self):
        super().setUp()
        # Shared fixtures reused across the tests below.
        self.expert_client = ExpertAiClient()
        self.test_body = {"document": {"text": "text"}}
        self.test_endpoint_path = "endpoint/{language}/{resource}"

    @patch("expertai.nlapi.cloud.client.ExpertAiClient.get_method_name_for_endpoint")
    def test_a_request_is_created(self, patched_get_method_name_for_endpoint):
        """
        ...then the proper HTTP method should be set
        """

        def fake_get_method(self):
            # Called with the endpoint path as its single argument; the
            # parameter is ignored and the closed-over `url` is used, so
            # the lookup always yields "GET".
            return {url: "GET"}.get(url)

        url = self.endpoint_path
        expert_client = ExpertAiClient()
        patched_get_method_name_for_endpoint.side_effect = fake_get_method
        new_request = expert_client.create_request(self.endpoint_path)

        self.assertEqual(new_request.string_method, "GET")
        patched_get_method_name_for_endpoint.assert_called_once_with(
            self.endpoint_path
        )

    @patch("expertai.nlapi.cloud.validate.ExpertAiValidation.check_parameters")
    def test_a_request_is_verified(self, patched_check_parameters):
        """
        ...then check_parameters method should be called
        """
        self.expert_client.verify_request(
            endpoint_path="path/{language}", params={"language": "en"}
        )

        patched_check_parameters.assert_called_once_with(
            params={"language": "en"}
        )

    @patch("expertai.nlapi.cloud.validate.ExpertAiValidation.check_parameters")
    def test_parameters_are_not_required(self, patched_check_parameters):
        """
        ...then the check_parameters method should not be called
        """
        # No params supplied, so parameter validation must be skipped.
        self.expert_client.verify_request(endpoint_path="/path", params=None)
        patched_check_parameters.assert_not_called()

    def test_required_parameters_are_not_provided(self):
        """
        ...then an error should be raised, indicating which parameter
        is missing
        """
        # The endpoint template requires a `lang` value that is not given.
        self.assertRaises(
            MissingParametersError,
            self.expert_client.verify_request,
            endpoint_path="path/{lang}",
        )

    def test_a_parameterized_urlpath(self):
        """
        ...then keywords should be extracted
        """
        self.assertEqual(
            self.expert_client.urlpath_keywords("path/{language}/{resource}"),
            ["language", "resource"],
        )

    @patch("expertai.nlapi.cloud.client.ExpertAiClient.verify_request")
    def test_create_request_method_is_called(self, patched_verify_request):
        """
        ...then the verify_request() should also be invoked with the
        correct arguments
        """
        expert_client = ExpertAiClient()
        expert_client.create_request(
            endpoint_path="resource_urlpath",
            params={"language": "en"},
            body={"text": "text"},
        )

        patched_verify_request.assert_called_with(
            "resource_urlpath", params={"language": "en"},
        )

    @patch("expertai.nlapi.cloud.client.ObjectMapper")
    def test_a_bad_request_is_received(self, patched_object_mapper):
        """
        ...then the ObjectMapper should not be called
        """

        def response_json():
            return {
                "errors": [
                    {
                        "code": "PREPARE_DOCUMENT_FAILED",
                        "message": "missing layout key in json",
                    }
                ],
                "success": False,
            }

        # HTTP status reports success, but the body flags an error, so the
        # response must not be mapped onto the data model.
        fake_response = MagicMock(
            status_code=constants.HTTP_SUCCESSFUL, json=response_json
        )
        expert_client = ExpertAiClient()
        expert_client.process_response(fake_response)
        patched_object_mapper.assert_not_called()
# Demonstrates the 'relations' resource of the expert.ai (Cloud based)
# Natural Language API.
from expertai.nlapi.cloud.client import ExpertAiClient

client = ExpertAiClient()

text = "Michael Jordan was one of the best basketball players of all time. Scoring was Jordan's stand-out skill, but he still holds a defensive NBA record, with eight steals in a half."
language = 'en'

# Request the relations analysis for the document.
output = client.specific_resource_analysis(
    body={"document": {"text": text}},
    params={'language': language, 'resource': 'relations'},
)

# Output relations' data

print("Output relations' data:")

# For each relation: the governing verb lemma, then each related item with
# its role.
for rel in output.relations:
    print(rel.verb.lemma, ":")
    for rel_item in rel.related:
        print("\t", "(", rel_item.relation, ")", rel_item.lemma)
Example no. 11
0
# NOTE(review): this region appears garbled by extraction — a fixture-style
# function is fused with an indented test method, and the stray triple-quote
# after the return turns part of the test into a string literal, leaving the
# region syntactically inconsistent. Preserved verbatim pending
# reconstruction from the original source.
def expert_api_client():
    return ExpertAiClient()
    """
    def test_a_context_request_is_executed(self):
        """
        ...then verify that whole flow works as expected
        """
        response_json = {
            "success":
            True,
            "contexts": [{
                "description":
                "Standard context",
                "languages": [{
                    "analyses": [
                        "disambiguation", "relevants", "entities", "sentiment",
                        "relations"
                    ],
                    "code":
                    "en",
                    "name":
                    "English"
                }, {
                    "analyses":
                    ["disambiguation", "relevants", "entities", "relations"],
                    "code":
                    "es",
                    "name":
                    "Spanish"
                }, {
                    "analyses":
                    ["disambiguation", "relevants", "entities", "relations"],
                    "code":
                    "fr",
                    "name":
                    "French"
                }, {
                    "analyses":
                    ["disambiguation", "relevants", "entities", "relations"],
                    "code":
                    "de",
                    "name":
                    "German"
                }, {
                    "analyses":
                    ["disambiguation", "relevants", "entities", "relations"],
                    "code":
                    "it",
                    "name":
                    "Italian"
                }],
                "name":
                "standard"
            }]
        }
        response = MagicMock()
        response.status_code = 200
        response.ok = True
        response.json.return_value = response_json

        self.patched_get.return_value = response
        client = ExpertAiClient()
        dm = client.contexts()
        self.assertEqual(dm.contexts[0].name, "standard")
        self.assertEqual(dm.contexts[0].languages[4].code, "it")
        self.assertEqual(dm.contexts[0].languages[3].analyses[2], "entities")
Example no. 13
0
# Demonstrates the sentiment analysis capability of the expert.ai (Cloud based) Natural Language API performed by the 'sentiment' resource

from expertai.nlapi.cloud.client import ExpertAiClient
client2 = ExpertAiClient()

text = "Michael Jordan was one of the best basketball players of all time."
language = 'en'

# Request the 'sentiment' analysis for the document.
output = client2.specific_resource_analysis(body={"document": {
    "text": text
}},
                                            params={
                                                'language': language,
                                                'resource': 'sentiment'
                                            })

# Output overall sentiment

print("Output overall sentiment:")

print(output.sentiment.overall)

# NOTE(review): this second call is identical to the first and its result
# is never printed — the example looks truncated here; confirm against the
# original sample before reuse.
output = client2.specific_resource_analysis(body={"document": {
    "text": text
}},
                                            params={
                                                'language': language,
                                                'resource': 'sentiment'
                                            })

# Output overall sentiment
Example no. 14
0
import os
import requests
import json
# Placeholder credentials for the expert.ai API; must be set before the
# client is constructed.
os.environ["EAI_USERNAME"] = '******'
os.environ["EAI_PASSWORD"] = '******'

from expertai.nlapi.cloud.client import ExpertAiClient
client = ExpertAiClient()

# GET RESTAURANT
searchTerm = input("Enter a business name or search term: ")
searchLocation = input("Enter a city name or zip code: ")

# Placeholder third-party API keys.
MY_API_KEY = "YELP API KEY GOES HERE"
googleAPIKey = "GOOGLE API KEY GOES HERE"
language= 'en'

# Accumulators — presumably populated further down in the full script
# (this excerpt is truncated).
masterText = ""
masterList  = []

# Search for business on Yelp first
url = 'https://api.yelp.com/v3/businesses/search'
headers = {'Authorization': f"Bearer {MY_API_KEY}"}
params={'term':searchTerm, 'location':searchLocation}

resp = requests.get(url, params=params, headers=headers)
parsed = json.loads(resp.text)

thebusinesses = parsed["businesses"]

# Get precise business name and Yelp ID
Example no. 15
0
 def setUp(self):
     """Create the client under test and the shared request fixtures."""
     super().setUp()
     self.expert_client = ExpertAiClient()
     self.test_body = {"document": {"text": "text"}}
     self.test_endpoint_path = "endpoint/{language}/{resource}"
# Demonstrates the self-documentation resource 'contexts' of the expert.ai
# (Cloud based) Natural Language API.
from expertai.nlapi.cloud.client import ExpertAiClient

client = ExpertAiClient()

# Unused in this fragment but kept for parity with the original sample.
text = "Michael Jordan was one of the best basketball players of all time. Scoring was Jordan's stand-out skill, but he still holds a defensive NBA record, with eight steals in a half."
language = 'en'

output = client.contexts()

# Contexts

print("Contexts:")

# Show each context's name and the language codes it supports.
for ctx in output.contexts:
    print(ctx.name)
    print("\tLanguages:")
    for lang in ctx.languages:
        print("\t", lang.code)
Example no. 17
0
# Demonstrates the use of the self-documentation resource 'taxonomy' of the expert.ai (Cloud based) Natural Language API for the IPTC document classification taxonomy

from expertai.nlapi.cloud.client import ExpertAiClient


def printCategory(level, category):
    # Print one taxonomy category indented four spaces per depth level,
    # then recurse into each nested category one level deeper.
    indent = "    " * level
    print("{}{} ({})".format(indent, category.id, category.label))
    for child in category.categories:
        printCategory(level + 1, child)


client = ExpertAiClient()

taxonomy = 'iptc'
language = 'en'

# Fetch the IPTC taxonomy tree for the requested language.
output = client.taxonomy(params={'taxonomy': taxonomy, 'language': language})

print("iptc taxonomy category tree:\n")

# Walk the top-level categories, printing each subtree recursively.
for top in output.taxonomy[0].categories:
    printCategory(0, top)
Example no. 18
0
    def test_a_taxonomies_request_is_executed(self):
        """
        ...then verify that whole flow works as expected
        """
        # Canned 'taxonomies' self-documentation payload: two taxonomies
        # (iptc and geotax), each listing its supported languages.
        response_json = {
            "success":
            True,
            "taxonomies": [{
                "description":
                "The iptc document classification resource classifies texts based on the IPTC Media Topics taxonomy",
                "languages": [{
                    "code": "en",
                    "name": "English"
                }, {
                    "code": "es",
                    "name": "Spanish"
                }, {
                    "code": "fr",
                    "name": "French"
                }, {
                    "code": "de",
                    "name": "German"
                }, {
                    "code": "it",
                    "name": "Italian"
                }],
                "name":
                "iptc"
            }, {
                "contract":
                "https://github.com/therealexpertai/nlapi-openapi-specification/blob/master/geotax.yaml",
                "description":
                "The geotax document classification resource recognizes geographic places cited in the text and returns corresponding countries' names. In addition, when requested with a specific query-string parameter, it returns extra-data containing equivalent GeoJSON objects. See the specific OpenAPI document (https://github.com/therealexpertai/nlapi-openapi-specification/blob/master/geotax.yaml) for information about the way to obtain and interpret GeoJSON data.",
                "languages": [{
                    "code": "en",
                    "name": "English"
                }, {
                    "code": "es",
                    "name": "Spanish"
                }, {
                    "code": "fr",
                    "name": "French"
                }, {
                    "code": "de",
                    "name": "German"
                }, {
                    "code": "it",
                    "name": "Italian"
                }],
                "name":
                "geotax"
            }]
        }
        # The mocked HTTP GET returns the canned payload with a 200 status.
        response = MagicMock()
        response.status_code = 200
        response.ok = True
        response.json.return_value = response_json

        self.patched_get.return_value = response
        client = ExpertAiClient()
        dm = client.taxonomies()
        # Spot-check that the JSON was mapped onto the data model.
        self.assertEqual(dm.taxonomies[1].name, "geotax")
        self.assertEqual(dm.taxonomies[0].languages[2].code, "fr")
Example no. 19
0
#set env vars
import os
os.environ["EAI_USERNAME"] = '******'
os.environ["EAI_PASSWORD"] = '******'

from textblob import TextBlob
import expertai
from expertai.nlapi.cloud.client import ExpertAiClient
client = ExpertAiClient()


# Output range = [-100,100]. 100 = very positive. -100 = very negative.
def analyzeSent(text):
    # Coerce to str; very short inputs are scored neutral.
    text = str(text)
    if len(text) < 4:
        return (0.0)

    # Detect the input language with TextBlob before calling the API.
    b = TextBlob(text)
    language = b.detect_language()

    # NOTE(review): presumably language codes the sentiment resource does
    # not support — confirm against the API's supported-language list.
    badList = ['ru', 'uk', 'su', 'sr', 'tl', 'pt', 'fi']
    if language in badList:
        return (0.0)

    try:
        # NOTE(review): this fragment is truncated here — the call and its
        # parameters continue beyond this excerpt.
        output = client.specific_resource_analysis(
            body={"document": {
                "text": text
            }},
            params={
                'language': language,
Example no. 20
0
    def test_taxonomy_iptc_is_executed(self):
        """
        ...then verify that whole flow works as expected
        """
        # Canned IPTC taxonomy payload: a nested category tree rooted at
        # MEDIATOPIC_TAX.
        response_json = {
            "success":
            True,
            "data": [{
                "namespace":
                "iptc_en_1.0",
                "taxonomy": [{
                    "categories": [{
                        "categories": [{
                            "categories": [{
                                "id": "20000003",
                                "label": "Animation"
                            }, {
                                "id": "20000004",
                                "label": "Cartoon"
                            }, {
                                "categories": [{
                                    "id": "20000006",
                                    "label": "Film festival"
                                }],
                                "id":
                                "20000005",
                                "label":
                                "Cinema"
                            }, {
                                "categories": [{
                                    "id": "20000008",
                                    "label": "Ballet"
                                }, {
                                    "id": "20000009",
                                    "label": "Modern dance"
                                }, {
                                    "id": "20000010",
                                    "label": "Traditional dance"
                                }],
                                "id":
                                "20000007",
                                "label":
                                "Dance"
                            }],
                            "id":
                            "01000000",
                            "label":
                            "Arts, culture and entertainment"
                        }],
                        "id":
                        "V16000000",
                        "label":
                        "Conflicts, war and peace"
                    }],
                    "id":
                    "MEDIATOPIC_TAX",
                    "label":
                    "MEDIATOPIC_TAX"
                }]
            }]
        }
        # The mocked HTTP GET returns the canned payload with a 200 status.
        response = MagicMock()
        response.status_code = 200
        response.ok = True
        response.json.return_value = response_json

        self.patched_get.return_value = response
        client = ExpertAiClient()
        dm = client.taxonomy_iptc(params={"language": "en"})
        # Spot-check the mapped category tree.
        self.assertEqual(dm.taxonomy[0].categories[0].id, "MEDIATOPIC_TAX")
        self.assertEqual(dm.taxonomy[0].categories[0].categories[0].label,
                         "Conflicts, war and peace")