def test_a_full_analysis_request_is_executed(self):
    """End-to-end check: a full-analysis call makes two POSTs (token +
    analysis) and deserializes the JSON payload into the data model."""
    # Canned payload shaped like the cloud API's full-analysis response.
    fake_payload = {
        "success": True,
        "data": {
            "content": "Facebook is looking at buying U.S. startup for $6 million",
            "language": "en",
            "version": "sensei: 3.1.0; disambiguator: 15.0-QNTX-2016",
            "knowledge": [
                {
                    "label": "organization.company",
                    "properties": [{"type": "WikiDataId", "value": "Q380"}],
                    "syncon": 288110,
                }
            ],
            "phrases": [
                {"tokens": [0], "type": "PP", "start": 54, "end": 65},
            ],
            "tokens": [
                {
                    "syncon": 62653,
                    "start": 74,
                    "end": 83,
                    "type": "NOU",
                    "lemma": "long time",
                    "pos": "NOUN",
                    "dependency": {"id": 11, "head": 7, "label": "nmod"},
                    "morphology": "Number=Sing",
                    "paragraph": 0,
                    "sentence": 0,
                    "phrase": 4,
                    "atoms": [
                        {"start": 74, "end": 78, "type": "ADJ", "lemma": "long"},
                    ],
                }
            ],
            "mainSentences": [],
            "mainPhrases": [],
            "mainLemmas": [],
            "mainSyncons": [],
            "entities": [],
            "topics": [],
            "sentences": [{"phrases": [0], "start": 0, "end": 100}],
            "paragraphs": [],
        },
    }

    # Wire the patched HTTP layer so every POST yields this fake response.
    fake_response = MagicMock(text="e@i")
    fake_response.status_code = 200
    fake_response.json.return_value = fake_payload
    self.patched_post.return_value = fake_response

    api_client = ExpertAiClient()
    model = api_client.full_analysis(
        body={"document": {"text": "text"}},
        params={"language": "es"},
    )

    # two POST requests are made, one for the token and one for analysis
    self.assertEqual(self.patched_post.call_count, 2)
    self.assertEqual(model.sentences[0].phrases[0].type_, "PP")
from expertai.nlapi.cloud.client import ExpertAiClient

client = ExpertAiClient()

text = (
    "Michael Jordan was one of the best basketball players of all time. "
    "Scoring was Jordan's stand-out skill, but he still holds a defensive "
    "NBA record, with eight steals in a half.'"
)
language = 'en'

# Run the full document analysis on the expert.ai cloud service.
output = client.full_analysis(
    body={"document": {"text": text}},
    params={'language': language},
)

# Output arrays size
print("Output arrays size:")
print("knowledge: ", len(output.knowledge))
print("paragraphs: ", len(output.paragraphs))
print("sentences: ", len(output.sentences))
print("phrases: ", len(output.phrases))
print("tokens: ", len(output.tokens))
print("mainSentences: ", len(output.main_sentences))
print("mainPhrases: ", len(output.main_phrases))
print("mainLemmas: ", len(output.main_lemmas))
print("mainSyncons: ", len(output.main_syncons))
print("topics: ", len(output.topics))
print("entities: ", len(output.entities))
# Bug fix: this line printed the relations count under the label
# "entities: " (copy-paste error from the line above).
print("relations: ", len(output.relations))
print("sentiment.items: ", len(output.sentiment.items))