def getsentimentfromUrl(url):
    # Assumes a module-level NLU client, Features/EntitiesOptions imports,
    # the time module, and a global errorcounter float.
    global errorcounter
    try:
        response = natural_language_understanding.analyze(
            url=url,
            language="de",
            return_analyzed_text=True,
            features=Features(
                entities=EntitiesOptions(sentiment=True)))
        # decay the error counter on success, never below zero
        errorcounter = max(errorcounter - 0.2, 0)
        return response, False, errorcounter
    except Exception:
        print("Watson API error, waiting 3 seconds before retrying")
        time.sleep(3)
        try:
            response = natural_language_understanding.analyze(
                url=url,
                language="de",
                return_analyzed_text=True,
                features=Features(
                    entities=EntitiesOptions(sentiment=True)))
            return response, False, errorcounter
        except Exception:
            errorcounter += 1
            print("Watson API error on retry, error count:", errorcounter)
            return None, True, errorcounter
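A minimal driver sketch for the helper above; the url_list contents and the stop threshold are hypothetical, and errorcounter is assumed to start at 0:

# Hypothetical usage; url_list and the threshold of 5 are illustrative only.
errorcounter = 0
url_list = ["https://example.com/article-1", "https://example.com/article-2"]
for page_url in url_list:
    response, failed, errorcounter = getsentimentfromUrl(page_url)
    if errorcounter > 5:
        print("Too many accumulated Watson errors, stopping")
        break
    if not failed:
        # older watson_developer_cloud SDKs return the parsed dict directly
        for entity in response["entities"]:
            print(entity["text"], entity.get("sentiment", {}))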
Example #2
def get_nlu_data(samples):
    """Query IBM NLU to get keyword data for each sample."""
    data = {}
    nlu = NaturalLanguageUnderstandingV1(
        version='2018-03-16',
        username='******',
        password='******')
    for s in samples:
        response = nlu.analyze(
            text=s,
            language='en',
            features=Features(
                keywords=KeywordsOptions(
                    emotion=True,
                    limit=5),
                entities=EntitiesOptions(
                    emotion=True,
                    limit=5)
            ))
        data[s] = {'key' : {}, 'ent' : {}}
        for kwd_data in response.result['keywords']:
            if 'relevance' not in kwd_data or 'emotion' not in kwd_data:
                continue  # skip keywords missing relevance or emotion data
            data[s]['key'][kwd_data['text']] = kwd_data
        for ent_data in response.result['entities']:
            if 'relevance' not in ent_data or 'emotion' not in ent_data:
                continue  # skip entities missing relevance or emotion data
            data[s]['ent'][ent_data['text']] = ent_data
    return data
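A short usage sketch for get_nlu_data; the sample sentences are made up, and the credentials above are placeholders that must be filled in:

# Hypothetical samples; requires valid NLU credentials inside get_nlu_data.
samples = ["IBM opened a new lab in Munich.",
           "The keynote felt rushed and underprepared."]
data = get_nlu_data(samples)
for sample, result in data.items():
    print(sample)
    print("  keywords:", list(result['key'].keys()))
    print("  entities:", list(result['ent'].keys()))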
Example #3
    def understanding(self):
        if not self.transcription:
            self.transcript()

        natural_language_understanding = NaturalLanguageUnderstandingV1(
            version='2017-02-27',
            username=os.environ['UNDERSTANDING_USERNAME'],
            password=os.environ['UNDERSTANDING_PASSWORD'])

        self.analysis = natural_language_understanding.analyze(
            text=self.transcription['results'][0]['alternatives'][0]['transcript'],
            features=Features(categories=CategoriesOptions(),
                              concepts=ConceptsOptions(),
                              emotion=EmotionOptions(),
                              entities=EntitiesOptions(emotion=True,
                                                       sentiment=True,
                                                       mentions=True),
                              keywords=KeywordsOptions(emotion=True,
                                                       sentiment=True),
                              relations=RelationsOptions(),
                              sentiment=SentimentOptions()))

        logger.info('Completed analysis of recorded file')
        return self.analysis
Example #4
def watson_nlp_analysis(text):

    if text == '': return text

    max_limit_one = 10
    max_limit_two = 30

    naturalLanguageUnderstanding = NaturalLanguageUnderstandingV1(
        version='2018-11-16',
        iam_apikey=os.environ['WATSON'],
        url='https://gateway.watsonplatform.net/natural-language-understanding/api')

    response = naturalLanguageUnderstanding.analyze(
        text=text,
        features=Features(concepts=ConceptsOptions(limit=max_limit_one),
                          categories=CategoriesOptions(limit=max_limit_one),
                          sentiment=SentimentOptions(document=True),
                          emotion=EmotionOptions(document=True),
                          entities=EntitiesOptions(emotion=True,
                                                   sentiment=True,
                                                   limit=max_limit_two),
                          keywords=KeywordsOptions(
                              emotion=True,
                              sentiment=True,
                              limit=max_limit_two))).get_result()
    return response
Example #5
def process(key, text):
    naturalLanguageUnderstanding = NaturalLanguageUnderstandingV1(
        version='2018-11-16',
        iam_apikey=key,
        url='https://gateway.watsonplatform.net/natural-language-understanding/api')

    # t = 'IBM is an American multinational technology company '
    # 'headquartered in Armonk, New York, United States, '
    # 'with operations in over 170 countries.'

    t = text

    try:
        response = naturalLanguageUnderstanding.analyze(
            text=t,
            features=Features(entities=EntitiesOptions(emotion=True,
                                                       sentiment=True,
                                                       limit=2),
                              keywords=KeywordsOptions(emotion=True,
                                                       sentiment=True,
                                                       limit=2))).get_result()
    except Exception:
        return False

    result = json.dumps(response, indent=2)
    print(result)
    return result
Example #6
def analyze_audio():
    print(request.json['texto'])

    ''' Translate the input text to English '''
    language_translator = LanguageTranslator(
        version='2018-03-16',
        iam_api_key='XmyHrVcLnTgWC3Ou33zGB989tcrOxocykZeZDUJxdlP6',
        url='https://gateway.watsonplatform.net/language-translator/api')

    translation = language_translator.translate(
        text=request.json['texto'],
        model_id='es-en')

    ''' Extract insights from the translated text '''
    natural_language_understanding = NaturalLanguageUnderstandingV1(
      username='******',
      password='******',
      version='2018-03-16')

    response = natural_language_understanding.analyze(
      text=translation["translations"][0]["translation"],
      features=Features(
        entities=EntitiesOptions(
          emotion=True,
          sentiment=True,
          limit=2),
        keywords=KeywordsOptions(
          emotion=True,
          sentiment=True,
          limit=2)))

    return jsonify(response)
Example #7
def main(params):
    try:
        model_id = ""
        natural_language_understanding = NaturalLanguageUnderstandingV1(
            version='2018-04-05',
            iam_apikey="",
            url='')
        resposta = natural_language_understanding.analyze(
            text=params['texto'],
            features=Features(
                entities=EntitiesOptions(emotion=True, sentiment=True, model=model_id),
                sentiment=SentimentOptions())).get_result()
        retorno = {}

        # Save the entities in the response
        if 'entities' in resposta:
            for entity in resposta['entities']:
                retorno[entity['type']] = entity['text']

        # Save the sentiment in the response
        if 'sentiment' in resposta:
            retorno['sentiment'] = resposta['sentiment']['document']['label']

        params['err'] = False
        params['resposta'] = retorno
        return params

    except Exception:
        params['err'] = True
        params['resposta'] = "Error calling the NLU service."
        return params
Example #8
def posting_generator():
    jobs = []
    #job_title = "software engineer"
    #job_location = "Pittsburgh, PA"
    search_url = 'https://www.careerbuilder.com/jobs-software-engineer-in-pittsburgh,pa?keywords=Software+Engineer&location=Pittsburgh%2C+PA'
    base_url = 'https://www.careerbuilder.com'

    next_page = urllib.request.urlopen(search_url, None, None)

    nlu = NLU(
        _apikey='BU11gy3frJMRMKz4XQ_sPJ_HGF3p-qEr74xUlEVTWvsY',
        version='2018-03-19'
    )

    def nextPage(soup):
        print("BREAK 1")
        next_link = soup.find("a", class_="Next Page")

        if next_link is not None:
            print("BREAK 2")
            return next_link.find_parent("a")['href']
        else:
            print("BREAK 3")
            return 0

    while True:
        soup = BeautifulSoup(next_page, 'html.parser')

        #next_page = nextPage(soup)

        for job in soup.find_all('h2'):
            # BeautifulSoup returns the class attribute as a list of names
            if job.get('class') == ['job-title', 'show-for-medium-up']:
                url = 'https://www.careerbuilder.com' + job.a['href']
                response = nlu.analyze(
                    url=url,
                    features=Features(
                        entities=EntitiesOptions(limit=1000),
                        keywords=KeywordsOptions(limit=1000),
                    )
                ).get_result()
                jobs.append(response)
                # jsonprinter(response)
                yield response

        next_url = nextPage(soup)

        if next_url == 0:
            break
        else:
            next_page = urllib.request.urlopen(next_url, None, None)

    print("END OF PROGRAM!")
Example #9
def IntelligentCrawlUrl(URL):
    """
	This Function uses IBM Watson's Natural Language Understanding API to crawl the links and get company or person names based on a 	 knowledge graph it already has.
	This Function also return Company/Person names based on relevance score by IBM Natural Language Cognitive API.
	"""
    ListOfEntityOutput = []
    try:
        response = NaturalLanguageUnderstanding.analyze(
            url=URL,
            features=Features(entities=EntitiesOptions(emotion=True,
                                                       sentiment=True,
                                                       limit=250),
                              sentiment=SentimentOptions(),
                              keywords=KeywordsOptions(emotion=True,
                                                       sentiment=True,
                                                       limit=250)))

    except Exception:
        response = {}

    if response:
        for EveryEntity in response["entities"]:
            if EveryEntity["type"] == "Company":
                if EveryEntity["relevance"] > 0.25:
                    ListOfEntityOutput.append(EveryEntity["text"])
    print(ListOfEntityOutput)
    return ListOfEntityOutput
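A hedged usage sketch; the URL is a placeholder, and NaturalLanguageUnderstanding must already be an authenticated client at module level, as the function above assumes:

# Placeholder URL; prints company names the NLU API deems relevant.
companies = IntelligentCrawlUrl("https://example.com/press-release")
for name in companies:
    print("Relevant company:", name)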
Example #10
    def concepts(self):
        IBM_dict = {}
        IBM_response = self.naturalLanguageUnderstanding.analyze(
            text=self.text,
            features=Features(
                entities=EntitiesOptions(emotion=True, sentiment=True, limit=10),
                keywords=KeywordsOptions(emotion=True, sentiment=True, limit=10),
                sentiment=SentimentOptions(),
                categories=CategoriesOptions()
                )).get_result()

        sent_dict = {'sentiment': IBM_response['sentiment']['document']['score']}
        IBM_dict['sentiment'] = sent_dict
        
        ent_result = []
        ents = IBM_response['entities']
        for e in ents:
            ent_result.append(e['text'].lower())
        ent_result.sort()
        IBM_dict['entities'] = ent_result
        
        kws = []
        for keyword in IBM_response['keywords']:
            kws.append(keyword['text'].lower())
        kws.sort()
        IBM_dict['keywords'] = kws
        
        cats = []
        for category in IBM_response['categories']:
            cats.append(category['label'])
        IBM_dict['categories'] = cats
        
        return IBM_dict
Example #11
def nlu_data():
    natural_language_understanding = NaturalLanguageUnderstandingV1(
        version='2017-02-27',
        username='******',
        password='******')
    with open("C:\\Users\\kishan.sampat\\Desktop\\user_input.txt", "r") as r:
        extracted = r.read().splitlines()
    #print(extracted)
    mainArray = []

    # Write one CSV row of stemmed keywords per input line;
    # stop at the first empty line.
    with open('C:\\Users\\kishan.sampat\\Desktop\\user_input.csv',
              'w',
              newline='') as outfile:
        writer = csv.writer(outfile)
        for elements in extracted:
            if len(elements) == 0:
                break
            response = natural_language_understanding.analyze(
                text=elements,
                features=Features(entities=EntitiesOptions(sentiment=True,
                                                           limit=3),
                                  keywords=KeywordsOptions()))

            array = [porter_stemmer.stem(each['text'])
                     for each in response['keywords']]
            mainArray.append(array)
            writer.writerow(array)
Example #12
def watson_analyze_text_understanding(text):
    """
    Input:
        text to be analyzed
    Output:
        response from the watson API

    Taken from the watson API docs.
    """
    natural_language_understanding = NaturalLanguageUnderstandingV1(
        username=settings.WATSON_UNDERSTANDING_USERNAME,
        password=settings.WATSON_UNDERSTANDING_PASSWORD,
        version="2017-02-27",
    )

    response = {}
    try:
        response = natural_language_understanding.analyze(
            text=text,
            features=Features(
                entities=EntitiesOptions(emotion=True, sentiment=True,
                                         limit=2),
                keywords=KeywordsOptions(emotion=True, sentiment=True,
                                         limit=2),
            ),
        )
    except Exception as e:
        print(e)
        print("Proceeding without the watson data")

    return response
Example #13
def get_sentiment(target_text):
	from watson_developer_cloud import NaturalLanguageUnderstandingV1
	from watson_developer_cloud.natural_language_understanding_v1 \
	import Features, EntitiesOptions, KeywordsOptions

	natural_language_understanding = NaturalLanguageUnderstandingV1(
	 username="******",
	 password="******",
	 version="2018-03-16")

	""" Opening the file and reading the text contained """
	file = open("messages_to_be_analyzed.txt",'r')
	target_text = file.read()

	response = natural_language_understanding.analyze(
	 text= target_text,
	 features=Features(
	   entities=EntitiesOptions(
	     emotion=True,
	     sentiment=True,
	     limit=2),
	   keywords=KeywordsOptions(
	     emotion=True,
	     sentiment=True,
	     limit=2)))

	emotion_scores = response.result.get("keywords")[0].get("emotion")
	return [emotion_scores[emotion] for emotion in emotion_scores]
Example #14
    def identifyKeyworkdsAndEntities(self, data):
        self.app.logger.info(
            'Preparing to invoke Natural Language Understanding service')
        nlu = self.getNLUService()

        results = nlu.analyze(text=data,
                              return_analyzed_text=True,
                              features=Features(entities=EntitiesOptions(),
                                                keywords=KeywordsOptions()))

        self.app.logger.info(json.dumps(results, indent=2))

        primeEntity = None
        primeKeyword = None

        if 'entities' in results:
            entities = results['entities']
            if 0 < len(entities):
                primeEntity = entities[0].get('text', None)

        if 'keywords' in results:
            keywords = results['keywords']
            if 0 < len(keywords):
                primeKeyword = keywords[0].get('text', None)

        retData = {"prime_entity": primeEntity, "prime_keyword": primeKeyword}
        return retData
Example #15
def nlu_fact():
    natural_language_understanding = NaturalLanguageUnderstandingV1(
        version='2017-02-27',
        username='******',
        password='******')
    with open("C:\\Users\\kishan.sampat\\Desktop\\fact.txt", "r") as r:
        extracted = r.read()

    response = natural_language_understanding.analyze(
        text=extracted,
        features=Features(entities=EntitiesOptions(sentiment=True, limit=3),
                          keywords=KeywordsOptions()))
    #print(response)
   # print(json.dumps(response, indent=5))
    #json_parsed = json.dumps(response)
    #print(json_parsed)
    with open('C:\\Users\\kishan.sampat\\Desktop\\fact_data.csv', 'w') as outfile:
        #json.dump(response , outfile)
        for each in response['keywords']:
            tex = each['text']
            lemet = porter_stemmer.stem(tex)
            #print(lemet)
            json.dump(lemet, outfile)
            outfile.write('\n')
        for ent in response['entities']:
            ents = ent['text']
            lemet_ent = porter_stemmer.stem(ents)
            #print(lemet_ent)
            json.dump(lemet_ent, outfile)
            outfile.write('\n')
Example #16
def requestWatsonSentence(sentences):

    nlu = NaturalLanguageUnderstandingV1(version='2017-02-27',
                                         username=credentials.nluKey,
                                         password=credentials.nluId)

    keywords = dict()
    entities = dict()

    for i, sentence in enumerate(sentences):
        tmp = nlu.analyze(text=sentence,
                          features=Features(
                              entities=EntitiesOptions(emotion=True,
                                                       sentiment=True,
                                                       limit=2),
                              keywords=KeywordsOptions(emotion=True,
                                                       sentiment=True,
                                                       limit=2)))
        keywords[i] = [
            item.get('text', {}) for item in tmp.get('keywords', {})
            if item.get("relevance") > 0.1
        ]
        entities[i] = [
            item.get('text', {}) for item in tmp.get('entities', {})
            if item.get("relevance") > 0.75
        ]

    # rtype: two dictionaries keyed by sentence index
    return entities, keywords
Example #17
    def makeWikiSection(self, sectiontitle):
        print("Accessing IBM Watson for NLP understanding on " + sectiontitle +
              " (subtopic of " + self._topic + ")")

        response = self.watsonobj.analyze(
            text=self._page.section(sectiontitle),
            features=Features(concepts=ConceptsOptions(limit=3),
                              entities=EntitiesOptions(limit=3),
                              keywords=KeywordsOptions(limit=5),
                              relations=RelationsOptions(),
                              semantic_roles=SemanticRolesOptions(limit=3)))

        if sectiontitle in wikipedia.search(
                sectiontitle) and sectiontitle != "See also":
            return Node("Section",
                        title=sectiontitle,
                        content=self._page.section(sectiontitle),
                        concepts=json.dumps(response["concepts"]),
                        entities=json.dumps(response["entities"]),
                        keywords=json.dumps(response["keywords"]),
                        relations=json.dumps(response["relations"]),
                        semantic_roles=json.dumps(response["semantic_roles"]),
                        mainarticleurl=wikipedia.page(self._topic).url)

        return Node("Section",
                    title=sectiontitle,
                    content=self._page.section(sectiontitle),
                    concepts=json.dumps(response["concepts"]),
                    entities=json.dumps(response["entities"]),
                    keywords=json.dumps(response["keywords"]),
                    relations=json.dumps(response["relations"]),
                    semantic_roles=json.dumps(response["semantic_roles"]))
Example #18
def getAnalysis(review):
    natural_language_understanding = NaturalLanguageUnderstandingV1(
        username='******',
        password='******',
        version='2018-03-16')
    response = natural_language_understanding.analyze(
        text=review,
        features=Features(entities=EntitiesOptions(emotion=True,
                                                   sentiment=True,
                                                   limit=2),
                          keywords=KeywordsOptions(emotion=True,
                                                   sentiment=True,
                                                   limit=2))).get_result()
    keywords = response["keywords"]
    numKeywords = len(keywords)
    if numKeywords == 0:
        print("no keywords")
        return np.zeros(6)
    sentiments = np.array([
        keyword["sentiment"]["score"] * keyword["relevance"]
        for keyword in keywords
    ])
    totalSentiment = np.sum(sentiments) / numKeywords

    emotionNames = ['sadness', 'joy', 'fear', 'disgust', 'anger']
    emotions = np.array([
        np.array([keyword["emotion"][name] for name in emotionNames])
        for keyword in keywords
    ])
    totalEmotions = np.sum(emotions, 0) / numKeywords

    features = np.insert(totalEmotions, 0, totalSentiment, axis=0)
    return features
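The vector returned above packs the relevance-weighted average sentiment at index 0, followed by the five averaged emotion scores; a minimal consuming sketch (the review text is made up):

# Index layout inferred from getAnalysis:
# [sentiment, sadness, joy, fear, disgust, anger]
feats = getAnalysis("The battery lasts all day but the screen is dim.")
emotion_names = ['sadness', 'joy', 'fear', 'disgust', 'anger']
print("overall sentiment:", feats[0])
print("dominant emotion:", emotion_names[int(np.argmax(feats[1:]))])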
Example #19
def get_keywords(content):
    response = service.analyze(text=content,
                               features=Features(
                                   entities=EntitiesOptions(),
                                   keywords=KeywordsOptions())).get_result()

    kword_list = [keyword["text"] for keyword in response["keywords"]]
    return json.dumps(kword_list)
Example #20
def fun_natural_language_understanding(text):
    natural_language_understanding = NaturalLanguageUnderstandingV1(
        version="2018-09-28", iam_apikey='        ', url='')
    features = Features(entities=EntitiesOptions(sentiment=True, model=""))
    response = natural_language_understanding.analyze(
        text=text, features=features).get_result()

    return response
Example #21
def tweetEmotion(tweet):
    # *** WATSON ANALYSIS ***
    #returns emotion: joy, anger, disgust, sadness, fear

    # ----- WATSON CLOUD LOGIN -----
    natural_language_understanding = NaturalLanguageUnderstandingV1(
      username="******",
      password="******",
      version="2017-02-27")

    try:
        response = natural_language_understanding.analyze(
          text=tweet,
          features=Features(entities=EntitiesOptions(
                                  emotion=True, sentiment=True,limit=1),
                           keywords=KeywordsOptions(
                                  emotion=True, sentiment=True,limit=1
                                            ))
        )

    except watson_developer_cloud.watson_service.WatsonApiException as e:
        response = []

    # print("response", response)

    json_data = json.dumps(response, indent=2)          # serialize the response to a JSON string
    json_data = json.loads(json_data)                   # parse it back into a plain dictionary

    # print("json_data", json_data)

    if len(json_data) > 0:
        # check whether the emotions are located in keywords or entities
        # print(len(json_data["keywords"]))
        # print(len(json_data["entities"]))

        # check if emotion data exists
        if len(json_data["keywords"]) > 0 and "emotion" in json_data["keywords"][0]:
            result = json_data["keywords"][0]["emotion"]
            print(result)
        elif len(json_data["entities"]) > 0 and "emotion" in json_data["entities"][0]:
            result = json_data["entities"][0]["emotion"]
            print(result)
        else:
            result = "undef"
            print(result)
    else:
        result = "undef"

    if result == "undef":
        return "undef"

    # return the emotion with the highest score
    maxEmotion = max(result, key=result.get)
    print(maxEmotion)
    return maxEmotion
Example #22
    def analyze(self, sentence):
        response_places = []
        response = self.natural_language_understanding.analyze(
            text=sentence['input'],
            features=Features(entities=EntitiesOptions(emotion=True,
                                                       sentiment=True,
                                                       limit=1),
                              keywords=KeywordsOptions(emotion=True,
                                                       sentiment=True,
                                                       limit=1)))

        # print(json.dumps(response, indent=2))
        '''
        places
        '''
        if len(response['entities']) > 0 and str(
                response['entities'][0]['type']).lower() in place_entity_type:
            new_input = {}
            destination_name = response['keywords'][0]['text']
            new_input['current_lat'] = sentence['current_lat']
            new_input['current_lng'] = sentence['current_lng']
            new_input['destination_name'] = destination_name
            location_request = requests.get(
                url=config.GOOGLE_GET_LOCATION_GEO.format(
                    destination_name, config.GOOGLE_API_KEY))
            location_geo = json.loads(
                location_request.content)['results'][0]['geometry']['location']
            new_input['destination_lat'] = location_geo['lat']
            new_input['destination_lng'] = location_geo['lng']
            return get_recommended_trip(new_input)
        else:
            keyword = response['keywords'][0]['text']
            message_res = WatsonWatsonAssistantAdapter().message(keyword)
            places = message_res['output']['text']
            for place in places:
                google_places_response = requests.get(
                    url=config.GOOGLE_PLACE_URL.format(place,
                                                       config.GOOGLE_API_KEY))
                google_place = json.loads(
                    google_places_response.content)['candidates'][0]
                place_element = {}
                place_element['name'] = google_place['name']
                place_element['image'] = config.GOOGLE_IMAGE_URL.format(
                    google_place['photos'][0]['photo_reference'],
                    config.GOOGLE_API_KEY)
                place_element['rating'] = google_place['rating']
                place_element['lat'] = google_place['geometry']['location'][
                    'lat']
                place_element['lng'] = google_place['geometry']['location'][
                    'lng']
                response_places.append(place_element)

            print(response_places)
            response_dict = {}
            response_dict['places'] = response_places
            response_dict['trips'] = {}
        return response_dict
Example #23
def analizarTexto(USER):
  #Open the file with the name of the user.
  resultfile = open("result_"+USER+".json", "w+")

  targets =[
      'vida', 'Guatemala', 'amor', 'sexo', 'politico',
      'poliltica','Yo', 'sonrisa', 'pais','novio','novia',
      'enojo', 'hermano', 'hermana','mama','papa','familia',
      'deporte', 'relacion'
    ]
  #Get the configuration file
  with open('config.json', 'r') as f:
      config = json.load(f)

  #reading the data file
  datafile = open("data_" +USER+".txt","r")
  data = datafile.read()
  
  # debug: print the raw data
  print(data)

  #Authentication
  natural_language_understanding = NaturalLanguageUnderstandingV1(
      version=config["version"],
      username=config["username"],
      password=config["password"]
  )

  response = natural_language_understanding.analyze(
    text=data,
    features=Features(
      entities=EntitiesOptions(
        emotion=True,
        sentiment=True,
        limit=2),
      keywords=KeywordsOptions(
        emotion=True,
        sentiment=True,
        limit=2),
      sentiment=SentimentOptions(
        targets=targets
      )
      #Doesn't support Spanish yet
      # ,
      # emotion=EmotionOptions(
      #   targets=targets
      # )
    )
  )

  result = str(response)
  print(result)
  resultfile.write(result)
  resultfile.close()
  datafile.close()
Example #24
    def get_entities(self, text=None, url=None):
        if text is None and url is None:
            return "Error"

        response = naturalLanguageUnderstanding.analyze(
            text=text,
            url=url,
            features=Features(entities=EntitiesOptions(
                limit=50))).get_result()
        return response
Example #25
def nluRun(txt):
    response = natural_language_understanding.analyze(
        text=txt,
        features=Features(entities=EntitiesOptions(emotion=True,
                                                   sentiment=True,
                                                   limit=3),
                          keywords=KeywordsOptions(emotion=True,
                                                   sentiment=True,
                                                   limit=3)))
    return response
Example #26
    def __init__(self, nlu_details: dict) -> None:
        self.version = nlu_details["version"]
        self.url = nlu_details["url"]
        self.apikey = nlu_details["apikey"]

        self.nlu = NaturalLanguageUnderstandingV1(
            version=self.version, url=self.url, iam_apikey=self.apikey)

        self.features = Features(
            categories=CategoriesOptions(),
            entities=EntitiesOptions(emotion=True, sentiment=True),
            keywords=KeywordsOptions(emotion=True, sentiment=True))
Example #27
def analyze_article(url):
    response = nlu.analyze(url=url,
                           features=Features(
                               entities=EntitiesOptions(emotion=True,
                                                        sentiment=True,
                                                        limit=2),
                               keywords=KeywordsOptions(emotion=True,
                                                        sentiment=True,
                                                        limit=2)))
    return response
Example #28
def get_response(text):
    response = natural_language_understanding.analyze(
        text=text,
        features=Features(entities=EntitiesOptions(emotion=True,
                                                   sentiment=True,
                                                   limit=2),
                          keywords=KeywordsOptions(emotion=True,
                                                   sentiment=True,
                                                   limit=2)))
    return response['keywords']
Example #29
def main(args):
    natural_language_understanding = NaturalLanguageUnderstandingV1(
        version='2017-02-27',
        username='******',
        password='******')

    try:
        response = natural_language_understanding.analyze(
            text=args.get("text", None),
            url=args.get("url", None),
            html=args.get("html", None),
            features=Features(entities=EntitiesOptions(),
                              emotion=EmotionOptions()))
    except WatsonException as ex:
        return {"NULL": "NULL"}

    if (args.get("type", "Emotion") == "Emotion"):
        result = emotion2result(response)
        return result

    itemlist = dic2item(response)

    wiki_query = "http://en.wikipedia.org/w/api.php?action=query&" \
                 "prop=extracts&format=json&exintro=&titles="

    count = 0
    index = 0
    extractlist = {}
    while count < 3 and index < len(itemlist):
        # build a "+"-separated query term from the entity text
        string = "+".join(itemlist[index][0].split(" "))
        res = try_url(wiki_query + string)
        # print(res)
        res_json = json.loads(res)
        extract = res_json["query"]["pages"]
        pagenum = list(extract.keys())[0]
        if pagenum != "-1":
            count += 1
            extract = extract[pagenum]
            extract = extract["extract"]

            # keep only the first sentence of the extract
            slist = extract.split(". ")
            if slist:
                extract = slist[0] + "."
            extract = clean(extract)
            extractlist[itemlist[index][0]] = extract
        index += 1
    if not extractlist:
        return {"NULL": "NULL"}
    return extractlist
Example #30
def analyse_text(text):
    print("Texto: %s" % text)

    response = natural_language_understanding.analyze(
        text=text,
        features=Features(
            entities=EntitiesOptions(emotion=True, sentiment=True),
            keywords=KeywordsOptions(emotion=True, sentiment=True),
        )).get_result()

    return response