def analyzeWithTargets(self, text, name, symbol):
    """Run Watson NLU emotion + sentiment analysis on *text* for the company
    *name* and ticker *symbol*.

    Tries progressively weaker target sets until one request succeeds:
    name + "$symbol", name only, symbol only, then no targets at all.
    On the first success, prints the response as JSON and returns it.

    Returns the Watson response dict, or False if every attempt fails.
    Raises Exception if *text* is None or empty.
    """
    if not text:
        # Original message preserved so existing callers' handling still matches.
        raise Exception(
            "The function analyze() received an None or empty text parameter."
        )
    # Fallback chain replacing four copy-pasted nested try/excepts.
    # None means "no targets" (document-level analysis only).
    target_sets = [[name, "$" + symbol], [name], [symbol], None]
    for targets in target_sets:
        if targets is None:
            features = Features(emotion=EmotionOptions(),
                                sentiment=SentimentOptions())
        else:
            features = Features(
                emotion=EmotionOptions(targets=targets),
                sentiment=SentimentOptions(targets=targets))
        try:
            response = Watson.natural_language_understanding.analyze(
                text=text,
                features=features,
                #language="en",
                return_analyzed_text=True)
        except BaseException as e:
            # NOTE(review): BaseException kept for parity with the original,
            # but it also swallows KeyboardInterrupt/SystemExit — Exception
            # would be safer.
            print(e)
            continue
        print(json.dumps(response, indent=4))
        return response
    return False
def analyze_emotions(read_path, write_path1, write_path2):
    """Read tweets from *read_path* (CSV), score each non-empty tweet's
    emotions with Watson NLU, and split the rows across two output CSVs:

    - write_path1: original columns plus the five emotion scores and the
      dominant emotion.
    - write_path2: rows whose tweet text was empty after cleaning.
    """
    natural_language_understanding = NaturalLanguageUnderstandingV1(
        username='******', password='******', version='2018-03-16')
    # BUGFIX: csv.writer requires text-mode files in Python 3; the original
    # opened write_path1 in 'wb'.  newline='' prevents blank rows on Windows.
    with open(write_path1, 'w', newline='') as outFile1, \
            open(write_path2, 'w', newline='') as outFile2:
        file_writer1 = csv.writer(outFile1)
        file_writer2 = csv.writer(outFile2)
        i = 1  # running count of analyzed tweets, for progress output only
        with open(read_path, 'r') as inFile:
            fileReader = csv.reader(inFile)
            for row in fileReader:
                # Column 4 holds the tweet text; strip tokens with digits.
                tweet = remove_words_with_numbers(row[4])
                print(i, tweet)
                if isNotEmpty(tweet):
                    response = natural_language_understanding.analyze(
                        language="en",
                        text=tweet,
                        features=Features(emotion=EmotionOptions()))
                    jsonData = json.dumps(response, indent=2)
                    print(jsonData)
                    emotion = json.loads(
                        jsonData)["emotion"]["document"]["emotion"]
                    highest_emotion = get_highest_emotion(
                        emotion["anger"], emotion["joy"], emotion["sadness"],
                        emotion["fear"], emotion["disgust"])
                    print(highest_emotion)
                    file_writer1.writerow([
                        row[0], row[1], row[4], row[5], row[10],
                        emotion["anger"], emotion["joy"], emotion["sadness"],
                        emotion["fear"], emotion["disgust"], highest_emotion
                    ])
                    i = i + 1
                else:
                    file_writer2.writerow(
                        [row[0], row[1], row[4], row[5], row[10]])
def watson_nlp_analysis(text):
    """Run a broad Watson NLU analysis (concepts, categories, sentiment,
    emotion, entities, keywords) over *text* and return the raw result dict.

    An empty string is returned unchanged without calling the service.
    """
    if text == '':
        return text
    concept_category_limit = 10
    entity_keyword_limit = 30
    nlu = NaturalLanguageUnderstandingV1(
        version='2018-11-16',
        iam_apikey=os.environ['WATSON'],
        url='https://gateway.watsonplatform.net/natural-language-understanding/api'
    )
    requested = Features(
        concepts=ConceptsOptions(limit=concept_category_limit),
        categories=CategoriesOptions(limit=concept_category_limit),
        sentiment=SentimentOptions(document=True),
        emotion=EmotionOptions(document=True),
        entities=EntitiesOptions(emotion=True,
                                 sentiment=True,
                                 limit=entity_keyword_limit),
        keywords=KeywordsOptions(emotion=True,
                                 sentiment=True,
                                 limit=entity_keyword_limit))
    return nlu.analyze(text=text, features=requested).get_result()
def calculateRelevance(text, regModel):
    """Score *text* (HTML) for earthquake relevance.

    Builds a feature row from Watson NLU document emotions plus per-keyword
    substring flags, prepends a constant-1 intercept term, and feeds the row
    to regModel.predict.  Returns None if the NLU call fails.
    """
    natural_language_understanding = NaturalLanguageUnderstandingV1(
        version='2018-11-16',
        # NOTE(review): hard-coded API key checked into source — move to an
        # environment variable or secrets store.
        iam_apikey='34qzJpNfbmmav0ZFkGM9vM_enLCTAOuQsd5s4odeF19l',
        url='https://gateway-lon.watsonplatform.net/natural-language-understanding/api'
    )
    keywords = {"quake", "shake", "tremble"}
    try:
        response = natural_language_understanding.analyze(
            html=text,
            features=Features(emotion=EmotionOptions())).get_result()
        dictionary = response["emotion"]["document"]["emotion"]
    except Exception as e:
        print(e)
        return None
    lowered = text.lower()
    for keyword in keywords:
        # True when the keyword appears anywhere in the (lowercased) text.
        dictionary["hasSubstring_" + keyword] = lowered.find(keyword) >= 0
    # BUGFIX: pd.Series.append was removed in pandas 2.0; pd.concat is the
    # supported way to prepend the intercept column.
    row = pd.concat([pd.Series([1]), pd.Series(dictionary)])
    data = pd.DataFrame([list(row)])
    return regModel.predict(data)
def constructRow(text, isEarthquake):
    """Build one labeled training row (pd.Series) for *text*.

    Features: Watson NLU document emotions (all zeros when the API call
    fails), per-keyword substring flags, and the duringEarthquake label.
    """
    natural_language_understanding = NaturalLanguageUnderstandingV1(
        version='2018-11-16',
        # NOTE(review): hard-coded API key — move to env var / secrets store.
        iam_apikey='34qzJpNfbmmav0ZFkGM9vM_enLCTAOuQsd5s4odeF19l',
        url='https://gateway-lon.watsonplatform.net/natural-language-understanding/api'
    )
    keywords = {"quake", "shake", "tremble"}
    try:
        response = natural_language_understanding.analyze(
            html=text,
            features=Features(emotion=EmotionOptions())).get_result()
        dictionary = response["emotion"]["document"]["emotion"]
        # going to remove the ones with only 0's
    except Exception:
        # BUGFIX: was a bare `except:`, which also swallows
        # KeyboardInterrupt/SystemExit.
        dictionary = {
            "anger": 0,
            "disgust": 0,
            "fear": 0,
            "joy": 0,
            "sadness": 0
        }
    for keyword in keywords:
        # find() returns the match index (-1 if absent); convert to bool.
        dictionary["hasSubstring_" + keyword] = text.lower().find(keyword) >= 0
    print(dictionary)
    dictionary["duringEarthquake"] = isEarthquake
    print(pd.Series(dictionary))
    return pd.Series(dictionary)
def handleMoodLogging():
    """Flask view: GET renders the journal form; POST runs Watson NLU
    emotion + sentiment analysis on the submitted journal text, dumps the
    raw response to a mock-response JSON file, and renders the gauge page.
    """
    # Display the form
    if request.method == 'GET':
        return render_template('index.html')
    # Validate using credentials
    natural_language_understanding = NaturalLanguageUnderstandingV1(
        username=secret_dictionary['username'],
        password=secret_dictionary['password'],
        version='2018-03-16')
    # Grab the text from the user
    journal_contents = request.form['journal_content']
    # Make a call to the API with the text passed in
    alchemy_results = natural_language_understanding.analyze(
        text=journal_contents.encode('ascii', 'ignore'),
        features=Features(emotion=EmotionOptions(),
                          sentiment=SentimentOptions()))
    # BUGFIX: use a context manager so the file handle is closed even if
    # json.dumps raises (original used open/write/close with no finally).
    with open('static/mockresponses/emotion_response.json', 'w+') as fo:
        fo.write(json.dumps(alchemy_results, indent=2))
    return render_template('gauge.html')
def EmotionAnalysis():
    """Search recent tweets for target_term, score each unique tweet's
    sadness with Watson NLU, and reply to authors whose sadness > 0.60.
    """
    # Search for all tweets
    public_tweets = api.search(target_term,
                               count=100,
                               result_type="recent",
                               lang="en")
    # IMPROVED: use a set for O(1) duplicate checks instead of O(n) list scans.
    analyzed_tweets = set()
    # Loop through all tweets
    for tweet in public_tweets["statuses"]:
        tweet_text = tweet["text"]
        tweet_id = tweet["id"]
        tweet_author = tweet["user"]["screen_name"]
        if tweet_id in analyzed_tweets:
            continue
        analyzed_tweets.add(tweet_id)
        response = natural_language_understanding.analyze(
            text=tweet_text,
            features=Features(emotion=EmotionOptions())).get_result()
        jsonified_response = json.dumps(response, indent=2)
        sadness_level = json.loads(
            jsonified_response)["emotion"]["document"]["emotion"]["sadness"]
        if sadness_level > .60:
            try:
                api.update_status(
                    "Hello @" + tweet_author +
                    "! It seems like you're having a rough time. Try visiting our website, it might help! www.emotionalsupportai.org"
                )
            except Exception:
                # Best-effort reply: duplicate-status or rate-limit errors
                # must not abort the scan.
                pass
def send_for_analysis(self, tweets, word):
    """Analyze *tweets* with Watson NLU: top-2 keywords with emotion, plus
    document-level and per-target emotion for *word*.  Returns the raw
    result dict.
    """
    requested_features = Features(
        keywords=KeywordsOptions(emotion=True, limit=2),
        emotion=EmotionOptions(targets=[word], document=True))
    analysis = self.text_analyzer.analyze(text=tweets,
                                          features=requested_features)
    return analysis.get_result()
def detect_text(path):
    """Detect text in the image file at *path* with Google Cloud Vision,
    then run Watson NLU emotion analysis on the detected text, targeting
    each detected line.  Prints the Watson response as JSON.
    """
    from google.cloud import vision
    client = vision.ImageAnnotatorClient()
    with io.open(path, 'rb') as image_file:
        content = image_file.read()
    image = vision.types.Image(content=content)
    response = client.text_detection(image=image)
    texts = response.text_annotations
    natural_language_understanding = NaturalLanguageUnderstandingV1(
        version='2018-11-16',
        # NOTE(review): hard-coded API key — move to env var / secrets store.
        iam_apikey='ysuAh_Jc3ASnVq3mvfwjONT5dD5G2oqcTGLizYs7HXyC',
        url='https://gateway.watsonplatform.net/natural-language-understanding/api'
    )
    # The original looped and broke after the first annotation (which holds
    # the full detected text); take texts[0] directly and handle empty input.
    strings = str(texts[0].description) if texts else ""
    response = natural_language_understanding.analyze(
        text=strings,
        features=Features(emotion=EmotionOptions(
            targets=strings.split('\n')))).get_result()
    print(json.dumps(response, indent=2))
def analyze_text(self, text):
    """Return Watson NLU sentiment + emotion analysis for *text*.

    Texts of 15 characters or fewer skip the API call and receive a canned
    neutral / zero-emotion response instead.
    """
    if len(text) <= 15:
        # Canned response for texts too short to be worth analyzing.
        return {
            "usage": {
                "text_units": 1,
                "text_characters": 65,
                "features": 2
            },
            "sentiment": {
                "document": {
                    "score": 0.0,
                    "label": "neutral"
                }
            },
            "language": "en",
            "emotion": {
                "document": {
                    "emotion": {
                        "sadness": 0.0,
                        "joy": 0.0,
                        "fear": 0.0,
                        "disgust": 0.0,
                        "anger": 0.0
                    }
                }
            }
        }
    result = self.natural_language_understanding.analyze(
        text=text,
        features=Features(sentiment=SentimentOptions(),
                          emotion=EmotionOptions())).get_result()
    print(json.dumps(result, indent=2))
    return result
def understanding(self):
    """Run Watson NLU over the first transcript alternative, cache the
    result on self.analysis, and return it.  Transcribes first if no
    transcription exists yet.
    """
    if not self.transcription:
        self.transcript()
    nlu = NaturalLanguageUnderstandingV1(
        version='2017-02-27',
        username=os.environ['UNDERSTANDING_USERNAME'],
        password=os.environ['UNDERSTANDING_PASSWORD'])
    transcript_text = (
        self.transcription['results'][0]['alternatives'][0]['transcript'])
    all_features = Features(
        categories=CategoriesOptions(),
        concepts=ConceptsOptions(),
        emotion=EmotionOptions(),
        entities=EntitiesOptions(emotion=True, sentiment=True, mentions=True),
        keywords=KeywordsOptions(emotion=True, sentiment=True),
        relations=RelationsOptions(),
        sentiment=SentimentOptions())
    self.analysis = nlu.analyze(text=transcript_text, features=all_features)
    logger.info('Completed analysis of recorded file')
    return self.analysis
def analyseEmotions(input):
    """Return the sum of the five Watson NLU document emotion scores
    (anger, joy, sadness, fear, disgust) for *input* text.
    """
    # NOTE(review): parameter name `input` shadows the builtin; kept so the
    # public signature stays backward compatible for keyword callers.
    natural_language_understanding = NaturalLanguageUnderstandingV1(
        version='2017-02-27', username='******', password='******')
    response = natural_language_understanding.analyze(
        text=input, features=Features(emotion=EmotionOptions()))
    # Index into the nested response once instead of five times.
    emotions = response["emotion"]["document"]["emotion"]
    return (emotions["anger"] + emotions["joy"] + emotions["sadness"] +
            emotions["fear"] + emotions["disgust"])
def __init__(self):
    """Create the Watson NLU client and the default document-level
    sentiment + emotion feature set used by later analyze() calls.
    """
    # NOTE(review): hard-coded API key checked into source — should come
    # from an environment variable or secrets store.
    self.naturalLanguageUnderstanding = NaturalLanguageUnderstandingV1(
        version='2018-09-21',
        iam_apikey='z22B_pFOjawl36w4CwyWGRs55jVnXE4y464VlyY7o-67',
        url='https://gateway-syd.watsonplatform.net/natural-language-understanding/api'
    )
    self.def_features = Features(
        sentiment=SentimentOptions(document=True),
        emotion=EmotionOptions(document=True))
def get_sentiment(self, song, artist):
    """Fetch lyrics for *song* by *artist* via PyLyrics and return the
    Watson NLU document-level sentiment score.
    """
    warnings.filterwarnings('ignore')
    nlu = NaturalLanguageUnderstandingV1(version='2017-02-27',
                                         username="******",
                                         password="******")
    lyrics = PyLyrics.getLyrics(artist, song)
    response = nlu.analyze(text=lyrics,
                           features=Features(emotion=EmotionOptions(),
                                             sentiment=SentimentOptions()))
    return response.get('sentiment').get('document').get('score')
def emotion(tweet):
    """Return the Watson NLU document emotion dict for *tweet*, or None on
    a Watson API error (which is printed).
    """
    try:
        result = naturalLanguageUnderstanding.analyze(
            text=tweet,
            features=Features(
                emotion=EmotionOptions(document=True))).get_result()
    except watson_service.WatsonApiException as e:
        print(e)
        return None
    return result['emotion']['document']['emotion']
def main(args):
    """Analyze args' text/url/html with Watson NLU.

    When args["type"] is "Emotion" (the default), return
    emotion2result(response).  Otherwise look the top entities up on
    Wikipedia and return up to three {entity: first-sentence-extract}
    pairs; {"NULL": "NULL"} on any failure.
    """
    natural_language_understanding = NaturalLanguageUnderstandingV1(
        version='2017-02-27', username='******', password='******')
    try:
        response = natural_language_understanding.analyze(
            text=args.get("text", None),
            url=args.get("url", None),
            html=args.get("html", None),
            features=Features(entities=EntitiesOptions(),
                              emotion=EmotionOptions()))
    except WatsonException:
        return {"NULL": "NULL"}
    if args.get("type", "Emotion") == "Emotion":
        return emotion2result(response)
    itemlist = dic2item(response)
    wiki_query = "http://en.wikipedia.org/w/api.php?action=query&" \
                 "prop=extracts&format=json&exintro=&titles="
    count = 0
    index = 0
    extractlist = {}
    # Collect extracts for up to three entities that have a Wikipedia page.
    while count < 3 and index < len(itemlist):
        # BUGFIX: the original .encode("utf8") produced bytes and then mixed
        # them with str operations (Python 2 leftovers); stay in str and let
        # join build the query term.
        title = itemlist[index][0]
        query_term = "+".join(title.split(" "))
        res = try_url(wiki_query + query_term)
        res_json = json.loads(res)
        pages = res_json["query"]["pages"]
        # BUGFIX: dict.keys() is not indexable in Python 3.
        pagenum = next(iter(pages))
        if pagenum != "-1":
            count += 1
            extract = pages[pagenum]["extract"]
            slist = extract.split(". ")
            # Keep only the first sentence of the intro.
            if slist is not None:
                extract = slist[0] + "."
            extract = clean(extract)
            extractlist[title] = extract
        index += 1
    if extractlist == {}:
        return {"NULL": "NULL"}
    return extractlist
def get_news_sentiment(request):
    """View: analyze the article at ?url=... with Watson NLU and return
    sentiment, emotion, top-5 concepts and categories; on failure return an
    error payload instead of raising.
    """
    try:
        response = natural_language_understanding.analyze(
            url=request.GET.get('url'),
            features=Features(sentiment=SentimentOptions(),
                              emotion=EmotionOptions(),
                              concepts=ConceptsOptions(limit=5),
                              categories=CategoriesOptions()))
        return Response(response)
    # BUGFIX: bare `except:` also swallows KeyboardInterrupt/SystemExit;
    # Exception still covers every API/parse failure.
    except Exception:
        return Response({"error": 'problem retrieving'})
def IBM_NLP(userinput):
    """Return the five Watson NLU document emotion scores for *userinput*
    as a (sadness, joy, fear, disgust, anger) tuple.
    """
    response = natural_language_understanding.analyze(
        text=userinput,
        language='en',
        features=Features(emotion=EmotionOptions()))
    # Index into the nested response once instead of five times.
    emotions = response.result['emotion']['document']['emotion']
    return (emotions['sadness'], emotions['joy'], emotions['fear'],
            emotions['disgust'], emotions['anger'])
def post(self):
    """REST handler: read {'line': ...} from the request body and return
    the Watson NLU document emotion analysis for it.
    """
    payload = json.loads(request.data.decode('utf-8'))
    # NOTE(review): hard-coded API key — move to env var / secrets store.
    nlu = NaturalLanguageUnderstandingV1(
        version='2018-11-16',
        iam_apikey='H61lzMxZfTcrmkMhz1_i6fvlxj0ljGVR04EnLKOJpGW1',
        url='https://gateway.watsonplatform.net/natural-language-understanding/api'
    )
    analysis = nlu.analyze(text=payload['line'],
                           features=Features(emotion=EmotionOptions()))
    return analysis.get_result()
def textAnalyse(url, natural_language_understanding):
    """Analyze the page at *url*: single top entity (without per-entity
    sentiment or emotion) plus document emotion.  Returns the raw Watson
    response.
    """
    feature_set = Features(
        entities=EntitiesOptions(sentiment=False, emotion=False, limit=1),
        emotion=EmotionOptions())
    return natural_language_understanding.analyze(url=url,
                                                  features=feature_set)
def watsget(comp, txt):
    """Analyze *txt* with Watson NLU emotion, passing *comp* straight
    through as the first positional argument of EmotionOptions.

    NOTE(review): positionally, EmotionOptions' first parameter is
    `document`; if *comp* was meant to be a target list, this should read
    EmotionOptions(targets=comp) — confirm with callers before changing.
    """
    nlu = NaturalLanguageUnderstandingV1(username=watson_username,
                                         password=watson_password,
                                         version=watson_version)
    return nlu.analyze(text=txt,
                       features=Features(emotion=EmotionOptions(comp)))
def analyze_using_NLU(analysistext):
    """Run the full Watson NLU feature suite over *analysistext* and return
    a JSON string of {'results': <raw response>}.
    """
    full_features = Features(sentiment=SentimentOptions(),
                             entities=EntitiesOptions(),
                             keywords=KeywordsOptions(),
                             emotion=EmotionOptions(),
                             concepts=ConceptsOptions(),
                             categories=CategoriesOptions(),
                             semantic_roles=SemanticRolesOptions())
    response = natural_language_understanding.analyze(text=analysistext,
                                                      features=full_features)
    return json.dumps({'results': response})
def analyze_emotion(self, tweet):
    """If the tweet text mentions 'Samsung', return (as a JSON string) the
    name of the strongest emotion Watson NLU detects toward the 'Samsung'
    target; otherwise return a single space.
    """
    if 'Samsung' not in tweet['text']:
        return ' '
    analysis = natural_language_understanding.analyze(
        text=tweet['text'],
        features=Features(emotion=EmotionOptions(
            targets=['Samsung']))).get_result()
    target_emotions = analysis['emotion']['targets'][0]['emotion']
    strongest = max(target_emotions, key=target_emotions.get)
    return json.dumps(strongest)
def get_sentiment(text):
    """Return Watson NLU emotion + sentiment for *text* as a
    pretty-printed JSON string, or None if anything goes wrong.
    """
    try:
        natural_language_understanding = NaturalLanguageUnderstandingV1(
            username="******", password="******", version='2018-03-16')
        response = natural_language_understanding.analyze(
            text=text,
            features=Features(emotion=EmotionOptions(),
                              sentiment=SentimentOptions()))
        return json.dumps(response, indent=2)
    # BUGFIX: bare `except:` also swallows KeyboardInterrupt/SystemExit;
    # the deliberate best-effort None return is preserved.
    except Exception:
        return None
def analyze(links):
    """Run the broad Watson NLU feature set over each URL in *links*.

    BUGFIX: the original computed every response and discarded it; collect
    the responses and return them as a list.  Callers that ignored the old
    implicit None return are unaffected.
    """
    responses = []
    for link in links:
        responses.append(
            natural_language_understanding.analyze(
                url=link,
                features=Features(
                    entities=EntitiesOptions(emotion=True,
                                             sentiment=True,
                                             limit=15),
                    emotion=EmotionOptions(targets=['keyword1', 'keyword2']),
                    keywords=KeywordsOptions(emotion=True,
                                             sentiment=True,
                                             limit=2),
                    concepts=ConceptsOptions(limit=5),
                    sentiment=SentimentOptions(targets=['stocks']),
                    categories=CategoriesOptions())))
    return responses
def analizaLivro(texto):
    """Analyze *texto* with Watson NLU and return
    [score, label, sadness, anger, joy, fear, disgust].
    """
    response = natural_language_understanding.analyze(
        text=texto,
        features=Features(sentiment=SentimentOptions(),
                          emotion=EmotionOptions()))
    # Index into each nested sub-document once instead of repeatedly.
    sentiment = response['sentiment']['document']
    emotions = response['emotion']['document']['emotion']
    # Return order preserved from the original (sadness before anger).
    return [
        sentiment['score'], sentiment['label'], emotions['sadness'],
        emotions['anger'], emotions['joy'], emotions['fear'],
        emotions['disgust']
    ]
def get_emotions(text):
    """Translate *text*, wrap it in <i> tags, and return Watson NLU's
    document emotion dict for the resulting HTML snippet; returns the
    string "API error" on a Watson API failure.
    """
    html_snippet = f'<i>{translate(text)}</i>'
    # NOTE(review): hard-coded API key — move to env var / secrets store.
    nlu = NaturalLanguageUnderstandingV1(
        version='2018-11-16',
        iam_apikey='vcfJHb4lqz67pevf5vnqdOqVe-bOtFefMqUG5Q3c4ha2',
        url='https://gateway-lon.watsonplatform.net/natural-language-understanding/api'
    )
    try:
        analysis = nlu.analyze(
            html=html_snippet,
            features=Features(emotion=EmotionOptions())).get_result()
    except WatsonApiException:
        return "API error"
    return analysis["emotion"]["document"]["emotion"]
def get_sentiment_analysis(review_text):
    """Return Watson NLU sentiment, top-5 keywords (with sentiment), and
    document emotion for *review_text* as the raw result dict.
    """
    service = NaturalLanguageUnderstandingV1(
        version='2018-08-01',
        url='https://gateway-wdc.watsonplatform.net/natural-language-understanding/api',
        iam_apikey=watson_key)
    requested = Features(sentiment=SentimentOptions(),
                         keywords=KeywordsOptions(sentiment=True, limit=5),
                         emotion=EmotionOptions())
    return service.analyze(text=review_text, features=requested).get_result()
def test_html_analyze(self):
    """Verify that an HTML analyze call issues exactly one POST to the
    mocked /v1/analyze endpoint.
    """
    nlu_url = "http://bogus.com/v1/analyze"
    responses.add(responses.POST,
                  nlu_url,
                  body="{\"resulting_key\": true}",
                  status=200,
                  content_type='application/json')
    client = NaturalLanguageUnderstandingV1(version='2016-01-23',
                                            url='http://bogus.com',
                                            username='******',
                                            password='******')
    client.analyze(Features(sentiment=SentimentOptions(),
                            emotion=EmotionOptions(document=False)),
                   html="<span>hello this is a test</span>")
    assert len(responses.calls) == 1
def analyze_using_NLU(analysistext):
    """
    Extract results from Watson Natural Language Understanding for each news item
    """
    feature_suite = Features(
        sentiment=SentimentOptions(),
        entities=EntitiesOptions(),
        keywords=KeywordsOptions(),
        emotion=EmotionOptions(),
        concepts=ConceptsOptions(),
        categories=CategoriesOptions(),
    )
    analysis = natural_language_understanding.analyze(text=analysistext,
                                                      features=feature_suite)
    return {'results': analysis}