def do_analysis(names, texts):
    """Run AlchemyAPI targeted sentiment for each speaker against the
    debate's top three topics and write the results to a text file.

    names -- list of speaker names, parallel to `texts`
    texts -- list of transcript text, one entry per speaker
    """
    alchemyapi = AlchemyAPI()
    # Derive the output file name from the transcript file name.
    base = os.path.basename('transcripts/Democrats/dem-2-4-2016.txt')
    debate_name = os.path.splitext(base)[0]
    file_name = 'data/targeted sentiments/Democrat/' + str(debate_name) + '_sentiment.txt'
    filepath = "data/topics/Democratic Debates_Top Three Topics.csv"
    all_topics = csv.DictReader(open(filepath, "r"))
    # Collect this debate's topic column; each cell is split on quotes now
    # and on commas below to yield flat keyword lists.
    dt_1 = [row[debate_name] for row in all_topics]
    dt_2 = [x.split('\'') for x in dt_1]
    # BUG FIX: the original opened the output file without ever closing it
    # (buffered writes could be lost); a with-block guarantees the flush.
    # Also removed `dt_3`, which was computed but never read.
    with open(file_name, 'w') as f:
        for name, text in zip(names, texts):
            for j in range(3):
                topic = [y for x in dt_2[j] for y in x.split(',')]
                # All but the last element are the keywords to target; the
                # last element is the human-readable topic label.
                response = alchemyapi.sentiment_targeted('text', text, topic[:-1])
                if response['status'] == 'OK':
                    f.write(topic[-1] + ':\n')
                    f.write(name + ' Sentiment \n')
                    f.write('type: ' + str(response['docSentiment']['type']) + '\n')
                    if 'score' in response['docSentiment']:
                        f.write('score: ' + str(response['docSentiment']['score']) + '\n \n')
                else:
                    print('Error in sentiment analysis call: ', response['statusInfo'])
def populateTweets(self):
    """Search Twitter near this object's coordinates and populate:

    - self.showcase: up to five [text, screen_name] pairs
    - self.popularHashtags: the five most common hashtag texts
    - self.sentiment: average targeted sentiment score over all tweets
    """
    self.lat, self.long, self.radius = self.findCoordinatesRadius()
    geo = str(self.lat) + "," + str(self.long) + "," + str(self.radius) + "km"
    tweets = api.search(q=self.search, lang='en', geocode=geo, rpp=100)
    # Keep the first five tweets as display samples.
    self.showcase = [[tweet.text, tweet.user.screen_name] for tweet in tweets[0:5]]
    # Flatten every tweet's hashtag entities into one list of tag texts.
    # (Renamed the loop variable -- the original shadowed builtin `hash`.)
    hashtagsRaw = [tweet.entities['hashtags'] for tweet in tweets]
    hashtags = [tag['text'] for tag in itertools.chain.from_iterable(hashtagsRaw)]
    # BUG FIX: removed the dead O(n^2) `frequency` dict the original built
    # with list.count() and never read; Counter alone gives the top five.
    self.popularHashtags = dict(Counter(hashtags).most_common(5)).keys()
    texts = [tweet.text for tweet in tweets]
    self.sentiment = 0.0
    alchemyapi = AlchemyAPI()
    # Average the targeted sentiment score over all fetched tweets;
    # neutral results and failed calls contribute zero.
    for text in texts:
        response = alchemyapi.sentiment_targeted('text', text.lower(), self.search.lower())
        if response['status'] != 'ERROR' and response['docSentiment']['type'] != 'neutral':
            self.sentiment += float(response['docSentiment']['score']) / len(texts)
def populateTweets(self):
    """Fetch geo-local tweets for the search term and fill in the showcase
    tweets, the five most popular hashtags, and the average targeted
    sentiment score (stored on self).
    """
    self.lat, self.long, self.radius = self.findCoordinatesRadius()
    geo = str(self.lat) + "," + str(self.long) + "," + str(self.radius) + "km"
    tweets = api.search(q=self.search, lang='en', geocode=geo, rpp=100)
    # First five tweets become the display samples.
    self.showcase = [[t.text, t.user.screen_name] for t in tweets[0:5]]
    # Flatten each tweet's hashtag entity list into one list of tag texts.
    # (Loop variable renamed -- the original shadowed builtin `hash`.)
    raw_tags = [t.entities['hashtags'] for t in tweets]
    hashtags = [tag['text'] for tag in itertools.chain.from_iterable(raw_tags)]
    # BUG FIX: dropped the unused `frequency` dict the original filled via
    # repeated list.count() calls (O(n^2), never read afterwards).
    self.popularHashtags = dict(Counter(hashtags).most_common(5)).keys()
    texts = [t.text for t in tweets]
    self.sentiment = 0.0
    alchemyapi = AlchemyAPI()
    # Accumulate score/len(texts) per tweet => arithmetic mean; neutral
    # results and API errors contribute nothing.
    for text in texts:
        response = alchemyapi.sentiment_targeted('text', text.lower(), self.search.lower())
        if response['status'] != 'ERROR' and response['docSentiment']['type'] != 'neutral':
            self.sentiment += float(response['docSentiment']['score']) / len(texts)
def writeResult(arr, outFile, search): alchemyapi = AlchemyAPI(apiKey) #alchemyapi = AlchemyAPI("89e395ea07490a40a55ccf241612724f80827956") #alchemyapi = AlchemyAPI("f7e81de9b04fcb1eadc9469800a86a15bffd8ec3") #alchemyapi = AlchemyAPI("d3547d0e12ac5425b57cf1d2e05280525224a109") #alchemyapi = AlchemyAPI("6d03602e012eca8b7ab3ac92e37327950b1caa78") #print "In write ",len(arr) fi = open(outFile,'wb') SEARCHTERM = search.lower() writer = csv.writer(fi) neg, pos, neu = 0,0,0 results = [] print "Starting AlchemyAPI" for item in zip(*arr)[0]: #print item response = alchemyapi.sentiment_targeted('text', item, SEARCHTERM) #print "maine response",response try: respType = response['docSentiment']['type'] #print "Response ",respType except Exception, e: #print e continue if respType == 'neutral': neu += 1 elif respType == 'positive': pos += 1 elif respType == 'negative': neg += 1 lst = [item,respType] writer.writerow(lst) results.append(lst)
def analyzeTweets(apiKey, arr, search, resultsArray): alchemyapi = AlchemyAPI(apiKey) SEARCHTERM = search.lower() neg, pos, neu = 0,0,0 for item in arr: #print item response = alchemyapi.sentiment_targeted('text', item[0], SEARCHTERM) print "maine response",response try: respType = response['docSentiment']['type'] #print "Response ",respType except Exception, e: #print e continue if respType == 'neutral': neu += 1 elif respType == 'positive': pos += 1 elif respType == 'negative': neg += 1 lst = [item[0],respType, item[1],item[2]] resultsArray.append(lst)
is secure, we do not get access to the private https: URLs img_url = media.images['standard_resolution'].url img_response = alchemyapi.imageExtraction('url', img_url) if img_response['status'] == 'OK': print(json.dumps(img_response, indent=4)) else: print('Error in image extraction call: ', img_response['statusInfo']) ''' # Printing the caption of the image and doing sentiment analysis of the # caption targeted towards CapitalOne name = api.user(media.user.id) print name if hasattr(media, 'caption'): print YELLOW+"Caption :"+ENDCOLOR, media.caption.text response = alchemyapi.sentiment_targeted('text', media.caption.text, 'capital') if response['status'] == 'OK': print GREEN+'Sentiment type: '+ENDCOLOR, response['docSentiment']['type'] if 'score' in response['docSentiment']: print GREEN + 'Sensitivity Score: '+ENDCOLOR, response['docSentiment']['score'] else: print('Error in targeted sentiment analysis call: ', response['statusInfo']) # Printing Media and User Statistics: Media Likes, User Details include # Number of Followers # Number of Users Following
# Demo: targeted sentiment toward 'Denver' for the sample text.
for _ in range(3):
    print('')
print('############################################')
print('# Targeted Sentiment Analysis Example #')
print('############################################')
print('')
print('')
print('Processing text: ', demo_text)
print('')
response = alchemyapi.sentiment_targeted('text', demo_text, 'Denver')
if response['status'] != 'OK':
    print('Error in targeted sentiment analysis call: ', response['statusInfo'])
else:
    print('## Response Object ##')
    print(json.dumps(response, indent=4))
    print('')
    print('## Targeted Sentiment ##')
    print('type: ', response['docSentiment']['type'])
    if 'score' in response['docSentiment']:
        print('score: ', response['docSentiment']['score'])
# --- Sentiment endpoint smoke tests ---
for flavor, content in (('text', test_text), ('html', test_html), ('url', test_url)):
    response = alchemyapi.sentiment(flavor, content)
    assert response['status'] == 'OK'
response = alchemyapi.sentiment('random', test_url)
assert response['status'] == 'ERROR'  # invalid flavor
print('Sentiment tests complete!')
print('')

# --- Targeted sentiment smoke tests ---
print('Checking targeted sentiment . . . ')
for flavor, content, target in (('text', test_text, 'heart'),
                                ('html', test_html, 'language'),
                                ('url', test_url, 'Congress')):
    response = alchemyapi.sentiment_targeted(flavor, content, target)
    assert response['status'] == 'OK'
response = alchemyapi.sentiment_targeted('random', test_url, 'Congress')
assert response['status'] == 'ERROR'  # invalid flavor
response = alchemyapi.sentiment_targeted('text', test_text, None)
assert response['status'] == 'ERROR'  # missing target
print('Targeted sentiment tests complete!')
print('')

# Text
# Pause, then demo targeted sentiment toward 'Denver' on the sample text.
wait = raw_input('press enter to continue')
for _ in range(3):
    print('')
print('############################################')
print('# Targeted Sentiment Analysis Example #')
print('############################################')
print('')
print('')
print('Processing text: ', demo_text)
print('')
response = alchemyapi.sentiment_targeted('text', demo_text, 'Denver')
if response['status'] != 'OK':
    print('Error in targeted sentiment analysis call: ', response['statusInfo'])
else:
    print('## Response Object ##')
    print(json.dumps(response, indent=4))
    print('')
    print('## Targeted Sentiment ##')
    print('type: ', response['docSentiment']['type'])
    if 'score' in response['docSentiment']:
        print('score: ', response['docSentiment']['score'])
# Sentiment: every valid flavor must succeed, an unknown flavor must fail.
print('Checking sentiment . . . ')
cases = [('text', test_text), ('html', test_html), ('url', test_url)]
for flavor, content in cases:
    response = alchemyapi.sentiment(flavor, content)
    assert response['status'] == 'OK'
response = alchemyapi.sentiment('random', test_url)
assert response['status'] == 'ERROR'  # invalid flavor
print('Sentiment tests complete!')
print('')

# Targeted Sentiment: valid flavors succeed; bad flavor / missing target fail.
print('Checking targeted sentiment . . . ')
targeted_cases = [('text', test_text, 'heart'),
                  ('html', test_html, 'language'),
                  ('url', test_url, 'Congress')]
for flavor, content, target in targeted_cases:
    response = alchemyapi.sentiment_targeted(flavor, content, target)
    assert response['status'] == 'OK'
response = alchemyapi.sentiment_targeted('random', test_url, 'Congress')
assert response['status'] == 'ERROR'  # invalid flavor
response = alchemyapi.sentiment_targeted('text', test_text, None)
assert response['status'] == 'ERROR'  # missing target
print('Targeted sentiment tests complete!')
print('')

# Text
print('Checking text . . . ')
response = alchemyapi.text('text', test_text)
# Load dish keywords per review and run AlchemyAPI targeted sentiment for
# each (review, keyword) pair, accumulating results keyed by review id.
# NOTE(review): this chunk appears truncated -- the trailing `if` has no
# visible else (new review ids never get a first entry here), and neither
# file handle is closed in view; confirm against the rest of the file.
temp = open('review2dish.json', 'r')
analysis_temp = open('review2analysis.json', 'w')  # presumably written later -- not in view
reviews = json.load(temp)
results = {}
count = 0
request_count = 0  # number of AlchemyAPI calls issued
for item in reviews:
    restaurant = reviews[item][0]['restaurant']
    time = reviews[item][0]['time']
    author = reviews[item][0]['author']
    # One targeted-sentiment request per extracted dish keyword.
    for t in reviews[item][0]['keyword']:
        response = alchemyapi.sentiment_targeted('text', reviews[item][0]['review'], t.encode('utf8'))
        request_count += 1
        # Python 2 has_key(): only keep responses that carry a usable score.
        if response['status'] == 'OK' and response['docSentiment'].has_key(
                'score'):
            ana_result = {
                'keyword': t.encode('utf8'),
                'review': reviews[item][0]['review'],
                'result': response['docSentiment'],
                'restaurant': restaurant,
                'time': time,
                'author': author
            }
            if results.has_key(item):
                results[item].append(ana_result)
# NOTE(review): this first error print most likely belongs to the else
# branch of a sentiment call in the preceding (unseen) chunk -- confirm
# its indentation before relying on this flat layout.
print('Error in sentiment analysis call: ', response['statusInfo'])
# Demo: targeted sentiment toward the hard-coded target 'sky'.
print('')
print('')
print('')
print('############################################')
print('# Targeted Sentiment Analysis Example #')
print('############################################')
print('')
print('')
print('')
targeting = 'sky'
response = alchemyapi.sentiment_targeted('text', demo_text, targeting)
if response['status'] == 'OK':
    print('## Response Object ##')
    print('')
    print('## Targeted Sentiment ## of', targeting)
    print('type: ', response['docSentiment']['type'])
    # 'score' is absent for neutral results, so guard before printing it.
    if 'score' in response['docSentiment']:
        print('score: ', response['docSentiment']['score'])
else:
    print('Error in targeted sentiment analysis call: ', response['statusInfo'])
# Fetch one search-result page's HTML (only kept when smaller than ~4 MB),
# then run AlchemyAPI targeted sentiment and keyword extraction on its URL.
# NOTE(review): loop-interior chunk -- `i`, `j`, `response`, `topic` and
# `alchemyapi` come from enclosing code outside this view; the nesting of
# the close/reopen under the size check is reconstructed -- confirm, since
# an oversized page appears to leave `sock` unclosed.
try:
    sock = urllib2.urlopen(response['items'][i]['link'], timeout=1)
    # Avoid holding the HTML in a variable when the page is too big.
    if sys.getsizeof(sock.read()) < 4098871:
        sock.close()
        sock = urllib2.urlopen(response['items'][i]['link'])
        content_HTML = sock.read()
        sock.close()
except Exception:
    print j + i + 1, 'urllib error'
# Targeted sentiment analysis toward `topic` on the result URL.
sentiment = None
score = None
mixed = None
alchemy_response = alchemyapi.sentiment_targeted(
    'url', response['items'][i]['link'], topic)
if alchemy_response['status'] == 'OK':
    sentiment = alchemy_response['docSentiment']['type']
    # 'score' and 'mixed' are optional fields; guard each before reading.
    if 'score' in alchemy_response['docSentiment']:
        score = alchemy_response['docSentiment']['score']
    if 'mixed' in alchemy_response['docSentiment']:
        mixed = alchemy_response['docSentiment']['mixed']
else:
    print j + i + 1, 'Error in targeted sentiment analysis call: ', alchemy_response[
        'statusInfo']
# Keyword-level sentiment analysis on the same URL.
keyword_response = alchemyapi.keywords('url', response['items'][i]['link'],
                                       {'sentiment': 1})
key_sentiment = None