Example #1
# Flask view; assumes module-level imports of request (flask), newspaper, and json
def Calculate():
	try:
		news = request.form['inputNews'].lower()
		topic = request.form['inputTopic']
		category = request.form['inputCategory']

		print(news + "\t" + topic + "\t" + category)
		
		from havenondemand.hodindex import HODClient
		client = HODClient(apikey='6b1f8438-56c7-45e0-98a6-6742c1be0d65', apiversiondefault=1)

		"""def get_bias(url):
			print "Hello"
			data = {'url': url}
			r = client.post('analyzesentiment', data)
			sentiment = r.json()['aggregate']['sentiment']
			score = r.json()['aggregate']['score']
			print url + " | " + sentiment + " | " + str(score)
			return score"""

		paper = newspaper.build("http://" + news + ".com", language='en', memoize_articles=False)

		url = []

		for article in paper.articles:
			url.append(article.url)

		cumulative_score = 0.0
		countNegative = 0
		countPositive = 0
		countNeutral = 0

		"""import multiprocessing as mp

		p = mp.Pool(3)
		res = p.map(get_bias, url)"""

		print(newspaper.category)

		for u in url:
			data = {'url': u}
			r = client.post('analyzesentiment', data)
			sentiment = r.json()['aggregate']['sentiment']
			score = r.json()['aggregate']['score']
			print(u + " | " + sentiment + " | " + str(score))
			cumulative_score += score
			if sentiment == 'positive':
				countPositive += 1
			elif sentiment == 'negative':
				countNegative += 1
			elif sentiment == 'neutral':
				countNeutral += 1				

		print(cumulative_score)
		print(cumulative_score / len(url))

	except Exception as e:
		return json.dumps({'error':str(e)})

	return news + topic + category
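# Note: Calculate() above reads request.form, so it presumably runs as a Flask
# POST handler. A minimal sketch of the wiring it assumes (the route path and
# endpoint name are guesses, not from the source):
import json

import newspaper
from flask import Flask, request

app = Flask(__name__)

# expose Calculate() at a URL; it expects inputNews, inputTopic and
# inputCategory in the POSTed form data
app.add_url_rule('/calculate', 'calculate', Calculate, methods=['POST'])

if __name__ == '__main__':
    app.run()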
Example #2
def wikipediagrabber(filepath):
	# assumes module-level: from havenondemand.hodindex import HODClient; import wikipedia

	# make API call, as outlined in https://github.com/HPE-Haven-OnDemand/havenondemand-python
	client = HODClient("http://api.havenondemand.com/", "5e8a3841-5bec-43cc-9dac-5e5d0a90bbc9")
	r = client.post('extractentities',
			data={'entity_type': ['people_eng'], 'unique_entities': 'true'},
			files={'file': open(filepath, 'rb')})

	# set variables
	myjson = r.json()
	identifiers = []
	dictionary = {}
	
	# iterate through each named entity
	for entity in myjson['entities']:

		# only keep named entities that have wikipedia descriptions
		try:
			# wikidata ids let us spot duplicate named entities
			identifier = entity['additional_information']['wikidata_id']

			# only add to dictionary if named entity has not already appeared
			if identifier not in identifiers:
				identifiers.append(identifier)
				entry = entity['original_text']
				dictionary[identifier] = [entry,
						wikipedia.summary(entry, sentences=5),
						entity['additional_information']['wikipedia_eng']]

		# skip entities without wikipedia pages
		except (wikipedia.exceptions.DisambiguationError, wikipedia.exceptions.PageError):
			continue

	return dictionary
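# A usage sketch for wikipediagrabber(); 'article.txt' is a placeholder path.
# The returned dictionary maps wikidata ids to [name, five-sentence summary,
# wikipedia url], per the assignment above.
summaries = wikipediagrabber('article.txt')
for wikidata_id, (name, summary, wiki_url) in summaries.items():
    print(name + ' | ' + wiki_url)
    print(summary)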
Example #3
def havenSentiment(text):
    """Takes a string as an input and runs it through the Haven API to gather sentiment analysis and returns it"""
    from havenondemand.hodindex import HODClient
    import os
    key = os.environ.get('havenAPI')
    client = HODClient(apikey=key, apiversiondefault=1)
    data = {'text': text}
    r = client.post('analyzesentiment', data)
    sentiment = r.json()['aggregate']['sentiment']
    score = r.json()['aggregate']['score']
    # return text + " | " + sentiment + " | " + str(score)
    return score


# if __name__ == "__main__":
# 	from twitterGrab import twitterUserGrab
# 	from twitterGrab import twitterTopicGrab
# 	print(havenSentiment(twitterTopicGrab("python")))
# 	print(havenSentiment(twitterUserGrab("kanyewest")))
Example #4
def havenSentiment(text):
	"""Takes a string as an input and runs it through the Haven API to gather sentiment analysis and returns it"""
	from havenondemand.hodindex import HODClient
	import os
	key = os.environ.get('havenAPI')
	client = HODClient(apikey=key, apiversiondefault=1)
	data = {'text': text}
	r = client.post('analyzesentiment', data)
	sentiment = r.json()['aggregate']['sentiment']
	score = r.json()['aggregate']['score']
	# return text + " | " + sentiment + " | " + str(score)
	return score




# if __name__ == "__main__":
# 	from twitterGrab import twitterUserGrab
# 	from twitterGrab import twitterTopicGrab
# 	print(havenSentiment(twitterTopicGrab("python")))
# 	print(havenSentiment(twitterUserGrab("kanyewest")))
Example #5
def r(subreddit, thread):
    """Return sentiment analysis for the comments in a reddit thread,
    serving cached results from the db when available."""
    # if db has stored values, return stored values
    stored_sentiments = search_thread_id(thread)
    if stored_sentiments:
        return Response(json.dumps(stored_sentiments),
                        mimetype="application/json")

    # establish client
    client = HODClient("http://api.havenondemand.com",
                       "65f7315d-1189-449f-a839-7a46fd4263be")

    # create the initial user agent
    user_agent = "reddit-mood-analyzer-scrape by /u/abrarisland"
    reddit = praw.Reddit(user_agent=user_agent)

    submission = reddit.get_submission(submission_id=thread, comment_limit=15)
    # get all of the comments in the thread and flatten the comment tree
    flat_comments = praw.helpers.flatten_tree(submission.comments)

    results = []
    evaluate_comments(client, thread, flat_comments, results)
    return Response(json.dumps(results), mimetype="application/json")
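# search_thread_id() and evaluate_comments() are defined elsewhere in this
# project. A minimal sketch of what evaluate_comments might look like, based
# on the analyzesentiment calls in the other examples (the per-comment result
# shape is an assumption):
def evaluate_comments(client, thread, comments, results):
    for comment in comments:
        if not hasattr(comment, 'body'):  # skip MoreComments placeholders
            continue
        r = client.post('analyzesentiment', {'text': comment.body})
        aggregate = r.json()['aggregate']
        results.append({'thread': thread,
                        'sentiment': aggregate['sentiment'],
                        'score': aggregate['score']})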
Example #6
import newspaper, json	
cnn_paper = newspaper.build('http://cnn.com', language='en', memoize_articles=False)

url = []

for article in cnn_paper.articles:
	url.append(article.url)

from havenondemand.hodindex import HODClient
client = HODClient(apikey='6b1f8438-56c7-45e0-98a6-6742c1be0d65', apiversiondefault=1)

cumulative_score = 0.0
count = 0

import multiprocessing as mp

def get_bias(url):
	data = {'url': url}
	r = client.post('analyzesentiment', data)
	sentiment = r.json()['aggregate']['sentiment']
	score = r.json()['aggregate']['score']
	print(url + " | " + sentiment + " | " + str(score))
	return score

# create the pool after get_bias is defined so the forked workers can find it
p = mp.Pool(3)

res = p.map(get_bias, url)

for record in res:
	cumulative_score += record
Example #7
from havenondemand.hodindex import HODClient

client = HODClient(apikey='API_KEY', apiversiondefault=1)

text = "I love puppies"
data = {'text': text}

r = client.post('analyzesentiment', data)
sentiment = r.json()['aggregate']['sentiment']
score = r.json()['aggregate']['score']
print(text + " | " + sentiment + " | " + str(score))
Example #8
import os
import os.path
from threading import Thread
import time
import picamera
import requests
import shutil
from havenondemand.hodindex import HODClient

##Haven OnDemand client
client = HODClient("http://api.havenondemand.com", "API_KEY")

##Find number of images already saved
##Needed in case the Raspberry Pi shuts off and img_counter gets reset to zero
DIR = './img' #directory images get saved to
img_counter = len([name for name in os.listdir(DIR) if os.path.isfile(os.path.join(DIR, name))])
##

##Set constants
slideshow_delay = 30
save_and_display_delay = 40
img_name = 'image'

##Initialize camera
camera = picamera.PiCamera()
camera.sharpness = 0
camera.contrast = 0
camera.brightness = 50
camera.saturation = 0
camera.ISO = 0
camera.video_stabilization = False
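# The snippet ends after camera setup. A hypothetical next step, uploading a
# capture with the same files= style as the extractentities call in Example #2
# (detectfaces is Haven OnDemand's face detection API; its use here is an
# assumption, since the original capture loop is not shown):
def capture_and_analyze():
    global img_counter
    path = os.path.join(DIR, '%s%d.jpg' % (img_name, img_counter))
    camera.capture(path)
    img_counter += 1
    with open(path, 'rb') as f:
        r = client.post('detectfaces', files={'file': f})
    return r.json()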
Example #9
from flask import Flask
from flask import request
from flask import render_template
import json as js
import tweepy
from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
from tweepy import Stream
from havenondemand.hodindex import HODClient
client = HODClient("http://api.havenondemand.com/",
                   "075a1e5f-ad81-4677-9362-7ca4649103f2")

listofData = []


class ClassName(StreamListener):
    """StreamListener that collects up to 30 tweets into listofData."""
    tweet_count = 0

    def on_status(self, status):
        ClassName.tweet_count += 1
        #parsed_data = "tweet='%s'"%(status.text)

        listofData.append(status.text)

        # stop the stream once 30 tweets have been collected
        if ClassName.tweet_count < 30:
            return True

        else:
            return False

    def on_error(self, status):
        # disconnect the stream on any error
        return False
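# A sketch of running the stream and scoring the collected tweets; the OAuth
# credentials and track keyword are placeholders, not from the source:
auth = OAuthHandler('CONSUMER_KEY', 'CONSUMER_SECRET')
auth.set_access_token('ACCESS_TOKEN', 'ACCESS_SECRET')
stream = Stream(auth, ClassName())
stream.filter(track=['python'])  # blocks until on_status returns False

for tweet in listofData:
    r = client.post('analyzesentiment', {'text': tweet})
    print(tweet + ' | ' + r.json()['aggregate']['sentiment'])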
Example #10
# Flask view; assumes module-level imports of request (flask), newspaper, and json
def Calculate():
    try:
        news = request.form['inputNews'].lower()
        topic = request.form['inputTopic']
        category = request.form['inputCategory']

        print(news + "\t" + topic + "\t" + category)

        from havenondemand.hodindex import HODClient
        client = HODClient(apikey='6b1f8438-56c7-45e0-98a6-6742c1be0d65',
                           apiversiondefault=1)
        """def get_bias(url):
			print "Hello"
			data = {'url': url}
			r = client.post('analyzesentiment', data)
			sentiment = r.json()['aggregate']['sentiment']
			score = r.json()['aggregate']['score']
			print url + " | " + sentiment + " | " + str(score)
			return score"""

        paper = newspaper.build("http://" + news + ".com",
                                language='en',
                                memoize_articles=False)

        url = []

        for article in paper.articles:
            url.append(article.url)

        cumulative_score = 0.0
        countNegative = 0
        countPositive = 0
        countNeutral = 0
        """import multiprocessing as mp

		p = mp.Pool(3)
		res = p.map(get_bias, url)"""

        print(newspaper.category)

        for u in url:
            data = {'url': u}
            r = client.post('analyzesentiment', data)
            sentiment = r.json()['aggregate']['sentiment']
            score = r.json()['aggregate']['score']
            print(u + " | " + sentiment + " | " + str(score))
            cumulative_score += score
            if sentiment == 'positive':
                countPositive += 1
            elif sentiment == 'negative':
                countNegative += 1
            elif sentiment == 'neutral':
                countNeutral += 1

        print(cumulative_score)
        print(cumulative_score / len(url))

    except Exception as e:
        return json.dumps({'error': str(e)})

    return news + topic + category