Example #1
import json

import newspaper
from flask import request


def Calculate():
	try:
		news = request.form['inputNews'].lower()
		topic = request.form['inputTopic']
		category = request.form['inputCategory']

		print(news + "\t" + topic + "\t" + category)

		from havenondemand.hodindex import HODClient
		client = HODClient(apikey='6b1f8438-56c7-45e0-98a6-6742c1be0d65', apiversiondefault=1)

		"""def get_bias(url):
			print("Hello")
			data = {'url': url}
			r = client.post('analyzesentiment', data)
			sentiment = r.json()['aggregate']['sentiment']
			score = r.json()['aggregate']['score']
			print(url + " | " + sentiment + " | " + str(score))
			return score"""

		paper = newspaper.build("http://" + news + ".com", language='en', memoize_articles=False)

		url = []

		for article in paper.articles:
			url.append(article.url)

		cumulative_score = 0.0
		countNegative = 0
		countPositive = 0
		countNeutral = 0

		"""import multiprocessing as mp

		p = mp.Pool(3)
		res = p.map(get_bias, url)"""

		# list the category URLs newspaper discovered for this source
		print(paper.category_urls())

		for u in url:
			data = {'url': u}
			r = client.post('analyzesentiment', data)
			sentiment = r.json()['aggregate']['sentiment']
			score = r.json()['aggregate']['score']
			print(u + " | " + sentiment + " | " + str(score))
			cumulative_score += score
			if sentiment == 'positive':
				countPositive += 1
			elif sentiment == 'negative':
				countNegative += 1
			elif sentiment == 'neutral':
				countNeutral += 1				

		print(cumulative_score)
		if url:  # guard against an empty article list
			print(cumulative_score / len(url))

	except Exception as e:
		return json.dumps({'error':str(e)})

	return news + topic + category
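The view above reads Flask form fields but omits the surrounding app wiring. A minimal sketch of how it might be registered, assuming a Flask app and a hypothetical /calculate endpoint (neither is part of the original example):

from flask import Flask, request

app = Flask(__name__)

@app.route('/calculate', methods=['POST'])
def Calculate():
    # body as in the example above; trimmed here to the first field
    news = request.form['inputNews'].lower()
    return news

if __name__ == '__main__':
    app.run(debug=True)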
Example #2
import wikipedia
from havenondemand.hodindex import HODClient


def wikipediagrabber(filepath):

	# make the API call, as outlined in https://github.com/HPE-Haven-OnDemand/havenondemand-python
	client = HODClient("http://api.havenondemand.com/", "5e8a3841-5bec-43cc-9dac-5e5d0a90bbc9")
	with open(filepath, 'rb') as f:
		r = client.post('extractentities',
			data={'entity_type': ['people_eng'], 'unique_entities': 'true'},
			files={'file': f})

	# set up result containers
	myjson = r.json()
	identifiers = []
	dictionary = {}

	# iterate over each named entity
	for entity in myjson['entities']:

		# only keep named entities that have Wikipedia descriptions
		try:
			# wikidata_id identifies duplicate named entities
			identifier = entity['additional_information']['wikidata_id']

			# only add an entity the first time it appears
			if identifier not in identifiers:
				identifiers.append(identifier)
				entry = entity['original_text']
				dictionary[identifier] = [
					entry,
					wikipedia.summary(entry, sentences=5),
					entity['additional_information']['wikipedia_eng'],
				]

		# skip entities without Wikipedia pages (or without wikidata metadata)
		except (KeyError, wikipedia.exceptions.DisambiguationError, wikipedia.exceptions.PageError):
			continue

	return dictionary
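A usage sketch for the function above; the file name is an assumption, any local plain-text file works:

summaries = wikipediagrabber('speech.txt')  # 'speech.txt' is a placeholder path
for wikidata_id, (name, summary, wiki_url) in summaries.items():
    print(name + " | " + wiki_url)
    print(summary)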
Example #3
def havenSentiment(text):
    """Run a string through the Haven OnDemand sentiment API and return its aggregate score."""
    from havenondemand.hodindex import HODClient
    import os
    key = os.environ.get('havenAPI')
    client = HODClient(apikey=key, apiversiondefault=1)
    data = {'text': text}
    r = client.post('analyzesentiment', data)
    sentiment = r.json()['aggregate']['sentiment']
    score = r.json()['aggregate']['score']
    # return text + " | " + sentiment + " | " + str(score)
    return score


# if __name__ == "__main__":
#     from twitterGrab import twitterUserGrab
#     from twitterGrab import twitterTopicGrab
#     print(havenSentiment(twitterTopicGrab("python")))
#     print(havenSentiment(twitterUserGrab("kanyewest")))
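The commented-out block hints at the intended usage with a twitterGrab helper. A self-contained sketch that exercises havenSentiment directly (the sample strings are assumptions, and the havenAPI environment variable must hold a valid key):

print(havenSentiment("I had a great day today"))   # typically a positive score
print(havenSentiment("The service was terrible"))  # typically a negative score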
Example #5
from havenondemand.hodindex import HODClient

client = HODClient(apikey='API_KEY', apiversiondefault=1)

text = "I love puppies"
data = {'text': text}

r = client.post('analyzesentiment', data)
sentiment = r.json()['aggregate']['sentiment']
score = r.json()['aggregate']['score']
print(text + " | " + sentiment + " | " + str(score))
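The snippet above assumes the request succeeds. A defensive variant of the same call (the try/except and .get() fallbacks are assumptions, not part of the original):

from havenondemand.hodindex import HODClient

client = HODClient(apikey='API_KEY', apiversiondefault=1)

try:
    r = client.post('analyzesentiment', {'text': "I love puppies"})
    aggregate = r.json().get('aggregate', {})
    print(aggregate.get('sentiment'), aggregate.get('score'))
except Exception as e:
    print("sentiment request failed:", e)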
Example #6
import json

import newspaper
from flask import request


def Calculate():
    try:
        news = request.form['inputNews'].lower()
        topic = request.form['inputTopic']
        category = request.form['inputCategory']

        print(news + "\t" + topic + "\t" + category)

        from havenondemand.hodindex import HODClient
        client = HODClient(apikey='6b1f8438-56c7-45e0-98a6-6742c1be0d65',
                           apiversiondefault=1)
        """def get_bias(url):
            print("Hello")
            data = {'url': url}
            r = client.post('analyzesentiment', data)
            sentiment = r.json()['aggregate']['sentiment']
            score = r.json()['aggregate']['score']
            print(url + " | " + sentiment + " | " + str(score))
            return score"""

        paper = newspaper.build("http://" + news + ".com",
                                language='en',
                                memoize_articles=False)

        url = []

        for article in paper.articles:
            url.append(article.url)

        cumulative_score = 0.0
        countNegative = 0
        countPositive = 0
        countNeutral = 0
        """import multiprocessing as mp

        p = mp.Pool(3)
        res = p.map(get_bias, url)"""

        # list the category URLs newspaper discovered for this source
        print(paper.category_urls())

        for u in url:
            data = {'url': u}
            r = client.post('analyzesentiment', data)
            sentiment = r.json()['aggregate']['sentiment']
            score = r.json()['aggregate']['score']
            print(u + " | " + sentiment + " | " + str(score))
            cumulative_score += score
            if sentiment == 'positive':
                countPositive += 1
            elif sentiment == 'negative':
                countNegative += 1
            elif sentiment == 'neutral':
                countNeutral += 1

        print(cumulative_score)
        if url:  # guard against an empty article list
            print(cumulative_score / len(url))

    except Exception as e:
        return json.dumps({'error': str(e)})

    return news + topic + category
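Both Calculate variants carry a commented-out multiprocessing idea: a Pool(3) mapped over the URL list with get_bias. A sketch of that approach with get_bias hoisted to module level so Pool workers can import it; the pool size follows the commented code, while the client setup and sample URLs are assumptions:

import multiprocessing as mp

from havenondemand.hodindex import HODClient

client = HODClient(apikey='API_KEY', apiversiondefault=1)

def get_bias(url):
    # score a single article URL with the analyzesentiment API
    data = {'url': url}
    r = client.post('analyzesentiment', data)
    return r.json()['aggregate']['score']

if __name__ == '__main__':
    urls = ['http://example.com/a', 'http://example.com/b']  # placeholder URLs
    with mp.Pool(3) as p:
        scores = p.map(get_bias, urls)
    print(sum(scores) / len(scores))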