コード例 #1
0
ファイル: NLP.py プロジェクト: Zishuo/TwitterAnalyst
class NLPEngine:
    """Thin wrapper around AlchemyAPI for tweet sentiment and city extraction."""

    def __init__(self):
        # NOTE(review): hard-coded API key should move to config/env.
        self.alchemyapi = AlchemyAPI()
        self.alchemyapi.apikey = 'a6e464bee67aebc9f2197ecffd88d48187bf469e'

    def sentiment(self, text):
        """Return the document sentiment score for *text*.

        Returns 0 for neutral sentiment or on any API error; otherwise the
        score field as reported by AlchemyAPI (a numeric string per the
        response dict — not cast here to preserve the original contract).
        """
        response = self.alchemyapi.sentiment("text", text)
        if response['status'] != 'OK':
            # Surface the failing response for debugging, then fall back to 0.
            # (print() with one argument behaves identically on py2 and py3.)
            print(response)
            return 0
        if response['docSentiment']['type'] == 'neutral':
            return 0
        return response['docSentiment']['score']

    def place(self, text):
        """Return the text of the first 'City' entity in *text*, or None."""
        response = self.alchemyapi.entities('text', text)
        if response['status'] != 'OK':
            print(response)
            return None
        for entity in response['entities']:
            if entity['type'] == 'City':
                return entity['text']
        return None
コード例 #2
0
ファイル: NLP.py プロジェクト: yyyfor/TwitterAnalyst
class NLPEngine:
    """Wraps AlchemyAPI to expose sentiment scoring and city extraction."""

    def __init__(self):
        self.alchemyapi = AlchemyAPI()
        self.alchemyapi.apikey = 'a6e464bee67aebc9f2197ecffd88d48187bf469e'

    def sentiment(self, text):
        # Score the document; neutral and error cases both map to 0.
        response = self.alchemyapi.sentiment("text", text)
        if response['status'] != 'OK':
            print(response)
            return 0
        doc = response['docSentiment']
        return 0 if doc['type'] == 'neutral' else doc['score']

    def place(self, text):
        # Scan the extracted entities; the first one typed 'City' wins.
        response = self.alchemyapi.entities('text', text)
        if response['status'] != 'OK':
            print(response)
            return None
        for candidate in response['entities']:
            if candidate['type'] == 'City':
                return candidate['text']
        return None
コード例 #3
0
class ProductReviewAnalyzer:
    """Wrapper implementing product-review analysis via AlchemyAPI.

    Exposes overall sentiment, keyword extraction and entity extraction,
    each returning None (after printing a diagnostic) on API error or when
    the review text is missing/empty.
    """

    def __init__(self):
        self.alchemyapi = AlchemyAPI()

    def AnalyzeOverallSentiment(self, product_review):
        """Return the raw AlchemyAPI sentiment response, or None on error/empty review."""
        if product_review.userReview and len(product_review.userReview) > 0:
            response = self.alchemyapi.sentiment("text", product_review.userReview)
            if response['status'] == 'OK':
                return response
            print('Error in sentiment analysis call: ', response['statusInfo'])
            return None
        print("error : product review is not set")
        return None

    def KeywordExtractor(self, product_review):
        """Return the keyword list (with per-keyword sentiment), or None on error/empty review."""
        if product_review.userReview and len(product_review.userReview) > 0:
            response = self.alchemyapi.keywords("text", product_review.userReview, {'sentiment': 1})
            if response['status'] == 'OK':
                return response['keywords']
            # Explicit return added: the error branch previously fell through
            # to an implicit None, inconsistently with the other methods.
            print('Error in keyword extractor call: ', response['statusInfo'])
            return None
        print("error product review is not set")
        return None

    def EntityExtractor(self, product_review):
        """Return the entity list (with per-entity sentiment), or None on error/empty review."""
        if product_review.userReview and len(product_review.userReview) > 0:
            response = self.alchemyapi.entities("text", product_review.userReview, {'sentiment': 1})
            if response['status'] == 'OK':
                return response['entities']
            print('Error in Entity extractor call: ', response['statusInfo'])
            return None
        print("error product review is not set")
        return None
コード例 #4
0
ファイル: alc_system.py プロジェクト: ymoon/polititweetstorm
def alc_sent_system(search_results, text_to_info, topic):
    """Classify the sentiment of tweets toward *topic* using AlchemyAPI.

    For each tweet, entity-targeted sentiment is preferred when the topic
    resolves to a named entity; otherwise whole-document sentiment is used.

    Args:
        search_results: iterable of tweet texts.
        text_to_info: mapping from tweet text to its full tweet-info object.
        topic: unigram/bigram search topic.

    Returns:
        (pos_count, neg_count, pos_examples, neg_examples,
         pos_percent, neg_percent, entity_or_topic)
    """
    pos_examples = []  # up to 10 positive sample tweets
    neg_examples = []  # up to 10 negative sample tweets

    alc = AlchemyAPI()
    alc_neg_count = 0
    alc_pos_count = 0
    alc_sent_score = 0.0

    # Resolve the topic to a canonical entity string if the API finds one;
    # the unigram/bigram provided should yield at most one entity.
    try:
        ent_topic = alc.entities("text", topic, {'sentiment': 0, 'language': 'english'})["entities"][0]["text"].lower()
        print("valid topic entity", ent_topic)
    except Exception:
        # Topic has no entity (or the call failed): fall back to basic
        # whole-document sentiment below.
        ent_topic = None

    for tweet in search_results:
        the_sent = "neutral"
        ent_response = alc.entities("text", tweet, {'sentiment': 1, 'language': 'english'})
        if ent_topic and ("entities" in ent_response):
            # Entity-targeted sentiment: only entities overlapping the topic count.
            for ent in ent_response["entities"]:
                ent_text = ent["text"].lower()
                if ent_text in ent_topic or ent_topic in ent_text:
                    if ent["sentiment"]["type"] == "negative":
                        alc_neg_count += 1
                        the_sent = "negative"
                    elif ent["sentiment"]["type"] == "positive":
                        alc_pos_count += 1
                        # BUGFIX: was the misspelled "postive", so positive
                        # examples were never collected from this branch.
                        the_sent = "positive"
                    # Neutral sentiment carries no score key.
                    # (Original line had broken mixed tab/space indentation.)
                    if "score" in ent["sentiment"]:
                        alc_sent_score += float(ent["sentiment"]["score"])
        else:
            # No usable entities in the tweet: classify the entire tweet.
            sent_response = alc.sentiment("text", tweet, {'language': 'english'})
            if sent_response["docSentiment"]["type"] == "negative":
                alc_neg_count += 1
                the_sent = "negative"
            elif sent_response["docSentiment"]["type"] == "positive":
                alc_pos_count += 1
                the_sent = "positive"
            # Add score to the running total; neutral responses omit it.
            if "score" in sent_response["docSentiment"]:
                alc_sent_score += float(sent_response["docSentiment"]["score"])

        # Gather up to 10 example tweets per polarity.
        if the_sent == "positive" and len(pos_examples) < 10:
            print("adding positive example")
            pos_examples.append(tweet)
        elif the_sent == "negative" and len(neg_examples) < 10:
            neg_examples.append(tweet)

    # Swap example texts for their full tweet-info objects.
    pos_examples = [text_to_info[t] for t in pos_examples]
    neg_examples = [text_to_info[t] for t in neg_examples]
    print(neg_examples)

    total = alc_pos_count + alc_neg_count
    if total > 0:
        pos_percent = (alc_pos_count / float(total)) * 100
        neg_percent = (alc_neg_count / float(total)) * 100
    else:
        print("Something went wrong/no tweets were found")
        pos_percent = 0
        neg_percent = 0
    print(pos_percent)
    print(neg_percent)

    # Majority vote over counted examples.
    if alc_neg_count > alc_pos_count:
        alc_sent_from_examples = "negative"
    elif alc_neg_count < alc_pos_count:
        alc_sent_from_examples = "positive"
    else:
        alc_sent_from_examples = "neutral"
    print("Sentiment from examples: ", alc_sent_from_examples)

    # Sign of the accumulated score as a second opinion.
    if alc_sent_score < 0:
        alc_sent_from_score = "negative"
    elif alc_sent_score > 0:
        alc_sent_from_score = "positive"
    else:
        alc_sent_from_score = "neutral"
    print("Sentiment from score: ", alc_sent_from_score)

    if ent_topic:
        return (alc_pos_count, alc_neg_count, pos_examples, neg_examples, pos_percent, neg_percent, ent_topic)
    return (alc_pos_count, alc_neg_count, pos_examples, neg_examples, pos_percent, neg_percent, topic)
コード例 #5
0
    def post(self):
        """
        Get wufoo data from webhook.

        Handles two payload shapes:
          * leanworkbench.com JSON with "create" set: register the survey
            under the current user.
          * wufoo.com form-encoded webhook: run sentiment analysis on each
            textarea answer and persist the scores.
        """
        # potential data coming in from wufoo.com
        data = request.form
        # potential data coming in from leanworkbench.com
        if request.data:
            lwb_data = loads(request.data)
            create = lwb_data.get("create")
        else:
            create = False

        # if creating/registering survey to user
        if create:
            if current_user.is_anonymous():
                return dumps([{"status":400}])
            else:
                url = lwb_data.get("url")
                handshake = lwb_data.get("handshake")
                if not url:
                    return jsonify(emsg="No url given")
                else:
                    print('attempting to add survey')
                    try:
                        # Survey name is the second-to-last path segment of the URL.
                        name = url.split('/')[-2]
                        new_survey = WufooSurveyModel(username=current_user.email, url=url, name=name, handshake=handshake)
                        db.session.add(new_survey)
                        db.session.commit()
                        return make_response(dumps({"status":200, "msg":"Survey successfully added"}))
                    except Exception:
                        # Narrowed from a bare except so SystemExit/KeyboardInterrupt
                        # still propagate; any parse/DB failure becomes a 500.
                        traceback.print_exc()
                        return jsonify(status=500)
        # if webhook and not the user registering the survey for the first time
        else:
            # parse json load
            entry_id = data.get("EntryId")
            form_structure = data.get("FormStructure")
            form_structure_dict = loads(form_structure)
            created_by = form_structure_dict.get('Email')
            url = form_structure_dict.get("Url")
            field_structure = data.get("FieldStructure")
            field_structure_dict = loads(field_structure)
            fields = field_structure_dict.get("Fields")
            handshake = data.get("HandshakeKey")
            # get survey; the most recent registration wins
            survey = WufooSurveyModel.query.filter_by(wufoo_email=created_by, name=url).all()
            if not survey:
                print('survey does not exist yet')
                # if survey doesn't exist yet, pass
                return jsonify(status="This survey does not exist yet.")
            survey = survey[-1]
            survey_handshake = survey.handshake
            # Wufoo sends "" when no handshake is configured; normalize to None
            # so it compares equal to an unset stored handshake.
            if handshake == "":
                handshake = None
            if survey_handshake != handshake:
                print('handshake not equal')
                return jsonify(status="Handshake invalid")

            # collect IDs of textarea fields -- only free-text answers get scored
            textareas = []
            for field in fields:
                print(field)
                field_type = field.get("Type")
                if field_type == "textarea":
                    textareas.append(field.get("ID"))

            alchemyapi = AlchemyAPI(os.getenv("ALCHEMYAPI_KEY"))
            for key in data:
                if key in textareas:
                    text = data[key]
                    response = alchemyapi.sentiment('text', text)
                    if response['status'] == 'OK':
                        docsentiment = response.get("docSentiment")
                        score = docsentiment.get("score")
                        sent_type = docsentiment.get("type")
                        new_sentiment = WufooTextareaSentiment(score=score,
                            sentiment_type=sent_type, text=text)
                        survey.textareas.append(new_sentiment)
                        db.session.add(survey)
                        db.session.add(new_sentiment)
                        db.session.commit()

                    else:
                        print('alchemy failed')
コード例 #6
0
   response = alchemyapi.sentiment("text",tweet)
   print "Sentiment : ",response["docSentiment"]["type"] ," " ,tweet[1]
   if response["docSentiment"]["type"] == tweet[1]:  
      accuracy+= 1
print "Total accuracy : ", (accuracy/total)*100
'''
# Stream tweets stored in MongoDB through AlchemyAPI sentiment analysis
# and print the sentiment type for each scored tweet.
client = MongoClient('localhost', port=27017)
db = client.test
#db = client['test-database']
#collection = db.test_collection
tweets = db['21-11-15']  # collection named by scrape date

cnt = 0
print('begin searching ')
for tweet in tweets.find():
    # NOTE(review): the whole Mongo document is passed to the API here;
    # presumably tweet.get('text') was intended (see the commented guard
    # in the original) -- confirm before changing behavior.
    tweet_p = tweet
    response = alchemyapi.sentiment("text", tweet_p)
    if response['status'] == "ERROR":
        # Skip tweets the API refuses (rate limits, unsupported text, ...).
        continue
    sentiment = response['docSentiment']
    # Neutral responses may omit 'type'; only print when present.
    if 'type' in sentiment:
        print(" Sentiment : ", sentiment["type"])
コード例 #7
0
ファイル: test.py プロジェクト: rhouck/gdp
from alchemyapi_python.alchemyapi import AlchemyAPI
alchemyapi = AlchemyAPI()

#myText = "I'm excited to get started with AlchemyAPI!"
#response = alchemyapi.sentiment("text", myText)
#print "Sentiment: ", response["docSentiment"]["type"]

# (date string, article URL) pairs to score by URL-based sentiment.
articles = [
    ["October 24, 2014, Friday", "http://www.nytimes.com/2014/10/24/business/cheaper-fuel-helps-airlines-to-record-profits.html"],
    ["26-Sep-14", "http://www.ft.com/cms/s/0/05aa74a4-457f-11e4-ab86-00144feabdc0.html"]
]
sentiments = []
for a in articles:
    try:
        response = alchemyapi.sentiment("url", a[1])
        result = {'date': parse(a[0]),
                  'sentiment-type': response["docSentiment"]["type"],
                  'sentiment-score': response["docSentiment"]["score"],
                  'sentiment-mixed': response["docSentiment"]["mixed"]}
        sentiments.append(result)
    except Exception:
        # Best-effort scrape: skip articles the API cannot score. Narrowed
        # from a bare except so KeyboardInterrupt/SystemExit still propagate.
        pass

pprint.pprint(sentiments)




コード例 #8
0
    def post(self):
        """
        Get wufoo data from webhook.

        Handles two payload shapes:
          * leanworkbench.com JSON with "create" set: register the survey
            under the current user.
          * wufoo.com form-encoded webhook: run sentiment analysis on each
            textarea answer and persist the scores.
        """
        # potential data coming in from wufoo.com
        data = request.form
        # potential data coming in from leanworkbench.com
        if request.data:
            lwb_data = loads(request.data)
            create = lwb_data.get("create")
        else:
            create = False

        # if creating/registering survey to user
        if create:
            if current_user.is_anonymous():
                return dumps([{"status": 400}])
            else:
                url = lwb_data.get("url")
                handshake = lwb_data.get("handshake")
                if not url:
                    return jsonify(emsg="No url given")
                else:
                    print('attempting to add survey')
                    try:
                        # Survey name is the second-to-last path segment of the URL.
                        name = url.split('/')[-2]
                        new_survey = WufooSurveyModel(
                            username=current_user.email,
                            url=url,
                            name=name,
                            handshake=handshake)
                        db.session.add(new_survey)
                        db.session.commit()
                        return make_response(
                            dumps({
                                "status": 200,
                                "msg": "Survey successfully added"
                            }))
                    except Exception:
                        # Narrowed from a bare except so SystemExit and
                        # KeyboardInterrupt still propagate; any parse/DB
                        # failure becomes a 500.
                        traceback.print_exc()
                        return jsonify(status=500)
        # if webhook and not the user registering the survey for the first time
        else:
            # parse json load
            entry_id = data.get("EntryId")
            form_structure = data.get("FormStructure")
            form_structure_dict = loads(form_structure)
            created_by = form_structure_dict.get('Email')
            url = form_structure_dict.get("Url")
            field_structure = data.get("FieldStructure")
            field_structure_dict = loads(field_structure)
            fields = field_structure_dict.get("Fields")
            handshake = data.get("HandshakeKey")
            # get survey; the most recent registration wins
            survey = WufooSurveyModel.query.filter_by(wufoo_email=created_by,
                                                      name=url).all()
            if not survey:
                print('survey does not exist yet')
                # if survey doesn't exist yet, pass
                return jsonify(status="This survey does not exist yet.")
            survey = survey[-1]
            survey_handshake = survey.handshake
            # Wufoo sends "" when no handshake is configured; normalize to
            # None so it compares equal to an unset stored handshake.
            if handshake == "":
                handshake = None
            if survey_handshake != handshake:
                print('handshake not equal')
                return jsonify(status="Handshake invalid")

            # collect IDs of textarea fields -- only free-text answers scored
            textareas = []
            for field in fields:
                print(field)
                field_type = field.get("Type")
                if field_type == "textarea":
                    textareas.append(field.get("ID"))

            alchemyapi = AlchemyAPI(os.getenv("ALCHEMYAPI_KEY"))
            for key in data:
                if key in textareas:
                    text = data[key]
                    response = alchemyapi.sentiment('text', text)
                    if response['status'] == 'OK':
                        docsentiment = response.get("docSentiment")
                        score = docsentiment.get("score")
                        sent_type = docsentiment.get("type")
                        new_sentiment = WufooTextareaSentiment(
                            score=score, sentiment_type=sent_type, text=text)
                        survey.textareas.append(new_sentiment)
                        db.session.add(survey)
                        db.session.add(new_sentiment)
                        db.session.commit()

                    else:
                        print('alchemy failed')