Code Example #1
def getData(filename):
    # Read the saved tweet archive for this user and collapse it to one line.
    with open(
            'C:\\Users\\gatesyp\\Documents\\GitHub\\twiddit\\tweetsRepo\\' +
            filename + '.txt', 'r') as myfile:
        data = myfile.read().replace('\n', ' ')

    alchemyapi = AlchemyAPI()
    # Extract named entities (with sentiment) from the collected tweets.
    response = alchemyapi.entities('text', data, {'sentiment': 1})

    if response['status'] == 'OK':
        #print(json.dumps(response, indent=4))
        for entity in response['entities']:
            try:
                result = testGoog.searchGoog(entity['text'])

                if float(entity['relevance']) > 0.4:
                    #print(result[0]['url'])
                    #print(result[1]['url'])
                    redditHandler.subscribe(result[0]['url'], mybiszti,
                                            lolipop123)
                    redditHandler.subscribe(result[1]['url'], mybiszti,
                                            lolipop123)

            except Exception:
                # the Google lookup or Reddit subscribe call failed (e.g. HTTP 403)
                print("none or code 403")
Code Example #2
File: NLP.py  Project: yyyfor/TwitterAnalyst
class NLPEngine:
    def __init__(self):
        self.alchemyapi = AlchemyAPI()
        self.alchemyapi.apikey = 'a6e464bee67aebc9f2197ecffd88d48187bf469e'

    def sentiment(self, text):
        response = self.alchemyapi.sentiment("text", text)
        if response['status'] == 'OK':
            if response['docSentiment']['type'] == 'neutral':
                return 0
            else:
                return response['docSentiment']['score']
        else:
            print response
            return 0

    def place(self, text):
        response = self.alchemyapi.entities('text', text)
        result = None
        if response['status'] == 'OK':
            for entity in response['entities']:
                if entity['type'] == 'City':
                    result = entity['text']
                    break
        else:
            print response
        return result
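
A minimal usage sketch for the class above (not part of the project; it assumes a valid AlchemyAPI key is configured):

engine = NLPEngine()
print engine.sentiment("I had a wonderful time in Kathmandu")  # a signed score string such as "0.58", or 0 for neutral
print engine.place("I had a wonderful time in Kathmandu")      # "Kathmandu", or None when no City entity is found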
Code Example #3
File: NLP.py  Project: Zishuo/TwitterAnalyst
class NLPEngine:
    def __init__(self):
        self.alchemyapi = AlchemyAPI()
        self.alchemyapi.apikey = 'a6e464bee67aebc9f2197ecffd88d48187bf469e'

    def sentiment(self, text):
        response = self.alchemyapi.sentiment("text", text)
        if response['status'] == 'OK':
            if response['docSentiment']['type'] == 'neutral':
                return 0
            else:
                return response['docSentiment']['score']
        else:
            print response
            return 0

    def place(self, text):
        response = self.alchemyapi.entities('text', text)
        result = None
        if response['status'] == 'OK':
            for entity in response['entities']:
                if entity['type'] == 'City':
                    result = entity['text']
                    break
        else:
            print response
        return result
Code Example #4
import pprint
from alchemyapi import AlchemyAPI  # assumed imports; not shown in the original snippet
pp = pprint.PrettyPrinter(indent=2)


def test_alchemy():
    text_1 = "The decision by the independent MP Andrew Wilkie to withdraw his support for the minority Labor government sounded dramatic but it should not further threaten its stability. When, after the 2010 election, Wilkie, Rob Oakeshott, Tony Windsor and the Greens agreed to support Labor, they gave just two guarantees: confidence and supply"
    text_2 = "inflation plan, initially hailed at home and abroad as the saviour of the economy, is limping towards its first anniversary amid soaring prices, widespread shortages and a foreign payments crisis.     Announced last February 28 the plan  prices, fixed the value of the new Cruzado currency and ended widespread indexation of the economy in a bid to halt the country's 250 pct inflation rate.     But within a year the plan has all but collapsed.     \"The situation now is worse than it was. Although there was inflation, at least the economy worked,\" a leading bank economist said.     The crumbling of the plan has been accompanied by a dramatic reversal in the foreign trade account. In 1984 and 1985 Brazil's annual trade surpluses had been sufficient to cover the 12 billion dlrs needed to service its 109 billion dlr foreign debt.     For the first nine months of 1986 all seemed to be on target for a repeat, with monthly surpluses averaging one billion dlrs. But as exports were diverted and imports increased to avoid further domestic shortages the trade surplus plunged to 211 mln dlrs in October and since then has averaged under 150 mln.  Reuter "
    alchemyapi = AlchemyAPI()
    response = alchemyapi.entities('text', text_2, {'sentiment': 1})

    pp.pprint(response)
    print len(response['entities'])
Code Example #5
def getAnnotation(text):

    annotations = spotlight.annotate(
        'http://spotlight.dbpedia.org/rest/annotate',
        text,
        confidence=0.25,
        support=40)
    annotationsSorted = sorted(annotations, key=lambda k: k['similarityScore'])
    setSpotlight = set(map(lambda x: x['URI'], annotationsSorted))
    """
    { u'URI': u'http://dbpedia.org/resource/People',
      u'offset': 321,
      u'percentageOfSecondRank': -1.0,
      u'similarityScore': 0.08647863566875458,
      u'support': 426,
      u'surfaceForm': u'people',
      u'types': u'DBpedia:TopicalConcept'}
    """

    alchemyapi = AlchemyAPI()
    response = alchemyapi.entities('text', text, {'sentiment': 1})
    resFilt = filter(lambda x: 'disambiguated' in x, response['entities'])
    key = ['dbpedia', 'geonames', 'yago', 'opencyc']

    entitySet = set()

    for r in resFilt:
        for k in key:
            if k in r['disambiguated']:
                entitySet.add(r['disambiguated'][k])
    """
    {u'count': u'1',
      u'disambiguated': {u'dbpedia': u'http://dbpedia.org/resource/Kathmandu',
       u'freebase': u'http://rdf.freebase.com/ns/m.04cx5',
       u'geo': u'27.716666666666665 85.36666666666666',
       u'geonames': u'http://sws.geonames.org/1283240/',
       u'name': u'Kathmandu',
       u'subType': [u'TouristAttraction'],
       u'website': u'http://www.kathmandu.gov.np/',
       u'yago': u'http://yago-knowledge.org/resource/Kathmandu'},
      u'relevance': u'0.33',
      u'sentiment': {u'type': u'neutral'},
      u'text': u'Kathmandu',
      u'type': u'City'},
    """

    entitySet.update(setSpotlight)

    return entitySet, annotationsSorted, response
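
A hedged usage sketch for getAnnotation; the sample text is illustrative, and it assumes the pyspotlight package and an AlchemyAPI key are available:

entitySet, annotations, response = getAnnotation("Kathmandu is the capital city of Nepal.")
print(entitySet)  # merged DBpedia/GeoNames/YAGO/OpenCyc URIs from Spotlight and AlchemyAPI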
Code Example #6
def extract_text_alchemy(url):
    # for content processing
    # to connect with alchemy and tag the content
    alchemyapi = AlchemyAPI()

    resp = alchemyapi.text('url', url)

    response = alchemyapi.keywords("text", resp['text'])

    if "keywords" in response.keys():
        keywords = response["keywords"]
    else:
        print "No parsed data: ", url
        print response
        keywords = []

    return keywords
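
A usage sketch; the URL is a placeholder, and each returned keyword is a dict whose 'text' and 'relevance' fields are printed here:

keywords = extract_text_alchemy('http://www.example.com/some-article')
for kw in keywords:
    print kw['text'], kw['relevance']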
Code Example #7
class ProductReviewAnalyzer:
    '''
    Wrapper class that implements sentiment analysis with a custom analyzer
    or an external API; presently AlchemyAPI is used.
    '''

    def __init__(self):
        self.alchemyapi = AlchemyAPI()

    def AnalyzeOverallSentiment(self, product_review):
        if product_review.userReview and len(product_review.userReview) > 0:
            response = self.alchemyapi.sentiment("text", product_review.userReview)
            if response['status'] == 'OK':
                return response
            else:
                print 'Error in sentiment analysis call: ', response['statusInfo']
                return None
        else:
            print "error: product review is not set"
            return None

    def KeywordExtractor(self, product_review):
        if product_review.userReview and len(product_review.userReview) > 0:
            response = self.alchemyapi.keywords("text", product_review.userReview, {'sentiment': 1})
            if response['status'] == 'OK':
                return response['keywords']
            else:
                print 'Error in keyword extractor call: ', response['statusInfo']
                return None
        else:
            print "error: product review is not set"
            return None

    def EntityExtractor(self, product_review):
        if product_review.userReview and len(product_review.userReview) > 0:
            response = self.alchemyapi.entities("text", product_review.userReview, {'sentiment': 1})
            if response['status'] == 'OK':
                return response['entities']
            else:
                print 'Error in Entity extractor call: ', response['statusInfo']
                return None
        else:
            print "error: product review is not set"
            return None
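
A sketch of how the analyzer might be driven. ProductReview here is a hypothetical stand-in for the project's review object, which only needs a userReview attribute:

from collections import namedtuple

ProductReview = namedtuple('ProductReview', ['userReview'])  # hypothetical stand-in
analyzer = ProductReviewAnalyzer()
review = ProductReview(userReview="Battery life is great but the screen scratches easily.")
overall = analyzer.AnalyzeOverallSentiment(review)
if overall:
    print overall['docSentiment']['type'], overall['docSentiment'].get('score')
for kw in analyzer.KeywordExtractor(review) or []:
    print kw['text'], kw['sentiment']['type']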
Code Example #8
    else:
        print('Error in taxonomy call: ', response['statusInfo'])


def authenticate():
    consumer_key = 'Enter your consumer key from the Twitter API here'
    consumer_secret = 'Enter your consumer secret from the Twitter API here'
    access_token = 'Enter your access token from the Twitter API here'
    access_token_secret = 'Enter your access token secret from the Twitter API here'

    auth = tp.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_token_secret)
    return tp.API(auth)


alchemyapi = AlchemyAPI()


def processFeed(user):
    try:
        api = authenticate()
        #user = raw_input('Enter Twitter handle: ')
        tweets = fetch_tweets(api, user)  #A list of tweets
        #print(len(tweets))

        urls = []
        for tweet in tweets:
            url = text_and_url(tweet)
            if url:
                #print(url)
                urls.append(url)
Code Example #9
class Alchemy:
    def __init__(self):
        logging.debug("Loading Alchemy...")
        self.alchemyapi = AlchemyAPI()

    def run(self, data, target, options=None):
        if target == "combined":
            if options is None:
                options = ['main', 'sentiment', 'emotion']  # default
            return self.combined(data, options)
        elif target == "text":
            return self.text(data)
        else:
            logging.error("Unhandled target:" + target)

    def text(self, url):
        logging.debug("[T] Requesting data from Alchemy...")
        response = self.alchemyapi.text('url', url)
        logging.debug("Finished!")

        if response['status'] == 'OK':
            return response
        else:
            logging.error('Error in concept tagging call: ' +
                          str(response['statusInfo']))
            sys.exit(0)

        return None

    def combined(self, text, options):
        logging.debug("[C] Requesting data from Alchemy...")

        result = {}
        for n in options:
            if n == "main":
                response = self.combined_helper(text)
                result[u'entities'] = response['entities']
                result[u'taxonomy'] = response['taxonomy']
                result[u'concepts'] = response['concepts']
                result[u'keywords'] = response['keywords']
            elif n == "sentiment":
                response = self.combined_helper(
                    text, options={"extract": 'doc-sentiment'})
                result[u'docSentiment'] = response['docSentiment']
            elif n == "emotion":
                response = self.combined_helper(
                    text, options={"extract": 'doc-emotion'})
                result[u'docEmotions'] = response['docEmotions']

        logging.debug("Finished!")

        return result

    def combined_helper(self, text, options={}):
        response = self.alchemyapi.combined('text', text, options=options)

        if response['status'] == 'OK':
            return response
        else:
            logging.error('Error in concept tagging call: ' +
                          str(response['statusInfo']))
            sys.exit(0)

        return None
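
A usage sketch for the wrapper above (assumes the alchemyapi module can find its API key, e.g. in api_key.txt):

logging.basicConfig(level=logging.DEBUG)
alchemy = Alchemy()

# document-level sentiment only
result = alchemy.run("The new update is fantastic", "combined", options=["sentiment"])
print(result.get("docSentiment"))

# clean text extraction from a page
page = alchemy.run("http://www.example.com/article", "text")
print(page.get("text"))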
Code Example #10
entity_list = None
entity_type = ""
entity_text = ""
person_name = ""
person_name_to_entity_map = {}

# "relations" request type (subject-verb-object NLP sentence parsing)
relations_response = None
relation_action = None
relation_action_text = ""
relation_action_verb_text = ""
relation_subject = None
relation_subject_text = ""

# create AlchemyAPI instance
my_alchemy_API = AlchemyAPI()

# retrieve an article to try out.
article_id = 327069
article = Article.objects.get(id=article_id)

# then, get text.
article_text = article.article_text_set.get()

# retrieve article body
article_body_html = article_text.get_content()
article_body_text = article_text.get_content_sans_html()

# try the API - combined - not available for HTML.
combined_response = my_alchemy_API.combined('text', article_body_text)
Code Example #11
from alchemyapi_python.alchemyapi import AlchemyAPI
from pymongo import MongoClient

alchemyapi = AlchemyAPI()
print 'success'
def read_tweets(fname, t_type):
    tweets = []
    f = open(fname, 'r')
    line = f.readline()
    while line != '':
         tweets.append([line, t_type])
         #tweets.append(line)
         line = f.readline()
    f.close()
    return tweets



# example test
myText = "I'm very excited to get started with AlchemyAPI!"
response = alchemyapi.sentiment("text", myText)
print "Sentiment: ", response["docSentiment"]["score"]

# testing of AlchemyAPI classifier based on sample test data
test_tweets = read_tweets('happy_test.txt', 'positive')
test_tweets.extend(read_tweets('sad_test.txt', 'negative'))
accuracy = 0
total = len(test_tweets)
for tweet in test_tweets:
    response = alchemyapi.sentiment("text", tweet[0])  # tweet is a [text, label] pair
Code Example #12
File: test.py  Project: rhouck/gdp
"""
search_results = t.search(q="@united", count=5)
try:
    for tweet in search_results["statuses"]:
        created = parse(tweet['created_at'])
        # ignore retweeted content
        #if tweet['text'][:2] == "RT":
        #	continue
        print created
        print tweet['text']
except TwythonError as e:
    print e
"""

from alchemyapi_python.alchemyapi import AlchemyAPI
alchemyapi = AlchemyAPI()

#myText = "I'm excited to get started with AlchemyAPI!"
#response = alchemyapi.sentiment("text", myText)
#print "Sentiment: ", response["docSentiment"]["type"]

articles = [
    ["October 24, 2014, Friday", "http://www.nytimes.com/2014/10/24/business/cheaper-fuel-helps-airlines-to-record-profits.html"],
    ["26-Sep-14", "http://www.ft.com/cms/s/0/05aa74a4-457f-11e4-ab86-00144feabdc0.html"]
]
sentiments = []
for a in articles:
    try:
        response = alchemyapi.sentiment("url", a[1])
        result = {'date': parse(a[0]), 
                'sentiment-type': response["docSentiment"]["type"], 
Code Example #13
File: alc_system.py  Project: ymoon/polititweetstorm
def alc_sent_system(search_results, text_to_info, topic):

	# Lists of example texts
	pos_examples = []
	neg_examples = []


	alc = AlchemyAPI()
	alc_neg_count = 0
	alc_pos_count = 0
	alc_sent_score = 0.0

	# Grab the entity of the topic 
	# There should only be one with the unigram/bigram provided
	try:
		ent_topic = alc.entities("text", topic, {'sentiment': 0, 'language': 'english'})["entities"][0]["text"].lower()
		print "valid topic entity", ent_topic
	except:
		# topic has no entity so just grab basic doc text sentiment
		ent_topic = None

	# Loop through search_results to classify each tweet in results
	for tweet in search_results:
		#Look into entity recognition and actually extracting sentiment towards the entity searched for and not just the overall tweet
		the_sent = "neutral"
		ent_response = alc.entities("text", tweet, {'sentiment': 1, 'language': 'english'})
		if (ent_topic and ("entities" in ent_response)):
			# Entity recognition based sentiment analysis
			for ent in ent_response["entities"]:
				#print ent["text"].lower()
				#check if the entity in the tweet is the same/is within the topic entity or vice versa
				if ((ent["text"].lower() in ent_topic) or (ent_topic in ent["text"].lower())):
					#print "entity in tweet was part of topic entity", ent_topic
					# Get Sentiment
					if ent["sentiment"]["type"] == "negative":
						alc_neg_count += 1
						the_sent = "negative"
					elif ent["sentiment"]["type"] == "positive":
						alc_pos_count += 1
						the_sent = "postive"
					# Get Sentiment score
        			if  "score" in ent["sentiment"]:
						alc_sent_score += float(ent["sentiment"]["score"])
		else:  # no entities found in the tweet, just classify the entire tweet
			#use alchemyAPI to determine if positive or negative
			sent_response = alc.sentiment("text", tweet, {'language': 'english'})
			#calculate if there are more positive or more negative examples
				
			if sent_response["docSentiment"]["type"] == "negative":
				alc_neg_count += 1
				the_sent = "negative"
			elif sent_response["docSentiment"]["type"] == "positive":
				alc_pos_count += 1
				the_sent = "positive"

			# add sent score value to system to determine overall score
			# (when neutral, no score is present)
			if "score" in sent_response["docSentiment"]:
				alc_sent_score += float(sent_response["docSentiment"]["score"])

		#Gather tweet examples
		if the_sent == "positive":
			if len(pos_examples) < 10:
				print "adding positive example"
				pos_examples.append(tweet)
		elif the_sent == "negative":
			if len(neg_examples) < 10:
				neg_examples.append(tweet)


	# Creates a list of json objects for the negative and positive tweet examples
	for i in range(len(pos_examples)):
		pos_examples[i] = text_to_info[pos_examples[i]]
	for i in range(len(neg_examples)):
		neg_examples[i] = text_to_info[neg_examples[i]]
	# print pos
	#print pos_examples
	# print neg
	print neg_examples
	if (alc_pos_count + alc_neg_count > 0):
		pos_percent = (alc_pos_count/float(alc_pos_count+alc_neg_count)) * 100
		neg_percent = (alc_neg_count/float(alc_pos_count+alc_neg_count)) * 100
	else:
		print "Something went wrong/no tweets were found"
		pos_percent = 0
		neg_percent = 0
	print pos_percent
	print neg_percent

	if alc_neg_count > alc_pos_count:
		alc_sent_from_examples = "negative"
	elif alc_neg_count < alc_pos_count:
		alc_sent_from_examples = "positive"
	else:
		alc_sent_from_examples = "neutral"
	print "Sentiment from examples: ", alc_sent_from_examples

	if alc_sent_score < 0:
		alc_sent_from_score = "negative"
	elif alc_sent_score > 0:
		alc_sent_from_score = "positive"
	else:
		alc_sent_from_score = "neutral"

	print "Sentiment from score: ", alc_sent_from_score 

	if ent_topic:
		return (alc_pos_count, alc_neg_count, pos_examples, neg_examples, pos_percent, neg_percent, ent_topic)
	else:
		return (alc_pos_count, alc_neg_count, pos_examples, neg_examples, pos_percent, neg_percent, topic)
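
A hedged sketch of the inputs this function expects: search_results is a list of tweet texts, text_to_info maps each text back to its tweet metadata, and topic is the search unigram/bigram:

tweets = ["I really like Obama", "that speech by Obama was terrible"]
info = {t: {"text": t} for t in tweets}  # stand-in metadata objects
results = alc_sent_system(tweets, info, "obama")
pos_count, neg_count = results[0], results[1]
print pos_count, neg_count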
Code Example #14
def find_keywords(text, citycode):
    text = text.encode('ascii', 'ignore')
    ###extractor = extract.TermExtractor()
    ###keywords = extractor(text)

    ###s1 = sorted(keywords,key=lambda term: term[1])
    ###s1.reverse()
    ###s2 = sorted(keywords,key=lambda term: term[2])
    ###s2.reverse()

    ###maxkw = 3
    ###s1 = [(kw[0],kw[1]) for kw in s1[0:maxkw] ]
    ###s2 = [(kw[0],kw[2]) for kw in s2[0:maxkw] ]

    ###kw = s1
    ###for k in s2:
    ###    if k not in kw:
    ###        kw.append(k)
    ###kw = sorted(kw,key=lambda term: term[1])
    ###kw.reverse()

    #kw = set(s1+s2)
    alchemyapi = AlchemyAPI()
    ###response = alchemyapi.keywords('text',text,{'sentiment':1,'maxRetrieve':5})
    response = alchemyapi.concepts('text', text, {'maxRetrieve': 5})

    kw = []
    if response['status'] == "OK":
        #kw = [(keyword['text'].encode('ascii'),float(keyword['relevance'])) for keyword in response['keywords']]
        kw = [(concept['text'].encode('ascii'), float(concept['relevance']))
              for concept in response['concepts']]
    print kw

    results = {}

    isFirst = True

    # all data?
    # opendata.socrata.com/browse
    dataSetQueryURLs = {
        'EDM':
        'https://data.edmonton.ca/browse?limitTo=maps&sortBy=relevance&q=%s',
        'CGO':
        'https://data.cityofchicago.org/browse?limitTo=maps&sortBy=relevance&q=%s',
        'NYC':
        'https://nycopendata.socrata.com/browse?limitTo=maps&sortBy=relevance&q=%s',
        'SFO':
        'https://data.sfgov.org/browse?limitTo=maps&sortBy=relevance&q=%s',
        'BOS':
        'https://data.cityofboston.gov/browse?limitTo=maps&sortBy=relevance&q=%s',
        'MTL':
        'https://montreal.demo.socrata.com/browse?limitTo=maps&sortBy=relevance&q=%s',
    }
    dataSetQueryURL = dataSetQueryURLs[citycode]

    linkclasses = {
        'EDM': 'nameLink',
        'CGO': 'name',
        'NYC': 'nameLink',
        'SFO': 'nameLink',
        'BOS': 'name',
        'MTL': 'nameLink',
    }
    linkclass = linkclasses[citycode]

    for key in kw:
        text = key[0]
        score = key[1]

        # save score somewhere?

        url = dataSetQueryURL % urllib.quote(text)

        content = urllib2.urlopen(url).read()
        soup = BeautifulSoup(content)
        links = soup.find_all("a", class_=linkclass)
        links = links[0:3]
        if len(links) > 0:
            results[text] = []
            for link in links:
                results[text].append(link['href'])
                if isFirst:
                    content = urllib2.urlopen(results[text][0]).read()
                    soup = BeautifulSoup(content)
                    embed = soup.find(id="embed_code")
                    embed = str(embed.contents[0])
                    embed = embed.replace('_width_px', '646')
                    embed = embed.replace('_height_px', '760')
                    results['the_featured_embed_'] = mark_safe(embed)
                    isFirst = False

    return results
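
A usage sketch; the text is illustrative, and citycode must be one of the keys defined above:

results = find_keywords(u"Cycling accidents and bike lane coverage downtown", 'NYC')
for concept, links in results.items():
    print concept, links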
Code Example #15
def main(config):
	while True:
		now = datetime.datetime.now()
		logger.info('Running at {now}'.format(now=now))
		if len(config['download_urls']) > 0:
			url = random.choice(config['download_urls'])
			logger.info('Downloading from \'{URL}\''.format(URL=url))
			bytes = download(url)
		else:
			logger.error('ERROR: No \'download_urls\' in config')
			return

		original_name = now.strftime(config['save_format_string'])

		glitched = glitch(bytes, config['glitch'])
		original_image = bytesToImage(bytes)
		glitched_image = bytesToImage(glitched)

		# original_image.show()
		# glitched_image.show()

		alchemyapi = AlchemyAPI(config['alchemyapi_key'])
		
		options = {'forceShowAll': 1}
		logger.info('Requesting tags for original image')
		orig_response = alchemyapi.imageTaggingRaw(bytes, options)
		logger.info('Requesting tags for glitched image')
		glitch_response = alchemyapi.imageTaggingRaw(glitched, options)
		keywords = getKeywords(orig_response)
		logger.info('Received original {orig} glitch {glitch}'.format(orig=len(keywords), glitch=len(getKeywords(glitch_response))))
		if (len(keywords) and len(getKeywords(glitch_response))):
			break


		
	original_image.save(config['save_directory'] + '/' + original_name)
	glitched_image.save(config['save_directory'] + '/' + config['prefix'] + original_name)

	logger.info('---Original---')
	printResponse(orig_response)
	logger.info('---Glitched---')
	printResponse(glitch_response)

	comment = ''
	for keyword in keywords:
		if keyword['score'] > 0.4:
			comment += ' #' + keyword['text'].replace(" ", "")

	if not len(comment):
		logger.info('No comment so far adding first')
		comment += ' #' + keywords[0]['text'].replace(" ", "")  # fall back to the first keyword, even below the score cutoff
	
	comment += ' #glitchart #digitalart'

	logger.info('Comment is \'{comment}\''.format(comment=comment))

	if config['confirmation']:
		x = raw_input('Upload: y/n? ')

		if x == 'y':
			post(config['save_directory'] + '/' + config['prefix'] + original_name,
				 comment, config['instagram'])
	else:
		post(config['save_directory'] + '/' + config['prefix'] + original_name,
				 comment, config['instagram'])

	return
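
A sketch of the config dict that main() consumes, with keys inferred from the accesses above; every value is a placeholder:

config = {
    'download_urls': ['http://www.example.com/random.jpg'],
    'save_format_string': '%Y%m%d-%H%M%S.jpg',
    'save_directory': 'images',
    'prefix': 'glitch-',
    'glitch': {},                # options passed through to the glitch() helper
    'alchemyapi_key': 'YOUR-ALCHEMYAPI-KEY',
    'confirmation': True,        # ask before posting to Instagram
    'instagram': {},             # credentials consumed by post()
}
main(config)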
Code Example #16
File: alchemy_demo.py  Project: madhuradas/fyp
from alchemyapi_python.alchemyapi import AlchemyAPI 		#add alchemyapi_python to /usr/lib/python27/site-packages/
import json
import os

alchemyapi = AlchemyAPI()

d = {}

path = raw_input('Enter the directory path to annotate images from : ')

missed = 0
count = 0
for root, dirs, files in os.walk(path):
    for f in files:
        count += 1
        if f.endswith('.png') or f.endswith('.jpg'):
            response = alchemyapi.imageTagging('image', path + '/' + f)
            if response['status'] == 'OK':
                print '## Response Object ##'
                print response["totalTransactions"]
                d[path + '/' + f] = response['imageKeywords']
                if len(response['imageKeywords']) == 0:
                    missed += 1
            else:
                print 'Error in image tagging call: ', response['statusInfo']
        print count
print "Missed:", str(missed)

open('first1000_encoding.json','w').write(json.dumps(d))
Code Example #17
    def post(self):
        """
        Get wufoo data from webhook
        """
        # potential data coming in from wufoo.com
        data = request.form
        # potential data coming in from leanworkbench.com
        if request.data:
            lwb_data = loads(request.data)
            create = lwb_data.get("create")
        else:
            create = False

        # if creating/registering survey to user
        if create:
            if current_user.is_anonymous():
                return dumps([{"status": 400}])
            else:
                url = lwb_data.get("url")
                handshake = lwb_data.get("handshake")
                if not url:
                    return jsonify(emsg="No url given")
                else:
                    print 'attempting to add survey'
                    try:
                        name = url.split('/')[-2]
                        new_survey = WufooSurveyModel(
                            username=current_user.email,
                            url=url,
                            name=name,
                            handshake=handshake)
                        db.session.add(new_survey)
                        db.session.commit()
                        return make_response(
                            dumps({
                                "status": 200,
                                "msg": "Survey successfully added"
                            }))
                    except Exception:
                        traceback.print_exc()
                        return jsonify(status=500)
        # if webhook and not the user registering the survey for the first time
        else:
            # parse json load
            entry_id = data.get("EntryId")
            form_structure = data.get("FormStructure")
            form_structure_dict = loads(form_structure)
            created_by = form_structure_dict.get('Email')
            url = form_structure_dict.get("Url")
            field_structure = data.get("FieldStructure")
            field_structure_dict = loads(field_structure)
            fields = field_structure_dict.get("Fields")
            handshake = data.get("HandshakeKey")
            # get survey
            survey = WufooSurveyModel.query.filter_by(wufoo_email=created_by,
                                                      name=url).all()
            if not survey:
                print 'survey does not exist yet'
                # if survey doesn't exist yet, pass
                return jsonify(status="This survey does not exist yet.")
            survey = survey[-1]
            survey_handshake = survey.handshake
            if handshake == "":
                handshake = None
            if survey_handshake != handshake:
                print 'handshake not equal'
                return jsonify(status="Handshake invalid")

            # get textareas
            textareas = []
            for field in fields:
                print field
                field_type = field.get("Type")
                if field_type == "textarea":
                    textareas.append(field.get("ID"))

            alchemyapi = AlchemyAPI(os.getenv("ALCHEMYAPI_KEY"))
            for key in data:
                if key in textareas:
                    text = data[key]
                    response = alchemyapi.sentiment('text', text)
                    if response['status'] == 'OK':
                        docsentiment = response.get("docSentiment")
                        score = docsentiment.get("score")
                        sent_type = docsentiment.get("type")
                        new_sentiment = WufooTextareaSentiment(
                            score=score, sentiment_type=sent_type, text=text)
                        survey.textareas.append(new_sentiment)
                        db.session.add(survey)
                        db.session.add(new_sentiment)
                        db.session.commit()

                    else:
                        print 'alchemy failed'
Code Example #18
File: rt_data.py  Project: furuolan/Acads
import json
import urllib
import time
import sys
import requests
import unirest
import os
import simplejson
import subprocess
import shlex
import numpy as np
#from __future__ import print_function
from alchemyapi_python.alchemyapi import AlchemyAPI

alchemyapi = AlchemyAPI()
demo_text = 'Yesterday dumb Bob destroyed my fancy iPhone in beautiful Denver, Colorado. I guess I will have to head over to the Apple Store and buy a new one.'
response = alchemyapi.entities('text', demo_text, {'sentiment': 1})


key = 'd7d457mawe4jc68nja63sjcn'
search_url1 = 'http://api.rottentomatoes.com/api/public/v1.0/movies.json?apikey=' + key + '&q='
search_url2 = '&page_limit=1&page='

rev_url1 = 'http://api.rottentomatoes.com/api/public/v1.0/movies/'
rev_url2 = '/reviews.json?apikey=' + key + '&page_limit=50'

mpaa_conversion = {"G":0, "PG":1, "PG-13":2,"R":3,"NC-17":4}
ratings_conversion = {"A":1.0,"B":0.7,"C":0.4,"D":0.2,"E":0}
def rt_features(search_str):
  search_url = search_url1+search_str+search_url2