Code example #1
def use_indico(Xtest, ytest):
    '''
    Batch reviews in groups of 100 and send them to the Indico high quality API to get
    sentiment results. Return the list of results, as well as the test features
    and test targets, to be used in testing the results. Process each document as a
    whole, rather than processing each sentence individually.
    Isolate the fetching of the sentiment results from Indico from the use of
    those results, so that if something goes wrong, we don't need to fetch again.
    No need to clean and vectorize training reviews, or train a random forest
    on them, because Indico has done all of that already. Just strip out html.
    '''
    print "Cleaning html from the test set of movie reviews..."
    clean_test_reviews = remove_html(Xtest)
    print "Running Indico queries..."
    print "This will take a while..."
    # process the reviews in batches of 100, then finish with the leftovers, if any
    # Indico is not splitting on sentences, returns one sentiment per review
    sentiment_lists = []
    for i in range(100,len(Xtest),100):
        print "Processing reviews {0} to {1}...".format(i-100, i-1)
        batch = clean_test_reviews[i-100:i]
        results = indicoio.sentiment_hq(batch)
        sentiment_lists += results
    if len(sentiment_lists)<len(Xtest):
        print "Processing final reviews {0} to {1}...".format(len(sentiment_lists),len(Xtest))
        batch = clean_test_reviews[len(sentiment_lists):]
        results = indicoio.sentiment_hq(batch)
        sentiment_lists += results
    print "{0} Indico sentiments returned".format(len(sentiment_lists))
    return sentiment_lists
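
The docstring above says the returned sentiment list is meant to be checked against the held-out targets. A minimal follow-up sketch, assuming binary 0/1 labels in ytest and an assumed 0.5 decision threshold; the evaluate_indico helper and the use of scikit-learn are not part of the original code:

from sklearn.metrics import accuracy_score

def evaluate_indico(sentiment_lists, ytest, threshold=0.5):
    # Turn the 0..1 sentiment_hq scores into binary predictions (threshold is an assumption).
    predictions = [1 if score >= threshold else 0 for score in sentiment_lists]
    # Score the predictions against the held-out targets returned by use_indico.
    return accuracy_score(ytest, predictions)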
Code example #2
 def analyze_image(self, gif_url):
     """
     Checks whether there are any recognizable objects in the GIF and, if so,
     runs sentiment analysis on the object names. Returns the average sentiment
     across all recognized objects; falls back to the text sentiment when none are found.
     """
     gif_output = []
     #print(gif_url)
     try:
         gif_output = indicoio.image_recognition(gif_url)
     except ValueError:
         # print('oops')
         return None
     #print(gif_output)
     self.list_objects = [
         name for name, prob in gif_output.items() if prob > .1
     ]
     if len(self.list_objects) == 1:
         self.sentiment_output = indicoio.sentiment_hq(self.list_objects[0])
     elif len(self.list_objects) == 0:
         return self.text_sentiment
     else:
         self.sentiment_output = indicoio.sentiment_hq(self.list_objects)
         self.sentiment_output = np.mean(self.sentiment_output)
     return self.sentiment_output
Code example #3
def sentiment(link, title):
    # r = requests.post('http://text-processing.com/api/sentiment/', data = {'text': title})
    # posnegneutral = json.loads(r.text)['label']

    try:
        link_pos = indicoio.sentiment_hq(link)
        if link_pos > 0.9 or link_pos < 0.2:
            # save an API call
            # - don't need to waste one if page contents already > 0.9 or < 0.2 (pos)
            title_pos = link_pos
        else:
            title_pos = indicoio.sentiment_hq(title)

        avg_pos = (link_pos + title_pos) / 2

        if link_pos > 0.9 or title_pos > 0.9:
            posnegneutral = "pos - {} (max)".format(max(link_pos, title_pos))
        elif avg_pos > 0.8:
            posnegneutral = "pos - {} (avg)".format(avg_pos)
        elif link_pos < 0.2 or title_pos < 0.2:
            posnegneutral = "neg - {} (min)".format(min(link_pos, title_pos))
        elif avg_pos < 0.3:
            posnegneutral = "neg - {} (avg)".format(avg_pos)
        else:
            posnegneutral = "neutral (link_pos={} title_pos={} avg={})".format(
                link_pos, title_pos, avg_pos)

    except:
        e = sys.exc_info()
        posnegneutral = "ERROR Checking Sentiment: {}".format(e)
        link_pos = 0
        title_pos = 0
        avg_pos = 0

    return posnegneutral, link_pos, title_pos, avg_pos
Code example #4
    def test_sentiment_hq(self):
        test_string = "Worst song ever."
        response = sentiment_hq(test_string)

        self.assertTrue(isinstance(response, float))
        self.assertTrue(response < 0.5)

        test_string = "Best song ever."
        response = sentiment_hq(test_string)
        self.assertTrue(isinstance(response, float))
        self.assertTrue(response > 0.5)
Code example #5
    def test_sentiment_hq(self):
        test_string = "Worst song ever."
        response = sentiment_hq(test_string, job_options={"job": True})

        self.assertTrue(isinstance(response, (float, np.float32)))
        self.assertTrue(response < 0.5)

        test_string = "Best song ever."
        response = sentiment_hq(test_string, job_options={"job": True})
        self.assertTrue(isinstance(response, (float, np.float32)))
        self.assertTrue(response > 0.5)
Code example #6
def get_sentiment(statement, lookup_table, save_results=False):
    if save_results:
        if statement in lookup_table.index:
            return lookup_table.loc[statement, 'sentiment_hq'], False
        else:
            print('Result not found in table')
            return indicoio.sentiment_hq(statement), True
    else:
        if statement in lookup_table.index:
            return lookup_table.loc[statement, 'sentiment_hq']
        else:
            print('Result not found in table')
            return indicoio.sentiment_hq(statement)
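
When save_results is True, get_sentiment also returns a flag saying whether the score came from the API rather than the cache. A hedged sketch of how a caller might write fresh results back into the lookup table; the pandas layout and the get_sentiment_cached wrapper are assumptions, not part of the original code:

import pandas as pd

# Hypothetical cache: indexed by statement, with scores stored in a 'sentiment_hq' column.
lookup_table = pd.DataFrame(columns=['sentiment_hq'])

def get_sentiment_cached(statement, lookup_table):
    # The second return value flags whether the score was fetched from the API.
    score, fetched_from_api = get_sentiment(statement, lookup_table, save_results=True)
    if fetched_from_api:
        # Persist the fresh result so the next call for this statement hits the cache.
        lookup_table.loc[statement, 'sentiment_hq'] = score
    return score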
Code example #7
File: twitter_streaming.py Project: OmkarB/pecan
 def on_status(self, status):
     global sentiment_value
     body = status.text
     s = indicoio.sentiment_hq(body)
     s = Decimal(s).quantize(Decimal('.0001'), rounding=ROUND_DOWN)
     sentiments.append(s)
     sentiment_value = (sum(sentiments) / len(sentiments))
Code example #8
def main():
	indicoio.config.api_key = '123273ff84fe220626891873d499ea07'
	indicoio.config.language = 'russian'

	# results:
	#0.94399955814
	#print indicoio.sentiment('хороший кот', language='russian')
	#0.777086528524
	#print indicoio.sentiment('постановление правительство', language='russian')
	print indicoio.sentiment('хороший', language='russian')
	print indicoio.sentiment('правительство', language='russian')
	print indicoio.sentiment('кот', language='russian')

	return

	res = indicoio.sentiment_hq([
		'хороший кот',
		'постановление правительство',
		'состоятельный оказаться',
		'коррупционный правонарушение',
		'конфликт интерес',
		'первое квартал'
	])

	for r in res:
		print r
Code example #9
def main():
    if len(sys.argv) != 3:
        return

    inname = sys.argv[1]
    outname = sys.argv[2]

    with open(inname, mode='r') as inFile:
        tweets = json.load(inFile)
        count = 0

        for tweet in tweets:
            if tweet['lang'] == 'en':
                tweet['positiveness'] = float(
                    indicoio.sentiment_hq(tweet['text']))
            else:
                tweet['positiveness'] = None

            count += 1

            if count % 100 == 0:
                print(count)
                with open(outname, 'w') as outfile:
                    json.dump(tweets, outfile)

        with open(outname, 'w') as outfile:
            json.dump(tweets, outfile)
Code example #10
 def processData(self):
     # Strip '#' and '@' symbols, then any trailing URL, before scoring.
     textNoSym = str(self.text).replace('#', '').replace('@', '')
     textNoSym = re.sub(r'https:.*$', '', textNoSym)
     if textNoSym == '':  # if the tweet is empty once symbols, emojis and URLs are removed, return neutral sentiment
         return 50
     return int(indicoio.sentiment_hq(textNoSym) * 100)
Code example #11
File: analyzer.py Project: ashley/WHack5
    def get_weighted_average_sentiments(self):
        if (len(self.yaks) > 0):
            sentiment_sum = 0.0

            yak_info = [[yak.message, yak.likes] for yak in self.yaks
                        if yak.likes != 10000]

            upvotes = [yak[1] for yak in yak_info]
            sentiments = indicoio.sentiment_hq([yak[0] for yak in yak_info])

            entries = len(upvotes) + sum(
                [math.fabs(upvote) for upvote in upvotes])

            for index in range(len(sentiments)):
                sentiment_sum += sentiments[index] * upvotes[index]

            percent = (sentiment_sum / entries - .4) * 200
            message = "Positive" if percent >= 0 else "Negative"
            style = "color:green;" if percent >= 0 else "color:red;"

            return {
                "percent": "{0:.2f}".format(math.fabs(percent)),
                "message": message,
                "style": style
            }
        return {
            "percent": 100.0,
            "message": "Not Available",
            "style": "color:orange;"
        }
Code example #12
File: texts.py Project: rainiera/text-graph
def indico_batch_sentiment():
    """a ONE-OFF method to call the indico.io API to HQ batch sentiment 18192 texts.

    Kinda shows how badly designed OOP code this is right now for the hackathon, oops.
    Should refactor when there is time.
    """
    with open('sentiments.csv', 'wb') as f:
        texts = []
        writer = csv.writer(f)
        with open('texts/filenames.txt', 'r') as filenames:
            fn_list = map(str.strip, [filename for filename in filenames])
            fn_list = map(lambda x: 'texts/texts/' + x, fn_list)
            for fn in fn_list:
                texts.append(get_texts(fn))  # returns TextMessage object
        texts = [item for sublist in texts for item in sublist]
        with open('indico_sentiment_hq_errors.txt', 'w') as error_log:
            for text in texts:
                sentiment_result = None
                try:
                    sentiment_result = sentiment_hq(text.body.encode(),
                                                    api_key=INDICO_API_KEY)
                except BaseException as e:
                    error_log.write(str(e))
                finally:
                    writer.writerow([
                        unicode(s).encode('utf-8') for s in [
                            text.msg_id, text.posix,
                            repr(text.sent), text.body,
                            repr(text.mentions), sentiment_result
                        ]
                    ])
Code example #13
def transcribe_file(speech_file):
    """Transcribe the given audio file."""
    from google.cloud import speech
    from google.cloud.speech import enums
    from google.cloud.speech import types
    client = speech.SpeechClient()

    with io.open(speech_file, 'rb') as audio_file:
        content = audio_file.read()

    audio = types.RecognitionAudio(content=content)
    config = types.RecognitionConfig(
        encoding=enums.RecognitionConfig.AudioEncoding.LINEAR16,
        sample_rate_hertz=16000,
        language_code='en-US')

    response = client.recognize(config, audio)
    # Each result is for a consecutive portion of the audio. Iterate through
    # them to get the transcripts for the entire audio file.
    y = ""
    for result in response.results:
        # The first alternative is the most likely one for this portion.
        x = format(result.alternatives[0].transcript)

        y = y + x
        #get_text_sentiment()
    sentiment = indicoio.sentiment_hq(y)
    return "Transcript: " + y + " \n Sentiment:" + str(sentiment)
Code example #14
File: texts.py Project: rainiera/text-graph
def indico_batch_sentiment():
    """a ONE-OFF method to call the indico.io API to HQ batch sentiment 18192 texts.

    Kinda shows how badly designed OOP code this is right now for the hackathon, oops.
    Should refactor when there is time.
    """
    with open('sentiments.csv', 'wb') as f:
        texts = []
        writer = csv.writer(f)
        with open('texts/filenames.txt', 'r') as filenames:
            fn_list = map(str.strip, [filename for filename in filenames])
            fn_list = map(lambda x: 'texts/texts/' + x, fn_list)
            for fn in fn_list:
                texts.append(get_texts(fn))  # returns TextMessage object
        texts = [item for sublist in texts for item in sublist]
        with open('indico_sentiment_hq_errors.txt', 'w') as error_log:
            for text in texts:
                sentiment_result = None
                try:
                    sentiment_result = sentiment_hq(text.body.encode(), api_key=INDICO_API_KEY)
                except BaseException as e:
                    error_log.write(str(e))
                finally:
                    writer.writerow([unicode(s).encode('utf-8') for s in
                                     [text.msg_id, text.posix, repr(text.sent),
                                      text.body, repr(text.mentions), sentiment_result]])
Code example #15
File: test.py Project: kaivalyagandhi/inpulsify
def analysis(data):
    sentiment = ind.sentiment_hq(data)
    tags = sort(ind.text_tags(data))
    languages = sort(ind.language(data))
    politics = sort(ind.political(data))
    keywords = sort(ind.keywords(data))
    names = sort(ind.named_entities(data))

    print "Sentiment", sentiment

    print "\n\n\nTags"
    for t in tags:
        print t[0], float(t[1]) * 100

    print "\n\n\nLanguages"
    for l in languages:
        print l[0], float(l[1]) * 100

    print "\n\n\nPolitical"
    for p in politics:
        print p[0], float(p[1]) * 100
    
    print "\n\nkeywords"
    for k in keywords:
        print k[0], float(k[1]) * 100
Code example #16
File: textmining.py Project: kzhang8850/TwitchChat
def sentiments(twitchchat):
	"""
	Prints the sentiment of the chat over the whole duration the program was open.
	"""

	sentimental = indicoio.sentiment_hq(twitchchat)  # uses indico's sentiment analysis to calculate how troll or nice the chat is
	print "This streamer has a sentiment polarity of " + str(sentimental) 
Code example #17
File: app.py Project: usfslk/Radar
def results():

    keyword = str(request.args.get("keyword", None))
    conn = sqlite3.connect(":memory:")
    cursor = conn.cursor()
    cursor.execute(""" 
	CREATE TABLE IF NOT EXISTS main(
		Keyword TEXT,
		Title TEXT,
		Description TEXT,
		URL TEXT,
		IMGLink TEXT,
		Score INTEGER,
		sqltime TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL
	); 
	""")

    resultslist = []
    scorelist = []
    index = 0
    limit = 20

    data = newsapi.get_everything(
        q=keyword,
        sources='crypto-coins-news',
        language='en',
        sort_by='publishedAt',
    )
    load = data['articles']

    for index, post in zip(range(limit), load):
        title = post['title']
        description = post['description']
        url = post['url']
        imglink = post['urlToImage']
        scoredesc = indicoio.sentiment_hq(title)
        calc = (scoredesc * 100)
        score = ("%.2f" % calc)
        scorelist.append(float(score))
        nline = keyword, title, description, url, imglink, score
        cursor.execute(
            'INSERT INTO main (Keyword, Title, Description, URL, IMGLink, Score)  VALUES (?, ?, ?, ?, ?, ?)',
            (nline))
        conn.commit()

    for row in cursor.execute(
            "SELECT Title, Description, URL, IMGLink, Score, sqltime FROM main ORDER BY Score DESC LIMIT 16"
    ):
        resultslist.append(row)

    sumlist = (sum(scorelist))
    length_list = (len(scorelist))
    before = (sumlist / length_list)
    average = float("%.0f" % before) + 10

    return render_template('analysis.html',
                           resultslist=resultslist,
                           keyword=keyword,
                           average=average)
Code example #18
    def test_batched_error_handling(self):
        test_data = ["Terribly interesting test data."] * 100
        test_data[98] = ""
        with self.assertRaises(IndicoError):
            sentiment_hq(test_data, batch_size=20)

        files = glob.glob('indico-sentimenthq-*.json')
        assert len(files)

        for filename in files:
            data = json.load(open(filename, 'r'))

            # first four batches should have returned
            assert len(data) == 80

            # clean up after ourselves
            os.remove(filename)
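
The test above expects each completed batch to have been dumped to an indico-sentimenthq-*.json file before the error is raised. A hedged sketch of how those partial results might be recovered after such a failure; the flat-list file layout is an assumption based on the len(data) == 80 check:

import glob
import json

def load_partial_sentiments(pattern='indico-sentimenthq-*.json'):
    # Gather whatever batches completed before the exception was raised.
    recovered = []
    for filename in glob.glob(pattern):
        with open(filename, 'r') as f:
            recovered.extend(json.load(f))
    return recovered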
Code example #19
def retrieve_indico(tokz):
    indi = indicoio.sentiment_hq(tokz)
    if indi>0.5:
        isent = 'POS'
    elif indi<0.5:
        isent = 'NEG'
    else: isent = 'NEU'
    return isent
Code example #21
    def handle(self, *args, **options):

        tweet = Tweet.objects.get(id=options['tweet_id'][0])

        score = indicoio.sentiment_hq(tweet.text)
        tweet.sentiment_score = score
        tweet.save()
        self.stdout.write(str(score))
        return
Code example #22
File: app.py Project: jmalliaros/hackTheNorth2015
def tweety_search(searchName):
	tweet_list = []
	search = api.GetSearch(term=searchName, lang='en', result_type='recent', count=NUMBER_TWEETS, max_id='')
	for t in search:
		tweet_list.append(t.text.encode('utf-8'))
	score = indicoio.sentiment_hq(tweet_list)
	top_and_bottom(tweet_list, score)
	score = statistics.mean(score)
	return score
Code example #23
File: textmining.py Project: kzhang8850/TwitchChat
def sentiments(twitchchat):
    """
	Prints the sentiment of the chat over the whole duration the program was open.
	"""

    sentimental = indicoio.sentiment_hq(
        twitchchat
    )  # uses indico's sentiment analysis to calculate how troll or nice the chat is
    print "This streamer has a sentiment polarity of " + str(sentimental)
Code example #24
def main():
    configure()
    examples = list(filter(lambda sen: sen != '', get_examples('input.txt')))  # list() so len() and indexing below work on Python 3
    # single examples
    sentiments = indicoio.sentiment_hq(examples)
    poli = indicoio.political(examples)
    for i in range(len(examples)):
        print('============')
        print('{}\n\n{}\n\n{}\n'.format(examples[i], sentiments[i], poli[i]))
        print('============')
Code example #25
File: textmining.py Project: kzhang8850/TwitchChat
def wordsentiment(words):
    """
	returns the sentiment of the inputted phrase
	"""
    if words == '':  #sometimes after filtering the individual phrase has nothing left, this is to account for that
        sentiments = 0
    else:
        sentiments = indicoio.sentiment_hq(
            words)  #using indico's API because pattern's is bad

    return sentiments
Code example #26
File: textmining.py Project: kzhang8850/TwitchChat
def wordsentiment(words):
	"""
	returns the sentiment of the inputted phrase
	"""
	if words == '':         			                  	    #sometimes after filtering the individual phrase has nothing left, this is to account for that
		sentiments = 0
	else:	
		sentiments = indicoio.sentiment_hq(words)				#using indico's API because pattern's is bad


	return sentiments
Code example #27
File: app.py Project: jmalliaros/hackTheNorth2015
def sms():
	response = twiml.Response()
	body = request.form['Body']
	print(body)
	value = indicoio.sentiment_hq(body)
	print(value)
	response.message("You sent: {0}\nSentiment analysis: {1}".format(body, value))
	data = {
		"text": body,
		"value": value
	}
	messages.insert_one(data)
	return str(response)
Code example #28
	def loadRecentPosts(self,recent_tags, api):

		for tag in recent_tags:
			#split the string returned to get users id
			temp, new_id = tag.id.split('_')
			user = api.user(new_id)

			#gets amount of posts user has made
			postCount = user.counts['media']
			#gets the amount of followers the user has
			followers = user.counts['followed_by']
			#gets the amount of people the user is following
			following = user.counts['follows']
			#gets the number of likes of the post
			likes = tag.like_count

			print 'Post Number:', self.numPosts
			print likes, 'likes'
			print "Users Number of Posts:", postCount
			print "Followers:", followers
			print "Following:", following

			# Checks each word in caption to see if it is positive, neutral or negative and
			# puts it into a list then calculates its radius based on number of followers
			if tag.caption is not None:
				print(tag.caption.text)
				sentiment = indicoio.sentiment_hq(tag.caption.text)
				if sentiment >= 0.66:
					self.positivePosts+=1
					self.positiveY.append(sentiment*100)
					self.positiveX.append(self.numPosts%(MAX_COUNT/3))
					self.positiveRadius = self.calculateRadius(self.positiveRadius,followers)
				elif sentiment >= 0.33:
					self.neutralPosts+=1
					self.neutralY.append(sentiment*100)
					self.neutralX.append(self.numPosts%(MAX_COUNT/3))
					self.neutralRadius = self.calculateRadius(self.neutralRadius,followers)
				else:
					self.negativePosts+=1
					self.negativeY.append(sentiment*100)
					self.negativeX.append(self.numPosts%(MAX_COUNT/3))
					self.negativeRadius = self.calculateRadius(self.negativeRadius,followers)
					
			#Use Indico API to calculate image sentiment
			imageUrl = tag.images['low_resolution'].url
			self.imageSentiment.append(indicoio.fer(imageUrl))

			print # separate each post with a new line
			self.numPosts+=1
Code example #29
def spam_filter(msg=input("Enter message = ")):
    msg = TextBlob(msg)
    current_lang = msg.detect_language()
    print("Language of this message is = ", current_lang)
    if current_lang != 'en':
        msg = msg.translate(to='en')  # translate() returns a new TextBlob, so reassign
    else:
        msg = msg.correct()  # correct() also returns a new TextBlob
    X_dtm = vect.fit_transform(X)
    test_dtm = vect.transform([str(msg)])
    model.fit(X_dtm, y)
    result = model.predict(test_dtm)
    prob = model.predict_proba(test_dtm)
    if result == [1]:
        print("SPAM ALERT!")
    else:
        print("HAM")
        predsa = clf.predict(vectsa.transform([str(msg)]))

        if predsa == [1]:
            print("Positive Feeling")

        elif predsa == [0]:
            print("Negative Feeling")
        else:
            print("Can't analyze your feeling locally... trying the API...")
        senti = indicoio.sentiment_hq(str(msg))
        print("Online Help, Positivity of Incoming Message = ", senti)
    p = indicoio.personality(str(msg))
    d = []
    d.append([
        p['agreeableness'], p['conscientiousness'], p['extraversion'],
        p['openness'], msg.sentiment.polarity, msg.sentiment.subjectivity
    ])
    traits = pd.DataFrame(d,
                          columns=[
                              'agreeableness', 'conscientiousness',
                              'extraversion', 'openness', 'polarity',
                              'subjectivity'
                          ])
    print(profanity.contains_profanity(str(msg)), " Profanity")
    print(profanity.censor(str(msg)))
    print("Summarizing this message =", msg.noun_phrases)
    percent = pd.DataFrame(prob, columns=["% HAM", "%SPAM"])
    print(traits)
    print(percent)
Code example #30
def group_msgs(msgs, ts_grouped):
	track_idx = 0
	msgs_grouped = []
	sentiment_mapping = []
	for ts_list in ts_grouped:
		move_up = track_idx + len(ts_list)
		grouping = msgs[track_idx:move_up]
		
		# hits a problem right here...
		single_str = ' '.join(str(elem) for elem in grouping)
		sentiment_rating = indicoio.sentiment_hq(single_str)
		timemap = (ts_list[0].date(), sentiment_rating)
		sentiment_mapping.append(timemap)
		msgs_grouped.append(grouping)
		track_idx = move_up

	return [sentiment_mapping, msgs_grouped]
Code example #31
File: app.py Project: jmalliaros/hackTheNorth2015
def reddit_search(searchName):
	searchName = request.form['searchName']
	r = praw.Reddit(user_agent='sentimenter')
	submissions = r.search(searchName)
	score = []
	commentList = []
	for searchSubmissionInd, submission in enumerate(submissions):
		submission.replace_more_comments(limit=1, threshold=1)
		flat_comments = praw.helpers.flatten_tree(submission.comments)
		for comment in flat_comments:
			commentList.append(comment.body)
		print("Grabbing comments from reddit thread: " + submission.short_link)
		if (searchSubmissionInd +1 == SUBMISSION_SEARCH_LIMIT):
			break
	score = indicoio.sentiment_hq(commentList)
	top_and_bottom(commentList, score)
	score = statistics.mean(score)
	return score
Code example #32
File: app.py Project: yasoob/hackTheNorth2015
def search():
	searchName = request.form['searchName']
	reddit = request.form.get('reddit', default=False, type=bool)
	twitter = request.form.get('twitter', default=False, type=bool)
	r = praw.Reddit(user_agent='sentimenter')
	submissions = r.search(searchName)
	score = []
	commentList = []
	for searchSubmissionInd, submission in enumerate(submissions):
		submission.replace_more_comments(limit=1, threshold=1)
		flat_comments = praw.helpers.flatten_tree(submission.comments)
		for comment in flat_comments:
			commentList.append(comment.body)
		print("Grabbing comments from reddit thread: " + submission.short_link)
		if (searchSubmissionInd +1 == SUBMISSION_SEARCH_LIMIT):
			break
	score = statistics.mean(indicoio.sentiment_hq(commentList))
	return jsonify(searchName = searchName, score = score)
Code example #33
File: analisis.py Project: kevinalh/dakopucp
        def correr():
            indicoio.config.api_key = settings.KEY_INDICOIO

            while (1):
                # At first it was necessary to use Q objects to OR the filters, but that is no longer
                # needed because the code was changed to do sentiment and location analysis in one pass
                # tweets = Tweet.objects.filter(Q(sentimiento=None) | Q(mina=None))
                tweets = Tweet.objects.filter(sentimiento=None)

                for tweet in tqdm(tweets):
                    sent = indicoio.sentiment_hq(tweet.text)
                    tweet.sentimiento = Decimal(sent)
                    lugares = Lugar.objects.all()
                    for lugar in lugares:
                        # Self-critique: this for loop could be avoided by using Django QuerySets, which are much faster.
                        if lugar.nombre in tweet.text:
                            # Obviously, if more than one place matches, the last one wins. Not handled here since this is a hackathon prototype.
                            tweet.mina = lugar
                    tweet.save()

                time.sleep(10)
Code example #34
File: analisis.py Project: kevinalh/dakopucp
        def correr():
            indicoio.config.api_key = settings.KEY_INDICOIO

            while(1):
                # At first it was necessary to use Q objects to OR the filters, but that is no longer
                # needed because the code was changed to do sentiment and location analysis in one pass
                # tweets = Tweet.objects.filter(Q(sentimiento=None) | Q(mina=None))
                tweets = Tweet.objects.filter(sentimiento=None)

                for tweet in  tqdm(tweets):
                    sent = indicoio.sentiment_hq(tweet.text)
                    tweet.sentimiento = Decimal(sent)
                    lugares = Lugar.objects.all()
                    for lugar in  lugares:
                        # Self-critique: this for loop could be avoided by using Django QuerySets, which are much faster.
                        if lugar.nombre in tweet.text:
                            # Obviously, if more than one place matches, the last one wins. Not handled here since this is a hackathon prototype.
                            tweet.mina = lugar
                    tweet.save()
                
                time.sleep(10)
Code example #35
def sentiment():
    """
    Uses indico.io's Sentiment Analysis API to analyse each text in tih-data.json and
    assign it a sentiment value.

    Writes a csv file of the form:
        Item,Category,Year,Sentiment
        X: YYYY/MM/DD,$category,YYYY,sentiment_score

    Where: sentiment_score is an integer between -100 and 100.
           and category has been previously manually assigned in tih-data.json.

    """

    data = read()

    with open('sentiment.csv', 'a') as outfile:

        outfile.write('Item,Category,Year,Sentiment\n')

        for i in range(len(data)):
            # Item in the format X: YYYY/MM/DD
            s1 = "{}{}{}".format(i + 1, ": ", data[str(i)]["date"])
            # Category e.g. Opinion, Advert, etc.
            s2 = data[str(i)]["cat"]
            # Year
            s3 = data[str(i)]["date"][0:4]
            # Sentiment
            s4 = indicoio.sentiment_hq(data[str(i)]["article"]["text"])
            s4 = s4 * 100
            if s4 > 50:
                pass
            elif s4 < 50:
                s4 = s4 - 100
            s4 = int(round(s4, 0))

            s = "{},{},{},{}".format(s1, s2, s3, s4)

            outfile.write("{}{}".format(s, '\n'))
            print("{}    {}".format(s1, s4))
Code example #36
File: mood.py Project: borischu/MoodMusic
def sentiment(text_input):

    #song lists created using radio billboard list and indicoio algorithm on song lyrics

    list1 = ["Sorry", "What do you mean", "Same Old Love", "Here", "Hit The Quan", "Good For You"]
    list2 = ["Focus", "Where Ya At", "Hello", "Locked Away", "Lean On", "Confident"]
    list3 = ["Jumpman", "Trap Queen", "Renegades", "White Iverson", "Shut Up and Dance", "Hotline Bling"]
    list4 = ["Drag Me Down", "Downtown", "Perfect", "Again", "Stitches", "679"] 
    list5 = ["My Way", "Ex's & Oh's", "Antidote", "Like I'm gonna lose you", "Tennessee Whiskey", "See You Again", "Watch Me", "On My Mind", "I'll Show You",
    "Cheerleader", "The Hills", "Uptown Funk!", "Die a Happy Man", "How Deep Is Your Love", "Photograph", "Can't feel my face"]

    indicoio.config.api_key = os.environ.get('INDICO')
    #text_input = raw_input('Tell me about your day: ') #raw_input for python 2.7
    sentiment = indicoio.sentiment_hq(text_input)
    songTitle = ""
    dayDescription = ""
    print(sentiment)
    #0<SUPERSAD<.25<SAD<.45<NEUTRAL<.6<HAPPY<.8<SUPERHAPPY<1
    if sentiment < .15:
        dayDescription = ("awful")
        songTitle = (list1[(int)(random.random()*len(list1))])
    else:
        if sentiment < .35:
            dayDescription = ("bad")
            songTitle = (list2[(int)(random.random()*len(list2))])
        else:
            if sentiment < .65:
                dayDescription = ("ok")
                songTitle = (list3[(int)(random.random()*len(list3))])
            else:
                if sentiment <.9:
                    dayDescription = ("good")
                    songTitle = (list4[(int)(random.random()*len(list4))])
                else:
                    dayDescription = ("awesome")
                    songTitle = (list5[(int)(random.random()*len(list5))])
    values = [dayDescription, songTitle]
    return values
Code example #37
 def get_tweet_sentiment(self, tweet):
     '''
             Utility function to classify sentiment of passed tweet
             using indicoio
             '''
     indicoio.config.api_key = 'e7cf4e703a29b6cdcecec19c5898185e'
     # calculate sentiment score
     myString = self.clean_tweet(tweet)
     if not bool(myString and myString.strip()):
         return 0.5
     try:
         sentiment_score = indicoio.sentiment_hq(myString)
     except:
         print('connection issue with indicoio')
         return 0.5
     if sentiment_score > 0.5:
         sentiment = 'positive'
     elif sentiment_score == 0.5:
         sentiment = 'neutral'
     else:
         sentiment = 'negative'
     print(myString, ",", sentiment)
     return sentiment_score
Code example #38
def send_to_indico():
    '''
    This route handles the server's response when
    you post data to localhost:5000/crunch through
    the form on index.html
    '''

    tweets_csv_string = request.form.get('tweets')
    csv_list = tweets_csv_string.replace('\r', '').splitlines()

    if len(csv_list) > 40:
        csv_list = csv_list[:40]
    print csv_list
    tweet_list = []
    for csv_tweet in csv_list:
        tweet_only = csv_tweet.split(',')[2:]
        tweet_list.append(','.join(tweet_only))

    tweet_list = tweet_list[::-1]

    #tweet_scores = indicoio.batch_sentiment(tweet_list, api_key="428b1c1039ed8d8eaa886ee88044debd")
    tweet_scores = indicoio.sentiment_hq(tweet_list, api_key="428b1c1039ed8d8eaa886ee88044debd")
    return json.dumps({'scores': tweet_scores, 'tweets': tweet_list})  # dumps converts res to a JSON object
Code example #39
def main():
    indicoio.config.api_key = '123273ff84fe220626891873d499ea07'
    indicoio.config.language = 'russian'

    # results:
    #0.94399955814
    #print indicoio.sentiment('хороший кот', language='russian')
    #0.777086528524
    #print indicoio.sentiment('постановление правительство', language='russian')
    print indicoio.sentiment('хороший', language='russian')
    print indicoio.sentiment('правительство', language='russian')
    print indicoio.sentiment('кот', language='russian')

    return

    res = indicoio.sentiment_hq([
        'хороший кот', 'постановление правительство',
        'состоятельный оказаться', 'коррупционный правонарушение',
        'конфликт интерес', 'первое квартал'
    ])

    for r in res:
        print r
Code example #40
def determine_comment_sentiment(submissions, submission_urls, comments_dict):
    sentiment_comments = dict()
    Scounter = 0
    for submission in submissions:
        # reading the lines
        print("Finding sentiment value of " + submission_urls[submission])
        multiLine = comments_dict[str(submission)]
        total = 0
        Scounter = Scounter + 1
        counter = 0
        for line in multiLine:
            if len(line) > 125:
                line = line[0:125]
            total = total + indicoio.sentiment_hq(line)
            counter = counter + 1
        if counter != 0:
            commentSentiment = total / counter
            sentiment_comments[str(submission)] = commentSentiment
        else:
            sentiment_comments[str(submission)] = 0.5

    for key in sentiment_comments:
        print(key, sentiment_comments[key])
    return sentiment_comments
Code example #41
import pandas as pd
import indicoio
import cProfile
from pprint import pprint
import csv

indicoio.config.api_key = "bbc6aca3c1564961ba2f0ee5dc81f32f"
df = pd.read_csv("data/hotel_reviews.csv")
indicoio.sentiment_hq(["indico is so easy to use!", "everything is awesome!"])
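
The snippet above loads a reviews CSV but only scores two hard-coded strings. A possible follow-up, assuming the CSV has a 'text' column (the column name is a guess, not shown in the original):

# Score a small sample of the reviews in one batched call.
sample_texts = df['text'].head(10).tolist()
sample_scores = indicoio.sentiment_hq(sample_texts)
for text, score in zip(sample_texts, sample_scores):
    print(round(score, 3), text[:60])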
Code example #42
register = template.Library()
#song lists created using radio billboard list and indicoio algorithm on song lyrics



list1 = ["Sorry", "What do you mean", "Same Old Love", "Here", "Hit The Quan", "Good For You"]
list2 = ["Focus", "Where Ya At", "Hello", "Locked Away", "Lean On", "Confident"]
list3 = ["Jumpman", "Trap Queen", "Renegades", "White Iverson", "Shut Up and Dance", "Hotline Bling"]
list4 = ["Drag Me Down", "Downtown", "Perfect", "Again", "Stitches", "679"] 
list5 = ["My Way", "Ex's & Oh's", "Antidote", "Like I'm gonna lose you", "Tennessee Whiskey", "See You Again", "Watch Me", "On My Mind", "I'll Show You",
"Cheerleader", "The Hills", "Uptown Funk!", "Die a Happy Man", "How Deep Is Your Love", "Photograph", "Can't feel my face"]

indicoio.config.api_key = 'a894b47f344116b3d32d12de039bf690'
#text_input = input('Tell me about your day: ')
sentiment = indicoio.sentiment_hq(text_input)
songTitle = ""
dayDescription = ""
print(sentiment)
#0<SUPERSAD<.25<SAD<.45<NEUTRAL<.6<HAPPY<.8<SUPERHAPPY<1
if sentiment < .15:
    dayDescription = ("Your day was: AWFUL")
    songTitle = (list1[(int)(random.random()*len(list1))])
else:
    if sentiment < .35:
        dayDescription = ("Your day was: BAD")
        songTitle = (list2[(int)(random.random()*len(list2))])
    else:
        if sentiment < .65:
            dayDescription = ("Your day was: OK")
            songTitle = (list3[(int)(random.random()*len(list3))])
Code example #43
    # print(linedict[submission])

# Reading the lines
sentimentComments = dict()
Scounter = 0
for submission in submissions:
    # reading the lines
    print("Finding sentiment value of " + submissionUrls[submission])
    multiLine = linedict[str(submission)]
    total = 0
    Scounter = Scounter + 1
    counter = 0
    for line in multiLine:
        if len(line) > 125:
            line = line[0:125]
        total = total + indicoio.sentiment_hq(line)
        counter = counter + 1
    if counter != 0:
        commentSentiment = total / counter
        sentimentComments[str(submission)] = commentSentiment
    else:
        sentimentComments[str(submission)] = 0.5

for key in sentimentComments:
    print(key, sentimentComments[key])

f.close()

objectSent = dict()
objectOccur = dict()
objectIDs = dict()
Code example #44
 def analyze_Sentiment_indico(self):
     art = self.cleanText()
     s = ''.join(ch for ch in art)
     sentiment = indicoio.sentiment_hq(s)
     return sentiment
Code example #45
 def test_batch_sentiment_hq(self):
     test_data = ['Worst song ever', 'Best song ever']
     response = sentiment_hq(test_data, api_key=self.api_key)
     self.assertTrue(isinstance(response, list))
     self.assertTrue(response[0] < 0.5)
Code example #46
#import indicoio api configuration
import indicoio
indicoio.config.api_key = '580dc401830e72a98d4834bafe5b7d7c'

#use api to calculate sentiment analysis. sentiment is number 0-1
text_input = raw_input('enter feels here: ') #take user input
sentiment = (indicoio.sentiment_hq(text_input)) #use indico api to calculate input. save value as sentiment


file = open("sentimentnumber*.csv", "w")

file.write(str(sentiment))

file.close()
Code example #47
File: To-Compare.py Project: KaylaNguyen/To-Compare
def crawlTwits(term):
    # get authentication
    api = TwitterAPI('1KxHa1hlPbRdsggvL5yfBgHPY', 'afQVw38uLX3Q1YdILflyG4FjWhjkMzXgSP9ypLee4LM4QIMOea',
                     '2786140432-npYkBKUXdUj3ulYj5f2N7LLN7dVJD6L6KdoyyLi',
                     'qwOdzFwyNfBKcmO6tq2TbOElrDHfd0ooiXNhMy4a7kUMd')
    indicoio.config.api_key = 'e2637d8d80afb64412b3dda3dda64bdd'

    # keep a counter to sum the sentiment score
    scoreSum = 0
    # keep a counter to sum the number of twits
    twitsNum = 0
    # keep a list of keywords
    listKeyWords = ""

    # search twits
    r = api.request('search/tweets', {'q': term})
    for item in r:
        # filter out patterns
        patterns = re.compile(', u\'text\': u\'(.*?)\', u\'is_quote_status\':')
        if patterns is None:
            patterns = re.compile(', u\'text\': u\"(.*?), u\'is_quote_status\':')
        # search for patterns from twits
        text = patterns.search(str(item))
        # if found
        if text:
            # group into a text
            twit = text.group(1)

            # send twit to indico to get sentiment analyzed
            sentimentNum = indicoio.sentiment_hq(twit)
            # sent twit to indico to get keywords
            json_keyWords = indicoio.keywords(twit)
            # go through dict object
            for key, value in json_keyWords.items():
                # if the key is relevant enough
                if value >= 0.2:
                    # add keywords to the list
                    listKeyWords += key + ", "

            # add up score sum
            scoreSum += sentimentNum
            # increment number of twits
            twitsNum += 1

            # Uncomment lines below to debug
            # print(twit)
            # print(sentimentNum)
            # if sentimentNum < 0.3:
            #     print("Negative")
            # elif sentimentNum > 0.7:
            #     print("Positive")
            # else:
            #     print("Neutral")
            # print('\n')

    # compute the average sentiment score
    average = scoreSum / twitsNum
    # get the evaluation
    if average <= 0.2:
        rate = "very negative"
    elif average <= 0.4:
        rate = "slightly negative"
    elif average >= 0.8:
        rate = "very positive"
    elif average >= 0.6:
        rate = "slightly positive"
    else:
        rate = "neutral"
    # string to return
    string = "an average score of " + str(average) + "\nOverall, it is " + str(rate) + "\nKeywords are " + listKeyWords
    return string
Code example #48
File: tweeter.py Project: JVehaun/Tweeter
import indicoio

access_key = "734176927825682433-UPc1vYiJjO0B4Ux6kj3GwRFXgIA22sK"
access_secret = "PpMlpJ412wx87g34msMRdir2yqfaiDTB6P0WqOdrlMxUw"
consumer_key = "jRsDidUota8wzQOin3AmKS312"
consumer_secret = "fRWBT5mK9EXWaZiIJCLdAO9FpbfU71eT4IHuv41OxazTJCeUdC"

indicoio.config.api_key = "070ef4588cd091fb23e3a6c9727097ce"
twitter = Twitter(auth = OAuth(access_key, access_secret, consumer_key, consumer_secret))

query = twitter.search.tweets(q = sys.argv[1], count = 15)
count = 0
total = len(query["statuses"])
ratingsum = 0;
strings = []

for result in query["statuses"]:
        tweet = result["text"].encode(encoding='UTF-8',errors='strict') 
        weight = indicoio.sentiment_hq(tweet)
        print(weight)
        ratingsum += weight
        string = "@%s | https://twitter.com/%s/status/%s | %s\n" % (result["user"]["screen_name"], result["user"]["screen_name"], result["id_str"], tweet)
        if weight > 0.5:
                strings.append(string);
                count = count + 1;
print("The average favorability of tweets, with 1 being the most favorable, is %2f" % (ratingsum/total))
print("%s tweets with positive sentiment found that mention %s\n" % (count, sys.argv[1]))
print("twitter user | tweet link | tweet message")
for index in range(0, count):
        print(strings[index])
Code example #49
File: Color.py Project: RyanMarcus/EdgarAllanPoetry
import indicoio as ind
from sys import stdin
ind.config.api_key = 'b88a14d4a97b56a6ed8f65efee05f9c4'
text = stdin.read()  # read the input text from stdin
print(ind.sentiment_hq(text))
Code example #50
import indicoio
indicoio.config.api_key = ''

import numpy
import matplotlib.pyplot as plt

#read the file line by line
with open(filename) as f:
    lines = f.readlines()


#send request to Indico SA API and store the scores in a list
sent_scores=[]
for l in lines:
    sent_scores.append(indicoio.sentiment_hq(l))
	 
#print "All scores %s:" %sent_scores


#map score ranges to categories(arbitrarily assigned here)
pos_total=0
neutral_total=0
negative_total=0

for score in sent_scores:
    # reuse the scores already collected above instead of calling the API again for each line
    if score < 0.3:
        negative_total += 1
    elif score < 0.7:
        neutral_total += 1
    else:
Code example #51
 def test_batch_size(self):
     test_data = ["Terribly interesting test data."] * 100
     response = sentiment_hq(test_data, batch_size=20)
     self.assertTrue(isinstance(response, list))
     self.assertTrue(all([isinstance(el, (float, np.float32)) for el in response]))
Code example #52
File: Color.py Project: v1vekkumar/EdgarAllanPoetry
import indicoio as ind
from sys import stdin
ind.config.api_key = 'b88a14d4a97b56a6ed8f65efee05f9c4'
happy_thresh = 0.75
sad_thresh = 0.25
text = stdin.read()  # read the input text from stdin
val = ind.sentiment_hq(text)
print(val)
Code example #53
import indicoio
indicoio.config.api_key = '580dc401830e72a98d4834bafe5b7d7c'
text_input = raw_input('enter feels here: ')
print text_input
print(indicoio.sentiment_hq(text_input))