Code Example #1
def mood(user_message):
    for key in user_message:
        sentences = user_message[key]
        # Batch sentences five at a time when there are enough, otherwise two at a time.
        size = 5 if len(sentences) >= 5 else 2
        mod = len(sentences) % size
        for y in range(0, len(sentences) - mod, size):
            text = ".".join(sentences[y:y + size]) + "."
            stats = indicoio.emotion(text)
            print(max(stats, key=stats.get))
        if mod != 0:
            # Analyze whatever is left over at the end of the list.
            text = ".".join(sentences[-mod:]) + "."
            stats = indicoio.emotion(text)
            print(max(stats, key=stats.get))
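
All of these snippets revolve around the same call: indicoio.emotion takes a string (or a list of strings) and returns a dict (or a list of dicts, one per input) mapping the five labels anger, joy, fear, sadness, and surprise to probabilities. A minimal sketch, assuming the indicoio package is installed and using a placeholder API key:

import indicoio

indicoio.config.api_key = "YOUR_API_KEY"  # placeholder, not a real key

scores = indicoio.emotion("I got into grad school!")
print(scores)                       # {'joy': ..., 'anger': ..., 'fear': ..., 'sadness': ..., 'surprise': ...}
print(max(scores, key=scores.get))  # the dominant emotion label

batch = indicoio.emotion(["great news", "terrible news"])  # list in, list of dicts out
print(batch[0]['joy'], batch[1]['sadness'])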
Code Example #2
def printgraph(filename):
    # Extract once, then analyze both halves.
    extracted = text_extract(filename)
    x = collector(indicoio.emotion(extracted[0]))
    y = collector(indicoio.emotion(extracted[1]))
    graph(x, y)


# x = collector(indicoio.emotion((text_extract("WhatsApp.txt"))[0]))
# y = collector(indicoio.emotion((text_extract("WhatsApp.txt"))[1]))

# graph(x,y)
Code Example #3
def sort():
    with open("merged_lyrics_unique.txt", "r") as f:
        for line in f:
            if not line.isspace():
                emotion = indicoio.emotion(line)
                print(line)

                for label in ('anger', 'surprise', 'sadness', 'fear', 'joy'):
                    print(label + ": " + str(emotion.get(label)))

                result = max(emotion, key=emotion.get)
                print("result: " + result)

                # Append the line to a file named after its dominant emotion.
                with open("lyrics_" + result + ".txt", "a") as outfile:
                    outfile.write(line)
Code Example #4
def main():
    if len(sys.argv) != 3:
        return

    inname = sys.argv[1]
    outname = sys.argv[2]

    with open(inname, mode='r') as inFile:
        tweets = json.load(inFile)
        count = 0

        for tweet in tweets:
            result = indicoio.emotion(tweet['text'])
            tweet['anger'] = result['anger']
            tweet['joy'] = result['joy']
            tweet['fear'] = result['fear']
            tweet['sadness'] = result['sadness']
            tweet['surprise'] = result['surprise']

            count += 1

            if count % 100 == 0:
                print(count)
                with open(outname, 'w') as outfile:
                    json.dump(tweets, outfile)

        with open(outname, 'w') as outfile:
            json.dump(tweets, outfile)
Code Example #5
def emotion_count(username):
    history = []

    count = {'anger': 0., 'joy': 0., 'sadness': 0., 'fear': 0., 'surprise': 0.}

    user_tweets = api.user_timeline(username, count=50)
    counter = 0
    for tweet in user_tweets:
        print(tweet.text)
        print(tweet.retweeted)
        if 'http' in tweet.text:
            continue
        emotions = indicoio.emotion(tweet.text)
        for key in count:
            count[key] += emotions[key]
        history.append({'index': counter, 'sadness': emotions['sadness']})
        # history.append({'date': tweet.created_at, 'sadness': emotions['sadness']})
        # print(indicoio.emotion(tweet.text))
        print()
        counter += 1
    print(count)

    print(history)
    df = pd.DataFrame(history)

    print(df)
    ax = sns.regplot(x="index", y="sadness", data=df)
    plt.show()
Code Example #6
File: mood.py Project: Tinkaa/cira
def analyzePosts(user):
    global moods, counter, last_post
    profile = requests.get(
        'https://api.vk.com/method/wall.get?owner_id=%s&access_token=30691f8630691f8630691f8607300e355a3306930691f866b917e8e8459c4931e49642f&v=5.92'
        % user).json()

    try:
        for post in profile['response']['items'][::-1]:
            # skip posts that have already been analyzed
            if last_post[user] >= post['date']:
                continue
            last_post[user] = post['date']

            # get probabilities of emotions
            counter[user] += 1
            emotions = indicoio.emotion(post['text'])

            # update mood average
            for emo in emotions:
                updateMood(user, emo, emotions[emo])
    except Exception:
        # profile could not be analyzed
        print("error!!")
        return
Code Example #7
def get_emotion_results(tweet_text_array):
    parsed_sentiment = []
    # top_n=1 keeps only the single most likely emotion for each tweet
    raw_results = indicoio.emotion(tweet_text_array, top_n=1)
    for result in raw_results:
        field, value = next(iter(result.items()))
        parsed_sentiment.append(field)
    return parsed_sentiment
Code Example #8
def trendtone(request, query):
    print(query)
    print("trendtone")
    tweets = []
    emotion = {'fear': 0, 'sadness': 0, 'joy': 0, 'anger': 0, 'surprise': 0}
    for tweet in tweepy.Cursor(api.search, q=query + ' -RT',
                               lang="en").items(100):

        jtweet = {}
        jtweet['created_at'] = tweet._json['created_at']
        jtweet['text'] = tweet._json['text']

        ctweet = clean_tweet(jtweet['text'])
        if len(ctweet) == 0:
            # Nothing left to analyze after cleaning; skip this tweet.
            continue
        emotions = indicoio.emotion(ctweet)

        emotion['fear'] += emotions['fear']
        emotion['surprise'] += emotions['surprise']
        emotion['sadness'] += emotions['sadness']
        emotion['anger'] += emotions['anger']
        emotion['joy'] += emotions['joy']

        jtweet['location'] = tweet._json['user']['location']

        tweets.append(jtweet)

    print(len(tweets))
    return JsonResponse({'tweets': tweets, 'emotion': emotion})
Code Example #9
File: emotions.py Project: rkarjadi/HappyTweet-1
def most_likely_emotion(tweet):
    """
    Predict the most likely emotion.
    :param tweet: A string of text
    :return: A string indicating the tweet's dominant emotion
    """
    test = indicoio.emotion(tweet)
    return max(test, key=test.get)
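
A hypothetical call, assuming the indicoio API key is already configured:

print(most_likely_emotion("I can't believe we actually won the finals!"))
# prints a single label, e.g. 'joy' or 'surprise'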
Code Example #10
def rate(lyrics):
    if isinstance(lyrics, str):
        lyrics = lyrics.split('\n')
    lyricsEmotion = []
    for line in lyrics:
        lyricsEmotion.append(indicoio.emotion(line))
    return lyricsEmotion
Code Example #11
def test_batch_emotion(self):
    test_data = [
        "I did it. I got into Grad School. Not just any program, but a GREAT program. :-)"
    ]
    response = emotion(test_data)
    self.assertIsInstance(response, list)
    self.assertIsInstance(response[0], dict)
    self.assertIn('joy', response[0].keys())
Code Example #12
def get_emotion(tweet_batch):
    emotions_list = []
    print(tweet_batch)
    response_list = indicoio.emotion(tweet_batch)
    for r in response_list:
        # The highest-scoring key is the predicted emotion.
        emotions_list.append(max(r, key=r.get))
    return emotions_list
Code Example #13
File: reddit.py Project: harhur/Passion-Board
def fetch_posts(subreddit, target_emotion):
    submissions = {}
    return_data = {}
    return_data['submissions'] = []

    if target_emotion == "joy":
        subreddit = "happy"
    elif target_emotion == "sadness":
        subreddit = "worldnews"

    # Connect to Reddit via PRAW
    reddit = praw.Reddit(
        client_id='Us-byLFTjQmSJQ',
        client_secret=config.reddit_client_secret,
        user_agent='python:com.hackharvard.sentient-dashboard:v1.0')

    # Fetch the hottest MAX_POSTS posts from the chosen subreddit
    for submission in reddit.subreddit(subreddit).hot(limit=MAX_POSTS):
        # Run emotion analysis on the title, then on up to MAX_COMMENTS comments
        emotion_sum = Counter(indicoio.emotion(submission.title))
        analyzed = 1  # the title counts as one analyzed text
        for counter, comment in enumerate(submission.comments):
            if counter >= MAX_COMMENTS:
                break
            # Ignore removed comments and comments containing links
            if comment.body != "[removed]" and "http" not in comment.body:
                emotion_sum.update(Counter(indicoio.emotion(comment.body)))
                analyzed += 1
        # Average the emotion values over everything actually analyzed
        for emotion in emotion_sum:
            emotion_sum[emotion] /= analyzed
        submissions[submission.title] = [emotion_sum, submission.shortlink]

    # Cut out all emotions except the chosen one
    for sub in submissions:
        submissions[sub][0] = submissions[sub][0][target_emotion]

    # Construct JSON to return, highest score first
    for post in sorted(submissions, key=submissions.get, reverse=True):
        return_data['submissions'].append({
            'title': post,
            'score': submissions[post][0],
            'shortlink': submissions[post][1]
        })
    return json.dumps(return_data)
Code Example #14
def analyze_tweets_emotions(self):
    try:
        self.emotions_stats = Factor(
            indicoio.emotion(
                self.person.all_text_as_one().content).items(),
            'Emotions stats')
        self.plotter.add_factor(self.emotions_stats)
    except IndicoError:
        raise PersonAnalyzerException(
            'Error while fetching data from indicoio')
Code Example #15
    def sentimentAnalyzerUsingIndicoio(self, twts):
        print('loading....')
        sentDim = {
            'anger': 'Alert',
            'joy': 'Happy',
            'sadness': 'Sad',
            'fear': 'Fear',
            'surprise': 'Surprise'
        }

        sentiment4OneDay4OneCom = []
        sentimentAlert = []
        sentimentSad = []
        sentimentHappy = []
        sentimentSurprise = []

        if len(twts) == 0:
            return [0.5, 0.5, 0.5, 0.5]
        for sentence in twts:
            sentence = html.unescape(sentence)  # unescape HTML entities (stdlib 'html' module)
            sentence = re.sub(r"http\S+", "", sentence)  # remove normal URLs
            sentence = re.sub(r"pic\.twitter\.com/\S+", "",
                              sentence)  # remove pic.twitter.com URLs
            ss = indicoio.emotion(sentence)
            for k in ss:
                if sentDim[k] == 'Alert':
                    sentimentAlert.append(ss[k])
                if sentDim[k] == 'Happy':
                    sentimentHappy.append(ss[k])
                if sentDim[k] == 'Sad':
                    sentimentSad.append(ss[k])
                if sentDim[k] == 'Surprise':
                    sentimentSurprise.append(ss[k])
        print('\nalert happy surprise sad')
        sentiment4OneDay4OneCom.append(
            sum(sentimentAlert) / float(len(sentimentAlert)))
        sentiment4OneDay4OneCom.append(
            sum(sentimentHappy) / float(len(sentimentHappy)))
        sentiment4OneDay4OneCom.append(
            sum(sentimentSurprise) / float(len(sentimentSurprise)))
        sentiment4OneDay4OneCom.append(
            sum(sentimentSad) / float(len(sentimentSad)))
        print(sentiment4OneDay4OneCom, self.todayDate)
        return sentiment4OneDay4OneCom
Code Example #16
File: views.py Project: DannyKong12/yhack17
def voice(request, url):
    r = sr.Recognizer()
    r.energy_threshold = 4000
    url = url.replace('$', '/')  # '$' encodes '/' in the URL parameter
    string = ""

    with sr.AudioFile(urlopen(url)) as source:
        source = r.record(source)
        try:
            string = r.recognize_google(source,
                                        language="en-US",
                                        show_all=False)
        except Exception:
            pass

    abcd = "you ever worked in a creative Department Sun. no sir. ever written the great ad. no sir. never put your ideas another man's hands ask him to put his idea. senores. rewrite add son. we write ads or people die. it's that simple. are we clear. yes sir. are we clear. Crystal. you want Brady. man with big ideas. you or you client service director. responsibilities that you can possibly fathom. You Weep For bigger loads. you have the luxury of not knowing what I know. doesn't sound product. bike repair stand. you don't want the truth because deep down in places you don't talk about it. you need me right ads. we use words like inside big ideas. the time nor the inclination to explain myself to a man who. sleeps under the blanket. otherwise I suggest you pick up the kids. what you think you are entitled to. did you send out an ad without showing the account people. you snotty nose little suits."

    # Fall back to the canned transcript for the "good_men.wav" demo clip.
    sentences = sent_tokenize(
        abcd) if "good_men.wav" in url else sent_tokenize(string)

    emotiondict = indicoio.emotion(sentences)

    # Reshape the per-sentence dicts into one list of scores per emotion.
    d = {}
    for k in emotiondict[0]:
        d[k] = [scores[k] for scores in emotiondict]

    df = pd.DataFrame(d, columns=d.keys())

    mat = np.matrix(df.values) * 10

    softmat = softmax(mat).round(4)

    numlst = [[np.argmax(i), i[np.argmax(i)]] if i[np.argmax(i)] >= 0.60 else 5
              for i in softmat]
    emotions = ["Anger", "Joy", "Fear", "Sadness", "Surprise", "Neutral"]
    emojis = []
    for i in range(len(numlst)):
        try:
            a, b = emotions[numlst[i][0]], numlst[i][1]
        except Exception as e:
            a, b = emotions[5], random.uniform(0.3, 0.35)
        try:
            c = tone_analyzer.tone(sentences[i],
                                   sentences='false',
                                   content_type='text/plain'
                                   )['document_tone']['tones'][1]['tone_name']
        except (TypeError, IndexError) as e:
            c = "Tentative"
        emojis.append((a, b, c))

    return HttpResponse(str(emojis))
Code Example #17
def route_depr_results(answers):

    d = json.loads(answers)

    # batch example
    output = []
    for answer in d:
        output.append(answer.split(":")[-1])
    print(output)
    result = indicoio.emotion(output)
    emotions = []
    surprise = 0
    sadness = 0
    joy = 0
    fear = 0
    anger = 0
    combined_emotion = []
    for i in result:
        happy = i['surprise'] + i['joy']
        sad = i['fear'] + i['anger'] + i['sadness']
        surprise = i['surprise'] + surprise
        sadness = i['sadness'] + sadness
        joy = i['joy'] + joy
        fear = i['fear'] + fear
        anger = i['anger'] + anger

        emotions.append({'happy': happy, "sad": sad})
    print(emotions)
    combined_emotion.append({
        'joy': joy / len(result),
        'sadness': sadness / len(result),
        'fear': fear / len(result),
        'anger': anger / len(result),
        'surprise': surprise / len(result)
    })

    happy = 0
    sad = 0
    verdict = {}
    for i in emotions:
        happy = i['happy'] + happy
        sad = i['sad'] + sad
    verdict['happy'] = happy / len(emotions)
    verdict['sad'] = sad / len(emotions)

    print(verdict)
    print(combined_emotion)

    #output.append(thoughts)

    return render_template("result.html",
                           output=verdict,
                           emotion=combined_emotion)
Code Example #18
File: emotions.py Project: rkarjadi/HappyTweet-1
def get_all_emotions(tweet):
    """
    Wrapper around the indico.io API.
    :param tweet: A string of text
    :return: A dict of emotion scores for that tweet, or None on error
    """
    try:
        return indicoio.emotion(tweet)
    except Exception as exc:
        print("Something wrong happened here: " + repr(exc))
        return None
Code Example #19
File: sentimenttask.py Project: xiukei/TheDeadlySins
def anger_analyser(data_dic):

    # Access the text; truncated tweets keep the full text in a nested field.
    if data_dic['truncated'] is True:
        text = data_dic['extended_tweet']['full_text']
    else:
        text = data_dic['text']

    # Access the API, rotating to the next key if the current one fails.
    try:
        res = indicoio.emotion(text, api_key=KEY_LIST[0])
    except Exception:
        KEY_LIST.pop(0)
        res = indicoio.emotion(text, api_key=KEY_LIST[0])

    # Record the anger probability and whether anger dominates every other emotion.
    data_dic['ANGER_PROB'] = res['anger']
    data_dic['ANGER'] = all(res['anger'] > res[k]
                            for k in ('joy', 'fear', 'surprise', 'sadness'))
Code Example #20
def analyze_text(text_tweets):
    personality = indicoio.personality(text_tweets)
    emotion = indicoio.emotion(text_tweets)

    personality_scores_list = list(personality)
    emotion_scores_list = list(emotion)

    return personality_scores_list, emotion_scores_list
Code Example #21
def getSongEmotions():
    indicoio.config.api_key = '781cddf05f6cb0d88449272c8c7768eb'
    filename = request.form['song_name'].strip().lower()
    if not filename.endswith(".txt"):
        filename += ".txt"
    filename = filename.replace(" ", "_")
    with open('../Songs/' + filename, 'r') as f:
        filename_data = f.read()
    text = filename_data.strip()
    emotion_dict = indicoio.emotion(text)

    return_dict = dict()
    return_dict['data'] = [[emotion, score] for emotion, score in emotion_dict.items()]
    return jsonify(chart=return_dict)
Code Example #22
def get_emotions(self):
    emotion_scores = [0, 0, 0, 0, 0]
    emotion_dict = indicoio.emotion(self.tweet_text)
    for key, value in emotion_dict.items():
        if key == 'anger':
            emotion_scores[0] = value
        elif key == 'joy':
            emotion_scores[1] = value
        elif key == 'fear':
            emotion_scores[2] = value
        elif key == 'sadness':
            emotion_scores[3] = value
        elif key == 'surprise':
            emotion_scores[4] = value
    return emotion_scores
Code Example #23
File: jon.py Project: breedy231/spillit
def q1():
    user_input = input("My idea of a fun friday night is ___")
    print("Your input: " + str(user_input))
    emotion = indicoio.emotion(user_input)
    personality = indicoio.personality(user_input)
    personas = indicoio.personas(user_input)

    pprint(emotion)
    e_max = max(emotion, key=emotion.get)
    personas_max = max(personas, key=personas.get)
    personality_max = max(personality, key=personality.get)

    print("Congratulations, your emotion is " + str(e_max) +
          ", your personality is " + str(personality_max) +
          ", and your persona is " + str(personas_max))
Code Example #24
def sentEmotion(sentence):

    # Determine which emotion(s) is/are most represented in the text
    indicoio.config.api_key = '06ca2da07a6fd7c7746f1d4c202bdc5a'
    emoDict = indicoio.emotion(sentence)
    emoDict["neutral"] = emoDict["surprise"]
    # Determine if the overall emotion is neutral or not
    # if max(emoDict.values()) - min(emoDict.values()) > 0.1:
    #     neutralEmoDict = {}
    #     neutralEmoDict["neutral"] = emoDict["surprise"] + 0.85*emoDict["fear"] + 0.85*emoDict["anger"] + 0.85*emoDict["joy"] + 0.85*emoDict["sadness"]
    #     neutralEmoDict["fear"] = 0.15*emoDict["fear"]
    #     neutralEmoDict["anger"] = 0.15*emoDict["anger"]
    #     neutralEmoDict["joy"] = 0.15*emoDict["joy"]
    #     neutralEmoDict["sadness"] = 0.15*emoDict["sadness"]
    #     return neutralEmoDict
    return emoDict
Code Example #25
def classification(self):
    try:
        with open('Csv By Days/' + str(self.start_date) + '.csv',
                  newline='',
                  encoding="ISO-8859-1") as tweetsFile:
            reader = list(csv.reader(tweetsFile))
    except Exception:
        traceback.print_exc()
        return -1
    dailyDictionary = {
        'Date': '0',
        'anger': 0,
        'sadness': 0,
        'fear': 0,
        'joy': 0,
        'surprise': 0
    }
    i = 0
    dailyDictionary['Date'] = self.start_date.strftime('%m/%d/%Y')
    reader.pop(0)  # drop the header row
    for row in reader:
        try:
            row[1] = regex.sub('', row[1])
            emotions = indicoio.emotion([row[1]])
            dailyDictionary['joy'] += emotions[0]['joy']
            dailyDictionary['anger'] += emotions[0]['anger']
            dailyDictionary['sadness'] += emotions[0]['sadness']
            dailyDictionary['fear'] += emotions[0]['fear']
            dailyDictionary['surprise'] += emotions[0]['surprise']
            i += 1
            if i == 1000:  # cap the number of analyzed tweets
                break
        except Exception:
            traceback.print_exc()
            print(type(row[1]))
            print(i)
            continue
    self.daily_p_mood = dailyDictionary
    return 0
Code Example #26
def measure_emotion_batch(documents):
    max_sad = 0
    sad_id = ""
    for doc in documents:
        try:
            doc['emotions'] = indicoio.emotion(doc['text'])
            if max_sad < doc['emotions']['sadness']:
                max_sad = doc['emotions']['sadness']
                sad_id = doc['id']
        except IndicoError:
            doc['emotions'] = {
                'anger': 0.,
                'joy': 0.,
                'sadness': 0.,
                'fear': 0.,
                'surprise': 0.
            }

    return documents, sad_id
Code Example #27
File: queries.py Project: breedy231/spillit
def process_response(response):
    print(str(response))
    questionType = response.question_id
    print("processing a " + str(questionType))
    result = {}
    user_input = response.response_text
    if questionType == "Personality":
        result = indicoio.personality(user_input)
    elif questionType == "Emotion":
        result = indicoio.emotion(user_input)
    elif questionType == "Persona":
        result = indicoio.personas(user_input)
    pprint(result)
    max_result = max(result, key=result.get)
    print("max response: " + max_result)
    return max_result
Code Example #28
def sentAnalysis(sentence, sentNum, entireText):

    # Determine which emotion(s) is/are most represented in the text
    emoDict = indicoio.emotion(sentence)
    sortedVals = sorted(emoDict.values(), reverse=True)
    print(emoDict)
    isNeutral = True

    # Determine if the overall emotion is neutral or not
    if max(sortedVals) - min(sortedVals) > 0.1:
        isNeutral = False
        stdDev = numpy.std(sortedVals)
        # Collect every emotion within 1.5 standard deviations of the top score
        emotions = []
        for percentage in sortedVals:
            if percentage > abs(max(sortedVals) - 1.5 * stdDev):
                emotions += [
                    key for key, val in emoDict.items() if val == percentage
                ]

    return emoDict
Code Example #29
def get_tweets():
    index = 0
    happy_tweets = {}

    auth = OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_secret)

    api = tweepy.API(auth)

    # searches count num of tweets containing string q
    search_results = api.search(q=search_term, count=30)

    print("Showing tweets with keyphrase...")

    # uses indico's emotion analyzer to keep only "happy" tweets
    for tweet in search_results:
        if indicoio.emotion(tweet.text).get('joy') > 0.55:
            index += 1
            happy_tweets[index] = tweet.text

    return json.dumps(happy_tweets)
Code Example #30
    def recognizeEmotionText(self, speech_text):
        """Recognize emotion in transcript of user speech.

        Currently using the Indico.io Text Analysis API for Emotion
        Classification.

        Arguments:
            speech_text {string} -- Transcript of what the user said.

        Returns:
            string -- 'greeting' for greetings, the most likely emotion
            label, or None if no emotion clears the threshold.
        """
        greetings = ['hey', 'hi', 'hello', 'how are you']
        lowered = speech_text.lower()
        # Single-word greetings must match a whole token; phrases match as substrings.
        if any((' ' in g and g in lowered) or g in lowered.split()
               for g in greetings):
            time.sleep(1)
            return 'greeting'
        # threshold=0.4 drops emotions scoring below 0.4, so the result can be empty.
        textPredictions = indicoio.emotion(speech_text, threshold=0.4)
        print(textPredictions)
        if textPredictions:
            return max(textPredictions, key=textPredictions.get)
        else:
            return None
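
A hypothetical call, assuming the surrounding class is instantiated as assistant (an illustrative name, not from the source):

label = assistant.recognizeEmotionText("I am absolutely thrilled about this news")
print(label)  # 'greeting', an emotion label such as 'joy', or None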
Code Example #31
File: userDataClass.py Project: Advait-M/echacks
def askInfo(self, request, dictOrString):
    if request == "mood":
        tempDict = indicoio.emotion(self.opinionString,
                                    api_key=config["indico_key"])
    elif request == "party":
        tempDict = indicoio.political(self.opinionString,
                                      api_key=config["indico_key"])
    else:
        warnings.warn("invalid request", UserWarning)
        return None
    if dictOrString == "dictionary":
        return tempDict
    # Otherwise return the label with the highest probability.
    return max(tempDict, key=tempDict.get)
Code Example #32
File: insights.py Project: N2ITN/Reddit_Persona
def execute(USERNAME, target, refresh):

    r_data = io_helper.read_raw(USERNAME, target)

    og = sys.stdout
    fpath = io_helper.out_path(USERNAME, target)

    def analysis(raw='', limit=5, text='', percent=True):
        global meta_dict
        # print lines if input is a list of non-dicts
        # if input is list of dicts, merge dicts and resend to analysis
        if isinstance(raw, list):
            for item in raw:
                if not isinstance(item, dict):
                    print(item)
                else:
                    create_meta_dict(item)
            analysis(meta_dict, limit, text, percent)

        # if input is dict: print k, v pairs
        # optional args for return limit and description text
        if isinstance(raw, dict):
            print(text)
            ct = 0
            for v in sorted(raw, key=raw.get, reverse=True):
                ct += 1
                if ct > limit: break
                if isinstance(raw[v], float):
                    if percent: per = r'%'
                    else: per = ''
                    print("    " + v, str(round(raw[v] * 100, 2)) + per)
                else:
                    print(v, raw[v])
            print()

    def create_meta_dict(item):
        # merge list of dicts into master dict
        global meta_dict
        meta_dict[item['text']] = item['confidence']
        return meta_dict

    rClean = ''
    for i in range(len(r_data)):
        if r_data[i - 1] == '\\':
            rClean = rClean[:-1]
            if r_data[i] != "'":
                continue

        if r_data[i] == '*':
            rClean += ' '
        else:
            rClean += r_data[i]

    r_data = rClean
    del rClean
    indicoio.config.api_key = keycheck.get_key()

    # Big 5
    big5 = {'text': "Big 5 personality inventory matches: ", "payload": indicoio.personality(r_data)}

    # Meyers briggs
    mbtiLabels = indicoio.personas(r_data)
    mbti_dict = {
        'architect': 'intj',
        'logician': 'intp',
        'commander': 'entj',
        'debater': 'entp',
        'advocate': 'infj',
        'mediator': 'infp',
        'protagonist': 'enfj',
        'campaigner': 'enfp',
        'logistician': 'istj',
        'defender': 'isfj',
        'executive': 'estj',
        'consul': 'esfj',
        'virtuoso': 'istp',
        'adventurer': 'isfp',
        'entrepreneur': 'estp',
        'entertainer': 'esfp'
    }

    def replace_mbti():
        for k in mbtiLabels:
            yield mbti_dict[k]

    k = list(replace_mbti())
    v = mbtiLabels.values()
    payload = dict(zip(k, v))

    mbti = {'text': "Most likely personality styles: ", "payload": payload, 'ct': 5, 'percent': True}

    # Political
    pol = {'text': "Political alignments: ", "payload": indicoio.political(r_data, version=1)}
    # Sentiment
    sen = {'text': "Sentiment: ", "payload": {'Percent positive': indicoio.sentiment(r_data)}, 'ct': 3}

    # Emotion 
    emo = {'text': "Predominant emotions:", "payload": indicoio.emotion(r_data), 'ct': 5}

    # Keywords
    kw = {'text': "Keywords: ", "payload": indicoio.keywords(r_data), 'ct': 5}
    # Text tags
    tt = {'text': "Text tags: ", "payload": indicoio.text_tags(r_data), 'ct': 10}
    # Place
    pla = {
        'text': "Key locations: ",
        'payload': indicoio.places(r_data, version=2),
        'ct': 3,
        'percent': True
    }

    def Karma(USERNAME):
        import praw
        import collections
        kList = []
        user_agent = ("N2ITN")
        r = praw.Reddit(user_agent=user_agent)
        thing_limit = 100

        user = r.get_redditor(USERNAME)
        gen = user.get_submitted(limit=thing_limit)
        karma_by_subreddit = {}
        for thing in gen:
            subreddit = thing.subreddit.display_name
            karma_by_subreddit[subreddit] = (karma_by_subreddit.get(subreddit, 0) + thing.score)

        for w in sorted(karma_by_subreddit, key=karma_by_subreddit.get, reverse=True):
            kList.append(str(w) + ': ' + str(karma_by_subreddit[w]))
        kList.insert(0, 'Karma by Sub')

        print("\n\t".join(kList[:10]))

    def show(results):
        # Accepts bag of dicts, or single dict
        if not isinstance(results, dict):
            for X in results:
                show(X)
        else:
            if results == pla and pla['payload'] == []:
                print("Not enough information to infer place of origin")
                print()
            else:

                i = results
                analysis(
                    raw=i.get('payload', ''),
                    limit=i.get('ct', 5),
                    text=i.get('text', ''),
                    percent=i.get('percent', True)
                )

    with open(fpath, 'w') as outtie:
        sys.stdout = outtie
        print(target + USERNAME)
        print()
        show([kw, pla, big5, emo, sen, pol, mbti, tt])
        # Karma(USERNAME)

        sys.stdout = og
    return
Code Example #33
    def test_emotion(self):
        data = "I did it. I got into Grad School. Not just any program, but a GREAT program. :-)"
        response = emotion(data)

        self.assertTrue(isinstance(response, dict))
        self.assertIsInstance(response["joy"], float)
Code Example #34
def test_batch_emotion(self):
    test_data = ["I did it. I got into Grad School. Not just any program, but a GREAT program. :-)"]
    response = emotion(test_data)
    self.assertTrue(isinstance(response, list))
    self.assertTrue(isinstance(response[0], dict))
    self.assertIn('joy', response[0].keys())
Code Example #35
def _get_sheet(book, args):
    # Snippet begins mid-function in the original source; only the tail survives.
    return book.sheet_by_name(args.sheet)

def parse_from_xlsx(args, batch_size=20):
    book = xlrd.open_workbook(args.filename)
    sheet = _get_sheet(book, args)
    data = sheet.col(args.column or 0)
    for idx in range(1 if args.contains_header else 0, len(data), batch_size):
        yield [cell.value for cell in data[idx:idx + batch_size]]

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("filename", type=str, help="path to excel file")
    parser.add_argument("--sheet", type=str, help="sheet name")
    parser.add_argument("--sheet-number", type=int, help="sheet index from 1")
    parser.add_argument("--column", type=int, help="column index from 1")
    parser.add_argument("--contains-header", action="store_true", help="use if columns have headers")

    args = parser.parse_args()
    with open("predictions.csv", "w", newline="") as f:
        writer = csv.writer(f, dialect="excel")
        for lines in parse_from_xlsx(args):
            rows = [row for row in enumerate(lines) if row[1].strip()]
            if not rows:
                continue
            not_empty, inputs = zip(*rows)
            predictions = indicoio.emotion(list(inputs))
            # Align each prediction with its original row; empty cells get "".
            output = [str(predictions.pop(0)) if idx in not_empty else ""
                      for idx in range(len(lines))]
            writer.writerows(zip(lines, output))

    print("Analysis complete, CSV file generated.")