Example #1
0
    def test_personas(self):
        """personas() on a single string returns a non-empty dict of float scores."""
        test_string = "I love my friends!"
        response = personas(test_string)

        # assertIsInstance gives a clearer failure message than
        # assertTrue(isinstance(...)) and matches the assertion below.
        self.assertIsInstance(response, dict)
        self.assertIsInstance(response["commander"], float)
        # at least one persona score must be present
        self.assertTrue(response)
Example #2
0
    def test_personas(self):
        """personas() on a single string returns a non-empty dict of float scores."""
        test_string = "I love my friends!"
        response = personas(test_string)

        # assertIsInstance gives a clearer failure message than
        # assertTrue(isinstance(...)) and matches the assertion below.
        self.assertIsInstance(response, dict)
        self.assertIsInstance(response["commander"], float)
        # at least one persona score must be present
        self.assertTrue(response)
Example #3
0
def indico(request):
    """Django view: classify POSTed text into one of the four MBTI groups.

    Reads raw text from ``request.POST['data']``, scores it with the
    indico.io personas API, sums the 16 persona scores into the four
    Myers-Briggs groups, and returns the name of the highest-scoring
    group as the HTTP response body.
    """
    data = request.POST.get('data')
    print (data)
    # SECURITY: hard-coded API key committed to source -- move to
    # settings/environment configuration.
    indicoio.config.api_key = '584f26a05ca718f8387fcc7ed6d845f4'

    # single example
    scores = indicoio.personas(data)

    # Sum the 16 personas into the four canonical MBTI groups.  Keeping
    # (label, total) pairs avoids the original bug-prone pattern of
    # overwriting `result` with a label string and then comparing that
    # string against the remaining float totals.
    groups = [
        ('analyst',
         scores['commander'] + scores['debater'] + scores['logician'] + scores['architect']),
        ('diplomat',
         scores['mediator'] + scores['protagonist'] + scores['advocate'] + scores['campaigner']),
        ('explorer',
         scores['virtuoso'] + scores['entrepreneur'] + scores['entertainer'] + scores['adventurer']),
        ('sentinel',
         scores['consul'] + scores['logistician'] + scores['executive'] + scores['defender']),
    ]
    # max() returns the first maximal pair, preserving the original
    # tie-break order (analyst > diplomat > explorer > sentinel).
    label, _ = max(groups, key=lambda pair: pair[1])

    return HttpResponse(label)
def main():
    """Annotate tweets from a JSON file with indico.io persona scores.

    Usage: script <input.json> <output.json>.  Tweets that already carry
    an 'entertainer' key (annotated on a previous run) are skipped; the
    augmented list is written to the output path.
    """
    if len(sys.argv) != 3:
        return

    source_path = sys.argv[1]
    dest_path = sys.argv[2]

    with open(source_path, mode='r') as source:
        tweets = json.load(source)

        for processed, tweet in enumerate(tweets, start=1):
            # progress marker every 100 tweets
            if processed % 100 == 0:
                print(processed)

            # already annotated -- skip the API call
            if 'entertainer' in tweet:
                continue

            # merge the persona scores into the tweet record
            tweet.update(indicoio.personas(tweet['text']))

        with open(dest_path, 'w') as dest:
            json.dump(tweets, dest)
Example #5
0
def my_form_post():
    """Flask handler: render the zodiac template matching submitted text.

    Empty input re-renders the index page; otherwise the text is scored
    by indico.io, the strongest Myers-Briggs persona is selected, and the
    matching zodiac template is rendered.
    """
    text = request.form['text']
    if not text:
        return render_template('index.html')

    matches = indicoio.personas(text)
    # key with the highest score (first one wins on ties, as before)
    myersbriggs = max(matches, key=matches.get)
    return render_template(zodiac_finder(myersbriggs))
 def analyze_tweets_personas(self):
     """Score this person's combined text with the indico.io personas API
     and register the resulting Factor with the plotter.

     Raises PersonAnalyzerException if the indico.io call fails.
     """
     try:
         # all_text_as_one() presumably concatenates the person's tweets
         # into one document -- TODO confirm against its definition.
         self.personas_stats = Factor(
             indicoio.personas(
                 self.person.all_text_as_one().content).items(),
             'Personas stats')
         self.plotter.add_factor(self.personas_stats)
     except IndicoError:
         # surface API failures as the analyzer's own exception type
         raise PersonAnalyzerException(
             'Error while fetching data from indicoio')
Example #7
0
def q1():
    user_input = input("My idea of a fun friday night is ___")
    print "Your input: " + str(user_input)
    emotion = indicoio.emotion(user_input)
    personality = indicoio.personality(user_input)
    personas = indicoio.personas(user_input)

    pprint(emotion)
    e_max = max(emotion, key=emotion.get)
    personas_max = max(personas, key=personas.get)
    personality_max = max(personality, key=personality.get)

    print "Congradulations, your emotion is " + str(
        e_max) + ", your personality is " + str(
            personality_max) + ", and your persona is " + str(personas_max)
Example #8
0
def briggs_test(resume):
    """Score *resume* with the indico.io personas API and format the
    strongest matches for template interpolation.

    Personas scoring <= 0.075 are dropped.  Returns [values, keys] where
    values is a string like "{{0.123, 0.456}}" (scores rounded to 3
    decimals) and keys is a JSON list with [] rebraced to {}.
    """
    briggs_dict = indicoio.personas(resume)
    # keep only the meaningful personas
    briggs_dict = {name: score for name, score in briggs_dict.items()
                   if score > 0.075}
    briggs_keys = list(briggs_dict.keys())
    # round each score to 3 decimal places (was an index loop)
    briggs_values = [round(float(score), 3) for score in briggs_dict.values()]
    # JSON-encode the keys, then swap brackets for the consuming template
    briggs_keys = json.dumps(briggs_keys).replace("[", "{").replace("]", "}")
    briggs_values = str(briggs_values).replace("[", "{{").replace("]", "}}")
    print([briggs_values, briggs_keys])
    return [briggs_values, briggs_keys]
Example #9
0
def process_response(response):
    """Run the indico.io analysis matching a response's question type and
    return the highest-scoring label.

    Recognised question ids: "Personality", "Emotion", "Persona".
    An unrecognised id now returns "" instead of crashing (the original
    called max() on an empty dict, raising ValueError).
    """
    print(str(response))
    questionType = response.question_id
    print("processing a " + str(questionType))

    user_input = response.response_text
    # dispatch table: question type -> indico.io analysis function
    analyzers = {
        "Personality": indicoio.personality,
        "Emotion": indicoio.emotion,
        "Persona": indicoio.personas,
    }
    analyzer = analyzers.get(questionType)
    result = analyzer(user_input) if analyzer else {}
    pprint(result)

    # guard: max() on an empty dict raises ValueError
    if not result:
        return ""
    max_result = max(result, key=result.get)
    print("max response: " + max_result)
    return max_result
def gimme_the_goods(text, tag_count=3, persona_count=3):
    """Run the full indico.io analysis suite over *text*.

    Returns a dict with sentiment, political, personality, the top
    *persona_count* personas (each mapped through persona_mapping), and
    the top *tag_count* text tags; all scores truncated to 3 decimals.
    """
    # Consume some of that api for analysis
    sentiment = indicoio.sentiment(text)
    # TODO figure out a better way to handle this bug
    political = indicoio.political(text[0:1100])
    personality = indicoio.personality(text)
    personas = indicoio.personas(text)
    tags = indicoio.text_tags(text, top_n=tag_count)

    # keep only the highest-scoring personas
    ranked = sorted(personas.items(), key=lambda item: item[1], reverse=True)
    top_personas = dict(ranked[:persona_count])

    # Truncate the values to 3 decimals for cleanliness
    roundness = 3
    sentiment = truncate_values(sentiment, roundness)
    political = truncate_values(political, roundness)
    personality = truncate_values(personality, roundness)
    top_personas = truncate_values(top_personas, roundness)
    tags = truncate_values(tags, roundness)

    # reshape the personas into labelled records for the caller
    final_personas = [
        {'type': persona_mapping[name], 'name': name, 'value': score}
        for name, score in top_personas.items()
    ]

    return {
        'sentiment': sentiment,
        'political': political,
        'personality': personality,
        'personas': final_personas,
        'tags': tags
    }
def gimme_the_goods(text, tag_count=3, persona_count=3):
    """Run the full indico.io analysis suite over *text*.

    Returns a dict with sentiment, political, personality, the top
    *persona_count* personas (each mapped through persona_mapping), and
    the top *tag_count* text tags; all scores truncated to 3 decimals.
    """

    # Consume some of that api for analysis
    sentiment = indicoio.sentiment(text)
    # TODO figure out a better way to handle this bug
    political = indicoio.political(text[0:1100])
    personality = indicoio.personality(text)
    personas = indicoio.personas(text)
    tags = indicoio.text_tags(text, top_n=tag_count)

    # Sort the personas to grab top ones
    top_personas = dict(
        sorted(personas.items(), key=operator.itemgetter(1),
               reverse=True)[:persona_count])

    # Truncate the values to 3 decimals for cleanliness
    roundness = 3
    sentiment = truncate_values(sentiment, roundness)
    political = truncate_values(political, roundness)
    personality = truncate_values(personality, roundness)
    top_personas = truncate_values(top_personas, roundness)
    tags = truncate_values(tags, roundness)

    # Rearrange the personas a bit
    final_personas = []
    for key, value in top_personas.items():
        final_personas.append({
            'type': persona_mapping[key],
            'name': key,
            'value': value,
        })

    return_dict = {
        'sentiment': sentiment,
        'political': political,
        'personality': personality,
        'personas': final_personas,
        'tags': tags
    }

    return return_dict
#CALL INDICO.IO EMOTION API
# Python 2 script: scores 3000 texts with the indico.io personas API and
# collects four persona scores (as percentages) per text.

import indicoio
# SECURITY: hard-coded API key committed to source -- move to config/env.
indicoio.config.api_key = 'b21e6a05e2dca170414dcda6cfcbef15'

# per-persona score accumulators (percentage values)
advocate_array = []
debator_array = []
mediator_array = []
consul_array = []
# indices of inputs whose result lacked an expected persona key
error2 = []

# NOTE(review): `array` is not defined in this snippet -- presumably a
# list of >= 3000 input texts defined elsewhere; confirm before running.
for y in range(0,3000):

    try:
        result = indicoio.personas(array[y])

        print "This dictionary contains these keys: ", " ".join(result)

        # convert 0..1 scores to percentages
        advocate_result = (result["advocate"]*100)
        debator_result = (result["debater"]*100)
        mediator_result = (result["mediator"]*100)
        consul_result = (result["consul"]*100)

        advocate_array.append(advocate_result)
        debator_array.append(debator_result)
        mediator_array.append(mediator_result)
        consul_array.append(consul_result)
        print "FINISH "+str(y)
    except KeyError:
        # record which input failed and keep going
        error2.append(y)
Example #13
0
 def test_batch_personas(self):
     """Batch personas() on two identical strings returns a list of two
     identical score dicts."""
     test_string = "I love my friends!"
     response = personas([test_string, test_string])
     # assertIsInstance for a clearer failure message, consistent with
     # the assertion below
     self.assertIsInstance(response, list)
     self.assertIsInstance(response[0]["commander"], float)
     # identical inputs must score identically
     self.assertEqual(response[0]["commander"], response[1]["commander"])
Example #14
0
def execute(USERNAME, target, refresh):
    """Run the full indico.io text-analysis suite over a user's raw text
    and write a formatted report to the user's output file.

    USERNAME and target locate input/output paths via io_helper; the
    ``refresh`` parameter is accepted but unused in this body.  stdout is
    temporarily redirected into the report file while printing.
    """

    r_data = io_helper.read_raw(USERNAME, target)

    # remember the real stdout so it can be restored after the report
    og = sys.stdout
    fpath = io_helper.out_path(USERNAME, target)

    def analysis(raw='', limit=5, text='', percent=True):
        """Pretty-print a payload: lists of dicts are merged and recursed;
        dicts are printed as their top-``limit`` entries by value."""
        global meta_dict
        # print lines if input is a list of non-dicts
        # if input is list of dicts, merge dicts and resend to analysis
        if isinstance(raw, list):
            for item in raw:
                if not isinstance(item, dict):
                    print(item)
                else:
                    create_meta_dict(item)
            analysis(meta_dict, limit, text, percent)

        # if input is dict: print k, v pairs
        # optional args for return limit and description text
        if isinstance(raw, dict):
            print(text)
            ct = 0
            for v in sorted(raw, key=raw.get, reverse=True):
                ct += 1
                if ct > limit: break
                if isinstance(raw[v], float):
                    if percent: per = r'%'
                    else: per = ''
                    # scores are floats in 0..1; shown as percentages
                    print("    " + v, str(round(raw[v] * 100, 2)) + per)
                else:
                    print(v, raw[v])
            print()

    def create_meta_dict(item):
        # merge list of dicts into master dict
        # NOTE(review): meta_dict must exist at module level before
        # execute() runs -- confirm it is initialised elsewhere.
        global meta_dict
        meta_dict[item['text']] = item['confidence']
        return meta_dict

    # strip backslash escapes and turn '*' markers into spaces
    rClean = ''
    for i in range(len(r_data)):
        if r_data[i - 1] == '\\':
            rClean = rClean[:-1]
            if r_data[i] != "'":
                continue

        if r_data[i] == '*':
            rClean += ' '
        else:
            rClean += r_data[i]

    r_data = rClean
    del rClean
    indicoio.config.api_key = keycheck.get_key()

    # Big 5
    big5 = {
        'text': "Big 5 personality inventory matches: ",
        "payload": indicoio.personality(r_data)
    }

    # Meyers briggs
    mbtiLabels = indicoio.personas(r_data)
    # persona name -> four-letter Myers-Briggs type code
    mbti_dict = {
        'architect': 'intj',
        'logician': 'intp',
        'commander': 'entj',
        'debater': 'entp',
        'advocate': 'infj',
        'mediator': 'infp',
        'protagonist': 'enfj',
        'campaigner': 'enfp',
        'logistician': 'istj',
        'defender': 'isfj',
        'executive': 'estj',
        'consul': 'esfj',
        'virtuoso': 'istp',
        'adventurer': 'isfp',
        'entrepreneur': 'estp',
        'entertainer': 'esfp'
    }

    def replace_mbti():
        # yield the MBTI code corresponding to each persona key
        for k, v in mbtiLabels.items():
            k = k.replace(k, mbti_dict[k])
            yield k

    # rebuild the personas dict keyed by MBTI code instead of persona name
    k = (list(replace_mbti()))
    v = map(lambda x: x, mbtiLabels.values())
    payload = (dict(zip(k, v)))

    mbti = {
        'text': "Most likely personalilty styles: ",
        "payload": payload,
        'ct': 5,
        'percent': True
    }

    # Political
    pol = {
        'text': "Political alignments: ",
        "payload": indicoio.political(r_data, version=1)
    }
    # Sentiment
    sen = {
        'text': "Sentiment: ",
        "payload": {
            'Percent positive': indicoio.sentiment(r_data)
        },
        'ct': 3
    }

    # Emotion
    emo = {
        'text': "Predominant emotions:",
        "payload": indicoio.emotion(r_data),
        'ct': 5
    }

    # Keywords
    kw = {'text': "Keywords: ", "payload": indicoio.keywords(r_data), 'ct': 5}
    # Text tags
    tt = {
        'text': "Text tags: ",
        "payload": indicoio.text_tags(r_data),
        'ct': 10
    }
    # Place
    pla = {
        'text': "Key locations: ",
        'payload': indicoio.places(r_data, version=2),
        'ct': 3,
        'percent': True
    }

    def Karma(USERNAME):
        # Print the user's top-10 subreddits by accumulated karma (praw).
        import praw
        import collections
        kList = []
        user_agent = ("N2ITN")
        r = praw.Reddit(user_agent=user_agent)
        thing_limit = 100

        user = r.get_redditor(USERNAME)
        gen = user.get_submitted(limit=thing_limit)
        karma_by_subreddit = {}
        for thing in gen:
            subreddit = thing.subreddit.display_name
            karma_by_subreddit[subreddit] = (
                karma_by_subreddit.get(subreddit, 0) + thing.score)

        for w in sorted(karma_by_subreddit,
                        key=karma_by_subreddit.get,
                        reverse=True):
            kList.append(str(w) + ': ' + str(karma_by_subreddit[w]))
        kList.insert(0, 'Karma by Sub')

        print("\n\t".join(kList[:10]))

    def show(results):
        # Accepts bag of dicts, or single dict
        if not isinstance(results, dict):
            for X in results:
                show(X)
        else:
            # empty places payload gets a friendly message instead
            if results == pla and pla['payload'] == []:
                print("Not enough information to infer place of origin")
                print()
            else:

                i = results
                analysis(raw=i.get('payload', ''),
                         limit=i.get('ct', 5),
                         text=i.get('text', ''),
                         percent=i.get('percent', True))

    # redirect stdout into the report file for the duration of the dump
    with open(fpath, 'w') as outtie:
        sys.stdout = outtie
        print(target + USERNAME)
        print()
        show([kw, pla, big5, emo, sen, pol, mbti, tt])
        Karma(USERNAME)

        sys.stdout = og
    return
    # NOTE(review): unreachable -- the function returns above.  These lines
    # look like a leftover fragment from another script (tweet, tweets and
    # index are undefined here); consider deleting.
    clean_tweet = tweet
    clean_tweet = html.unescape(clean_tweet)
    clean_tweet = " ".join(filter(lambda x:x[0]!="@", clean_tweet.split()))
    clean_tweet = " ".join(filter(lambda x:x[:4]!="http", clean_tweet.split()))
    tweets[index] = clean_tweet
    
# Debug dump of the cleaned tweets collected above.
print("There are " + str(len(tweets)) + " about to be printed!")
print("\n-\n".join(tweets))
# NOTE(review): exit() here stops the script before any analysis runs --
# presumably a debugging stop; remove it to reach the code below.
exit()

# join the tweets into a big ol paragraph
combined_tweets = " ".join(tweets)

# get some sweet stats
# NOTE(review): `i` appears to be an alias for the indicoio module -- the
# import is not visible in this snippet; confirm.
sentiment = i.sentiment(combined_tweets)
personas = i.personas(combined_tweets)
political = i.political(combined_tweets)

# sorty sort
sorted_personas = sorted(personas.items(), 
                         key=operator.itemgetter(1), 
                         reverse=True)
sorted_political = sorted(political.items(),
                          key=operator.itemgetter(1),
                          reverse=True)

# top-3 personas, strongest political leaning, overall sentiment
print()
print(sorted_personas[:3])
print(sorted_political[0])
print(sentiment)
Example #16
0
 def test_batch_personas(self):
     """Batch mode: a list input yields a list of per-string score dicts;
     identical inputs must produce identical scores."""
     test_string = "I love my friends!"
     response = personas([test_string, test_string])
     self.assertTrue(isinstance(response, list))
     self.assertIsInstance(response[0]["commander"], float)
     # same text twice -> same score for the same persona
     self.assertEqual(response[0]["commander"], response[1]["commander"])
    # NOTE(review): fragment -- the enclosing loop header (presumably
    # `for index, tweet in enumerate(tweets):` with clean_tweet seeded from
    # tweet) is not visible in this snippet.
    clean_tweet = html.unescape(clean_tweet)
    # drop @mentions and links before analysis
    clean_tweet = " ".join(filter(lambda x: x[0] != "@", clean_tweet.split()))
    clean_tweet = " ".join(
        filter(lambda x: x[:4] != "http", clean_tweet.split()))
    tweets[index] = clean_tweet

# Debug dump of the cleaned tweets.
print("There are " + str(len(tweets)) + " about to be printed!")
print("\n-\n".join(tweets))
# NOTE(review): exit() here stops the script before any analysis runs --
# presumably a debugging stop; remove it to reach the code below.
exit()

# join the tweets into a big ol paragraph
combined_tweets = " ".join(tweets)

# get some sweet stats
# NOTE(review): `i` appears to be an alias for the indicoio module -- the
# import is not visible in this snippet; confirm.
sentiment = i.sentiment(combined_tweets)
personas = i.personas(combined_tweets)
political = i.political(combined_tweets)

# sorty sort
sorted_personas = sorted(personas.items(),
                         key=operator.itemgetter(1),
                         reverse=True)
sorted_political = sorted(political.items(),
                          key=operator.itemgetter(1),
                          reverse=True)

# top-3 personas, strongest political leaning, overall sentiment
print()
print(sorted_personas[:3])
print(sorted_political[0])
print(sentiment)
Example #18
0
def execute(USERNAME, target, refresh):
    """Run the full indico.io text-analysis suite over a user's raw text
    and write a formatted report to the user's output file.

    USERNAME and target locate input/output paths via io_helper; the
    ``refresh`` parameter is accepted but unused in this body.  stdout is
    temporarily redirected into the report file while printing.
    """

    r_data = io_helper.read_raw(USERNAME, target)

    # remember the real stdout so it can be restored after the report
    og = sys.stdout
    fpath = io_helper.out_path(USERNAME, target)

    def analysis(raw='', limit=5, text='', percent=True):
        """Pretty-print a payload: lists of dicts are merged and recursed;
        dicts are printed as their top-``limit`` entries by value."""
        global meta_dict
        # print lines if input is a list of non-dicts
        # if input is list of dicts, merge dicts and resend to analysis
        if isinstance(raw, list):
            for item in raw:
                if not isinstance(item, dict):
                    print(item)
                else:
                    create_meta_dict(item)
            analysis(meta_dict, limit, text, percent)

        # if input is dict: print k, v pairs
        # optional args for return limit and description text
        if isinstance(raw, dict):
            print(text)
            ct = 0
            for v in sorted(raw, key=raw.get, reverse=True):
                ct += 1
                if ct > limit: break
                if isinstance(raw[v], float):
                    if percent: per = r'%'
                    else: per = ''
                    # scores are floats in 0..1; shown as percentages
                    print("    " + v, str(round(raw[v] * 100, 2)) + per)
                else:
                    print(v, raw[v])
            print()

    def create_meta_dict(item):
        # merge list of dicts into master dict
        # NOTE(review): meta_dict must exist at module level before
        # execute() runs -- confirm it is initialised elsewhere.
        global meta_dict
        meta_dict[item['text']] = item['confidence']
        return meta_dict

    # strip backslash escapes and turn '*' markers into spaces
    rClean = ''
    for i in range(len(r_data)):
        if r_data[i - 1] == '\\':
            rClean = rClean[:-1]
            if r_data[i] != "'":
                continue

        if r_data[i] == '*':
            rClean += ' '
        else:
            rClean += r_data[i]

    r_data = rClean
    del rClean
    indicoio.config.api_key = keycheck.get_key()

    # Big 5
    big5 = {'text': "Big 5 personality inventory matches: ", "payload": indicoio.personality(r_data)}

    # Meyers briggs
    mbtiLabels = indicoio.personas(r_data)
    # persona name -> four-letter Myers-Briggs type code
    mbti_dict = {
        'architect': 'intj',
        'logician': 'intp',
        'commander': 'entj',
        'debater': 'entp',
        'advocate': 'infj',
        'mediator': 'infp',
        'protagonist': 'enfj',
        'campaigner': 'enfp',
        'logistician': 'istj',
        'defender': 'isfj',
        'executive': 'estj',
        'consul': 'esfj',
        'virtuoso': 'istp',
        'adventurer': 'isfp',
        'entrepreneur': 'estp',
        'entertainer': 'esfp'
    }

    def replace_mbti():
        # yield the MBTI code corresponding to each persona key
        for k, v in mbtiLabels.items():
            k = k.replace(k, mbti_dict[k])
            yield k

    # rebuild the personas dict keyed by MBTI code instead of persona name
    k = (list(replace_mbti()))
    v = map(lambda x: x, mbtiLabels.values())
    payload = (dict(zip(k, v)))

    mbti = {'text': "Most likely personalilty styles: ", "payload": payload, 'ct': 5, 'percent': True}

    # Political
    pol = {'text': "Political alignments: ", "payload": indicoio.political(r_data, version=1)}
    # Sentiment
    sen = {'text': "Sentiment: ", "payload": {'Percent positive': indicoio.sentiment(r_data)}, 'ct': 3}

    # Emotion 
    emo = {'text': "Predominant emotions:", "payload": indicoio.emotion(r_data), 'ct': 5}

    # Keywords
    kw = {'text': "Keywords: ", "payload": indicoio.keywords(r_data), 'ct': 5}
    # Text tags
    tt = {'text': "Text tags: ", "payload": indicoio.text_tags(r_data), 'ct': 10}
    # Place
    pla = {
        'text': "Key locations: ",
        'payload': indicoio.places(r_data, version=2),
        'ct': 3,
        'percent': True
    }

    def Karma(USERNAME):
        # Print the user's top-10 subreddits by accumulated karma (praw).
        import praw
        import collections
        kList = []
        user_agent = ("N2ITN")
        r = praw.Reddit(user_agent=user_agent)
        thing_limit = 100

        user = r.get_redditor(USERNAME)
        gen = user.get_submitted(limit=thing_limit)
        karma_by_subreddit = {}
        for thing in gen:
            subreddit = thing.subreddit.display_name
            karma_by_subreddit[subreddit] = (karma_by_subreddit.get(subreddit, 0) + thing.score)

        for w in sorted(karma_by_subreddit, key=karma_by_subreddit.get, reverse=True):
            kList.append(str(w) + ': ' + str(karma_by_subreddit[w]))
        kList.insert(0, 'Karma by Sub')

        print("\n\t".join(kList[:10]))

    def show(results):
        # Accepts bag of dicts, or single dict
        if not isinstance(results, dict):
            for X in results:
                show(X)
        else:
            # empty places payload gets a friendly message instead
            if results == pla and pla['payload'] == []:
                print("Not enough information to infer place of origin")
                print()
            else:

                i = results
                analysis(
                    raw=i.get('payload', ''),
                    limit=i.get('ct', 5),
                    text=i.get('text', ''),
                    percent=i.get('percent', True)
                )

    # redirect stdout into the report file for the duration of the dump
    with open(fpath, 'w') as outtie:
        sys.stdout = outtie
        print(target + USERNAME)
        print()
        show([kw, pla, big5, emo, sen, pol, mbti, tt])
        # Karma(USERNAME)

        sys.stdout = og
    return