Code Example #1
def test():
    set_api_key("write your api key here")
    similarity("Sachin is the greatest batsman",
               "Tendulkar is the finest cricketer")
    sentiment("Come on, lets play together")
    ner("Narendra Modi is the prime minister of India")
    keywords(
        "Prime Minister Narendra Modi tweeted a link to the speech Human Resource Development Minister Smriti Irani made in the Lok Sabha during the debate on the ongoing JNU row and the suicide of Dalit scholar Rohith Vemula at the Hyderabad Central University."
    )
    emotion("Did you hear the latest Porcupine Tree song ? It's rocking !")
    intent(
        "Finance ministry calls banks to discuss new facility to drain cash")
    abuse("you f**king a$$hole")
    samples = [
        "drugs are fun", "don't do drugs, stay in school",
        "lol you a f*g son", "I have a throat infection"
    ]
    batch_intent(samples)
    batch_abuse(samples)
    batch_ner(samples)
    batch_sentiment(samples)
    batch_phrase_extractor(samples)
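The calls above discard their return values, so the test prints nothing. A minimal sketch of inspecting one response, assuming the dict shapes that recur in the examples below ({'emotion': {...}} on success; an 'Error'/'code' payload on failure, as in the hardcoded error dict of Code Example #2):

import paralleldots

paralleldots.set_api_key("write your api key here")
response = paralleldots.emotion("Come on, lets play together")
if 'emotion' not in response:
    # Error responses carry 'Error' and 'code' fields instead
    print('API error:', response)
else:
    scores = response['emotion']
    print(max(scores, key=scores.get), scores)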
Code Example #2
File: nlp_main_handler.py Project: tyz1z/ubi
    def go(self, text, language):
        if language == 'english':
            key_words = paralleldots.keywords(text)
            key_phrase = paralleldots.phrase_extractor(text)
            emotion = paralleldots.emotion(text)
        elif language == 'schinese':
            key_words = [{
                'Error':
                'The lang_code is not among the supported languages, supported languages: en, pt, zh, es, de, fr, nl, it, ja, th, da, fi, el, ru, ar.',
                'code': 400
            }]  # chinese API not yet available
            key_phrase = paralleldots.multilang_keywords(text, 'zh')
            emotion = paralleldots.emotion(text, 'zh')
        elif language == 'french':
            key_words = paralleldots.multilang_keywords(text, 'fr')
            key_phrase = paralleldots.multilang_keywords(text, 'fr')
            emotion = paralleldots.emotion(text, 'fr')
        elif language == 'japanese':
            key_words = paralleldots.multilang_keywords(text, 'ja')
            key_phrase = paralleldots.multilang_keywords(text, 'ja')
            emotion = paralleldots.emotion(text, 'ja')
        else:
            key_words, key_phrase, emotion = [], [], []

        return key_words, key_phrase, emotion
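The branch-per-language dispatch above repeats the same calls with different language codes. A table-driven variant (a sketch, not from the project: it keeps English on the default endpoints, and replaces the hardcoded Chinese-keywords error dict with the same multilang call used for the other languages):

LANG_CODES = {'english': 'en', 'schinese': 'zh', 'french': 'fr', 'japanese': 'ja'}

def go(self, text, language):
    code = LANG_CODES.get(language)
    if code is None:
        return [], [], []
    if code == 'en':
        key_words = paralleldots.keywords(text)
        key_phrase = paralleldots.phrase_extractor(text)
    else:
        key_words = paralleldots.multilang_keywords(text, code)
        key_phrase = paralleldots.multilang_keywords(text, code)
    emotion = paralleldots.emotion(text) if code == 'en' else paralleldots.emotion(text, code)
    return key_words, key_phrase, emotion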
Code Example #3
def test():
    similarity("Sachin is the greatest batsman",
               "Tendulkar is the finest cricketer")
    sentiment("Come on, lets play together")
    taxonomy("Narendra Modi is the prime minister of India")
    ner("Narendra Modi is the prime minister of India")
    keywords(
        "Prime Minister Narendra Modi tweeted a link to the speech Human Resource Development Minister Smriti Irani made in the Lok Sabha during the debate on the ongoing JNU row and the suicide of Dalit scholar Rohith Vemula at the Hyderabad Central University."
    )
    emotion("Did you hear the latest Porcupine Tree song ? It's rocking !")
    intent(
        "Finance ministry calls banks to discuss new facility to drain cash")
    abuse("you f**king a$$hole")
Code Example #4
def analyze_entry(raw_text):
    sent_detector = nltk.data.load('tokenizers/punkt/english.pickle')
    text_sentences = sent_detector.tokenize(raw_text.strip())
    emotions_overall = paralleldots.emotion(raw_text)
    sentiment_overall = paralleldots.sentiment(raw_text)
    emotions_sentences = paralleldots.batch_emotion(text_sentences)
    sentiment_sentences = paralleldots.batch_sentiment(text_sentences)
    #print("type of emotions_overall: ", type(emotions_overall))
    overall = {}
    overall.update(emotions_overall)
    overall.update(sentiment_overall)
    sentences = {}
    sentences.update(emotions_sentences)
    sentences.update(sentiment_sentences)
    data = {
        'Overall': overall,
        'Sentences': sentences,
        'Source Text': raw_text
    }
    #print("type of data: ", type(data))
    #data = json.dumps(data)
    #print("type of data: ",type(data))
    # data['Overall'].append(emotions_overall)
    # data['Overall'].append(sentiment_overall)
    # data['Sentences'] = []
    # data['Sentences'].append(emotions_sentences)
    # data['Sentences'].append(sentiment_sentences)
    #print(type(data))
    return data
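One caveat with the update() merges above: dict.update() overwrites keys present in both responses, so a shared bookkeeping field such as 'usage' (Code Example #9 deletes it explicitly) would survive only from the second call. A quick illustration with stand-in values:

overall = {}
overall.update({'emotion': {'Happy': 0.9}, 'usage': 'from the emotion call'})
overall.update({'sentiment': 'positive', 'usage': 'from the sentiment call'})
print(overall['usage'])  # 'from the sentiment call' -- the later update wins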
Code Example #5
    def transcribe():
        # Read the audio file, convert the speech to text, then analyze the
        # transcript's emotions, with try/except error handling.
        script = audfilepath.get()
        try:
            with open(script, 'rb') as audio_file:
                prof = speech_to_text.recognize(
                    audio_file, content_type="audio/mp3").result
            # Concatenate the best transcript of every recognized segment
            trlist = [r['alternatives'][0]['transcript']
                      for r in prof['results']]
            trstr = " ".join(trlist)
            print(trstr)
            UserDict = paralleldots.emotion(trstr)['emotion']
            df = pd.DataFrame.from_dict(UserDict, orient='index')
            df.reset_index(inplace=True)
            df.columns = ['Sentiment', 'Percentile']
            plt.figure(figsize=(15, 5))
            sns.barplot(x="Sentiment", y="Percentile", data=df)
            plt.show()
        except Exception:
            errortext = Label(
                fourth_window,
                text="Error in finding file, please make sure the path "
                     "is correct and the format is mp3")
            errortext.grid(row=2, column=1)
Code Example #6
def test(request):
    paralleldots.set_api_key("M4rTJatLfpK0pp1AjE5pZ8ciHa4hW2KTOeq65fUIoEk")
    text = "i wanna die"
    data = paralleldots.emotion(text)
    emotions = data['emotion']
    print(emotions['Angry'])
    return render(request, 'home/test.html')
Code Example #7
def get_emotion(string):
    value_returned = paralleldots.emotion(string)

    if 'emotion' in value_returned:
        return value_returned['emotion']

    return None
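A possible call site for this helper (the sample text is made up); the None return lets the caller skip failed API calls, assuming the numeric scores dict shape used in the other examples:

scores = get_emotion("I am thrilled about the launch")
if scores is not None:
    print(max(scores, key=scores.get))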
Code Example #8
    def get_emotions(self):
        """Analyzes every tweet using ParallelDots API and adds result to Emotion Dic.

        :returns: Dictionary containing number of each emotion occurrences in all tweets.
        """

        for tweet in self.tweets:
            tweet = clean_tweet(tweet.full_text)

            # Call the API once per tweet and tally the dominant emotion
            detected = emotion(tweet)['emotion']['emotion']
            if detected in self.emotions:
                self.emotions[detected] += 1
            else:
                self.emotions[detected] = 1

        return self.emotions
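The count-or-initialize branch can also be collapsed with collections.Counter, which defaults missing keys to zero (a minimal sketch, not from the original project; the tweets are hypothetical):

from collections import Counter
import paralleldots

paralleldots.set_api_key("write your api key here")
emotions = Counter()
for text in ("tweet one", "tweet two"):
    emotions[paralleldots.emotion(text)['emotion']['emotion']] += 1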
Code Example #9
def get_vects(text):
    keywords_vect = [k['keyword'] for k in keywords(text)['keywords']]
    emotion_vect = [(key, value)
                    for key, value in emotion(text)['probabilities'].items()]
    sentiment_vect = sentiment(text)
    del sentiment_vect['usage']
    return keywords_vect, emotion_vect, sentiment_vect
Code Example #10
def search_tweet():
    query = input("Your Query: ")
    max_search = int(input("Maximum Results: "))
    print('\n')
    searched_tweets = [status for status in tweepy.Cursor(api.search, q=query).items(max_search)]

    for i in range(len(searched_tweets)):
        json = searched_tweets[i]._json
        json_user = json['user']
        user = json_user['name']
        twitter_id = json_user['screen_name']
        created_at = json['created_at']
        tweet = json['text']
        loc = json_user['location']
        lang = json_user['lang']
        t_zone = json_user['time_zone']
        sentiment = pd.sentiment(tweet)['sentiment']
        probs = pd.emotion(tweet)['emotion']['probabilities']
        emotion = max(probs, key=probs.get)  # emotion with the highest probability
        abuse = pd.abuse(tweet)['sentence_type']
        print(str(i+1)+'.\tUser: '+user+' (@'+twitter_id+')')
        print('\tTweet Created: '+created_at)
        print('\tLocation: '+loc)
        print('\tLanguage: '+lang)
        print('\tTime Zone: ', t_zone)
        print('\tTweet: '+tweet)
        print('\n\tSentiment Analysis:\n')
        print('\t\tSentiment: '+sentiment)
        print('\t\tEmotion: '+emotion)
        print('\t\tAbuse: '+abuse)
        print('-------------------------------------------------------------------------------------------------------')
        time.sleep(0.2)

    time.sleep(1)
    input("\nPress Enter to Continue...")
    main_menu()
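In the fix above, the key= argument matters: max() over a dict compares its keys, so without key= it returns the alphabetically last emotion name rather than the most probable one. A quick illustration with made-up scores:

probs = {'Angry': 0.1, 'Happy': 0.8, 'Sad': 0.05}
print(max(probs))                 # 'Sad'   (alphabetical comparison of keys)
print(max(probs, key=probs.get))  # 'Happy' (highest probability)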
Code Example #11
def scoreAPI(text):
    abuse = paralleldots.abuse(text)
    answer = ""
    if abuse['neither'] < 0.95:
        print("Your text may be seen as abuse or hate speech: ",
              abuse['neither'])
        answer = answer + "Your text may be seen as abuse or hate speech: " + str(
            abuse['neither']) + "\n"
    emotion = paralleldots.emotion(text)['emotion']
    if emotion['Sad'] > 0.3:
        print("Your text may be seen as sad: ", emotion['Sad'])
        answer = answer + "Your text may be seen as sad: " + str(
            emotion['Sad']) + "\n"
    if emotion['Bored'] > 0.2:
        print("Your text may be seen as boring: ", emotion['Bored'])
        answer = answer + "Your text may be seen as boring: " + str(
            emotion['Bored']) + "\n"
    if emotion['Excited'] < 0.1:
        print("Try to sound more excited!: ", emotion['Excited'])
        answer = answer + "Try to sound more excited!: " + str(
            emotion['Excited']) + "\n"

    sarcastic = paralleldots.sarcasm(text)
    if sarcastic['Sarcastic'] > 0.5:
        print("You may sound sarcastic: ", sarcastic['Sarcastic'])
        answer = answer + "You may sound sarcastic: " + str(
            sarcastic['Sarcastic']) + "\n"

    return answer
Code Example #12
File: main_script.py Project: timtinlong/McHacks2021
def short_questions_3():
    global page_idx
    global challenge
    global coping
    global goals
    global challenge_emotion
    global coping_emotion
    global goals_emotion
    global keyword_challenge
    global keyword_coping
    global keyword_goals
    global emotion_arr
    current_emotion = most_common(emotion_arr)
    print('Submitted emotion:', current_emotion)
    emotion_arr = []

    page_idx += 1
    goals = '{}'.format(request.form['goals'])

    goals_emotion = paralleldots.emotion(goals)
    goals_emotion = goals_emotion['emotion']
    goals_emotion = max(goals_emotion.items(), key=operator.itemgetter(1))[0]
    keyword_goals = paralleldots.keywords(goals)

    html_output_writer.goal_text = goals
    html_output_writer.goal_kw = keyword_goals['keywords']
    html_output_writer.goal_face = get_face_emotion_emoji(current_emotion)
    html_output_writer.goal_text_emotion = get_text_emotion_emoji(goals_emotion)


    render_template(page_ID[page_idx])
    return redirect('/')
Code Example #13
def moody():

    data = request.args.to_dict()

    text = data['Review']
    language = data["Language"]

    if (language == "it"):
        translate_client = translate.Client()
        if isinstance(text, six.binary_type):
            text = text.decode('utf-8')

        result = translate_client.translate(text)
        text = result['translatedText']

    response_1 = paralleldots.emotion(text)
    main_emotion = response_1['emotion']


    maxscore = 0
    ResultMood = ""
    print(main_emotion)
    for emotions in main_emotion:
        Mood = emotions
        score = main_emotion[Mood]
        if (score > maxscore):
            maxscore = score
            ResultMood = Mood
    print("the User is :  " + ResultMood)
    return jsonify(Mood=ResultMood)
Code Example #14
def getMeme(text):
    # Get Emotion
    apiResponse = paralleldots.emotion(text)
    apiResponse = apiResponse["emotion"]
    emotion = max(apiResponse, key=apiResponse.get)
    # print('Emotion:', emotion)

    # Generate a random template based on emotion
    template_id = random.choice(emotions[emotion])
    # print('>Template ID', template_id)

    # Check if the text has only one word
    if ' ' in text:
        text0, text1 = text.split(' ', 1)
    else:
        text0 = text
        text1 = ''

    # print('>Meme', 'Text 0:', text0)
    # print('>Meme', 'Text 1:', text1)

    # Prepare for Meme Generation 🔥
    querystring = {
        'username': username,
        'password': passsword,
        'template_id': template_id,
        'text0': text0,
        'text1': text1
    }
    response = requests.request('POST', url, params=querystring).json()

    # Return the meme URL; if meme generation fails, send a default response
    return response["data"]["url"] if response["success"] else default_url
Code Example #15
def get_parallel_dots_emo(transcript_li):

    total_emot_dict = defaultdict(int)
    line_counter = 0
    try:
        for line in transcript_li:
            if line:
                print(line)
                line_score_dict = pd.emotion(line)['emotion']
                print(line_score_dict)
                for emo in ("Excited", "Bored", "Happy",
                            "Fear", "Angry", "Sad"):
                    total_emot_dict[emo] += line_score_dict[emo]
                line_counter += 1
    except Exception:
        pass  # fall through and normalize over the lines processed so far

    # Divide each accumulated score by the number of sentences processed
    return {k: v / max(line_counter, 1)
            for k, v in total_emot_dict.items()}
Code Example #16
def analyze_sentence(sentence):
    '''
    Return softmaxed probability vector of sentence emotions.
    '''
    paralleldots.set_api_key(key)
    result = paralleldots.emotion(sentence)

    return result['emotion']['probabilities']
Code Example #17
def get_highest_two_emotions(text):
    paralleldots.set_api_key(paralleldots_TOKEN)
    emotions = paralleldots.emotion(text)["emotion"]
    my_list = [k for k, v in emotions.items() if v == max(emotions.values())]
    if my_list[0] == "Fear":
        return "Sad"
    return my_list[0]
Code Example #18
def analyze_text_w(text):
    paralleldots.set_api_key(paralleldots_TOKEN)
    emotions = paralleldots.emotion(text)["emotion"]
    pos = (emotions["Happy"] + emotions["Excited"]) / 2
    neg = (emotions["Angry"] + emotions["Bored"] + emotions["Fear"] +
           emotions["Sad"]) / 4
    print(pos, " ", neg)
Code Example #19
File: emotion.py Project: PrateekSahni/Euphorum
def get_emotion(line):

    set_api_key("use api key from parallel dots")
    result = emotion(line)
    return result['emotion']['emotion']
Code Example #20
    def getEmotionFromText(self, text):
        predictions = paralleldots.emotion(text)
        max_pred = 0
        emotion = ""
        for key in predictions['emotion'].keys():
            if predictions['emotion'][key] > max_pred:
                max_pred = predictions['emotion'][key]
                emotion = key.lower()
        return emotion
Code Example #21
def get_vects(text):
    print(str(text))
    return [{"keywords": mch_text.subs(text)}, emotion(text), sentiment(text)]
Code Example #22
File: app.py Project: meyash/tweetify_
def paralleldots_api(text):
    api_key = ""
    paralleldots.set_api_key(api_key)
    emot = paralleldots.emotion(text)          # emotion analysis
    sentiment = paralleldots.sentiment(text)   # sentiment analysis
    return {'emot': emot['emotion'], 'sent': sentiment['sentiment']}
Code Example #23
File: main.py Project: ncorona/eia-server
def analyze_text(text, lang):
    """
    Returns the results for emotion detection analysis.
    Clean text removing puntuaction characters.
    """
    res = paralleldots.emotion(text, lang)
    clean_text = text
    for txt in string.punctuation:
        clean_text = clean_text.replace(txt, " ")
    res["text"] = clean_text
    return res
Code Example #24
def test():
    set_api_key("tEgayBzxTpAZZNppX62n8niYYoHeTna20DqQw8S9TQU")
    # category  = { "finance": [ "markets", "economy", "shares" ], "world politics": [ "diplomacy", "UN", "war" ], "india": [ "congress", "india", "bjp" ] }
    # print(similarity( "Sachin is the greatest batsman", "Tendulkar is the finest cricketer" ))
    print(sentiment("Come on, lets play together"))
    # print(ner( "Narendra Modi is the prime minister of India","en" ))
    # print(taxonomy("Michael Jordan of the Chicago Bulls is getting a 10-hour Netflix documentary in 2019"))
    # print(keywords( "Prime Minister Narendra Modi tweeted a link to the speech Human Resource Development Minister Smriti Irani made in the Lok Sabha during the debate on the ongoing JNU row and the suicide of Dalit scholar Rohith Vemula at the Hyderabad Central University." ))
    # print(phrase_extractor( "Prime Minister Narendra Modi tweeted a link to the speech Human Resource Development Minister Smriti Irani made in the Lok Sabha during the debate on the ongoing JNU row and the suicide of Dalit scholar Rohith Vemula at the Hyderabad Central University." ))
    print(emotion("i have nothing else to do. life is so boring man."))
    # print(intent("Finance ministry calls banks to discuss new facility to drain cash"))
    print(abuse("you f**king a$$hole"))
Code Example #25
def getEmotionsDicFromText(text):

    emotionsDic = paralleldots.emotion(text)
    emotionsUnicode = emotionsDic[u'emotion'][u'probabilities']

    emotionsStringDic = {}

    for emotion in emotionsUnicode:
        emotionsStringDic[unicodedata.normalize('NFKD', emotion).encode(
            'ascii', 'ignore')] = emotionsUnicode[emotion]

    return emotionsStringDic
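The unicodedata round-trip above is a Python 2 idiom that converts unicode keys to byte strings. Under Python 3 the response keys are already str, so the equivalent is just a copy of the probabilities dict (a minimal sketch; the function name is hypothetical):

def get_emotions_dic_from_text(text):
    # Python 3: keys are already str, no normalization needed
    return dict(paralleldots.emotion(text)['emotion']['probabilities'])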
Code Example #26
File: api_call.py Project: danish17/litmus-ai-web
def get_emotion(string):
    """Returns emotion and emotion score of a string as a PD Series."""
    try:
        title_emotion = emotion(string)
        title_emotion = (sorted(((title_emotion['emotion']).items()),
                                key=lambda kv: (kv[1], kv[0]),
                                reverse=True))[0]
        emotion_type = title_emotion[0]
        emotion_score = title_emotion[1]
        return pd.Series([emotion_type, emotion_score])

    except Exception:
        print("Error in ", string)
Code Example #27
    def get_feelings(self, texts):
        """
        Return average of feelings values
        """
        t_feelings = {
            'sentiment': {
                's_negative': [],
                's_neutral': [],
                's_positive': []
            },
            'emotion': {
                'e_angry': [],
                'e_excited': [],
                'e_happy': [],
                'e_indifferent': [],
                'e_sad': []
            }
        }

        for t_part in texts:
            sent = sentiment(t_part)
            emot = emotion(t_part)

            t_feelings['sentiment']['s_negative'].append(sent['probabilities']['negative'])
            t_feelings['sentiment']['s_neutral'].append(sent['probabilities']['neutral'])
            t_feelings['sentiment']['s_positive'].append(sent['probabilities']['positive'])

            t_feelings['emotion']['e_angry'].append(emot['probabilities']['angry'])
            t_feelings['emotion']['e_excited'].append(emot['probabilities']['excited'])
            t_feelings['emotion']['e_happy'].append(emot['probabilities']['happy'])
            t_feelings['emotion']['e_indifferent'].append(emot['probabilities']['indifferent'])
            t_feelings['emotion']['e_sad'].append(emot['probabilities']['sad'])

        t_feelings['sentiment'] = (
                self.scale_to_one(
                        self.get_avg_feelings(t_feelings['sentiment']))
                )
        t_feelings['emotion'] = (
                self.scale_to_one(
                    self.get_avg_feelings(t_feelings['emotion']))
                )

        result = [(*t_feelings['sentiment'].keys(),
                   *t_feelings['emotion'].keys()),
                  (*t_feelings['sentiment'].values(),
                   *t_feelings['emotion'].values())
                  ]
        return result
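The helpers scale_to_one and get_avg_feelings are not included in this snippet. A plausible reconstruction of get_avg_feelings, offered purely as an assumption about the missing code:

def get_avg_feelings(feelings):
    # Hypothetical: average each per-text score list into a single value
    return {k: sum(v) / len(v) if v else 0.0 for k, v in feelings.items()}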
Code Example #28
def textAnalyze(request):
    res = OrderedDict()
    if request.method == 'POST':
        try:
            s = request.POST['str']
            res['emotion'] = paralleldots.emotion(
                s)['emotion']['probabilities']
            res['sentiment'] = paralleldots.sentiment(s)['probabilities']
            res['response'] = apiai_response(s)

        except Exception as e:
            print(e)
            res['error'] = "Emotion key not found"
    else:
        res['error'] = "Method not supported"
    return JsonResponse(res, safe=False)
Code Example #29
def getNLP(text, entities = {}):

	print('\n\nSTATEMENT:')
	print(text)

	global client
	global count
	count += 1
	
	emotionResponse = paralleldots.emotion(text)
	emotions = emotionResponse['emotion']
	print('\nEMOTIONS:')
	print(emotions)
	maxEmotion = ''
	maxEmotionMag = 0
	for emotion in emotions:
		if maxEmotionMag < emotions[emotion]:
			maxEmotionMag = emotions[emotion]
			maxEmotion = emotion
	print('\nWINNING EMOTION')
	print(maxEmotion)
	
	# The text to analyze
	document = types.Document(
		content=text,
		type=enums.Document.Type.PLAIN_TEXT)
	
	#Get a subject
	entityIterator = client.analyze_entities(document=document)
	highest = 0
	subject = ''
	for key in entities:
		entities[key] /= 2
	for entity in entityIterator.entities:
		name = entity.name
		salience = entity.salience
		if name in entities:
			entities[name] += salience
		else:
			entities[name] = salience
		if (entities[name] >= 1) and (count >= 3):
			highest = max(entities[name], highest)
			print('\nSUBJECT')
			print(name)
	print('\nENTITIES')
	print(entities)
Code Example #30
def classify_emotion(input_file, output_file):
    df = pd.read_csv(input_file, header=0)
    drop_index = []
    for i in range(50):
        drop_index.append(i)
        tweet_id = df.loc[i, 'id']
        tweet = df.loc[i, 'text']
        res = paralleldots.emotion(tweet)
        print(res)
        emotion = res['emotion']
        insert_row = [(tweet_id, tweet, emotion['Bored'], emotion['Angry'],
                       emotion['Sad'], emotion['Fear'], emotion['Happy'],
                       emotion['Excited'])]
        print(insert_row)
        append_list_as_row(output_file, insert_row, [
            'id', 'text', 'Bored', 'Angry', 'Sad', 'Fear', 'Happy', 'Excited'
        ])
    df = df.drop(drop_index)
    df.to_csv(input_file, index=False)