import csv

import paralleldots


def main():
    paralleldots.set_api_key(API_KEY)

    # Read the message text from the third column of a pipe-delimited CSV,
    # then drop the header row.
    messages = []
    with open("messages.csv") as csv_file:
        reader = csv.reader(csv_file, delimiter='|')
        for row in reader:
            messages.append(row[2])
    messages = messages[1:]

    emotions = []
    response = paralleldots.batch_emotion(messages)
    for result in response['batch']:
        emotions.append(result['emotion']['emotion'])

    languages = []
    response = paralleldots.batch_language_detection(messages)
    for result in response['batch']:
        languages.append(result['output'])

    sentiments = []
    response = paralleldots.batch_sentiment(messages)
    for result in response['batch']:
        sentiments.append(result['sentiment'])

    data = {}
    data['languages'] = languages
    data['emotions'] = emotions
    data['sentiments'] = sentiments
    # generareRaport ("generate report") is defined elsewhere in the project.
    generareRaport(data)
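# A hedged sketch of the messages.csv layout main() expects. The column names
# and rows below are invented; only the pipe delimiter, the message in the
# third column, and the single header row are inferred from the parsing above.
# Running main() also assumes API_KEY and generareRaport are defined elsewhere.
with open("messages.csv", "w") as f:
    f.write("id|author|message\n")
    f.write("1|alice|I am thrilled with the results\n")
    f.write("2|bob|This took far too long\n")
main()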
import nltk

import paralleldots


def analyze_entry(raw_text):
    # Split the entry into sentences so each one can be scored individually.
    sent_detector = nltk.data.load('tokenizers/punkt/english.pickle')
    text_sentences = sent_detector.tokenize(raw_text.strip())

    # Document-level and sentence-level emotion/sentiment scores.
    emotions_overall = paralleldots.emotion(raw_text)
    sentiment_overall = paralleldots.sentiment(raw_text)
    emotions_sentences = paralleldots.batch_emotion(text_sentences)
    sentiment_sentences = paralleldots.batch_sentiment(text_sentences)

    overall = {}
    overall.update(emotions_overall)
    overall.update(sentiment_overall)

    sentences = {}
    sentences.update(emotions_sentences)
    sentences.update(sentiment_sentences)

    data = {
        'Overall': overall,
        'Sentences': sentences,
        'Source Text': raw_text
    }
    return data
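# A small usage sketch for analyze_entry; the sample text is invented, and a
# valid API key is assumed to have been set with paralleldots.set_api_key(...).
import json

report = analyze_entry("I passed the exam. The waiting was unbearable.")
print(json.dumps(report['Overall'], indent=2))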
import paralleldots


def make_request_pd(api, text):
    # Dispatch to the matching ParallelDots batch endpoint.
    if api == 'sentiment':
        response = paralleldots.batch_sentiment(text)
    elif api == 'emotion':
        response = paralleldots.batch_emotion(text)
    elif api == 'abuse':
        response = paralleldots.batch_abuse(text)
    else:
        # Fail loudly instead of returning an undefined name.
        raise ValueError("unsupported api: " + api)
    return response
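# An equivalent, table-driven sketch of the same dispatch (my restructuring,
# not from the source): map the API names straight to the batch functions.
_BATCH_APIS = {
    'sentiment': paralleldots.batch_sentiment,
    'emotion': paralleldots.batch_emotion,
    'abuse': paralleldots.batch_abuse,
}

def make_request_pd_v2(api, text):
    try:
        return _BATCH_APIS[api](text)
    except KeyError:
        raise ValueError("unsupported api: " + api)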
def getBatchEmotionFromText(self, texts):
    # Score every text, then keep the highest-scoring emotion label for each.
    predictions = paralleldots.batch_emotion(texts)
    emotions = []
    for prediction in predictions['emotion']:
        max_pred = 0
        emotion = ""
        for key in prediction.keys():
            if prediction[key] > max_pred:
                max_pred = prediction[key]
                emotion = key.lower()
        emotions.append(emotion)
    return emotions
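# The inner loop above is an argmax over a score dict; a shorter equivalent
# sketch (my restructuring, not from the source). It matches the original
# whenever at least one score is positive; the original returns "" otherwise.
def getBatchEmotionFromText_v2(self, texts):
    predictions = paralleldots.batch_emotion(texts)
    return [max(p, key=p.get).lower() for p in predictions['emotion']]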
import paralleldots


def get_emotion(sentence):
    paralleldots.set_api_key("AWDCWos9GlVND0R3Pf8L6D3NDjRAKQzDDWsgdtW0Pbw")
    # Naive sentence split on periods; a proper tokenizer such as punkt (used
    # elsewhere in this collection) would handle abbreviations better.
    text = sentence.split('.')
    response = paralleldots.batch_emotion(text)
    return process_emotions(response['emotion'])
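# process_emotions is not defined in this snippet; a hypothetical sketch,
# assuming the batch response is a list of {label: score} dicts and the goal
# is the single strongest emotion across all sentences:
def process_emotions(emotion_results):
    totals = {}
    for scores in emotion_results:
        for label, score in scores.items():
            totals[label] = totals.get(label, 0) + score
    return max(totals, key=totals.get).lower()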
import nltk

import paralleldots

# Sample passage used as input; the opening of the passage is truncated in
# the source, so the string begins mid-excerpt.
text = """Jews were required to frequent only Jewish-owned barbershops and beauty parlors; Jews were forbidden to be out on the streets between 8 P.M. and 6 A.M.; Jews were forbidden to attend theaters, movies or any other forms of entertainment; Jews were forbidden to use swimming pools, tennis courts, hockey fields or any other athletic fields; Jews were forbidden to go rowing; Jews were forbidden to take part in any athletic activity in public; Jews were forbidden to sit in their gardens or those of their friends after 8 P.M.; Jews were forbidden to visit Christians in their homes; Jews were required to attend Jewish schools, etc. You couldn't do this and you couldn't do that, but life went on. Jacque always said to me, "I don't dare do anything anymore, 'cause I'm afraid it's not allowed." """

sent_detector = nltk.data.load('tokenizers/punkt/english.pickle')
text_sentences = sent_detector.tokenize(text.strip())

# Document-level and sentence-level emotion/sentiment scores.
emotions_overall = paralleldots.emotion(text)
sentiment_overall = paralleldots.sentiment(text)
emotions_sentences = paralleldots.batch_emotion(text_sentences)
sentiment_sentences = paralleldots.batch_sentiment(text_sentences)

data = {}
data['Overall'] = []
data['Overall'].append(emotions_overall)
data['Overall'].append(sentiment_overall)
data['Sentences'] = []
data['Sentences'].append(emotions_sentences)
data['Sentences'].append(sentiment_sentences)

# with open('data.txt','w') as outfile:
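# The snippet breaks off at the commented-out file write above; a hedged
# completion, assuming the intent was to serialize `data` as JSON:
import json

with open('data.txt', 'w') as outfile:
    json.dump(data, outfile, indent=2)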
import paralleldots

paralleldots.set_api_key("ASLpEycGzwLshxgSF5vZEcoZqvTvqwQ2snGDirHov5Q")

# For a single sentence
text = "I am trying to imagine you with a personality."
response = paralleldots.emotion(text)
print(response)

# For multiple sentences as an array
text = ["I am trying to imagine you with a personality.", "This is shit."]
response = paralleldots.batch_emotion(text)
print(response)
import paralleldots
import requests


def main():
    # https://developer.spotify.com/documentation/web-api/reference/browse/get-recommendations/
    recommendationToken = 'BQBHOYwD4SvHekv_FpdRoFLgWNGW7_dirrgPd2xjFrEBZy-tNy7RsLOQlQjNc15SliWzfeo_fJa3jdRx06JqBEhxRnF2dV9-3gc4bmHHz-WBiWFSi57VWOVG1HHbpeqZibXDTAXkveB92Uc'
    # https://developer.spotify.com/documentation/web-api/reference/tracks/ (see /v1/audio-features/{id})
    featureToken = 'BQDRAn4Ip5H0T86o3b9hgMAB-H-IK5OUq8FlYfkWiNHNILf2HXrev4GYzPSzaoHLIYXFsyQ-3zWZzwetGYJz8DSR7X5Du23CUWhgAqbzjMHteTtcbMdSkIvWPP-SxvFMVZp7xvYCdDVMI2k'
    paralleldots.set_api_key("CWTPMu1Z9kaCUVeghKKecMyXLbfZPpfUWEnjytlHh4Q")

    cur, conn = setUpDatabase('emotify.db')

    input_text = [
        "I am counting my calories, yet I really want dessert.",
        "If you like tuna and tomato sauce- try combining the two. It’s really not as bad as it sounds.",
        "I would have gotten the promotion, but my attendance wasn’t good enough.",
        "I was very proud of my nickname throughout high school but today- I couldn’t be any different to what my nickname was.",
        "I really want to go to work, but I am too sick to drive."
    ]

    # Start from a clean slate.
    cur.execute("DROP TABLE IF EXISTS Valence")
    cur.execute("DROP TABLE IF EXISTS Energy")
    cur.execute("DROP TABLE IF EXISTS Sentiment")
    cur.execute("DROP TABLE IF EXISTS Emotion")
    conn.commit()

    sentiment_text = paralleldots.batch_sentiment(input_text)
    emotion_text = paralleldots.batch_emotion(input_text)

    # Map the text scores onto Spotify's audio-feature scales: negative
    # sentiment drives target valence, boredom drives target energy.
    negative_valence = []
    bored_energy = []
    for item in sentiment_text['sentiment']:
        negative_valence.append(str(item['negative']))
    for item in emotion_text['emotion']:
        bored_energy.append(str(item['Bored']))

    for i in range(len(negative_valence)):
        recommendation_headers = {
            'Accept': 'application/json',
            'Content-Type': 'application/json',
            'Authorization': 'Bearer ' + recommendationToken,
        }
        recommendation_params = (
            ('limit', '20'),
            ('market', 'US'),
            ('seed_tracks', '0c6xIDDpzE81m2q797ordA'),
            ('target_energy', bored_energy[i]),
            ('target_valence', negative_valence[i]),
            ('min_popularity', '0'),
        )
        spotifyRecommendations = requests.get(
            'https://api.spotify.com/v1/recommendations',
            headers=recommendation_headers,
            params=recommendation_params)
        spotifyRecs = spotifyRecommendations.json()
        setUpSpotifyValence(featureToken, spotifyRecs, cur, conn)
        setUpSpotifyEnergy(featureToken, spotifyRecs, cur, conn)
        conn.commit()

    setUpSentiment(sentiment_text, spotifyRecs, cur, conn)
    setUpEmotion(emotion_text, spotifyRecs, cur, conn)
    setUpEmotify(cur, conn)
    conn.close()
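# setUpDatabase (like the other setUp* helpers) is defined elsewhere in the
# project; a hypothetical sketch of what it likely does, assuming sqlite3 and
# a database file stored next to this script:
import os
import sqlite3

def setUpDatabase(db_name):
    path = os.path.dirname(os.path.abspath(__file__))
    conn = sqlite3.connect(os.path.join(path, db_name))
    cur = conn.cursor()
    return cur, conn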
from paralleldots import *


def test():
    set_api_key("Put your Api key here")
    category = {
        "finance": ["markets", "economy", "shares"],
        "world politics": ["diplomacy", "UN", "war"],
        "india": ["congress", "india", "bjp"]
    }

    # Single-document endpoints
    print(similarity("Sachin is the greatest batsman",
                     "Tendulkar is the finest cricketer"))
    print(sentiment("Come on, lets play together"))
    print(ner("Narendra Modi is the prime minister of India", "en"))
    print(taxonomy("Michael Jordan of the Chicago Bulls is getting a 10-hour Netflix documentary in 2019"))
    print(keywords("Prime Minister Narendra Modi tweeted a link to the speech Human Resource Development Minister Smriti Irani made in the Lok Sabha during the debate on the ongoing JNU row and the suicide of Dalit scholar Rohith Vemula at the Hyderabad Central University."))
    print(phrase_extractor("Prime Minister Narendra Modi tweeted a link to the speech Human Resource Development Minister Smriti Irani made in the Lok Sabha during the debate on the ongoing JNU row and the suicide of Dalit scholar Rohith Vemula at the Hyderabad Central University."))
    print(emotion("Did you hear the latest Porcupine Tree song ? It's rocking !"))
    print(intent("Finance ministry calls banks to discuss new facility to drain cash"))
    print(abuse("you f**king a$$hole"))
    print(custom_classifier("Narendra Modi is the prime minister of India", category))

    # Batch endpoints (the same short document list is reused for several calls)
    docs = ["drugs are fun", "don't do drugs, stay in school",
            "lol you a f*g son", "I have a throat infection"]
    print(batch_intent(docs))
    print(batch_abuse(["drugs are fun", "dont do drugs, stay in school"]))
    print(batch_sentiment(docs))
    print(batch_phrase_extractor(docs))
    print(batch_taxonomy(["Michael Jordan of the Chicago Bulls is getting a 10-hour Netflix documentary in 2019",
                          "Michael Jordan of the Chicago Bulls is getting a 10-hour Netflix documentary in 2019"]))
    print(batch_ner(["Michael Jordan of the Chicago Bulls is getting a 10-hour Netflix documentary in 2019",
                     "Michael Jordan of the Chicago Bulls is getting a 10-hour Netflix documentary in 2019"]))
    print(batch_emotion(docs))

    # Image endpoints
    print(facial_emotion_url("https://i.imgur.com/klb812s.jpg"))
    print(object_recognizer_url("https://i.imgur.com/klb812s.jpg"))

    # Sarcasm endpoints
    print(sarcasm("The movie that i watched last night is so funny that i get rolled out with laughter"))
    print(batch_sarcasm(["The movie that i watched last night is so funny that i get rolled out with laughter",
                         "I want to spend my life alone"]))