def get_sentiments(query):
    """Search Twitter for `query` and tally ParallelDots sentiment per tweet.

    Prints each tweet with its raw sentiment response, then a colored
    summary of the positive/neutral/negative counts.

    Bug fix: the original mixed Python 2 `print "..."` statements with
    Python 3 `print()` calls, which is a syntax error under Python 3; the
    summary lines are now `print()` calls. Counting logic is unchanged:
    anything that is not "positive" or "neutral" counts as negative.
    """
    p = 0   # positive tweet count
    n = 0   # neutral tweet count
    ne = 0  # negative tweet count
    set_api_key('2Z4UlTNyfjXwIn5CGLy4EvS5IaySrLFfJDiMSPGCo3o')
    get_api_key()
    public_tweets = api.search(query)
    for tweet in public_tweets:
        print(
            colored(
                "++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++",
                color='blue'))
        print(colored(tweet.text, color='red'))
        r = sentiment(tweet.text)
        print(colored(r, color='red'))
        result = r['sentiment']
        if result == "positive":
            p = p + 1
        elif result == "neutral":
            n = n + 1
        else:
            ne = ne + 1
    print(
        colored(
            "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++",
            color='green'))
    print("Maximum positive comments: ", p)
    print("Maximum neutral comments: ", n)
    print("Maximum negative comments: ", ne)
    print(
        colored(
            "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++",
            color='green'))
def analyze_entry(raw_text):
    """Run ParallelDots emotion and sentiment analysis on `raw_text`.

    The text is analyzed both as a whole and sentence-by-sentence
    (sentences split with NLTK's punkt tokenizer).

    Returns a dict with keys:
      'Overall'     -- merged emotion + sentiment result for the full text
      'Sentences'   -- merged per-sentence batch emotion + sentiment results
      'Source Text' -- the raw input, echoed back

    Cleanup: removed the large block of commented-out alternative
    implementations and debug prints from the original.
    """
    sent_detector = nltk.data.load('tokenizers/punkt/english.pickle')
    text_sentences = sent_detector.tokenize(raw_text.strip())

    # Whole-document analysis.
    emotions_overall = paralleldots.emotion(raw_text)
    sentiment_overall = paralleldots.sentiment(raw_text)
    # Per-sentence batch analysis.
    emotions_sentences = paralleldots.batch_emotion(text_sentences)
    sentiment_sentences = paralleldots.batch_sentiment(text_sentences)

    overall = {}
    overall.update(emotions_overall)
    overall.update(sentiment_overall)

    sentences = {}
    sentences.update(emotions_sentences)
    sentences.update(sentiment_sentences)

    return {
        'Overall': overall,
        'Sentences': sentences,
        'Source Text': raw_text,
    }
def get_tweets(username):
    """Fetch a user's 20 latest tweets, score each with ParallelDots
    sentiment, and print whether the user skews positive, negative or
    neutral on Twitter."""
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_token_secret)
    api = tweepy.API(auth)

    # Keep only the tweet text.  # CSV file created
    fetched = api.user_timeline(screen_name=username, count=20)
    tmp = [item.text for item in fetched]

    positive_total = 0
    negative_total = 0
    neutral_total = 0
    print(tmp)

    from paralleldots import set_api_key, get_api_key, sentiment
    set_api_key("6dm9k0RomplpimtZETEkwp6JzMTrPSDhhMIiGPGmu68")
    get_api_key()

    for text in tmp:
        verdict = sentiment(text)
        print(text, "-->", verdict)
        time.sleep(1)  # pause between API calls
        label = verdict['sentiment']
        if label == 'positive':
            positive_total += 1
        if label == 'negative':
            negative_total += 1
        if label == 'neutral':
            neutral_total += 1

    # Report only when one class strictly dominates the other two.
    if positive_total > negative_total and positive_total > neutral_total:
        print("This user is positive on Twitter")
    if negative_total > neutral_total and negative_total > positive_total:
        print("This user is negative on Twitter")
    if neutral_total > negative_total and neutral_total > positive_total:
        print("This user is neutral on Twitter")
def get_tweets(username):  # sentimental analysis
    """Classify a Twitter user as "positive", "negative" or "neutral" from
    the ParallelDots sentiment of their 20 most recent tweets."""
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_token_secret)
    api = tweepy.API(auth)

    timeline = api.user_timeline(screen_name=username, count=20)
    # store the tweets in tmp list  # CSV file created
    tmp = [entry.text for entry in timeline]

    var1 = 0  # positive tweets
    var2 = 0  # negative tweets
    var3 = 0  # neutral tweets
    print(tmp)

    from paralleldots import set_api_key, get_api_key, sentiment
    set_api_key("6dm9k0RomplpimtZETEkwp6JzMTrPSDhhMIiGPGmu68")
    get_api_key()

    for message in tmp:
        a = sentiment(message)
        print(a)
        label = a['sentiment']
        if label == 'positive':  # checking positive tweets
            var1 += 1
        if label == 'negative':  # checking negative tweets
            var2 += 1
        if label == 'neutral':   # checking neutral tweets
            var3 += 1

    if var1 > var2 and var1 > var3:    # checking the person is positive or not
        print("positive")
    if var2 > var3 and var2 > var1:    # checking the person is negative or not
        print("negative")
    if var3 > var2 and var3 > var1:    # checking the person is neutral or not
        print("neutral")
def test_sentiments():
    """Fetch tweets via get_tweets() and return the ParallelDots sentiment
    result for each one, in order."""
    fetched = get_tweets()
    set_api_key(paralleldots_api_key)  # authenticate before any API call
    return [sentiment(item.text) for item in fetched]
def sent_analysis():
    """Tally ParallelDots sentiment over the module-level `tweets` (filled
    by query()) and print the dominant sentiment class with its count.

    Bug fix: the original neutral branch did `neutral = negative + 1`,
    overwriting the neutral tally with the negative count instead of
    incrementing it. Also fixed the "SENTIMNET" typo in the output.
    """
    positive = 0
    negative = 0
    neutral = 0
    query()
    from paralleldots import set_api_key, sentiment
    set_api_key("")  # NOTE(review): empty API key — calls will fail until one is supplied
    paralleldots.get_api_key()
    for tweet in tweets:
        sentiment_values = sentiment(tweet.text)['sentiment']
        if sentiment_values == "positive":
            positive += 1
        elif sentiment_values == "negative":
            negative += 1
        else:
            neutral += 1  # was: neutral = negative + 1 (bug)
    if positive > negative and positive > neutral:
        print("POSITIVE SENTIMENT with count" + " " + str(positive))
    elif negative > positive and negative > neutral:
        print("NEGATIVE SENTIMENT with count" + " " + str(negative))
    else:
        print("NEUTRAL SENTIMENT with count" + " " + str(neutral))
def predict_sentiment_with_paralleldots(data_df):
    """Batch-score every text in the pandas Series `data_df` with the
    ParallelDots sentiment API and return the response's 'sentiment' field."""
    import paralleldots
    # Setting your API key
    paralleldots.set_api_key(PARALLEL_DOTS_KEY)
    response = paralleldots.sentiment(data_df.tolist())
    return response['sentiment']
def sent_analysis():
    """Tally ParallelDots sentiment over the module-level `tweets` (filled
    by query()) and print the dominant sentiment class with its count.

    Bug fix: the original neutral branch did `neutral = negative + 1`,
    overwriting the neutral tally with the negative count instead of
    incrementing it. Also fixed the "SENTIMNET" typo in the output.
    """
    positive = 0
    negative = 0
    neutral = 0
    query()
    from paralleldots import set_api_key, sentiment
    # Setting API key
    set_api_key("F6IhnjekXoKsgzOwy1ZsGCX6ph76YK5F6SzFf968gOk")
    # Viewing API key
    paralleldots.get_api_key()
    for tweet in tweets:
        sentiment_values = sentiment(tweet.text)['sentiment']
        if sentiment_values == "positive":
            positive += 1
        elif sentiment_values == "negative":
            negative += 1
        else:
            neutral += 1  # was: neutral = negative + 1 (bug)
    if positive > negative and positive > neutral:
        print("POSITIVE SENTIMENT with count" + " " + str(positive))
    elif negative > positive and negative > neutral:
        print("NEGATIVE SENTIMENT with count" + " " + str(negative))
    else:
        print("NEUTRAL SENTIMENT with count" + " " + str(neutral))
def home(request):
    """Django view: on POST, classify the submitted text with ParallelDots
    and render the page with the sentiment label plus a matching emoticon
    image filename ("na" when no image applies)."""
    user_sent = ""
    user_input = ""
    fname = "na"
    if request.POST:
        user_input = request.POST.get('user_input', '')
        lang_code = "en"
        paralleldots.set_api_key("NlxGNPr4VRsjdyORAdKFWWraVX2HNGdBw0JUXCJ9uYg")
        user_response = paralleldots.sentiment(user_input, lang_code)
        user_sent = user_response['sentiment']
        # Sentiment label -> emoticon asset; anything unrecognized keeps "na".
        image_for = {
            'neutral': "emoticon-1634586_640.png",
            'negative': "emoticon-1634515_640.png",
            'positive': "smiley-163510_640.jpg",
        }
        fname = image_for.get(user_sent, "na")
    return render(request, 'jack/home.html', {
        'resp': user_sent,
        'fname': fname,
        'user_input': user_input
    })
def get_vects(text):
    """Return (keyword list, emotion (label, probability) pairs, sentiment
    dict) for `text`, all from the ParallelDots API."""
    keywords_vect = [entry['keyword'] for entry in keywords(text)['keywords']]
    emotion_vect = list(emotion(text)['probabilities'].items())
    sentiment_vect = sentiment(text)
    # Strip the API quota/usage metadata before handing the dict back.
    del sentiment_vect['usage']
    return keywords_vect, emotion_vect, sentiment_vect
def feed_view(request):
    """Django view: render the feed of all posts (newest first), tagging each
    post with a Positive/Negative review of its caption and a has_liked flag
    for the current user; unauthenticated users are redirected to /login/."""
    # check whether used is logged in
    user = check_validation(request)
    if user:
        # sort posts in ascending order of time
        posts = PostModel.objects.all().order_by('-created_on')
        # iterating through all posts
        for post in posts:
            # setting api for parallel dots to analyse sentiments
            # NOTE(review): re-set on every iteration; could be hoisted out of the loop.
            set_api_key('C2TJEgxONUsOJgbfTRzJZk896mQDzl5aADdNQrYzJrQ')
            # checking whether comment is positive or negative
            if post.caption != None:
                response = sentiment(str(post.caption))
                # NOTE(review): treats response['sentiment'] as a numeric score
                # thresholded at 0.5, but other call sites in this codebase get
                # a string label ('positive'/'negative'/'neutral') from the same
                # API — confirm which response shape this deployment returns.
                if response['sentiment'] >= 0.5:
                    post.review = 'Positive'
                elif response['sentiment'] < 0.5:
                    post.review = 'Negative'
            # checking for existing like
            existing_like = LikeModel.objects.filter(post_id=post.id, user=user).exists()
            if existing_like:
                post.has_liked = True
        # redirecting to feeds
        return render(request, 'feed.html', {'posts': posts})
    # if user not logged in
    else:
        return redirect('/login/')
def sixth():
    """Prompt for a Twitter username, fetch their 20 latest tweets, and
    print whether the account skews positive, negative or neutral
    according to ParallelDots sentiment.

    Bug fixes: tweepy's keyword argument is `screen_name`, not
    `screenname` (the original raised TypeError on the user_timeline
    call); and the "postive" typo in the output was corrected.
    """
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_token_secret)
    api = tweepy.API(auth)
    username = input("enter any user id:")
    tweets = api.user_timeline(screen_name=username, count=20)
    # Keep only the tweet text.
    tmp = [tweet.text for tweet in tweets]
    flotpos = 0  # positive count
    flotneg = 0  # negative count
    flotneu = 0  # neutral count
    print(tmp)
    from paralleldots import set_api_key, get_api_key, sentiment
    set_api_key("60TE8tX8lV1KIy8OhpGEUpLRa4RvyJaXA7IsIEXt6x4")
    get_api_key()
    for t in tmp:
        a = sentiment(t)
        if a['sentiment'] == 'positive':
            flotpos += 1
        if a['sentiment'] == 'negative':
            flotneg += 1
        if a['sentiment'] == 'neutral':
            flotneu += 1
    if (flotpos > flotneg) and (flotpos > flotneu):
        print("positive")
    if (flotneg > flotneu) and (flotneg > flotpos):
        print("negative")
    if (flotneu > flotneg) and (flotneu > flotpos):
        print("neutral")
def test_sentiments():
    """Run ParallelDots sentiment analysis on every tweet returned by
    getSearch() and return the list of raw API responses."""
    found = getSearch()
    set_api_key(" ")  # placeholder key
    results = []
    for item in found:
        results.append(sentiment(item.text))
    return results
def trySentiment(request):
    """Django view: analyze the POSTed 'sent' text with ParallelDots and
    render trial.html with the sentiment answer.

    Bug fix: the original returned None (an invalid Django response) for
    any non-POST request; GET now renders the page with an empty answer.
    """
    ans = ''
    if request.method == 'POST':
        sentence = request.POST.get('sent')
        data = sentiment(sentence)
        ans = data['sentiment']
        print(ans)
    return render(request, 'trial.html', {'ans': ans})
def SentAnalysis():
    """Collect the ParallelDots sentiment response for every tweet returned
    by GetSearch() and return them as a list."""
    found = GetSearch()
    set_api_key("5Ilq8t88HXC0EYjVzpCDqqnQSlPJm5mJ9faJTnigwG4")
    return [sentiment(item.text) for item in found]
def return_sentiments(msg):
    """Return a one-line summary string of the ParallelDots negative /
    positive / neutral scores for `msg` (English)."""
    paralleldots.set_api_key("b6IJBihauZBESHXNQdWtO9ODVyzJDauTG3ntQePKRDY")
    scores = paralleldots.sentiment(msg, "en")['sentiment']
    parts = [
        "Negative % " + str(scores['negative']),
        " Positive % " + str(scores['positive']),
        " Neutral % " + str(scores['neutral']),
    ]
    return "".join(parts)
def getuserdatared():
    """Read a Reddit submission URL from the `stackidentry` entry widget,
    score each top-level comment with ParallelDots sentiment, and show a
    seaborn bar chart of the negative/neutral/positive frequencies; on any
    failure, display an error label in `fourth_window`."""
    #Code for getting User Input's data and extracting data from Stack Overflow with Try Exception Handling
    try:
        redurl=stackidentry.get()
        submission = reddit.submission(url=redurl)
        # Frequency table of dominant sentiment per comment.
        SenDict = {"negative":0, "neutral":0, "positive":0}
        for top_level_comment in submission.comments:
            # Skip praw "load more comments" placeholders.
            if isinstance(top_level_comment, MoreComments):
                continue
            text = top_level_comment.body
            #print(text)
            RedditPostSentiment = paralleldots.sentiment(text)['sentiment']
            # Pick the label with the highest score — assumes the API returns
            # a label->score mapping here; TODO confirm response shape.
            LoopSen=max(RedditPostSentiment, key = lambda x: RedditPostSentiment.get(x))
            if LoopSen == "negative":
                SenDict["negative"]+=1
            elif LoopSen == "positive":
                SenDict["positive"]+=1
            elif LoopSen == "neutral":
                SenDict["neutral"]+=1
            else:
                pass
        # Plot the tallies as a bar chart.
        df = pd.DataFrame.from_dict(SenDict, orient = 'index')
        df.reset_index(inplace=True)
        df.columns=['Sentiment', 'Frequency']
        plt.figure(figsize=(15,5))
        sns.barplot(x="Sentiment", y="Frequency", data = df)
        plt.show()
    except Exception:
        # NOTE(review): broad except — any failure (bad URL, API error,
        # plotting error) shows the same "bad url" message.
        errortext=Label(fourth_window).config(text="")
        errortext=Label(fourth_window, text="Please enter a correct url")
        errortext.grid(row=2, column=1)
def sentimentAnalysis():
    """Tally tweets (from the module-level `tweets`, filled by query()) as
    positive versus everything-else and print which side wins.

    Note: only the 'positive' label is counted separately — neutral and
    negative tweets both land in the negative tally.
    """
    positive_sentiment = 0
    negative_sentiment = 0
    query()
    from paralleldots import set_api_key, get_api_key, sentiment
    set_api_key("8dyQhJPFerUALsn2lBpMAftocXOIr6bAFb6vJcrEYYM")
    get_api_key()
    for tweet in tweets:
        label = sentiment(tweet.text)['sentiment']
        if label == "positive":
            positive_sentiment += 1
        else:
            negative_sentiment += 1
    if positive_sentiment > negative_sentiment:
        print("Sentiment is Positive ")
    else:
        print("Sentiment is Negative")
def sentimentVal(receivedMessage):
    """Return the dominant sentiment label for `receivedMessage`.

    Bug fixes: the original analyzed an unrelated global `read_Object`
    instead of its own parameter, and `emotion` was referenced while
    unbound if every score was <= 0; it is now initialized to None.
    """
    results = paralleldots.sentiment(receivedMessage, "en")
    output = 0
    emotion = None
    # Keep the label with the highest score.
    for sense, num in results['sentiment'].items():
        if num > output:
            output = num
            emotion = sense
    return emotion
def post_view(request):
    """Django view: handle post creation. On a valid POST, save the post,
    upload its image to Imgur, run a Clarifai NSFW check on the uploaded
    image and a ParallelDots sentiment check on the caption, and keep the
    post only if both pass; otherwise delete it. GET renders the empty
    form; unauthenticated users are redirected to /login/."""
    user = check_validation(request)
    if user:
        if request.method == 'POST':
            form = PostForm(request.POST, request.FILES)
            if form.is_valid():
                image = form.cleaned_data.get('image')
                caption = form.cleaned_data.get('caption')
                post = PostModel(user=user, image=image, caption=caption)
                post.save()
                path = str(BASE_DIR +'/'+ post.image.url)
                client = ImgurClient(YOUR_CLIENT_ID, YOUR_CLIENT_SECRET)
                post.image_url = client.upload_from_path(path, anon=True)['link']
                #Creating instance of an API with KEY
                app = ClarifaiApp(api_key=KEY)
                model = app.models.get('nsfw-v1.0')
                response_image = model.predict_by_url(url=post.image_url)
                # Confidence of the first (top) NSFW concept for the image.
                safe=response_image['outputs'][0]['data']['concepts'][0]['value']
                set_api_key(PKEY)
                response = sentiment(str(caption))
                # NOTE(review): thresholds the sentiment numerically at 0.6,
                # but other call sites in this codebase receive a string label
                # from this API — confirm the response shape in use here.
                sentiment_value = response['sentiment']
                if sentiment_value > 0.6 and safe > 0.6:
                    post.save()
                    success_message = 'Post can be submitted'
                    return render(request, 'post.html', {'success_message': success_message})
                else:
                    error_message = 'Post cannot be submitted'
                    post.delete()
                    return render(request, 'post.html', {'error_message': error_message})
            return redirect('/feed/')
        else:
            form = PostForm()
            return render(request, 'post.html', {'form' : form})
    else:
        return redirect('/login/')
def get_vects(text):
    """Return [keywords, emotion, sentiment] for `text`: keywords come from
    the local `mch_text` matcher, the other two from ParallelDots."""
    print(str(text))
    keyword_part = {"keywords": mch_text.subs(text)}
    emotion_part = emotion(text)
    sentiment_part = sentiment(text)
    return [keyword_part, emotion_part, sentiment_part]
def Index(request):
    """JSON endpoint: read {'sentiment': <text>} from the request body and
    return the ParallelDots positive/negative/neutral scores for it."""
    payload = json.loads(request.body.decode('utf-8'))
    text = payload["sentiment"]
    scores = dict(paralleldots.sentiment(text, lang_code='en')['sentiment'])
    data = {
        'positive': scores['positive'],
        'negative': scores['negative'],
        'neutral': scores['neutral'],
    }
    return JsonResponse(data, safe=False)
def paralleldots_api(text):
    """Run ParallelDots emotion and sentiment analysis on `text`.

    Returns {'emot': <emotion result>, 'sent': <sentiment result>}.

    Cleanup: removed the dead `text_todo` alias, the commented-out return,
    and the stale comments referring to a non-existent intent analysis.
    """
    api_key = ""  # NOTE(review): empty key — API calls will fail until one is set
    paralleldots.set_api_key(api_key)
    emot = paralleldots.emotion(text)       # emotion analysis
    sent = paralleldots.sentiment(text)     # sentiment analysis
    return {'emot': emot['emotion'], 'sent': sent['sentiment']}
def sentiments():
    """Search tweets for a user-supplied hashtag and print the ParallelDots
    sentiment of each one.

    Bug fix: the original extracted `tweet.text` but then sent the
    hard-coded string "Hi " to the sentiment API, so every tweet got the
    same canned result; the actual tweet text is analyzed now.
    """
    tweet_input = input("Enter the hashtag you want to search: ")
    tweets = api.search(q=tweet_input)
    print(tweets)
    for tweet in tweets:
        text = tweet.text
        print("\nSentiments")
        print(paralleldots.sentiment(text)["sentiment"])
def Sentiment(file):
    """Score the tweet in the 3rd comma-separated field of each line of
    `file` with ParallelDots sentiment and append the serialized inner
    result to the same file handle.

    NOTE(review): this writes to `file` while iterating it for reading, and
    calls file.close() after the with-block has already closed the handle —
    presumably `file` is opened in a read/write mode; verify the caller.
    """
    set_api_key("VIJL2MNSIraV6xzz2fNepEPdGX86Rxd7s0JvCqwqAEI")
    with file as f:
        for line in f:
            # assumes CSV-ish lines with the tweet text in field index 2 — TODO confirm
            tweet = line.split(',')[2]
            score = sentiment(tweet)
            data = json.dumps(score)
            # Crude extraction of the substring between the 2nd '{' and the
            # first '}' of the serialized response.
            result = data.split('{')[2]
            finalResult = result.split('}')[0]
            file.write(', ' + finalResult)
    file.close()
def test():
    """Smoke-test a few ParallelDots endpoints (sentiment, emotion, abuse)
    by printing their raw responses for fixed sample inputs."""
    set_api_key("tEgayBzxTpAZZNppX62n8niYYoHeTna20DqQw8S9TQU")
    sentiment_result = sentiment("Come on, lets play together")
    print(sentiment_result)
    emotion_result = emotion("i have nothing else to do. life is so boring man.")
    print(emotion_result)
    abuse_result = abuse("you f**king a$$hole")
    print(abuse_result)
def get_sentiment(string):
    """Returns sentiment and sentiment score of a string as a PD Series.

    Picks the highest-scoring label from the ParallelDots response (ties
    broken by label, matching the original (value, key) descending sort).
    On failure, prints the offending string and returns None, as before.

    Fixes: replaced the bare `except:` (which also swallowed
    KeyboardInterrupt/SystemExit) with `except Exception`, and the
    sort-then-index idiom with `max`.
    """
    try:
        scores = sentiment(string)['sentiment']
        sent_type, sent_score = max(scores.items(), key=lambda kv: (kv[1], kv[0]))
        return pd.Series([sent_type, sent_score])
    except Exception:
        print("Error in ", string)
def sentiments(message):
    """Map the ParallelDots sentiment scores for `message` to an integer:
    1 when positive dominates, -1 when negative dominates, 0 when neutral
    dominates either side.

    Bug fix: when the positive and negative scores were exactly equal the
    original fell through both branches and implicitly returned None;
    that tie now returns 0 (neutral). Also renamed the local so it no
    longer shadows the `sentiment` function name.
    """
    scores = paralleldots.sentiment(message)['sentiment']
    print(scores)
    if scores['positive'] < scores['negative']:
        # Negative beats positive; neutral may still beat negative.
        return 0 if scores['negative'] < scores['neutral'] else -1
    elif scores['negative'] < scores['positive']:
        # Positive beats negative; neutral may still beat positive.
        return 0 if scores['positive'] < scores['neutral'] else 1
    # positive == negative: treat as neutral.
    return 0
def Sentiment(tweet):
    """Return (negative, neutral, positive) ParallelDots probabilities for
    `tweet`, printing them as a side effect.

    Cleanup: removed the dead json.dumps / string-split sequence that
    computed `finalResult` and never used it.
    """
    set_api_key("VIJL2MNSIraV6xzz2fNepEPdGX86Rxd7s0JvCqwqAEI")
    score = sentiment(tweet)
    # assumes the response carries a 'probabilities' mapping — other call
    # sites read score['sentiment']; TODO confirm the API response shape.
    negative = score['probabilities']['negative']
    neutral = score['probabilities']['neutral']
    positive = score['probabilities']['positive']
    print(negative, neutral, positive)
    return (negative, neutral, positive)
def post_view(request):
    """Django view: handle post creation. On a valid POST, save the post,
    upload its image to Imgur, run the (module-level) Clarifai model on the
    uploaded image and a ParallelDots sentiment check on the caption, and
    keep the post only if both scores pass; otherwise delete it. GET
    renders the empty form; unauthenticated users go to /login/."""
    user = check_validation(request)
    if user:
        if request.method == 'POST':
            form = PostForm(request.POST, request.FILES)
            if form.is_valid():
                image = form.cleaned_data.get('image')
                caption = form.cleaned_data.get('caption')
                post = PostModel(user=user, image=image, caption=caption)
                post.save()
                z = post.image.url
                # Windows-style path join to the locally saved image.
                path = str(BASE_DIR + '\\' + post.image.url)
                client = ImgurClient(
                    '0e144dffb567600',
                    '17ed6b09b6b16a35f32bfcd307e1b21cb132b21e')
                post.image_url = client.upload_from_path(path, anon=True)['link']
                # using calrifai
                response = model.predict_by_url(url=post.image_url)
                # Confidence of the top concept returned for the image.
                right = response["outputs"][0]["data"]["concepts"][0]["value"]
                # using paralleldots
                set_api_key(PKEY)
                response = sentiment(str(caption))
                # NOTE(review): thresholds the sentiment numerically at 0.6,
                # but other call sites in this codebase receive a string label
                # from this API — confirm the response shape in use here.
                sentiment_score = response["sentiment"]
                if sentiment_score >= 0.6 and right > 0.5:
                    post.save()
                    saved_message = 'Post is successfully submitted'
                    return render(request, 'error.html', {'context': saved_message})
                else:
                    error_message = 'Post cannot be submitted'
                    post.delete()
                    return render(request, 'error.html', {'context': error_message})
            return redirect('/post/')
        else:
            form = PostForm()
            return render(
                request,
                'post.html',
                {'form': form},
            )
    else:
        return redirect('/login/')