Example #1
def return_sentiments(msg):
    paralleldots.set_api_key("b6IJBihauZBESHXNQdWtO9ODVyzJDauTG3ntQePKRDY")
    response = paralleldots.sentiment(msg, "en")
    return "Negative % " + str(
        response['sentiment']['negative']) + " Positive % " + str(
            response['sentiment']['positive']) + " Neutral % " + str(
                response['sentiment']['neutral'])
Example #2
def SentAnalysis():
    lists = []
    tweets = GetSearch()
    set_api_key("5Ilq8t88HXC0EYjVzpCDqqnQSlPJm5mJ9faJTnigwG4")
    for tweet in tweets:
        lists.append(sentiment(tweet.text))
    return lists
Example #3
def test_sentiments():
    list_of_sents = []
    tweets = get_tweets()
    set_api_key(paralleldots_api_key)
    for tweet in tweets:
        list_of_sents.append(sentiment(tweet.text))
    return list_of_sents
Example #4
def nlp_function(data):
    paralleldots.set_api_key("pwYgvFI30sVIFqTDdbmLM68vbjYwnZ1shoCe8GXGQwk")
    text1 = data
    text2 = "this is rajeev"
    response = paralleldots.similarity(text1, text2)
    print(response)
    return response
Example #5
def sixth():
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_token_secret)
    api = tweepy.API(auth)
    username = input("enter any user id:")
    tweets = api.user_timeline(screen_name=username, count=20)
    tmp = []
    tweets_for_csv = [tweet.text for tweet in tweets]
    for j in tweets_for_csv:
        tmp.append(j)
    flotpos = 0
    flotneg = 0
    flotneu = 0
    print(tmp)
    from paralleldots import set_api_key, get_api_key, sentiment
    set_api_key("60TE8tX8lV1KIy8OhpGEUpLRa4RvyJaXA7IsIEXt6x4")
    get_api_key()
    for t in tmp:
        a = sentiment(t)
        if a['sentiment'] == 'positive':
            flotpos += 1
        if a['sentiment'] == 'negative':
            flotneg += 1
        if a['sentiment'] == 'neutral':
            flotneu += 1
    if (flotpos > flotneg) and (flotpos > flotneu):
        print("positive")
    if (flotneg > flotneu) and (flotneg > flotpos):
        print("negative")
    if (flotneu > flotneg) and (flotneu > flotpos):
        print("neutral")
Example #6
def comment_view(request):
    user = check_validation(request)
    if user and request.method == 'POST':
        form = CommentForm(request.POST)

        if form.is_valid():
            post_id = form.cleaned_data.get('post').id
            post = PostModel.objects.filter(pk=post_id)
            comment_text = str(form.cleaned_data.get('comment_text'))
            set_api_key('0qqGfin1x8jlBmHYYft245Shx9YZdoZq8bi83ZlUYDs')
            review = sentiment(comment_text)
            print(review)

            if review['sentiment']:
                comment = CommentModel.objects.create(user=user, post_id=post_id, comment_text=comment_text,
                                                      review=review['sentiment'])
                print(comment.review)
                comment.save()
                return redirect('/feed/')

            else:
                return redirect('/feed/')
        else:
            return redirect('/feed/')
    else:
        return redirect('/login')
Example #7
    def get_top_similar_texts(user_query):
        """
        :return: Returns a list of triplets, where every triplet consists of :
                    1. the entire text which was used in finding the similarity with the user's text
                    2. the similarity between the text from file and user's input
                    3. the name of the location
        """

        paralleldots.set_api_key(TextToText.api_keys[TextToText.count])
        TextToText.count += 1
        if TextToText.count == len(TextToText.api_keys):
            TextToText.count = 0  # wrap around and reuse the first key

        sim_list = []
        list_cities = [
            'Vienna', 'London', 'Lisbon', 'Berlin', 'Bucharest', 'Copenhagen',
            'Edinburgh', 'Athens', 'Barcelona', 'Bern', 'St.Petersburg'
        ]
        for city in list_cities:
            with open(r"../Scrapping/textData/" + city + ".txt",
                      encoding="utf8") as file:
                for line in file.readlines()[:5]:
                    similarity = TextToText.get_similarity(text1=user_query,
                                                           text2=line)
                    try:
                        sim_list.append(
                            [line, similarity["similarity_score"], city])
                    except:
                        print("error")
        sim_list = sorted(sim_list, key=lambda x: x[1], reverse=True)
        return sim_list
Example #8
def sent_analysis():
    positive = 0
    negative = 0
    neutral = 0
    query()
    from paralleldots import set_api_key, get_api_key, sentiment

    set_api_key("")

    get_api_key()
    for tweet in tweets:
        tweet_text = tweet.text
        sentiment_type = sentiment(tweet_text)
        sentiment_values = sentiment_type['sentiment']
        if sentiment_values == "positive":
            positive = positive + 1
        elif sentiment_values == "negative":
            negative = negative + 1
        else:
            neutral = neutral + 1
    if positive > negative and positive > neutral:
        print("POSITIVE SENTIMENT with count" + " " + str(positive))
    elif negative > positive and negative > neutral:
        print("NEGATIVE SENTIMENT with count" + " " + str(negative))
    else:
        print("NEUTRAL SENTIMNET with count" + " " + str(neutral))
Example #9
def feed_view(request):

    # check whether the user is logged in
    user = check_validation(request)
    if user:
        # sort posts from newest to oldest
        posts = PostModel.objects.all().order_by('-created_on')
        # iterating through all posts
        for post in posts:
            # set the ParallelDots API key to analyse sentiment
            set_api_key('C2TJEgxONUsOJgbfTRzJZk896mQDzl5aADdNQrYzJrQ')
            # check whether the caption reads positive or negative

            if post.caption is not None:

                response = sentiment(str(post.caption))

                if response['sentiment'] >= 0.5:
                    post.review = 'Positive'
                elif response['sentiment'] < 0.5:
                    post.review = 'Negative'
            # checking for existing like
            existing_like = LikeModel.objects.filter(post_id=post.id,
                                                     user=user).exists()
            if existing_like:
                post.has_liked = True
        # render the feed page
        return render(request, 'feed.html', {'posts': posts})
    # if user not logged in
    else:
        return redirect('/login/')
Example #10
def sent_analysis():
    positive = 0
    negative = 0
    neutral = 0
    query()
    from paralleldots import set_api_key, get_api_key, sentiment
    # Setting the API key
    set_api_key("F6IhnjekXoKsgzOwy1ZsGCX6ph76YK5F6SzFf968gOk")
    # Viewing the API key
    get_api_key()
    for tweet in tweets:
        tweet_text = tweet.text
        sentiment_type = sentiment(tweet_text)
        sentiment_values = sentiment_type['sentiment']
        if sentiment_values == "positive":
            positive = positive + 1
        elif sentiment_values == "negative":
            negative = negative + 1
        else:
            neutral = neutral + 1
    if positive > negative and positive > neutral:
        print("POSITIVE SENTIMENT with count" + " " + str(positive))
    elif negative > positive and negative > neutral:
        print("NEGATIVE SENTIMENT with count" + " " + str(negative))
    else:
        print("NEUTRAL SENTIMNET with count" + " " + str(neutral))
Example #11
def test():
    set_api_key("write your api key here")
    similarity("Sachin is the greatest batsman",
               "Tendulkar is the finest cricketer")
    sentiment("Come on, lets play together")
    ner("Narendra Modi is the prime minister of India")
    keywords(
        "Prime Minister Narendra Modi tweeted a link to the speech Human Resource Development Minister Smriti Irani made in the Lok Sabha during the debate on the ongoing JNU row and the suicide of Dalit scholar Rohith Vemula at the Hyderabad Central University."
    )
    emotion("Did you hear the latest Porcupine Tree song ? It's rocking !")
    intent(
        "Finance ministry calls banks to discuss new facility to drain cash")
    abuse("you f**king a$$hole")
    batch_intent([
        "drugs are fun", "don\'t do drugs, stay in school",
        "lol you a f*g son", "I have a throat infection"
    ])
    batch_abuse([
        "drugs are fun", "don\'t do drugs, stay in school",
        "lol you a f*g son", "I have a throat infection"
    ])
    batch_ner([
        "drugs are fun", "don\'t do drugs, stay in school",
        "lol you a f*g son", "I have a throat infection"
    ])
    batch_sentiment([
        "drugs are fun", "don\'t do drugs, stay in school",
        "lol you a f*g son", "I have a throat infection"
    ])
    batch_phrase_extractor([
        "drugs are fun", "don\'t do drugs, stay in school",
        "lol you a f*g son", "I have a throat infection"
    ])
Example #12
def get_sentiments(query):
    p = 0
    n = 0
    ne = 0
    set_api_key('2Z4UlTNyfjXwIn5CGLy4EvS5IaySrLFfJDiMSPGCo3o')
    get_api_key()
    public_tweets = api.search(query)
    for tweet in public_tweets:
        text = tweet.text
        print(
            colored(
                "++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++",
                color='blue'))
        print(colored(tweet.text, color='red'))
        r = sentiment(tweet.text)
        print(colored(r, color='red'))
        result = r['sentiment']
        if result == "positive":
            p = p + 1
        elif r['sentiment'] == "neutral":
            n = n + 1
        else:
            ne = ne + 1
    print(
        colored(
            "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++",
            color='green'))
    print "Maximum positive comments: ", p
    print "Maximum neutral comments: ", n
    print "Maximum negative comments: ", ne
    print(
        colored(
            "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++",
            color='green'))
Example #13
def sentimental():
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_token_secret)
    api = tweepy.API(auth)
    username = input("enter any user id:")
    tweets = api.user_timeline(screen_name=username, count=20)
    tmp = []
    tweets_for_csv = [tweet.text for tweet in tweets]
    for j in tweets_for_csv:
        tmp.append(j)
    count1 = 0
    count2 = 0
    count3 = 0
    print(tmp)
    from paralleldots import set_api_key, get_api_key, sentiment
    set_api_key("M6aheAI13WZLXrxV9Gv3rsm8Fc8kXYKuYapZ7n2G8Wo")
    get_api_key()
    for t in tmp:
        a = sentiment(t)
        if a['sentiment'] == 'positive':
            count1 += 1
        if a['sentiment'] == 'negative':
            count2 += 1
        if a['sentiment'] == 'neutral':
            count3 += 1
    if (count1 > count2) and (count1 > count3):
        print("positive")
    if (count2 > count3) and (count2 > count1):
        print("negative")
    if (count3 > count2) and (count3 > count1):
        print("neutral")
Example #14
def main():
    paralleldots.set_api_key(API_KEY)
    messages = []
    with open("messages.csv") as csv_file:
        reader = csv.reader(csv_file, delimiter='|')
        for row in reader:
            messages.append(row[2])

    messages = messages[1:]

    emotions = []
    response = paralleldots.batch_emotion(messages)
    for result in response['batch']:
        emotions.append(result['emotion']['emotion'])

    languages = []
    response = paralleldots.batch_language_detection(messages)
    for result in response['batch']:
        languages.append(result['output'])

    sentiments = []
    response = paralleldots.batch_sentiment(messages)
    for result in response['batch']:
        sentiments.append(result['sentiment'])

    data = {}

    data['languages'] = languages
    data['emotions'] = emotions
    data['sentiments'] = sentiments

    generareRaport(data)
Example #15
    def __init__(self, data_dict, api_key, num_reviews=10):
        self.data_dict = data_dict
        self.language = data_dict[0]['language']
        # sort data by weight
        self.sorted_data_dict = self.sort_reviews_by_weight(self.data_dict)

        # try:
        #     self.api_key = os.environ.get('API_KEY')
        #     print(self.api_key)
        # except Exception as e:
        #     raise Exception("please add your Paralleldots API_KEY to your environment variable")

        self.api_key = api_key

        paralleldots.set_api_key(self.api_key)

        self.text = self.piece_text(self.sorted_data_dict, num_reviews)
        self.text = self.strip_text(self.text, self.language)

        self.key_words, self.key_phrase, self.emotion = self.go(
            self.text, self.language)

        self.result = dict()
        self.result['keywords'] = self.key_words
        self.result['phrase'] = self.key_phrase
        self.result['emotion'] = self.emotion
Example #16
def sentimentAnalysis():
    positive_sentiment = 0
    negative_sentiment = 0

    query()
    from paralleldots import set_api_key, get_api_key, sentiment

    set_api_key("8dyQhJPFerUALsn2lBpMAftocXOIr6bAFb6vJcrEYYM")
    get_api_key()
    for tweet in tweets:
        txt = tweet.text
        sentiment_value = sentiment(txt)
        value = sentiment_value['sentiment']
        if value == "positive":
            positive_sentiment = positive_sentiment + 1
        else:
            negative_sentiment = negative_sentiment + 1

    if positive_sentiment > negative_sentiment:
        print("Sentiment is Positive")
    else:
        print("Sentiment is Negative")
Example #17
def home(request):
    user_sent = ""
    user_input = ""
    fname = "na"
    if request.POST:
        user_input = request.POST.get('user_input', '')
        lang_code = "en"
        paralleldots.set_api_key("NlxGNPr4VRsjdyORAdKFWWraVX2HNGdBw0JUXCJ9uYg")
        user_response = paralleldots.sentiment(user_input, lang_code)
        user_sent = user_response['sentiment']

        if (user_sent == 'neutral'):
            fname = "emoticon-1634586_640.png"
        elif (user_sent == 'negative'):
            fname = "emoticon-1634515_640.png"
        elif (user_sent == 'positive'):
            fname = "smiley-163510_640.jpg"
        else:
            fname = "na"

    return render(request, 'jack/home.html', {
        'resp': user_sent,
        'fname': fname,
        'user_input': user_input
    })
Example #18
def met():
    import paralleldots
    import json
    api_key = "zIAZOZfZvvLW6luxNluGHa0Pvt623evzdR42paLpWNY"
    paralleldots.set_api_key(api_key)
    p1 = "/home/ganesh/Desktop/Projects/exp/photos/"
    p2 = os.listdir(p1)
    p1 = p1 + str(p2[0])
    k = paralleldots.facial_emotion(p1)
    if "No face detected." in k:
        shutil.rmtree('/home/ganesh/Desktop/Projects/exp/photos')
        os.mkdir('/home/ganesh/Desktop/Projects/exp/photos')
        return render_template('error.html')
    else:
        m = 0
        mv = "p"
        if 'facial_emotion' not in k:
            shutil.rmtree('/home/ganesh/Desktop/Projects/exp/photos')
            os.mkdir('/home/ganesh/Desktop/Projects/exp/photos')
            return render_template('error.html')
        for j in k['facial_emotion']:
            e = j['tag']
            if j['score'] > m:
                mv = e
                m = j['score']
    shutil.rmtree('/home/ganesh/Desktop/Projects/exp/photos')
    os.mkdir('/home/ganesh/Desktop/Projects/exp/photos')
    return render_template('success.html', emotion=mv)
Example #19
def nlp(req):  # NLP work
    datetime = feedbacks.objects.latest('id').DateTime
    category = req.POST.get("category")
    text = req.POST.get("text")[10:]
    text = text[:-2]
    bw = req.POST.get("bw")
    fid = feedbacks.objects.latest('id').id
    counter = 0
    feedback = analyzedFeedbacks.objects.all()
    for feedbac in feedback:
        if (classify(text) > 70):  # if the score is greater than 70, the text is treated as garbage
            return
        # Finds similar feedbacks
        if(feedbac.category.lower() == category.lower() and feedbac.bw.lower() == bw.lower()):
            paralleldots.set_api_key(
                "pCQlFdWiBwhGO8RERIGpwHDeAHQmWUjP3i9LLOrK0oc")  # Paralleldots API Key
            result = paralleldots.similarity(
                feedbac.text.lower(), text.lower())
            #print(result['similarity_score'])
            # If the similarity score is at least 0.65, the two feedbacks are treated as the same. You can change this threshold.
            if(result['similarity_score'] >= 0.65):
                counter = counter+1
                postToRelated(fid, feedbac.fid)  # Post Related in related table
                return
# If we reach this point, the feedback is neither garbage nor a duplicate, so we add it to the analyzedFeedbacks table
    m = analyzedFeedbacks(
        DateTime=datetime, category=category, text=text, bw=bw, fid=fid, related=counter)
    m.save()
Example #20
def test(request):
    paralleldots.set_api_key("M4rTJatLfpK0pp1AjE5pZ8ciHa4hW2KTOeq65fUIoEk")
    text = "i wanna die"
    data = paralleldots.emotion(text)
    emotion_scores = data['emotion']
    print(emotion_scores['Angry'])
    return render(request, 'home/test.html')
Example #21
def predict_sentiment_with_paralleldots(data_df):
    import paralleldots
    # Setting your API key
    paralleldots.set_api_key(PARALLEL_DOTS_KEY)
    texts_list = data_df.tolist()
    result = paralleldots.sentiment(texts_list)
    return result['sentiment']
Example #22
    def get_tweets(username):
        auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
        auth.set_access_token(access_token, access_token_secret)
        api = tweepy.API(auth)
        tweets = api.user_timeline(screen_name=username, count=20)
        tmp = []
        tweets_for_csv = [tweet.text for tweet in tweets]  # collect the tweet texts
        for j in tweets_for_csv:
            tmp.append(j)
        var1 = 0
        var2 = 0
        var3 = 0

        print(tmp)
        from paralleldots import set_api_key, get_api_key, sentiment
        set_api_key("6dm9k0RomplpimtZETEkwp6JzMTrPSDhhMIiGPGmu68")
        get_api_key()
        for t in tmp:
            a = sentiment(t)
            print(t, "-->", a)
            time.sleep(1)
            if a['sentiment'] == 'positive':
                var1 += 1
            if a['sentiment'] == 'negative':
                var2 += 1
            if a['sentiment'] == 'neutral':
                var3 += 1
        if (var1 > var2) and (var1 > var3):
            print("This user is positive on Twitter")
        if (var2 > var3) and (var2 > var1):
            print("This user is negative on Twitter")
        if (var3 > var2) and (var3 > var1):
            print("This user is neutral on Twitter")
Example #23
def testing_sentiments():
    sent_list = []
    tweets = get_search()
    set_api_key("5Ilq8t88HXC0EYjVzpCDqqnQSlPJm5mJ9faJTnigwG4")
    for tweet in tweets:
        sent_list.append(sentiment(tweet.text))
    return sent_list
Example #24
def test_sentiments():
    list_sents = []
    tweets = getSearch()
    set_api_key(" ")
    for tweet in tweets:
        list_sents.append(sentiment(tweet.text))
    return list_sents
Example #25
    def get_tweets(username):  # sentiment analysis
        auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
        auth.set_access_token(access_token, access_token_secret)
        api = tweepy.API(auth)
        tweets = api.user_timeline(screen_name=username, count=20)
        tmp = []
        tweets_for_csv = [tweet.text for tweet in tweets]  # collect the tweet texts
        for j in tweets_for_csv:
            tmp.append(j)  # store the tweets in tmp list
        var1 = 0
        var2 = 0
        var3 = 0

        print(tmp)
        from paralleldots import set_api_key, get_api_key, sentiment
        set_api_key("6dm9k0RomplpimtZETEkwp6JzMTrPSDhhMIiGPGmu68")
        get_api_key()
        for t in tmp:
            a = sentiment(t)
            print(a)
            if a['sentiment'] == 'positive':  #checking positive tweets
                var1 += 1
            if a['sentiment'] == 'negative':  #checking negative tweets
                var2 += 1
            if a['sentiment'] == 'neutral':  #checking neutral tweets
                var3 += 1
        if (var1 > var2) and (var1 > var3):  # check whether the person is positive
            print("positive")
        if (var2 > var3) and (var2 > var1):  # check whether the person is negative
            print("negative")
        if (var3 > var2) and (var3 > var1):  # check whether the person is neutral
            print("neutral")
Example #26
def sentiment_analysis():
    flagp = 0
    flagn = 0
    flagneg = 0
    query()
    from paralleldots import set_api_key, get_api_key
    from paralleldots import similarity, ner, taxonomy, sentiment, keywords, intent, emotion, abuse, multilang_keywords
    set_api_key("")
    get_api_key()
    for tweet in tweets:
        text = tweet.text
        sentiment_value = sentiment(text)
        values1 = sentiment_value['sentiment']
        if values1 == "positive":
            flagp = flagp + 1
        elif values1 == "negative":
            flagneg = flagneg + 1
        else:
            flagn = flagn + 1
    if flagn > flagneg and flagn > flagp:
        print("Sentiment: Neutral")
    elif flagneg > flagn and flagneg > flagp:
        print("Sentiment: Negative")
    else:
        print("Sentiment: Positive")
Example #27
def Determine_the_sentiment():
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_token_secret)
    api = tweepy.API(auth)
    username = input("enter any user id:")
    tweets = api.user_timeline(screen_name=username, count=20)
    tmp = []
    tweets_for_csv = [tweet.text for tweet in tweets]
    for j in tweets_for_csv:
        tmp.append(j)
    pos = 0
    neg = 0
    neu = 0
    print(tmp)
    from paralleldots import set_api_key, get_api_key, sentiment
    set_api_key("2S3zRrv1jxndgO6NQ989I4iJlEU8PHD1aOaAvCM4kw8")
    get_api_key()
    for t in tmp:
        a = sentiment(t)
        if a['sentiment'] == 'positive':
            pos += 1
        if a['sentiment'] == 'negative':
            neg += 1
        if a['sentiment'] == 'neutral':
            neu += 1
    if (pos > neg) and (pos > neu):
        print("positive")
    if (neg > neu) and (neg > pos):
        print("negative")
    if (neu > neg) and (neu > pos):
        print("neutral")
Example #28
def analyze_sentence(sentence):
    '''
    Return softmaxed probability vector of sentence emotions.
    '''
    paralleldots.set_api_key(key)
    result = paralleldots.emotion(sentence)

    return result['emotion']['probabilities']
Example #29
def analyze_text_w(text):
    paralleldots.set_api_key(paralleldots_TOKEN)
    paralleldots.get_api_key()
    emotions = paralleldots.emotion(text)["emotion"]
    pos = (emotions["Happy"] + emotions["Excited"]) / 2
    neg = (emotions["Angry"] + emotions["Bored"] + emotions["Fear"] +
           emotions["Sad"]) / 4
    print(pos, " ", neg)
Example #30
def get_highest_two_emotions(text):
    paralleldots.set_api_key(paralleldots_TOKEN)
    paralleldots.get_api_key()
    emotions = paralleldots.emotion(text)["emotion"]
    my_list = [k for k, v in emotions.items() if v == max(emotions.values())]
    if my_list[0] == "Fear":
        return "Sad"
    return my_list[0]