Example #1
0
def emotion_data(comment):
    """Score *comment* with SenticNet and emit the result as JSON.

    Punctuation is stripped, then every whitespace-split token is looked
    up in the lexicon; tokens that raise ``KeyError`` are dropped from
    the averaging denominator. The JSON string is printed and returned.
    """
    comment = comment.translate({ord(c): '' for c in string.punctuation})
    sn = Senticnet()
    total_polarity = 0
    sentics_acc = {
        'sensitivity': 0,
        'attention': 0,
        'pleasantness': 0,
        'aptitude': 0
    }
    mood_tags = []
    tokens = comment.split(" ")
    effective_count = len(tokens)
    final_output = {
        'sentics': {
            'sensitivity': '0',
            'attention': '0',
            'pleasantness': '0',
            'aptitude': '0',
            'polarity': '0'
        },
        'mood tags': {}
    }
    for token in tokens:
        try:
            total_polarity += float(sn.polarity_intense(token))
            sentics_values(token, sn, sentics_acc)
            add_mood_tags(mood_tags, sn, token)
        except KeyError:
            # Unknown word: shrink the denominator, but never below one
            # so the average below cannot divide by zero.
            if effective_count > 1:
                effective_count -= 1
    comment_sentics_average(effective_count, sentics_acc)
    final_output['sentics']['polarity'] = total_polarity / effective_count
    final_output['mood tags'] = mood_tags
    for key, value in sentics_acc.items():
        if key in final_output['sentics']:
            final_output['sentics'][key] = value

    json_output = json.dumps(final_output)
    print(json_output)
    return json_output
def get_avg_polarity(message):
    """Average the SenticNet polarity of *message*'s words.

    Only words whose absolute polarity exceeds a fixed threshold
    contribute; each word-variant group from ``get_words_bag`` uses the
    first variant the SenticNet service resolves. Returns 0 when no
    word passes the threshold.
    """
    threshold = 0.3
    sn = Senticnet()
    matched = 0
    total = 0
    for variants in get_words_bag(message):
        word_polarity = 0
        for variant in variants:
            try:
                word_polarity = sn.concept(variant)['polarity']
            except urllib2.HTTPError:
                continue  # unresolved variant: try the next one
            break
        if abs(word_polarity) > threshold:
            total += word_polarity
            matched += 1

    return total / matched if matched > 0 else 0
Example #3
0
 def getInfo(self, concept):
     """Return ``"<polarity_value> - <polarity_intense>"`` for *concept*.

     All six SenticNet lookups are attempted so that a failure in any of
     them (not just the two used in the result) yields the fallback
     string, preserving the original all-or-nothing behaviour.

     Returns:
         str: ``"value - intensity"`` on success, ``"NOT POSSIBLE"``
         when any lookup raises.
     """
     try:
         sn = Senticnet()
         # Each call below may raise for an unknown concept; the extra
         # lookups are kept deliberately as part of the success check.
         sn.concept(concept)
         polarity_value = sn.polarity_value(concept)
         polarity_intense = sn.polarity_intense(concept)
         sn.moodtags(concept)
         sn.semantics(concept)
         sn.sentics(concept)
         return "{} - {}".format(polarity_value, polarity_intense)
     except Exception:
         # Was a bare ``except:``, which also swallowed
         # KeyboardInterrupt/SystemExit; Exception is the widest safe net.
         return "NOT POSSIBLE"
Example #4
0
from senticnet.senticnet import Senticnet

# Demo: query SenticNet for the concept "love".
# Converted from Python 2 print statements (a SyntaxError under Python 3)
# to print() calls, matching the Python 3 style used elsewhere in this file.
sn = Senticnet()
print("polarity value:", sn.polarity_value("love"))
print("polarity intense:", sn.polarity_intense("love"))
print("moodtags:", ", ".join(sn.moodtags("love")))
print("semantics:", ", ".join(sn.semantics("love")))
print("\n".join(
    [key + ": " + str(value) for key, value in sn.sentics("love").items()]))
Example #5
0
def score_calculation(sentence):
    """Compute an "excitement" score for *sentence* via SenticNet.

    Each word is looked up in SenticNet; words missing from the lexicon
    are retried after lemmatisation, then after stemming, before being
    dropped from the effective word count.

    Returns:
        dict: ``{'score': str, 'mood tags': list}`` where score is the
        sum of the averaged 'attention' and 'pleasantness' sentics.
    """
    sno = SnowballStemmer('english')
    lemma = WordNetLemmatizer()
    sn = Senticnet()
    polarity_intense = 0
    comment_sentics = {
        'sensitivity': 0,
        'attention': 0,
        'pleasantness': 0,
        'aptitude': 0
    }
    comment_mood_tags = []
    # NOTE(review): assumes *sentence* is a pre-tokenised list of words
    # (it is iterated word-by-word below); len() would count characters
    # if a plain string were passed — confirm against callers.
    total_word_count = len(sentence)
    final_output = {
        'sentics': {
            'sensitivity': '0',
            'attention': '0',
            'pleasantness': '0',
            'aptitude': '0',
            'polarity': '0'
        },
        'mood tags': {}
    }
    for i in sentence:
        try:
            # First attempt: look the surface form up directly.
            polarity_intense += float(sn.polarity_intense(i))
            sentics_values(i, sn, comment_sentics)
            add_mood_tags(comment_mood_tags, sn, i)
        except KeyError:
            # Unknown word: second attempt with its lemma.
            try:
                current_word = lemma.lemmatize(i)
                polarity_intense += float(sn.polarity_intense(current_word))
                sentics_values(current_word, sn, comment_sentics)
                add_mood_tags(comment_mood_tags, sn, current_word)
            except KeyError:

                # Still unknown: last attempt with the stem of the lemma.
                try:
                    current_word = sno.stem(current_word)
                    polarity_intense += float(
                        sn.polarity_intense(current_word))
                    sentics_values(current_word, sn, comment_sentics)
                    add_mood_tags(comment_mood_tags, sn, current_word)
                except KeyError:
                    # Give up on this word; keep at least one word in the
                    # denominator to avoid division by zero below.
                    if (total_word_count > 1):
                        total_word_count -= 1
                    pass
    comment_sentics_average(total_word_count, comment_sentics)
    final_output['sentics']['polarity'] = polarity_intense / total_word_count
    final_output['mood tags'] = comment_mood_tags
    for output in final_output['sentics']:
        if output in comment_sentics:
            final_output['sentics'][output] = comment_sentics[output]
    json_output = json.dumps(final_output)  # built but never used/returned
    final_excitement = {'score': '0', 'mood tags': {}}
    # Excitement = averaged attention + averaged pleasantness.
    final_excitement_score = comment_sentics['attention'] + comment_sentics[
        'pleasantness']
    final_excitement['score'] = str(final_excitement_score)
    final_excitement['mood tags'] = comment_mood_tags

    return final_excitement
Example #6
0
#!/usr/bin/python
# -*- coding: utf8 -*-

from senticnet.senticnet import Senticnet
from nltk.classify import NaiveBayesClassifier
import pymorphy2
import codecs


def word_feats(words):
    """Return the NLTK bag-of-words feature dict: every word mapped to True.

    ``dict.fromkeys`` replaces the manual ``dict([(word, True) ...])``
    pairs-list construction — same result, clearer and without the
    intermediate list of tuples.
    """
    return dict.fromkeys(words, True)


sn = Senticnet('ru')  # Russian-language SenticNet lexicon
morph = pymorphy2.MorphAnalyzer()

# seed the vocabularies with SenticNet mood-tag concepts
positive_vocab = ['#интерес', '#радость', '#сюрприз', '#восхищение']
negative_vocab = ['#попугать', '#гнев', '#печаль', '#отвращение']

# add words from the WordNet dictionaries, normalised to lemma form
with codecs.open('dict/positive.txt', encoding='utf-8') as file_object:
    for line in file_object:
        line = line.rstrip('\n\r')
        positive_vocab.append(morph.parse(line)[0].normal_form)
with codecs.open('dict/negative.txt', encoding='utf-8') as file_object:
    for line in file_object:
        line = line.rstrip('\n\r')
        negative_vocab.append(morph.parse(line)[0].normal_form)

# build the positive/negative word sets and train the classifier