Пример #1
1
from senticnet.senticnet import Senticnet

# Demo: query SenticNet for the concept "love" and print its polarity,
# related semantics, and sentic values.
# Fixed: Python 2 `print` statements are a SyntaxError under Python 3;
# converted to print() calls (output is unchanged).
sn = Senticnet()
print("polarity:", sn.polarity('love'))
print("semantics:", " ".join(sn.semantics('love')))
print("\n".join([key + ": " + str(value) for key, value in sn.sentics('love').items()]))
Пример #2
0
def emotion_data(comment):
    """Score *comment* with SenticNet and return the result as a JSON string.

    The output holds the averaged sentic values (sensitivity, attention,
    pleasantness, aptitude), an overall polarity average, and the mood
    tags collected for the comment's words.  Words unknown to SenticNet
    are excluded from the averaging denominator.
    """
    # Remove punctuation in a single translate() pass before splitting.
    cleaned = comment.translate({ord(ch): '' for ch in string.punctuation})
    sn = Senticnet()

    polarity_intense = 0
    comment_sentics = {
        'sensitivity': 0,
        'attention': 0,
        'pleasantness': 0,
        'aptitude': 0
    }
    comment_mood_tags = []

    words = cleaned.split(" ")
    total_word_count = len(words)

    final_output = {
        'sentics': {
            'sensitivity': '0',
            'attention': '0',
            'pleasantness': '0',
            'aptitude': '0',
            'polarity': '0'
        },
        'mood tags': {}
    }

    for word in words:
        try:
            polarity_intense += float(sn.polarity_intense(word))
            sentics_values(word, sn, comment_sentics)
            add_mood_tags(comment_mood_tags, sn, word)
        except KeyError:
            # Unknown concept: shrink the averaging denominator, but keep
            # it at least 1 so the division below never hits zero.
            if total_word_count > 1:
                total_word_count -= 1

    comment_sentics_average(total_word_count, comment_sentics)
    final_output['sentics']['polarity'] = polarity_intense / total_word_count
    final_output['mood tags'] = comment_mood_tags
    for name in final_output['sentics']:
        if name in comment_sentics:
            final_output['sentics'][name] = comment_sentics[name]

    json_output = json.dumps(final_output)
    print(json_output)
    return json_output
Пример #3
0
def get_avg_polarity(message):
    """Average the SenticNet polarity over the message's words.

    Only words whose absolute polarity exceeds a fixed threshold (0.3)
    contribute to the average; 0 is returned when no word qualifies.
    """
    threshold = 0.3
    sn = Senticnet()
    count = 0
    summ = 0
    for word_options in get_words_bag(message):
        polarity = 0
        # Use the first spelling variant that SenticNet actually knows.
        for candidate in word_options:
            try:
                polarity = sn.concept(candidate)['polarity']
            except urllib2.HTTPError:
                continue  # lookup failed -- try the next variant
            else:
                break
        if abs(polarity) > threshold:
            summ += polarity
            count += 1

    return summ / count if count > 0 else 0
Пример #4
0
def get_avg_polarity(message):
    """Return the mean polarity of strongly-polar words in *message*.

    Words whose absolute SenticNet polarity does not exceed 0.3 are
    ignored; 0 is returned when no word qualifies.
    """
    threshold = 0.3
    sn = Senticnet()
    strong_polarities = []
    for word_options in get_words_bag(message):
        polarity = 0
        for word in word_options:
            try:
                polarity = sn.concept(word)['polarity']
                break
            except urllib2.HTTPError:
                # Concept lookup failed; fall through to the next option.
                pass
        if abs(polarity) > threshold:
            strong_polarities.append(polarity)

    if not strong_polarities:
        return 0
    return sum(strong_polarities) / len(strong_polarities)
Пример #5
0
 def getInfo(self, concept):
     """Look up *concept* in SenticNet and summarise its polarity.

     Returns the string "<polarity_value> - <polarity_intense>" on
     success, or the sentinel "NOT POSSIBLE" when any SenticNet lookup
     fails (e.g. the concept is unknown).

     All six lookups are kept even though only two feed the result, so
     that a failure in any of them still maps to the sentinel, exactly
     as before.
     """
     try:
         sn = Senticnet()
         concept_info = sn.concept(concept)
         polarity_value = sn.polarity_value(concept)
         polarity_intense = sn.polarity_intense(concept)
         moodtags = sn.moodtags(concept)
         semantics = sn.semantics(concept)
         sentics = sn.sentics(concept)
         return "{} - {}".format(polarity_value, polarity_intense)
     except Exception:
         # Narrowed from a bare ``except:`` so KeyboardInterrupt and
         # SystemExit are no longer swallowed.  The dead commented-out
         # debug-print string literal was removed.
         return "NOT POSSIBLE"
Пример #6
0
from senticnet.senticnet import Senticnet

# Demo: print SenticNet data for the concept "love".
# Fixed: Python 2 `print` statements are a SyntaxError under Python 3;
# converted to print() calls (output is unchanged).
sn = Senticnet()
print("polarity value:", sn.polarity_value("love"))
print("polarity intense:", sn.polarity_intense("love"))
print("moodtags:", ", ".join(sn.moodtags("love")))
print("semantics:", ", ".join(sn.semantics("love")))
print("\n".join(
    [key + ": " + str(value) for key, value in sn.sentics("love").items()]))
Пример #7
0
__author__ = 'cloudera'

from senticnet.senticnet import Senticnet
from textblob import TextBlob

# Demo script: SenticNet lookups for "love" plus a TextBlob parse of a
# sample sentence.
# Fixed: Python 2 `print` statements are a SyntaxError under Python 3;
# converted to print() calls (output is unchanged).
sentence = "One of the very first Apple 1 computers, worth about 500,000, goes on sale later this month at Christie's auction house, the latest vintage tech sale."

sn = Senticnet()

concept_info = sn.concept('love')
print('sn.concept(love) = ', concept_info)

polarity = sn.polarity('love')
print('polarity(love) = ', polarity)

semantics = sn.semantics('love')
print('semantics = ', semantics)

sentics = sn.sentics('love')
print('sentics = ', sentics)

sentenceBlob = TextBlob(sentence)
print(sentenceBlob.parse())
print(sentenceBlob.sentiment)

# NOTE(review): sn.concept() on a whole sentence likely fails for
# multi-word input -- confirm against the senticnet API.
sentenceConcept = sn.concept(sentence)
print(sentenceConcept)
Пример #8
0

# yule's I measure (the inverse of yule's K measure)
# higher number is higher diversity - richer vocabulary
def yules_para(para):
    """Return (K, I): Yule's K and Yule's I lexical-diversity measures.

    I = M1^2 / (M2 - M1) and K = 10000 / I, where M1 is the total token
    count and M2 is the sum of squared token frequencies, computed over
    lower-cased \\w+ tokens of *para*.
    """
    words = nltk.tokenize.RegexpTokenizer(r'\w+').tokenize(para)
    token_counter = Counter(tok.lower() for tok in words)
    m1 = sum(token_counter.values())
    m2 = sum(freq ** 2 for freq in token_counter.values())
    # Fixed: m2 == m1 whenever every token occurs exactly once (and when
    # the text is empty, m1 == m2 == 0); the original raised
    # ZeroDivisionError here.  All-unique text has unbounded diversity.
    if m2 == m1:
        return (0.0, float('inf')) if m1 else (0.0, 0.0)
    i = (m1 * m1) / (m2 - m1)
    k = 1 / i * 10000
    return (k, i)


# positive and negative emotion score
# Shared SenticNet client, reused by the scoring functions below.
sn = Senticnet()


def positiveScore(para):
    """Return the mean positive SenticNet intensity over *para*'s tokens.

    Each \\w+ token with a positive polarity intensity contributes its
    intensity; tokens with no SenticNet entry contribute 0 but still
    count in the denominator.  Fixed: returns 0 for an empty paragraph
    instead of raising ZeroDivisionError.
    """
    posScore = 0
    words = nltk.tokenize.RegexpTokenizer(r'\w+').tokenize(para)
    if not words:
        # Guard: no tokens means the division below would be 0 / 0.
        return 0
    for word in words:
        try:
            polarity_intense = float(sn.polarity_intense(word))
            if polarity_intense > 0:
                posScore = posScore + polarity_intense
        except KeyError:
            # Word is not a SenticNet concept; skip it.
            continue

    return (posScore / len(words))
Пример #9
0
from senticnet.senticnet import Senticnet

# Demo: print polarity, semantics, and sentic values for "love".
# Fixed: Python 2 `print` statements are a SyntaxError under Python 3;
# converted to print() calls (output is unchanged).
sn = Senticnet()
print("polarity:", sn.polarity('love'))
print("semantics:", " ".join(sn.semantics('love')))
print("\n".join(
    [key + ": " + str(value) for key, value in sn.sentics('love').items()]))
Пример #10
0
lista[0]=lista[0].replace(']','')
lista[0]=lista[0].replace('\'','')
#lista[0]=lista[0].replace(',','')



lista[0]=lista[0].split(',')
#re.sub('[', '', data)
print(lista[0][0])

'''
polarity_value_result = list()
polarity_intense_result = list()
concept_info_result = list()

sn = Senticnet()
for concept in concepts:

    concept = concept.replace('[', '')
    concept = concept.replace('u\'', '')
    concept = concept.replace('_', ' ')
    concept = concept.replace('...', '')

    concept = concept.replace(']', '')
    concept = concept.replace('\'', '')
    concept = concept.split(',')
    print('internal loop')
    for item in concept:
        print('item:')
        print(item)
        item = str(item)
Пример #11
0
def score_calculation(sentence):
    """Score *sentence* with SenticNet and return an excitement summary.

    For every token, polarity/sentics/mood lookups are attempted on the
    raw token first, then on its WordNet lemma, then on the Snowball
    stem of that lemma; tokens unknown in all three forms are dropped
    from the averaging denominator.  Returns a dict with a 'score'
    (attention + pleasantness, as a string) and the collected mood tags.

    NOTE(review): if *sentence* is a plain string, both len(sentence)
    and the loop operate on characters, not words -- this appears to
    expect a pre-tokenised list; confirm against callers.
    """
    sno = SnowballStemmer('english')
    lemma = WordNetLemmatizer()
    sn = Senticnet()
    polarity_intense = 0
    # Running totals for the four sentic dimensions, accumulated by the
    # sibling helper sentics_values().
    comment_sentics = {
        'sensitivity': 0,
        'attention': 0,
        'pleasantness': 0,
        'aptitude': 0
    }
    comment_mood_tags = []
    total_word_count = len(sentence)
    final_output = {
        'sentics': {
            'sensitivity': '0',
            'attention': '0',
            'pleasantness': '0',
            'aptitude': '0',
            'polarity': '0'
        },
        'mood tags': {}
    }
    for i in sentence:
        try:
            #word_emotion(i,sn)
            # First attempt: the token exactly as given.
            polarity_intense += float(sn.polarity_intense(i))
            sentics_values(i, sn, comment_sentics)
            add_mood_tags(comment_mood_tags, sn, i)
        except KeyError:
            #print "This word does not exist"
            try:
                # Second attempt: the WordNet lemma of the token.
                current_word = lemma.lemmatize(i)
                #word_emotion(current_word,sn)
                polarity_intense += float(sn.polarity_intense(current_word))
                sentics_values(current_word, sn, comment_sentics)
                add_mood_tags(comment_mood_tags, sn, current_word)
            except KeyError:

                #print("This didnt work again")
                try:
                    # word_emotion(sno.stem(current_word),sn)
                    # Third attempt: the Snowball stem of the lemma
                    # (current_word was bound in the previous try).
                    current_word = sno.stem(current_word)
                    polarity_intense += float(
                        sn.polarity_intense(current_word))
                    sentics_values(current_word, sn, comment_sentics)
                    add_mood_tags(comment_mood_tags, sn, current_word)
                except KeyError:
                    # Unknown in every form: shrink the averaging
                    # denominator, but keep it at least 1 so the
                    # division below never hits zero.
                    if (total_word_count > 1):
                        total_word_count -= 1
                    pass
    comment_sentics_average(total_word_count, comment_sentics)
    final_output['sentics']['polarity'] = polarity_intense / total_word_count
    final_output['mood tags'] = comment_mood_tags
    for output in final_output['sentics']:
        if output in comment_sentics:
            final_output['sentics'][output] = comment_sentics[output]
    # json_output is serialised for debugging only; the function returns
    # the excitement summary built below.
    json_output = json.dumps(final_output)
    final_excitement = {'score': '0', 'mood tags': {}}
    final_excitement_score = comment_sentics['attention'] + comment_sentics[
        'pleasantness']
    final_excitement['score'] = str(final_excitement_score)
    final_excitement['mood tags'] = comment_mood_tags

    #print json_output
    #print final_excitement

    return final_excitement
Пример #12
0
#!/usr/bin/python
# -*- coding: utf8 -*-

from senticnet.senticnet import Senticnet
from nltk.classify import NaiveBayesClassifier
import pymorphy2
import codecs


def word_feats(words):
    """Map every word to True -- the feature-dict shape expected by
    nltk's NaiveBayesClassifier.  Uses a dict comprehension instead of
    dict() over a list of pairs (flake8-comprehensions C404)."""
    return {word: True for word in words}


# Russian-language SenticNet client and a morphological analyser used to
# normalise dictionary words to their lemma (normal form).
sn = Senticnet('ru')
morph = pymorphy2.MorphAnalyzer()

# Seed the vocabularies with SenticNet mood tags
positive_vocab = ['#интерес', '#радость', '#сюрприз', '#восхищение']
negative_vocab = ['#попугать', '#гнев', '#печаль', '#отвращение']

# Add words from the WordNet word lists, one word per line, lemmatised.
with codecs.open('dict/positive.txt', encoding='utf-8') as file_object:
    for line in file_object:
        line = line.rstrip('\n\r')
        positive_vocab.append(morph.parse(line)[0].normal_form)
with codecs.open('dict/negative.txt', encoding='utf-8') as file_object:
    for line in file_object:
        line = line.rstrip('\n\r')
        negative_vocab.append(morph.parse(line)[0].normal_form)

# Populate the positive/negative word sets and train the classifier
Пример #13
0
        content = data

        # Create a SummaryTool object
        st = SummaryTool()

        # Build the sentences dictionary
        sentences_dic = st.get_sentences_ranks(content)

        # Build the summary with the sentences dictionary
        sentences = st.get_summary(title, content, sentences_dic)

        # print sentences

        ## instantiate senticnet

        sn = Senticnet()

        counter = 0
        total = 0

        words = re.compile('\w+').findall(sentences)

        for word in words:
            word = word.lower()
            try:
                print word
                polarity = sn.polarity(word)
                counter += 1
                total += polarity

                # print counter, word, polarity