Ejemplo n.º 1
0
def test_positive():
    """A clearly positive Russian comment must yield a score above zero."""
    analyzer = Sentimental()
    outcome = analyzer.analyze('Крууто. ты лучший ютубер который снимает приколы. отлично .')
    assert outcome['score'] > 0
Ejemplo n.º 2
0
def test_negative():
    """A hostile Russian comment must yield a score below zero."""
    analyzer = Sentimental()
    outcome = analyzer.analyze('Какое жалкое и лицемерное шоу. А вот здесь в комментариях и дизлайках как раз и проявляется настоящее отношение к этому кощею')
    assert outcome['score'] < 0
Ejemplo n.º 3
0
def test_negative():
    """A negative English sentence scores below zero and has no positive hits."""
    analyzer = Sentimental()
    outcome = analyzer.analyze('Today is a bad day!')
    assert outcome['score'] < 0
    assert outcome['positive'] == 0
Ejemplo n.º 4
0
def test_neutral():
    """A neutral English phrase scores exactly zero with no negative hits."""
    analyzer = Sentimental()
    outcome = analyzer.analyze('Nothing special!')
    assert outcome['score'] == 0
    assert outcome['negative'] == 0
Ejemplo n.º 5
0
def test_negation():
    """Russian negation ('не плохо') must cancel the negative word out."""
    analyzer = Sentimental()
    outcome = analyzer.analyze('Было не плохо!')
    assert outcome['score'] == 0
    assert outcome['negative'] == 0
Ejemplo n.º 6
0
def test_neutral():
    """A neutral Russian phrase scores exactly zero with no negative hits."""
    analyzer = Sentimental()
    outcome = analyzer.analyze('Ничего такого!')
    assert outcome['score'] == 0
    assert outcome['negative'] == 0
Ejemplo n.º 7
0
def test_negation():
    """English negation ('not bad') must cancel the negative word out."""
    analyzer = Sentimental()
    outcome = analyzer.analyze('It was not bad!')
    assert outcome['score'] == 0
    assert outcome['negative'] == 0
def test_empty_string():
    """Analyzing an empty string yields all-zero metrics."""
    analyzer = Sentimental()
    outcome = analyzer.analyze('')
    for metric in ('score', 'positive', 'negative', 'comparative'):
        assert outcome[metric] == 0
Ejemplo n.º 9
0
class SentimentAnalyzer():
    """Classifies marked text items and aggregates sentiment per artifact.

    Holds one shared Sentimental model as class-level state; it is trained
    once, when this class is defined.
    """

    _sentimental = Sentimental(max_ngrams=2, undersample=True)
    _sentimental.train([get_data_path() + '/sv/ruhburg'])

    @staticmethod
    def calculate_scores(marked_tree):
        """Aggregate sentiment hits for artifacts found in *marked_tree*.

        For every item carrying a 'text' field, the text is classified
        (+1 / -1 / 0) and each artifact token of the form
        (name "GHOSTDOC-TOKEN") accumulates [positive, negative] counts —
        individually, and for every unordered pair of artifacts that
        co-occur in the same item.  Counts are folded into a score of
        (pos - neg) * exp(max(pos, neg) / (pos + neg + 1)).

        Returns a dict {'friend_scores': {...}, 'artifact_scores': {...}}.
        """
        # Raw-string form of the original escaped pattern; captures the
        # identifier out of: (identifier "GHOSTDOC-TOKEN")
        reg = re.compile(r'\(([\w]+) \"GHOSTDOC-TOKEN\"\)')
        friend_scores = {}
        artifact_scores = {}
        for item in marked_tree:
            if 'text' in item:
                senti = SentimentAnalyzer.sentiment(item['text'])
                m = reg.findall(item['text'])
                # Unique artifact names, sorted so pair keys are deterministic.
                c = sorted(set(m))

                # Per-artifact [positive, negative] tallies.
                for artifact in c:
                    s = artifact_scores.setdefault(artifact, [0, 0])
                    if senti == 1:
                        s[0] += 1
                    elif senti == -1:
                        s[1] += 1

                # Per-pair ("friend") tallies for co-occurring artifacts.
                for pair in itertools.combinations(c, 2):
                    s = friend_scores.setdefault(pair, [0, 0])
                    if senti == 1:
                        s[0] += 1
                    elif senti == -1:
                        s[1] += 1

        def _fold(vals):
            # Net sentiment, amplified by how one-sided the counts are.
            return (vals[0] - vals[1]) * math.exp(max(vals) / (vals[0] + vals[1] + 1))

        friend_scores = {_id: _fold(vals) for _id, vals in friend_scores.items()}
        artifact_scores = {_id: _fold(vals) for _id, vals in artifact_scores.items()}

        return {
            'friend_scores': friend_scores,
            'artifact_scores': artifact_scores
        }

    @staticmethod
    def sentiment(text):
        """Return +1 for a 'positive' label, -1 for 'negative', else 0."""
        # NOTE(review): if Sentimental.sentiment returns a dict of
        # label -> probability, max() over it yields the alphabetically
        # greatest *key* ('positive' > 'neutral' > 'negative'), not the most
        # probable label; that would need max(d, key=d.get).  Behavior kept
        # as-is — confirm against the Sentimental API before changing.
        label = max(SentimentAnalyzer._sentimental.sentiment(text))
        if label == 'positive':
            return 1
        elif label == 'negative':
            return -1
        else:
            return 0
Ejemplo n.º 10
0
    # Fold the per-user "active" aggregates into the main feature frames,
    # dropping the join key once it is no longer needed.
    train_active.drop('user_id', axis=1, inplace=True)
    test_active.drop('user_id', axis=1, inplace=True)
    print_step('Importing Data 13/13 4/4')
    # Record every added column so downstream code knows which are numeric.
    numeric_cols += train_active.columns.values.tolist()
    # Column-wise concat — assumes aligned row indexes between the frames.
    train_fe = pd.concat([train_fe, train_active], axis=1)
    test_fe = pd.concat([test_fe, test_active], axis=1)

    # Derived ratio features (NOTE: zero denominators would yield inf/NaN).
    train_fe['user_items_per_day'] = train_fe['n_user_items'] / train_fe['user_num_days']
    test_fe['user_items_per_day'] = test_fe['n_user_items'] / test_fe['user_num_days']
    train_fe['img_size_ratio'] = train_fe['img_file_size'] / (train_fe['img_size_x'] * train_fe['img_size_y'])
    test_fe['img_size_ratio'] = test_fe['img_file_size'] / (test_fe['img_size_x'] * test_fe['img_size_y'])
    numeric_cols += ['user_items_per_day', 'img_size_ratio']

    print_step('Importing Data 8/19 1/8')
    train, test = get_data()
    # Sentiment features over the description text; non-strings (e.g. NaN) -> 0.
    sent = Sentimental()
    train_fe['sentiment_negative'] = train['description'].apply(lambda s: sent.analyze(s)['negative'] if isinstance(s, str) else 0)
    test_fe['sentiment_negative'] = test['description'].apply(lambda s: sent.analyze(s)['negative'] if isinstance(s, str) else 0)
    train_fe['sentiment_positive'] = train['description'].apply(lambda s: sent.analyze(s)['positive'] if isinstance(s, str) else 0)
    test_fe['sentiment_positive'] = test['description'].apply(lambda s: sent.analyze(s)['positive'] if isinstance(s, str) else 0)
    train_fe['sentiment'] = train['description'].apply(lambda s: sent.analyze(s)['comparative'] if isinstance(s, str) else 0)
    test_fe['sentiment'] = test['description'].apply(lambda s: sent.analyze(s)['comparative'] if isinstance(s, str) else 0)
    numeric_cols += ['sentiment_negative', 'sentiment_positive', 'sentiment']

    print_step('Importing Data 8/19 2/8')
    # Left-join cached per-image predictions (presumably NIMA image-quality
    # scores — confirm what load_cache('img_nima') holds) onto the raw frames.
    train_nima, test_nima = load_cache('img_nima')
    print_step('Importing Data 8/19 3/8')
    train = train.merge(train_nima, on = 'image', how = 'left')
    print_step('Importing Data 8/19 4/8')
    test = test.merge(test_nima, on = 'image', how = 'left')
    print_step('Importing Data 7/19 5/8')
Ejemplo n.º 11
0
    # NOTE(review): this line is indented inconsistently with everything
    # below — it appears to belong to an enclosing scope cut off by the
    # snippet boundary.
    train_embeddings_df, test_embeddings_df = load_cache('avito_fasttext_300d')

print_step('Importing Data 11/19 2/3')
# Append the cached FastText embedding columns to the feature frames
# (column-wise concat — assumes aligned row indexes).
train_fe = pd.concat([train_fe, train_embeddings_df], axis=1)
print_step('Importing Data 11/19 3/3')
test_fe = pd.concat([test_fe, test_embeddings_df], axis=1)
# Row-wise distribution statistics over each embedding vector.
train_fe['embedding_mean'] = train_embeddings_df.mean(axis=1)
train_fe['embedding_std'] = train_embeddings_df.std(axis=1)
train_fe['embedding_skew'] = skew(train_embeddings_df, axis=1)
train_fe['embedding_kurtosis'] = kurtosis(train_embeddings_df, axis=1)
test_fe['embedding_mean'] = test_embeddings_df.mean(axis=1)
test_fe['embedding_std'] = test_embeddings_df.std(axis=1)
test_fe['embedding_skew'] = skew(test_embeddings_df, axis=1)
test_fe['embedding_kurtosis'] = kurtosis(test_embeddings_df, axis=1)

# Sentiment features over the description text; non-strings (e.g. NaN) -> 0.
sent = Sentimental()
train_fe['sentiment_negative'] = train['description'].apply(
    lambda s: sent.analyze(s)['negative'] if isinstance(s, str) else 0)
test_fe['sentiment_negative'] = test['description'].apply(
    lambda s: sent.analyze(s)['negative'] if isinstance(s, str) else 0)
train_fe['sentiment_positive'] = train['description'].apply(
    lambda s: sent.analyze(s)['positive'] if isinstance(s, str) else 0)
test_fe['sentiment_positive'] = test['description'].apply(
    lambda s: sent.analyze(s)['positive'] if isinstance(s, str) else 0)
train_fe['sentiment'] = train['description'].apply(
    lambda s: sent.analyze(s)['comparative'] if isinstance(s, str) else 0)
test_fe['sentiment'] = test['description'].apply(
    lambda s: sent.analyze(s)['comparative'] if isinstance(s, str) else 0)

print('~~~~~~~~~~~~~~~~~~~~~~~~~~~')
print_step('Converting to category')
Ejemplo n.º 12
0
from flask import Flask, jsonify, request, json, make_response, send_file
import requests, bs4
import time
import re
from sentimental import Sentimental
import pymorphy2
import json  # NOTE(review): shadows flask's `json` imported above; the stdlib module is the one used below.
app = Flask(__name__)
# Emit non-ASCII (Cyrillic) JSON responses verbatim instead of \u-escaping.
app.config['JSON_AS_ASCII'] = False
# Module-level singletons shared by all request handlers.
sent = Sentimental()
morph = pymorphy2.MorphAnalyzer()

# NOTE(review): hard-coded API credential committed to source — move to an
# environment variable or secrets store and rotate this key.
API_KEY = "abf14d0b25b6fb82ea3a316353fdb9d06eaf5d76"
BASE_URL = "https://suggestions.dadata.ru/suggestions/api/4_1/rs/suggest/{}"

def suggest(query, resource, count=10):
    """POST *query* to the DaData suggestion API for *resource*.

    Returns the decoded JSON response (up to *count* suggestions).
    """
    payload = json.dumps({"query": query, "count": count})
    response = requests.post(
        BASE_URL.format(resource),
        data=payload,
        headers={
            "Authorization": "Token {}".format(API_KEY),
            "Content-Type": "application/json",
        },
    )
    return response.json()


@app.route('/api/social', methods=['POST'])
def main():
    """Handle POSTed social data.  NOTE: body is truncated at the snippet
    boundary — only the prologue (timing + JSON decode) is visible here."""
    start_time = time.time()
    data_post_mas = json.loads(request.data)
Ejemplo n.º 13
0
# Interactive demo: read a Russian comment, translate it to English, score it
# with Sentimental, and print an ASCII "gauge" for the resulting score.
from sentimental import Sentimental
from googletrans import Translator
sent = Sentimental()
translator = Translator()
# Prompt (Russian): "Enter your comment: "
sentence = input("Введите ваш комментарий: ")
# Translate ru -> en before scoring — presumably because the default word
# list is English; confirm against the Sentimental configuration.
result1 = translator.translate(sentence, src='ru', dest='en')
result = sent.analyze(result1.text)
x = result.get('score')
# Message (Russian): "Comment is too positive"
if x > 5: print("Комментарий слишком положительный")
# Each bucket prints "Comment is positive at - N%" plus a block-art bar.
if x == 5:
    print("Комментарий положительный на - 100%\n"
          '░████████▒░░\n'
          '░████████▒░░\n'
          '░████████▒░░\n'
          '░████████▒░░\n'
          '░██100%██▒░░\n'
          '░████████▒░░\n'
          '░████████▒░░\n'
          '░████████▒░░\n'
          '░████████▒░░\n'
          '░████████▒░░\n')
# NOTE(review): the call below is truncated by the snippet boundary — its
# closing parenthesis is missing in this view.
if x == 4:
    print("Комментарий положительный на - 80%\n"
          '░░░░░░░░░░░░\n'
          '░░▒▒▒▒▒▒▒▒░░\n'
          '░░███████▒░░\n'
          '░░███████▒░░\n'
          '░░██80%██▒░░\n'
          '░░███████▒░░\n'
          '░░███████▒░░\n'
          '░░███████▒░░\n'
 def setup(self):
     """Lazily load the pickled Sentimental model (looks like an Apache
     Beam DoFn setup hook — confirm the enclosing class)."""
     logging.info('model loading in setup: start')
     # Load only once per worker; skip if a model is already attached.
     if (not self.senti):
         self.senti = Sentimental.load('model.pickle')
     logging.info('model loading in setup: done')
 def start_bundle(self):
     """Ensure the pickled Sentimental model is loaded before processing a
     bundle (looks like an Apache Beam DoFn hook — confirm)."""
     logging.info('model loading in start_bundle: start')
     # Defensive re-check: load only if setup did not already attach a model.
     if (not self.senti):
         self.senti = Sentimental.load('model.pickle')
     logging.info('model loading in start_bundle: done')
Ejemplo n.º 16
0
# Clean crawled tweets, keep those whose subject/object/root mentions the
# query term, and score each kept tweet's sentiment.
results = []
#take the tweets from the crawled tweets file
test = df2.Text
#perform the cleaning of the tweets
for t in test:
    results.append(tweet_cleaner_updated(t))

#change for buhari
query = 'buhari'

lastdf = pd.DataFrame()
#I use spacy for parts of speech tagging. pip install -U spacy
nlp = spacy.load('en')
#I use sentimental to score the sentiment of each tweet. pip install -U git+https://github.com/text-machine-lab/sentimental.git
sentiment = Sentimental(word_list='afinn.csv', negation='negations.csv')
tweetset = []
scorelist = []
for r in results:
    doc = nlp(r)
    #Part of speech tagging: keep noun subjects, direct objects and root tokens
    sub_toks = [
        tok for tok in doc
        if (tok.dep_ == "nsubj" or tok.dep_ == "dobj" or tok.dep_ == "ROOT")
    ]
    #print(r + " " + str(sub_toks))
    #check if buhari/atiku is either a subject, object or root word then filter out
    # NOTE(review): substring test against str(sub_toks) also matches partial
    # words; comparing token texts directly would be stricter.
    if query in str(sub_toks):
        tweetset.append(r)
        sentence_sentiment = sentiment.analyze(r)
        scorelist.append(sentence_sentiment['score'])
Ejemplo n.º 17
0
 def start_bundle(self):
     """Unconditionally (re)load the pickled Sentimental model at the start
     of each bundle (looks like an Apache Beam DoFn hook — confirm)."""
     logging.info('model loading : start')
     self.senti = Sentimental.load('model.pickle')
     logging.info('model loading : done')