Пример #1
0
def sent(comment, status):
    """Classify *comment* as 'Green'/'Red'/'Amber' via VADER and compare
    against *status*.

    Returns 'sample_value' for an empty comment, 'done' when the computed
    flag equals *status*, and 'close' otherwise.
    """
    # Guard the empty-comment case before paying for analyzer construction
    # (the original built the analyzer first, then returned).
    if comment == '':
        return 'sample_value'

    # VADER compound values range from -1 (most negative) to +1.
    p_threshold = 0.350
    n_threshold = -0.150

    analyzer = SentimentIntensityAnalyzer()
    result = analyzer.polarity_scores(comment)
    val = result['compound']
    print("result= ")
    print(result)

    if val >= p_threshold:
        flag = 'Green'
    elif val <= n_threshold:
        flag = 'Red'
    else:
        # The remaining range (n_threshold, p_threshold) is exactly the
        # original's redundant `elif` condition, so plain `else` suffices.
        flag = 'Amber'

    return 'done' if flag == status else 'close'
Пример #2
0
def getInterestingNewsInRange(keyword, startdate, enddate):
    """Find news published in [startdate, enddate] whose title or
    description matches *keyword*, score the keyword-bearing sentences
    with VADER, and return the matching articles as plain JSON data.

    Each returned article gains a 'rate' field: the average compound
    score (string, 4 decimals) over its keyword-bearing sentences.
    Articles with no such sentence are dropped.
    """
    db = mongodb_client.get_db()

    startdate = dateutil.parser.parse(startdate)
    enddate = dateutil.parser.parse(enddate)

    keyword_re = re.compile(keyword, re.IGNORECASE)
    cursor = db[NEWS_TABLE_NAME].find({
        'publishedAt': {'$gte': startdate, '$lte': enddate},
        '$or': [{"title": {'$in': [keyword_re]}},
                {"description": {'$in': [keyword_re]}}],
    })

    # Hoist loop-invariant work: the original rebuilt the analyzer once
    # per article and recompiled the word-boundary pattern once per
    # sentence.
    analyzer = SentimentIntensityAnalyzer()
    word_re = re.compile(r'\b({0})\b'.format(keyword), flags=re.IGNORECASE)

    filtered_news = []
    for news in cursor:
        selected = [s for s in tokenize.sent_tokenize(news['text'])
                    if word_re.search(s) is not None]
        if selected:
            total = 0.0
            for sentence in selected:
                total += analyzer.polarity_scores(sentence)["compound"]
            news['rate'] = str(round(total / len(selected), 4))
            filtered_news.append(news)

    # dumps/loads round-trip strips Mongo-specific types (e.g. ObjectId).
    return json.loads(dumps(filtered_news))
Пример #3
0
def analyze_sentiment(text):
    """Label *text* as "Bullish", "Bearish", or "Neutral" from its VADER
    polarity scores."""
    scores = SentimentIntensityAnalyzer().polarity_scores(text)
    compound = scores["compound"]
    pos = scores["pos"]
    neg = abs(scores["neg"])
    if compound > .005 or pos > neg:
        return "Bullish"
    if compound < -.005 or neg > pos:
        return "Bearish"
    return "Neutral"


# def analyze_text(item):
#     # BASE_URL = "http://localhost:3000/comments"
#     isPost = type(item) == praw.models.reddit.submission.Submission
#     isDict = type(item) == dict
#     # awards = ''
#     if(isDict):
#         text = item['body']
#         id = item['id']
#         time = item['created_utc']
#         score = item['score']
#         parent = item['parent_id']
#         if parent.startswith("t"):
#             parent = parent[3:]
#     else:
#         text = item.body
#         time = item.created_utc
#         id = item.id
#         score = item.score
#         parent = item.link_id[3:]
#         # awards = item.all_awardings

#     for word in text.split():
#         word = word.strip(punctuation)

#         # Tickers of len<2 do not exist
#         if (len(word) < 2):
#             continue

#         # Does word fit the ticker criteria
#         if word.isupper() and len(word) != 1 and (word.upper() not in gl.COMMON_WORDS) and len(word) <= 5 and word.isalpha() and (word.upper() in gl.TICKERS):
#             # Checks to see if the ticker has been cached.
#             # url = "http://localhost:3000/id/" + id
#             r = requests.get(url= BASE_URL + "/id/" + id)
#             if(r.status_code == 200):
#                 continue
#             sentiment = analyze_sentiment(text)
#             # print(score)
#             data = {
#                 "comment_id" : id,
#                 "comment_date" : time,
#                 "ticker" : word,
#                 "parent_post" : parent,
#                 "body" : text,
#                 "score" : score,
#                 "sentiment" : sentiment
#                 }
#             r = requests.post(url = BASE_URL+"/comments", data = data)
Пример #4
0
def analyze(body):
    """Classify *body* as "Bullish"/"Bearish"/"Neutral" via VADER."""
    s = SentimentIntensityAnalyzer().polarity_scores(body)
    is_bullish = s["compound"] > .005 or s["pos"] > abs(s["neg"])
    if is_bullish:
        return "Bullish"
    is_bearish = s["compound"] < -.005 or abs(s["neg"]) > s["pos"]
    return "Bearish" if is_bearish else "Neutral"
Пример #5
0
    def analyze_sentiment(self):
        """Score every text in ``self.bodies`` with VADER and store the
        bullish/bearish/neutral percentages (truncated ints) on the
        instance.

        Increments the pre-existing ``self.pos_count`` / ``self.neg_count``
        counters as a side effect.
        """
        analyzer = SentimentIntensityAnalyzer()
        neutral = 0
        for body in self.bodies:
            scores = analyzer.polarity_scores(body)
            compound = scores["compound"]
            if compound > .005 or scores["pos"] > abs(scores["neg"]):
                self.pos_count += 1
            elif compound < -.005 or abs(scores["neg"]) > scores["pos"]:
                self.neg_count += 1
            else:
                neutral += 1

        # NOTE(review): divides by len(self.bodies) — raises
        # ZeroDivisionError for an empty list; presumably callers
        # guarantee non-empty input — confirm.
        total = len(self.bodies)
        self.bullish = int(self.pos_count / total * 100)
        self.bearish = int(self.neg_count / total * 100)
        self.neutral = int(neutral / total * 100)
Пример #6
0
def evaluate_reaction(identifiers, corpus):
    """Compute VADER polarity metrics for tweets in *corpus* matching
    *identifiers*.

    Returns an (n_tweets, 4) ndarray with columns
    [compound, pos, neg, neu]; also prints the per-column averages.
    """
    relevant_twts = [
        twt_dict['tweet_text']
        for twt_dict in filter_tweets(identifiers, corpus)
    ]
    metrics = numpy.ndarray(shape=(len(relevant_twts), 4))
    sid = SentimentIntensityAnalyzer()

    for i, twt in enumerate(relevant_twts):
        ss = sid.polarity_scores(twt)
        metrics[i, :] = [ss['compound'], ss['pos'], ss['neg'], ss['neu']]

    score_order = ['compound', 'positive', 'negative', 'neutral']
    avg_scores = numpy.average(metrics, axis=0)
    for i in range(4):
        # Python 3 print *function*: the original used a Python 2 print
        # statement, a SyntaxError under the Python 3 used by the rest of
        # this file.
        print(score_order[i] + ": " + str(avg_scores[i]))

    return metrics
Пример #7
0
def insert_into_mysql(list_of_dict):
    """Insert scraped review rows into MySQL, attaching a VADER
    polarity-score string to each comment.

    Each dict must provide 'name', 'comment' and 'date' keys.
    """
    cur, conn = create_conn_to_mysql()
    analyzer = SentimentIntensityAnalyzer()
    for item in list_of_dict:
        # The original split item['name'] into two halves inside a bare
        # `except:` block, but never used the pieces — the full name went
        # into the row — so the dead split (and the unused
        # item['address'] read) is removed.
        name = item['name']
        # Comment is wrapped in literal double quotes, matching the
        # original row format.
        quoted_comment = "\"" + item['comment'] + "\""
        fixed_date = date_error(item['date'])
        print(">>>>> returned date", fixed_date)
        scores = analyzer.polarity_scores(item['comment'])
        row = [name, quoted_comment, fixed_date, str(scores)]
        insert_into_table(cur, row)
    close_connection(cur, conn)
Пример #8
0
    # NOTE(review): this fragment starts mid-loop — `item`, `analyzer` and
    # `xyz` are bound above the visible region (presumably one scraped
    # review element per iteration; confirm against the loop header).
    # `dict` shadows the builtin name; left unchanged here.
    dict = {}
    # Review title from the <h3> heading, with tabs/newlines trimmed.
    dict['title'] = item.find("h3").find(text=True).strip('\t\n')
    # Review date: first text node of span.author-wrapper inside
    # p.review-author.
    dict['date'] = item.find("p", {
        "class": "review-author"
    }).find("span", {
        "class": "author-wrapper"
    }).find(text=True).strip('\t\n')
    # Author name: span[itemprop=name] inside the same paragraph.
    dict['author'] = item.find("p", {
        "class": "review-author"
    }).find("span", {
        "itemprop": "name"
    }).find(text=True).strip('\t\n')
    # Review body text.
    dict['comment'] = item.find("p", {
        "class": "review-description"
    }).find(text=True).strip('\t\n')
    # Store the whole VADER score dict as its string repr.
    vs = analyzer.polarity_scores(dict['comment'])
    dict['sentiment'] = str(vs)
    xyz.append(dict)
    #list_dict = xyz
#print(xyz)
# Open a MySQL connection for the insert pass (credentials redacted).
con = pymysql.connect(user='******',
                      password='******',
                      database='customerreviews',
                      charset='utf8')
cursor = con.cursor()
DB_NAME = 'customerreviews'

# NOTE(review): this loop body appears to continue past the visible
# region — the unpacked fields are not yet used here.
for i in xyz:
    w = i['author']
    v = i['title']
    x = i['date']
Пример #9
0
from vaderSentiment import SentimentIntensityAnalyzer

# Read one line of text, then score each whitespace-separated token
# individually with VADER and print token + score dict.
sentences = input("Enter the text = ").split()

analyzer = SentimentIntensityAnalyzer()
for token in sentences:
    scores = analyzer.polarity_scores(token)
    print("{:-<65} {}".format(token, str(scores)))
Пример #10
0
import feedparser
from vaderSentiment import SentimentIntensityAnalyzer
import collections

d = feedparser.parse('http://rss.nzherald.co.nz/rss/xml/nzhrsscid_000000001.xml')
analyzer = SentimentIntensityAnalyzer()


if __name__ == '__main__':
    # (compound, title) pairs.  The original kept a {compound: title}
    # dict, which silently dropped every headline sharing a compound
    # score with a later one (common for 0.0); it also had a dead
    # `keylist = result.keys()` line that was immediately overwritten.
    scored = []
    for post in d.entries:
        print("\nAnalyze:", post.title)
        print("Sentence:", post.description)
        vs = analyzer.polarity_scores(post.description)
        print("{:-<65} {}".format(post.title, str(vs)))
        print("Compound:", vs['compound'])
        scored.append((vs['compound'], post.title))

    # Report the most negative headline (lowest compound score).
    lowest, title = min(scored, key=lambda pair: pair[0])
    print("\n %s: %s" % (lowest, title))
Пример #11
0
def getNeautral(text):
    """Return the VADER neutral score for *text* as ``{'score': float}``."""
    neu_score = SentimentIntensityAnalyzer().polarity_scores(text)["neu"]
    return {'score': neu_score}
Пример #12
0
def getPositive(text):
    """Return the VADER positive score for *text* as ``{'score': float}``."""
    pos_score = SentimentIntensityAnalyzer().polarity_scores(text)["pos"]
    return {'score': pos_score}
Пример #13
0
def getCompound(text):
    """Return the VADER compound score for *text* as ``{'score': float}``."""
    compound = SentimentIntensityAnalyzer().polarity_scores(text)["compound"]
    return {'score': compound}
Пример #14
0
def getAll(text):
    """Return the complete VADER polarity-score dict for *text*."""
    return SentimentIntensityAnalyzer().polarity_scores(text)
Пример #15
0
# Demo: score a fixed English sentence, then load data.txt for further
# (currently disabled) per-line analysis.

analyzer = SentimentIntensityAnalyzer()
t = analyzer.polarity_scores(
    "If the speed of foreign aid increases and the increase in the revenue collection will reduce the amount of debt."
)
print(str(t))

# `with` guarantees the handle is closed — the original left the file
# object open for the rest of the process.
with open('data.txt', 'r', encoding="utf-8-sig") as data_file:
    lines = [i.strip() for i in data_file]

for line in lines:
    # The original called line.strip() here and discarded the result — a
    # no-op, since lines are already stripped above.  The TextBlob
    # translate/analyze steps were commented out, so the loop body is
    # intentionally empty.
    pass