Example #1
import random

import sentiment

def SelectState_rel_only(policy_mode,
                         str_rule,
                         relavance,
                         user_input,
                         pre_history,
                         TreeState,
                         force_strategy=None):
    branch_idx = next(iter(TreeState))  # first key; dict views aren't indexable in Python 3
    branch = TreeState[branch_idx]['node']
    if force_strategy is not None:
        bool_idx, int_idx = force_strategy
        return TreeState[branch_idx][bool_idx][int_idx]
    if relavance >= branch['threshold_relavance']:
        return TreeState[branch_idx][True][0]  # only use the continue, don't expand

    else:
        if policy_mode == 0 or pre_history is None:
            # don't choose the last leaf (go back)
            return random.choice(TreeState[branch_idx][False][0:-1])
        else:
            #choose this based on the previous utterances' sentiment.
            curr_1 = sentiment.get_sentiment(user_input)
            curr_2 = sentiment.get_sentiment(pre_history[-1])
            curr_3 = sentiment.get_sentiment(pre_history[-2])
            print('this is the previous history')
            print(curr_1)
            print(curr_2)
            print(curr_3)
            strategy = str_rule[(curr_1, curr_2, curr_3)]
            return {'name': strategy}
Example #2
 def sentimentize(self, compare=True):
     from sentiment import get_sentiment
     if compare:
         self.sentiment = dict()
         for mode in ['nltk', 'vader', 'textblob', 'api']:
             self.sentiment[mode] = get_sentiment(self.text, mode)
     else:
         self.sentiment = {
             'textblob': get_sentiment(self.text, mode='textblob')
         }
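Example #2 above calls get_sentiment(text, mode) with several backend names. A minimal sketch of such a dispatcher, assuming TextBlob and NLTK's VADER as backends (the 'api' mode and the exact score conventions are not specified by the example):

from textblob import TextBlob
from nltk.sentiment.vader import SentimentIntensityAnalyzer  # needs nltk.download('vader_lexicon')

def get_sentiment(text, mode='textblob'):
    # both backends return a polarity in [-1, 1]
    if mode == 'textblob':
        return TextBlob(text).sentiment.polarity
    if mode in ('nltk', 'vader'):
        return SentimentIntensityAnalyzer().polarity_scores(text)['compound']
    raise ValueError('unsupported mode: %s' % mode)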
Example #3
def get_social_score():
    posts = graph.get_all_connections(
        id='me',
        connection_name='posts',
        fields='reactions,likes,comments,created_time,message')

    results = []

    for post in posts:
        num_comments = 0
        num_likes = 0
        content = ''

        if 'comments' in post:
            num_comments = len(post['comments']['data'])
        if 'likes' in post:
            num_likes += len(post['likes']['data'])

        sentiment = 0.5

        if 'message' in post:
            content = post['message']
            sentiment = get_sentiment(content)
        time = post['created_time']
        result = (time, sentiment, num_comments + num_likes)
        results.append(result)
        print(format_result(result))

    return results
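The format_result helper used above is not shown; a minimal sketch matching the (time, sentiment, comments + likes) tuple it receives:

def format_result(result):
    time, sentiment, engagement = result
    return "%s  sentiment=%.2f  engagement=%d" % (time, sentiment, engagement)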
Example #4
 async def on_ready(self):
     print("Generating report for guild %d..." % self.guild_id)
     guild = self.get_guild_guaranteed(self.guild_id)
     today = datetime.utcnow().date()
     day_start = datetime(today.year, today.month, today.day)
     message_infos: "List[MessageInfo]" = []
     for _, channel in enumerate(guild.text_channels):
         messages = channel.history(limit=None, after=day_start)
         async for unk_message in messages:
             message = cast(discord.Message, unk_message)
             author = cast(discord.User, message.author)
             if message.content == "" or author.bot:
                 continue
             has_prefix = False
             for prefix in self.command_prefixes:
                 if message.content.startswith(prefix):
                     has_prefix = True
             if has_prefix:
                 continue
             sentiment = get_sentiment(message.content)
             message_infos.append(
                 MessageInfo(message.id, message.created_at,
                             message.content, sentiment))
     df = pd.DataFrame(
         [message_info.as_dict() for message_info in message_infos])
     df.to_csv(self.output_path, index=False)
     await self.close()
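Example #4 assumes a MessageInfo record with an as_dict() method; a hedged sketch, with field names inferred from the constructor call rather than taken from the original source:

from dataclasses import dataclass, asdict
from datetime import datetime

@dataclass
class MessageInfo:
    message_id: int
    created_at: datetime
    content: str
    sentiment: float

    def as_dict(self):
        return asdict(self)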
Example #5
    def on_data(self, data):
        print("data")
        tweet = json.loads(data)

        tweetid = tweet.get('id', None)
        place = tweet.get('place', None)
        user = tweet['user']['name'] if tweet.get('user') is not None else None
        text = tweet.get('text', None)
        lang = None

        try:
            lang = detect(text)
        except LangDetectException:
            pass

        if lang in supported_langs:
            s = get_sentiment(text, supported_langs[lang])
            tweet = Tweet(tweetid, place, user, text, s)
            print(tweet)
            for connection in WSHandler.connections:
                data = json.dumps(tweet.__dict__).encode('utf-8')
                connection.write_message(data)
        else:
            print("detected lang '{}' is not supported".format(lang))

        return True
Example #6
def sentiment_analysis():
    data = request.get_json()
    input_text = data['inputText']
    input_text2 = data['inputText2']
    input_text3 = data['inputText3']
    response = sentiment.get_sentiment(input_text, input_text2, input_text3)
    return jsonify(response)
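The view above needs a Flask app around it; a minimal hedged wiring (the route path is a guess, not taken from the original source):

from flask import Flask, request, jsonify
import sentiment

app = Flask(__name__)
# hypothetical route; the original URL rule is not shown
app.add_url_rule('/sentiment', view_func=sentiment_analysis, methods=['POST'])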
Example #7
def sentiment_analysis():
    data = request.get_json()
    input_lang = data.get('language', 'en-US')

    if 'text' not in data:
        print("No text provided to sentiment")
        abort(400)

    input_text = data['text']

    response = sentiment.get_sentiment(input_text, input_lang,
                                       SUBSCRIPTION_KEY, API_URL)
    evalnum = response['documents'][0]['score']

    print(str(evalnum))

    if 0.2 >= evalnum >= 0:
        answer = "VERY NEGATIVE"
    elif 0.4 >= evalnum > 0.2:
        answer = "NEGATIVE"
    elif 0.8 > evalnum >= 0.6:
        answer = "POSITIVE"
    elif 1 >= evalnum >= 0.8:
        answer = "VERY POSITIVE"
    else:
        answer = "NEUTRAL"
    return answer
Example #8
 def text_badcases(self):
     t = [
         "We hate the boring new project with @V_and_A  find out more about #ReelToReal http://bit.ly/1xR3bKc",
     ]
     for txt in t:
         print(txt)
         self.assertTrue(get_sentiment(txt) < 0.3)
Example #9
def sentiment_analysis():
    data = request.get_json()
    input_text = data['inputText']
    input_lang = data['inputLanguage']
    output_text = data['outputText']
    output_lang = data['outputLanguage']
    response = sentiment.get_sentiment(input_text, input_lang, output_text, output_lang)
    return jsonify(response)
Example #10
 def text_ambivalentcases(self):
     t = [
         'more info on exciting exhibition about visual representation of protest that we are involved in @V_and_A here: http://www.vam.ac.uk/content/exhibitions/disobedient-objects/',
         'Really looking forward to Disobedient Objects - show of activist folk art - opening next week at @V_and_A pic.twitter.com/l2EjaqBVYt',
         '@V_and_A @sciencemuseum Congrats for making it into our 101 things to do in London for teenagers list #KidsLondon http://ow.ly/z82lQ',
     ]
     for txt in t:
         print(txt)
         self.assertTrue(-0.3 < get_sentiment(txt) < 0.3)
Example #11
    def test_positivecases(self):

        t = [
            'Designers, @barberosgerby have collaborated with @BMWGroup to create a memorable experience in the @V_and_A #LDF14 http://bit.ly/W2IDRu',
            "We've launched an exciting new learning project with @V_and_A  find out more about #ReelToReal http://bit.ly/1xR3bKc",
            "@V_and_A Spent half day at The V&A with my daughter Eva. She loved it, surrounded by beauty and history. Thanks! pic.twitter.com/sCAU9TRJNq"
            ]
        for txt in t:
            print(txt)
            self.assertTrue(get_sentiment(txt) > 0.3)
Example #12
import os

import sentiment
from slack_sdk import WebClient

def main():
    client = WebClient(os.getenv("SLACK_TOKEN"))
    channel_name = os.getenv("CHANNEL_NAME")
    channel_id = fetch_conversation(client, channel_name)
    conversation_history = fetch_messages(client, channel_id)
    sentiment_analysis = 0
    for message in conversation_history:
        sentiment_analysis += sentiment.get_sentiment(message["text"])
    sentiment_analysis = sentiment_analysis / len(conversation_history)
    send_message(client, channel_id, sentiment_analysis)
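fetch_conversation, fetch_messages and send_message are assumed helpers; a sketch of the first two against the slack_sdk WebClient API (pagination and error handling omitted):

def fetch_conversation(client, channel_name):
    # look a channel id up by name
    for channel in client.conversations_list()["channels"]:
        if channel["name"] == channel_name:
            return channel["id"]
    raise ValueError("channel not found: %s" % channel_name)

def fetch_messages(client, channel_id):
    # newest-first list of message dicts for the channel
    return client.conversations_history(channel=channel_id)["messages"]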
Example #13
def sentiment():
    city = get_arg('city', 'melbourne').lower()
    years = list(
        map(int,
            get_arg('years', '2014,2015,2016,2017,2018').split(',')))
    months = list(
        map(int,
            get_arg('months', '1,2,3,4,5,6,7,8,9,10,11,12').split(',')))
    weekdays = list(map(int, get_arg('weekdays', '0,1,2,3,4,5,6').split(',')))
    return as_json(get_sentiment(city, years, months, weekdays))
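get_arg is presumably a thin wrapper over Flask's query-string handling; a minimal sketch under that assumption:

from flask import request

def get_arg(name, default):
    # return the query parameter if present, else the given default
    return request.args.get(name, default)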
Example #14
def sentiment():
    a = dict()
    records = Watchlist.query.filter_by(panid=session["panid"]).all()
    news_list = []
    for x in records:
        news_list.append(companynews(x.stockname))
    flat = [j for sub in news_list for j in sub]
    sentiment_list = get_sentiment(flat)
    for i, x in enumerate(records):
        a[f"{x.stockname}"] = sentiment_list[i]
    return render_template("sentiment.html", records=records, a=a)
Example #15
    def post(self, id):
        args = self.reqparse.parse_args()
        text = args["text"]
        
        # getting sentiment analysis from google nlp api
        annotations = get_sentiment(text)
        sentiment = annotations.document_sentiment.score

        # getting emotion from deepaffects text api
        emotion = list(json.loads(get_emotion(text).text)["response"].keys())[0]
        ketchup = CheckIn(id, text, sentiment, emotion)
        self.add_checkin_to_db(ketchup)
        most_common, average, slope, r2 = self.get_data(id)
        return jsonify({"emotion": emotion, "sentiment": sentiment, "most_freq_emotion": most_common, "average_sentiment": average, "slope": slope, "r2": r2})
Example #16
    def post(self):
        tw = self.get_argument('twitter')
        frm = self.get_argument('from')
        to = self.get_argument('to')
        msg = self.get_argument('message')
        getVars(msg, to, frm)
        # the original referenced undefined names (tw_nm, twitter, nm);
        # tw and frm are assumed to be what was meant
        pol1 = 0
        pol2 = 0
        if msg == "":
            pol1 = get_sentiment(tw)
        if tw == "":
            MSG = filtering(to)
            pol2 = get_sentiment(MSG)
        if pol1 == 0:
            pol = pol2
        elif pol2 == 0:
            pol = pol1
        else:
            pol = (pol1 + pol2) / 2
        if pol > 0:
            emo = "happy"
        elif pol < 0:
            emo = "sad"
        else:
            emo = "neutral"
        get_info(frm, emo)
Example #18
def store_post_data(classifier, status_data, topic, game_related_data=False):
    post_list = []
    for i in status_data:
        if unidecode(i['lang']):
            post_text = unidecode(i['text'])
            if post_text not in post_list:
                post_sentiment = get_sentiment(classifier, post_text)

                post_item = {"_id": int(i['id']),
                             "gamer": game_related_data,
                             "topic": topic,
                             "post_text": post_text,
                             "post_sentiment": post_sentiment}

                add_to_db(post_item, "posts")
                post_list.append(post_text)
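add_to_db is an assumed helper; a sketch backed by pymongo (the database name is illustrative, and a real implementation would reuse one client):

from pymongo import MongoClient

def add_to_db(item, collection_name):
    client = MongoClient()
    client["tweets_db"][collection_name].insert_one(item)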
Example #19
def trade(ticker): 
    max_shares = 0
    avg_price = 0
    if request.method == 'GET':
        tick = ticker
        sentiment_news = str(get_sentiment(ticker))
        name, price, market, isOpen = get_company_data(ticker)
        image = plot_historical(ticker)
        CurrCash = Money.query.filter_by(id=1).first().CurrCash
        if Portfolio.query.filter_by(ticker=ticker).count() != 0:
            max_shares = Portfolio.query.filter_by(ticker=ticker)[0].shares
            avg_price = Portfolio.query.filter_by(ticker=ticker)[0].avg_value
        return render_template('trade.html', sentiment=sentiment_news, name=name,
                               price=price, market=market, isOpen=isOpen,
                               ticker=tick, avg_price=avg_price, image=image,
                               CurrCash=CurrCash, maxshares=int(max_shares))
    else: 
        if request.form['tick']:
            tick = request.form['tick']
            return redirect('/trade/'+tick)
Example #20
def twitter_login():
    # If the user is not authorized, redirect to the twitter login page
    if not twitter.authorized:
        return redirect(url_for('twitter.login'))
    # If user is authorized retrieve his/her account details
    account_info = twitter.get('account/settings.json')
    # If user is authorized retrieve his/her tweets
    user_tweets = twitter.get("statuses/user_timeline.json")

    # If account information is successfully retrieved, proceed to analyse and display it
    if account_info.ok:
        # Convert retrieved info to json format
        user_tweets_json = user_tweets.json()
        account_info_json = account_info.json()

        # Get tweet text from the objects returned
        all_tweets = []
        print(account_info_json)
        for tweet in user_tweets_json:
            all_tweets.append(tweet['text'])

        # Text Cleaning for tweets
        all_tweets_cleaned = text_cleaning.clean_tweets(all_tweets)

        # BTM model for topic modeling results
        classified_tweets, topics = btm_model.categorize(all_tweets_cleaned)

        # Sentiment analysis
        tweet_sentiment = sentiment.get_sentiment(all_tweets_cleaned)

        # Prepare data to be sent and rendered on the template for user dashboard
        data = {
            "all_tweets": all_tweets,
            "account_info_json": account_info_json,
            "classified_tweets": classified_tweets,
            "topics": topics,
            "sentiment": tweet_sentiment
        }

        # Render template with user data
        return render_template('user_dash.html', data=data)

    # If account info is not retrieved successfully return an error message.
    return '<h2>Error</h2>'
Example #21
    def infer_sentiment(self, s, pos=False):
        '''
        Get sentiment scores as a dict only for select POS, and 0 score for rest
        '''
        #pos_included = ['ADJ', 'NOUN', 'PROPN', 'INTJ']
        pos_included = ['ADJ', 'NOUN', 'PROPN', 'VERB', 'ADV', 'INTJ']
        s = self.nlp(s)
        sentiments = get_sentiment(s)
        if pos:
            for t in s:
                if t.text.lower() in sentiments and t.pos_ not in pos_included:
                    sentiments[t.text] = None
                if t.lemma_.lower() in sentiments and t.pos_ not in pos_included:
                    sentiments[t.lemma_] = None
        sentiments = {k: v for k, v in sentiments.items() if v}

        res = []
        for t in s:
            res.append(sentiments.get(t.text.lower(), 0.0))

        return self.convert_sentiment(res)
Example #22
import operator

def get_business_score(reviews, id):
    sentences = reviews_to_sentences(reviews)
    final_cat = {"food":0, "service":0,"ambiance":0, "money":0}
    cat_count = {"food":1, "service":1, "ambiance":1, "money":1}
    items_food = {}
    items_service = {}
    items_amb = {}
    items_money = {}
    for sent in sentences:
        sentr = senti.get_sentiment(sent)
        if sentr != 0.5:
            cat = sim.get_similarity(sent)
            final_cat[cat] += sentr
            cat_count[cat] += 1
            if cat == 'food':
                items_food[sent] = sentr
            elif cat == 'service':
                items_service[sent] = sentr
            elif cat == 'ambiance':
                items_amb[sent] = sentr
            elif cat == 'money':
                items_money[sent] = sentr
            
    for key in final_cat:
        final_cat[key] = final_cat[key] / (1.0 * cat_count[key])

    total_sen = 5
    new_f = dict(sorted(items_food.items(), key=operator.itemgetter(1), reverse=True)[:total_sen])
    new_s = dict(sorted(items_service.items(), key=operator.itemgetter(1), reverse=True)[:total_sen])
    new_a = dict(sorted(items_amb.items(), key=operator.itemgetter(1), reverse=True)[:total_sen])
    new_m = dict(sorted(items_money.items(), key=operator.itemgetter(1), reverse=True)[:total_sen])
    
    f = " ".join(new_f.keys())
    s = " ".join(new_s.keys())
    a = " ".join(new_a.keys())
    m = " ".join(new_m.keys())
    write_image(f, s, a, m, id)

    return final_cat
Example #23
import pandas as pd
from sentiment import get_sentiment

tweets = pd.read_csv('mytweets.csv')

states = pd.read_pickle('base_us_table.pd').code.values

locs = [i for i in tweets['loc'].values]
sents = [get_sentiment(i) for i in tweets.text.values]

result = []
for l, sent in zip(locs, sents):
    l = str(l)
    for s in states:
        s = str(s)
        if s in l:
            result.append((s, float(sent)))
            break  # stop after the first matching state

data_ = pd.DataFrame(result, columns=['state', 'score'])
data_means = data_.groupby('state').score.mean()
Example #24
import argparse

from sentiment import get_sentiment

# the top of this snippet was truncated; the parser setup is reconstructed
# from the options the code reads below (args.userinput, args.file)
parser = argparse.ArgumentParser(description='Analyse review sentiment')
parser.add_argument('-u',
                    '--userinput',
                    help='Analyse review typed by user',
                    action='store_true')
parser.add_argument('-f',
                    '--file',
                    help='File with reviews',
                    default='amazon_reviews.txt')

args = parser.parse_args()

user_input = args.userinput
fileName = args.file
reviews = []
r = Reviews_manager(fileName)

if user_input:
    user_review = input('Write your reviews or let me select some for you: ')
    reviews.append(user_review)
else:
    reviews = r.get_reviews()

scoring = get_sentiment(reviews)

if user_input:
    print('Your score is {} Do you agree with that?'.format(
        scoring[0]['score']))
else:
    compare = r.compare_reviews(scoring)
    for e in compare:
        print('{} \nThe score for this review is {}, the user {} with that\n'.
              format(e['review'], e['score'], e['compare']))
Example #25
import json
from os import listdir
from os.path import isfile, join

import pandas as pd

import sentiment

if __name__ == "__main__":

    filepath = 'video_ids.txt'
    output_file = 'output_analysis.txt'
    DATA_PATH = 'InputComments'
    FILE = "Output/Output.%s.json"

    comment_files = [join(DATA_PATH, f) for f in listdir(DATA_PATH) if (isfile(join(DATA_PATH, f)) and "json" in f)]
    #print(comment_files)

    for f in comment_files:
        #print(f)
        with open(f, 'r') as infile:
            data = json.load(infile)
            data = pd.DataFrame(data)
            dictionary_comments = sentiment.get_sentiment(data['text'], 100)
            print("-----dictionary----", dictionary_comments)

            video_id = f[26:37]  # slice of the filename that holds the video id
            with open(FILE % video_id, "w") as out_file:
                out_file.write(json.dumps(dictionary_comments))

    #data['author_replied'] = data['author_replied'].astype(int)


    # with open(filepath,'r') as fp:
    #     youtube_url = fp.readline()
    #     cnt = 1
    #     while youtube_url != '':
    #         print(cnt,"------url--------", youtube_url,"\n")
    #         video_id = extract_video_id(youtube_url)
Example #26
def get_portfolio_info(api, stock_ticker):
    """
    This function gets various information about stocks held in your portfolio. 

    Parameters
    ----------
    api: Alpaca api object
    stock_ticker: Stock ticker string (ex: NRZ)

    Returns
    -------
    None
    
    """
    # Checking moving averages
    ma_200 = support.get_ma(api, stock_ticker=stock_ticker, days="200")
    print(f"200-day moving average of {stock_ticker} is", ma_200)
    ma_50 = support.get_ma(api, stock_ticker=stock_ticker, days="50")
    print(f"50-day moving average of {stock_ticker} is", ma_50)

    # Checking last trading price
    price = support.get_price(api, stock_ticker=stock_ticker).price

    account = api.get_account()

    # Checking Sentiment
    stock_sentiment = sentiment.get_sentiment(stock_ticker=stock_ticker)
    if stock_sentiment <= -0.5:
        print(
            f"News sentiment of {stock_ticker} is negative ({stock_sentiment})."
        )
    elif -0.5 < stock_sentiment < 0.5:
        print(
            f"News sentiment of {stock_ticker} is neutral ({stock_sentiment})."
        )
    elif stock_sentiment >= 0.5:
        print(
            f"News sentiment of {stock_ticker} is positive ({stock_sentiment})."
        )

    # Checking stock position
    currently_own_this_stock = support.currently_own_this_stock(
        api, stock_ticker=stock_ticker)
    if currently_own_this_stock == 0:
        print(f"{stock_ticker} stock is currently not held.")
    else:
        print(f"{stock_ticker} stock is currently held.")

    # Checking pending orders
    pending_buy = support.check_for_pending(api,
                                            stock_ticker=stock_ticker,
                                            trade_type="buy",
                                            day_range=5,
                                            zone='UTC',
                                            result_limit=200)
    pending_sell = support.check_for_pending(api,
                                             stock_ticker=stock_ticker,
                                             trade_type="sell",
                                             day_range=5,
                                             zone='UTC',
                                             result_limit=200)
Example #27
def analyze_sentiment(textdata):
    print('<<<<<<<In analyze_sentiment function>>>>>>')
    response = get_sentiment(textdata)
    return response
Example #28
import galbackend_online
import sentiment
galbackend_online.InitLogging()
galbackend_online.InitResource('v4')
while True:
    user_id = 'a'
    history = {user_id : ['aa','bb']}
    theme = {user_id: 'movies'}
    theme, strategy, utt, previous_history, word2vec = galbackend_online.get_response('joke', 1, 'joke_joke', user_id, history, theme)
    print(utt)
    sent = sentiment.get_sentiment(utt)
    print(sent)
    if sent not in ['pos','neg','neutral']:
        break
Example #30
twitter = Twython(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
try:
    user_timeline = twitter.get_user_timeline(screen_name=username, count=200)
except TwythonError as e:
    print(e)
    user_timeline = []  # keep the later loop from failing on an API error

date_str = str((date.today() - timedelta(days=0)).strftime('%a %b %d'))
date_key = str((date.today() - timedelta(days=0)).strftime('%y%m%d'))


tweet_count = 0
total_sentiment_score = 0
tweets = ""


for status in user_timeline:
    if date_str in status['created_at']:
        tweet_count += 1
        sentiment_score = sentiment.get_sentiment(status['text'])
        total_sentiment_score += sentiment_score
        tweets += status['text'] + ' - ' + str(sentiment_score) + '\n'
    else:
        break
if tweet_count > 0:
    avg_sentiment_score = total_sentiment_score / tweet_count
    print "Today's tweet count for " + username + ": " + str(tweet_count)
    print('\nAvg Sentiment Score : %.2f' % avg_sentiment_score)
else:
    print('No tweets for ' + username + ' @ ' + date_str)
Example #31
from requests import get
from bs4 import BeautifulSoup, SoupStrainer
import lxml  # parser backend used by BeautifulSoup below
import html2text
from sentiment import get_sentiment

def find_song(song_name, artist):
    token = "Bearer token_here"
    genius_search = "http://api.genius.com/search?q=" + "%20".join(song_name.split(" ")) + "%20" + "%20".join(
        artist.split(" "))
    Header = {
        "Authorization": token
    }
    res = get(genius_search, headers=Header).json()
    return (res['response']['hits'][0]['result']['url'])

def get_lyrics(url):
    only_div = SoupStrainer('div', {"class": "lyrics"})
    scrape = get(url).text
    soup = BeautifulSoup(scrape, 'lxml', parse_only=only_div)
    return (html2text.html2text(soup.text))

lyrics = get_lyrics(find_song(title, artist))

print(get_sentiment(lyrics))
Example #32

for tt_utt in user_input_all:
    conv = []
    conv.append(tt_utt)
    # clear the cache for alice
    subprocess.getstatusoutput("rm c.txt")
    f = open('simulate_conv/' + str(conv_index) + '.txt', 'w')
    f.write('Turn: 0' + '\n')
    f.write('You: Hello' + '\n')
    f.write('TickTock: ' + tt_utt + '\n')
    f.write('Appropriateness: ' + '\n')
    f.write('Strategy: new' + '\n')
    f.write('')
    f.write('\n')
    sent_3 = sentiment.get_sentiment(tt_utt)
    sent_2 = 'null'
    sent_1 = 'null'
    theme[str(conv_index)] = random.choice(TopicLib)
    previous_history[str(conv_index)] = ['Hello', tt_utt]
    for turn_id in range(1, 10):
        print(turn_id)
        al_utt = alice.alice(tt_utt)
        conv.append(al_utt)
        f.write('Turn: ' + str(turn_id) + '\n')
        f.write('You: ' + al_utt + '\n')
        next_sent_1 = sent_3
        next_sent_2 = sentiment.get_sentiment(al_utt)
        state = (sent_1, sent_2, sent_3, turn_id)
        # if get_response lands in one of the five strategies, select one to execute; otherwise stick to the original strategy
        theme_new, strategy, response, previous_history_new, word2vec = galbackend_online.get_response(
            None, policy_mode, al_utt, str(conv_index), previous_history, theme,
            oov_state, name_entity_state, short_answer_state, anaphra_state,
            word2vec_ranking_state, tfidf_state)
Example #33
import pickle
from math import ceil

import nltk

from sentiment import get_sentiment

def find_stats(text):

    sent_tokenizer = pickle.load(open("static/english.pickle", "rb"))
    
    statistics = {}
    sents = sent_tokenizer.tokenize(text)
    words = []
    for x in sents:
        words.extend(nltk.word_tokenize(x))

    total_words = len(words)
    statistics["total_words"] = total_words

    letters = "".join(words)
    total_letters = len(letters)
    statistics["total_letters"] = total_letters

    total_sents = len(sents)
    statistics["total_sents"] = total_sents

    if total_words > 0:
        statistics["avg_word_len"] = total_letters/total_words
    else:
        statistics["avg_word_len"] = 0

    if total_sents > 0:
        statistics["avg_sent_len"] = total_words/total_sents
    else:
        statistics["avg_sent_len"] = 0

    try:
        ari = (4.71*(total_letters/total_words) + 0.5*(total_words/total_sents) - 21.43)
    except ZeroDivisionError:
        ari = 1

    ari = ceil(ari)
    ari = max(ari, 1)
    ari = min(ari, 14)
    statistics["ari"] = ari

    ari_table = ["Kindergarten", "First grade", "Second grade",
                 "Third grade", "Fourth grade", "Fifth grade",
                 "Sixth grade", "Seventh grade", "Eighth grade",
                 "Ninth grade", "Tenth grade", "Eleventh grade",
                 "Twelfth grade", "College"]
    ari_grade_level = ari_table[ari-1]
    statistics["ari_grade_level"] = ari_grade_level
    ari_age_lower = ari + 4
    if ari_age_lower == 18:
        ari_age_upper = 22
    else:
        ari_age_upper = ari_age_lower + 1
    statistics["ari_age_lower"] = ari_age_lower
    statistics["ari_age_upper"] = ari_age_upper
    ari_age_lower = str(ari_age_lower)
    ari_age_upper = str(ari_age_upper)
    ari_age_range = "%s -- %s" % (ari_age_lower, ari_age_upper)
    statistics["ari_age_range"] = ari_age_range

    # sentiment
    pos, neg = get_sentiment(words)
    sentiment = (pos-neg)/len(words)
    statistics["sentiment"] = sentiment

    return statistics
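find_stats expects get_sentiment(words) to return a (pos, neg) pair of counts. A hedged sketch using NLTK's opinion lexicon (requires nltk.download('opinion_lexicon'); the original scoring scheme is not shown):

from nltk.corpus import opinion_lexicon

def get_sentiment(words):
    pos_words = set(opinion_lexicon.positive())
    neg_words = set(opinion_lexicon.negative())
    pos = sum(1 for w in words if w.lower() in pos_words)
    neg = sum(1 for w in words if w.lower() in neg_words)
    return pos, neg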
Example #34
def analyze_text_block(text,
                       sentiment_library="textblob",
                       entity_library="spacy",
                       get_sentiment_per_topic=True):
    text = re.sub(r'\s+', ' ', text)
    text = text.replace("\n", ' ')

    entities_res = entities.get_entities(text, library=entity_library)
    keywords_res = keywords.get_keywords(text)
    sentiment_res = sentiment.get_sentiment(text, library=sentiment_library)
    lemmas_dict = {}

    # Calculate sentiment per lemmas, keywords and entities, by averaging
    # the sentiment for all the sentences that they appear in:
    if get_sentiment_per_topic:
        blob = TextBlob(text)
        for sentence in blob.sentences:
            sentence_score = sentiment.get_sentiment(
                str(sentence), library=sentiment_library)['sentiment.score']

            sentence_lemmas = lemmas.get_lemmas(sentence)

            sentence = str(sentence).lower()

            for lemma in sentence_lemmas:
                lemmatxt = lemma['text']
                if lemmatxt in lemmas_dict.keys():
                    lemmas_dict[lemmatxt]['sentiment.score'].append(
                        sentence_score)
                else:
                    lemmas_dict[lemmatxt] = {
                        'sentiment.score': [sentence_score]
                    }

            for keyword in keywords_res:
                word = keyword['text']
                if word.lower() in sentence:
                    if 'sentiment.score' not in keyword.keys():
                        keyword['sentiment.score'] = []
                    keyword['sentiment.score'].append(sentence_score)

            for entity in entities_res:
                word = entity['text']
                if word.lower() in sentence:
                    if 'sentiment.score' not in entity.keys():
                        entity['sentiment.score'] = []
                    entity['sentiment.score'].append(sentence_score)

        for keyword in keywords_res:
            # WARNING: This is a hack. Happens when we have different libraries not agreeing on sentence boundaries!
            if 'sentiment.score' not in keyword.keys():
                keyword['sentiment.score'] = [sentiment_res['sentiment.score']]

            keyword['num.sentences'] = len(keyword['sentiment.score'])
            keyword['sentiment.score'] = np.mean(keyword['sentiment.score'])

        for entity in entities_res:
            # WARNING: This is a hack. Happens when we have different libraries not agreeing on sentence boundaries!
            if 'sentiment.score' not in entity.keys():
                entity['sentiment.score'] = [sentiment_res['sentiment.score']]

            entity['num.sentences'] = len(entity['sentiment.score'])
            entity['sentiment.score'] = np.mean(entity['sentiment.score'])

        lemmas_res = []
        for lemma in lemmas_dict.keys():
            scores = lemmas_dict[lemma]['sentiment.score']
            lemmas_res.append({
                'text': lemma,
                'num.sentences': len(scores),
                'sentiment.score': np.mean(scores)
            })
    else:
        lemmas_res = lemmas.get_lemmas(text)

    results = {
        'entities': entities_res,
        'sentiment': sentiment_res,
        'keywords': keywords_res,
        'lemmas': lemmas_res
    }

    return results
Example #35
def main():
    """
    Run the app by initiating the conversation set to True
    First there is a brief greeting opportunity, then you can ask questions
    """
    storyNum = -1  # arbitrary value to mark first story being told
    conversation = True

    # speech = Pygsr()
    # speech.record(5) # duration in seconds (3)
    # phrase, complete_response = speech.speech_to_text('en_US') # select the language
    # print phrase

    # Start the greeting portion of the interaction
    print_slow("\n>>Hello! I am Richard P Feynman. How are you?\n")
    greeting = input()
    if not (s.get_sentiment(greeting) == "neg"):
        # Use sentiment analysis to only accept positive or neutral sentences
        # Used to avoid troll inputs
        feyn_response = [line.rstrip("\n") for line in open("text/responses.txt")]
        numLines = file_length("text/responses.txt")
        rand = random.randint(0, numLines - 1)  # randint is inclusive at both ends
        print_slow(">>" + feyn_response[rand])
    else:
        print ">>You're a bit of odd person arent you."
        conversation = False

    # Start the main conversation
    while conversation:
        print_slow("\n>>Do you have a question?\n")
        response = input()
        if "beauty" in response or "flower" in response:
            display("image/flower.jpg")
            play_audio("audio/beauty.mp3")
        elif "suggestion" in response:
            print_slow(">>Well, I could talk to you about ...\n")
            suggestions()
        elif "principle" in response:
            print_slow(
                ">>The first principle is that you must not fool yourself - and you are the easiest person to fool.\n"
            )
        elif "poetry" in response:
            display("image/glass.jpg")
            play_audio("audio/glass.mp3")
            print_slow(
                ">>Poets say that science takes away from the beauty of the stars--mere globs of gas atoms. Nothing is mere. "
                "I too can see the stars on a desert night and feel them. But do I see less or more? The vastness of the heavens stretches "
                "my imagination - stuck on this carosusel my little eye can catch one-million-year-old light. A vast pattern - of which I am a "
                "part - perhaps my stuff was belched from some forgotten star, as one is belching there. Or see them with the greater eye of Palomar, "
                "rushing all apart from some common starting point when they were perhaps all together. What is the pattern, or the meaning, or the why? "
                "It does not do harm to the mystery to know a little about it. For far more marvelous is the truth than any artists of the past imagined. "
                "Why do the poets of the present not speak of it? What men are poets who can speak of Jupiter if he were like a man, but if he is an "
                "immense spinning sphere of methane and ammonia must be silent?\n"
            )
        elif "philosophy" in response:
            play_audio("audio/philosophy.mp3")
        elif "radio" in response:
            print_slow(">>Oh, here's a good story!\n")
            play_audio("audio/Radios.mp3")
        elif "father" in response or "inertia" in response:
            play_audio("audio/inertia.mp3")
        elif "teaching" in response:
            play_audio("audio/teaching.mp3")
        elif "doubt" in response or "uncertainty" in response:
            display("image/knowing.jpg")
            play_audio("audio/doubt.mp3")
        elif "train" in response:
            print_slow(">>Ah! Here's an interesting problem!\n")
            play_audio("audio/train.mp3")
        elif "social science" in response:
            print_slow("Well here's my opinion: \n")
            play_audio("audio/social.mp3")
        elif "science" in response and "learn" in response:
            play_audio("audio/miracle-people.mp3")
        elif "nature" in response:
            play_audio("audio/simplicity.mp3")
        elif "aliens" in response or "flying saucers" in response:
            play_audio("audio/aliens.mp3")
        elif "why" in response:
            play_audio("audio/why.mp3")
        elif "tell me a story" in response:
            storyNum = newStory(storyNum)
        elif "feeling lost" in response:
            with open("text/letter.txt", "r") as myfile:
                letter = myfile.read()

            print_slow(
                ">>Perhaps I should read you what I once wrote to a friend of mine who was feeling as you are now. I wrote: \n\n"
            )
            print_slow(letter)
            conversation = False
        # end the conversation
        elif "done" in response or "finished" in response:
            print_slow(">>Well I'm glad to have talked a little with you! I hope we talk again soon!\n")
            conversation = False
        else:
            print_slow(">>I have nothing to say about that!\n")
            conversation = False
Example #36
def push_tweet(data, timelineable, parse_terms):
    global max_id
    global min_id
    global id_policy_bits

    id = data["id"]

    # merge tweet by id
    tweet = graph.merge_one("Tweet", "id", id)

    # timelining stuff
    if timelineable:
        if id > max_id:
            max_id = id

        if id < min_id:
            min_id = id
            id_policy_bits = id_policy_bits | USE_MAX_ID

    # authorship
    if "user" in data:
        user = push_user(data["user"])
        graph.create_unique(Relationship(user, "POSTS", tweet))

    # quotes
    if "quoted_status" in data:
        tweet2 = push_tweet(data["quoted_status"], False, False)
        graph.create_unique(Relationship(tweet, "QUOTES", tweet2))

    # is a retweet
    if "retweeted_status" in data:
        tweet2 = push_tweet(data["retweeted_status"], False, False)
        graph.create_unique(Relationship(tweet, "RETWEETS", tweet2))

    # reply
    reply = data.get("in_reply_to_status_id")

    if reply:
        reply_tweet = graph.merge_one("Tweet", "id",
                                      data["in_reply_to_status_id"])
        graph.create_unique(Relationship(tweet, "REPLY_TO", reply_tweet))

    # geolocation exact/estimated
    if data["coordinates"] is not None:
        tweet.properties["lon"] = data["coordinates"]["coordinates"][0]
        tweet.properties["lat"] = data["coordinates"]["coordinates"][1]
    elif data["place"] is not None:
        coordinates = data["place"]["bounding_box"]["coordinates"][0]
        lon = (coordinates[0][0] + coordinates[1][0] + coordinates[2][0] +
               coordinates[3][0]) / 4
        lat = (coordinates[0][1] + coordinates[1][1] + coordinates[2][1] +
               coordinates[3][1]) / 4
        tweet.properties["lon"] = lon
        tweet.properties["lat"] = lat

    # fav count
    tweet.properties["favorite_count"] = data["favorite_count"]

    # rt count
    tweet.properties["retweet_count"] = data["retweet_count"]

    # text
    tweet.properties["text"] = data["text"]
    if "user" in data and parse_terms:
        for tok in process_text(data["text"]):
            word = push_word(tok)
            if "terms" in user.properties:
                # terms = user.properties["terms"]
                # q = "{0}:".format(tok)
                # idx = terms.find(q)
                # if not idx == -1:
                #     sub = terms[(idx + len(q)):]
                #     sub = sub[:sub.find(" ")]
                #     q += sub
                #     terms = terms.replace(q, "{0}:{1}".format(tok, int(sub) + 1))
                # else:
                #     terms += "{0}:1 ".format(tok)
                # user.properties["terms"] = terms
                user.properties["term_count"] += 1
            else:
                user.properties["term_count"] = 1
                # user.properties["terms"] = "{0}:1 ".format(tok)
            user.push()
            rel = graph.match_one(user, "DISCUSSES", word)
            if rel:
                rel.properties["count"] = rel.properties["count"] + 1
                rel.push()
            else:
                rel = Relationship(user, "DISCUSSES", word)
                rel.properties["count"] = 1
                graph.create_unique(rel)

    if "text" in data:
        sent = sentiment.get_sentiment(data["text"])
        tweet["polarity"] = sent[0]
        tweet["subjectivity"] = sent[1]
        for tok in process_text(data["text"]):
            word = push_word(tok)
            rel = graph.match_one(tweet, "CONTAINS", word)
            if rel:
                rel.properties["count"] = rel.properties["count"] + 1
                rel.push()
            else:
                rel = Relationship(tweet, "CONTAINS", word)
                rel.properties["count"] = 1
                graph.create_unique(rel)

    # hashtags
    for h in data["entities"].get("hashtags", []):
        hashtag = push_hashtag(h)
        graph.create_unique(Relationship(hashtag, "TAGS", tweet))

    # mentions
    for m in data["entities"].get("user_mentions", []):
        mention = push_user(m)
        graph.create_unique(Relationship(tweet, "MENTIONS", mention))

    tweet.push()

    return tweet
Example #37
import pickle
import sqlite3

import sentiment

def most_common(lst):
    return max(set(lst), key=lst.count)

conn = sqlite3.connect('rs_ratings.db')
c = conn.cursor()
c.execute('''SELECT * FROM responses''')
table_num = {}
table_sum = {}
tmplist = []
for item in c.fetchall():
    print(item)
    #print item[0]
    #break
    turn_1_user_sent = sentiment.get_sentiment(item[0])
    turn_1_tt_sent = sentiment.get_sentiment(item[1])
    turn_0_user_sent = sentiment.get_sentiment(item[7])
    turn_0_tt_sent = sentiment.get_sentiment(item[8])
    turn_str = item[10]
    if item[11] is None or item[12] is None or item[13] is None:
        break
    score_list = [item[11], item[12], item[13]]
    score = most_common(score_list)
    tmplist.append([turn_0_user_sent, turn_0_tt_sent, turn_1_user_sent, turn_str, score])
    key = (turn_0_user_sent, turn_0_tt_sent, turn_1_user_sent, turn_str)
    if key in table_sum:
        table_sum[key] += score
        table_num[key] += 1
    else:
        table_sum[key] = score
        table_num[key] = 1
Example #38
import sentiment
import pickle
import readall

rating_logs = readall.readall("/home/ubuntu/zhou/Backend/rating_log/")
user_input = readall.get_log(rating_logs)
'''
with open('user_input_v2.pkl') as ff:
    user_input = pickle.load(ff)
'''
f = open('sentiment_log.txt', 'w')
sentiment_label = []
for turn in user_input:
    question = turn['question']
    label = sentiment.get_sentiment(question)
    sentiment_label.append(label)
    f.write(question + '\n')
    f.write('sentiment: ' + label + '\n')

pos_number = sentiment_label.count('pos')
print(pos_number)
neg_number = sentiment_label.count('neg')
print(neg_number)
neutral_number = sentiment_label.count('neutral')
print(neutral_number)
Example #40
            max_statuses_count = tweet["user"]["statuses_count"]

    # Can be called when analysing data
    # data_analysis(statuses)
    for tweet in statuses:
        data = {}
        if 'user' in tweet:
            if 'name' in tweet['user']:
                data["screenname"] = tweet["user"]["name"]
            elif 'screenname' in tweet['user']:
                data["screenname"] = tweet["user"]["screenname"]
        data["date"] = tweet["created_at"]
        data["value"] = 1
        data["text"] = tweet["text"]
        if tweet["lang"] == "en" and len(tweet["text"]) > 2:
            data["sentiment"] = get_sentiment(tweet["text"])
        elif len(tweet["text"])% 7 < 3:
            data["sentiment"] = 1
        elif len(tweet["text"])% 7 > 5:
            data["sentiment"] = 2
        else:
            data["sentiment"] = 0
        date = tweet["created_at"].split()
        data["time"] = int(date[3].split(":")[0])
        data["minutes"] = int(date[3].split(":")[1])
        data["lang"] = tweet["lang"]
        try:
            data["gender"] = user_gender.get(data["screenname"])
        except KeyError:
            data["gender"] = "female"
Example #41
    resolved_texts = [entity.resolve_anaphores(text) for text in texts]

    domain_excluded_entities = (
        domainexcludedentities.get_domain_excluded_entities(texts))

    contexts_lists = [context.extract(text) for text in texts]
    contexts = []
    for contexts_list in contexts_lists:
        contexts += contexts_list

    entities = []
    entities_sentiments = {}
    concept_graph = conceptgraph.ConceptGraph()
    for candidate_context in contexts:
        entity_sentiment = sentiment.get_sentiment(candidate_context)
        context_entities_set = set(entity.extract(candidate_context))
        for extracted_entity in context_entities_set:
            if extracted_entity in domain_excluded_entities:
                continue
            entities.append(extracted_entity)
            if extracted_entity not in entities_sentiments:
                entities_sentiments[extracted_entity] = []
            entities_sentiments[extracted_entity].append(entity_sentiment)

        for (entity1, entity2) in itertools.combinations(context_entities_set, 2):
            if (entity1 in domain_excluded_entities) or\
                    (entity2 in domain_excluded_entities):
                continue
            concept_graph.add_edge(entity1, entity2)
Example #42
from bs4 import BeautifulSoup
import requests

import sentiment

url = "https://itunes.apple.com/us/rss/customerreviews/id=954338382/sortBy=mostRecent/xml"

r = requests.get(url)

data = r.text

soup = BeautifulSoup(data, "html.parser")

for block in soup.find_all('title'):

    text = block.find_next_sibling()
    if text.name != 'content':
        continue

    text = text.text
    print(text)

    sentiment.get_sentiment(text)
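    # pause after each review so its sentiment output can be inspected interactively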
    import pdb
    pdb.set_trace()