def __init__(self):
    self.root = tk.Tk()

    # Frame that hosts the Text widget and its Scrollbar.
    container = tk.Frame(self.root, width=900, height=600)
    container.pack(fill="both", expand=True)
    container.grid_propagate(False)  # keep the fixed 900x600 size
    container.grid_rowconfigure(0, weight=1)
    container.grid_columnconfigure(0, weight=1)

    # Editable text area that displays the fetched tweets.
    self.text = tk.Text(container, borderwidth=3, relief="sunken")
    self.text.config(font=("consolas", 12), undo=True, wrap='word')
    self.text.grid(row=0, column=0, sticky="nsew", padx=2, pady=2)

    # Show at most 50 tweets; inserting at '1.0' places each new tweet
    # at the top of the widget.
    for shown, tweet in enumerate(get_tweets(), start=1):
        self.text.insert('1.0', tweet['text'] + '\n')
        if shown == 50:
            break

    # Vertical scrollbar wired both ways to the text widget.
    bar = tk.Scrollbar(container, command=self.text.yview)
    bar.grid(row=0, column=1, sticky='nsew')
    self.text['yscrollcommand'] = bar.set
예제 #2
0
파일: usermap.py 프로젝트: ayamnova/twitter
def user_graph(directory):
    '''
    Build a graph of the tweet/retweet relationship.

    Every vertex is a different tweet and every edge connects a tweet
    with a retweet of it.

    directory: the directory with all the tweets in it

    Returns: a grapher.Graph of the tweet-retweet relationship
    '''
    graph = grapher.Graph()

    for tweet in get_tweets(directory):
        try:
            # EAFP: assume it is a retweet and look up the original status.
            source = tweet["retweeted_status"]
            # Vertices are keyed by tweet id strings; the authors'
            # screen names are attached as vertex data.
            graph.addEdge(source["id_str"],
                          tweet["id_str"],
                          f_data=source["user"]["screen_name"],
                          t_data=tweet["user"]["screen_name"])
        except KeyError:
            # No "retweeted_status" key: an original tweet, so record it
            # as a lone vertex.
            graph.addVertex(tweet["id_str"], tweet["user"]["screen_name"])

    return graph
예제 #3
0
파일: tdm.py 프로젝트: ayamnova/twitter
def build_documents_country(directory):
    '''
    A function to build the documents separated by country
    to get a TDM from a directory.

    directory: a directory with Flume files

    Returns: dict mapping country name -> accumulated cleaned tweet text
    '''
    documents = dict()

    resolver = carmen.get_resolver()
    resolver.load_locations()

    for tweet in get_tweets(directory):
        try:
            # resolve_tweet returns None for tweets it cannot locate, so
            # subscripting raises TypeError -> skip those tweets.
            # (Dropped the original's dead `country = str()` initializer:
            # `country` is always rebound here or the tweet is skipped.)
            country = resolver.resolve_tweet(tweet)[1].country
        except TypeError:
            continue

        text = clean_text(tweet["text"])
        # Accumulate per-country text; the first tweet seen for a country
        # creates its entry.
        if country in documents:
            documents[country] += text
        else:
            documents[country] = text

    return documents
예제 #4
0
def get_user_tweets(uid, limit=None, offset=0):
    """Return specified user tweets limited by limit and offset."""
    # Fall back to the configured default, and never exceed the hard cap.
    effective_limit = (config.DEFAULT_LIMIT if limit is None
                       else min(config.MAX_LIMIT, limit))
    key = utils.get_user_tweets_key(uid)
    tids = conn.zrange(key, offset, offset + effective_limit - 1)
    return tweets.get_tweets(tids)
예제 #5
0
def get_timeline(uid, limit=None, offset=0):
    """Return sepcified user timeline limited by limit and offset."""
    # Fall back to the configured default, and never exceed the hard cap.
    effective_limit = (config.DEFAULT_LIMIT if limit is None
                       else min(config.MAX_LIMIT, limit))
    # Bring the cached timeline up to date before reading it.
    refresh_timeline(uid)
    key = utils.get_timeline_key(uid)
    # zrevrange: newest entries first.
    tids = conn.zrevrange(key, offset, offset + effective_limit - 1)
    return tweets.get_tweets(tids)
예제 #6
0
def tweets_page():
    """Render tweets.html for the 'q' query arg with per-tweet sentiment."""
    q = request.args.get('q') or ''

    # Only hit the tweet service when a non-empty query was supplied.
    tweet_list = tweets.get_tweets(q) if q else []
    sent_list = [sentiment.get_text_category(STATE, item)
                 for item in tweet_list]

    return render_template('tweets.html', q=q,
                           pairs=zip(tweet_list, sent_list))
예제 #7
0
def tokenize_tweets(user):
    """Returns a list of all useful tokenized tweets.

    Fetches the user's tweets, drops words matching LINKS_RT (links /
    retweet markers), tokenizes, removes stop words, and keeps the
    lowercased tokens longer than 4 characters.
    """
    # join() instead of the original quadratic `raw_text += ...` loop.
    raw_text = "".join(tweet.text + " " for tweet in get_tweets(user))

    words = [word for word in raw_text.split(" ")
             if not LINKS_RT.search(word)]
    tokens = wordpunct_tokenize(" ".join(words))

    # Hoisted: the original called stop_words() once per token.
    stops = stop_words()
    tokens = [tok for tok in tokens if tok not in stops]
    return [tok.lower() for tok in tokens if len(tok) > 4]
예제 #8
0
def words_handler():
    """Return the top tokens of a user's recent tweets as a JSON word list."""
    screen_name = request.form.to_dict()['screen_name']

    recent_tweets = tweets.get_tweets(screen_name)
    tokens = tweets.tokenize_tweets(recent_tweets)

    # One {'text': ..., 'size': ...} record per ranked token.
    payload = [{'text': token.upper(), 'size': size}
               for token, size in tweets.top_tokens(tokens)]

    return Response(json.dumps(payload),
                    mimetype='application/json',
                    headers={'Cache-Control': 'no-cache'})
예제 #9
0
파일: tdm.py 프로젝트: ayamnova/twitter
def build_documents_user(directory):
    '''
    A function to build the documents separated by user
    to get a TDM from a directory.

    directory: a directory with Flume files

    Returns: dict mapping screen name -> accumulated cleaned tweet text
    '''
    documents = dict()

    for tweet in get_tweets(directory):
        screen_name = tweet["user"]["screen_name"]
        text = clean_text(tweet["text"])
        # Accumulate per-user text; the first tweet seen for a user
        # creates their entry.
        if screen_name in documents:
            documents[screen_name] += text
        else:
            documents[screen_name] = text

    return documents
예제 #10
0
def build_graph(directory):
    '''
    A function to build a graph based on retweets.

    directory: the directory with all the tweets in it

    Return: an nx.DiGraph whose nodes are usernames (with a ``pos``
    attribute from the first tweet of theirs that is discovered) and
    whose edges point from the original author to the retweeter, with
    the retweeted status stored on the edge as ``tweet``.
    '''
    g = nx.DiGraph()  # the one graph to rule them all

    # Get all the tweets in the directory
    raw_tweets = get_tweets(directory)
    # Append location data to tweets
    tweets = add_positions(raw_tweets)

    # Add all the users as nodes with the position data from the first
    # tweet of theirs that is discovered.
    # Fixed: use a set for membership — the original used a list, making
    # this loop O(n^2) over users.
    seen_users = set()
    for tweet in tweets:
        username = tweet["user"]["screen_name"]
        if username not in seen_users:
            seen_users.add(username)
            g.add_node(username, pos=tweet["position"])

    for tweet in tweets:
        try:
            # treat it first like a retweet
            head = tweet["retweeted_status"]["user"]["screen_name"]
            tail = tweet["user"]["screen_name"]
            g.add_edge(head, tail)
            g.edges[head, tail]['tweet'] = tweet["retweeted_status"]
        except KeyError:
            # it's not a retweet, so don't add any edges
            pass
    return g
예제 #11
0
파일: views.py 프로젝트: cshintov/timeline
def show_tweets(request):
    """Render a user's tweets plus hashtag/mention stats.

    Reads ``screen_name``, ``tweet_count`` and ``stat_count`` from the
    POST data.  If the user is already cached in the local DB, serves the
    stored tweets; otherwise fetches up to MAX_TWEET tweets from the API,
    stores them, and then renders.  Returns the ``result(...)`` response,
    or an error page when the API reports an error.
    """
    scr_name= request.POST.get("screen_name")
    # NOTE(review): int() raises ValueError/TypeError if these POST fields
    # are missing or non-numeric — presumably the form guarantees them.
    twt_count = int(request.POST.get("tweet_count"))
    stat_count = int(request.POST.get("stat_count"))
    try:
        # Cached path: user already stored locally.
        user = User.objects.get(scr_name=scr_name)
        tweets = [tweet.tweet_text for tweet in user.tweet_set.all()]
        hash_tags, mentions = get_hash_mentions(tweets[:twt_count], stat_count)
        tweets = tweets[:twt_count]
        return result(request, tweets, hash_tags, mentions)
    except User.DoesNotExist:
        # Cold path: fetch from the API, then persist for next time.
        tweets = get_tweets(scr_name, MAX_TWEET)
        if 'error' in tweets:
            # API-level failure: show the error with a retry link.
            retry = '<br><a href="/timeline/get_input">Try another user!</a>'
            return HttpResponse(tweets['error']+retry)
        tweets = extract_tweets(tweets)
        user = User(scr_name=scr_name)
        user.save()
        for tweet in tweets:
            user.tweet_set.create(tweet_text=tweet)
        hash_tags, mentions = get_hash_mentions(tweets[:twt_count], stat_count)
    # Falls through here only on the cold path (the cached path returned
    # inside the try block).
    return result(request, tweets[:twt_count], hash_tags, mentions)
예제 #12
0
파일: count.py 프로젝트: ayamnova/twitter
'''
Count
A script to count the english and non-english tweets from flume files

Author: Karsten Ladner
Date: 7/12/2018
'''

import sys
from os.path import join as jn
from tweets import get_tweets
from config import PROC, OUT, PATH

if __name__ == '__main__':
    # Usage: count.py <comma-separated dirs> <output filename under OUT>
    out = jn(OUT, sys.argv[2])
    dirs = sys.argv[1].split(',')

    with open(out, 'a') as fout:
        for d in dirs:
            tweets = get_tweets([d], prefix=PATH)
            line = "{0},{1},{2}".format(d, len(tweets[0]), tweets[1])
            print(line)
            # Fixed: the original wrote no newline, so successive rows
            # ran together in the output file.  Also dropped the
            # redundant fout.close() — `with` already closes the file.
            fout.write(line + "\n")
예제 #13
0
    sc = afinn.score(tweet['text'])

    total += sc

    if sc < _min:
        _min = sc
    if sc > _max:
        _max = sc
    if sc < 0:
        neg += 1
    elif sc > 0:
        pos += 1
    elif sc == 0:
        neut += 1

    return (total, _min, _max, neg, neut, pos)


if __name__ == '__main__':
    # Usage: <script> <comma-separated dirs> <output filename under OUT>
    out = jn(OUT, sys.argv[2])
    dirs = sys.argv[1].split(',')

    # `with` closes the file on exit; the original's trailing
    # fout.close() was a redundant no-op and has been removed.
    with open(out, 'w') as fout:
        for d in dirs:
            tweets = get_tweets([d], key='text', prefix=PATH)
            sent, total, mn, mx, neg, neut, pos = get_sentiment(tweets[0])
            print("Sentiment Calculated")
            fout.write("{0}\t{1}\t{2}\t{3}\t{4}\t{5}\t{6}\t{7}\t{8}\n".format(
                d, sent, mn, mx, neg, neut, pos, total, tweets[1]))
예제 #14
0
def _query():
    """Fetch tweets for the 'keywords' query arg and return them as JSON."""
    search_terms = request.args.get('keywords', "default", type=str)
    return jsonify(tweets=get_tweets(search_terms))
예제 #15
0
'''
Usernames
A script to save the usernames of the users for each day
'''

import sys
from tweets import get_tweets, save_to_file as save
from config import PATH

if __name__ == '__main__':
    # One output file per day directory given on the command line.
    for day in sys.argv[1].split(','):
        out_path = "./out/users-" + day.replace("/", "_") + ".dat"
        result = get_tweets([day], key='names', prefix=PATH)
        # result[0]: the usernames; result[1]: how many were filtered out.
        save({'data': result[0], 'num_filtered': result[1]}, out_path)
예제 #16
0
from predict import predict
from tweets import get_tweets
import json
import os
import torch
import argparse

if __name__ == "__main__":
    my_parser = argparse.ArgumentParser()
    my_parser.version = '1.0'
    my_parser.add_argument("search_term", help="The search to be used in order to retrieve tweets")
    my_parser.add_argument('-n','--num_tweets', action='store',type=int,default=500,help='No of tweets to be retrieved, must be less than 1000')
    args = my_parser.parse_args()
    search_term = args.search_term
    no_of_tweets = args.num_tweets
    
    if not os.path.isfile('twitterAPIkeys.json'):
        raise FileNotFoundError('Please make sure a \'twitterAPIkeys.json\' exists in the src folder!')
    
    with open('twitterAPIkeys.json') as f:
        APIkeys_dict = json.load(f)
    
    
    tweet_df = get_tweets(search_term, no_of_tweets, APIkeys_dict, retweets=False)
    inputs = list(tweet_df['tweet_text'].values)
    outputs = predict(inputs)
    outputs_2d = tsne_cpu(outputs)
    # print(outputs_2d)
    tweet_df['x_coord'] = outputs_2d[:,0]
    tweet_df['y_coord'] = outputs_2d[:,1]
    visualise(tweet_df)
    def evaluatePositions(self):
        """Open and close trades based on Twitter sentiment (Python 2).

        Two phases:
        1. While fewer than ``self.numSimulTrades`` trades are open,
           either (a) keep accumulating a historical sentiment baseline
           for BTC and ETH until ``btc_historical_total`` passes 100000
           classified tweets, or (b) once the baseline exists, sample
           recent sentiment and open a trade when it deviates enough
           from the baseline.
        2. For every open trade, sample recent sentiment for the traded
           asset and close the trade when sentiment crosses a
           mean +/- scaled-std threshold.

        NOTE(review): assumes the percent arithmetic operates on floats;
        under Python 2, `/` on ints would truncate — verify the counters'
        types.  The thresholds (100000, 1.042, 0.943, 0.800, 0.674) are
        unexplained magic numbers — presumably tuned empirically.
        """
        # Collect the currently open trades.
        openTrades = []
        for trade in self.trades:
            if (trade.status == "OPEN"):
                openTrades.append(trade)

        if (len(openTrades) < self.numSimulTrades):
            if (self.btc_historical_total <= 100000):
                # --- Phase 1a: accumulate the historical baseline. ---
                bitcoin_query = 'BTC OR Bitcoin OR $BTC'

                # NOTE(review): assigns self.btc_sinceid (lower case) but
                # reads self.btc_sinceID (capital ID) — case mismatch;
                # the cursor may never advance.  Verify against __init__.
                btc_historical_tweets, self.btc_sinceid = tweets.get_tweets(
                    50, self.btc_sinceID, bitcoin_query)
                btc_total_score, btc_positive, btc_negative, btc_total = tweets.classify(
                    btc_historical_tweets)
                self.btc_historical_positive = self.btc_historical_positive + btc_positive
                self.btc_historical_negative = self.btc_historical_negative + btc_negative
                self.btc_historical_score = self.btc_historical_score + btc_total_score
                self.btc_historical_total = self.btc_historical_total + btc_total

                # Running percentage of positive BTC tweets.
                self.btc_historical_percent = (self.btc_historical_positive /
                                               self.btc_historical_total) * 100

                ethereum_query = 'Ethereum OR ETH OR $ETH'

                eth_historical_tweets, self.eth_sinceID = tweets.get_tweets(
                    50, self.eth_sinceID, ethereum_query)
                eth_total_score, eth_positive, eth_negative, eth_total = tweets.classify(
                    eth_historical_tweets)
                self.eth_historical_positive = self.eth_historical_positive + eth_positive
                self.eth_historical_negative = self.eth_historical_negative + eth_negative
                self.eth_historical_score = self.eth_historical_score + eth_total_score
                self.eth_historical_total = self.eth_historical_total + eth_total

                # Running percentage of positive ETH tweets.
                self.eth_historical_percent = (self.eth_historical_positive /
                                               self.eth_historical_total) * 100

                # Announce (with ANSI bold escapes) once the baseline is done.
                if self.btc_historical_total >= 100000:
                    print '\033[1m' + "Historical Tweets Analyzed"
                    print "historical btc percent: " + str(
                        self.btc_historical_percent)
                    print "historical eth percent: " + str(
                        self.eth_historical_percent)
                    print '\033[0m'

            elif (self.btc_historical_total > 100000):
                # --- Phase 1b: baseline ready; sample recent sentiment. ---
                bitcoin_query = 'BTC OR Bitcoin OR $BTC'

                btc_tweets, sinceid_recent = tweets.get_tweets(
                    50, 0, bitcoin_query)
                btc_total_score2, btc_positive2, btc_negative2, btc_total2 = tweets.classify(
                    btc_tweets)
                btc_percent = (btc_positive2 / btc_total2) * 100

                ethereum_query = 'Ethereum OR ETH OR $ETH'

                eth_tweets, sinceid_recent = tweets.get_tweets(
                    50, 0, ethereum_query)
                eth_total_score2, eth_positive2, eth_negative2, eth_total2 = tweets.classify(
                    eth_tweets)
                eth_percent = (eth_positive2 / eth_total2) * 100

                # Open a trade on a big enough deviation from the baseline:
                # ETH unusually positive, or BTC unusually negative.
                if (eth_percent > 1.042 * self.eth_historical_percent
                        or btc_percent < 0.943 * self.btc_historical_percent):

                    if btc_percent < 0.943 * self.btc_historical_percent:
                        self.btc_sentiments = []
                        self.btc_trading_percent = btc_percent
                        self.type_of_trade = 'BTC'
                        self.trades.append(BotTrade(self.prices,
                                                    stopLoss=0.01))
                    elif (eth_percent > 1.042 * self.eth_historical_percent):
                        self.eth_sentiments = []
                        self.eth_trading_percent = eth_percent
                        self.type_of_trade = 'ETH'
                        self.trades.append(BotTrade(self.prices,
                                                    stopLoss=0.01))
                    else:
                        self.type_of_trade = ''

                # Throttle: wait five minutes between sampling rounds.
                time.sleep(60 * 5)

        # --- Phase 2: decide whether to close each open trade. ---
        for trade in openTrades:

            if (self.type_of_trade == 'BTC'):

                bitcoin_query = 'BTC OR Bitcoin OR $BTC'
                btc_tweets, sinceid_recent = tweets.get_tweets(
                    10, 0, bitcoin_query)

                btc_total_score2, btc_positive2, btc_negative2, btc_total2 = tweets.classify(
                    btc_tweets)
                btc_percent = (btc_positive2 / btc_total2) * 100

                self.btc_sentiments.append(btc_percent)

                if (len(self.btc_sentiments) > 5):

                    mean_sentiment = np.mean(self.btc_sentiments)
                    std_sentiment = np.std(self.btc_sentiments)

                    # Close when sentiment recovers above the upper band
                    # (mean + scaled standard error).
                    if btc_percent >= mean_sentiment + (
                        (0.800 * std_sentiment) /
                            math.sqrt(len(self.btc_sentiments))):
                        price = self.api.getticker('BTC-ETH')
                        self.currentClose = price['Bid']
                        trade.close(self.currentClose)
                else:
                    # Not enough samples yet; wait three minutes.
                    time.sleep(60 * 3)

            elif (self.type_of_trade == 'ETH'):

                ethereum_query = 'Ethereum OR ETH OR $ETH'
                eth_tweets, sinceid_recent = tweets.get_tweets(
                    10, 0, ethereum_query)

                eth_total_score2, eth_positive2, eth_negative2, eth_total2 = tweets.classify(
                    eth_tweets)
                eth_percent = (eth_positive2 / eth_total2) * 100

                self.eth_sentiments.append(eth_percent)

                if (len(self.eth_sentiments) > 5):

                    mean_sentiment = np.mean(self.eth_sentiments)
                    std_sentiment = np.std(self.eth_sentiments)

                    # Close when sentiment drops below the lower band
                    # (mean - scaled standard error).
                    if eth_percent <= mean_sentiment - (
                        (0.674 * std_sentiment) /
                            math.sqrt(len(self.eth_sentiments))):
                        price = self.api.getticker('BTC-ETH')
                        self.currentClose = price['Bid']
                        trade.close(self.currentClose)
                else:
                    # Not enough samples yet; wait three minutes.
                    time.sleep(60 * 3)
            else:
                # No tracked trade type: close at the current bid.
                price = self.api.getticker('BTC-ETH')
                self.currentClose = price['Bid']
                trade.close(self.currentClose)
예제 #18
0
 def testNumberRequest(self):
     """Requesting seven tweets must return exactly seven items."""
     from tweets import get_tweets
     fetched = get_tweets(name='JennyJarv', count=7)
     self.assertTrue(len(fetched) == 7)
예제 #19
0
 def testGetAnythingFromTwitter(self):
     """Fetching a real account should return at least one tweet."""
     fetched = tweets.get_tweets(name='allieehenry')
     self.assertTrue(len(fetched) >= 1)
예제 #20
0
 def testNoName(self):
     """Calling get_tweets with no name must yield None."""
     result = tweets.get_tweets()
     self.assertTrue(result is None)
예제 #21
0
파일: main.py 프로젝트: ashiqueh/discountme
import extract
import tags
import tweets

if __name__ == "__main__":
	print("Don't forget to chcp 65001") # this is a reminder to me when I run this program from command line
	print('Enter your query')
	query = input()

	list_of_tags = tags.get_tags(query)

	list_of_tweets = tweets.get_tweets(list_of_tags)

	result = extract.get_codes(list_of_tweets)

	for code in result:
		print (code)


	r = extract.get_likely_tweets(list_of_tweets)
	for t in r:
		print(t)
예제 #22
0
        for col in row:
            out += str(col) + "\t"
        out.rstrip("\t")
        out += "\n"
        fout.write(out)
    fout.close()


if __name__ == '__main__':
    # Sub-commands: save | show | print | mout
    if sys.argv[1] == "save":
        g = build_graph(sys.argv[2])
        parts = sys.argv[2].strip(".").split("/")
        # Fixed: the original used `p is not "crisis"`, which compares
        # object identity, not string equality (and is a SyntaxWarning
        # on modern Python).  Use != for value comparison.
        fileparts = [p + "-" for p in parts[1:] if p != "crisis"]
        filename = "".join(fileparts) + "data"
        save_graph(g, filename)
    elif sys.argv[1] == "show":
        g = load_graph(sys.argv[2])
        display_graph(g)
    elif sys.argv[1] == "print":
        tweets = get_tweets(sys.argv[2].split(','))
        g = user_graph(tweets)
        print_user_graph(g)
    elif sys.argv[1] == "mout":
        tweets = get_tweets_from_dirs(sys.argv[2].split(','),
                                      prefix='./crisis/crisis/2018/06/')
        g = user_graph(tweets)
        write_file(g, sys.argv[3])
    else:
        g = build_graph("./data/25crisis")
        print("Please enter either 'save','show', or 'print'")
예제 #23
0
def tweets(username, count=2):
    """Fetch *count* recent tweets for *username* via get_tweets."""
    result = get_tweets(username, count)
    return result
예제 #24
0
파일: usermap.py 프로젝트: ayamnova/twitter
    # print the first row (which is all the usernames)
    # get the list of users and sort the keys alphabetically
    for name in users:
        # fancy print all the screen_names of the users
        # truncate the name to 10 characters and right align at 15 chars
        print("{0:>15.10}".format(name), end="")
    for tw in graph.getRoots():
        # fancy print the screen_name of the user who tweeted
        # truncate it at 10 characters and right align it at 10 chars
        print("\n{0:>10.10}".format(tw.getData()), end=" " * 5)
        for value in matrix[tweet_indices[tw.getId()]]:
            # fancy print every value in the list associated with this
            print("{0:<15}".format(value), end="")


if __name__ == '__main__':
    # Sub-commands: save | show | print
    command = sys.argv[1]
    if command == "save":
        dirs = [pjoin(PATH, d) for d in sys.argv[2].split(',')]
        fetched = get_tweets(dirs)
        # fetched[0] holds the tweets themselves.
        save_graph(build_graph(fetched[0]), sys.argv[3])
    elif command == "show":
        display_graph(load_graph(sys.argv[2]))
    elif command == "print":
        print_user_graph(user_graph(sys.argv[2]))
    else:
        print("Please enter either 'save','show', or 'print'")
예제 #25
0
def tweets(username, count=2):
    """Thin wrapper: delegate to get_tweets for *username*, *count* tweets."""
    return get_tweets(username, count)
예제 #26
0
from PIL import Image, ImageFont, ImageDraw

import tweets

# Renamed the result so the `tweets` module is no longer shadowed.
tweet_list = tweets.get_tweets()

for i, tweet in enumerate(tweet_list):

    base = Image.open("boat.jpg").convert('RGBA')
    fnt = ImageFont.truetype('Impact.ttf', 30)
    txt = Image.new('RGBA', base.size, (255, 255, 255, 0))

    # Crude fixed-width wrapping: four slices of 32 characters.
    tweetlines = [tweet[:32], tweet[32:64], tweet[64:96], tweet[96:]]

    d = ImageDraw.Draw(txt)
    d.text((10, 32), tweetlines[0], font=fnt, fill=(255, 255, 255, 255))
    d.text((10, 64), tweetlines[1], font=fnt, fill=(255, 255, 255, 255))
    d.text((10, 128), tweetlines[2], font=fnt, fill=(255, 255, 255, 255))
    d.text((10, 256), tweetlines[3], font=fnt, fill=(255, 255, 255, 255))

    out = Image.alpha_composite(base, txt)
    # Fixed: alpha_composite returns an RGBA image, and Pillow raises
    # OSError when saving RGBA as JPEG — convert to RGB first.
    out.convert('RGB').save('output/{}'.format(i), 'JPEG')