def backup_tweets(twitter_config, my_tweets_file):
    """Archive the authenticated account's tweets to a file, then delete them.

    :param twitter_config: mapping with the five Twitter API credentials
    :param my_tweets_file: destination file for the downloaded tweets
    """
    credentials = APIKey(
        consumer_key=twitter_config["consumer_key"],
        consumer_secret=twitter_config["consumer_secret"],
        bearer_token=twitter_config["bearer_token"],
        access_token=twitter_config["access_token"],
        access_token_secret=twitter_config["access_token_secret"],
    )
    # NOTE(review): proxy endpoint is hard-coded to a local port — confirm.
    client = ITwitter(credentials, "http://127.0.0.1:7890")
    client.get_tweets(my_tweets_file)
    client.empty_tweets()
def draw_test(foldername, searchword):
    """Render each tweet matching *searchword* as a JPEG image.

    Images are written to ../<foldername>/<foldername>_images/<n>.jpg
    (1-based index) and the image directory path is returned.
    """
    list_word = twitter.get_tweets(foldername, searchword)
    address = '../' + foldername + '/' + foldername + '_images/'
    # Load the font once; reloading it on every iteration was wasted work.
    font = ImageFont.truetype('./DejaVuSerif-Italic.ttf', 15)
    for i, raw_word in enumerate(list_word):
        # Drop non-ASCII characters the font pipeline cannot render.
        word = raw_word.encode('ascii', 'ignore').decode('ascii')
        # Light-blue background canvas.
        image = Image.new('RGB', (500, 312), color=(190, 226, 231))
        draw = ImageDraw.Draw(image)
        draw.text((5, 200), word, fill=(0, 0, 0), font=font)
        image.save(address + str(i + 1) + '.jpg', dpi=(300.0, 300.0))
    return address
def get_tweet_list(user_handle):
    """Run a sentiment analysis request on text within a passed filename."""
    client = language.LanguageServiceClient()
    tweet_list = twitter.get_tweets(handle=user_handle)
    # Sentinel from the twitter layer: propagate the error payload untouched.
    if tweet_list[0] == "34":
        return tweet_list
    for tweet in tweet_list:
        document = types.Document(
            content=tweet.get("text"),
            type=enums.Document.Type.PLAIN_TEXT,
        )
        annotations = client.analyze_sentiment(document=document)
        # Attach the document-level sentiment to each tweet dict in place.
        sentiment = annotations.document_sentiment
        tweet["score"] = sentiment.score
        tweet["magnitude"] = sentiment.magnitude
    return tweet_list
def produce_next_tweet(app_status): app_status = status.load() # Just get the latest tweets tweets = twitter.get_timeline_tweets(800) tweets = filter_tweets(tweets) tweets = filter(lambda t:not t['user']['screen_name'] == twitter_settings.screen_name, tweets) if len(tweets) <= 1: print('Could not generate tweet (not enough eligible tweets)') app_status['latest_tweet'] = 'Could not generate tweet (not enough eligible tweets)' return recent_tweets = twitter.get_tweets(twitter_settings.screen_name, True) best_tweet = create_markovated_tweet(tweets, 140, map(lambda t: t['text'].strip(), recent_tweets)) if best_tweet != None: twitter.post_tweet(best_tweet) encoded = unicode(best_tweet).encode('utf-8') print(encoded + '(' + str(len(encoded)) + ')') app_status['latest_tweet'] = encoded; else: print('Could not generate tweet') app_status['latest_tweet'] = 'Could not generate tweet' status.save(app_status)
def produce_next_tweet(app_status):
    """Build one Markov-generated tweet, post it, and save the run status."""
    app_status = status.load()
    # Corpus: recent timeline tweets, filtered, minus the bot's own posts.
    candidates = filter_tweets(twitter.get_timeline_tweets(800))
    candidates = filter(
        lambda t: not t['user']['screen_name'] == twitter_settings.screen_name,
        candidates)
    if len(candidates) <= 1:
        failure = 'Could not generate tweet (not enough eligible tweets)'
        print(failure)
        app_status['latest_tweet'] = failure
        return
    # Own recent tweets are handed to the generator to avoid repeats.
    own_history = twitter.get_tweets(twitter_settings.screen_name, True)
    best_tweet = create_markovated_tweet(
        candidates, 140, map(lambda t: t['text'].strip(), own_history))
    if best_tweet is None:
        print('Could not generate tweet')
        app_status['latest_tweet'] = 'Could not generate tweet'
    else:
        twitter.post_tweet(best_tweet)
        encoded = unicode(best_tweet).encode('utf-8')
        print(encoded + '(' + str(len(encoded)) + ')')
        app_status['latest_tweet'] = encoded
    status.save(app_status)
def reply_to_user(user, app_status):
    """Reply to *user* with a Markov-generated tweet built from their timeline."""
    if user['protected']:
        # Protected accounts' tweets cannot be fetched.
        print("@" + user['screen_name'] + " sorry, I can't process protected users :(")
        return
    screen_name = user['screen_name']
    print(screen_name)
    tweets = filter_tweets(twitter.get_tweets(screen_name, True))
    if len(tweets) <= 1:
        print("Not enough tweets")
        fail_reply = ("@" + screen_name +
                      " sorry, you need to tweet more (or tweet less @ mentions and links) :(")
        twitter.post_tweet(fail_reply)
        app_status['latest_reply'] = fail_reply
        return
    tweet_prefix = '@' + screen_name + ' markovated: '
    # Leave room for the prefix within the 140-char limit.
    best_tweet = create_markovated_tweet(tweets, 140 - len(tweet_prefix))
    if best_tweet is None:
        print('<p>Could not generate reply</p>')
        app_status['latest_reply'] = 'Could not generate'
    else:
        tweet = tweet_prefix + best_tweet
        twitter.post_tweet(tweet)
        encoded = unicode(tweet).encode('utf-8')
        print(encoded + '(' + str(len(encoded)) + ')')
        app_status['latest_reply'] = encoded
def get(self): app_status = status.load() # Just get the latest tweets tweets = twitter.get_timeline_tweets(800) tweets = filter_tweets(tweets) tweets = filter(lambda t:not t['user']['screen_name'] == twitter_settings.screen_name, tweets) if len(tweets) <= 1: self.response.out.write('<p>Could not generate tweet (not enough eligible tweets)</p>') app_status['latest_tweet'] = 'Could not generate tweet (not enough eligible tweets)' return recent_tweets = twitter.get_tweets(twitter_settings.screen_name, True) best_tweet = create_markovated_tweet(tweets, 140, map(lambda t: t['text'].strip(), recent_tweets)) if best_tweet != None: twitter.post_tweet(best_tweet) self.response.out.write('<p>' + best_tweet + '</p>' + '(' + str(len(best_tweet)) + ')') app_status['latest_tweet'] = best_tweet; else: self.response.out.write('<p>Could not generate tweet</p>') app_status['latest_tweet'] = 'Could not generate tweet' status.save(app_status)
def wordcloud():
    """Flask view: build a word-cloud image for the requested Twitter user."""
    username = request.args.get('user').strip()
    # Accept handles typed with a leading '@'.
    username = username[1:] if username.startswith('@') else username
    words = get_tweets(username)
    path = get_wordcloud(username, words)
    return render_template('figure.html',
                           image=dict(desc='WordCloud', path=path))
def reply_to_user(self, user, app_status):
    """Reply to *user* with a Markov tweet, writing HTML progress to the response."""
    if user['protected']:
        # Protected accounts' tweets cannot be fetched.
        self.response.out.write("@" + user['screen_name'] +
                                " sorry, I can't process protected users :(")
        return
    screen_name = user['screen_name']
    self.response.out.write("<h1>" + screen_name + "</h1>")
    tweets = filter_tweets(twitter.get_tweets(screen_name, True))
    if len(tweets) <= 1:
        self.response.out.write("<p>Not enough tweets</p>")
        fail_reply = ("@" + screen_name +
                      " sorry, you need to tweet more (or tweet less @ mentions and links) :(")
        twitter.post_tweet(fail_reply)
        app_status['latest_reply'] = fail_reply
        return
    tweet_prefix = '@' + screen_name + ' markovated: '
    # Leave room for the prefix within the 140-char limit.
    best_tweet = create_markovated_tweet(tweets, 140 - len(tweet_prefix))
    if best_tweet is None:
        self.response.out.write('<p>Could not generate reply</p>')
        app_status['latest_reply'] = 'Could not generate'
    else:
        tweet = tweet_prefix + best_tweet
        twitter.post_tweet(tweet)
        self.response.out.write('<p>' + tweet + '</p>' + '(' + str(len(tweet)) + ')')
        app_status['latest_reply'] = tweet
def produce_next_tweet(app_status):
    """Generate one Markov tweet, unescape HTML entities in it, post it, and
    save the run status.

    NOTE(review): the app_status argument is immediately replaced by
    status.load() — confirm intended.
    """
    app_status = status.load()
    # Corpus: latest timeline tweets minus the bot's own posts.
    tweets = filter_tweets(twitter.get_timeline_tweets(800))
    tweets = filter(lambda t: not t['user']['screen_name'] == twitter_settings.screen_name, tweets)
    if len(tweets) <= 1:
        print('Could not generate tweet (not enough eligible tweets)')
        app_status['latest_tweet'] = 'Could not generate tweet (not enough eligible tweets)'
        return
    recent_tweets = twitter.get_tweets(twitter_settings.screen_name, True)
    best_tweet = create_markovated_tweet(tweets, 140, map(lambda t: t['text'].strip(), recent_tweets))
    if best_tweet != None:
        # One parser instance suffices; it was being re-created per entity.
        h = HTMLParser.HTMLParser()
        for e in re.findall(regexp, best_tweet):
            # Replace each HTML entity with its unescaped value.
            best_tweet = best_tweet.replace(e, h.unescape(e))
        twitter.post_tweet(best_tweet, None)
        encoded = unicode(best_tweet).encode('utf-8')
        print(encoded + '(' + str(len(encoded)) + ')')
        app_status['latest_tweet'] = encoded
    else:
        print('Could not generate tweet')
        app_status['latest_tweet'] = 'Could not generate tweet'
    status.save(app_status)
def index():
    """Flask view: show synonymized tweets — the requested handle's on POST,
    otherwise one tweet from each of five random handles."""
    if request.method == 'POST':
        handle = request.form['handle']
        tweets = twitter.get_tweets(handle, 5)
        for tweet in tweets:
            tweet.text = synonymize(tweet.text)
        return render_template('index.html', messages=tweets)
    # Fetch five random handles from the list
    tweets = []
    for handle in random.sample(mainhandles, 5):
        fetched = twitter.get_tweets(handle, 1)
        if fetched:
            tweet = fetched[0]
            tweet.text = synonymize(tweet.text)
            tweets.append(tweet)
    return render_template('index.html', messages=tweets)
def test_get_tweets():
    """Smoke-test the twitter helpers.

    Without a configured consumer key, fall back to checking the follower
    count helper; then verify get_tweets returns something for a known user.
    """
    # Check if key file is present
    if get_config_key('consumer_key') is None:
        # BUG FIX: the function was referenced without calling it, so the
        # assert compared against the function object and could never fail.
        elon_num = twitter.get_num_followers()
        assert elon_num is not None
    # If key credential file exists, test get tweets function
    num = twitter.get_tweets('elonmusk')
    assert num is not None
def test_get_tweets():
    """get_tweets should return a truthy result for each known handle."""
    handles = ['blakelively', 'EXO', 'sehun', 'baekhyun', 'evanlin', 'chanyeol']
    for handle in handles:
        # Original passed the handle as both arguments; preserved here.
        assert get_tweets(handle, handle)
def fetch_tweets(keyword, num_of_tweets):
    """Fetch tweets for *keyword*, run sentiment prediction, and dump the
    combined rows to file.csv; each tweet/label pair is also printed."""
    (time_stamp, location_list, twitter_user,
     tweet_list) = twitter.get_tweets(keyword, num_of_tweets)
    print("successfuly obtained tweets")
    labels = Predict(preprocess_texts(tweet_list))
    rows = list(zip(time_stamp, location_list, twitter_user, tweet_list, labels))
    frame = pd.DataFrame(rows,
                         columns=['time_stamp', 'location', 'user name', 'text', 'val'])
    frame.to_csv('file.csv', index=False)
    for text, label in zip(tweet_list, labels):
        print(text)
        print("\n\n")
        print(label)
def generate_indiv_slides(handle, unique_code):
    """Create per-tweet videos for up to three of *handle*'s latest tweets.

    Returns the number of videos generated.
    """
    # Obtain the previous tweets from the desired user (normally ~20).
    tweets = get_tweets(handle)
    # Cap the number of generated videos at three.
    num_tweets = min(3, len(tweets))
    for position in range(num_tweets):
        create_single_tweet(position, tweets[position], unique_code)
    return num_tweets
def tweet2image(username, searchword):
    """Render each matching tweet as a JPEG under <username>/<username>_images/.

    Returns the image directory path; files are named 1.jpg, 2.jpg, ...
    """
    list_word = twitter.get_tweets(username, searchword)
    address = username + '/' + username + '_images/'
    # Load the font once instead of once per tweet.
    font = ImageFont.truetype('./DejaVuSerif-Italic.ttf', 15)
    for i, raw_word in enumerate(list_word):
        # Strip non-ASCII characters before drawing.
        word = raw_word.encode('ascii', 'ignore').decode('ascii')
        # Light-blue background canvas.
        image = Image.new('RGB', (500, 312), color=(190, 226, 231))
        draw = ImageDraw.Draw(image)
        draw.text((5, 200), word, fill=(0, 0, 0), font=font)
        image.save(address + str(i + 1) + '.jpg', dpi=(300.0, 300.0))
    return address
def get_tones(topic):
    """Run Watson tone analysis over 50 tweets about *topic*.

    Returns the tone_chat result structure from the Tone Analyzer service.
    """
    # SECURITY(review): a live IAM API key is hard-coded in source — move it
    # to configuration/secret storage and rotate the exposed key.
    service = ToneAnalyzerV3(
        url='https://gateway.watsonplatform.net/tone-analyzer/api',
        version='2018-06-05',
        iam_apikey='i5NQ4PEpjkuL6SMqASyjoCcnY8ftngT_HTSEAOFi6hf8')
    tweets = get_tweets(topic, 50)
    # tone_chat expects a list of utterance dicts: [{'text': ...}, ...]
    tweet_object_arr = []
    for tweet in tweets:
        tweet_object_arr.append({'text': tweet})
    tone_chat = service.tone_chat(tweet_object_arr).get_result()
    return tone_chat
def _get_tweet_data():
    """Return JSON personality data for a Twitter user, computing and caching
    it in the database on first request."""
    username = request.args['user']
    model = m.TwitterUser.query.filter_by(user_id=username).first()
    if model:
        return model.personality_data
    # Cache miss: analyse the user's tweets and persist the result.
    tweets = twitter.get_tweets(username)
    text = unicodedata.normalize('NFKC', '\n'.join(tweets))
    analysis = bluemix.analyse_text(text)
    model = m.TwitterUser()
    model.user_id = username
    model.personality_data = json.dumps(analysis)
    db.session.add(model)
    db.session.commit()
    return json.dumps(analysis)
def most_recent_tweet():
    """Poll @chipotletweets forever, tracking the newest tweet text and any
    promo-code word containing 'FREE'.

    Results are published through the module-level globals new_text, old_text,
    and new_code; this function never returns.
    """
    global new_text
    global old_text
    global new_code
    while True:
        start = time.time()
        # After the loop, new_text holds the LAST tweet of the fetched page.
        # NOTE(review): presumably get_tweets yields page order — confirm
        # which end is the most recent tweet.
        for tweet in get_tweets("chipotletweets", pages=1):
            new_text = tweet["text"]
            print(new_text)
        end = time.time()
        if new_text != old_text:
            words = new_text.split(" ")
            for word in words:
                # Any word containing 'FREE' is treated as the promo code.
                if "FREE" in word:
                    new_code = word
            old_text = new_text
        print("Retrieved in " + str(end - start)[:5] + " seconds!")
def expand_notices(notices):
    """Resolve the user and tweet IDs in raw notices into full objects.

    Each returned dict carries Receiver, Sender, Tweet and Timestamp keys;
    lookups that fail resolve to None.
    """
    receiver_ids = [n['ReceiverID'] for n in notices]
    sender_ids = [n['SenderID'] for n in notices]
    # Batch-fetch all distinct users and tweets, then index them by id.
    users = twitter.get_users(user_ids=list(set(receiver_ids + sender_ids)))
    users = {u['id_str']: u for u in users}
    tweets = twitter.get_tweets(list(set(n['TweetID'] for n in notices)))
    tweets = {t['id_str']: t for t in tweets}
    return [{
        'Receiver': users.get(n['ReceiverID']),
        'Sender': users.get(n['SenderID']),
        'Tweet': tweets.get(n['TweetID']),
        'Timestamp': n['Timestamp'],
    } for n in notices]
def get_tweets(search_term, location, location_name): '''Gets the tweets for a given location. It tries the Google Datastore first; that failing, it grabs new tweets from Twitter. :param location: the google geocode location coordinates :param location_name: the named location :param search_term: twitter is queried with this''' # First, check whether tweets for this search term and location already exist in the db. q = twitter.Tweet.all() q.filter("location_name =", location_name.lower()) q.filter("search_term =", search_term) tweet_records = q.run(limit=10, read_policy=db.STRONG_CONSISTENCY) tweet_records = list(tweet_records) if len(tweet_records) > 0: logging.info('using cached tweets') # If they don't, fetch the tweets and put them in the db if len(tweet_records) == 0: logging.debug('no cached tweets; fetching new ones') tweet_records = [] tweets = twitter.get_tweets(search_term, location) for tweet in tweets: # e.g. Sat, 13 Apr 2013 21:50:12 +0000 dt = datetime.datetime.strptime(tweet['created_at'],'%a, %d %b %Y %H:%M:%S +0000') # Get the tweet's sentiment sentiment = get_sentiment(tweet['text']) record = twitter.Tweet( text=tweet['text'], from_user=tweet['from_user'], profile_image_url=tweet['profile_image_url'], created_at=dt, location_name=location_name.lower(), search_term=search_term, pos=sentiment['probability']['pos'], neg=sentiment['probability']['neg'], neutral=sentiment['probability']['neutral'], label=sentiment['label'] ) record.put() # persist to the db tweet_records.append(record) # collect for immediate return return tweet_records
def reply_to_user(user, app_status, replyID):
    """Reply (in French) to *user* with a Markov tweet built from their timeline.

    :param replyID: tweet id the reply should be attached to
    """
    if user['protected']:
        print("@" + user['screen_name'] + " sorry, I can't process protected users :(")
        return
    screen_name = user['screen_name']
    print(screen_name)
    tweets = filter_tweets(twitter.get_tweets(screen_name, True))
    if len(tweets) <= 1:
        print("Not enough tweets")
        fail_reply = "@" + screen_name + " Pas assez de tweets. "
        twitter.post_tweet(fail_reply, None)
        app_status['latest_reply'] = fail_reply
        return
    tweet_prefix = '@' + screen_name + ' '
    # Leave room for the prefix within the 140-char limit.
    ideal_tweet_length = 140 - len(tweet_prefix)
    best_tweet = create_markovated_tweet(tweets, ideal_tweet_length)
    if best_tweet != None:
        # One parser instance suffices; it was being re-created per entity.
        h = HTMLParser.HTMLParser()
        for e in re.findall(regexp, best_tweet):
            # Replace each HTML entity with its unescaped value.
            best_tweet = best_tweet.replace(e, h.unescape(e))
        tweet = tweet_prefix + best_tweet
        twitter.post_tweet(tweet, replyID)
        encoded = unicode(tweet).encode('utf-8')
        print(encoded + '(' + str(len(encoded)) + ')')
        app_status['latest_reply'] = encoded
    else:
        print('<p>Could not generate reply</p>')
        app_status['latest_reply'] = 'Could not generate'
def get_info():
    """Flask view: score tweets about the posted topic and render a summary
    of the average sentiment plus the most positive/negative tweets."""
    access_token = setup_api()
    topic = request.form['topic']
    tweets = get_tweets(access_token, topic)
    tot = 0
    # NOTE(review): these initial bounds assume model scores lie in [0, 1] —
    # confirm against the model's actual output range.
    most_positive = 0
    most_negative = 1
    pos_tweet = ""
    neg_tweet = ""
    for tweet in tweets:
        tweet_sequence = prepare_tweet(tweet)
        sent = model.predict(tweet_sequence)[0]
        tot += sent
        if sent > most_positive:
            most_positive = sent
            pos_tweet = tweet
        elif sent < most_negative:
            most_negative = sent
            neg_tweet = tweet
    # NOTE(review): raises ZeroDivisionError when no tweets are returned.
    average_sentiment = (tot / len(tweets)) * 100
    # average_sentiment[0]: predict() appears to yield array-like scores —
    # TODO confirm the element shape.
    return make_result_page(topic, average_sentiment[0], pos_tweet, neg_tweet)
def get_top_tweets(user):
    """Score *user*'s tweets by TF-IDF similarity against current news and
    return up to ten tweet ids keyed by rank position (stringified index)."""
    tweets = get_tweets("from:" + user)
    news = get_news()
    scored_tweets = []
    for tweet in tweets:
        # Temporarily prepend the tweet so row 0 of the similarity matrix is
        # this tweet's similarity vector against all news items.
        news.insert(0, tweet.text)
        vect = TfidfVectorizer(min_df=1)
        tfidf = vect.fit_transform(news)
        vectors = (tfidf * tfidf.T).A
        scores_list = vectors.tolist()[0]
        tweet.set_score(sum(scores_list))
        news.pop(0)
        scored_tweets.append(tweet)
    # NOTE(review): this relies on the Tweet class's comparison operators; a
    # plain ascending sort would put the LOWEST scores first — confirm that
    # Tweet.__lt__ orders highest-score-first.
    sorted_tweets = sorted(scored_tweets)
    top_ten_scored_tweet_ids = {}
    tweet_count = min(10, len(sorted_tweets))
    for i in range(tweet_count):
        if sorted_tweets[i].score > 1.5:  # minimum-similarity threshold
            top_ten_scored_tweet_ids[str(i)] = sorted_tweets[i].id
    return top_ten_scored_tweet_ids
def ask(self, raw_input_string):
    """
    :param raw_input_string: Users question as raw string
    :return: Bots response as string
    """
    query = self.cleaner.clean(raw_input_string)
    # Keyword-routed skills take priority over the search index.
    if "translate" in query:
        return translate.translate_text(query.replace("translate", ""))
    if "tweet" in query:
        return twitter.get_tweets()
    if "wolfram" in query:
        return wolfram.wolfram_query(query.replace("wolfram", ""))
    results = self.es.search(query)
    if len(results) > 0:
        return results[0]["_source"]["response"]
    # No match anywhere: answer with a random canned default.
    from random import randint
    return DEFAULT[randint(0, len(DEFAULT) - 1)]
def fetch_tweets(keyword, num_of_tweets):
    """Fetch tweets for *keyword*, predict sentiment, and write the combined
    rows to temp.csv and file.csv; each tweet/label pair is also printed."""
    (time_stamp, location_list, twitter_user, subjectivity, polarity,
     tweet_list) = twitter.get_tweets(keyword, num_of_tweets)
    print("[INFO] successfuly obtained tweets")
    prep_text = preprocess_texts(tweet_list)
    labels = Predict(prep_text)
    # BUG FIX: the zip order is (..., subjectivity, polarity, ...) but the
    # column labels were ('Polarity', 'Subjectivity'), so each column carried
    # the other's data. Labels reordered to match the data.
    df = pd.DataFrame(
        list(zip(time_stamp, location_list, twitter_user, prep_text,
                 subjectivity, polarity, labels)),
        columns=['time_stamp', 'location', 'user name', 'text',
                 'Subjectivity', 'Polarity', 'Sentiments'])
    # Remove stale outputs portably instead of shelling out to `rm`;
    # a missing file is fine (matches rm's ignored failure).
    for stale in ('file.csv', 'temp.csv'):
        if os.path.exists(stale):
            os.remove(stale)
    df.to_csv('temp.csv', index=False)
    df.to_csv('file.csv', index=False)
    print("file is written")
    for i, j in zip(tweet_list, labels):
        print(i)
        print("\n\n")
        print(j)
def get_tweets_parallel(user):
    """Fetch up to 1000 tweets for *user* (Python 2 — print statement syntax).

    NOTE(review): `db` and `updates` are unused in the visible body — the
    function appears truncated in this chunk.
    """
    cxn = Connection()
    db = cxn.tweets
    print "Getting tweets for ", user
    updates = twitter.get_tweets(user,limit = 1000)
# Build a Google Slides deck from tweets: one slide per tweet plus a daily
# performance chart. Pass --enterprise to use the "plus" variants.
from tqdm import tqdm
import gslides, twitter, analytics
import sys

enterprise = '--enterprise' in sys.argv
presentation = gslides.init()
# Enterprise mode uses the extended fetch/slide builders.
tweets = twitter.get_tweets_plus() if enterprise else twitter.get_tweets()
i = 0  # running slide index threaded through the builders
qs = []  # accumulated batch-update requests
for tweet in tqdm(tweets):
    q, i = gslides.new_slide_plus(presentation, tweet, i) if enterprise else gslides.new_slide(
        presentation, tweet, i)
    qs += q
# TODO: add more detailed analytics for enterprise users
q, i = gslides.new_image_slide(
    presentation, analytics.daily_performance(tweets).decode('utf-8'), i)
qs += q
# Send every queued request in one batch.
gslides.execute(presentation, qs)
# Listing_16-17.py # Copyright Warren Sande, 2009 # Released under MIT license http://www.opensource.org/licenses/mit-license.php # Version 61 ---------------------------- # move a beach ball image in a pygame window with wrapping import pygame, sys, feedparser, twitter c = 0 tweets = twitter.get_tweets() pygame.init() pygame.mouse.set_visible(0) screen = pygame.display.set_mode([0,0] ,pygame.FULLSCREEN) screen.fill([0, 0, 0]) font = pygame.font.SysFont('astronaut',250) text = font.render(tweets[c] , False, (0,0,255), (0,0,0)) fontSize = font.size(tweets[c]) x = screen.get_width() y = screen.get_height() / 2 x_speed = -10 while 1: for event in pygame.event.get(): if event.type == pygame.QUIT: sys.exit() if event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE: sys.exit() pygame.time.delay(30) pygame.draw.rect(screen, [0,0,0], [x, y, fontSize[0], fontSize[1]], 0) x = x + x_speed
def predict_for_hashtag(hashtag, lang):
    """Fetch tweets for *hashtag* in language *lang* and attach sentiment
    predictions as a 'sentiment' column on the returned frame."""
    result = twitter.get_tweets(hashtag, lang)
    result['sentiment'] = predict(result['text'])
    return result
# Collect newspaper Twitter links from the scraped page: anchors whose link
# text is exactly 'T' point at a paper's Twitter account.
TwitterFeed = []
for link in soup.find_all('a'):
    if re.match(r'^T$', link.text) != None:
        TwitterFeed.append(link.get('href'))
# GET TWITTER USERNAMES FROM LINKS
usernames = []
for user in TwitterFeed:
    # NOTE(review): slicing at 23 presumably skips a fixed
    # 'https://twitter.com/'-style prefix — confirm against the href format.
    usernames.append(user[23:len(user)])
# GRAB RECENT TWEETS FROM EACH NEWSPAPER
if level=='all':
    tweet_list = []
    state_tweets =[] #for return
    for user in usernames:
        tweets = twitter.get_tweets(user,count=3)
        tweet_list.append({'user':user,'tweets':tweets})
        state_tweets.extend(tweets)
elif level=='country':
    state_tweets =[] #for return
    for user in usernames:
        tweets = twitter.get_tweets(user,count=4)
        state_tweets.extend(tweets)
#SENTIMENT ANALYSIS
def senti(list_of_tweets):
    # Compute a polarity score per tweet (Python 2 — print statement).
    sentiment,subjectivity = [],[]
    print 'list of tweets',len(list_of_tweets)
    for tweet in list_of_tweets:
        blob = TextBlob(tweet)
        sentiment.append(blob.sentiment.polarity)
        # NOTE(review): senti() appears truncated at this point in the chunk.
def get(self, args):
    """Return the first fetched tweet for the requested handle as JSON."""
    latest = get_tweets(args["handle"])[0]
    return jsonify(latest.to_json())
# NOTE(review): this fragment begins mid-function — the enclosing def for the
# loop and `return` below is not visible in this chunk.
for word in diclist:
    pn = word['PN']
    if pn != 'notfound':  # skip words without a PN dictionary entry
        pn_list.append(pn)
if len(pn_list) > 0:
    pnmean = np.mean(pn_list)
else:
    pnmean = None  # no scorable words in this tweet
return (pnmean)


if __name__ == '__main__':
    t = Tokenizer()
    api = tw.get_api()
    search_results = tw.get_search_results(api)
    tweetlist = tw.get_tweets(search_results)
    #pnmean_list = []
    tweet_pnmean_list = []
    for td in tweetlist:
        tweet = td['tweet']
        tweet = tweet.replace('\n', '')  # strip newlines before tokenizing
        parsed_tweet = t.tokenize(tweet)
        diclist = get_diclist(parsed_tweet)
        diclist = add_pnvalue(diclist)
        #pp = pprint.PrettyPrinter(indent=4)
        #pp.pprint(diclist)
        # Mean positive/negative dictionary score for this tweet.
        pnmean = get_pnmean(diclist)
        #pnmean_list.append(pnmean)
        d = {'pnmean': pnmean, 'tweet': tweet}
        tweet_pnmean_list.append(d)
    #print(pnmean_list)
# Written by Jake Lever 24/03/2021 import twitter import csv with open('query_list.csv', newline='') as csvfile: reader = csv.reader(csvfile, delimiter=',', quotechar='"') next(reader, None) for row in reader: if len(row) == 4: index, query, start_date, end_date = row[0].strip(), row[1].strip(), row[2].strip(), row[3].strip() with open('output/{}_tweets.csv'.format(index), 'w', newline='') as csv_out: writer = csv.writer(csv_out, delimiter=',', quoting= csv.QUOTE_MINIMAL) writer.writerow(['Tweet_ID', 'Date', 'Text', 'Author_ID']) tweets = twitter.get_tweets(start_date, end_date, query) if tweets is not None: for tweet in tweets: # print(tweet) text = tweet.full_text.replace('"', "'").replace('\n', ' ') writer.writerow([tweet.tweet_id, tweet.date, text, tweet.author_id]) else: print('Error reading CSV row {}: Contains incorrect number of columns ({})'.format(index, len(row))) print('Queries searched successfully. Tweets stored in output folder.') # query = 'Johnson & Johnson' # tweets = twitter.get_tweets('2019-09-10', '2019-11-25', query) #
def run(self):
    """Thread entry point: fetch tweets once.

    NOTE(review): the thread_name/thread_ID references below suggest this is
    a threading.Thread subclass method — the class itself is not visible here.
    """
    # print(str(self.thread_name) +" "+ str(self.thread_ID));
    twitter.get_tweets()
# NOTE(review): this chunk begins inside command_line_parsing(); the parser
# construction preceding these add_argument calls is not visible here.
                        help='File path to the list of tweets IDs.')
    parser.add_argument('--file-credential', '-c', dest='file_credential', required=True,
                        help='File with the twitter app credentials.')
    parser.add_argument('--output-file-path', '-o', dest='output_file_path', required=True,
                        help='Output path to store tweets.')
    return parser.parse_args()


if __name__ == '__main__':
    args = command_line_parsing()
    # Load the twitter app credentials from the JSON file given on the CLI.
    with open(args.file_credential) as file:
        credentials = json.load(file)
    tweets_ids = read_text_file(args.file_path)
    twitter = FindTweetsByID(credentials)
    # Download each tweet by ID and store the results under the output path.
    twitter.get_tweets(args.output_file_path, tweets_ids)
def index():
    """Render the dashboard, refreshing tweet data first if it has gone stale."""
    if is_data_stale():
        # Refresh both the REST search results and the streaming results.
        twitter.get_tweets()
        twitter.get_stream()
    return render_template('index.html', entries = convert_results())
# Smoke tests for the twitter wrapper (Python 2 — print statement syntax).
import twitter

print twitter.get_rate_limit_status()
# Both calls should return more than one item on a live account.
mentions = twitter.get_mentions()
assert len(mentions) > 1
tweets = twitter.get_tweets('markovator_dev')
assert len(tweets) > 1
print("Tests passed")
# Listing_16-17.py # Copyright Warren Sande, 2009 # Released under MIT license http://www.opensource.org/licenses/mit-license.php # Version 61 ---------------------------- # move a beach ball image in a pygame window with wrapping import pygame, sys, feedparser, twitter c = 0 tweets = twitter.get_tweets() pygame.init() pygame.mouse.set_visible(0) screen = pygame.display.set_mode([0, 0], pygame.FULLSCREEN) screen.fill([0, 0, 0]) font = pygame.font.SysFont('astronaut', 250) text = font.render(tweets[c], False, (0, 0, 255), (0, 0, 0)) fontSize = font.size(tweets[c]) x = screen.get_width() y = screen.get_height() / 2 x_speed = -10 while 1: for event in pygame.event.get(): if event.type == pygame.QUIT: sys.exit() if event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE: sys.exit() pygame.time.delay(30) pygame.draw.rect(screen, [0, 0, 0], [x, y, fontSize[0], fontSize[1]], 0) x = x + x_speed