import logging

# Project-local modules (module layout assumed from how the names are used below)
import config
import db
from db import User
from tweets import getTweets, followUsers

logger = logging.getLogger(__name__)


def main():
    logger.info('Starting follow neighbour script')
    try:
        db.init_db()
        lat, lon = config.TWEET_HOME_GEO
        radius = '100km'
        geocode = ','.join([str(lat), str(lon), radius])  # note: built but never used below
        registered_users = [user.user_id for user in db.session.query(User).all()]
        results = getTweets([config.TWEET_LOCATION.split(',')[0]], result_type='recent')
        # Keep only tweets whose place tag matches the home city
        near_tweets = []
        for tweet in results:
            if tweet.get('place') and tweet.get('place').get('full_name') == 'San Francisco, CA':
                near_tweets.append(tweet)
        authors = [tweet.get('user') for tweet in near_tweets]
        authors_id_list = set(author.get('id_str') for author in authors)
        follow_list = [author for author in authors_id_list if author not in registered_users]
        logger.info('Found %d new neighbours' % len(follow_list))
        followUsers(follow_list[:30])
    except Exception as err:
        logger.error('%s: %s' % (type(err).__name__, err))
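# A minimal sketch of the getTweets helper assumed above, based on the
# python-twitter library's Api.GetSearch. The helper's real module,
# name, and signature are assumptions inferred from how it is called.
import twitter


def getTweets_sketch(terms, result_type='recent', count=100):
    api = twitter.Api(consumer_key='...', consumer_secret='...',
                      access_token_key='...', access_token_secret='...')
    statuses = api.GetSearch(term=' OR '.join(terms),
                             result_type=result_type, count=count)
    # Return plain dicts so callers can use tweet.get('place'), etc.
    return [status.AsDict() for status in statuses]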
import twitter  # python-twitter

import tweets  # project-local helpers (module name inferred from usage)


def main():
    # Set keys found at https://apps.twitter.com
    consumer_key = '1AeMVu48Xly5oNzqfy4GLpngI'
    consumer_secret = 'kKqqxu7vxuvxdFkWqRgTPOyQSqmNimGNZ2MbWLvoLJo0Ug3nX3'
    access_token_key = '48263654-b6Th8oJ3IuR3TeBo3FC1dKl1Avz8hBjx6WIKhW5yU'
    access_token_secret = '7TnBPtIASTq1HVmYZG5ceeMSPPFocH74VJE1fKEzCAPqZ'

    # Create instance of twitter.Api
    print("*************************************")
    print('     Initializing Twitter API...')
    print("*************************************")
    api = twitter.Api(consumer_key, consumer_secret,
                      access_token_key, access_token_secret)
    print('Twitter API Initialized!')
    print('\n')

    # Verify credentials
    print("**********************************")
    print('     Verifying Credentials...')
    print("**********************************")
    print(api.VerifyCredentials())
    print('\n')

    # Fetch Trump tweets
    print("**********************************")
    print("      Get Trump Tweets...")
    print("**********************************")
    trumpTweets = tweets.getTweets(api)
    print('\n')

    # Compare to the novel
    print("*******************************")
    print("     Comparing to Novel...")
    print("*******************************")
    with open('1984.txt', 'r') as txtFile:
        _1984Text = txtFile.read()
    # percentMatch is the similarity threshold for a match; it must be
    # defined elsewhere in this module (it is not set in this function).
    similarQuotes = tweets.compare(trumpTweets, _1984Text, percentMatch)
    print('\n')

    # Matches
    print("*****************")
    print("     Matches")
    print("*****************")
    for quote in similarQuotes:
        print(quote.text)
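# A minimal sketch of what tweets.compare could look like, assuming it
# scores each tweet against the novel's sentences and keeps pairs whose
# difflib similarity ratio clears percentMatch. This is an illustration,
# not the project's actual implementation; the sentence split is naive.
import difflib


def compare_sketch(tweet_texts, novel_text, percentMatch):
    sentences = [s.strip() for s in novel_text.split('.') if s.strip()]
    matches = []
    for text in tweet_texts:
        for sentence in sentences:
            ratio = difflib.SequenceMatcher(
                None, text.lower(), sentence.lower()).ratio()
            if ratio >= percentMatch:  # percentMatch in [0, 1]
                matches.append((ratio, text, sentence))
    return matches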
import datetime
import logging
from operator import itemgetter
from random import randrange
from time import sleep

# Project-local modules (module layout assumed from how the names are used below)
import analysis
import config
import db
from db import ReTweet
from tweets import getTweets, reTweet

logger = logging.getLogger(__name__)

# INITIAL_SLEEP_MAX, RETWEET_COUNT_MAX and TWEET_SLEEP_MAX are module
# constants expected to be defined elsewhere in this file.


def main():
    try:
        first_sleep = randrange(INITIAL_SLEEP_MAX)
        logger.info('Starting retweet script, but first sleeping %d minutes' % first_sleep)
        sleep(60 * first_sleep)

        # Get tweets
        tweets = getTweets(config.TWEET_LIKES, result_type='mixed')

        # Analyze tweets, skipping anything already retweeted
        analyzed = []
        db.init_db()
        retweets = [tweet.tweet_id for tweet in db.session.query(ReTweet).all()]
        for tweet in tweets:
            if tweet.get('id_str') not in retweets:
                analyzed.append((analysis.sentiment(tweet.get('text')),
                                 tweet.get('id_str')))
        analyzed = sorted(analyzed, key=itemgetter(0), reverse=True)

        # Retweet the most positive tweets, capped at what is available
        retweet_count = min(randrange(RETWEET_COUNT_MAX), len(analyzed))
        logger.info('Retweeting %d tweets' % retweet_count)
        for i in range(retweet_count):
            tweet = analyzed[i]
            reTweet(tweet[1])
            new_retweet = ReTweet()
            new_retweet.tweet_id = tweet[1]
            new_retweet.sentiment = tweet[0]
            new_retweet.retweet_date = datetime.datetime.now()
            db.session.add(new_retweet)
            db.session.commit()
            sleep_time = randrange(TWEET_SLEEP_MAX)
            logger.info(
                'Retweeting %d/%d tweet_id: %s, with sentiment %s and sleeping'
                ' for %d minutes'
                % (i + 1, retweet_count, tweet[1], tweet[0], sleep_time))
            sleep(60 * sleep_time)
    except Exception as err:
        logger.error('%s: %s' % (type(err).__name__, err))
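# A minimal sketch of the analysis.sentiment call assumed above, using
# TextBlob's polarity score (the project's real scorer may differ).
# Returns a float in [-1.0, 1.0]; higher means more positive.
from textblob import TextBlob


def sentiment_sketch(text):
    return TextBlob(text).sentiment.polarity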
from sys import argv

import filter  # project-local module; note it shadows the builtin name
import sentiment
import tweets

if len(argv) == 3:
    # Get the search term and the number of tweets to analyze
    # from the command line arguments
    dataset = tweets.getTweets(argv[1], argv[2])
    # Call function to get sentiment
    print(sentiment.Sentiment(dataset))
else:
    print("Usage: python main.py <search term> <no. of tweets to analyze>")
import tweets as twt
from curator import DataCurator

tweets = twt.getTweets()
dc = DataCurator(tweets)
clean_data = (dc.basicCleanUp()
                .removeRepeatedPunctuations()
                .removePunctuationsAndReTweet()
                .expandPunctuatedWords()
                .expandShortHands()
                .build())

with open('cleaned_data.txt', 'w', 1024) as f:
    for tweet in clean_data:
        f.write("%s\n" % tweet)
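# A sketch of what one step in the fluent DataCurator chain might do;
# removeRepeatedPunctuations plausibly collapses runs like "!!!" to "!".
# This is an illustration, not the project's actual implementation.
import re


def remove_repeated_punctuations_sketch(text):
    # Collapse any punctuation character repeated 2+ times into one
    return re.sub(r'([!?.,])\1+', r'\1', text)


# e.g. remove_repeated_punctuations_sketch('so cool!!!') -> 'so cool!'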
# Project-local helpers (module names assumed from the search script above)
from tweets import getTweets
from sentiment import Sentiment


def analyze(term, count):
    tweets = getTweets(term, count)
    return Sentiment(tweets)
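# Hypothetical invocation: score 50 tweets about "python" (the return
# type depends on the project's Sentiment implementation).
if __name__ == '__main__':
    print(analyze('python', 50))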