class TwitterUtils:
    """Thin convenience wrapper around the app-only-auth Client."""

    def __init__(self):
        self.__client = Client(CONSUMER_KEY, CONSUMER_SECRET)

    def __request(self, url, kwargs):
        # Prepend the API base URL and attach the encoded query string.
        endpoint = "%s%s" % (API_URL, url)
        query = self.__generate_query(kwargs)
        return self.__client.request(endpoint + "?" + query)

    def search(self, q, **kwargs):
        """Run a tweet search; extra keyword args become query parameters."""
        kwargs["q"] = q
        return self.__request(URLS["search"], kwargs)

    def lookup_user(self, id):
        """Look up a single user by numeric user id."""
        return self.__request(URLS["lookup_user"], {"user_id": id})

    @staticmethod
    def __generate_query(kwargs):
        # Percent-encode every value (safe='' also encodes '/') and
        # join the key=value pairs with '&'.
        pairs = ["%s=%s" % (key, quote(str(value), safe=''))
                 for key, value in kwargs.items()]
        return "&".join(pairs)
class ClientTestCase(unittest.TestCase):
    """Test case for the client methods, backed by a patched urlopen."""

    def setUp(self):
        # Swap the real network call for the canned fake_urlopen fixture.
        self.patcher = patch('application_only_auth.client.urlopen', fake_urlopen)
        self.patcher.start()
        self.client = Client('xxxxxx', 'xxxxxx')

    def tearDown(self):
        self.patcher.stop()

    def test_rate_limit(self):
        """Test rate limit response."""
        expected_status = {'remaining': 450, 'limit': 450, 'reset': 1380131036}
        status = self.client.rate_limit_status()
        self.assertEqual(
            status['resources']['search']['/search/tweets'], expected_status)

    def test_rate_limit_with_resource(self):
        """Test rate limit response using a resource as parameter."""
        expected = {'remaining': 450, 'limit': 450, 'reset': 1380131036}
        self.assertEqual(
            self.client.rate_limit_status('/search/tweets'), expected)

    def test_show_status(self):
        """Test status show response."""
        resource_url = ('https://api.twitter.com/1.1'
                        '/statuses/show.json?id=316683059296624640')
        tweet = self.client.request(resource_url)
        self.assertEqual(tweet['id_str'], "316683059296624640")

    def test_invalid_resource(self):
        """Test status show response."""
        resource_url = 'https://api.twitter.com/1.1/resource/invalid.json'
        self.assertRaises(ClientException, self.client.request, resource_url)
def get_tweets_with_tag(tag):
    """Fetch up to 100 recent tweets for a hashtag and wrap them as Tweet objects."""
    tag = "%23" + tag  # '%23' is the URL-encoded '#'
    client = Client(twitterkeys.consumer_key, twitterkeys.consumer_secret)
    query = tag + "&count=" + "100"  # str(MAX_TWEET_COUNT)
    response_json = client.request(twitter_api_url + query_tweets_url + query)
    # Round-trip through json to normalise the response into a plain dict.
    response_dict = json.loads(json.dumps(response_json, sort_keys=True))
    search_metadata = response_dict['search_metadata']
    print("Query for " + tag)
    print("query time: " + str(search_metadata['completed_in']))
    return [Tweet(status) for status in response_dict['statuses']]
class Twitter:
    """Polls a user's timeline and formats new tweets as link-only messages."""

    def __init__(self, user):
        api_key = config['auth']['twitter']['api_key']
        secret_key = config['auth']['twitter']['secret_key']
        # build client and class props
        self.client = Client(api_key, secret_key)
        self.user = user

    def scrape(self):
        """Return raw posts newer than the last check, oldest first."""
        params = f'screen_name={self.user}&count={TWITTER_MESSAGE_REQUEST_COUNT}&tweet_mode=extended'
        url = f'{TWITTER_API_URL}{TWITTER_API_TIMELINE_PATH}?{params}'
        posts = self.client.request(url)
        # Keep only fresh posts, then reverse into chronological posting order.
        fresh = [post for post in posts if self._is_new(post)]
        fresh.reverse()
        return fresh

    def message(self, post):
        """Build a message dict linking directly to the tweet."""
        author_link = f"https://twitter.com/{post['user']['screen_name']}"
        text = f"{author_link}/status/{post['id']}"
        return {'text': text, 'unfurl_links': True, 'unfurl_media': True}

    def _is_new(self, post):
        """True when the post was created after the previous polling cycle."""
        if 'created_at' not in post:
            return False
        posted = int(time.mktime(time.strptime(post['created_at'],
                                               TWITTER_TS_PATTERN)))
        # Anything newer than (now - SLEEP_TIME) arrived since the last check.
        return posted > time.time() - SLEEP_TIME
def get_team_tweets(handle):
    """Return the three most recent tweets for `handle` as a JSON response."""
    twitter = Client(cfg.CONSUMER_KEY, cfg.CONSUMER_SECRET)
    url = ('https://api.twitter.com/1.1/statuses/user_timeline.json'
           '?count=3&screen_name=' + handle)
    tweets = twitter.request(url)
    return jsonify(tweets)


# if __name__ == '__main__':
#     app.run(debug=True)
def gettweetsfromapi(messageBody):
    """Search the five most recent English tweets for the hashtag in messageBody."""
    CONSUMER_KEY = getconsumerkey()
    CONSUMER_SECRET = getconsumersecret()
    client = Client(CONSUMER_KEY, CONSUMER_SECRET)
    hashtag = gethashtag(messageBody)
    url = ('https://api.twitter.com/1.1/search/tweets.json?q='
           + hashtag + '&count=5&lang=en')
    tweet = client.request(url)
    # Report the remaining search quota after the call.
    status = client.rate_limit_status()
    print(status['resources']['search'])
    return formattweet(tweet)
def gettweetsfromapi(messageBody):
    """Look up the hashtag in messageBody and return five formatted tweets."""
    consumer_key = getconsumerkey()
    consumer_secret = getconsumersecret()
    CONSUMER_KEY = consumer_key
    CONSUMER_SECRET = consumer_secret
    client = Client(CONSUMER_KEY, CONSUMER_SECRET)
    hashtag = gethashtag(messageBody)
    search_url = 'https://api.twitter.com/1.1/search/tweets.json?q=' + hashtag + '&count=5&lang=en'
    tweet = client.request(search_url)
    status = client.rate_limit_status()
    # Print remaining search-endpoint quota for visibility.
    print(status['resources']['search'])
    return formattweet(tweet)
def twitterSearch():
    """Poll the search endpoint via the saved refresh_url until one request succeeds.

    Side effects on module globals: updates `meta` (next refresh_url) and
    `count` (number of statuses returned); `exception` is 0 after success.
    """
    global meta, count, exception
    while 1:
        try:
            client = Client(CONSUMER_KEY, CONSUMER_SECRET)
            tweet = client.request('https://api.twitter.com/1.1/search/tweets.json' + meta + '&count=100')
            meta = tweet['search_metadata']['refresh_url']
            count = len(tweet['statuses'])
            # Bug fix: clear the failure flag on success. Previously a single
            # failed request left exception == 1 forever, so the loop never
            # exited even after later requests succeeded.
            exception = 0
        except Exception:
            # Narrowed from a bare `except:` so Ctrl-C/SystemExit still work.
            print('Attempting twitterSearch() - exception')
            exception = 1
            time.sleep(3)
        if exception == 0:
            break
def initialSearch():
    """Seed the module-level `meta` refresh_url with an initial #bitcoin search.

    Retries every 3 seconds until a request succeeds; sets `exception` to 0
    on success and 1 while failing.
    """
    global meta, exception
    while 1:
        try:
            client = Client(CONSUMER_KEY, CONSUMER_SECRET)
            tweet = client.request('https://api.twitter.com/1.1/search/tweets.json?q=%23bitcoin&count=100')
            meta = tweet['search_metadata']['refresh_url']
            exception = 0
        except Exception:
            # Narrowed from a bare `except:` so Ctrl-C/SystemExit still work.
            print('Attempting intialSearch() - exception')
            exception = 1
            time.sleep(3)
        if exception == 0:
            break
def access_twitter():
    # Query the search API for popular #liverpool tweets up to a cutoff date
    # and print each result.
    client = Client(twitterkeys.consumer_key, twitterkeys.consumer_secret)
    query_tag = "%23liverpool"  # '%23' is the URL-encoded '#'
    start_date = "" #"2014-10-01"
    end_date = "2014-10-22"
    result_type = "popular"
    count = str(MAX_TWEET_COUNT)
    query_string = query_tag
    # '+since%3A' / '+until%3A' are the URL-encoded ' since:' / ' until:'
    # search operators; each clause is appended only when configured.
    if start_date != "":
        query_string += "+since%3A" + start_date
    if end_date != "":
        query_string += "+until%3A" + end_date
    if count != "":
        query_string += "&count=" + count
    if result_type != "":
        query_string += "&result_type=" + result_type
    response_json = client.request(twitter_api_url + query_tweets_url + query_string)
    # print json.dumps(response_json, sort_keys=True, indent=4, separators=(',', ':'))
    # Round-trip through json to normalise the response into a plain dict.
    response_dict = json.loads(json.dumps(response_json, sort_keys=True))
    search_metadata = response_dict['search_metadata']
    print "Query for " + query_tag
    print "query time: " + str(search_metadata['completed_in'])
    statuses = response_dict['statuses']
    tweets = []
    for status in statuses:
        tweet = Tweet(status)
        tweets.append(tweet)
        print "Tweet: " + tweet.text
        # NOTE(review): the next line appears corrupted by a secrets scrubber
        # ("******") and is not valid syntax — it presumably once printed the
        # tweet author between the two literals. Restore from version control.
        print "From user: "******"Favorited: " + str(tweet.favorite_count)
        print "Retweeted: " + str(tweet.retweet_count)
def twitter_search(search_string):
    """Return up to ten recent matching tweets as a JSON list of oEmbed payloads."""
    # NOTE(review): credentials are hard-coded here; move them to config/env.
    CONSUMER_KEY = 'sbmaTK8blnFbLo4FeDxe6HkDm'
    CONSUMER_SECRET = 'aRgoP3t1BOs322MXDbyaVABH5Vfqjy44bKwur2r8UPkb6Ij1pH'
    client = Client(CONSUMER_KEY, CONSUMER_SECRET)
    search_url = ("https://api.twitter.com/1.1/search/tweets.json?q={0}&result_type=recent"
                  .format(search_string))
    tweets = client.request(search_url)
    response = []
    for position, tweet in enumerate(tweets['statuses']):
        status_url = "https://twitter.com/{0}/status/{1}".format(
            tweet['user']['screen_name'], tweet['id_str'])
        # Fetch the embeddable widget JSON for this tweet.
        oembed_resp = requests.get('https://publish.twitter.com/oembed?url=' + status_url)
        response.append(json.loads(oembed_resp.text))
        if position >= 9:  # cap at 10 tweets
            break
    return json.dumps(response)
def get_hashtees(search_phrase, since='2012-01-01'):
    """Collect the hashtag texts appearing in recent tweets matching search_phrase."""
    search_phrase = search_phrase.replace(' ', '%20')
    search_phrase = search_phrase.replace('#', HASHTAG)
    client = Client(config.CONSUMER_KEY, config.CONSUMER_SECRET)
    # NOTE(review): since_ is computed but never appended to the request
    # below — confirm whether the date filter was meant to be applied.
    since_ = "%20since:" + since if since is not None else ''
    tweet = client.request(TWTTR_API + TWTTR_API_VERSION + TWTTR_QUERY
                           + search_phrase + '&' + TWTTR_RECENT)
    hashtags_ele = search(["hashtags"], tweet)
    actual_tags = search(["text"], hashtags_ele)
    # Flatten the list of {key: hashtag-text} dicts into a plain list.
    catch_words = [tags[k] for tags in actual_tags for k in tags]
    return catch_words
class Twitter():
    """Looks up public profile statistics for a Twitter screen name."""

    def __init__(self):
        self.client = Client(CONSUMER_KEY, CONSUMER_SECRET)

    def get_twitter_info(self, screen_name):
        """Return a dict of profile counters for screen_name, or {} on failure."""
        info = {}
        if (len(screen_name) > 0):
            api_url = TWITTER_INFO_BASE_URL + screen_name
            try:
                data = self.client.request(api_url)
                info = {
                    "twitter_handle": data["screen_name"],
                    "description": data["description"],
                    "favourites_count": data["favourites_count"],
                    "followers_count": data["followers_count"],
                    "friends_count": data["friends_count"],
                    "statuses_count": data["statuses_count"],
                }
            except Exception as err:
                # Best-effort: log the error and return the empty dict.
                print(err)
        return info


def get_tData(keyword):
    """Search the legacy v1 endpoint and return the matching tweet texts."""
    tweets = []
    url = 'http://search.twitter.com/search.json'
    data = {'q': keyword, 'lang': 'en', 'result_type': 'recent'}
    params = urllib.urlencode(data)
    try:
        req = urllib2.Request(url, params)
        response = urllib2.urlopen(req)
        jsonData = json.load(response)
        tweets = [item['text'] for item in jsonData['results']]
        return tweets
    except urllib2.URLError as e:
        # Best-effort: report the failure and fall through to the empty list.
        print("error")
        return tweets
def get_tweets_with_tag_and_period(tag, count, from_date, until_date):
    """Fetch tweets for a hashtag, optionally bounded by a date range and count."""
    tag = "%23" + tag  # '%23' is the URL-encoded '#'
    client = Client(twitterkeys.consumer_key, twitterkeys.consumer_secret)
    query = tag
    # '+since%3A' / '+until%3A' are the encoded ' since:' / ' until:' operators.
    if from_date is not None:
        query += "+since%3A" + from_date
    if until_date is not None:
        query += "+until%3A" + until_date
    # Fall back to the module default when no count was requested.
    if count is not None:
        query += "&count=" + str(count)  # str(MAX_TWEET_COUNT)
    else:
        query += "&count=" + str(MAX_TWEET_COUNT)
    response_json = client.request(twitter_api_url + query_tweets_url + query)
    response_dict = json.loads(json.dumps(response_json, sort_keys=True))
    search_metadata = response_dict['search_metadata']
    print("Query for " + tag)
    print("query time: " + str(search_metadata['completed_in']))
    return [Tweet(status) for status in response_dict['statuses']]
import json
import os
import time

from application_only_auth import Client

# NOTE(review): hard-coded API credentials — move to config/env.
CONSUMER_KEY = 'dzxgD8JMD6wBqb8DiyAaP3y4i'
CONSUMER_SECRET = 'OVi5EzU3dUTSg2Fnd4oeWYWxSF15qJ3VRLsARGw5sU2EBA79pz'

client = Client(CONSUMER_KEY, CONSUMER_SECRET)

# status = client.rate_limit_status()
# print status['resources']['search']
# a=json.dumps(tweet, sort_keys=True, indent=4, separators=(',', ':'))
# print type(a)

# Bulk user lookup for three fixed user ids.
userlookup = client.request('https://api.twitter.com/1.1/users/lookup.json?user_id=15066760,21836409,813286')
print(userlookup)

# def limitcheck(client):
#     requestlimits = client.request('https://api.twitter.com/1.1/application/rate_limit_status.json?resources=statuses')
#     remaining = int(requestlimits['resources']['statuses']['/statuses/user_timeline']['remaining'])
#     resettime = int(requestlimits['resources']['statuses']['/statuses/user_timeline']['reset'])
#     if(remaining <= 1):
#         curtime = int(round(time.time()))+1
#         time.sleep(resettime-curtime)
# f=open('list.txt','r')
# limitcheck(client)
# last_tweet = 0
# for line in f:
#     querystr = (line.split('@'))[1]
#     querystr = querystr[0:-1]
#     print querystr
#     if(querystr in os.listdir('listtweets')):
#         pass
#     else:
import json

from application_only_auth import Client

# The consumer secret is an example and will not work for real requests
# To register an app visit https://dev.twitter.com/apps/new
CONSUMER_KEY = 'QgnRAOglaJ6I0ulrIgP3R1mrt'
CONSUMER_SECRET = 'JrsmPHBodqeN8R9jbtyZEVbwGFMtWRBToLTjyCca2M33Rg5MYX'

client = Client(CONSUMER_KEY, CONSUMER_SECRET)

# Pretty print of tweet payload
tweet = client.request(
    url='https://api.twitter.com/1.1/statuses/user_timeline.json?screen_name=twitterapi&count=200')
# print json.dumps(tweet, sort_keys=True, indent=4, separators=(',', ':'))

# Show rate limit status for this application
status = client.rate_limit_status()
print(status['resources']['statuses']['/statuses/user_timeline'])
import json

from application_only_auth import Client

# Target coordinates (Davis, CA area).
location = [38.557657, -121.708919]

# The consumer secret is an example and will not work for real requests
# To register an app visit https://dev.twitter.com/apps/new
CONSUMER_KEY = 'ZVUSsTWfsdqUhbG9IBBpsFTVA'
CONSUMER_SECRET = 'FQJql1iyMIVMMhtORLfxFSUNMgSNsBzueye1N9AATdFhM20D0d'

LAT, LONG = location

client = Client(CONSUMER_KEY, CONSUMER_SECRET)

# Format search request string
# request_url = 'https://api.twitter.com/1.1/search/tweets.json?q=geocode={},{},1mi'.format(LAT,LONG)
request_url = 'https://api.twitter.com/1.1/search/tweets.json?q=geocode=38.557657,-121.708919,1mi'

tweet = client.request(request_url)
print(json.dumps(tweet, sort_keys=True, indent=4, separators=(',', ':')))

# print the number of tweets found
print(tweet["search_metadata"]["count"])

# Show rate limit status for this application
status = client.rate_limit_status()
print(status['resources']['search'])
class Twitter:
    """Scrapes a user's timeline and formats tweets as attachment-style messages."""

    def __init__(self, user):
        # build client and class props
        self.client = Client(API_KEY, SECRET_KEY)
        self.user = user

    def scrape(self):
        """Return raw posts that are newer than the last polling cycle."""
        params = f'screen_name={self.user}&count={TWITTER_MESSAGE_REQUEST_COUNT}&tweet_mode=extended'
        url = f'{TWITTER_API_URL}{TWITTER_API_TIMELINE_PATH}?{params}'
        posts = self.client.request(url)
        return [post for post in posts if self._is_new(post)]

    def message(self, post):
        """Build an attachment dict for one tweet, unwrapping retweets."""
        tweet_id = post['id']
        author_name = post['user']['screen_name']
        text = post['full_text']
        # Link always points at the fetched status id (even for retweets).
        pretext = f'https://twitter.com/{author_name}/status/{tweet_id}'
        # convert twitter time to epoch
        ts = int(time.mktime(time.strptime(post['created_at'],
                                           TWITTER_TS_PATTERN)))
        if 'retweeted_status' in post:
            # Retweet: surface the original author, text, and id instead.
            original = post['retweeted_status']
            author_name = f"@{author_name} - retweeted @{original['user']['screen_name']}"
            text = original['full_text']
            tweet_id = original['id']
        return {
            'pretext': pretext,
            'author_name': author_name,
            'text': text,
            'footer': TWITTER_NAME,
            'footer_icon': TWITTER_ICON,
            'ts': ts,
        }

    def _is_new(self, post):
        """True when the post was created after the previous polling cycle."""
        if 'created_at' not in post:
            return False
        posted = int(time.mktime(time.strptime(post['created_at'],
                                               TWITTER_TS_PATTERN)))
        return posted > time.time() - SLEEP_TIME
class TweetFetcher(object):
    """Pages through Twitter search results for a tag, tracking max_id state."""

    twitter_client = None
    # Pagination cursor carried between get_tweets() calls; None = start fresh.
    tweet_max_id = None

    def __init__(self):
        super(TweetFetcher, self).__init__()
        self.twitter_client = Client(twitterkeys.consumer_key,
                                     twitterkeys.consumer_secret)
        self.twitter_client._get_access_token()

    def get_client_status(self):
        """Remaining /search/tweets quota in the current rate-limit window."""
        status = self.twitter_client.rate_limit_status()
        return status['resources']['search']['/search/tweets']['remaining']

    def get_tweets_with_tag_and_max_id(self, search_tag, max_id):
        """Fetch one page of English tweets for search_tag, paging below max_id.

        Returns (tweets, new_max_id); new_max_id is 0 when the page was empty.
        """
        query = search_tag
        query += "%20lang%3Aen"
        query += "&result_type=" + "mixed"  # result_type
        if max_id is not None:
            query += "&max_id=" + str(max_id)
        query += "&count=" + str(MAX_TWEET_COUNT)
        request_start = datetime.datetime.now()  # request timing
        response_json = self.twitter_client.request(twitter_api_url + query_tweets_url + query)
        DLOG("Request time: " + str(datetime.datetime.now() - request_start))  # request timing
        response_dict = json.loads(json.dumps(response_json, sort_keys=True))
        tweets = [Tweet(status) for status in response_dict['statuses']]
        if len(tweets) > 0:
            # Oldest tweet on the page becomes the next paging anchor.
            return (tweets, tweets[-1].id)
        else:
            return (tweets, 0)

    def get_timeline(self, search_tag, length):
        """Page through results until `length` tweets are collected or exhausted."""
        max_id = None
        new_max_id = None
        loop_counter = 0
        timeline = []
        # Iterate over timeline. (Idiom fix: boolean `or`, not bitwise `|`.)
        while (loop_counter == 0) or (max_id != new_max_id):
            loop_counter += 1
            # Step below the previous page's oldest id to avoid overlap.
            max_id = new_max_id - 1 if new_max_id is not None else None
            # Bug fix: this was a bare get_tweets_with_tag_and_max_id(...) call,
            # which raises NameError — it is a method and needs `self.`.
            (new_tweets, new_max_id) = self.get_tweets_with_tag_and_max_id(
                search_tag, max_id)
            timeline.extend(new_tweets)
            # print str(max_id) + ", " + str(new_max_id)
            if len(timeline) >= length or new_max_id == 0:
                break
        return timeline

    def get_tweets(self, search_tag):
        """Fetch the next page of tweets, continuing from self.tweet_max_id."""
        query = search_tag
        query += "%20lang%3Aen"
        query += "&result_type=" + "mixed"  # result_type
        if self.tweet_max_id is not None:
            query += "&max_id=" + str(self.tweet_max_id)
        query += "&count=" + str(MAX_TWEET_COUNT)
        request_start = datetime.datetime.now()  # request timing
        response_json = self.twitter_client.request(twitter_api_url + query_tweets_url + query)
        DLOG("Request time: " + str(datetime.datetime.now() - request_start))  # request timing
        response_dict = json.loads(json.dumps(response_json, sort_keys=True))
        tweets = [Tweet(status) for status in response_dict['statuses']]
        if len(tweets) > 0:
            self.tweet_max_id = tweets[-1].id
        else:
            self.tweet_max_id = None
        return tweets

    def stop_fetching(self):
        """Reset the pagination cursor."""
        self.tweet_max_id = None
# status = client.rate_limit_status() # print status['resources'] # Level 1 - Get a list of my followers # https://dev.twitter.com/rest/reference/get/friends/ids MY_USERNAME = '******' levelOneFileName = 'levelOne.dat' if os.path.isfile(levelOneFileName): print('Reading pickled data') with open(levelOneFileName, 'rb') as fin: following = pickle.load(fin) else: followingData = client.request('https://api.twitter.com/1.1/friends/ids.json?cursor=-1&screen_name=' + MY_USERNAME + '&count=5000') following = followingData['ids'] with open(levelOneFileName, 'wb') as fout: pickle.dump(following, fout) # Level 2 - Get a list of those following my followers # https://dev.twitter.com/rest/reference/get/followers/ids levelTwoFileName = 'levelTwo.dat' numRecordAtATime = 14 secondsBetweenQuery = 15*60.0 # blockNum = 0 # time.sleep(secondsBetweenQuery) # blockNum = 8 # levelTwoFollowers = []
# Optionally exclude retweets based on the user's earlier "n" answer.
if "n" in rts:
    include_rts = False
    print("Ignoring retweets")
# TODO make sure something was input here ^
# Generate twitter api request
request_url = "https://api.twitter.com/1.1/statuses/user_timeline.json"
request_param = {
    "screen_name": username,
    "count": count,
    "include_rts": include_rts,
}
# timeline comes back as a list of dicts
timeline = twitter.request(request_url + dict_to_query(request_param))
# get the id of the last tweet received
# NOTE(review): raises IndexError if the timeline is empty — confirm the
# caller guarantees at least one tweet.
oldest_id = timeline[-1]["id"]
# add each tweet text to a list
tweets = []
for tweet in timeline:
    # Use the retweeted original's text when present.
    if "retweeted_status" in tweet:
        tweets.append(tweet["retweeted_status"]["text"])
    else:
        tweets.append(tweet["text"])
# clean up those escape chars, @user, and http links
# (cleanup logic appears to continue beyond this view)
for index, tweet in enumerate(tweets):
    clean_tweet = tweet
# Collect recent tweets that phrase-match sadness expressions, keyed by text.
master = {}
# Cross first-person prefixes, intensifiers, and sentiment words to build
# every phrasing like "I'm really sad".
sentiments = ["sad", "depressed", "upset", "heartbroken"]
quants = ["", "really ", "very ", "extremely "]
me = ["I am ", "I'm ", "I'm feeling "]
sad_boys = []
for s in sentiments:
    for q in quants:
        for m in me:
            sad_boys.append(m + q + s)
i = 0
j = 0
for s in sad_boys:
    print("query#: " + str(i) + " Using: " + s)
    query = urllib2.quote(s).encode("utf8")
    # Exact-phrase search (the phrase is wrapped in escaped quotes).
    tweets = client.request('https://api.twitter.com/1.1/search/tweets.json?count=50&result_type=recent&q=' + "\"" + query + "\"")
    statuses = tweets["statuses"]
    i += 1
    for status in statuses:  # iterating over tweets for specific sentiment
        print("status parse: " + str(j))
        j += 1
        # Idiom fix: compare against None with `is`, not `==`.
        # Only keep top-level tweets, not replies.
        if status["in_reply_to_status_id"] is None:
            curr_status = status["text"].encode('ascii', 'ignore')
            curr_id = status["id"]
            # NOTE(review): keying by tweet text collapses duplicate texts to
            # one entry — confirm that deduplication is intended.
            master[curr_status] = curr_id
# filter text: keep only tweets that opted in with the #cheermeup tag.
filtered_statuses = {}
for status in master.keys():
    if "#cheermeup" in status.lower():
        filtered_statuses[status] = master[status]
import json

from application_only_auth import Client

# The consumer secret is an example and will not work for real requests
# To register an app visit https://dev.twitter.com/apps/new
CONSUMER_KEY = 'QgnRAOglaJ6I0ulrIgP3R1mrt'
CONSUMER_SECRET = 'JrsmPHBodqeN8R9jbtyZEVbwGFMtWRBToLTjyCca2M33Rg5MYX'

client = Client(CONSUMER_KEY, CONSUMER_SECRET)

# Pretty print of tweet payload
timeline_url = 'https://api.twitter.com/1.1/statuses/user_timeline.json?screen_name=twitterapi&count=200'
tweet = client.request(url=timeline_url)
# print json.dumps(tweet, sort_keys=True, indent=4, separators=(',', ':'))

# Show rate limit status for this application
status = client.rate_limit_status()
print(status['resources']['statuses']['/statuses/user_timeline'])
# Query User's Timelines numCurRec = 1490 secondsBetweenQuery = 15 * 60.0 blockNum = 0 start = 217200 # Start at zero for new run currentRecords = [] for i in range(start, len(uLevelTwoIDs)): if i % 100 == 0: print("Starting record {0}".format(str(i))) if i % 700 == 0: time.sleep(100.0) try: tweetData = client.request( 'https://api.twitter.com/1.1/statuses/user_timeline.json?user_id={id}&count={cnt}' .format(id=str(uLevelTwoIDs[i]).replace("L", ""), cnt=100)) currentRecords.append(tweetData) except Exception as e: print('Error when searching id: {id}'.format(id=str(uLevelTwoIDs[i]))) if len(currentRecords) % 100 == 0: for rec in currentRecords: # Get User Data if len(rec) > 0: user_id = rec[0]['user']['id'] user_screen_name = rec[0]['user']['screen_name'].encode( 'utf-8').replace("'", "").replace('"', '') user_description = rec[0]['user']['description'].encode( 'utf-8').replace("'", "").replace('"', '')
import json

from application_only_auth import Client
from config import *

# The consumer secret is an example and will not work for real requests
# To register an app visit https://dev.twitter.com/apps/new
CONSUMER_KEY = account1[0]
CONSUMER_SECRET = account1[1]

client = Client(CONSUMER_KEY, CONSUMER_SECRET)

# Pretty print of tweet payload
status_url = 'https://api.twitter.com/1.1/statuses/show.json?id=316683059296624640'
tweet = client.request(status_url)

# Show rate limit status for this application
status = client.rate_limit_status()
print(status['resources']['statuses']['/statuses/retweeters/ids'])