def __post_status(self, text, media_id):
    """ post the tweet with a media attachment and text """
    params = {
        "status": text,
        # a single media id; the API accepts a comma-separated list
        "media_ids": str(media_id)
    }
    response = self.session.post(STATUS_UPDATE_URL, data=params)
    res_err(response, "POSTING THE TWEET AFTER MEDIA UPLOAD")
    if response.status_code < 200 or response.status_code > 299:
        return
    logging.info(f'posted {text}')
def __fetch_users(self, query):
    """
    https://developer.twitter.com/en/docs/accounts-and-users/follow-search-get-users/api-reference/get-users-search
    """
    search_res = self.session.get(f'{USER_SEARCH_URL}?q={query}')
    res_err(search_res, f'searching users with query: {query}')
    if search_res.status_code < 200 or search_res.status_code > 299:
        return None
    # named `users` rather than `json` to avoid shadowing the json module
    users = search_res.json()
    logging.info(f'users search with {query} returned {len(users)} results')
    return users
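# A minimal sketch, not part of the module: __fetch_users above splices the
# raw query into the URL by hand, so a query containing spaces or '#' would
# produce a malformed request. Percent-encoding first is one option;
# `__encode_query` is a hypothetical helper name.
def __encode_query(self, query):
    """ percent-encode a raw query for safe use in a hand-built URL """
    from urllib.parse import quote
    return quote(query, safe='')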
def __search_tweets(self, query):
    """ search the 100 most recent tweets matching the (pre-encoded) query """
    url = f'{TWEET_SEARCH_URL}?q={query}&count=100'
    tweets_res = self.session.get(url)
    res_err(tweets_res, f'searching tweets with query: {query}')
    if tweets_res.status_code < 200 or tweets_res.status_code > 299:
        return None
    tweets = tweets_res.json()["statuses"]
    # file = open("mock/tweets_hello_100.json", "r")
    # tweets = json.load(file)["statuses"]
    # file.close()
    logging.info(
        f'tweet search with {query} returned {len(tweets)} results')
    return tweets
def create(self):
    """ add friends based on the latest trends """
    # fetch the most popular trends on twitter (Canada, WOEID 23424775)
    # file = open("mock/trends_ottawa.json", "r")
    # trends = json.load(file)[0]["trends"]
    # file.close()
    trends_res = self.session.get(f'{TRENDS_URL}?id=23424775')
    res_err(trends_res, "fetching trends")
    if trends_res.status_code < 200 or trends_res.status_code > 299:
        return
    trends = trends_res.json()[0]["trends"]

    # perform a user lookup based on the top trends
    # (memcache those users? see the sketch after this method)
    top_trends = trends[:3]
    logging.info(f'top trends are: {top_trends}')

    # look for users who have hardcoded trend information in their profile
    # user_ids = []
    # for trend in top_trends:
    #     query = trend["query"]
    #     users = self.__fetch_users(query=query)
    #     # file = open("mock/users_search_summer_fun.json", "r")
    #     # users = json.load(file)
    #     # file.close()
    #     if not users:
    #         continue
    #     ids = map(lambda user: user["id"], users)
    #     user_ids.extend(ids)

    # look for unique users related to the trends; join with an encoded
    # " OR " since the query is spliced into the URL by hand
    trend_query = "%20OR%20".join(
        map(lambda trend: trend["query"], top_trends))
    tweets = self.__search_tweets(query=trend_query)
    if not tweets:
        return
    # keep one tweet per user so each account is followed and liked once
    user_tweet_map = {}
    for tweet in tweets:
        user_tweet_map[tweet["user"]["id"]] = tweet
    logging.info(f'adding {len(user_tweet_map)} friends')
    logging.info(f'liking {len(user_tweet_map)} tweets')

    # follow all those memcached users
    for user_id, tweet in user_tweet_map.items():
        self.__follow(user_id)
        self.__like(tweet)
        # throttle
        time.sleep(FOLLOW_THROTTLE_SECONDS)
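# A minimal sketch of the "(memcache those users? ...)" note in create()
# above, assuming an in-process cache is acceptable; `_trend_cache` and
# `cached_search` are hypothetical names, not part of the module.
_trend_cache = {}

def cached_search(fetch, query, ttl_seconds=15 * 60):
    """ cache trend search results per query, refetching once the TTL lapses """
    entry = _trend_cache.get(query)
    if entry and time.time() - entry[0] < ttl_seconds:
        return entry[1]
    results = fetch(query)
    _trend_cache[query] = (time.time(), results)
    return results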
def __upload_media(self, file_name):
    """
    Using
    https://github.com/twitterdev/large-video-upload-python/blob/master/async-upload.py
    as a resource
    """
    file_path = f'resources/pictures/{file_name}'
    file_size = os.path.getsize(file_path)

    # INIT
    params = {
        "command": "INIT",
        "media_type": "image/jpeg",
        "total_bytes": file_size
    }
    init_response = self.session.post(MEDIA_UPLOAD_URL, data=params)
    res_err(init_response, "MEDIA UPLOAD INIT")
    if init_response.status_code < 200 or init_response.status_code > 299:
        return None
    logging.info(f'{file_name} INIT succeeded')
    media_id = init_response.json()['media_id']

    # APPEND CHUNKED
    segment_id = 0
    bytes_sent = 0
    with open(file_path, 'rb') as media_file:
        while bytes_sent < file_size:
            # read 4MB at a time
            chunk = media_file.read(4 * 1024 * 1024)
            params = {
                "command": "APPEND",
                "media_id": media_id,
                "segment_index": segment_id
            }
            files = {
                "media": chunk
            }
            chunk_res = self.session.post(
                MEDIA_UPLOAD_URL, data=params, files=files)
            res_err(chunk_res, "MEDIA UPLOAD APPEND")
            res_err(chunk_res, f'({bytes_sent}/{file_size}) {file_path}')
            if chunk_res.status_code < 200 or chunk_res.status_code > 299:
                return None
            segment_id += 1
            bytes_sent = media_file.tell()
    logging.info(f'{file_name} CHUNK succeeded')

    # FINALIZE
    params = {
        "command": "FINALIZE",
        "media_id": media_id,
    }
    fin_res = self.session.post(MEDIA_UPLOAD_URL, data=params)
    res_err(fin_res, "MEDIA UPLOAD FINALIZE")
    if fin_res.status_code < 200 or fin_res.status_code > 299:
        return None
    logging.info(f'{file_name} {media_id} FINALIZE succeeded')

    # RETURN MEDIA_ID
    return media_id
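# A minimal sketch of how the upload above chains into a tweet;
# `tweet_picture` is a hypothetical method name, not part of the module.
def tweet_picture(self, text, file_name):
    """ upload a picture from resources/pictures and tweet it with text """
    media_id = self.__upload_media(file_name)
    if media_id is None:
        return
    self.__post_status(text, media_id)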
def purge(self):
    """ remove friends, unlike tweets of friends that have not followed back """
    # fetch all the user's friends
    friends = []
    next_cursor = -1
    while next_cursor != 0:
        pair = self.__fetch_friends(cursor=next_cursor)
        if pair is None:
            return
        next_cursor = pair[0]
        friends.extend(pair[1])
        time.sleep(5)

    # fetch all the user's followers
    followers = []
    next_cursor = -1
    while next_cursor != 0:
        pair = self.__fetch_followers(cursor=next_cursor)
        if pair is None:
            return
        next_cursor = pair[0]
        followers.extend(pair[1])
        time.sleep(5)

    # friends_file = open('mock/friends1.json', 'r')
    # followers_file = open('mock/followers1.json', 'r')
    # friends_json = json.load(friends_file)
    # followers_json = json.load(followers_file)
    # friends_file.close()
    # followers_file.close()
    # friends = friends_json['users']
    # followers = followers_json['users']

    # identify those who are not "followed_by"
    friend_ids = set(map(lambda user: user['id'], friends))
    follower_ids = set(map(lambda user: user['id'], followers))
    users_to_unfollow = friend_ids - follower_ids
    logging.info(f'{len(friend_ids)} friends')
    logging.info(f'{len(follower_ids)} followers')
    logging.info(f'removing {len(users_to_unfollow)} friends')

    # unfollow all those users
    for user_id in users_to_unfollow:
        self.__unfollow(user_id)
        # unfollow throttle
        time.sleep(UNFOLLOW_THROTTLE_SECONDS)

    # unfavourite all tweets, in batches, up to the pass limit
    favourites = self.__favourited_tweets()
    unfavourite_limit = 10
    i = 0
    while favourites and i < unfavourite_limit:
        logging.info(f'unliking {len(favourites)} tweets')
        # unlike all favourited tweets
        for tweet in favourites:
            self.__unlike(tweet)
            # unlike throttle
            time.sleep(UNLIKE_THROTTLE_SECONDS)
        favourites = self.__favourited_tweets()
        i += 1
        time.sleep(1)

    logging.info('purge completed')
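# A minimal driver sketch, left commented out because `Bot` is a hypothetical
# stand-in for whatever class these methods belong to, built with an
# authenticated requests session elsewhere in the real code.
# if __name__ == '__main__':
#     logging.basicConfig(level=logging.INFO)
#     bot = Bot()
#     bot.create()  # follow and like users around the current top trends
#     bot.purge()   # drop friends who never followed back, unlike their tweets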