def upload_images(self, urls):
  """Uploads one or more images from web URLs.

  https://dev.twitter.com/rest/reference/post/media/upload

  Args:
    urls: sequence of string URLs of images

  Returns:
    list of string media ids
  """
  media_ids = []

  for image_url in urls:
    # fetch the image bytes ourselves, then re-upload them to Twitter
    fetched = util.urlopen(image_url)

    # reject unsupported content types up front; on mismatch, hand the
    # error value straight back to the caller instead of uploading
    mime_error = self._check_mime_type(image_url, fetched, IMAGE_MIME_TYPES,
                                       'JPG, PNG, GIF, and WEBP images')
    if mime_error:
      return mime_error

    # sign the upload request with OAuth, then POST the image as multipart
    oauth_headers = twitter_auth.auth_header(
      API_UPLOAD_MEDIA, self.access_token_key, self.access_token_secret,
      'POST')
    upload = util.requests_post(API_UPLOAD_MEDIA, files={'media': fetched},
                                headers=oauth_headers)
    upload.raise_for_status()
    logging.info('Got: %s', upload.text)

    parsed = source.load_json(upload.text, API_UPLOAD_MEDIA)
    media_ids.append(parsed['media_id_string'])

  return media_ids
def urlopen(self, url, **kwargs):
  """Wraps :func:`urllib2.urlopen()` and passes through the access token.

  Args:
    url: string API URL to fetch
    kwargs: passed through to :class:`urllib2.Request`, e.g. data, headers

  Returns:
    if kwargs includes data (i.e. this is a POST), the raw response object;
    otherwise the decoded JSON response's 'data' field
  """
  # (fixed: dropped the unused local `log_url` that captured the
  # pre-token URL but was never read in this function)
  if self.access_token:
    # TODO add access_token to the data parameter for POST requests
    url = util.add_query_params(url, [('access_token', self.access_token)])
  resp = util.urlopen(urllib2.Request(url, **kwargs))
  return (resp if kwargs.get('data')
          else source.load_json(resp.read(), url).get('data'))
def request():
  """Makes one signed request to the Twitter API.

  NOTE(review): this is a closure — `url`, `self`, `kwargs`, and
  `parse_response` are free variables bound in an enclosing function that
  is not visible in this chunk; confirm their meaning against the caller.
  """
  resp = twitter_auth.signed_urlopen(
    url, self.access_token_key, self.access_token_secret, **kwargs)
  # parse_response chooses between decoded JSON and the raw response object
  return source.load_json(resp.read(), url) if parse_response else resp
def get_activities_response(self, user_id=None, group_id=None, app_id=None,
                            activity_id=None, start_index=0, count=0,
                            etag=None, min_id=None, cache=None,
                            fetch_replies=False, fetch_likes=False,
                            fetch_shares=False, fetch_events=False,
                            fetch_mentions=False, search_query=None,
                            **kwargs):
  """Fetches posts and converts them to ActivityStreams activities.

  XXX HACK: this is currently hacked for bridgy to NOT pass min_id to the
  request for fetching activity tweets themselves, but to pass it to all of
  the requests for filling in replies, retweets, etc. That's because we want
  to find new replies and retweets of older initial tweets. TODO: find a
  better way.

  See :meth:`source.Source.get_activities_response()` for details. app_id is
  ignored. min_id is translated to Twitter's since_id.

  The code for handling ETags (and 304 Not Changed responses and setting
  If-None-Match) is here, but unused right now since Twitter evidently
  doesn't support ETags. From https://dev.twitter.com/discussions/5800 :
  "I've confirmed with our team that we're not explicitly supporting this
  family of features."

  Likes (ie favorites) are scraped from twitter.com HTML, since Twitter's
  REST API doesn't offer a way to fetch them. You can also get them from the
  Streaming API, though, and convert them with streaming_event_to_object().
  https://dev.twitter.com/docs/streaming-apis/messages#Events_event

  Shares (ie retweets) are fetched with a separate API call per tweet:
  https://dev.twitter.com/docs/api/1.1/get/statuses/retweets/%3Aid

  However, retweets are only fetched for the first 15 tweets that have them,
  since that's Twitter's rate limit per 15 minute window. :(
  https://dev.twitter.com/docs/rate-limiting/1.1/limits

  Quote tweets are fetched by searching for the possibly quoted tweet's ID,
  using the OR operator to search up to 5 IDs at a time, and then checking
  the quoted_status_id_str field
  https://dev.twitter.com/overview/api/tweets#quoted_status_id_str

  Use the group_id @self to retrieve a user_id's timeline. If user_id is
  None or @me, it will return tweets for the current API user.

  group_id can be used to specify the slug of a list for which to return
  tweets. By default the current API user's lists will be used, but lists
  owned by other users can be fetched by explicitly passing a username to
  user_id, e.g. to fetch tweets from the list @exampleuser/example-list you
  would call get_activities(user_id='exampleuser', group_id='example-list').

  Twitter replies default to including a mention of the user they're
  replying to, which overloads mentions a bit. When fetch_shares is True, we
  determine that a tweet mentions the current user if it @-mentions their
  username and:

  * it's not a reply, OR
  * it's a reply, but not to the current user, AND
    * the tweet it's replying to doesn't @-mention the current user
  """
  if group_id is None:
    group_id = source.FRIENDS

  # nested function for lazily fetching the user object if we need it
  user = []
  def _user():
    if not user:
      user.append(self.urlopen(API_USER % user_id if user_id
                               else API_CURRENT_USER))
    return user[0]

  if count:
    # Twitter's count param is an absolute size, so fold in the offset
    count += start_index

  activities = []
  if activity_id:
    # single-tweet fetch: no timeline URL needed
    tweets = [self.urlopen(API_STATUS % activity_id)]
    total_count = len(tweets)
  else:
    if group_id == source.SELF:
      if user_id in (None, source.ME):
        user_id = ''
      url = API_USER_TIMELINE % {
        'count': count,
        'screen_name': user_id,
      }

      if fetch_likes:
        liked = self.urlopen(API_FAVORITES % user_id)
        if liked:
          activities += [self._make_like(tweet, _user()) for tweet in liked]
    elif group_id == source.SEARCH:
      url = API_SEARCH % {
        'q': urllib.quote_plus(search_query.encode('utf-8')),
        'count': count,
      }
    elif group_id in (source.FRIENDS, source.ALL):
      url = API_TIMELINE % (count)
    else:
      # group_id is a list slug; default the owner to the current user
      if not user_id:
        user_id = _user().get('screen_name')
      url = API_LIST_TIMELINE % {
        'count': count,
        'slug': group_id,
        'owner_screen_name': user_id,
      }

    headers = {'If-None-Match': etag} if etag else {}
    total_count = None
    try:
      resp = self.urlopen(url, headers=headers, parse_response=False)
      etag = resp.info().get('ETag')
      tweet_obj = source.load_json(resp.read(), url)
      # search responses wrap the tweets in a 'statuses' field
      if group_id == source.SEARCH:
        tweet_obj = tweet_obj.get('statuses', [])
      tweets = tweet_obj[start_index:]
    except urllib2.HTTPError, e:
      if e.code == 304:  # Not Modified, from a matching ETag
        tweets = []
      else:
        raise

  # NOTE(review): tweet_activities, cached, and cache_updates are referenced
  # below but not assigned in this visible chunk — presumably defined in
  # code elided between the try/except above and here. Verify against the
  # full file.
  if fetch_mentions:
    # fetch mentions *after* replies so that we don't get replies to mentions
    # https://github.com/snarfed/bridgy/issues/631
    mentions = self.fetch_mentions(_user().get('screen_name'), tweets,
                                   min_id=min_id)
    tweet_activities += [self.tweet_to_activity(m) for m in mentions]

  if fetch_likes:
    for tweet, activity in zip(tweets, tweet_activities):
      id = tweet['id_str']
      count = tweet.get('favorite_count')
      # only re-scrape when the favorite count changed since the cached value
      if self.is_public(activity) and count and count != cached.get('ATF ' + id):
        url = HTML_FAVORITES % id
        try:
          resp = util.urlopen(url).read()
          html = source.load_json(resp, url).get('htmlUsers', '')
        except urllib2.URLError, e:
          util.interpret_http_exception(e)  # just log it
          continue
        likes = self.favorites_html_to_likes(tweet, html)
        activity['object'].setdefault('tags', []).extend(likes)
        cache_updates['ATF ' + id] = count

  activities += tweet_activities

  response = self.make_activities_base_response(activities)
  response.update({'total_count': total_count, 'etag': etag})
  if cache_updates and cache is not None:
    cache.set_multi(cache_updates)
  return response

def fetch_replies(self, activities, min_id=None):