Example #1
 def getCandidate(isbn):
   existing = Book.query(Book.isbn == isbn)
   if existing.count() > 0:
     book = existing.get()
     book.total_copies += 1
     k = book.put()
     return k
   else:
     bookrpc = urlfetch.create_rpc()
     coverrpc = urlfetch.create_rpc()
     urlfetch.make_fetch_call(bookrpc, BOOK_API_ISBN_URL % isbn)
     urlfetch.make_fetch_call(coverrpc, COVER_URL % isbn)
     result = bookrpc.get_result()
     if result.status_code != 200:
       return None
     book = Book()
     book.xml_payload = result.content
     tree = et.XML(result.content)
     if not tree[0]:
       return None
     xbook = tree[0][0]
     book.author = xbook.findtext('AuthorsText')
      book.title = xbook.findtext('TitleLong') or xbook.findtext('Title')
     book.total_copies = 1
     book.isbn = isbn
     cover = coverrpc.get_result()
      if cover.status_code != 200:
        # No cover image was available; store the book without one so the
        # return type stays a key, like the other branches.
        return book.put()
     file_name = files.blobstore.create(mime_type='image/jpeg')
     with files.open(file_name, 'a') as f:
       f.write(cover.content)
     files.finalize(file_name)
     book.cover = files.blobstore.get_blob_key(file_name)
     return book.put()
Example #2
def main():
    """
    Makes some queries against the application's own API, during which the
    memcache should get repopulated. Intended to be run by a cron job every
    3 minutes.
    """

    # Fetch API Locations with Genres
    genre_dict = utils.genres
    genre_dict.update({'all': ''})
    counter = -1
    queries = []
    for genre in genre_dict.keys():
        counter += 1
        queries.insert(counter, urlfetch.create_rpc())
        urlfetch.make_fetch_call(
            queries[counter], settings.APPLICATION_URL +
            "/api/locations/maxtracks?genre=" + genre)
        for i in range(0, 20):
            counter += 1
            #logging.info("Genre %s Counter %i i %i" % (genre, counter, i))
            queries.insert(counter, urlfetch.create_rpc())
            urlfetch.make_fetch_call(
                queries[counter], settings.APPLICATION_URL +
                "/api/locations/?&limit=10&offset=%i&genre=%s" % (i, genre))

    # Fetch top Cities
    # TODO

    counter2 = 0
    for query in queries:
        #logging.info("waiting for counter "+str(counter2))
        counter2 += 1
        query.wait()
Example #3
    def __init__(self):
        rpc = urlfetch.create_rpc(deadline=60)
        urlfetch.make_fetch_call(rpc, self.url)

        rpcs = []

        try:
            result = rpc.get_result()
            if result.status_code == 200:
                content = EncodingHelper.getEncodedContent(result)
                soup = BeautifulSoup(content)

                events = soup.find(id='hp-articles').findChildren('a')

                for event in events:
                    structuredEvent = {}
                    structuredEvent['source'] = self.url
                    structuredEvent['url'] = self.url+event.get('href')
                    structuredEvent['title'] = event.findChild('h2').string
                    structuredEvent['img'] = event.findChild('img').get('src')
                    structuredEvent['place'] = event.findChild('div', attrs={"class": "hp-article-title"}).string
                    self.structuredEvents.append(structuredEvent)

                    innerRpc = urlfetch.create_rpc(deadline=60)
                    innerRpc.callback = self.create_callback(innerRpc)
                    urlfetch.make_fetch_call(innerRpc, structuredEvent['url'], follow_redirects=False)
                    rpcs.append(innerRpc)

        except urlfetch.DownloadError:
            self.response.write("chyba stahovani")  # Czech: "download error"

        for irpc in rpcs:
            irpc.wait()
Example #4
 def http_check(self, af='both'):
   if self.urltype != URLV6LITERAL:
     # Test over IPv4
     self.rpc_ipv4 = urlfetch.create_rpc()
     urlfetch.make_fetch_call(self.rpc_ipv4, 'http://'+self.hostname, method=urlfetch.HEAD, follow_redirects=False)
   if self.urltype != URLV4LITERAL:
     # Test over IPv6
     self.rpc_ipv6 = urlfetch.create_rpc()
     if self.urltype == URLV6LITERAL:
       urlfetch.make_fetch_call(self.rpc_ipv6, 'http://isupme.psimonkey.org.uk/?url=['+self.hostname+']', method=urlfetch.GET, follow_redirects=False)
     else:
       urlfetch.make_fetch_call(self.rpc_ipv6, 'http://isupme.psimonkey.org.uk/?url='+self.hostname, method=urlfetch.GET, follow_redirects=False)
   if self.urltype != URLV6LITERAL:
     try:
       response = self.rpc_ipv4.get_result()
       if response.status_code in [200,301,302,304]:
         self.up4 = True
     except (urlfetch.Error, DeadlineExceededError):
       pass
   if self.urltype != URLV4LITERAL:
     try:
       import simplejson as json
       response = json.loads(self.rpc_ipv6.get_result().content)
       self.up6 = response[u'up6']
      except (urlfetch.Error, DeadlineExceededError):
        pass
Example #5
 def __init__(self, url):
     try:
         self.key = urllib.quote_plus(urlparse.urlparse(url).geturl())
     except KeyError:
         logging.error(u"Couldn't parse {}".format(url))
         raise
     self.expanded = memcache.get(self.key)
     if self.expanded is None:
         self.rpc = urlfetch.create_rpc()
         urlfetch.make_fetch_call(self.rpc, url)
Example #6
    def _FetchUrl(self, url, post_data=None, parameters=None, headers={}, no_cache=False):
        '''Fetch a URL, optionally caching for a specified time.

        Args:
            url: The URL to retrieve
            post_data:
                If set, POST will be used.
            parameters:
                A dict whose key/value pairs should be encoded and added
                to the query string. [OPTIONAL]

        Returns:
            A string containing the body of the response.
        '''

        # Build the extra parameters dict
        extra_params = {}
        if self._default_params:
            extra_params.update(self._default_params)
        if parameters:
            extra_params.update(parameters)

        if post_data:
            http_method = "POST"
        else:
            http_method = "GET"

        req = self._makeOAuthRequest(url, parameters=extra_params, http_method=http_method)
        self._signRequest(req, self._signature_method)

        url = req.to_url()

        # Merge the standard headers with any caller-supplied headers.
        request_headers = {}
        request_headers.update(STANDARD_HEADERS)
        request_headers.update(headers)

        # Open and return the URL immediately
        if post_data:
            rpc = urlfetch.create_rpc(deadline=10.0)
            urlfetch.make_fetch_call(rpc, url, method=urlfetch.POST, headers=request_headers, payload=post_data)
            resp = rpc.get_result()
            url_data = resp.content
        else:
            rpc = urlfetch.create_rpc(deadline=10.0)
            urlfetch.make_fetch_call(rpc, url, method=urlfetch.GET, headers=request_headers)
            resp = rpc.get_result()
            url_data = resp.content

        return url_data
Example #7
    def _FetchUrl(self, url, post_data=None, parameters=None, headers={}, no_cache=False):
        """Fetch a URL, optionally caching for a specified time.

        Args:
            url: The URL to retrieve
            post_data:
                If set, POST will be used.
            parameters:
                A dict whose key/value pairs should be encoded and added
                to the query string. [OPTIONAL]

        Returns:
            A string containing the body of the response.
        """

        # Build the extra parameters dict

        extra_params = {}
        if self._default_params:
            extra_params.update(self._default_params)
        if parameters:
            extra_params.update(parameters)

        if post_data:
            http_method = "POST"
        else:
            http_method = "GET"

        req = self._makeOAuthRequest(url, parameters=extra_params, http_method=http_method)
        self._signRequest(req, self._signature_method)

        url = req.to_url()

        # Merge the standard headers with any caller-supplied headers.
        request_headers = {}
        request_headers.update(STANDARD_HEADERS)
        request_headers.update(headers)

        # Open and return the URL immediately
        if post_data:
            rpc = urlfetch.create_rpc(deadline=10.0)
            urlfetch.make_fetch_call(rpc, url, method=urlfetch.POST, headers=request_headers, payload=post_data)
            resp = rpc.get_result()
            url_data = resp.content
        else:
            rpc = urlfetch.create_rpc(deadline=10.0)
            urlfetch.make_fetch_call(rpc, url, method=urlfetch.GET, headers=request_headers)
            resp = rpc.get_result()
            url_data = resp.content

        return url_data
Example #8
 def ExpectRequest(self, response_content=None, response_code=200, response_headers=None):
     urlfetch.create_rpc().AndReturn(self.urlfetch_rpc)
     urlfetch.make_fetch_call(
         self.urlfetch_rpc,
         "http://myserver/myservice.my_method",
         payload=self.encoded_request,
         method="POST",
         headers={"Content-type": "application/json"},
     )
     if response_content is None:
         response_content = self.encoded_response
     if response_headers is None:
         response_headers = {"content-type": "application/json"}
     self.urlfetch_response = URLFetchResponse(response_content, response_code, response_headers)
     self.urlfetch_rpc.get_result().AndReturn(self.urlfetch_response)
Example #9
    def fetch_async(self, url, deadline=CONNECTION_TIMEOUT,
                     callback=None, cb_args=[], cb_kwargs={},
                     **kwargs):
        """
        Asynchronously fetches the requested url.  Ensures that the maximum
        number of simultaneous asynchronous fetches is not exceeded.
 
        url      - the url to fetch
        deadline - maximum number of seconds to wait for a response
        callback - if given, called upon completion.  The first argument will be
                   the rpc object (which contains the response).  If cb_args
                   or cb_kwargs were provided then these will be passed to
                   callback as additional positional and keyword arguments.
 
        All other keyword arguments are passed to urlfetch.make_fetch_call().
 
        Returns the RPC which will be used to fetch the URL.
        """
        rpc = urlfetch.create_rpc(deadline=deadline)
        rpc.callback = lambda : self.__fetch_completed(rpc, callback,
                                                       cb_args, cb_kwargs)
 
        f = lambda : urlfetch.make_fetch_call(rpc, url, **kwargs)
        if len(self.active_fetches) < MAX_SIMULTANEOUS_ASYNC_URLFETCH_REQUESTS:
            self.__fetch(rpc, f)
        else:
            self.pending_fetches.append( (rpc,f) )
        return rpc
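The two private helpers referenced above are not part of the snippet. A minimal sketch of what they might look like, assuming active_fetches is a list and pending_fetches a FIFO queue (only the names come from the snippet; the bodies are guesses):

    def __fetch(self, rpc, make_call):
        # Record the RPC as in-flight, then actually issue the fetch.
        self.active_fetches.append(rpc)
        make_call()

    def __fetch_completed(self, rpc, callback, cb_args, cb_kwargs):
        # A fetch finished: free its slot and start a queued fetch, if any.
        self.active_fetches.remove(rpc)
        if self.pending_fetches:
            next_rpc, next_call = self.pending_fetches.pop(0)
            self.__fetch(next_rpc, next_call)
        if callback is not None:
            callback(rpc, *cb_args, **cb_kwargs)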
Example #10
 def post(self):
     from google.appengine.api import urlfetch
     from google.appengine.api import memcache
     rpc = urlfetch.create_rpc()
     
     
     list_url = cgi.escape(self.request.get('url'))
     list_url = list_url.strip()
     if list_url and list_url[-1] == "/":
         list_url = list_url[:-1]
     if list_url.find("lists/") >= 0:
         list_url = list_url.replace("lists/","")
     split    = list_url.split("/")
     og       = "http://twitter.com/%s/%s" % (split[-2],split[-1])
     json_url = "http://twitter.com/%s/lists/%s/statuses.json" % (split[-2],split[-1])
     rss_url  = "http://twiterlist2rss.appspot.com/%s/lists/%s/statuses.rss" % (split[-2],split[-1])
     urlfetch.make_fetch_call(rpc, rss_url)       
     template_values = {
         "posted":True,
         "og":og,
         "json_url":json_url,
         "rss_url":rss_url           
     }
     path = os.path.join(os.path.dirname(__file__),'templates' ,'index.html')
     self.response.out.write(template.render(path, template_values))
Example #11
    def get_mdp_data_async(self, access_token, endpoint):
        url = u'https://www.mydigipass.com/oauth/%s' % endpoint
        logging.debug('Creating RPC item for %s', url)

        rpc = urlfetch.create_rpc(deadline=20)
        urlfetch.make_fetch_call(rpc, url, headers=dict(Authorization=u'Bearer %s' % access_token))
        return rpc
Example #12
    def send(self):
        """ Attempt to send the WebhookRequest.

        Returns:
            NotificationResponse: content/status_code
        """
        # Build the request
        headers = {
            'Content-Type': 'application/json',
            'X-TBA-Version': '{}'.format(WEBHOOK_VERSION)
        }
        message_json = self.json_string()
        # Generate checksum
        headers['X-TBA-Checksum'] = self._generate_webhook_checksum(
            message_json)

        from google.appengine.api import urlfetch
        rpc = urlfetch.create_rpc()

        from tbans.models.requests.notifications.notification_response import NotificationResponse
        try:
            urlfetch.make_fetch_call(rpc,
                                     self.url,
                                     payload=message_json,
                                     method=urlfetch.POST,
                                     headers=headers)
            return NotificationResponse(200, None)
        except Exception, e:
            # https://cloud.google.com/appengine/docs/standard/python/refdocs/google.appengine.api.urlfetch
            return NotificationResponse(500, str(e))
Example #13
def make_fetch(url, data=None, headers=None, method=None):
    try:
        from google.appengine.api import urlfetch
    except ImportError:
        raise EnvironmentError('The App Engine APIs are not available.')

    urlfetch.set_default_fetch_deadline(60)
    rpc = urlfetch.create_rpc()
    if method == "POST":
        urlfetch.make_fetch_call(rpc, url, method="POST",
                                 headers=headers, payload=data,
                                 validate_certificate=True)
    elif method == "GET":
        urlfetch.make_fetch_call(rpc, url)

    try:
        result = rpc.get_result()
        if result.status_code == 200:
            logging.info("Successful urlfetch for {}".format(method))
            text = result.content
            return text
        else:
            logging.error('Returned status code {}'.format(result.status_code))
            logging.error(result)

    except urlfetch.DownloadError as e:
        logging.error(str(e))

    except urlfetch.Error as e:
        logging.error(str(e))
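A hypothetical use of the helper above (the URL and the caller's logic are illustrative, not from the source):

    content = make_fetch('https://api.example.com/items', method='GET')
    if content is not None:
        logging.info('Fetched %d bytes', len(content))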
Example #14
    def _foke_http_post(self, dry_run_fail_rate):
        """foke http request from GCS foke server

    Args:
      dry_run_fail_rate: float, True/False rate 0.0 ~ 1.0

    Returns:
      True or False depend by fail rate
    """

        rpc = urlfetch.create_rpc()
        urlfetch.make_fetch_call(rpc,
                                 url='http://104.154.53.75',
                                 method=urlfetch.POST)

        try:
            result = rpc.get_result()
            r = true_false_pick(dry_run_fail_rate)
            if r:
                return 200, result.content

            else:
                return 400, '{"message": "fake fail"}'

        except urlfetch.DownloadError, e:
            return 400, e.message
Example #15
def unsubscribe(email):
    """Unsubscribes an existing user from the list
    """
    json_payload = {
        "status": "unsubscribed"
    }
    headers = {
        # The credentials after "username:" are redacted in the source.
        "Authorization": "Basic %s" % base64.b64encode("username:******"),
        "Content-Type": "application/x-www-form-urlencoded"
    }
    # Subscribers are stored as endpoints using MD5 hashing on lowercase emails
    formatted_email = email.lower()
    member_code = hashlib.md5(formatted_email).hexdigest()

    url = API_ROOT + 'lists/' + config.mailchimp_list_id + '/members/' + member_code
    rpc = urlfetch.create_rpc()
    urlfetch.make_fetch_call(
        rpc, url=url,
        payload=json.dumps(json_payload),
        method=urlfetch.PATCH,
        headers=headers)
    try:
        result = rpc.get_result()
        if result.status_code == 400:
            logging.warning(result.content)
        elif result.status_code == 200:
            logging.info('User unsubscribed from Mailchimp: {}'.format(email))
    except urlfetch.DownloadError:
        # Request timed out or failed.
        logging.error('Mailchimp list unsubscribe failed.')
        result = None
    return result
Example #16
    def post(self):
        def handle_result(rpc, id):
            result = rpc.get_result()
            if result.status_code == 200:
                update_station(id, result.content)
            elif result.status_code == 403:
                logging.error('403 fetching station')
                mail.send_mail("bug@" + app_identity.get_application_id() + ".appspotmail.com",
                               to="*****@*****.**",
                               subject="Access denied",
                               body="Access denied for app " + app_identity.get_application_id())
            else:
                logging.error(str(result.status_code) + ' fetching station')
                logging.error('Unable to reach webservice ' 
                              + str(result.status_code) 
                              + ' for content : ' 
                              + result.content 
                              + ' for station ' 
                              + id)

        # Use a helper function to define the scope of the callback.
        def create_callback(rpc, id):
            return lambda: handle_result(rpc, id)

        def update_station(id, content):
            soup = BeautifulStoneSoup(content)
            #try:
            parsed_station = soup.station
            to_update = stations[int(id)]
            to_update.availableBikes = int(parsed_station.available.string)
            to_update.freeSlots = int(parsed_station.free.string)
            to_update.payment = bool(int(parsed_station.ticket.string))   
            #except:
            #   logging.error('error parsing station with content ' + content)
            #  mail.send_mail("bug@" + app_identity.get_application_id() + ".appspotmail.com",
            #                to="*****@*****.**",
            #               subject="Parsing Error",
            #              body='Error while parsing ' + id + ' with content ' + content)

        url = self.request.get('update_url')
        update_ids = [id for id in self.request.get('update_ids').split('-')]
        stations = get_stations()
        # Should not happen as we check before launching update
        if stations is None:
            return
        rpcs = []
        try:
            for id in update_ids:
                rpc = urlfetch.create_rpc(deadline = 10)
                rpc.callback = create_callback(rpc, id)
                urlfetch.make_fetch_call(rpc, url + '/' + id)
                rpcs.append(rpc)
            for rpc in rpcs:
                rpc.wait()
            memcache.set('stations', stations)
        except urlfetch.DownloadError:
            logging.error('Time out fetching stations')
            self.error(500)
            return
        self.response.out.write("<html><body><p>OK</p></body></html>")
Example #17
    def _fetch_scores(self):
        response = {}
        result = {}
        rpc = urlfetch.create_rpc()

        urlfetch.make_fetch_call(rpc, URL_SCOREBOARD)
        try:
            response = rpc.get_result()
            if response.status_code == HTTP_OK:
                counter = 100
                length = 0
                text = response.content

                while length != len(text):
                    length = len(text)
                    text = text.replace(',,', ',0,')
                    
                    # Prevent infinite loops
                    if counter != 0:
                        counter -= 1
                    else:
                        break

                result = text
            else:
                result = {
                    'status_code': response.status_code
                    }
        except urlfetch.DownloadError:
            result = {
                'Error': 'An unexpected error occurred.'
                }

        return result
Example #18
    def load(self, realm, frealm, toonlist, data, groupstats):

        classes = ClassEntry.get_mapping()
        oauth_headers = get_oauth_headers()

        # Request all of the toon data from the blizzard API and determine the
        # group's ilvls, armor type counts and token type counts.  subs are not
        # included in the counts, since they're not really part of the main
        # group. The Blizzard API has a limit of 100 calls per second. Keep a
        # count and if we hit 100 calls, we'll wait a half second before
        # continuing. If someone has more than 100 toons in their list, they
        # should be slapped.
        toon_count = 0
        for toon in toonlist:
            toonname = toon.name
            toonrealm = toon.realm
            if toonrealm == realm:
                toonfrealm = frealm
            else:
                toonfrealm = Realm.query_realm(toonrealm)

            # TODO: this object can probably be a class instead of another dict
            newdata = dict()
            data.append(newdata)

            # a realm is received in the json data from the API, but we need to
            # pass the normalized value to the next stages.  ignore this field
            # from the data.
            newdata['toonrealm'] = toonrealm
            newdata['toonfrealm'] = toonfrealm
            newdata['status'] = toon.status
            newdata['role'] = toon.role

            url = 'https://us.api.blizzard.com/profile/wow/character/%s/%s?namespace=profile-us&locale=en_US' % (
                toonrealm, urllib.quote(toonname.encode('utf-8').lower()))

            # create the rpc object for the fetch method.  the deadline
            # defaults to 5 seconds, but that seems to be too short for the
            # Blizzard API site sometimes.  setting it to 10 helps a little
            # but it makes page loads a little slower.
            rpc = urlfetch.create_rpc(10)
            rpc.callback = self.create_callback(rpc, toonname, newdata,
                                                groupstats, classes)
            urlfetch.make_fetch_call(rpc, url, headers=oauth_headers)
            newdata['rpc'] = rpc

            toon_count = toon_count + 1
            if toon_count > 100:
                time.sleep(0.5)
                toon_count = 0

        # Now that all of the RPC calls have been created, loop through the data
        # dictionary one more time and wait for each fetch to be completed. Once
        # all of the waits finish, then we have all of the data from the
        # Blizzard API and can loop through all of it and build the page.
        start = time.time()
        for entry in data:
            entry['rpc'].wait()
        end = time.time()
        logging.info("Time spent retrieving data: %f seconds" % (end - start))
Example #19
def track(event, properties=None):
    """
        A simple function for asynchronously logging to the mixpanel.com API on App Engine 
        (Python) using RPC URL Fetch object.
        @param event: The overall event/category you would like to log this data under
        @param properties: A dictionary of key-value pairs that describe the event
        See http://mixpanel.com/api/ for further detail. 
        @return Instance of RPC Object
        
        # Example usage:
        track("invite-friends",
             {"method": "email", "number-friends": "12", "ip": "123.123.123.123"})
    """
    if properties is None:
        properties = {}
    token = "0ea4f90f7b8157d6dec15b1b26c39b38"
    if "token" not in properties:
        properties["token"] = token
    
    params = {"event": event, "properties": properties}
        
    data = base64.b64encode(json.dumps(params))
    request = "http://api.mixpanel.com/track/?data=" + data
    
    rpc = urlfetch.create_rpc()
    urlfetch.make_fetch_call(rpc, request)
    
    return rpc
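Because the RPC is returned without being waited on, a handler that must ensure the tracking call completes before it returns would hold on to it (hypothetical usage):

    rpc = track("invite-friends",
                {"method": "email", "number-friends": "12"})
    # ... do other work while the fetch runs ...
    rpc.wait()  # block until the tracking call completes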
Example #20
 def run(self, tags):
     rpc_list = [urlfetch.create_rpc() for x in xrange(len(tags))]
     for rpc, tag in zip(rpc_list, tags):
         self._fetch_async(rpc, tag)
         time.sleep(0.5)  # prevent to be blocked
     return reduce(lambda s, rpc: s + self._parse_result(rpc.get_result()),
                   rpc_list, [])
Example #21
def get_new_dms(user):
  # find all dms newer than user.last_dm_id
  api = get_api(user)
  dms = api.GetDirectMessages(since_id=user.last_dm_id)
  # save to db
  # update user
  rpcs = []
  #Reverse the order, so newest is sent last
  for dm in reversed(dms):
    # save & send
    if dm.id > user.last_dm_id:
      user.last_dm_id = dm.id
    # save a copy
    db.put(DirectMessage(message=dm.text, tid=dm.id, fromusername=dm.sender_screen_name, fromuserid=dm.sender_id, \
      created_at=datetime.fromtimestamp(dm.created_at_in_seconds), user=user))
    # send the tweet
    rpc = urlfetch.create_rpc()
    send_message(user, "Direct Message from %s: %s" % (dm.sender_screen_name, dm.text), async_rpc=rpc)
    logging.debug('Appending RPC')
    rpcs.append(rpc)
  
  if wait_on_rpc_response(user, rpcs):
    # all good, save user. if it failed we don't save, so the next fetch
    # starts where it left off from
    user.put()
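The wait_on_rpc_response helper used here (and in the next example) is not shown. A minimal sketch, assuming it should report whether every queued send completed successfully:

    def wait_on_rpc_response(user, rpcs):
        # Wait on each send RPC; any fetch error or non-200 counts as failure.
        for rpc in rpcs:
            try:
                if rpc.get_result().status_code != 200:
                    return False
            except urlfetch.Error:
                logging.warning('Send failed for user %s', user.key())
                return False
        return True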
Example #22
def get_new_tweets(user):
  # Find all tweets newer than user.last_tweet_id and return
  # for now, keep the default max (20)
  # later, maybe keep polling all to keep above this.  
  api = get_api(user)
  tweets =  api.GetFriendsTimeline(since_id=user.last_tweet_id)
  rpcs = []
  #Reverse the order, so newest is sent last
  for tweet in reversed(tweets):
    # save & send
    if tweet.id > user.last_tweet_id:
      user.last_tweet_id = tweet.id
    # save a copy
    db.put(Tweet(message=tweet.text, tid=tweet.id, fromusername=tweet.user.name, fromscreenname=tweet.user.screen_name, fromuserid=tweet.user.id, \
      created_at=datetime.fromtimestamp(tweet.created_at_in_seconds), user=user))
    # send the tweet
    rpc = urlfetch.create_rpc()
    send_message(user, "%s (%s): %s" % (tweet.user.name, tweet.user.screen_name, tweet.text), async_rpc=rpc)
    logging.debug('Appending RPC')
    rpcs.append(rpc)

  
  if wait_on_rpc_response(user, rpcs):
    # all good, save user. if it failed we don't save, so the next fetch
    # starts where it left off from
    user.put()
Example #23
def _StartBackendSearchCall(mr,
                            query_project_names,
                            shard_key,
                            invalidation_timestep,
                            deadline=None,
                            failfast=True):
    """Ask a backend to query one shard of the database."""
    shard_id, subquery = shard_key
    backend_host = modules.get_hostname(module='besearch')
    url = 'http://%s%s' % (backend_host,
                           framework_helpers.FormatURL(
                               mr,
                               urls.BACKEND_SEARCH,
                               projects=','.join(query_project_names),
                               q=subquery,
                               start=0,
                               num=mr.start + mr.num,
                               logged_in_user_id=mr.auth.user_id or 0,
                               me_user_id=mr.me_user_id,
                               shard_id=shard_id,
                               invalidation_timestep=invalidation_timestep))
    logging.info('\n\nCalling backend: %s', url)
    rpc = urlfetch.create_rpc(deadline=deadline or settings.backend_deadline)
    headers = _MakeBackendRequestHeaders(failfast)
    # follow_redirects=False is needed to avoid a login screen on googleplex.
    urlfetch.make_fetch_call(rpc, url, follow_redirects=False, headers=headers)
    return rpc
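_MakeBackendRequestHeaders is not shown in the snippet. A plausible sketch, assuming it only sets App Engine's fail-fast scheduling header:

    def _MakeBackendRequestHeaders(failfast):
        headers = {}
        if failfast:
            # Ask the App Engine scheduler to fail immediately instead of
            # queueing the request behind busy backend instances.
            headers['X-AppEngine-FailFast'] = 'Yes'
        return headers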
Example #24
def track_event(event,properties=None):
	"""
		A simple function for asynchronously logging to the mixpanel.com API on App Engine 
		(Python) using RPC URL Fetch object.
		@param event: The overall event/category you would like to log this data under
		@param properties: A dictionary of key-value pairs that describe the event
		See http://mixpanel.com/api/ for further detail. 
		@return Instance of RPC Object
	"""
	if properties is None:
		properties = {}
	if 'token' not in properties:
		properties['token'] = token
	
	params = {"event": event, "properties": properties}
	
	
	logging.info(params)
		
	data = base64.b64encode(json.dumps(params))
	request = "http://api.mixpanel.com/track/?data=" + data
	
	rpc = urlfetch.create_rpc()
	urlfetch.make_fetch_call(rpc, request)

	return rpc
Example #25
File: analytics.py  Project: aalloul/Ed
    def upload_input_html(self):
        if self.html_input is None:
            logger.info("No input to HTML text found")
            return

        if self._storage_token is None:
            self._storage_token = self._refresh_token(self.storage_scope)

        logger.debug("Token = {}".format(self._storage_token))
        rpc = create_rpc(deadline=300)  # TODO Change this to acceptable delay

        url = "https://www.googleapis.com/upload/storage/v1/b/{bucket}" \
              "/o?uploadType=media&name={name}".format(
                bucket=self.storage_bucket,
                name=self.storage_html_input + "/{}.json".format(
                    self.file_name)
                )

        logger.info("make_fetch_all started...")
        make_fetch_call(rpc,
                        url,
                        method=POST,
                        headers={
                            "Content-Type":
                            "application/json",
                            "Authorization":
                            self.token.format(access_token=self._storage_token)
                        },
                        payload=dumps(self.html_input))
        return rpc
Example #26
def check_failed_repos():
    """Check repository name consistency between Carto and GitHub."""
    failed_repos = []
    all_repos = get_all_repos()
    repos = {}
    headers = {
        'User-Agent': 'VertNet',  # Authenticate as VertNet
        'Accept': 'application/vnd.github.v3+json',
        'Authorization': 'token {0}'.format(apikey('gh'))
    }

    for repo in all_repos:
        orgname = repo[0]
        reponame = repo[1]

        if orgname is None or reponame is None:
            failed_repos.append(repo)
            continue

        rpc = urlfetch.create_rpc()
        url = '/'.join([ghb_url, 'repos', orgname, reponame])
#        print 'url: %s' % url
        urlfetch.set_default_fetch_deadline(URLFETCH_DEADLINE)
        urlfetch.make_fetch_call(rpc, url, headers=headers)
        repos[repo] = rpc
        # Note: calling get_result() here blocks on each fetch in turn, so the
        # RPCs are effectively serial.
        result = rpc.get_result()
        content = json.loads(result.content)
        try:
            name = content['name']
        except KeyError, e:
            logging.info('GitHub repository %s not found' % url)
#            print 'KeyError: %s' % e 
            failed_repos.append((orgname,reponame))
Example #27
  def make_async_request(self, url, token="", secret="", additional_params=None,
                         protected=False, method=urlfetch.GET, headers={}):
    """Make Request.

    Make an authenticated request to any OAuth protected resource.

    If protected is equal to True, the Authorization: OAuth header will be set.

    A urlfetch response object is returned.
    """

    payload = self.prepare_request(url, token, secret, additional_params,
                                   method)

    if method == urlfetch.GET:
      url = "%s?%s" % (url, payload)
      payload = None

    if protected:
      headers["Authorization"] = "OAuth"

    rpc = urlfetch.create_rpc(deadline=10.0)
    urlfetch.make_fetch_call(rpc, url, method=method, headers=headers,
                             payload=payload)
    return rpc
Example #28
    def _track_call(self, api_action, api_details=''):
        # Creates asynchronous call
        rpc = urlfetch.create_rpc()

        analytics_id = Sitevar.get_by_id("google_analytics.id")
        if analytics_id is None:
            logging.warning("Missing sitevar: google_analytics.id. Can't track API usage.")
        else:
            GOOGLE_ANALYTICS_ID = analytics_id.contents['GOOGLE_ANALYTICS_ID']
            params = urllib.urlencode({
                'v': 1,
                'tid': GOOGLE_ANALYTICS_ID,
                'cid': '1',
                't': 'event',
                'ec': 'api',
                'ea': api_action,
                'el': api_details,
                'ev': 1,
                'ni': 1
            })

            # Sets up the call
            analytics_url = 'http://www.google-analytics.com/collect'
            urlfetch.make_fetch_call(rpc=rpc,
                url=analytics_url,
                payload=params,
                method=urlfetch.POST,
                headers={'Content-Type': 'application/x-www-form-urlencoded'})
Example #29
    def get(self):

        # Prepare RPC.
        #
        # We set the callback attribute of the RPC object after the RPC object
        # has been created, so we can pass the RPC object to
        # create_callback().
        rpc = urlfetch.create_rpc()
        rpc.callback = create_callback(self, rpc)
        urlfetch.make_fetch_call(rpc, "http://ae-book.appspot.com/blog/atom.xml")

        # Do other things.
        time.sleep(2)

        # Tell RPCs to finish up.
        rpc.wait()

        self.response.write(
            """
        <p>Try these:</p>
        <ul>
          <li><a href="/">a simple async call</a></li>
          <li><a href="/callbackobj">using a callback object</a></li>
          <li><a href="/callbackfunc">using a callback function</a></li>
        </ul>
        """
        )

        self.response.write("<p>The time is: %s</p>" % str(datetime.datetime.now()))
Example #30
 def requestDateInfo(self):
     rpc = urlfetch.create_rpc()
     urlfetch.make_fetch_call(
         rpc,
         'http://api.timezonedb.com/v2/get-time-zone?key=%s&format=json&by=zone&zone=%s'
         % (Crenditals.TIMEZONEDB_API_KEY, self.timezone))
     return rpc
Example #31
def check_sites(site_configurations):

    # Query all the sites we've been given to check
    request_rpc_futures = []
    for site in site_configurations:
        rpc = urlfetch.create_rpc()
        urlfetch.make_fetch_call(rpc, site.target)
        request_rpc_futures.append((
            site,
            rpc,
        ))

    # Process the results and write out the status update
    status_futures = []
    for site, rpc in request_rpc_futures:
        status = SiteCheck(parent=site.key(), status=SiteStatus.ONLINE)

        try:
            result = rpc.get_result()

            if result.status_code != 200:
                status.status = SiteStatus.ERROR

            if result.content != "":
                status.status = SiteStatus.UNEXPECTED

        except urlfetch.DownloadError:
            status.status = SiteStatus.OFFLINE

        if not SiteStatus.available(status.status):
            pass  # TODO Send an alert if required

        status_futures.append(status.put_async())

    ndb.Future.wait_all(status_futures)
Example #32
def track(event, properties=None):
    """
        A simple function for asynchronously logging to the mixpanel.com API on App Engine 
        (Python) using RPC URL Fetch object.
        @param event: The overall event/category you would like to log this data under
        @param properties: A dictionary of key-value pairs that describe the event
        See http://mixpanel.com/api/ for further detail. 
        @return Instance of RPC Object
        
        # Example usage:
        track("invite-friends",
             {"method": "email", "number-friends": "12", "ip": "123.123.123.123"})
    """
    if properties is None:
        properties = {}
    token = "0ea4f90f7b8157d6dec15b1b26c39b38"
    if "token" not in properties:
        properties["token"] = token

    params = {"event": event, "properties": properties}

    data = base64.b64encode(json.dumps(params))
    request = "http://api.mixpanel.com/track/?data=" + data

    rpc = urlfetch.create_rpc()
    urlfetch.make_fetch_call(rpc, request)

    return rpc
Example #33
 def query_by_bounds(self, query_list):
     # http://api.yelp.com/v2/search?term=food&bounds=37.900000,-122.500000|37.788022,-122.399797&limit=3
     # sw_lat, sw_lon, ne_lat, ne_lon, offset, term, limit        
     url_list = []
     
     for para in query_list:
         sw_lat, sw_lon, ne_lat, ne_lon, offset, term, limit = para
         url_params = {
             'sort': 2,
             'term': term.replace(' ', '+'),
             'bounds': "{},{}|{},{}".format(sw_lat, sw_lon, ne_lat, ne_lon),
             'limit': limit,
             'offset': offset,            
         }
         url_list.append(self.sign_url(API_HOST, SEARCH_PATH, url_params))
     
     rpc_list = []
     
     for url in url_list:
         rpc = urlfetch.create_rpc(deadline=10)
         urlfetch.make_fetch_call(rpc, url)
         rpc_list.append(rpc)
         
     for rpc in rpc_list:
         rpc.wait()
     
     response = []
     
     for rpc in rpc_list:
         result = rpc.get_result()
         if result.status_code == 200:
             response.append(result.content)
                         
     return response
Example #34
    def send(self, msg):

        rpc = urlfetch.create_rpc()
        urlfetch.make_fetch_call(rpc,
                                 self.addr,
                                 payload=msg,
                                 method=urlfetch.PUT)
Example #35
    def _https_connection_gae(self,
                              method,
                              relative_url,
                              query_dict,
                              body=None):
        from google.appengine.api import urlfetch
        if (method == "GET"):
            method = urlfetch.GET
        elif (method == "POST"):
            method = urlfetch.POST
        elif (method == "PUT"):
            method = urlfetch.PUT
        elif (method == "DELETE"):
            method = urlfetch.DELETE

        header = self._create_oauth_header(query_dict)
        headers = {'Authorization': header}
        if (body):
            headers["Content-Type"] = "text/xml"

        url = self._get_url(relative_url)

        rpc = urlfetch.create_rpc(deadline=10.0)
        urlfetch.make_fetch_call(rpc,
                                 url,
                                 method=method,
                                 headers=headers,
                                 payload=body)

        return rpc.get_result().content
Example #36
File: utils.py  Project: kaste/ndb-x
def urlfetch(url,
             payload=None,
             method='GET',
             headers={},
             allow_truncated=False,
             follow_redirects=True,
             validate_certificate=None,
             deadline=None,
             callback=None):
    fut = ndb.Future()
    rpc = _urlfetch.create_rpc(deadline=deadline, callback=callback)
    _urlfetch.make_fetch_call(rpc,
                              url,
                              payload=payload,
                              method=method,
                              headers=headers,
                              allow_truncated=allow_truncated,
                              follow_redirects=follow_redirects,
                              validate_certificate=validate_certificate)

    def _on_completion():
        try:
            result = rpc.get_result()
        except Exception, err:
            _, _, tb = sys.exc_info()
            fut.set_exception(err, tb)
        else:
            fut.set_result(result)
Example #37
def RequestName(key):
    URL = "%s/Key2Name/" % AlarmUrl()
    logging.info('Key request send for %s to URL %s' % (key, URL))
    rpc = urlfetch.create_rpc()
    message = key
    # send the request to an SL object
    urlfetch.make_fetch_call(rpc, URL, payload=message, method="POST")
    # Note: the RPC is never waited on, so the fetch may not complete if the
    # handler returns before it finishes.
Example #38
    def urlfetch(
        self,
        url,
        payload=None,
        method="GET",
        headers={},
        allow_truncated=False,
        follow_redirects=True,
        validate_certificate=None,
        deadline=None,
        callback=None,
    ):
        from google.appengine.api import urlfetch

        rpc = urlfetch.create_rpc(deadline=deadline, callback=callback)
        urlfetch.make_fetch_call(
            rpc,
            url,
            payload=payload,
            method=method,
            headers=headers,
            allow_truncated=allow_truncated,
            follow_redirects=follow_redirects,
            validate_certificate=validate_certificate,
        )
        result = yield rpc
        raise tasklets.Return(result)
Example #39
 def fetch_urls(cls,url_list):
   rpcs = []
   for url in url_list:
     rpc = urlfetch.create_rpc(deadline=5.0)
     urlfetch.make_fetch_call(rpc, url, method=urlfetch.HEAD)
     rpcs.append(rpc)
     
   result = {}
   while len(rpcs) > 0:
     rpc = apiproxy_stub_map.UserRPC.wait_any(rpcs)
     rpcs.remove(rpc)
     request_url = rpc.request.url()
     try:
       final_url = rpc.get_result().final_url
     except AttributeError:
       final_url = request_url
     except (DownloadError,InvalidURLError,apiproxy_errors.DeadlineExceededError):
       final_url  = None        
     except UnicodeDecodeError: #Funky url with very evil characters
       final_url = unicode(rpc.get_result().final_url,'utf-8')
       
     result[request_url] = final_url
   
   logging.info('Returning results: %s' %result)
   return result
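The snippet above assumes several imports that are not shown; they would come from the SDK roughly as follows (a sketch):

    from google.appengine.api import apiproxy_stub_map, urlfetch
    from google.appengine.api.urlfetch_errors import DownloadError, InvalidURLError
    from google.appengine.runtime import apiproxy_errors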
Example #40
    def get(self):

        # Prepare RPC.
        #
        # We set the callback attribute of the RPC object after the RPC object
        # has been created, so we can pass the RPC object to
        # create_callback().
        rpc = urlfetch.create_rpc()
        rpc.callback = create_callback(self, rpc)
        urlfetch.make_fetch_call(rpc,
                                 'http://ae-book.appspot.com/blog/atom.xml')

        # Do other things.
        time.sleep(2)

        # Tell RPCs to finish up.
        rpc.wait()

        self.response.write('''
        <p>Try these:</p>
        <ul>
          <li><a href="/">a simple async call</a></li>
          <li><a href="/callbackobj">using a callback object</a></li>
          <li><a href="/callbackfunc">using a callback function</a></li>
        </ul>
        ''')

        self.response.write('<p>The time is: %s</p>' %
                            str(datetime.datetime.now()))
Example #41
def RequestName(key):
    URL = "%s/Key2Name/" % AlarmUrl()
    logging.info('Key request send for %s to URL %s' % (key, URL))
    rpc = urlfetch.create_rpc()
    message = key
    # send the request to an SL object
    urlfetch.make_fetch_call(rpc, URL, payload=message, method="POST")
Example #42
File: analytics.py  Project: aalloul/Ed
    def upload_images(self):

        if self._storage_token is None:
            self._storage_token = self._refresh_token(self.storage_scope)

        logger.debug("Token = {}".format(self.storage_scope))

        rpc = create_rpc(deadline=300)  # TODO Change this to acceptable delay

        url = "https://www.googleapis.com/upload/storage/v1/b/{bucket}" \
              "/o?uploadType=media&name={name}".format(
                    bucket=self.storage_bucket,
                    name=self.storage_images + "/{}.jpeg".format(self.file_name)
                )

        make_fetch_call(rpc,
                        url,
                        method=POST,
                        headers={
                            "Content-Type":
                            "image/jpeg",
                            "Authorization":
                            self.token.format(access_token=self._storage_token)
                        },
                        payload=b64decode(self.image))

        return rpc
Example #43
    def send(self):
        """ Attempt to send the WebhookRequest.

        Returns:
            NotificationResponse: content/status_code
        """
        # Build the request
        headers = {
            'Content-Type': 'application/json',
            'X-TBA-Version': '{}'.format(WEBHOOK_VERSION)
        }
        message_json = self.json_string()
        # Generate checksum
        headers['X-TBA-Checksum'] = self._generate_webhook_checksum(message_json)

        from google.appengine.api import urlfetch
        rpc = urlfetch.create_rpc()

        from tbans.models.requests.notifications.notification_response import NotificationResponse
        try:
            urlfetch.make_fetch_call(rpc, self.url, payload=message_json, method=urlfetch.POST, headers=headers)
            return NotificationResponse(200, None)
        except Exception, e:
            # https://cloud.google.com/appengine/docs/standard/python/refdocs/google.appengine.api.urlfetch
            return NotificationResponse(500, str(e))
Example #44
    def _pages(self, soup, html, page1_url):

        base_url = '/'.join(page1_url.split('/')[:-1]) + '/%s.html'

        # a <select> tag has options that each points to a page
        opts = soup.find('select', class_='m').find_all('option')
        urls = [base_url % opt['value'] for opt in opts]

        # Page 1 has already been fetched (stored in this html param, duh!)
        # so let's save ourselves an http request
        pages_htmls = [html]
        urls = urls[1:-1]  # also remove last one since it's a comments page

        rpcs = []
        for url in urls:
            rpc = urlfetch.create_rpc()
            urlfetch.make_fetch_call(rpc, url)
            rpcs.append(rpc)

        # Finish all RPCs
        for rpc in rpcs:
            result = rpc.get_result()
            if result.status_code != 200:
                # TODO: should retry instead of panicking
                raise PyError(result.content)
            pages_htmls.append(result.content)

        returns = []
        for page_html in pages_htmls:
            soup = BeautifulSoup(page_html)
            img_url = soup.find('div', id='viewer').find('img').attrs['src']
            returns.append(img_url)
        return returns
Example #45
 def __fetch(self, url):
     rpc = urlfetch.create_rpc()
     promise = Promise(lambda: rpc.wait())
     rpc.callback = lambda: promise.resolve(rpc.get_result())
     full_url = self.base_url + url
     urlfetch.make_fetch_call(rpc, full_url)
     return promise
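The Promise class is not shown. A minimal sketch that fits the usage above, where the waiter blocks via rpc.wait() and the RPC callback resolves the promise:

    class Promise(object):
        def __init__(self, waiter):
            self._waiter = waiter        # callable that blocks until resolution
            self._value = None
            self._resolved = False

        def resolve(self, value):
            self._value = value
            self._resolved = True

        def get(self):
            if not self._resolved:
                self._waiter()           # e.g. rpc.wait(), which fires the callback
            return self._value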
Example #46
    def post(self):
        current_user = users.get_current_user().email()
        name = self.request.get('name')
        cover_url = self.request.get('cover_url')
        category = self.request.get('category')

        form_fields = {
            'user_id': current_user,
            'house_name': name,
            'cover_url': cover_url,
            'category': category
        }
        print form_fields
        form_data = urllib.urlencode(form_fields)
        print form_data
        rpc = urlfetch.create_rpc()
        url = cfg.SERVICE_URL + "/service-createhouse?" + form_data
        print url
        urlfetch.make_fetch_call(rpc, url)
        result = rpc.get_result()
        print result.content
        data = json.loads(result.content)
        if data['status']:
            self.redirect('/house/' + name)
        else:
            self.redirect('/error')
Example #47
 def run(self, tags):
   rpc_list = [urlfetch.create_rpc() for x in xrange(len(tags))]
   for rpc, tag in zip(rpc_list, tags):
     self._fetch_async(rpc, tag)
     time.sleep(0.5) # prevent to be blocked
   return reduce(lambda s,rpc: s + self._parse_result(rpc.get_result()),
       rpc_list, [])
Example #48
def get_image(xmin, ymin, xsize, ysize, width, height):
    params = urllib.urlencode({
        'xmin': xmin,
        'ymin': ymin,
        'xsize': xsize,
        'ysize': ysize,
        'width': width,
        'height': height,
    })
    for i in range(3):  # Retries
        instance_id = hash(params) % NUM_BACKENDS
        url = urlparse.urljoin(
            backends.get_url('renderer', instance=instance_id),
            '/backend/render_tile?%s' % params)
        rpc = urlfetch.create_rpc(deadline=10.0)
        urlfetch.make_fetch_call(rpc, url)
        try:
            response = yield rpc
            if response.status_code not in (500, 0):
                break
        except (apiproxy_errors.DeadlineExceededError,
                urlfetch.DeadlineExceededError):
            pass
        logging.warn("Backend failed to render tile; retrying")
        # Wait a little before retrying
        time.sleep(0.2)
    assert response.status_code == 200, \
        "Expected status 200, got %s" % response.status_code
    raise tasklets.Return(response.content,
                          int(response.headers['X-Operation-Cost']))
Example #49
    def get(self):
        # [START urlfetch-rpc-callback]
        def handle_result(rpc):
            result = rpc.get_result()
            self.response.write(result.content)
            logging.info("Handling RPC in callback: result {}".format(result))

        urls = ['http://www.google.com',
                'http://www.github.com',
                'http://www.travis-ci.org']
        rpcs = []
        for url in urls:
            rpc = urlfetch.create_rpc()
            rpc.callback = functools.partial(handle_result, rpc)
            urlfetch.make_fetch_call(rpc, url)
            rpcs.append(rpc)

        # ... do other things ...

        # Finish all RPCs, and let callbacks process the results.

        for rpc in rpcs:
            rpc.wait()

        logging.info("Done waiting for RPCs")
Example #50
File: auth.py  Project: maxeng/tweepy-gae
    def get_access_token(self, verifier=None):
        """
        After user has authorized the request token, get access token
        with user supplied verifier.
        """
        try:
            rpc = urlfetch.create_rpc(deadline=10)
            url = self._get_oauth_url('access_token')

            # build request
            request = oauth.OAuthRequest.from_consumer_and_token(
                self._consumer,
                token=self.request_token, http_url=url,
                verifier=str(verifier)
            )
            request.sign_request(self._sigmethod, self._consumer, self.request_token)

            # send request
            urlfetch.make_fetch_call(rpc, 
                                     url, 
                                     headers=request.to_header(), 
                                     )
            resp = rpc.get_result()
            self.access_token = oauth.OAuthToken.from_string(resp.content)
            return self.access_token
        except Exception, e:
            raise TweepError(e)
Example #51
  def make_async_request(self, url, token="", secret="", additional_params=None,
                         protected=False, method=urlfetch.GET, headers={}):
    """Make Request.

    Make an authenticated request to any OAuth protected resource.

    If protected is equal to True, the Authorization: OAuth header will be set.

    A urlfetch response object is returned.
    """

    payload = self.prepare_request(url, token, secret, additional_params,
                                   method)

    if method == urlfetch.GET:
      url = "%s?%s" % (url, payload)
      payload = None

    if protected:
      headers["Authorization"] = "OAuth"

    rpc = urlfetch.create_rpc(deadline=10.0)
    urlfetch.make_fetch_call(rpc, url, method=method, headers=headers,
                             payload=payload)
    return rpc
Example #52
File: auth.py  Project: maxeng/tweepy-gae
    def get_xauth_access_token(self, username, password):
        """
        Get an access token from an username and password combination.
        In order to get this working you need to create an app at
        http://twitter.com/apps, after that send a mail to [email protected]
        and request activation of xAuth for it.
        """
        try:
            rpc = urlfetch.create_rpc(deadline=10)
            url = self._get_oauth_url('access_token', secure=True) # must use HTTPS
            request = oauth.OAuthRequest.from_consumer_and_token(
                oauth_consumer=self._consumer,
                http_method='POST', http_url=url,
                parameters = {
                    'x_auth_mode': 'client_auth',
                    'x_auth_username': username,
                    'x_auth_password': password
                }
            )
            request.sign_request(self._sigmethod, self._consumer, None)
            urlfetch.make_fetch_call(rpc, 
                                     url, 
                                     headers=request.to_header(), 
                                     )
            resp = rpc.get_result()
            self.access_token = oauth.OAuthToken.from_string(resp.content)
            return self.access_token
        except Exception, e:
            raise TweepError(e)
Example #53
 def set_up(self):
     self._rpc = urlfetch.create_rpc(deadline=40)
     urlfetch.make_fetch_call(self._rpc,
                              self._url,
                              self._post_data,
                              method=urlfetch.POST,
                              validate_certificate=True)
Example #54
    def _fetch_scores(self):
        response = {}
        result = {}
        rpc = urlfetch.create_rpc()

        urlfetch.make_fetch_call(rpc, URL_SCOREBOARD)
        try:
            response = rpc.get_result()
            if response.status_code == HTTP_OK:
                counter = 100
                length = 0
                text = response.content

                while length != len(text):
                    length = len(text)
                    text = text.replace(',,', ',0,')

                    # Prevent infinite loops
                    if counter != 0:
                        counter -= 1
                    else:
                        break

                result = text
            else:
                result = {'status_code': response.status_code}
        except urlfetch.DownloadError:
            result = {'Error': 'An unexpected error occurred.'}

        return result
Example #55
    def make_async_request(self, url, token="", secret="", additional_params=None,
                   protected=False, method=urlfetch.GET):
        """Make Request.

        Make an authenticated request to any OAuth protected resource.

        If protected is equal to True, the Authorization: OAuth header will be set.

        A urlfetch response object is returned.
        """
        
        (scm, netloc, path, params, query, _) = urlparse.urlparse(url)
        url = None
        query_params = None
        if query:
            query_params = dict([(k, v) for k, v in parse_qsl(query)])
            if additional_params is None:
                additional_params = {}
            additional_params.update(query_params)
        url = urlparse.urlunparse(('https', netloc, path, params, '', ''))
        
        payload = self.prepare_request(url, token, secret, additional_params, method)

        if method == urlfetch.GET:
            url = "%s?%s" % (url, payload)
            payload = None
        headers = {"Authorization": "OAuth"} if protected else {}

        rpc = urlfetch.create_rpc(deadline=10.0)
        urlfetch.make_fetch_call(rpc, url, method=method, headers=headers, payload=payload)
        return rpc
Example #56
File: main.py  Project: Arachnid/exabrot
def get_image(xmin, ymin, xsize, ysize, width, height):
  params = urllib.urlencode({
      'xmin': xmin,
      'ymin': ymin,
      'xsize': xsize,
      'ysize': ysize,
      'width': width,
      'height': height,
  })
  for i in range(3): # Retries
    instance_id = hash(params) % NUM_BACKENDS
    url = urlparse.urljoin(backends.get_url('renderer', instance=instance_id),
                           '/backend/render_tile?%s' % params)
    rpc = urlfetch.create_rpc(deadline=10.0)
    urlfetch.make_fetch_call(rpc, url)
    try:
      response = yield rpc
      if response.status_code not in (500, 0):
        break
    except (apiproxy_errors.DeadlineExceededError,
            urlfetch.DeadlineExceededError):
      pass
    logging.warn("Backend failed to render tile; retrying")
    # Wait a little before retrying
    time.sleep(0.2)
  assert response.status_code == 200, \
      "Expected status 200, got %s" % response.status_code
  raise tasklets.Return(
      response.content,
      int(response.headers['X-Operation-Cost']))