示例#1
0
    def _getAPIUris(self, serviceRecord):
        """Return RunKeeper's per-user API URI map (from GET /user/).

        The map is cached on the instance for the life of this object, and in
        redis for 24 hours shared across the whole system, since in practice
        the URIs are identical for every user (only the userID key changes).

        :param serviceRecord: record whose auth headers authenticate the request
        :raises APIException: blocking auth failure on 401/403, plain failure otherwise
        """
        if hasattr(self, "_uris"):  # cache these for the life of the batch job at least? hope so
            return self._uris
        else:
            uris_json = redis.get(self._URI_CACHE_KEY)
            if uris_json is not None:
                uris = json.loads(uris_json.decode('utf-8'))
            else:
                response = self._rate_limit("user",
                                            lambda: requests.get("https://api.runkeeper.com/user/",
                                                                 headers=self._apiHeaders(serviceRecord)))
                if response.status_code != 200:
                    if response.status_code in (401, 403):
                        raise APIException("No authorization to retrieve user URLs", block=True, user_exception=UserException(UserExceptionType.Authorization, intervention_required=True))
                    raise APIException("Unable to retrieve user URLs" + str(response))

                uris = response.json()
                # The API returns relative paths - absolutize the string values.
                for key, value in uris.items():
                    if isinstance(value, str):
                        uris[key] = "https://api.runkeeper.com" + value
                # Runkeeper wants you to request these on a per-user basis.
                # In practice, the URIs are identical for every user (only the userID key changes).
                # So, only do it once every 24 hours, across the entire system.
                redis.setex(self._URI_CACHE_KEY, json.dumps(uris), timedelta(hours=24))
            self._uris = uris
            return uris
示例#2
0
    def _getAPIUris(self, serviceRecord):
        """Return RunKeeper's per-user API URI map (from GET /user/).

        Cached on the instance for this object's lifetime and in redis for
        24 hours system-wide: the URIs are identical for every user apart
        from the userID key, so one fetch per day suffices.

        :param serviceRecord: record whose auth headers authenticate the request
        :raises APIException: blocking auth failure on 401/403, plain failure otherwise
        """
        if hasattr(
                self, "_uris"
        ):  # cache these for the life of the batch job at least? hope so
            return self._uris
        else:
            uris_json = redis.get(self._URI_CACHE_KEY)
            if uris_json is not None:
                uris = json.loads(uris_json.decode('utf-8'))
            else:
                response = requests.get(
                    "https://api.runkeeper.com/user/",
                    headers=self._apiHeaders(serviceRecord))
                if response.status_code != 200:
                    if response.status_code in (401, 403):
                        raise APIException(
                            "No authorization to retrieve user URLs",
                            block=True,
                            user_exception=UserException(
                                UserExceptionType.Authorization,
                                intervention_required=True))
                    raise APIException("Unable to retrieve user URLs" +
                                       str(response))

                uris = response.json()
                # The API returns relative paths - absolutize the string values.
                for key, value in uris.items():
                    if isinstance(value, str):
                        uris[key] = "https://api.runkeeper.com" + value
                # Runkeeper wants you to request these on a per-user basis.
                # In practice, the URIs are identical for every user (only the userID key changes).
                # So, only do it once every 24 hours, across the entire system.
                redis.setex(self._URI_CACHE_KEY, json.dumps(uris),
                            timedelta(hours=24))
            self._uris = uris
            return uris
示例#3
0
    def _rate_limit(self, endpoint, req_lambda):
        """Execute *req_lambda* under RunKeeper's rate-limit policy.

        A previously-recorded global limit fails fast; a fresh 429 is either
        a per-user limit (response mentions "user" - only this user bails) or
        a global one, which is recorded in redis so all workers back off.
        """
        if redis.get(self._RATE_LIMIT_KEY) is not None:
            raise APIException("RK global rate limit previously reached on %s" % endpoint, user_exception=UserException(UserExceptionType.RateLimited))
        result = req_lambda()
        if result.status_code != 429:
            return result
        if "user" in result.text:
            # Per-user limit hit: don't halt the entire system, just bail for
            # this user. A user with too many pages of activities will keep
            # hitting this and never sync - a Very Hard Problem to Solve™
            # given that incremental listings aren't available.
            raise APIException("RK user rate limit reached on %s" % endpoint, user_exception=UserException(UserExceptionType.RateLimited))
        # Global limit: preemptively fail all future requests until the limit
        # has surely expired - it's unclear how their rate-limit logic works
        # (when limits reset, etc.), so better safe than sorry.
        #
        # Several parallel rate limits operate at once; try to parse out how
        # long to wait, defaulting to the shortest period observed (15m) if
        # the response can't be parsed.
        backoff = timedelta(minutes=15)
        m = re.search(r"(\d+) (second|minute|hour|day)", result.text)
        if m:
            # Turn e.g. "30 minute" into timedelta(minutes=30.0).
            backoff = timedelta(**{m.group(2) + "s": float(m.group(1))})
        redis.setex(self._RATE_LIMIT_KEY, result.text, backoff)
        raise APIException("RK global rate limit reached on %s" % endpoint, user_exception=UserException(UserExceptionType.RateLimited))
示例#4
0
文件: dropbox.py 项目: alp/tapiriik
    def GenerateUserAuthorizationURL(self, level=None):
        """Build the Dropbox OAuth authorization URL.

        The request token is pickled into redis for 24 hours so the OAuth
        callback can retrieve it; "full" level requests whole-dropbox access,
        anything else is restricted to the app folder.
        """
        wants_full = level == "full"
        if wants_full:
            sess = session.DropboxSession(DROPBOX_FULL_APP_KEY, DROPBOX_FULL_APP_SECRET, "dropbox")
        else:
            sess = session.DropboxSession(DROPBOX_APP_KEY, DROPBOX_APP_SECRET, "app_folder")

        token = sess.obtain_request_token()
        redis.setex("dropbox:oauth:%s" % token.key, pickle.dumps(token), timedelta(hours=24))
        callback = WEB_ROOT + reverse("oauth_return", kwargs={"service": "dropbox", "level": "full" if wants_full else "normal"})
        return sess.build_authorize_url(token, oauth_callback=callback)
示例#5
0
    def GenerateUserAuthorizationURL(self, level=None):
        """Return the Dropbox OAuth authorization URL for this user.

        Stashes the pickled request token in redis (24h expiry) keyed by the
        token key, so the oauth_return view can complete the handshake.
        """
        full_access = level == "full"
        # "full" grants whole-dropbox scope; everything else stays in the app folder.
        credentials = ((DROPBOX_FULL_APP_KEY, DROPBOX_FULL_APP_SECRET, "dropbox")
                       if full_access
                       else (DROPBOX_APP_KEY, DROPBOX_APP_SECRET, "app_folder"))
        sess = session.DropboxSession(*credentials)

        reqToken = sess.obtain_request_token()
        redis.setex("dropbox:oauth:%s" % reqToken.key, pickle.dumps(reqToken), timedelta(hours=24))
        return sess.build_authorize_url(
            reqToken,
            oauth_callback=WEB_ROOT + reverse("oauth_return", kwargs={"service": "dropbox", "level": "full" if full_access else "normal"}))
示例#6
0
 def GenerateUserAuthorizationURL(self, session, level=None):
     """Return the Endomondo OAuth authorization URL.

     Fetches a request token and keeps its secret in redis for 24 hours so
     the oauth_return callback can finish the token exchange.
     """
     callback = WEB_ROOT + reverse("oauth_return", kwargs={"service": "endomondo"})
     oauth = self._oauthSession(callback_uri=callback)
     tokens = oauth.fetch_request_token("https://api.endomondo.com/oauth/request_token")
     redis.setex("endomondo:oauth:%s" % tokens["oauth_token"],
                 tokens["oauth_token_secret"],
                 timedelta(hours=24))
     return oauth.authorization_url("https://www.endomondo.com/oauth/authorize")
示例#7
0
 def _rate_limit(self, endpoint, req_lambda):
     """Run *req_lambda*, failing fast once a 429 has been seen for *endpoint*.

     On a 429 we preemptively fail all future requests for a full day - it's
     unclear how RunKeeper's limits reset, so this is a better-safe-than-sorry
     policy (the maximum limit period appears to be one day).
     """
     limit_key = self._RATE_LIMIT_KEY % endpoint
     if redis.get(limit_key) is not None:
         raise APIException("RK global rate limit previously reached on %s" % endpoint, user_exception=UserException(UserExceptionType.RateLimited))
     result = req_lambda()
     if result.status_code == 429:
         # Record the hit so every worker backs off until it must have expired.
         redis.setex(limit_key, result.text, timedelta(hours=24))
         raise APIException("RK global rate limit reached on %s" % endpoint, user_exception=UserException(UserExceptionType.RateLimited))
     return result
示例#8
0
    def _rate_limit(self, endpoint, req_lambda):
        """Invoke *req_lambda* while enforcing RunKeeper's rate limits.

        Short-circuits if a global limit was previously recorded in redis;
        otherwise classifies a fresh 429 as per-user (response mentions
        "user") or global, recording global hits so all workers back off.
        """
        if redis.get(self._RATE_LIMIT_KEY) is not None:
            raise APIException(
                "RK global rate limit previously reached on %s" % endpoint,
                user_exception=UserException(UserExceptionType.RateLimited))
        resp = req_lambda()
        if resp.status_code != 429:
            return resp
        limited = UserException(UserExceptionType.RateLimited)
        if "user" in resp.text:
            # Per-user limit hit: don't halt the entire system, just bail for
            # this user. Users with very many activity pages may never sync,
            # but that's a Very Hard Problem to Solve™ without incremental
            # listings.
            raise APIException("RK user rate limit reached on %s" % endpoint,
                               user_exception=limited)
        # Global limit: preemptively refuse all requests until the limit must
        # have expired - how their limits reset is unclear, so play it safe.
        # Several parallel limits operate at once; parse the advertised wait
        # from the response if possible, else use the shortest observed (15m).
        backoff = timedelta(minutes=15)
        match = re.search(r"(\d+) (second|minute|hour|day)", resp.text)
        if match:
            # e.g. "30 minute" becomes timedelta(minutes=30.0).
            backoff = timedelta(
                **{"%ss" % match.group(2): float(match.group(1))})
        redis.setex(self._RATE_LIMIT_KEY, resp.text, backoff)
        raise APIException("RK global rate limit reached on %s" % endpoint,
                           user_exception=limited)
示例#9
0
文件: endomondo.py 项目: 7e7/tapiriik
 def GenerateUserAuthorizationURL(self, level=None):
     """Build the Endomondo OAuth authorization URL for this user.

     The request-token secret is held in redis for 24 hours, keyed by the
     oauth_token, so the callback view can complete the exchange.
     """
     oauth = self._oauthSession(
         callback_uri=WEB_ROOT + reverse("oauth_return", kwargs={"service": "endomondo"}))
     tokens = oauth.fetch_request_token("https://api.endomondo.com/oauth/request_token")
     secret_key = 'endomondo:oauth:%s' % tokens["oauth_token"]
     redis.setex(secret_key, tokens["oauth_token_secret"], timedelta(hours=24))
     return oauth.authorization_url("https://www.endomondo.com/oauth/authorize")
示例#10
0
 def Set(self, pk, value, lifetime=None):
     """Pickle *value* into redis under this cache's key for *pk*.

     Expires after *lifetime*; any falsy lifetime falls back to the cache's
     configured default.
     """
     expiry = lifetime or self._lifetime
     redis.setex(self._cacheKey % pk, pickle.dumps(value), expiry)
 def Set(self, pk, value, lifetime=None):
     """Store the pickled *value* in redis for *pk*, expiring after
     *lifetime* (the cache default when falsy)."""
     redis.setex(self._cacheKey % pk, pickle.dumps(value), lifetime or self._lifetime)
示例#12
0
	def Set(self, pk, value):
		"""Cache the pickled *value* in redis under the key for *pk*,
		expiring after the cache's configured lifetime."""
		key = self._cacheKey % pk
		redis.setex(key, pickle.dumps(value), self._lifetime)
示例#13
0
 def Set(self, pk, value):
     """Pickle *value* into redis for *pk*; expires after the default lifetime."""
     payload = pickle.dumps(value)
     redis.setex(self._cacheKey % pk, payload, self._lifetime)