Example #1
    def _getAPIUris(self, serviceRecord):
        if hasattr(self, "_uris"):  # cache these for the life of the batch job at least? hope so
            return self._uris
        else:
            uris_json = redis.get(self._URI_CACHE_KEY)
            if uris_json is not None:
                uris = json.loads(uris_json.decode('utf-8'))
            else:
                response = requests.get(
                    "https://api.runkeeper.com/user/",
                    headers=self._apiHeaders(serviceRecord))
                if response.status_code != 200:
                    if response.status_code == 401 or response.status_code == 403:
                        raise APIException(
                            "No authorization to retrieve user URLs",
                            block=True,
                            user_exception=UserException(
                                UserExceptionType.Authorization,
                                intervention_required=True))
                    raise APIException("Unable to retrieve user URLs" +
                                       str(response))

                uris = response.json()
                for k, v in uris.items():
                    if isinstance(v, str):
                        uris[k] = "https://api.runkeeper.com" + v
                # Runkeeper wants you to request these on a per-user basis.
                # In practice, the URIs are identical for every user (only the userID key changes).
                # So, only fetch them once every 24 hours, across the entire system.
                # NB: argument order here follows redis-py < 3.0, i.e.
                # setex(name, value, time); redis-py >= 3.0 expects (name, time, value).
                redis.setex(self._URI_CACHE_KEY, json.dumps(uris),
                            timedelta(hours=24))
            self._uris = uris
            return uris
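The pattern above (check redis, fall back to the network, then setex with a TTL) is a generic read-through cache. A minimal standalone sketch, assuming redis-py >= 3.0 and illustrative names:

import json
from datetime import timedelta

import redis

r = redis.Redis()

def cached_json(key, fetch, ttl=timedelta(hours=24)):
    # Read-through cache: try redis first, fall back to fetch(), then store.
    raw = r.get(key)
    if raw is not None:
        return json.loads(raw.decode("utf-8"))
    value = fetch()
    r.setex(key, ttl, json.dumps(value))  # redis-py >= 3.0: setex(name, time, value)
    return value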
Example #2
    def RetrieveAuthorizationToken(self, req, level):
        from tapiriik.services import Service
        tokenKey = req.GET["oauth_token"]

        redis_key = "dropbox:oauth:%s" % tokenKey
        token = redis.get(redis_key)
        assert token  # the pickled request token must still be in redis
        token = pickle.loads(token)
        redis.delete(redis_key)

        full = level == "full"
        if full:
            sess = session.DropboxSession(DROPBOX_FULL_APP_KEY,
                                          DROPBOX_FULL_APP_SECRET, "dropbox")
        else:
            sess = session.DropboxSession(DROPBOX_APP_KEY, DROPBOX_APP_SECRET,
                                          "app_folder")

        accessToken = sess.obtain_access_token(token)

        uid = int(req.GET["uid"])  # duh!
        return (uid, {
            "Key": accessToken.key,
            "Secret": accessToken.secret,
            "Full": full
        })
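This retrieval leg assumes an earlier step parked the pickled request token in redis under the same key scheme. A hedged sketch of that producer side (the method name, the 30-minute TTL, and the redis-py >= 3.0 setex argument order are assumptions, not from the source):

    def _store_request_token(self, token):
        # Hypothetical: called before redirecting the user to Dropbox, so that
        # RetrieveAuthorizationToken above can rebuild the session from it.
        redis.setex("dropbox:oauth:%s" % token.key,
                    timedelta(minutes=30), pickle.dumps(token))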
Example #3
    def _rate_limit(self, endpoint, req_lambda):
        if redis.get(self._RATE_LIMIT_KEY) is not None:
            raise APIException("RK global rate limit previously reached on %s" % endpoint, user_exception=UserException(UserExceptionType.RateLimited))
        response = req_lambda()
        if response.status_code == 429:
            if "user" not in response.text:
                # When we hit a limit we preemptively fail all future requests until we're sure
                # the limit has expired. The maximum period appears to be 1 day.
                # This entire thing is an exercise in better-safe-than-sorry as it's unclear
                # how their rate-limit logic works (when limits reset, etc.).

                # As it turns out, there are several parallel rate limits operating at once.
                # Attempt to parse out how long we should wait - if we can't figure it out,
                # default to the shortest time I've seen (15m). As long as the timer doesn't reset
                # every time you request during the over-quota period, this should work.
                timeout = timedelta(minutes=15)
                timeout_match = re.search(r"(\d+) (second|minute|hour|day)", response.text)
                if timeout_match:
                    # This line is too clever for its own good: it pluralizes the
                    # unit into a timedelta keyword, e.g. {"minutes": 15.0}.
                    timeout = timedelta(**{"%ss" % timeout_match.group(2): float(timeout_match.group(1))})

                redis.setex(self._RATE_LIMIT_KEY, response.text, timeout)
                raise APIException("RK global rate limit reached on %s" % endpoint, user_exception=UserException(UserExceptionType.RateLimited))
            else:
                # Per-user limit hit: don't halt entire system, just bail for this user
                # If a user has too many pages of activities, they will never sync as we'll keep hitting the limit.
                # But that's a Very Hard Problem to Solve™ given that I can't do incremental listings...
                raise APIException("RK user rate limit reached on %s" % endpoint, user_exception=UserException(UserExceptionType.RateLimited))
        return response
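The keyword-expansion trick on the "too clever" line works because timedelta's parameter names are exactly the pluralized unit names (seconds, minutes, hours, days). A quick demonstration:

from datetime import timedelta

# "15 minute" matches as ("15", "minute"); "%ss" % "minute" -> "minutes" and
# float("15") -> 15.0, so the expansion is equivalent to:
timedelta(**{"minutes": 15.0})  # == timedelta(minutes=15)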
Example #4
    def _getAPIUris(self, serviceRecord):
        if hasattr(self, "_uris"):  # cache these for the life of the batch job at least? hope so
            return self._uris
        else:
            uris_json = redis.get(self._URI_CACHE_KEY)
            if uris_json is not None:
                uris = json.loads(uris_json.decode('utf-8'))
            else:
                response = self._rate_limit("user",
                                            lambda: requests.get("https://api.runkeeper.com/user/",
                                                                 headers=self._apiHeaders(serviceRecord)))
                if response.status_code != 200:
                    if response.status_code == 401 or response.status_code == 403:
                        raise APIException("No authorization to retrieve user URLs", block=True, user_exception=UserException(UserExceptionType.Authorization, intervention_required=True))
                    raise APIException("Unable to retrieve user URLs" + str(response))

                uris = response.json()
                for k, v in uris.items():
                    if isinstance(v, str):
                        uris[k] = "https://api.runkeeper.com" + v
                # Runkeeper wants you to request these on a per-user basis.
                # In practice, the URIs are identical for every user (only the userID key changes).
                # So, only fetch them once every 24 hours, across the entire system.
                redis.setex(self._URI_CACHE_KEY, json.dumps(uris), timedelta(hours=24))
            self._uris = uris
            return uris
Example #5
File: endomondo.py Project: 7e7/tapiriik
 def RetrieveAuthorizationToken(self, req, level):
     redis_token_key = "endomondo:oauth:%s" % req.GET["oauth_token"]
     secret = redis.get(redis_token_key)
     assert secret  # the request-token secret must still be in redis (it may have expired)
     redis.delete(redis_token_key)
     oauthSession = self._oauthSession(resource_owner_secret=secret)
     oauthSession.parse_authorization_response(req.get_full_path())
     tokens = oauthSession.fetch_access_token("https://api.endomondo.com/oauth/access_token")
     userInfo = oauthSession.get("https://api.endomondo.com/api/1/user")
     userInfo = userInfo.json()
     return (userInfo["id"], {"Token": tokens["oauth_token"], "Secret": tokens["oauth_token_secret"]})
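Only the callback half of the flow is shown; a hedged sketch of the first leg that would have stored the secret, assuming _oauthSession wraps requests_oauthlib's OAuth1Session (the request-token URL and one-hour TTL are guesses):

 def _start_authorization(self):
     oauthSession = self._oauthSession()
     tokens = oauthSession.fetch_request_token("https://api.endomondo.com/oauth/request_token")
     # Park the secret so RetrieveAuthorizationToken can rebuild the session later.
     redis.setex("endomondo:oauth:%s" % tokens["oauth_token"],
                 timedelta(hours=1), tokens["oauth_token_secret"])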
Example #6
 def RetrieveAuthorizationToken(self, req, level):
     redis_token_key = "endomondo:oauth:%s" % req.GET["oauth_token"]
     secret = redis.get(redis_token_key)
     assert secret
     redis.delete(redis_token_key)
     oauthSession = self._oauthSession(resource_owner_secret=secret)
     oauthSession.parse_authorization_response(req.get_full_path())
     tokens = oauthSession.fetch_access_token("https://api.endomondo.com/oauth/access_token")
     userInfo = oauthSession.get("https://api.endomondo.com/api/1/user")
     userInfo = userInfo.json()
     return (userInfo["id"], {"Token": tokens["oauth_token"], "Secret": tokens["oauth_token_secret"]})
Example #7
 def Get(self, pk, freshen=False):
     res = redis.get(self._cacheKey % pk)
     if res:
         try:
             res = pickle.loads(res)
         except pickle.UnpicklingError:
             # Corrupt cache entry: evict it and treat it as a miss.
             self.Delete(pk)
             res = None
         else:
             if self._autorefresh or freshen:
                 # Sliding expiration: reset the TTL on access.
                 redis.expire(self._cacheKey % pk, self._lifetime)
         return res
Example #8
 def Get(self, pk, freshen=False):
     res = redis.get(self._cacheKey % pk)
     if res:
         try:
             res = pickle.loads(res)
         except pickle.UnpicklingError:
             self.Delete(pk)
             res = None
         else:
             if self._autorefresh or freshen:
                 redis.expire(self._cacheKey % pk, self._lifetime)
         return res
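Only the read side is shown; a plausible Set counterpart for the same cache (a sketch; the surrounding class is not in the example, and the redis-py >= 3.0 setex argument order is an assumption):

 def Set(self, pk, value):
     # Store under the same key scheme Get reads, with the cache's lifetime as the TTL.
     redis.setex(self._cacheKey % pk, self._lifetime, pickle.dumps(value))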
Example #9
 def _rate_limit(self, endpoint, req_lambda):
     if redis.get(self._RATE_LIMIT_KEY % endpoint) is not None:
         raise APIException("RK global rate limit previously reached on %s" % endpoint, user_exception=UserException(UserExceptionType.RateLimited))
     response = req_lambda()
     if response.status_code == 429:
         # When we hit a limit we preemptively fail all future requests until we're sure
         # the limit has expired. The maximum period appears to be 1 day.
         # This entire thing is an exercise in better-safe-than-sorry as it's unclear
         # how their rate-limit logic works (when limits reset, etc.).
         redis.setex(self._RATE_LIMIT_KEY % endpoint, response.text, timedelta(hours=24))
         raise APIException("RK global rate limit reached on %s" % endpoint, user_exception=UserException(UserExceptionType.RateLimited))
     return response
Example #10
    def _rate_limit(self, endpoint, req_lambda):
        if redis.get(self._RATE_LIMIT_KEY) is not None:
            raise APIException(
                "RK global rate limit previously reached on %s" % endpoint,
                user_exception=UserException(UserExceptionType.RateLimited))
        response = req_lambda()
        if response.status_code == 429:
            if "user" not in response.text:
                # When we hit a limit we preemptively fail all future requests until we're sure
                # the limit has expired. The maximum period appears to be 1 day.
                # This entire thing is an exercise in better-safe-than-sorry as it's unclear
                # how their rate-limit logic works (when limits reset, etc.).

                # As it turns out, there are several parallel rate limits operating at once.
                # Attempt to parse out how long we should wait - if we can't figure it out,
                # default to the shortest time I've seen (15m). As long as the timer doesn't reset
                # every time you request during the over-quota period, this should work.
                timeout = timedelta(minutes=15)
                timeout_match = re.search(r"(\d+) (second|minute|hour|day)",
                                          response.text)
                if timeout_match:
                    # This line is too clever for its own good.
                    timeout = timedelta(
                        **{
                            "%ss" % timeout_match.group(2):
                            float(timeout_match.group(1))
                        })

                redis.setex(self._RATE_LIMIT_KEY, response.text, timeout)
                raise APIException("RK global rate limit reached on %s" %
                                   endpoint,
                                   user_exception=UserException(
                                       UserExceptionType.RateLimited))
            else:
                # Per-user limit hit: don't halt entire system, just bail for this user
                # If a user has too many pages of activities, they will never sync as we'll keep hitting the limit.
                # But that's a Very Hard Problem to Solve™ given that I can't do incremental listings...
                raise APIException("RK user rate limit reached on %s" %
                                   endpoint,
                                   user_exception=UserException(
                                       UserExceptionType.RateLimited))
        return response
Example #11
File: dropbox.py Project: alp/tapiriik
    def RetrieveAuthorizationToken(self, req, level):
        from tapiriik.services import Service
        tokenKey = req.GET["oauth_token"]

        redis_key = "dropbox:oauth:%s" % tokenKey
        token = redis.get(redis_key)
        assert token
        token = pickle.loads(token)
        redis.delete(redis_key)

        full = level == "full"
        if full:
            sess = session.DropboxSession(DROPBOX_FULL_APP_KEY, DROPBOX_FULL_APP_SECRET, "dropbox")
        else:
            sess = session.DropboxSession(DROPBOX_APP_KEY, DROPBOX_APP_SECRET, "app_folder")

        accessToken = sess.obtain_access_token(token)

        uid = int(req.GET["uid"])  # duh!
        return (uid, {"Key": accessToken.key, "Secret": accessToken.secret, "Full": full})