Example No. 1
    def test_shared_state(self):
        requests_cache_01 = RequestsCache()
        requests_cache_01['foo'] = MockResponse(content='bar',
                                                headers={},
                                                status_code=200)
        # A second instance must see the entry stored via the first.
        requests_cache_02 = RequestsCache()

        assert requests_cache_02['foo'].content == 'bar'.encode()
        assert requests_cache_02['foo'].status_code == 200
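
A minimal sketch of a cache that would satisfy this test, assuming RequestsCache keeps a class-level shared store (a simple take on the Borg pattern); the real implementation is not shown here:

class RequestsCache:
    """Hypothetical sketch: every instance shares one backing dict."""

    _shared_store = {}

    def __getitem__(self, key):
        return self._shared_store[key]

    def __setitem__(self, key, value):
        self._shared_store[key] = value

    def __contains__(self, key):
        return key in self._shared_store

    def __iter__(self):
        return iter(self._shared_store)

    def __len__(self):
        return len(self._shared_store)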
Example No. 2
    def test_interface_redis(self, mock_get):
        requests_cache_01 = RequestsCache()
        requests_cache_01['foo'] = MockResponse(content='bar',
                                                headers={},
                                                status_code=200)
        assert list(requests_cache_01)
        assert 'foo' in requests_cache_01

        assert requests_cache_01['foo'].content == 'bar'.encode()
        assert requests_cache_01['foo'].status_code == 200

        # __sizeof__ is expected to report the size of the backing store.
        assert requests_cache_01.__sizeof__() == RAND_CACHE_SIZE

        # Missing keys must raise KeyError, like a regular mapping.
        self.assertRaises(KeyError, lambda: requests_cache_01['bar'])
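
The mock_get argument is injected by a mock.patch decorator that is not shown in the snippet. A minimal sketch of how such a test might be wired up (the patch target below is an assumption):

import unittest
from unittest import mock


class RequestsCacheTests(unittest.TestCase):

    # Hypothetical patch target; the original decorator is not shown above.
    @mock.patch('requests.get')
    def test_interface_redis(self, mock_get):
        ...  # body as in the example above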
Example No. 3
    def test_interface_in_memory(self):
        requests_cache_01 = RequestsCache()
        requests_cache_01['foo'] = MockResponse(content='bar',
                                                headers={},
                                                status_code=200)
        assert list(requests_cache_01)
        assert 'foo' in requests_cache_01
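
All three tests rely on a MockResponse helper. A minimal sketch of what it might look like, assuming it encodes string content to bytes (which is what the .content == 'bar'.encode() assertions imply):

class MockResponse:
    """Hypothetical test double for requests.models.Response."""

    def __init__(self, content, headers, status_code):
        # Store content as bytes, mirroring requests' Response.content.
        self.content = content.encode() if isinstance(content, str) else content
        self.headers = headers
        self.status_code = status_code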
Example No. 4
def metrics():
    """
    Prometheus metrics endpoint.
    """
    headers = {'Content-type': 'text/plain'}

    stats_cache = StatsCache()
    requests_cache = RequestsCache()

    stats_cache.set_cache_size(sys.getsizeof(requests_cache))
    stats_cache.set_cached_objects(len(requests_cache))

    return flask.Response(generate_latest(registry=stats_cache.registry), 200,
                          headers)
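
The metrics endpoint assumes StatsCache exposes a prometheus_client registry plus two setters. A minimal sketch under that assumption (metric names are illustrative, not taken from the source):

from prometheus_client import CollectorRegistry, Gauge

class StatsCache:
    """Hypothetical sketch of the stats holder used by metrics()."""

    def __init__(self):
        self.registry = CollectorRegistry()
        self._cache_size = Gauge('cache_size_bytes',
                                 'Approximate size of the requests cache',
                                 registry=self.registry)
        self._cached_objects = Gauge('cached_objects',
                                     'Number of responses currently cached',
                                     registry=self.registry)

    def set_cache_size(self, value):
        self._cache_size.set(value)

    def set_cached_objects(self, value):
        self._cached_objects.set(value)

generate_latest(registry=...) then renders every collector in that registry in the Prometheus text exposition format, which is what metrics() returns to the scraper.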
Example No. 5
def offline_request(method,
                    url,
                    auth,
                    error_code=504,
                    error_message=b'{"message": "gateway timeout"}\n'):
    """
    Implements offline requests (serves content from cache, when possible).
    """
    headers = {}
    if auth is None:
        auth_sha = None
    else:
        auth_sha = hashlib.sha1(auth.encode()).hexdigest()
        headers['Authorization'] = auth

    # Special case for non-GET requests
    if method != 'GET':
        LOG.info('OFFLINE %s CACHE_MISS %s', method, url)
        # Not much to do here. We just build up a response
        # with a reasonable status code so users know that our
        # upstream is offline
        response = requests.models.Response()
        response.status_code = error_code
        response.headers['X-Cache'] = 'OFFLINE_MISS'
        # pylint: disable=protected-access
        response._content = error_message
        return response

    cache = RequestsCache()
    cache_key = (url, auth_sha)
    if cache_key in cache:
        LOG.info('OFFLINE GET CACHE_HIT %s', url)
        # This is the best case: upstream is offline
        # but we have the resource in cache for a given
        # user. We then serve from cache.
        cached_response = cache[cache_key]
        cached_response.headers['X-Cache'] = 'OFFLINE_HIT'
        return cached_response

    LOG.info('OFFLINE GET CACHE_MISS %s', url)
    # GETs without cached content will receive an error
    # code so they know our upstream is offline.
    response = requests.models.Response()
    response.status_code = error_code
    response.headers['X-Cache'] = 'OFFLINE_MISS'
    # pylint: disable=protected-access
    response._content = error_message
    return response
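
Both miss paths build a synthetic requests Response by hand: instantiate requests.models.Response, set status_code and headers, and assign the private _content attribute that .content and .text read from. A standalone sketch of the same technique (the helper name is illustrative):

import requests

def synthetic_response(status_code, body):
    """Build a bare Response, as the OFFLINE_MISS branches above do."""
    response = requests.models.Response()
    response.status_code = status_code
    response.headers['X-Cache'] = 'OFFLINE_MISS'
    # pylint: disable=protected-access
    response._content = body  # bytes backing .content / .text
    return response

resp = synthetic_response(504, b'{"message": "gateway timeout"}\n')
assert resp.status_code == 504
assert resp.content == b'{"message": "gateway timeout"}\n'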
Example No. 6
def online_request(method, url, auth, data=None):
    """
    Implements conditional requests.
    """
    cache = RequestsCache()
    headers = {}
    if auth is None:
        auth_sha = None
    else:
        auth_sha = hashlib.sha1(auth.encode()).hexdigest()
        headers['Authorization'] = auth

    # Special case for non-GET requests
    if method != 'GET':
        # Just forward the request with the auth header
        resp = requests.request(method=method,
                                url=url,
                                headers=headers,
                                data=data,
                                timeout=REQUESTS_TIMEOUT)

        LOG.info('ONLINE %s CACHE_MISS %s', method, url)
        # And just forward the response (with the
        # cache-miss header, for metrics)
        resp.headers['X-Cache'] = 'ONLINE_MISS'
        return resp

    cache_key = (url, auth_sha)

    cached_response = None
    if cache_key in cache:
        cached_response = cache[cache_key]
        etag = cached_response.headers.get('ETag')
        if etag is not None:
            headers['If-None-Match'] = etag
        last_mod = cached_response.headers.get('Last-Modified')
        if last_mod is not None:
            headers['If-Modified-Since'] = last_mod

    resp = requests.request(method=method,
                            url=url,
                            headers=headers,
                            timeout=REQUESTS_TIMEOUT)

    if resp.status_code == 304:
        LOG.info('ONLINE GET CACHE_HIT %s', url)
        cached_response.headers['X-Cache'] = 'ONLINE_HIT'
        return cached_response

    # When we hit the API rate limit, try to serve from cache
    if resp.status_code == 403 and 'API rate limit exceeded' in resp.text:
        if cached_response is None:
            LOG.info('RATE_LIMITED GET CACHE_MISS %s', url)
            resp.headers['X-Cache'] = 'RATE_LIMITED_MISS'
            return resp
        LOG.info('RATE_LIMITED GET CACHE_HIT %s', url)
        cached_response.headers['X-Cache'] = 'RATE_LIMITED_HIT'
        return cached_response

    LOG.info('ONLINE GET CACHE_MISS %s', url)
    resp.headers['X-Cache'] = 'ONLINE_MISS'
    # Caching only makes sense when at least one
    # of those headers is present
    if resp.status_code == 200 and ('ETag' in resp.headers
                                    or 'Last-Modified' in resp.headers):
        cache[cache_key] = resp
    return resp
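
online_request and offline_request read as the two halves of a single proxy entry point. A minimal sketch of how they might be wired together (the dispatch policy below is an assumption, not the original code):

import requests

def proxied_request(method, url, auth, data=None):
    """Hypothetical dispatcher: prefer the upstream, degrade to cache."""
    try:
        return online_request(method, url, auth, data=data)
    except requests.exceptions.ConnectionError:
        # Upstream unreachable: serve from cache when possible.
        return offline_request(method, url, auth)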