def _mock_infocache(self, env):
    """Seed swift.infocache with stub account (and container) info.

    Does nothing unless ``self.skip_metadata`` is set; object metadata
    is deliberately left un-faked.
    """
    if not self.skip_metadata:
        return
    request = Request(env)
    # don't fake obj metadata
    acct, cont, _obj = self._extract_path(request.path_info)
    cache = request.environ.setdefault('swift.infocache', {})
    cache[get_cache_key(acct)] = headers_to_account_info({}, 0)
    if cont:
        cache[get_cache_key(acct, cont)] = headers_to_container_info({}, 0)
def test_ratelimit_max_rate_double_container(self):
    """With a 2/sec container write limit, the 3rd and 4th of four
    simultaneous PUTs are told to slow down; a later request succeeds.
    """
    global time_ticker
    global time_override
    conf = {
        'container_ratelimit_0': 2,
        'clock_accuracy': 100,
        'max_sleep_time_seconds': 1,
    }
    self.test_ratelimit = ratelimit.filter_factory(conf)(FakeApp())
    self.test_ratelimit.log_sleep_time_seconds = .00001
    req = Request.blank('/v1/a/c/o')
    req.method = 'PUT'
    cache = FakeMemcache()
    cache.set(get_cache_key('a', 'c'), {'object_count': 1})
    req.environ['swift.cache'] = cache
    # simulates 4 requests coming in at same time, then sleeping
    time_override = [0, 0, 0, 0, None]
    with mock.patch('swift.common.middleware.ratelimit.get_account_info',
                    lambda *args, **kwargs: {}):
        resp = self.test_ratelimit(req.environ, start_response)
        mock_sleep(.1)
        resp = self.test_ratelimit(req.environ, start_response)
        mock_sleep(.1)
        resp = self.test_ratelimit(req.environ, start_response)
        self.assertEqual(resp[0], 'Slow down')
        mock_sleep(.1)
        resp = self.test_ratelimit(req.environ, start_response)
        self.assertEqual(resp[0], 'Slow down')
        mock_sleep(.1)
        resp = self.test_ratelimit(req.environ, start_response)
        self.assertEqual(resp[0], '204 No Content')
def _cache_container_exists(self, req, account, container):
    """
    Record (fake) existence of the underlying Swift container in the
    request's infocache.

    The swift3 middleware, at least, changes what it says in an object
    404 response based on whether or not swift3 thinks the actual
    underlying Swift container exists.  The values cached here are
    obviously made-up garbage; swift3 just looks for existence.
    """
    made_up_info = {
        'status': 200,
        'read_acl': '',
        'write_acl': '',
        'sync_key': '',
        'object_count': '0',
        'bytes': '0',
        'versions': None,
        'storage_policy': '0',
        'cors': {
            'allow_origin': None,
            'expose_headers': None,
            'max_age': None,
        },
        'meta': {},
        'sysmeta': {},
    }
    req.environ.setdefault('swift.infocache', {})[
        get_cache_key(account, container)] = made_up_info
def test_get_ratelimitable_key_tuples(self):
    """Check which (key, rate) tuples are generated for each verb and
    path depth, including the optional global write ratelimit key."""
    conf_dict = {'account_ratelimit': 13,
                 'container_ratelimit_3': 200}
    fake_memcache = FakeMemcache()
    fake_memcache.store[get_cache_key('a', 'c')] = {'object_count': '5'}
    app = ratelimit.filter_factory(conf_dict)(FakeApp())
    app.memcache_client = fake_memcache
    environ = {'swift.cache': fake_memcache, 'PATH_INFO': '/v1/a/c/o'}
    with mock.patch('swift.common.middleware.ratelimit.get_account_info',
                    lambda *args, **kwargs: {}):
        def num_tuples(method, acct, cont, obj, **kwargs):
            # how many ratelimit keys apply to this request shape
            return len(app.get_ratelimitable_key_tuples(
                FakeReq(method, environ), acct, cont, obj, **kwargs))

        self.assertEqual(num_tuples('DELETE', 'a', None, None), 0)
        self.assertEqual(num_tuples('PUT', 'a', 'c', None), 1)
        self.assertEqual(num_tuples('DELETE', 'a', 'c', None), 1)
        self.assertEqual(num_tuples('GET', 'a', 'c', 'o'), 0)
        self.assertEqual(num_tuples('PUT', 'a', 'c', 'o'), 1)
        self.assertEqual(
            num_tuples('PUT', 'a', 'c', None, global_ratelimit=10), 2)
        self.assertEqual(
            app.get_ratelimitable_key_tuples(
                FakeReq('PUT', environ), 'a', 'c', None,
                global_ratelimit=10)[1],
            ('ratelimit/global-write/a', 10))
        # a non-numeric global ratelimit is ignored
        self.assertEqual(
            num_tuples('PUT', 'a', 'c', None,
                       global_ratelimit='notafloat'), 1)
def test_ratelimit_max_rate_double_container_listing(self):
    """Container-listing GETs obey the listing ratelimit; also verify
    handle_ratelimit is a no-op when there is no memcache client."""
    global time_ticker
    global time_override
    current_rate = 2
    conf_dict = {"container_listing_ratelimit_0": current_rate,
                 "clock_accuracy": 100,
                 "max_sleep_time_seconds": 1}
    self.test_ratelimit = ratelimit.filter_factory(conf_dict)(FakeApp())
    ratelimit.http_connect = mock_http_connect(204)
    self.test_ratelimit.log_sleep_time_seconds = 0.00001
    req = Request.blank("/v/a/c")
    req.method = "GET"
    req.environ["swift.cache"] = FakeMemcache()
    req.environ["swift.cache"].set(get_cache_key("a", "c"),
                                   {"object_count": 1})
    with mock.patch("swift.common.middleware.ratelimit.get_account_info",
                    lambda *args, **kwargs: {}):
        time_override = [0, 0, 0, 0, None]
        # simulates 4 requests coming in at same time, then sleeping
        r = self.test_ratelimit(req.environ, start_response)
        mock_sleep(0.1)
        r = self.test_ratelimit(req.environ, start_response)
        mock_sleep(0.1)
        r = self.test_ratelimit(req.environ, start_response)
        self.assertEqual(r[0], "Slow down")
        mock_sleep(0.1)
        r = self.test_ratelimit(req.environ, start_response)
        self.assertEqual(r[0], "Slow down")
        mock_sleep(0.1)
        r = self.test_ratelimit(req.environ, start_response)
        self.assertEqual(r[0], "204 No Content")
        mc = self.test_ratelimit.memcache_client
        try:
            self.test_ratelimit.memcache_client = None
            # assertIsNone gives a clearer failure message than
            # assertEqual(..., None) and matches the sibling test
            self.assertIsNone(
                self.test_ratelimit.handle_ratelimit(req, "n", "c", None))
        finally:
            self.test_ratelimit.memcache_client = mc
def test_ratelimit_max_rate_double_container(self):
    """Four simultaneous PUTs against a 2/sec container limit: the
    3rd and 4th get b'Slow down', a later request passes through."""
    global time_ticker
    global time_override
    conf = {'container_ratelimit_0': 2,
            'clock_accuracy': 100,
            'max_sleep_time_seconds': 1}
    self.test_ratelimit = ratelimit.filter_factory(conf)(FakeApp())
    self.test_ratelimit.log_sleep_time_seconds = .00001
    req = Request.blank('/v1/a/c/o')
    req.method = 'PUT'
    cache = FakeMemcache()
    cache.set(get_cache_key('a', 'c'), {'object_count': 1})
    req.environ['swift.cache'] = cache
    # simulates 4 requests coming in at same time, then sleeping
    time_override = [0, 0, 0, 0, None]
    with mock.patch('swift.common.middleware.ratelimit.get_account_info',
                    lambda *args, **kwargs: {}):
        resp = self.test_ratelimit(req.environ, start_response)
        mock_sleep(.1)
        resp = self.test_ratelimit(req.environ, start_response)
        mock_sleep(.1)
        resp = self.test_ratelimit(req.environ, start_response)
        self.assertEqual(resp[0], b'Slow down')
        mock_sleep(.1)
        resp = self.test_ratelimit(req.environ, start_response)
        self.assertEqual(resp[0], b'Slow down')
        mock_sleep(.1)
        resp = self.test_ratelimit(req.environ, start_response)
        self.assertEqual(resp[0], b'Some Content')
def test_get_ratelimitable_key_tuples(self):
    """Check which (key, rate) tuples are generated for each verb and
    path depth, including the optional global write ratelimit key."""
    current_rate = 13
    conf_dict = {"account_ratelimit": current_rate,
                 "container_ratelimit_3": 200}
    fake_memcache = FakeMemcache()
    fake_memcache.store[get_cache_key("a", "c")] = {"object_count": "5"}
    the_app = ratelimit.filter_factory(conf_dict)(FakeApp())
    the_app.memcache_client = fake_memcache

    # A real class instead of a lambda abused as an attribute bag
    # (PEP 8 E731 forbids assigning lambdas; a lambda also isn't
    # meant to carry attributes).
    class _StubReq(object):
        pass

    req = _StubReq()
    req.environ = {"swift.cache": fake_memcache,
                   "PATH_INFO": "/v1/a/c/o"}
    with mock.patch("swift.common.middleware.ratelimit.get_account_info",
                    lambda *args, **kwargs: {}):
        req.method = "DELETE"
        self.assertEqual(
            len(the_app.get_ratelimitable_key_tuples(req, "a", None, None)),
            0)
        req.method = "PUT"
        self.assertEqual(
            len(the_app.get_ratelimitable_key_tuples(req, "a", "c", None)),
            1)
        req.method = "DELETE"
        self.assertEqual(
            len(the_app.get_ratelimitable_key_tuples(req, "a", "c", None)),
            1)
        req.method = "GET"
        self.assertEqual(
            len(the_app.get_ratelimitable_key_tuples(req, "a", "c", "o")),
            0)
        req.method = "PUT"
        self.assertEqual(
            len(the_app.get_ratelimitable_key_tuples(req, "a", "c", "o")),
            1)
        req.method = "PUT"
        self.assertEqual(
            len(the_app.get_ratelimitable_key_tuples(
                req, "a", "c", None, global_ratelimit=10)),
            2)
        self.assertEqual(
            the_app.get_ratelimitable_key_tuples(
                req, "a", "c", None, global_ratelimit=10)[1],
            ("ratelimit/global-write/a", 10))
        req.method = "PUT"
        # a non-numeric global ratelimit is ignored
        self.assertEqual(
            len(the_app.get_ratelimitable_key_tuples(
                req, "a", "c", None, global_ratelimit="notafloat")),
            1)
def test_get_account_info_env(self):
    """Account info already present in swift.infocache is returned
    without consulting the backend."""
    cache_key = get_cache_key("account")
    environ = {'swift.infocache': {cache_key: {'bytes': 3867}},
               'swift.cache': FakeCache({})}
    req = Request.blank("/v1/account", environ=environ)
    info = get_account_info(req.environ, 'xxx')
    self.assertEqual(info['bytes'], 3867)
def __call__(self, env, start_response):
    """Minimal WSGI app: honors swift.authorize, serves canned object
    info for one path, 404s another, and otherwise primes the account
    infocache the way a real application would."""
    if 'swift.authorize' in env:
        aresp = env['swift.authorize'](Request(env))
        if aresp:
            return aresp(env, start_response)
    method = env['REQUEST_METHOD']
    path = env['PATH_INFO']
    if method == "HEAD" and path == '/v1/a/c2/o2':
        env.setdefault('swift.infocache', {})[
            get_cache_key('a', 'c2', 'o2')] = \
            headers_to_object_info(self.headers, 200)
        start_response('200 OK', self.headers)
    elif method == "HEAD" and path == '/v1/a/c2/o3':
        start_response('404 Not Found', [])
    else:
        # Cache the account_info (same as a real application)
        env.setdefault('swift.infocache', {})[
            get_cache_key('a')] = \
            headers_to_account_info(self.headers, 200)
        start_response('200 OK', self.headers)
    return []
def test_ratelimit_old_memcache_format(self):
    """A legacy 'container_size' memcache record still yields the
    container ratelimit tuple."""
    current_rate = 13
    conf_dict = {"account_ratelimit": current_rate,
                 "container_ratelimit_3": 200}
    fake_memcache = FakeMemcache()
    fake_memcache.store[get_cache_key("a", "c")] = {"container_size": 5}
    the_app = ratelimit.filter_factory(conf_dict)(FakeApp())
    the_app.memcache_client = fake_memcache

    # A real class instead of a lambda abused as an attribute bag
    # (PEP 8 E731).
    class _StubReq(object):
        pass

    req = _StubReq()
    req.method = "PUT"
    req.environ = {"PATH_INFO": "/v1/a/c/o", "swift.cache": fake_memcache}
    with mock.patch("swift.common.middleware.ratelimit.get_account_info",
                    lambda *args, **kwargs: {}):
        tuples = the_app.get_ratelimitable_key_tuples(req, "a", "c", "o")
        self.assertEqual(tuples, [("ratelimit/a/c", 200.0)])
def test_valid_sig2(self):
    """A request signed with the realm's secondary key is authorized."""
    sig = self.sync.realms_conf.get_sig(
        'GET', '/v1/a/c', '0', 'nonce',
        self.sync.realms_conf.key2('US'), 'abc')
    req = swob.Request.blank(
        '/v1/a/c', headers={'x-container-sync-auth': 'US nonce ' + sig})
    infocache = req.environ.setdefault('swift.infocache', {})
    infocache[get_cache_key('a', 'c')] = {'sync_key': 'abc'}
    resp = req.get_response(self.sync)
    self.assertEqual(resp.status, '200 OK')
    self.assertEqual(resp.body, 'Response to Authorized Request')
    # assertIn reports the container contents on failure, replacing
    # the assertTrue('x' in y, y) anti-pattern
    self.assertIn('cs:valid', req.environ.get('swift.log_info'))
def test_invalid_sig(self):
    """A bogus signature is rejected with 401 and a log marker."""
    req = swob.Request.blank(
        '/v1/a/c', headers={'x-container-sync-auth': 'US nonce sig'})
    infocache = req.environ.setdefault('swift.infocache', {})
    infocache[get_cache_key('a', 'c')] = {'sync_key': 'abc'}
    resp = req.get_response(self.sync)
    self.assertEqual(resp.status, '401 Unauthorized')
    self.assertEqual(
        resp.body,
        'X-Container-Sync-Auth header not valid; contact cluster operator '
        'for support.')
    # assertIn reports the container contents on failure, replacing
    # the assertTrue('x' in y, y) anti-pattern
    self.assertIn('cs:invalid-sig', req.environ.get('swift.log_info'))
def test_get_object_info_env(self):
    """Object info in swift.infocache short-circuits the backend."""
    cached = {'status': 200,
              'length': 3333,
              'type': 'application/json',
              'meta': {}}
    env = {
        'swift.infocache': {
            get_cache_key("account", "cont", "obj"): cached},
        'swift.cache': FakeCache({}),
    }
    req = Request.blank("/v1/account/cont/obj", environ=env)
    info = get_object_info(req.environ, 'xxx')
    self.assertEqual(info['length'], 3333)
    self.assertEqual(info['type'], 'application/json')
def test_valid_sig2(self):
    """A request signed with key2 is authorized and sets the
    authorize/slo override flags in the environ."""
    key2 = self.sync.realms_conf.key2('US')
    sig = self.sync.realms_conf.get_sig(
        'GET', '/v1/a/c', '0', 'nonce', key2, 'abc')
    req = swob.Request.blank(
        '/v1/a/c', headers={'x-container-sync-auth': 'US nonce ' + sig})
    req.environ.setdefault('swift.infocache', {})[
        get_cache_key('a', 'c')] = {'sync_key': 'abc'}
    resp = req.get_response(self.sync)
    self.assertEqual(resp.status, '200 OK')
    self.assertEqual(resp.body, 'Response to Authorized Request')
    self.assertIn('cs:valid', req.environ.get('swift.log_info'))
    self.assertIn('swift.authorize_override', req.environ)
    self.assertIn('swift.slo_override', req.environ)
def test_invalid_sig(self):
    """A bad signature yields 401, a log marker, and no override
    flags in the environ."""
    req = swob.Request.blank(
        '/v1/a/c', headers={'x-container-sync-auth': 'US nonce sig'})
    req.environ.setdefault('swift.infocache', {})[
        get_cache_key('a', 'c')] = {'sync_key': 'abc'}
    resp = req.get_response(self.sync)
    self.assertEqual('401 Unauthorized', resp.status)
    self.assertEqual(
        resp.body,
        'X-Container-Sync-Auth header not valid; contact cluster operator '
        'for support.')
    log_info = req.environ.get('swift.log_info')
    self.assertIn('cs:invalid-sig', log_info)
    self.assertNotIn('swift.authorize_override', req.environ)
    self.assertNotIn('swift.slo_override', req.environ)
def test_ratelimit_old_memcache_format(self):
    """Legacy 'container_size' memcache records are still understood
    and produce the container ratelimit tuple."""
    conf = {'account_ratelimit': 13,
            'container_ratelimit_3': 200}
    cache = FakeMemcache()
    cache.store[get_cache_key('a', 'c')] = {'container_size': 5}
    app = ratelimit.filter_factory(conf)(FakeApp())
    app.memcache_client = cache
    req = FakeReq('PUT', {'PATH_INFO': '/v1/a/c/o', 'swift.cache': cache})
    with mock.patch('swift.common.middleware.ratelimit.get_account_info',
                    lambda *args, **kwargs: {}):
        self.assertEqual(
            app.get_ratelimitable_key_tuples(req, 'a', 'c', 'o'),
            [('ratelimit/a/c', 200.0)])
def test_already_handled(self):
    """Requests flagged swift.ratelimit.handled must pass through
    without being throttled a second time."""
    current_rate = 13
    num_calls = 5
    conf_dict = {'container_listing_ratelimit_0': current_rate}
    self.test_ratelimit = ratelimit.filter_factory(conf_dict)(FakeApp())
    fake_cache = FakeMemcache()
    fake_cache.set(get_cache_key('a', 'c'), {'object_count': 1})
    req = Request.blank('/v1/a/c', environ={'swift.cache': fake_cache})
    req.environ['swift.ratelimit.handled'] = True

    # a named function instead of an assigned lambda (PEP 8 E731)
    def make_app_call():
        return self.test_ratelimit(req.environ, start_response)

    begin = time.time()
    self._run(make_app_call, num_calls, current_rate, check_time=False)
    time_took = time.time() - begin
    self.assertEqual(round(time_took, 1), 0)  # no memcache, no limiting
def test_valid_sig(self):
    """A correctly signed request is authorized and the inbound
    timestamp is echoed back in X-Timestamp."""
    ts = '1455221706.726999_0123456789abcdef'
    realm_key = self.sync.realms_conf.key('US')
    sig = self.sync.realms_conf.get_sig(
        'GET', '/v1/a/c', ts, 'nonce', realm_key, 'abc')
    headers = {'x-container-sync-auth': 'US nonce ' + sig,
               'x-backend-inbound-x-timestamp': ts}
    req = swob.Request.blank('/v1/a/c', headers=headers)
    req.environ.setdefault('swift.infocache', {})[
        get_cache_key('a', 'c')] = {'sync_key': 'abc'}
    resp = req.get_response(self.sync)
    self.assertEqual(resp.status, '200 OK')
    self.assertEqual(resp.body, 'Response to Authorized Request')
    self.assertIn('cs:valid', req.environ.get('swift.log_info'))
    self.assertIn('X-Timestamp', resp.headers)
    self.assertEqual(ts, resp.headers['X-Timestamp'])
def test_ratelimit_old_memcache_format(self):
    """A legacy 'container_size' memcache record still produces the
    container ratelimit tuple."""
    current_rate = 13
    conf_dict = {'account_ratelimit': current_rate,
                 'container_ratelimit_3': 200}
    fake_memcache = FakeMemcache()
    fake_memcache.store[get_cache_key('a', 'c')] = \
        {'container_size': 5}
    the_app = ratelimit.filter_factory(conf_dict)(FakeApp())
    the_app.memcache_client = fake_memcache

    # A real class instead of a lambda abused as an attribute bag
    # (PEP 8 E731).
    class _StubReq(object):
        pass

    req = _StubReq()
    req.method = 'PUT'
    req.environ = {'PATH_INFO': '/v1/a/c/o', 'swift.cache': fake_memcache}
    with mock.patch('swift.common.middleware.ratelimit.get_account_info',
                    lambda *args, **kwargs: {}):
        tuples = the_app.get_ratelimitable_key_tuples(req, 'a', 'c', 'o')
        self.assertEqual(tuples, [('ratelimit/a/c', 200.0)])
def test_valid_sig(self):
    """A correctly signed request is authorized and the inbound
    x-timestamp header is reflected in the response."""
    ts = '1455221706.726999_0123456789abcdef'
    sig = self.sync.realms_conf.get_sig(
        'GET', '/v1/a/c', ts, 'nonce',
        self.sync.realms_conf.key('US'), 'abc')
    req = swob.Request.blank('/v1/a/c', headers={
        'x-container-sync-auth': 'US nonce ' + sig,
        'x-backend-inbound-x-timestamp': ts,
    })
    req.environ.setdefault('swift.infocache', {})[
        get_cache_key('a', 'c')] = {'sync_key': 'abc'}
    resp = req.get_response(self.sync)
    self.assertEqual('200 OK', resp.status)
    self.assertEqual('Response to Authorized Request', resp.body)
    self.assertIn('cs:valid', req.environ.get('swift.log_info'))
    self.assertIn('X-Timestamp', resp.headers)
    self.assertEqual(ts, resp.headers['X-Timestamp'])
def test_ratelimit_max_rate_double_container_listing(self):
    """Container-listing GETs obey the listing ratelimit; with no
    memcache client, handle_ratelimit does nothing."""
    global time_ticker
    global time_override
    conf = {'container_listing_ratelimit_0': 2,
            'clock_accuracy': 100,
            'max_sleep_time_seconds': 1}
    self.test_ratelimit = ratelimit.filter_factory(conf)(FakeApp())
    ratelimit.http_connect = mock_http_connect(204)
    self.test_ratelimit.log_sleep_time_seconds = .00001
    req = Request.blank('/v/a/c')
    req.method = 'GET'
    cache = FakeMemcache()
    cache.set(get_cache_key('a', 'c'), {'object_count': 1})
    req.environ['swift.cache'] = cache
    with mock.patch('swift.common.middleware.ratelimit.get_account_info',
                    lambda *args, **kwargs: {}):
        # simulates 4 requests coming in at same time, then sleeping
        time_override = [0, 0, 0, 0, None]
        resp = self.test_ratelimit(req.environ, start_response)
        mock_sleep(.1)
        resp = self.test_ratelimit(req.environ, start_response)
        mock_sleep(.1)
        resp = self.test_ratelimit(req.environ, start_response)
        self.assertEqual(resp[0], 'Slow down')
        mock_sleep(.1)
        resp = self.test_ratelimit(req.environ, start_response)
        self.assertEqual(resp[0], 'Slow down')
        mock_sleep(.1)
        resp = self.test_ratelimit(req.environ, start_response)
        self.assertEqual(resp[0], '204 No Content')
        saved_mc = self.test_ratelimit.memcache_client
        try:
            self.test_ratelimit.memcache_client = None
            self.assertIsNone(
                self.test_ratelimit.handle_ratelimit(req, 'n', 'c', None))
        finally:
            self.test_ratelimit.memcache_client = saved_mc
def test_get_ratelimitable_key_tuples(self):
    """Check which (key, rate) tuples are generated for each verb and
    path depth, including the optional global write ratelimit key."""
    current_rate = 13
    conf_dict = {'account_ratelimit': current_rate,
                 'container_ratelimit_3': 200}
    fake_memcache = FakeMemcache()
    fake_memcache.store[get_cache_key('a', 'c')] = \
        {'object_count': '5'}
    the_app = ratelimit.filter_factory(conf_dict)(FakeApp())
    the_app.memcache_client = fake_memcache

    # A real class instead of a lambda abused as an attribute bag
    # (PEP 8 E731).
    class _StubReq(object):
        pass

    req = _StubReq()
    req.environ = {'swift.cache': fake_memcache,
                   'PATH_INFO': '/v1/a/c/o'}
    with mock.patch('swift.common.middleware.ratelimit.get_account_info',
                    lambda *args, **kwargs: {}):
        req.method = 'DELETE'
        self.assertEqual(len(the_app.get_ratelimitable_key_tuples(
            req, 'a', None, None)), 0)
        req.method = 'PUT'
        self.assertEqual(len(the_app.get_ratelimitable_key_tuples(
            req, 'a', 'c', None)), 1)
        req.method = 'DELETE'
        self.assertEqual(len(the_app.get_ratelimitable_key_tuples(
            req, 'a', 'c', None)), 1)
        req.method = 'GET'
        self.assertEqual(len(the_app.get_ratelimitable_key_tuples(
            req, 'a', 'c', 'o')), 0)
        req.method = 'PUT'
        self.assertEqual(len(the_app.get_ratelimitable_key_tuples(
            req, 'a', 'c', 'o')), 1)
        req.method = 'PUT'
        self.assertEqual(len(the_app.get_ratelimitable_key_tuples(
            req, 'a', 'c', None, global_ratelimit=10)), 2)
        self.assertEqual(the_app.get_ratelimitable_key_tuples(
            req, 'a', 'c', None, global_ratelimit=10)[1],
            ('ratelimit/global-write/a', 10))
        req.method = 'PUT'
        # a non-numeric global ratelimit is ignored
        self.assertEqual(len(the_app.get_ratelimitable_key_tuples(
            req, 'a', 'c', None, global_ratelimit='notafloat')), 1)
def _GET_using_cache(self, req):
    """
    Serve a shard-range GET from cache when possible; otherwise fetch
    from the backend and opportunistically populate the caches.

    :param req: an instance of the incoming request; its environ's
        ``swift.infocache`` and memcache (via ``swift.cache``) may be
        read and updated as a side effect.
    :returns: a Response, either built from cached shard ranges or
        returned by the backend (with its body possibly re-filtered).
    """
    # It may be possible to fulfil the request from cache: we only reach
    # here if request record_type is 'shard' or 'auto', so if the container
    # state is 'sharded' then look for cached shard ranges. However, if
    # X-Newest is true then we always fetch from the backend servers.
    get_newest = config_true_value(req.headers.get('x-newest', False))
    if get_newest:
        self.app.logger.debug(
            'Skipping shard cache lookup (x-newest) for %s', req.path_qs)
        info = None
    else:
        info = _get_info_from_caches(self.app, req.environ,
                                     self.account_name,
                                     self.container_name)
    if (info and is_success(info['status']) and
            info.get('sharding_state') == 'sharded'):
        # container is sharded so we may have the shard ranges cached
        headers = headers_from_container_info(info)
        if headers:
            # only use cached values if all required headers available
            infocache = req.environ.setdefault('swift.infocache', {})
            memcache = cache_from_env(req.environ, True)
            cache_key = get_cache_key(self.account_name,
                                      self.container_name,
                                      shard='listing')
            # check the per-request infocache first, then memcache
            cached_ranges = infocache.get(cache_key)
            if cached_ranges is None and memcache:
                cached_ranges = memcache.get(cache_key)
            if cached_ranges is not None:
                infocache[cache_key] = tuple(cached_ranges)
                # shard ranges can be returned from cache
                self.app.logger.debug('Found %d shards in cache for %s',
                                      len(cached_ranges), req.path_qs)
                headers.update({'x-backend-record-type': 'shard',
                                'x-backend-cached-results': 'true'})
                shard_range_body = self._filter_resp_shard_ranges(
                    req, cached_ranges)
                # mimic GetOrHeadHandler.get_working_response...
                # note: server sets charset with content_type but proxy
                # GETorHEAD_base does not, so don't set it here either
                resp = Response(request=req, body=shard_range_body)
                update_headers(resp, headers)
                resp.last_modified = math.ceil(
                    float(headers['x-put-timestamp']))
                resp.environ['swift_x_timestamp'] = headers.get(
                    'x-timestamp')
                resp.accept_ranges = 'bytes'
                resp.content_type = 'application/json'
                return resp

    # The request was not fulfilled from cache so send to the backend
    # server, but instruct the backend server to ignore name constraints in
    # request params if returning shard ranges so that the response can
    # potentially be cached. Only do this if the container state is
    # 'sharded'. We don't attempt to cache shard ranges for a 'sharding'
    # container as they may include the container itself as a 'gap filler'
    # for shard ranges that have not yet cleaved; listings from 'gap
    # filler' shard ranges are likely to become stale as the container
    # continues to cleave objects to its shards and caching them is
    # therefore more likely to result in stale or incomplete listings on
    # subsequent container GETs.
    req.headers['x-backend-override-shard-name-filter'] = 'sharded'
    resp = self._GETorHEAD_from_backend(req)

    sharding_state = resp.headers.get('x-backend-sharding-state',
                                      '').lower()
    resp_record_type = resp.headers.get('x-backend-record-type',
                                        '').lower()
    complete_listing = config_true_value(resp.headers.pop(
        'x-backend-override-shard-name-filter', False))
    # given that we sent 'x-backend-override-shard-name-filter=sharded' we
    # should only receive back 'x-backend-override-shard-name-filter=true'
    # if the sharding state is 'sharded', but check them both anyway...
    if (resp_record_type == 'shard' and sharding_state == 'sharded' and
            complete_listing):
        # backend returned unfiltered listing state shard ranges so parse
        # them and replace response body with filtered listing
        cache_key = get_cache_key(self.account_name, self.container_name,
                                  shard='listing')
        data = self._parse_listing_response(req, resp)
        backend_shard_ranges = self._parse_shard_ranges(req, data, resp)
        if backend_shard_ranges is not None:
            cached_ranges = [dict(sr) for sr in backend_shard_ranges]
            if resp.headers.get('x-backend-sharding-state') == 'sharded':
                # cache in infocache even if no shard ranges returned; this
                # is unexpected but use that result for this request
                infocache = req.environ.setdefault('swift.infocache', {})
                infocache[cache_key] = tuple(cached_ranges)
                memcache = cache_from_env(req.environ, True)
                if memcache and cached_ranges:
                    # cache in memcache only if shard ranges as expected
                    self.app.logger.debug('Caching %d shards for %s',
                                          len(cached_ranges), req.path_qs)
                    memcache.set(
                        cache_key, cached_ranges,
                        time=self.app.recheck_listing_shard_ranges)

            # filter returned shard ranges according to request constraints
            resp.body = self._filter_resp_shard_ranges(req, cached_ranges)

    return resp
def set_info_cache(req, cache_data, account, container=None):
    """Store account (or container) info in the request's infocache.

    :param req: the request whose environ carries swift.infocache
    :param cache_data: the info dict (or sentinel) to store
    :param account: account name used to build the cache key
    :param container: optional container name; when given the data is
        keyed as container info rather than account info
    """
    infocache = req.environ.setdefault('swift.infocache', {})
    infocache[get_cache_key(account, container)] = cache_data