def test_206_multiple_ranges(self):
    fr = FakeResponse(
        206,
        {'Content-Type': 'multipart/byteranges; boundary=asdfasdfasdf'},
        ("--asdfasdfasdf\r\n"
         "Content-Type: application/lunch\r\n"
         "Content-Range: bytes 0-3/10\r\n"
         "\r\n"
         "sand\r\n"
         "--asdfasdfasdf\r\n"
         "Content-Type: application/lunch\r\n"
         "Content-Range: bytes 6-9/10\r\n"
         "\r\n"
         "ches\r\n"
         "--asdfasdfasdf--"))

    doc_iters = http_response_to_document_iters(fr)

    first_byte, last_byte, length, headers, body = next(doc_iters)
    self.assertEqual(first_byte, 0)
    self.assertEqual(last_byte, 3)
    self.assertEqual(length, 10)
    header_dict = HeaderKeyDict(headers)
    self.assertEqual(header_dict.get('Content-Type'), 'application/lunch')
    self.assertEqual(body.read(), 'sand')

    first_byte, last_byte, length, headers, body = next(doc_iters)
    self.assertEqual(first_byte, 6)
    self.assertEqual(last_byte, 9)
    self.assertEqual(length, 10)
    header_dict = HeaderKeyDict(headers)
    self.assertEqual(header_dict.get('Content-Type'), 'application/lunch')
    self.assertEqual(body.read(), 'ches')

    self.assertRaises(StopIteration, next, doc_iters)
def _clean_outgoing_headers(self, headers):
    """
    Removes any headers as per the middleware configuration for
    outgoing responses.

    :param headers: A WSGI start_response style list of headers,
                    [('header1', 'value'), ('header2', 'value'), ...]
    :returns: The same headers list, but with some headers removed as
              per the middleware configuration for outgoing responses.
    """
    headers = HeaderKeyDict(headers)
    for h in headers.keys():
        remove = h in self.outgoing_remove_headers
        if not remove:
            for p in self.outgoing_remove_headers_startswith:
                if h.startswith(p):
                    remove = True
                    break
        if remove:
            if h in self.outgoing_allow_headers:
                remove = False
        if remove:
            for p in self.outgoing_allow_headers_startswith:
                if h.startswith(p):
                    remove = False
                    break
        if remove:
            del headers[h]
    return headers.items()
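# A hedged aside, not from the source: the function above can treat its WSGI
# header list as a dict because Swift's HeaderKeyDict title-cases keys on
# assignment and matches them case-insensitively on lookup and deletion. The
# import path varies by Swift version (swift.common.swob in older trees,
# swift.common.header_key_dict later); the newer path is assumed here.
from swift.common.header_key_dict import HeaderKeyDict

hk = HeaderKeyDict([('x-object-meta-foo', 'bar'), ('CONTENT-LENGTH', '10')])
assert hk['content-length'] == '10'       # lookups ignore case
del hk['X-OBJECT-META-FOO']               # deletions do too
assert list(hk.items()) == [('Content-Length', '10')]  # keys stored title-cased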
def generate_request_headers(self, orig_req=None, additional=None,
                             transfer=False):
    """
    Create a dict of headers to be used in backend requests

    :param orig_req: the original request sent by the client to the proxy
    :param additional: additional headers to send to the backend
    :param transfer: If True, transfer headers from original client request
    :returns: a dictionary of headers
    """
    # Use the additional headers first so they don't overwrite the headers
    # we require.
    headers = HeaderKeyDict(additional) if additional else HeaderKeyDict()
    if transfer:
        self.transfer_headers(orig_req.headers, headers)
    if "x-timestamp" not in headers:
        headers["x-timestamp"] = normalize_timestamp(time.time())
    if orig_req:
        referer = orig_req.as_referer()
    else:
        referer = ""
    headers.update(
        {
            "x-trans-id": self.trans_id,
            "connection": "close",
            "user-agent": "proxy-server %s" % os.getpid(),
            "referer": referer,
        }
    )
    return headers
def generate_request_headers(self, orig_req=None, additional=None,
                             transfer=False):
    """
    Create a dict of headers to be used in backend requests

    :param orig_req: the original request sent by the client to the proxy
    :param additional: additional headers to send to the backend
    :param transfer: If True, transfer headers from original client request
    :returns: a dictionary of headers
    """
    # Use the additional headers first so they don't overwrite the headers
    # we require.
    headers = HeaderKeyDict(additional) if additional else HeaderKeyDict()
    if transfer:
        self.transfer_headers(orig_req.headers, headers)
    headers.setdefault('x-timestamp', normalize_timestamp(time.time()))
    if orig_req:
        referer = orig_req.as_referer()
    else:
        referer = ''
    headers['x-trans-id'] = self.trans_id
    headers['connection'] = 'close'
    headers['user-agent'] = 'proxy-server %s' % os.getpid()
    headers['referer'] = referer
    return headers
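# Hedged usage sketch for the generate_request_headers() variants above
# (`controller` is a hypothetical Controller instance, not from the source):
# because `additional` seeds the dict first, a caller cannot clobber the
# required headers set afterwards, but a caller-supplied x-timestamp
# survives thanks to the membership check / setdefault().
controller.trans_id = 'tx-test'  # hypothetical value for illustration
hdrs = controller.generate_request_headers(
    additional={'X-Trans-Id': 'spoofed',
                'X-Timestamp': '1234567890.00000'})
assert hdrs['x-trans-id'] == 'tx-test'            # required header wins
assert hdrs['x-timestamp'] == '1234567890.00000'  # caller value preserved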
def publish_search(self, path, req):
    """
    Publish a verify request on the queue to gate engine
    """
    headers = HeaderKeyDict(req.headers)
    # self.logger.debug("SWIFTSEARCH available headers: %s"
    #                   % (headers.items()))

    # TODO(mlopezc1) is this actually faster than a regular regex with
    # match? swift code loves this pattern (ex tempurl middleware) but not
    # sure how this will actually perform in high use. Perf comparison
    # later?
    for k, v in headers.items():
        # keep the header if it is in the whitelist of allowed full header
        # names; otherwise keep it only if it matches an allowed prefix
        if k not in self.index_headers and \
                not any(k.startswith(h)
                        for h in self.index_headers_startwith):
            del headers[k]
    self.logger.debug("SWIFTSEARCH sending metadata for indexing: %s"
                      % (headers.items()))

    # TODO(mlopez1) what about renaming keys to something more human? the
    # X- and title format is kinda weird
    exchange = kombu.Exchange(self.exc_str, self.exc_type,
                              durable=self.exc_durable)
    queue = kombu.Queue('search', exchange=exchange, routing_key='search')
    with kombu.Connection(self.conn_str) as connection:
        with connection.Producer(serializer='json') as producer:
            producer.publish({'id': b64encode(path),
                              'path': path,
                              'metadata': headers},
                             exchange=exchange, routing_key='search',
                             declare=[queue])
    return True
def cookie_resp(status, response_headers, exc_info=None):
    resp_headers = HeaderKeyDict(response_headers)
    if 'x-auth-token' in resp_headers:
        auth_token = resp_headers['x-auth-token']
        expires_in = int(resp_headers.get('x-auth-token-expires', 0))
        storage_url = resp_headers.get('x-storage-url', '')
        path_parts = urlparse(storage_url)
        domain = path_parts.hostname
        secure = False
        if path_parts.scheme == 'https':
            secure = True
        if auth_token and domain:
            new_cookie = create_auth_cookie('session', domain,
                                            token=auth_token,
                                            expires_in=expires_in,
                                            secure=secure,
                                            httponly=True)
            response_headers.append(('Set-Cookie', new_cookie))
            new_cookie = create_auth_cookie('storage', domain,
                                            token=quote(storage_url,
                                                        safe=''),
                                            expires_in=expires_in,
                                            secure=secure)
            response_headers.append(('Set-Cookie', new_cookie))
    return start_response(status, response_headers, exc_info)
def test_clean_outgoing_headers(self):
    orh = ''
    oah = ''
    hdrs = {'test-header': 'value'}
    hdrs = HeaderKeyDict(tempurl.TempURL(
        None,
        {'outgoing_remove_headers': orh, 'outgoing_allow_headers': oah}
    )._clean_outgoing_headers(hdrs.items()))
    self.assertTrue('test-header' in hdrs)

    orh = 'test-header'
    oah = ''
    hdrs = {'test-header': 'value'}
    hdrs = HeaderKeyDict(tempurl.TempURL(
        None,
        {'outgoing_remove_headers': orh, 'outgoing_allow_headers': oah}
    )._clean_outgoing_headers(hdrs.items()))
    self.assertTrue('test-header' not in hdrs)

    orh = 'test-header-*'
    oah = ''
    hdrs = {'test-header-one': 'value', 'test-header-two': 'value'}
    hdrs = HeaderKeyDict(tempurl.TempURL(
        None,
        {'outgoing_remove_headers': orh, 'outgoing_allow_headers': oah}
    )._clean_outgoing_headers(hdrs.items()))
    self.assertTrue('test-header-one' not in hdrs)
    self.assertTrue('test-header-two' not in hdrs)

    orh = 'test-header-*'
    oah = 'test-header-two'
    hdrs = {'test-header-one': 'value', 'test-header-two': 'value'}
    hdrs = HeaderKeyDict(tempurl.TempURL(
        None,
        {'outgoing_remove_headers': orh, 'outgoing_allow_headers': oah}
    )._clean_outgoing_headers(hdrs.items()))
    self.assertTrue('test-header-one' not in hdrs)
    self.assertTrue('test-header-two' in hdrs)

    orh = 'test-header-* test-other-header'
    oah = 'test-header-two test-header-yes-*'
    hdrs = {'test-header-one': 'value',
            'test-header-two': 'value',
            'test-other-header': 'value',
            'test-header-yes': 'value',
            'test-header-yes-this': 'value'}
    hdrs = HeaderKeyDict(tempurl.TempURL(
        None,
        {'outgoing_remove_headers': orh, 'outgoing_allow_headers': oah}
    )._clean_outgoing_headers(hdrs.items()))
    self.assertTrue('test-header-one' not in hdrs)
    self.assertTrue('test-header-two' in hdrs)
    self.assertTrue('test-other-header' not in hdrs)
    self.assertTrue('test-header-yes' not in hdrs)
    self.assertTrue('test-header-yes-this' in hdrs)
def test_get_response_headers_with_legacy_data(self):
    broker = backend.AccountBroker(':memory:', account='a')
    now = time.time()
    with mock.patch('time.time', new=lambda: now):
        broker.initialize(Timestamp(now).internal)
    # add some container data
    ts = (Timestamp(t).internal for t in itertools.count(int(now)))
    total_containers = 0
    total_objects = 0
    total_bytes = 0
    for policy in POLICIES:
        delete_timestamp = ts.next()
        put_timestamp = ts.next()
        object_count = int(policy)
        bytes_used = int(policy) * 10
        broker.put_container('c-%s' % policy.name, put_timestamp,
                             delete_timestamp, object_count, bytes_used,
                             int(policy))
        total_containers += 1
        total_objects += object_count
        total_bytes += bytes_used
    expected = HeaderKeyDict({
        'X-Account-Container-Count': total_containers,
        'X-Account-Object-Count': total_objects,
        'X-Account-Bytes-Used': total_bytes,
        'X-Timestamp': Timestamp(now).normal,
        'X-PUT-Timestamp': Timestamp(now).normal,
    })
    for policy in POLICIES:
        prefix = 'X-Account-Storage-Policy-%s-' % policy.name
        expected[prefix + 'Object-Count'] = int(policy)
        expected[prefix + 'Bytes-Used'] = int(policy) * 10

    orig_policy_stats = broker.get_policy_stats

    def stub_policy_stats(*args, **kwargs):
        policy_stats = orig_policy_stats(*args, **kwargs)
        for stats in policy_stats.values():
            # legacy db's won't return container_count
            del stats['container_count']
        return policy_stats

    broker.get_policy_stats = stub_policy_stats

    resp_headers = utils.get_response_headers(broker)
    per_policy_container_headers = [
        h for h in resp_headers
        if h.lower().startswith('x-account-storage-policy-') and
        h.lower().endswith('-container-count')]
    self.assertFalse(per_policy_container_headers)
    for key, value in resp_headers.items():
        expected_value = expected.pop(key)
        self.assertEqual(expected_value, str(value),
                         'value for %r was %r not %r' % (
                             key, value, expected_value))
    self.assertFalse(expected)
def test_fake_swift_sysmeta(self):
    swift = FakeSwift()
    orig_headers = HeaderKeyDict()
    orig_headers.update({sysmeta_header('container', 'acl'): 'test',
                         'x-container-meta-foo': 'bar'})

    swift.register(self.method, self.path, MagicMock(), orig_headers, None)

    self._check_headers(swift, self.method, self.path, orig_headers)

    new_headers = orig_headers.copy()
    del new_headers[sysmeta_header('container', 'acl').title()]
    swift.register(self.method, self.path, MagicMock(), new_headers, None)

    self._check_headers(swift, self.method, self.path, orig_headers)
class FakeConn(object):

    def __init__(self, status, headers=None, body='', **kwargs):
        self.status = status
        try:
            self.reason = RESPONSE_REASONS[self.status][0]
        except Exception:
            self.reason = 'Fake'
        self.body = body
        self.resp_headers = HeaderKeyDict()
        if headers:
            self.resp_headers.update(headers)
        self.with_exc = False
        self.etag = None

    def _update_raw_call_args(self, *args, **kwargs):
        capture_attrs = ('host', 'port', 'method', 'path', 'req_headers',
                         'query_string')
        for attr, value in zip(capture_attrs, args[:len(capture_attrs)]):
            setattr(self, attr, value)
        return self

    def getresponse(self):
        if self.etag:
            self.resp_headers['etag'] = str(self.etag.hexdigest())
        if self.with_exc:
            raise Exception('test')
        return self

    def getheader(self, header, default=None):
        return self.resp_headers.get(header, default)

    def getheaders(self):
        return self.resp_headers.items()

    def read(self, amt=None):
        if amt is None:
            return self.body
        elif isinstance(self.body, six.StringIO):
            return self.body.read(amt)
        else:
            # raise, don't return, so the error actually surfaces
            raise Exception('Not a StringIO entry')

    def send(self, data):
        if not self.etag:
            self.etag = md5()
        self.etag.update(data)
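# Hedged usage sketch for the FakeConn helper above: drive a canned 206
# response and note that getheader() is case-insensitive because the
# headers live in a HeaderKeyDict.
conn = FakeConn(206, headers={'Content-Range': 'bytes 0-3/10'}, body='sand')
resp = conn.getresponse()
assert resp.status == 206
assert resp.reason == 'Partial Content'   # looked up in RESPONSE_REASONS
assert resp.getheader('content-range') == 'bytes 0-3/10'
assert resp.read() == 'sand'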
def generate_request_headers(self, orig_req=None, additional=None,
                             transfer=False):
    # Use the additional headers first so they don't overwrite the headers
    # we require.
    headers = HeaderKeyDict(additional) if additional else HeaderKeyDict()
    if transfer:
        self.transfer_headers(orig_req.headers, headers)
    if 'x-timestamp' not in headers:
        headers['x-timestamp'] = normalize_timestamp(time.time())
    if orig_req:
        referer = orig_req.as_referer()
    else:
        referer = ''
    headers.update({'x-trans-id': self.trans_id,
                    'connection': 'close',
                    'user-agent': 'proxy-server %s' % os.getpid(),
                    'referer': referer})
    return headers
def handle_request(self, env, start_response):
    account_id = env.get('REMOTE_USER', None)
    resp = self._app_call(env)
    headers = HeaderKeyDict(self._response_headers)
    if 'x-nexe-cdr-line' in headers and account_id:
        try:
            total_time, line = headers['x-nexe-cdr-line'].split(', ', 1)
            node_lines = re.split(r'\s*,\s*', line)
            total = []
            for rtime, line in zip(*[iter(node_lines)] * 2):
                accounting_info = line.split(' ')
                total = self.liteacc.cache_accounting_info(
                    account_id, rtime, accounting_info)
            self.liteacc.queue.put(account_id)
            headers['x-nexe-cdr-total'] = ' '.join([str(t) for t in total])
            self._response_headers = [(k, v)
                                      for k, v in headers.iteritems()]
        except ValueError:
            self.logger.warning('Accounting cannot parse CDR entry: %s'
                                % headers['x-nexe-cdr-line'])
    start_response(self._response_status, self._response_headers,
                   self._response_exc_info)
    return resp
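# The pairing idiom in handle_request() above deserves a note:
# zip(*[iter(seq)] * 2) advances one shared iterator twice per output
# tuple, grouping a flat list into consecutive pairs. Standalone demo:
node_lines = ['rtime1', 'cdr line 1', 'rtime2', 'cdr line 2']
pairs = zip(*[iter(node_lines)] * 2)
assert list(pairs) == [('rtime1', 'cdr line 1'), ('rtime2', 'cdr line 2')]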
def test_200(self):
    fr = FakeResponse(
        200,
        {'Content-Length': '10', 'Content-Type': 'application/lunch'},
        'sandwiches')

    doc_iters = http_response_to_document_iters(fr)
    first_byte, last_byte, length, headers, body = next(doc_iters)
    self.assertEqual(first_byte, 0)
    self.assertEqual(last_byte, 9)
    self.assertEqual(length, 10)
    header_dict = HeaderKeyDict(headers)
    self.assertEqual(header_dict.get('Content-Length'), '10')
    self.assertEqual(header_dict.get('Content-Type'), 'application/lunch')
    self.assertEqual(body.read(), 'sandwiches')
    self.assertRaises(StopIteration, next, doc_iters)

    fr = FakeResponse(
        200,
        {'Transfer-Encoding': 'chunked',
         'Content-Type': 'application/lunch'},
        'sandwiches')

    doc_iters = http_response_to_document_iters(fr)
    first_byte, last_byte, length, headers, body = next(doc_iters)
    self.assertEqual(first_byte, 0)
    self.assertIsNone(last_byte)
    self.assertIsNone(length)
    header_dict = HeaderKeyDict(headers)
    self.assertEqual(header_dict.get('Transfer-Encoding'), 'chunked')
    self.assertEqual(header_dict.get('Content-Type'), 'application/lunch')
    self.assertEqual(body.read(), 'sandwiches')
    self.assertRaises(StopIteration, next, doc_iters)
def test_get_response_headers_with_data(self):
    broker = backend.AccountBroker(':memory:', account='a')
    now = time.time()
    with mock.patch('time.time', new=lambda: now):
        broker.initialize(Timestamp(now).internal)
    # add some container data
    ts = (Timestamp(t).internal for t in itertools.count(int(now)))
    total_containers = 0
    total_objects = 0
    total_bytes = 0
    for policy in POLICIES:
        delete_timestamp = ts.next()
        put_timestamp = ts.next()
        object_count = int(policy)
        bytes_used = int(policy) * 10
        broker.put_container('c-%s' % policy.name, put_timestamp,
                             delete_timestamp, object_count, bytes_used,
                             int(policy))
        total_containers += 1
        total_objects += object_count
        total_bytes += bytes_used
    expected = HeaderKeyDict({
        'X-Account-Container-Count': total_containers,
        'X-Account-Object-Count': total_objects,
        'X-Account-Bytes-Used': total_bytes,
        'X-Timestamp': Timestamp(now).normal,
        'X-PUT-Timestamp': Timestamp(now).normal,
    })
    for policy in POLICIES:
        prefix = 'X-Account-Storage-Policy-%s-' % policy.name
        expected[prefix + 'Object-Count'] = int(policy)
        expected[prefix + 'Bytes-Used'] = int(policy) * 10
    resp_headers = utils.get_response_headers(broker)
    for key, value in resp_headers.items():
        expected_value = expected.pop(key)
        self.assertEqual(expected_value, str(value),
                         'value for %r was %r not %r' % (
                             key, value, expected_value))
    self.assertFalse(expected)
def test_206_single_range(self):
    fr = FakeResponse(
        206,
        {'Content-Length': '8', 'Content-Type': 'application/lunch',
         'Content-Range': 'bytes 1-8/10'},
        'andwiche')

    doc_iters = http_response_to_document_iters(fr)
    first_byte, last_byte, length, headers, body = next(doc_iters)
    self.assertEqual(first_byte, 1)
    self.assertEqual(last_byte, 8)
    self.assertEqual(length, 10)
    header_dict = HeaderKeyDict(headers)
    self.assertEqual(header_dict.get('Content-Length'), '8')
    self.assertEqual(header_dict.get('Content-Type'), 'application/lunch')
    self.assertEqual(body.read(), 'andwiche')
    self.assertRaises(StopIteration, next, doc_iters)

    # Chunked response should be treated in the same way as non-chunked one
    fr = FakeResponse(
        206,
        {'Transfer-Encoding': 'chunked',
         'Content-Type': 'application/lunch',
         'Content-Range': 'bytes 1-8/10'},
        'andwiche')

    doc_iters = http_response_to_document_iters(fr)
    first_byte, last_byte, length, headers, body = next(doc_iters)
    self.assertEqual(first_byte, 1)
    self.assertEqual(last_byte, 8)
    self.assertEqual(length, 10)
    header_dict = HeaderKeyDict(headers)
    self.assertEqual(header_dict.get('Content-Type'), 'application/lunch')
    self.assertEqual(body.read(), 'andwiche')
    self.assertRaises(StopIteration, next, doc_iters)
def test_clean_outgoing_headers(self):
    orh = ''
    oah = ''
    hdrs = {'test-header': 'value'}
    hdrs = HeaderKeyDict(
        tempurl.TempURL(None, {
            'outgoing_remove_headers': orh,
            'outgoing_allow_headers': oah
        })._clean_outgoing_headers(hdrs.iteritems()))
    self.assertTrue('test-header' in hdrs)

    orh = 'test-header'
    oah = ''
    hdrs = {'test-header': 'value'}
    hdrs = HeaderKeyDict(
        tempurl.TempURL(None, {
            'outgoing_remove_headers': orh,
            'outgoing_allow_headers': oah
        })._clean_outgoing_headers(hdrs.iteritems()))
    self.assertTrue('test-header' not in hdrs)

    orh = 'test-header-*'
    oah = ''
    hdrs = {'test-header-one': 'value', 'test-header-two': 'value'}
    hdrs = HeaderKeyDict(
        tempurl.TempURL(None, {
            'outgoing_remove_headers': orh,
            'outgoing_allow_headers': oah
        })._clean_outgoing_headers(hdrs.iteritems()))
    self.assertTrue('test-header-one' not in hdrs)
    self.assertTrue('test-header-two' not in hdrs)

    orh = 'test-header-*'
    oah = 'test-header-two'
    hdrs = {'test-header-one': 'value', 'test-header-two': 'value'}
    hdrs = HeaderKeyDict(
        tempurl.TempURL(None, {
            'outgoing_remove_headers': orh,
            'outgoing_allow_headers': oah
        })._clean_outgoing_headers(hdrs.iteritems()))
    self.assertTrue('test-header-one' not in hdrs)
    self.assertTrue('test-header-two' in hdrs)

    orh = 'test-header-* test-other-header'
    oah = 'test-header-two test-header-yes-*'
    hdrs = {
        'test-header-one': 'value',
        'test-header-two': 'value',
        'test-other-header': 'value',
        'test-header-yes': 'value',
        'test-header-yes-this': 'value'
    }
    hdrs = HeaderKeyDict(
        tempurl.TempURL(None, {
            'outgoing_remove_headers': orh,
            'outgoing_allow_headers': oah
        })._clean_outgoing_headers(hdrs.iteritems()))
    self.assertTrue('test-header-one' not in hdrs)
    self.assertTrue('test-header-two' in hdrs)
    self.assertTrue('test-other-header' not in hdrs)
    self.assertTrue('test-header-yes' not in hdrs)
    self.assertTrue('test-header-yes-this' in hdrs)
def test_extract_metadata(self):
    self.app.register('HEAD', '/v1/a/c?extract-archive=tar',
                      HTTPNoContent, {}, None)
    self.app.register('PUT', '/v1/a/c/obj1?extract-archive=tar',
                      HTTPCreated, {}, None)
    self.app.register('PUT', '/v1/a/c/obj2?extract-archive=tar',
                      HTTPCreated, {}, None)

    # It's a real pain to instantiate TarInfo objects directly; they
    # really want to come from a file on disk or a tarball. So, we write
    # out some files and add pax headers to them as they get placed into
    # the tarball.
    with open(os.path.join(self.testdir, "obj1"), "w") as fh1:
        fh1.write("obj1 contents\n")
    with open(os.path.join(self.testdir, "obj2"), "w") as fh2:
        fh2.write("obj2 contents\n")

    tar_ball = StringIO()
    tar_file = tarfile.TarFile.open(fileobj=tar_ball, mode="w",
                                    format=tarfile.PAX_FORMAT)

    # With GNU tar 1.27.1 or later (possibly 1.27 as well), a file with
    # extended attribute user.thingy = dingy gets put into the tarfile
    # with pax_headers containing key/value pair
    # (SCHILY.xattr.user.thingy, dingy), both unicode strings (py2: type
    # unicode, not type str).
    #
    # With BSD tar (libarchive), you get key/value pair
    # (LIBARCHIVE.xattr.user.thingy, dingy), which strikes me as
    # gratuitous incompatibility.
    #
    # Still, we'll support uploads with both. Just heap more code on the
    # problem until you can forget it's under there.
    with open(os.path.join(self.testdir, "obj1")) as fh1:
        tar_info1 = tar_file.gettarinfo(fileobj=fh1, arcname="obj1")
        tar_info1.pax_headers[u'SCHILY.xattr.user.mime_type'] = \
            u'application/food-diary'
        tar_info1.pax_headers[u'SCHILY.xattr.user.meta.lunch'] = \
            u'sopa de albóndigas'
        tar_info1.pax_headers[
            u'SCHILY.xattr.user.meta.afternoon-snack'] = \
            u'gigantic bucket of coffee'
        tar_file.addfile(tar_info1, fh1)

    with open(os.path.join(self.testdir, "obj2")) as fh2:
        tar_info2 = tar_file.gettarinfo(fileobj=fh2, arcname="obj2")
        tar_info2.pax_headers[
            u'LIBARCHIVE.xattr.user.meta.muppet'] = u'bert'
        tar_info2.pax_headers[
            u'LIBARCHIVE.xattr.user.meta.cat'] = u'fluffy'
        tar_info2.pax_headers[
            u'LIBARCHIVE.xattr.user.notmeta'] = u'skipped'
        tar_file.addfile(tar_info2, fh2)

    tar_ball.seek(0)

    req = Request.blank('/v1/a/c?extract-archive=tar')
    req.environ['REQUEST_METHOD'] = 'PUT'
    req.environ['wsgi.input'] = tar_ball
    req.headers['transfer-encoding'] = 'chunked'
    req.headers['accept'] = 'application/json;q=1.0'

    resp = req.get_response(self.bulk)
    self.assertEqual(resp.status_int, 200)

    # sanity check to make sure the upload worked
    upload_status = utils.json.loads(resp.body)
    self.assertEqual(upload_status['Number Files Created'], 2)

    put1_headers = HeaderKeyDict(self.app.calls_with_headers[1][2])
    self.assertEqual(
        put1_headers.get('Content-Type'), 'application/food-diary')
    self.assertEqual(
        put1_headers.get('X-Object-Meta-Lunch'),
        'sopa de alb\xc3\xb3ndigas')
    self.assertEqual(
        put1_headers.get('X-Object-Meta-Afternoon-Snack'),
        'gigantic bucket of coffee')

    put2_headers = HeaderKeyDict(self.app.calls_with_headers[2][2])
    self.assertEqual(put2_headers.get('X-Object-Meta-Muppet'), 'bert')
    self.assertEqual(put2_headers.get('X-Object-Meta-Cat'), 'fluffy')
    self.assertEqual(put2_headers.get('Content-Type'), None)
    self.assertEqual(put2_headers.get('X-Object-Meta-Blah'), None)
def delete_at_update(self, op, delete_at, account, container, obj,
                     request, objdevice, policy):
    """
    Update the expiring objects container when objects are updated.

    :param op: operation performed (ex: 'PUT', or 'DELETE')
    :param delete_at: scheduled delete in UNIX seconds, int
    :param account: account name for the object
    :param container: container name for the object
    :param obj: object name
    :param request: the original request driving the update
    :param objdevice: device name that the object is in
    :param policy: the BaseStoragePolicy instance (used for tmp dir)
    """
    if config_true_value(
            request.headers.get('x-backend-replication', 'f')):
        return
    delete_at = normalize_delete_at_timestamp(delete_at)
    updates = [(None, None)]

    partition = None
    hosts = contdevices = [None]
    headers_in = request.headers
    headers_out = HeaderKeyDict({
        # system accounts are always Policy-0
        'X-Backend-Storage-Policy-Index': 0,
        'x-timestamp': request.timestamp.internal,
        'x-trans-id': headers_in.get('x-trans-id', '-'),
        'referer': request.as_referer()})
    if op != 'DELETE':
        delete_at_container = headers_in.get('X-Delete-At-Container', None)
        if not delete_at_container:
            self.logger.warning(
                'X-Delete-At-Container header must be specified for '
                'expiring objects background %s to work properly. Making '
                'best guess as to the container name for now.' % op)
            # TODO(gholt): In a future release, change the above warning to
            # a raised exception and remove the guess code below.
            delete_at_container = get_expirer_container(
                delete_at, self.expiring_objects_container_divisor,
                account, container, obj)
        partition = headers_in.get('X-Delete-At-Partition', None)
        hosts = headers_in.get('X-Delete-At-Host', '')
        contdevices = headers_in.get('X-Delete-At-Device', '')
        updates = [upd for upd in
                   zip((h.strip() for h in hosts.split(',')),
                       (c.strip() for c in contdevices.split(',')))
                   if all(upd) and partition]
        if not updates:
            updates = [(None, None)]
        headers_out['x-size'] = '0'
        headers_out['x-content-type'] = 'text/plain'
        headers_out['x-etag'] = 'd41d8cd98f00b204e9800998ecf8427e'
    else:
        # DELETEs of old expiration data have no way of knowing what the
        # old X-Delete-At-Container was at the time of the initial setting
        # of the data, so a best guess is made here.
        # Worst case is a DELETE is issued now for something that doesn't
        # exist there and the original data is left where it is, where
        # it will be ignored when the expirer eventually tries to issue
        # the object DELETE later since the X-Delete-At value won't match
        # up.
        delete_at_container = get_expirer_container(
            delete_at, self.expiring_objects_container_divisor,
            account, container, obj)
    delete_at_container = normalize_delete_at_timestamp(
        delete_at_container)

    for host, contdevice in updates:
        self.async_update(
            op, self.expiring_objects_account, delete_at_container,
            '%s-%s/%s/%s' % (delete_at, account, container, obj),
            host, partition, contdevice, headers_out, objdevice,
            policy)
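# How delete_at_update() above pairs the X-Delete-At-Host and
# X-Delete-At-Device headers: the comma-separated values are zipped
# positionally and any incomplete pair is dropped by the all(upd) filter
# (the partition check is elided in this standalone sketch).
hosts = '10.0.0.1:6001, 10.0.0.2:6001'
contdevices = 'sda1,'  # second device missing
updates = [upd for upd in
           zip((h.strip() for h in hosts.split(',')),
               (c.strip() for c in contdevices.split(',')))
           if all(upd)]
assert updates == [('10.0.0.1:6001', 'sda1')]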
def PUT(self, request):
    """Handle HTTP PUT requests for the Swift Object Server."""
    policy_index = request.headers['X-Backend-Storage-Policy-Index']
    device, partition, account, container, obj, policy = \
        get_name_and_placement(request, 5, 5, True)
    req_timestamp = valid_timestamp(request)
    error_response = check_object_creation(request, obj)
    if error_response:
        return error_response
    new_delete_at = int(request.headers.get('X-Delete-At') or 0)
    if new_delete_at and new_delete_at < time.time():
        return HTTPBadRequest(body='X-Delete-At in past', request=request,
                              content_type='text/plain')
    try:
        fsize = request.message_length()
    except ValueError as e:
        return HTTPBadRequest(body=str(e), request=request,
                              content_type='text/plain')
    # In case of multipart-MIME put, the proxy sends a chunked request,
    # but may let us know the real content length so we can verify that
    # we have enough disk space to hold the object.
    if fsize is None:
        fsize = request.headers.get('X-Backend-Obj-Content-Length')
    if fsize is not None:
        policy = 0
        self.container_update(
            'DELETE', account, container, obj, request,
            HeaderKeyDict({'x-timestamp': req_timestamp.internal}),
            device, policy)
        try:
            fsize = int(fsize)
        except ValueError as e:
            return HTTPBadRequest(body=str(e), request=request,
                                  content_type='text/plain')
    try:
        disk_file = self.get_diskfile(
            device, partition, account, container, obj,
            policy=policy)
    except DiskFileDeviceUnavailable:
        return HTTPInsufficientStorage(drive=device, request=request)
    try:
        orig_metadata = disk_file.read_metadata()
    except DiskFileXattrNotSupported:
        return HTTPInsufficientStorage(drive=device, request=request)
    except (DiskFileNotExist, DiskFileQuarantined):
        orig_metadata = {}

    # Checks for If-None-Match
    if request.if_none_match is not None and orig_metadata:
        if '*' in request.if_none_match:
            # File exists already so return 412
            return HTTPPreconditionFailed(request=request)
        if orig_metadata.get('ETag') in request.if_none_match:
            # The current ETag matches, so return 412
            return HTTPPreconditionFailed(request=request)

    orig_timestamp = Timestamp(orig_metadata.get('X-Timestamp', 0))
    if orig_timestamp >= req_timestamp:
        return HTTPConflict(
            request=request,
            headers={'X-Backend-Timestamp': orig_timestamp.internal})
    orig_delete_at = int(orig_metadata.get('X-Delete-At') or 0)
    upload_expiration = time.time() + self.max_upload_time
    etag = md5()
    elapsed_time = 0
    try:
        with disk_file.create(size=fsize) as writer:
            upload_size = 0

            # If the proxy wants to send us object metadata after the
            # object body, it sets some headers. We have to tell the
            # proxy, in the 100 Continue response, that we're able to
            # parse a multipart MIME document and extract the object and
            # metadata from it. If we don't, then the proxy won't
            # actually send the footer metadata.
            have_metadata_footer = False
            use_multiphase_commit = False
            mime_documents_iter = iter([])
            obj_input = request.environ['wsgi.input']

            hundred_continue_headers = []
            if config_true_value(
                    request.headers.get(
                        'X-Backend-Obj-Multiphase-Commit')):
                use_multiphase_commit = True
                hundred_continue_headers.append(
                    ('X-Obj-Multiphase-Commit', 'yes'))

            if config_true_value(
                    request.headers.get('X-Backend-Obj-Metadata-Footer')):
                have_metadata_footer = True
                hundred_continue_headers.append(
                    ('X-Obj-Metadata-Footer', 'yes'))

            if have_metadata_footer or use_multiphase_commit:
                obj_input.set_hundred_continue_response_headers(
                    hundred_continue_headers)
                mime_boundary = request.headers.get(
                    'X-Backend-Obj-Multipart-Mime-Boundary')
                if not mime_boundary:
                    return HTTPBadRequest("no MIME boundary")

                try:
                    with ChunkReadTimeout(self.client_timeout):
                        mime_documents_iter = iter_mime_headers_and_bodies(
                            request.environ['wsgi.input'],
                            mime_boundary, self.network_chunk_size)
                        _junk_hdrs, obj_input = next(mime_documents_iter)
                except ChunkReadTimeout:
                    return HTTPRequestTimeout(request=request)

            timeout_reader = self._make_timeout_reader(obj_input)
            try:
                for chunk in iter(timeout_reader, ''):
                    start_time = time.time()
                    if start_time > upload_expiration:
                        self.logger.increment('PUT.timeouts')
                        return HTTPRequestTimeout(request=request)
                    etag.update(chunk)
                    upload_size = writer.write(chunk)
                    elapsed_time += time.time() - start_time
            except ChunkReadTimeout:
                return HTTPRequestTimeout(request=request)
            if upload_size:
                self.logger.transfer_rate(
                    'PUT.' + device + '.timing', elapsed_time,
                    upload_size)
            if fsize is not None and fsize != upload_size:
                return HTTPClientDisconnect(request=request)

            footer_meta = {}
            if have_metadata_footer:
                footer_meta = self._read_metadata_footer(
                    mime_documents_iter)

            request_etag = (footer_meta.get('etag') or
                            request.headers.get('etag', '')).lower()
            etag = etag.hexdigest()
            if request_etag and request_etag != etag:
                return HTTPUnprocessableEntity(request=request)

            if policy_index == '0':
                metadata = {
                    'X-Timestamp': request.timestamp.internal,
                    'Content-Type': request.headers['content-type'],
                    'ETag': etag,
                    'Content-Length': str(upload_size),
                    'Disk-Info': 'ssd',
                    'Hot-Flag': 'hot',
                }
            else:
                metadata = {
                    'X-Timestamp': request.timestamp.internal,
                    'Content-Type': request.headers['content-type'],
                    'ETag': etag,
                    'Content-Length': str(upload_size),
                    'Disk-Info': 'hdd',
                    'Hot-Flag': request.headers['hot-flag'],
                }
            metadata.update(val for val in request.headers.iteritems()
                            if is_sys_or_user_meta('object', val[0]))
            metadata.update(val for val in footer_meta.iteritems()
                            if is_sys_or_user_meta('object', val[0]))
            headers_to_copy = (
                request.headers.get(
                    'X-Backend-Replication-Headers', '').split() +
                list(self.allowed_headers))
            for header_key in headers_to_copy:
                if header_key in request.headers:
                    header_caps = header_key.title()
                    metadata[header_caps] = request.headers[header_key]
            writer.put(metadata)

            # if the PUT requires a two-phase commit (a data and a commit
            # phase) send the proxy server another 100-continue response
            # to indicate that we are finished writing object data
            if use_multiphase_commit:
                request.environ['wsgi.input'].\
                    send_hundred_continue_response()
                if not self._read_put_commit_message(mime_documents_iter):
                    return HTTPServerError(request=request)
                # got 2nd phase confirmation, write a timestamp.durable
                # state file to indicate a successful PUT
                writer.commit(request.timestamp)

            # Drain any remaining MIME docs from the socket. There
            # shouldn't be any, but we must read the whole request body.
            try:
                while True:
                    with ChunkReadTimeout(self.client_timeout):
                        _junk_hdrs, _junk_body = next(mime_documents_iter)
                    drain(_junk_body, self.network_chunk_size,
                          self.client_timeout)
            except ChunkReadTimeout:
                raise HTTPClientDisconnect()
            except StopIteration:
                pass

    except (DiskFileXattrNotSupported, DiskFileNoSpace):
        return HTTPInsufficientStorage(drive=device, request=request)
    if orig_delete_at != new_delete_at:
        if new_delete_at:
            self.delete_at_update(
                'PUT', new_delete_at, account, container, obj, request,
                device, policy)
        if orig_delete_at:
            self.delete_at_update(
                'DELETE', orig_delete_at, account, container, obj,
                request, device, policy)
    update_headers = HeaderKeyDict({
        'x-size': metadata['Content-Length'],
        'x-content-type': metadata['Content-Type'],
        'x-timestamp': metadata['X-Timestamp'],
        'x-etag': metadata['ETag'],
        'x-disk': metadata['Disk-Info'],
        'x-hot-flag': metadata['Hot-Flag']})
    # apply any container update header overrides sent with request
    self._check_container_override(update_headers, request.headers)
    self._check_container_override(update_headers, footer_meta)
    # insert for hybrid
    policy = 0
    self.container_update(
        'PUT', account, container, obj, request, update_headers,
        device, policy)
    return HTTPCreated(request=request, etag=etag)
def gen_headers(hdrs_in=None, add_ts=False):
    hdrs_out = HeaderKeyDict(hdrs_in) if hdrs_in else HeaderKeyDict()
    if add_ts:
        hdrs_out['X-Timestamp'] = Timestamp(time()).internal
    hdrs_out['User-Agent'] = 'direct-client %s' % os.getpid()
    return hdrs_out
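# Hedged usage sketch for gen_headers() above: the result mixes the
# caller's headers with the generated ones, and HeaderKeyDict makes the
# later lookups case-insensitive.
hdrs = gen_headers({'X-Backend-Storage-Policy-Index': '1'}, add_ts=True)
assert hdrs['User-Agent'].startswith('direct-client ')
assert 'X-Timestamp' in hdrs
assert hdrs['x-backend-storage-policy-index'] == '1'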
def test_clean_outgoing_headers(self):
    orh = ""
    oah = ""
    hdrs = {"test-header": "value"}
    hdrs = HeaderKeyDict(
        tempurl.TempURL(
            None,
            {"outgoing_remove_headers": orh, "outgoing_allow_headers": oah}
        )._clean_outgoing_headers(hdrs.items())
    )
    self.assertTrue("test-header" in hdrs)

    orh = "test-header"
    oah = ""
    hdrs = {"test-header": "value"}
    hdrs = HeaderKeyDict(
        tempurl.TempURL(
            None,
            {"outgoing_remove_headers": orh, "outgoing_allow_headers": oah}
        )._clean_outgoing_headers(hdrs.items())
    )
    self.assertTrue("test-header" not in hdrs)

    orh = "test-header-*"
    oah = ""
    hdrs = {"test-header-one": "value", "test-header-two": "value"}
    hdrs = HeaderKeyDict(
        tempurl.TempURL(
            None,
            {"outgoing_remove_headers": orh, "outgoing_allow_headers": oah}
        )._clean_outgoing_headers(hdrs.items())
    )
    self.assertTrue("test-header-one" not in hdrs)
    self.assertTrue("test-header-two" not in hdrs)

    orh = "test-header-*"
    oah = "test-header-two"
    hdrs = {"test-header-one": "value", "test-header-two": "value"}
    hdrs = HeaderKeyDict(
        tempurl.TempURL(
            None,
            {"outgoing_remove_headers": orh, "outgoing_allow_headers": oah}
        )._clean_outgoing_headers(hdrs.items())
    )
    self.assertTrue("test-header-one" not in hdrs)
    self.assertTrue("test-header-two" in hdrs)

    orh = "test-header-* test-other-header"
    oah = "test-header-two test-header-yes-*"
    hdrs = {
        "test-header-one": "value",
        "test-header-two": "value",
        "test-other-header": "value",
        "test-header-yes": "value",
        "test-header-yes-this": "value",
    }
    hdrs = HeaderKeyDict(
        tempurl.TempURL(
            None,
            {"outgoing_remove_headers": orh, "outgoing_allow_headers": oah}
        )._clean_outgoing_headers(hdrs.items())
    )
    self.assertTrue("test-header-one" not in hdrs)
    self.assertTrue("test-header-two" in hdrs)
    self.assertTrue("test-other-header" not in hdrs)
    self.assertTrue("test-header-yes" not in hdrs)
    self.assertTrue("test-header-yes-this" in hdrs)
def account_update(self, req, account, container, broker):
    """
    Update the account server(s) with latest container info.

    :param req: swob.Request object
    :param account: account name
    :param container: container name
    :param broker: container DB broker object
    :returns: if all the account requests return a 404 error code,
              HTTPNotFound response object,
              if the account cannot be updated due to a malformed header,
              an HTTPBadRequest response object,
              otherwise None.
    """
    account_hosts = [h.strip() for h in
                     req.headers.get('X-Account-Host', '').split(',')]
    account_devices = [d.strip() for d in
                       req.headers.get('X-Account-Device', '').split(',')]
    account_partition = req.headers.get('X-Account-Partition', '')

    if len(account_hosts) != len(account_devices):
        # This shouldn't happen unless there's a bug in the proxy,
        # but if there is, we want to know about it.
        self.logger.error(_('ERROR Account update failed: different '
                            'numbers of hosts and devices in request: '
                            '"%s" vs "%s"' %
                            (req.headers.get('X-Account-Host', ''),
                             req.headers.get('X-Account-Device', ''))))
        return HTTPBadRequest(req=req)

    if account_partition:
        updates = zip(account_hosts, account_devices)
    else:
        updates = []

    account_404s = 0

    for account_host, account_device in updates:
        account_ip, account_port = account_host.rsplit(':', 1)
        new_path = '/' + '/'.join([account, container])
        info = broker.get_info()
        account_headers = HeaderKeyDict({
            'x-put-timestamp': info['put_timestamp'],
            'x-delete-timestamp': info['delete_timestamp'],
            'x-object-count': info['object_count'],
            'x-bytes-used': info['bytes_used'],
            'x-trans-id': req.headers.get('x-trans-id', '-'),
            'user-agent': 'container-server %s' % os.getpid(),
            'referer': req.as_referer()})
        if req.headers.get('x-account-override-deleted', 'no').lower() == \
                'yes':
            account_headers['x-account-override-deleted'] = 'yes'
        try:
            with ConnectionTimeout(self.conn_timeout):
                conn = http_connect(
                    account_ip, account_port, account_device,
                    account_partition, 'PUT', new_path, account_headers)
            with Timeout(self.node_timeout):
                account_response = conn.getresponse()
                account_response.read()
                if account_response.status == HTTP_NOT_FOUND:
                    account_404s += 1
                elif not is_success(account_response.status):
                    self.logger.error(_(
                        'ERROR Account update failed '
                        'with %(ip)s:%(port)s/%(device)s (will retry '
                        'later): Response %(status)s %(reason)s'),
                        {'ip': account_ip, 'port': account_port,
                         'device': account_device,
                         'status': account_response.status,
                         'reason': account_response.reason})
        except (Exception, Timeout):
            self.logger.exception(_(
                'ERROR account update failed with '
                '%(ip)s:%(port)s/%(device)s (will retry later)'),
                {'ip': account_ip, 'port': account_port,
                 'device': account_device})
    if updates and account_404s == len(updates):
        return HTTPNotFound(req=req)
    else:
        return None
def DELETE(self, request):
    """Handle HTTP DELETE requests for the Swift Object Server."""
    device, partition, account, container, obj, policy_idx = \
        get_name_and_placement(request, 5, 5, True)
    req_timestamp = valid_timestamp(request)
    try:
        disk_file = self.get_diskfile(
            device, partition, account, container, obj,
            policy_idx=policy_idx)
    except DiskFileDeviceUnavailable:
        return HTTPInsufficientStorage(drive=device, request=request)
    try:
        orig_metadata = disk_file.read_metadata()
    except DiskFileXattrNotSupported:
        return HTTPInsufficientStorage(drive=device, request=request)
    except DiskFileExpired as e:
        orig_timestamp = e.timestamp
        orig_metadata = e.metadata
        response_class = HTTPNotFound
    except DiskFileDeleted as e:
        orig_timestamp = e.timestamp
        orig_metadata = {}
        response_class = HTTPNotFound
    except (DiskFileNotExist, DiskFileQuarantined):
        orig_timestamp = 0
        orig_metadata = {}
        response_class = HTTPNotFound
    else:
        orig_timestamp = Timestamp(orig_metadata.get('X-Timestamp', 0))
        if orig_timestamp < req_timestamp:
            response_class = HTTPNoContent
        else:
            response_class = HTTPConflict
    response_timestamp = max(orig_timestamp, req_timestamp)
    orig_delete_at = int(orig_metadata.get('X-Delete-At') or 0)
    try:
        req_if_delete_at_val = request.headers['x-if-delete-at']
        req_if_delete_at = int(req_if_delete_at_val)
    except KeyError:
        pass
    except ValueError:
        return HTTPBadRequest(
            request=request,
            body='Bad X-If-Delete-At header value')
    else:
        # request includes x-if-delete-at; we must not place a tombstone
        # if we can not verify the x-if-delete-at time
        if not orig_timestamp:
            # no object found at all
            return HTTPNotFound()
        if orig_delete_at != req_if_delete_at:
            return HTTPPreconditionFailed(
                request=request,
                body='X-If-Delete-At and X-Delete-At do not match')
        else:
            # differentiate success from no object at all
            response_class = HTTPNoContent
    if orig_delete_at:
        self.delete_at_update('DELETE', orig_delete_at, account,
                              container, obj, request, device,
                              policy_idx)
    if orig_timestamp < req_timestamp:
        disk_file.delete(req_timestamp)
        self.container_update(
            'DELETE', account, container, obj, request,
            HeaderKeyDict({'x-timestamp': req_timestamp.internal}),
            device, policy_idx)
    return response_class(
        request=request,
        headers={'X-Backend-Timestamp': response_timestamp.internal})
def PUT(self, request):
    """Handle HTTP PUT requests for the Swift Object Server."""
    device, partition, account, container, obj, policy_idx = \
        get_name_and_placement(request, 5, 5, True)
    if 'x-timestamp' not in request.headers or \
            not check_float(request.headers['x-timestamp']):
        return HTTPBadRequest(body='Missing timestamp', request=request,
                              content_type='text/plain')
    error_response = check_object_creation(request, obj)
    if error_response:
        return error_response
    new_delete_at = int(request.headers.get('X-Delete-At') or 0)
    if new_delete_at and new_delete_at < time.time():
        return HTTPBadRequest(body='X-Delete-At in past', request=request,
                              content_type='text/plain')
    try:
        fsize = request.message_length()
    except ValueError as e:
        return HTTPBadRequest(body=str(e), request=request,
                              content_type='text/plain')
    try:
        disk_file = self.get_diskfile(
            device, partition, account, container, obj,
            policy_idx=policy_idx)
    except DiskFileDeviceUnavailable:
        return HTTPInsufficientStorage(drive=device, request=request)
    try:
        orig_metadata = disk_file.read_metadata()
    except (DiskFileNotExist, DiskFileQuarantined):
        orig_metadata = {}

    # Checks for If-None-Match
    if request.if_none_match is not None and orig_metadata:
        if '*' in request.if_none_match:
            # File exists already so return 412
            return HTTPPreconditionFailed(request=request)
        if orig_metadata.get('ETag') in request.if_none_match:
            # The current ETag matches, so return 412
            return HTTPPreconditionFailed(request=request)

    orig_timestamp = orig_metadata.get('X-Timestamp')
    if orig_timestamp and orig_timestamp >= request.headers['x-timestamp']:
        return HTTPConflict(request=request)
    orig_delete_at = int(orig_metadata.get('X-Delete-At') or 0)
    upload_expiration = time.time() + self.max_upload_time
    etag = md5()
    elapsed_time = 0
    try:
        with disk_file.create(size=fsize) as writer:
            upload_size = 0

            def timeout_reader():
                with ChunkReadTimeout(self.client_timeout):
                    return request.environ['wsgi.input'].read(
                        self.network_chunk_size)

            try:
                for chunk in iter(lambda: timeout_reader(), ''):
                    start_time = time.time()
                    if start_time > upload_expiration:
                        self.logger.increment('PUT.timeouts')
                        return HTTPRequestTimeout(request=request)
                    etag.update(chunk)
                    upload_size = writer.write(chunk)
                    elapsed_time += time.time() - start_time
            except ChunkReadTimeout:
                return HTTPRequestTimeout(request=request)
            if upload_size:
                self.logger.transfer_rate(
                    'PUT.' + device + '.timing', elapsed_time,
                    upload_size)
            if fsize is not None and fsize != upload_size:
                return HTTPClientDisconnect(request=request)
            etag = etag.hexdigest()
            if 'etag' in request.headers and \
                    request.headers['etag'].lower() != etag:
                return HTTPUnprocessableEntity(request=request)
            metadata = {
                'X-Timestamp': request.headers['x-timestamp'],
                'Content-Type': request.headers['content-type'],
                'ETag': etag,
                'Content-Length': str(upload_size),
            }
            metadata.update(val for val in request.headers.iteritems()
                            if is_user_meta('object', val[0]))
            for header_key in (
                    request.headers.get('X-Backend-Replication-Headers') or
                    self.allowed_headers):
                if header_key in request.headers:
                    header_caps = header_key.title()
                    metadata[header_caps] = request.headers[header_key]
            writer.put(metadata)
    except DiskFileNoSpace:
        return HTTPInsufficientStorage(drive=device, request=request)
    if orig_delete_at != new_delete_at:
        if new_delete_at:
            self.delete_at_update(
                'PUT', new_delete_at, account, container, obj, request,
                device)
        if orig_delete_at:
            self.delete_at_update(
                'DELETE', orig_delete_at, account, container, obj,
                request, device)
    self.container_update(
        'PUT', account, container, obj, request,
        HeaderKeyDict({
            'x-size': metadata['Content-Length'],
            'x-content-type': metadata['Content-Type'],
            'x-timestamp': metadata['X-Timestamp'],
            'x-etag': metadata['ETag']}),
        device, policy_idx)
    return HTTPCreated(request=request, etag=etag)
def reconstruct_fa(self, job, node, metadata):
    """
    Reconstructs a fragment archive - this method is called from ssync
    after a remote node responds that is missing this object - the local
    diskfile is opened to provide metadata - but to reconstruct the
    missing fragment archive we must connect to multiple object servers.

    :param job: job from ssync_sender
    :param node: node that we're rebuilding to
    :param metadata: the metadata to attach to the rebuilt archive
    :returns: a DiskFile like class for use by ssync
    :raises DiskFileError: if the fragment archive cannot be reconstructed
    """
    part_nodes = job['policy'].object_ring.get_part_nodes(
        job['partition'])
    part_nodes.remove(node)

    # the fragment index we need to reconstruct is the position index
    # of the node we're rebuilding to within the primary part list
    fi_to_rebuild = node['index']

    # KISS send out connection requests to all nodes, see what sticks
    headers = {
        'X-Backend-Storage-Policy-Index': int(job['policy']),
    }
    pile = GreenAsyncPile(len(part_nodes))
    path = metadata['name']
    for node in part_nodes:
        pile.spawn(self._get_response, node, job['partition'],
                   path, headers, job['policy'])
    responses = []
    etag = None
    for resp in pile:
        if not resp:
            continue
        resp.headers = HeaderKeyDict(resp.getheaders())
        responses.append(resp)
        etag = sorted(
            responses, reverse=True,
            key=lambda r: Timestamp(
                r.headers.get('X-Backend-Timestamp'))
        )[0].headers.get('X-Object-Sysmeta-Ec-Etag')
        responses = [
            r for r in responses
            if r.headers.get('X-Object-Sysmeta-Ec-Etag') == etag
        ]

        if len(responses) >= job['policy'].ec_ndata:
            break
    else:
        self.logger.error(
            'Unable to get enough responses (%s/%s) '
            'to reconstruct %s with ETag %s' % (
                len(responses), job['policy'].ec_ndata,
                self._full_path(node, job['partition'],
                                metadata['name'], job['policy']),
                etag))
        raise DiskFileError('Unable to reconstruct EC archive')

    rebuilt_fragment_iter = self.make_rebuilt_fragment_iter(
        responses[:job['policy'].ec_ndata], path, job['policy'],
        fi_to_rebuild)
    return RebuildingECDiskFileStream(metadata, fi_to_rebuild,
                                      rebuilt_fragment_iter)