def run_quarantine_range_etag(self): container = 'container-range-%s' % uuid4() obj = 'object-range-%s' % uuid4() onode, opart, data_file = self._setup_data_file(container, obj, 'RANGE') # Stash the on disk data for future comparison - this may not equal # 'RANGE' if for example the proxy has crypto enabled backend_data = direct_client.direct_get_object( onode, opart, self.account, container, obj, headers={ 'X-Backend-Storage-Policy-Index': self.policy.idx})[-1] metadata = read_metadata(data_file) metadata['ETag'] = 'badetag' write_metadata(data_file, metadata) base_headers = {'X-Backend-Storage-Policy-Index': self.policy.idx} for header, result in [({'Range': 'bytes=0-2'}, backend_data[0:3]), ({'Range': 'bytes=1-11'}, backend_data[1:]), ({'Range': 'bytes=0-11'}, backend_data)]: req_headers = base_headers.copy() req_headers.update(header) odata = direct_client.direct_get_object( onode, opart, self.account, container, obj, headers=req_headers)[-1] self.assertEqual(odata, result) try: direct_client.direct_get_object( onode, opart, self.account, container, obj, headers={ 'X-Backend-Storage-Policy-Index': self.policy.idx}) raise Exception("Did not quarantine object") except ClientException as err: self.assertEqual(err.http_status, 404)
def run_quarantine(self): container = 'container-%s' % uuid4() obj = 'object-%s' % uuid4() onode, opart, data_file = self._setup_data_file(container, obj, 'VERIFY') # Stash the on disk data for future comparison - this may not equal # 'VERIFY' if for example the proxy has crypto enabled backend_data = direct_client.direct_get_object( onode, opart, self.account, container, obj, headers={ 'X-Backend-Storage-Policy-Index': self.policy.idx})[-1] metadata = read_metadata(data_file) metadata['ETag'] = 'badetag' write_metadata(data_file, metadata) odata = direct_client.direct_get_object( onode, opart, self.account, container, obj, headers={ 'X-Backend-Storage-Policy-Index': self.policy.idx})[-1] self.assertEqual(odata, backend_data) try: direct_client.direct_get_object( onode, opart, self.account, container, obj, headers={ 'X-Backend-Storage-Policy-Index': self.policy.idx}) raise Exception("Did not quarantine object") except ClientException as err: self.assertEqual(err.http_status, 404)
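# A minimal helper sketch (hypothetical, not part of the original suite): the
# quarantine tests above and below all end with the same assertion that a
# follow-up GET must 404 because the corrupt file was quarantined during the
# previous GET. A shared helper could carry that intent in one place; it
# assumes the same direct_client and ClientException imports the surrounding
# tests already use.
def assert_quarantined(test, onode, opart, account, container, obj, policy_idx):
    try:
        direct_client.direct_get_object(
            onode, opart, account, container, obj,
            headers={'X-Backend-Storage-Policy-Index': policy_idx})
    except ClientException as err:
        # The object server quarantines the corrupt .data file while serving
        # the prior GET, so this GET should report the object as missing.
        test.assertEqual(err.http_status, 404)
    else:
        test.fail('Did not quarantine object')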
def run_quarantine_range_etag(self): container = "container-range-%s" % uuid4() obj = "object-range-%s" % uuid4() onode, opart, data_file = self._setup_data_file(container, obj, "RANGE") metadata = read_metadata(data_file) metadata["ETag"] = "badetag" write_metadata(data_file, metadata) base_headers = {"X-Backend-Storage-Policy-Index": self.policy.idx} for header, result in [ ({"Range": "bytes=0-2"}, "RAN"), ({"Range": "bytes=1-11"}, "ANGE"), ({"Range": "bytes=0-11"}, "RANGE"), ]: req_headers = base_headers.copy() req_headers.update(header) odata = direct_client.direct_get_object(onode, opart, self.account, container, obj, headers=req_headers)[-1] self.assertEquals(odata, result) try: direct_client.direct_get_object( onode, opart, self.account, container, obj, headers={"X-Backend-Storage-Policy-Index": self.policy.idx} ) raise Exception("Did not quarantine object") except ClientException as err: self.assertEquals(err.http_status, 404)
def run_quarantine_range_etag(self): container = 'container-range-%s' % uuid4() obj = 'object-range-%s' % uuid4() onode, opart, data_file = self._setup_data_file(container, obj, 'RANGE') metadata = read_metadata(data_file) metadata['ETag'] = 'badetag' write_metadata(data_file, metadata) base_headers = {'X-Backend-Storage-Policy-Index': self.policy.idx} for header, result in [({'Range': 'bytes=0-2'}, 'RAN'), ({'Range': 'bytes=1-11'}, 'ANGE'), ({'Range': 'bytes=0-11'}, 'RANGE')]: req_headers = base_headers.copy() req_headers.update(header) odata = direct_client.direct_get_object( onode, opart, self.account, container, obj, headers=req_headers)[-1] self.assertEqual(odata, result) try: direct_client.direct_get_object( onode, opart, self.account, container, obj, headers={ 'X-Backend-Storage-Policy-Index': self.policy.idx}) raise Exception("Did not quarantine object") except ClientException as err: self.assertEqual(err.http_status, 404)
def run_quarantine_range_etag(self): container = 'container-range-%s' % uuid4() obj = 'object-range-%s' % uuid4() onode, opart, data_file = self._setup_data_file(container, obj, 'RANGE') with open(data_file) as fp: metadata = read_metadata(fp) metadata['ETag'] = 'badetag' with open(data_file) as fp: write_metadata(fp, metadata) for header, result in [({'Range': 'bytes=0-2'}, 'RAN'), ({'Range': 'bytes=1-11'}, 'ANGE'), ({'Range': 'bytes=0-11'}, 'RANGE')]: odata = direct_client.direct_get_object(onode, opart, self.account, container, obj, headers=header)[-1] self.assertEqual(odata, result) try: direct_client.direct_get_object(onode, opart, self.account, container, obj) raise Exception("Did not quarantine object") except client.ClientException as e: self.assertEqual(e.http_status, 404)
def test_sync(self): all_objects = [] # upload some containers for policy in ENABLED_POLICIES: container = 'container-%s-%s' % (policy.name, uuid.uuid4()) client.put_container(self.url, self.token, container, headers={'X-Storage-Policy': policy.name}) obj = 'object-%s' % uuid.uuid4() body = 'test-body' client.put_object(self.url, self.token, container, obj, body) all_objects.append((policy, container, obj)) Manager(['container-updater']).once() headers = client.head_account(self.url, self.token) self.assertEqual(int(headers['x-account-container-count']), len(ENABLED_POLICIES)) self.assertEqual(int(headers['x-account-object-count']), len(ENABLED_POLICIES)) self.assertEqual(int(headers['x-account-bytes-used']), len(ENABLED_POLICIES) * len(body)) part, nodes = self.account_ring.get_nodes(self.account) for node in nodes: direct_delete_account(node, part, self.account) Manager(['account-reaper']).once() get_to_final_state() for policy, container, obj in all_objects: cpart, cnodes = self.container_ring.get_nodes( self.account, container) for cnode in cnodes: try: direct_head_container(cnode, cpart, self.account, container) except ClientException as err: self.assertEqual(err.http_status, 404) else: self.fail('Found un-reaped /%s/%s on %r' % (self.account, container, cnode)) object_ring = POLICIES.get_object_ring(policy.idx, '/etc/swift/') part, nodes = object_ring.get_nodes(self.account, container, obj) for node in nodes: try: direct_get_object(node, part, self.account, container, obj) except ClientException as err: self.assertEqual(err.http_status, 404) else: self.fail('Found un-reaped /%s/%s/%s on %r in %s!' % (self.account, container, obj, node, policy))
def test_direct_get_object_error(self): with mocked_http_conn(500) as conn: with self.assertRaises(ClientException) as raised: direct_client.direct_get_object( self.node, self.part, self.account, self.container, self.obj) self.assertEqual(conn.host, self.node['ip']) self.assertEqual(conn.port, self.node['port']) self.assertEqual(conn.method, 'GET') self.assertEqual(conn.path, self.obj_path) self.assertEqual(raised.exception.http_status, 500) self.assertTrue('GET' in str(raised.exception))
def _run(self, thread): if time.time() - self.heartbeat >= 15: self.heartbeat = time.time() self._log_status("GETS") device, partition, name, container_name = random.choice(self.names) with self.connection() as conn: try: if self.use_proxy: client.get_object(self.url, self.token, container_name, name, http_conn=conn) else: node = {"ip": self.ip, "port": self.port, "device": device} direct_client.direct_get_object(node, partition, self.account, container_name, name) except client.ClientException as e: self.logger.debug(str(e)) self.failures += 1
def test_direct_get_object_error(self): with mocked_http_conn(500) as conn: exc = None try: direct_client.direct_get_object( self.node, self.part, self.account, self.container, self.obj) except ClientException as err: exc = err else: self.fail('ClientException not raised') self.assertEqual(conn.method, 'GET') self.assertEqual(conn.path, self.obj_path) self.assertEqual(exc.http_status, 500) self.assertTrue('GET' in str(exc))
def run_quarantine_zero_byte_get(self): container = 'container-zbyte-%s' % uuid4() obj = 'object-zbyte-%s' % uuid4() onode, opart, data_file = self._setup_data_file(container, obj, 'DATA') metadata = read_metadata(data_file) unlink(data_file) with open(data_file, 'w') as fpointer: write_metadata(fpointer, metadata) try: direct_client.direct_get_object(onode, opart, self.account, container, obj, conn_timeout=1, response_timeout=1) raise Exception("Did not quarantine object") except client.ClientException as err: self.assertEqual(err.http_status, 404)
def run_quarantine(self): container = "container-%s" % uuid4() obj = "object-%s" % uuid4() onode, opart, data_file = self._setup_data_file(container, obj, "VERIFY") with open(data_file) as fpointer: metadata = read_metadata(fpointer) metadata["ETag"] = "badetag" with open(data_file) as fpointer: write_metadata(fpointer, metadata) odata = direct_client.direct_get_object(onode, opart, self.account, container, obj)[-1] self.assertEquals(odata, "VERIFY") try: direct_client.direct_get_object(onode, opart, self.account, container, obj) raise Exception("Did not quarantine object") except client.ClientException as err: self.assertEquals(err.http_status, 404)
def test_direct_get_object(self): contents = six.StringIO('123456') with mocked_http_conn(200, body=contents) as conn: resp_header, obj_body = direct_client.direct_get_object( self.node, self.part, self.account, self.container, self.obj) self.assertEqual(conn.method, 'GET') self.assertEqual(conn.path, self.obj_path) self.assertEqual(obj_body, contents.getvalue())
def run_quarantine(self): container = 'container-%s' % uuid4() obj = 'object-%s' % uuid4() onode, opart, data_file = self._setup_data_file(container, obj, 'VERIFY') metadata = read_metadata(data_file) metadata['ETag'] = 'badetag' write_metadata(data_file, metadata) odata = direct_client.direct_get_object( onode, opart, self.account, container, obj)[-1] self.assertEqual(odata, 'VERIFY') try: direct_client.direct_get_object(onode, opart, self.account, container, obj) raise Exception("Did not quarantine object") except client.ClientException as err: self.assertEqual(err.http_status, 404)
def run_quarantine(self): container = "container-%s" % uuid4() obj = "object-%s" % uuid4() onode, opart, data_file = self._setup_data_file(container, obj, "VERIFY") metadata = read_metadata(data_file) metadata["ETag"] = "badetag" write_metadata(data_file, metadata) odata = direct_client.direct_get_object( onode, opart, self.account, container, obj, headers={"X-Backend-Storage-Policy-Index": self.policy.idx} )[-1] self.assertEquals(odata, "VERIFY") try: direct_client.direct_get_object( onode, opart, self.account, container, obj, headers={"X-Backend-Storage-Policy-Index": self.policy.idx} ) raise Exception("Did not quarantine object") except ClientException as err: self.assertEquals(err.http_status, 404)
def direct_get(self, node, part): req_headers = {'X-Backend-Storage-Policy-Index': int(self.policy)} headers, data = direct_client.direct_get_object( node, part, self.account, self.container_name, self.object_name, headers=req_headers, resp_chunk_size=64 * 2 ** 20) hasher = md5() for chunk in data: hasher.update(chunk) return hasher.hexdigest()
def run_quarantine(self): container = 'container-%s' % uuid4() obj = 'object-%s' % uuid4() onode, opart, data_file = self._setup_data_file(container, obj, 'VERIFY') with open(data_file) as fp: metadata = read_metadata(fp) metadata['ETag'] = 'badetag' with open(data_file) as fp: write_metadata(fp, metadata) odata = direct_client.direct_get_object(onode, opart, self.account, container, obj)[-1] self.assertEqual(odata, 'VERIFY') try: direct_client.direct_get_object(onode, opart, self.account, container, obj) raise Exception("Did not quarantine object") except client.ClientException as e: self.assertEqual(e.http_status, 404)
def test_delayed_reap(self): # define reapers which are supposed to operate 3 seconds later account_reapers = [] for conf_file in self.configs['account-server'].values(): conf = utils.readconf(conf_file, 'account-reaper') conf['delay_reaping'] = '3' account_reapers.append(reaper.AccountReaper(conf)) self.assertTrue(account_reapers) # run reaper, and make sure that nothing is reaped for account_reaper in account_reapers: account_reaper.run_once() for policy, container, obj in self.all_objects: cpart, cnodes = self.container_ring.get_nodes( self.account, container) for cnode in cnodes: try: direct_head_container(cnode, cpart, self.account, container) except ClientException: self.fail( "Nothing should be reaped. Container should exist") part, nodes = policy.object_ring.get_nodes(self.account, container, obj) headers = {'X-Backend-Storage-Policy-Index': int(policy)} for node in nodes: try: direct_get_object(node, part, self.account, container, obj, headers=headers) except ClientException: self.fail("Nothing should be reaped. Object should exist") # wait 3 seconds, run reaper, and make sure that all is reaped sleep(3) for account_reaper in account_reapers: account_reaper.run_once() self._verify_account_reaped()
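# Hedged configuration sketch (section and option names as read by the test
# above, values assumed): delay_reaping is read from the [account-reaper]
# section of an account-server conf file and is the number of seconds the
# reaper waits after an account is deleted before actually reaping it. A
# minimal conf fragment matching the test's setup might look like:
#
#     [account-reaper]
#     delay_reaping = 3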
def test_direct_get_object_chunks(self): contents = six.BytesIO(b'123456') with mocked_http_conn(200, body=contents) as conn: resp_header, obj_body = direct_client.direct_get_object( self.node, self.part, self.account, self.container, self.obj, resp_chunk_size=2) self.assertEqual(conn.host, self.node['ip']) self.assertEqual(conn.port, self.node['port']) self.assertEqual('GET', conn.method) self.assertEqual(self.obj_path, conn.path) self.assertEqual([b'12', b'34', b'56'], list(obj_body))
def run_quarantine_range_etag(self): container = "container-range-%s" % uuid4() obj = "object-range-%s" % uuid4() onode, opart, data_file = self._setup_data_file(container, obj, "RANGE") with open(data_file) as fpointer: metadata = read_metadata(fpointer) metadata["ETag"] = "badetag" with open(data_file) as fpointer: write_metadata(fpointer, metadata) for header, result in [ ({"Range": "bytes=0-2"}, "RAN"), ({"Range": "bytes=1-11"}, "ANGE"), ({"Range": "bytes=0-11"}, "RANGE"), ]: odata = direct_client.direct_get_object(onode, opart, self.account, container, obj, headers=header)[-1] self.assertEquals(odata, result) try: direct_client.direct_get_object(onode, opart, self.account, container, obj) raise Exception("Did not quarantine object") except client.ClientException as err: self.assertEquals(err.http_status, 404)
def run_quarantine_zero_byte_get(self): container = 'container-zbyte-%s' % uuid4() obj = 'object-zbyte-%s' % uuid4() onode, opart, data_file = self._setup_data_file(container, obj, 'DATA') metadata = read_metadata(data_file) unlink(data_file) with open(data_file, 'w') as fpointer: write_metadata(fpointer, metadata) try: direct_client.direct_get_object( onode, opart, self.account, container, obj, conn_timeout=1, response_timeout=1, headers={'X-Backend-Storage-Policy-Index': self.policy.idx}) raise Exception("Did not quarantine object") except ClientException as err: self.assertEqual(err.http_status, 404)
def direct_get(self, node, part, require_durable=True): req_headers = {'X-Backend-Storage-Policy-Index': int(self.policy)} if not require_durable: req_headers.update( {'X-Backend-Fragment-Preferences': json.dumps([])}) headers, data = direct_client.direct_get_object( node, part, self.account, self.container_name, self.object_name, headers=req_headers, resp_chunk_size=64 * 2 ** 20) hasher = md5() for chunk in data: hasher.update(chunk) return headers, hasher.hexdigest()
def run_quarantine(self): container = 'container-%s' % uuid4() obj = 'object-%s' % uuid4() onode, opart, data_file = self._setup_data_file( container, obj, 'VERIFY') # Stash the on disk data for future comparison - this may not equal # 'VERIFY' if for example the proxy has crypto enabled backend_data = direct_client.direct_get_object( onode, opart, self.account, container, obj, headers={'X-Backend-Storage-Policy-Index': self.policy.idx})[-1] metadata = read_metadata(data_file) metadata['ETag'] = 'badetag' write_metadata(data_file, metadata) odata = direct_client.direct_get_object( onode, opart, self.account, container, obj, headers={'X-Backend-Storage-Policy-Index': self.policy.idx})[-1] self.assertEqual(odata, backend_data) try: direct_client.direct_get_object( onode, opart, self.account, container, obj, headers={'X-Backend-Storage-Policy-Index': self.policy.idx}) raise Exception("Did not quarantine object") except ClientException as err: self.assertEqual(err.http_status, 404)
def run_quarantine_range_etag(self): container = 'container-range-%s' % uuid4() obj = 'object-range-%s' % uuid4() onode, opart, data_file = self._setup_data_file( container, obj, 'RANGE') metadata = read_metadata(data_file) metadata['ETag'] = 'badetag' write_metadata(data_file, metadata) base_headers = {'X-Backend-Storage-Policy-Index': self.policy.idx} for header, result in [({'Range': 'bytes=0-2'}, 'RAN'), ({'Range': 'bytes=1-11'}, 'ANGE'), ({'Range': 'bytes=0-11'}, 'RANGE')]: req_headers = base_headers.copy() req_headers.update(header) odata = direct_client.direct_get_object(onode, opart, self.account, container, obj, headers=req_headers)[-1] self.assertEqual(odata, result) try: direct_client.direct_get_object( onode, opart, self.account, container, obj, headers={'X-Backend-Storage-Policy-Index': self.policy.idx}) raise Exception("Did not quarantine object") except ClientException as err: self.assertEqual(err.http_status, 404)
def run_quarantine_zero_byte_get(self): container = "container-zbyte-%s" % uuid4() obj = "object-zbyte-%s" % uuid4() onode, opart, data_file = self._setup_data_file(container, obj, "DATA") metadata = read_metadata(data_file) unlink(data_file) with open(data_file, "w") as fpointer: write_metadata(fpointer, metadata) try: direct_client.direct_get_object( onode, opart, self.account, container, obj, conn_timeout=1, response_timeout=1, headers={"X-Backend-Storage-Policy-Index": self.policy.idx}, ) raise Exception("Did not quarantine object") except ClientException as err: self.assertEquals(err.http_status, 404)
def run_quarantine_range_etag(self): container = 'container-range-%s' % uuid4() obj = 'object-range-%s' % uuid4() onode, opart, data_file = self._setup_data_file(container, obj, 'RANGE') with open(data_file) as fpointer: metadata = read_metadata(fpointer) metadata['ETag'] = 'badetag' with open(data_file) as fpointer: write_metadata(fpointer, metadata) for header, result in [({'Range': 'bytes=0-2'}, 'RAN'), ({'Range': 'bytes=1-11'}, 'ANGE'), ({'Range': 'bytes=0-11'}, 'RANGE')]: odata = direct_client.direct_get_object( onode, opart, self.account, container, obj, headers=header)[-1] self.assertEqual(odata, result) try: direct_client.direct_get_object(onode, opart, self.account, container, obj) raise Exception("Did not quarantine object") except client.ClientException as err: self.assertEqual(err.http_status, 404)
def direct_get(self, node, part): req_headers = {'X-Backend-Storage-Policy-Index': int(self.policy)} headers, data = direct_client.direct_get_object(node, part, self.account, self.container_name, self.object_name, headers=req_headers, resp_chunk_size=64 * 2**20) hasher = md5() for chunk in data: hasher.update(chunk) return hasher.hexdigest()
def test_direct_get_object_chunks(self): contents = io.BytesIO(b'123456') with mocked_http_conn(200, body=contents) as conn: resp_header, obj_body = direct_client.direct_get_object( self.node, self.part, self.account, self.container, self.obj, resp_chunk_size=2) self.assertEqual(conn.host, self.node['ip']) self.assertEqual(conn.port, self.node['port']) self.assertEqual('GET', conn.method) self.assertEqual(self.obj_path, conn.path) self.assertEqual([b'12', b'34', b'56'], list(obj_body))
def test_direct_get_object_chunks(self): contents = six.BytesIO(b'123456') downloaded = b'' with mocked_http_conn(200, body=contents) as conn: resp_header, obj_body = direct_client.direct_get_object( self.node, self.part, self.account, self.container, self.obj, resp_chunk_size=2) for chunk in obj_body: downloaded += chunk self.assertEqual('GET', conn.method) self.assertEqual(self.obj_path, conn.path) self.assertEqual(b'123456', downloaded)
def test_direct_get_object(self): node = {'ip': '1.2.3.4', 'port': '6000', 'device': 'sda'} part = '0' account = 'a' container = 'c' name = 'o' contents = StringIO.StringIO('123456') was_http_connector = direct_client.http_connect direct_client.http_connect = mock_http_connect(200, body=contents) try: resp_header, obj_body = ( direct_client.direct_get_object(node, part, account, container, name)) self.assertEqual(obj_body, contents.getvalue()) finally: direct_client.http_connect = was_http_connector
def direct_get(self, node, part, require_durable=True, extra_headers=None): req_headers = {'X-Backend-Storage-Policy-Index': int(self.policy)} if extra_headers: req_headers.update(extra_headers) if not require_durable: req_headers.update( {'X-Backend-Fragment-Preferences': json.dumps([])}) # node dict has unicode values so utf8 decode our path parts too in # case they have non-ascii characters headers, data = direct_client.direct_get_object( node, part, self.account.decode('utf8'), self.container_name.decode('utf8'), self.object_name.decode('utf8'), headers=req_headers, resp_chunk_size=64 * 2 ** 20) hasher = md5() for chunk in data: hasher.update(chunk) return headers, hasher.hexdigest()
def direct_get(self, node, part, require_durable=True): req_headers = {'X-Backend-Storage-Policy-Index': int(self.policy)} if not require_durable: req_headers.update( {'X-Backend-Fragment-Preferences': json.dumps([])}) # node dict has unicode values so utf8 decode our path parts too in # case they have non-ascii characters headers, data = direct_client.direct_get_object( node, part, self.account.decode('utf8'), self.container_name.decode('utf8'), self.object_name.decode('utf8'), headers=req_headers, resp_chunk_size=64 * 2**20) hasher = md5() for chunk in data: hasher.update(chunk) return headers, hasher.hexdigest()
def run_quarantine_zero_byte_get(self): container = 'container-zbyte-%s' % uuid4() obj = 'object-zbyte-%s' % uuid4() onode, opart, data_file = self._setup_data_file(container, obj, 'DATA') with open(data_file) as fp: metadata = read_metadata(fp) os.unlink(data_file) with open(data_file, 'w') as fp: write_metadata(fp, metadata) try: direct_client.direct_get_object(onode, opart, self.account, container, obj, conn_timeout=1, response_timeout=1) raise Exception("Did not quarantine object") except client.ClientException as e: self.assertEqual(e.http_status, 404)
def direct_get(self, node, part, require_durable=True, extra_headers=None): req_headers = {'X-Backend-Storage-Policy-Index': int(self.policy)} if extra_headers: req_headers.update(extra_headers) if not require_durable: req_headers.update( {'X-Backend-Fragment-Preferences': json.dumps([])}) # node dict has unicode values so utf8 decode our path parts too in # case they have non-ascii characters if six.PY2: acc, con, obj = (s.decode('utf8') for s in ( self.account, self.container_name, self.object_name)) else: acc, con, obj = self.account, self.container_name, self.object_name headers, data = direct_client.direct_get_object( node, part, acc, con, obj, headers=req_headers, resp_chunk_size=64 * 2 ** 20) hasher = md5(usedforsecurity=False) for chunk in data: hasher.update(chunk) return headers, hasher.hexdigest()
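# A self-contained sketch of the chunk-hashing technique the direct_get
# helpers above rely on (function name assumed, not from the original suite):
# the object body is streamed and hashed incrementally, so replica
# consistency checks can compare hexdigests without ever holding a whole
# object body in memory.
from hashlib import md5

def digest_of_chunks(chunks):
    # chunks: any iterable of byte strings, e.g. the data iterator that
    # direct_get_object returns when resp_chunk_size is set.
    hasher = md5()
    for chunk in chunks:
        hasher.update(chunk)
    return hasher.hexdigest()

# For example, digest_of_chunks([b'abc', b'def']) equals
# md5(b'abcdef').hexdigest(), regardless of how the body was chunked.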
def test_main(self): container = 'container-%s' % uuid4() client.put_container(self.url, self.token, container) apart, anodes = self.account_ring.get_nodes(self.account) cpart, cnodes = self.container_ring.get_nodes(self.account, container) cnode = cnodes[0] obj = 'object-%s' % uuid4() opart, onodes = self.object_ring.get_nodes( self.account, container, obj) onode = onodes[0] kill(self.pids[self.port2server[onode['port']]], SIGTERM) client.put_object(self.url, self.token, container, obj, 'VERIFY') odata = client.get_object(self.url, self.token, container, obj)[-1] if odata != 'VERIFY': raise Exception('Object GET did not return VERIFY, instead it ' 'returned: %s' % repr(odata)) # Kill all primaries to ensure GET handoff works for node in onodes[1:]: kill(self.pids[self.port2server[node['port']]], SIGTERM) odata = client.get_object(self.url, self.token, container, obj)[-1] if odata != 'VERIFY': raise Exception('Object GET did not return VERIFY, instead it ' 'returned: %s' % repr(odata)) for node in onodes[1:]: self.pids[self.port2server[node['port']]] = Popen([ 'swift-object-server', '/etc/swift/object-server/%d.conf' % ((node['port'] - 6000) / 10)]).pid sleep(2) # We've indirectly verified the handoff node has the object, but let's # directly verify it. another_onode = next(self.object_ring.get_more_nodes(opart)) odata = direct_client.direct_get_object(another_onode, opart, self.account, container, obj)[-1] if odata != 'VERIFY': raise Exception('Direct object GET did not return VERIFY, instead ' 'it returned: %s' % repr(odata)) objs = [o['name'] for o in client.get_container(self.url, self.token, container)[1]] if obj not in objs: raise Exception('Container listing did not know about object') for cnode in cnodes: objs = [o['name'] for o in direct_client.direct_get_container(cnode, cpart, self.account, container)[1]] if obj not in objs: raise Exception( 'Container server %s:%s did not know about object' % (cnode['ip'], cnode['port'])) self.pids[self.port2server[onode['port']]] = Popen([ 'swift-object-server', '/etc/swift/object-server/%d.conf' % ((onode['port'] - 6000) / 10)]).pid sleep(2) exc = False try: direct_client.direct_get_object(onode, opart, self.account, container, obj) except Exception: exc = True if not exc: raise Exception('Previously downed object server had test object') # Run the extra server last so it'll remove its extra partition ps = [] for n in onodes: ps.append(Popen(['swift-object-replicator', '/etc/swift/object-server/%d.conf' % ((n['port'] - 6000) / 10), 'once'])) for p in ps: p.wait() call(['swift-object-replicator', '/etc/swift/object-server/%d.conf' % ((another_onode['port'] - 6000) / 10), 'once']) odata = direct_client.direct_get_object(onode, opart, self.account, container, obj)[-1] if odata != 'VERIFY': raise Exception('Direct object GET did not return VERIFY, instead ' 'it returned: %s' % repr(odata)) exc = False try: direct_client.direct_get_object(another_onode, opart, self.account, container, obj) except Exception: exc = True if not exc: raise Exception('Handoff object server still had test object') # Because POST has changed to a COPY by default, POSTs will succeed on all up # nodes now if at least one up node has the object.
# kill(self.pids[self.port2server[onode['port']]], SIGTERM) # client.post_object(self.url, self.token, container, obj, # headers={'x-object-meta-probe': 'value'}) # oheaders = client.head_object(self.url, self.token, container, obj) # if oheaders.get('x-object-meta-probe') != 'value': # raise Exception('Metadata incorrect, was %s' % repr(oheaders)) # exc = False # try: # direct_client.direct_get_object(another_onode, opart, self.account, # container, obj) # except Exception: # exc = True # if not exc: # raise Exception('Handoff server claimed it had the object when ' # 'it should not have it') # self.pids[self.port2server[onode['port']]] = Popen([ # 'swift-object-server', # '/etc/swift/object-server/%d.conf' % # ((onode['port'] - 6000) / 10)]).pid # sleep(2) # oheaders = direct_client.direct_get_object(onode, opart, self.account, # container, obj)[0] # if oheaders.get('x-object-meta-probe') == 'value': # raise Exception('Previously downed object server had the new ' # 'metadata when it should not have it') # # Run the extra server last so it'll remove its extra partition # ps = [] # for n in onodes: # ps.append(Popen(['swift-object-replicator', # '/etc/swift/object-server/%d.conf' % # ((n['port'] - 6000) / 10), 'once'])) # for p in ps: # p.wait() # call(['swift-object-replicator', # '/etc/swift/object-server/%d.conf' % # ((another_onode['port'] - 6000) / 10), 'once']) # oheaders = direct_client.direct_get_object(onode, opart, self.account, # container, obj)[0] # if oheaders.get('x-object-meta-probe') != 'value': # raise Exception( # 'Previously downed object server did not have the new metadata') kill(self.pids[self.port2server[onode['port']]], SIGTERM) client.delete_object(self.url, self.token, container, obj) exc = False try: client.head_object(self.url, self.token, container, obj) except Exception: exc = True if not exc: raise Exception('Regular object HEAD was still successful') objs = [o['name'] for o in client.get_container(self.url, self.token, container)[1]] if obj in objs: raise Exception('Container listing still knew about object') for cnode in cnodes: objs = [o['name'] for o in direct_client.direct_get_container( cnode, cpart, self.account, container)[1]] if obj in objs: raise Exception( 'Container server %s:%s still knew about object' % (cnode['ip'], cnode['port'])) self.pids[self.port2server[onode['port']]] = Popen([ 'swift-object-server', '/etc/swift/object-server/%d.conf' % ((onode['port'] - 6000) / 10)]).pid sleep(2) direct_client.direct_get_object(onode, opart, self.account, container, obj) # Run the extra server last so it'll remove its extra partition ps = [] for n in onodes: ps.append(Popen(['swift-object-replicator', '/etc/swift/object-server/%d.conf' % ((n['port'] - 6000) / 10), 'once'])) for p in ps: p.wait() call(['swift-object-replicator', '/etc/swift/object-server/%d.conf' % ((another_onode['port'] - 6000) / 10), 'once']) exc = False try: direct_client.direct_get_object(another_onode, opart, self.account, container, obj) except Exception: exc = True if not exc: raise Exception('Handoff object server still had the object')
def test_main(self): # Create container # Kill one container/obj primary server # Delete the "objects" directory on the primary server # Create container/obj (goes to two primary servers and one handoff) # Kill other two container/obj primary servers # Indirectly through proxy assert we can get container/obj # Restart those other two container/obj primary servers # Directly to handoff server assert we can get container/obj # Assert container listing (via proxy and directly) has container/obj # Bring the first container/obj primary server back up # Assert that it doesn't have container/obj yet # Run object replication for first container/obj primary server # Run object replication for handoff node # Assert the first container/obj primary server now has container/obj # Assert the handoff server no longer has container/obj container = 'container-%s' % uuid4() client.put_container(self.url, self.token, container) cpart, cnodes = self.container_ring.get_nodes(self.account, container) cnode = cnodes[0] obj = 'object-%s' % uuid4() opart, onodes = self.object_ring.get_nodes(self.account, container, obj) onode = onodes[0] kill_server(onode['port'], self.port2server, self.pids) obj_dir = '%s/objects' % self._get_objects_dir(onode) shutil.rmtree(obj_dir, True) self.assertFalse(os.path.exists(obj_dir)) client.put_object(self.url, self.token, container, obj, 'VERIFY') odata = client.get_object(self.url, self.token, container, obj)[-1] if odata != 'VERIFY': raise Exception('Object GET did not return VERIFY, instead it ' 'returned: %s' % repr(odata)) # Kill all primaries to ensure GET handoff works for node in onodes[1:]: kill_server(node['port'], self.port2server, self.pids) odata = client.get_object(self.url, self.token, container, obj)[-1] if odata != 'VERIFY': raise Exception('Object GET did not return VERIFY, instead it ' 'returned: %s' % repr(odata)) for node in onodes[1:]: start_server(node['port'], self.port2server, self.pids) self.assertFalse(os.path.exists(obj_dir)) # We've indirectly verified the handoff node has the object, but # let's directly verify it. 
another_onode = next(self.object_ring.get_more_nodes(opart)) odata = direct_client.direct_get_object(another_onode, opart, self.account, container, obj)[-1] if odata != 'VERIFY': raise Exception('Direct object GET did not return VERIFY, instead ' 'it returned: %s' % repr(odata)) objs = [ o['name'] for o in client.get_container(self.url, self.token, container)[1] ] if obj not in objs: raise Exception('Container listing did not know about object') timeout = time.time() + 5 found_objs_on_cnode = [] while time.time() < timeout: for cnode in [ c for c in cnodes if c not in found_objs_on_cnode ]: objs = [ o['name'] for o in direct_client.direct_get_container( cnode, cpart, self.account, container)[1] ] if obj in objs: found_objs_on_cnode.append(cnode) if len(found_objs_on_cnode) >= len(cnodes): break time.sleep(0.3) if len(found_objs_on_cnode) < len(cnodes): missing = [ '%s:%s' % (cnode['ip'], cnode['port']) for cnode in cnodes if cnode not in found_objs_on_cnode ] raise Exception('Container servers %r did not know about object' % missing) start_server(onode['port'], self.port2server, self.pids) self.assertFalse(os.path.exists(obj_dir)) exc = None try: direct_client.direct_get_object(onode, opart, self.account, container, obj) except ClientException as err: exc = err self.assertEqual(exc.http_status, 404) self.assertFalse(os.path.exists(obj_dir)) try: port_num = onode['replication_port'] except KeyError: port_num = onode['port'] try: another_port_num = another_onode['replication_port'] except KeyError: another_port_num = another_onode['port'] call([ 'swift-object-replicator', self.configs['object-replicator'] % ((port_num - 6000) / 10), 'once' ]) call([ 'swift-object-replicator', self.configs['object-replicator'] % ((another_port_num - 6000) / 10), 'once' ]) odata = direct_client.direct_get_object(onode, opart, self.account, container, obj)[-1] if odata != 'VERIFY': raise Exception('Direct object GET did not return VERIFY, instead ' 'it returned: %s' % repr(odata)) exc = None try: direct_client.direct_get_object(another_onode, opart, self.account, container, obj) except ClientException as err: exc = err self.assertEqual(exc.http_status, 404)
def test_main(self): # Create container # Kill one container/obj primary server # Create container/obj (goes to two primary servers and one handoff) # Kill other two container/obj primary servers # Indirectly through proxy assert we can get container/obj # Restart those other two container/obj primary servers # Directly to handoff server assert we can get container/obj # Assert container listing (via proxy and directly) has container/obj # Bring the first container/obj primary server back up # Assert that it doesn't have container/obj yet # Run object replication, ensuring we run the handoff node last so it # should remove its extra handoff partition # Assert the first container/obj primary server now has container/obj # Assert the handoff server no longer has container/obj # Kill the first container/obj primary server again (we have two # primaries and the handoff up now) # Delete container/obj # Assert we can't head container/obj # Assert container/obj is not in the container listing, both indirectly # and directly # Restart the first container/obj primary server again # Assert it still has container/obj # Run object replication, ensuring we run the handoff node last so it # should remove its extra handoff partition # Assert primary node no longer has container/obj container = 'container-%s' % uuid4() client.put_container(self.url, self.token, container) cpart, cnodes = self.container_ring.get_nodes(self.account, container) cnode = cnodes[0] obj = 'object-%s' % uuid4() opart, onodes = self.object_ring.get_nodes(self.account, container, obj) onode = onodes[0] kill_server(onode['port'], self.port2server, self.pids) client.put_object(self.url, self.token, container, obj, 'VERIFY') odata = client.get_object(self.url, self.token, container, obj)[-1] if odata != 'VERIFY': raise Exception('Object GET did not return VERIFY, instead it ' 'returned: %s' % repr(odata)) # Kill all primaries to ensure GET handoff works for node in onodes[1:]: kill_server(node['port'], self.port2server, self.pids) odata = client.get_object(self.url, self.token, container, obj)[-1] if odata != 'VERIFY': raise Exception('Object GET did not return VERIFY, instead it ' 'returned: %s' % repr(odata)) for node in onodes[1:]: start_server(node['port'], self.port2server, self.pids) # We've indirectly verified the handoff node has the object, but let's # directly verify it. another_onode = next(self.object_ring.get_more_nodes(opart)) odata = direct_client.direct_get_object(another_onode, opart, self.account, container, obj)[-1] if odata != 'VERIFY': raise Exception('Direct object GET did not return VERIFY, instead ' 'it returned: %s' % repr(odata)) objs = [ o['name'] for o in client.get_container(self.url, self.token, container)[1] ] if obj not in objs: raise Exception('Container listing did not know about object') for cnode in cnodes: objs = [ o['name'] for o in direct_client.direct_get_container( cnode, cpart, self.account, container)[1] ] if obj not in objs: raise Exception( 'Container server %s:%s did not know about object' % (cnode['ip'], cnode['port'])) start_server(onode['port'], self.port2server, self.pids) exc = None try: direct_client.direct_get_object(onode, opart, self.account, container, obj) except direct_client.ClientException as err: exc = err
def test_main(self): # Create container container = 'container-%s' % uuid4() client.put_container(self.url, self.token, container) # Kill one container/obj primary server cpart, cnodes = self.container_ring.get_nodes(self.account, container) cnode = cnodes[0] obj = 'object-%s' % uuid4() opart, onodes = self.object_ring.get_nodes( self.account, container, obj) onode = onodes[0] kill_server(onode['port'], self.port2server, self.pids) # Create container/obj (goes to two primary servers and one handoff) client.put_object(self.url, self.token, container, obj, 'VERIFY') odata = client.get_object(self.url, self.token, container, obj)[-1] if odata != 'VERIFY': raise Exception('Object GET did not return VERIFY, instead it ' 'returned: %s' % repr(odata)) # Kill other two container/obj primary servers # to ensure GET handoff works for node in onodes[1:]: kill_server(node['port'], self.port2server, self.pids) # Indirectly through proxy assert we can get container/obj odata = client.get_object(self.url, self.token, container, obj)[-1] if odata != 'VERIFY': raise Exception('Object GET did not return VERIFY, instead it ' 'returned: %s' % repr(odata)) # Restart those other two container/obj primary servers for node in onodes[1:]: start_server(node['port'], self.port2server, self.pids) # We've indirectly verified the handoff node has the container/object, # but let's directly verify it. another_onode = next(self.object_ring.get_more_nodes(opart)) odata = direct_client.direct_get_object( another_onode, opart, self.account, container, obj, headers={ 'X-Backend-Storage-Policy-Index': self.policy.idx})[-1] if odata != 'VERIFY': raise Exception('Direct object GET did not return VERIFY, instead ' 'it returned: %s' % repr(odata)) # Assert container listing (via proxy and directly) has container/obj objs = [o['name'] for o in client.get_container(self.url, self.token, container)[1]] if obj not in objs: raise Exception('Container listing did not know about object') for cnode in cnodes: objs = [o['name'] for o in direct_client.direct_get_container( cnode, cpart, self.account, container)[1]] if obj not in objs: raise Exception( 'Container server %s:%s did not know about object' % (cnode['ip'], cnode['port'])) # Bring the first container/obj primary server back up start_server(onode['port'], self.port2server, self.pids) # Assert that it doesn't have container/obj yet try: direct_client.direct_get_object( onode, opart, self.account, container, obj, headers={ 'X-Backend-Storage-Policy-Index': self.policy.idx}) except ClientException as err: self.assertEqual(err.http_status, 404) else: self.fail("Expected ClientException but didn't get it") # Run object replication, ensuring we run the handoff node last so it # will remove its extra handoff partition for node in onodes: try: port_num = node['replication_port'] except KeyError: port_num = node['port'] node_id = (port_num - 6000) / 10 Manager(['object-replicator']).once(number=node_id) try: another_port_num = another_onode['replication_port'] except KeyError: another_port_num = another_onode['port'] another_num = (another_port_num - 6000) / 10 Manager(['object-replicator']).once(number=another_num) # Assert the first container/obj primary server now has container/obj odata = direct_client.direct_get_object( onode, opart, self.account, container, obj, headers={ 'X-Backend-Storage-Policy-Index': self.policy.idx})[-1] if odata != 'VERIFY': raise Exception('Direct object GET did not return VERIFY, instead ' 'it returned: %s' % repr(odata)) # Assert the handoff server no longer has container/obj
try: direct_client.direct_get_object( another_onode, opart, self.account, container, obj, headers={ 'X-Backend-Storage-Policy-Index': self.policy.idx}) except ClientException as err: self.assertEqual(err.http_status, 404) else: self.fail("Expected ClientException but didn't get it") # Kill the first container/obj primary server again (we have two # primaries and the handoff up now) kill_server(onode['port'], self.port2server, self.pids) # Delete container/obj try: client.delete_object(self.url, self.token, container, obj) except client.ClientException as err: if self.object_ring.replica_count > 2: raise # Object DELETE returning 503 for (404, 204) # remove this with fix for # https://bugs.launchpad.net/swift/+bug/1318375 self.assertEqual(503, err.http_status) # Assert we can't head container/obj try: client.head_object(self.url, self.token, container, obj) except client.ClientException as err: self.assertEqual(err.http_status, 404) else: self.fail("Expected ClientException but didn't get it") # Assert container/obj is not in the container listing, both indirectly # and directly objs = [o['name'] for o in client.get_container(self.url, self.token, container)[1]] if obj in objs: raise Exception('Container listing still knew about object') for cnode in cnodes: objs = [o['name'] for o in direct_client.direct_get_container( cnode, cpart, self.account, container)[1]] if obj in objs: raise Exception( 'Container server %s:%s still knew about object' % (cnode['ip'], cnode['port'])) # Restart the first container/obj primary server again start_server(onode['port'], self.port2server, self.pids) # Assert it still has container/obj direct_client.direct_get_object( onode, opart, self.account, container, obj, headers={ 'X-Backend-Storage-Policy-Index': self.policy.idx}) # Run object replication, ensuring we run the handoff node last so it # will remove its extra handoff partition for node in onodes: try: port_num = node['replication_port'] except KeyError: port_num = node['port'] node_id = (port_num - 6000) / 10 Manager(['object-replicator']).once(number=node_id) another_node_id = (another_port_num - 6000) / 10 Manager(['object-replicator']).once(number=another_node_id) # Assert primary node no longer has container/obj try: direct_client.direct_get_object( another_onode, opart, self.account, container, obj, headers={ 'X-Backend-Storage-Policy-Index': self.policy.idx}) except ClientException as err: self.assertEqual(err.http_status, 404) else: self.fail("Expected ClientException but didn't get it")
class TestObjectHandoff(TestCase): def setUp(self): (self.pids, self.port2server, self.account_ring, self.container_ring, self.object_ring, self.url, self.token, self.account, self.configs) = reset_environment() def tearDown(self): kill_servers(self.port2server, self.pids) def test_main(self): # Create container # Kill one container/obj primary server # Create container/obj (goes to two primary servers and one handoff) # Kill other two container/obj primary servers # Indirectly through proxy assert we can get container/obj # Restart those other two container/obj primary servers # Directly to handoff server assert we can get container/obj # Assert container listing (via proxy and directly) has container/obj # Bring the first container/obj primary server back up # Assert that it doesn't have container/obj yet # Run object replication, ensuring we run the handoff node last so it # should remove its extra handoff partition # Assert the first container/obj primary server now has container/obj # Assert the handoff server no longer has container/obj # Kill the first container/obj primary server again (we have two # primaries and the handoff up now) # Delete container/obj # Assert we can't head container/obj # Assert container/obj is not in the container listing, both indirectly # and directly # Restart the first container/obj primary server again # Assert it still has container/obj # Run object replication, ensuring we run the handoff node last so it # should remove its extra handoff partition # Assert primary node no longer has container/obj container = 'container-%s' % uuid4() client.put_container(self.url, self.token, container) cpart, cnodes = self.container_ring.get_nodes(self.account, container) cnode = cnodes[0] obj = 'object-%s' % uuid4() opart, onodes = self.object_ring.get_nodes(self.account, container, obj) onode = onodes[0] kill_server(onode['port'], self.port2server, self.pids) client.put_object(self.url, self.token, container, obj, 'VERIFY') odata = client.get_object(self.url, self.token, container, obj)[-1] if odata != 'VERIFY': raise Exception('Object GET did not return VERIFY, instead it ' 'returned: %s' % repr(odata)) # Kill all primaries to ensure GET handoff works for node in onodes[1:]: kill_server(node['port'], self.port2server, self.pids) odata = client.get_object(self.url, self.token, container, obj)[-1] if odata != 'VERIFY': raise Exception('Object GET did not return VERIFY, instead it ' 'returned: %s' % repr(odata)) for node in onodes[1:]: start_server(node['port'], self.port2server, self.pids) # We've indirectly verified the handoff node has the object, but let's # directly verify it. 
another_onode = next(self.object_ring.get_more_nodes(opart)) odata = direct_client.direct_get_object(another_onode, opart, self.account, container, obj)[-1] if odata != 'VERIFY': raise Exception('Direct object GET did not return VERIFY, instead ' 'it returned: %s' % repr(odata)) objs = [ o['name'] for o in client.get_container(self.url, self.token, container)[1] ] if obj not in objs: raise Exception('Container listing did not know about object') for cnode in cnodes: objs = [ o['name'] for o in direct_client.direct_get_container( cnode, cpart, self.account, container)[1] ] if obj not in objs: raise Exception( 'Container server %s:%s did not know about object' % (cnode['ip'], cnode['port'])) start_server(onode['port'], self.port2server, self.pids) exc = None try: direct_client.direct_get_object(onode, opart, self.account, container, obj) except direct_client.ClientException as err: exc = err self.assertEqual(exc.http_status, 404) # Run the extra server last so it'll remove its extra partition processes = [] for node in onodes: try: port_num = node['replication_port'] except KeyError: port_num = node['port'] processes.append( Popen([ 'swift-object-replicator', self.configs['object-replicator'] % ((port_num - 6000) / 10), 'once' ])) for process in processes: process.wait() try: another_port_num = another_onode['replication_port'] except KeyError: another_port_num = another_onode['port'] call([ 'swift-object-replicator', self.configs['object-replicator'] % ((another_port_num - 6000) / 10), 'once' ]) odata = direct_client.direct_get_object(onode, opart, self.account, container, obj)[-1] if odata != 'VERIFY': raise Exception('Direct object GET did not return VERIFY, instead ' 'it returned: %s' % repr(odata)) exc = None try: direct_client.direct_get_object(another_onode, opart, self.account, container, obj) except direct_client.ClientException as err: exc = err
self.container_deletes += 1 self.logger.increment('deletes') self.logger.timing_since('deletes.timing', start_time) else: part, nodes = self.object_ring.get_nodes( info['account'], info['container'], row['name']) shuffle(nodes) exc = None looking_for_timestamp = float(row['created_at']) timestamp = -1 headers = body = None for node in nodes: try: these_headers, this_body = direct_get_object( node, part, info['account'], info['container'], row['name'], resp_chunk_size=65536) this_timestamp = float(these_headers['x-timestamp']) if this_timestamp > timestamp: timestamp = this_timestamp headers = these_headers body = this_body except ClientException as err: # If any errors are not 404, make sure we report the # non-404 one. We don't want to mistakenly assume the # object no longer exists just because one says so and # the others errored for some other reason. if not exc or exc.http_status == HTTP_NOT_FOUND: exc = err except (Exception, Timeout) as err: exc = err
def test_main(self): # Create container # Kill one container/obj primary server # Delete the "objects" directory on the primary server # Create container/obj (goes to two primary servers and one handoff) # Kill other two container/obj primary servers # Indirectly through proxy assert we can get container/obj # Restart those other two container/obj primary servers # Directly to handoff server assert we can get container/obj # Assert container listing (via proxy and directly) has container/obj # Bring the first container/obj primary server back up # Assert that it doesn't have container/obj yet # Run object replication for first container/obj primary server # Run object replication for handoff node # Assert the first container/obj primary server now has container/obj # Assert the handoff server no longer has container/obj container = 'container-%s' % uuid4() client.put_container(self.url, self.token, container) cpart, cnodes = self.container_ring.get_nodes(self.account, container) cnode = cnodes[0] obj = 'object-%s' % uuid4() opart, onodes = self.object_ring.get_nodes( self.account, container, obj) onode = onodes[0] kill_server(onode['port'], self.port2server, self.pids) obj_dir = '%s/objects' % self._get_objects_dir(onode) shutil.rmtree(obj_dir, True) self.assertFalse(os.path.exists(obj_dir)) client.put_object(self.url, self.token, container, obj, 'VERIFY') odata = client.get_object(self.url, self.token, container, obj)[-1] if odata != 'VERIFY': raise Exception('Object GET did not return VERIFY, instead it ' 'returned: %s' % repr(odata)) # Kill all primaries to ensure GET handoff works for node in onodes[1:]: kill_server(node['port'], self.port2server, self.pids) odata = client.get_object(self.url, self.token, container, obj)[-1] if odata != 'VERIFY': raise Exception('Object GET did not return VERIFY, instead it ' 'returned: %s' % repr(odata)) for node in onodes[1:]: start_server(node['port'], self.port2server, self.pids) self.assertFalse(os.path.exists(obj_dir)) # We've indirectly verified the handoff node has the object, but # let's directly verify it. another_onode = next(self.object_ring.get_more_nodes(opart)) odata = direct_client.direct_get_object( another_onode, opart, self.account, container, obj)[-1] if odata != 'VERIFY': raise Exception('Direct object GET did not return VERIFY, instead ' 'it returned: %s' % repr(odata)) objs = [o['name'] for o in client.get_container(self.url, self.token, container)[1]] if obj not in objs: raise Exception('Container listing did not know about object') for cnode in cnodes: objs = [o['name'] for o in direct_client.direct_get_container( cnode, cpart, self.account, container)[1]] if obj not in objs: raise Exception( 'Container server %s:%s did not know about object' % (cnode['ip'], cnode['port'])) start_server(onode['port'], self.port2server, self.pids) self.assertFalse(os.path.exists(obj_dir)) exc = None try: direct_client.direct_get_object(onode, opart, self.account, container, obj) except direct_client.ClientException as err: exc = err
def container_sync_row(self, row, sync_to, sync_key, broker, info): """ Sends the update the row indicates to the sync_to container. :param row: The updated row in the local database triggering the sync update. :param sync_to: The URL to the remote container. :param sync_key: The X-Container-Sync-Key to use when sending requests to the other container. :param broker: The local container database broker. :param info: The get_info result from the local container database broker. :returns: True on success """ try: start_time = time() if row['deleted']: try: delete_object(sync_to, name=row['name'], headers={ 'x-timestamp': row['created_at'], 'x-container-sync-key': sync_key }, proxy=self.proxy) except ClientException as err: if err.http_status != HTTP_NOT_FOUND: raise self.container_deletes += 1 self.logger.increment('deletes') self.logger.timing_since('deletes.timing', start_time) else: part, nodes = self.object_ring.get_nodes( info['account'], info['container'], row['name']) shuffle(nodes) exc = None looking_for_timestamp = float(row['created_at']) timestamp = -1 headers = body = None for node in nodes: try: these_headers, this_body = direct_get_object( node, part, info['account'], info['container'], row['name'], resp_chunk_size=65536) this_timestamp = float(these_headers['x-timestamp']) if this_timestamp > timestamp: timestamp = this_timestamp headers = these_headers body = this_body except ClientException as err: # If any errors are not 404, make sure we report the # non-404 one. We don't want to mistakenly assume the # object no longer exists just because one says so and # the others errored for some other reason. if not exc or exc.http_status == HTTP_NOT_FOUND: exc = err except (Exception, Timeout) as err: exc = err if timestamp < looking_for_timestamp: if exc: raise exc raise Exception( _('Unknown exception trying to GET: %(node)r ' '%(account)r %(container)r %(object)r'), { 'node': node, 'part': part, 'account': info['account'], 'container': info['container'], 'object': row['name'] }) for key in ('date', 'last-modified'): if key in headers: del headers[key] if 'etag' in headers: headers['etag'] = headers['etag'].strip('"') headers['x-timestamp'] = row['created_at'] headers['x-container-sync-key'] = sync_key put_object(sync_to, name=row['name'], headers=headers, contents=FileLikeIter(body), proxy=self.proxy) self.container_puts += 1 self.logger.increment('puts') self.logger.timing_since('puts.timing', start_time) except ClientException as err: if err.http_status == HTTP_UNAUTHORIZED: self.logger.info( _('Unauth %(sync_from)r => %(sync_to)r'), { 'sync_from': '%s/%s' % (quote(info['account']), quote(info['container'])), 'sync_to': sync_to }) elif err.http_status == HTTP_NOT_FOUND: self.logger.info( _('Not found %(sync_from)r => %(sync_to)r \ - object %(obj_name)r'), { 'sync_from': '%s/%s' % (quote(info['account']), quote(info['container'])), 'sync_to': sync_to, 'obj_name': row['name'] }) else: self.logger.exception(_('ERROR Syncing %(db_file)s %(row)s'), { 'db_file': str(broker), 'row': row }) self.container_failures += 1 self.logger.increment('failures') return False except (Exception, Timeout) as err: self.logger.exception(_('ERROR Syncing %(db_file)s %(row)s'), { 'db_file': str(broker), 'row': row }) self.container_failures += 1 self.logger.increment('failures') return False return True
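# A minimal sketch of the selection step inside container_sync_row above
# (helper name assumed, not from the Swift source): of the GET results
# gathered from each primary node, the copy with the newest 'x-timestamp'
# header wins and is what gets PUT to the remote container.
def newest_copy(results):
    # results: iterable of (headers, body) pairs as returned by
    # direct_get_object for the same object on different nodes.
    best_timestamp = -1
    best = None
    for headers, body in results:
        this_timestamp = float(headers['x-timestamp'])
        if this_timestamp > best_timestamp:
            best_timestamp = this_timestamp
            best = (headers, body)
    return best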
def test_main(self): # Create container container = 'container-%s' % uuid4() client.put_container(self.url, self.token, container, headers={'X-Storage-Policy': self.policy.name}) # Kill one container/obj primary server cpart, cnodes = self.container_ring.get_nodes(self.account, container) cnode = cnodes[0] obj = 'object-%s' % uuid4() opart, onodes = self.object_ring.get_nodes( self.account, container, obj) onode = onodes[0] kill_server((onode['ip'], onode['port']), self.ipport2server) # Create container/obj (goes to two primary servers and one handoff) client.put_object(self.url, self.token, container, obj, b'VERIFY') odata = client.get_object(self.url, self.token, container, obj)[-1] if odata != b'VERIFY': raise Exception('Object GET did not return VERIFY, instead it ' 'returned: %s' % repr(odata)) # Stash the on disk data from a primary for future comparison with the # handoff - this may not equal 'VERIFY' if for example the proxy has # crypto enabled direct_get_data = direct_client.direct_get_object( onodes[1], opart, self.account, container, obj, headers={ 'X-Backend-Storage-Policy-Index': self.policy.idx})[-1] # Kill other two container/obj primary servers # to ensure GET handoff works for node in onodes[1:]: kill_server((node['ip'], node['port']), self.ipport2server) # Indirectly through proxy assert we can get container/obj odata = client.get_object(self.url, self.token, container, obj)[-1] if odata != b'VERIFY': raise Exception('Object GET did not return VERIFY, instead it ' 'returned: %s' % repr(odata)) # Restart those other two container/obj primary servers for node in onodes[1:]: start_server((node['ip'], node['port']), self.ipport2server) # We've indirectly verified the handoff node has the container/object, # but let's directly verify it. 
    another_onode = next(self.object_ring.get_more_nodes(opart))
    odata = direct_client.direct_get_object(
        another_onode, opart, self.account, container, obj, headers={
            'X-Backend-Storage-Policy-Index': self.policy.idx})[-1]
    self.assertEqual(direct_get_data, odata)

    # drop a tempfile in the handoff's datadir, like it might have
    # had if there was an rsync failure while it was previously a
    # primary
    handoff_device_path = self.device_dir(another_onode)
    data_filename = None
    for root, dirs, files in os.walk(handoff_device_path):
        for filename in files:
            if filename.endswith('.data'):
                data_filename = filename
                temp_filename = '.%s.6MbL6r' % data_filename
                temp_filepath = os.path.join(root, temp_filename)
    if not data_filename:
        self.fail('Did not find any data files on %r' %
                  handoff_device_path)
    open(temp_filepath, 'w').close()

    # Assert container listing (via proxy and directly) has container/obj
    objs = [o['name'] for o in
            client.get_container(self.url, self.token, container)[1]]
    if obj not in objs:
        raise Exception('Container listing did not know about object')
    for cnode in cnodes:
        objs = [o['name'] for o in
                direct_client.direct_get_container(
                    cnode, cpart, self.account, container)[1]]
        if obj not in objs:
            raise Exception(
                'Container server %s:%s did not know about object' %
                (cnode['ip'], cnode['port']))

    # Bring the first container/obj primary server back up
    start_server((onode['ip'], onode['port']), self.ipport2server)

    # Assert that it doesn't have container/obj yet
    try:
        direct_client.direct_get_object(
            onode, opart, self.account, container, obj, headers={
                'X-Backend-Storage-Policy-Index': self.policy.idx})
    except ClientException as err:
        self.assertEqual(err.http_status, 404)
    else:
        self.fail("Expected ClientException but didn't get it")

    # Run object replication, ensuring we run the handoff node last so it
    # will remove its extra handoff partition
    for node in onodes:
        try:
            port_num = node['replication_port']
        except KeyError:
            port_num = node['port']
        node_id = (port_num - 6000) // 10
        Manager(['object-replicator']).once(number=node_id)
    try:
        another_port_num = another_onode['replication_port']
    except KeyError:
        another_port_num = another_onode['port']
    another_num = (another_port_num - 6000) // 10
    Manager(['object-replicator']).once(number=another_num)

    # Assert the first container/obj primary server now has container/obj
    odata = direct_client.direct_get_object(
        onode, opart, self.account, container, obj, headers={
            'X-Backend-Storage-Policy-Index': self.policy.idx})[-1]
    self.assertEqual(direct_get_data, odata)

    # and that it does *not* have a temporary rsync dropping!
    found_data_filename = False
    primary_device_path = self.device_dir(onode)
    for root, dirs, files in os.walk(primary_device_path):
        for filename in files:
            if filename.endswith('.6MbL6r'):
                self.fail('Found unexpected file %s' %
                          os.path.join(root, filename))
            if filename == data_filename:
                found_data_filename = True
    self.assertTrue(found_data_filename,
                    'Did not find data file %r on %r' % (
                        data_filename, primary_device_path))

    # Assert the handoff server no longer has container/obj
    try:
        direct_client.direct_get_object(
            another_onode, opart, self.account, container, obj, headers={
                'X-Backend-Storage-Policy-Index': self.policy.idx})
    except ClientException as err:
        self.assertEqual(err.http_status, 404)
    else:
        self.fail("Expected ClientException but didn't get it")

    # Kill the first container/obj primary server again (we have two
    # primaries and the handoff up now)
    kill_server((onode['ip'], onode['port']), self.ipport2server)

    # Delete container/obj
    try:
        client.delete_object(self.url, self.token, container, obj)
    except client.ClientException as err:
        if self.object_ring.replica_count > 2:
            raise
        # Object DELETE returning 503 for (404, 204)
        # remove this with fix for
        # https://bugs.launchpad.net/swift/+bug/1318375
        self.assertEqual(503, err.http_status)

    # Assert we can't head container/obj
    try:
        client.head_object(self.url, self.token, container, obj)
    except client.ClientException as err:
        self.assertEqual(err.http_status, 404)
    else:
        self.fail("Expected ClientException but didn't get it")

    # Assert container/obj is not in the container listing, both
    # indirectly and directly
    objs = [o['name'] for o in
            client.get_container(self.url, self.token, container)[1]]
    if obj in objs:
        raise Exception('Container listing still knew about object')
    for cnode in cnodes:
        objs = [o['name'] for o in
                direct_client.direct_get_container(
                    cnode, cpart, self.account, container)[1]]
        if obj in objs:
            raise Exception(
                'Container server %s:%s still knew about object' %
                (cnode['ip'], cnode['port']))

    # Restart the first container/obj primary server again
    start_server((onode['ip'], onode['port']), self.ipport2server)

    # Assert it still has container/obj
    direct_client.direct_get_object(
        onode, opart, self.account, container, obj, headers={
            'X-Backend-Storage-Policy-Index': self.policy.idx})

    # Run object replication, ensuring we run the handoff node last so it
    # will remove its extra handoff partition
    for node in onodes:
        try:
            port_num = node['replication_port']
        except KeyError:
            port_num = node['port']
        node_id = (port_num - 6000) // 10
        Manager(['object-replicator']).once(number=node_id)
    another_node_id = (another_port_num - 6000) // 10
    Manager(['object-replicator']).once(number=another_node_id)

    # Assert the handoff node no longer has container/obj
    try:
        direct_client.direct_get_object(
            another_onode, opart, self.account, container, obj, headers={
                'X-Backend-Storage-Policy-Index': self.policy.idx})
    except ClientException as err:
        self.assertEqual(err.http_status, 404)
    else:
        self.fail("Expected ClientException but didn't get it")
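# The replication runs above keep recomputing (port - 6000) // 10. Under the
# SAIO layout these probe tests assume, object server N listens on port 60N0
# (6010, 6020, ...), so that expression recovers N, the *.conf number handed
# to Manager(...).once(number=N). A small helper makes the convention
# explicit (config_number_from_port is our name, not Swift's):
def config_number_from_port(port, base=6000, step=10):
    """Return the server config number for a SAIO-style port."""
    if port <= base or (port - base) % step:
        raise ValueError('port %r does not look like base + N * step' % port)
    return (port - base) // step


assert config_number_from_port(6010) == 1
assert config_number_from_port(6040) == 4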
def test_ec_handoff_duplicate_available(self):
    container_name = 'container-%s' % uuid4()
    object_name = 'object-%s' % uuid4()

    # create EC container
    headers = {'X-Storage-Policy': self.policy.name}
    client.put_container(self.url, self.token, container_name,
                         headers=headers)

    # get our node lists
    opart, onodes = self.object_ring.get_nodes(
        self.account, container_name, object_name)

    # find both primary servers that have both of their devices in
    # the primary node list
    group_nodes_by_config = defaultdict(list)
    for n in onodes:
        group_nodes_by_config[self.config_number(n)].append(n)
    double_disk_primary = []
    for config_number, node_list in group_nodes_by_config.items():
        if len(node_list) > 1:
            double_disk_primary.append((config_number, node_list))

    # sanity, in a 4+2 with 8 disks two servers will be doubled
    self.assertEqual(len(double_disk_primary), 2)

    # shutdown the first double primary
    primary0_config_number, primary0_node_list = double_disk_primary[0]
    Manager(['object-server']).stop(number=primary0_config_number)

    # PUT object
    contents = Body()
    client.put_object(self.url, self.token, container_name,
                      object_name, contents=contents)

    # sanity fetch two frags on handoffs
    handoff_frags = []
    for node in self.object_ring.get_more_nodes(opart):
        headers, data = direct_client.direct_get_object(
            node, opart, self.account, container_name, object_name,
            headers={'X-Backend-Storage-Policy-Index': int(self.policy)})
        handoff_frags.append((node, headers, data))

    # bring the first double primary back, and fail the other one
    Manager(['object-server']).start(number=primary0_config_number)
    primary1_config_number, primary1_node_list = double_disk_primary[1]
    Manager(['object-server']).stop(number=primary1_config_number)

    # we can still GET the object
    resp_etag = self.get_object(container_name, object_name)
    self.assertEqual(resp_etag, contents.etag)

    # now start to "revert" the first handoff frag
    node = primary0_node_list[0]
    handoff_node, headers, data = handoff_frags[0]
    # N.B. object server api returns quoted ETag
    headers['ETag'] = headers['Etag'].strip('"')
    headers['X-Backend-Storage-Policy-Index'] = int(self.policy)
    direct_client.direct_put_object(
        node, opart, self.account, container_name, object_name,
        contents=data, headers=headers)

    # sanity - check available frags
    frag2count = self._check_nodes(opart, onodes,
                                   container_name, object_name)
    # ... five frags total
    self.assertEqual(sum(frag2count.values()), 5)
    # ... only 4 unique indexes
    self.assertEqual(len(frag2count), 4)

    # we can still GET the object
    resp_etag = self.get_object(container_name, object_name)
    self.assertEqual(resp_etag, contents.etag)

    # ... but we need both handoffs or we get an error
    for handoff_node, hdrs, data in handoff_frags:
        Manager(['object-server']).stop(
            number=self.config_number(handoff_node))
        with self.assertRaises(Exception) as cm:
            self.get_object(container_name, object_name)
        self.assertIn(cm.exception.http_status, (404, 503))
        Manager(['object-server']).start(
            number=self.config_number(handoff_node))

    # fix everything
    Manager(['object-server']).start(number=primary1_config_number)
    Manager(['object-reconstructor']).once()

    # sanity - check available frags
    frag2count = self._check_nodes(opart, onodes,
                                   container_name, object_name)
    # ... six frags total
    self.assertEqual(sum(frag2count.values()), 6)
    # ... all six unique
    self.assertEqual(len(frag2count), 6)
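# _check_nodes() above returns a frag-index -> count mapping. A hypothetical
# sketch of such a tally (our function name; assumes, like the tests here,
# swift.common.direct_client with its ClientException attribute, that
# direct_head_object accepts a headers kwarg as direct_get_object does, and
# that the object server exposes the EC fragment index as the
# X-Object-Sysmeta-Ec-Frag-Index header):
from collections import defaultdict

from swift.common import direct_client


def sketch_count_frags(nodes, part, account, container, obj, policy_index):
    frag2count = defaultdict(int)
    for node in nodes:
        try:
            headers = direct_client.direct_head_object(
                node, part, account, container, obj,
                headers={'X-Backend-Storage-Policy-Index': policy_index})
        except direct_client.ClientException:
            continue  # no fragment archive on this node
        frag_index = headers.get('X-Object-Sysmeta-Ec-Frag-Index')
        if frag_index is not None:
            frag2count[frag_index] += 1
    return frag2count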
def test_stale_reads(self):
    # Create container
    container = 'container-%s' % uuid4()
    client.put_container(self.url, self.token, container,
                         headers={'X-Storage-Policy': self.policy.name})

    # Kill one primary obj server
    obj = 'object-%s' % uuid4()
    opart, onodes = self.object_ring.get_nodes(
        self.account, container, obj)
    onode = onodes[0]
    kill_server((onode['ip'], onode['port']), self.ipport2server)

    # Create container/obj (goes to two primaries and one handoff)
    client.put_object(self.url, self.token, container, obj, b'VERIFY')
    odata = client.get_object(self.url, self.token, container, obj)[-1]
    if odata != b'VERIFY':
        raise Exception('Object GET did not return VERIFY, instead it '
                        'returned: %s' % repr(odata))

    # Stash the on disk data from a primary for future comparison with the
    # handoff - this may not equal 'VERIFY' if for example the proxy has
    # crypto enabled
    direct_get_data = direct_client.direct_get_object(
        onodes[1], opart, self.account, container, obj, headers={
            'X-Backend-Storage-Policy-Index': self.policy.idx})[-1]

    # Bring the first primary obj server back up
    start_server((onode['ip'], onode['port']), self.ipport2server)

    # send a delete request to primaries
    client.delete_object(self.url, self.token, container, obj)

    # there should be .ts files in all primaries now
    for node in onodes:
        try:
            direct_client.direct_get_object(
                node, opart, self.account, container, obj, headers={
                    'X-Backend-Storage-Policy-Index': self.policy.idx})
        except ClientException as err:
            self.assertEqual(err.http_status, 404)
        else:
            self.fail("Expected ClientException but didn't get it")

    # verify that handoff still has the data, DELETEs should have gone
    # only to primaries
    another_onode = next(self.object_ring.get_more_nodes(opart))
    handoff_data = direct_client.direct_get_object(
        another_onode, opart, self.account, container, obj, headers={
            'X-Backend-Storage-Policy-Index': self.policy.idx})[-1]
    self.assertEqual(handoff_data, direct_get_data)

    # Indirectly (i.e., through the proxy) try to GET the object; it
    # should return a 404. Before bug #1560574 was fixed, the proxy would
    # return the stale object from the handoff.
    try:
        client.get_object(self.url, self.token, container, obj)
    except client.ClientException as err:
        self.assertEqual(err.http_status, 404)
    else:
        self.fail("Expected ClientException but didn't get it")
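# The "expect a 404 from every primary" loop above recurs throughout these
# probe tests. A hypothetical helper (our name, reusing the direct_client and
# ClientException imports these tests already have) capturing the shape:
def assert_deleted_on_nodes(test, nodes, part, account, container, obj,
                            policy_index):
    for node in nodes:
        try:
            direct_client.direct_get_object(
                node, part, account, container, obj,
                headers={'X-Backend-Storage-Policy-Index': policy_index})
        except ClientException as err:
            # a tombstone (.ts file) answers GET with 404
            test.assertEqual(err.http_status, 404)
        else:
            test.fail('Expected 404 from %s:%s but the GET succeeded' %
                      (node['ip'], node['port']))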
def container_sync_row(self, row, sync_to, user_key, broker, info,
                       realm, realm_key):
    """
    Sends the update indicated by the row to the sync_to container.

    :param row: The updated row in the local database triggering the
                sync update.
    :param sync_to: The URL to the remote container.
    :param user_key: The X-Container-Sync-Key to use when sending
                     requests to the other container.
    :param broker: The local container database broker.
    :param info: The get_info result from the local container database
                 broker.
    :param realm: The realm from self.realms_conf, if there is one.
                  If None, fall back to using the older
                  allowed_sync_hosts way of syncing.
    :param realm_key: The realm key from self.realms_conf, if there
                      is one. If None, fall back to using the older
                      allowed_sync_hosts way of syncing.
    :returns: True on success
    """
    try:
        start_time = time()
        if row['deleted']:
            try:
                headers = {'x-timestamp': row['created_at']}
                if realm and realm_key:
                    nonce = uuid.uuid4().hex
                    path = urlparse(sync_to).path + '/' + quote(
                        row['name'])
                    sig = self.realms_conf.get_sig(
                        'DELETE', path, headers['x-timestamp'], nonce,
                        realm_key, user_key)
                    headers['x-container-sync-auth'] = '%s %s %s' % (
                        realm, nonce, sig)
                else:
                    headers['x-container-sync-key'] = user_key
                delete_object(sync_to, name=row['name'], headers=headers,
                              proxy=self.select_http_proxy())
            except ClientException as err:
                if err.http_status != HTTP_NOT_FOUND:
                    raise
            self.container_deletes += 1
            self.logger.increment('deletes')
            self.logger.timing_since('deletes.timing', start_time)
        else:
            part, nodes = self.object_ring.get_nodes(
                info['account'], info['container'], row['name'])
            shuffle(nodes)
            exc = None
            looking_for_timestamp = float(row['created_at'])
            timestamp = -1
            headers = body = None
            for node in nodes:
                try:
                    these_headers, this_body = direct_get_object(
                        node, part, info['account'], info['container'],
                        row['name'], resp_chunk_size=65536)
                    this_timestamp = float(these_headers['x-timestamp'])
                    if this_timestamp > timestamp:
                        timestamp = this_timestamp
                        headers = these_headers
                        body = this_body
                except ClientException as err:
                    # If any errors are not 404, make sure we report the
                    # non-404 one. We don't want to mistakenly assume the
                    # object no longer exists just because one says so and
                    # the others errored for some other reason.
                    if not exc or exc.http_status == HTTP_NOT_FOUND:
                        exc = err
                except (Exception, Timeout) as err:
                    exc = err
            if timestamp < looking_for_timestamp:
                if exc:
                    raise exc
                raise Exception(
                    _('Unknown exception trying to GET: %(node)r '
                      '%(account)r %(container)r %(object)r') %
                    {'node': node, 'part': part,
                     'account': info['account'],
                     'container': info['container'],
                     'object': row['name']})
            for key in ('date', 'last-modified'):
                if key in headers:
                    del headers[key]
            if 'etag' in headers:
                headers['etag'] = headers['etag'].strip('"')
            headers['x-timestamp'] = row['created_at']
            if realm and realm_key:
                nonce = uuid.uuid4().hex
                path = urlparse(sync_to).path + '/' + quote(row['name'])
                sig = self.realms_conf.get_sig(
                    'PUT', path, headers['x-timestamp'], nonce,
                    realm_key, user_key)
                headers['x-container-sync-auth'] = '%s %s %s' % (
                    realm, nonce, sig)
            else:
                headers['x-container-sync-key'] = user_key
            put_object(sync_to, name=row['name'], headers=headers,
                       contents=FileLikeIter(body),
                       proxy=self.select_http_proxy())
            self.container_puts += 1
            self.logger.increment('puts')
            self.logger.timing_since('puts.timing', start_time)
    except ClientException as err:
        if err.http_status == HTTP_UNAUTHORIZED:
            self.logger.info(
                _('Unauth %(sync_from)r => %(sync_to)r'),
                {'sync_from': '%s/%s' %
                    (quote(info['account']), quote(info['container'])),
                 'sync_to': sync_to})
        elif err.http_status == HTTP_NOT_FOUND:
            self.logger.info(
                _('Not found %(sync_from)r => %(sync_to)r '
                  '- object %(obj_name)r'),
                {'sync_from': '%s/%s' %
                    (quote(info['account']), quote(info['container'])),
                 'sync_to': sync_to, 'obj_name': row['name']})
        else:
            self.logger.exception(
                _('ERROR Syncing %(db_file)s %(row)s'),
                {'db_file': str(broker), 'row': row})
        self.container_failures += 1
        self.logger.increment('failures')
        return False
    except (Exception, Timeout) as err:
        self.logger.exception(
            _('ERROR Syncing %(db_file)s %(row)s'),
            {'db_file': str(broker), 'row': row})
        self.container_failures += 1
        self.logger.increment('failures')
        return False
    return True
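# The realm-signed requests above delegate signature generation to
# self.realms_conf.get_sig(). A self-contained sketch of such a signature,
# assuming HMAC-SHA1 over the newline-joined request fields (the
# authoritative scheme lives in swift/common/container_sync_realms.py;
# sketch_get_sig is our name, not Swift's):
import hashlib
import hmac


def sketch_get_sig(request_method, path, x_timestamp, nonce, realm_key,
                   user_key):
    payload = '\n'.join(
        (request_method, path, x_timestamp, nonce, user_key))
    return hmac.new(realm_key.encode('utf8'), payload.encode('utf8'),
                    hashlib.sha1).hexdigest()


# The x-container-sync-auth header then carries realm, nonce, and signature
# space-separated, mirroring the '%s %s %s' formatting above:
sig = sketch_get_sig('PUT', '/v1/AUTH_acct/c/o', '1452014575.12345',
                     'cf20f2a44f4f4f31bd06608e64a95cd9',
                     'realm-secret', 'user-secret')
auth_header = '%s %s %s' % ('US', 'cf20f2a44f4f4f31bd06608e64a95cd9', sig)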
def test_sync(self):
    all_objects = []
    # upload some containers
    for policy in ENABLED_POLICIES:
        container = 'container-%s-%s' % (policy.name, uuid.uuid4())
        client.put_container(self.url, self.token, container,
                             headers={'X-Storage-Policy': policy.name})
        obj = 'object-%s' % uuid.uuid4()
        body = 'test-body'
        client.put_object(self.url, self.token, container, obj, body)
        all_objects.append((policy, container, obj))

    Manager(['container-updater']).once()

    headers = client.head_account(self.url, self.token)
    self.assertEqual(int(headers['x-account-container-count']),
                     len(ENABLED_POLICIES))
    self.assertEqual(int(headers['x-account-object-count']),
                     len(ENABLED_POLICIES))
    self.assertEqual(int(headers['x-account-bytes-used']),
                     len(ENABLED_POLICIES) * len(body))

    part, nodes = self.account_ring.get_nodes(self.account)
    for node in nodes:
        direct_delete_account(node, part, self.account)

    # run the reaper
    Manager(['account-reaper']).once()

    for policy, container, obj in all_objects:
        # verify that any container deletes were at the same timestamp
        cpart, cnodes = self.container_ring.get_nodes(
            self.account, container)
        delete_times = set()
        for cnode in cnodes:
            try:
                direct_head_container(cnode, cpart, self.account,
                                      container)
            except ClientException as err:
                self.assertEqual(err.http_status, 404)
                delete_time = err.http_headers.get(
                    'X-Backend-DELETE-Timestamp')
                # 'X-Backend-DELETE-Timestamp' confirms it was deleted
                self.assertTrue(delete_time)
                delete_times.add(delete_time)
            else:
                # Container replicas may not yet be deleted if we have a
                # policy with object replicas < container replicas, so
                # ignore successful HEAD. We'll check for all replicas to
                # be deleted again after running the replicators.
                pass
        self.assertEqual(1, len(delete_times), delete_times)

        # verify that all object deletes were at the same timestamp
        object_ring = POLICIES.get_object_ring(policy.idx, '/etc/swift/')
        part, nodes = object_ring.get_nodes(self.account, container, obj)
        headers = {'X-Backend-Storage-Policy-Index': int(policy)}
        delete_times = set()
        for node in nodes:
            try:
                direct_get_object(node, part, self.account,
                                  container, obj, headers=headers)
            except ClientException as err:
                self.assertEqual(err.http_status, 404)
                delete_time = err.http_headers.get('X-Backend-Timestamp')
                # 'X-Backend-Timestamp' confirms obj was deleted
                self.assertTrue(delete_time)
                delete_times.add(delete_time)
            else:
                self.fail('Found un-reaped /%s/%s/%s on %r in %s!' %
                          (self.account, container, obj, node, policy))
        self.assertEqual(1, len(delete_times))

    # run replicators and updaters
    self.get_to_final_state()

    for policy, container, obj in all_objects:
        # verify that ALL container replicas are now deleted
        cpart, cnodes = self.container_ring.get_nodes(
            self.account, container)
        delete_times = set()
        for cnode in cnodes:
            try:
                direct_head_container(cnode, cpart, self.account,
                                      container)
            except ClientException as err:
                self.assertEqual(err.http_status, 404)
                delete_time = err.http_headers.get(
                    'X-Backend-DELETE-Timestamp')
                # 'X-Backend-DELETE-Timestamp' confirms it was deleted
                self.assertTrue(delete_time)
                delete_times.add(delete_time)
            else:
                self.fail('Found un-reaped /%s/%s on %r' %
                          (self.account, container, cnode))
        self.assertEqual(1, len(delete_times))

        # sanity check that object state is still consistent...
        object_ring = POLICIES.get_object_ring(policy.idx, '/etc/swift/')
        part, nodes = object_ring.get_nodes(self.account, container, obj)
        headers = {'X-Backend-Storage-Policy-Index': int(policy)}
        delete_times = set()
        for node in nodes:
            try:
                direct_get_object(node, part, self.account,
                                  container, obj, headers=headers)
            except ClientException as err:
                self.assertEqual(err.http_status, 404)
                delete_time = err.http_headers.get('X-Backend-Timestamp')
                # 'X-Backend-Timestamp' confirms obj was deleted
                self.assertTrue(delete_time)
                delete_times.add(delete_time)
            else:
                self.fail('Found un-reaped /%s/%s/%s on %r in %s!' %
                          (self.account, container, obj, node, policy))
        self.assertEqual(1, len(delete_times))
class TestEmptyDevice(TestCase):

    def setUp(self):
        (self.pids, self.port2server, self.account_ring,
         self.container_ring, self.object_ring, self.url, self.token,
         self.account) = reset_environment()

    def tearDown(self):
        kill_servers(self.port2server, self.pids)

    def _get_objects_dir(self, onode):
        device = onode['device']
        node_id = (onode['port'] - 6000) // 10
        obj_server_conf = readconf('/etc/swift/object-server/%s.conf' %
                                   node_id)
        devices = obj_server_conf['app:object-server']['devices']
        obj_dir = '%s/%s' % (devices, device)
        return obj_dir

    def test_main(self):
        # Create container
        # Kill one container/obj primary server
        # Delete the "objects" directory on the primary server
        # Create container/obj (goes to two primary servers and one handoff)
        # Kill other two container/obj primary servers
        # Indirectly through proxy assert we can get container/obj
        # Restart those other two container/obj primary servers
        # Directly to handoff server assert we can get container/obj
        # Assert container listing (via proxy and directly) has container/obj
        # Bring the first container/obj primary server back up
        # Assert that it doesn't have container/obj yet
        # Run object replication for first container/obj primary server
        # Run object replication for handoff node
        # Assert the first container/obj primary server now has container/obj
        # Assert the handoff server no longer has container/obj

        container = 'container-%s' % uuid4()
        client.put_container(self.url, self.token, container)
        cpart, cnodes = self.container_ring.get_nodes(self.account,
                                                      container)
        cnode = cnodes[0]
        obj = 'object-%s' % uuid4()
        opart, onodes = self.object_ring.get_nodes(
            self.account, container, obj)
        onode = onodes[0]
        kill_server(onode['port'], self.port2server, self.pids)
        obj_dir = '%s/objects' % self._get_objects_dir(onode)
        shutil.rmtree(obj_dir, True)
        self.assertFalse(os.path.exists(obj_dir))
        client.put_object(self.url, self.token, container, obj, 'VERIFY')
        odata = client.get_object(self.url, self.token, container, obj)[-1]
        if odata != 'VERIFY':
            raise Exception('Object GET did not return VERIFY, instead it '
                            'returned: %s' % repr(odata))
        # Kill all primaries to ensure GET handoff works
        for node in onodes[1:]:
            kill_server(node['port'], self.port2server, self.pids)
        odata = client.get_object(self.url, self.token, container, obj)[-1]
        if odata != 'VERIFY':
            raise Exception('Object GET did not return VERIFY, instead it '
                            'returned: %s' % repr(odata))
        for node in onodes[1:]:
            start_server(node['port'], self.port2server, self.pids)
        self.assertFalse(os.path.exists(obj_dir))
        # We've indirectly verified the handoff node has the object, but
        # let's directly verify it.
        another_onode = next(self.object_ring.get_more_nodes(opart))
        odata = direct_client.direct_get_object(
            another_onode, opart, self.account, container, obj)[-1]
        if odata != 'VERIFY':
            raise Exception('Direct object GET did not return VERIFY, '
                            'instead it returned: %s' % repr(odata))
        objs = [o['name'] for o in
                client.get_container(self.url, self.token, container)[1]]
        if obj not in objs:
            raise Exception('Container listing did not know about object')
        for cnode in cnodes:
            objs = [o['name'] for o in
                    direct_client.direct_get_container(
                        cnode, cpart, self.account, container)[1]]
            if obj not in objs:
                raise Exception(
                    'Container server %s:%s did not know about object' %
                    (cnode['ip'], cnode['port']))
        start_server(onode['port'], self.port2server, self.pids)
        self.assertFalse(os.path.exists(obj_dir))
        exc = None
        try:
            direct_client.direct_get_object(onode, opart, self.account,
                                            container, obj)
        except direct_client.ClientException as err:
            exc = err
        self.assertEqual(exc.http_status, 404)
        self.assertFalse(os.path.exists(obj_dir))
        call(['swift-object-replicator',
              '/etc/swift/object-server/%d.conf' %
              ((onode['port'] - 6000) // 10), 'once'])
        call(['swift-object-replicator',
              '/etc/swift/object-server/%d.conf' %
              ((another_onode['port'] - 6000) // 10), 'once'])
        odata = direct_client.direct_get_object(onode, opart, self.account,
                                                container, obj)[-1]
        if odata != 'VERIFY':
            raise Exception('Direct object GET did not return VERIFY, '
                            'instead it returned: %s' % repr(odata))
        exc = None
        try:
            direct_client.direct_get_object(another_onode, opart,
                                            self.account, container, obj)
        except direct_client.ClientException as err:
            exc = err
        self.assertEqual(exc.http_status, 404)
def test_main(self): # Create container container = 'container-%s' % uuid4() client.put_container(self.url, self.token, container, headers={'X-Storage-Policy': self.policy.name}) cpart, cnodes = self.container_ring.get_nodes(self.account, container) cnode = cnodes[0] obj = 'object-%s' % uuid4() opart, onodes = self.object_ring.get_nodes( self.account, container, obj) onode = onodes[0] # Kill one container/obj primary server kill_server((onode['ip'], onode['port']), self.ipport2server) # Delete the default data directory for objects on the primary server obj_dir = '%s/%s' % (self._get_objects_dir(onode), get_data_dir(self.policy)) shutil.rmtree(obj_dir, True) self.assertFalse(os.path.exists(obj_dir)) # Create container/obj (goes to two primary servers and one handoff) client.put_object(self.url, self.token, container, obj, 'VERIFY') odata = client.get_object(self.url, self.token, container, obj)[-1] if odata != 'VERIFY': raise Exception('Object GET did not return VERIFY, instead it ' 'returned: %s' % repr(odata)) # Stash the on disk data from a primary for future comparison with the # handoff - this may not equal 'VERIFY' if for example the proxy has # crypto enabled direct_get_data = direct_client.direct_get_object( onodes[1], opart, self.account, container, obj, headers={ 'X-Backend-Storage-Policy-Index': self.policy.idx})[-1] # Kill other two container/obj primary servers # to ensure GET handoff works for node in onodes[1:]: kill_server((node['ip'], node['port']), self.ipport2server) # Indirectly through proxy assert we can get container/obj odata = client.get_object(self.url, self.token, container, obj)[-1] if odata != 'VERIFY': raise Exception('Object GET did not return VERIFY, instead it ' 'returned: %s' % repr(odata)) # Restart those other two container/obj primary servers for node in onodes[1:]: start_server((node['ip'], node['port']), self.ipport2server) self.assertFalse(os.path.exists(obj_dir)) # We've indirectly verified the handoff node has the object, but # let's directly verify it. 
    # Directly to handoff server assert we can get container/obj
    another_onode = next(self.object_ring.get_more_nodes(opart))
    odata = direct_client.direct_get_object(
        another_onode, opart, self.account, container, obj,
        headers={'X-Backend-Storage-Policy-Index': self.policy.idx})[-1]
    self.assertEqual(direct_get_data, odata)

    # Assert container listing (via proxy and directly) has container/obj
    objs = [o['name'] for o in
            client.get_container(self.url, self.token, container)[1]]
    if obj not in objs:
        raise Exception('Container listing did not know about object')

    # Container updates land asynchronously, so poll with a deadline
    timeout = time.time() + 5
    found_objs_on_cnode = []
    while time.time() < timeout:
        for cnode in [c for c in cnodes
                      if c not in found_objs_on_cnode]:
            objs = [o['name'] for o in
                    direct_client.direct_get_container(
                        cnode, cpart, self.account, container)[1]]
            if obj in objs:
                found_objs_on_cnode.append(cnode)
        if len(found_objs_on_cnode) >= len(cnodes):
            break
        time.sleep(0.3)
    if len(found_objs_on_cnode) < len(cnodes):
        missing = ['%s:%s' % (cnode['ip'], cnode['port'])
                   for cnode in cnodes
                   if cnode not in found_objs_on_cnode]
        raise Exception('Container servers %r did not know about object' %
                        missing)

    # Bring the first container/obj primary server back up
    start_server((onode['ip'], onode['port']), self.ipport2server)

    # Assert that it doesn't have container/obj yet
    self.assertFalse(os.path.exists(obj_dir))
    try:
        direct_client.direct_get_object(
            onode, opart, self.account, container, obj, headers={
                'X-Backend-Storage-Policy-Index': self.policy.idx})
    except ClientException as err:
        self.assertEqual(err.http_status, 404)
        self.assertFalse(os.path.exists(obj_dir))
    else:
        self.fail("Expected ClientException but didn't get it")

    # Run object replication for first container/obj primary server
    _, num = get_server_number(
        (onode['ip'], onode.get('replication_port', onode['port'])),
        self.ipport2server)
    Manager(['object-replicator']).once(number=num)

    # Run object replication for handoff node
    _, another_num = get_server_number(
        (another_onode['ip'],
         another_onode.get('replication_port', another_onode['port'])),
        self.ipport2server)
    Manager(['object-replicator']).once(number=another_num)

    # Assert the first container/obj primary server now has container/obj
    odata = direct_client.direct_get_object(
        onode, opart, self.account, container, obj, headers={
            'X-Backend-Storage-Policy-Index': self.policy.idx})[-1]
    self.assertEqual(direct_get_data, odata)

    # Assert the handoff server no longer has container/obj
    try:
        direct_client.direct_get_object(
            another_onode, opart, self.account, container, obj, headers={
                'X-Backend-Storage-Policy-Index': self.policy.idx})
    except ClientException as err:
        self.assertEqual(err.http_status, 404)
    else:
        self.fail("Expected ClientException but didn't get it")
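# The container-listing check above polls with a deadline rather than
# asserting once, since container updates land asynchronously. The retry
# shape, pulled out as a generic sketch (wait_until is our helper name, not
# part of the test base class):
import time


def wait_until(predicate, timeout=5, interval=0.3):
    """Poll predicate() until it returns truthy or the deadline passes."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        result = predicate()
        if result:
            return result
        time.sleep(interval)
    return False


# Example with a trivially true predicate:
assert wait_until(lambda: True, timeout=1)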
def test_main(self):
    # Create container
    container = 'container-%s' % uuid4()
    client.put_container(self.url, self.token, container,
                         headers={'X-Storage-Policy': self.policy.name})

    # Kill one container/obj primary server
    cpart, cnodes = self.container_ring.get_nodes(self.account, container)
    cnode = cnodes[0]
    obj = 'object-%s' % uuid4()
    opart, onodes = self.object_ring.get_nodes(self.account, container,
                                               obj)
    onode = onodes[0]
    kill_server((onode['ip'], onode['port']), self.ipport2server,
                self.pids)

    # Create container/obj (goes to two primary servers and one handoff)
    client.put_object(self.url, self.token, container, obj, 'VERIFY')
    odata = client.get_object(self.url, self.token, container, obj)[-1]
    if odata != 'VERIFY':
        raise Exception('Object GET did not return VERIFY, instead it '
                        'returned: %s' % repr(odata))

    # Kill other two container/obj primary servers
    # to ensure GET handoff works
    for node in onodes[1:]:
        kill_server((node['ip'], node['port']), self.ipport2server,
                    self.pids)

    # Indirectly through proxy assert we can get container/obj
    odata = client.get_object(self.url, self.token, container, obj)[-1]
    if odata != 'VERIFY':
        raise Exception('Object GET did not return VERIFY, instead it '
                        'returned: %s' % repr(odata))

    # Restart those other two container/obj primary servers
    for node in onodes[1:]:
        start_server((node['ip'], node['port']), self.ipport2server,
                     self.pids)

    # We've indirectly verified the handoff node has the container/object,
    # but let's directly verify it.
    another_onode = next(self.object_ring.get_more_nodes(opart))
    odata = direct_client.direct_get_object(
        another_onode, opart, self.account, container, obj,
        headers={'X-Backend-Storage-Policy-Index': self.policy.idx})[-1]
    if odata != 'VERIFY':
        raise Exception('Direct object GET did not return VERIFY, '
                        'instead it returned: %s' % repr(odata))

    # Assert container listing (via proxy and directly) has container/obj
    objs = [
        o['name'] for o in
        client.get_container(self.url, self.token, container)[1]
    ]
    if obj not in objs:
        raise Exception('Container listing did not know about object')
    for cnode in cnodes:
        objs = [
            o['name'] for o in direct_client.direct_get_container(
                cnode, cpart, self.account, container)[1]
        ]
        if obj not in objs:
            raise Exception(
                'Container server %s:%s did not know about object' %
                (cnode['ip'], cnode['port']))

    # Bring the first container/obj primary server back up
    start_server((onode['ip'], onode['port']), self.ipport2server,
                 self.pids)

    # Assert that it doesn't have container/obj yet
    try:
        direct_client.direct_get_object(
            onode, opart, self.account, container, obj,
            headers={'X-Backend-Storage-Policy-Index': self.policy.idx})
    except ClientException as err:
        self.assertEqual(err.http_status, 404)
    else:
        self.fail("Expected ClientException but didn't get it")

    # Run object replication, ensuring we run the handoff node last so it
    # will remove its extra handoff partition
    for node in onodes:
        try:
            port_num = node['replication_port']
        except KeyError:
            port_num = node['port']
        node_id = (port_num - 6000) // 10
        Manager(['object-replicator']).once(number=node_id)
    try:
        another_port_num = another_onode['replication_port']
    except KeyError:
        another_port_num = another_onode['port']
    another_num = (another_port_num - 6000) // 10
    Manager(['object-replicator']).once(number=another_num)

    # Assert the first container/obj primary server now has container/obj
    odata = direct_client.direct_get_object(
        onode, opart, self.account, container, obj,
        headers={'X-Backend-Storage-Policy-Index': self.policy.idx})[-1]
    if odata != 'VERIFY':
        raise Exception('Direct object GET did not return VERIFY, '
                        'instead it returned: %s' % repr(odata))

    # Assert the handoff server no longer has container/obj
    try:
        direct_client.direct_get_object(
            another_onode, opart, self.account, container, obj,
            headers={'X-Backend-Storage-Policy-Index': self.policy.idx})
    except ClientException as err:
        self.assertEqual(err.http_status, 404)
    else:
        self.fail("Expected ClientException but didn't get it")

    # Kill the first container/obj primary server again (we have two
    # primaries and the handoff up now)
    kill_server((onode['ip'], onode['port']), self.ipport2server,
                self.pids)

    # Delete container/obj
    try:
        client.delete_object(self.url, self.token, container, obj)
    except client.ClientException as err:
        if self.object_ring.replica_count > 2:
            raise
        # Object DELETE returning 503 for (404, 204)
        # remove this with fix for
        # https://bugs.launchpad.net/swift/+bug/1318375
        self.assertEqual(503, err.http_status)

    # Assert we can't head container/obj
    try:
        client.head_object(self.url, self.token, container, obj)
    except client.ClientException as err:
        self.assertEqual(err.http_status, 404)
    else:
        self.fail("Expected ClientException but didn't get it")

    # Assert container/obj is not in the container listing, both
    # indirectly and directly
    objs = [
        o['name'] for o in
        client.get_container(self.url, self.token, container)[1]
    ]
    if obj in objs:
        raise Exception('Container listing still knew about object')
    for cnode in cnodes:
        objs = [
            o['name'] for o in direct_client.direct_get_container(
                cnode, cpart, self.account, container)[1]
        ]
        if obj in objs:
            raise Exception(
                'Container server %s:%s still knew about object' %
                (cnode['ip'], cnode['port']))

    # Restart the first container/obj primary server again
    start_server((onode['ip'], onode['port']), self.ipport2server,
                 self.pids)

    # Assert it still has container/obj
    direct_client.direct_get_object(
        onode, opart, self.account, container, obj,
        headers={'X-Backend-Storage-Policy-Index': self.policy.idx})

    # Run object replication, ensuring we run the handoff node last so it
    # will remove its extra handoff partition
    for node in onodes:
        try:
            port_num = node['replication_port']
        except KeyError:
            port_num = node['port']
        node_id = (port_num - 6000) // 10
        Manager(['object-replicator']).once(number=node_id)
    another_node_id = (another_port_num - 6000) // 10
    Manager(['object-replicator']).once(number=another_node_id)

    # Assert the handoff node no longer has container/obj
    try:
        direct_client.direct_get_object(
            another_onode, opart, self.account, container, obj,
            headers={'X-Backend-Storage-Policy-Index': self.policy.idx})
    except ClientException as err:
        self.assertEqual(err.http_status, 404)
    else:
        self.fail("Expected ClientException but didn't get it")
def container_sync_row(self, row, sync_to, user_key, broker, info,
                       realm, realm_key):
    """
    Sends the update indicated by the row to the sync_to container.

    :param row: The updated row in the local database triggering the
                sync update.
    :param sync_to: The URL to the remote container.
    :param user_key: The X-Container-Sync-Key to use when sending
                     requests to the other container.
    :param broker: The local container database broker.
    :param info: The get_info result from the local container database
                 broker.
    :param realm: The realm from self.realms_conf, if there is one.
                  If None, fall back to using the older
                  allowed_sync_hosts way of syncing.
    :param realm_key: The realm key from self.realms_conf, if there
                      is one. If None, fall back to using the older
                      allowed_sync_hosts way of syncing.
    :returns: True on success
    """
    try:
        start_time = time()
        if row['deleted']:
            try:
                headers = {'x-timestamp': row['created_at']}
                if realm and realm_key:
                    nonce = uuid.uuid4().hex
                    path = urlparse(sync_to).path + '/' + quote(
                        row['name'])
                    sig = self.realms_conf.get_sig(
                        'DELETE', path, headers['x-timestamp'], nonce,
                        realm_key, user_key)
                    headers['x-container-sync-auth'] = '%s %s %s' % (
                        realm, nonce, sig)
                else:
                    headers['x-container-sync-key'] = user_key
                delete_object(sync_to, name=row['name'], headers=headers,
                              proxy=self.select_http_proxy(),
                              logger=self.logger)
            except ClientException as err:
                if err.http_status != HTTP_NOT_FOUND:
                    raise
            self.container_deletes += 1
            self.logger.increment('deletes')
            self.logger.timing_since('deletes.timing', start_time)
        else:
            part, nodes = \
                self.get_object_ring(info['storage_policy_index']). \
                get_nodes(info['account'], info['container'],
                          row['name'])
            shuffle(nodes)
            exc = None
            looking_for_timestamp = Timestamp(row['created_at'])
            timestamp = -1
            headers = body = None
            headers_out = {'X-Backend-Storage-Policy-Index':
                           str(info['storage_policy_index'])}
            for node in nodes:
                try:
                    these_headers, this_body = direct_get_object(
                        node, part, info['account'], info['container'],
                        row['name'], headers=headers_out,
                        resp_chunk_size=65536)
                    this_timestamp = Timestamp(
                        these_headers['x-timestamp'])
                    if this_timestamp > timestamp:
                        timestamp = this_timestamp
                        headers = these_headers
                        body = this_body
                except ClientException as err:
                    # If any errors are not 404, make sure we report the
                    # non-404 one. We don't want to mistakenly assume the
                    # object no longer exists just because one says so and
                    # the others errored for some other reason.
                    if not exc or getattr(
                            exc, 'http_status', HTTP_NOT_FOUND) == \
                            HTTP_NOT_FOUND:
                        exc = err
                except (Exception, Timeout) as err:
                    exc = err
            if timestamp < looking_for_timestamp:
                if exc:
                    raise exc
                raise Exception(
                    _('Unknown exception trying to GET: %(node)r '
                      '%(account)r %(container)r %(object)r') %
                    {'node': node, 'part': part,
                     'account': info['account'],
                     'container': info['container'],
                     'object': row['name']})
            for key in ('date', 'last-modified'):
                if key in headers:
                    del headers[key]
            if 'etag' in headers:
                headers['etag'] = headers['etag'].strip('"')
            if 'content-type' in headers:
                headers['content-type'] = clean_content_type(
                    headers['content-type'])
            headers['x-timestamp'] = row['created_at']
            if realm and realm_key:
                nonce = uuid.uuid4().hex
                path = urlparse(sync_to).path + '/' + quote(row['name'])
                sig = self.realms_conf.get_sig(
                    'PUT', path, headers['x-timestamp'], nonce,
                    realm_key, user_key)
                headers['x-container-sync-auth'] = '%s %s %s' % (
                    realm, nonce, sig)
            else:
                headers['x-container-sync-key'] = user_key
            put_object(sync_to, name=row['name'], headers=headers,
                       contents=FileLikeIter(body),
                       proxy=self.select_http_proxy(),
                       logger=self.logger)
            self.container_puts += 1
            self.logger.increment('puts')
            self.logger.timing_since('puts.timing', start_time)
    except ClientException as err:
        if err.http_status == HTTP_UNAUTHORIZED:
            self.logger.info(
                _('Unauth %(sync_from)r => %(sync_to)r'),
                {'sync_from': '%s/%s' %
                    (quote(info['account']), quote(info['container'])),
                 'sync_to': sync_to})
        elif err.http_status == HTTP_NOT_FOUND:
            self.logger.info(
                _('Not found %(sync_from)r => %(sync_to)r '
                  '- object %(obj_name)r'),
                {'sync_from': '%s/%s' %
                    (quote(info['account']), quote(info['container'])),
                 'sync_to': sync_to, 'obj_name': row['name']})
        else:
            self.logger.exception(
                _('ERROR Syncing %(db_file)s %(row)s'),
                {'db_file': str(broker), 'row': row})
        self.container_failures += 1
        self.logger.increment('failures')
        return False
    except (Exception, Timeout) as err:
        self.logger.exception(
            _('ERROR Syncing %(db_file)s %(row)s'),
            {'db_file': str(broker), 'row': row})
        self.container_failures += 1
        self.logger.increment('failures')
        return False
    return True
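# put_object() above wants a file-like object with read(), while
# direct_get_object() hands back an iterator of body chunks; FileLikeIter
# bridges the two. A minimal sketch of the adapter idea (simplified, not
# Swift's actual swift.common.utils.FileLikeIter):
class SimpleFileLikeIter(object):
    def __init__(self, iterable):
        self._iter = iter(iterable)
        self._buf = b''

    def read(self, size=-1):
        # Accumulate chunks until the requested size is satisfied (or the
        # iterator is exhausted); a negative size drains everything.
        chunks = [self._buf]
        total = len(self._buf)
        while size < 0 or total < size:
            try:
                chunk = next(self._iter)
            except StopIteration:
                break
            chunks.append(chunk)
            total += len(chunk)
        data = b''.join(chunks)
        if size < 0:
            self._buf = b''
            return data
        self._buf = data[size:]
        return data[:size]


# Example: wrap a chunk iterator and read fixed-size pieces.
flo = SimpleFileLikeIter([b'abc', b'def', b'g'])
assert flo.read(4) == b'abcd'
assert flo.read() == b'efg'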
    objs = [
        o['name'] for o in
        client.get_container(self.url, self.token, container)[1]
    ]
    if obj in objs:
        raise Exception('Container listing still knew about object')
    for cnode in cnodes:
        objs = [
            o['name'] for o in direct_client.direct_get_container(
                cnode, cpart, self.account, container)[1]
        ]
        if obj in objs:
            raise Exception(
                'Container server %s:%s still knew about object' %
                (cnode['ip'], cnode['port']))
    start_server(onode['port'], self.port2server, self.pids)
    direct_client.direct_get_object(onode, opart, self.account, container,
                                    obj)
    # Run the extra server last so it'll remove its extra partition
    processes = []
    for node in onodes:
        processes.append(
            Popen(['swift-object-replicator',
                   '/etc/swift/object-server/%d.conf' %
                   ((node['port'] - 6000) // 10), 'once']))
    for process in processes:
        process.wait()
    call(['swift-object-replicator',
          '/etc/swift/object-server/%d.conf' %
          ((another_onode['port'] - 6000) // 10), 'once'])
def _verify_account_reaped(self):
    for policy, container, obj in self.all_objects:
        # verify that any container deletes were at the same timestamp
        cpart, cnodes = self.container_ring.get_nodes(
            self.account, container)
        delete_times = set()
        for cnode in cnodes:
            try:
                direct_head_container(cnode, cpart, self.account,
                                      container)
            except ClientException as err:
                self.assertEqual(err.http_status, 404)
                delete_time = err.http_headers.get(
                    'X-Backend-DELETE-Timestamp')
                # 'X-Backend-DELETE-Timestamp' confirms it was deleted
                self.assertTrue(delete_time)
                delete_times.add(delete_time)
            else:
                # Container replicas may not yet be deleted if we have a
                # policy with object replicas < container replicas, so
                # ignore successful HEAD. We'll check for all replicas to
                # be deleted again after running the replicators.
                pass
        self.assertEqual(1, len(delete_times), delete_times)

        # verify that all object deletes were at the same timestamp
        part, nodes = policy.object_ring.get_nodes(self.account,
                                                   container, obj)
        headers = {'X-Backend-Storage-Policy-Index': int(policy)}
        delete_times = set()
        for node in nodes:
            try:
                direct_get_object(node, part, self.account,
                                  container, obj, headers=headers)
            except ClientException as err:
                self.assertEqual(err.http_status, 404)
                delete_time = err.http_headers.get('X-Backend-Timestamp')
                # 'X-Backend-Timestamp' confirms obj was deleted
                self.assertTrue(delete_time)
                delete_times.add(delete_time)
            else:
                self.fail('Found un-reaped /%s/%s/%s on %r in %s!' %
                          (self.account, container, obj, node, policy))
        self.assertEqual(1, len(delete_times))

    # run replicators and updaters
    self.get_to_final_state()

    for policy, container, obj in self.all_objects:
        # verify that ALL container replicas are now deleted
        cpart, cnodes = self.container_ring.get_nodes(
            self.account, container)
        delete_times = set()
        for cnode in cnodes:
            try:
                direct_head_container(cnode, cpart, self.account,
                                      container)
            except ClientException as err:
                self.assertEqual(err.http_status, 404)
                delete_time = err.http_headers.get(
                    'X-Backend-DELETE-Timestamp')
                # 'X-Backend-DELETE-Timestamp' confirms it was deleted
                self.assertTrue(delete_time)
                delete_times.add(delete_time)
            else:
                self.fail('Found un-reaped /%s/%s on %r' %
                          (self.account, container, cnode))
        self.assertEqual(1, len(delete_times))

        # sanity check that object state is still consistent...
        part, nodes = policy.object_ring.get_nodes(self.account,
                                                   container, obj)
        headers = {'X-Backend-Storage-Policy-Index': int(policy)}
        delete_times = set()
        for node in nodes:
            try:
                direct_get_object(node, part, self.account,
                                  container, obj, headers=headers)
            except ClientException as err:
                self.assertEqual(err.http_status, 404)
                delete_time = err.http_headers.get('X-Backend-Timestamp')
                # 'X-Backend-Timestamp' confirms obj was deleted
                self.assertTrue(delete_time)
                delete_times.add(delete_time)
            else:
                self.fail('Found un-reaped /%s/%s/%s on %r in %s!' %
                          (self.account, container, obj, node, policy))
        self.assertEqual(1, len(delete_times))
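# _verify_account_reaped() keeps pouring each replica's delete timestamp into
# a set and asserting the set has exactly one member: if the reaper stamped
# replicas at different times, the set grows and the assertion fails. That
# check as a tiny standalone sketch (assert_single_delete_time is our helper
# name, not part of the test base class):
def assert_single_delete_time(test_case, delete_times):
    distinct = set(delete_times)
    test_case.assertEqual(
        1, len(distinct),
        'replicas disagree on delete time: %r' % sorted(distinct))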
def test_main(self):
    # Create container
    container = 'container-%s' % uuid4()
    client.put_container(self.url, self.token, container,
                         headers={'X-Storage-Policy': self.policy.name})

    # Kill one container/obj primary server
    cpart, cnodes = self.container_ring.get_nodes(self.account, container)
    cnode = cnodes[0]
    obj = 'object-%s' % uuid4()
    opart, onodes = self.object_ring.get_nodes(
        self.account, container, obj)
    onode = onodes[0]
    kill_server((onode['ip'], onode['port']), self.ipport2server)

    # Create container/obj (goes to two primary servers and one handoff)
    client.put_object(self.url, self.token, container, obj, 'VERIFY')
    odata = client.get_object(self.url, self.token, container, obj)[-1]
    if odata != 'VERIFY':
        raise Exception('Object GET did not return VERIFY, instead it '
                        'returned: %s' % repr(odata))

    # Kill other two container/obj primary servers
    # to ensure GET handoff works
    for node in onodes[1:]:
        kill_server((node['ip'], node['port']), self.ipport2server)

    # Indirectly through proxy assert we can get container/obj
    odata = client.get_object(self.url, self.token, container, obj)[-1]
    if odata != 'VERIFY':
        raise Exception('Object GET did not return VERIFY, instead it '
                        'returned: %s' % repr(odata))

    # Restart those other two container/obj primary servers
    for node in onodes[1:]:
        start_server((node['ip'], node['port']), self.ipport2server)

    # We've indirectly verified the handoff node has the container/object,
    # but let's directly verify it.
    another_onode = next(self.object_ring.get_more_nodes(opart))
    odata = direct_client.direct_get_object(
        another_onode, opart, self.account, container, obj, headers={
            'X-Backend-Storage-Policy-Index': self.policy.idx})[-1]
    if odata != 'VERIFY':
        raise Exception('Direct object GET did not return VERIFY, '
                        'instead it returned: %s' % repr(odata))

    # drop a tempfile in the handoff's datadir, like it might have
    # had if there was an rsync failure while it was previously a
    # primary
    handoff_device_path = self.device_dir('object', another_onode)
    data_filename = None
    for root, dirs, files in os.walk(handoff_device_path):
        for filename in files:
            if filename.endswith('.data'):
                data_filename = filename
                temp_filename = '.%s.6MbL6r' % data_filename
                temp_filepath = os.path.join(root, temp_filename)
    if not data_filename:
        self.fail('Did not find any data files on %r' %
                  handoff_device_path)
    open(temp_filepath, 'w').close()

    # Assert container listing (via proxy and directly) has container/obj
    objs = [o['name'] for o in
            client.get_container(self.url, self.token, container)[1]]
    if obj not in objs:
        raise Exception('Container listing did not know about object')
    for cnode in cnodes:
        objs = [o['name'] for o in
                direct_client.direct_get_container(
                    cnode, cpart, self.account, container)[1]]
        if obj not in objs:
            raise Exception(
                'Container server %s:%s did not know about object' %
                (cnode['ip'], cnode['port']))

    # Bring the first container/obj primary server back up
    start_server((onode['ip'], onode['port']), self.ipport2server)

    # Assert that it doesn't have container/obj yet
    try:
        direct_client.direct_get_object(
            onode, opart, self.account, container, obj, headers={
                'X-Backend-Storage-Policy-Index': self.policy.idx})
    except ClientException as err:
        self.assertEqual(err.http_status, 404)
    else:
        self.fail("Expected ClientException but didn't get it")

    # Run object replication, ensuring we run the handoff node last so it
    # will remove its extra handoff partition
    for node in onodes:
        try:
            port_num = node['replication_port']
        except KeyError:
            port_num = node['port']
        node_id = (port_num - 6000) // 10
        Manager(['object-replicator']).once(number=node_id)
    try:
        another_port_num = another_onode['replication_port']
    except KeyError:
        another_port_num = another_onode['port']
    another_num = (another_port_num - 6000) // 10
    Manager(['object-replicator']).once(number=another_num)

    # Assert the first container/obj primary server now has container/obj
    odata = direct_client.direct_get_object(
        onode, opart, self.account, container, obj, headers={
            'X-Backend-Storage-Policy-Index': self.policy.idx})[-1]
    if odata != 'VERIFY':
        raise Exception('Direct object GET did not return VERIFY, '
                        'instead it returned: %s' % repr(odata))

    # and that it does *not* have a temporary rsync dropping!
    found_data_filename = False
    primary_device_path = self.device_dir('object', onode)
    for root, dirs, files in os.walk(primary_device_path):
        for filename in files:
            if filename.endswith('.6MbL6r'):
                self.fail('Found unexpected file %s' %
                          os.path.join(root, filename))
            if filename == data_filename:
                found_data_filename = True
    self.assertTrue(found_data_filename,
                    'Did not find data file %r on %r' % (
                        data_filename, primary_device_path))

    # Assert the handoff server no longer has container/obj
    try:
        direct_client.direct_get_object(
            another_onode, opart, self.account, container, obj, headers={
                'X-Backend-Storage-Policy-Index': self.policy.idx})
    except ClientException as err:
        self.assertEqual(err.http_status, 404)
    else:
        self.fail("Expected ClientException but didn't get it")

    # Kill the first container/obj primary server again (we have two
    # primaries and the handoff up now)
    kill_server((onode['ip'], onode['port']), self.ipport2server)

    # Delete container/obj
    try:
        client.delete_object(self.url, self.token, container, obj)
    except client.ClientException as err:
        if self.object_ring.replica_count > 2:
            raise
        # Object DELETE returning 503 for (404, 204)
        # remove this with fix for
        # https://bugs.launchpad.net/swift/+bug/1318375
        self.assertEqual(503, err.http_status)

    # Assert we can't head container/obj
    try:
        client.head_object(self.url, self.token, container, obj)
    except client.ClientException as err:
        self.assertEqual(err.http_status, 404)
    else:
        self.fail("Expected ClientException but didn't get it")

    # Assert container/obj is not in the container listing, both
    # indirectly and directly
    objs = [o['name'] for o in
            client.get_container(self.url, self.token, container)[1]]
    if obj in objs:
        raise Exception('Container listing still knew about object')
    for cnode in cnodes:
        objs = [o['name'] for o in
                direct_client.direct_get_container(
                    cnode, cpart, self.account, container)[1]]
        if obj in objs:
            raise Exception(
                'Container server %s:%s still knew about object' %
                (cnode['ip'], cnode['port']))

    # Restart the first container/obj primary server again
    start_server((onode['ip'], onode['port']), self.ipport2server)

    # Assert it still has container/obj
    direct_client.direct_get_object(
        onode, opart, self.account, container, obj, headers={
            'X-Backend-Storage-Policy-Index': self.policy.idx})

    # Run object replication, ensuring we run the handoff node last so it
    # will remove its extra handoff partition
    for node in onodes:
        try:
            port_num = node['replication_port']
        except KeyError:
            port_num = node['port']
        node_id = (port_num - 6000) // 10
        Manager(['object-replicator']).once(number=node_id)
    another_node_id = (another_port_num - 6000) // 10
    Manager(['object-replicator']).once(number=another_node_id)

    # Assert the handoff node no longer has container/obj
    try:
        direct_client.direct_get_object(
            another_onode, opart, self.account, container, obj, headers={
                'X-Backend-Storage-Policy-Index': self.policy.idx})
    except ClientException as err:
        self.assertEqual(err.http_status, 404)
    else:
        self.fail("Expected ClientException but didn't get it")