def _test_expirer_delete_outdated_object_version(self, object_exists):
    """
    Verify the expirer retries deleting an object after first hitting an
    outdated version of it.

    Two scenarios are simulated depending on ``object_exists``:

    * the expirer gets a 404, because the newest version of the object
      is offline (``object_exists=False``), or
    * the expirer gets a 412, because the old version of the object
      mismatches the expiration time sent by the expirer
      (``object_exists=True``).

    In either case, the expirer should retry deleting the object later,
    for as long as a reclaim age has not passed.
    """
    obj_brain = BrainSplitter(self.url, self.token, self.container_name,
                              self.object_name, 'object', self.policy)

    obj_brain.put_container()

    if object_exists:
        obj_brain.put_object()

    # currently, the object either doesn't exist, or does not have
    # an expiration

    # stop primary servers and put a newer version of the object, this
    # time with an expiration. only the handoff servers will have
    # the new version
    obj_brain.stop_primary_half()
    now = time.time()
    delete_at = int(now + 2.0)
    # FIX: header values must be strings; the original passed the raw
    # int delete_at here (the other copy of this helper in this file
    # already wraps it in str())
    obj_brain.put_object({'X-Delete-At': str(delete_at)})

    # make sure auto-created containers get in the account listing
    Manager(['container-updater']).once()
    # update object record in the container listing
    Manager(['container-replicator']).once()

    # take handoff servers down, and bring up the outdated primary servers
    obj_brain.start_primary_half()
    obj_brain.stop_handoff_half()

    # wait until object expiration time
    while time.time() <= delete_at:
        time.sleep(0.1)

    # run expirer against the outdated servers. it should fail since
    # the outdated version does not match the expiration time
    self.expirer.once()

    # bring all servers up, and run replicator to update servers
    obj_brain.start_handoff_half()
    Manager(['object-replicator']).once()

    # verify the deletion has failed by checking the container listing
    self.assertTrue(self._check_obj_in_container_listing(),
                    msg='Did not find listing for %s' % self.object_name)

    # run expirer again, delete should now succeed
    self.expirer.once()

    # verify the deletion by checking the container listing
    self.assertFalse(self._check_obj_in_container_listing(),
                     msg='Found listing for %s' % self.object_name)
def _test_expirer_delete_outdated_object_version(self, object_exists):
    """
    Verify the expirer retries deleting an object after first hitting an
    outdated version of it.

    Two scenarios are simulated depending on ``object_exists``:

    * the expirer gets a 404, because the newest version of the object
      is offline (``object_exists=False``), or
    * the expirer gets a 412, because the old version of the object
      mismatches the expiration time sent by the expirer
      (``object_exists=True``).

    In either case, the expirer should retry deleting the object later,
    for as long as a reclaim age has not passed.
    """
    obj_brain = BrainSplitter(self.url, self.token, self.container_name,
                              self.object_name, 'object', self.policy)

    obj_brain.put_container()

    if object_exists:
        obj_brain.put_object()

    # currently, the object either doesn't exist, or does not have
    # an expiration

    # stop primary servers and put a newer version of the object, this
    # time with an expiration. only the handoff servers will have
    # the new version
    obj_brain.stop_primary_half()
    now = time.time()
    delete_at = int(now + 2.0)
    obj_brain.put_object({'X-Delete-At': str(delete_at)})

    # make sure auto-created containers get in the account listing
    Manager(['container-updater']).once()
    # update object record in the container listing
    Manager(['container-replicator']).once()

    # take handoff servers down, and bring up the outdated primary servers
    obj_brain.start_primary_half()
    obj_brain.stop_handoff_half()

    # wait until object expiration time
    while time.time() <= delete_at:
        time.sleep(0.1)

    # run expirer against the outdated servers. it should fail since
    # the outdated version does not match the expiration time
    self.expirer.once()

    # bring all servers up, and run replicator to update servers
    obj_brain.start_handoff_half()
    Manager(['object-replicator']).once()

    # verify the deletion has failed by checking the container listing
    self.assertTrue(self._check_obj_in_container_listing(),
                    msg='Did not find listing for %s' % self.object_name)

    # run expirer again, delete should now succeed
    self.expirer.once()

    # verify the deletion by checking the container listing
    self.assertFalse(self._check_obj_in_container_listing(),
                     msg='Found listing for %s' % self.object_name)
class TestPutIfNoneMatchRepl(ReplProbeTest):
    """
    Probe tests asserting that a PUT with ``If-None-Match: *`` is
    rejected (412) when an existing object is hidden on stopped
    primaries, and that the rejected PUT does not overwrite the
    original data after replication.
    """

    def setUp(self):
        super(TestPutIfNoneMatchRepl, self).setUp()
        self.container_name = 'container-%s' % uuid.uuid4()
        self.object_name = 'object-%s' % uuid.uuid4()
        self.brain = BrainSplitter(self.url, self.token,
                                   self.container_name, self.object_name,
                                   'object', policy=self.policy)

    def _do_test(self, overwrite_contents):
        # Common driver: create the object on a minority of primaries,
        # then attempt an If-None-Match overwrite with the given body.
        self.brain.put_container()
        self.brain.stop_primary_half()
        # put object to only 1 of 3 primaries
        self.brain.put_object(contents=b'VERIFY')
        self.brain.start_primary_half()

        # Restart services and attempt to overwrite
        with self.assertRaises(client.ClientException) as exc_mgr:
            self.brain.put_object(headers={'If-None-Match': '*'},
                                  contents=overwrite_contents)
        self.assertEqual(exc_mgr.exception.http_status, 412)

        # make sure we're GETting from the servers that missed the
        # original PUT
        self.brain.stop_handoff_half()

        # verify the PUT did not complete
        with self.assertRaises(client.ClientException) as exc_mgr:
            client.get_object(self.url, self.token,
                              self.container_name, self.object_name)
        self.assertEqual(exc_mgr.exception.http_status, 404)

        # for completeness, run replicators...
        Manager(['object-replicator']).once()

        # ...and verify the object was not overwritten
        _headers, body = client.get_object(self.url, self.token,
                                           self.container_name,
                                           self.object_name)
        self.assertEqual(body, b'VERIFY')

    def test_content_length_nonzero(self):
        self._do_test(b'OVERWRITE')

    def test_content_length_zero(self):
        self._do_test(b'')

    def test_chunked(self):
        self._do_test(chunker(b'OVERWRITE'))

    def test_chunked_empty(self):
        self._do_test(chunker(b''))
class TestPutIfNoneMatchRepl(ReplProbeTest):
    """
    Probe tests asserting that a PUT with ``If-None-Match: *`` is
    rejected (412) when an existing object is hidden on stopped
    primaries, and that the rejected PUT does not overwrite the
    original data after replication.
    """

    def setUp(self):
        super(TestPutIfNoneMatchRepl, self).setUp()
        self.container_name = 'container-%s' % uuid.uuid4()
        self.object_name = 'object-%s' % uuid.uuid4()
        self.brain = BrainSplitter(self.url, self.token,
                                   self.container_name, self.object_name,
                                   'object', policy=self.policy)

    def _do_test(self, overwrite_contents):
        # Common driver: create the object on a minority of primaries,
        # then attempt an If-None-Match overwrite with the given body.
        self.brain.put_container()
        self.brain.stop_primary_half()
        # put object to only 1 of 3 primaries
        # FIX: object bodies are bytes on Python 3 -- swiftclient returns
        # the response body as bytes, so the assertEqual below would
        # otherwise always fail; this also matches the duplicate
        # bytes-based implementation of this class elsewhere in the file.
        self.brain.put_object(contents=b'VERIFY')
        self.brain.start_primary_half()

        # Restart services and attempt to overwrite
        with self.assertRaises(client.ClientException) as exc_mgr:
            self.brain.put_object(headers={'If-None-Match': '*'},
                                  contents=overwrite_contents)
        self.assertEqual(exc_mgr.exception.http_status, 412)

        # make sure we're GETting from the servers that missed the
        # original PUT
        self.brain.stop_handoff_half()

        # verify the PUT did not complete
        with self.assertRaises(client.ClientException) as exc_mgr:
            client.get_object(
                self.url, self.token, self.container_name,
                self.object_name)
        self.assertEqual(exc_mgr.exception.http_status, 404)

        # for completeness, run replicators...
        Manager(['object-replicator']).once()

        # ...and verify the object was not overwritten
        _headers, body = client.get_object(
            self.url, self.token, self.container_name, self.object_name)
        self.assertEqual(body, b'VERIFY')

    def test_content_length_nonzero(self):
        self._do_test(b'OVERWRITE')

    def test_content_length_zero(self):
        self._do_test(b'')

    def test_chunked(self):
        self._do_test(chunker(b'OVERWRITE'))

    def test_chunked_empty(self):
        self._do_test(chunker(b''))
def test_expirer_object_should_not_be_expired(self): obj_brain = BrainSplitter(self.url, self.token, self.container_name, self.object_name, 'object', self.policy) # T(obj_created) < T(obj_deleted with x-delete-at) < T(obj_recreated) # < T(expirer_executed) # Recreated obj should be appeared in any split brain case # T(obj_created) first_created_at = time.time() # T(obj_deleted with x-delete-at) # object-server accepts req only if X-Delete-At is later than 'now' delete_at = int(time.time() + 1.5) # T(obj_recreated) recreated_at = time.time() + 2.0 # T(expirer_executed) - 'now' sleep_for_expirer = 2.01 obj_brain.put_container(int(self.policy)) obj_brain.put_object( headers={'X-Delete-At': delete_at, 'X-Timestamp': Timestamp(first_created_at).internal}) # some object servers stopped obj_brain.stop_primary_half() obj_brain.put_object( headers={'X-Timestamp': Timestamp(recreated_at).internal, 'X-Object-Meta-Expired': 'False'}) # make sure auto-created containers get in the account listing Manager(['container-updater']).once() # some object servers recovered obj_brain.start_primary_half() # sleep to make sure expirer runs at the time after obj is recreated time.sleep(sleep_for_expirer) self.expirer.once() # inconsistent state of objects is recovered Manager(['object-replicator']).once() # check if you can get recreated object metadata = self.client.get_object_metadata( self.account, self.container_name, self.object_name) self.assertIn('x-object-meta-expired', metadata)
class TestObjectExpirer(ReplProbeTest):
    """
    Probe tests for the object-expirer daemon, exercising split-brain
    scenarios (wrong policy, overwritten objects, outdated versions)
    and the expirer's queue-handling behavior.
    """

    def setUp(self):
        # Verify the object-expirer service can be controlled before
        # running any test; skip the whole class otherwise.
        self.expirer = Manager(['object-expirer'])
        self.expirer.start()
        err = self.expirer.stop()
        if err:
            raise unittest.SkipTest('Unable to verify object-expirer service')

        # Reuse the expirer's own config for an InternalClient so our
        # requests look like the expirer's (3 retries).
        conf_files = []
        for server in self.expirer.servers:
            conf_files.extend(server.conf_files())
        conf_file = conf_files[0]
        self.client = InternalClient(conf_file, 'probe-test', 3)

        super(TestObjectExpirer, self).setUp()
        self.container_name = 'container-%s' % uuid.uuid4()
        self.object_name = 'object-%s' % uuid.uuid4()
        self.brain = BrainSplitter(self.url, self.token, self.container_name,
                                   self.object_name)

    def _check_obj_in_container_listing(self):
        # Return True if self.object_name appears in the container
        # listing as seen through the internal client.
        for obj in self.client.iter_objects(self.account,
                                            self.container_name):
            if self.object_name == obj['name']:
                return True
        return False

    @unittest.skipIf(len(ENABLED_POLICIES) < 2, "Need more than one policy")
    def test_expirer_object_split_brain(self):
        # An expiring object written under one policy while the
        # container later flips to another policy must still get
        # expired once the cluster converges.
        old_policy = random.choice(ENABLED_POLICIES)
        wrong_policy = random.choice([p for p in ENABLED_POLICIES
                                      if p != old_policy])
        # create an expiring object and a container with the wrong policy
        self.brain.stop_primary_half()
        self.brain.put_container(int(old_policy))
        self.brain.put_object(headers={'X-Delete-After': 2})
        # get the object timestamp
        metadata = self.client.get_object_metadata(
            self.account, self.container_name, self.object_name,
            headers={'X-Backend-Storage-Policy-Index': int(old_policy)})
        create_timestamp = Timestamp(metadata['x-timestamp'])
        self.brain.start_primary_half()
        # get the expiring object updates in their queue, while we have all
        # the servers up
        Manager(['object-updater']).once()
        self.brain.stop_handoff_half()
        self.brain.put_container(int(wrong_policy))
        # don't start handoff servers, only wrong policy is available
        # make sure auto-created containers get in the account listing
        Manager(['container-updater']).once()
        # this guy should no-op since it's unable to expire the object
        self.expirer.once()
        self.brain.start_handoff_half()
        self.get_to_final_state()

        # validate object is expired
        found_in_policy = None
        metadata = self.client.get_object_metadata(
            self.account, self.container_name, self.object_name,
            acceptable_statuses=(4,),
            headers={'X-Backend-Storage-Policy-Index': int(old_policy)})
        self.assertIn('x-backend-timestamp', metadata)
        self.assertEqual(Timestamp(metadata['x-backend-timestamp']),
                         create_timestamp)

        # but it is still in the listing
        self.assertTrue(self._check_obj_in_container_listing(),
                        msg='Did not find listing for %s' % self.object_name)

        # clear proxy cache
        client.post_container(self.url, self.token, self.container_name, {})
        # run the expirer again after replication
        self.expirer.once()

        # object is not in the listing
        self.assertFalse(self._check_obj_in_container_listing(),
                         msg='Found listing for %s' % self.object_name)

        # and validate object is tombstoned exactly once across policies
        found_in_policy = None
        for policy in ENABLED_POLICIES:
            metadata = self.client.get_object_metadata(
                self.account, self.container_name, self.object_name,
                acceptable_statuses=(4,),
                headers={'X-Backend-Storage-Policy-Index': int(policy)})
            if 'x-backend-timestamp' in metadata:
                if found_in_policy:
                    self.fail('found object in %s and also %s' % (
                        found_in_policy, policy))
                found_in_policy = policy
                self.assertIn('x-backend-timestamp', metadata)
                self.assertGreater(Timestamp(metadata['x-backend-timestamp']),
                                   create_timestamp)

    def test_expirer_doesnt_make_async_pendings(self):
        # The object expirer cleans up its own queue. The inner loop
        # basically looks like this:
        #
        #     for obj in stuff_to_delete:
        #         delete_the_object(obj)
        #         remove_the_queue_entry(obj)
        #
        # By default, upon receipt of a DELETE request for an expiring
        # object, the object servers will create async_pending records to
        # clean the expirer queue. Since the expirer cleans its own queue,
        # this is unnecessary. The expirer can make requests in such a way
        # that the object server does not write out any async pendings;
        # this test asserts that this is the case.

        # Make an expiring object in each policy
        for policy in ENABLED_POLICIES:
            container_name = "expirer-test-%d" % policy.idx
            container_headers = {'X-Storage-Policy': policy.name}
            client.put_container(self.url, self.token, container_name,
                                 headers=container_headers)

            now = time.time()
            delete_at = int(now + 2.0)
            client.put_object(
                self.url, self.token, container_name, "some-object",
                headers={'X-Delete-At': str(delete_at),
                         'X-Timestamp': Timestamp(now).normal},
                contents='dontcare')

        time.sleep(2.0)
        # make sure auto-created expirer-queue containers get in the account
        # listing so the expirer can find them
        Manager(['container-updater']).once()

        # Make sure there's no async_pendings anywhere. Probe tests only run
        # on single-node installs anyway, so this set should be small enough
        # that an exhaustive check doesn't take too long.
        all_obj_nodes = self.get_all_object_nodes()
        pendings_before = self.gather_async_pendings(all_obj_nodes)

        # expire the objects
        Manager(['object-expirer']).once()
        pendings_after = self.gather_async_pendings(all_obj_nodes)
        self.assertEqual(pendings_after, pendings_before)

    def test_expirer_object_should_not_be_expired(self):
        # Current object-expirer checks the correctness via x-if-delete-at
        # header that it can be deleted by expirer. If there are objects
        # either which doesn't have x-delete-at header as metadata or which
        # has different x-delete-at value from x-if-delete-at value,
        # object-expirer's delete will fail as 412 PreconditionFailed.
        # However, if some of the objects are in handoff nodes, the expirer
        # can put the tombstone with the timestamp as same as x-delete-at and
        # the object consistency will be resolved as the newer timestamp will
        # be winner (in particular, overwritten case w/o x-delete-at). This
        # test asserts such a situation that, at least, the overwritten
        # object which has a larger timestamp than the original expiration
        # date should be safe.

        def put_object(headers):
            # use internal client to PUT objects so that X-Timestamp in
            # headers is effective
            headers['Content-Length'] = '0'
            path = self.client.make_path(
                self.account, self.container_name, self.object_name)
            try:
                self.client.make_request('PUT', path, headers, (2,))
            except UnexpectedResponse as e:
                self.fail(
                    'Expected 201 for PUT object but got %s' % e.resp.status)

        obj_brain = BrainSplitter(self.url, self.token, self.container_name,
                                  self.object_name, 'object', self.policy)

        # T(obj_created) < T(obj_deleted with x-delete-at) < T(obj_recreated)
        # < T(expirer_executed)
        # Recreated obj should be appeared in any split brain case

        obj_brain.put_container()

        # T(obj_deleted with x-delete-at)
        # object-server accepts req only if X-Delete-At is later than 'now'
        # so here, T(obj_created) < T(obj_deleted with x-delete-at)
        now = time.time()
        delete_at = int(now + 2.0)
        recreate_at = delete_at + 1.0
        put_object(headers={'X-Delete-At': str(delete_at),
                            'X-Timestamp': Timestamp(now).normal})

        # some object servers stopped to make a situation that the
        # object-expirer can put tombstone in the primary nodes.
        obj_brain.stop_primary_half()

        # increment the X-Timestamp explicitly
        # (will be T(obj_deleted with x-delete-at) < T(obj_recreated))
        put_object(headers={'X-Object-Meta-Expired': 'False',
                            'X-Timestamp': Timestamp(recreate_at).normal})

        # make sure auto-created containers get in the account listing
        Manager(['container-updater']).once()
        # sanity, the newer object is still there
        try:
            metadata = self.client.get_object_metadata(
                self.account, self.container_name, self.object_name)
        except UnexpectedResponse as e:
            self.fail(
                'Expected 200 for HEAD object but got %s' % e.resp.status)

        self.assertIn('x-object-meta-expired', metadata)

        # some object servers recovered
        obj_brain.start_primary_half()

        # sleep until after recreated_at
        while time.time() <= recreate_at:
            time.sleep(0.1)
        # Now, expirer runs at the time after obj is recreated
        self.expirer.once()

        # verify that original object was deleted by expirer
        obj_brain.stop_handoff_half()
        try:
            metadata = self.client.get_object_metadata(
                self.account, self.container_name, self.object_name,
                acceptable_statuses=(4,))
        except UnexpectedResponse as e:
            self.fail(
                'Expected 404 for HEAD object but got %s' % e.resp.status)
        obj_brain.start_handoff_half()

        # and inconsistent state of objects is recovered by replicator
        Manager(['object-replicator']).once()

        # check if you can get recreated object
        try:
            metadata = self.client.get_object_metadata(
                self.account, self.container_name, self.object_name)
        except UnexpectedResponse as e:
            self.fail(
                'Expected 200 for HEAD object but got %s' % e.resp.status)

        self.assertIn('x-object-meta-expired', metadata)

    def _test_expirer_delete_outdated_object_version(self, object_exists):
        # This test simulates a case where the expirer tries to delete
        # an outdated version of an object.
        # One case is where the expirer gets a 404, whereas the newest
        # version of the object is offline.
        # Another case is where the expirer gets a 412, since the old
        # version of the object mismatches the expiration time sent by
        # the expirer.
        # In any of these cases, the expirer should retry deleting the
        # object later, for as long as a reclaim age has not passed.
        obj_brain = BrainSplitter(self.url, self.token, self.container_name,
                                  self.object_name, 'object', self.policy)

        obj_brain.put_container()

        if object_exists:
            obj_brain.put_object()

        # currently, the object either doesn't exist, or does not have
        # an expiration

        # stop primary servers and put a newer version of the object, this
        # time with an expiration. only the handoff servers will have
        # the new version
        obj_brain.stop_primary_half()
        now = time.time()
        delete_at = int(now + 2.0)
        obj_brain.put_object({'X-Delete-At': str(delete_at)})

        # make sure auto-created containers get in the account listing
        Manager(['container-updater']).once()
        # update object record in the container listing
        Manager(['container-replicator']).once()

        # take handoff servers down, and bring up the outdated primary
        # servers
        obj_brain.start_primary_half()
        obj_brain.stop_handoff_half()

        # wait until object expiration time
        while time.time() <= delete_at:
            time.sleep(0.1)

        # run expirer against the outdated servers. it should fail since
        # the outdated version does not match the expiration time
        self.expirer.once()

        # bring all servers up, and run replicator to update servers
        obj_brain.start_handoff_half()
        Manager(['object-replicator']).once()

        # verify the deletion has failed by checking the container listing
        self.assertTrue(self._check_obj_in_container_listing(),
                        msg='Did not find listing for %s' % self.object_name)

        # run expirer again, delete should now succeed
        self.expirer.once()

        # verify the deletion by checking the container listing
        self.assertFalse(self._check_obj_in_container_listing(),
                         msg='Found listing for %s' % self.object_name)

    def test_expirer_delete_returns_outdated_404(self):
        self._test_expirer_delete_outdated_object_version(object_exists=False)

    def test_expirer_delete_returns_outdated_412(self):
        self._test_expirer_delete_outdated_object_version(object_exists=True)
class TestDarkDataDeletion(ReplProbeTest):
    """
    Probe test for the swift#dark_data audit watcher: .data files with
    no corresponding container-listing entry ("dark data") should be
    acted upon by the object-auditor according to ``action``.
    """

    # NB: could be 'quarantine' in another test
    action = 'delete'

    def setUp(self):
        """
        Reset all environment and start all servers.
        """
        super(TestDarkDataDeletion, self).setUp()

        # Build a private SWIFT_DIR under /tmp with object-auditor
        # configs that enable the dark_data watcher with our action.
        self.conf_dest = \
            os.path.join('/tmp/',
                         datetime.now().strftime('swift-%Y-%m-%d_%H-%M-%S-%f'))
        os.mkdir(self.conf_dest)

        object_server_dir = os.path.join(self.conf_dest, 'object-server')
        os.mkdir(object_server_dir)

        for conf_file in Server('object-auditor').conf_files():
            config = readconf(conf_file)
            if 'object-auditor' not in config:
                continue  # *somebody* should be set up to run the auditor
            config['object-auditor'].update(
                {'watchers': 'swift#dark_data'})
            # Note that this setdefault business may mean the watcher doesn't
            # pick up DEFAULT values, but that (probably?) won't matter
            config.setdefault(CONF_SECTION, {}).update(
                {'action': self.action})

            parser = ConfigParser()
            for section in ('object-auditor', CONF_SECTION):
                parser.add_section(section)
                for option, value in config[section].items():
                    parser.set(section, option, value)

            file_name = os.path.basename(conf_file)
            if file_name.endswith('.d'):
                # Work around conf.d setups (like you might see with VSAIO)
                file_name = file_name[:-2]
            with open(os.path.join(object_server_dir, file_name), 'w') as fp:
                parser.write(fp)

        self.container_name = 'container-%s' % uuid.uuid4()
        self.object_name = 'object-%s' % uuid.uuid4()
        self.brain = BrainSplitter(self.url, self.token, self.container_name,
                                   self.object_name, 'object',
                                   policy=self.policy)

    def tearDown(self):
        # Remove the temporary config tree created in setUp.
        shutil.rmtree(self.conf_dest)

    def gather_object_files_by_ext(self):
        # Walk every node's objects directory for our policy and group
        # on-disk files by extension ('.data', '.ts', ...), skipping
        # housekeeping files.
        result = collections.defaultdict(set)
        for node in self.brain.nodes:
            for path, _, files in os.walk(
                    os.path.join(
                        self.device_dir(node),
                        get_policy_string('objects', self.policy))):
                for file in files:
                    if file in ('.lock', 'hashes.pkl', 'hashes.invalid'):
                        continue
                    _, ext = os.path.splitext(file)
                    result[ext].add(os.path.join(path, file))
        return result

    def test_dark_data(self):
        self.brain.put_container()
        self.brain.put_object()
        self.brain.stop_handoff_half()
        self.brain.delete_object()
        Manager(['object-updater']).once()
        Manager(['container-replicator']).once()

        # Sanity check:
        # * all containers are empty
        # * primaries that are still up have two .ts files
        # * primary that's down has one .data file
        for index, (headers, items) in self.direct_get_container(
                container=self.container_name).items():
            self.assertEqual(headers['X-Container-Object-Count'], '0')
            self.assertEqual(items, [])
        files = self.gather_object_files_by_ext()
        self.assertLengthEqual(files, 2)
        self.assertLengthEqual(files['.ts'], 2)
        self.assertLengthEqual(files['.data'], 1)

        # Simulate a reclaim_age passing,
        # so the tombstones all got cleaned up
        for file_path in files['.ts']:
            os.unlink(file_path)

        # Old node gets reintroduced to the cluster
        self.brain.start_handoff_half()
        # ...so replication thinks its got some work to do
        Manager(['object-replicator']).once()

        # Now we're back to *three* .data files
        files = self.gather_object_files_by_ext()
        self.assertLengthEqual(files, 1)
        self.assertLengthEqual(files['.data'], 3)

        # But that's OK, audit watchers to the rescue!
        # Point manager at our watcher-enabled configs just for this run.
        old_swift_dir = manager.SWIFT_DIR
        manager.SWIFT_DIR = self.conf_dest
        try:
            Manager(['object-auditor']).once()
        finally:
            manager.SWIFT_DIR = old_swift_dir

        # Verify that the policy was applied.
        self.check_on_disk_files(files['.data'])

    def check_on_disk_files(self, files):
        # For action='delete': every dark .data file must be gone and
        # must NOT have been moved to the quarantine directory.
        for file_path in files:
            # File's not there
            self.assertFalse(os.path.exists(file_path))
            # And it's not quarantined, either!
            self.assertPathDoesNotExist(
                os.path.join(file_path[:file_path.index('objects')],
                             'quarantined'))

    def assertPathExists(self, path):
        msg = "Expected path %r to exist, but it doesn't" % path
        self.assertTrue(os.path.exists(path), msg)

    def assertPathDoesNotExist(self, path):
        msg = "Expected path %r to not exist, but it does" % path
        self.assertFalse(os.path.exists(path), msg)
class TestContainerMergePolicyIndex(ReplProbeTest): def setUp(self): if len(ENABLED_POLICIES) < 2: raise SkipTest('Need more than one policy') super(TestContainerMergePolicyIndex, self).setUp() self.container_name = 'container-%s' % uuid.uuid4() self.object_name = 'object-%s' % uuid.uuid4() self.brain = BrainSplitter(self.url, self.token, self.container_name, self.object_name, 'container') def _get_object_patiently(self, policy_index): # use proxy to access object (bad container info might be cached...) timeout = time.time() + TIMEOUT while time.time() < timeout: try: return client.get_object(self.url, self.token, self.container_name, self.object_name) except ClientException as err: if err.http_status != HTTP_NOT_FOUND: raise time.sleep(1) else: self.fail('could not HEAD /%s/%s/%s/ from policy %s ' 'after %s seconds.' % ( self.account, self.container_name, self.object_name, int(policy_index), TIMEOUT)) def test_merge_storage_policy_index(self): # generic split brain self.brain.stop_primary_half() self.brain.put_container() self.brain.start_primary_half() self.brain.stop_handoff_half() self.brain.put_container() self.brain.put_object(headers={'x-object-meta-test': 'custom-meta'}, contents='VERIFY') self.brain.start_handoff_half() # make sure we have some manner of split brain container_part, container_nodes = self.container_ring.get_nodes( self.account, self.container_name) head_responses = [] for node in container_nodes: metadata = direct_client.direct_head_container( node, container_part, self.account, self.container_name) head_responses.append((node, metadata)) found_policy_indexes = \ set(metadata['X-Backend-Storage-Policy-Index'] for node, metadata in head_responses) self.assertTrue( len(found_policy_indexes) > 1, 'primary nodes did not disagree about policy index %r' % head_responses) # find our object orig_policy_index = None for policy_index in found_policy_indexes: object_ring = POLICIES.get_object_ring(policy_index, '/etc/swift') part, nodes = 
object_ring.get_nodes( self.account, self.container_name, self.object_name) for node in nodes: try: direct_client.direct_head_object( node, part, self.account, self.container_name, self.object_name, headers={'X-Backend-Storage-Policy-Index': policy_index}) except direct_client.ClientException as err: continue orig_policy_index = policy_index break if orig_policy_index is not None: break else: self.fail('Unable to find /%s/%s/%s in %r' % ( self.account, self.container_name, self.object_name, found_policy_indexes)) self.get_to_final_state() Manager(['container-reconciler']).once() # validate containers head_responses = [] for node in container_nodes: metadata = direct_client.direct_head_container( node, container_part, self.account, self.container_name) head_responses.append((node, metadata)) found_policy_indexes = \ set(metadata['X-Backend-Storage-Policy-Index'] for node, metadata in head_responses) self.assertTrue(len(found_policy_indexes) == 1, 'primary nodes disagree about policy index %r' % head_responses) expected_policy_index = found_policy_indexes.pop() self.assertNotEqual(orig_policy_index, expected_policy_index) # validate object placement orig_policy_ring = POLICIES.get_object_ring(orig_policy_index, '/etc/swift') for node in orig_policy_ring.devs: try: direct_client.direct_head_object( node, part, self.account, self.container_name, self.object_name, headers={ 'X-Backend-Storage-Policy-Index': orig_policy_index}) except direct_client.ClientException as err: if err.http_status == HTTP_NOT_FOUND: continue raise else: self.fail('Found /%s/%s/%s in %s' % ( self.account, self.container_name, self.object_name, orig_policy_index)) # verify that the object data read by external client is correct headers, data = self._get_object_patiently(expected_policy_index) self.assertEqual('VERIFY', data) self.assertEqual('custom-meta', headers['x-object-meta-test']) def test_reconcile_delete(self): # generic split brain self.brain.stop_primary_half() 
self.brain.put_container() self.brain.put_object() self.brain.start_primary_half() self.brain.stop_handoff_half() self.brain.put_container() self.brain.delete_object() self.brain.start_handoff_half() # make sure we have some manner of split brain container_part, container_nodes = self.container_ring.get_nodes( self.account, self.container_name) head_responses = [] for node in container_nodes: metadata = direct_client.direct_head_container( node, container_part, self.account, self.container_name) head_responses.append((node, metadata)) found_policy_indexes = \ set(metadata['X-Backend-Storage-Policy-Index'] for node, metadata in head_responses) self.assertTrue( len(found_policy_indexes) > 1, 'primary nodes did not disagree about policy index %r' % head_responses) # find our object orig_policy_index = ts_policy_index = None for policy_index in found_policy_indexes: object_ring = POLICIES.get_object_ring(policy_index, '/etc/swift') part, nodes = object_ring.get_nodes( self.account, self.container_name, self.object_name) for node in nodes: try: direct_client.direct_head_object( node, part, self.account, self.container_name, self.object_name, headers={'X-Backend-Storage-Policy-Index': policy_index}) except direct_client.ClientException as err: if 'x-backend-timestamp' in err.http_headers: ts_policy_index = policy_index break else: orig_policy_index = policy_index break if not orig_policy_index: self.fail('Unable to find /%s/%s/%s in %r' % ( self.account, self.container_name, self.object_name, found_policy_indexes)) if not ts_policy_index: self.fail('Unable to find tombstone /%s/%s/%s in %r' % ( self.account, self.container_name, self.object_name, found_policy_indexes)) self.get_to_final_state() Manager(['container-reconciler']).once() # validate containers head_responses = [] for node in container_nodes: metadata = direct_client.direct_head_container( node, container_part, self.account, self.container_name) head_responses.append((node, metadata)) new_found_policy_indexes 
= \
            set(metadata['X-Backend-Storage-Policy-Index']
                for node, metadata in head_responses)
        self.assertTrue(len(new_found_policy_indexes) == 1,
                        'primary nodes disagree about policy index %r' %
                        dict((node['port'],
                              metadata['X-Backend-Storage-Policy-Index'])
                             for node, metadata in head_responses))
        expected_policy_index = new_found_policy_indexes.pop()
        self.assertEqual(orig_policy_index, expected_policy_index)
        # validate object fully deleted
        for policy_index in found_policy_indexes:
            object_ring = POLICIES.get_object_ring(policy_index, '/etc/swift')
            part, nodes = object_ring.get_nodes(
                self.account, self.container_name, self.object_name)
            for node in nodes:
                try:
                    direct_client.direct_head_object(
                        node, part, self.account, self.container_name,
                        self.object_name, headers={
                            'X-Backend-Storage-Policy-Index': policy_index})
                except direct_client.ClientException as err:
                    # 404 is the expected outcome: the object must be gone
                    # from every policy after reconciliation
                    if err.http_status == HTTP_NOT_FOUND:
                        continue
                else:
                    # HEAD succeeded -> the object still exists somewhere
                    self.fail('Found /%s/%s/%s in %s on %s' % (
                        self.account, self.container_name, self.object_name,
                        orig_policy_index, node))

    def test_reconcile_manifest(self):
        """Verify the reconciler moves misplaced SLO segments so that both a
        directly-uploaded manifest and a subsequent proxy upload are readable.

        Writes half the segments while the container has one policy and half
        after recreating it with a different (replication) policy, sneaks a
        manifest onto the storage nodes with direct_put_object, runs the
        reconciler, then validates full reads through the proxy.
        """
        info_url = "%s://%s/info" % (urlparse(self.url).scheme,
                                     urlparse(self.url).netloc)
        proxy_conn = client.http_connection(info_url)
        cluster_info = client.get_capabilities(proxy_conn)
        if 'slo' not in cluster_info:
            raise SkipTest("SLO not enabled in proxy; "
                           "can't test manifest reconciliation")
        # this test is not only testing a split brain scenario on
        # multiple policies with mis-placed objects - it even writes out
        # a static large object directly to the storage nodes while the
        # objects are unavailably mis-placed from *behind* the proxy and
        # doesn't know how to do that for EC_POLICY (clayg: why did you
        # guys let me write a test that does this!?) - so we force
        # wrong_policy (where the manifest gets written) to be one of
        # any of your configured REPL_POLICY (we know you have one
        # because this is a ReplProbeTest)
        wrong_policy = random.choice(POLICIES_BY_TYPE[REPL_POLICY])
        policy = random.choice([p for p in ENABLED_POLICIES
                                if p is not wrong_policy])
        manifest_data = []

        def write_part(i):
            # upload one 1 MiB zero-padded segment and record its manifest
            # entry (path/etag/size) for the SLO manifest
            body = 'VERIFY%0.2d' % i + '\x00' * 1048576
            part_name = 'manifest_part_%0.2d' % i
            manifest_entry = {
                "path": "/%s/%s" % (self.container_name, part_name),
                "etag": md5(body).hexdigest(),
                "size_bytes": len(body),
            }
            client.put_object(self.url, self.token, self.container_name,
                              part_name, contents=body)
            manifest_data.append(manifest_entry)

        # get an old container stashed
        self.brain.stop_primary_half()
        self.brain.put_container(int(policy))
        self.brain.start_primary_half()
        # write some parts
        for i in range(10):
            write_part(i)

        self.brain.stop_handoff_half()
        self.brain.put_container(int(wrong_policy))
        # write some more parts
        for i in range(10, 20):
            write_part(i)

        # write manifest
        with self.assertRaises(ClientException) as catcher:
            client.put_object(self.url, self.token, self.container_name,
                              self.object_name,
                              contents=utils.json.dumps(manifest_data),
                              query_string='multipart-manifest=put')

        # so as it works out, you can't really upload a multi-part
        # manifest for objects that are currently misplaced - you have to
        # wait until they're all available - which is about the same as
        # some other failure that causes data to be unavailable to the
        # proxy at the time of upload
        self.assertEqual(catcher.exception.http_status, 400)

        # but what the heck, we'll sneak one in just to see what happens...
        direct_manifest_name = self.object_name + '-direct-test'
        object_ring = POLICIES.get_object_ring(wrong_policy.idx, '/etc/swift')
        part, nodes = object_ring.get_nodes(
            self.account, self.container_name, direct_manifest_name)
        container_part = self.container_ring.get_part(self.account,
                                                      self.container_name)

        def translate_direct(data):
            # convert a client-style manifest entry to the on-disk
            # (container-listing) key names used by direct uploads
            return {
                'hash': data['etag'],
                'bytes': data['size_bytes'],
                'name': data['path'],
            }
        direct_manifest_data = map(translate_direct, manifest_data)
        headers = {
            'x-container-host': ','.join('%s:%s' % (n['ip'], n['port'])
                                         for n in
                                         self.container_ring.devs),
            'x-container-device': ','.join(n['device'] for n in
                                           self.container_ring.devs),
            'x-container-partition': container_part,
            'X-Backend-Storage-Policy-Index': wrong_policy.idx,
            'X-Static-Large-Object': 'True',
        }
        for node in nodes:
            direct_client.direct_put_object(
                node, part, self.account, self.container_name,
                direct_manifest_name,
                contents=utils.json.dumps(direct_manifest_data),
                headers=headers)
            break  # one should do it...

        self.brain.start_handoff_half()
        self.get_to_final_state()
        Manager(['container-reconciler']).once()
        # clear proxy cache
        client.post_container(self.url, self.token, self.container_name, {})

        # let's see how that direct upload worked out...
        metadata, body = client.get_object(
            self.url, self.token, self.container_name, direct_manifest_name,
            query_string='multipart-manifest=get')
        self.assertEqual(metadata['x-static-large-object'].lower(), 'true')
        for i, entry in enumerate(utils.json.loads(body)):
            for key in ('hash', 'bytes', 'name'):
                self.assertEqual(entry[key], direct_manifest_data[i][key])
        metadata, body = client.get_object(
            self.url, self.token, self.container_name, direct_manifest_name)
        self.assertEqual(metadata['x-static-large-object'].lower(), 'true')
        self.assertEqual(int(metadata['content-length']),
                         sum(part['size_bytes'] for part in manifest_data))
        self.assertEqual(body, ''.join('VERIFY%0.2d' % i + '\x00' * 1048576
                                       for i in range(20)))

        # and regular upload should work now too
        client.put_object(self.url, self.token, self.container_name,
                          self.object_name,
                          contents=utils.json.dumps(manifest_data),
                          query_string='multipart-manifest=put')
        metadata = client.head_object(self.url, self.token,
                                      self.container_name,
                                      self.object_name)
        self.assertEqual(int(metadata['content-length']),
                         sum(part['size_bytes'] for part in manifest_data))

    def test_reconciler_move_object_twice(self):
        """Verify an object bounced between policies ends up in the original
        (old) policy and the misplaced-objects queue drains completely.

        The object is first reconciled from new_policy back to old_policy,
        then - because the old-policy row is newer - reconciled back again,
        after which the queue must be empty and the data readable.
        """
        # select some policies
        old_policy = random.choice(ENABLED_POLICIES)
        new_policy = random.choice([p for p in ENABLED_POLICIES
                                    if p != old_policy])

        # setup a split brain
        self.brain.stop_handoff_half()
        # get old_policy on two primaries
        self.brain.put_container(policy_index=int(old_policy))
        self.brain.start_handoff_half()
        self.brain.stop_primary_half()
        # force a recreate on handoffs
        self.brain.put_container(policy_index=int(old_policy))
        self.brain.delete_container()
        self.brain.put_container(policy_index=int(new_policy))
        self.brain.put_object()  # populate memcache with new_policy
        self.brain.start_primary_half()

        # at this point two primaries have old policy
        container_part, container_nodes = self.container_ring.get_nodes(
            self.account, self.container_name)
        head_responses = []
        for node in container_nodes:
            metadata = direct_client.direct_head_container(
                node, container_part, self.account, self.container_name)
            head_responses.append((node, metadata))
        old_container_node_ids = [
            node['id'] for node, metadata in head_responses
            if int(old_policy) ==
            int(metadata['X-Backend-Storage-Policy-Index'])]
        self.assertEqual(2, len(old_container_node_ids))

        # hopefully memcache still has the new policy cached
        self.brain.put_object(headers={'x-object-meta-test': 'custom-meta'},
                              contents='VERIFY')
        # double-check object correctly written to new policy
        conf_files = []
        for server in Manager(['container-reconciler']).servers:
            conf_files.extend(server.conf_files())
        conf_file = conf_files[0]
        int_client = InternalClient(conf_file, 'probe-test', 3)
        int_client.get_object_metadata(
            self.account, self.container_name, self.object_name,
            headers={'X-Backend-Storage-Policy-Index': int(new_policy)})
        int_client.get_object_metadata(
            self.account, self.container_name, self.object_name,
            acceptable_statuses=(4,),
            headers={'X-Backend-Storage-Policy-Index': int(old_policy)})

        # shutdown the containers that know about the new policy
        self.brain.stop_handoff_half()

        # and get rows enqueued from old nodes
        for server_type in ('container-replicator', 'container-updater'):
            server = Manager([server_type])
            # run each daemon only on the nodes that hold old_policy rows;
            # daemon "number" args are 1-based, node ids 0-based
            tuple(server.once(number=n + 1) for n in old_container_node_ids)

        # verify entry in the queue for the "misplaced" new_policy
        for container in int_client.iter_containers('.misplaced_objects'):
            for obj in int_client.iter_objects('.misplaced_objects',
                                               container['name']):
                expected = '%d:/%s/%s/%s' % (new_policy, self.account,
                                             self.container_name,
                                             self.object_name)
                self.assertEqual(obj['name'], expected)

        Manager(['container-reconciler']).once()

        # verify object in old_policy
        int_client.get_object_metadata(
            self.account, self.container_name, self.object_name,
            headers={'X-Backend-Storage-Policy-Index': int(old_policy)})

        # verify object is *not* in new_policy
        int_client.get_object_metadata(
            self.account, self.container_name, self.object_name,
            acceptable_statuses=(4,),
            headers={'X-Backend-Storage-Policy-Index': int(new_policy)})

        self.get_to_final_state()

        # verify entry in the queue
        for container in int_client.iter_containers('.misplaced_objects'):
            for obj in int_client.iter_objects('.misplaced_objects',
                                               container['name']):
                expected = '%d:/%s/%s/%s' % (old_policy, self.account,
                                             self.container_name,
                                             self.object_name)
                self.assertEqual(obj['name'], expected)

        Manager(['container-reconciler']).once()

        # and now it flops back
        int_client.get_object_metadata(
            self.account, self.container_name, self.object_name,
            headers={'X-Backend-Storage-Policy-Index': int(new_policy)})
        int_client.get_object_metadata(
            self.account, self.container_name, self.object_name,
            acceptable_statuses=(4,),
            headers={'X-Backend-Storage-Policy-Index': int(old_policy)})

        # make sure the queue is settled
        self.get_to_final_state()
        for container in int_client.iter_containers('.misplaced_objects'):
            for obj in int_client.iter_objects('.misplaced_objects',
                                               container['name']):
                self.fail('Found unexpected object %r in the queue' % obj)

        # verify that the object data read by external client is correct
        headers, data = self._get_object_patiently(int(new_policy))
        self.assertEqual('VERIFY', data)
        self.assertEqual('custom-meta', headers['x-object-meta-test'])
class TestContainerMergePolicyIndex(unittest.TestCase):
    """Legacy probe tests for policy-index reconciliation after split brain.

    Review fixes applied (no behavior change to what the tests assert):
    * deprecated ``self.assert_`` / ``self.assertEquals`` aliases replaced
      with ``assertTrue`` / ``assertEqual`` (the aliases are removed in
      Python 3.12);
    * the local variable that shadowed the module-level ``client`` import in
      ``test_reconciler_move_object_twice`` renamed to ``int_client``;
    * unused ``as err`` bindings dropped where the exception is not read.
    """

    def setUp(self):
        # these tests exercise reconciliation between two policies, so a
        # single-policy cluster cannot run them
        if len(POLICIES) < 2:
            raise SkipTest()
        (self.pids, self.port2server, self.account_ring, self.container_ring,
         self.object_ring, self.policy, self.url, self.token,
         self.account, self.configs) = reset_environment()
        self.container_name = 'container-%s' % uuid.uuid4()
        self.object_name = 'object-%s' % uuid.uuid4()
        self.brain = BrainSplitter(self.url, self.token, self.container_name,
                                   self.object_name, 'container')

    def test_merge_storage_policy_index(self):
        """After a container split brain, the reconciler must leave a single
        agreed policy index and move the object out of the losing policy."""
        # generic split brain
        self.brain.stop_primary_half()
        self.brain.put_container()
        self.brain.start_primary_half()
        self.brain.stop_handoff_half()
        self.brain.put_container()
        self.brain.put_object()
        self.brain.start_handoff_half()
        # make sure we have some manner of split brain
        container_part, container_nodes = self.container_ring.get_nodes(
            self.account, self.container_name)
        head_responses = []
        for node in container_nodes:
            metadata = direct_client.direct_head_container(
                node, container_part, self.account, self.container_name)
            head_responses.append((node, metadata))
        found_policy_indexes = \
            set(metadata['X-Backend-Storage-Policy-Index']
                for node, metadata in head_responses)
        self.assertTrue(
            len(found_policy_indexes) > 1,
            'primary nodes did not disagree about policy index %r' %
            head_responses)
        # find our object
        orig_policy_index = None
        for policy_index in found_policy_indexes:
            object_ring = POLICIES.get_object_ring(policy_index, '/etc/swift')
            part, nodes = object_ring.get_nodes(
                self.account, self.container_name, self.object_name)
            for node in nodes:
                try:
                    direct_client.direct_head_object(
                        node, part, self.account, self.container_name,
                        self.object_name,
                        headers={'X-Backend-Storage-Policy-Index':
                                 policy_index})
                except direct_client.ClientException:
                    # not on this node; keep probing
                    continue
                orig_policy_index = policy_index
                break
            if orig_policy_index is not None:
                break
        else:
            self.fail('Unable to find /%s/%s/%s in %r' % (
                self.account, self.container_name, self.object_name,
                found_policy_indexes))
        get_to_final_state()
        Manager(['container-reconciler']).once()
        # validate containers
        head_responses = []
        for node in container_nodes:
            metadata = direct_client.direct_head_container(
                node, container_part, self.account, self.container_name)
            head_responses.append((node, metadata))
        found_policy_indexes = \
            set(metadata['X-Backend-Storage-Policy-Index']
                for node, metadata in head_responses)
        self.assertTrue(
            len(found_policy_indexes) == 1,
            'primary nodes disagree about policy index %r' %
            head_responses)
        expected_policy_index = found_policy_indexes.pop()
        # the object was found under the policy that lost the merge
        self.assertNotEqual(orig_policy_index, expected_policy_index)
        # validate object placement
        orig_policy_ring = POLICIES.get_object_ring(orig_policy_index,
                                                    '/etc/swift')
        for node in orig_policy_ring.devs:
            try:
                direct_client.direct_head_object(
                    node, part, self.account, self.container_name,
                    self.object_name,
                    headers={'X-Backend-Storage-Policy-Index':
                             orig_policy_index})
            except direct_client.ClientException as err:
                if err.http_status == HTTP_NOT_FOUND:
                    continue
                raise
            else:
                self.fail('Found /%s/%s/%s in %s' % (
                    self.account, self.container_name, self.object_name,
                    orig_policy_index))
        # use proxy to access object (bad container info might be cached...)
        timeout = time.time() + TIMEOUT
        while time.time() < timeout:
            try:
                metadata = client.head_object(self.url, self.token,
                                              self.container_name,
                                              self.object_name)
            except ClientException as err:
                if err.http_status != HTTP_NOT_FOUND:
                    raise
                time.sleep(1)
            else:
                break
        else:
            self.fail('could not HEAD /%s/%s/%s/ from policy %s '
                      'after %s seconds.' % (
                          self.account, self.container_name,
                          self.object_name, expected_policy_index, TIMEOUT))

    def test_reconcile_delete(self):
        """A DELETE recorded under one policy must win over a PUT recorded
        under the other: after reconciliation the object is fully gone."""
        # generic split brain
        self.brain.stop_primary_half()
        self.brain.put_container()
        self.brain.put_object()
        self.brain.start_primary_half()
        self.brain.stop_handoff_half()
        self.brain.put_container()
        self.brain.delete_object()
        self.brain.start_handoff_half()
        # make sure we have some manner of split brain
        container_part, container_nodes = self.container_ring.get_nodes(
            self.account, self.container_name)
        head_responses = []
        for node in container_nodes:
            metadata = direct_client.direct_head_container(
                node, container_part, self.account, self.container_name)
            head_responses.append((node, metadata))
        found_policy_indexes = \
            set(metadata['X-Backend-Storage-Policy-Index']
                for node, metadata in head_responses)
        self.assertTrue(
            len(found_policy_indexes) > 1,
            'primary nodes did not disagree about policy index %r' %
            head_responses)
        # find our object
        orig_policy_index = ts_policy_index = None
        for policy_index in found_policy_indexes:
            object_ring = POLICIES.get_object_ring(policy_index, '/etc/swift')
            part, nodes = object_ring.get_nodes(
                self.account, self.container_name, self.object_name)
            for node in nodes:
                try:
                    direct_client.direct_head_object(
                        node, part, self.account, self.container_name,
                        self.object_name,
                        headers={'X-Backend-Storage-Policy-Index':
                                 policy_index})
                except direct_client.ClientException as err:
                    # a 404 that carries x-backend-timestamp means a
                    # tombstone lives under this policy
                    if 'x-backend-timestamp' in err.http_headers:
                        ts_policy_index = policy_index
                        break
                else:
                    orig_policy_index = policy_index
                    break
        if not orig_policy_index:
            self.fail('Unable to find /%s/%s/%s in %r' % (
                self.account, self.container_name, self.object_name,
                found_policy_indexes))
        if not ts_policy_index:
            self.fail('Unable to find tombstone /%s/%s/%s in %r' % (
                self.account, self.container_name, self.object_name,
                found_policy_indexes))
        get_to_final_state()
        Manager(['container-reconciler']).once()
        # validate containers
        head_responses = []
        for node in container_nodes:
            metadata = direct_client.direct_head_container(
                node, container_part, self.account, self.container_name)
            head_responses.append((node, metadata))
        new_found_policy_indexes = \
            set(metadata['X-Backend-Storage-Policy-Index']
                for node, metadata in head_responses)
        self.assertTrue(
            len(new_found_policy_indexes) == 1,
            'primary nodes disagree about policy index %r' %
            dict((node['port'],
                  metadata['X-Backend-Storage-Policy-Index'])
                 for node, metadata in head_responses))
        expected_policy_index = new_found_policy_indexes.pop()
        self.assertEqual(orig_policy_index, expected_policy_index)
        # validate object fully deleted
        for policy_index in found_policy_indexes:
            object_ring = POLICIES.get_object_ring(policy_index, '/etc/swift')
            part, nodes = object_ring.get_nodes(
                self.account, self.container_name, self.object_name)
            for node in nodes:
                try:
                    direct_client.direct_head_object(
                        node, part, self.account, self.container_name,
                        self.object_name,
                        headers={'X-Backend-Storage-Policy-Index':
                                 policy_index})
                except direct_client.ClientException as err:
                    if err.http_status == HTTP_NOT_FOUND:
                        continue
                else:
                    self.fail('Found /%s/%s/%s in %s on %s' % (
                        self.account, self.container_name, self.object_name,
                        orig_policy_index, node))

    def test_reconcile_manifest(self):
        """Reconcile misplaced SLO segments and verify a direct-uploaded
        manifest (and a later proxy upload) read back correctly."""
        manifest_data = []

        def write_part(i):
            # upload one 1 MiB zero-padded segment and record its entry
            body = 'VERIFY%0.2d' % i + '\x00' * 1048576
            part_name = 'manifest_part_%0.2d' % i
            manifest_entry = {
                "path": "/%s/%s" % (self.container_name, part_name),
                "etag": md5(body).hexdigest(),
                "size_bytes": len(body),
            }
            client.put_object(self.url, self.token, self.container_name,
                              part_name, contents=body)
            manifest_data.append(manifest_entry)

        # get an old container stashed
        self.brain.stop_primary_half()
        policy = random.choice(list(POLICIES))
        self.brain.put_container(policy.idx)
        self.brain.start_primary_half()
        # write some parts
        for i in range(10):
            write_part(i)

        self.brain.stop_handoff_half()
        wrong_policy = random.choice([p for p in POLICIES
                                      if p is not policy])
        self.brain.put_container(wrong_policy.idx)
        # write some more parts
        for i in range(10, 20):
            write_part(i)

        # write manifest
        try:
            client.put_object(self.url, self.token, self.container_name,
                              self.object_name,
                              contents=utils.json.dumps(manifest_data),
                              query_string='multipart-manifest=put')
        except ClientException as err:
            # so as it works out, you can't really upload a multi-part
            # manifest for objects that are currently misplaced - you have to
            # wait until they're all available - which is about the same as
            # some other failure that causes data to be unavailable to the
            # proxy at the time of upload
            self.assertEqual(err.http_status, 400)

        # but what the heck, we'll sneak one in just to see what happens...
        direct_manifest_name = self.object_name + '-direct-test'
        object_ring = POLICIES.get_object_ring(wrong_policy.idx, '/etc/swift')
        part, nodes = object_ring.get_nodes(
            self.account, self.container_name, direct_manifest_name)
        container_part = self.container_ring.get_part(self.account,
                                                      self.container_name)

        def translate_direct(data):
            # convert client-style manifest keys to container-listing keys
            return {
                'hash': data['etag'],
                'bytes': data['size_bytes'],
                'name': data['path'],
            }
        direct_manifest_data = map(translate_direct, manifest_data)
        headers = {
            'x-container-host': ','.join('%s:%s' % (n['ip'], n['port'])
                                         for n in
                                         self.container_ring.devs),
            'x-container-device': ','.join(n['device'] for n in
                                           self.container_ring.devs),
            'x-container-partition': container_part,
            'X-Backend-Storage-Policy-Index': wrong_policy.idx,
            'X-Static-Large-Object': 'True',
        }
        for node in nodes:
            direct_client.direct_put_object(
                node, part, self.account, self.container_name,
                direct_manifest_name,
                contents=utils.json.dumps(direct_manifest_data),
                headers=headers)
            break  # one should do it...

        self.brain.start_handoff_half()
        get_to_final_state()
        Manager(['container-reconciler']).once()
        # clear proxy cache
        client.post_container(self.url, self.token, self.container_name, {})
        # let's see how that direct upload worked out...
        metadata, body = client.get_object(
            self.url, self.token, self.container_name, direct_manifest_name,
            query_string='multipart-manifest=get')
        self.assertEqual(metadata['x-static-large-object'].lower(), 'true')
        for i, entry in enumerate(utils.json.loads(body)):
            for key in ('hash', 'bytes', 'name'):
                self.assertEqual(entry[key], direct_manifest_data[i][key])
        metadata, body = client.get_object(self.url, self.token,
                                           self.container_name,
                                           direct_manifest_name)
        self.assertEqual(metadata['x-static-large-object'].lower(), 'true')
        self.assertEqual(int(metadata['content-length']),
                         sum(part['size_bytes'] for part in manifest_data))
        self.assertEqual(
            body, ''.join('VERIFY%0.2d' % i + '\x00' * 1048576
                          for i in range(20)))

        # and regular upload should work now too
        client.put_object(self.url, self.token, self.container_name,
                          self.object_name,
                          contents=utils.json.dumps(manifest_data),
                          query_string='multipart-manifest=put')
        metadata = client.head_object(self.url, self.token,
                                      self.container_name,
                                      self.object_name)
        self.assertEqual(int(metadata['content-length']),
                         sum(part['size_bytes'] for part in manifest_data))

    def test_reconciler_move_object_twice(self):
        """Bounce an object between two policies via the reconciler and check
        it settles under old_policy with an empty misplaced-objects queue."""
        # select some policies
        old_policy = random.choice(list(POLICIES))
        new_policy = random.choice([p for p in POLICIES
                                    if p != old_policy])

        # setup a split brain
        self.brain.stop_handoff_half()
        # get old_policy on two primaries
        self.brain.put_container(policy_index=int(old_policy))
        self.brain.start_handoff_half()
        self.brain.stop_primary_half()
        # force a recreate on handoffs
        self.brain.put_container(policy_index=int(old_policy))
        self.brain.delete_container()
        self.brain.put_container(policy_index=int(new_policy))
        self.brain.put_object()  # populate memcache with new_policy
        self.brain.start_primary_half()

        # at this point two primaries have old policy
        container_part, container_nodes = self.container_ring.get_nodes(
            self.account, self.container_name)
        head_responses = []
        for node in container_nodes:
            metadata = direct_client.direct_head_container(
                node, container_part, self.account, self.container_name)
            head_responses.append((node, metadata))
        old_container_node_ids = [
            node['id'] for node, metadata in head_responses
            if int(old_policy) ==
            int(metadata['X-Backend-Storage-Policy-Index'])]
        self.assertEqual(2, len(old_container_node_ids))

        # hopefully memcache still has the new policy cached
        self.brain.put_object()
        # double-check object correctly written to new policy
        conf_files = []
        for server in Manager(['container-reconciler']).servers:
            conf_files.extend(server.conf_files())
        conf_file = conf_files[0]
        # NOTE(review): renamed from ``client`` which shadowed the
        # module-level swiftclient import
        int_client = InternalClient(conf_file, 'probe-test', 3)
        int_client.get_object_metadata(
            self.account, self.container_name, self.object_name,
            headers={'X-Backend-Storage-Policy-Index': int(new_policy)})
        int_client.get_object_metadata(
            self.account, self.container_name, self.object_name,
            acceptable_statuses=(4, ),
            headers={'X-Backend-Storage-Policy-Index': int(old_policy)})

        # shutdown the containers that know about the new policy
        self.brain.stop_handoff_half()

        # and get rows enqueued from old nodes
        for server_type in ('container-replicator', 'container-updater'):
            server = Manager([server_type])
            tuple(server.once(number=n + 1) for n in old_container_node_ids)

        # verify entry in the queue for the "misplaced" new_policy
        for container in int_client.iter_containers('.misplaced_objects'):
            for obj in int_client.iter_objects('.misplaced_objects',
                                               container['name']):
                expected = '%d:/%s/%s/%s' % (new_policy, self.account,
                                             self.container_name,
                                             self.object_name)
                self.assertEqual(obj['name'], expected)

        Manager(['container-reconciler']).once()

        # verify object in old_policy
        int_client.get_object_metadata(
            self.account, self.container_name, self.object_name,
            headers={'X-Backend-Storage-Policy-Index': int(old_policy)})

        # verify object is *not* in new_policy
        int_client.get_object_metadata(
            self.account, self.container_name, self.object_name,
            acceptable_statuses=(4, ),
            headers={'X-Backend-Storage-Policy-Index': int(new_policy)})

        get_to_final_state()

        # verify entry in the queue
        int_client = InternalClient(conf_file, 'probe-test', 3)
        for container in int_client.iter_containers('.misplaced_objects'):
            for obj in int_client.iter_objects('.misplaced_objects',
                                               container['name']):
                expected = '%d:/%s/%s/%s' % (old_policy, self.account,
                                             self.container_name,
                                             self.object_name)
                self.assertEqual(obj['name'], expected)

        Manager(['container-reconciler']).once()

        # and now it flops back
        int_client.get_object_metadata(
            self.account, self.container_name, self.object_name,
            headers={'X-Backend-Storage-Policy-Index': int(new_policy)})
        int_client.get_object_metadata(
            self.account, self.container_name, self.object_name,
            acceptable_statuses=(4, ),
            headers={'X-Backend-Storage-Policy-Index': int(old_policy)})

        # make sure the queue is settled
        get_to_final_state()
        for container in int_client.iter_containers('.misplaced_objects'):
            for obj in int_client.iter_objects('.misplaced_objects',
                                               container['name']):
                self.fail('Found unexpected object %r in the queue' % obj)
class TestObjectExpirer(ReplProbeTest):
    # Probe tests for the object-expirer daemon interacting with split-brain
    # clusters: expiration across mismatched policies, overwritten objects,
    # and retries against outdated object versions.

    def setUp(self):
        """Verify the object-expirer service is runnable, build an
        InternalClient from its config, then set up the standard probe
        fixtures (container/object names and a BrainSplitter)."""
        self.expirer = Manager(['object-expirer'])
        self.expirer.start()
        err = self.expirer.stop()
        if err:
            raise unittest.SkipTest('Unable to verify object-expirer service')

        conf_files = []
        for server in self.expirer.servers:
            conf_files.extend(server.conf_files())
        conf_file = conf_files[0]
        self.client = InternalClient(conf_file, 'probe-test', 3)

        super(TestObjectExpirer, self).setUp()
        self.container_name = 'container-%s' % uuid.uuid4()
        self.object_name = 'object-%s' % uuid.uuid4()
        self.brain = BrainSplitter(self.url, self.token, self.container_name,
                                   self.object_name)

    def _check_obj_in_container_listing(self):
        """Return True if self.object_name appears in the container
        listing, False otherwise."""
        for obj in self.client.iter_objects(self.account,
                                            self.container_name):
            if self.object_name == obj['name']:
                return True
        return False

    @unittest.skipIf(len(ENABLED_POLICIES) < 2, "Need more than one policy")
    def test_expirer_object_split_brain(self):
        """An expiring object written under one policy must still be expired
        (and its listing row removed) after the container flops to a
        different policy and replication settles."""
        old_policy = random.choice(ENABLED_POLICIES)
        wrong_policy = random.choice([p for p in ENABLED_POLICIES
                                      if p != old_policy])
        # create an expiring object and a container with the wrong policy
        self.brain.stop_primary_half()
        self.brain.put_container(int(old_policy))
        self.brain.put_object(headers={'X-Delete-After': 2})
        # get the object timestamp
        metadata = self.client.get_object_metadata(
            self.account, self.container_name, self.object_name,
            headers={'X-Backend-Storage-Policy-Index': int(old_policy)})
        create_timestamp = Timestamp(metadata['x-timestamp'])
        self.brain.start_primary_half()
        # get the expiring object updates in their queue, while we have all
        # the servers up
        Manager(['object-updater']).once()
        self.brain.stop_handoff_half()
        self.brain.put_container(int(wrong_policy))
        # don't start handoff servers, only wrong policy is available
        # make sure auto-created containers get in the account listing
        Manager(['container-updater']).once()
        # this guy should no-op since it's unable to expire the object
        self.expirer.once()
        self.brain.start_handoff_half()
        self.get_to_final_state()
        # validate object is expired
        found_in_policy = None  # NOTE(review): dead store, reassigned below
        metadata = self.client.get_object_metadata(
            self.account, self.container_name, self.object_name,
            acceptable_statuses=(4, ),
            headers={'X-Backend-Storage-Policy-Index': int(old_policy)})
        self.assertIn('x-backend-timestamp', metadata)
        self.assertEqual(Timestamp(metadata['x-backend-timestamp']),
                         create_timestamp)

        # but it is still in the listing
        self.assertTrue(self._check_obj_in_container_listing(),
                        msg='Did not find listing for %s' % self.object_name)

        # clear proxy cache
        client.post_container(self.url, self.token, self.container_name, {})
        # run the expirer again after replication
        self.expirer.once()

        # object is not in the listing
        self.assertFalse(self._check_obj_in_container_listing(),
                         msg='Found listing for %s' % self.object_name)

        # and validate object is tombstoned: exactly one policy may hold a
        # backend timestamp, and it must be newer than the create timestamp
        found_in_policy = None
        for policy in ENABLED_POLICIES:
            metadata = self.client.get_object_metadata(
                self.account, self.container_name, self.object_name,
                acceptable_statuses=(4, ),
                headers={'X-Backend-Storage-Policy-Index': int(policy)})
            if 'x-backend-timestamp' in metadata:
                if found_in_policy:
                    self.fail('found object in %s and also %s' %
                              (found_in_policy, policy))
                found_in_policy = policy
                self.assertIn('x-backend-timestamp', metadata)
                self.assertGreater(Timestamp(metadata['x-backend-timestamp']),
                                   create_timestamp)

    def test_expirer_object_should_not_be_expired(self):
        """An object recreated (overwritten) after its scheduled X-Delete-At
        must survive an expirer pass and subsequent replication."""
        # Current object-expirer checks the correctness via x-if-delete-at
        # header that it can be deleted by expirer. If there are objects
        # either which doesn't have x-delete-at header as metadata or which
        # has different x-delete-at value from x-if-delete-at value,
        # object-expirer's delete will fail as 412 PreconditionFailed.
        # However, if some of the objects are in handoff nodes, the expirer
        # can put the tombstone with the timestamp as same as x-delete-at and
        # the object consistency will be resolved as the newer timestamp will
        # be winner (in particular, overwritten case w/o x-delete-at). This
        # test asserts such a situation that, at least, the overwriten object
        # which have larger timestamp than the original expirered date should
        # be safe.

        def put_object(headers):
            # use internal client to PUT objects so that X-Timestamp in
            # headers is effective
            headers['Content-Length'] = '0'
            path = self.client.make_path(
                self.account, self.container_name, self.object_name)
            try:
                self.client.make_request('PUT', path, headers, (2, ))
            except UnexpectedResponse as e:
                self.fail(
                    'Expected 201 for PUT object but got %s' % e.resp.status)

        obj_brain = BrainSplitter(self.url, self.token, self.container_name,
                                  self.object_name, 'object', self.policy)

        # T(obj_created) < T(obj_deleted with x-delete-at) < T(obj_recreated)
        # < T(expirer_executed)
        # Recreated obj should be appeared in any split brain case

        obj_brain.put_container()

        # T(obj_deleted with x-delete-at)
        # object-server accepts req only if X-Delete-At is later than 'now'
        # so here, T(obj_created) < T(obj_deleted with x-delete-at)
        now = time.time()
        delete_at = int(now + 2.0)
        recreate_at = delete_at + 1.0
        put_object(headers={'X-Delete-At': str(delete_at),
                            'X-Timestamp': Timestamp(now).normal})

        # some object servers stopped to make a situation that the
        # object-expirer can put tombstone in the primary nodes.
        obj_brain.stop_primary_half()

        # increment the X-Timestamp explicitly
        # (will be T(obj_deleted with x-delete-at) < T(obj_recreated))
        put_object(headers={'X-Object-Meta-Expired': 'False',
                            'X-Timestamp': Timestamp(recreate_at).normal})

        # make sure auto-created containers get in the account listing
        Manager(['container-updater']).once()
        # sanity, the newer object is still there
        try:
            metadata = self.client.get_object_metadata(
                self.account, self.container_name, self.object_name)
        except UnexpectedResponse as e:
            self.fail(
                'Expected 200 for HEAD object but got %s' % e.resp.status)

        self.assertIn('x-object-meta-expired', metadata)

        # some object servers recovered
        obj_brain.start_primary_half()

        # sleep until after recreated_at
        while time.time() <= recreate_at:
            time.sleep(0.1)
        # Now, expirer runs at the time after obj is recreated
        self.expirer.once()

        # verify that original object was deleted by expirer
        obj_brain.stop_handoff_half()
        try:
            metadata = self.client.get_object_metadata(
                self.account, self.container_name, self.object_name,
                acceptable_statuses=(4, ))
        except UnexpectedResponse as e:
            self.fail(
                'Expected 404 for HEAD object but got %s' % e.resp.status)
        obj_brain.start_handoff_half()

        # and inconsistent state of objects is recovered by replicator
        Manager(['object-replicator']).once()

        # check if you can get recreated object
        try:
            metadata = self.client.get_object_metadata(
                self.account, self.container_name, self.object_name)
        except UnexpectedResponse as e:
            self.fail(
                'Expected 200 for HEAD object but got %s' % e.resp.status)
        self.assertIn('x-object-meta-expired', metadata)

    def _test_expirer_delete_outdated_object_version(self, object_exists):
        """Drive the expirer against an outdated version of an object and
        assert it retries until the current version can be deleted.

        :param object_exists: when True, the outdated primaries hold an old
            un-expiring version (expirer gets 412); when False they hold
            nothing (expirer gets 404).
        """
        # This test simulates a case where the expirer tries to delete
        # an outdated version of an object.
        # One case is where the expirer gets a 404, whereas the newest version
        # of the object is offline.
        # Another case is where the expirer gets a 412, since the old version
        # of the object mismatches the expiration time sent by the expirer.
        # In any of these cases, the expirer should retry deleting the object
        # later, for as long as a reclaim age has not passed.
        obj_brain = BrainSplitter(self.url, self.token, self.container_name,
                                  self.object_name, 'object', self.policy)

        obj_brain.put_container()

        if object_exists:
            obj_brain.put_object()

        # currently, the object either doesn't exist, or does not have
        # an expiration

        # stop primary servers and put a newer version of the object, this
        # time with an expiration. only the handoff servers will have
        # the new version
        obj_brain.stop_primary_half()
        now = time.time()
        delete_at = int(now + 2.0)
        obj_brain.put_object({'X-Delete-At': str(delete_at)})

        # make sure auto-created containers get in the account listing
        Manager(['container-updater']).once()
        # update object record in the container listing
        Manager(['container-replicator']).once()

        # take handoff servers down, and bring up the outdated primary servers
        obj_brain.start_primary_half()
        obj_brain.stop_handoff_half()

        # wait until object expiration time
        while time.time() <= delete_at:
            time.sleep(0.1)

        # run expirer against the outdated servers. it should fail since
        # the outdated version does not match the expiration time
        self.expirer.once()

        # bring all servers up, and run replicator to update servers
        obj_brain.start_handoff_half()
        Manager(['object-replicator']).once()

        # verify the deletion has failed by checking the container listing
        self.assertTrue(self._check_obj_in_container_listing(),
                        msg='Did not find listing for %s' % self.object_name)

        # run expirer again, delete should now succeed
        self.expirer.once()

        # this is mainly to paper over lp bug #1652323
        self.get_to_final_state()

        # verify the deletion by checking the container listing
        self.assertFalse(self._check_obj_in_container_listing(),
                         msg='Found listing for %s' % self.object_name)

    def test_expirer_delete_returns_outdated_404(self):
        """Outdated primaries hold no object at all: expirer first sees 404."""
        self._test_expirer_delete_outdated_object_version(object_exists=False)

    def test_expirer_delete_returns_outdated_412(self):
        """Outdated primaries hold a non-expiring version: expirer first
        sees 412 (X-If-Delete-At mismatch)."""
        self._test_expirer_delete_outdated_object_version(object_exists=True)
class TestContainerMergePolicyIndex(ReplProbeTest):
    """
    Probe tests for resolving container storage-policy split brain.

    Each test manufactures a disagreement between container primaries about
    the container's storage-policy index (via BrainSplitter), then runs the
    replicators/reconciler and asserts that the cluster converges on a single
    policy with the object data intact (or correctly deleted).
    """

    # NOTE(review): the skip decorator sits on setUp, so every test in the
    # class is skipped when fewer than two policies are enabled.
    @unittest.skipIf(len(ENABLED_POLICIES) < 2, "Need more than one policy")
    def setUp(self):
        super(TestContainerMergePolicyIndex, self).setUp()
        # fresh, unique container/object names per test run
        self.container_name = 'container-%s' % uuid.uuid4()
        self.object_name = 'object-%s' % uuid.uuid4()
        self.brain = BrainSplitter(self.url, self.token, self.container_name,
                                   self.object_name, 'container')

    def _get_object_patiently(self, policy_index):
        """
        GET the object through the proxy, retrying 404s until TIMEOUT.

        Retries are needed because the proxy may briefly serve stale
        (bad) container info from cache after a policy change.
        Returns the (headers, body) tuple from the successful GET, or
        fails the test after TIMEOUT seconds.
        """
        # use proxy to access object (bad container info might be cached...)
        timeout = time.time() + TIMEOUT
        while time.time() < timeout:
            try:
                return self.brain.get_object()
            except ClientException as err:
                if err.http_status != HTTP_NOT_FOUND:
                    raise
                time.sleep(1)
        else:
            self.fail('could not GET /%s/%s/%s/ from policy %s '
                      'after %s seconds.' % (
                          self.account, self.container_name, self.object_name,
                          int(policy_index), TIMEOUT))

    def test_merge_storage_policy_index(self):
        """Object survives a generic policy split brain and ends up readable
        in the policy the reconciler settles on."""
        # generic split brain: half the primaries create the container with
        # one policy, the other half with another (default vs. default again,
        # whichever the proxy picks while each half is down)
        self.brain.stop_primary_half()
        self.brain.put_container()
        self.brain.start_primary_half()
        self.brain.stop_handoff_half()
        self.brain.put_container()
        self.brain.put_object(headers={'x-object-meta-test': 'custom-meta'},
                              contents=b'VERIFY')
        self.brain.start_handoff_half()
        # make sure we have some manner of split brain
        container_part, container_nodes = self.container_ring.get_nodes(
            self.account, self.container_name)
        head_responses = []
        for node in container_nodes:
            metadata = direct_client.direct_head_container(
                node, container_part, self.account, self.container_name)
            head_responses.append((node, metadata))
        found_policy_indexes = {
            metadata['X-Backend-Storage-Policy-Index']
            for node, metadata in head_responses
        }
        self.assertGreater(
            len(found_policy_indexes), 1,
            'primary nodes did not disagree about policy index %r' %
            head_responses)
        # find our object: probe each disagreeing policy's ring until a
        # direct HEAD succeeds
        orig_policy_index = None
        for policy_index in found_policy_indexes:
            object_ring = POLICIES.get_object_ring(policy_index, '/etc/swift')
            part, nodes = object_ring.get_nodes(
                self.account, self.container_name, self.object_name)
            for node in nodes:
                try:
                    direct_client.direct_head_object(
                        node, part, self.account, self.container_name,
                        self.object_name,
                        headers={
                            'X-Backend-Storage-Policy-Index': policy_index})
                except direct_client.ClientException as err:
                    continue
                orig_policy_index = policy_index
                break
            if orig_policy_index is not None:
                break
        else:
            # for...else: only reached if no policy had the object
            self.fail('Unable to find /%s/%s/%s in %r' % (
                self.account, self.container_name, self.object_name,
                found_policy_indexes))
        self.get_to_final_state()
        Manager(['container-reconciler']).once()
        # validate containers: all primaries must now agree on one policy
        head_responses = []
        for node in container_nodes:
            metadata = direct_client.direct_head_container(
                node, container_part, self.account, self.container_name)
            head_responses.append((node, metadata))
        found_policy_indexes = {
            metadata['X-Backend-Storage-Policy-Index']
            for node, metadata in head_responses
        }
        self.assertEqual(
            len(found_policy_indexes), 1,
            'primary nodes disagree about policy index %r' % head_responses)
        expected_policy_index = found_policy_indexes.pop()
        # the winning policy must be the one the object was NOT found in
        # originally (the reconciler moved it)
        self.assertNotEqual(orig_policy_index, expected_policy_index)
        # validate object placement: no copy may remain in the losing policy
        # NOTE(review): ``part`` below is reused from the find-loop above; it
        # was computed for the ring in which the object was found.
        orig_policy_ring = POLICIES.get_object_ring(orig_policy_index,
                                                    '/etc/swift')
        for node in orig_policy_ring.devs:
            try:
                direct_client.direct_head_object(
                    node, part, self.account, self.container_name,
                    self.object_name,
                    headers={
                        'X-Backend-Storage-Policy-Index': orig_policy_index})
            except direct_client.ClientException as err:
                if err.http_status == HTTP_NOT_FOUND:
                    continue
                raise
            else:
                self.fail('Found /%s/%s/%s in %s' % (
                    self.account, self.container_name, self.object_name,
                    orig_policy_index))
        # verify that the object data read by external client is correct
        headers, data = self._get_object_patiently(expected_policy_index)
        self.assertEqual(b'VERIFY', data)
        self.assertEqual('custom-meta', headers['x-object-meta-test'])

    def test_reconcile_delete(self):
        """A delete written to one policy wins over a live object in the
        other policy after reconciliation."""
        # generic split brain: object PUT under one policy, DELETE (tombstone)
        # under the other
        self.brain.stop_primary_half()
        self.brain.put_container()
        self.brain.put_object()
        self.brain.start_primary_half()
        self.brain.stop_handoff_half()
        self.brain.put_container()
        self.brain.delete_object()
        self.brain.start_handoff_half()
        # make sure we have some manner of split brain
        container_part, container_nodes = self.container_ring.get_nodes(
            self.account, self.container_name)
        head_responses = []
        for node in container_nodes:
            metadata = direct_client.direct_head_container(
                node, container_part, self.account, self.container_name)
            head_responses.append((node, metadata))
        found_policy_indexes = {
            metadata['X-Backend-Storage-Policy-Index']
            for node, metadata in head_responses
        }
        self.assertGreater(
            len(found_policy_indexes), 1,
            'primary nodes did not disagree about policy index %r' %
            head_responses)
        # find our object: one policy holds live data (HEAD succeeds), the
        # other holds a tombstone (HEAD fails but reports x-backend-timestamp)
        orig_policy_index = ts_policy_index = None
        for policy_index in found_policy_indexes:
            object_ring = POLICIES.get_object_ring(policy_index, '/etc/swift')
            part, nodes = object_ring.get_nodes(
                self.account, self.container_name, self.object_name)
            for node in nodes:
                try:
                    direct_client.direct_head_object(
                        node, part, self.account, self.container_name,
                        self.object_name,
                        headers={
                            'X-Backend-Storage-Policy-Index': policy_index})
                except direct_client.ClientException as err:
                    if 'x-backend-timestamp' in err.http_headers:
                        # a tombstone still reports its timestamp
                        ts_policy_index = policy_index
                        break
                else:
                    orig_policy_index = policy_index
                    break
        if not orig_policy_index:
            self.fail('Unable to find /%s/%s/%s in %r' % (
                self.account, self.container_name, self.object_name,
                found_policy_indexes))
        if not ts_policy_index:
            self.fail('Unable to find tombstone /%s/%s/%s in %r' % (
                self.account, self.container_name, self.object_name,
                found_policy_indexes))
        self.get_to_final_state()
        Manager(['container-reconciler']).once()
        # validate containers: all primaries must agree, and the policy that
        # held the live data must win (the tombstone is newer and carries over)
        head_responses = []
        for node in container_nodes:
            metadata = direct_client.direct_head_container(
                node, container_part, self.account, self.container_name)
            head_responses.append((node, metadata))
        node_to_policy = {
            node['port']: metadata['X-Backend-Storage-Policy-Index']
            for node, metadata in head_responses
        }
        policies = set(node_to_policy.values())
        self.assertEqual(
            len(policies), 1,
            'primary nodes disagree about policy index %r' % node_to_policy)
        expected_policy_index = policies.pop()
        self.assertEqual(orig_policy_index, expected_policy_index)
        # validate object fully deleted from every policy that was involved
        for policy_index in found_policy_indexes:
            object_ring = POLICIES.get_object_ring(policy_index, '/etc/swift')
            part, nodes = object_ring.get_nodes(
                self.account, self.container_name, self.object_name)
            for node in nodes:
                try:
                    direct_client.direct_head_object(
                        node, part, self.account, self.container_name,
                        self.object_name,
                        headers={
                            'X-Backend-Storage-Policy-Index': policy_index})
                except direct_client.ClientException as err:
                    if err.http_status == HTTP_NOT_FOUND:
                        continue
                else:
                    self.fail('Found /%s/%s/%s in %s on %s' % (
                        self.account, self.container_name, self.object_name,
                        orig_policy_index, node))

    def get_object_name(self, name):
        """
        hook for subclass to translate object names
        """
        return name

    def test_reconcile_manifest(self):
        """An SLO manifest written directly into the wrong policy is moved by
        the reconciler and remains a working manifest."""
        if 'slo' not in self.cluster_info:
            raise unittest.SkipTest(
                "SLO not enabled in proxy; can't test manifest reconciliation")
        # this test is not only testing a split brain scenario on
        # multiple policies with mis-placed objects - it even writes out
        # a static large object directly to the storage nodes while the
        # objects are unavailably mis-placed from *behind* the proxy and
        # doesn't know how to do that for EC_POLICY (clayg: why did you
        # guys let me write a test that does this!?) - so we force
        # wrong_policy (where the manifest gets written) to be one of
        # any of your configured REPL_POLICY (we know you have one
        # because this is a ReplProbeTest)
        wrong_policy = random.choice(POLICIES_BY_TYPE[REPL_POLICY])
        policy = random.choice([p for p in ENABLED_POLICIES
                                if p is not wrong_policy])
        manifest_data = []

        def write_part(i):
            # each segment is a 9-byte marker plus 1 MiB of NULs
            body = b'VERIFY%0.2d' % i + b'\x00' * 1048576
            part_name = self.get_object_name('manifest_part_%0.2d' % i)
            manifest_entry = {
                "path": "/%s/%s" % (self.container_name, part_name),
                "etag": md5(body).hexdigest(),
                "size_bytes": len(body),
            }
            self.brain.client.put_object(self.container_name, part_name, {},
                                         body)
            manifest_data.append(manifest_entry)

        # get an old container stashed
        self.brain.stop_primary_half()
        self.brain.put_container(int(policy))
        self.brain.start_primary_half()
        # write some parts
        for i in range(10):
            write_part(i)
        self.brain.stop_handoff_half()
        self.brain.put_container(int(wrong_policy))
        # write some more parts
        for i in range(10, 20):
            write_part(i)
        # write manifest
        with self.assertRaises(ClientException) as catcher:
            self.brain.client.put_object(
                self.container_name, self.object_name, {},
                utils.json.dumps(manifest_data),
                query_string='multipart-manifest=put')
        # so as it works out, you can't really upload a multi-part
        # manifest for objects that are currently misplaced - you have to
        # wait until they're all available - which is about the same as
        # some other failure that causes data to be unavailable to the
        # proxy at the time of upload
        self.assertEqual(catcher.exception.http_status, 400)
        # but what the heck, we'll sneak one in just to see what happens...
        direct_manifest_name = self.object_name + '-direct-test'
        object_ring = POLICIES.get_object_ring(wrong_policy.idx, '/etc/swift')
        part, nodes = object_ring.get_nodes(
            self.account, self.container_name, direct_manifest_name)
        container_part = self.container_ring.get_part(self.account,
                                                      self.container_name)

        def translate_direct(data):
            # SLO manifest entries use different key names when stored on
            # disk than when uploaded through the proxy
            return {
                'hash': data['etag'],
                'bytes': data['size_bytes'],
                'name': data['path'],
            }
        direct_manifest_data = [translate_direct(item)
                                for item in manifest_data]
        headers = {
            'x-container-host': ','.join('%s:%s' % (n['ip'], n['port'])
                                         for n in self.container_ring.devs),
            'x-container-device': ','.join(n['device']
                                           for n in self.container_ring.devs),
            'x-container-partition': container_part,
            'X-Backend-Storage-Policy-Index': wrong_policy.idx,
            'X-Static-Large-Object': 'True',
        }
        body = utils.json.dumps(direct_manifest_data).encode('ascii')
        for node in nodes:
            direct_client.direct_put_object(
                node, part, self.account, self.container_name,
                direct_manifest_name, contents=body, headers=headers)
            break  # one should do it...
        self.brain.start_handoff_half()
        self.get_to_final_state()
        Manager(['container-reconciler']).once()
        # clear proxy cache
        self.brain.client.post_container(self.container_name, {})
        # let's see how that direct upload worked out...
        metadata, body = self.brain.client.get_object(
            self.container_name, direct_manifest_name,
            query_string='multipart-manifest=get')
        self.assertEqual(metadata['x-static-large-object'].lower(), 'true')
        for i, entry in enumerate(utils.json.loads(body)):
            for key in ('hash', 'bytes', 'name'):
                self.assertEqual(entry[key], direct_manifest_data[i][key])
        metadata, body = self.brain.client.get_object(
            self.container_name, direct_manifest_name)
        self.assertEqual(metadata['x-static-large-object'].lower(), 'true')
        self.assertEqual(int(metadata['content-length']),
                         sum(part['size_bytes'] for part in manifest_data))
        self.assertEqual(body, b''.join(b'VERIFY%0.2d' % i + b'\x00' * 1048576
                                        for i in range(20)))
        # and regular upload should work now too
        self.brain.client.put_object(
            self.container_name, self.object_name, {},
            utils.json.dumps(manifest_data).encode('ascii'),
            query_string='multipart-manifest=put')
        metadata = self.brain.client.head_object(self.container_name,
                                                 self.object_name)
        self.assertEqual(int(metadata['content-length']),
                         sum(part['size_bytes'] for part in manifest_data))

    def test_reconcile_symlink(self):
        """A symlink written into the wrong policy is moved by the reconciler
        and resolves to the target afterwards."""
        if 'symlink' not in self.cluster_info:
            raise unittest.SkipTest("Symlink not enabled in proxy; can't test "
                                    "symlink reconciliation")
        wrong_policy = random.choice(ENABLED_POLICIES)
        policy = random.choice([p for p in ENABLED_POLICIES
                                if p is not wrong_policy])
        # get an old container stashed
        self.brain.stop_primary_half()
        self.brain.put_container(int(policy))
        self.brain.start_primary_half()
        # write some target data
        target_name = self.get_object_name('target')
        self.brain.client.put_object(self.container_name, target_name, {},
                                     b'this is the target data')
        # write the symlink
        self.brain.stop_handoff_half()
        self.brain.put_container(int(wrong_policy))
        symlink_name = self.get_object_name('symlink')
        self.brain.client.put_object(
            self.container_name, symlink_name, {
                'X-Symlink-Target': '%s/%s' % (self.container_name,
                                               target_name),
                'Content-Type': 'application/symlink',
            }, b'')
        # at this point we have a broken symlink (the container_info has the
        # proxy looking for the target in the wrong policy)
        with self.assertRaises(ClientException) as ctx:
            self.brain.client.get_object(self.container_name, symlink_name)
        self.assertEqual(ctx.exception.http_status, 404)
        # of course the symlink itself is fine
        metadata, body = self.brain.client.get_object(
            self.container_name, symlink_name, query_string='symlink=get')
        self.assertEqual(metadata['x-symlink-target'],
                         utils.quote('%s/%s' % (
                             self.container_name, target_name)))
        self.assertEqual(metadata['content-type'], 'application/symlink')
        self.assertEqual(body, b'')
        # ... although in the wrong policy
        object_ring = POLICIES.get_object_ring(int(wrong_policy), '/etc/swift')
        part, nodes = object_ring.get_nodes(
            self.account, self.container_name, symlink_name)
        for node in nodes:
            metadata = direct_client.direct_head_object(
                node, part, self.account, self.container_name, symlink_name,
                headers={'X-Backend-Storage-Policy-Index': int(wrong_policy)})
            self.assertEqual(
                metadata['X-Object-Sysmeta-Symlink-Target'],
                utils.quote('%s/%s' % (self.container_name, target_name)))
        # let the reconciler run
        self.brain.start_handoff_half()
        self.get_to_final_state()
        Manager(['container-reconciler']).once()
        # clear proxy cache
        self.brain.client.post_container(self.container_name, {})
        # now the symlink works
        metadata, body = self.brain.client.get_object(self.container_name,
                                                      symlink_name)
        self.assertEqual(body, b'this is the target data')
        # and it's in the correct policy
        object_ring = POLICIES.get_object_ring(int(policy), '/etc/swift')
        part, nodes = object_ring.get_nodes(
            self.account, self.container_name, symlink_name)
        for node in nodes:
            metadata = direct_client.direct_head_object(
                node, part, self.account, self.container_name, symlink_name,
                headers={'X-Backend-Storage-Policy-Index': int(policy)})
            self.assertEqual(
                metadata['X-Object-Sysmeta-Symlink-Target'],
                utils.quote('%s/%s' % (self.container_name, target_name)))

    def test_reconciler_move_object_twice(self):
        """An object enqueued as misplaced twice (once per policy) ends up in
        the most recently requested policy, with an empty queue at the end."""
        # select some policies
        old_policy = random.choice(ENABLED_POLICIES)
        new_policy = random.choice([p for p in ENABLED_POLICIES
                                    if p != old_policy])
        # setup a split brain
        self.brain.stop_handoff_half()
        # get old_policy on two primaries
        self.brain.put_container(policy_index=int(old_policy))
        self.brain.start_handoff_half()
        self.brain.stop_primary_half()
        # force a recreate on handoffs
        self.brain.put_container(policy_index=int(old_policy))
        self.brain.delete_container()
        self.brain.put_container(policy_index=int(new_policy))
        self.brain.put_object()  # populate memcache with new_policy
        self.brain.start_primary_half()
        # at this point two primaries have old policy
        container_part, container_nodes = self.container_ring.get_nodes(
            self.account, self.container_name)
        head_responses = [
            (node, direct_client.direct_head_container(
                node, container_part, self.account, self.container_name))
            for node in container_nodes]
        old_container_nodes = [
            node for node, metadata in head_responses
            if int(old_policy) ==
            int(metadata['X-Backend-Storage-Policy-Index'])]
        self.assertEqual(2, len(old_container_nodes))
        # hopefully memcache still has the new policy cached
        self.brain.put_object(headers={'x-object-meta-test': 'custom-meta'},
                              contents=b'VERIFY')
        # double-check object correctly written to new policy
        conf_files = []
        for server in Manager(['container-reconciler']).servers:
            conf_files.extend(server.conf_files())
        conf_file = conf_files[0]
        int_client = InternalClient(conf_file, 'probe-test', 3)
        int_client.get_object_metadata(
            self.account, self.container_name, self.object_name,
            headers={'X-Backend-Storage-Policy-Index': int(new_policy)})
        int_client.get_object_metadata(
            self.account, self.container_name, self.object_name,
            acceptable_statuses=(4,),
            headers={'X-Backend-Storage-Policy-Index': int(old_policy)})
        # shutdown the containers that know about the new policy
        self.brain.stop_handoff_half()
        # and get rows enqueued from old nodes
        for server_type in ('container-replicator', 'container-updater'):
            server = Manager([server_type])
            for node in old_container_nodes:
                server.once(number=self.config_number(node))
        # verify entry in the queue for the "misplaced" new_policy
        for container in int_client.iter_containers(MISPLACED_OBJECTS_ACCOUNT):
            for obj in int_client.iter_objects(MISPLACED_OBJECTS_ACCOUNT,
                                               container['name']):
                expected = '%d:/%s/%s/%s' % (new_policy, self.account,
                                             self.container_name,
                                             self.object_name)
                self.assertEqual(obj['name'], expected)
        Manager(['container-reconciler']).once()
        # verify object in old_policy
        int_client.get_object_metadata(
            self.account, self.container_name, self.object_name,
            headers={'X-Backend-Storage-Policy-Index': int(old_policy)})
        # verify object is *not* in new_policy
        int_client.get_object_metadata(
            self.account, self.container_name, self.object_name,
            acceptable_statuses=(4,),
            headers={'X-Backend-Storage-Policy-Index': int(new_policy)})
        self.get_to_final_state()
        # verify entry in the queue
        for container in int_client.iter_containers(MISPLACED_OBJECTS_ACCOUNT):
            for obj in int_client.iter_objects(MISPLACED_OBJECTS_ACCOUNT,
                                               container['name']):
                expected = '%d:/%s/%s/%s' % (old_policy, self.account,
                                             self.container_name,
                                             self.object_name)
                self.assertEqual(obj['name'], expected)
        Manager(['container-reconciler']).once()
        # and now it flops back
        int_client.get_object_metadata(
            self.account, self.container_name, self.object_name,
            headers={'X-Backend-Storage-Policy-Index': int(new_policy)})
        int_client.get_object_metadata(
            self.account, self.container_name, self.object_name,
            acceptable_statuses=(4,),
            headers={'X-Backend-Storage-Policy-Index': int(old_policy)})
        # make sure the queue is settled
        self.get_to_final_state()
        for container in int_client.iter_containers(MISPLACED_OBJECTS_ACCOUNT):
            for obj in int_client.iter_objects(MISPLACED_OBJECTS_ACCOUNT,
                                               container['name']):
                self.fail('Found unexpected object %r in the queue' % obj)
        # verify that the object data read by external client is correct
        headers, data = self._get_object_patiently(int(new_policy))
        self.assertEqual(b'VERIFY', data)
        self.assertEqual('custom-meta', headers['x-object-meta-test'])
class TestObjectExpirer(ReplProbeTest):
    """
    Probe tests for the object expirer interacting with policy split brain
    and object re-creation races.

    NOTE(review): a second class named ``TestObjectExpirer`` is defined later
    in this module; under the unittest loader the later definition shadows
    this one, so these tests never run — confirm and rename or remove one of
    the two definitions.
    """

    def setUp(self):
        # the split-brain scenario below needs at least two policies
        if len(ENABLED_POLICIES) < 2:
            raise unittest.SkipTest('Need more than one policy')

        # sanity check the expirer service is runnable before testing with it
        self.expirer = Manager(['object-expirer'])
        self.expirer.start()
        err = self.expirer.stop()
        if err:
            raise unittest.SkipTest('Unable to verify object-expirer service')

        # use the expirer's own conf for an internal client so requests can
        # carry backend headers (e.g. X-Backend-Storage-Policy-Index)
        conf_files = []
        for server in self.expirer.servers:
            conf_files.extend(server.conf_files())
        conf_file = conf_files[0]
        self.client = InternalClient(conf_file, 'probe-test', 3)

        super(TestObjectExpirer, self).setUp()
        self.container_name = 'container-%s' % uuid.uuid4()
        self.object_name = 'object-%s' % uuid.uuid4()
        self.brain = BrainSplitter(self.url, self.token, self.container_name,
                                   self.object_name)

    def test_expirer_object_split_brain(self):
        """Expirer cannot reach an object hidden behind a wrong-policy
        container, but succeeds after replication converges."""
        old_policy = random.choice(ENABLED_POLICIES)
        wrong_policy = random.choice([p for p in ENABLED_POLICIES
                                      if p != old_policy])
        # create an expiring object and a container with the wrong policy
        self.brain.stop_primary_half()
        self.brain.put_container(int(old_policy))
        self.brain.put_object(headers={'X-Delete-After': 2})
        # get the object timestamp
        metadata = self.client.get_object_metadata(
            self.account, self.container_name, self.object_name,
            headers={'X-Backend-Storage-Policy-Index': int(old_policy)})
        create_timestamp = Timestamp(metadata['x-timestamp'])
        self.brain.start_primary_half()
        # get the expiring object updates in their queue, while we have all
        # the servers up
        Manager(['object-updater']).once()
        self.brain.stop_handoff_half()
        self.brain.put_container(int(wrong_policy))
        # don't start handoff servers, only wrong policy is available
        # make sure auto-created containers get in the account listing
        Manager(['container-updater']).once()
        # this guy should no-op since it's unable to expire the object
        self.expirer.once()
        self.brain.start_handoff_half()
        self.get_to_final_state()
        # validate object is expired (still readable with its original
        # timestamp in the old policy — the expirer did not delete it)
        metadata = self.client.get_object_metadata(
            self.account, self.container_name, self.object_name,
            acceptable_statuses=(4,),
            headers={'X-Backend-Storage-Policy-Index': int(old_policy)})
        self.assertIn('x-backend-timestamp', metadata)
        self.assertEqual(Timestamp(metadata['x-backend-timestamp']),
                         create_timestamp)
        # but it is still in the listing
        for obj in self.client.iter_objects(self.account,
                                            self.container_name):
            if self.object_name == obj['name']:
                break
        else:
            self.fail('Did not find listing for %s' % self.object_name)
        # clear proxy cache
        client.post_container(self.url, self.token, self.container_name, {})
        # run the expirer again after replication
        self.expirer.once()
        # object is not in the listing
        for obj in self.client.iter_objects(self.account,
                                            self.container_name):
            if self.object_name == obj['name']:
                self.fail('Found listing for %s' % self.object_name)
        # and validate object is tombstoned in exactly one policy, with a
        # timestamp newer than the original PUT
        found_in_policy = None
        for policy in ENABLED_POLICIES:
            metadata = self.client.get_object_metadata(
                self.account, self.container_name, self.object_name,
                acceptable_statuses=(4,),
                headers={'X-Backend-Storage-Policy-Index': int(policy)})
            if 'x-backend-timestamp' in metadata:
                if found_in_policy:
                    self.fail('found object in %s and also %s' %
                              (found_in_policy, policy))
                found_in_policy = policy
                self.assertIn('x-backend-timestamp', metadata)
                self.assertGreater(Timestamp(metadata['x-backend-timestamp']),
                                   create_timestamp)

    def test_expirer_object_should_not_be_expired(self):
        """An object recreated after its X-Delete-At (while half the
        primaries were down) must survive an expirer pass."""
        obj_brain = BrainSplitter(self.url, self.token, self.container_name,
                                  self.object_name, 'object', self.policy)
        # T(obj_created) < T(obj_deleted with x-delete-at) < T(obj_recreated)
        # < T(expirer_executed)
        # Recreated obj should be appeared in any split brain case

        # T(obj_created)
        first_created_at = time.time()
        # T(obj_deleted with x-delete-at)
        # object-server accepts req only if X-Delete-At is later than 'now'
        delete_at = int(time.time() + 1.5)
        # T(obj_recreated)
        recreated_at = time.time() + 2.0
        # T(expirer_executed) - 'now'
        sleep_for_expirer = 2.01

        obj_brain.put_container(int(self.policy))
        obj_brain.put_object(
            headers={'X-Delete-At': delete_at,
                     'X-Timestamp': Timestamp(first_created_at).internal})

        # some object servers stopped
        obj_brain.stop_primary_half()
        obj_brain.put_object(
            headers={'X-Timestamp': Timestamp(recreated_at).internal,
                     'X-Object-Meta-Expired': 'False'})

        # make sure auto-created containers get in the account listing
        Manager(['container-updater']).once()
        # some object servers recovered
        obj_brain.start_primary_half()

        # sleep to make sure expirer runs at the time after obj is recreated
        time.sleep(sleep_for_expirer)
        self.expirer.once()
        # inconsistent state of objects is recovered
        Manager(['object-replicator']).once()

        # check if you can get recreated object
        metadata = self.client.get_object_metadata(
            self.account, self.container_name, self.object_name)
        self.assertIn('x-object-meta-expired', metadata)
class TestObjectExpirer(ReplProbeTest): def setUp(self): self.expirer = Manager(['object-expirer']) self.expirer.start() err = self.expirer.stop() if err: raise unittest.SkipTest('Unable to verify object-expirer service') conf_files = [] for server in self.expirer.servers: conf_files.extend(server.conf_files()) conf_file = conf_files[0] self.client = InternalClient(conf_file, 'probe-test', 3) super(TestObjectExpirer, self).setUp() self.container_name = 'container-%s' % uuid.uuid4() self.object_name = 'object-%s' % uuid.uuid4() self.brain = BrainSplitter(self.url, self.token, self.container_name, self.object_name) def _check_obj_in_container_listing(self): for obj in self.client.iter_objects(self.account, self.container_name): if self.object_name == obj['name']: return True return False @unittest.skipIf(len(ENABLED_POLICIES) < 2, "Need more than one policy") def test_expirer_object_split_brain(self): old_policy = random.choice(ENABLED_POLICIES) wrong_policy = random.choice( [p for p in ENABLED_POLICIES if p != old_policy]) # create an expiring object and a container with the wrong policy self.brain.stop_primary_half() self.brain.put_container(int(old_policy)) self.brain.put_object(headers={'X-Delete-After': 2}) # get the object timestamp metadata = self.client.get_object_metadata( self.account, self.container_name, self.object_name, headers={'X-Backend-Storage-Policy-Index': int(old_policy)}) create_timestamp = Timestamp(metadata['x-timestamp']) self.brain.start_primary_half() # get the expiring object updates in their queue, while we have all # the servers up Manager(['object-updater']).once() self.brain.stop_handoff_half() self.brain.put_container(int(wrong_policy)) # don't start handoff servers, only wrong policy is available # make sure auto-created containers get in the account listing Manager(['container-updater']).once() # this guy should no-op since it's unable to expire the object self.expirer.once() self.brain.start_handoff_half() self.get_to_final_state() 
# validate object is expired found_in_policy = None metadata = self.client.get_object_metadata( self.account, self.container_name, self.object_name, acceptable_statuses=(4, ), headers={'X-Backend-Storage-Policy-Index': int(old_policy)}) self.assertIn('x-backend-timestamp', metadata) self.assertEqual(Timestamp(metadata['x-backend-timestamp']), create_timestamp) # but it is still in the listing self.assertTrue(self._check_obj_in_container_listing(), msg='Did not find listing for %s' % self.object_name) # clear proxy cache client.post_container(self.url, self.token, self.container_name, {}) # run the expirer again after replication self.expirer.once() # object is not in the listing self.assertFalse(self._check_obj_in_container_listing(), msg='Found listing for %s' % self.object_name) # and validate object is tombstoned found_in_policy = None for policy in ENABLED_POLICIES: metadata = self.client.get_object_metadata( self.account, self.container_name, self.object_name, acceptable_statuses=(4, ), headers={'X-Backend-Storage-Policy-Index': int(policy)}) if 'x-backend-timestamp' in metadata: if found_in_policy: self.fail('found object in %s and also %s' % (found_in_policy, policy)) found_in_policy = policy self.assertIn('x-backend-timestamp', metadata) self.assertGreater(Timestamp(metadata['x-backend-timestamp']), create_timestamp) def test_expirer_doesnt_make_async_pendings(self): # The object expirer cleans up its own queue. The inner loop # basically looks like this: # # for obj in stuff_to_delete: # delete_the_object(obj) # remove_the_queue_entry(obj) # # By default, upon receipt of a DELETE request for an expiring # object, the object servers will create async_pending records to # clean the expirer queue. Since the expirer cleans its own queue, # this is unnecessary. The expirer can make requests in such a way # tha the object server does not write out any async pendings; this # test asserts that this is the case. 
# Make an expiring object in each policy for policy in ENABLED_POLICIES: container_name = "expirer-test-%d" % policy.idx container_headers = {'X-Storage-Policy': policy.name} client.put_container(self.url, self.token, container_name, headers=container_headers) now = time.time() delete_at = int(now + 2.0) client.put_object(self.url, self.token, container_name, "some-object", headers={ 'X-Delete-At': str(delete_at), 'X-Timestamp': Timestamp(now).normal }, contents='dontcare') time.sleep(2.0) # make sure auto-created expirer-queue containers get in the account # listing so the expirer can find them Manager(['container-updater']).once() # Make sure there's no async_pendings anywhere. Probe tests only run # on single-node installs anyway, so this set should be small enough # that an exhaustive check doesn't take too long. all_obj_nodes = self.get_all_object_nodes() pendings_before = self.gather_async_pendings(all_obj_nodes) # expire the objects Manager(['object-expirer']).once() pendings_after = self.gather_async_pendings(all_obj_nodes) self.assertEqual(pendings_after, pendings_before) def test_expirer_object_should_not_be_expired(self): # Current object-expirer checks the correctness via x-if-delete-at # header that it can be deleted by expirer. If there are objects # either which doesn't have x-delete-at header as metadata or which # has different x-delete-at value from x-if-delete-at value, # object-expirer's delete will fail as 412 PreconditionFailed. # However, if some of the objects are in handoff nodes, the expirer # can put the tombstone with the timestamp as same as x-delete-at and # the object consistency will be resolved as the newer timestamp will # be winner (in particular, overwritten case w/o x-delete-at). This # test asserts such a situation that, at least, the overwriten object # which have larger timestamp than the original expirered date should # be safe. 
def put_object(headers): # use internal client to PUT objects so that X-Timestamp in headers # is effective headers['Content-Length'] = '0' path = self.client.make_path(self.account, self.container_name, self.object_name) try: self.client.make_request('PUT', path, headers, (2, )) except UnexpectedResponse as e: self.fail('Expected 201 for PUT object but got %s' % e.resp.status) obj_brain = BrainSplitter(self.url, self.token, self.container_name, self.object_name, 'object', self.policy) # T(obj_created) < T(obj_deleted with x-delete-at) < T(obj_recreated) # < T(expirer_executed) # Recreated obj should be appeared in any split brain case obj_brain.put_container() # T(obj_deleted with x-delete-at) # object-server accepts req only if X-Delete-At is later than 'now' # so here, T(obj_created) < T(obj_deleted with x-delete-at) now = time.time() delete_at = int(now + 2.0) recreate_at = delete_at + 1.0 put_object(headers={ 'X-Delete-At': str(delete_at), 'X-Timestamp': Timestamp(now).normal }) # some object servers stopped to make a situation that the # object-expirer can put tombstone in the primary nodes. 
obj_brain.stop_primary_half() # increment the X-Timestamp explicitly # (will be T(obj_deleted with x-delete-at) < T(obj_recreated)) put_object( headers={ 'X-Object-Meta-Expired': 'False', 'X-Timestamp': Timestamp(recreate_at).normal }) # make sure auto-created containers get in the account listing Manager(['container-updater']).once() # sanity, the newer object is still there try: metadata = self.client.get_object_metadata(self.account, self.container_name, self.object_name) except UnexpectedResponse as e: self.fail('Expected 200 for HEAD object but got %s' % e.resp.status) self.assertIn('x-object-meta-expired', metadata) # some object servers recovered obj_brain.start_primary_half() # sleep until after recreated_at while time.time() <= recreate_at: time.sleep(0.1) # Now, expirer runs at the time after obj is recreated self.expirer.once() # verify that original object was deleted by expirer obj_brain.stop_handoff_half() try: metadata = self.client.get_object_metadata( self.account, self.container_name, self.object_name, acceptable_statuses=(4, )) except UnexpectedResponse as e: self.fail('Expected 404 for HEAD object but got %s' % e.resp.status) obj_brain.start_handoff_half() # and inconsistent state of objects is recovered by replicator Manager(['object-replicator']).once() # check if you can get recreated object try: metadata = self.client.get_object_metadata(self.account, self.container_name, self.object_name) except UnexpectedResponse as e: self.fail('Expected 200 for HEAD object but got %s' % e.resp.status) self.assertIn('x-object-meta-expired', metadata) def _test_expirer_delete_outdated_object_version(self, object_exists): # This test simulates a case where the expirer tries to delete # an outdated version of an object. # One case is where the expirer gets a 404, whereas the newest version # of the object is offline. # Another case is where the expirer gets a 412, since the old version # of the object mismatches the expiration time sent by the expirer. 
# In any of these cases, the expirer should retry deleting the object # later, for as long as a reclaim age has not passed. obj_brain = BrainSplitter(self.url, self.token, self.container_name, self.object_name, 'object', self.policy) obj_brain.put_container() if object_exists: obj_brain.put_object() # currently, the object either doesn't exist, or does not have # an expiration # stop primary servers and put a newer version of the object, this # time with an expiration. only the handoff servers will have # the new version obj_brain.stop_primary_half() now = time.time() delete_at = int(now + 2.0) obj_brain.put_object({'X-Delete-At': str(delete_at)}) # make sure auto-created containers get in the account listing Manager(['container-updater']).once() # update object record in the container listing Manager(['container-replicator']).once() # take handoff servers down, and bring up the outdated primary servers obj_brain.start_primary_half() obj_brain.stop_handoff_half() # wait until object expiration time while time.time() <= delete_at: time.sleep(0.1) # run expirer against the outdated servers. 
it should fail since # the outdated version does not match the expiration time self.expirer.once() # bring all servers up, and run replicator to update servers obj_brain.start_handoff_half() Manager(['object-replicator']).once() # verify the deletion has failed by checking the container listing self.assertTrue(self._check_obj_in_container_listing(), msg='Did not find listing for %s' % self.object_name) # run expirer again, delete should now succeed self.expirer.once() # verify the deletion by checking the container listing self.assertFalse(self._check_obj_in_container_listing(), msg='Found listing for %s' % self.object_name) def test_expirer_delete_returns_outdated_404(self): self._test_expirer_delete_outdated_object_version(object_exists=False) def test_expirer_delete_returns_outdated_412(self): self._test_expirer_delete_outdated_object_version(object_exists=True) def test_slo_async_delete(self): if not self.cluster_info.get('slo', {}).get('allow_async_delete'): raise unittest.SkipTest('allow_async_delete not enabled') segment_container = self.container_name + '_segments' client.put_container(self.url, self.token, self.container_name, {}) client.put_container(self.url, self.token, segment_container, {}) client.put_object(self.url, self.token, segment_container, 'segment_1', b'1234') client.put_object(self.url, self.token, segment_container, 'segment_2', b'5678') client.put_object(self.url, self.token, self.container_name, 'slo', json.dumps([ { 'path': segment_container + '/segment_1' }, { 'data': 'Cg==' }, { 'path': segment_container + '/segment_2' }, ]), query_string='multipart-manifest=put') _, body = client.get_object(self.url, self.token, self.container_name, 'slo') self.assertEqual(body, b'1234\n5678') client.delete_object( self.url, self.token, self.container_name, 'slo', query_string='multipart-manifest=delete&async=true') # Object's deleted _, objects = client.get_container(self.url, self.token, self.container_name) self.assertEqual(objects, []) with 
self.assertRaises(client.ClientException) as caught: client.get_object(self.url, self.token, self.container_name, 'slo') self.assertEqual(404, caught.exception.http_status) # But segments are still around and accessible _, objects = client.get_container(self.url, self.token, segment_container) self.assertEqual([o['name'] for o in objects], ['segment_1', 'segment_2']) _, body = client.get_object(self.url, self.token, segment_container, 'segment_1') self.assertEqual(body, b'1234') _, body = client.get_object(self.url, self.token, segment_container, 'segment_2') self.assertEqual(body, b'5678') # make sure auto-created expirer-queue containers get in the account # listing so the expirer can find them Manager(['container-updater']).once() self.expirer.once() # Now the expirer has cleaned up the segments _, objects = client.get_container(self.url, self.token, segment_container) self.assertEqual(objects, []) with self.assertRaises(client.ClientException) as caught: client.get_object(self.url, self.token, segment_container, 'segment_1') self.assertEqual(404, caught.exception.http_status) with self.assertRaises(client.ClientException) as caught: client.get_object(self.url, self.token, segment_container, 'segment_2') self.assertEqual(404, caught.exception.http_status)