def test_cleanup_deleted(self):
    """Cleanup succeeds when a tombstone replaced the relinked object."""
    self._common_test_cleanup()
    # Pretend the object got deleted in the meantime: swap the .data
    # file for a tombstone (.ts) carrying the same timestamp.
    tombstone = self.expected_file[:-4] + "ts"
    os.rename(self.expected_file, tombstone)
    rc = relinker.cleanup(self.testdir, self.devices, True)
    self.assertEqual(0, rc)
def test_cleanup_deleted(self):
    """A tombstone left in place of the object must not fail cleanup."""
    self._common_test_cleanup()
    # Simulate a deletion racing with the partition power increase:
    # only a .ts file remains where the .data file used to be.
    ts_path = self.expected_file[:-4] + "ts"
    os.rename(self.expected_file, ts_path)
    self.assertEqual(
        0, relinker.cleanup(self.testdir, self.devices, True))
def test_cleanup(self):
    """After cleanup only the relinked copy of the object remains."""
    self._common_test_cleanup()
    self.assertEqual(0, relinker.cleanup(self.testdir, self.devices, True))
    # The old location must be gone; the new one must survive.
    old_path = os.path.join(self.objdir, self.object_fname)
    self.assertTrue(os.path.isdir(self.expected_dir))
    self.assertTrue(os.path.isfile(self.expected_file))
    self.assertFalse(os.path.isfile(old_path))
def test_cleanup(self):
    """Cleanup removes the pre-relink file and keeps the new one."""
    self._common_test_cleanup()
    result = relinker.cleanup(self.testdir, self.devices, True)
    self.assertEqual(0, result)
    # New directory and file still present ...
    self.assertTrue(os.path.isdir(self.expected_dir))
    self.assertTrue(os.path.isfile(self.expected_file))
    # ... while the original object name has been unlinked.
    self.assertFalse(os.path.isfile(
        os.path.join(self.objdir, self.object_fname)))
def test_relink_cleanup(self):
    """relink then cleanup record their progress in the state file."""
    state_file = os.path.join(self.devices, self.existing_device,
                              'relink.objects.json')

    # Step one: prepare the power increase and relink; the state file
    # marks the partition as relinked but not yet cleaned up.
    self.rb.prepare_increase_partition_power()
    self._save_ring()
    relinker.relink(self.testdir, self.devices, True)
    with open(state_file, 'rt') as f:
        state = json.load(f)
    self.assertEqual(state, {str(self.part): [True, False]})

    # Step two: finish the power increase and clean up; both the old
    # and the new partition are now fully processed.
    self.rb.increase_partition_power()
    self.rb._ring = None  # Force builder to reload ring
    self._save_ring()
    relinker.cleanup(self.testdir, self.devices, True)
    with open(state_file, 'rt') as f:
        state = json.load(f)
    self.assertEqual(state, {str(self.part): [True, True],
                             str(self.next_part): [True, True]})
def test_cleanup_non_durable_fragment(self):
    """Non-durable EC fragments do not count as cleanup failures."""
    self._common_test_cleanup()
    # With the policy switched to EC, every fragment here is non-durable
    # and raises DiskFileNotExist. That is fine as long as a counterpart
    # exists in the new location - the reconstructor repairs it later.
    # NOTE(review): the policy-type switch itself is not visible in this
    # method; presumably a fixture/decorator performs it - confirm.
    rc = relinker.cleanup(self.testdir, self.devices, True, self.logger)
    self.assertEqual(0, rc)
    self.assertEqual(self.logger.get_lines_for_level('warning'), [])
def test_cleanup_device_filter_invalid(self):
    """An unknown device filter makes cleanup a successful no-op."""
    self._common_test_cleanup()
    self.assertEqual(0, relinker.cleanup(self.testdir, self.devices, True,
                                         device='none'))
    # Nothing matched the filter, so both the old and the new object
    # locations must still be present and untouched.
    self.assertTrue(os.path.isdir(self.expected_dir))
    self.assertTrue(os.path.isfile(self.expected_file))
    self.assertTrue(os.path.isfile(
        os.path.join(self.objdir, self.object_fname)))
def test_cleanup_quarantined(self):
    """A corrupted relinked file is quarantined and counted as an error."""
    self._common_test_cleanup()
    # Pretend the object in the new place got corrupted.
    # Bug fix: the file is opened in binary mode ("wb"), so the garbage
    # payload must be bytes - writing the str 'trash' raises TypeError
    # on Python 3 before cleanup is ever exercised.
    with open(self.expected_file, "wb") as obj:
        obj.write(b'trash')

    # The audit failure is reported as one error ...
    self.assertEqual(
        1, relinker.cleanup(self.testdir, self.devices, True, self.logger))

    # ... and logged as a quarantine warning.
    self.assertIn('failed audit and was quarantined',
                  self.logger.get_lines_for_level('warning')[0])
def test_cleanup_non_durable_fragment(self):
    """Non-durable EC fragments do not count as cleanup failures."""
    self._common_test_cleanup()
    # Actually all fragments are non-durable and raise a DiskFileNotExist
    # in EC in this test. However, if the counterpart exists in the new
    # location, this is ok - it will be fixed by the reconstructor later on.
    # Fix: restore the global policy type afterwards so this mutation of
    # the shared _POLICIES singleton cannot leak into other tests.
    orig_policy_type = storage_policy._POLICIES[0].policy_type
    storage_policy._POLICIES[0].policy_type = 'erasure_coding'
    try:
        self.assertEqual(
            0, relinker.cleanup(self.testdir, self.devices, True,
                                self.logger))
        self.assertEqual(self.logger.get_lines_for_level('warning'), [])
    finally:
        storage_policy._POLICIES[0].policy_type = orig_policy_type
def test_cleanup_quarantined(self):
    """Corrupt data in the new location triggers quarantine on cleanup."""
    self._common_test_cleanup()
    # Overwrite the relinked file with garbage so the audit fails.
    with open(self.expected_file, "wb") as obj:
        obj.write(b'trash')

    rc = relinker.cleanup(self.testdir, self.devices, True, self.logger)
    self.assertEqual(1, rc)

    warnings = self.logger.get_lines_for_level('warning')
    self.assertIn('failed audit and was quarantined', warnings[0])
def test_cleanup_non_durable_fragment(self):
    """Non-durable EC fragments do not count as cleanup failures."""
    self._common_test_cleanup()
    # Actually all fragments are non-durable and raise a DiskFileNotExist
    # in EC in this test. However, if the counterpart exists in the new
    # location, this is ok - it will be fixed by the reconstructor later on.
    # Fix: restore the global policy type afterwards so this mutation of
    # the shared _POLICIES singleton cannot leak into other tests.
    orig_policy_type = storage_policy._POLICIES[0].policy_type
    storage_policy._POLICIES[0].policy_type = 'erasure_coding'
    try:
        self.assertEqual(
            0, relinker.cleanup(self.testdir, self.devices, True,
                                self.logger))
        self.assertEqual(self.logger.get_lines_for_level('warning'), [])
    finally:
        storage_policy._POLICIES[0].policy_type = orig_policy_type
def test_cleanup_doesnotexist(self):
    """A vanished relinked file is reported as a cleanup error."""
    self._common_test_cleanup()
    # Simulate the new file being deleted between relink and cleanup.
    os.remove(self.expected_file)
    rc = relinker.cleanup(self.testdir, self.devices, True, self.logger)
    self.assertEqual(1, rc)
    expected_warning = 'Error cleaning up %s: %s' % (
        self.objname, repr(exceptions.DiskFileNotExist()))
    self.assertEqual(self.logger.get_lines_for_level('warning'),
                     [expected_warning])
def test_cleanup_not_yet_relinked(self):
    """Cleanup fails and keeps the original file if relink never ran."""
    self._common_test_cleanup(relink=False)
    rc = relinker.cleanup(self.testdir, self.devices, True)
    self.assertEqual(1, rc)
    # The not-yet-relinked original must be left untouched.
    self.assertTrue(
        os.path.isfile(os.path.join(self.objdir, self.object_fname)))
def _test_main(self, cancel=False):
    """End-to-end partition power increase against a running cluster.

    Creates objects, prepares and performs (or, with ``cancel=True``,
    cancels) a partition power increase, relinks and cleans up on every
    device, and verifies object availability at each step.

    :param cancel: when True, cancel the power increase instead of
                   completing it; then the *relinked* copies (rather
                   than the originals) must be removed by cleanup.
    """
    container = 'container-%s' % uuid4()
    obj = 'object-%s' % uuid4()
    obj2 = 'object-%s' % uuid4()

    # Create container
    headers = {'X-Storage-Policy': self.policy.name}
    client.put_container(self.url, self.token, container, headers=headers)

    # Create a new object
    client.put_object(self.url, self.token, container, obj, self.data)
    client.head_object(self.url, self.token, container, obj)

    # Prepare partition power increase
    builder = RingBuilder.load(self.builder_file)
    builder.prepare_increase_partition_power()
    builder.save(self.builder_file)
    ring_data = builder.get_ring()
    ring_data.save(self.ring_file)

    # Ensure the proxy uses the changed ring
    Manager(['proxy']).restart()

    # Ensure object is still accessible
    client.head_object(self.url, self.token, container, obj)

    # Relink existing objects
    for device in self.devices:
        self.assertEqual(0, relink(skip_mount_check=True, devices=device))

    # Create second object after relinking and ensure it is accessible
    client.put_object(self.url, self.token, container, obj2, self.data)
    client.head_object(self.url, self.token, container, obj2)

    # Remember the original object locations
    org_locations = self._find_objs_ondisk(container, obj)
    org_locations += self._find_objs_ondisk(container, obj2)

    # Remember the new object locations (same paths, at part_power + 1)
    new_locations = []
    for loc in org_locations:
        new_locations.append(replace_partition_in_path(
            str(loc), self.object_ring.part_power + 1))

    # Overwrite existing object - to ensure that older timestamp files
    # will be cleaned up properly later
    client.put_object(self.url, self.token, container, obj, self.data)

    # Ensure objects are still accessible
    client.head_object(self.url, self.token, container, obj)
    client.head_object(self.url, self.token, container, obj2)

    # Increase partition power (or roll it back when cancelling)
    builder = RingBuilder.load(self.builder_file)
    if not cancel:
        builder.increase_partition_power()
    else:
        builder.cancel_increase_partition_power()
    builder.save(self.builder_file)
    ring_data = builder.get_ring()
    ring_data.save(self.ring_file)

    # Ensure the proxy uses the changed ring
    Manager(['proxy']).restart()

    # Ensure objects are still accessible
    client.head_object(self.url, self.token, container, obj)
    client.head_object(self.url, self.token, container, obj2)

    # Overwrite existing object - to ensure that older timestamp files
    # will be cleaned up properly later
    client.put_object(self.url, self.token, container, obj, self.data)

    # Cleanup old objects in the wrong location
    for device in self.devices:
        self.assertEqual(0, cleanup(skip_mount_check=True, devices=device))

    # Ensure objects are still accessible
    client.head_object(self.url, self.token, container, obj)
    client.head_object(self.url, self.token, container, obj2)

    # Ensure data in old or relinked object locations is removed:
    # completed increase removes the originals, a cancelled one removes
    # the relinked copies.
    if not cancel:
        for fn in org_locations:
            self.assertFalse(os.path.exists(fn))
    else:
        for fn in new_locations:
            self.assertFalse(os.path.exists(fn))
def test_cleanup_not_yet_relinked(self):
    """Without a prior relink, cleanup errors out and removes nothing."""
    self._common_test_cleanup(relink=False)
    self.assertEqual(1, relinker.cleanup(self.testdir, self.devices, True))
    original = os.path.join(self.objdir, self.object_fname)
    self.assertTrue(os.path.isfile(original))