def test_relink_device_filter_invalid(self):
    # Relinking with a device filter that matches nothing should not
    # create any links in the next-power partition directory
    self.rb.prepare_increase_partition_power()
    self._save_ring()
    relinker.relink(self.testdir, self.devices, True, device='none')
    self.assertFalse(os.path.isdir(self.expected_dir))
    self.assertFalse(os.path.isfile(self.expected_file))
def test_relink(self):
    # After preparing the partition power increase, relink should hard-link
    # the existing object into the next-power partition directory
    self.rb.prepare_increase_partition_power()
    self._save_ring()
    relinker.relink(self.testdir, self.devices, True)
    self.assertTrue(os.path.isdir(self.expected_dir))
    self.assertTrue(os.path.isfile(self.expected_file))

    # Old and new paths must point at the same inode, i.e. a hard link
    stat_old = os.stat(os.path.join(self.objdir, self.object_fname))
    stat_new = os.stat(self.expected_file)
    self.assertEqual(stat_old.st_ino, stat_new.st_ino)
def test_relink_cleanup(self):
    state_file = os.path.join(self.devices, self.existing_device,
                              'relink.objects.json')

    self.rb.prepare_increase_partition_power()
    self._save_ring()
    relinker.relink(self.testdir, self.devices, True)
    with open(state_file, 'rt') as f:
        self.assertEqual(json.load(f), {str(self.part): [True, False]})

    self.rb.increase_partition_power()
    self.rb._ring = None  # Force builder to reload ring
    self._save_ring()
    relinker.cleanup(self.testdir, self.devices, True)
    with open(state_file, 'rt') as f:
        self.assertEqual(json.load(f),
                         {str(self.part): [True, True],
                          str(self.next_part): [True, True]})
def _common_test_cleanup(self, relink=True):
    # Create a ring that has prev_part_power set
    self.rb.prepare_increase_partition_power()
    self._save_ring()

    if relink:
        conf = {'swift_dir': self.testdir,
                'devices': self.devices,
                'mount_check': False,
                'files_per_second': 0}
        self.assertEqual(0, relinker.relink(
            conf, logger=self.logger, device=self.existing_device))
    self.rb.increase_partition_power()
    self._save_ring()
def _test_main(self, cancel=False):
    container = 'container-%s' % uuid4()
    obj = 'object-%s' % uuid4()
    obj2 = 'object-%s' % uuid4()

    # Create container
    headers = {'X-Storage-Policy': self.policy.name}
    client.put_container(self.url, self.token, container, headers=headers)

    # Create a new object
    client.put_object(self.url, self.token, container, obj, self.data)
    client.head_object(self.url, self.token, container, obj)

    # Prepare partition power increase
    builder = RingBuilder.load(self.builder_file)
    builder.prepare_increase_partition_power()
    builder.save(self.builder_file)
    ring_data = builder.get_ring()
    ring_data.save(self.ring_file)

    # Ensure the proxy uses the changed ring
    Manager(['proxy']).restart()

    # Ensure object is still accessible
    client.head_object(self.url, self.token, container, obj)

    # Relink existing objects
    for device in self.devices:
        self.assertEqual(0, relink(skip_mount_check=True, devices=device))

    # Create second object after relinking and ensure it is accessible
    client.put_object(self.url, self.token, container, obj2, self.data)
    client.head_object(self.url, self.token, container, obj2)

    # Remember the original object locations
    org_locations = self._find_objs_ondisk(container, obj)
    org_locations += self._find_objs_ondisk(container, obj2)

    # Remember the new object locations
    new_locations = []
    for loc in org_locations:
        new_locations.append(replace_partition_in_path(
            str(loc), self.object_ring.part_power + 1))

    # Overwrite existing object - to ensure that older timestamp files
    # will be cleaned up properly later
    client.put_object(self.url, self.token, container, obj, self.data)

    # Ensure objects are still accessible
    client.head_object(self.url, self.token, container, obj)
    client.head_object(self.url, self.token, container, obj2)

    # Increase partition power
    builder = RingBuilder.load(self.builder_file)
    if not cancel:
        builder.increase_partition_power()
    else:
        builder.cancel_increase_partition_power()
    builder.save(self.builder_file)
    ring_data = builder.get_ring()
    ring_data.save(self.ring_file)

    # Ensure the proxy uses the changed ring
    Manager(['proxy']).restart()

    # Ensure objects are still accessible
    client.head_object(self.url, self.token, container, obj)
    client.head_object(self.url, self.token, container, obj2)

    # Overwrite existing object - to ensure that older timestamp files
    # will be cleaned up properly later
    client.put_object(self.url, self.token, container, obj, self.data)

    # Cleanup old objects in the wrong location
    for device in self.devices:
        self.assertEqual(0, cleanup(skip_mount_check=True, devices=device))

    # Ensure objects are still accessible
    client.head_object(self.url, self.token, container, obj)
    client.head_object(self.url, self.token, container, obj2)

    # Ensure data in old or relinked object locations is removed
    if not cancel:
        for fn in org_locations:
            self.assertFalse(os.path.exists(fn))
    else:
        for fn in new_locations:
            self.assertFalse(os.path.exists(fn))