def read_conf_for_queue_access(self, swift):
    """Read task-queue settings from self.conf and build the internal client.

    :param swift: optional pre-built client; when falsy an InternalClient
                  is created from self.ic_conf_path
    """
    prefix = self.conf.get('auto_create_account_prefix')
    if prefix:
        # Per-daemon override is deprecated; swift.conf is authoritative.
        self.logger.warning('Option auto_create_account_prefix is '
                            'deprecated. Configure '
                            'auto_create_account_prefix under the '
                            'swift-constraints section of '
                            'swift.conf. This option will '
                            'be ignored in a future release.')
    else:
        prefix = AUTO_CREATE_ACCOUNT_PREFIX
    account_name = (self.conf.get('expiring_objects_account_name')
                    or 'expiring_objects')
    self.expiring_objects_account = prefix + account_name
    # This is for common parameter with general task queue in future
    self.task_container_prefix = ''
    tries = int(self.conf.get('request_tries') or 3)
    self.swift = swift or InternalClient(
        self.ic_conf_path, 'Swift Object Expirer', tries,
        use_replication_network=True)
    self.processes = int(self.conf.get('processes', 0))
    self.process = int(self.conf.get('process', 0))
def main():
    """CLI entry point: mark every object in a container for deletion.

    Walks the (optionally prefix/marker-bounded) container listing via an
    InternalClient and prints progress for each enqueued batch.
    """
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawTextHelpFormatter)
    # BUGFIX: help text was missing the closing paren after the default path.
    parser.add_argument('--config', default='/etc/swift/internal-client.conf',
                        help=('internal-client config file '
                              '(default: /etc/swift/internal-client.conf)'))
    parser.add_argument('--request-tries', type=int, default=3,
                        help='(default: 3)')
    parser.add_argument('account', help='account from which to delete')
    parser.add_argument('container', help='container from which to delete')
    parser.add_argument(
        '--prefix', default='',
        help='only delete objects with this prefix (default: none)')
    parser.add_argument(
        '--marker', default='',
        help='only delete objects after this marker (default: none)')
    parser.add_argument(
        '--end-marker', default='',
        help='only delete objects before this end-marker (default: none)')
    parser.add_argument(
        '--timestamp', type=Timestamp, default=Timestamp.now(),
        help='delete all objects as of this time (default: now)')
    args = parser.parse_args()
    swift = InternalClient(
        args.config, 'Swift Container Deleter', args.request_tries)
    for deleted, marker in mark_for_deletion(
            swift, args.account, args.container,
            args.marker, args.end_marker, args.prefix, args.timestamp):
        if marker is None:
            print('Finished. Marked %d objects for deletion.' % deleted)
        else:
            print('Marked %d objects for deletion, through %r' % (
                deleted, marker))
def __init__(self, conf, logger=None, swift=None):
    """Configure the object expirer from *conf*.

    :param conf: dict of daemon configuration values
    :param logger: optional logger; a default one is built if omitted
    :param swift: optional pre-built client; otherwise an InternalClient
                  is created from the daemon's own config file
    """
    self.conf = conf
    self.logger = logger or get_logger(conf, log_route='object-expirer')
    self.interval = int(conf.get('interval') or 300)
    prefix = conf.get('auto_create_account_prefix') or '.'
    name = conf.get('expiring_objects_account_name') or 'expiring_objects'
    self.expiring_objects_account = prefix + name
    conf_path = conf.get('__file__') or '/etc/swift/object-expirer.conf'
    tries = int(conf.get('request_tries') or 3)
    self.swift = swift or InternalClient(conf_path, 'Swift Object Expirer',
                                         tries)
    self.report_interval = int(conf.get('report_interval') or 300)
    self.report_first_time = self.report_last_time = time()
    self.report_objects = 0
    self.recon_cache_path = conf.get('recon_cache_path', '/var/cache/swift')
    self.rcache = join(self.recon_cache_path, 'object.recon')
    self.concurrency = int(conf.get('concurrency', 1))
    if self.concurrency < 1:
        raise ValueError("concurrency must be set to at least 1")
    self.processes = int(self.conf.get('processes', 0))
    self.process = int(self.conf.get('process', 0))
    # This option defines how long an un-processable expired object
    # marker will be retried before it is abandoned. It is not coupled
    # with the tombstone reclaim age in the consistency engine.
    self.reclaim_age = int(conf.get('reclaim_age', 604800))
def __init__(self, conf, logger=None, swift=None):
    """Configure the container reconciler from *conf*.

    Raises ValueError for an out-of-range concurrency/processes/process
    combination.
    """
    self.conf = conf
    # Retry window for un-processable misplaced-object markers; this is
    # independent of the consistency engine's tombstone reclaim age.
    self.reclaim_age = int(conf.get('reclaim_age', 86400 * 7))
    self.interval = float(conf.get('interval', 30))
    conf_path = (conf.get('__file__')
                 or '/etc/swift/container-reconciler.conf')
    self.logger = logger or get_logger(conf,
                                       log_route='container-reconciler')
    tries = int(conf.get('request_tries') or 3)
    self.swift = swift or InternalClient(
        conf_path, 'Swift Container Reconciler', tries,
        use_replication_network=True)
    self.swift_dir = conf.get('swift_dir', '/etc/swift')
    self.stats = defaultdict(int)
    self.last_stat_time = time.time()
    self.ring_check_interval = float(conf.get('ring_check_interval', 15))
    self.concurrency = int(conf.get('concurrency', 1))
    if self.concurrency < 1:
        raise ValueError("concurrency must be set to at least 1")
    # Validate the processes/process pair used for work partitioning.
    self.processes = int(self.conf.get('processes', 0))
    if self.processes < 0:
        raise ValueError(
            'processes must be an integer greater than or equal to 0')
    self.process = int(self.conf.get('process', 0))
    if self.process < 0:
        raise ValueError(
            'process must be an integer greater than or equal to 0')
    if self.processes and self.process >= self.processes:
        raise ValueError('process must be less than processes')
def download(self, acc, container, u_agent, delay=0, request_tries=3):
    """Prefetch objects from *container* into the local cache.

    Fills the cache up to self.cache_max_size bytes in listing order,
    evicting entries the cache decides to drop.

    :param acc: swift account
    :param container: swift container to prefetch from
    :param u_agent: user-agent string for the InternalClient
    :param delay: seconds to sleep before starting
    :param request_tries: retries for the InternalClient
    """
    self.logger.info('Prefetching objects with InternalClient with ' +
                     str(delay) + ' seconds of delay.')
    time.sleep(delay)
    swift = InternalClient(PROXY_PATH, u_agent, request_tries=request_tries)
    headers = {}
    prefetch_list = []
    bytes_count = 0
    for o in swift.iter_objects(acc, container):
        if bytes_count + int(o['bytes']) < self.cache_max_size:
            prefetch_list.append(o['name'])
            bytes_count += int(o['bytes'])
        else:
            break
    for name in prefetch_list:
        object_path = '/v1/' + acc + '/' + container + '/' + name
        # BUGFIX: hashlib.md5 requires bytes on Python 3; encode the path
        # (a no-op change for ASCII paths on Python 2).
        oid = hashlib.md5(object_path.encode('utf-8')).hexdigest()
        status, resp_headers, it = swift.get_object(
            acc, container, name, headers, ACCEPTABLE_STATUS)
        object_size = int(resp_headers.get('Content-Length'))
        object_etag = resp_headers.get('Etag')
        object_storage_policy_id = '0'  # FIXME hardcoded
        to_evict = self.cache.access_cache(
            "PUT", oid, object_size, object_etag, object_storage_policy_id)
        for ev_object_id in to_evict:
            os.remove(os.path.join(self.cache_path, ev_object_id))
        self.logger.info('Prefetch Filter - Object ' + name +
                         ' stored in cache with ID: ' + oid)
        with open(os.path.join(self.cache_path, oid), 'w') as f:
            for el in it:
                f.write(el)
def create_internal_client(conf, swift_dir):
    """Build an InternalClient for the crawler.

    Uses the configured config path when it exists on disk; otherwise
    falls back to the baked-in INTERNAL_CLIENT_CONFIG string.
    """
    path = conf.get('internal_client_path',
                    os.path.join(swift_dir, 'internal-client.conf'))
    if os.path.exists(path):
        config = path
    else:
        config = ConfigString(INTERNAL_CLIENT_CONFIG)
    name = conf.get('internal_client_logname', 'ContainerCrawler')
    return InternalClient(config, name, 3)
def __init__(self, status_dir, settings):
    """Track metadata-sync status for one account/container pair.

    :param status_dir: root directory for per-container status files
    :param settings: dict with 'account' and 'container' keys
    """
    self._status_dir = status_dir
    self._account = settings['account']
    self._container = settings['container']
    ic_conf = ConfigString(self.INTERNAL_CLIENT_CONFIG)
    self._swift_client = InternalClient(ic_conf, 'Metadata sync', 3)
    self._status_account_dir = os.path.join(self._status_dir, self._account)
    self._status_file = os.path.join(self._status_account_dir,
                                     self._container)
def __init__(self, conf):
    """Configure the container reconciler: logger, client and counters."""
    self.conf = conf
    self.reclaim_age = int(conf.get('reclaim_age', 86400 * 7))
    self.interval = int(conf.get('interval', 30))
    self.logger = get_logger(conf, log_route='container-reconciler')
    conf_path = (conf.get('__file__')
                 or '/etc/swift/container-reconciler.conf')
    tries = int(conf.get('request_tries') or 3)
    self.swift = InternalClient(conf_path, 'Swift Container Reconciler',
                                tries)
    self.stats = defaultdict(int)
    self.last_stat_time = time.time()
def download(oid, acc, container, name, u_agent, token, delay=0,
             request_tries=5):
    # Fetch a single object through the proxy with an InternalClient,
    # bypassing any prefetch middleware via the X-No-Prefetch header.
    # Returns (oid, data, head, end_time, diff) where diff is the wall
    # time taken by the GET.
    #
    # NOTE(review): the sleep is commented out, so `delay` is printed but
    # never applied -- confirm that is intentional.
    # (Python 2 module: print statements.)
    print 'Prefetching object with InternalClient: ' + oid + \
        ' after ' + str(delay) + ' seconds of delay.'
    #time.sleep(delay)
    start_time = dt.now()
    swift = InternalClient(PROXY_PATH, u_agent, request_tries=request_tries)
    headers = {}
    headers['X-Auth-Token'] = token
    headers['X-No-Prefetch'] = 'True'
    status, head, it = swift.get_object(acc, container, name, headers,
                                        acc_status)
    # Materialize the body iterator so the timing covers the full read.
    data = [el for el in it]
    end_time = dt.now()
    diff = end_time - start_time
    return (oid, data, head, end_time, diff)
def make_swift_request(op, account, container=None, obj=None):
    """
    Makes a swift request via a local proxy (cost expensive)

    :param op: operation (PUT, GET, DELETE, HEAD)
    :param account: swift account
    :param container: swift container
    :param obj: swift object
    :returns: swift.common.swob.Response instance
    """
    # A fresh single-try InternalClient is built on every call, which is
    # why this path is labelled cost expensive.
    iclient = InternalClient(LOCAL_PROXY, 'Zion', 1)
    path = iclient.make_path(account, container, obj)
    resp = iclient.make_request(op, path, {'PATH_INFO': path}, [200])
    return resp
def read_conf_for_queue_access(self, swift):
    """Derive task-queue settings from self.conf and set up the client.

    :param swift: optional pre-built client; when falsy an InternalClient
                  is created from self.ic_conf_path
    """
    prefix = self.conf.get('auto_create_account_prefix') or '.'
    account_name = (self.conf.get('expiring_objects_account_name')
                    or 'expiring_objects')
    self.expiring_objects_account = prefix + account_name
    # This is for common parameter with general task queue in future
    self.task_container_prefix = ''
    tries = int(self.conf.get('request_tries') or 3)
    self.swift = swift or InternalClient(
        self.ic_conf_path, 'Swift Object Expirer', tries)
    self.processes = int(self.conf.get('processes', 0))
    self.process = int(self.conf.get('process', 0))
def __init__(self, conf):
    """Configure the container reconciler from *conf*."""
    self.conf = conf
    # Retry window for un-processable misplaced-object markers; not
    # coupled with the consistency engine's tombstone reclaim age.
    self.reclaim_age = int(conf.get('reclaim_age', 86400 * 7))
    self.interval = int(conf.get('interval', 30))
    self.logger = get_logger(conf, log_route='container-reconciler')
    path = conf.get('__file__') or '/etc/swift/container-reconciler.conf'
    tries = int(conf.get('request_tries') or 3)
    self.swift = InternalClient(path, 'Swift Container Reconciler', tries)
    self.stats = defaultdict(int)
    self.last_stat_time = time.time()
def __init__(self, conf):
    """Configure the object expirer: account name, client and recon."""
    self.conf = conf
    self.logger = get_logger(conf, log_route='object-expirer')
    self.interval = int(conf.get('interval') or 300)
    prefix = conf.get('auto_create_account_prefix') or '.'
    self.expiring_objects_account = prefix + 'expiring_objects'
    conf_path = conf.get('__file__') or '/etc/swift/object-expirer.conf'
    tries = int(conf.get('request_tries') or 3)
    self.swift = InternalClient(conf_path, 'Swift Object Expirer', tries)
    self.report_interval = int(conf.get('report_interval') or 300)
    self.report_first_time = self.report_last_time = time()
    self.report_objects = 0
    self.recon_cache_path = conf.get('recon_cache_path', '/var/cache/swift')
    self.rcache = join(self.recon_cache_path, 'object.recon')
def setUp(cls):
    """Create a fresh authenticated account/container plus an expirer
    client and manager for the test class.

    Raises ResponseError if the test container cannot be created.
    """
    cls.conn = Connection(config)
    cls.conn.authenticate()
    cls.account = Account(cls.conn,
                          config.get('account', config['username']))
    cls.account.delete_containers()
    cls.container = cls.account.container(Utils.create_name())
    if not cls.container.create():
        raise ResponseError(cls.conn.response)
    cls.file_size = 8
    # Derive the on-disk account directory name from the storage URL.
    cls.root_dir = os.path.join(
        '/mnt/gluster-object',
        cls.account.conn.storage_url.split('/')[2].split('_')[1])
    # (Removed unused local `devices`; it was assigned and never read.)
    cls.client = InternalClient('/etc/swift/object-expirer.conf',
                                'Test Object Expirer', 1)
    cls.expirer = Manager(['object-expirer'])
def setUp(self):
    """Verify the object-expirer service exists and prepare probe state."""
    self.expirer = Manager(['object-expirer'])
    self.expirer.start()
    if self.expirer.stop():
        raise unittest.SkipTest('Unable to verify object-expirer service')
    conf_files = [cf for server in self.expirer.servers
                  for cf in server.conf_files()]
    self.client = InternalClient(conf_files[0], 'probe-test', 3)
    super(TestObjectExpirer, self).setUp()
    self.container_name = 'container-%s' % uuid.uuid4()
    self.object_name = 'object-%s' % uuid.uuid4()
    self.brain = BrainSplitter(self.url, self.token, self.container_name,
                               self.object_name)
def run(self):
    """Fetch one object after an optional delay and record timing."""
    self.logger.debug('Prefetching object with InternalClient: ' +
                      self.oid + ' after ' + str(self.delay) +
                      ' seconds of delay.')
    eventlet.sleep(self.delay)
    start_time = dt.now()
    swift = InternalClient(PROXY_PATH, self.user_agent,
                           request_tries=self.request_tries)
    # Bypass any prefetch middleware on this internal GET.
    headers = {'X-Auth-Token': self.token, 'X-No-Prefetch': 'True'}
    status, head, it = swift.get_object(self.acc, self.container,
                                        self.objname, headers, acc_status)
    data = list(it)
    diff = dt.now() - start_time
    self.log_results(self.oid, data, diff)
    self.delete_memory()
def download(oid, acc, container, name, u_agent, token, delay=0,
             request_tries=5):
    # Fetch one object through the proxy with an InternalClient after
    # sleeping for the requested delay, and return (oid, data, head).
    #
    # NOTE(review): `delay` is used via .total_seconds(), so callers are
    # expected to pass a timedelta despite the `delay=0` default -- the
    # default value would crash here; confirm against callers.
    # (Python 2 module: print statements.)
    print 'Prefetching object with InternalClient: ' + oid + ' after ' + str(
        delay.total_seconds()) + ' seconds of delay.'
    time.sleep(delay.total_seconds())
    swift = InternalClient(PROXY_PATH, u_agent, request_tries=request_tries)
    headers = {}
    headers['X-Auth-Token'] = token
    # Bypass any prefetch middleware for this internal GET.
    headers['X-No-Prefetch'] = 'True'
    status, head, it = swift.get_object(acc, container, name, headers,
                                        acc_status)
    print 'Request to Swift - Response Status: ' + str(
        status) + ' Response headers: ' + str(head)
    # Materialize the body iterator before returning.
    data = [el for el in it]
    return (oid, data, head)
def __init__(self, conf):
    """Configure the object expirer, including concurrency and the
    processes/process work-partitioning options.

    Raises ValueError when concurrency is below 1.
    """
    self.conf = conf
    self.logger = get_logger(conf, log_route='object-expirer')
    self.interval = int(conf.get('interval') or 300)
    prefix = conf.get('auto_create_account_prefix') or '.'
    self.expiring_objects_account = prefix + 'expiring_objects'
    conf_path = conf.get('__file__') or '/etc/swift/object-expirer.conf'
    tries = int(conf.get('request_tries') or 3)
    self.swift = InternalClient(conf_path, 'Swift Object Expirer', tries)
    self.report_interval = int(conf.get('report_interval') or 300)
    self.report_first_time = self.report_last_time = time()
    self.report_objects = 0
    self.recon_cache_path = conf.get('recon_cache_path', '/var/cache/swift')
    self.rcache = join(self.recon_cache_path, 'object.recon')
    self.concurrency = int(conf.get('concurrency', 1))
    if self.concurrency < 1:
        raise ValueError("concurrency must be set to at least 1")
    self.processes = int(self.conf.get('processes', 0))
    self.process = int(self.conf.get('process', 0))
def __init__(self, account, auth_url=None, password=None, container='rings',
             internal=True):
    """Set up ring-sync state and, when *internal* is true, an
    InternalClient built from the bundled stub.conf.

    :param account: swift account holding the ring container
    :param auth_url: auth endpoint (unused when internal is true)
    :param password: credential (unused when internal is true)
    :param container: container holding the rings (default 'rings')
    :param internal: build an InternalClient instead of an external one
    :raises RingSyncError: if the InternalClient cannot be initialized
    """
    self.internal = internal
    self.account = account
    self.auth_url = auth_url
    self.password = password
    self.container = container
    self.conn = None
    retry_times = 3
    if self.internal:
        try:
            # stub.conf lives next to this module.
            conf_path = join(abspath(dirname(__file__)), 'stub.conf')
            self.conn = InternalClient(conf_path, 'swift_ring_sync',
                                       retry_times)
        # Python 2 except syntax (this module is py2-only).
        except IOError, msg:
            raise RingSyncError('InternalClient Init Error: [%s]' % msg)
        except UnexpectedResponse, (msg, resp):
            raise RingSyncError('InternalClient Init Error: [%s]' % msg)
def setUp(self):
    """Skip unless multiple policies and a working expirer service exist,
    then reset the probe environment."""
    if len(POLICIES) < 2:
        raise SkipTest('Need more than one policy')
    self.expirer = Manager(['object-expirer'])
    self.expirer.start()
    if self.expirer.stop():
        raise SkipTest('Unable to verify object-expirer service')
    conf_files = [cf for server in self.expirer.servers
                  for cf in server.conf_files()]
    self.client = InternalClient(conf_files[0], 'probe-test', 3)
    (self.pids, self.port2server, self.account_ring, self.container_ring,
     self.object_ring, self.policy, self.url, self.token, self.account,
     self.configs) = reset_environment()
    self.container_name = 'container-%s' % uuid.uuid4()
    self.object_name = 'object-%s' % uuid.uuid4()
    self.brain = BrainSplitter(self.url, self.token, self.container_name,
                               self.object_name)
def test_reconciler_move_object_twice(self):
    """Exercise the reconciler moving one object between two policies,
    twice, after a deliberately induced container split-brain.

    The exact ordering of brain start/stop, container PUTs and daemon
    passes below is what creates the misplaced-object queue entries;
    do not reorder.
    """
    # select some policies
    old_policy = random.choice(ENABLED_POLICIES)
    new_policy = random.choice(
        [p for p in ENABLED_POLICIES if p != old_policy])
    # setup a split brain
    self.brain.stop_handoff_half()
    # get old_policy on two primaries
    self.brain.put_container(policy_index=int(old_policy))
    self.brain.start_handoff_half()
    self.brain.stop_primary_half()
    # force a recreate on handoffs
    self.brain.put_container(policy_index=int(old_policy))
    self.brain.delete_container()
    self.brain.put_container(policy_index=int(new_policy))
    self.brain.put_object()  # populate memcache with new_policy
    self.brain.start_primary_half()
    # at this point two primaries have old policy
    container_part, container_nodes = self.container_ring.get_nodes(
        self.account, self.container_name)
    head_responses = [
        (node, direct_client.direct_head_container(
            node, container_part, self.account, self.container_name))
        for node in container_nodes]
    old_container_nodes = [
        node for node, metadata in head_responses
        if int(old_policy) ==
        int(metadata['X-Backend-Storage-Policy-Index'])]
    self.assertEqual(2, len(old_container_nodes))
    # hopefully memcache still has the new policy cached
    self.brain.put_object(headers={'x-object-meta-test': 'custom-meta'},
                          contents=b'VERIFY')
    # double-check object correctly written to new policy
    conf_files = []
    for server in Manager(['container-reconciler']).servers:
        conf_files.extend(server.conf_files())
    conf_file = conf_files[0]
    int_client = InternalClient(conf_file, 'probe-test', 3)
    int_client.get_object_metadata(
        self.account, self.container_name, self.object_name,
        headers={'X-Backend-Storage-Policy-Index': int(new_policy)})
    int_client.get_object_metadata(
        self.account, self.container_name, self.object_name,
        acceptable_statuses=(4, ),
        headers={'X-Backend-Storage-Policy-Index': int(old_policy)})
    # shutdown the containers that know about the new policy
    self.brain.stop_handoff_half()
    # and get rows enqueued from old nodes
    for server_type in ('container-replicator', 'container-updater'):
        server = Manager([server_type])
        for node in old_container_nodes:
            server.once(number=self.config_number(node))
    # verify entry in the queue for the "misplaced" new_policy
    for container in int_client.iter_containers(MISPLACED_OBJECTS_ACCOUNT):
        for obj in int_client.iter_objects(MISPLACED_OBJECTS_ACCOUNT,
                                           container['name']):
            expected = '%d:/%s/%s/%s' % (new_policy, self.account,
                                         self.container_name,
                                         self.object_name)
            self.assertEqual(obj['name'], expected)
    Manager(['container-reconciler']).once()
    # verify object in old_policy
    int_client.get_object_metadata(
        self.account, self.container_name, self.object_name,
        headers={'X-Backend-Storage-Policy-Index': int(old_policy)})
    # verify object is *not* in new_policy
    int_client.get_object_metadata(
        self.account, self.container_name, self.object_name,
        acceptable_statuses=(4, ),
        headers={'X-Backend-Storage-Policy-Index': int(new_policy)})
    self.get_to_final_state()
    # verify entry in the queue
    for container in int_client.iter_containers(MISPLACED_OBJECTS_ACCOUNT):
        for obj in int_client.iter_objects(MISPLACED_OBJECTS_ACCOUNT,
                                           container['name']):
            expected = '%d:/%s/%s/%s' % (old_policy, self.account,
                                         self.container_name,
                                         self.object_name)
            self.assertEqual(obj['name'], expected)
    Manager(['container-reconciler']).once()
    # and now it flops back
    int_client.get_object_metadata(
        self.account, self.container_name, self.object_name,
        headers={'X-Backend-Storage-Policy-Index': int(new_policy)})
    int_client.get_object_metadata(
        self.account, self.container_name, self.object_name,
        acceptable_statuses=(4, ),
        headers={'X-Backend-Storage-Policy-Index': int(old_policy)})
    # make sure the queue is settled
    self.get_to_final_state()
    for container in int_client.iter_containers(MISPLACED_OBJECTS_ACCOUNT):
        for obj in int_client.iter_objects(MISPLACED_OBJECTS_ACCOUNT,
                                           container['name']):
            self.fail('Found unexpected object %r in the queue' % obj)
    # verify that the object data read by external client is correct
    headers, data = self._get_object_patiently(int(new_policy))
    self.assertEqual(b'VERIFY', data)
    self.assertEqual('custom-meta', headers['x-object-meta-test'])
def __init__(self, conf, container_ring=None, logger=None):
    """Initialize the container-sync daemon.

    :param conf: dict of configuration values from the [container-sync]
                 section of container-server.conf
    :param container_ring: optional pre-built container Ring (tests)
    :param logger: optional logger; a default one is created if omitted
    :raises SystemExit: when the internal client config cannot be loaded
    """
    #: The dict of configuration values from the [container-sync] section
    #: of the container-server.conf.
    self.conf = conf
    #: Logger to use for container-sync log lines.
    self.logger = logger or get_logger(conf, log_route='container-sync')
    #: Path to the local device mount points.
    self.devices = conf.get('devices', '/srv/node')
    #: Indicates whether mount points should be verified as actual mount
    #: points (normally true, false for tests and SAIO).
    self.mount_check = config_true_value(conf.get('mount_check', 'true'))
    #: Minimum time between full scans. This is to keep the daemon from
    #: running wild on near empty systems.
    self.interval = int(conf.get('interval', 300))
    #: Maximum amount of time to spend syncing a container before moving
    #: on to the next one. If a container sync hasn't finished in this
    #: time, it'll just be resumed next scan.
    self.container_time = int(conf.get('container_time', 60))
    #: ContainerSyncCluster instance for validating sync-to values.
    self.realms_conf = ContainerSyncRealms(
        os.path.join(conf.get('swift_dir', '/etc/swift'),
                     'container-sync-realms.conf'),
        self.logger)
    #: The list of hosts we're allowed to send syncs to. This can be
    #: overridden by data in self.realms_conf
    self.allowed_sync_hosts = [
        h.strip()
        for h in conf.get('allowed_sync_hosts', '127.0.0.1').split(',')
        if h.strip()]
    self.http_proxies = [
        a.strip()
        for a in conf.get('sync_proxy', '').split(',')
        if a.strip()]
    #: ContainerSyncStore instance for iterating over synced containers
    self.sync_store = ContainerSyncStore(self.devices, self.logger,
                                         self.mount_check)
    #: Number of containers with sync turned on that were successfully
    #: synced.
    self.container_syncs = 0
    #: Number of successful DELETEs triggered.
    self.container_deletes = 0
    #: Number of successful PUTs triggered.
    self.container_puts = 0
    #: Number of containers whose sync has been turned off, but
    #: are not yet cleared from the sync store.
    self.container_skips = 0
    #: Number of containers that had a failure of some type.
    self.container_failures = 0
    #: Per container stats. These are collected per container.
    #: puts - the number of puts that were done for the container
    #: deletes - the number of deletes that were done for the container
    #: bytes - the total number of bytes transferred per the container
    self.container_stats = collections.defaultdict(int)
    self.container_stats.clear()
    #: Time of last stats report.
    self.reported = time()
    self.swift_dir = conf.get('swift_dir', '/etc/swift')
    #: swift.common.ring.Ring for locating containers.
    self.container_ring = container_ring or Ring(self.swift_dir,
                                                 ring_name='container')
    bind_ip = conf.get('bind_ip', '0.0.0.0')
    self._myips = whataremyips(bind_ip)
    self._myport = int(conf.get('bind_port', 6201))
    swift.common.db.DB_PREALLOCATION = \
        config_true_value(conf.get('db_preallocation', 'f'))
    self.conn_timeout = float(conf.get('conn_timeout', 5))
    request_tries = int(conf.get('request_tries') or 3)
    internal_client_conf_path = conf.get('internal_client_conf_path')
    if not internal_client_conf_path:
        # Fall back to the embedded sample config when none is configured.
        self.logger.warning(
            _('Configuration option internal_client_conf_path not '
              'defined. Using default configuration, See '
              'internal-client.conf-sample for options'))
        internal_client_conf = ConfigString(ic_conf_body)
    else:
        internal_client_conf = internal_client_conf_path
    try:
        self.swift = InternalClient(internal_client_conf,
                                    'Swift Container Sync', request_tries)
    except (OSError, IOError) as err:
        # Only a missing config file is translated into SystemExit;
        # any other I/O error is re-raised unchanged.
        if err.errno != errno.ENOENT and \
                not str(err).endswith(' not found'):
            raise
        raise SystemExit(
            _('Unable to load internal client from config: '
              '%(conf)r (%(error)s)') % {
                  'conf': internal_client_conf_path, 'error': err})
def test_reconciler_move_object_twice(self):
    """Exercise the reconciler moving one object between two policies,
    twice, after a deliberately induced container split-brain.

    The ordering of brain start/stop calls, container PUTs and daemon
    passes below is what produces the .misplaced_objects queue rows;
    do not reorder.
    """
    # select some policies
    old_policy = random.choice(list(POLICIES))
    new_policy = random.choice([p for p in POLICIES if p != old_policy])
    # setup a split brain
    self.brain.stop_handoff_half()
    # get old_policy on two primaries
    self.brain.put_container(policy_index=int(old_policy))
    self.brain.start_handoff_half()
    self.brain.stop_primary_half()
    # force a recreate on handoffs
    self.brain.put_container(policy_index=int(old_policy))
    self.brain.delete_container()
    self.brain.put_container(policy_index=int(new_policy))
    self.brain.put_object()  # populate memcache with new_policy
    self.brain.start_primary_half()
    # at this point two primaries have old policy
    container_part, container_nodes = self.container_ring.get_nodes(
        self.account, self.container_name)
    head_responses = []
    for node in container_nodes:
        metadata = direct_client.direct_head_container(
            node, container_part, self.account, self.container_name)
        head_responses.append((node, metadata))
    old_container_node_ids = [
        node['id'] for node, metadata in head_responses
        if int(old_policy) ==
        int(metadata['X-Backend-Storage-Policy-Index'])]
    self.assertEqual(2, len(old_container_node_ids))
    # hopefully memcache still has the new policy cached
    self.brain.put_object()
    # double-check object correctly written to new policy
    conf_files = []
    for server in Manager(['container-reconciler']).servers:
        conf_files.extend(server.conf_files())
    conf_file = conf_files[0]
    client = InternalClient(conf_file, 'probe-test', 3)
    client.get_object_metadata(
        self.account, self.container_name, self.object_name,
        headers={'X-Backend-Storage-Policy-Index': int(new_policy)})
    client.get_object_metadata(
        self.account, self.container_name, self.object_name,
        acceptable_statuses=(4, ),
        headers={'X-Backend-Storage-Policy-Index': int(old_policy)})
    # shutdown the containers that know about the new policy
    self.brain.stop_handoff_half()
    # and get rows enqueued from old nodes
    for server_type in ('container-replicator',
                        'container-updater'):
        server = Manager([server_type])
        # node ids are zero-based; server numbers are one-based
        tuple(server.once(number=n + 1) for n in old_container_node_ids)
    # verify entry in the queue for the "misplaced" new_policy
    for container in client.iter_containers('.misplaced_objects'):
        for obj in client.iter_objects('.misplaced_objects',
                                       container['name']):
            expected = '%d:/%s/%s/%s' % (new_policy, self.account,
                                         self.container_name,
                                         self.object_name)
            self.assertEqual(obj['name'], expected)
    Manager(['container-reconciler']).once()
    # verify object in old_policy
    client.get_object_metadata(
        self.account, self.container_name, self.object_name,
        headers={'X-Backend-Storage-Policy-Index': int(old_policy)})
    # verify object is *not* in new_policy
    client.get_object_metadata(
        self.account, self.container_name, self.object_name,
        acceptable_statuses=(4, ),
        headers={'X-Backend-Storage-Policy-Index': int(new_policy)})
    get_to_final_state()
    # verify entry in the queue
    client = InternalClient(conf_file, 'probe-test', 3)
    for container in client.iter_containers('.misplaced_objects'):
        for obj in client.iter_objects('.misplaced_objects',
                                       container['name']):
            expected = '%d:/%s/%s/%s' % (old_policy, self.account,
                                         self.container_name,
                                         self.object_name)
            self.assertEqual(obj['name'], expected)
    Manager(['container-reconciler']).once()
    # and now it flops back
    client.get_object_metadata(
        self.account, self.container_name, self.object_name,
        headers={'X-Backend-Storage-Policy-Index': int(new_policy)})
    client.get_object_metadata(
        self.account, self.container_name, self.object_name,
        acceptable_statuses=(4, ),
        headers={'X-Backend-Storage-Policy-Index': int(old_policy)})
    # make sure the queue is settled
    get_to_final_state()
    for container in client.iter_containers('.misplaced_objects'):
        for obj in client.iter_objects('.misplaced_objects',
                                       container['name']):
            self.fail('Found unexpected object %r in the queue' % obj)
def __init__(self, conf_file, account='AUTH_test'):
    """Probe-test helper: an internal client bound to one account."""
    self.account = account
    self.swift = InternalClient(conf_file, 'probe-test', 3)
def client(self):
    """Return a freshly-built InternalClient.

    TODO(kota_): cache this on self._client to avoid re-instantiating a
    client on every access.
    """
    ic = InternalClient(self.conf_file, 'SA', 1)
    return ic
from swift.common.storage_policy import POLICIES

if len(sys.argv) < 4:
    # BUGFIX: the usage string contained a bare %s that was never
    # interpolated; substitute the program name.
    print("Usage: %s <account> <container> <object> [y]" % sys.argv[0])
    sys.exit()
account = sys.argv[1]
container = sys.argv[2]
obj = sys.argv[3]
# Optional 4th arg: when affirmative, (re)create the container in any
# policy where the object is found.
post_container = False
if len(sys.argv) == 5:
    if sys.argv[4] in ['y', 'Y', 'yes', 'YES']:
        post_container = True
client = InternalClient('/etc/swift/internal-client.conf', 'check-cont', 3)
for p in POLICIES:
    print('Checking policy name: %s (%d)' % (p.name, p.idx))
    headers = {'X-Backend-Storage-Policy-Index': p.idx}
    meta = client.get_object_metadata(account, container, obj,
                                      headers=headers,
                                      acceptable_statuses=(2, 4))
    if 'x-timestamp' in meta:
        print(' >> Find object %s in policy %s' % (obj, p.name))
        if post_container:
            print('create container in policy %s' % p.name)
            headers = {'X-Storage-Policy': p.name}
            client.create_container(account, container, headers)