Example no. 1
 def __init__(self,
              conf,
              logger,
              volume,
              input_file=None,
              try_chunk_delete=False,
              beanstalkd_addr=None):
     self.conf = conf
     self.logger = logger or get_logger(conf)
     self.volume = volume
     self.run_time = 0
     self.passes = 0
     self.errors = 0
     self.last_reported = 0
     self.chunks_run_time = 0
     self.bytes_running_time = 0
     self.bytes_processed = 0
     self.total_bytes_processed = 0
     self.total_chunks_processed = 0
     self.dry_run = true_value(conf.get('dry_run', False))
     self.report_interval = int_value(conf.get('report_interval'), 3600)
     self.max_chunks_per_second = int_value(conf.get('chunks_per_second'),
                                            30)
     self.max_bytes_per_second = int_value(conf.get('bytes_per_second'),
                                           10000000)
     self.rdir_fetch_limit = int_value(conf.get('rdir_fetch_limit'), 100)
     self.allow_same_rawx = true_value(conf.get('allow_same_rawx'))
     self.input_file = input_file
     self.rdir_client = RdirClient(conf, logger=self.logger)
     self.content_factory = ContentFactory(conf)
     self.try_chunk_delete = try_chunk_delete
     self.beanstalkd_addr = beanstalkd_addr
     self.beanstalkd_tube = conf.get('beanstalkd_tube', 'rebuild')
     self.beanstalk = None
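
All of the constructors in these examples rely on the same small configuration helpers. As a reference, here is a minimal sketch of true_value and int_value, assuming the usual oio/swift convention for truthy strings; it is an illustration, not a verbatim copy of the library code.

    # Minimal sketch of the config helpers used throughout these examples.
    # The set of truthy strings is an assumption (oio/swift convention).
    TRUE_VALUES = ('true', '1', 'yes', 'on', 't', 'y')

    def true_value(value):
        # Accept booleans and common truthy strings, case-insensitively.
        return value is True or str(value).lower() in TRUE_VALUES

    def int_value(value, default):
        # Fall back to the default when the value is missing or invalid.
        if value is None:
            return default
        try:
            return int(value)
        except (TypeError, ValueError):
            return default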
Example no. 2
    def __init__(self, endpoint=None, pool_manager=None,
                 connection='keep-alive', service_type='unknown', **kwargs):
        """
        :param pool_manager: an optional pool manager that will be reused
        :type pool_manager: `urllib3.PoolManager`
        :param endpoint: base of the URL that will be requested
        :type endpoint: `str`
        :keyword admin_mode: allow talking to a slave/worm namespace
        :type admin_mode: `bool`

        :keyword perfdata: optional dictionary that will be filled with
            metrics of the time spent resolving the meta2 address and
            performing the meta2 request.
        :type perfdata: `dict`
        :keyword connection: 'keep-alive' to keep connections open (default)
            or 'close' to explicitly close them.
        """
        self.endpoint = endpoint

        if not pool_manager:
            pool_manager = get_pool_manager(**kwargs)
        self.pool_manager = pool_manager

        self.admin_mode = true_value(kwargs.get('admin_mode', False))
        self.force_master = true_value(kwargs.get('force_master', False))
        self.connection = connection
        self.service_type = service_type
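
The class name is not visible in this snippet, so the one below is a placeholder; this is only a sketch of how such a constructor is typically called, with the boolean keyword parsed by a helper like true_value.

    # Hypothetical usage; 'ServiceClient' and the endpoint are placeholders.
    client = ServiceClient(endpoint='http://127.0.0.1:6006',
                           connection='close',    # close sockets after use
                           service_type='rawx',
                           admin_mode='yes')      # any truthy string works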
Example no. 3
 def __init__(self, conf, logger=None, **kwargs):
     self.conf = conf
     self.logger = logger or get_logger(conf)
     volume = conf.get('volume')
     if not volume:
         raise ConfigurationException('No volume specified for converter')
     self.volume = volume
     self.namespace, self.volume_id = check_volume(self.volume)
     # cache
     self.name_by_cid = CacheDict()
     self.content_id_by_name = CacheDict()
     # client
     self.container_client = ContainerClient(conf, **kwargs)
     self.content_factory = ContentFactory(conf,
                                           self.container_client,
                                           logger=self.logger)
     # stats/logs
     self.errors = 0
     self.passes = 0
     self.total_chunks_processed = 0
     self.start_time = 0
     self.last_reported = 0
     self.report_interval = int_value(conf.get('report_interval'), 3600)
     # speed
     self.chunks_run_time = 0
     self.max_chunks_per_second = int_value(conf.get('chunks_per_second'),
                                            30)
     # backup
     self.no_backup = true_value(conf.get('no_backup', False))
     self.backup_dir = conf.get('backup_dir') or tempfile.gettempdir()
     self.backup_name = 'backup_%s_%f' \
         % (self.volume_id, time.time())
     # dry run
     self.dry_run = true_value(conf.get('dry_run', False))
Example no. 4
 def _check_objects(self, expected_objects, objects):
     self.assertEqual(len(expected_objects), len(objects))
     for i in range(len(expected_objects)):
         self.assertEqual(expected_objects[i]['name'],
                          objects[i]['name'])
         self.assertEqual(int(expected_objects[i]['version']),
                          int(objects[i]['version']))
         self.assertEqual(true_value(expected_objects[i]['deleted']),
                          true_value(objects[i]['deleted']))
Example no. 5
 def __init__(self, rebuilder, try_chunk_delete=False, **kwargs):
     super(BlobRebuilderWorker, self).__init__(rebuilder, **kwargs)
     self.dry_run = true_value(self.rebuilder.conf.get('dry_run', False))
     self.allow_same_rawx = true_value(
         self.rebuilder.conf.get('allow_same_rawx'))
     self.try_chunk_delete = try_chunk_delete
     self.rdir_client = self.rebuilder.rdir_client
     self.content_factory = ContentFactory(self.rebuilder.conf,
                                           logger=self.logger)
     self.sender = None
Example no. 6
 def __init__(self, conf, logger, volume, try_chunk_delete=False, **kwargs):
     super(BlobRebuilderWorker, self).__init__(conf, logger, **kwargs)
     self.volume = volume
     self.bytes_processed = 0
     self.total_bytes_processed = 0
     self.dry_run = true_value(conf.get('dry_run', False))
     self.allow_same_rawx = true_value(conf.get('allow_same_rawx'))
     self.rdir_client = RdirClient(conf, logger=self.logger)
     self.content_factory = ContentFactory(conf, logger=self.logger)
     self.try_chunk_delete = try_chunk_delete
Example no. 7
    def __init__(self, tool, queue_workers, queue_reply):
        super(BlobRebuilderWorker, self).__init__(
            tool, queue_workers, queue_reply)

        self.allow_frozen_container = true_value(self.tool.conf.get(
            'allow_frozen_container', self.tool.DEFAULT_ALLOW_FROZEN_CT))
        self.allow_same_rawx = true_value(self.tool.conf.get(
            'allow_same_rawx', self.tool.DEFAULT_ALLOW_SAME_RAWX))
        self.try_chunk_delete = true_value(self.tool.conf.get(
            'try_chunk_delete', self.tool.DEFAULT_TRY_CHUNK_DELETE))
        self.dry_run = true_value(self.tool.conf.get(
            'dry_run', self.tool.DEFAULT_DRY_RUN))

        self.chunk_operator = ChunkOperator(self.conf, logger=self.logger)
Example no. 8
    def __init__(self, tool, queue_workers, queue_reply):
        super(ContainerRepairerWorker, self).__init__(
            tool, queue_workers, queue_reply)

        self.rebuild_bases = true_value(self.tool.conf.get(
            'rebuild_bases', self.tool.DEFAULT_REBUILD_BASES))
        self.sync_bases = true_value(self.tool.conf.get(
            'sync_bases', self.tool.DEFAULT_SYNC_BASES))
        self.update_account = true_value(self.tool.conf.get(
            'update_account', self.tool.DEFAULT_UPDATE_ACCOUNT))

        self.admin_client = AdminClient(self.conf, logger=self.logger)
        self.container_client = ContainerClient(self.conf, logger=self.logger)
        self.meta2_database = Meta2Database(self.conf, logger=self.logger)
Example no. 9
    def _unmarshal_job_info(marshalled_job_info):
        job_info = dict(
            job=dict(),
            orchestrator=dict(),
            tasks=dict(),
            errors=dict(),
            results=dict(),
            config=dict())

        for key, value in marshalled_job_info.items():
            split_key = key.decode('utf-8').split('.', 1)
            value = value.decode('utf-8')
            if len(split_key) == 1:
                job_info[split_key[0]] = value
            else:
                job_info[split_key[0]][split_key[1]] = value

        job_main_info = job_info['job']
        job_main_info['ctime'] = float(job_main_info['ctime'])
        job_main_info['mtime'] = float(job_main_info['mtime'])
        job_main_info['request_pause'] = true_value(
            job_main_info['request_pause'])

        job_tasks = job_info['tasks']
        job_tasks['sent'] = int(job_tasks['sent'])
        job_tasks.setdefault('last_sent')
        job_tasks['all_sent'] = true_value(job_tasks['all_sent'])
        job_tasks['processed'] = int(job_tasks['processed'])
        # To have a coherent total if the estimate was not correct
        if job_tasks['all_sent']:
            job_tasks['total'] = job_tasks['sent']
        else:
            job_tasks['total'] = max(job_tasks['sent'],
                                     int(job_tasks['total']))
        job_tasks['is_total_temp'] = true_value(
            job_tasks['is_total_temp'])
        job_tasks.setdefault('total_marker')

        job_errors = job_info['errors']
        for key, value in job_errors.items():
            job_errors[key] = int(value)

        job_results = job_info.get('results', dict())
        for key, value in job_results.items():
            job_results[key] = int(value)

        job_info['config'] = json.loads(job_info['config'])

        return job_info
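
To make the key-splitting concrete: each flat 'section.field' key from the marshalled hash is split on the first dot and filed under its section. A partial, invented illustration (a real hash would carry all the fields the method touches):

    # Invented, partial illustration of the transformation performed above.
    marshalled = {
        b'job.ctime': b'1614000000.0',   # flat 'section.field' keys ...
        b'tasks.sent': b'42',
    }
    # ... are decoded, split on the first '.', and nested per section:
    # {'job': {'ctime': 1614000000.0, ...}, 'tasks': {'sent': 42, ...}}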
Example no. 10
    def __init__(self, endpoint=None, pool_manager=None,
                 connection='keep-alive', **kwargs):
        """
        :param pool_manager: an optional pool manager that will be reused
        :type pool_manager: `urllib3.PoolManager`
        :param endpoint: base of the URL that will be requested
        :type endpoint: `str`
        :keyword admin_mode: allow talking to a slave/worm namespace
        :type admin_mode: `bool`

        :keyword perfdata: optional dictionary that will be filled with
            metrics of the time spent resolving the meta2 address and
            performing the meta2 request.
        :type perfdata: `dict`
        :keyword connection: 'keep-alive' to keep connections open (default)
            or 'close' to explicitly close them.
        """
        super(HttpApi, self).__init__()
        self.endpoint = endpoint

        if not pool_manager:
            pool_manager_conf = {k: int(v)
                                 for k, v in kwargs.items()
                                 if k in _POOL_MANAGER_OPTIONS_KEYS}
            pool_manager = get_pool_manager(**pool_manager_conf)
        self.pool_manager = pool_manager

        self.admin_mode = true_value(kwargs.get('admin_mode', False))
        self.perfdata = kwargs.get('perfdata')
        self.connection = connection
Example no. 11
 def __init__(self, conf):
     self.conf = conf
     redis_conf = {k[6:]: v for k, v in self.conf.items()
                   if k.startswith("redis_")}
     redis_host = redis_conf.pop('host', None)
     if redis_host:
         parsed = urlparse('http://' + redis_host)
         if parsed.port is None:
             redis_host = '%s:%s' % (redis_host,
                                     redis_conf.pop('port', '6379'))
     redis_sentinel_hosts = redis_conf.pop(
         'sentinel_hosts',
         # TODO(adu): delete when it is no longer used
         self.conf.get('sentinel_hosts'))
     redis_sentinel_name = redis_conf.pop(
         'sentinel_name',
         # TODO(adu): delete when it is no longer used
         self.conf.get('sentinel_master_name'))
     super(AccountBackend, self).__init__(
         host=redis_host, sentinel_hosts=redis_sentinel_hosts,
         sentinel_name=redis_sentinel_name, **redis_conf)
     self.autocreate = true_value(conf.get('autocreate', 'true'))
     self.script_update_container = self.register_script(
         self.lua_update_container)
     self.script_refresh_account = self.register_script(
         self.lua_refresh_account)
     self.script_flush_account = self.register_script(
         self.lua_flush_account)
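
The k[6:] in the comprehension strips the 'redis_' prefix (6 characters) so the remaining keys can be forwarded as plain connection options. A self-contained illustration with invented keys:

    # Invented example of the prefix-stripping comprehension above.
    conf = {'redis_host': '127.0.0.1', 'redis_db': '0', 'namespace': 'OPENIO'}
    redis_conf = {k[6:]: v for k, v in conf.items() if k.startswith('redis_')}
    # -> {'host': '127.0.0.1', 'db': '0'}  ('namespace' is filtered out)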
Example no. 12
 def _list_contents(self):
     for container in self._list_containers():
         marker = None
         while True:
             try:
                 _, listing = self.container_client.content_list(
                     account=self.account,
                     reference=container,
                     limit=self.content_fetch_limit,
                     marker=marker)
             except NotFound:
                 self.logger.warn(
                     "Container %s appears in account but doesn't exist",
                     container)
                 break
             if len(listing["objects"]) == 0:
                 break
             for obj in listing["objects"]:
                 marker = obj["name"]
                 if obj["mtime"] > time.time() - self.outdated_threshold:
                     continue
                 if obj["policy"] == self.new_policy:
                     continue
                 if true_value(obj['deleted']):
                     continue
                 yield (self.account, container, obj["name"],
                        obj["version"])
Example no. 13
    def status(self,
               volume,
               max=1000,
               prefix=None,
               marker=None,
               max_attempts=3,
               **kwargs):
        """
        Get the status of chunks belonging to the specified volume.

        :param volume: the volume to get chunks from
        :type volume: `str`
        :param max: maximum number of results to return per request
            to the rdir server.
        :type max: `int`
        :keyword prefix: get only chunks belonging to
           the specified prefix
        :type prefix: `str`
        :keyword marker: fetch only chunks that appear after
            this marker
        :type marker: `str`
        """
        req_params = {'max': max}
        if prefix:
            req_params['prefix'] = prefix
        if marker:
            req_params['marker'] = marker
        chunks = dict()
        containers = dict()

        while True:
            for i in range(max_attempts):
                try:
                    _resp, resp_body = self._rdir_request(volume,
                                                          'GET',
                                                          'status',
                                                          params=req_params,
                                                          **kwargs)
                    break
                except OioNetworkException:
                    # Linear backoff (the first retry is immediate)
                    if i < max_attempts - 1:
                        sleep(i * 1.0)
                        continue
                    # Too many attempts
                    raise

            for (key, value) in resp_body.get('chunk', dict()).items():
                chunks[key] = chunks.get(key, 0) + value
            for (cid, info) in resp_body.get('container', dict()).items():
                for (key, value) in info.items():
                    containers[cid][key] = containers.setdefault(
                        cid, dict()).get(key, 0) + value

            if not true_value(
                    _resp.headers.get(HEADER_PREFIX + 'list-truncated')):
                break
            req_params['marker'] = _resp.headers[HEADER_PREFIX + 'list-marker']

        return {'chunk': chunks, 'container': containers}
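
A hedged usage sketch; 'rdir' stands for an already-constructed client exposing this method, and the volume ID is a placeholder:

    # Hypothetical call; 'rdir' and the volume ID are placeholders.
    report = rdir.status('rawx-1', max=500)
    print(report['chunk'])       # aggregated chunk counters
    print(report['container'])   # per-container counters, keyed by CID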
Example no. 14
 def __init__(self, conf, logger, volume):
     self.conf = conf
     self.logger = logger or get_logger(conf)
     self.volume = volume
     self.namespace, self.address = check_volume(self.volume)
     self.run_time = 0
     self.passes = 0
     self.errors = 0
     self.last_reported = 0
     self.last_usage_check = 0
     self.chunks_run_time = 0
     self.bytes_running_time = 0
     self.bytes_processed = 0
     self.total_bytes_processed = 0
     self.total_chunks_processed = 0
     self.usage_target = int_value(conf.get('usage_target'), 0)
     self.usage_check_interval = int_value(conf.get('usage_check_interval'),
                                           3600)
     self.report_interval = int_value(conf.get('report_interval'), 3600)
     self.max_chunks_per_second = int_value(conf.get('chunks_per_second'),
                                            30)
     self.max_bytes_per_second = int_value(conf.get('bytes_per_second'),
                                           10000000)
     self.limit = int_value(conf.get('limit'), 0)
     self.allow_links = true_value(conf.get('allow_links', True))
     self.blob_client = BlobClient(conf)
     self.container_client = ContainerClient(conf, logger=self.logger)
     self.content_factory = ContentFactory(conf)
Example no. 15
 def __init__(self, conf, logger, volume):
     self.conf = conf
     self.logger = logger or get_logger(conf)
     self.volume = volume
     self.namespace, self.address = check_volume(self.volume)
     self.running = False
     self.run_time = 0
     self.passes = 0
     self.errors = 0
     self.last_reported = 0
     self.last_usage_check = 0
     self.chunks_run_time = 0
     self.bytes_running_time = 0
     self.bytes_processed = 0
     self.total_bytes_processed = 0
     self.total_chunks_processed = 0
     self.concurrency = int_value(conf.get('concurrency'), 10)
     self.usage_target = int_value(conf.get('usage_target'), 0)
     self.usage_check_interval = int_value(conf.get('usage_check_interval'),
                                           60)
     self.report_interval = int_value(conf.get('report_interval'), 3600)
     self.max_chunks_per_second = int_value(conf.get('chunks_per_second'),
                                            30)
     self.limit = int_value(conf.get('limit'), 0)
     self.allow_links = true_value(conf.get('allow_links', True))
     self.blob_client = BlobClient(conf)
     self.container_client = ContainerClient(conf, logger=self.logger)
     self.content_factory = ContentFactory(
         conf,
         container_client=self.container_client,
         blob_client=self.blob_client)
     self.excluded_rawx = \
         [rawx for rawx in conf.get('excluded_rawx', '').split(',') if rawx]
     self.fake_excluded_chunks = self._generate_fake_excluded_chunks()
Example no. 16
    def on_account_containers(self, req):
        account_id = self._get_account_id(req)

        info = self.backend.info_account(account_id)
        if not info:
            return NotFound('Account not found')

        marker = req.args.get('marker', '')
        end_marker = req.args.get('end_marker', '')
        prefix = req.args.get('prefix', '')
        limit = max(0, min(ACCOUNT_LISTING_MAX_LIMIT, int_value(
            req.args.get('limit'), 0)))
        if limit <= 0:
            limit = ACCOUNT_LISTING_DEFAULT_LIMIT
        delimiter = req.args.get('delimiter', '')
        s3_buckets_only = true_value(req.args.get('s3_buckets_only', False))

        user_list = self.backend.list_containers(
            account_id, limit=limit, marker=marker, end_marker=end_marker,
            prefix=prefix, delimiter=delimiter,
            s3_buckets_only=s3_buckets_only)

        info['listing'] = user_list
        # TODO(FVE): add "truncated" entry telling if the listing is truncated
        result = json.dumps(info)
        return Response(result, mimetype='text/json')
Example no. 17
 def __init__(self, conf, **kwargs):
     super(BlobIndexer, self).__init__(conf)
     self.logger = get_logger(conf)
     volume = conf.get('volume')
     if not volume:
         raise exc.ConfigurationException('No volume specified for indexer')
     self.volume = volume
     self.passes = 0
     self.errors = 0
     self.successes = 0
     self.last_reported = 0
     self.total_since_last_reported = 0
     self.chunks_run_time = 0
     self.interval = int_value(
         conf.get('interval'), 300)
     self.report_interval = int_value(
         conf.get('report_interval'), 3600)
     self.max_chunks_per_second = int_value(
         conf.get('chunks_per_second'), 30)
     pm = get_pool_manager(pool_connections=10)
     self.index_client = RdirClient(conf, logger=self.logger,
                                    pool_manager=pm)
     self.namespace, self.volume_id = check_volume(self.volume)
     self.convert_chunks = true_value(conf.get('convert_chunks'))
     if self.convert_chunks:
         converter_conf = self.conf.copy()
         converter_conf['no_backup'] = True
         self.converter = BlobConverter(converter_conf, logger=self.logger,
                                        pool_manager=pm)
     else:
         self.converter = None
Example no. 18
 def _get_current_objects(all_versions):
     current_objects = list()
     obj_names = sorted(all_versions.keys())
     for obj_name in obj_names:
         obj = all_versions[obj_name][-1]
         if not true_value(obj['deleted']):
             current_objects.append(obj)
     return current_objects
Example no. 19
 def __init__(self, conf, connection=None):
     self.conf = conf
     self.autocreate = true_value(conf.get('autocreate', 'true'))
     super(AccountBackend, self).__init__(conf, connection)
     self.script_update_container = self.register_script(
         self.lua_update_container)
     self.script_refresh_account = self.register_script(
         self.lua_refresh_account)
     self.script_flush_account = self.register_script(
         self.lua_flush_account)
Example no. 20
 def init(self):
     eventlet.monkey_patch(os=False)
     self.tube = self.conf.get("tube", DEFAULT_TUBE)
     self.cs = ConscienceClient(self.conf, logger=self.logger)
     self.rdir = RdirClient(self.conf, logger=self.logger)
     self._acct_addr = None
     self.graceful_timeout = 1
     self.acct_refresh_interval = int_value(
         self.conf.get('acct_refresh_interval'), 60)
     self.acct_update = true_value(self.conf.get('acct_update', True))
     self.rdir_update = true_value(self.conf.get('rdir_update', True))
     self.app_env['acct_addr'] = self.acct_addr
     if 'handlers_conf' not in self.conf:
         raise ValueError("'handlers_conf' path not defined in conf")
     self.handlers = loadhandlers(self.conf.get('handlers_conf'),
                                  global_conf=self.conf,
                                  app=self)
     super(EventWorker, self).init()
Example no. 21
    def __init__(self, conf, service, **kwargs):
        self.conf = conf
        self.running = False

        for k in ['host', 'port', 'type']:
            if k not in service:
                raise Exception('Missing field "%s" in service configuration' %
                                k)
        self.name = '%s|%s|%s' % \
            (service['type'], service['host'], service['port'])

        self.service = service

        self.rise = int_value(self._load_item_config('rise'), 1)
        self.fall = int_value(self._load_item_config('fall'), 1)
        self.check_interval = float_value(
            self._load_item_config('check_interval'), 1)
        self.deregister_on_exit = true_value(
            self._load_item_config('deregister_on_exit', False))

        self.logger = get_logger(self.conf)
        self.pool_manager = get_pool_manager()
        self.cs = ConscienceClient(self.conf,
                                   pool_manager=self.pool_manager,
                                   logger=self.logger)
        # FIXME: explain that
        self.client = ProxyClient(self.conf,
                                  pool_manager=self.pool_manager,
                                  no_ns_in_url=True,
                                  logger=self.logger)
        self.last_status = False
        self.status = False
        self.failed = False
        self.service_definition = {
            'ns': self.conf['namespace'],
            'type': self.service['type'],
            'addr': '%s:%s' % (self.service['host'], self.service['port']),
            'score': 0,
            'tags': {}
        }
        if self.service.get('slots', None):
            self.service_definition['tags']['tag.slots'] = \
                    ','.join(self.service['slots'])
        for name, tag in (('location', 'tag.loc'),
                          ('service_id', 'tag.service_id'),
                          ('tls', 'tag.tls')):
            if self.service.get(name):
                self.service_definition['tags'][tag] = \
                    self.service[name]

        self.service_checks = list()
        self.service_stats = list()
        self.init_checkers(service)
        self.init_stats(service)
Example no. 22
 def __init__(self,
              key_prefix='IAM:',
              subkey_separator='/',
              logger=None,
              allow_empty_policy_name=True,
              **redis_kwargs):
     self.allow_empty_policy_name = true_value(allow_empty_policy_name)
     self.key_prefix = key_prefix
     self.logger = logger or get_logger(None, 'IAM')
     self.subkey_sep = subkey_separator
     self.redis = RedisConnection(**redis_kwargs)
     self.name_regex = re.compile(r'[\w+=,.@-]+')
Example no. 23
    def test_proxy_decache(self):
        """
        Check that a decache order actually empties the proxy's cache.
        """
        if not true_value(self.conf['config'].get('proxy.cache.enabled')):
            self.skipTest('Proxy cache disabled')
        # Creating a container will put something in both high and low caches.
        ct = 'test-decache-' + random_str(8)
        self.storage.container_create(self.account, ct)
        self._containers.append((self.account, ct))

        status0 = self.admin.proxy_get_cache_status()
        output = self.openio_admin('oioproxy decache' + self.get_format_opts())
        # FIXME(FVE): this will fail when we deploy several proxies
        self.assertOutput('%s OK None\n' % self.conf['proxy'], output)
        status1 = self.admin.proxy_get_cache_status()
        self.assertLess(status1['csm0']['count'], status0['csm0']['count'])
        self.assertLess(status1['meta1']['count'], status0['meta1']['count'])
Example no. 24
    def __init__(self, conf, input_file=None, service_id=None, **kwargs):
        super(BlobRebuilder, self).__init__(conf, **kwargs)

        # counters
        self.bytes_processed = 0
        self.total_bytes_processed = 0

        # input
        self.input_file = input_file
        self.rawx_id = service_id

        # rawx/rdir
        self.rdir_client = RdirClient(self.conf, logger=self.logger)
        self.rdir_fetch_limit = int_value(self.conf.get('rdir_fetch_limit'),
                                          self.DEFAULT_RDIR_FETCH_LIMIT)
        self.rdir_shuffle_chunks = true_value(conf.get('rdir_shuffle_chunks'))
        self.rdir_timeout = float_value(conf.get('rdir_timeout'),
                                        self.DEFAULT_RDIR_TIMEOUT)
Example no. 25
    def apply(self, obj_meta, **kwargs):
        """
        Match then apply the set of rules of this lifecycle configuration
        on the specified object.

        :returns: tuples of (object metadata, rule name, action, status)
        :rtype: generator of 4-tuples

        :notice: you must consume the results or the rules won't be applied.
        """
        if true_value(obj_meta['deleted']):
            return
        for rule in self.rules:
            res = rule.apply(obj_meta, **kwargs)
            if res:
                for action in res:
                    yield obj_meta, rule.id, action[0], action[1]
                    if action[1] != 'Kept':
                        return
            else:
                yield obj_meta, rule.id, "n/a", "Kept"
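
Because apply is a generator, the rules only run while it is iterated, as the docstring warns. A minimal consumption sketch, with invented object metadata and a 'lifecycle' placeholder:

    # Hypothetical consumption; 'lifecycle' and obj_meta are placeholders.
    obj_meta = {'deleted': 'false', 'name': 'photo.jpg'}
    for meta, rule_id, action, status in lifecycle.apply(obj_meta):
        print(rule_id, action, status)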
Example no. 26
    def chunk_fetch(self, volume, limit=1000, rebuild=False,
                    container_id=None, max_attempts=3,
                    start_after=None, shuffle=False, **kwargs):
        """
        Fetch the list of chunks belonging to the specified volume.

        :param volume: the volume to get chunks from
        :type volume: `str`
        :param limit: maximum number of results to return per request
            to the rdir server.
        :type limit: `int`
        :param rebuild: fetch only the chunks that were there
            before the last incident.
        :type rebuild: `bool`
        :keyword container_id: get only chunks belonging to
           the specified container
        :type container_id: `str`
        :keyword start_after: fetch only chunks that appear after
            this container ID
        :type start_after: `str`
        """
        req_body = {'limit': limit}
        if rebuild:
            req_body['rebuild'] = True
        if container_id:
            req_body['container_id'] = container_id
        if start_after:
            req_body['start_after'] = start_after

        while True:
            for i in range(max_attempts):
                try:
                    _resp, resp_body = self._rdir_request(
                        volume, 'POST', 'fetch', json=req_body, **kwargs)
                    break
                except OioNetworkException:
                    # Linear backoff (the first retry is immediate)
                    if i < max_attempts - 1:
                        sleep(i * 1.0)
                        continue
                    # Too many attempts
                    raise

            truncated = _resp.headers.get(
                HEADER_PREFIX + 'list-truncated')
            if truncated is None:
                # TODO(adu): delete when it is no longer used
                if not resp_body:
                    break
                truncated = True
                req_body['start_after'] = resp_body[-1][0]
            else:
                truncated = true_value(truncated)
                if truncated:
                    req_body['start_after'] = _resp.headers[
                        HEADER_PREFIX + 'list-marker']

            if shuffle:
                random.shuffle(resp_body)
            for (key, value) in resp_body:
                container, content, chunk = key.split('|')
                yield container, content, chunk, value

            if not truncated:
                break
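
Like apply in the previous example, chunk_fetch is a generator: the paging and retry logic only runs while it is iterated. A hedged usage sketch with a placeholder volume ID:

    # Hypothetical iteration; 'rdir' and the volume ID are placeholders.
    for container, content, chunk, value in rdir.chunk_fetch('rawx-1',
                                                             limit=100):
        print(container, content, chunk)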
Example no. 27
 def _force_master_wrapper(self, req, *args, **kwargs):
     force_master = true_value(req.args.get('force_master', ''))
     return func(self, req, *args, force_master=force_master, **kwargs)
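
'func' in this last fragment is a closure variable: the wrapper clearly lives inside a decorator that the snippet omits. A plausible reconstruction of the surrounding pattern, hedged rather than verbatim:

    # Hedged reconstruction of the enclosing decorator (not verbatim source).
    from functools import wraps

    def handle_force_master(func):
        @wraps(func)
        def _force_master_wrapper(self, req, *args, **kwargs):
            # Forward the 'force_master' query parameter as a keyword.
            force_master = true_value(req.args.get('force_master', ''))
            return func(self, req, *args, force_master=force_master, **kwargs)
        return _force_master_wrapper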