Example #1
0
 def take_action(self, parsed_args):
     """List local services known to conscience, with their scores.

     :param parsed_args: parsed CLI arguments; ``srv_types`` (optional)
         restricts the output to the given service types.
     :returns: a ``(columns, row_iterator)`` tuple for the table formatter.
     """
     self.log.debug('take_action(%s)', parsed_args)
     srv_types = parsed_args.srv_types
     local_scores = boolean_value(
         self.app.client_manager.sds_conf.get('proxy.quirk.local_scores'),
         False)
     if not local_scores:
         # Logger.warn() is a deprecated alias of warning()
         self.log.warning("'proxy.quirk.local_scores' not set, "
                          "scores won't be realistic.")
     results = []
     data = self.app.client_manager.conscience.local_services()
     for srv in data:
         tags = srv['tags']
         location = tags.get('tag.loc', 'n/a')
         slots = tags.get('tag.slots', 'n/a')
         volume = tags.get('tag.vol', 'n/a')
         service_id = tags.get('tag.service_id', 'n/a')
         addr = srv['addr']
         up = tags.get('tag.up', 'n/a')
         score = srv['score']
         locked = boolean_value(tags.get('tag.lock'), False)
         srv_type = srv['type']
         if not srv_types or srv_type in srv_types:
             results.append((srv_type, addr, service_id, volume, location,
                             slots, up, score, locked))
     columns = ('Type', 'Addr', 'Service Id', 'Volume', 'Location', 'Slots',
                'Up', 'Score', 'Locked')
     # iter() replaces the redundant (r for r in results) generator
     return columns, iter(results)
Example #2
0
 def init(self):
     """Initialize the filter: account client, cache and tunables."""
     super(ReplicateFilter, self).init()
     conf = self.conf
     self.account = self.app_env['account_client']
     # Each tunable falls back to its module-level default value.
     self.cache_duration = float_value(
         conf.get('cache_duration'), CACHE_DURATION)
     self.cache_size = int_value(conf.get('cache_size'), CACHE_SIZE)
     self.cache = CacheDict(self.cache_size)
     self.check_account = boolean_value(
         conf.get('check_replication_enabled'), False)
     self.connection_timeout = float_value(
         conf.get('connection_timeout'), CONNECTION_TIMEOUT)
     self.force_master = boolean_value(conf.get('force_master'), False)
     self.read_timeout = float_value(
         conf.get('read_timeout'), READ_TIMEOUT)
Example #3
0
 def _list_services(self, parsed_args):
     """Yield one row per service known to conscience.

     :param parsed_args: parsed CLI arguments; ``srv_types`` (optional)
         restricts the service types listed, ``stats`` appends a column
         with the "stat.*" tags.
     :yields: tuples of service fields (plus stats when requested).
     """
     if not parsed_args.srv_types:
         parsed_args.srv_types = \
                 self.app.client_manager.conscience.service_types()
     for srv_type in parsed_args.srv_types:
         try:
             data = self.app.client_manager.conscience.all_services(
                 srv_type, parsed_args.stats)
         except OioException as exc:
             # Keep listing the remaining types, but report the failure
             self.success = False
             self.log.error("Failed to list services of type %s: %s",
                            srv_type, exc)
             continue
         for srv in data:
             tags = srv['tags']
             location = tags.get('tag.loc', 'n/a')
             slots = tags.get('tag.slots', 'n/a')
             volume = tags.get('tag.vol', 'n/a')
             service_id = tags.get('tag.service_id', 'n/a')
             addr = srv['addr']
             # The default belongs to boolean_value(), not tags.get(),
             # so an unset lock tag reliably yields False
             locked = boolean_value(tags.get('tag.lock'), False)
             up = tags.get('tag.up', 'n/a')
             score = srv['score']
             values = (srv_type, addr, service_id, volume, location,
                       slots, up, score, locked)
             if parsed_args.stats:
                 stats = [
                     "%s=%s" % (k, v) for k, v in tags.items()
                     if k.startswith('stat.')
                 ]
                 values += (" ".join(stats), )
             yield values
Example #4
0
 def container_flush(self,
                     account=None,
                     reference=None,
                     cid=None,
                     **kwargs):
     """Ask the container service to flush the container.

     :param account: account the container belongs to.
     :param reference: name of the container.
     :param cid: container ID, alternative to account/reference.
     :returns: a dict telling whether the flush was truncated.
     """
     params = self._make_params(account, reference, cid=cid)
     resp, _ = self._request('POST', '/flush', params=params, **kwargs)
     # The fallback belongs to boolean_value(), not getheader():
     # a missing header must translate to truncated=False
     return {
         'truncated': boolean_value(resp.getheader('x-oio-truncated'),
                                    False)
     }
Example #5
0
    def sanitize_params(cls, job_params):
        """Validate and normalize the rawx rebuild job parameters.

        :raises ValueError: when the mandatory service ID is missing.
        :returns: a ``(sanitized_params, job_lock)`` tuple.
        """
        params, _ = super(RawxRebuildJob,
                          cls).sanitize_params(job_params)

        # specific configuration
        srv_id = job_params.get('service_id')
        if not srv_id:
            raise ValueError('Missing service ID')
        params['service_id'] = srv_id

        params['rawx_timeout'] = float_value(
            job_params.get('rawx_timeout'), cls.DEFAULT_RAWX_TIMEOUT)

        # Boolean switches, each with its class-level default
        for key, default in (
                ('dry_run', cls.DEFAULT_DRY_RUN),
                ('allow_same_rawx', cls.DEFAULT_ALLOW_SAME_RAWX),
                ('try_chunk_delete', cls.DEFAULT_TRY_CHUNK_DELETE),
                ('allow_frozen_container', cls.DEFAULT_ALLOW_FROZEN_CT)):
            params[key] = boolean_value(job_params.get(key), default)

        # An explicit incident date implies declaring an incident;
        # otherwise the incident date is "now" when declaration is enabled
        incident_date = int_value(
            job_params.get('set_specific_incident_date'), None)
        if incident_date is not None:
            declare_incident = True
        else:
            declare_incident = boolean_value(
                job_params.get('set_incident_date'),
                cls.DEFAULT_DECLARE_INCIDENT_DATE)
            if declare_incident:
                incident_date = int(time.time())
        params['set_incident_date'] = declare_incident
        params['set_specific_incident_date'] = incident_date

        return params, 'rawx/%s' % srv_id
Example #6
0
    def __init__(self, conf):
        """Set up the Redis-backed account backend from ``conf``."""
        self.conf = conf
        # Keep only the redis_* options, with their prefix stripped
        redis_conf = {key[6:]: val for key, val in self.conf.items()
                      if key.startswith("redis_")}
        redis_host = redis_conf.pop('host', None)
        if redis_host:
            # Append the default port when the host spec has none
            if urlparse('http://' + redis_host).port is None:
                redis_host = '%s:%s' % (redis_host,
                                        redis_conf.pop('port', '6379'))
        # TODO(adu): Delete the non-prefixed fallbacks when no longer used
        sentinel_hosts = redis_conf.pop(
            'sentinel_hosts', self.conf.get('sentinel_hosts'))
        sentinel_name = redis_conf.pop(
            'sentinel_name', self.conf.get('sentinel_master_name'))
        super(AccountBackend, self).__init__(
            host=redis_host,
            sentinel_hosts=sentinel_hosts,
            sentinel_name=sentinel_name,
            **redis_conf)
        self.autocreate = boolean_value(conf.get('autocreate'), True)
        # Key prefixes, each overridable through the configuration
        self._account_prefix = conf.get('account_prefix', ACCOUNT_KEY_PREFIX)
        self._bucket_prefix = conf.get('bucket_prefix', BUCKET_KEY_PREFIX)
        self._bucket_list_prefix = conf.get(
            'bucket_list_prefix', BUCKET_LIST_PREFIX)
        self._container_list_prefix = conf.get(
            'container_list_prefix', CONTAINER_LIST_PREFIX)
        self._bucket_lock_prefix = conf.get(
            'bucket_lock_prefix', BUCKET_LOCK_KEY_PREFIX)

        # Pre-register every Lua script used by this backend
        self.script_update_container = self.register_script(
            self.lua_update_container % {
                'bucket_list_prefix': self._bucket_list_prefix})
        for attr, source in (
                ('script_refresh_bucket', self.lua_refresh_bucket_batch),
                ('script_refresh_account', self.lua_refresh_account),
                ('script_flush_account', self.lua_flush_account),
                ('script_get_container_info',
                 self.lua_get_extended_container_info),
                ('script_get_lock_bucket', self.lua_lock_bucket)):
            setattr(self, attr, self.register_script(source))
Example #7
0
    def __init__(self, volume_path, conf, pool_manager=None):
        """
        Initializes an Indexing worker for indexing meta2 databases.

        Possible values of conf relating to this worker are:
        - interval: (int) in sec time between two full scans. Default: half an
                    hour.
        - report_interval: (int) in sec, time between two reports: Default: 300
        - scanned_per_second: (int) maximum number of indexed databases /s.
        - try_removing_faulty_indexes : In the event where we encounter a
            database that's not supposed to be handled by this volume, attempt
            to remove it from this volume rdir index if it exists
            WARNING: The decision is based off of a proxy response, that could
            be affected by cache inconsistencies for example, use at your own
            risk. Default: False

        :param volume_path: The volume path to be indexed
        :param conf: The configuration to be passed to the needed services
        :param pool_manager: A connection pool manager. If none is given, a
                new one with a default size of 10 will be created.
        """
        self.logger = get_logger(conf)
        self._stop = False
        self.volume = volume_path
        # Run counters, reset/updated as scans progress
        self.success_nb = 0
        self.failed_nb = 0
        self.full_scan_nb = 0
        self.last_report_time = 0
        self.last_scan_time = 0
        self.last_index_time = 0
        self.start_time = 0
        self.indexed_since_last_report = 0
        self.scans_interval = int_value(
            conf.get('interval'), 1800)
        self.report_interval = int_value(
            conf.get('report_interval'), 300)
        self.max_indexed_per_second = int_value(
            conf.get('scanned_per_second'), 3000)
        self.namespace, self.volume_id = check_volume_for_service_type(
            self.volume, "meta2")
        # The default belongs to boolean_value(), not conf.get(),
        # so the documented "Default: False" holds for an unset key
        self.attempt_bad_index_removal = boolean_value(
            conf.get('try_removing_faulty_indexes'), False)

        if not pool_manager:
            pool_manager = get_pool_manager(pool_connections=10)
        self.index_client = RdirClient(conf, logger=self.logger,
                                       pool_manager=pool_manager)
        self.dir_client = DirectoryClient(conf, logger=self.logger,
                                          pool_manager=pool_manager)
Example #8
0
 def cast_fields(self, info):
     """
     Cast dict entries to the type they are supposed to be.
     """
     for key in (b'bytes', b'objects'):
         try:
             info[key] = int_value(info.get(key), 0)
         except (TypeError, ValueError):
             pass
     # The replication flag is stored as UTF-8 bytes; decode before casting
     repli_key = BUCKET_PROP_REPLI_ENABLED.encode('utf-8')
     try:
         raw = info.get(repli_key)
         info[repli_key] = boolean_value(
             raw.decode('utf-8') if raw is not None else None)
     except (TypeError, ValueError):
         pass
Example #9
0
 def cast_fields(self, info):
     """
     Cast dict entries to the type they are supposed to be.
     """
     int_keys = (b'bytes', b'objects', b'damaged_objects',
                 b'missing_chunks')
     for key in int_keys:
         try:
             info[key] = int_value(info.get(key), 0)
         except (TypeError, ValueError):
             pass
     # The replication flag is stored as UTF-8 bytes; decode before casting
     repli_key = BUCKET_PROP_REPLI_ENABLED.encode('utf-8')
     try:
         info[repli_key] = boolean_value(
             info.get(repli_key, b'').decode('utf-8'))
     except (TypeError, ValueError):
         pass
Example #10
0
 def flush_and_check(truncated=False, objects=0, usage=0):
     """Flush the container, then verify counters and listing."""
     resp = self.request('POST', self.url_container('flush'),
                         params=params)
     self.assertEqual(204, resp.status)
     self.assertEqual(
         truncated,
         boolean_value(resp.getheader('x-oio-truncated')))
     self._wait_account_meta2()
     # The object count and usage must match in the container properties
     resp = self.request('POST', self.url_container('get_properties'),
                         params=params)
     props = self.json_loads(resp.data)
     self.assertEqual(props['system']['sys.m2.objects'], str(objects))
     self.assertEqual(props['system']['sys.m2.usage'], str(usage))
     # The listing must agree with the expected object count
     resp = self.request('GET', self.url_container('list'),
                         params=params)
     listing = self.json_loads(resp.data)
     self.assertEqual(len(listing['objects']), objects)