Example #1
 def __init__(self, conf, **kwargs):
     super(BlobIndexer, self).__init__(conf)
     self.logger = get_logger(conf)
     volume = conf.get('volume')
     if not volume:
         raise exc.ConfigurationException('No volume specified for indexer')
     self.volume = volume
     self.passes = 0
     self.errors = 0
     self.successes = 0
     self.last_reported = 0
     self.total_since_last_reported = 0
     self.chunks_run_time = 0
     self.interval = int_value(
         conf.get('interval'), 300)
     self.report_interval = int_value(
         conf.get('report_interval'), 3600)
     self.max_chunks_per_second = int_value(
         conf.get('chunks_per_second'), 30)
     pm = get_pool_manager(pool_connections=10)
     self.index_client = RdirClient(conf, logger=self.logger,
                                    pool_manager=pm)
     self.namespace, self.volume_id = check_volume(self.volume)
     self.convert_chunks = true_value(conf.get('convert_chunks'))
     if self.convert_chunks:
         converter_conf = self.conf.copy()
         converter_conf['no_backup'] = True
         self.converter = BlobConverter(converter_conf, logger=self.logger,
                                        pool_manager=pm)
     else:
         self.converter = None
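
A minimal configuration sketch for this indexer, limited to the keys read in the constructor above; the volume path is hypothetical and the other values simply restate the defaults.

conf = {
    'volume': '/var/lib/oio/sds/OPENIO/rawx-1',  # required; example path
    'interval': 300,              # seconds between two passes (default 300)
    'report_interval': 3600,      # seconds between two reports (default 3600)
    'chunks_per_second': 30,      # indexing rate limit (default 30)
    'convert_chunks': 'true',     # also run a BlobConverter on each chunk
}
# BlobIndexer(conf) would then resolve the namespace and volume id
# through check_volume() and build its RdirClient.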
Example #2
    def __init__(self, endpoint=None, pool_manager=None, **kwargs):
        """
        :param pool_manager: an optional pool manager that will be reused
        :type pool_manager: `urllib3.PoolManager`
        :param endpoint: base of the URL that will be requested
        :type endpoint: `str`
        :keyword admin_mode: allow talking to a slave/worm namespace
        :type admin_mode: `bool`

        :keyword perfdata: optional dictionary that will be filled with
            metrics of time spent to resolve the meta2 address and
            to do the meta2 request.
        :type perfdata: `dict`
        """
        super(HttpApi, self).__init__()
        self.endpoint = endpoint

        if not pool_manager:
            pool_manager_conf = {
                k: int(v)
                for k, v in iteritems(kwargs)
                if k in _POOL_MANAGER_OPTIONS_KEYS
            }
            pool_manager = get_pool_manager(**pool_manager_conf)
        self.pool_manager = pool_manager

        self.admin_mode = kwargs.get('admin_mode', False)
        self.perfdata = kwargs.get('perfdata')
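
The dict comprehension above forwards only the keyword arguments that get_pool_manager() cares about. Below is a self-contained sketch of that idiom; the real _POOL_MANAGER_OPTIONS_KEYS is defined in oio.common.http_urllib3, and the set used here is only an assumption based on the options exercised in Example #22.

_POOL_MANAGER_OPTIONS_KEYS = {'pool_connections', 'pool_maxsize', 'max_retries'}

def filter_pool_manager_options(**kwargs):
    # Keep only the pool manager options and coerce them to int, so that
    # string values coming from a configuration file are accepted as well.
    return {k: int(v) for k, v in kwargs.items()
            if k in _POOL_MANAGER_OPTIONS_KEYS}

print(filter_pool_manager_options(pool_maxsize='8', admin_mode=True))
# -> {'pool_maxsize': 8}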
Example #3
    def __init__(self, endpoint=None, pool_manager=None,
                 connection='keep-alive', service_type='unknown', **kwargs):
        """
        :param pool_manager: an optional pool manager that will be reused
        :type pool_manager: `urllib3.PoolManager`
        :param endpoint: base of the URL that will be requested
        :type endpoint: `str`
        :keyword admin_mode: allow talking to a slave/worm namespace
        :type admin_mode: `bool`

        :keyword perfdata: optional dictionary that will be filled with
            metrics of time spent to resolve the meta2 address and
            to do the meta2 request.
        :type perfdata: `dict`
        :keyword connection: 'keep-alive' to keep connections open (default)
            or 'close' to explicitly close them.
        """
        self.endpoint = endpoint

        if not pool_manager:
            pool_manager = get_pool_manager(**kwargs)
        self.pool_manager = pool_manager

        self.admin_mode = true_value(kwargs.get('admin_mode', False))
        self.force_master = true_value(kwargs.get('force_master', False))
        self.connection = connection
        self.service_type = service_type
Example #4
    def static_request(method,
                       url,
                       data=None,
                       params=None,
                       headers=None,
                       json=None,
                       http_pool=None):
        if not http_pool:
            http_pool = get_pool_manager()
        # Add query string
        if params:
            out_param = []
            for k, v in params.items():
                if v is not None:
                    if isinstance(v, unicode):
                        v = unicode(v).encode('utf-8')
                    out_param.append((k, v))
            encoded_args = urlencode(out_param)
            url += '?' + encoded_args

        # Convert json and add Content-Type
        headers = headers if headers else {}
        if json:
            headers["Content-Type"] = "application/json"
            data = jsonlib.dumps(json)

        out_kwargs = {}
        out_kwargs['headers'] = headers
        out_kwargs['body'] = data

        return http_pool.request(method, url, **out_kwargs)
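
A hypothetical call to this helper; the URL and payload are made up. Query parameters whose value is None are dropped, and the json argument overrides data and sets the Content-Type header.

pool = get_pool_manager()
resp = static_request('POST', 'http://127.0.0.1:6006/v3.0/OPENIO/example',
                      params={'acct': 'myaccount', 'ref': None},
                      json={'action': 'Touch'},
                      http_pool=pool)
print(resp.status, resp.data)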
Example #5
 def setUp(self):
     super(CommonTestCase, self).setUp()
     self.conf = get_config()
     self.uri = 'http://' + self.conf['proxy']
     self.ns = self.conf['namespace']
     self.account = self.conf['account']
     self.http_pool = get_pool_manager()
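
For reference, the configuration returned by get_config() needs at least the keys read above; the values below are illustrative.

conf = {
    'proxy': '127.0.0.1:6000',   # oio proxy address, host:port
    'namespace': 'OPENIO',
    'account': 'test_account',
}
uri = 'http://' + conf['proxy']
http_pool = get_pool_manager()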
Example #6
 def __init__(self, conf, pool_manager=None):
     self._cache = dict()
     self.conf = conf
     self.pool_manager = pool_manager or get_pool_manager()
     self._client = ConscienceClient(conf=self.conf,
                                     pool_manager=self.pool_manager)
     self.logger = get_logger(conf)
Example #7
    def _autocontainer_loop(self, account, marker=None, limit=None,
                            concurrency=1, **kwargs):
        from functools import partial
        container_marker = self.flatns_manager(marker) if marker else None
        count = 0
        kwargs['pool_manager'] = get_pool_manager(
            pool_maxsize=concurrency * 2)
        # Start to list contents at 'marker' inside the last visited container
        if container_marker:
            for element in depaginate(
                    self.app.client_manager.storage.object_list,
                    listing_key=lambda x: x['objects'],
                    marker_key=lambda x: x.get('next_marker'),
                    truncated_key=lambda x: x['truncated'],
                    account=account, container=container_marker,
                    marker=marker, **kwargs):
                count += 1
                yield element
                if limit and count >= limit:
                    return

        pool = GreenPool(concurrency)
        for object_list in pool.imap(
                partial(self._list_autocontainer_objects,
                        account=account, **kwargs),
                depaginate(self.app.client_manager.storage.container_list,
                           item_key=lambda x: x[0],
                           account=account,
                           marker=container_marker)):
            for element in object_list:
                count += 1
                yield element
                if limit and count >= limit:
                    return
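
For clarity, here is a self-contained sketch of the depaginate pattern used twice above, written from scratch for illustration; the real oio helper may differ, but the callbacks mirror the call sites (listing_key extracts items from a page, truncated_key tells whether more pages exist, marker_key yields the marker for the next call).

def depaginate_sketch(listing_func, listing_key, marker_key, truncated_key,
                      marker=None, **kwargs):
    # Fetch one page at a time and yield its items until the listing
    # reports that it is no longer truncated.
    while True:
        page = listing_func(marker=marker, **kwargs)
        for item in listing_key(page):
            yield item
        if not truncated_key(page):
            return
        marker = marker_key(page)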
Example #8
 def __init__(self,
              conf=None,
              connection_pool=None,
              perfdata=None,
              **kwargs):
     self.http_pool = connection_pool or get_pool_manager(**kwargs)
     self.perfdata = perfdata
     self.cache = ServiceCache(conf, self.http_pool)
Example #9
    def __init__(self, conf=None, perfdata=None,
                 logger=None, connection_pool=None, **kwargs):
        self.conf = conf
        self.perfdata = perfdata

        self.logger = logger or get_logger(self.conf)
        # FIXME(FVE): we do not target the same set of services,
        # we should use a separate connection pool for rawx services.
        self.http_pool = connection_pool or get_pool_manager(**kwargs)
        self.conscience_client = ConscienceClient(conf, logger=self.logger,
                                                  pool_manager=self.http_pool)
Example #10
    def setUp(self):
        super(TestMeta2EventsEmission, self).setUp()
        if not self.conf.get('webhook', ''):
            self.skipTest('webhook is required')

        self.acct = 'AccountWebhook%f' % time.time()
        self.cnt_name = 'TestWebhookEvents%f' % time.time()
        self.obj_name = 'obj%f' % time.time()
        self.storage_api = ObjectStorageApi(self.ns)
        self.pool = get_pool_manager()
        self._clean()
Example #11
    def __init__(self, conf, service, **kwargs):
        self.conf = conf
        self.running = False

        for k in ['host', 'port', 'type']:
            if k not in service:
                raise Exception('Missing field "%s" in service configuration' %
                                k)
        self.name = '%s|%s|%s' % \
            (service['type'], service['host'], service['port'])

        self.service = service

        self.rise = int_value(self._load_item_config('rise'), 1)
        self.fall = int_value(self._load_item_config('fall'), 1)
        self.check_interval = float_value(
            self._load_item_config('check_interval'), 1)
        self.deregister_on_exit = true_value(
            self._load_item_config('deregister_on_exit', False))

        self.logger = get_logger(self.conf)
        self.pool_manager = get_pool_manager()
        self.cs = ConscienceClient(self.conf,
                                   pool_manager=self.pool_manager,
                                   logger=self.logger)
        # FIXME: explain that
        self.client = ProxyClient(self.conf,
                                  pool_manager=self.pool_manager,
                                  no_ns_in_url=True,
                                  logger=self.logger)
        self.last_status = False
        self.status = False
        self.failed = False
        self.service_definition = {
            'ns': self.conf['namespace'],
            'type': self.service['type'],
            'addr': '%s:%s' % (self.service['host'], self.service['port']),
            'score': 0,
            'tags': {}
        }
        if self.service.get('slots', None):
            self.service_definition['tags']['tag.slots'] = \
                    ','.join(self.service['slots'])
        for name, tag in (('location', 'tag.loc'),
                          ('service_id', 'tag.service_id'),
                          ('tls', 'tag.tls')):
            if self.service.get(name):
                self.service_definition['tags'][tag] = \
                    self.service[name]

        self.service_checks = list()
        self.service_stats = list()
        self.init_checkers(service)
        self.init_stats(service)
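
A hypothetical 'service' mapping accepted by this constructor; the mandatory fields come from the explicit check at the top, the optional ones from the tag handling further down, and all values here are examples.

service = {
    'type': 'rawx',                # mandatory
    'host': '127.0.0.1',           # mandatory
    'port': 6201,                  # mandatory
    'slots': ['rawx', 'fast'],     # optional, exported as tag.slots
    'location': 'rack1.server2',   # optional, exported as tag.loc
    'service_id': 'rawx-1',        # optional, exported as tag.service_id
}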
Example #12
    def __init__(self, volume_path, conf, pool_manager=None):
        """
        Initializes a worker for indexing meta2 databases.

        Possible values of conf relating to this worker are:
        - interval: (int) time in seconds between two full scans.
                    Default: half an hour.
        - report_interval: (int) time in seconds between two reports.
                    Default: 300.
        - scanned_per_second: (int) maximum number of databases indexed per
                    second.
        - try_removing_faulty_indexes: if a database that is not supposed to
            be handled by this volume is encountered, attempt to remove it
            from this volume's rdir index if it exists.
            WARNING: the decision is based on a proxy response that could be
            affected by cache inconsistencies, for example; use at your own
            risk. Default: False.

        :param volume_path: The volume path to be indexed
        :param conf: The configuration to be passed to the needed services
        :param pool_manager: A connection pool manager. If none is given, a
                new one with a default size of 10 will be created.
        """
        self.logger = get_logger(conf)
        self._stop = False
        self.volume = volume_path
        self.success_nb = 0
        self.failed_nb = 0
        self.full_scan_nb = 0
        self.last_report_time = 0
        self.last_scan_time = 0
        self.last_index_time = 0
        self.start_time = 0
        self.indexed_since_last_report = 0
        self.scans_interval = int_value(
            conf.get('interval'), 1800)
        self.report_interval = int_value(
            conf.get('report_interval'), 300)
        self.max_indexed_per_second = int_value(
            conf.get('scanned_per_second'), 3000)
        self.namespace, self.volume_id = check_volume_for_service_type(
            self.volume, "meta2")
        self.attempt_bad_index_removal = boolean_value(
            conf.get('try_removing_faulty_indexes', False)
        )

        if not pool_manager:
            pool_manager = get_pool_manager(pool_connections=10)
        self.index_client = RdirClient(conf, logger=self.logger,
                                       pool_manager=pool_manager)
        self.dir_client = DirectoryClient(conf, logger=self.logger,
                                          pool_manager=pool_manager)
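
A configuration sketch restricted to the keys documented in the docstring above; the class name is not shown in this excerpt, so instantiation is only hinted at in a comment, and the volume path is hypothetical.

conf = {
    'interval': 1800,                      # seconds between two full scans
    'report_interval': 300,                # seconds between two reports
    'scanned_per_second': 3000,            # indexing rate limit
    'try_removing_faulty_indexes': False,  # see the WARNING above
}
# worker = <IndexerWorkerClass>('/var/lib/oio/sds/OPENIO/meta2-1', conf)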
Example #13
 def get_chunks_info(chunks):
     pool_manager = get_pool_manager()
     chunk_hash = ""
     chunk_size = ""
     for c in chunks:
         resp = pool_manager.request('HEAD', c['url'])
         if resp.status != 200:
             chunk_size = "%d %s" % (resp.status, resp.reason)
             chunk_hash = "%d %s" % (resp.status, resp.reason)
         else:
             chunk_size = resp.headers.get(
                 'X-oio-chunk-meta-chunk-size',
                 'Missing chunk size header')
             chunk_hash = resp.headers.get(
                 'X-oio-chunk-meta-chunk-hash',
                 'Missing chunk hash header')
         yield (c['pos'], c['url'], c['size'], c['hash'], chunk_size,
                chunk_hash)
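
A hypothetical call to get_chunks_info(); the chunk URL and metadata are made up, and each yielded tuple follows the order (pos, url, recorded size, recorded hash, fetched size, fetched hash).

chunks = [{'pos': '0', 'url': 'http://127.0.0.1:6201/0123456789ABCDEF',
           'size': 262144, 'hash': '9EF48C0B7AB2569EFC4B2E892435E542'}]
for pos, url, size, csum, real_size, real_hash in get_chunks_info(chunks):
    print(pos, url, size, csum, real_size, real_hash)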
Example #14
    def __init__(self, endpoint=None, pool_manager=None, **kwargs):
        """
        :param pool_manager: an optional pool manager that will be reused
        :type pool_manager: `urllib3.PoolManager`
        :param endpoint: base of the URL that will be requested
        :type endpoint: `str`
        :keyword admin_mode: allow talking to a slave/worm namespace
        :type admin_mode: `bool`
        """
        super(HttpApi, self).__init__()
        self.endpoint = endpoint

        if not pool_manager:
            pool_manager_conf = {
                k: int(v)
                for k, v in kwargs.items()
                if k in _POOL_MANAGER_OPTIONS_KEYS
            }
            pool_manager = get_pool_manager(**pool_manager_conf)
        self.pool_manager = pool_manager

        self.admin_mode = kwargs.get('admin_mode', False)
Example #15
    def __init__(self,
                 endpoint=None,
                 pool_manager=None,
                 connection='keep-alive',
                 **kwargs):
        """
        :param pool_manager: an optional pool manager that will be reused
        :type pool_manager: `urllib3.PoolManager`
        :param endpoint: base of the URL that will be requested
        :type endpoint: `str`
        :keyword admin_mode: allow talking to a slave/worm namespace
        :type admin_mode: `bool`

        :keyword perfdata: optional dictionary that will be filled with
            metrics of time spent to resolve the meta2 address and
            to do the meta2 request.
        :type perfdata: `dict`
        :keyword connection: 'keep-alive' to keep connections open (default)
            or 'close' to explicitly close them.
        """
        super(HttpApi, self).__init__()
        self.endpoint = endpoint

        if not pool_manager:
            pool_manager_conf = {
                k: int(v)
                for k, v in kwargs.items()
                if k in _POOL_MANAGER_OPTIONS_KEYS
            }
            pool_manager = get_pool_manager(**pool_manager_conf)
        self.pool_manager = pool_manager

        self.admin_mode = true_value(kwargs.get('admin_mode', False))
        self.force_master = true_value(kwargs.get('force_master', False))
        self.perfdata = kwargs.get('perfdata')
        self.connection = connection
Example #16
 def init(self):
     self.container_client = ContainerClient(self.conf, logger=self.logger)
     self.endpoint = self.conf.get('endpoint')
     # TODO configure pool manager
     self.http = get_pool_manager()
Example #17
 def pool_manager(self):
     if self._pool_manager is None:
         from oio.common.http_urllib3 import get_pool_manager
         # TODO(FVE): load parameters from self._options or self.ns_conf
         self._pool_manager = get_pool_manager()
     return self._pool_manager
Example #18
 def __init__(self):
     self.http_pool = get_pool_manager()
Example #19
 def __init__(self, conf, log):
     from oio.common.http_urllib3 import get_pool_manager
     self.log = log
     self.pool = get_pool_manager()
     self.url_prefix = 'http://%s/v3.0/%s/admin/status?type=meta1&cid=' % (
         conf['proxyd_url'], conf['namespace'])
Example #20
 def setUp(self):
     super(RdirTestCase, self).setUp()
     self._http_pool = get_pool_manager(max_retries=10, backoff_factor=0.05)
     self.garbage_files = list()
     self.garbage_procs = list()
Example #21
 def setUp(self):
     super(RdirTestCase, self).setUp()
     self.http_pool = get_pool_manager(max_retries=10)
     self.garbage_files = list()
     self.garbage_procs = list()
Example #22
 def test_pool_manager_parameters(self):
     get_pool_manager(pool_connections=5)
     get_pool_manager(pool_connections='5')
     self.assertRaises(ValueError,
                       get_pool_manager,
                       pool_connections='cinq')
     get_pool_manager(pool_maxsize=5)
     get_pool_manager(pool_maxsize='5')
     self.assertRaises(ValueError, get_pool_manager, pool_maxsize='cinq')
     get_pool_manager(max_retries=5)
     get_pool_manager(max_retries='5')
     self.assertRaises(ValueError, get_pool_manager, max_retries='cinq')
     get_pool_manager(backoff_factor=5, max_retries=5)
     get_pool_manager(backoff_factor='5', max_retries=5)
     self.assertRaises(ValueError,
                       get_pool_manager,
                       backoff_factor='cinq',
                       max_retries=5)
     get_pool_manager(ignored='ignored')
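
This test documents the keyword arguments get_pool_manager() accepts, that string values are coerced (raising ValueError when they are not numeric), and that unknown keywords are ignored. A typical tuned call might therefore look like the following; the values and the URL are arbitrary.

pool = get_pool_manager(pool_connections=10, pool_maxsize=32,
                        max_retries=5, backoff_factor=0.1)
resp = pool.request('GET', 'http://127.0.0.1:6006/v3.0/OPENIO/status')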
Example #23
 def http_pool(self):
     if not self._http_pool:
         self._http_pool = get_pool_manager()
     return self._http_pool
Example #24
 def __init__(self, connection_pool=None, perfdata=None, **kwargs):
     self.http_pool = connection_pool or get_pool_manager()
     self.perfdata = perfdata
Example #25
 def pool_manager(self):
     if self._pool_manager is None:
         from oio.common.http_urllib3 import get_pool_manager
         # get_pool_manager already filters arguments it cares about
         self._pool_manager = get_pool_manager(**self._options)
     return self._pool_manager
Example #26
    def test_container_snapshot(self):
        name = random_str(16)
        self.api.container_create(self.account, name)
        test_object = "test_object"
        self.api.object_create(self.account,
                               name,
                               data="0" * 128,
                               obj_name=test_object)
        # A snapshot cannot have the same name and the same account
        self.assertRaises(exc.ClientException, self.api.container_snapshot,
                          self.account, name, self.account, name)
        snapshot_name = random_str(16)
        self.assertNotEqual(snapshot_name, name)
        # A non-existing snapshot name should work
        self.api.container_snapshot(self.account, name, self.account,
                                    snapshot_name)
        # An already taken snapshot name should fail
        self.assertRaises(exc.ClientException, self.api.container_snapshot,
                          self.account, name, self.account, snapshot_name)
        # The snapshot container is frozen, so object creation should fail
        self.assertRaises(exc.ServiceBusy,
                          self.api.object_create,
                          self.account,
                          snapshot_name,
                          data="1" * 128,
                          obj_name="should_not_be_created")

        # fullpath is set on every chunk
        chunk_list = self.api.object_locate(self.account, name, test_object)[1]
        # check that every chunk is different from the target
        snapshot_list = self.api.object_locate(self.account, snapshot_name,
                                               test_object)[1]

        for c, t in zip(chunk_list, snapshot_list):
            self.assertNotEqual(c['url'], t['url'])
        # check target can be used
        self.api.object_create(self.account,
                               name,
                               data="0" * 128,
                               obj_name="should_be_created")
        # Generate hard links of each chunk of the object
        url_list = [c['url'] for c in chunk_list]
        copy_list = self.api._generate_copies(url_list)
        # Every chunk should have the fullpath
        fullpath = self.api._generate_fullpath(self.account, snapshot_name,
                                               'copy', 12456)
        self.api._link_chunks(url_list, copy_list, fullpath[0])
        # check that every copy exists
        pool_manager = get_pool_manager()
        for copy in copy_list:
            resp = pool_manager.request('HEAD', copy)
            self.assertEqual(resp.status, 200)
            self.assertIn(fullpath[0],
                          resp.headers[CHUNK_HEADERS['full_path']].split(','))
        # Snapshot of a non-existing container should fail
        self.assertRaises(exc.NoSuchContainer, self.api.container_snapshot,
                          random_str(16), random_str(16), random_str(16),
                          random_str(16))
        # A snapshot needs to have an account
        self.assertRaises(exc.ClientException, self.api.container_snapshot,
                          self.account, name, None, random_str(16))
        # A snapshot needs to have a name
        self.assertRaises(exc.ClientException, self.api.container_snapshot,
                          self.account, name, random_str(16), None)