Example #1
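Lazy, thread-safe loading of the API backends: the cache is checked, the per-backend lock is taken, and the cache is re-checked before the driver is actually built (double-checked locking).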
    def _lazy_load(self, name):
        # NOTE(sileht): We don't care about raising errors here; if
        # something fails, this will just raise a 500 until the backend
        # is ready.
        if name not in self.backends:
            with self.BACKEND_LOCKS[name]:
                # Recheck: it may have been created in the meantime.
                if name not in self.backends:
                    if name == "coordinator":
                        # NOTE(jd) This coordinator is never stopped. I don't
                        # think it's a real problem since the Web app can
                        # never really be stopped anyway, except by quitting
                        # it entirely.
                        self.backends[name] = (
                            metricd.get_coordinator_and_start(
                                str(uuid.uuid4()),
                                self.conf.coordination_url)
                        )
                    elif name == "storage":
                        self.backends[name] = (
                            gnocchi_storage.get_driver(self.conf)
                        )
                    elif name == "incoming":
                        self.backends[name] = (
                            gnocchi_incoming.get_driver(self.conf)
                        )
                    elif name == "indexer":
                        self.backends[name] = (
                            gnocchi_indexer.get_driver(self.conf)
                        )
                    else:
                        raise RuntimeError("Unknown driver %s" % name)

        return self.backends[name]
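The method relies on two attributes that are not shown in the snippet: self.backends (the cache) and BACKEND_LOCKS. A minimal sketch of how they could be declared, assuming one lock per backend name; the class name here is hypothetical:

import threading

class GnocchiHook(object):  # hypothetical container class, for illustration
    # One lock per lazily-loaded backend, so loading "storage" does not
    # block a concurrent load of "indexer".
    BACKEND_LOCKS = {name: threading.Lock()
                     for name in ("coordinator", "storage",
                                  "incoming", "indexer")}

    def __init__(self, conf):
        self.conf = conf
        self.backends = {}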
Example #2
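The same lazy loader with more compact call formatting; the logic is identical to Example #1.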
    def _lazy_load(self, name):
        # NOTE(sileht): We don't care about raising errors here; if
        # something fails, this will just raise a 500 until the backend
        # is ready.
        if name not in self.backends:
            with self.BACKEND_LOCKS[name]:
                # Recheck: it may have been created in the meantime.
                if name not in self.backends:
                    if name == "coordinator":
                        # NOTE(jd) This coordinator is never stopped. I don't
                        # think it's a real problem since the Web app can
                        # never really be stopped anyway, except by quitting
                        # it entirely.
                        self.backends[name] = (
                            metricd.get_coordinator_and_start(
                                str(uuid.uuid4()), self.conf.coordination_url))
                    elif name == "storage":
                        self.backends[name] = (gnocchi_storage.get_driver(
                            self.conf))
                    elif name == "incoming":
                        self.backends[name] = (gnocchi_incoming.get_driver(
                            self.conf))
                    elif name == "indexer":
                        self.backends[name] = (gnocchi_indexer.get_driver(
                            self.conf))
                    else:
                        raise RuntimeError("Unknown driver %s" % name)

        return self.backends[name]
Example #3
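load_app() builds the WSGI application: it instantiates any driver not passed in, locates the Paste configuration file, stores the app configuration under a random key in the global APPCONFIGS registry, and wraps the app in CORS middleware.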
def load_app(conf, indexer=None, storage=None, incoming=None,
             not_implemented_middleware=True):
    global APPCONFIGS

    # NOTE(sileht): We load the storage, incoming and indexer drivers up
    # front, so all backends are ready before the WSGI app is built.
    if not storage:
        storage = gnocchi_storage.get_driver(conf)
    if not incoming:
        incoming = gnocchi_incoming.get_driver(conf)
    if not indexer:
        indexer = gnocchi_indexer.get_driver(conf)

    # Build the WSGI app
    cfg_path = conf.api.paste_config
    if not os.path.isabs(cfg_path):
        cfg_path = conf.find_file(cfg_path)

    if cfg_path is None or not os.path.exists(cfg_path):
        LOG.debug("No api-paste configuration file found! Using default.")
        cfg_path = os.path.abspath(pkg_resources.resource_filename(
            __name__, "api-paste.ini"))

    config = dict(conf=conf, indexer=indexer, storage=storage,
                  incoming=incoming,
                  not_implemented_middleware=not_implemented_middleware)
    configkey = str(uuid.uuid4())
    APPCONFIGS[configkey] = config

    LOG.info("WSGI config used: %s", cfg_path)

    appname = "gnocchi+" + conf.api.auth_mode
    app = deploy.loadapp("config:" + cfg_path, name=appname,
                         global_conf={'configkey': configkey})
    return cors.CORS(app, conf=conf)
Example #4
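A small load-injection tool: it registers its own CLI options, then starts one worker per metric; each worker creates a metric and pushes batches of random measures into the incoming driver.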
def injector():
    conf = cfg.ConfigOpts()
    conf.register_cli_opts([
        cfg.IntOpt("metrics", default=1, min=1),
        cfg.StrOpt("archive-policy-name", default="low"),
        cfg.StrOpt("creator", default="admin"),
        cfg.IntOpt("batch-of-measures", default=1000),
        cfg.IntOpt("measures-per-batch", default=10),
    ])
    conf = service.prepare_service(conf=conf)
    index = indexer.get_driver(conf)
    instore = incoming.get_driver(conf)

    def todo():
        metric = index.create_metric(
            uuid.uuid4(),
            creator=conf.creator,
            archive_policy_name=conf.archive_policy_name)

        for _ in six.moves.range(conf.batch_of_measures):
            measures = [
                incoming.Measure(
                    utils.dt_in_unix_ns(utils.utcnow()), random.random())
                for __ in six.moves.range(conf.measures_per_batch)]
            instore.add_measures(metric, measures)

    with futures.ThreadPoolExecutor(max_workers=conf.metrics) as executor:
        for m in six.moves.range(conf.metrics):
            executor.submit(todo)
Example #5
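A collector constructor: it wires up the incoming and indexer drivers, makes sure its resource type exists, and prepares nested per-host measure buffers.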
    def __init__(self, conf):
        self.conf = conf
        self.incoming = incoming.get_driver(self.conf)
        self.indexer = indexer.get_driver(self.conf)
        self._ensure_resource_type_exists()

        self._hosts = {}
        self._measures = collections.defaultdict(
            lambda: collections.defaultdict(list))
Example #6
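A similar constructor that additionally keeps per-kind metric maps (gauges, counters, absolute values).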
    def __init__(self, conf):
        self.conf = conf
        self.incoming = incoming.get_driver(self.conf)
        self.indexer = indexer.get_driver(self.conf)
        self.resource_type = None

        self._ensure_resource_type_exists()
        self.gauges = {}
        self.counters = {}
        self.absolute = {}
Example #7
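_inject_from_conf() builds all four backends (incoming, coordinator, storage, indexer) from the configuration and hands them to _inject().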
def _inject_from_conf(conf,
                      metrics, measures, archive_policy_name="low",
                      process=False, interval=None):
    inc = incoming.get_driver(conf)
    coord = metricd.get_coordinator_and_start(str(uuid.uuid4()),
                                              conf.coordination_url)
    store = storage.get_driver(conf)
    idx = indexer.get_driver(conf)
    return _inject(inc, coord, store, idx,
                   metrics, measures, archive_policy_name, process, interval)
Example #8
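An AMQP measure-consumer constructor: it records the expected message count and builds the incoming and indexer drivers from the supplied configuration.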
    def __init__(self, url, count, conf):
        super(Amqp, self).__init__()
        self.url = url
        self.expected = count
        self.received = 0
        self.conf = conf
        self.incoming = incoming.get_driver(self.conf)
        self.indexer = indexer.get_driver(self.conf)
        self.gauges = {}
        self.counters = {}
        self.absolute = {}
        self.peer_close_is_error = True
Example #9
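metricd worker configuration: a member id built from the hostname, the worker id and a uuid is used to join the coordination group before the storage, incoming and indexer drivers are loaded.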
    def _configure(self):
        member_id = "%s.%s.%s" % (
            socket.gethostname(),
            self.worker_id,
            # NOTE(jd) Still use a uuid here so we're
            # sure there's no conflict in case of
            # crash/restart
            str(uuid.uuid4()))
        self.coord = get_coordinator_and_start(member_id,
                                               self.conf.coordination_url)
        self.store = storage.get_driver(self.conf, self.coord)
        self.incoming = incoming.get_driver(self.conf)
        self.index = indexer.get_driver(self.conf)
Example #10
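A later revision of the same _configure() in which the storage driver no longer takes the coordinator and a chef.Chef is created from the four backends.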
    def _configure(self):
        member_id = "%s.%s.%s" % (socket.gethostname(),
                                  self.worker_id,
                                  # NOTE(jd) Still use a uuid here so we're
                                  # sure there's no conflict in case of
                                  # crash/restart
                                  str(uuid.uuid4()))
        self.coord = get_coordinator_and_start(member_id,
                                               self.conf.coordination_url)
        self.store = storage.get_driver(self.conf)
        self.incoming = incoming.get_driver(self.conf)
        self.index = indexer.get_driver(self.conf)
        self.chef = chef.Chef(self.coord, self.incoming,
                              self.index, self.store)
Example #11
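metricd_tester(), a profiling helper: it collects up to stop_after_processing_metrics metric ids from the incoming sacks and processes them synchronously.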
def metricd_tester(conf):
    # NOTE(sileht): This method is designed to be profiled; we want to
    # avoid issues with the profiler and os.fork(), which is why we
    # don't use the MetricdServiceManager.
    index = indexer.get_driver(conf)
    s = storage.get_driver(conf)
    inc = incoming.get_driver(conf)
    metrics = set()
    for i in six.moves.range(inc.NUM_SACKS):
        metrics.update(inc.list_metric_with_measures_to_process(i))
        if len(metrics) >= conf.stop_after_processing_metrics:
            break
    s.process_new_measures(index, inc,
                           list(metrics)[:conf.stop_after_processing_metrics],
                           True)
Example #12
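The same helper after the sack API change: sacks are iterated with iter_sacks() and processing goes through chef.Chef.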
def metricd_tester(conf):
    # NOTE(sileht): This method is designed to be profiled; we want to
    # avoid issues with the profiler and os.fork(), which is why we
    # don't use the MetricdServiceManager.
    index = indexer.get_driver(conf)
    s = storage.get_driver(conf)
    inc = incoming.get_driver(conf)
    metrics = set()
    for sack in inc.iter_sacks():
        metrics.update(inc.list_metric_with_measures_to_process(sack))
        if len(metrics) >= conf.stop_after_processing_metrics:
            break
    c = chef.Chef(None, inc, index, s)
    c.process_new_measures(
        list(metrics)[:conf.stop_after_processing_metrics], True)
Example #13
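The latest variant processes whole sacks and counts processed metrics, skipping sacks that are already locked by another worker.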
def metricd_tester(conf):
    # NOTE(sileht): This method is designed to be profiled; we want to
    # avoid issues with the profiler and os.fork(), which is why we
    # don't use the MetricdServiceManager.
    index = indexer.get_driver(conf)
    s = storage.get_driver(conf)
    inc = incoming.get_driver(conf)
    c = chef.Chef(None, inc, index, s)
    metrics_count = 0
    for sack in inc.iter_sacks():
        try:
            metrics_count += c.process_new_measures_for_sack(sack, True)
        except chef.SackAlreadyLocked:
            continue
        if metrics_count >= conf.stop_after_processing_metrics:
            break
Example #14
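A statsd-style constructor that creates the generic resource measures will be attached to, logging whether it already existed.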
    def __init__(self, conf):
        self.conf = conf
        self.incoming = incoming.get_driver(self.conf)
        self.indexer = indexer.get_driver(self.conf)
        try:
            self.indexer.create_resource('generic',
                                         self.conf.statsd.resource_id,
                                         self.conf.statsd.creator)
        except indexer.ResourceAlreadyExists:
            LOG.debug("Resource %s already exists",
                      self.conf.statsd.resource_id)
        else:
            LOG.info("Created resource %s", self.conf.statsd.resource_id)
        self.gauges = {}
        self.counters = {}
        self.times = {}
Example #15
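A load_app() variant from a version where the storage driver still required a coordinator, started on demand.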
def load_app(conf,
             indexer=None,
             storage=None,
             incoming=None,
             coord=None,
             not_implemented_middleware=True):
    global APPCONFIGS

    if not storage:
        if not coord:
            # NOTE(jd) This coordinator is never stopped. I don't think it's
            # a real problem since the Web app can never really be stopped
            # anyway, except by quitting it entirely.
            coord = metricd.get_coordinator_and_start(conf.coordination_url)
        storage = gnocchi_storage.get_driver(conf, coord)
    if not incoming:
        incoming = gnocchi_incoming.get_driver(conf)
    if not indexer:
        indexer = gnocchi_indexer.get_driver(conf)

    # Build the WSGI app
    cfg_path = conf.api.paste_config
    if not os.path.isabs(cfg_path):
        cfg_path = conf.find_file(cfg_path)

    if cfg_path is None or not os.path.exists(cfg_path):
        LOG.debug("No api-paste configuration file found! Using default.")
        cfg_path = os.path.abspath(
            pkg_resources.resource_filename(__name__, "api-paste.ini"))

    config = dict(conf=conf,
                  indexer=indexer,
                  storage=storage,
                  incoming=incoming,
                  not_implemented_middleware=not_implemented_middleware)
    configkey = str(uuid.uuid4())
    APPCONFIGS[configkey] = config

    LOG.info("WSGI config used: %s", cfg_path)

    appname = "gnocchi+" + conf.api.auth_mode
    app = deploy.loadapp("config:" + cfg_path,
                         name=appname,
                         global_conf={'configkey': configkey})
    return cors.CORS(app, conf=conf)
Example #16
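The gnocchi-upgrade entry point: each backend upgrade can be skipped individually, and default archive policies plus a catch-all rule are created when the index is empty.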
def upgrade():
    conf = cfg.ConfigOpts()
    sack_number_opt = copy.copy(_SACK_NUMBER_OPT)
    sack_number_opt.default = 128
    conf.register_cli_opts([
        cfg.BoolOpt("skip-index", default=False, help="Skip index upgrade."),
        cfg.BoolOpt("skip-storage",
                    default=False,
                    help="Skip storage upgrade."),
        cfg.BoolOpt("skip-incoming",
                    default=False,
                    help="Skip incoming storage upgrade."),
        cfg.BoolOpt("skip-archive-policies-creation",
                    default=False,
                    help="Skip default archive policies creation."),
        sack_number_opt,
    ])
    conf = service.prepare_service(conf=conf, log_to_std=True)
    if not conf.skip_index:
        index = indexer.get_driver(conf)
        LOG.info("Upgrading indexer %s", index)
        index.upgrade()
    if not conf.skip_storage:
        # FIXME(jd) Pass None as the coordinator because it's not needed in
        # this case. This will be removed once the storage stops requiring
        # a coordinator object.
        s = storage.get_driver(conf, None)
        LOG.info("Upgrading storage %s", s)
        s.upgrade()
    if not conf.skip_incoming:
        i = incoming.get_driver(conf)
        LOG.info("Upgrading incoming storage %s", i)
        i.upgrade(conf.sacks_number)

    if (not conf.skip_archive_policies_creation
            and not index.list_archive_policies()
            and not index.list_archive_policy_rules()):
        if conf.skip_index:
            index = indexer.get_driver(conf)
        for name, ap in six.iteritems(archive_policy.DEFAULT_ARCHIVE_POLICIES):
            index.create_archive_policy(ap)
        index.create_archive_policy_rule("default", "*", "low")
Example #17
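An early change_sack_size(): it refuses to resize while measures are pending, then switches the stored sack count and removes the old sack group.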
def change_sack_size():
    conf = cfg.ConfigOpts()
    conf.register_cli_opts([_SACK_NUMBER_OPT])
    conf = service.prepare_service(conf=conf, log_to_std=True)
    s = incoming.get_driver(conf)
    try:
        report = s.measures_report(details=False)
    except incoming.SackDetectionError:
        # The issue has already been logged by NUM_SACKS; abort.
        return
    remainder = report['summary']['measures']
    if remainder:
        LOG.error(
            'Cannot change sack size with a non-empty backlog. Process '
            'the remaining %s measures and try again', remainder)
        return
    LOG.info("Changing sack size to: %s", conf.sacks_number)
    old_num_sacks = s.get_storage_sacks()
    s.set_storage_settings(conf.sacks_number)
    s.remove_sack_group(old_num_sacks)
Example #18
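Worker task assignment: the worker joins a partitioned coordination group when the backend supports it, keeping a full task list as a fallback.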
    def _configure(self):
        self.store = storage.get_driver(self.conf, self.coord)
        self.incoming = incoming.get_driver(self.conf)
        self.index = indexer.get_driver(self.conf)
        self.index.connect()

        # Create a fallback in case partitioning fails or assigns no tasks.
        self.fallback_tasks = list(six.moves.range(self.incoming.NUM_SACKS))
        try:
            self.partitioner = self.coord.join_partitioned_group(
                self.GROUP_ID, partitions=200)
            LOG.info('Joined coordination group: %s', self.GROUP_ID)
        except NotImplementedError:
            LOG.warning('Coordinator does not support partitioning. Worker '
                        'will battle against other workers for jobs.')
        except tooz.ToozError as e:
            LOG.error(
                'Unexpected error configuring coordinator for '
                'partitioning. Retrying: %s', e)
            raise tenacity.TryAgain(e)
Example #19
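A later change_sack_size() that removes all current sacks and recreates the incoming storage with the new sack count.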
def change_sack_size():
    conf = cfg.ConfigOpts()
    conf.register_cli_opts([_SACK_NUMBER_OPT])
    conf = service.prepare_service(conf=conf, log_to_std=True)
    s = incoming.get_driver(conf)
    try:
        report = s.measures_report(details=False)
    except incoming.SackDetectionError:
        LOG.error('Unable to detect the number of storage sacks.\n'
                  'Ensure gnocchi-upgrade has been executed.')
        return
    remainder = report['summary']['measures']
    if remainder:
        LOG.error('Cannot change sack size with a non-empty backlog. Process '
                  'the remaining %s measures and try again', remainder)
        return
    LOG.info("Removing current %d sacks", s.NUM_SACKS)
    s.remove_sacks()
    LOG.info("Creating new %d sacks", conf.sacks_number)
    s.upgrade(conf.sacks_number)
Example #20
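An upgrade() variant from a version where the indexer required an explicit connect() call.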
def upgrade():
    conf = cfg.ConfigOpts()
    sack_number_opt = copy.copy(_SACK_NUMBER_OPT)
    sack_number_opt.default = 128
    conf.register_cli_opts([
        cfg.BoolOpt("skip-index", default=False, help="Skip index upgrade."),
        cfg.BoolOpt("skip-storage",
                    default=False,
                    help="Skip storage upgrade."),
        cfg.BoolOpt("skip-incoming",
                    default=False,
                    help="Skip incoming storage upgrade."),
        cfg.BoolOpt("skip-archive-policies-creation",
                    default=False,
                    help="Skip default archive policies creation."),
        sack_number_opt,
    ])
    conf = service.prepare_service(conf=conf, log_to_std=True)
    if not conf.skip_index:
        index = indexer.get_driver(conf)
        index.connect()
        LOG.info("Upgrading indexer %s", index)
        index.upgrade()
    if not conf.skip_storage:
        s = storage.get_driver(conf)
        LOG.info("Upgrading storage %s", s)
        s.upgrade()
    if not conf.skip_incoming:
        i = incoming.get_driver(conf)
        LOG.info("Upgrading incoming storage %s", i)
        i.upgrade(conf.sacks_number)

    if (not conf.skip_archive_policies_creation
            and not index.list_archive_policies()
            and not index.list_archive_policy_rules()):
        if conf.skip_index:
            index = indexer.get_driver(conf)
            index.connect()
        for name, ap in six.iteritems(archive_policy.DEFAULT_ARCHIVE_POLICIES):
            index.create_archive_policy(ap)
        index.create_archive_policy_rule("default", "*", "low")
Example #21
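A test setUp() that prepares the configured storage backend, creating a throw-away Ceph pool or temp directory as needed, and isolates Redis and S3 state with per-test prefixes.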
    def setUp(self):
        super(TestCase, self).setUp()
        if swexc:
            self.useFixture(
                fixtures.MockPatch('swiftclient.client.Connection',
                                   FakeSwiftClient))

        if self.conf.storage.driver == 'file':
            tempdir = self.useFixture(fixtures.TempDir())
            self.conf.set_override('file_basepath', tempdir.path, 'storage')
        elif self.conf.storage.driver == 'ceph':
            pool_name = uuid.uuid4().hex
            with open(os.devnull, 'w') as f:
                subprocess.call("rados -c %s mkpool %s" %
                                (os.getenv("CEPH_CONF"), pool_name),
                                shell=True,
                                stdout=f,
                                stderr=subprocess.STDOUT)
            self.conf.set_override('ceph_pool', pool_name, 'storage')

        # Override the bucket prefix to be unique to avoid concurrent access
        # with any other test
        self.conf.set_override("s3_bucket_prefix",
                               str(uuid.uuid4())[:26], "storage")

        self.storage = storage.get_driver(self.conf)
        self.incoming = incoming.get_driver(self.conf)

        if self.conf.storage.driver == 'redis':
            # Create one prefix per test
            self.storage.STORAGE_PREFIX = str(uuid.uuid4()).encode()

        if self.conf.incoming.driver == 'redis':
            self.incoming.SACK_NAME_FORMAT = (
                str(uuid.uuid4()) + incoming.IncomingDriver.SACK_NAME_FORMAT)

        self.storage.upgrade()
        self.incoming.upgrade(128)
        self.chef = chef.Chef(self.coord, self.incoming, self.index,
                              self.storage)
Example #22
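Another upgrade() revision, differing from Example #16 mainly in option formatting and in not passing a coordinator to the storage driver.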
def upgrade():
    conf = cfg.ConfigOpts()
    sack_number_opt = copy.copy(_SACK_NUMBER_OPT)
    sack_number_opt.default = 128
    conf.register_cli_opts([
        cfg.BoolOpt("skip-index", default=False,
                    help="Skip index upgrade."),
        cfg.BoolOpt("skip-storage", default=False,
                    help="Skip storage upgrade."),
        cfg.BoolOpt("skip-incoming", default=False,
                    help="Skip incoming storage upgrade."),
        cfg.BoolOpt("skip-archive-policies-creation", default=False,
                    help="Skip default archive policies creation."),
        sack_number_opt,
    ])
    conf = service.prepare_service(conf=conf, log_to_std=True)
    if not conf.skip_index:
        index = indexer.get_driver(conf)
        LOG.info("Upgrading indexer %s", index)
        index.upgrade()
    if not conf.skip_storage:
        s = storage.get_driver(conf)
        LOG.info("Upgrading storage %s", s)
        s.upgrade()
    if not conf.skip_incoming:
        i = incoming.get_driver(conf)
        LOG.info("Upgrading incoming storage %s", i)
        i.upgrade(conf.sacks_number)

    if (not conf.skip_archive_policies_creation
            and not index.list_archive_policies()
            and not index.list_archive_policy_rules()):
        if conf.skip_index:
            index = indexer.get_driver(conf)
        for name, ap in six.iteritems(archive_policy.DEFAULT_ARCHIVE_POLICIES):
            index.create_archive_policy(ap)
        index.create_archive_policy_rule("default", "*", "low")
Example #23
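The minimal case: a component that only needs the incoming driver.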
    def _configure(self):
        self.incoming = incoming.get_driver(self.conf)
Example #24
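A gabbi test fixture: it prepares a full configuration (storage driver chosen via GNOCCHI_TEST_STORAGE_DRIVER), creates a fresh database, patches the driver factories to return the pre-built instances, and starts a metricd thread to process measures asynchronously.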
    def start_fixture(self):
        """Create necessary temp files and do the config dance."""
        global LOAD_APP_KWARGS

        if not os.getenv("GNOCCHI_TEST_DEBUG"):
            self.output = base.CaptureOutput()
            self.output.setUp()

        data_tmp_dir = tempfile.mkdtemp(prefix='gnocchi')

        if os.getenv("GABBI_LIVE"):
            dcf = None
        else:
            dcf = []
        conf = service.prepare_service([],
                                       conf=utils.prepare_conf(),
                                       default_config_files=dcf,
                                       logging_level=logging.DEBUG,
                                       skip_log_opts=True)

        py_root = os.path.abspath(
            os.path.join(
                os.path.dirname(__file__),
                '..',
                '..',
            ))
        conf.set_override('paste_config',
                          os.path.join(py_root, 'rest', 'api-paste.ini'),
                          group="api")
        conf.set_override('policy_file',
                          os.path.join(py_root, 'rest', 'policy.yaml'),
                          group="oslo_policy")

        # NOTE(sileht): This is not concurrency safe, but only this test file
        # deals with CORS, so we are fine. set_override doesn't work because
        # the cors group doesn't exist yet, and when the CORS middleware is
        # created it registers the options and directly copies the values of
        # all configuration options, making it impossible to override them
        # properly...
        cfg.set_defaults(cors.CORS_OPTS, allowed_origin="http://foobar.com")

        self.conf = conf
        self.tmp_dir = data_tmp_dir

        if conf.indexer.url is None:
            raise case.SkipTest("No indexer configured")

        storage_driver = os.getenv("GNOCCHI_TEST_STORAGE_DRIVER", "file")

        conf.set_override('driver', storage_driver, 'storage')
        if conf.storage.driver == 'file':
            conf.set_override('file_basepath', data_tmp_dir, 'storage')
        elif conf.storage.driver == 'ceph':
            conf.set_override('ceph_conffile', os.getenv("CEPH_CONF"),
                              'storage')
            self.ceph_pool_name = uuid.uuid4().hex
            with open(os.devnull, 'w') as f:
                subprocess.call(("ceph -c %s osd pool create %s "
                                 "16 16 replicated") %
                                (os.getenv("CEPH_CONF"), self.ceph_pool_name),
                                shell=True,
                                stdout=f,
                                stderr=subprocess.STDOUT)
                subprocess.call(("ceph -c %s osd pool application "
                                 "enable %s rbd") %
                                (os.getenv("CEPH_CONF"), self.ceph_pool_name),
                                shell=True,
                                stdout=f,
                                stderr=subprocess.STDOUT)
            conf.set_override('ceph_pool', self.ceph_pool_name, 'storage')
        elif conf.storage.driver == "s3":
            conf.set_override('s3_endpoint_url',
                              os.getenv("GNOCCHI_STORAGE_HTTP_URL"),
                              group="storage")
            conf.set_override('s3_access_key_id', "gnocchi", group="storage")
            conf.set_override('s3_secret_access_key',
                              "anythingworks",
                              group="storage")
            conf.set_override("s3_bucket_prefix",
                              str(uuid.uuid4())[:26], "storage")
        elif conf.storage.driver == "swift":
            # NOTE(sileht): This fixture must start before any driver stuff
            swift_fixture = fixtures.MockPatch('swiftclient.client.Connection',
                                               base.FakeSwiftClient)
            swift_fixture.setUp()

        # NOTE(jd) All of that is still very SQL centric but we only support
        # SQL for now so let's say it's good enough.
        conf.set_override(
            'url',
            sqlalchemy.SQLAlchemyIndexer._create_new_database(
                conf.indexer.url), 'indexer')

        index = indexer.get_driver(conf)
        index.upgrade()

        # Set pagination to a testable value
        conf.set_override('max_limit', 7, 'api')

        conf.set_override('enable_proxy_headers_parsing', True, group="api")

        self.index = index

        self.coord = metricd.get_coordinator_and_start(str(uuid.uuid4()),
                                                       conf.coordination_url)
        s = storage.get_driver(conf)
        i = incoming.get_driver(conf)

        if conf.storage.driver == 'redis':
            # Create one prefix per test
            s.STORAGE_PREFIX = str(uuid.uuid4()).encode()

        if conf.incoming.driver == 'redis':
            i.SACK_NAME_FORMAT = (str(uuid.uuid4()) +
                                  incoming.IncomingDriver.SACK_NAME_FORMAT)

        self.fixtures = [
            fixtures.MockPatch("gnocchi.storage.get_driver", return_value=s),
            fixtures.MockPatch("gnocchi.incoming.get_driver", return_value=i),
            fixtures.MockPatch("gnocchi.indexer.get_driver",
                               return_value=self.index),
            fixtures.MockPatch("gnocchi.cli.metricd.get_coordinator_and_start",
                               return_value=self.coord),
        ]
        for f in self.fixtures:
            f.setUp()

        if conf.storage.driver == 'swift':
            self.fixtures.append(swift_fixture)

        LOAD_APP_KWARGS = {
            'conf': conf,
        }

        s.upgrade()
        i.upgrade(128)

        # Start a thread to process measures asynchronously.
        self.metricd_thread = MetricdThread(chef.Chef(self.coord, i, index, s))
        self.metricd_thread.start()
Example #25
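An earlier revision of the same fixture: policy.json instead of policy.yaml, the Ceph pool is created with rados mkpool, and proxy-header parsing is not enabled.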
    def start_fixture(self):
        """Create necessary temp files and do the config dance."""
        global LOAD_APP_KWARGS

        if not os.getenv("GNOCCHI_TEST_DEBUG"):
            self.output = base.CaptureOutput()
            self.output.setUp()

        data_tmp_dir = tempfile.mkdtemp(prefix='gnocchi')

        if os.getenv("GABBI_LIVE"):
            dcf = None
        else:
            dcf = []
        conf = service.prepare_service([], conf=utils.prepare_conf(),
                                       default_config_files=dcf,
                                       logging_level=logging.DEBUG,
                                       skip_log_opts=True)

        py_root = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                               '..', '..',))
        conf.set_override('paste_config',
                          os.path.join(py_root, 'rest', 'api-paste.ini'),
                          group="api")
        conf.set_override('policy_file',
                          os.path.join(py_root, 'rest', 'policy.json'),
                          group="oslo_policy")

        # NOTE(sileht): This is not concurrency safe, but only this test file
        # deals with CORS, so we are fine. set_override doesn't work because
        # the cors group doesn't exist yet, and when the CORS middleware is
        # created it registers the options and directly copies the values of
        # all configuration options, making it impossible to override them
        # properly...
        cfg.set_defaults(cors.CORS_OPTS, allowed_origin="http://foobar.com")

        self.conf = conf
        self.tmp_dir = data_tmp_dir

        if conf.indexer.url is None:
            raise case.SkipTest("No indexer configured")

        storage_driver = os.getenv("GNOCCHI_TEST_STORAGE_DRIVER", "file")

        conf.set_override('driver', storage_driver, 'storage')
        if conf.storage.driver == 'file':
            conf.set_override('file_basepath', data_tmp_dir, 'storage')
        elif conf.storage.driver == 'ceph':
            conf.set_override('ceph_conffile', os.getenv("CEPH_CONF"),
                              'storage')
            pool_name = uuid.uuid4().hex
            with open(os.devnull, 'w') as f:
                subprocess.call("rados -c %s mkpool %s" % (
                    os.getenv("CEPH_CONF"), pool_name), shell=True,
                    stdout=f, stderr=subprocess.STDOUT)
            conf.set_override('ceph_pool', pool_name, 'storage')
        elif conf.storage.driver == "s3":
            conf.set_override('s3_endpoint_url',
                              os.getenv("GNOCCHI_STORAGE_HTTP_URL"),
                              group="storage")
            conf.set_override('s3_access_key_id', "gnocchi", group="storage")
            conf.set_override('s3_secret_access_key', "anythingworks",
                              group="storage")
            conf.set_override("s3_bucket_prefix", str(uuid.uuid4())[:26],
                              "storage")
        elif conf.storage.driver == "swift":
            # NOTE(sileht): This fixture must start before any driver stuff
            swift_fixture = fixtures.MockPatch(
                'swiftclient.client.Connection',
                base.FakeSwiftClient)
            swift_fixture.setUp()

        # NOTE(jd) All of that is still very SQL centric but we only support
        # SQL for now so let's say it's good enough.
        conf.set_override(
            'url',
            sqlalchemy.SQLAlchemyIndexer._create_new_database(
                conf.indexer.url),
            'indexer')

        index = indexer.get_driver(conf)
        index.upgrade()

        # Set pagination to a testable value
        conf.set_override('max_limit', 7, 'api')

        self.index = index

        self.coord = metricd.get_coordinator_and_start(str(uuid.uuid4()),
                                                       conf.coordination_url)
        s = storage.get_driver(conf)
        i = incoming.get_driver(conf)

        if conf.storage.driver == 'redis':
            # Create one prefix per test
            s.STORAGE_PREFIX = str(uuid.uuid4()).encode()

        if conf.incoming.driver == 'redis':
            i.SACK_NAME_FORMAT = (
                str(uuid.uuid4()) + incoming.IncomingDriver.SACK_NAME_FORMAT
            )

        self.fixtures = [
            fixtures.MockPatch("gnocchi.storage.get_driver",
                               return_value=s),
            fixtures.MockPatch("gnocchi.incoming.get_driver",
                               return_value=i),
            fixtures.MockPatch("gnocchi.indexer.get_driver",
                               return_value=self.index),
            fixtures.MockPatch(
                "gnocchi.cli.metricd.get_coordinator_and_start",
                return_value=self.coord),
        ]
        for f in self.fixtures:
            f.setUp()

        if conf.storage.driver == 'swift':
            self.fixtures.append(swift_fixture)

        LOAD_APP_KWARGS = {
            'conf': conf,
        }

        s.upgrade()
        i.upgrade(128)

        # Start a thread to process measures asynchronously.
        self.metricd_thread = MetricdThread(chef.Chef(self.coord, i, index, s))
        self.metricd_thread.start()
Example #26
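A unit-test setUp(): the indexer upgrade is serialized through a coordination lock because some drivers cannot create all their tables in one transaction while tests run in parallel.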
    def setUp(self):
        super(TestCase, self).setUp()

        self.conf = service.prepare_service(
            [], conf=utils.prepare_conf(),
            default_config_files=[],
            logging_level=logging.DEBUG,
            skip_log_opts=True)

        self.index = indexer.get_driver(self.conf)

        self.coord = metricd.get_coordinator_and_start(
            str(uuid.uuid4()),
            self.conf.coordination_url)

        # NOTE(jd) Some drivers, at least SQLAlchemy, can't create all
        # their tables in a single transaction even with checkfirst=True,
        # so we force the upgrade code path to be sequential here to
        # avoid race conditions as the tests run in parallel.
        with self.coord.get_lock(b"gnocchi-tests-db-lock"):
            self.index.upgrade()

        self.archive_policies = self.ARCHIVE_POLICIES.copy()
        for name, ap in six.iteritems(self.archive_policies):
            # Create basic archive policies
            try:
                self.index.create_archive_policy(ap)
            except indexer.ArchivePolicyAlreadyExists:
                pass

        py_root = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                               '..',))
        self.conf.set_override('paste_config',
                               os.path.join(py_root, 'rest', 'api-paste.ini'),
                               group="api")
        self.conf.set_override('policy_file',
                               os.path.join(py_root, 'rest', 'policy.json'),
                               group="oslo_policy")

        # NOTE(jd) This allows testing S3 on AWS.
        if not os.getenv("AWS_ACCESS_KEY_ID"):
            self.conf.set_override('s3_endpoint_url',
                                   os.getenv("GNOCCHI_STORAGE_HTTP_URL"),
                                   group="storage")
            self.conf.set_override('s3_access_key_id', "gnocchi",
                                   group="storage")
            self.conf.set_override('s3_secret_access_key', "anythingworks",
                                   group="storage")

        storage_driver = os.getenv("GNOCCHI_TEST_STORAGE_DRIVER", "file")
        self.conf.set_override('driver', storage_driver, 'storage')

        if swexc:
            self.useFixture(fixtures.MockPatch(
                'swiftclient.client.Connection',
                FakeSwiftClient))

        if self.conf.storage.driver == 'file':
            tempdir = self.useFixture(fixtures.TempDir())
            self.conf.set_override('file_basepath',
                                   tempdir.path,
                                   'storage')
        elif self.conf.storage.driver == 'ceph':
            self.conf.set_override('ceph_conffile',
                                   os.getenv("CEPH_CONF"),
                                   'storage')
            pool_name = uuid.uuid4().hex
            with open(os.devnull, 'w') as f:
                subprocess.call("rados -c %s mkpool %s" % (
                    os.getenv("CEPH_CONF"), pool_name), shell=True,
                    stdout=f, stderr=subprocess.STDOUT)
            self.conf.set_override('ceph_pool', pool_name, 'storage')

        # Override the bucket prefix to be unique to avoid concurrent access
        # with any other test
        self.conf.set_override("s3_bucket_prefix", str(uuid.uuid4())[:26],
                               "storage")

        self.storage = storage.get_driver(self.conf)
        self.incoming = incoming.get_driver(self.conf)

        if self.conf.storage.driver == 'redis':
            # Create one prefix per test
            self.storage.STORAGE_PREFIX = str(uuid.uuid4()).encode()

        if self.conf.incoming.driver == 'redis':
            self.incoming.SACK_NAME_FORMAT = (
                str(uuid.uuid4()) + incoming.IncomingDriver.SACK_NAME_FORMAT
            )

        self.storage.upgrade()
        self.incoming.upgrade(3)
        self.chef = chef.Chef(
            self.coord, self.incoming, self.index, self.storage)
Example #27
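An older fixture revision: storage is fixed to the file driver, the storage driver still takes the coordinator, and MetricdThread receives the index, storage and incoming drivers directly.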
    def start_fixture(self):
        """Create necessary temp files and do the config dance."""
        global LOAD_APP_KWARGS

        if not os.getenv("GNOCCHI_TEST_DEBUG"):
            self.output = base.CaptureOutput()
            self.output.setUp()

        data_tmp_dir = tempfile.mkdtemp(prefix='gnocchi')

        if os.getenv("GABBI_LIVE"):
            dcf = None
        else:
            dcf = []
        conf = service.prepare_service([],
                                       conf=utils.prepare_conf(),
                                       default_config_files=dcf)
        if not os.getenv("GNOCCHI_TEST_DEBUG"):
            daiquiri.setup(outputs=[])

        py_root = os.path.abspath(
            os.path.join(
                os.path.dirname(__file__),
                '..',
                '..',
            ))
        conf.set_override('paste_config',
                          os.path.join(py_root, 'rest', 'api-paste.ini'),
                          group="api")
        conf.set_override('policy_file',
                          os.path.join(py_root, 'rest', 'policy.json'),
                          group="oslo_policy")

        # NOTE(sileht): This is not concurrency safe, but only this test file
        # deals with CORS, so we are fine. set_override doesn't work because
        # the cors group doesn't exist yet, and when the CORS middleware is
        # created it registers the options and directly copies the values of
        # all configuration options, making it impossible to override them
        # properly...
        cfg.set_defaults(cors.CORS_OPTS, allowed_origin="http://foobar.com")

        self.conf = conf
        self.tmp_dir = data_tmp_dir

        if conf.indexer.url is None:
            raise case.SkipTest("No indexer configured")

        conf.set_override('driver', 'file', 'storage')
        conf.set_override('file_basepath', data_tmp_dir, 'storage')

        # NOTE(jd) All of that is still very SQL centric but we only support
        # SQL for now so let's say it's good enough.
        conf.set_override(
            'url',
            sqlalchemy.SQLAlchemyIndexer._create_new_database(
                conf.indexer.url), 'indexer')

        index = indexer.get_driver(conf)
        index.upgrade()

        # Set pagination to a testable value
        conf.set_override('max_limit', 7, 'api')

        self.index = index

        self.coord = metricd.get_coordinator_and_start(str(uuid.uuid4()),
                                                       conf.coordination_url)
        s = storage.get_driver(conf, self.coord)
        s.upgrade()
        i = incoming.get_driver(conf)
        i.upgrade(128)

        self.fixtures = [
            fixtures.MockPatch("gnocchi.storage.get_driver", return_value=s),
            fixtures.MockPatch("gnocchi.incoming.get_driver", return_value=i),
            fixtures.MockPatch("gnocchi.indexer.get_driver",
                               return_value=self.index),
            fixtures.MockPatch("gnocchi.cli.metricd.get_coordinator_and_start",
                               return_value=self.coord),
        ]
        for f in self.fixtures:
            f.setUp()

        LOAD_APP_KWARGS = {
            'conf': conf,
        }

        # Start a thread to process measures asynchronously.
        self.metricd_thread = MetricdThread(index, s, i)
        self.metricd_thread.start()
Example #28
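A sibling revision of Example #26 that uses policy.yaml and creates the Ceph pool with ceph osd pool create.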
    def setUp(self):
        super(TestCase, self).setUp()

        self.conf = service.prepare_service(
            [], conf=utils.prepare_conf(),
            default_config_files=[],
            logging_level=logging.DEBUG,
            skip_log_opts=True)

        self.index = indexer.get_driver(self.conf)

        self.coord = metricd.get_coordinator_and_start(
            str(uuid.uuid4()),
            self.conf.coordination_url)

        # NOTE(jd) Some drivers, at least SQLAlchemy, can't create all
        # their tables in a single transaction even with checkfirst=True,
        # so we force the upgrade code path to be sequential here to
        # avoid race conditions as the tests run in parallel.
        with self.coord.get_lock(b"gnocchi-tests-db-lock"):
            self.index.upgrade()

        self.archive_policies = self.ARCHIVE_POLICIES.copy()
        for name, ap in six.iteritems(self.archive_policies):
            # Create basic archive policies
            try:
                self.index.create_archive_policy(ap)
            except indexer.ArchivePolicyAlreadyExists:
                pass

        py_root = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                               '..',))
        self.conf.set_override('paste_config',
                               os.path.join(py_root, 'rest', 'api-paste.ini'),
                               group="api")
        self.conf.set_override('policy_file',
                               os.path.join(py_root, 'rest', 'policy.yaml'),
                               group="oslo_policy")

        # NOTE(jd) This allows testing S3 on AWS.
        if not os.getenv("AWS_ACCESS_KEY_ID"):
            self.conf.set_override('s3_endpoint_url',
                                   os.getenv("GNOCCHI_STORAGE_HTTP_URL"),
                                   group="storage")
            self.conf.set_override('s3_access_key_id', "gnocchi",
                                   group="storage")
            self.conf.set_override('s3_secret_access_key', "anythingworks",
                                   group="storage")

        storage_driver = os.getenv("GNOCCHI_TEST_STORAGE_DRIVER", "file")
        self.conf.set_override('driver', storage_driver, 'storage')

        if swexc:
            self.useFixture(fixtures.MockPatch(
                'swiftclient.client.Connection',
                FakeSwiftClient))

        if self.conf.storage.driver == 'file':
            tempdir = self.useFixture(fixtures.TempDir())
            self.conf.set_override('file_basepath',
                                   tempdir.path,
                                   'storage')
        elif self.conf.storage.driver == 'ceph':
            self.conf.set_override('ceph_conffile',
                                   os.getenv("CEPH_CONF"),
                                   'storage')
            self.ceph_pool_name = uuid.uuid4().hex
            with open(os.devnull, 'w') as f:
                subprocess.call(("ceph -c %s osd pool create %s "
                                 "16 16 replicated") % (
                    os.getenv("CEPH_CONF"), self.ceph_pool_name), shell=True,
                    stdout=f, stderr=subprocess.STDOUT)
                subprocess.call(("ceph -c %s osd pool application "
                                 "enable %s rbd") % (
                    os.getenv("CEPH_CONF"), self.ceph_pool_name), shell=True,
                    stdout=f, stderr=subprocess.STDOUT)
            self.conf.set_override('ceph_pool', self.ceph_pool_name, 'storage')

        # Override the bucket prefix to be unique to avoid concurrent access
        # with any other test
        self.conf.set_override("s3_bucket_prefix", str(uuid.uuid4())[:26],
                               "storage")

        self.storage = storage.get_driver(self.conf)
        self.incoming = incoming.get_driver(self.conf)

        if self.conf.storage.driver == 'redis':
            # Create one prefix per test
            self.storage.STORAGE_PREFIX = str(uuid.uuid4()).encode()

        if self.conf.incoming.driver == 'redis':
            self.incoming.SACK_NAME_FORMAT = (
                str(uuid.uuid4()) + incoming.IncomingDriver.SACK_NAME_FORMAT
            )

        self.storage.upgrade()
        self.incoming.upgrade(3)
        self.chef = chef.Chef(
            self.coord, self.incoming, self.index, self.storage)
Example #29
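An older worker _configure() where the storage driver was built without a coordinator and the indexer still needed connect().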
    def _configure(self):
        self.store = storage.get_driver(self.conf)
        self.incoming = incoming.get_driver(self.conf)
        self.index = indexer.get_driver(self.conf)
        self.index.connect()