def injector():
    conf = cfg.ConfigOpts()
    conf.register_cli_opts([
        cfg.IntOpt("metrics", default=None),
        cfg.IntOpt("batch-of-measures", default=1000),
        cfg.IntOpt("measures-per-batch", default=10),
    ])
    conf = service.prepare_service(conf=conf)
    index = indexer.get_driver(conf)
    index.connect()
    s = storage.get_driver(conf)

    metrics = index.list_metrics()
    if conf.metrics:
        metrics = metrics[:conf.metrics]

    def todo(metric):
        for _ in six.moves.range(conf.batch_of_measures):
            measures = [
                storage.Measure(utils.to_timestamp(datetime.datetime.now()),
                                random.random())
                for __ in six.moves.range(conf.measures_per_batch)
            ]
            s.add_measures(metric, measures)

    with futures.ThreadPoolExecutor(max_workers=len(metrics)) as executor:
        # We use 'list' to consume the whole iterator here so that the first
        # worker exception is raised now; not much choice.
        list(executor.map(todo, metrics))
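
The list() call above matters because executor.map() returns a lazy iterator: a worker's exception is only re-raised when that worker's result is consumed from the iterator. A minimal, self-contained sketch of this behaviour (a standalone illustration, not Gnocchi code; work() is a made-up task):

import concurrent.futures


def work(n):
    # Simulate one failing task among several.
    if n == 2:
        raise ValueError("boom")
    return n * n


with concurrent.futures.ThreadPoolExecutor(max_workers=4) as executor:
    results = executor.map(work, range(4))  # nothing consumed yet, no exception
    try:
        list(results)  # consuming the iterator re-raises the first failure here
    except ValueError as exc:
        print("first worker failure surfaced:", exc)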
Example #2
    def _lazy_load(self, name):
        # NOTE(sileht): We don't care about raising an error here; if something
        # fails, this will just raise a 500 until the backend is ready.
        if name not in self.backends:
            with self.BACKEND_LOCKS[name]:
                # Recheck, maybe it has been created in the meantime.
                if name not in self.backends:
                    if name == "coordinator":
                        # NOTE(jd) This coordinator is never stopped. I don't
                        # think it's a real problem since the Web app can never
                        # really be stopped anyway, except by quitting it
                        # entirely.
                        self.backends[name] = (
                            metricd.get_coordinator_and_start(
                                str(uuid.uuid4()), self.conf.coordination_url))
                    elif name == "storage":
                        self.backends[name] = (gnocchi_storage.get_driver(
                            self.conf))
                    elif name == "incoming":
                        self.backends[name] = (gnocchi_incoming.get_driver(
                            self.conf))
                    elif name == "indexer":
                        self.backends[name] = (gnocchi_indexer.get_driver(
                            self.conf))
                    else:
                        raise RuntimeError("Unknown driver %s" % name)

        return self.backends[name]
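
The _lazy_load() method above is a double-checked locking cache: it checks the backends dict without the lock for the common case, then re-checks under the per-name lock so that only one thread constructs each backend. A standalone sketch of the same pattern, using plain threading primitives and a dummy factory purely for illustration:

import threading

BACKEND_LOCKS = {"storage": threading.Lock(), "indexer": threading.Lock()}
BACKENDS = {}


def lazy_load(name, factory):
    if name not in BACKENDS:            # lock-free fast path for the common case
        with BACKEND_LOCKS[name]:
            if name not in BACKENDS:    # re-check: another thread may have built it
                BACKENDS[name] = factory()
    return BACKENDS[name]


# The first call builds the backend; later calls return the cached instance.
backend = lazy_load("storage", object)
assert backend is lazy_load("storage", object)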
Example #3
File: app.py Project: NeCTAR-RC/gnocchi
def setup_app(config=None, cfg=None):
    if cfg is None:
        # NOTE(jd) That sucks but pecan forces us to use kwargs :(
        raise RuntimeError("Config is actually mandatory")
    config = config or PECAN_CONFIG
    s = config.get('storage')
    if not s:
        s = storage.get_driver(cfg)
    i = config.get('indexer')
    if not i:
        i = indexer.get_driver(cfg)
        i.connect()

    # NOTE(sileht): pecan debug won't work in multi-process environment
    pecan_debug = cfg.api.pecan_debug
    if cfg.api.workers != 1 and pecan_debug:
        pecan_debug = False
        LOG.warning('pecan_debug cannot be enabled when workers is > 1; '
                    'the value is overridden with False')

    app = pecan.make_app(
        config['app']['root'],
        debug=pecan_debug,
        hooks=(GnocchiHook(s, i, cfg),),
        guess_content_type_from_ext=False,
        custom_renderers={'json': OsloJSONRenderer},
    )

    if config.get('not_implemented_middleware', True):
        app = webob.exc.HTTPExceptionMiddleware(NotImplementedMiddleware(app))

    return app
def load_app(conf, appname=None, indexer=None, storage=None,
             not_implemented_middleware=True):
    global APPCONFIGS

    # NOTE(sileht): We load config, storage and indexer,
    # so all
    if not storage:
        storage = gnocchi_storage.get_driver(conf)
    if not indexer:
        indexer = gnocchi_indexer.get_driver(conf)
        indexer.connect()

    # Build the WSGI app
    cfg_path = conf.api.paste_config
    if not os.path.isabs(cfg_path):
        cfg_path = conf.find_file(cfg_path)

    if cfg_path is None or not os.path.exists(cfg_path):
        raise cfg.ConfigFilesNotFoundError([conf.api.paste_config])

    config = dict(conf=conf, indexer=indexer, storage=storage,
                  not_implemented_middleware=not_implemented_middleware)
    configkey = str(uuid.uuid4())
    APPCONFIGS[configkey] = config

    LOG.info("WSGI config used: %s" % cfg_path)
    return deploy.loadapp("config:" + cfg_path, name=appname,
                          global_conf={'configkey': configkey})
Example #5
File: app.py Project: luo-zn/gnocchi
    def _lazy_load(self, name):
        # NOTE(sileht): We don't care about raising an error here; if something
        # fails, this will just raise a 500 until the backend is ready.
        if name not in self.backends:
            with self.BACKEND_LOCKS[name]:
                # Recheck, maybe it has been created in the meantime.
                if name not in self.backends:
                    if name == "coordinator":
                        # NOTE(jd) This coordinator is never stopped. I don't
                        # think it's a real problem since the Web app can never
                        # really be stopped anyway, except by quitting it
                        # entirely.
                        self.backends[name] = (
                            metricd.get_coordinator_and_start(
                                str(uuid.uuid4()),
                                self.conf.coordination_url)
                        )
                    elif name == "storage":
                        self.backends[name] = (
                            gnocchi_storage.get_driver(self.conf)
                        )
                    elif name == "incoming":
                        self.backends[name] = (
                            gnocchi_incoming.get_driver(self.conf)
                        )
                    elif name == "indexer":
                        self.backends[name] = (
                            gnocchi_indexer.get_driver(self.conf)
                        )
                    else:
                        raise RuntimeError("Unknown driver %s" % name)

        return self.backends[name]
Example #6
def injector():
    conf = cfg.ConfigOpts()
    conf.register_cli_opts([
        cfg.IntOpt("metrics", default=1, min=1),
        cfg.StrOpt("archive-policy-name", default="low"),
        cfg.StrOpt("creator", default="admin"),
        cfg.IntOpt("batch-of-measures", default=1000),
        cfg.IntOpt("measures-per-batch", default=10),
    ])
    conf = service.prepare_service(conf=conf)
    index = indexer.get_driver(conf)
    instore = incoming.get_driver(conf)

    def todo():
        metric = index.create_metric(
            uuid.uuid4(),
            creator=conf.creator,
            archive_policy_name=conf.archive_policy_name)

        for _ in six.moves.range(conf.batch_of_measures):
            measures = [
                incoming.Measure(
                    utils.dt_in_unix_ns(utils.utcnow()), random.random())
                for __ in six.moves.range(conf.measures_per_batch)]
            instore.add_measures(metric, measures)

    with futures.ThreadPoolExecutor(max_workers=conf.metrics) as executor:
        for m in six.moves.range(conf.metrics):
            executor.submit(todo)
Example #7
def load_app(conf, indexer=None, storage=None, incoming=None,
             not_implemented_middleware=True):
    global APPCONFIGS

    # NOTE(sileht): We load config, storage and indexer,
    # so all
    if not storage:
        storage = gnocchi_storage.get_driver(conf)
    if not incoming:
        incoming = gnocchi_incoming.get_driver(conf)
    if not indexer:
        indexer = gnocchi_indexer.get_driver(conf)

    # Build the WSGI app
    cfg_path = conf.api.paste_config
    if not os.path.isabs(cfg_path):
        cfg_path = conf.find_file(cfg_path)

    if cfg_path is None or not os.path.exists(cfg_path):
        LOG.debug("No api-paste configuration file found! Using default.")
        cfg_path = os.path.abspath(pkg_resources.resource_filename(
            __name__, "api-paste.ini"))

    config = dict(conf=conf, indexer=indexer, storage=storage,
                  incoming=incoming,
                  not_implemented_middleware=not_implemented_middleware)
    configkey = str(uuid.uuid4())
    APPCONFIGS[configkey] = config

    LOG.info("WSGI config used: %s", cfg_path)

    appname = "gnocchi+" + conf.api.auth_mode
    app = deploy.loadapp("config:" + cfg_path, name=appname,
                         global_conf={'configkey': configkey})
    return cors.CORS(app, conf=conf)
Example #9
    def setUp(self):
        super(TestCase, self).setUp()
        self.conf = self.useFixture(config.Config()).conf
        self.conf.import_opt('debug', 'gnocchi.openstack.common.log')
        self.conf.set_override('debug', True)

        self.conf.set_override('driver', self.indexer_engine, 'indexer')
        self.conf.import_opt('connection',
                             'gnocchi.openstack.common.db.options',
                             group='database')
        self.conf.set_override('connection',
                               getattr(self, "db_url", "sqlite:///"),
                               'database')
        self.index = indexer.get_driver(self.conf)
        try:
            self.index.upgrade()
        except Exception:
            # FIXME(jd) We should be smarter in upgrade() for sqlalchemy to
            # remove that
            pass

        self.useFixture(mockpatch.Patch(
            'swiftclient.client.Connection',
            FakeSwiftClient))

        self.conf.set_override('driver', self.storage_engine, 'storage')
        self.storage = storage.get_driver(self.conf)
Example #10
    def setUp(self):
        super(ModelsMigrationsSync, self).setUp()
        self.useFixture(fixtures.Timeout(120, gentle=True))
        self.db = mock.Mock()
        self.conf.set_override(
            'url',
            sqlalchemy.SQLAlchemyIndexer._create_new_database(
                self.conf.indexer.url),
            'indexer')
        self.index = indexer.get_driver(self.conf)
        self.index.upgrade(nocreate=True)
        self.addCleanup(self._drop_database)

        # NOTE(sileht): remove tables dynamically created by other tests
        valid_resource_type_tables = []
        for rt in self.index.list_resource_types():
            valid_resource_type_tables.append(rt.tablename)
            valid_resource_type_tables.append("%s_history" % rt.tablename)
            # NOTE(sileht): load it in sqlalchemy metadata
            self.index._RESOURCE_TYPE_MANAGER.get_classes(rt)

        for table in sqlalchemy_base.Base.metadata.sorted_tables:
            if (table.name.startswith("rt_") and
                    table.name not in valid_resource_type_tables):
                sqlalchemy_base.Base.metadata.remove(table)
                self.index._RESOURCE_TYPE_MANAGER._cache.pop(
                    table.name.replace('_history', ''), None)
Example #11
File: app.py Project: shushen/gnocchi
def load_app(conf,
             appname=None,
             indexer=None,
             storage=None,
             not_implemented_middleware=True):
    global APPCONFIGS

    # NOTE(sileht): We load config, storage and indexer,
    # so all
    if not storage:
        storage = gnocchi_storage.get_driver(conf)
    if not indexer:
        indexer = gnocchi_indexer.get_driver(conf)
        indexer.connect()

    # Build the WSGI app
    cfg_path = conf.api.paste_config
    if not os.path.isabs(cfg_path):
        cfg_path = conf.find_file(cfg_path)

    if cfg_path is None or not os.path.exists(cfg_path):
        raise cfg.ConfigFilesNotFoundError([conf.api.paste_config])

    config = dict(conf=conf,
                  indexer=indexer,
                  storage=storage,
                  not_implemented_middleware=not_implemented_middleware)
    configkey = str(uuid.uuid4())
    APPCONFIGS[configkey] = config

    LOG.info("WSGI config used: %s" % cfg_path)
    return deploy.loadapp("config:" + cfg_path,
                          name=appname,
                          global_conf={'configkey': configkey})
def upgrade():
    conf = cfg.ConfigOpts()
    conf.register_cli_opts([
        cfg.BoolOpt("skip-index", default=False,
                    help="Skip index upgrade."),
        cfg.BoolOpt("skip-storage", default=False,
                    help="Skip storage upgrade."),
        cfg.BoolOpt("skip-archive-policies-creation", default=False,
                    help="Skip default archive policies creation.")
    ])
    conf = service.prepare_service(conf=conf)
    index = indexer.get_driver(conf)
    index.connect()
    if not conf.skip_index:
        LOG.info("Upgrading indexer %s" % index)
        index.upgrade()
    if not conf.skip_storage:
        s = storage.get_driver(conf)
        LOG.info("Upgrading storage %s" % s)
        s.upgrade(index)

    if (not conf.skip_archive_policies_creation
            and not index.list_archive_policies()
            and not index.list_archive_policy_rules()):
        for name, ap in six.iteritems(archive_policy.DEFAULT_ARCHIVE_POLICIES):
            index.create_archive_policy(ap)
        index.create_archive_policy_rule("default", "*", "low")
Example #13
    def setUp(self):
        super(ModelsMigrationsSync, self).setUp()
        self.useFixture(fixtures.Timeout(120, gentle=True))
        self.db = mock.Mock()
        self.conf.set_override(
            'url',
            sqlalchemy.SQLAlchemyIndexer._create_new_database(
                self.conf.indexer.url), 'indexer')
        self.index = indexer.get_driver(self.conf)
        self.index.upgrade(nocreate=True)
        self.addCleanup(self._drop_database)

        # NOTE(sileht): remove tables dynamically created by other tests
        valid_resource_type_tables = []
        for rt in self.index.list_resource_types():
            valid_resource_type_tables.append(rt.tablename)
            valid_resource_type_tables.append("%s_history" % rt.tablename)
            # NOTE(sileht): load it in sqlalchemy metadata
            self.index._RESOURCE_TYPE_MANAGER.get_classes(rt)

        for table in sqlalchemy_base.Base.metadata.sorted_tables:
            if (table.name.startswith("rt_")
                    and table.name not in valid_resource_type_tables):
                sqlalchemy_base.Base.metadata.remove(table)
                self.index._RESOURCE_TYPE_MANAGER._cache.pop(
                    table.name.replace('_history', ''), None)
Example #14
    def setUp(self):
        super(TestCase, self).setUp()
        self.conf = self.useFixture(config.Config()).conf
        self.conf.import_opt('debug', 'gnocchi.openstack.common.log')
        self.conf.set_override('debug', True)

        self.conf.set_override('driver', self.indexer_engine, 'indexer')
        self.conf.import_opt('connection',
                             'gnocchi.openstack.common.db.options',
                             group='database')
        self.conf.set_override('connection',
                               getattr(self, "db_url", "sqlite:///"),
                               'database')
        # No env var exported, no integration tests
        if self.conf.database.connection is None:
            raise NotImplementedError
        self.index = indexer.get_driver(self.conf)

        # NOTE(jd) So, some drivers, at least SQLAlchemy, can't create all
        # their tables in a single transaction even with checkfirst=True, so
        # what we do here is force the upgrade code path to be sequential to
        # avoid race conditions as the tests run in parallel.
        with lockutils.lock("gnocchi-tests-db-lock", external=True):
            self.index.upgrade()

        self.useFixture(mockpatch.Patch(
            'swiftclient.client.Connection',
            FakeSwiftClient))

        self.conf.set_override('driver', self.storage_engine, 'storage')
        self.storage = storage.get_driver(self.conf)
Example #15
File: cli.py Project: shushen/gnocchi
def upgrade():
    conf = cfg.ConfigOpts()
    conf.register_cli_opts([
        cfg.BoolOpt("skip-index", default=False, help="Skip index upgrade."),
        cfg.BoolOpt("skip-storage",
                    default=False,
                    help="Skip storage upgrade."),
        cfg.BoolOpt("skip-archive-policies-creation",
                    default=False,
                    help="Skip default archive policies creation."),
        cfg.BoolOpt("create-legacy-resource-types",
                    default=False,
                    help="Creation of Ceilometer legacy resource types.")
    ])
    conf = service.prepare_service(conf=conf)
    index = indexer.get_driver(conf)
    index.connect()
    if not conf.skip_index:
        LOG.info("Upgrading indexer %s" % index)
        index.upgrade(
            create_legacy_resource_types=conf.create_legacy_resource_types)
    if not conf.skip_storage:
        s = storage.get_driver(conf)
        LOG.info("Upgrading storage %s" % s)
        s.upgrade(index)

    if (not conf.skip_archive_policies_creation
            and not index.list_archive_policies()
            and not index.list_archive_policy_rules()):
        for name, ap in six.iteritems(archive_policy.DEFAULT_ARCHIVE_POLICIES):
            index.create_archive_policy(ap)
        index.create_archive_policy_rule("default", "*", "low")
Example #16
    def setUpClass(self):
        super(TestCase, self).setUpClass()

        self.conf = service.prepare_service(
            [], conf=utils.prepare_conf(),
            default_config_files=[],
            logging_level=logging.DEBUG)

        if not os.getenv("GNOCCHI_TEST_DEBUG"):
            daiquiri.setup(outputs=[])

        py_root = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                               '..',))
        self.conf.set_override('paste_config',
                               os.path.join(py_root, 'rest', 'api-paste.ini'),
                               group="api")
        self.conf.set_override('policy_file',
                               os.path.join(py_root, 'rest', 'policy.json'),
                               group="oslo_policy")

        # NOTE(jd) This allows testing S3 on AWS
        if not os.getenv("AWS_ACCESS_KEY_ID"):
            self.conf.set_override('s3_endpoint_url',
                                   os.getenv("GNOCCHI_STORAGE_HTTP_URL"),
                                   group="storage")
            self.conf.set_override('s3_access_key_id', "gnocchi",
                                   group="storage")
            self.conf.set_override('s3_secret_access_key', "anythingworks",
                                   group="storage")

        self.index = indexer.get_driver(self.conf)

        self.coord = metricd.get_coordinator_and_start(
            str(uuid.uuid4()),
            self.conf.coordination_url)

        # NOTE(jd) So, some drivers, at least SQLAlchemy, can't create all
        # their tables in a single transaction even with checkfirst=True, so
        # what we do here is force the upgrade code path to be sequential to
        # avoid race conditions as the tests run in parallel.
        with self.coord.get_lock(b"gnocchi-tests-db-lock"):
            self.index.upgrade()

        self.archive_policies = self.ARCHIVE_POLICIES.copy()
        for name, ap in six.iteritems(self.archive_policies):
            # Create basic archive policies
            try:
                self.index.create_archive_policy(ap)
            except indexer.ArchivePolicyAlreadyExists:
                pass

        storage_driver = os.getenv("GNOCCHI_TEST_STORAGE_DRIVER", "file")
        self.conf.set_override('driver', storage_driver, 'storage')
        if storage_driver == 'ceph':
            self.conf.set_override('ceph_conffile',
                                   os.getenv("CEPH_CONF"),
                                   'storage')
Example #17
    def start_fixture(self):
        """Create necessary temp files and do the config dance."""

        global CONF

        data_tmp_dir = tempfile.mkdtemp(prefix='gnocchi')
        coordination_dir = os.path.join(data_tmp_dir, 'tooz')
        os.mkdir(coordination_dir)
        coordination_url = 'file://%s' % coordination_dir

        conf = service.prepare_service([])

        CONF = self.conf = conf
        self.tmp_dir = data_tmp_dir

        # Use the indexer set in the conf, unless we have set an
        # override via the environment.
        if 'GNOCCHI_TEST_INDEXER_URL' in os.environ:
            conf.set_override('url',
                              os.environ.get("GNOCCHI_TEST_INDEXER_URL"),
                              'indexer')

        # TODO(jd) It would be cool if Gabbi was able to use the null://
        # indexer, but this makes the API return a lot of 501 errors, which
        # Gabbi does not want to see, so let's just disable it.
        if conf.indexer.url is None or conf.indexer.url == "null://":
            raise case.SkipTest("No indexer configured")

        # Use the presence of DEVSTACK_GATE_TEMPEST as a semaphore
        # to signal we are not in a gate driven functional test
        # and thus should override conf settings.
        if 'DEVSTACK_GATE_TEMPEST' not in os.environ:
            conf.set_override('driver', 'file', 'storage')
            conf.set_override('coordination_url', coordination_url, 'storage')
            conf.set_override('policy_file',
                              os.path.abspath('etc/gnocchi/policy.json'),
                              group="oslo_policy")
            conf.set_override('file_basepath', data_tmp_dir, 'storage')

        # NOTE(jd) All of that is still very SQL centric but we only support
        # SQL for now so let's say it's good enough.
        url = sqlalchemy_url.make_url(conf.indexer.url)

        url.database = url.database + str(uuid.uuid4()).replace('-', '')
        db_url = str(url)
        conf.set_override('url', db_url, 'indexer')
        sqlalchemy_utils.create_database(db_url)

        index = indexer.get_driver(conf)
        index.connect()
        index.upgrade()

        conf.set_override('pecan_debug', False, 'api')

        # Turn off any middleware.
        conf.set_override('middlewares', [], 'api')

        self.index = index
Example #18
    def __init__(self, conf):
        self.conf = conf
        self.incoming = incoming.get_driver(self.conf)
        self.indexer = indexer.get_driver(self.conf)
        self._ensure_resource_type_exists()

        self._hosts = {}
        self._measures = collections.defaultdict(
            lambda: collections.defaultdict(list))
Example #19
def _inject_from_conf(conf,
                      metrics, measures, archive_policy_name="low",
                      process=False, interval=None):
    inc = incoming.get_driver(conf)
    coord = metricd.get_coordinator_and_start(str(uuid.uuid4()),
                                              conf.coordination_url)
    store = storage.get_driver(conf)
    idx = indexer.get_driver(conf)
    return _inject(inc, coord, store, idx,
                   metrics, measures, archive_policy_name, process, interval)
Example #20
    def __init__(self, conf):
        self.conf = conf
        self.incoming = incoming.get_driver(self.conf)
        self.indexer = indexer.get_driver(self.conf)
        self.resource_type = None

        self._ensure_resource_type_exists()
        self.gauges = {}
        self.counters = {}
        self.absolute = {}
Example #21
File: manage.py Project: sum12/gnocchi
def upgrade():
    conf = cfg.ConfigOpts()
    sack_number_opt = copy.copy(_SACK_NUMBER_OPT)
    sack_number_opt.default = 128
    conf.register_cli_opts([
        cfg.BoolOpt("skip-index", default=False, help="Skip index upgrade."),
        cfg.BoolOpt("skip-storage",
                    default=False,
                    help="Skip storage upgrade."),
        cfg.BoolOpt("skip-incoming",
                    default=False,
                    help="Skip incoming storage upgrade."),
        cfg.BoolOpt("skip-archive-policies-creation",
                    default=False,
                    help="Skip default archive policies creation."),
        sack_number_opt,
    ])
    conf = service.prepare_service(conf=conf, log_to_std=True)
    if not conf.skip_index:
        index = indexer.get_driver(conf)
        LOG.info("Upgrading indexer %s", index)
        index.upgrade()
    if not conf.skip_storage:
        # FIXME(jd) Pass None as coordinator because it's not needed in this
        # case. This will be removed when the storage stops requiring a
        # coordinator object.
        s = storage.get_driver(conf, None)
        LOG.info("Upgrading storage %s", s)
        s.upgrade()
    if not conf.skip_incoming:
        i = incoming.get_driver(conf)
        LOG.info("Upgrading incoming storage %s", i)
        i.upgrade(conf.sacks_number)

    if (not conf.skip_archive_policies_creation
            and not index.list_archive_policies()
            and not index.list_archive_policy_rules()):
        if conf.skip_index:
            index = indexer.get_driver(conf)
        for name, ap in six.iteritems(archive_policy.DEFAULT_ARCHIVE_POLICIES):
            index.create_archive_policy(ap)
        index.create_archive_policy_rule("default", "*", "low")
Example #22
    def setUp(self):
        super(ModelsMigrationsSync, self).setUp()
        self.db = mock.Mock()
        self.conf.set_override(
            'url',
            sqlalchemy.SQLAlchemyIndexer._create_new_database(
                self.conf.indexer.url),
            'indexer')
        self.index = indexer.get_driver(self.conf)
        self.index.connect()
        self.index.upgrade(nocreate=True, create_legacy_resource_types=True)
Example #23
def upgrade():
    conf = cfg.ConfigOpts()
    sack_number_opt = copy.copy(_SACK_NUMBER_OPT)
    sack_number_opt.default = 128
    conf.register_cli_opts([
        cfg.BoolOpt("skip-index", default=False, help="Skip index upgrade."),
        cfg.BoolOpt("skip-storage",
                    default=False,
                    help="Skip storage upgrade."),
        cfg.BoolOpt("skip-incoming",
                    default=False,
                    help="Skip incoming storage upgrade."),
        cfg.BoolOpt("skip-archive-policies-creation",
                    default=False,
                    help="Skip default archive policies creation."),
        sack_number_opt,
    ])
    conf = service.prepare_service(conf=conf, log_to_std=True)
    if not conf.skip_index:
        index = indexer.get_driver(conf)
        index.connect()
        LOG.info("Upgrading indexer %s", index)
        index.upgrade()
    if not conf.skip_storage:
        s = storage.get_driver(conf)
        LOG.info("Upgrading storage %s", s)
        s.upgrade()
    if not conf.skip_incoming:
        i = incoming.get_driver(conf)
        LOG.info("Upgrading incoming storage %s", i)
        i.upgrade(conf.sacks_number)

    if (not conf.skip_archive_policies_creation
            and not index.list_archive_policies()
            and not index.list_archive_policy_rules()):
        if conf.skip_index:
            index = indexer.get_driver(conf)
            index.connect()
        for name, ap in six.iteritems(archive_policy.DEFAULT_ARCHIVE_POLICIES):
            index.create_archive_policy(ap)
        index.create_archive_policy_rule("default", "*", "low")
Example #24
File: cli.py Project: shushen/gnocchi
    def _configure(self):
        try:
            self.store = storage.get_driver(self.conf)
        except storage.StorageError as e:
            LOG.error("Unable to initialize storage: %s" % e)
            raise Retry(e)
        try:
            self.index = indexer.get_driver(self.conf)
            self.index.connect()
        except indexer.IndexerException as e:
            LOG.error("Unable to initialize indexer: %s" % e)
            raise Retry(e)
Example #25
    def setUp(self):
        super(ModelsMigrationsSync, self).setUp()
        self.useFixture(fixtures.Timeout(120, gentle=True))
        self.db = mock.Mock()
        self.conf.set_override(
            'url',
            sqlalchemy.SQLAlchemyIndexer._create_new_database(
                self.conf.indexer.url),
            'indexer')
        self.index = indexer.get_driver(self.conf)
        self.index.upgrade(nocreate=True)
        self.addCleanup(self._drop_database)
Example #27
File: app.py Project: calvin67/gnocchi
def setup_app(config=PECAN_CONFIG, cfg=None):
    if cfg is None:
        # NOTE(jd) That sucks but pecan forces us to use kwargs :(
        raise RuntimeError("Config is actually mandatory")
    s = config.get('storage')
    if not s:
        s = storage.get_driver(cfg)
    i = config.get('indexer')
    if not i:
        i = indexer.get_driver(cfg)
    i.connect()

    root_dir = os.path.dirname(os.path.abspath(__file__))

    # NOTE(sileht): pecan debug won't work in multi-process environment
    pecan_debug = cfg.api.pecan_debug
    if cfg.api.workers != 1 and pecan_debug:
        pecan_debug = False
        LOG.warning('pecan_debug cannot be enabled when workers is > 1; '
                    'the value is overridden with False')

    app = pecan.make_app(
        config['app']['root'],
        debug=pecan_debug,
        hooks=(GnocchiHook(s, i, cfg),),
        guess_content_type_from_ext=False,
        custom_renderers={'json': OsloJSONRenderer,
                          'gnocchi_jinja': GnocchiJinjaRenderer},
        default_renderer='gnocchi_jinja',
        template_path=root_dir + "/templates",
    )

    app = wsgi.SharedDataMiddleware(
        app,
        {"/static": root_dir + "/static"},
        cache=not cfg.api.pecan_debug)

    if config.get('not_implemented_middleware', True):
        app = webob.exc.HTTPExceptionMiddleware(NotImplementedMiddleware(app))

    for middleware in reversed(cfg.api.middlewares):
        if not middleware:
            continue
        klass = importutils.import_class(middleware)
        # FIXME(jd) Find a way to remove that special handling…
        if klass == keystonemiddleware.auth_token.AuthProtocol:
            middleware_config = dict(cfg.keystone_authtoken)
        else:
            middleware_config = dict(cfg)
        app = klass(app, middleware_config)

    return app
Example #28
    def __init__(self, url, count, conf):
        super(Amqp, self).__init__()
        self.url = url
        self.expected = count
        self.received = 0
        self.conf = conf
        self.incoming = incoming.get_driver(self.conf)
        self.indexer = indexer.get_driver(self.conf)
        self.gauges = {}
        self.counters = {}
        self.absolute = {}
        self.conf = service.prepare_service()
        self.peer_close_is_error = True
Example #29
File: metricd.py Project: sum12/gnocchi
    def _configure(self):
        member_id = "%s.%s.%s" % (
            socket.gethostname(),
            self.worker_id,
            # NOTE(jd) Still use a uuid here so we're
            # sure there's no conflict in case of
            # crash/restart
            str(uuid.uuid4()))
        self.coord = get_coordinator_and_start(member_id,
                                               self.conf.coordination_url)
        self.store = storage.get_driver(self.conf, self.coord)
        self.incoming = incoming.get_driver(self.conf)
        self.index = indexer.get_driver(self.conf)
Example #30
File: metricd.py Project: luo-zn/gnocchi
    def _configure(self):
        member_id = "%s.%s.%s" % (socket.gethostname(),
                                  self.worker_id,
                                  # NOTE(jd) Still use a uuid here so we're
                                  # sure there's no conflict in case of
                                  # crash/restart
                                  str(uuid.uuid4()))
        self.coord = get_coordinator_and_start(member_id,
                                               self.conf.coordination_url)
        self.store = storage.get_driver(self.conf)
        self.incoming = incoming.get_driver(self.conf)
        self.index = indexer.get_driver(self.conf)
        self.chef = chef.Chef(self.coord, self.incoming,
                              self.index, self.store)
Example #31
def setup_app(pecan_config=PECAN_CONFIG):
    conf = pecan_config['conf']
    s = pecan_config.get('storage')
    if not s:
        s = storage.get_driver(conf)
    i = pecan_config.get('indexer')
    if not i:
        i = indexer.get_driver(conf)
    return pecan.make_app(
        pecan_config['app']['root'],
        debug=conf.debug,
        hooks=(DBHook(s, i),),
        guess_content_type_from_ext=False,
    )
Example #32
File: manage.py Project: luo-zn/gnocchi
def upgrade():
    conf = cfg.ConfigOpts()
    sack_number_opt = copy.copy(_SACK_NUMBER_OPT)
    sack_number_opt.default = 128
    conf.register_cli_opts([
        cfg.BoolOpt("skip-index", default=False,
                    help="Skip index upgrade."),
        cfg.BoolOpt("skip-storage", default=False,
                    help="Skip storage upgrade."),
        cfg.BoolOpt("skip-incoming", default=False,
                    help="Skip incoming storage upgrade."),
        cfg.BoolOpt("skip-archive-policies-creation", default=False,
                    help="Skip default archive policies creation."),
        sack_number_opt,
    ])
    conf = service.prepare_service(conf=conf, log_to_std=True)
    if not conf.skip_index:
        index = indexer.get_driver(conf)
        LOG.info("Upgrading indexer %s", index)
        index.upgrade()
    if not conf.skip_storage:
        s = storage.get_driver(conf)
        LOG.info("Upgrading storage %s", s)
        s.upgrade()
    if not conf.skip_incoming:
        i = incoming.get_driver(conf)
        LOG.info("Upgrading incoming storage %s", i)
        i.upgrade(conf.sacks_number)

    if (not conf.skip_archive_policies_creation
            and not index.list_archive_policies()
            and not index.list_archive_policy_rules()):
        if conf.skip_index:
            index = indexer.get_driver(conf)
        for name, ap in six.iteritems(archive_policy.DEFAULT_ARCHIVE_POLICIES):
            index.create_archive_policy(ap)
        index.create_archive_policy_rule("default", "*", "low")
Example #33
def setup_app(config=None, cfg=None):
    if cfg is None:
        # NOTE(jd) That sucks but pecan forces us to use kwargs :(
        raise RuntimeError("Config is actually mandatory")
    config = config or PECAN_CONFIG
    s = config.get('storage')
    if not s:
        s = storage.get_driver(cfg)
    i = config.get('indexer')
    if not i:
        i = indexer.get_driver(cfg)
        i.connect()

    # NOTE(sileht): pecan debug won't work in multi-process environment
    pecan_debug = cfg.api.pecan_debug
    if cfg.api.workers != 1 and pecan_debug:
        pecan_debug = False
        LOG.warning('pecan_debug cannot be enabled when workers is > 1; '
                    'the value is overridden with False')

    app = pecan.make_app(
        config['app']['root'],
        debug=pecan_debug,
        hooks=(GnocchiHook(s, i, cfg),),
        guess_content_type_from_ext=False,
        custom_renderers={'json': OsloJSONRenderer},
    )

    if config.get('not_implemented_middleware', True):
        app = webob.exc.HTTPExceptionMiddleware(NotImplementedMiddleware(app))

    for middleware in reversed(cfg.api.middlewares):
        if not middleware:
            continue
        klass = importutils.import_class(middleware)
        # FIXME(jd) Find a way to remove that special handling…
        # next version of keystonemiddleware > 2.1.0 will support
        # 'oslo_config_project' option, so we could remove this
        # workaround.
        if klass == keystonemiddleware.auth_token.AuthProtocol:
            middleware_config = dict(cfg.keystone_authtoken)
        else:
            middleware_config = dict(cfg)
            # NOTE(sileht): Allow oslo.config compatible middleware to load
            # our configuration file.
            middleware_config['oslo_config_project'] = 'gnocchi'
        app = klass(app, middleware_config)

    return app
Example #34
File: metricd.py Project: lamby/gnocchi
def metricd_tester(conf):
    # NOTE(sileht): This method is designed to be profiled; we want to avoid
    # issues with the profiler and os.fork(), that's why we don't use the
    # MetricdServiceManager.
    index = indexer.get_driver(conf)
    s = storage.get_driver(conf)
    inc = incoming.get_driver(conf)
    metrics = set()
    for sack in inc.iter_sacks():
        metrics.update(inc.list_metric_with_measures_to_process(sack))
        if len(metrics) >= conf.stop_after_processing_metrics:
            break
    c = chef.Chef(None, inc, index, s)
    c.process_new_measures(
        list(metrics)[:conf.stop_after_processing_metrics], True)
Example #35
def metricd_tester(conf):
    # NOTE(sileht): This method is designed to be profiled; we want to avoid
    # issues with the profiler and os.fork(), that's why we don't use the
    # MetricdServiceManager.
    index = indexer.get_driver(conf)
    s = storage.get_driver(conf)
    inc = incoming.get_driver(conf)
    metrics = set()
    for i in six.moves.range(inc.NUM_SACKS):
        metrics.update(inc.list_metric_with_measures_to_process(i))
        if len(metrics) >= conf.stop_after_processing_metrics:
            break
    s.process_new_measures(index, inc,
                           list(metrics)[:conf.stop_after_processing_metrics],
                           True)
Example #36
File: metricd.py Project: luo-zn/gnocchi
def metricd_tester(conf):
    # NOTE(sileht): This method is designed to be profiled; we want to avoid
    # issues with the profiler and os.fork(), that's why we don't use the
    # MetricdServiceManager.
    index = indexer.get_driver(conf)
    s = storage.get_driver(conf)
    inc = incoming.get_driver(conf)
    c = chef.Chef(None, inc, index, s)
    metrics_count = 0
    for sack in inc.iter_sacks():
        try:
            metrics_count += c.process_new_measures_for_sack(sack, True)
        except chef.SackAlreadyLocked:
            continue
        if metrics_count >= conf.stop_after_processing_metrics:
            break
Example #37
    def __init__(self, conf):
        self.conf = conf
        self.incoming = incoming.get_driver(self.conf)
        self.indexer = indexer.get_driver(self.conf)
        try:
            self.indexer.create_resource('generic',
                                         self.conf.statsd.resource_id,
                                         self.conf.statsd.creator)
        except indexer.ResourceAlreadyExists:
            LOG.debug("Resource %s already exists",
                      self.conf.statsd.resource_id)
        else:
            LOG.info("Created resource %s", self.conf.statsd.resource_id)
        self.gauges = {}
        self.counters = {}
        self.times = {}
Example #38
File: metricd.py Project: rabi/gnocchi
def metricd_tester(conf):
    # NOTE(sileht): This method is designed to be profiled; we want to avoid
    # issues with the profiler and os.fork(), that's why we don't use the
    # MetricdServiceManager.
    index = indexer.get_driver(conf)
    s = storage.get_driver(conf)
    inc = incoming.get_driver(conf)
    c = chef.Chef(None, inc, index, s)
    metrics_count = 0
    for sack in inc.iter_sacks():
        try:
            metrics_count += c.process_new_measures_for_sack(sack, True)
        except chef.SackAlreadyLocked:
            continue
        if metrics_count >= conf.stop_after_processing_metrics:
            break
Example #39
def load_app(conf,
             indexer=None,
             storage=None,
             incoming=None,
             coord=None,
             not_implemented_middleware=True):
    global APPCONFIGS

    if not storage:
        if not coord:
            # NOTE(jd) This coordinator is never stopped. I don't think it's a
            # real problem since the Web app can never really be stopped
            # anyway, except by quitting it entirely.
            coord = metricd.get_coordinator_and_start(conf.coordination_url)
        storage = gnocchi_storage.get_driver(conf, coord)
    if not incoming:
        incoming = gnocchi_incoming.get_driver(conf)
    if not indexer:
        indexer = gnocchi_indexer.get_driver(conf)

    # Build the WSGI app
    cfg_path = conf.api.paste_config
    if not os.path.isabs(cfg_path):
        cfg_path = conf.find_file(cfg_path)

    if cfg_path is None or not os.path.exists(cfg_path):
        LOG.debug("No api-paste configuration file found! Using default.")
        cfg_path = os.path.abspath(
            pkg_resources.resource_filename(__name__, "api-paste.ini"))

    config = dict(conf=conf,
                  indexer=indexer,
                  storage=storage,
                  incoming=incoming,
                  not_implemented_middleware=not_implemented_middleware)
    configkey = str(uuid.uuid4())
    APPCONFIGS[configkey] = config

    LOG.info("WSGI config used: %s", cfg_path)

    appname = "gnocchi+" + conf.api.auth_mode
    app = deploy.loadapp("config:" + cfg_path,
                         name=appname,
                         global_conf={'configkey': configkey})
    return cors.CORS(app, conf=conf)
Example #40
File: cli.py Project: fabian4/gnocchi
def upgrade():
    conf = cfg.ConfigOpts()
    conf.register_cli_opts([
        cfg.BoolOpt("skip-index", default=False,
                    help="Skip index upgrade."),
        cfg.BoolOpt("skip-storage", default=False,
                    help="Skip storage upgrade.")
    ])
    conf = service.prepare_service(conf=conf)
    if not conf.skip_index:
        index = indexer.get_driver(conf)
        index.connect()
        LOG.info("Upgrading indexer %s" % index)
        index.upgrade()
    if not conf.skip_storage:
        s = storage.get_driver(conf)
        LOG.info("Upgrading storage %s" % s)
        s.upgrade(index)
Example #41
    def __init__(self, conf):
        self.conf = conf
        self.storage = storage.get_driver(self.conf)
        self.indexer = indexer.get_driver(self.conf)
        self.indexer.connect()
        try:
            self.indexer.create_resource('generic',
                                         self.conf.statsd.resource_id,
                                         self.conf.statsd.user_id,
                                         self.conf.statsd.project_id)
        except indexer.ResourceAlreadyExists:
            LOG.info("Resource %s already exists"
                     % self.conf.statsd.resource_id)
        else:
            LOG.info("Created resource %s" % self.conf.statsd.resource_id)
        self.gauges = {}
        self.counters = {}
        self.times = {}
Example #43
    def _configure(self):
        self.store = storage.get_driver(self.conf, self.coord)
        self.incoming = incoming.get_driver(self.conf)
        self.index = indexer.get_driver(self.conf)
        self.index.connect()

        # Create a fallback in case partitioning fails or assigns no tasks
        self.fallback_tasks = list(six.moves.range(self.incoming.NUM_SACKS))
        try:
            self.partitioner = self.coord.join_partitioned_group(
                self.GROUP_ID, partitions=200)
            LOG.info('Joined coordination group: %s', self.GROUP_ID)
        except NotImplementedError:
            LOG.warning('Coordinator does not support partitioning. Worker '
                        'will battle against other workers for jobs.')
        except tooz.ToozError as e:
            LOG.error(
                'Unexpected error configuring coordinator for '
                'partitioning. Retrying: %s', e)
            raise tenacity.TryAgain(e)
Example #44
File: base.py Project: shushen/gnocchi
    def setUpClass(self):
        super(TestCase, self).setUpClass()
        self.conf = service.prepare_service([],
                                            default_config_files=[])
        self.conf.set_override('policy_file',
                               self.path_get('etc/gnocchi/policy.json'),
                               group="oslo_policy")

        self.index = indexer.get_driver(self.conf)
        self.index.connect()

        # NOTE(jd) So, some drivers, at least SQLAlchemy, can't create all
        # their tables in a single transaction even with checkfirst=True, so
        # what we do here is force the upgrade code path to be sequential to
        # avoid race conditions as the tests run in parallel.
        self.coord = coordination.get_coordinator(
            self.conf.storage.coordination_url,
            str(uuid.uuid4()).encode('ascii'))

        self.coord.start(start_heart=True)

        with self.coord.get_lock(b"gnocchi-tests-db-lock"):
            self.index.upgrade()

        self.coord.stop()

        self.archive_policies = self.ARCHIVE_POLICIES.copy()
        self.archive_policies.update(archive_policy.DEFAULT_ARCHIVE_POLICIES)
        for name, ap in six.iteritems(self.archive_policies):
            # Create basic archive policies
            try:
                self.index.create_archive_policy(ap)
            except indexer.ArchivePolicyAlreadyExists:
                pass

        self.conf.set_override(
            'driver',
            os.getenv("GNOCCHI_TEST_STORAGE_DRIVER", "null"),
            'storage')
Example #45
    def test_percent_in_url(self):
        url = 'mysql+pymysql://user:pass%word@localhost/foobar'
        self.conf.set_override('url', url, 'indexer')
        alembic = indexer.get_driver(self.conf)._get_alembic_config()
        self.assertEqual(url, alembic.get_main_option("sqlalchemy.url"))
Example #46
File: base.py Project: luo-zn/gnocchi
    def setUp(self):
        super(TestCase, self).setUp()

        self.conf = service.prepare_service(
            [], conf=utils.prepare_conf(),
            default_config_files=[],
            logging_level=logging.DEBUG,
            skip_log_opts=True)

        self.index = indexer.get_driver(self.conf)

        self.coord = metricd.get_coordinator_and_start(
            str(uuid.uuid4()),
            self.conf.coordination_url)

        # NOTE(jd) So, some drivers, at least SQLAlchemy, can't create all
        # their tables in a single transaction even with checkfirst=True, so
        # what we do here is force the upgrade code path to be sequential to
        # avoid race conditions as the tests run in parallel.
        with self.coord.get_lock(b"gnocchi-tests-db-lock"):
            self.index.upgrade()

        self.archive_policies = self.ARCHIVE_POLICIES.copy()
        for name, ap in six.iteritems(self.archive_policies):
            # Create basic archive policies
            try:
                self.index.create_archive_policy(ap)
            except indexer.ArchivePolicyAlreadyExists:
                pass

        py_root = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                               '..',))
        self.conf.set_override('paste_config',
                               os.path.join(py_root, 'rest', 'api-paste.ini'),
                               group="api")
        self.conf.set_override('policy_file',
                               os.path.join(py_root, 'rest', 'policy.json'),
                               group="oslo_policy")

        # NOTE(jd) This allows testing S3 on AWS
        if not os.getenv("AWS_ACCESS_KEY_ID"):
            self.conf.set_override('s3_endpoint_url',
                                   os.getenv("GNOCCHI_STORAGE_HTTP_URL"),
                                   group="storage")
            self.conf.set_override('s3_access_key_id', "gnocchi",
                                   group="storage")
            self.conf.set_override('s3_secret_access_key', "anythingworks",
                                   group="storage")

        storage_driver = os.getenv("GNOCCHI_TEST_STORAGE_DRIVER", "file")
        self.conf.set_override('driver', storage_driver, 'storage')

        if swexc:
            self.useFixture(fixtures.MockPatch(
                'swiftclient.client.Connection',
                FakeSwiftClient))

        if self.conf.storage.driver == 'file':
            tempdir = self.useFixture(fixtures.TempDir())
            self.conf.set_override('file_basepath',
                                   tempdir.path,
                                   'storage')
        elif self.conf.storage.driver == 'ceph':
            self.conf.set_override('ceph_conffile',
                                   os.getenv("CEPH_CONF"),
                                   'storage')
            pool_name = uuid.uuid4().hex
            with open(os.devnull, 'w') as f:
                subprocess.call("rados -c %s mkpool %s" % (
                    os.getenv("CEPH_CONF"), pool_name), shell=True,
                    stdout=f, stderr=subprocess.STDOUT)
            self.conf.set_override('ceph_pool', pool_name, 'storage')

        # Override the bucket prefix to be unique to avoid concurrent access
        # with any other test
        self.conf.set_override("s3_bucket_prefix", str(uuid.uuid4())[:26],
                               "storage")

        self.storage = storage.get_driver(self.conf)
        self.incoming = incoming.get_driver(self.conf)

        if self.conf.storage.driver == 'redis':
            # Create one prefix per test
            self.storage.STORAGE_PREFIX = str(uuid.uuid4()).encode()

        if self.conf.incoming.driver == 'redis':
            self.incoming.SACK_NAME_FORMAT = (
                str(uuid.uuid4()) + incoming.IncomingDriver.SACK_NAME_FORMAT
            )

        self.storage.upgrade()
        self.incoming.upgrade(3)
        self.chef = chef.Chef(
            self.coord, self.incoming, self.index, self.storage)
Example #47
    def test_get_driver(self):
        self.conf.set_override('driver', 'null', 'indexer')
        driver = indexer.get_driver(self.conf)
        self.assertIsInstance(driver, null.NullIndexer)
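
For context, the get_driver() helpers exercised throughout these examples resolve a driver name from the configuration to a plugin and instantiate it. Below is a hedged sketch of how such a loader can be built on stevedore entry points; the 'gnocchi.indexer' namespace string and the conf.indexer.driver option are assumptions for illustration, not necessarily Gnocchi's actual layout:

from stevedore import driver


def get_driver(conf, namespace='gnocchi.indexer'):
    # Load the plugin registered under the configured name (e.g. "null")
    # and instantiate it with the configuration object.
    manager = driver.DriverManager(namespace,
                                   conf.indexer.driver,
                                   invoke_on_load=True,
                                   invoke_args=(conf,))
    return manager.driver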
Example #48
    def start_fixture(self):
        """Create necessary temp files and do the config dance."""
        global LOAD_APP_KWARGS

        if not os.getenv("GNOCCHI_TEST_DEBUG"):
            self.output = base.CaptureOutput()
            self.output.setUp()

        data_tmp_dir = tempfile.mkdtemp(prefix='gnocchi')

        if os.getenv("GABBI_LIVE"):
            dcf = None
        else:
            dcf = []
        conf = service.prepare_service([],
                                       conf=utils.prepare_conf(),
                                       default_config_files=dcf,
                                       logging_level=logging.DEBUG,
                                       skip_log_opts=True)

        py_root = os.path.abspath(
            os.path.join(
                os.path.dirname(__file__),
                '..',
                '..',
            ))
        conf.set_override('paste_config',
                          os.path.join(py_root, 'rest', 'api-paste.ini'),
                          group="api")
        conf.set_override('policy_file',
                          os.path.join(py_root, 'rest', 'policy.yaml'),
                          group="oslo_policy")

        # NOTE(sileht): This is not concurrency safe, but only this test file
        # deals with CORS, so we are fine. set_override doesn't work because
        # the cors group doesn't exist yet, and when the CORS middleware is
        # created it registers the option and directly copies the values of
        # all configuration options, making it impossible to override them
        # properly...
        cfg.set_defaults(cors.CORS_OPTS, allowed_origin="http://foobar.com")

        self.conf = conf
        self.tmp_dir = data_tmp_dir

        if conf.indexer.url is None:
            raise case.SkipTest("No indexer configured")

        storage_driver = os.getenv("GNOCCHI_TEST_STORAGE_DRIVER", "file")

        conf.set_override('driver', storage_driver, 'storage')
        if conf.storage.driver == 'file':
            conf.set_override('file_basepath', data_tmp_dir, 'storage')
        elif conf.storage.driver == 'ceph':
            conf.set_override('ceph_conffile', os.getenv("CEPH_CONF"),
                              'storage')
            self.ceph_pool_name = uuid.uuid4().hex
            with open(os.devnull, 'w') as f:
                subprocess.call(("ceph -c %s osd pool create %s "
                                 "16 16 replicated") %
                                (os.getenv("CEPH_CONF"), self.ceph_pool_name),
                                shell=True,
                                stdout=f,
                                stderr=subprocess.STDOUT)
                subprocess.call(("ceph -c %s osd pool application "
                                 "enable %s rbd") %
                                (os.getenv("CEPH_CONF"), self.ceph_pool_name),
                                shell=True,
                                stdout=f,
                                stderr=subprocess.STDOUT)
            conf.set_override('ceph_pool', self.ceph_pool_name, 'storage')
        elif conf.storage.driver == "s3":
            conf.set_override('s3_endpoint_url',
                              os.getenv("GNOCCHI_STORAGE_HTTP_URL"),
                              group="storage")
            conf.set_override('s3_access_key_id', "gnocchi", group="storage")
            conf.set_override('s3_secret_access_key',
                              "anythingworks",
                              group="storage")
            conf.set_override("s3_bucket_prefix",
                              str(uuid.uuid4())[:26], "storage")
        elif conf.storage.driver == "swift":
            # NOTE(sileht): This fixture must start before any driver stuff
            swift_fixture = fixtures.MockPatch('swiftclient.client.Connection',
                                               base.FakeSwiftClient)
            swift_fixture.setUp()

        # NOTE(jd) All of that is still very SQL centric but we only support
        # SQL for now so let's say it's good enough.
        conf.set_override(
            'url',
            sqlalchemy.SQLAlchemyIndexer._create_new_database(
                conf.indexer.url), 'indexer')

        index = indexer.get_driver(conf)
        index.upgrade()

        # Set pagination to a testable value
        conf.set_override('max_limit', 7, 'api')

        conf.set_override('enable_proxy_headers_parsing', True, group="api")

        self.index = index

        self.coord = metricd.get_coordinator_and_start(str(uuid.uuid4()),
                                                       conf.coordination_url)
        s = storage.get_driver(conf)
        i = incoming.get_driver(conf)

        if conf.storage.driver == 'redis':
            # Create one prefix per test
            s.STORAGE_PREFIX = str(uuid.uuid4()).encode()

        if conf.incoming.driver == 'redis':
            i.SACK_NAME_FORMAT = (str(uuid.uuid4()) +
                                  incoming.IncomingDriver.SACK_NAME_FORMAT)

        self.fixtures = [
            fixtures.MockPatch("gnocchi.storage.get_driver", return_value=s),
            fixtures.MockPatch("gnocchi.incoming.get_driver", return_value=i),
            fixtures.MockPatch("gnocchi.indexer.get_driver",
                               return_value=self.index),
            fixtures.MockPatch("gnocchi.cli.metricd.get_coordinator_and_start",
                               return_value=self.coord),
        ]
        for f in self.fixtures:
            f.setUp()

        if conf.storage.driver == 'swift':
            self.fixtures.append(swift_fixture)

        LOAD_APP_KWARGS = {
            'conf': conf,
        }

        s.upgrade()
        i.upgrade(128)

        # start up a thread to async process measures
        self.metricd_thread = MetricdThread(chef.Chef(self.coord, i, index, s))
        self.metricd_thread.start()
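This excerpt shows only the setup half of the gabbi fixture. A matching
teardown would have to undo it; the following stop_fixture is a minimal
sketch under explicit assumptions: that MetricdThread exposes a stop()
method (join() is inherited from threading.Thread) and that shutil is
imported at module level.

    def stop_fixture(self):
        # Stop the background measure-processing thread first.
        self.metricd_thread.stop()
        self.metricd_thread.join()
        # Undo the MockPatch fixtures in reverse order of setUp().
        for f in reversed(self.fixtures):
            f.cleanUp()
        # Release the tooz coordinator and the temporary data directory
        # created by the fixture.
        self.coord.stop()
        shutil.rmtree(self.tmp_dir)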
Example #49
    def setUp(self):
        super(TestCase, self).setUp()
        self.conf = service.prepare_service([])
        self.conf.set_override('policy_file',
                               self.path_get('etc/gnocchi/policy.json'),
                               group="oslo_policy")

        self.conf.set_override(
            'url',
            os.environ.get("GNOCCHI_TEST_INDEXER_URL", "null://"),
            'indexer')

        self.index = indexer.get_driver(self.conf)
        self.index.connect()

        self.conf.set_override('coordination_url',
                               os.getenv("GNOCCHI_COORDINATION_URL", "ipc://"),
                               'storage')

        # NOTE(jd) So, some drivers, at least SQLAlchemy, can't create all
        # their tables in a single transaction even with checkfirst=True,
        # so we force the upgrade code path to be sequential to avoid race
        # conditions as the tests run in parallel.
        self.coord = coordination.get_coordinator(
            os.getenv("GNOCCHI_COORDINATION_URL", "ipc://"),
            str(uuid.uuid4()).encode('ascii'))

        with self.coord.get_lock(b"gnocchi-tests-db-lock"):
            # Force upgrading using Alembic rather than creating the
            # database from scratch so we are sure we don't miss anything
            # in the Alembic upgrades. We have a test to check that
            # upgrades == create but it misses things such as custom CHECK
            # constraints.
            self.index.upgrade(nocreate=True)

        self.archive_policies = self.ARCHIVE_POLICIES
        # Used in gnocchi.gendoc
        if not getattr(self, "skip_archive_policies_creation", False):
            for name, ap in six.iteritems(self.ARCHIVE_POLICIES):
                # Create basic archive policies
                try:
                    self.index.create_archive_policy(ap)
                except indexer.ArchivePolicyAlreadyExists:
                    pass

        self.useFixture(mockpatch.Patch(
            'swiftclient.client.Connection',
            FakeSwiftClient))

        self.useFixture(mockpatch.Patch('gnocchi.storage.ceph.rados',
                                        FakeRadosModule()))

        self.conf.set_override(
            'driver',
            os.getenv("GNOCCHI_TEST_STORAGE_DRIVER", "null"),
            'storage')

        if self.conf.storage.driver == 'file':
            tempdir = self.useFixture(fixtures.TempDir())
            self.conf.set_override('file_basepath',
                                   tempdir.path,
                                   'storage')

        self.storage = storage.get_driver(self.conf)

        self.mgr = extension.ExtensionManager('gnocchi.aggregates',
                                              invoke_on_load=True)
        self.custom_agg = dict((x.name, x.obj) for x in self.mgr)
Example #50
    def start_fixture(self):
        """Create necessary temp files and do the config dance."""

        global LOAD_APP_KWARGS

        data_tmp_dir = tempfile.mkdtemp(prefix='gnocchi')

        if os.getenv("GABBI_LIVE"):
            dcf = None
        else:
            dcf = []
        conf = service.prepare_service([],
                                       default_config_files=dcf)

        conf.set_override('paste_config',
                          os.path.abspath('etc/gnocchi/api-paste.ini'),
                          'api')

        self.conf = conf
        self.tmp_dir = data_tmp_dir

        # TODO(jd) It would be cool if Gabbi was able to use the null://
        # indexer, but this makes the API return a lot of 501 errors, which
        # Gabbi does not want to see, so let's just disable it.
        if conf.indexer.url is None or conf.indexer.url == "null://":
            raise case.SkipTest("No indexer configured")

        # Use the absence of DEVSTACK_GATE_TEMPEST as a signal that we are
        # not in a gate-driven functional test and thus should override
        # conf settings.
        if 'DEVSTACK_GATE_TEMPEST' not in os.environ:
            conf.set_override('driver', 'file', 'storage')
            conf.set_override('policy_file',
                              os.path.abspath('etc/gnocchi/policy.json'),
                              group="oslo_policy")
            conf.set_override('file_basepath', data_tmp_dir, 'storage')

        # NOTE(jd) All of that is still very SQL centric but we only support
        # SQL for now so let's say it's good enough.
        conf.set_override(
            'url',
            sqlalchemy.SQLAlchemyIndexer._create_new_database(
                conf.indexer.url),
            'indexer')

        index = indexer.get_driver(conf)
        index.connect()
        index.upgrade(create_legacy_resource_types=True)

        conf.set_override('pecan_debug', False, 'api')

        # Set pagination to a testable value
        conf.set_override('max_limit', 7, 'api')

        self.index = index

        s = storage.get_driver(conf)
        s.upgrade(index)

        LOAD_APP_KWARGS = {
            'appname': 'gnocchi+noauth',
            'storage': s,
            'indexer': index,
            'conf': conf,
        }

        # start up a thread to async process measures
        self.metricd_thread = MetricdThread(index, s)
        self.metricd_thread.start()
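The fixture above isolates each run by asking
SQLAlchemyIndexer._create_new_database for a fresh database derived from
conf.indexer.url. As a rough illustration of that trick (not Gnocchi's
actual implementation; it assumes the sqlalchemy-utils package is available
and that the configured URL ends with a database name):

import uuid

import sqlalchemy_utils


def create_new_database(base_url):
    # Derive a uniquely named database from the configured URL, e.g.
    # mysql+pymysql://user@host/gnocchi -> .../gnocchi_<hex>, and create it
    # empty so the indexer schema can then be installed into it.
    new_url = "%s_%s" % (base_url.rstrip("/"), uuid.uuid4().hex)
    sqlalchemy_utils.create_database(new_url)
    return new_url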
Example #51
    def test_get_driver(self):
        driver = indexer.get_driver(self.conf)
        self.assertIsInstance(driver, indexer.IndexerDriver)
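The assertion above only checks that the configured driver is an
IndexerDriver instance. For context, get_driver()-style helpers are usually
thin wrappers around a stevedore lookup; the sketch below uses an
illustrative entry-point namespace and option name (not Gnocchi's real
values) and only resolves once a matching plugin is registered.

from stevedore import driver


def get_driver(conf):
    # Resolve the configured driver name to a plugin registered under the
    # hypothetical 'example.indexer' namespace and instantiate it with conf.
    manager = driver.DriverManager(namespace='example.indexer',
                                   name=conf.indexer_driver,
                                   invoke_on_load=True,
                                   invoke_args=(conf,))
    return manager.driver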
Example #53
File: base.py Project: fabian4/gnocchi
    def setUp(self):
        super(TestCase, self).setUp()
        self.conf = service.prepare_service([],
                                            default_config_files=[])
        self.conf.set_override('policy_file',
                               self.path_get('etc/gnocchi/policy.json'),
                               group="oslo_policy")

        self.index = indexer.get_driver(self.conf)
        self.index.connect()

        # NOTE(jd) So, some drivers, at least SQLAlchemy, can't create all
        # their tables in a single transaction even with checkfirst=True,
        # so we force the upgrade code path to be sequential to avoid race
        # conditions as the tests run in parallel.
        self.coord = coordination.get_coordinator(
            self.conf.storage.coordination_url,
            str(uuid.uuid4()).encode('ascii'))

        self.coord.start()

        with self.coord.get_lock(b"gnocchi-tests-db-lock"):
            # Force upgrading using Alembic rather than creating the
            # database from scratch so we are sure we don't miss anything
            # in the Alembic upgrades. We have a test to check that
            # upgrades == create but it misses things such as custom CHECK
            # constraints.
            self.index.upgrade(nocreate=True)

        self.coord.stop()

        self.archive_policies = self.ARCHIVE_POLICIES
        # Used in gnocchi.gendoc
        if not getattr(self, "skip_archive_policies_creation", False):
            for name, ap in six.iteritems(self.ARCHIVE_POLICIES):
                # Create basic archive policies
                try:
                    self.index.create_archive_policy(ap)
                except indexer.ArchivePolicyAlreadyExists:
                    pass

        if swexc:
            self.useFixture(mockpatch.Patch(
                'swiftclient.client.Connection',
                FakeSwiftClient))

        self.useFixture(mockpatch.Patch('gnocchi.storage.ceph.rados',
                                        FakeRadosModule()))

        self.conf.set_override(
            'driver',
            os.getenv("GNOCCHI_TEST_STORAGE_DRIVER", "null"),
            'storage')

        if self.conf.storage.driver == 'file':
            tempdir = self.useFixture(fixtures.TempDir())
            self.conf.set_override('file_basepath',
                                   tempdir.path,
                                   'storage')
        elif self.conf.storage.driver == 'influxdb':
            self.conf.set_override('influxdb_block_until_data_ingested', True,
                                   'storage')
            self.conf.set_override('influxdb_database', 'test', 'storage')
            self.conf.set_override('influxdb_password', 'root', 'storage')
            self.conf.set_override('influxdb_port',
                                   os.getenv("GNOCCHI_TEST_INFLUXDB_PORT",
                                             51234), 'storage')
            # NOTE(ityaptin) Creating a unique database for every test may
            # cause tests to fail by timeout, but it may be useful in some
            # cases
            if os.getenv("GNOCCHI_TEST_INFLUXDB_UNIQUE_DATABASES"):
                self.conf.set_override("influxdb_database",
                                       "gnocchi_%s" % uuid.uuid4().hex,
                                       'storage')

        self.storage = storage.get_driver(self.conf)
        # NOTE(jd) Do not upgrade the storage. We don't really need the
        # storage upgrade for now, and the code that upgrades from pre-1.3
        # (TimeSerieArchive) uses a lot of parallel locks, which makes tooz
        # explode because MySQL does not support that many connections in
        # real life.
        # self.storage.upgrade(self.index)

        self.mgr = extension.ExtensionManager('gnocchi.aggregates',
                                              invoke_on_load=True)
        self.custom_agg = dict((x.name, x.obj) for x in self.mgr)
Example #54
    def _configure(self):
        self.store = storage.get_driver(self.conf)
        self.store.partition = self.worker_id
        self.index = indexer.get_driver(self.conf)
        self.index.connect()
Example #55
    def start_fixture(self):
        """Create necessary temp files and do the config dance."""
        global LOAD_APP_KWARGS

        if not os.getenv("GNOCCHI_TEST_DEBUG"):
            self.output = base.CaptureOutput()
            self.output.setUp()

        data_tmp_dir = tempfile.mkdtemp(prefix='gnocchi')

        if os.getenv("GABBI_LIVE"):
            dcf = None
        else:
            dcf = []
        conf = service.prepare_service([], conf=utils.prepare_conf(),
                                       default_config_files=dcf,
                                       logging_level=logging.DEBUG,
                                       skip_log_opts=True)

        py_root = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                               '..', '..',))
        conf.set_override('paste_config',
                          os.path.join(py_root, 'rest', 'api-paste.ini'),
                          group="api")
        conf.set_override('policy_file',
                          os.path.join(py_root, 'rest', 'policy.json'),
                          group="oslo_policy")

        # NOTE(sileht): This is not concurrency safe, but only this test file
        # deals with CORS, so we are fine. set_override does not work because
        # the cors group does not exist yet, and when the CORS middleware is
        # created it registers the options and directly copies the value of
        # every configuration option, making it impossible to override them
        # properly...
        cfg.set_defaults(cors.CORS_OPTS, allowed_origin="http://foobar.com")

        self.conf = conf
        self.tmp_dir = data_tmp_dir

        if conf.indexer.url is None:
            raise case.SkipTest("No indexer configured")

        storage_driver = os.getenv("GNOCCHI_TEST_STORAGE_DRIVER", "file")

        conf.set_override('driver', storage_driver, 'storage')
        if conf.storage.driver == 'file':
            conf.set_override('file_basepath', data_tmp_dir, 'storage')
        elif conf.storage.driver == 'ceph':
            conf.set_override('ceph_conffile', os.getenv("CEPH_CONF"),
                              'storage')
            pool_name = uuid.uuid4().hex
            with open(os.devnull, 'w') as f:
                subprocess.call("rados -c %s mkpool %s" % (
                    os.getenv("CEPH_CONF"), pool_name), shell=True,
                    stdout=f, stderr=subprocess.STDOUT)
            conf.set_override('ceph_pool', pool_name, 'storage')
        elif conf.storage.driver == "s3":
            conf.set_override('s3_endpoint_url',
                              os.getenv("GNOCCHI_STORAGE_HTTP_URL"),
                              group="storage")
            conf.set_override('s3_access_key_id', "gnocchi", group="storage")
            conf.set_override('s3_secret_access_key', "anythingworks",
                              group="storage")
            conf.set_override("s3_bucket_prefix", str(uuid.uuid4())[:26],
                              "storage")
        elif conf.storage.driver == "swift":
            # NOTE(sileht): This fixture must start before any driver stuff
            swift_fixture = fixtures.MockPatch(
                'swiftclient.client.Connection',
                base.FakeSwiftClient)
            swift_fixture.setUp()

        # NOTE(jd) All of that is still very SQL centric but we only support
        # SQL for now so let's say it's good enough.
        conf.set_override(
            'url',
            sqlalchemy.SQLAlchemyIndexer._create_new_database(
                conf.indexer.url),
            'indexer')

        index = indexer.get_driver(conf)
        index.upgrade()

        # Set pagination to a testable value
        conf.set_override('max_limit', 7, 'api')

        self.index = index

        self.coord = metricd.get_coordinator_and_start(str(uuid.uuid4()),
                                                       conf.coordination_url)
        s = storage.get_driver(conf)
        i = incoming.get_driver(conf)

        if conf.storage.driver == 'redis':
            # Create one prefix per test
            s.STORAGE_PREFIX = str(uuid.uuid4()).encode()

        if conf.incoming.driver == 'redis':
            i.SACK_NAME_FORMAT = (
                str(uuid.uuid4()) + incoming.IncomingDriver.SACK_NAME_FORMAT
            )

        self.fixtures = [
            fixtures.MockPatch("gnocchi.storage.get_driver",
                               return_value=s),
            fixtures.MockPatch("gnocchi.incoming.get_driver",
                               return_value=i),
            fixtures.MockPatch("gnocchi.indexer.get_driver",
                               return_value=self.index),
            fixtures.MockPatch(
                "gnocchi.cli.metricd.get_coordinator_and_start",
                return_value=self.coord),
        ]
        for f in self.fixtures:
            f.setUp()

        if conf.storage.driver == 'swift':
            self.fixtures.append(swift_fixture)

        LOAD_APP_KWARGS = {
            'conf': conf,
        }

        s.upgrade()
        i.upgrade(128)

        # start up a thread to async process measures
        self.metricd_thread = MetricdThread(chef.Chef(self.coord, i, index, s))
        self.metricd_thread.start()
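The self.fixtures list above relies on fixtures.MockPatch with return_value
so that every later call to the patched driver factories hands back the
pre-built objects. A standalone sketch of the same pattern with the same
fixtures library (uuid.uuid4 is just a stand-in target, not something
Gnocchi patches):

import uuid

import fixtures
import testtools


class MockPatchDemo(testtools.TestCase):
    def test_patched_factory_returns_canned_object(self):
        canned = "not-a-real-uuid"
        # While the fixture is active, uuid.uuid4() returns the canned value
        # instead of a fresh UUID; the patch is undone automatically at the
        # end of the test.
        self.useFixture(fixtures.MockPatch("uuid.uuid4",
                                           return_value=canned))
        self.assertEqual(canned, uuid.uuid4())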
Example #56
File: cli.py Project: cernops/gnocchi
    def _configure(self):
        self.store = storage.get_driver(self.conf)
        self.index = indexer.get_driver(self.conf)
        self.index.connect()
Example #57
    def setUp(self):
        super(TestCase, self).setUp()

        self.conf = service.prepare_service(
            [], conf=utils.prepare_conf(),
            default_config_files=[],
            logging_level=logging.DEBUG,
            skip_log_opts=True)

        self.index = indexer.get_driver(self.conf)

        self.coord = metricd.get_coordinator_and_start(
            str(uuid.uuid4()),
            self.conf.coordination_url)

        # NOTE(jd) So, some drivers, at least SQLAlchemy, can't create all
        # their tables in a single transaction even with checkfirst=True,
        # so we force the upgrade code path to be sequential to avoid race
        # conditions as the tests run in parallel.
        with self.coord.get_lock(b"gnocchi-tests-db-lock"):
            self.index.upgrade()

        self.archive_policies = self.ARCHIVE_POLICIES.copy()
        for name, ap in six.iteritems(self.archive_policies):
            # Create basic archive policies
            try:
                self.index.create_archive_policy(ap)
            except indexer.ArchivePolicyAlreadyExists:
                pass

        py_root = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                               '..',))
        self.conf.set_override('paste_config',
                               os.path.join(py_root, 'rest', 'api-paste.ini'),
                               group="api")
        self.conf.set_override('policy_file',
                               os.path.join(py_root, 'rest', 'policy.yaml'),
                               group="oslo_policy")

        # NOTE(jd) This allows testing S3 on AWS
        if not os.getenv("AWS_ACCESS_KEY_ID"):
            self.conf.set_override('s3_endpoint_url',
                                   os.getenv("GNOCCHI_STORAGE_HTTP_URL"),
                                   group="storage")
            self.conf.set_override('s3_access_key_id', "gnocchi",
                                   group="storage")
            self.conf.set_override('s3_secret_access_key', "anythingworks",
                                   group="storage")

        storage_driver = os.getenv("GNOCCHI_TEST_STORAGE_DRIVER", "file")
        self.conf.set_override('driver', storage_driver, 'storage')

        if swexc:
            self.useFixture(fixtures.MockPatch(
                'swiftclient.client.Connection',
                FakeSwiftClient))

        if self.conf.storage.driver == 'file':
            tempdir = self.useFixture(fixtures.TempDir())
            self.conf.set_override('file_basepath',
                                   tempdir.path,
                                   'storage')
        elif self.conf.storage.driver == 'ceph':
            self.conf.set_override('ceph_conffile',
                                   os.getenv("CEPH_CONF"),
                                   'storage')
            self.ceph_pool_name = uuid.uuid4().hex
            with open(os.devnull, 'w') as f:
                subprocess.call(("ceph -c %s osd pool create %s "
                                 "16 16 replicated") % (
                    os.getenv("CEPH_CONF"), self.ceph_pool_name), shell=True,
                    stdout=f, stderr=subprocess.STDOUT)
                subprocess.call(("ceph -c %s osd pool application "
                                 "enable %s rbd") % (
                    os.getenv("CEPH_CONF"), self.ceph_pool_name), shell=True,
                    stdout=f, stderr=subprocess.STDOUT)
            self.conf.set_override('ceph_pool', self.ceph_pool_name, 'storage')

        # Override the bucket prefix to be unique to avoid concurrent access
        # with any other test
        self.conf.set_override("s3_bucket_prefix", str(uuid.uuid4())[:26],
                               "storage")

        self.storage = storage.get_driver(self.conf)
        self.incoming = incoming.get_driver(self.conf)

        if self.conf.storage.driver == 'redis':
            # Create one prefix per test
            self.storage.STORAGE_PREFIX = str(uuid.uuid4()).encode()

        if self.conf.incoming.driver == 'redis':
            self.incoming.SACK_NAME_FORMAT = (
                str(uuid.uuid4()) + incoming.IncomingDriver.SACK_NAME_FORMAT
            )

        self.storage.upgrade()
        self.incoming.upgrade(3)
        self.chef = chef.Chef(
            self.coord, self.incoming, self.index, self.storage)
Example #58
    def start_fixture(self):
        """Create necessary temp files and do the config dance."""

        self.output = output.CaptureOutput()
        self.output.setUp()
        self.log = log.ConfigureLogging()
        self.log.setUp()

        global LOAD_APP_KWARGS

        data_tmp_dir = tempfile.mkdtemp(prefix='gnocchi')

        if os.getenv("GABBI_LIVE"):
            dcf = None
        else:
            dcf = []
        conf = service.prepare_service([],
                                       default_config_files=dcf)
        py_root = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                               '..', '..',))
        conf.set_override('paste_config',
                          os.path.join(py_root, 'rest', 'api-paste.ini'),
                          group="api")
        conf.set_override('policy_file',
                          os.path.join(py_root, 'rest', 'policy.json'),
                          group="oslo_policy")

        # NOTE(sileht): This is not concurrency safe, but only this test file
        # deals with CORS, so we are fine. set_override does not work because
        # the cors group does not exist yet, and when the CORS middleware is
        # created it registers the options and directly copies the value of
        # every configuration option, making it impossible to override them
        # properly...
        cfg.set_defaults(cors.CORS_OPTS, allowed_origin="http://foobar.com")

        self.conf = conf
        self.tmp_dir = data_tmp_dir

        if conf.indexer.url is None:
            raise case.SkipTest("No indexer configured")

        # Use the absence of DEVSTACK_GATE_TEMPEST as a signal that we are
        # not in a gate-driven functional test and thus should override
        # conf settings.
        if 'DEVSTACK_GATE_TEMPEST' not in os.environ:
            conf.set_override('driver', 'file', 'storage')
            conf.set_override('file_basepath', data_tmp_dir, 'storage')

        # NOTE(jd) All of that is still very SQL centric but we only support
        # SQL for now so let's say it's good enough.
        conf.set_override(
            'url',
            sqlalchemy.SQLAlchemyIndexer._create_new_database(
                conf.indexer.url),
            'indexer')

        index = indexer.get_driver(conf)
        index.connect()
        index.upgrade()

        # Set pagination to a testable value
        conf.set_override('max_limit', 7, 'api')
        # Those tests use noauth mode
        # TODO(jd) Rewrite them for basic
        conf.set_override("auth_mode", "noauth", 'api')

        self.index = index

        s = storage.get_driver(conf)
        s.upgrade(index)

        LOAD_APP_KWARGS = {
            'storage': s,
            'indexer': index,
            'conf': conf,
        }

        # start up a thread to async process measures
        self.metricd_thread = MetricdThread(index, s)
        self.metricd_thread.start()