Example #1
File: cli.py Project: shushen/gnocchi
def upgrade():
    conf = cfg.ConfigOpts()
    conf.register_cli_opts([
        cfg.BoolOpt("skip-index", default=False, help="Skip index upgrade."),
        cfg.BoolOpt("skip-storage",
                    default=False,
                    help="Skip storage upgrade."),
        cfg.BoolOpt("skip-archive-policies-creation",
                    default=False,
                    help="Skip default archive policies creation."),
        cfg.BoolOpt("create-legacy-resource-types",
                    default=False,
                    help="Creation of Ceilometer legacy resource types.")
    ])
    conf = service.prepare_service(conf=conf)
    index = indexer.get_driver(conf)
    index.connect()
    if not conf.skip_index:
        LOG.info("Upgrading indexer %s" % index)
        index.upgrade(
            create_legacy_resource_types=conf.create_legacy_resource_types)
    if not conf.skip_storage:
        s = storage.get_driver(conf)
        LOG.info("Upgrading storage %s" % s)
        s.upgrade(index)

    if (not conf.skip_archive_policies_creation
            and not index.list_archive_policies()
            and not index.list_archive_policy_rules()):
        for name, ap in six.iteritems(archive_policy.DEFAULT_ARCHIVE_POLICIES):
            index.create_archive_policy(ap)
        index.create_archive_policy_rule("default", "*", "low")
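All of these examples revolve around the same oslo.config pattern: register CLI options on a fresh ConfigOpts object, let service.prepare_service() parse the command line, then read the parsed values as attributes. The self-contained sketch below reproduces only the option-parsing part with plain oslo.config (prepare_service() additionally sets up logging, defaults and config files, which is omitted here); the project name "example" and the flag values are illustrative, not taken from gnocchi.

import sys

from oslo_config import cfg


def main(argv=None):
    # Same registration pattern as upgrade() above, without the gnocchi
    # drivers: dashes in option names become underscores on attribute access.
    conf = cfg.ConfigOpts()
    conf.register_cli_opts([
        cfg.BoolOpt("skip-index", default=False, help="Skip index upgrade."),
        cfg.BoolOpt("skip-storage", default=False,
                    help="Skip storage upgrade."),
    ])
    # prepare_service() ultimately feeds the command line to the ConfigOpts
    # object; calling conf() directly shows that parsing step in isolation.
    conf(argv if argv is not None else sys.argv[1:], project="example")
    print("skip index:", conf.skip_index)
    print("skip storage:", conf.skip_storage)


if __name__ == "__main__":
    main(["--skip-storage"])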
Example #2
def start():
    conf = service.prepare_service()

    for field in ["resource_id", "user_id", "project_id"]:
        if conf.statsd[field] is None:
            raise cfg.RequiredOptError(field, cfg.OptGroup("statsd"))

    stats = Stats(conf)

    loop = asyncio.get_event_loop()
    # TODO(jd) Add TCP support
    listen = loop.create_datagram_endpoint(
        lambda: StatsdServer(stats),
        local_addr=(conf.statsd.host, conf.statsd.port))

    def _flush():
        loop.call_later(conf.statsd.flush_delay, _flush)
        stats.flush()

    loop.call_later(conf.statsd.flush_delay, _flush)
    transport, protocol = loop.run_until_complete(listen)

    try:
        loop.run_forever()
    except KeyboardInterrupt:
        pass

    transport.close()
    loop.close()
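The start() example above combines an asyncio datagram endpoint with a self-rescheduling call_later timer for periodic flushing. The minimal sketch below shows those two mechanics without any gnocchi or statsd code; the protocol class, port and interval are made up for illustration.

import asyncio


class UdpCounter(asyncio.DatagramProtocol):
    """Hypothetical protocol that just counts received packets."""

    def __init__(self):
        self.packets = 0

    def datagram_received(self, data, addr):
        self.packets += 1


def main():
    loop = asyncio.new_event_loop()
    counter = UdpCounter()
    listen = loop.create_datagram_endpoint(
        lambda: counter, local_addr=("127.0.0.1", 9125))
    transport, protocol = loop.run_until_complete(listen)

    def _report():
        # Re-arm the timer before doing the work, mirroring _flush() above.
        loop.call_later(5.0, _report)
        print("packets received so far:", counter.packets)

    loop.call_later(5.0, _report)
    try:
        loop.run_forever()
    except KeyboardInterrupt:
        pass
    transport.close()
    loop.close()


if __name__ == "__main__":
    main()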
Example #3
def metricd():
    conf = service.prepare_service()
    if (conf.storage.metric_reporting_delay <
            conf.storage.metric_processing_delay):
        LOG.error("Metric reporting must run less frequently then processing")
        sys.exit(0)

    signal.signal(signal.SIGTERM, _metricd_terminate)

    try:
        queues = []
        workers = []
        for worker in range(conf.metricd.workers):
            queue = multiprocessing.Queue()
            metric_worker = MetricProcessor(
                conf, worker, conf.storage.metric_processing_delay, queue)
            metric_worker.start()
            queues.append(queue)
            workers.append(metric_worker)

        metric_report = MetricReporting(conf, 0,
                                        conf.storage.metric_reporting_delay,
                                        queues)
        metric_report.start()
        workers.append(metric_report)

        for worker in workers:
            worker.join()
    except KeyboardInterrupt:
        _metricd_cleanup(workers)
        sys.exit(0)
    except Exception:
        LOG.warning("exiting", exc_info=True)
        _metricd_cleanup(workers)
        sys.exit(1)
Example #4
def injector():
    conf = cfg.ConfigOpts()
    conf.register_cli_opts([
        cfg.IntOpt("metrics", default=1, min=1),
        cfg.StrOpt("archive-policy-name", default="low"),
        cfg.StrOpt("creator", default="admin"),
        cfg.IntOpt("batch-of-measures", default=1000),
        cfg.IntOpt("measures-per-batch", default=10),
    ])
    conf = service.prepare_service(conf=conf)
    index = indexer.get_driver(conf)
    instore = incoming.get_driver(conf)

    def todo():
        metric = index.create_metric(
            uuid.uuid4(),
            creator=conf.creator,
            archive_policy_name=conf.archive_policy_name)

        for _ in six.moves.range(conf.batch_of_measures):
            measures = [
                incoming.Measure(
                    utils.dt_in_unix_ns(utils.utcnow()), random.random())
                for __ in six.moves.range(conf.measures_per_batch)]
            instore.add_measures(metric, measures)

    with futures.ThreadPoolExecutor(max_workers=conf.metrics) as executor:
        for m in six.moves.range(conf.metrics):
            executor.submit(todo)
Example #5
def injector():
    conf = cfg.ConfigOpts()
    conf.register_cli_opts([
        cfg.IntOpt("metrics", default=None),
        cfg.IntOpt("batch-of-measures", default=1000),
        cfg.IntOpt("measures-per-batch", default=10),
    ])
    conf = service.prepare_service(conf=conf)
    index = indexer.get_driver(conf)
    index.connect()
    s = storage.get_driver(conf)

    metrics = index.list_metrics()
    if conf.metrics:
        metrics = metrics[:conf.metrics]

    def todo(metric):
        for _ in six.moves.range(conf.batch_of_measures):
            measures = [
                storage.Measure(utils.to_timestamp(datetime.datetime.now()),
                                random.random())
                for __ in six.moves.range(conf.measures_per_batch)
            ]
            s.add_measures(metric, measures)

    with futures.ThreadPoolExecutor(max_workers=len(metrics)) as executor:
        # We use 'list' to iterate all threads here to raise the first
        # exception now, not much choice
        list(executor.map(todo, metrics))
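The comment in the injector above points out that wrapping executor.map() in list() is what surfaces the first worker exception; the short standalone snippet below demonstrates that behaviour (the work() function and its failing input are invented for the demo).

from concurrent import futures


def work(n):
    if n == 3:
        raise ValueError("boom on %d" % n)
    return n * n


with futures.ThreadPoolExecutor(max_workers=2) as executor:
    # map() returns a lazy iterator: worker exceptions only propagate to the
    # caller once the corresponding result is consumed, e.g. via list().
    results = executor.map(work, range(5))
    try:
        print(list(results))
    except ValueError as exc:
        print("caught:", exc)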
Example #6
def upgrade():
    conf = cfg.ConfigOpts()
    conf.register_cli_opts([
        cfg.BoolOpt("skip-index", default=False,
                    help="Skip index upgrade."),
        cfg.BoolOpt("skip-storage", default=False,
                    help="Skip storage upgrade."),
        cfg.BoolOpt("skip-archive-policies-creation", default=False,
                    help="Skip default archive policies creation.")
    ])
    conf = service.prepare_service(conf=conf)
    index = indexer.get_driver(conf)
    index.connect()
    if not conf.skip_index:
        LOG.info("Upgrading indexer %s" % index)
        index.upgrade()
    if not conf.skip_storage:
        s = storage.get_driver(conf)
        LOG.info("Upgrading storage %s" % s)
        s.upgrade(index)

    if (not conf.skip_archive_policies_creation
            and not index.list_archive_policies()
            and not index.list_archive_policy_rules()):
        for name, ap in six.iteritems(archive_policy.DEFAULT_ARCHIVE_POLICIES):
            index.create_archive_policy(ap)
        index.create_archive_policy_rule("default", "*", "low")
Example #7
File: cli.py Project: cug-heshun/gnocchi
def metricd():
    conf = service.prepare_service()
    if (conf.storage.metric_reporting_delay <
            conf.storage.metric_processing_delay):
        LOG.error("Metric reporting must run less frequently then processing")
        sys.exit(0)
    MetricdServiceManager(conf).run()
Example #8
def metricd():
    conf = service.prepare_service()
    if (conf.storage.metric_reporting_delay <
            conf.storage.metric_processing_delay):
        LOG.error("Metric reporting must run less frequently then processing")
        sys.exit(0)

    signal.signal(signal.SIGTERM, _metricd_terminate)

    try:
        queues = []
        workers = []
        for worker in range(conf.metricd.workers):
            queue = multiprocessing.Queue()
            metric_worker = MetricProcessor(
                conf, worker, conf.storage.metric_processing_delay, queue)
            metric_worker.start()
            queues.append(queue)
            workers.append(metric_worker)

        metric_report = MetricReporting(
            conf, 0, conf.storage.metric_reporting_delay, queues)
        metric_report.start()
        workers.append(metric_report)

        for worker in workers:
            worker.join()
    except KeyboardInterrupt:
        _metricd_cleanup(workers)
        sys.exit(0)
    except Exception:
        LOG.warning("exiting", exc_info=True)
        _metricd_cleanup(workers)
        sys.exit(1)
Example #9
File: statsd.py Project: amar266/gnocchi
def start():
    conf = service.prepare_service()

    if conf.statsd.resource_id is None:
        raise cfg.RequiredOptError("resource_id", cfg.OptGroup("statsd"))

    stats = Stats(conf)

    loop = asyncio.get_event_loop()
    # TODO(jd) Add TCP support
    listen = loop.create_datagram_endpoint(
        lambda: StatsdServer(stats),
        local_addr=(conf.statsd.host, conf.statsd.port))

    def _flush():
        loop.call_later(conf.statsd.flush_delay, _flush)
        stats.flush()

    loop.call_later(conf.statsd.flush_delay, _flush)
    transport, protocol = loop.run_until_complete(listen)

    LOG.info("Started on %s:%d", conf.statsd.host, conf.statsd.port)
    LOG.info("Flush delay: %d seconds", conf.statsd.flush_delay)

    try:
        loop.run_forever()
    except KeyboardInterrupt:
        pass

    transport.close()
    loop.close()
Example #10
def start():
    conf = service.prepare_service()
    server = proton.reactor.Container(AMQP1Server(conf))
    try:
        server.run()
    except KeyboardInterrupt:
        pass
Example #11
File: cli.py Project: acomisario/gnocchi
def metricd():
    conf = service.prepare_service()

    # Check that the storage driver actually needs this daemon to run
    s = storage.get_driver_class(conf)
    if s.process_measures == storage.StorageDriver.process_measures:
        LOG.debug("This storage driver does not need metricd to run, exiting")
        return 0

    signal.signal(signal.SIGTERM, _metricd_terminate)

    try:
        workers = []
        for worker in range(conf.metricd.workers):
            metric_worker = MetricProcessor(conf, worker)
            metric_worker.start()
            workers.append(metric_worker)

        for worker in workers:
            worker.join()
    except KeyboardInterrupt:
        _metricd_cleanup(workers)
        sys.exit(0)
    except Exception:
        LOG.warn("exiting", exc_info=True)
        _metricd_cleanup(workers)
        sys.exit(1)
Example #12
    def test_aggregation_methods(self):
        conf = service.prepare_service([], default_config_files=[])

        ap = archive_policy.ArchivePolicy("foobar", 0, [], ["*"])
        self.assertEqual(
            archive_policy.ArchivePolicy.VALID_AGGREGATION_METHODS,
            ap.aggregation_methods)

        ap = archive_policy.ArchivePolicy("foobar", 0, [], ["last"])
        self.assertEqual(set(["last"]), ap.aggregation_methods)

        ap = archive_policy.ArchivePolicy("foobar", 0, [], ["*", "-mean"])
        self.assertEqual(
            (archive_policy.ArchivePolicy.VALID_AGGREGATION_METHODS -
             set(["mean"])), ap.aggregation_methods)

        ap = archive_policy.ArchivePolicy("foobar", 0, [], ["-mean", "-last"])
        self.assertEqual(
            (set(conf.archive_policy.default_aggregation_methods) -
             set(["mean", "last"])), ap.aggregation_methods)

        ap = archive_policy.ArchivePolicy("foobar", 0, [], ["+12pct"])
        self.assertEqual(
            (set(conf.archive_policy.default_aggregation_methods).union(
                set(["12pct"]))), ap.aggregation_methods)
Example #13
File: cli.py Project: cernops/gnocchi
def metricd():
    conf = service.prepare_service()

    signal.signal(signal.SIGTERM, _metricd_terminate)

    try:
        metric_report = MetricReporting(
            conf, 0, conf.storage.metric_reporting_delay)
        metric_report.start()

        workers = [metric_report]
        for worker in range(conf.metricd.workers):
            metric_worker = MetricProcessor(
                conf, worker, conf.storage.metric_processing_delay)
            metric_worker.start()
            workers.append(metric_worker)

        for worker in workers:
            worker.join()
    except KeyboardInterrupt:
        _metricd_cleanup(workers)
        sys.exit(0)
    except Exception:
        LOG.warn("exiting", exc_info=True)
        _metricd_cleanup(workers)
        sys.exit(1)
Example #14
def injector():
    conf = cfg.ConfigOpts()
    conf.register_cli_opts([
        cfg.IntOpt("metrics", default=None),
        cfg.IntOpt("batch-of-measures", default=1000),
        cfg.IntOpt("measures-per-batch", default=10),
    ])
    conf = service.prepare_service(conf=conf)
    index = indexer.get_driver(conf)
    index.connect()
    s = storage.get_driver(conf)

    metrics = index.list_metrics()
    if conf.metrics:
        metrics = metrics[:conf.metrics]

    def todo(metric):
        for _ in six.moves.range(conf.batch_of_measures):
            measures = [
                storage.Measure(utils.to_timestamp(datetime.datetime.now()),
                                random.random())
                for __ in six.moves.range(conf.measures_per_batch)]
            s.add_measures(metric, measures)

    with futures.ThreadPoolExecutor(max_workers=len(metrics)) as executor:
        # We use 'list' to iterate all threads here to raise the first
        # exception now, not much choice
        list(executor.map(todo, metrics))
Example #15
    def setUpClass(self):
        super(TestCase, self).setUpClass()

        self.conf = service.prepare_service(
            [], conf=utils.prepare_conf(),
            default_config_files=[],
            logging_level=logging.DEBUG)

        if not os.getenv("GNOCCHI_TEST_DEBUG"):
            daiquiri.setup(outputs=[])

        py_root = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                               '..',))
        self.conf.set_override('paste_config',
                               os.path.join(py_root, 'rest', 'api-paste.ini'),
                               group="api")
        self.conf.set_override('policy_file',
                               os.path.join(py_root, 'rest', 'policy.json'),
                               group="oslo_policy")

        # NOTE(jd) This allows to test S3 on AWS
        if not os.getenv("AWS_ACCESS_KEY_ID"):
            self.conf.set_override('s3_endpoint_url',
                                   os.getenv("GNOCCHI_STORAGE_HTTP_URL"),
                                   group="storage")
            self.conf.set_override('s3_access_key_id', "gnocchi",
                                   group="storage")
            self.conf.set_override('s3_secret_access_key', "anythingworks",
                                   group="storage")

        self.index = indexer.get_driver(self.conf)

        self.coord = metricd.get_coordinator_and_start(
            str(uuid.uuid4()),
            self.conf.coordination_url)

        # NOTE(jd) So, some driver, at least SQLAlchemy, can't create all
        # their tables in a single transaction even with the
        # checkfirst=True, so what we do here is we force the upgrade code
        # path to be sequential to avoid race conditions as the tests run
        # in parallel.
        with self.coord.get_lock(b"gnocchi-tests-db-lock"):
            self.index.upgrade()

        self.archive_policies = self.ARCHIVE_POLICIES.copy()
        for name, ap in six.iteritems(self.archive_policies):
            # Create basic archive policies
            try:
                self.index.create_archive_policy(ap)
            except indexer.ArchivePolicyAlreadyExists:
                pass

        storage_driver = os.getenv("GNOCCHI_TEST_STORAGE_DRIVER", "file")
        self.conf.set_override('driver', storage_driver, 'storage')
        if storage_driver == 'ceph':
            self.conf.set_override('ceph_conffile',
                                   os.getenv("CEPH_CONF"),
                                   'storage')
Example #16
    def start_fixture(self):
        """Create necessary temp files and do the config dance."""

        global CONF

        data_tmp_dir = tempfile.mkdtemp(prefix='gnocchi')
        coordination_dir = os.path.join(data_tmp_dir, 'tooz')
        os.mkdir(coordination_dir)
        coordination_url = 'file://%s' % coordination_dir

        conf = service.prepare_service([])

        CONF = self.conf = conf
        self.tmp_dir = data_tmp_dir

        # Use the indexer set in the conf, unless we have set an
        # override via the environment.
        if 'GNOCCHI_TEST_INDEXER_URL' in os.environ:
            conf.set_override('url',
                              os.environ.get("GNOCCHI_TEST_INDEXER_URL"),
                              'indexer')

        # TODO(jd) It would be cool if Gabbi was able to use the null://
        # indexer, but this makes the API returns a lot of 501 error, which
        # Gabbi does not want to see, so let's just disable it.
        if conf.indexer.url is None or conf.indexer.url == "null://":
            raise case.SkipTest("No indexer configured")

        # Use the presence of DEVSTACK_GATE_TEMPEST as a semaphore
        # to signal we are not in a gate driven functional test
        # and thus should override conf settings.
        if 'DEVSTACK_GATE_TEMPEST' not in os.environ:
            conf.set_override('driver', 'file', 'storage')
            conf.set_override('coordination_url', coordination_url, 'storage')
            conf.set_override('policy_file',
                              os.path.abspath('etc/gnocchi/policy.json'),
                              group="oslo_policy")
            conf.set_override('file_basepath', data_tmp_dir, 'storage')

        # NOTE(jd) All of that is still very SQL centric but we only support
        # SQL for now so let's say it's good enough.
        url = sqlalchemy_url.make_url(conf.indexer.url)

        url.database = url.database + str(uuid.uuid4()).replace('-', '')
        db_url = str(url)
        conf.set_override('url', db_url, 'indexer')
        sqlalchemy_utils.create_database(db_url)

        index = indexer.get_driver(conf)
        index.connect()
        index.upgrade()

        conf.set_override('pecan_debug', False, 'api')

        # Turn off any middleware.
        conf.set_override('middlewares', [], 'api')

        self.index = index
Example #17
    def test_aggregation_methods(self):
        conf = service.prepare_service([],
                                       default_config_files=[],
                                       logging_level=logging.DEBUG,
                                       skip_log_opts=True)

        ap = archive_policy.ArchivePolicy("foobar",
                                          0,
                                          [],
                                          ["*"])
        self.assertEqual(
            archive_policy.ArchivePolicy.VALID_AGGREGATION_METHODS,
            ap.aggregation_methods)

        ap = archive_policy.ArchivePolicy("foobar",
                                          0,
                                          [],
                                          ["last"])
        self.assertEqual(
            set(["last"]),
            ap.aggregation_methods)

        ap = archive_policy.ArchivePolicy("foobar",
                                          0,
                                          [],
                                          ["*", "-mean"])
        self.assertEqual(
            (archive_policy.ArchivePolicy.VALID_AGGREGATION_METHODS
             - set(["mean"])),
            ap.aggregation_methods)

        ap = archive_policy.ArchivePolicy("foobar",
                                          0,
                                          [],
                                          ["-mean", "-last"])
        self.assertEqual(
            (set(conf.archive_policy.default_aggregation_methods)
             - set(["mean", "last"])),
            ap.aggregation_methods)

        ap = archive_policy.ArchivePolicy("foobar",
                                          0,
                                          [],
                                          ["+12pct"])
        self.assertEqual(
            (set(conf.archive_policy.default_aggregation_methods)
             .union(set(["12pct"]))),
            ap.aggregation_methods)

        ap = archive_policy.ArchivePolicy("foobar",
                                          0,
                                          [],
                                          ["+rate:last"])
        self.assertEqual(
            (set(conf.archive_policy.default_aggregation_methods)
             .union(set(["rate:last"]))),
            ap.aggregation_methods)
Example #18
def api():
    # Compat with previous pbr script
    try:
        double_dash = sys.argv.index("--")
    except ValueError:
        double_dash = None
    else:
        sys.argv.pop(double_dash)

    conf = cfg.ConfigOpts()
    for opt in app.API_OPTS:
        # NOTE(jd) Register the API options without a default, so they are only
        # used to override the one in the config file
        c = copy.copy(opt)
        c.default = None
        conf.register_cli_opt(c)
    conf = service.prepare_service(conf=conf)

    if double_dash is not None:
        # NOTE(jd) Wait to this stage to log so we're sure the logging system
        # is in place
        LOG.warning(
            "No need to pass `--' in gnocchi-api command line anymore, "
            "please remove")

    uwsgi = spawn.find_executable("uwsgi")
    if not uwsgi:
        LOG.error("Unable to find `uwsgi'.\n"
                  "Be sure it is installed and in $PATH.")
        return 1

    workers = utils.get_default_workers()

    return os.execl(
        uwsgi,
        uwsgi,
        "--http",
        "%s:%d" % (conf.host or conf.api.host, conf.port or conf.api.port),
        "--master",
        "--enable-threads",
        "--die-on-term",
        # NOTE(jd) See https://github.com/gnocchixyz/gnocchi/issues/156
        "--add-header",
        "Connection: close",
        "--processes",
        str(math.floor(workers * 1.5)),
        "--threads",
        str(workers),
        "--lazy-apps",
        "--chdir",
        "/",
        "--wsgi",
        "gnocchi.rest.wsgi",
        "--pyargv",
        " ".join(sys.argv[1:]),
    )
Example #19
    def __init__(self, url, count, conf):
        super(Amqp, self).__init__()
        self.url = url
        self.expected = count
        self.received = 0
        self.conf = conf
        self.incoming = incoming.get_driver(self.conf)
        self.indexer = indexer.get_driver(self.conf)
        self.gauges = {}
        self.counters = {}
        self.absolute = {}
        self.conf = service.prepare_service()
        self.peer_close_is_error = True
Example #20
def start():
    conf = service.prepare_service()
    if conf.amqp1d.resource_name is None:
        raise cfg.RequiredOptError("resource_name", cfg.OptGroup("amqp1d"))
    try:
        if conf.amqp1d.data_source == "collectd":
            stats = CollectdStats(conf)
            Container(AMQP1Server(conf, stats)).run()
        else:
            raise ValueError(
                "Unknown data source type '%s'" %
                conf.amqp1d.data_source)
    except KeyboardInterrupt:
        pass
Example #21
File: metricd.py Project: luo-zn/gnocchi
def metricd():
    conf = cfg.ConfigOpts()
    conf.register_cli_opts([
        cfg.IntOpt("stop-after-processing-metrics",
                   default=0,
                   min=0,
                   help="Number of metrics to process without workers, "
                   "for testing purpose"),
    ])
    conf = service.prepare_service(conf=conf)

    if conf.stop_after_processing_metrics:
        metricd_tester(conf)
    else:
        MetricdServiceManager(conf).run()
Example #22
File: metricd.py Project: rabi/gnocchi
def metricd():
    conf = cfg.ConfigOpts()
    conf.register_cli_opts([
        cfg.IntOpt("stop-after-processing-metrics",
                   default=0,
                   min=0,
                   help="Number of metrics to process without workers, "
                   "for testing purpose"),
    ])
    conf = service.prepare_service(conf=conf)

    if conf.stop_after_processing_metrics:
        metricd_tester(conf)
    else:
        MetricdServiceManager(conf).run()
Example #23
File: api.py Project: luo-zn/gnocchi
def prepare_service(conf=None):
    if conf is None:
        conf = cfg.ConfigOpts()

    opts.set_defaults()
    policy_opts.set_defaults(conf)
    conf = service.prepare_service(conf=conf)
    cfg_path = conf.oslo_policy.policy_file
    if not os.path.isabs(cfg_path):
        cfg_path = conf.find_file(cfg_path)
    if cfg_path is None or not os.path.exists(cfg_path):
        cfg_path = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                   '..', 'rest', 'policy.json'))
    conf.set_default('policy_file', cfg_path, group='oslo_policy')
    return conf
Example #24
def prepare_service(conf=None):
    if conf is None:
        conf = cfg.ConfigOpts()

    opts.set_defaults()
    policy_opts.set_defaults(conf)
    conf = service.prepare_service(conf=conf)
    cfg_path = conf.oslo_policy.policy_file
    if not os.path.isabs(cfg_path):
        cfg_path = conf.find_file(cfg_path)
    if cfg_path is None or not os.path.exists(cfg_path):
        cfg_path = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                   '..', 'rest', 'policy.json'))
    conf.set_default('policy_file', cfg_path, group='oslo_policy')
    return conf
Example #25
    def test_aggregation_methods(self):
        conf = service.prepare_service([])

        ap = archive_policy.ArchivePolicy("foobar",
                                          0,
                                          [],
                                          ["*"])
        self.assertEqual(
            archive_policy.ArchivePolicy.VALID_AGGREGATION_METHODS,
            ap.aggregation_methods)

        ap = archive_policy.ArchivePolicy("foobar",
                                          0,
                                          [],
                                          ["last"])
        self.assertEqual(
            set(["last"]),
            ap.aggregation_methods)

        ap = archive_policy.ArchivePolicy("foobar",
                                          0,
                                          [],
                                          ["*", "-mean"])
        self.assertEqual(
            (archive_policy.ArchivePolicy.VALID_AGGREGATION_METHODS
             - set(["mean"])),
            ap.aggregation_methods)

        ap = archive_policy.ArchivePolicy("foobar",
                                          0,
                                          [],
                                          ["-mean", "-last"])
        self.assertEqual(
            (set(conf.archive_policy.default_aggregation_methods)
             - set(["mean", "last"])),
            ap.aggregation_methods)

        ap = archive_policy.ArchivePolicy("foobar",
                                          0,
                                          [],
                                          ["+12pct"])
        self.assertEqual(
            (set(conf.archive_policy.default_aggregation_methods)
             .union(set(["12pct"]))),
            ap.aggregation_methods)
Example #26
File: cli.py Project: fabian4/gnocchi
def upgrade():
    conf = cfg.ConfigOpts()
    conf.register_cli_opts([
        cfg.BoolOpt("skip-index", default=False,
                    help="Skip index upgrade."),
        cfg.BoolOpt("skip-storage", default=False,
                    help="Skip storage upgrade.")
    ])
    conf = service.prepare_service(conf=conf)
    if not conf.skip_index:
        index = indexer.get_driver(conf)
        index.connect()
        LOG.info("Upgrading indexer %s" % index)
        index.upgrade()
    if not conf.skip_storage:
        s = storage.get_driver(conf)
        LOG.info("Upgrading storage %s" % s)
        s.upgrade(index)
Example #27
File: manage.py Project: sum12/gnocchi
def upgrade():
    conf = cfg.ConfigOpts()
    sack_number_opt = copy.copy(_SACK_NUMBER_OPT)
    sack_number_opt.default = 128
    conf.register_cli_opts([
        cfg.BoolOpt("skip-index", default=False, help="Skip index upgrade."),
        cfg.BoolOpt("skip-storage",
                    default=False,
                    help="Skip storage upgrade."),
        cfg.BoolOpt("skip-incoming",
                    default=False,
                    help="Skip incoming storage upgrade."),
        cfg.BoolOpt("skip-archive-policies-creation",
                    default=False,
                    help="Skip default archive policies creation."),
        sack_number_opt,
    ])
    conf = service.prepare_service(conf=conf, log_to_std=True)
    if not conf.skip_index:
        index = indexer.get_driver(conf)
        LOG.info("Upgrading indexer %s", index)
        index.upgrade()
    if not conf.skip_storage:
        # FIXME(jd) Pass None as coordinator because it's not needed in this
        # case. This will be removed when the storage will stop requiring a
        # coordinator object.
        s = storage.get_driver(conf, None)
        LOG.info("Upgrading storage %s", s)
        s.upgrade()
    if not conf.skip_incoming:
        i = incoming.get_driver(conf)
        LOG.info("Upgrading incoming storage %s", i)
        i.upgrade(conf.sacks_number)

    if (not conf.skip_archive_policies_creation
            and not index.list_archive_policies()
            and not index.list_archive_policy_rules()):
        if conf.skip_index:
            index = indexer.get_driver(conf)
        for name, ap in six.iteritems(archive_policy.DEFAULT_ARCHIVE_POLICIES):
            index.create_archive_policy(ap)
        index.create_archive_policy_rule("default", "*", "low")
Example #28
def injector():
    conf = cfg.ConfigOpts()
    conf.register_cli_opts([
        cfg.IntOpt("--measures", help="Measures per metric."),
        cfg.IntOpt("--metrics", help="Number of metrics to create."),
        cfg.IntOpt("--archive-policy-name",
                   help="Name of archive policy to use.",
                   default="low"),
        cfg.IntOpt("--interval",
                   help="Interval to sleep between metrics sending."),
        cfg.BoolOpt("--process",
                    default=False,
                    help="Process the ingested measures."),
    ])
    return _inject(service.prepare_service(conf=conf, log_to_std=True),
                   metrics=conf.metrics,
                   measures=conf.measures,
                   archive_policy_name=conf.archive_policy_name,
                   process=conf.process,
                   interval=conf.interval)
Example #29
def change_sack_size():
    conf = cfg.ConfigOpts()
    conf.register_cli_opts([_SACK_NUMBER_OPT])
    conf = service.prepare_service(conf=conf, log_to_std=True)
    s = incoming.get_driver(conf)
    try:
        report = s.measures_report(details=False)
    except incoming.SackDetectionError:
        # issue is already logged by NUM_SACKS, abort.
        return
    remainder = report['summary']['measures']
    if remainder:
        LOG.error(
            'Cannot change sack when non-empty backlog. Process '
            'remaining %s measures and try again', remainder)
        return
    LOG.info("Changing sack size to: %s", conf.sacks_number)
    old_num_sacks = s.get_storage_sacks()
    s.set_storage_settings(conf.sacks_number)
    s.remove_sack_group(old_num_sacks)
Example #30
File: manage.py Project: luo-zn/gnocchi
def change_sack_size():
    conf = cfg.ConfigOpts()
    conf.register_cli_opts([_SACK_NUMBER_OPT])
    conf = service.prepare_service(conf=conf, log_to_std=True)
    s = incoming.get_driver(conf)
    try:
        report = s.measures_report(details=False)
    except incoming.SackDetectionError:
        LOG.error('Unable to detect the number of storage sacks.\n'
                  'Ensure gnocchi-upgrade has been executed.')
        return
    remainder = report['summary']['measures']
    if remainder:
        LOG.error('Cannot change sack when non-empty backlog. Process '
                  'remaining %s measures and try again', remainder)
        return
    LOG.info("Removing current %d sacks", s.NUM_SACKS)
    s.remove_sacks()
    LOG.info("Creating new %d sacks", conf.sacks_number)
    s.upgrade(conf.sacks_number)
Example #31
def start():
    conf = service.prepare_service()

    stats = Stats(conf)

    loop = asyncio.get_event_loop()
    # TODO(jd) Add TCP support
    listen = loop.create_datagram_endpoint(
        # TODO(jd) Add config options for host/port
        lambda: StatsdServer(stats), local_addr=('0.0.0.0', 8125))
    loop.call_later(conf.statsd.flush_delay, stats.flush)
    transport, protocol = loop.run_until_complete(listen)

    try:
        loop.run_forever()
    except KeyboardInterrupt:
        pass

    transport.close()
    loop.close()
Example #32
def upgrade():
    conf = cfg.ConfigOpts()
    sack_number_opt = copy.copy(_SACK_NUMBER_OPT)
    sack_number_opt.default = 128
    conf.register_cli_opts([
        cfg.BoolOpt("skip-index", default=False, help="Skip index upgrade."),
        cfg.BoolOpt("skip-storage",
                    default=False,
                    help="Skip storage upgrade."),
        cfg.BoolOpt("skip-incoming",
                    default=False,
                    help="Skip incoming storage upgrade."),
        cfg.BoolOpt("skip-archive-policies-creation",
                    default=False,
                    help="Skip default archive policies creation."),
        sack_number_opt,
    ])
    conf = service.prepare_service(conf=conf, log_to_std=True)
    if not conf.skip_index:
        index = indexer.get_driver(conf)
        index.connect()
        LOG.info("Upgrading indexer %s", index)
        index.upgrade()
    if not conf.skip_storage:
        s = storage.get_driver(conf)
        LOG.info("Upgrading storage %s", s)
        s.upgrade()
    if not conf.skip_incoming:
        i = incoming.get_driver(conf)
        LOG.info("Upgrading incoming storage %s", i)
        i.upgrade(conf.sacks_number)

    if (not conf.skip_archive_policies_creation
            and not index.list_archive_policies()
            and not index.list_archive_policy_rules()):
        if conf.skip_index:
            index = indexer.get_driver(conf)
            index.connect()
        for name, ap in six.iteritems(archive_policy.DEFAULT_ARCHIVE_POLICIES):
            index.create_archive_policy(ap)
        index.create_archive_policy_rule("default", "*", "low")
Example #33
def injector():
    conf = cfg.ConfigOpts()
    conf.register_cli_opts([
        cfg.IntOpt("--measures",
                   help="Measures per metric."),
        cfg.IntOpt("--metrics",
                   help="Number of metrics to create."),
        cfg.IntOpt("--archive-policy-name",
                   help="Name of archive policy to use.",
                   default="low"),
        cfg.IntOpt("--interval",
                   help="Interval to sleep between metrics sending."),
        cfg.BoolOpt("--process", default=False,
                    help="Process the ingested measures."),
    ])
    return _inject(service.prepare_service(conf=conf, log_to_std=True),
                   metrics=conf.metrics,
                   measures=conf.measures,
                   archive_policy_name=conf.archive_policy_name,
                   process=conf.process,
                   interval=conf.interval)
Example #34
def change_sack_size():
    conf = cfg.ConfigOpts()
    conf.register_cli_opts([_SACK_NUMBER_OPT])
    conf = service.prepare_service(conf=conf, log_to_std=True)
    s = incoming.get_driver(conf)
    try:
        report = s.measures_report(details=False)
    except incoming.SackDetectionError:
        LOG.error('Unable to detect the number of storage sacks.\n'
                  'Ensure gnocchi-upgrade has been executed.')
        return
    remainder = report['summary']['measures']
    if remainder:
        LOG.error(
            'Cannot change sack when non-empty backlog. Process '
            'remaining %s measures and try again', remainder)
        return
    LOG.info("Removing current %d sacks", s.NUM_SACKS)
    s.remove_sacks()
    LOG.info("Creating new %d sacks", conf.sacks_number)
    s.upgrade(conf.sacks_number)
Example #35
File: base.py Project: shushen/gnocchi
    def setUpClass(self):
        super(TestCase, self).setUpClass()
        self.conf = service.prepare_service([],
                                            default_config_files=[])
        self.conf.set_override('policy_file',
                               self.path_get('etc/gnocchi/policy.json'),
                               group="oslo_policy")

        self.index = indexer.get_driver(self.conf)
        self.index.connect()

        # NOTE(jd) So, some driver, at least SQLAlchemy, can't create all
        # their tables in a single transaction even with the
        # checkfirst=True, so what we do here is we force the upgrade code
        # path to be sequential to avoid race conditions as the tests run
        # in parallel.
        self.coord = coordination.get_coordinator(
            self.conf.storage.coordination_url,
            str(uuid.uuid4()).encode('ascii'))

        self.coord.start(start_heart=True)

        with self.coord.get_lock(b"gnocchi-tests-db-lock"):
            self.index.upgrade()

        self.coord.stop()

        self.archive_policies = self.ARCHIVE_POLICIES.copy()
        self.archive_policies.update(archive_policy.DEFAULT_ARCHIVE_POLICIES)
        for name, ap in six.iteritems(self.archive_policies):
            # Create basic archive policies
            try:
                self.index.create_archive_policy(ap)
            except indexer.ArchivePolicyAlreadyExists:
                pass

        self.conf.set_override(
            'driver',
            os.getenv("GNOCCHI_TEST_STORAGE_DRIVER", "null"),
            'storage')
Example #36
File: manage.py Project: luo-zn/gnocchi
def upgrade():
    conf = cfg.ConfigOpts()
    sack_number_opt = copy.copy(_SACK_NUMBER_OPT)
    sack_number_opt.default = 128
    conf.register_cli_opts([
        cfg.BoolOpt("skip-index", default=False,
                    help="Skip index upgrade."),
        cfg.BoolOpt("skip-storage", default=False,
                    help="Skip storage upgrade."),
        cfg.BoolOpt("skip-incoming", default=False,
                    help="Skip incoming storage upgrade."),
        cfg.BoolOpt("skip-archive-policies-creation", default=False,
                    help="Skip default archive policies creation."),
        sack_number_opt,
    ])
    conf = service.prepare_service(conf=conf, log_to_std=True)
    if not conf.skip_index:
        index = indexer.get_driver(conf)
        LOG.info("Upgrading indexer %s", index)
        index.upgrade()
    if not conf.skip_storage:
        s = storage.get_driver(conf)
        LOG.info("Upgrading storage %s", s)
        s.upgrade()
    if not conf.skip_incoming:
        i = incoming.get_driver(conf)
        LOG.info("Upgrading incoming storage %s", i)
        i.upgrade(conf.sacks_number)

    if (not conf.skip_archive_policies_creation
            and not index.list_archive_policies()
            and not index.list_archive_policy_rules()):
        if conf.skip_index:
            index = indexer.get_driver(conf)
        for name, ap in six.iteritems(archive_policy.DEFAULT_ARCHIVE_POLICIES):
            index.create_archive_policy(ap)
        index.create_archive_policy_rule("default", "*", "low")
Example #37
def storage_dbsync():
    service.prepare_service()
    indexer = sql_db.SQLAlchemyIndexer(cfg.CONF)
    indexer.upgrade()
Example #38
def api():
    service.prepare_service()
    app.build_server()
Example #39
def build_server():
    conf = service.prepare_service()
    serving.run_simple(conf.api.host,
                       conf.api.port,
                       WerkzeugApp(conf),
                       processes=conf.api.workers)
Example #40
File: cli.py Project: shushen/gnocchi
def metricd():
    conf = service.prepare_service()
    MetricdServiceManager(conf).run()
Example #41
    def start_fixture(self):
        """Create necessary temp files and do the config dance."""
        global LOAD_APP_KWARGS

        if not os.getenv("GNOCCHI_TEST_DEBUG"):
            self.output = base.CaptureOutput()
            self.output.setUp()

        data_tmp_dir = tempfile.mkdtemp(prefix='gnocchi')

        if os.getenv("GABBI_LIVE"):
            dcf = None
        else:
            dcf = []
        conf = service.prepare_service([],
                                       conf=utils.prepare_conf(),
                                       default_config_files=dcf,
                                       logging_level=logging.DEBUG,
                                       skip_log_opts=True)

        py_root = os.path.abspath(
            os.path.join(
                os.path.dirname(__file__),
                '..',
                '..',
            ))
        conf.set_override('paste_config',
                          os.path.join(py_root, 'rest', 'api-paste.ini'),
                          group="api")
        conf.set_override('policy_file',
                          os.path.join(py_root, 'rest', 'policy.yaml'),
                          group="oslo_policy")

        # NOTE(sileht): This is not concurrency safe, but only this test file
        # deals with CORS, so we are fine. set_override doesn't work because
        # the cors group doesn't exist yet, and when the CORS middleware is
        # created it registers the option and directly copies the values of
        # all configuration options, making it impossible to override them
        # properly...
        cfg.set_defaults(cors.CORS_OPTS, allowed_origin="http://foobar.com")

        self.conf = conf
        self.tmp_dir = data_tmp_dir

        if conf.indexer.url is None:
            raise case.SkipTest("No indexer configured")

        storage_driver = os.getenv("GNOCCHI_TEST_STORAGE_DRIVER", "file")

        conf.set_override('driver', storage_driver, 'storage')
        if conf.storage.driver == 'file':
            conf.set_override('file_basepath', data_tmp_dir, 'storage')
        elif conf.storage.driver == 'ceph':
            conf.set_override('ceph_conffile', os.getenv("CEPH_CONF"),
                              'storage')
            self.ceph_pool_name = uuid.uuid4().hex
            with open(os.devnull, 'w') as f:
                subprocess.call(("ceph -c %s osd pool create %s "
                                 "16 16 replicated") %
                                (os.getenv("CEPH_CONF"), self.ceph_pool_name),
                                shell=True,
                                stdout=f,
                                stderr=subprocess.STDOUT)
                subprocess.call(("ceph -c %s osd pool application "
                                 "enable %s rbd") %
                                (os.getenv("CEPH_CONF"), self.ceph_pool_name),
                                shell=True,
                                stdout=f,
                                stderr=subprocess.STDOUT)
            conf.set_override('ceph_pool', self.ceph_pool_name, 'storage')
        elif conf.storage.driver == "s3":
            conf.set_override('s3_endpoint_url',
                              os.getenv("GNOCCHI_STORAGE_HTTP_URL"),
                              group="storage")
            conf.set_override('s3_access_key_id', "gnocchi", group="storage")
            conf.set_override('s3_secret_access_key',
                              "anythingworks",
                              group="storage")
            conf.set_override("s3_bucket_prefix",
                              str(uuid.uuid4())[:26], "storage")
        elif conf.storage.driver == "swift":
            # NOTE(sileht): This fixture must start before any driver stuff
            swift_fixture = fixtures.MockPatch('swiftclient.client.Connection',
                                               base.FakeSwiftClient)
            swift_fixture.setUp()

        # NOTE(jd) All of that is still very SQL centric but we only support
        # SQL for now so let's say it's good enough.
        conf.set_override(
            'url',
            sqlalchemy.SQLAlchemyIndexer._create_new_database(
                conf.indexer.url), 'indexer')

        index = indexer.get_driver(conf)
        index.upgrade()

        # Set pagination to a testable value
        conf.set_override('max_limit', 7, 'api')

        conf.set_override('enable_proxy_headers_parsing', True, group="api")

        self.index = index

        self.coord = metricd.get_coordinator_and_start(str(uuid.uuid4()),
                                                       conf.coordination_url)
        s = storage.get_driver(conf)
        i = incoming.get_driver(conf)

        if conf.storage.driver == 'redis':
            # Create one prefix per test
            s.STORAGE_PREFIX = str(uuid.uuid4()).encode()

        if conf.incoming.driver == 'redis':
            i.SACK_NAME_FORMAT = (str(uuid.uuid4()) +
                                  incoming.IncomingDriver.SACK_NAME_FORMAT)

        self.fixtures = [
            fixtures.MockPatch("gnocchi.storage.get_driver", return_value=s),
            fixtures.MockPatch("gnocchi.incoming.get_driver", return_value=i),
            fixtures.MockPatch("gnocchi.indexer.get_driver",
                               return_value=self.index),
            fixtures.MockPatch("gnocchi.cli.metricd.get_coordinator_and_start",
                               return_value=self.coord),
        ]
        for f in self.fixtures:
            f.setUp()

        if conf.storage.driver == 'swift':
            self.fixtures.append(swift_fixture)

        LOAD_APP_KWARGS = {
            'conf': conf,
        }

        s.upgrade()
        i.upgrade(128)

        # start up a thread to async process measures
        self.metricd_thread = MetricdThread(chef.Chef(self.coord, i, index, s))
        self.metricd_thread.start()
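MetricdThread itself is not shown in any of these snippets; purely as an illustration of the "start a daemon thread that keeps processing measures in the background" idea, a hypothetical helper could look like the following (none of these names come from the project).

import threading
import time


class BackgroundProcessor(threading.Thread):
    """Hypothetical stand-in for the MetricdThread used by the fixture."""

    def __init__(self, process_once, interval=0.1):
        super(BackgroundProcessor, self).__init__(daemon=True)
        self._process_once = process_once  # callable doing one processing pass
        self._interval = interval
        self._stop_event = threading.Event()

    def run(self):
        while not self._stop_event.is_set():
            self._process_once()
            time.sleep(self._interval)

    def stop(self):
        self._stop_event.set()
        self.join()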
Example #42
File: app.py Project: cug-heshun/gnocchi
def build_wsgi_app():
    return load_app(service.prepare_service())
Example #43
File: app.py Project: NeCTAR-RC/gnocchi
def app_factory(global_config, **local_conf):
    cfg = service.prepare_service()
    return setup_app(None, cfg=cfg)
Example #44
File: base.py Project: fabian4/gnocchi
    def setUp(self):
        super(TestCase, self).setUp()
        self.conf = service.prepare_service([],
                                            default_config_files=[])
        self.conf.set_override('policy_file',
                               self.path_get('etc/gnocchi/policy.json'),
                               group="oslo_policy")

        self.index = indexer.get_driver(self.conf)
        self.index.connect()

        # NOTE(jd) So, some driver, at least SQLAlchemy, can't create all
        # their tables in a single transaction even with the
        # checkfirst=True, so what we do here is we force the upgrade code
        # path to be sequential to avoid race conditions as the tests run
        # in parallel.
        self.coord = coordination.get_coordinator(
            self.conf.storage.coordination_url,
            str(uuid.uuid4()).encode('ascii'))

        self.coord.start()

        with self.coord.get_lock(b"gnocchi-tests-db-lock"):
            # Force upgrading using Alembic rather than creating the
            # database from scratch so we are sure we don't miss anything
            # in the Alembic upgrades. We have a test to check that
            # upgrades == create but it misses things such as custom CHECK
            # constraints.
            self.index.upgrade(nocreate=True)

        self.coord.stop()

        self.archive_policies = self.ARCHIVE_POLICIES
        # Used in gnocchi.gendoc
        if not getattr(self, "skip_archive_policies_creation", False):
            for name, ap in six.iteritems(self.ARCHIVE_POLICIES):
                # Create basic archive policies
                try:
                    self.index.create_archive_policy(ap)
                except indexer.ArchivePolicyAlreadyExists:
                    pass

        if swexc:
            self.useFixture(mockpatch.Patch(
                'swiftclient.client.Connection',
                FakeSwiftClient))

        self.useFixture(mockpatch.Patch('gnocchi.storage.ceph.rados',
                                        FakeRadosModule()))

        self.conf.set_override(
            'driver',
            os.getenv("GNOCCHI_TEST_STORAGE_DRIVER", "null"),
            'storage')

        if self.conf.storage.driver == 'file':
            tempdir = self.useFixture(fixtures.TempDir())
            self.conf.set_override('file_basepath',
                                   tempdir.path,
                                   'storage')
        elif self.conf.storage.driver == 'influxdb':
            self.conf.set_override('influxdb_block_until_data_ingested', True,
                                   'storage')
            self.conf.set_override('influxdb_database', 'test', 'storage')
            self.conf.set_override('influxdb_password', 'root', 'storage')
            self.conf.set_override('influxdb_port',
                                   os.getenv("GNOCCHI_TEST_INFLUXDB_PORT",
                                             51234), 'storage')
            # NOTE(ityaptin) Creating a unique database for every test may
            # cause tests to fail by timeout, but it may be useful in some
            # cases
            if os.getenv("GNOCCHI_TEST_INFLUXDB_UNIQUE_DATABASES"):
                self.conf.set_override("influxdb_database",
                                       "gnocchi_%s" % uuid.uuid4().hex,
                                       'storage')

        self.storage = storage.get_driver(self.conf)
        # NOTE(jd) Do not upgrade the storage. We don't really need the storage
        # upgrade for now, and the code that upgrade from pre-1.3
        # (TimeSerieArchive) uses a lot of parallel lock, which makes tooz
        # explodes because MySQL does not support that many connections in real
        # life.
        # self.storage.upgrade(self.index)

        self.mgr = extension.ExtensionManager('gnocchi.aggregates',
                                              invoke_on_load=True)
        self.custom_agg = dict((x.name, x.obj) for x in self.mgr)
Example #45
    def setUp(self):
        super(TestCase, self).setUp()
        self.conf = service.prepare_service([])
        self.conf.set_override('policy_file',
                               self.path_get('etc/gnocchi/policy.json'),
                               group="oslo_policy")

        self.conf.set_override(
            'url',
            os.environ.get("GNOCCHI_TEST_INDEXER_URL", "null://"),
            'indexer')

        self.index = indexer.get_driver(self.conf)
        self.index.connect()

        self.conf.set_override('coordination_url',
                               os.getenv("GNOCCHI_COORDINATION_URL", "ipc://"),
                               'storage')

        # NOTE(jd) So, some driver, at least SQLAlchemy, can't create all
        # their tables in a single transaction even with the
        # checkfirst=True, so what we do here is we force the upgrade code
        # path to be sequential to avoid race conditions as the tests run
        # in parallel.
        self.coord = coordination.get_coordinator(
            os.getenv("GNOCCHI_COORDINATION_URL", "ipc://"),
            str(uuid.uuid4()).encode('ascii'))

        with self.coord.get_lock(b"gnocchi-tests-db-lock"):
            # Force upgrading using Alembic rather than creating the
            # database from scratch so we are sure we don't miss anything
            # in the Alembic upgrades. We have a test to check that
            # upgrades == create but it misses things such as custom CHECK
            # constraints.
            self.index.upgrade(nocreate=True)

        self.archive_policies = self.ARCHIVE_POLICIES
        # Used in gnocchi.gendoc
        if not getattr(self, "skip_archive_policies_creation", False):
            for name, ap in six.iteritems(self.ARCHIVE_POLICIES):
                # Create basic archive policies
                try:
                    self.index.create_archive_policy(ap)
                except indexer.ArchivePolicyAlreadyExists:
                    pass

        self.useFixture(mockpatch.Patch(
            'swiftclient.client.Connection',
            FakeSwiftClient))

        self.useFixture(mockpatch.Patch('gnocchi.storage.ceph.rados',
                                        FakeRadosModule()))

        self.conf.set_override(
            'driver',
            os.getenv("GNOCCHI_TEST_STORAGE_DRIVER", "null"),
            'storage')

        if self.conf.storage.driver == 'file':
            tempdir = self.useFixture(fixtures.TempDir())
            self.conf.set_override('file_basepath',
                                   tempdir.path,
                                   'storage')

        self.storage = storage.get_driver(self.conf)

        self.mgr = extension.ExtensionManager('gnocchi.aggregates',
                                              invoke_on_load=True)
        self.custom_agg = dict((x.name, x.obj) for x in self.mgr)
Example #46
    def start_fixture(self):
        """Create necessary temp files and do the config dance."""

        global LOAD_APP_KWARGS

        data_tmp_dir = tempfile.mkdtemp(prefix='gnocchi')

        if os.getenv("GABBI_LIVE"):
            dcf = None
        else:
            dcf = []
        conf = service.prepare_service([], default_config_files=dcf)

        conf.set_override('paste_config',
                          os.path.abspath('etc/gnocchi/api-paste.ini'), 'api')

        self.conf = conf
        self.tmp_dir = data_tmp_dir

        # TODO(jd) It would be cool if Gabbi was able to use the null://
        # indexer, but this makes the API returns a lot of 501 error, which
        # Gabbi does not want to see, so let's just disable it.
        if conf.indexer.url is None or conf.indexer.url == "null://":
            raise case.SkipTest("No indexer configured")

        # Use the presence of DEVSTACK_GATE_TEMPEST as a semaphore
        # to signal we are not in a gate driven functional test
        # and thus should override conf settings.
        if 'DEVSTACK_GATE_TEMPEST' not in os.environ:
            conf.set_override('driver', 'file', 'storage')
            conf.set_override('policy_file',
                              os.path.abspath('etc/gnocchi/policy.json'),
                              group="oslo_policy")
            conf.set_override('file_basepath', data_tmp_dir, 'storage')

        # NOTE(jd) All of that is still very SQL centric but we only support
        # SQL for now so let's say it's good enough.
        url = sqlalchemy_url.make_url(conf.indexer.url)

        url.database = url.database + str(uuid.uuid4()).replace('-', '')
        db_url = str(url)
        conf.set_override('url', db_url, 'indexer')
        sqlalchemy_utils.create_database(db_url)

        index = indexer.get_driver(conf)
        index.connect()
        index.upgrade()

        conf.set_override('pecan_debug', False, 'api')

        # Set pagination to a testable value
        conf.set_override('max_limit', 7, 'api')

        self.index = index

        s = storage.get_driver(conf)
        s.upgrade(index)

        LOAD_APP_KWARGS = {
            'appname': 'gnocchi+noauth',
            'storage': s,
            'indexer': index,
            'conf': conf,
        }

        # start up a thread to async process measures
        self.metricd_thread = MetricdThread(index, s)
        self.metricd_thread.start()
Example #47
File: app.py Project: shushen/gnocchi
def build_wsgi_app():
    return load_app(service.prepare_service())
Example #48
def run_migrations_online():
    """Run migrations in 'online' mode.

    In this scenario we need to create an Engine
    and associate a connection with the context.

    """
    conf = config.conf
    indexer = sqlalchemy.SQLAlchemyIndexer(conf)
    with indexer.facade.writer_connection() as connectable:

        with connectable.connect() as connection:
            context.configure(connection=connection,
                              target_metadata=target_metadata)

            with context.begin_transaction():
                context.run_migrations()

    indexer.disconnect()


# If `alembic' was used directly from the CLI
if not hasattr(config, "conf"):
    from gnocchi import service
    config.conf = service.prepare_service([])

if context.is_offline_mode():
    run_migrations_offline()
else:
    run_migrations_online()
Example #49
File: cli.py Project: calvin67/gnocchi
def storage_dbsync():
    conf = service.prepare_service()
    indexer = sql_db.SQLAlchemyIndexer(conf)
    indexer.connect()
    indexer.upgrade()
Example #50
File: base.py Project: luo-zn/gnocchi
    def setUp(self):
        super(TestCase, self).setUp()

        self.conf = service.prepare_service(
            [], conf=utils.prepare_conf(),
            default_config_files=[],
            logging_level=logging.DEBUG,
            skip_log_opts=True)

        self.index = indexer.get_driver(self.conf)

        self.coord = metricd.get_coordinator_and_start(
            str(uuid.uuid4()),
            self.conf.coordination_url)

        # NOTE(jd) Some drivers, at least SQLAlchemy, can't create all
        # their tables in a single transaction even with checkfirst=True,
        # so we force the upgrade code path to be sequential to avoid race
        # conditions as the tests run in parallel.
        with self.coord.get_lock(b"gnocchi-tests-db-lock"):
            self.index.upgrade()

        self.archive_policies = self.ARCHIVE_POLICIES.copy()
        for name, ap in six.iteritems(self.archive_policies):
            # Create basic archive policies
            try:
                self.index.create_archive_policy(ap)
            except indexer.ArchivePolicyAlreadyExists:
                pass

        py_root = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                               '..',))
        self.conf.set_override('paste_config',
                               os.path.join(py_root, 'rest', 'api-paste.ini'),
                               group="api")
        self.conf.set_override('policy_file',
                               os.path.join(py_root, 'rest', 'policy.json'),
                               group="oslo_policy")

        # NOTE(jd) This allows testing S3 on AWS
        if not os.getenv("AWS_ACCESS_KEY_ID"):
            self.conf.set_override('s3_endpoint_url',
                                   os.getenv("GNOCCHI_STORAGE_HTTP_URL"),
                                   group="storage")
            self.conf.set_override('s3_access_key_id', "gnocchi",
                                   group="storage")
            self.conf.set_override('s3_secret_access_key', "anythingworks",
                                   group="storage")

        storage_driver = os.getenv("GNOCCHI_TEST_STORAGE_DRIVER", "file")
        self.conf.set_override('driver', storage_driver, 'storage')

        if swexc:
            self.useFixture(fixtures.MockPatch(
                'swiftclient.client.Connection',
                FakeSwiftClient))

        if self.conf.storage.driver == 'file':
            tempdir = self.useFixture(fixtures.TempDir())
            self.conf.set_override('file_basepath',
                                   tempdir.path,
                                   'storage')
        elif self.conf.storage.driver == 'ceph':
            self.conf.set_override('ceph_conffile',
                                   os.getenv("CEPH_CONF"),
                                   'storage')
            pool_name = uuid.uuid4().hex
            with open(os.devnull, 'w') as f:
                subprocess.call("rados -c %s mkpool %s" % (
                    os.getenv("CEPH_CONF"), pool_name), shell=True,
                    stdout=f, stderr=subprocess.STDOUT)
            self.conf.set_override('ceph_pool', pool_name, 'storage')

        # Override the bucket prefix to be unique to avoid concurrent access
        # with any other test
        self.conf.set_override("s3_bucket_prefix", str(uuid.uuid4())[:26],
                               "storage")

        self.storage = storage.get_driver(self.conf)
        self.incoming = incoming.get_driver(self.conf)

        if self.conf.storage.driver == 'redis':
            # Create one prefix per test
            self.storage.STORAGE_PREFIX = str(uuid.uuid4()).encode()

        if self.conf.incoming.driver == 'redis':
            self.incoming.SACK_NAME_FORMAT = (
                str(uuid.uuid4()) + incoming.IncomingDriver.SACK_NAME_FORMAT
            )

        self.storage.upgrade()
        self.incoming.upgrade(3)
        self.chef = chef.Chef(
            self.coord, self.incoming, self.index, self.storage)
Example #51
    def start_fixture(self):
        """Create necessary temp files and do the config dance."""
        global LOAD_APP_KWARGS

        if not os.getenv("GNOCCHI_TEST_DEBUG"):
            self.output = base.CaptureOutput()
            self.output.setUp()

        data_tmp_dir = tempfile.mkdtemp(prefix='gnocchi')

        if os.getenv("GABBI_LIVE"):
            dcf = None
        else:
            dcf = []
        conf = service.prepare_service([],
                                       conf=utils.prepare_conf(),
                                       default_config_files=dcf)
        if not os.getenv("GNOCCHI_TEST_DEBUG"):
            daiquiri.setup(outputs=[])

        py_root = os.path.abspath(
            os.path.join(
                os.path.dirname(__file__),
                '..',
                '..',
            ))
        conf.set_override('paste_config',
                          os.path.join(py_root, 'rest', 'api-paste.ini'),
                          group="api")
        conf.set_override('policy_file',
                          os.path.join(py_root, 'rest', 'policy.json'),
                          group="oslo_policy")

        # NOTE(sileht): This is not concurrency-safe, but only this test
        # file deals with CORS, so we are fine. set_override() does not work
        # because the cors group does not exist yet, and when the CORS
        # middleware is created it registers the option and directly copies
        # the value of all configuration options, making it impossible to
        # override them properly...
        cfg.set_defaults(cors.CORS_OPTS, allowed_origin="http://foobar.com")

        self.conf = conf
        self.tmp_dir = data_tmp_dir

        if conf.indexer.url is None:
            raise case.SkipTest("No indexer configured")

        conf.set_override('driver', 'file', 'storage')
        conf.set_override('file_basepath', data_tmp_dir, 'storage')

        # NOTE(jd) All of that is still very SQL centric but we only support
        # SQL for now so let's say it's good enough.
        conf.set_override(
            'url',
            sqlalchemy.SQLAlchemyIndexer._create_new_database(
                conf.indexer.url), 'indexer')

        index = indexer.get_driver(conf)
        index.upgrade()

        # Set pagination to a testable value
        conf.set_override('max_limit', 7, 'api')

        self.index = index

        self.coord = metricd.get_coordinator_and_start(str(uuid.uuid4()),
                                                       conf.coordination_url)
        s = storage.get_driver(conf, self.coord)
        s.upgrade()
        i = incoming.get_driver(conf)
        i.upgrade(128)

        self.fixtures = [
            fixtures.MockPatch("gnocchi.storage.get_driver", return_value=s),
            fixtures.MockPatch("gnocchi.incoming.get_driver", return_value=i),
            fixtures.MockPatch("gnocchi.indexer.get_driver",
                               return_value=self.index),
            fixtures.MockPatch("gnocchi.cli.metricd.get_coordinator_and_start",
                               return_value=self.coord),
        ]
        for f in self.fixtures:
            f.setUp()

        LOAD_APP_KWARGS = {
            'conf': conf,
        }

        # start up a thread to process measures asynchronously
        self.metricd_thread = MetricdThread(index, s, i)
        self.metricd_thread.start()
Example #52
File: app.py Project: cernops/gnocchi
def build_server():
    conf = service.prepare_service()
    serving.run_simple(conf.api.host, conf.api.port,
                       WerkzeugApp(conf),
                       processes=conf.api.workers)
Example #53
    def setUp(self):
        super(TestCase, self).setUp()
        self.conf = service.prepare_service([],
                                            default_config_files=[])
        self.conf.set_override('policy_file',
                               self.path_get('etc/gnocchi/policy.json'),
                               group="oslo_policy")

        self.index = indexer.get_driver(self.conf)
        self.index.connect()

        # NOTE(jd) Some drivers, at least SQLAlchemy, can't create all
        # their tables in a single transaction even with checkfirst=True,
        # so we force the upgrade code path to be sequential to avoid race
        # conditions as the tests run in parallel.
        self.coord = coordination.get_coordinator(
            self.conf.storage.coordination_url,
            str(uuid.uuid4()).encode('ascii'))

        self.coord.start()

        with self.coord.get_lock(b"gnocchi-tests-db-lock"):
            # Force upgrading using Alembic rather than creating the
            # database from scratch so we are sure we don't miss anything
            # in the Alembic upgrades. We have a test to check that
            # upgrades == create but it misses things such as custom CHECK
            # constraints.
            self.index.upgrade(nocreate=True)

        self.coord.stop()

        self.archive_policies = self.ARCHIVE_POLICIES.copy()
        self.archive_policies.update(archive_policy.DEFAULT_ARCHIVE_POLICIES)
        # Used in gnocchi.gendoc
        if not getattr(self, "skip_archive_policies_creation", False):
            for name, ap in six.iteritems(self.archive_policies):
                # Create basic archive policies
                try:
                    self.index.create_archive_policy(ap)
                except indexer.ArchivePolicyAlreadyExists:
                    pass

        if swexc:
            self.useFixture(mockpatch.Patch(
                'swiftclient.client.Connection',
                FakeSwiftClient))

        self.useFixture(mockpatch.Patch('gnocchi.storage.ceph.rados',
                                        FakeRadosModule()))

        self.conf.set_override(
            'driver',
            os.getenv("GNOCCHI_TEST_STORAGE_DRIVER", "null"),
            'storage')

        if self.conf.storage.driver == 'file':
            tempdir = self.useFixture(fixtures.TempDir())
            self.conf.set_override('file_basepath',
                                   tempdir.path,
                                   'storage')
        elif self.conf.storage.driver == 'influxdb':
            self.conf.set_override('influxdb_block_until_data_ingested', True,
                                   'storage')
            self.conf.set_override('influxdb_database', 'test', 'storage')
            self.conf.set_override('influxdb_password', 'root', 'storage')
            self.conf.set_override('influxdb_port',
                                   os.getenv("GNOCCHI_TEST_INFLUXDB_PORT",
                                             51234), 'storage')
            # NOTE(ityaptin) Creating a unique database for every test may
            # cause tests to fail with a timeout, but it may be useful in
            # some cases
            if os.getenv("GNOCCHI_TEST_INFLUXDB_UNIQUE_DATABASES"):
                self.conf.set_override("influxdb_database",
                                       "gnocchi_%s" % uuid.uuid4().hex,
                                       'storage')

        self.storage = storage.get_driver(self.conf)
        # NOTE(jd) Do not upgrade the storage. We don't really need the
        # storage upgrade for now, and the code that upgrades from pre-1.3
        # (TimeSerieArchive) uses a lot of parallel locks, which makes tooz
        # explode because MySQL does not support that many connections in
        # real life.
        # self.storage.upgrade(self.index)

        self.mgr = extension.ExtensionManager('gnocchi.aggregates',
                                              invoke_on_load=True)
        self.custom_agg = dict((x.name, x.obj) for x in self.mgr)
Example #54
File: cli.py Project: shushen/gnocchi
def metricd():
    conf = service.prepare_service()
    MetricdServiceManager(conf).run()
Example #55
        if self.conf.amqpd.archive_policy_name:
            return self.conf.amqpd.archive_policy_name
        # NOTE(sileht): We deliberately don't catch NoArchivePolicyRuleMatch
        # so that it gets logged
        ap = self.indexer.get_archive_policy_for_metric(metric_name)
        return ap.name


parser = optparse.OptionParser(usage="usage: %prog [options]")
parser.add_option(
    "-a",
    "--address",
    default="localhost:5672/examples",
    help="address from which messages are received (default %default)")
parser.add_option(
    "-m",
    "--messages",
    type="int",
    default=100,
    help="number of messages to receive; 0 receives indefinitely "
         "(default %default)")

# Parse the command-line options defined above; `opts` is used below.
opts, args = parser.parse_args()

conf = service.prepare_service()
# if conf.statsd.resource_id is None:
#     raise cfg.RequiredOptError("resource_id", cfg.OptGroup("amqpd"))

try:
    Container(Amqp(opts.address, opts.messages, conf)).run()
except KeyboardInterrupt:
    pass
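
As a standalone illustration of how the two options defined above behave, the snippet below re-creates them and parses an explicit argument list instead of sys.argv; the address and message count are placeholder values and the snippet is independent of gnocchi.

import optparse

parser = optparse.OptionParser(usage="usage: %prog [options]")
parser.add_option("-a", "--address", default="localhost:5672/examples",
                  help="address from which messages are received "
                       "(default %default)")
parser.add_option("-m", "--messages", type="int", default=100,
                  help="number of messages to receive; 0 receives "
                       "indefinitely (default %default)")

# Parse an explicit argument list rather than sys.argv for the illustration.
opts, args = parser.parse_args(["--address", "localhost:5672/metrics",
                                "--messages", "0"])
print(opts.address)   # localhost:5672/metrics
print(opts.messages)  # 0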
Example #56
    def start_fixture(self):
        """Create necessary temp files and do the config dance."""

        self.output = output.CaptureOutput()
        self.output.setUp()
        self.log = log.ConfigureLogging()
        self.log.setUp()

        global LOAD_APP_KWARGS

        data_tmp_dir = tempfile.mkdtemp(prefix='gnocchi')

        if os.getenv("GABBI_LIVE"):
            dcf = None
        else:
            dcf = []
        conf = service.prepare_service([],
                                       default_config_files=dcf)
        py_root = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                               '..', '..',))
        conf.set_override('paste_config',
                          os.path.join(py_root, 'rest', 'api-paste.ini'),
                          group="api")
        conf.set_override('policy_file',
                          os.path.join(py_root, 'rest', 'policy.json'),
                          group="oslo_policy")

        # NOTE(sileht): This is not concurrency-safe, but only this test
        # file deals with CORS, so we are fine. set_override() does not work
        # because the cors group does not exist yet, and when the CORS
        # middleware is created it registers the option and directly copies
        # the value of all configuration options, making it impossible to
        # override them properly...
        cfg.set_defaults(cors.CORS_OPTS, allowed_origin="http://foobar.com")

        self.conf = conf
        self.tmp_dir = data_tmp_dir

        if conf.indexer.url is None:
            raise case.SkipTest("No indexer configured")

        # Use the absence of DEVSTACK_GATE_TEMPEST as a signal that we are
        # not in a gate-driven functional test and thus should override
        # conf settings.
        if 'DEVSTACK_GATE_TEMPEST' not in os.environ:
            conf.set_override('driver', 'file', 'storage')
            conf.set_override('file_basepath', data_tmp_dir, 'storage')

        # NOTE(jd) All of that is still very SQL centric but we only support
        # SQL for now so let's say it's good enough.
        conf.set_override(
            'url',
            sqlalchemy.SQLAlchemyIndexer._create_new_database(
                conf.indexer.url),
            'indexer')

        index = indexer.get_driver(conf)
        index.connect()
        index.upgrade()

        # Set pagination to a testable value
        conf.set_override('max_limit', 7, 'api')
        # These tests use noauth mode
        # TODO(jd) Rewrite them for basic
        conf.set_override("auth_mode", "noauth", 'api')

        self.index = index

        s = storage.get_driver(conf)
        s.upgrade(index)

        LOAD_APP_KWARGS = {
            'storage': s,
            'indexer': index,
            'conf': conf,
        }

        # start up a thread to process measures asynchronously
        self.metricd_thread = MetricdThread(index, s)
        self.metricd_thread.start()
Example #57
    def start_fixture(self):
        """Create necessary temp files and do the config dance."""

        global LOAD_APP_KWARGS

        data_tmp_dir = tempfile.mkdtemp(prefix='gnocchi')

        if os.getenv("GABBI_LIVE"):
            dcf = None
        else:
            dcf = []
        conf = service.prepare_service([],
                                       default_config_files=dcf)

        conf.set_override('paste_config',
                          os.path.abspath('etc/gnocchi/api-paste.ini'),
                          'api')

        self.conf = conf
        self.tmp_dir = data_tmp_dir

        # TODO(jd) It would be cool if Gabbi was able to use the null://
        # indexer, but this makes the API return a lot of 501 errors, which
        # Gabbi does not want to see, so let's just disable it.
        if conf.indexer.url is None or conf.indexer.url == "null://":
            raise case.SkipTest("No indexer configured")

        # Use the absence of DEVSTACK_GATE_TEMPEST as a signal that we are
        # not in a gate-driven functional test and thus should override
        # conf settings.
        if 'DEVSTACK_GATE_TEMPEST' not in os.environ:
            conf.set_override('driver', 'file', 'storage')
            conf.set_override('policy_file',
                              os.path.abspath('etc/gnocchi/policy.json'),
                              group="oslo_policy")
            conf.set_override('file_basepath', data_tmp_dir, 'storage')

        # NOTE(jd) All of that is still very SQL centric but we only support
        # SQL for now so let's say it's good enough.
        conf.set_override(
            'url',
            sqlalchemy.SQLAlchemyIndexer._create_new_database(
                conf.indexer.url),
            'indexer')

        index = indexer.get_driver(conf)
        index.connect()
        index.upgrade(create_legacy_resource_types=True)

        conf.set_override('pecan_debug', False, 'api')

        # Set pagination to a testable value
        conf.set_override('max_limit', 7, 'api')

        self.index = index

        s = storage.get_driver(conf)
        s.upgrade(index)

        LOAD_APP_KWARGS = {
            'appname': 'gnocchi+noauth',
            'storage': s,
            'indexer': index,
            'conf': conf,
        }

        # start up a thread to process measures asynchronously
        self.metricd_thread = MetricdThread(index, s)
        self.metricd_thread.start()
Example #58
    def setUp(self):
        super(TestCase, self).setUp()

        self.conf = service.prepare_service(
            [], conf=utils.prepare_conf(),
            default_config_files=[],
            logging_level=logging.DEBUG,
            skip_log_opts=True)

        self.index = indexer.get_driver(self.conf)

        self.coord = metricd.get_coordinator_and_start(
            str(uuid.uuid4()),
            self.conf.coordination_url)

        # NOTE(jd) Some drivers, at least SQLAlchemy, can't create all
        # their tables in a single transaction even with checkfirst=True,
        # so we force the upgrade code path to be sequential to avoid race
        # conditions as the tests run in parallel.
        with self.coord.get_lock(b"gnocchi-tests-db-lock"):
            self.index.upgrade()

        self.archive_policies = self.ARCHIVE_POLICIES.copy()
        for name, ap in six.iteritems(self.archive_policies):
            # Create basic archive policies
            try:
                self.index.create_archive_policy(ap)
            except indexer.ArchivePolicyAlreadyExists:
                pass

        py_root = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                               '..',))
        self.conf.set_override('paste_config',
                               os.path.join(py_root, 'rest', 'api-paste.ini'),
                               group="api")
        self.conf.set_override('policy_file',
                               os.path.join(py_root, 'rest', 'policy.yaml'),
                               group="oslo_policy")

        # NOTE(jd) This allows testing S3 on AWS
        if not os.getenv("AWS_ACCESS_KEY_ID"):
            self.conf.set_override('s3_endpoint_url',
                                   os.getenv("GNOCCHI_STORAGE_HTTP_URL"),
                                   group="storage")
            self.conf.set_override('s3_access_key_id', "gnocchi",
                                   group="storage")
            self.conf.set_override('s3_secret_access_key', "anythingworks",
                                   group="storage")

        storage_driver = os.getenv("GNOCCHI_TEST_STORAGE_DRIVER", "file")
        self.conf.set_override('driver', storage_driver, 'storage')

        if swexc:
            self.useFixture(fixtures.MockPatch(
                'swiftclient.client.Connection',
                FakeSwiftClient))

        if self.conf.storage.driver == 'file':
            tempdir = self.useFixture(fixtures.TempDir())
            self.conf.set_override('file_basepath',
                                   tempdir.path,
                                   'storage')
        elif self.conf.storage.driver == 'ceph':
            self.conf.set_override('ceph_conffile',
                                   os.getenv("CEPH_CONF"),
                                   'storage')
            self.ceph_pool_name = uuid.uuid4().hex
            with open(os.devnull, 'w') as f:
                subprocess.call(("ceph -c %s osd pool create %s "
                                 "16 16 replicated") % (
                    os.getenv("CEPH_CONF"), self.ceph_pool_name), shell=True,
                    stdout=f, stderr=subprocess.STDOUT)
                subprocess.call(("ceph -c %s osd pool application "
                                 "enable %s rbd") % (
                    os.getenv("CEPH_CONF"), self.ceph_pool_name), shell=True,
                    stdout=f, stderr=subprocess.STDOUT)
            self.conf.set_override('ceph_pool', self.ceph_pool_name, 'storage')

        # Override the bucket prefix to be unique to avoid concurrent access
        # with any other test
        self.conf.set_override("s3_bucket_prefix", str(uuid.uuid4())[:26],
                               "storage")

        self.storage = storage.get_driver(self.conf)
        self.incoming = incoming.get_driver(self.conf)

        if self.conf.storage.driver == 'redis':
            # Create one prefix per test
            self.storage.STORAGE_PREFIX = str(uuid.uuid4()).encode()

        if self.conf.incoming.driver == 'redis':
            self.incoming.SACK_NAME_FORMAT = (
                str(uuid.uuid4()) + incoming.IncomingDriver.SACK_NAME_FORMAT
            )

        self.storage.upgrade()
        self.incoming.upgrade(3)
        self.chef = chef.Chef(
            self.coord, self.incoming, self.index, self.storage)
Example #59
def run_migrations_online():
    """Run migrations in 'online' mode.

    In this scenario we need to create an Engine
    and associate a connection with the context.

    """
    conf = config.conf
    indexer = sqlalchemy.SQLAlchemyIndexer(conf)
    indexer.connect()
    connectable = indexer.engine_facade.get_engine()

    with connectable.connect() as connection:
        context.configure(
            connection=connection,
            target_metadata=target_metadata
        )

        with context.begin_transaction():
            context.run_migrations()

# If `alembic' was used directly from the CLI
if not hasattr(config, "conf"):
    from gnocchi import service
    config.conf = service.prepare_service([])

if context.is_offline_mode():
    run_migrations_offline()
else:
    run_migrations_online()