Example #1
def setup_app(pecan_config=None, extra_hooks=None):
    # FIXME: Replace DBHook with a hooks.TransactionHook
    app_hooks = [
        hooks.ConfigHook(),
        hooks.DBHook(
            storage.get_connection_from_config(cfg.CONF, 'metering'),
            storage.get_connection_from_config(cfg.CONF, 'event'),
            storage.get_connection_from_config(cfg.CONF, 'alarm'),
        ),
        hooks.PipelineHook(),
        hooks.TranslationHook()
    ]
    if extra_hooks:
        app_hooks.extend(extra_hooks)

    if not pecan_config:
        pecan_config = get_pecan_config()

    pecan.configuration.set_config(dict(pecan_config), overwrite=True)

    cfg.set_defaults(API_OPTS, pecan_debug=CONF.debug)

    app = pecan.make_app(pecan_config.app.root,
                         debug=CONF.api.pecan_debug,
                         force_canonical=getattr(pecan_config.app,
                                                 'force_canonical', True),
                         hooks=app_hooks,
                         wrap_app=middleware.ParsableErrorMiddleware,
                         guess_content_type_from_ext=False)

    return app
Example #2
def setup_app(pecan_config=None, extra_hooks=None):
    # FIXME: Replace DBHook with a hooks.TransactionHook
    app_hooks = [hooks.ConfigHook(),
                 hooks.DBHook(
                     storage.get_connection_from_config(cfg.CONF, 'metering'),
                     storage.get_connection_from_config(cfg.CONF, 'event'),
                     storage.get_connection_from_config(cfg.CONF, 'alarm'),),
                 hooks.PipelineHook(),
                 hooks.TranslationHook()]
    if extra_hooks:
        app_hooks.extend(extra_hooks)

    if not pecan_config:
        pecan_config = get_pecan_config()

    pecan.configuration.set_config(dict(pecan_config), overwrite=True)

    # NOTE(sileht): pecan debug won't work in multi-process environment
    pecan_debug = CONF.api.pecan_debug
    if service.get_workers('api') != 1 and pecan_debug:
        pecan_debug = False
        LOG.warning(_LW('pecan_debug cannot be enabled, if workers is > 1, '
                        'the value is overrided with False'))

    app = pecan.make_app(
        pecan_config.app.root,
        debug=pecan_debug,
        force_canonical=getattr(pecan_config.app, 'force_canonical', True),
        hooks=app_hooks,
        wrap_app=middleware.ParsableErrorMiddleware,
        guess_content_type_from_ext=False
    )

    return app
Example #3
def upgrade():
    conf = cfg.ConfigOpts()
    conf.register_cli_opts([
        cfg.BoolOpt('skip-metering-database',
                    help='Skip metering database upgrade.',
                    default=False),
        cfg.BoolOpt('skip-event-database',
                    help='Skip event database upgrade.',
                    default=False),
        cfg.BoolOpt('skip-gnocchi-resource-types',
                    help='Skip gnocchi resource-types upgrade.',
                    default=False),
    ])

    service.prepare_service(conf=conf)
    if conf.skip_metering_database:
        LOG.info("Skipping metering database upgrade")
    else:
        LOG.debug("Upgrading metering database")
        storage.get_connection_from_config(conf, 'metering').upgrade()

    if conf.skip_event_database:
        LOG.info("Skipping event database upgrade")
    else:
        LOG.debug("Upgrading event database")
        storage.get_connection_from_config(conf, 'event').upgrade()

    if conf.skip_gnocchi_resource_types:
        LOG.info("Skipping Gnocchi resource types upgrade")
    else:
        LOG.debug("Upgrading Gnocchi resource types")
        from ceilometer import gnocchi_client
        gnocchi_client.upgrade_resource_types(conf)
Example #4
def upgrade():
    conf = cfg.ConfigOpts()
    conf.register_cli_opts([
        cfg.BoolOpt('skip-metering-database',
                    help='Skip metering database upgrade.',
                    default=False),
        cfg.BoolOpt('skip-event-database',
                    help='Skip event database upgrade.',
                    default=False),
        cfg.BoolOpt('skip-gnocchi-resource-types',
                    help='Skip gnocchi resource-types upgrade.',
                    default=False),
    ])

    service.prepare_service(conf=conf)
    if conf.skip_metering_database:
        LOG.info("Skipping metering database upgrade")
    else:
        LOG.debug("Upgrading metering database")
        storage.get_connection_from_config(conf, 'metering').upgrade()

    if conf.skip_event_database:
        LOG.info("Skipping event database upgrade")
    else:
        LOG.debug("Upgrading event database")
        storage.get_connection_from_config(conf, 'event').upgrade()

    if conf.skip_gnocchi_resource_types:
        LOG.info("Skipping Gnocchi resource types upgrade")
    else:
        LOG.debug("Upgrading Gnocchi resource types")
        from ceilometer import gnocchi_client
        gnocchi_client.upgrade_resource_types(conf)
Example #5
def setup_app(pecan_config=None, extra_hooks=None):
    # FIXME: Replace DBHook with a hooks.TransactionHook
    app_hooks = [hooks.ConfigHook(),
                 hooks.DBHook(
                     storage.get_connection_from_config(cfg.CONF, 'metering'),
                     storage.get_connection_from_config(cfg.CONF, 'event'),
                     storage.get_connection_from_config(cfg.CONF, 'alarm'),),
                 hooks.PipelineHook(),
                 hooks.TranslationHook()]
    if extra_hooks:
        app_hooks.extend(extra_hooks)

    if not pecan_config:
        pecan_config = get_pecan_config()

    pecan.configuration.set_config(dict(pecan_config), overwrite=True)

    app = pecan.make_app(
        pecan_config.app.root,
        static_root=pecan_config.app.static_root,
        template_path=pecan_config.app.template_path,
        debug=CONF.api.pecan_debug,
        force_canonical=getattr(pecan_config.app, 'force_canonical', True),
        hooks=app_hooks,
        wrap_app=middleware.ParsableErrorMiddleware,
        guess_content_type_from_ext=False
    )

    return app
Example #6
def expirer():
    service.prepare_service()

    if cfg.CONF.database.metering_time_to_live > 0:
        LOG.debug(_("Clearing expired metering data"))
        storage_conn = storage.get_connection_from_config(cfg.CONF, 'metering')
        storage_conn.clear_expired_metering_data(
            cfg.CONF.database.metering_time_to_live)
    else:
        LOG.info(_LI("Nothing to clean, database metering time to live "
                     "is disabled"))

    if cfg.CONF.database.event_time_to_live > 0:
        LOG.debug(_("Clearing expired event data"))
        event_conn = storage.get_connection_from_config(cfg.CONF, 'event')
        event_conn.clear_expired_event_data(
            cfg.CONF.database.event_time_to_live)
    else:
        LOG.info(_LI("Nothing to clean, database event time to live "
                     "is disabled"))

    if cfg.CONF.database.alarm_history_time_to_live > 0:
        LOG.debug("Clearing expired alarm history data")
        storage_conn = storage.get_connection_from_config(cfg.CONF, 'alarm')
        storage_conn.clear_expired_alarm_history_data(
            cfg.CONF.database.alarm_history_time_to_live)
    else:
        LOG.info(_LI("Nothing to clean, database alarm history time to live "
                     "is disabled"))
Example #7
def expirer():
    service.prepare_service()

    if cfg.CONF.database.metering_time_to_live > 0:
        LOG.debug("Clearing expired metering data")
        storage_conn = storage.get_connection_from_config(cfg.CONF, 'metering')
        storage_conn.clear_expired_metering_data(
            cfg.CONF.database.metering_time_to_live)
    else:
        LOG.info(
            _LI("Nothing to clean, database metering time to live "
                "is disabled"))

    if cfg.CONF.database.event_time_to_live > 0:
        LOG.debug("Clearing expired event data")
        event_conn = storage.get_connection_from_config(cfg.CONF, 'event')
        event_conn.clear_expired_event_data(
            cfg.CONF.database.event_time_to_live)
    else:
        LOG.info(
            _LI("Nothing to clean, database event time to live "
                "is disabled"))

    if cfg.CONF.database.alarm_history_time_to_live > 0:
        LOG.debug("Clearing expired alarm history data")
        storage_conn = storage.get_connection_from_config(cfg.CONF, 'alarm')
        storage_conn.clear_expired_alarm_history_data(
            cfg.CONF.database.alarm_history_time_to_live)
    else:
        LOG.info(
            _LI("Nothing to clean, database alarm history time to live "
                "is disabled"))
Example #8
 def test_sqlalchemy_driver(self):
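     # With an sqlite+pysqlite:// URL, both the default purpose and the
     # explicit 'metering' purpose resolve to the SQLAlchemy backend.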
     self.CONF.set_override("connection", "sqlite+pysqlite://",
                            group="database")
     conn = storage.get_connection_from_config(self.CONF)
     self.assertIsInstance(conn, impl_sqlalchemy.Connection)
     conn = storage.get_connection_from_config(self.CONF, 'metering')
     self.assertIsInstance(conn, impl_sqlalchemy.Connection)
Example #9
 def test_only_default_url(self):
     self.CONF.set_override("connection", "log://", group="database")
     conn = storage.get_connection_from_config(self.CONF)
     self.assertIsInstance(conn, impl_log.Connection)
     conn = storage.get_connection_from_config(self.CONF, 'metering')
     self.assertIsInstance(conn, impl_log.Connection)
     conn = storage.get_connection_from_config(self.CONF, 'alarm')
     self.assertIsInstance(conn, impl_log_alarm.Connection)
Example #10
 def test_sqlalchemy_driver(self):
     self.CONF.set_override("connection",
                            "sqlite+pysqlite://",
                            group="database")
     conn = storage.get_connection_from_config(self.CONF)
     self.assertIsInstance(conn, impl_sqlalchemy.Connection)
     conn = storage.get_connection_from_config(self.CONF, 'metering')
     self.assertIsInstance(conn, impl_sqlalchemy.Connection)
Example #11
 def test_only_default_url(self):
     self.CONF.set_override("connection", "log://", group="database")
     conn = storage.get_connection_from_config(self.CONF)
     self.assertIsInstance(conn, impl_log.Connection)
     conn = storage.get_connection_from_config(self.CONF, 'metering')
     self.assertIsInstance(conn, impl_log.Connection)
     conn = storage.get_connection_from_config(self.CONF, 'alarm')
     self.assertIsInstance(conn, impl_log_alarm.Connection)
Example #12
 def test_two_urls(self):
     self.CONF.set_override("connection", "log://", group="database")
     self.CONF.set_override("alarm_connection", "sqlite://",
                            group="database")
     conn = storage.get_connection_from_config(self.CONF)
     self.assertIsInstance(conn, impl_log.Connection)
     conn = storage.get_connection_from_config(self.CONF, 'metering')
     self.assertIsInstance(conn, impl_log.Connection)
     conn = storage.get_connection_from_config(self.CONF, 'alarm')
     self.assertIsInstance(conn, impl_sqlalchemy_alarm.Connection)
Example #13
 def test_retries(self):
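     # Point the default connection at an engine that cannot exist and
     # count how many times the retry logic asks for a new connection.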
     with mock.patch.object(storage, 'get_connection') as retries:
         try:
             self.CONF.set_override("connection", "no-such-engine://",
                                    group="database")
             self.CONF.set_override("retry_interval", 0.00001,
                                    group="database")
             storage.get_connection_from_config(self.CONF)
         except RuntimeError:
             self.assertEqual(10, retries.call_count)
Example #14
 def test_three_urls_no_default(self):
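     # With no default connection configured, the default purpose falls
     # back to the metering URL; the event purpose keeps its HBase backend.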
     self.CONF.set_override("connection", None, group="database")
     self.CONF.set_override("metering_connection", "log://",
                            group="database")
     self.CONF.set_override("event_connection", "hbase://__test__",
                            group="database")
     conn = storage.get_connection_from_config(self.CONF)
     self.assertIsInstance(conn, impl_log.Connection)
     conn = storage.get_connection_from_config(self.CONF, 'event')
     self.assertIsInstance(conn, impl_hbase_event.Connection)
Example #15
 def test_retries(self):
     with mock.patch.object(retrying.time, 'sleep') as retry_sleep:
         try:
             self.CONF.set_override("connection", "no-such-engine://",
                                    group="database")
             storage.get_connection_from_config(self.CONF)
         except RuntimeError as err:
             self.assertIn('no-such-engine', six.text_type(err))
             self.assertEqual(retry_sleep.call_count, 9)
             retry_sleep.assert_called_with(10.0)
Example #16
 def test_retries(self):
     with mock.patch.object(storage, 'get_connection') as retries:
         try:
             self.CONF.set_override("connection", "no-such-engine://",
                                    group="database")
             self.CONF.set_override("retry_interval", 0.00001,
                                    group="database")
             storage.get_connection_from_config(self.CONF)
         except RuntimeError:
             self.assertEqual(10, retries.call_count)
Example #17
 def test_retries(self):
     with mock.patch.object(retrying.time, 'sleep') as retry_sleep:
         try:
             self.CONF.set_override("connection", "no-such-engine://",
                                    group="database")
             storage.get_connection_from_config(self.CONF)
         except RuntimeError as err:
             self.assertIn('no-such-engine', six.text_type(err))
             self.assertEqual(9, retry_sleep.call_count)
             retry_sleep.assert_called_with(10.0)
Example #18
 def test_two_urls(self):
     self.CONF.set_override("connection", "log://", group="database")
     self.CONF.set_override("alarm_connection", "sqlite://",
                            group="database")
     conn = storage.get_connection_from_config(self.CONF)
     self.assertIsInstance(conn, impl_log.Connection)
     conn = storage.get_connection_from_config(self.CONF, 'metering')
     self.assertIsInstance(conn, impl_log.Connection)
     conn = storage.get_connection_from_config(self.CONF, 'alarm')
     self.assertIsInstance(conn, impl_sqlalchemy_alarm.Connection)
Example #19
 def test_three_urls(self):
     self.CONF.set_override("connection", "log://", group="database")
     self.CONF.set_override("event_connection",
                            "hbase://__test__",
                            group="database")
     conn = storage.get_connection_from_config(self.CONF)
     self.assertIsInstance(conn, impl_log.Connection)
     conn = storage.get_connection_from_config(self.CONF, 'metering')
     self.assertIsInstance(conn, impl_log.Connection)
     conn = storage.get_connection_from_config(self.CONF, 'event')
     self.assertIsInstance(conn, impl_hbase_event.Connection)
Example #20
 def test_retries(self):
     with mock.patch.object(
             retrying.Retrying, 'should_reject') as retry_reject:
         try:
             self.CONF.set_override("connection", "no-such-engine://",
                                    group="database")
             self.CONF.set_override("retry_interval", 0.00001,
                                    group="database")
             storage.get_connection_from_config(self.CONF)
         except RuntimeError as err:
             self.assertIn('no-such-engine', six.text_type(err))
             self.assertEqual(10, retry_reject.call_count)
Example #21
 def test_retries(self):
     with mock.patch.object(
             retrying.Retrying, 'should_reject') as retry_reject:
         try:
             self.CONF.set_override("connection", "no-such-engine://",
                                    group="database")
             self.CONF.set_override("retry_interval", 0.00001,
                                    group="database")
             storage.get_connection_from_config(self.CONF)
         except RuntimeError as err:
             self.assertIn('no-such-engine', six.text_type(err))
             self.assertEqual(10, retry_reject.call_count)
Example #22
 def test_three_urls_no_default(self):
     self.CONF.set_override("connection", None, group="database")
     self.CONF.set_override("metering_connection", "log://",
                            group="database")
     self.CONF.set_override("alarm_connection", "sqlite://",
                            group="database")
     self.CONF.set_override("event_connection", "hbase://__test__",
                            group="database")
     conn = storage.get_connection_from_config(self.CONF)
     self.assertIsInstance(conn, impl_log.Connection)
     conn = storage.get_connection_from_config(self.CONF, 'alarm')
     self.assertIsInstance(conn, impl_sqlalchemy_alarm.Connection)
     conn = storage.get_connection_from_config(self.CONF, 'event')
     self.assertIsInstance(conn, impl_hbase_event.Connection)
Example #23
 def get_connection(purpose):
     try:
         return storage.get_connection_from_config(cfg.CONF, purpose)
     except Exception as err:
         params = {"purpose": purpose, "err": err}
         LOG.exception(_LE("Failed to connect to db, purpose %(purpose)s "
                           "retry later: %(err)s") % params)
Example #24
def setup_app(pecan_config=None, extra_hooks=None):
    # FIXME: Replace DBHook with a hooks.TransactionHook
    app_hooks = [hooks.ConfigHook(),
                 hooks.DBHook(
                     storage.get_connection_from_config(cfg.CONF),
                 ),
                 hooks.PipelineHook(),
                 hooks.TranslationHook()]
    if extra_hooks:
        app_hooks.extend(extra_hooks)

    if not pecan_config:
        pecan_config = get_pecan_config()

    pecan.configuration.set_config(dict(pecan_config), overwrite=True)

    app = pecan.make_app(
        pecan_config.app.root,
        static_root=pecan_config.app.static_root,
        template_path=pecan_config.app.template_path,
        debug=CONF.debug,
        force_canonical=getattr(pecan_config.app, 'force_canonical', True),
        hooks=app_hooks,
        wrap_app=middleware.ParsableErrorMiddleware,
        guess_content_type_from_ext=False
    )

    return app
Example #25
    def start_fixture(self):
        """Create some samples."""
        conf = fixture_config.Config().conf
        self.conn = storage.get_connection_from_config(conf)
        timestamp = datetime.datetime.utcnow()
        project_id = str(uuid.uuid4())
        self.source = str(uuid.uuid4())
        resource_metadata = {'farmed_by': 'nancy'}

        for name in ['cow', 'pig', 'sheep']:
            resource_metadata.update({'breed': name}),
            c = sample.Sample(name='livestock',
                              type='gauge',
                              unit='head',
                              volume=int(10 * random.random()),
                              user_id='farmerjon',
                              project_id=project_id,
                              resource_id=project_id,
                              timestamp=timestamp,
                              resource_metadata=resource_metadata,
                              source=self.source,
                              )
            data = utils.meter_message_from_counter(
                c, conf.publisher.telemetry_secret)
            self.conn.record_metering_data(data)
Example #26
    def start_fixture(self):
        """Create some samples."""
        global LOAD_APP_KWARGS
        conf = LOAD_APP_KWARGS['conf']
        self.conn = storage.get_connection_from_config(conf)
        timestamp = datetime.datetime.utcnow()
        project_id = str(uuid.uuid4())
        self.source = str(uuid.uuid4())
        resource_metadata = {'farmed_by': 'nancy'}

        for name in ['cow', 'pig', 'sheep']:
            resource_metadata.update({'breed': name}),
            c = sample.Sample(name='livestock',
                              type='gauge',
                              unit='head',
                              volume=int(10 * random.random()),
                              user_id='farmerjon',
                              project_id=project_id,
                              resource_id=project_id,
                              timestamp=timestamp,
                              resource_metadata=resource_metadata,
                              source=self.source)
            data = utils.meter_message_from_counter(
                c, conf.publisher.telemetry_secret)
            self.conn.record_metering_data(data)
Example #27
    def start_fixture(self):
        """Create some samples."""
        conf = fixture_config.Config().conf
        self.conn = storage.get_connection_from_config(conf)
        timestamp = datetime.datetime.utcnow()
        project_id = str(uuid.uuid4())
        self.source = str(uuid.uuid4())
        resource_metadata = {"farmed_by": "nancy"}

        for name in ["cow", "pig", "sheep"]:
            resource_metadata.update({"breed": name}),
            c = sample.Sample(
                name="livestock",
                type="gauge",
                unit="head",
                volume=int(10 * random.random()),
                user_id="farmerjon",
                project_id=project_id,
                resource_id=project_id,
                timestamp=timestamp,
                resource_metadata=resource_metadata,
                source=self.source,
            )
            data = utils.meter_message_from_counter(c, conf.publisher.telemetry_secret)
            self.conn.record_metering_data(data)
Example #28
 def _get_db_conn(self, purpose, ignore_exception=False):
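     # Open a storage connection for the requested purpose; on failure the
     # error is logged and, unless ignore_exception is set, re-raised.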
     try:
         return storage.get_connection_from_config(self.conf, purpose)
     except Exception as err:
         params = {"purpose": purpose, "err": err}
         LOG.exception(_LE("Failed to connect to db, purpose %(purpose)s " "re-try later: %(err)s") % params)
         if not ignore_exception:
             raise
Example #29
def get_native_storage_conn(metering_connection):
    storage_conf = cfg.ConfigOpts()
    db_options.set_defaults(storage_conf)
    storage_conf.register_opts(storage.OPTS, 'database')
    storage_conf.set_override('metering_connection', metering_connection,
                              'database')
    storage_conn = storage.get_connection_from_config(storage_conf)
    return storage_conn
Example #30
def expirer():
    service.prepare_service()
    if cfg.CONF.database.time_to_live > 0:
        LOG.debug(_("Clearing expired metering data"))
        storage_conn = storage.get_connection_from_config(cfg.CONF)
        storage_conn.clear_expired_metering_data(
            cfg.CONF.database.time_to_live)
    else:
        LOG.info(_("Nothing to clean, database time to live is disabled"))
Example #31
 def _get_db_conn(self, purpose, ignore_exception=False):
     try:
         return storage.get_connection_from_config(self.conf, purpose)
     except Exception as err:
         params = {"purpose": purpose, "err": err}
         LOG.exception(_("Failed to connect to db, purpose %(purpose)s "
                         "re-try later: %(err)s") % params)
         if not ignore_exception:
             raise
Example #32
def storage_expirer():
    service.prepare_service()
    if cfg.CONF.database.time_to_live > 0:
        LOG.debug(_("Clearing expired metering data"))
        storage_conn = storage.get_connection_from_config(cfg.CONF)
        storage_conn.clear_expired_metering_data(
            cfg.CONF.database.time_to_live)
    else:
        LOG.info(_("Nothing to clean, database time to live is disabled"))
Example #33
def main(argv):
    extra_args = cfg.CONF(
        sys.argv[1:],
        # NOTE(dhellmann): Read the configuration file(s) for the
        # ceilometer collector by default.
        default_config_files=['/etc/ceilometer/ceilometer.conf'],
    )
    db = storage.get_connection_from_config(cfg.CONF)
    command = extra_args[0] if extra_args else 'help'
    COMMANDS[command](db, extra_args[1:])
Example #34
def main(argv):
    extra_args = cfg.CONF(
        sys.argv[1:],
        # NOTE(dhellmann): Read the configuration file(s) for the
        # ceilometer collector by default.
        default_config_files=['/etc/ceilometer/ceilometer.conf'],
    )
    db = storage.get_connection_from_config(cfg.CONF)
    command = extra_args[0] if extra_args else 'help'
    COMMANDS[command](db, extra_args[1:])
Example #35
def main():
    # Connect to the metering database
    cfg.CONF([], project='ceilometer')
    conn = storage.get_connection_from_config(cfg.CONF)
    print("Connection succeeded trying get_meters")
    res = conn.get_meters()
    print("1 Number of meters: %d" % sum(1 for i in res))
    res = conn.get_meters()
    print("2 Number of meters: %d" % sum(1 for i in res))
    res = conn.get_meters()
    print("3 Number of meters: %d" % sum(1 for i in res))
Example #36
def main():
    # Connect to the metering database
    cfg.CONF([], project='ceilometer')
    conn = storage.get_connection_from_config(cfg.CONF)
    print("Connection succeeded trying get_meters")
    res = conn.get_meters()
    print("1 Number of meters: %d" % sum(1 for i in res))
    res = conn.get_meters()
    print("2 Number of meters: %d" % sum(1 for i in res))
    res = conn.get_meters()
    print("3 Number of meters: %d" % sum(1 for i in res))
Example #37
def expirer():
    conf = service.prepare_service()

    if conf.database.metering_time_to_live > 0:
        LOG.debug("Clearing expired metering data")
        storage_conn = storage.get_connection_from_config(conf, 'metering')
        storage_conn.clear_expired_metering_data(
            conf.database.metering_time_to_live)
    else:
        LOG.info(_LI("Nothing to clean, database metering time to live "
                     "is disabled"))

    if conf.database.event_time_to_live > 0:
        LOG.debug("Clearing expired event data")
        event_conn = storage.get_connection_from_config(conf, 'event')
        event_conn.clear_expired_event_data(
            conf.database.event_time_to_live)
    else:
        LOG.info(_LI("Nothing to clean, database event time to live "
                     "is disabled"))
Example #38
def main():
    cfg.CONF([], project='ceilometer')

    parser = argparse.ArgumentParser(
        description='generate event data',
    )
    parser.add_argument(
        '--interval',
        default=10,
        type=int,
        help='The period between events, in minutes.',
    )
    parser.add_argument(
        '--start',
        default=31,
        type=int,
        help='The number of days in the past to start timestamps.',
    )
    parser.add_argument(
        '--end',
        default=2,
        type=int,
        help='The number of days into the future to continue timestamps.',
    )
    parser.add_argument(
        '--event_types',
        default=3,
        type=int,
        help='The number of unique event_types.',
    )
    args = parser.parse_args()

    # Set up logging to use the console
    console = logging.StreamHandler(sys.stderr)
    console.setLevel(logging.DEBUG)
    formatter = logging.Formatter('%(message)s')
    console.setFormatter(formatter)
    root_logger = logging.getLogger('')
    root_logger.addHandler(console)
    root_logger.setLevel(logging.DEBUG)

    # Connect to the event database
    conn = storage.get_connection_from_config(cfg.CONF, 'event')

    # Compute the correct time span
    start = datetime.datetime.utcnow() - datetime.timedelta(days=args.start)
    end = datetime.datetime.utcnow() + datetime.timedelta(days=args.end)

    make_test_data(conn=conn,
                   start=start,
                   end=end,
                   interval=args.interval,
                   event_types=args.event_types)
Example #39
def expirer():
    conf = service.prepare_service()

    if conf.database.metering_time_to_live > 0:
        LOG.debug("Clearing expired metering data")
        storage_conn = storage.get_connection_from_config(conf)
        storage_conn.clear_expired_metering_data(
            conf.database.metering_time_to_live)
    else:
        LOG.info("Nothing to clean, database metering time to live "
                 "is disabled")
Example #40
def main():
    cfg.CONF([], project='ceilometer')
    # Connect to the metering database
    conn = storage.get_connection_from_config(cfg.CONF)

    args = get_parser().parse_args()

    # Set up logging to use the console
    console = logging.StreamHandler(sys.stderr)
    console.setLevel(logging.DEBUG)
    formatter = logging.Formatter('%(message)s')
    console.setFormatter(formatter)
    root_logger = logging.getLogger('')
    root_logger.addHandler(console)
    root_logger.setLevel(logging.DEBUG)


    # Find the user and/or project for a real resource
    if not (args.user_id or args.project_id):
        for r in conn.get_resources():
            if r.resource_id == args.resource_id:
                args.user_id = r.user_id
                args.project_id = r.project_id
                break

    # Compute the correct time span
    format = '%Y-%m-%dT%H:%M:%S'

    try:
        start = datetime.datetime.utcnow() - datetime.timedelta(
            days=int(args.start))
    except ValueError:
        try:
            start = datetime.datetime.strptime(args.start, format)
        except ValueError:
            raise

    try:
        end = datetime.datetime.utcnow() + datetime.timedelta(
            days=int(args.end))
    except ValueError:
        try:
            end = datetime.datetime.strptime(args.end, format)
        except ValueError:
            raise
    args.start = start
    args.end = end

    args.resource_list = [str(uuid.uuid4()) for _ in xrange(100)]

    record_test_data(conn=conn, **args.__dict__)

    return 0
Example #41
def main():
    cfg.CONF([], project='ceilometer')

    parser = argparse.ArgumentParser(
        description='generate event data',
    )
    parser.add_argument(
        '--interval',
        default=10,
        type=int,
        help='The period between events, in minutes.',
    )
    parser.add_argument(
        '--start',
        default=31,
        type=int,
        help='The number of days in the past to start timestamps.',
    )
    parser.add_argument(
        '--end',
        default=2,
        type=int,
        help='The number of days into the future to continue timestamps.',
    )
    parser.add_argument(
        '--event_types',
        default=3,
        type=int,
        help='The number of unique event_types.',
    )
    args = parser.parse_args()

    # Set up logging to use the console
    console = logging.StreamHandler(sys.stderr)
    console.setLevel(logging.DEBUG)
    formatter = logging.Formatter('%(message)s')
    console.setFormatter(formatter)
    root_logger = logging.getLogger('')
    root_logger.addHandler(console)
    root_logger.setLevel(logging.DEBUG)

    # Connect to the event database
    conn = storage.get_connection_from_config(cfg.CONF, 'event')

    # Compute the correct time span
    start = datetime.datetime.utcnow() - datetime.timedelta(days=args.start)
    end = datetime.datetime.utcnow() + datetime.timedelta(days=args.end)

    make_test_data(conn=conn,
                   start=start,
                   end=end,
                   interval=args.interval,
                   event_types=args.event_types)
Example #42
def main():
    cfg.CONF([], project='ceilometer')
    # Connect to the metering database
    conn = storage.get_connection_from_config(cfg.CONF)

    args = get_parser().parse_args()

    # Set up logging to use the console
    console = logging.StreamHandler(sys.stderr)
    console.setLevel(logging.DEBUG)
    formatter = logging.Formatter('%(message)s')
    console.setFormatter(formatter)
    root_logger = logging.getLogger('')
    root_logger.addHandler(console)
    root_logger.setLevel(logging.DEBUG)

    # Find the user and/or project for a real resource
    if not (args.user_id or args.project_id):
        for r in conn.get_resources():
            if r.resource_id == args.resource_id:
                args.user_id = r.user_id
                args.project_id = r.project_id
                break

    # Compute the correct time span
    format = '%Y-%m-%dT%H:%M:%S'

    try:
        start = datetime.datetime.utcnow() - datetime.timedelta(
            days=int(args.start))
    except ValueError:
        try:
            start = datetime.datetime.strptime(args.start, format)
        except ValueError:
            raise

    try:
        end = datetime.datetime.utcnow() + datetime.timedelta(
            days=int(args.end))
    except ValueError:
        try:
            end = datetime.datetime.strptime(args.end, format)
        except ValueError:
            raise
    args.start = start
    args.end = end

    args.resource_list = [str(uuid.uuid4()) for _ in xrange(100)]

    record_test_data(conn=conn, **args.__dict__)

    return 0
Example #43
 def __init__(self, conf):
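     # Besides the Ceilometer storage connection, this dispatcher opens a
     # Redis connection configured through the redis_database option group.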
     super(RedisDispatcher, self).__init__(conf)
     self.storage_conn = storage.get_connection_from_config(conf)
     self.instance_map = {}
     self.host_map = {}
     print(redis.__file__)
     #self.redis_conn = redis.Redis(host='192.168.39.16',
     #                              port=6379, db=0, password='******')
     self.redis_conn = redis.Redis(
         host=conf.redis_database.redis_host,
         port=conf.redis_database.redis_port,
         db=conf.redis_database.redis_db,
         password=conf.redis_database.redis_password)
Example #44
def setup_app(pecan_config=None, extra_hooks=None):
    # FIXME: Replace DBHook with a hooks.TransactionHook
    app_hooks = [
        hooks.ConfigHook(),
        hooks.DBHook(
            storage.get_connection_from_config(cfg.CONF, 'metering'),
            storage.get_connection_from_config(cfg.CONF, 'event'),
            storage.get_connection_from_config(cfg.CONF, 'alarm'),
        ),
        hooks.PipelineHook(),
        hooks.TranslationHook()
    ]
    if extra_hooks:
        app_hooks.extend(extra_hooks)

    if not pecan_config:
        pecan_config = get_pecan_config()

    pecan.configuration.set_config(dict(pecan_config), overwrite=True)

    cfg.set_defaults(API_OPTS, pecan_debug=CONF.debug)

    # NOTE(sileht): pecan debug won't work in multi-process environment
    pecan_debug = CONF.api.pecan_debug
    if service.get_workers('api') != 1 and pecan_debug:
        pecan_debug = False
        LOG.warning(
            _LW('pecan_debug cannot be enabled, if workers is > 1, '
                'the value is overrided with False'))

    app = pecan.make_app(pecan_config.app.root,
                         debug=pecan_debug,
                         force_canonical=getattr(pecan_config.app,
                                                 'force_canonical', True),
                         hooks=app_hooks,
                         wrap_app=middleware.ParsableErrorMiddleware,
                         guess_content_type_from_ext=False)

    return app
Example #45
 def start_fixture(self):
     """Create some events."""
     conf = fixture_config.Config().conf
     self.conn = storage.get_connection_from_config(conf, "event")
     events = []
     name_list = ["chocolate.chip", "peanut.butter", "sugar"]
     for ix, name in enumerate(name_list):
         timestamp = datetime.datetime.utcnow()
         message_id = "fea1b15a-1d47-4175-85a5-a4bb2c72924{}".format(ix)
         traits = [models.Trait("type", 1, name), models.Trait("ate", 2, ix)]
         event = models.Event(
             message_id, "cookies_{}".format(name), timestamp, traits, {"nested": {"inside": "value"}}
         )
         events.append(event)
     self.conn.record_events(events)
Example #46
 def start_fixture(self):
     """Create some events."""
     conf = fixture_config.Config().conf
     self.conn = storage.get_connection_from_config(conf, 'event')
     events = []
     name_list = ['chocolate.chip', 'peanut.butter', 'sugar']
     for ix, name in enumerate(name_list):
         timestamp = datetime.datetime.utcnow()
         message_id = 'fea1b15a-1d47-4175-85a5-a4bb2c72924{}'.format(ix)
         traits = [models.Trait('type', 1, name),
                   models.Trait('ate', 2, ix)]
         event = models.Event(message_id,
                              'cookies_{}'.format(name),
                              timestamp,
                              traits, {'nested': {'inside': 'value'}})
         events.append(event)
     self.conn.record_events(events)
Example #47
 def start_fixture(self):
     """Create some events."""
     conf = fixture_config.Config().conf
     self.conn = storage.get_connection_from_config(conf, 'event')
     events = []
     name_list = ['chocolate.chip', 'peanut.butter', 'sugar']
     for ix, name in enumerate(name_list):
         timestamp = datetime.datetime.utcnow()
         message_id = 'fea1b15a-1d47-4175-85a5-a4bb2c72924{}'.format(ix)
         traits = [models.Trait('type', 1, name),
                   models.Trait('ate', 2, ix)]
         event = models.Event(message_id,
                              'cookies_{}'.format(name),
                              timestamp,
                              traits, {'nested': {'inside': 'value'}})
         events.append(event)
     self.conn.record_events(events)
Example #48
def main():
    cfg.CONF([], project='ceilometer')

    parser = argparse.ArgumentParser(
        description='generate event data',
    )
    parser.add_argument(
        '--interval',
        default=10,
        type=int,
        help='The period between events, in minutes.',
    )
    parser.add_argument(
        '--start',
        default=31,
        type=int,
        help='The number of days in the past to start timestamps.',
    )
    parser.add_argument(
        '--end',
        default=2,
        type=int,
        help='The number of days into the future to continue timestamps.',
    )
    parser.add_argument(
        '--event_types',
        default=3,
        type=int,
        help='The number of unique event_types.',
    )
    args = parser.parse_args()

    # Connect to the event database
    conn = storage.get_connection_from_config(cfg.CONF, 'event')

    # Compute the correct time span
    start = datetime.datetime.utcnow() - datetime.timedelta(days=args.start)
    end = datetime.datetime.utcnow() + datetime.timedelta(days=args.end)

    make_test_data(conn=conn,
                   start=start,
                   end=end,
                   interval=args.interval,
                   event_types=args.event_types)
Example #49
def main():
    cfg.CONF([], project='ceilometer')

    parser = argparse.ArgumentParser(description='generate event data', )
    parser.add_argument(
        '--interval',
        default=10,
        type=int,
        help='The period between events, in minutes.',
    )
    parser.add_argument(
        '--start',
        default=31,
        type=int,
        help='The number of days in the past to start timestamps.',
    )
    parser.add_argument(
        '--end',
        default=2,
        type=int,
        help='The number of days into the future to continue timestamps.',
    )
    parser.add_argument(
        '--event_types',
        default=3,
        type=int,
        help='The number of unique event_types.',
    )
    args = parser.parse_args()

    # Connect to the event database
    conn = storage.get_connection_from_config(cfg.CONF, 'event')

    # Compute the correct time span
    start = datetime.datetime.utcnow() - datetime.timedelta(days=args.start)
    end = datetime.datetime.utcnow() + datetime.timedelta(days=args.end)

    make_test_data(conn=conn,
                   start=start,
                   end=end,
                   interval=args.interval,
                   event_types=args.event_types)
Example #50
def main():

    args = get_parser().parse_args()
    conf = service.prepare_service([])

    # Connect to the metering database
    conn = storage.get_connection_from_config(conf)

    # Find the user and/or project for a real resource
    if not (args.user_id or args.project_id):
        for r in conn.get_resources():
            if r.resource_id == args.resource_id:
                args.user_id = r.user_id
                args.project_id = r.project_id
                break

    # Compute the correct time span
    format = '%Y-%m-%dT%H:%M:%S'

    try:
        start = datetime.datetime.utcnow() - datetime.timedelta(
            days=int(args.start))
    except ValueError:
        try:
            start = datetime.datetime.strptime(args.start, format)
        except ValueError:
            raise

    try:
        end = datetime.datetime.utcnow() + datetime.timedelta(
            days=int(args.end))
    except ValueError:
        try:
            end = datetime.datetime.strptime(args.end, format)
        except ValueError:
            raise
    args.start = start
    args.end = end
    record_test_data(conf, conn=conn, **args.__dict__)

    return 0
Example #51
def main():
    cfg.CONF([], project='ceilometer')

    args = get_parser().parse_args()

    # Connect to the metering database
    conn = storage.get_connection_from_config(cfg.CONF)

    # Find the user and/or project for a real resource
    if not (args.user_id or args.project_id):
        for r in conn.get_resources():
            if r.resource_id == args.resource_id:
                args.user_id = r.user_id
                args.project_id = r.project_id
                break

    # Compute the correct time span
    format = '%Y-%m-%dT%H:%M:%S'

    try:
        start = datetime.datetime.utcnow() - datetime.timedelta(
            days=int(args.start))
    except ValueError:
        try:
            start = datetime.datetime.strptime(args.start, format)
        except ValueError:
            raise

    try:
        end = datetime.datetime.utcnow() + datetime.timedelta(
            days=int(args.end))
    except ValueError:
        try:
            end = datetime.datetime.strptime(args.end, format)
        except ValueError:
            raise
    args.start = start
    args.end = end
    record_test_data(conn=conn, **args.__dict__)

    return 0
Example #52
 def test_two_urls(self):
     self.CONF.set_override("connection", "log://", group="database")
     conn = storage.get_connection_from_config(self.CONF)
     self.assertIsInstance(conn, impl_log.Connection)
Example #53
 def get_connection(conf):
     try:
         return storage.get_connection_from_config(conf)
     except Exception as err:
         LOG.exception(_LE("Failed to connect to db" "retry later: %s"),
                       err)
Example #54
def storage_dbsync():
    service.prepare_service()
    storage.get_connection_from_config(cfg.CONF).upgrade()
Example #55
 def stop_fixture(self):
     """Reset the config and remove data."""
     if self.conf:
         storage.get_connection_from_config(self.conf).clear()
         self.conf.reset()
Example #56
 def get_connection(conf):
     try:
         return storage.get_connection_from_config(conf)
     except Exception as err:
         LOG.exception("Failed to connect to db" "retry later: %s", err)
Example #57
 def test_two_urls(self):
     self.CONF.set_override("connection", "log://", group="database")
     conn = storage.get_connection_from_config(self.CONF)
     self.assertIsInstance(conn, impl_log.Connection)
Example #58
 def stop_fixture(self):
     """Reset the config and remove data."""
     if self.conf:
         storage.get_connection_from_config(self.conf).clear()
         self.conf.reset()
Example #59
 def conn(self):
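     # Lazily create the storage connection on first access and cache it
     # on the instance for subsequent calls.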
     if not hasattr(self, "_conn"):
         self._conn = storage.get_connection_from_config(
             self.conf)
     return self._conn