Example #1
    def setUp(self):
        super(DietTestCase, self).setUp()

        # FIXME(amuller): this must be called in the Neutron unit tests base
        # class. Moving this may cause non-deterministic failures. Bug #1489098
        # for more info.
        db_options.set_defaults(cfg.CONF, connection='sqlite://')

        # Configure this first to ensure pm debugging support for setUp()
        debugger = os.environ.get('OS_POST_MORTEM_DEBUGGER')
        if debugger:
            self.addOnException(post_mortem_debug.get_exception_handler(
                debugger))

        # Make sure we see all relevant deprecation warnings when running tests
        self.useFixture(tools.WarningsFixture())

        # NOTE(ihrachys): oslotest already sets stopall for cleanup, but it
        # does it using six.moves.mock (the library was moved into
        # unittest.mock in Python 3.4). So until we switch to six.moves.mock
        # everywhere in unit tests, we can't remove this setup. The base class
        # is used in third-party projects, so we would need to switch all of them to
        # six before removing the cleanup callback from here.
        self.addCleanup(mock.patch.stopall)

        self.addCleanup(self.reset_model_query_hooks)
        self.addCleanup(self.reset_resource_extend_functions)

        self.addOnException(self.check_for_systemexit)
        self.orig_pid = os.getpid()

        tools.reset_random_seed()
Example #2
    def _setup_database(self):
        sql_connection = 'sqlite:///%s/tests.sqlite' % self.test_dir
        options.set_defaults(CONF, connection=sql_connection)
        glance.db.sqlalchemy.api.clear_db_env()
        glance_db_env = 'GLANCE_DB_TEST_SQLITE_FILE'
        if glance_db_env in os.environ:
            # use the empty db created and cached as a tempfile
            # instead of spending the time creating a new one
            db_location = os.environ[glance_db_env]
            test_utils.execute('cp %s %s/tests.sqlite'
                               % (db_location, self.test_dir))
        else:
            migration.db_sync()

            # copy the clean db to a temp location so that it
            # can be reused for future tests
            (osf, db_location) = tempfile.mkstemp()
            os.close(osf)
            test_utils.execute('cp %s/tests.sqlite %s'
                               % (self.test_dir, db_location))
            os.environ[glance_db_env] = db_location

            # clean up the temp file when the test suite is
            # complete
            def _delete_cached_db():
                try:
                    os.remove(os.environ[glance_db_env])
                except Exception:
                    glance_tests.logger.exception(
                        "Error cleaning up the file %s" %
                        os.environ[glance_db_env])
            atexit.register(_delete_cached_db)
Example #3
    def setUp(self):
        super(IsolatedUnitTest, self).setUp()
        options.set_defaults(CONF, connection='sqlite://',
                             sqlite_db='aflo.sqlite')
        lockutils.set_defaults(os.path.join(self.test_dir))

        self.config(debug=False)
Example #4
def prepare_service(argv=None, config_files=None, share=False):
    conf = cfg.ConfigOpts()
    for group, options in opts.list_opts():
        conf.register_opts(list(options),
                           group=None if group == "DEFAULT" else group)
    db_options.set_defaults(conf)
    if profiler_opts:
        profiler_opts.set_defaults(conf)
    if not share:
        defaults.set_cors_middleware_defaults()
        oslo_i18n.enable_lazy()
        log.register_options(conf)

    if argv is None:
        argv = sys.argv
    conf(argv[1:], project='panko', validate_default_values=True,
         version=version.version_info.version_string(),
         default_config_files=config_files)

    if not share:
        log.setup(conf, 'panko')
    profiler.setup(conf)
    # NOTE(liusheng): guru meditation cannot run when the service runs under
    # the apache daemon, so when panko-api is running with mod_wsgi the argv
    # is [] and we don't start guru.
    if argv:
        gmr.TextGuruMeditation.setup_autorun(version)
    return conf
Example #5
def initialize_sql_session():
    # Make sure the DB is located in the correct location, in this case set
    # the default value, as this should be able to be overridden in some
    # test cases.
    db_options.set_defaults(
        CONF,
        connection=unit.IN_MEM_DB_CONN_STRING)
Example #6
File: base.py Project: mahak/glance
    def setUp(self):
        super(IsolatedUnitTest, self).setUp()
        options.set_defaults(CONF, connection='sqlite://')
        lockutils.set_defaults(os.path.join(self.test_dir))

        self.config(debug=False)

        self.config(default_store='filesystem',
                    filesystem_store_datadir=self.test_dir,
                    group="glance_store")

        store.create_stores()

        def fake_get_connection_type(client):
            DEFAULT_REGISTRY_PORT = 9191
            DEFAULT_API_PORT = 9292

            if client.port == DEFAULT_API_PORT:
                return stubs.FakeGlanceConnection
            elif client.port == DEFAULT_REGISTRY_PORT:
                return stubs.FakeRegistryConnection(registry=self.registry)

        self.patcher = mock.patch(
            'glance.common.client.BaseClient.get_connection_type',
            fake_get_connection_type)
        self.addCleanup(self.patcher.stop)
        self.patcher.start()
Example #7
def prepare_service(args=None, conf=None,
                    default_config_files=None):
    if conf is None:
        conf = cfg.ConfigOpts()
    # FIXME(jd) Use the pkg_entry info to register the options of these libs
    log.register_options(conf)
    db_options.set_defaults(conf)
    policy_opts.set_defaults(conf)

    # Register our own Gnocchi options
    for group, options in opts.list_opts():
        conf.register_opts(list(options),
                           group=None if group == "DEFAULT" else group)

    # HACK(jd) I'm not happy about that, fix AP class to handle a conf object?
    archive_policy.ArchivePolicy.DEFAULT_AGGREGATION_METHODS = (
        conf.archive_policy.default_aggregation_methods
    )

    try:
        default_workers = multiprocessing.cpu_count() or 1
    except NotImplementedError:
        default_workers = 1

    conf.set_default("workers", default_workers, group="api")
    conf.set_default("workers", default_workers, group="metricd")

    conf(args, project='gnocchi', validate_default_values=True,
         default_config_files=default_config_files)
    log.setup(conf, 'gnocchi')
    conf.log_opt_values(LOG, logging.DEBUG)

    return conf
Example #8
def setup_dummy_db():
    options.cfg.set_defaults(options.database_opts, sqlite_synchronous=False)
    options.set_defaults(cfg.CONF, connection="sqlite://",
                         sqlite_db='kingbird.db')
    engine = get_engine()
    db_api.db_sync(engine)
    engine.connect()
Example #9
def initialize():
    """Initialize the module."""
    db_options.set_defaults(
        CONF,
        connection="sqlite:///keystone.db")
    # Configure OSprofiler options
    profiler.set_defaults(CONF, enabled=False, trace_sqlalchemy=False)
Example #10
def parse_args(argv, default_config_files=None):
    db_options.set_defaults(cfg.CONF, sqlite_db='tuskar.sqlite')

    cfg.CONF(argv[1:],
             project='tuskar',
             version=version.version_info.release_string(),
             default_config_files=default_config_files)
Example #11
def main():
    args = get_parser().parse_args()

    # Set up logging to use the console
    console = logging.StreamHandler(sys.stderr)
    formatter = logging.Formatter(
        '[%(asctime)s] %(levelname)-8s %(message)s')
    console.setFormatter(formatter)
    root_logger.addHandler(console)
    if args.debug:
        root_logger.setLevel(logging.DEBUG)
    else:
        root_logger.setLevel(logging.INFO)

    _validate_conn_options(args)

    nosql_conf = cfg.ConfigOpts()
    db_options.set_defaults(nosql_conf, args.nosql_conn)
    nosql_conf.register_opts(storage.OPTS, 'database')
    nosql_conn = storage.get_connection_from_config(nosql_conf)

    sql_conf = cfg.ConfigOpts()
    db_options.set_defaults(sql_conf, args.sql_conn)
    sql_conf.register_opts(storage.OPTS, 'database')
    sql_conn = storage.get_connection_from_config(sql_conf)

    root_logger.info(
        _LI("Starting to migrate alarms data from NoSQL to SQL..."))

    count = 0
    for alarm in nosql_conn.get_alarms():
        root_logger.debug("Migrating alarm %s..." % alarm.alarm_id)
        try:
            sql_conn.create_alarm(alarm)
            count += 1
        except exception.DBDuplicateEntry:
            root_logger.warning(_LW("Duplicated alarm %s found, skipped."),
                                alarm.alarm_id)
        if not args.migrate_history:
            continue

        history_count = 0
        for history in nosql_conn.get_alarm_changes(alarm.alarm_id, None):
            history_data = history.as_dict()
            root_logger.debug("    Migrating alarm history data with"
                              " event_id %s..." % history_data['event_id'])
            try:
                sql_conn.record_alarm_change(history_data)
                history_count += 1
            except exception.DBDuplicateEntry:
                root_logger.warning(
                    _LW("    Duplicated alarm history %s found, skipped."),
                    history_data['event_id'])
        root_logger.info(_LI("    Migrated %(count)s history data of alarm "
                             "%(alarm_id)s"),
                         {'count': history_count, 'alarm_id': alarm.alarm_id})

    root_logger.info(_LI("End alarms data migration from NoSQL to SQL, %s"
                         " alarms have been migrated."), count)
Example #12
def set_db_defaults():
    # Update the default QueuePool parameters. These can be tweaked by the
    # conf variables - max_pool_size, max_overflow and pool_timeout
    db_options.set_defaults(
        cfg.CONF,
        connection='sqlite://',
        sqlite_db='', max_pool_size=10,
        max_overflow=20, pool_timeout=10)
Example #13
    def test_set_defaults(self):
        conf = cfg.ConfigOpts()

        options.set_defaults(conf,
                             connection='sqlite:///:memory:')

        self.assertTrue(len(conf.database.items()) > 1)
        self.assertEqual('sqlite:///:memory:', conf.database.connection)
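
Example #13 above is oslo.db's own unit test, and it captures the behavior every snippet here relies on: set_defaults() registers the [database] option group on the given ConfigOpts object and fills in its defaults, which are readable immediately and can still be overridden by config files or the CLI. A minimal standalone sketch of that behavior (not taken from any of the projects above):

from oslo_config import cfg
from oslo_db import options

conf = cfg.ConfigOpts()
options.set_defaults(conf, connection='sqlite:///:memory:')

# The [database] group is now registered and the default is readable
# even though conf() has not parsed any arguments yet.
assert conf.database.connection == 'sqlite:///:memory:'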
Example #14
File: utils.py Project: aaratn/heat
def setup_dummy_db():
    options.cfg.set_defaults(options.database_opts, sqlite_synchronous=False)
    # Uncomment to log SQL
    # options.cfg.set_defaults(options.database_opts, connection_debug=100)
    options.set_defaults(cfg.CONF, connection="sqlite://")
    engine = get_engine()
    models.BASE.metadata.create_all(engine)
    engine.connect()
Example #15
File: base.py Project: mahak/glance
    def setUp(self):
        super(MultiIsolatedUnitTest, self).setUp()
        options.set_defaults(CONF, connection='sqlite://')
        lockutils.set_defaults(os.path.join(self.test_dir))

        self.config(debug=False)
        stubs.stub_out_registry_and_store_server(self,
                                                 self.test_dir,
                                                 registry=self.registry)
Example #16
def parse_args(argv, **kwargs):
    log.set_defaults(_DEFAULT_LOGGING_CONTEXT_FORMAT, _DEFAULT_LOG_LEVELS)
    log.register_options(CONF)
    options.set_defaults(CONF, connection=_DEFAULT_SQL_CONNECTION,
                         sqlite_db='daolicontroller.sqlite')
    CONF(argv[1:],
         project='daolicontroller',
         version='1.0',
         **kwargs)
Example #17
def parse_args(argv, default_config_files=None):
    log.set_defaults(_DEFAULT_LOGGING_CONTEXT_FORMAT, _DEFAULT_LOG_LEVELS)
    log.register_options(CONF)
    options.set_defaults(CONF, connection=_DEFAULT_SQL_CONNECTION)

    cfg.CONF(argv[1:],
             project='ec2api',
             version=version.version_info.version_string(),
             default_config_files=default_config_files)
Example #18
def parse_args(argv, default_config_files=None):
    log.set_defaults(_DEFAULT_LOGGING_CONTEXT_FORMAT, _DEFAULT_LOG_LEVELS)
    log.register_options(CONF)
    options.set_defaults(CONF, connection=_DEFAULT_SQL_CONNECTION,
                         sqlite_db='daoliproxy.sqlite')
    debugger.register_cli_opts()
    CONF(argv[1:],
         project='daoliproxy',
         version=version.version_string(),
         default_config_files=default_config_files)
Example #19
    def setUp(self):
        super(TestGlanceManage, self).setUp()
        conf_dir = os.path.join(self.test_dir, 'etc')
        utils.safe_mkdirs(conf_dir)
        self.conf_filepath = os.path.join(conf_dir, 'glance-manage.conf')
        self.db_filepath = os.path.join(self.test_dir, 'tests.sqlite')
        self.connection = ('sql_connection = sqlite:///%s' %
                           self.db_filepath)
        db_options.set_defaults(CONF, connection='sqlite:///%s' %
                                                 self.db_filepath)
Example #20
def _patch_db_config():
    """propagate db config down to rally
    """

    db_options.set_defaults(CONF, connection=DB_CONNECTION,
                            sqlite_db="rally.sqlite")

    IMPL = db_api.DBAPI.from_config(CONF, backend_mapping=_BACKEND_MAPPING)

    _db_api.IMPL = IMPL
Example #21
def parse_args(argv, default_config_files=None):
    log.register_options(CONF)
    options.set_defaults(CONF, connection=_DEFAULT_SQL_CONNECTION,
                         sqlite_db='conveyor.sqlite')
    rpc.set_defaults(control_exchange='conveyor')
    debugger.register_cli_opts()
    CONF(argv[1:],
         project='conveyor',
         version=version.version_string(),
         default_config_files=default_config_files)
    rpc.init(CONF)
Example #22
def setup_conf(conf=cfg.CONF):
    """Setup the cfg for the status check utility.

    Use separate setup_conf for the utility because there are many options
    from the main config that do not apply during checks.
    """

    neutron_conf_base.register_core_common_config_opts(conf)
    neutron_conf_service.register_service_opts(
        neutron_conf_service.SERVICE_OPTS, conf)
    db_options.set_defaults(conf)
    return conf
Example #23
def main():
    command_opt = cfg.SubCommandOpt('command',
                                    title='Command',
                                    help='Available commands',
                                    handler=add_command_parsers)
    CONF.register_cli_opt(command_opt)

    # set_defaults() is called to register the db options.
    options.set_defaults(CONF)

    CONF(project='magnum')
    CONF.command.func(get_manager())
Example #24
File: base.py Project: dlq84/glance
    def setUp(self):
        super(IsolatedUnitTest, self).setUp()
        options.set_defaults(CONF, connection="sqlite://", sqlite_db="glance.sqlite")
        lockutils.set_defaults(os.path.join(self.test_dir))

        self.config(verbose=False, debug=False)

        self.config(
            default_store="filesystem", filesystem_store_datadir=os.path.join(self.test_dir), group="glance_store"
        )

        store.create_stores()
        stubs.stub_out_registry_and_store_server(self.stubs, self.test_dir, registry=self.registry)
Example #25
def main(args=sys.argv[1:]):
    conf = cfg.ConfigOpts()
    conf.register_cli_opt(command_opt)
    db_options.set_defaults(conf)
    for group, options in opts.list_opts():
        conf.register_opts(list(options),
                           group=None if group == 'DEFAULT' else group)
    conf(args, project='enamel')
    db_utils.init(conf)
    al_conf = get_alembic_config()
    al_conf.enamel_config = conf

    conf.command.func(al_conf)
Example #26
def prepare_service(args=None, conf=None,
                    default_config_files=None):
    if conf is None:
        conf = cfg.ConfigOpts()
    opts.set_defaults()
    # FIXME(jd) Use the pkg_entry info to register the options of these libs
    log.register_options(conf)
    db_options.set_defaults(conf)
    policy_opts.set_defaults(conf)

    # Register our own Gnocchi options
    for group, options in opts.list_opts():
        conf.register_opts(list(options),
                           group=None if group == "DEFAULT" else group)

    # HACK(jd) I'm not happy about that, fix AP class to handle a conf object?
    archive_policy.ArchivePolicy.DEFAULT_AGGREGATION_METHODS = (
        conf.archive_policy.default_aggregation_methods
    )

    try:
        default_workers = multiprocessing.cpu_count() or 1
    except NotImplementedError:
        default_workers = 1

    conf.set_default("workers", default_workers, group="metricd")

    conf(args, project='gnocchi', validate_default_values=True,
         default_config_files=default_config_files,
         version=pbr.version.VersionInfo('gnocchi').version_string())

    # If no coordination URL is provided, default to using the indexer as
    # coordinator
    if conf.storage.coordination_url is None:
        parsed = urlparse.urlparse(conf.indexer.url)
        proto, _, _ = parsed.scheme.partition("+")
        parsed = list(parsed)
        # Set proto without the + part
        parsed[0] = proto
        conf.set_default("coordination_url",
                         urlparse.urlunparse(parsed),
                         "storage")

    log.set_defaults(default_log_levels=log.get_default_log_levels() +
                     ["passlib.utils.compat=INFO"])
    log.setup(conf, 'gnocchi')
    conf.log_opt_values(LOG, log.DEBUG)

    return conf
Example #27
def initialize_sql_session(connection_str=unit.IN_MEM_DB_CONN_STRING,
                           enforce_sqlite_fks=True):
    # Make sure the DB is located in the correct location, in this case set
    # the default value, as this should be able to be overridden in some
    # test cases.
    db_options.set_defaults(
        CONF,
        connection=connection_str)

    # Enable SQLite FKs for the global engine by default.
    facade = enginefacade.get_legacy_facade()
    engine = facade.get_engine()
    f_key = 'ON' if enforce_sqlite_fks else 'OFF'
    if engine.name == 'sqlite':
        engine.connect().execute('PRAGMA foreign_keys = ' + f_key)
Example #28
def parse_args(argv, default_config_files=None, configure_db=True):
    log.set_defaults(_DEFAULT_LOGGING_CONTEXT_FORMAT, _DEFAULT_LOG_LEVELS)
    log.register_options(CONF)
    options.set_defaults(CONF, connection=_DEFAULT_SQL_CONNECTION,
                         sqlite_db='nova.sqlite')
    rpc.set_defaults(control_exchange='nova')
    debugger.register_cli_opts()
    CONF(argv[1:],
         project='nova',
         version=version.version_string(),
         default_config_files=default_config_files)
    rpc.init(CONF)

    if configure_db:
        sqlalchemy_api.configure(CONF)
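
The parse_args() variants above (Examples #10, #16, #17, #18, #21 and #28) all follow the same ordering: set_defaults() runs before the ConfigOpts object parses argv and the config files, so the registered defaults lose to anything an operator supplies. A minimal sketch of that pattern, with a made-up project name and default connection:

from oslo_config import cfg
from oslo_db import options as db_options

CONF = cfg.ConfigOpts()

def parse_args(argv, default_config_files=None):
    # Register the [database] defaults first; values parsed below override them.
    db_options.set_defaults(CONF, connection='sqlite://')
    CONF(argv[1:], project='example',
         default_config_files=default_config_files)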
Example #29
    def setUp(self):
        super(IsolatedUnitTest, self).setUp()
        options.set_defaults(CONF, connection="sqlite://", sqlite_db="glance.sqlite")
        lockutils.set_defaults(os.path.join(self.test_dir))

        self.config(verbose=False, debug=False)

        self.config(
            default_store="filesystem", filesystem_store_datadir=os.path.join(self.test_dir), group="glance_store"
        )

        store.create_stores()
        stubs.stub_out_registry_and_store_server(self.stubs, self.test_dir, registry=self.registry)

        # clear context left-over from any previous test executions
        if hasattr(local.store, "context"):
            delattr(local.store, "context")
Example #30
    def setUp(self):
        super(DietTestCase, self).setUp()

        # Suppress some log messages during test runs; otherwise they may
        # cause issues with the subunit parser when running on Python 3, as
        # happened for example with the neutron-functional tests.
        # With these log levels suppressed, DEBUG logs will not be captured
        # by stestr on the pythonlogging stream and will not trigger the
        # parser issue.
        suppress_logs = ['neutron', 'neutron_lib', 'stevedore', 'oslo_policy',
                         'oslo_concurrency', 'oslo_db', 'alembic', 'ovsdbapp']
        for suppress_log in suppress_logs:
            logger = logging.getLogger(suppress_log)
            logger.setLevel(logging.ERROR)

        # FIXME(amuller): this must be called in the Neutron unit tests base
        # class. Moving this may cause non-deterministic failures. Bug #1489098
        # for more info.
        db_options.set_defaults(cfg.CONF, connection='sqlite://')

        # Configure this first to ensure pm debugging support for setUp()
        debugger = os.environ.get('OS_POST_MORTEM_DEBUGGER')
        if debugger:
            self.addOnException(post_mortem_debug.get_exception_handler(
                debugger))

        # Make sure we see all relevant deprecation warnings when running tests
        self.useFixture(tools.WarningsFixture())

        self.useFixture(fixture.DBQueryHooksFixture())

        # NOTE(ihrachys): oslotest already sets stopall for cleanup, but it
        # does it using six.moves.mock (the library was moved into
        # unittest.mock in Python 3.4). So until we switch to six.moves.mock
        # everywhere in unit tests, we can't remove this setup. The base class
        # is used in third-party projects, so we would need to switch all of them to
        # six before removing the cleanup callback from here.
        self.addCleanup(mock.patch.stopall)

        self.useFixture(fixture.DBResourceExtendFixture())

        self.addOnException(self.check_for_systemexit)
        self.orig_pid = os.getpid()

        tools.reset_random_seed()
Example #31
    def setUp(self):
        super(SqlFixtureTestCase, self).setUp()
        options.set_defaults(cfg.CONF, connection='sqlite://')
        self.useFixture(fixture.SqlFixture())
Example #32
def initialize():
    """Initialize the module."""
    db_options.set_defaults(CONF, connection="sqlite:///keystone.db")
Example #33
from oslo_config import cfg
from oslo_db import options
from oslo_db.sqlalchemy import enginefacade
import osprofiler.sqlalchemy
import sqlalchemy as sa

from mistral.db.sqlalchemy import sqlite_lock
from mistral import exceptions as exc
from mistral_lib import utils

# Note(dzimine): sqlite only works for basic testing.
options.set_defaults(cfg.CONF, connection="sqlite:///mistral.sqlite")

_DB_SESSION_THREAD_LOCAL_NAME = "db_sql_alchemy_session"

_facade = None
_sqlalchemy_create_engine_orig = sa.create_engine


def _get_facade():
    global _facade

    if not _facade:
        _facade = enginefacade.LegacyEngineFacade(
            cfg.CONF.database.connection,
            sqlite_fk=True,
            autocommit=False,
Example #34
    def setUp(self):
        super(BaseTestCase, self).setUp()
        self.useFixture(fixture.PluginDirectoryFixture())

        # Enabling 'use_fatal_exceptions' allows us to catch string
        # substitution format errors in exception messages.
        mock.patch.object(exceptions.NeutronException,
                          'use_fatal_exceptions',
                          return_value=True).start()

        db_options.set_defaults(cfg.CONF, connection='sqlite://')

        self.useFixture(
            fixtures.MonkeyPatch(
                'oslo_config.cfg.find_config_files',
                lambda project=None, prog=None, extension=None: []))

        self.setup_config()

        # Configure this first to ensure pm debugging support for setUp()
        debugger = os.environ.get('OS_POST_MORTEM_DEBUGGER')
        if debugger:
            self.addOnException(
                post_mortem_debug.get_exception_handler(debugger))

        # Make sure we see all relevant deprecation warnings when running tests
        self.useFixture(tools.WarningsFixture())

        if bool_from_env('OS_DEBUG'):
            _level = std_logging.DEBUG
        else:
            _level = std_logging.INFO
        capture_logs = bool_from_env('OS_LOG_CAPTURE')
        if not capture_logs:
            std_logging.basicConfig(format=LOG_FORMAT, level=_level)
        self.log_fixture = self.useFixture(
            fixtures.FakeLogger(
                format=LOG_FORMAT,
                level=_level,
                nuke_handlers=capture_logs,
            ))

        test_timeout = get_test_timeout()
        if test_timeout == -1:
            test_timeout = 0
        if test_timeout > 0:
            self.useFixture(fixtures.Timeout(test_timeout, gentle=True))

        # If someone does use tempfile directly, ensure that it's cleaned up
        self.useFixture(fixtures.NestedTempfile())
        self.useFixture(fixtures.TempHomeDir())

        self.addCleanup(mock.patch.stopall)

        if bool_from_env('OS_STDOUT_CAPTURE'):
            stdout = self.useFixture(fixtures.StringStream('stdout')).stream
            self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
        if bool_from_env('OS_STDERR_CAPTURE'):
            stderr = self.useFixture(fixtures.StringStream('stderr')).stream
            self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))

        self.addOnException(self.check_for_systemexit)
        self.orig_pid = os.getpid()
Example #35
CONF = cfg.CONF

database_group = cfg.OptGroup('database',
                              title='Database options',
                              help="""
Database configuration.
""")

api_group = cfg.OptGroup('api',
                         title='API options',
                         help="""
Options under this group are used to define Nova API.
""")

auth_opts = [
    cfg.StrOpt("auth_strategy",
               default="keystone",
               choices=("keystone", "noauth2"),
               deprecated_group="DEFAULT",
               help="""
This determines the strategy to use for authentication: keystone or noauth2.
'noauth2' is designed for testing only, as it does no actual credential
checking. 'noauth2' provides administrative credentials only if 'admin' is
specified as the username.
"""),
]

oslo_db_options.set_defaults(CONF)
CONF.register_group(api_group)
CONF.register_opts(auth_opts, group=api_group)
Example #36
def initialize_sql_session(connection_str=unit.IN_MEM_DB_CONN_STRING):
    # Make sure the DB is located in the correct location, in this case set
    # the default value, as this should be able to be overridden in some
    # test cases.
    db_options.set_defaults(CONF, connection=connection_str)
Example #37
from ironic_inspector import conf  # noqa
from ironic_inspector import introspection_state as istate


class ModelBase(models.ModelBase):
    __table_args__ = {'mysql_engine': "InnoDB",
                      'mysql_charset': "utf8"}


Base = declarative_base(cls=ModelBase)
CONF = cfg.CONF
_DEFAULT_SQL_CONNECTION = 'sqlite:///ironic_inspector.sqlite'
_FACADE = None

db_opts.set_defaults(cfg.CONF, _DEFAULT_SQL_CONNECTION,
                     'ironic_inspector.sqlite')


class Node(Base):
    __tablename__ = 'nodes'
    uuid = Column(String(36), primary_key=True)
    version_id = Column(String(36), server_default='')
    state = Column(Enum(*istate.States.all()), nullable=False,
                   default=istate.States.finished,
                   server_default=istate.States.finished)
    started_at = Column(DateTime, nullable=True)
    finished_at = Column(DateTime, nullable=True)
    error = Column(Text, nullable=True)

    # version_id is being tracked in the NodeInfo object
    # for the sake of consistency. See also SQLAlchemy docs:
Example #38
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import orm

from ironic.common.i18n import _
from ironic.common import paths

sql_opts = [
    cfg.StrOpt('mysql_engine',
               default='InnoDB',
               help=_('MySQL engine to use.'))
]

_DEFAULT_SQL_CONNECTION = 'sqlite:///' + paths.state_path_def('ironic.sqlite')

cfg.CONF.register_opts(sql_opts, 'database')
db_options.set_defaults(cfg.CONF, _DEFAULT_SQL_CONNECTION, 'ironic.sqlite')


def table_args():
    engine_name = urlparse.urlparse(cfg.CONF.database.connection).scheme
    if engine_name == 'mysql':
        return {
            'mysql_engine': cfg.CONF.database.mysql_engine,
            'mysql_charset': "utf8"
        }
    return None


class IronicBase(models.TimestampMixin, models.ModelBase):

    metadata = None
Example #39
import functools

from oslo_config import cfg
from oslo_db import options as db_options
from oslo_db.sqlalchemy import session as db_session

from qinling.db.sqlalchemy import sqlite_lock
from qinling import exceptions as exc
from qinling.utils import thread_local

# Note(dzimine): sqlite only works for basic testing.
db_options.set_defaults(cfg.CONF, connection="sqlite:///qinling.sqlite")
_FACADE = None
_DB_SESSION_THREAD_LOCAL_NAME = "db_sql_alchemy_session"


def _get_facade():
    global _FACADE
    if _FACADE is None:
        _FACADE = db_session.EngineFacade.from_config(cfg.CONF, sqlite_fk=True)
    return _FACADE


def get_session(expire_on_commit=False, autocommit=False):
    """Helper method to grab session."""
    facade = _get_facade()
    return facade.get_session(expire_on_commit=expire_on_commit,
Example #40
from oslo_config import cfg
from oslo_db import options
from oslo_db.sqlalchemy import session as db_session
from oslo_log import log as logging
import osprofiler.sqlalchemy
import sqlalchemy

from miper import exception
from miper.i18n import _

CONF = cfg.CONF
CONF.import_group("profiler", "miper.service")
LOG = logging.getLogger(__name__)

options.set_defaults(CONF, connection='sqlite:///$state_path/miper.sqlite')

_LOCK = threading.Lock()
_FACADE = None


def _create_facade_lazily():
    global _LOCK
    with _LOCK:
        global _FACADE
        if _FACADE is None:
            _FACADE = db_session.EngineFacade(CONF.database.connection,
                                              **dict(CONF.database))

            if CONF.profiler.profiler_enabled:
                if CONF.profiler.trace_sqlalchemy:
Example #41
sql_opts = [
    cfg.StrOpt('mysql_engine',
               default='InnoDB',
               help='MySQL engine to use.'),
    cfg.IntOpt('max_db_entries',
               default=10,
               help='Maximum test result entries to be persisted'),
]

_DEFAULT_SQL_CONNECTION = ('sqlite:///' +
                           paths.state_path_def('cloudpulse.sqlite'))

cfg.CONF.register_opts(sql_opts, 'database')
db_options.set_defaults(cfg.CONF, _DEFAULT_SQL_CONNECTION, 'cloudpulse.sqlite')


def table_args():
    engine_name = urlparse.urlparse(cfg.CONF.database.connection).scheme
    if engine_name == 'mysql':
        return {'mysql_engine': cfg.CONF.database.mysql_engine,
                'mysql_charset': "utf8"}
    return None


class JsonEncodedType(TypeDecorator):
    """Abstract base type serialized as json-encoded string in db."""
    type = None
    impl = TEXT
Example #42
LOG = log.getLogger(__name__)

OPTS = [
    cfg.StrOpt('connection',
               secret=True,
               default=None,
               help='The connection string used to connect to the database'),
    cfg.IntOpt('max_retries',
               default=1,
               help='Max retries num if failed to connect to the database'),
    cfg.IntOpt('retry_interval', default=5, help='Retry interval time')
]

cfg.CONF.register_opts(OPTS, group='database')
"""
db_options.set_defaults(cfg.CONF)
"""


class StorageUnknownWriteError(Exception):
    """Error raised when an unknown error occurs while recording."""


class StorageBadVersion(Exception):
    """Error raised when the storage backend version is not good enough."""


class StorageBadAggregate(Exception):
    """Error raised when an aggregate is unacceptable to storage backend."""
    code = 400
Example #43
def setup_dummy_db():
    options.cfg.set_defaults(options.database_opts, sqlite_synchronous=False)
    options.set_defaults(cfg.CONF, connection="sqlite://")
    engine = get_engine()
    models.BASE.metadata.create_all(engine)
    engine.connect()
Example #44
from oslo_db.sqlalchemy import types as db_types
import six.moves.urllib.parse as urlparse
from sqlalchemy import Boolean, Column, DateTime, false, Index
from sqlalchemy import ForeignKey, Integer
from sqlalchemy import schema, String, Text
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import orm

from ironic.common import exception
from ironic.common.i18n import _
from ironic.conf import CONF

_DEFAULT_SQL_CONNECTION = 'sqlite:///' + path.join('$state_path',
                                                   'ironic.sqlite')

db_options.set_defaults(CONF, connection=_DEFAULT_SQL_CONNECTION)


def table_args():
    engine_name = urlparse.urlparse(CONF.database.connection).scheme
    if engine_name == 'mysql':
        return {
            'mysql_engine': CONF.database.mysql_engine,
            'mysql_charset': "utf8"
        }
    return None


class IronicBase(models.TimestampMixin, models.ModelBase):

    metadata = None
Example #45
"""Database setup and migration commands."""

import os
import threading

from oslo_config import cfg
from oslo_db import options as db_options
from stevedore import driver

_IMPL = None
_LOCK = threading.Lock()

db_options.set_defaults(cfg.CONF)


def get_backend():
    global _IMPL
    if _IMPL is None:
        with _LOCK:
            if _IMPL is None:
                _IMPL = driver.DriverManager(
                    "glance.database.migration_backend",
                    cfg.CONF.database.backend).driver
    return _IMPL


# Migration-related constants
EXPAND_BRANCH = 'expand'
Example #46
:db_backend:  string to lookup in the list of LazyPluggable backends.
              `sqlalchemy` is the only supported backend right now.

:sql_connection:  string specifying the sqlalchemy connection to use, like:
                  `mysql://user:password@localhost/sahara`.

"""

from oslo_config import cfg
from oslo_db import api as db_api
from oslo_db import options
from oslo_log import log as logging

CONF = cfg.CONF

options.set_defaults(CONF)

_BACKEND_MAPPING = {
    'sqlalchemy': 'sahara.db.sqlalchemy.api',
}

IMPL = db_api.DBAPI.from_config(CONF, backend_mapping=_BACKEND_MAPPING)
LOG = logging.getLogger(__name__)


def setup_db():
    """Set up database, create tables, etc.

    Return True on success, False otherwise
    """
    return IMPL.setup_db()
Example #47
def initialize():
    db_options.set_defaults(cfg.CONF, connection='sqlite:///:memory:')
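
The examples differ mainly in which SQLite URL they pass as the default connection. A quick reference for the standard SQLAlchemy URL forms that appear above (file names are placeholders):

'sqlite://'                         # in-memory database
'sqlite:///:memory:'                # in-memory database, explicit form
'sqlite:///tests.sqlite'            # file path relative to the working directory
'sqlite:////tmp/tests.sqlite'       # absolute file path (four slashes)
'sqlite:///$state_path/$sqlite_db'  # oslo.config interpolates $variables at runtime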
Example #48
cfg.CONF.register_opts(A10_VTHUNDER_OPTS, group='vthunder')
cfg.CONF.register_opts(A10_GLM_LICENSE_OPTS, group='glm_license')
cfg.CONF.register_opts(A10_SLB_OPTS, group='slb')
cfg.CONF.register_opts(A10_HEALTH_MONITOR_OPTS, group='health_monitor')
cfg.CONF.register_opts(A10_LISTENER_OPTS, group='listener')
cfg.CONF.register_opts(A10_SERVICE_GROUP_OPTS, group='service_group')
cfg.CONF.register_opts(A10_SERVER_OPTS, group='server')
cfg.CONF.register_opts(A10_HARDWARE_THUNDER_OPTS, group='hardware_thunder')


# Ensure that the control exchange is set correctly
messaging.set_transport_defaults(control_exchange='octavia')
_SQL_CONNECTION_DEFAULT = 'sqlite://'
# Update the default QueuePool parameters. These can be tweaked by the
# configuration variables - max_pool_size, max_overflow and pool_timeout
db_options.set_defaults(cfg.CONF, connection=_SQL_CONNECTION_DEFAULT,
                        max_pool_size=10, max_overflow=20, pool_timeout=10)

logging.register_options(cfg.CONF)

ks_loading.register_auth_conf_options(cfg.CONF, constants.SERVICE_AUTH)
ks_loading.register_session_conf_options(cfg.CONF, constants.SERVICE_AUTH)


def init(*args, **kwargs):
    """ Initialize the cfg.CONF object for octavia project"""
    cfg.CONF(*args, project='octavia',
             version='%%prog %s' % version.version_info.release_string(),
             **kwargs)


def setup_logging(conf):
Example #49
def register_opts(conf):
    oslo_db_options.set_defaults(conf, connection=_DEFAULT_SQL_CONNECTION)
    conf.register_group(database)
    conf.register_opts(SQL_OPTS, group=database)
Example #50
def initialize():
    """Initialize the module."""
    db_options.set_defaults(CONF, connection="sqlite:///keystone.db")
    # Configure OSprofiler options
    profiler.set_defaults(CONF, enabled=False, trace_sqlalchemy=False)
Example #51
    def configure(self):
        options.cfg.set_defaults(options.database_opts,
                                 sqlite_synchronous=False)
        options.set_defaults(cfg.CONF,
                             connection='sqlite:///%s' % self.db_file)
Example #52
    def __init__(self, observer, openrc, inventory, **params):
        super(RallyLoader, self).__init__(observer, openrc, inventory,
                                          **params)

        self.scenario_file = os.path.abspath(
            os.path.join(RallyLoader.scenarios_path, params['scenario_file']))

        # TODO(dratushnyy): fall back to the default path only if the file
        # is not found
        self.scenario_args_file = params.get('scenario_args_file', None)
        if self.scenario_args_file:
            self.scenario_args_file = os.path.abspath(
                os.path.join(RallyLoader.scenarios_path,
                             self.scenario_args_file))

        self.start_delay = params['start_delay']
        self.deployment_name = params['deployment_name']
        self.deployment_config = {
            "type": "ExistingCloud",
            "admin": {
                "username": openrc["username"],
                "password": openrc["password"],
                "tenant_name": openrc["tenant_name"]
            },
            "auth_url": openrc["auth_url"],
            "region_name": openrc["region_name"],
            "https_insecure": openrc['https_insecure'],
            "https_cacert": openrc["https_cacert"]
        }
        self.scenario_args = params.get('scenario_args', None)
        # Needs to be set to None to avoid an exception in the stop() method
        self.rally_task = None

        load_rally_plugins()
        if params.get('db'):
            db_connection = RallyLoader.conn_template.format(
                user=params["db"]["user"],
                passwd=params["db"]["pass"],
                host=params["db"]["host"],
                db_name=params["db"]["name"])

            db_options.set_defaults(CONF, connection=db_connection)
        try:
            rally_api.Deployment.get(self.deployment_name)
        except DBNonExistentTable:
            db.schema_create()
        except DeploymentNotFound:
            try:
                rally_api.Deployment.create(config=self.deployment_config,
                                            name=self.deployment_name)
            except ValidationError as e:
                LOGGER.exception(e)
                raise
        except OperationalError as e:
            LOGGER.exception(e)
            raise

        # Since there is no API method to do this, use the CLI
        deployment_cli.DeploymentCommands().use(self.deployment_name)
        # Use the rally task CLI to load and validate the task
        # TODO: check whether the API supports this
        try:
            self.scenario_config = task_cli.TaskCommands().\
                _load_and_validate_task(self.scenario_file,
                                        json.dumps(self.scenario_args),
                                        self.scenario_args_file,
                                        self.deployment_name)
        except Exception as e:
            LOGGER.exception(e)
            raise
Example #53
    def setUp(self):
        super(MultiIsolatedUnitTest, self).setUp()
        options.set_defaults(CONF, connection='sqlite://')
        lockutils.set_defaults(os.path.join(self.test_dir))

        self.config(debug=False)
Example #54
from oslo_db import exception as db_exc
from oslo_db import options as db_options
from oslo_db.sqlalchemy import session as db_session
import sqlalchemy as sa
import sqlalchemy.orm  # noqa

from rally.common import cfg
from rally.common.db import models
from rally import consts
from rally import exceptions
from rally.task.processing import charts

CONF = cfg.CONF

db_options.set_defaults(CONF,
                        connection="sqlite:///%s/rally.sqlite" %
                        tempfile.gettempdir())

_FACADE = None
_SESSION_MAKER = None


def _create_facade_lazily():
    global _FACADE

    if _FACADE is None:
        _FACADE = db_session.EngineFacade.from_config(CONF)

    return _FACADE

Example #55
def register_opts(conf):
    oslo_db_options.set_defaults(conf, connection=_DEFAULT_SQL_CONNECTION)
    conf.register_opts(api_db_opts, group=api_db_group)
    conf.register_opts(placement_db_opts, group=placement_db_group)
Example #56
import six

from oslo_config import cfg
from oslo_db import options
from oslo_db.sqlalchemy import session as db_session
from oslo_log import log as logging

from lbaas import exceptions as exc
from lbaas import utils


LOG = logging.getLogger(__name__)

# Note(dzimine): sqlite only works for basic testing.
options.set_defaults(cfg.CONF, connection="sqlite:///lbaas.sqlite")

_DB_SESSION_THREAD_LOCAL_NAME = "db_sql_alchemy_session"

_facade = None


def _get_facade():
    global _facade

    if not _facade:
        _facade = db_session.EngineFacade(
            cfg.CONF.database.connection,
            sqlite_fk=True,
            autocommit=False,
            **dict(six.iteritems(cfg.CONF.database))
Example #57
def get_db(config):
    options.set_defaults(CONF, connection='sqlite://')
    config(debug=False)
    db_api = glance.db.sqlalchemy.api
    return db_api
Example #58
                default=True,
                help='Services to be added to the available pool on create'),
    cfg.StrOpt('volume_name_template',
               default='volume-%s',
               help='Template string to be used to generate volume names'),
    cfg.StrOpt('snapshot_name_template',
               default='snapshot-%s',
               help='Template string to be used to generate snapshot names'),
    cfg.StrOpt('backup_name_template',
               default='backup-%s',
               help='Template string to be used to generate backup names'),
]

CONF = cfg.CONF
CONF.register_opts(db_opts)
db_options.set_defaults(CONF)
CONF.set_default('sqlite_db', 'cinder.sqlite', group='database')

_BACKEND_MAPPING = {'sqlalchemy': 'cinder.db.sqlalchemy.api'}

IMPL = db_concurrency.TpoolDbapiWrapper(CONF, _BACKEND_MAPPING)

# The maximum value a signed INT type may have
MAX_INT = 0x7FFFFFFF

###################


def dispose_engine():
    """Force the engine to establish new connections."""
Example #59
:connection:  string specifying the sqlalchemy connection to use, like:
              `sqlite:///var/lib/cinder/cinder.sqlite`.

:enable_new_services:  when adding a new service to the database, is it in the
                       pool of available hardware (Default: True)

"""

from oslo_config import cfg
from oslo_db import api as db_api
from oslo_db import options as db_options

CONF = cfg.CONF

db_options.set_defaults(CONF, connection="sqlite:////tmp/rally.sqlite")

IMPL = None


def get_impl():
    global IMPL

    if not IMPL:
        _BACKEND_MAPPING = {"sqlalchemy": "rally.common.db.sqlalchemy.api"}
        IMPL = db_api.DBAPI.from_config(CONF, backend_mapping=_BACKEND_MAPPING)

    return IMPL


def engine_reset():
Example #60
from cue.common.i18n import _  # noqa
from cue.db import api
from cue.db.sqlalchemy import models

from oslo_config import cfg
from oslo_db import exception as db_exception
from oslo_db import options as db_options
from oslo_db.sqlalchemy import session
from oslo_utils import timeutils
from sqlalchemy.orm import exc as sql_exception

CONF = cfg.CONF

CONF.register_opt(cfg.StrOpt('sqlite_db', default='cue.sqlite'))

db_options.set_defaults(cfg.CONF,
                        connection='sqlite:///$state_path/$sqlite_db')

_FACADE = None


def _create_facade_lazily():
    global _FACADE

    if _FACADE is None:
        _FACADE = session.EngineFacade.from_config(cfg.CONF, sqlite_fk=True)

    return _FACADE


def get_engine():
    """Helper method to grab engine."""