Example #1
    def _setUp(self):
        self._init_resources()

        # check if the fixtures failed to get
        # an engine.  The test setUp() itself should also be checking
        # this and raising skipTest.
        if not hasattr(self, 'engine'):
            return

        engine = self.engine
        self.addCleanup(lambda: self._delete_from_schema(engine))

        self.sessionmaker = session.get_maker(engine)

        _restore_factory = db_api.get_context_manager()._root_factory

        self.enginefacade_factory = enginefacade._TestTransactionFactory(
            self.engine, self.sessionmaker, from_factory=_restore_factory,
            apply_global=False)

        db_api.get_context_manager()._root_factory = self.enginefacade_factory

        engine = db_api.CONTEXT_WRITER.get_engine()

        self.addCleanup(
            lambda: setattr(
                db_api.get_context_manager(),
                "_root_factory", _restore_factory))

        self.useFixture(EnableSQLiteFKsFixture(engine))
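
The factory swap above is a recurring pattern: point the context manager's root factory at a test engine, and restore the original factory on cleanup. A minimal reusable sketch, assuming the oslo.db enginefacade API; `_TestTransactionFactory` is a private oslo.db helper whose signature may vary between releases, and `EngineFacadeSwapFixture` is a hypothetical name:

import fixtures
from oslo_db.sqlalchemy import enginefacade


class EngineFacadeSwapFixture(fixtures.Fixture):
    """Point a context manager's root factory at a test engine."""

    def __init__(self, context_manager, engine, maker):
        self.context_manager = context_manager
        self.engine = engine
        self.maker = maker

    def _setUp(self):
        # remember the active factory so cleanup can restore it
        restore = self.context_manager._root_factory
        self.context_manager._root_factory = (
            enginefacade._TestTransactionFactory(
                self.engine, self.maker,
                from_factory=restore, apply_global=False))
        self.addCleanup(
            setattr, self.context_manager, '_root_factory', restore)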
Example #2
    def _setUp(self):
        self._init_resources()

        # check if the fixtures failed to get
        # an engine.  The test setUp() itself should also be checking
        # this and raising skipTest.
        if not hasattr(self, 'engine'):
            return

        engine = self.engine
        self.addCleanup(lambda: self._delete_from_schema(engine))

        self.sessionmaker = session.get_maker(engine)

        _restore_factory = db_api.get_context_manager()._root_factory

        self.enginefacade_factory = enginefacade._TestTransactionFactory(
            self.engine,
            self.sessionmaker,
            from_factory=_restore_factory,
            apply_global=False)

        db_api.get_context_manager()._root_factory = self.enginefacade_factory

        engine = db_api.CONTEXT_WRITER.get_engine()

        self.addCleanup(lambda: setattr(db_api.get_context_manager(),
                                        "_root_factory", _restore_factory))

        self.useFixture(_EnableSQLiteFKsFixture(engine))
Example #3
    def dispatch_events(self):
        # TODO(kevinbenton): now that we are batching these, convert to a
        # single get_objects call for all of them
        LOG.debug('Thread %(name)s started', {'name': self._worker.name})
        while not self._stop.is_set():
            try:
                resource_id, context_dict = self._resources_to_push.get(
                    timeout=self.MAX_IDLE_FOR)
                context = n_ctx.Context.from_dict(context_dict)
                # attempt to get regardless of event type so concurrent delete
                # after create/update is the same code-path as a delete event
                with db_api.get_context_manager().independent.reader.using(
                        context):
                    obj = self._obj_class.get_object(context, id=resource_id)
                # CREATE events are always treated as UPDATE events to ensure
                # listeners are written to handle out-of-order messages
                if obj is None:
                    rpc_event = rpc_events.DELETED
                    # construct a fake object with the right ID so we can
                    # have a payload for the delete message.
                    obj = self._obj_class(id=resource_id)
                else:
                    rpc_event = rpc_events.UPDATED
                self._resource_push_api.push(context, [obj], rpc_event)
                self._resources_to_push.task_done()
            except queue.Empty:
                pass
            except Exception as e:
                LOG.exception(
                    "Exception while dispatching %(res)s events: %(e)s",
                    {'res': self._resource, 'e': e})
        LOG.debug('Thread %(name)s finished with %(msgs)s unsent messages',
                  {'name': self._worker.name,
                   'msgs': self._resources_to_push.unfinished_tasks})
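
The `independent.reader.using(context)` block in the loop above is what isolates the lookup from any transaction already active on the request context. A standalone sketch of that enginefacade idiom, assuming a plain oslo.db context manager (the SQLite URL is a placeholder):

from oslo_context import context as oslo_context
from oslo_db.sqlalchemy import enginefacade
import sqlalchemy as sa

context_manager = enginefacade.transaction_context()
context_manager.configure(connection='sqlite://')

ctx = oslo_context.RequestContext()
# `.independent` forces a fresh transaction even if `ctx` already
# carries one, so this read sees only committed state.
with context_manager.independent.reader.using(ctx) as session:
    print(session.execute(sa.text('SELECT 1')).scalar())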
Example #4
def _start_workers(workers, neutron_api=None):
    process_workers = [
        plugin_worker for plugin_worker in workers
        if plugin_worker.worker_process_count > 0
    ]

    try:
        if process_workers:
            # Reuse the process launcher from the WSGI app if one exists
            worker_launcher = None
            if neutron_api:
                worker_launcher = neutron_api.wsgi_app.process_launcher
            if worker_launcher is None:
                worker_launcher = common_service.ProcessLauncher(
                    cfg.CONF, wait_interval=1.0, restart_method='mutate'
                )

            # add one extra process worker and run all workers with
            # worker_process_count == 0 inside it
            thread_workers = [
                plugin_worker for plugin_worker in workers
                if plugin_worker.worker_process_count < 1
            ]
            if thread_workers:
                process_workers.append(
                    AllServicesNeutronWorker(thread_workers)
                )

            # dispose the whole pool before os.fork, otherwise there will
            # be shared DB connections in child processes which may cause
            # DB errors.
            session.get_context_manager().dispose_pool()

            for worker in process_workers:
                worker_launcher.launch_service(worker,
                                               worker.worker_process_count)
        else:
            worker_launcher = common_service.ServiceLauncher(cfg.CONF)
            for worker in workers:
                worker_launcher.launch_service(worker)
        return worker_launcher
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.exception('Unrecoverable error: please check log for '
                          'details.')
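
The `dispose_pool()` call before launching the process workers is the load-bearing detail: pooled connections must not survive `os.fork()`, or parent and children end up sharing the same DB sockets. A minimal sketch of the rule, assuming a bare enginefacade context manager (the URL is a placeholder):

import os

from oslo_db.sqlalchemy import enginefacade

context_manager = enginefacade.transaction_context()
context_manager.configure(connection='sqlite://')  # placeholder URL

# ... engines and pooled connections may exist by this point ...

context_manager.dispose_pool()  # close all pooled connections first
pid = os.fork()                 # each child builds its own pool lazily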
Example #5
    def _init_resources(cls):
        if cls._GLOBAL_RESOURCES:
            return
        else:
            cls._GLOBAL_RESOURCES = True
            cls.database_resource = provision.DatabaseResource(
                "sqlite", db_api.get_context_manager())
            dependency_resources = {}
            for name, resource in cls.database_resource.resources:
                dependency_resources[name] = resource.getResource()
            cls.engine = dependency_resources['backend'].engine
Example #6
    def _init_resources(cls):
        if cls._GLOBAL_RESOURCES:
            return
        else:
            cls._GLOBAL_RESOURCES = True
            cls.database_resource = provision.DatabaseResource(
                "sqlite", db_api.get_context_manager())
            dependency_resources = {}
            for name, resource in cls.database_resource.resources:
                dependency_resources[name] = resource.getResource()
            cls.engine = dependency_resources['backend'].engine
Example #7
    def _launch(self, application, workers=0):
        service = WorkerService(self, application, self.disable_ssl, workers)
        if workers < 1:
            # The API service should run in the current process.
            self._server = service
            # Dump the initial option values
            cfg.CONF.log_opt_values(LOG, logging.DEBUG)
            service.start()
            systemd.notify_once()
        else:
            # dispose the whole pool before os.fork, otherwise there will
            # be shared DB connections in child processes which may cause
            # DB errors.
            db_api.get_context_manager().dispose_pool()
            # The API service runs in a number of child processes.
            # Minimize the cost of checking for child exit by extending the
            # wait interval past the default of 0.01s.
            self._server = common_service.ProcessLauncher(
                cfg.CONF, wait_interval=1.0, restart_method='mutate')
            self._server.launch_service(service,
                                        workers=service.worker_process_count)
Example #8
def _start_workers(workers):
    process_workers = [
        plugin_worker for plugin_worker in workers
        if plugin_worker.worker_process_count > 0
    ]

    try:
        if process_workers:
            worker_launcher = common_service.ProcessLauncher(
                cfg.CONF, wait_interval=1.0, restart_method='mutate'
            )

            # add one extra process worker and run all workers with
            # worker_process_count == 0 inside it
            thread_workers = [
                plugin_worker for plugin_worker in workers
                if plugin_worker.worker_process_count < 1
            ]
            if thread_workers:
                process_workers.append(
                    AllServicesNeutronWorker(thread_workers)
                )

            # dispose the whole pool before os.fork, otherwise there will
            # be shared DB connections in child processes which may cause
            # DB errors.
            session.get_context_manager().dispose_pool()

            for worker in process_workers:
                worker_launcher.launch_service(worker,
                                               worker.worker_process_count)
        else:
            worker_launcher = common_service.ServiceLauncher(cfg.CONF)
            for worker in workers:
                worker_launcher.launch_service(worker)
        return worker_launcher
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.exception('Unrecoverable error: please check log for '
                          'details.')
Example #9
    def _setUp(self):
        self.engine = db_api.CONTEXT_WRITER.get_engine()
        self.generate_schema(self.engine)
        self._init_resources()

        self.sessionmaker = session.get_maker(self.engine)

        _restore_factory = db_api.get_context_manager()._root_factory

        self.enginefacade_factory = enginefacade._TestTransactionFactory(
            self.engine,
            self.sessionmaker,
            from_factory=_restore_factory,
            apply_global=False)

        db_api.get_context_manager()._root_factory = self.enginefacade_factory

        self.addCleanup(lambda: self.delete_from_schema(self.engine))
        self.addCleanup(lambda: setattr(db_api.get_context_manager(),
                                        "_root_factory", _restore_factory))

        self.useFixture(_EnableSQLiteFKsFixture(self.engine))
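
`_EnableSQLiteFKsFixture` itself is not shown in these excerpts; such a fixture typically switches on SQLite's foreign-key enforcement, which is off by default. A hedged sketch using the standard SQLAlchemy pool-event recipe (the class body here is an assumption, not the original implementation):

import fixtures
from sqlalchemy import event


class EnableSQLiteFKsFixture(fixtures.Fixture):
    """Enable FK enforcement on every new SQLite connection."""

    def __init__(self, engine):
        self.engine = engine

    def _setUp(self):
        def set_sqlite_fks(dbapi_conn, connection_record):
            cursor = dbapi_conn.cursor()
            cursor.execute('PRAGMA foreign_keys=ON;')
            cursor.close()

        event.listen(self.engine, 'connect', set_sqlite_fks)
        self.addCleanup(
            event.remove, self.engine, 'connect', set_sqlite_fks)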
Example #10
    def _setUp(self):
        # Register all data models
        engine = db_api.get_context_manager().writer.get_engine()
        if not SqlFixture._TABLES_ESTABLISHED:
            model_base.BASEV2.metadata.create_all(engine)
            SqlFixture._TABLES_ESTABLISHED = True

        def clear_tables():
            with engine.begin() as conn:
                for table in reversed(
                        model_base.BASEV2.metadata.sorted_tables):
                    conn.execute(table.delete())

        self.addCleanup(clear_tables)
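
This fixture creates the schema only once per process and, between tests, deletes rows instead of dropping tables; walking `sorted_tables` in reverse deletes child rows before parent rows so foreign keys are never violated. A self-contained sketch of the same idea with two hypothetical tables:

import sqlalchemy as sa

metadata = sa.MetaData()
parent = sa.Table('parent', metadata,
                  sa.Column('id', sa.Integer, primary_key=True))
child = sa.Table('child', metadata,
                 sa.Column('id', sa.Integer, primary_key=True),
                 sa.Column('parent_id', sa.Integer,
                           sa.ForeignKey('parent.id')))

engine = sa.create_engine('sqlite://')
metadata.create_all(engine)  # build the schema once

def clear_tables():
    # delete rows child-first so FK constraints stay satisfied
    with engine.begin() as conn:
        for table in reversed(metadata.sorted_tables):
            conn.execute(table.delete())

clear_tables()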
Example #11
    def _launch(self, application, workers=0, desc=None):
        set_proctitle = "off" if desc is None else CONF.setproctitle
        service = WorkerService(self, application, set_proctitle,
                                self.disable_ssl, workers)
        if workers < 1:
            # The API service should run in the current process.
            self._server = service
            # Dump the initial option values
            cfg.CONF.log_opt_values(LOG, logging.DEBUG)
            service.start(desc=desc)
            systemd.notify_once()
        else:
            # dispose the whole pool before os.fork, otherwise there will
            # be shared DB connections in child processes which may cause
            # DB errors.
            db_api.get_context_manager().dispose_pool()
            # The API service runs in a number of child processes.
            # Minimize the cost of checking for child exit by extending the
            # wait interval past the default of 0.01s.
            self._server = common_service.ProcessLauncher(
                cfg.CONF, wait_interval=1.0, restart_method='mutate')
            self._server.launch_service(service,
                                        workers=service.worker_process_count)
Example #12
    def _init_resources(cls):
        # this is a classlevel version of what testresources
        # does w/ the resources attribute as well as the
        # setUpResources() step (which requires a test instance, that
        # SqlFixture does not have).  Because this is a SQLite memory
        # database, we don't actually tear it down, so we can keep
        # it running throughout all tests.
        if cls._GLOBAL_RESOURCES:
            return
        else:
            cls._GLOBAL_RESOURCES = True
            cls.schema_resource = provision.SchemaResource(
                provision.DatabaseResource(
                    "sqlite", db_api.get_context_manager()),
                cls._generate_schema, teardown=False)
            dependency_resources = {}
            for name, resource in cls.schema_resource.resources:
                dependency_resources[name] = resource.getResource()
            cls.schema_resource.make(dependency_resources)
            cls.engine = dependency_resources['database'].engine
Example #13
    def _init_resources(cls):
        # this is a classlevel version of what testresources
        # does w/ the resources attribute as well as the
        # setUpResources() step (which requires a test instance, that
        # SqlFixture does not have).  Because this is a SQLite memory
        # database, we don't actually tear it down, so we can keep
        # it running throughout all tests.
        if cls._GLOBAL_RESOURCES:
            return
        else:
            cls._GLOBAL_RESOURCES = True
            cls.schema_resource = provision.SchemaResource(
                provision.DatabaseResource(
                    "sqlite", db_api.get_context_manager()),
                cls._generate_schema, teardown=False)
            dependency_resources = {}
            for name, resource in cls.schema_resource.resources:
                dependency_resources[name] = resource.getResource()
            cls.schema_resource.make(dependency_resources)
            cls.engine = dependency_resources['database'].engine
Example #14
    def dispatch_events(self):
        # this is guarded by a lock to ensure we don't get too many concurrent
        # dispatchers hitting the database simultaneously.
        to_dispatch, self._resources_to_push = self._resources_to_push, {}
        # TODO(kevinbenton): now that we are batching these, convert to a
        # single get_objects call for all of them
        for resource_id, context_dict in to_dispatch.items():
            context = n_ctx.Context.from_dict(context_dict)
            # attempt to get regardless of event type so concurrent delete
            # after create/update is the same code-path as a delete event
            with db_api.get_context_manager().independent.reader.using(
                    context):
                obj = self._obj_class.get_object(context, id=resource_id)
            # CREATE events are always treated as UPDATE events to ensure
            # listeners are written to handle out-of-order messages
            if obj is None:
                rpc_event = rpc_events.DELETED
                # construct a fake object with the right ID so we can
                # have a payload for the delete message.
                obj = self._obj_class(id=resource_id)
            else:
                rpc_event = rpc_events.UPDATED
            self._resource_push_api.push(context, [obj], rpc_event)
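
Unlike Example #3's queue-driven loop, this variant batches by swapping the shared dict out in a single assignment, so new events accumulate in a fresh dict while the drained batch is processed (the original guards `dispatch_events` with a lock). A minimal illustration of the swap-and-drain idiom with a hypothetical class:

class EventBatcher:
    def __init__(self):
        self._pending = {}

    def push(self, resource_id, payload):
        self._pending[resource_id] = payload

    def drain(self):
        # one assignment hands over the batch and resets the container
        batch, self._pending = self._pending, {}
        return batch


b = EventBatcher()
b.push('net-1', {'event': 'updated'})
assert b.drain() == {'net-1': {'event': 'updated'}}
assert b.drain() == {}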
Example #15
# Imports marked "assumed" complete the excerpt; the original file's
# full import block is not shown.
import sqlalchemy
from sqlalchemy import event  # noqa
from sqlalchemy import exc as sql_exc
from sqlalchemy import orm
from sqlalchemy.orm import exc

from oslo_config import cfg  # assumed
from oslo_log import log as logging  # assumed
import osprofiler.sqlalchemy  # assumed
from osprofiler import opts as profiler_opts  # assumed

from neutron._i18n import _LE
from neutron_lib.db import api  # assumed home of get_context_manager()
from neutron.objects import exceptions as obj_exc


def set_hook(engine):
    if (profiler_opts.is_trace_enabled()
            and profiler_opts.is_db_trace_enabled()):
        osprofiler.sqlalchemy.add_tracing(sqlalchemy, engine, 'neutron.db')


context_manager = api.get_context_manager()

# TODO(ihrachys) the hook assumes options defined by osprofiler, and the only
# public function that is provided by osprofiler that will register them is
# set_defaults, that's why we call it here even though we don't need to change
# defaults
profiler_opts.set_defaults(cfg.CONF)
context_manager.append_on_engine_create(set_hook)

MAX_RETRIES = 10
LOG = logging.getLogger(__name__)


def is_retriable(e):
    if getattr(e, '_RETRY_EXCEEDED', False):
        return False
Example #16
    def get_session(self):
        engine = db_api.get_context_manager().writer.get_engine()
        Session = sessionmaker(bind=engine)
        return Session()
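
A session produced this way is owned by the caller, which should commit and close it explicitly. A hypothetical usage sketch, bound to a throwaway in-memory engine instead of the helper above:

from sqlalchemy import create_engine, text
from sqlalchemy.orm import sessionmaker

engine = create_engine('sqlite://')
Session = sessionmaker(bind=engine)

session = Session()
try:
    session.execute(text('SELECT 1'))  # placeholder for real work
    session.commit()
finally:
    session.close()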
Example #17
# Imports marked "assumed" complete the excerpt; the original file's
# full import block is not shown.
from oslo_config import cfg  # assumed
from oslo_log import log as logging  # assumed
import osprofiler.sqlalchemy  # assumed
from osprofiler import opts as profiler_opts  # assumed
from pecan import util as p_util
import six
import sqlalchemy
from sqlalchemy import event  # noqa
from sqlalchemy import exc as sql_exc
from sqlalchemy import orm
from sqlalchemy.orm import exc

from neutron_lib.db import api  # assumed home of get_context_manager()


def set_hook(engine):
    if (profiler_opts.is_trace_enabled() and
            profiler_opts.is_db_trace_enabled()):
        osprofiler.sqlalchemy.add_tracing(sqlalchemy, engine, 'neutron.db')


context_manager = api.get_context_manager()

# TODO(ihrachys) the hook assumes options defined by osprofiler, and the only
# public function that is provided by osprofiler that will register them is
# set_defaults, that's why we call it here even though we don't need to change
# defaults
profiler_opts.set_defaults(cfg.CONF)
context_manager.append_on_engine_create(set_hook)


MAX_RETRIES = 10
LOG = logging.getLogger(__name__)


def is_retriable(e):
    if getattr(e, '_RETRY_EXCEEDED', False):
        return False
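
The notable call in this excerpt is `append_on_engine_create`, which registers a callback that the enginefacade context manager invokes for every engine it creates; here it wires up osprofiler tracing. A minimal sketch with a no-op hook, assuming the same oslo.db API the excerpt itself uses:

from oslo_db.sqlalchemy import enginefacade

context_manager = enginefacade.transaction_context()


def announce_engine(engine):
    # invoked once for each engine the facade creates
    print('engine created: %s' % engine.url)


context_manager.append_on_engine_create(announce_engine)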