def setUp(self):
    super(DbTestCase, self).setUp()
    sql_connection = 'sqlite:///' + TEST_DB_PATH
    self.config(group='database', connection=sql_connection)
    self.dbapi = dbapi.get_instance()
    global _DB_CACHE
    if not _DB_CACHE:
        _DB_CACHE = Database(migration, sql_connection=sql_connection)
    self.useFixture(_DB_CACHE)
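# The _DB_CACHE object above is consumed via self.useFixture(...), so
# Database is expected to behave like a fixtures.Fixture. The sketch below
# is a hypothetical minimal version for illustration only; the project's
# actual Database fixture also applies the `migration` module it receives,
# which is omitted here.
import fixtures
from sqlalchemy import create_engine


class Database(fixtures.Fixture):
    def __init__(self, migration, sql_connection):
        self.migration = migration
        self.sql_connection = sql_connection

    def _setUp(self):
        # Create the engine for the test database and make sure it is
        # disposed when the fixture is cleaned up after the test.
        self.engine = create_engine(self.sql_connection)
        self.addCleanup(self.engine.dispose)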
def __init__(self, notifier, management_address, scheduler, proc_name):
    self._ignore_directory = cfg.CONF.ignored_router_directory
    self._queue_warning_threshold = cfg.CONF.queue_warning_threshold
    self._reboot_error_threshold = cfg.CONF.reboot_error_threshold
    self.host = cfg.CONF.host
    self.work_queue = Queue.Queue()
    self.lock = threading.Lock()
    self._keep_going = True
    self.tenant_managers = {}
    self.management_address = management_address
    self.scheduler = scheduler
    self.proc_name = proc_name
    self.resource_cache = TenantResourceCache()
    # This process-global context should not be used in the
    # threads, since the clients are not thread-safe.
    self._context = WorkerContext(self.management_address)
    self.notifier = notifier
    # The notifier needs to be started here to ensure that it
    # happens inside the worker process and not the parent.
    self.notifier.start()
    # The DB is used for tracking debug modes
    self.db_api = db_api.get_instance()
    # Thread locks for the routers so we only put one copy in the
    # work queue at a time
    self._resource_locks = collections.defaultdict(threading.Lock)
    # Messages about what each thread is doing, keyed by thread id
    # and reported by the debug command.
    self._thread_status = {}
    # Start the threads last, so they can use the instance
    # variables created above.
    self.threads = [
        threading.Thread(
            name='t%02d' % i,
            target=self._thread_target,
        )
        for i in six.moves.range(cfg.CONF.num_worker_threads)
    ]
    self.hash_ring_mgr = hash_ring.HashRingManager()
    for t in self.threads:
        t.setDaemon(True)
        t.start()
def __init__(self, notifier, management_address, scheduler, proc_name):
    self._ignore_directory = cfg.CONF.ignored_router_directory
    self._queue_warning_threshold = cfg.CONF.queue_warning_threshold
    self._reboot_error_threshold = cfg.CONF.reboot_error_threshold
    self.host = cfg.CONF.host
    self.work_queue = Queue.Queue()
    self.lock = threading.Lock()
    self._keep_going = True
    self.tenant_managers = {}
    self.management_address = management_address
    self.scheduler = scheduler
    self.proc_name = proc_name
    self.resource_cache = TenantResourceCache()
    # This process-global context should not be used in the
    # threads, since the clients are not thread-safe.
    self._context = WorkerContext(self.management_address)
    self.notifier = notifier
    # The notifier needs to be started here to ensure that it
    # happens inside the worker process and not the parent.
    self.notifier.start()
    # The DB is used for tracking debug modes
    self.db_api = db_api.get_instance()
    # Thread locks for the routers so we only put one copy in the
    # work queue at a time
    self._resource_locks = collections.defaultdict(threading.Lock)
    # Messages about what each thread is doing, keyed by thread id
    # and reported by the debug command.
    self._thread_status = {}
    # Start the threads last, so they can use the instance
    # variables created above.
    self.threads = [
        threading.Thread(
            name='t%02d' % i,
            target=self._thread_target,
        )
        for i in six.moves.range(cfg.CONF.num_worker_threads)
    ]
    self.hash_ring_mgr = hash_ring.HashRingManager()
    self._deferred_messages = []
    for t in self.threads:
        t.setDaemon(True)
        t.start()
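# Each worker thread above runs self._thread_target, which is not shown in
# this section. Based on the attributes it relies on (work_queue,
# _keep_going, _thread_status), a typical consumer loop would look roughly
# like the sketch below. This is an assumption for illustration only, not
# the project's actual method; the real loop dispatches work items to the
# tenant managers, which is elided here.
def _thread_target(self):
    my_id = threading.current_thread().name
    while self._keep_going:
        self._thread_status[my_id] = 'waiting for work'
        item = self.work_queue.get()
        if item is None:
            # A None item is assumed here to be the shutdown sentinel.
            self.work_queue.task_done()
            break
        self._thread_status[my_id] = 'processing %s' % (item,)
        try:
            # Dispatch to the tenant router managers would happen here.
            pass
        finally:
            self.work_queue.task_done()
            self._thread_status[my_id] = 'idle'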