def get_lock(uuid):
    """Return a lock object suitable for the current deployment mode.

    In standalone mode a process-local ``InternalLock`` is enough; in a
    clustered deployment a distributed lock is obtained from the tooz
    coordination backend and wrapped in ``ToozLock``.

    :param uuid: identifier used as the lock name
    :returns: an ``InternalLock`` or ``ToozLock`` instance
    """
    if not CONF.standalone:
        backend = coordination.get_coordinator()
        return ToozLock(backend.get_lock(uuid))
    return InternalLock(uuid)
def init_host(self):
    """Initialize Worker host

    Init db connection, load and validate processing
    hooks, runs periodic tasks.

    :returns: None
    """
    # Warn loudly when introspection results are being thrown away, since
    # that is usually a misconfiguration rather than a deliberate choice.
    if CONF.processing.store_data == 'none':
        LOG.warning('Introspection data will not be stored. Change '
                    '"[processing] store_data" option if this is not '
                    'the desired behavior')
    else:
        LOG.info('Introspection data will be stored in the %s backend',
                 CONF.processing.store_data)
    db.init()

    # Hook validation failures are fatal: the service cannot process
    # introspection data with a broken hook configuration.
    try:
        hooks = plugins_base.validate_processing_hooks()
    except Exception as exc:
        LOG.critical(str(exc))
        sys.exit(1)
    LOG.info('Enabled processing hooks: %s', [h.name for h in hooks])

    driver = pxe_filter.driver()
    driver.init_filter()

    # Wrap the module-level periodic_clean_up callable with the configured
    # spacing before handing it to the periodic worker.
    periodic_clean_up_ = periodics.periodic(
        spacing=CONF.clean_up_period)(periodic_clean_up)

    self._periodics_worker = periodics.PeriodicWorker(
        callables=[(driver.get_periodic_sync_task(), None, None),
                   (periodic_clean_up_, None, None)],
        executor_factory=periodics.ExistingExecutor(utils.executor()),
        on_failure=self._periodics_watchdog)
    # Run the periodic worker on the shared executor instead of blocking
    # this thread.
    utils.executor().submit(self._periodics_worker.start)

    if CONF.enable_mdns:
        endpoint = keystone.get_endpoint('service_catalog')
        self._zeroconf = mdns.Zeroconf()
        self._zeroconf.register_service('baremetal-introspection', endpoint)

    # Clustered deployments join the tooz coordination group; a failure
    # here tears the host back down and re-raises so startup fails fast.
    if not CONF.standalone:
        try:
            coordinator = coordination.get_coordinator(prefix='conductor')
            coordinator.start(heartbeat=True)
            coordinator.join_group()
        except tooz.ToozError:
            with excutils.save_and_reraise_exception():
                LOG.critical('Failed when connecting to coordination '
                             'backend.')
                self.del_host()
        else:
            LOG.info('Successfully connected to coordination backend.')
def get_random_topic():
    """Build an RPC topic for a randomly chosen conductor.

    Queries the coordination backend for group members, extracts the
    host names of conductor members, and picks one at random.

    :raises: utils.NoAvailableConductor when no conductor member is found
    :returns: topic string of the form '<MANAGER_TOPIC>.<host>'
    """
    coordinator = coordination.get_coordinator(prefix='api')
    candidates = []
    for raw_member in coordinator.get_members():
        # NOTE(kaifeng) recomposite host in case it contains '.'
        fields = raw_member.decode('ascii').split('.')
        if len(fields) < 3:
            LOG.warning('Found invalid member %s', raw_member)
        elif fields[1] == 'conductor':
            candidates.append('.'.join(fields[2:]))
    if not candidates:
        raise utils.NoAvailableConductor('No available conductor service')
    return '%s.%s' % (manager.MANAGER_TOPIC, random.choice(candidates))
def del_host(self):
    """Shutdown the ironic inspector conductor service."""
    # Leave the coordination group first so other members stop routing
    # work to this host; failures here are logged but do not abort
    # the rest of the shutdown.
    if not CONF.standalone:
        try:
            coordinator = coordination.get_coordinator(prefix='conductor')
            if coordinator.started:
                coordinator.leave_group()
                coordinator.stop()
        except tooz.ToozError:
            LOG.exception('Failed to stop coordinator')

    # Non-blocking acquire guards against re-entrant shutdown: if another
    # caller is already shutting down, just log and return.
    if not self._shutting_down.acquire(blocking=False):
        LOG.warning('Attempted to shut down while already shutting down')
        return

    pxe_filter.driver().tear_down_filter()

    # Stop and wait for periodic tasks; any error is logged but the
    # worker reference is cleared regardless.
    if self._periodics_worker is not None:
        try:
            self._periodics_worker.stop()
            self._periodics_worker.wait()
        except Exception as e:
            LOG.exception('Service error occurred when stopping '
                          'periodic workers. Error: %s', e)
        self._periodics_worker = None

    # Drain the shared executor only after the periodic worker is gone.
    if utils.executor().alive:
        utils.executor().shutdown(wait=True)

    if self._zeroconf is not None:
        self._zeroconf.close()
        self._zeroconf = None

    self._shutting_down.release()
    LOG.info('Shut down successfully')
def start_coordinator():
    """Create a coordinator instance for non-standalone case.

    No-op when running standalone. The coordinator is started without
    heartbeating — presumably because the API side only queries group
    membership rather than maintaining one (TODO: confirm).
    """
    if not CONF.standalone:
        coordinator = coordination.get_coordinator(prefix='api')
        coordinator.start(heartbeat=False)
        # Bug fix: log message previously misspelled 'Sucessfully'.
        LOG.info('Successfully created coordinator.')
def test_get_with_prefix(self, mock_coordinator):
    """An explicit prefix is forwarded to the coordinator factory."""
    prefix = 'conductor'
    coordination.get_coordinator(prefix=prefix)
    mock_coordinator.assert_called_once_with(prefix=prefix)
def test_get(self, mock_coordinator):
    """Without arguments the factory is invoked with prefix=None."""
    expected_prefix = None
    coordination.get_coordinator()
    mock_coordinator.assert_called_once_with(prefix=expected_prefix)
def init_host(self):
    """Initialize Worker host

    Init db connection, load and validate processing
    hooks, runs periodic tasks.

    :returns: None
    """
    # Warn loudly when introspection results are being thrown away, since
    # that is usually a misconfiguration rather than a deliberate choice.
    if CONF.processing.store_data == 'none':
        LOG.warning('Introspection data will not be stored. Change '
                    '"[processing] store_data" option if this is not '
                    'the desired behavior')
    else:
        LOG.info('Introspection data will be stored in the %s backend',
                 CONF.processing.store_data)
    db.init()

    # Coordination is attempted in both modes. In standalone mode a
    # failure is tolerated (assume single instance, continue without a
    # coordinator); otherwise the failure tears the host down and is
    # re-raised so startup fails fast.
    self.coordinator = None
    try:
        self.coordinator = coordination.get_coordinator(prefix='conductor')
        self.coordinator.start(heartbeat=True)
        self.coordinator.join_group()
    except Exception as exc:
        if CONF.standalone:
            LOG.info(
                'Coordination backend cannot be started, assuming '
                'no other instances are running. Error: %s', exc)
            self.coordinator = None
        else:
            with excutils.save_and_reraise_exception():
                LOG.critical(
                    'Failure when connecting to coordination '
                    'backend', exc_info=True)
                self.del_host()
    else:
        LOG.info('Successfully connected to coordination backend.')

    # Hook validation failures are fatal: the service cannot process
    # introspection data with a broken hook configuration.
    try:
        hooks = plugins_base.validate_processing_hooks()
    except Exception as exc:
        LOG.critical(str(exc))
        sys.exit(1)
    LOG.info('Enabled processing hooks: %s', [h.name for h in hooks])

    driver = pxe_filter.driver()
    driver.init_filter()

    # clean_up_period == 0 disables both periodic tasks; both share the
    # same spacing.
    periodic_clean_up_ = periodics.periodic(
        spacing=CONF.clean_up_period,
        enabled=(CONF.clean_up_period != 0))(periodic_clean_up)
    sync_with_ironic_ = periodics.periodic(
        spacing=CONF.clean_up_period,
        enabled=(CONF.clean_up_period != 0))(sync_with_ironic)

    callables = [(periodic_clean_up_, None, None),
                 (sync_with_ironic_, (self, ), None)]

    # The filter driver's sync task is optional.
    driver_task = driver.get_periodic_sync_task()
    if driver_task is not None:
        callables.append((driver_task, None, None))

    # run elections periodically if we have a coordinator
    # that we were able to start
    if (self.coordinator and self.coordinator.started):
        periodic_leader_election_ = periodics.periodic(
            spacing=CONF.leader_election_interval)(
            periodic_leader_election)
        callables.append((periodic_leader_election_, (self, ), None))

    self._periodics_worker = periodics.PeriodicWorker(
        callables=callables,
        executor_factory=periodics.ExistingExecutor(utils.executor()),
        on_failure=self._periodics_watchdog)
    # Run the periodic worker on the shared executor instead of blocking
    # this thread.
    utils.executor().submit(self._periodics_worker.start)

    if CONF.enable_mdns:
        endpoint = keystone.get_endpoint('service_catalog')
        self._zeroconf = mdns.Zeroconf()
        self._zeroconf.register_service('baremetal-introspection', endpoint)