def test_base_dir_win32(self):
    coord = coordination.get_coordinator(
        'file:///C:/path/', self._FAKE_MEMBER_ID)
    self.assertEqual('C:\\path\\', coord._dir)

    coord = coordination.get_coordinator(
        'file:////share_addr/share_path/', self._FAKE_MEMBER_ID)
    self.assertEqual('\\\\share_addr\\share_path\\', coord._dir)

    # Administrative shares should be handled properly.
    coord = coordination.get_coordinator(
        'file:////c$/path/', self._FAKE_MEMBER_ID)
    self.assertEqual('\\\\c$\\path\\', coord._dir)

def tooz_make_change(driver, url, session, vol_id, initial, destination,
                     attach_status):
    global coordinator
    global lock
    global acquired

    # If coordinator is not the one we want we cannot reuse it
    if not isinstance(coordinator, driver):
        if coordinator:
            coordinator.stop()
        # Create new coordinator and lock
        coordinator = coordination.get_coordinator(url, str(session))
        coordinator.start()
        lock = coordinator.get_lock(vol_id)

    # When going from available to any other state we acquire the lock
    if initial == 'available':
        # If this is a retry we've already acquired the lock
        if not acquired:
            while not lock.acquire():
                coordinator.heartbeat()
                time.sleep(0.01)
            acquired = True

    n = 0
    while n == 0:
        n = safe_update(session, vol_id,
                        {'status': destination,
                         'attach_status': attach_status},
                        {'status': initial})
        coordinator.heartbeat()

    if destination == 'available':
        lock.release()
        acquired = False

def initialize(self):
    LOG.debug("Initializing Ansible ML2 driver")

    # Get ML2 config
    self.ml2config = config.Config()

    # Build a network runner inventory object
    # and instantiate network runner
    _inv = Inventory()
    _inv.deserialize({'all': {'hosts': self.ml2config.inventory}})
    self.net_runr = net_runr_api.NetworkRunner(_inv)

    # Build the extra_params dict.
    # This holds extra config params per host passed to network runner.
    self.extra_params = {}
    for host_name in self.ml2config.inventory:
        self.extra_params[host_name] = {}
        for i in c.EXTRA_PARAMS:
            if i in self.ml2config.inventory[host_name]:
                self.extra_params[host_name][i] = \
                    self.ml2config.inventory[host_name].get(i)

    self.coordinator = coordination.get_coordinator(
        cfg.CONF.ml2_ansible.coordination_uri,
        '{}-{}'.format(CONF.host, os.getpid()))
    # The heartbeat will have the default timeout of 30 seconds
    # that can be changed per-driver. Both the Redis and etcd drivers
    # use 30 second timeouts.
    self.coordinator.start(start_heart=True)
    LOG.debug("Ansible ML2 coordination started via uri %s",
              cfg.CONF.ml2_ansible.coordination_uri)

    self.trunk_driver = trunk_driver.NetAnsibleTrunkDriver.create(self)

def __init__(self):
    self._coord = coordination.get_coordinator(
        CONF.orchestrator.coordination_url,
        uuidutils.generate_uuid().encode('ascii'))
    self._state = state.StateManager()
    self._storage = storage.get_storage()
    self._coord.start(start_heart=True)

def __init__(self, device_cfg):
    super(NetmikoSwitch, self).__init__(device_cfg)
    device_type = self.config.get('device_type', '')
    # Use the part that is after 'netmiko_'
    device_type = device_type.partition('netmiko_')[2]
    if device_type not in netmiko.platforms:
        raise exc.GenericSwitchNetmikoNotSupported(device_type=device_type)
    self.config['device_type'] = device_type

    self.locker = None
    if CONF.ngs_coordination.backend_url:
        self.locker = coordination.get_coordinator(
            CONF.ngs_coordination.backend_url,
            ('ngs-' + CONF.host).encode('ascii'))
        self.locker.start()
        atexit.register(self.locker.stop)

    self.lock_kwargs = {
        'locks_pool_size': int(self.ngs_config['ngs_max_connections']),
        'locks_prefix': self.config.get('host', '') or
                        self.config.get('ip', ''),
        'timeout': CONF.ngs_coordination.acquire_timeout}

def coordinator_setup(start_heart=True):
    """
    Sets up the client for the coordination service.

    URL examples for connection:
        zake://
        file:///tmp
        redis://username:password@host:port
        mysql://username:password@host:port/dbname
    """
    url = cfg.CONF.coordination.url
    lock_timeout = cfg.CONF.coordination.lock_timeout
    member_id = get_member_id()

    if url:
        coordinator = coordination.get_coordinator(url, member_id,
                                                   lock_timeout=lock_timeout)
    else:
        # Use a no-op backend.
        # Note: We don't use tooz to obtain a reference since for this to
        # work we would need to register a plugin inside a setup.py
        # entry_point and use "python setup.py develop" for tests to work.
        coordinator = NoOpDriver(member_id)

    coordinator.start(start_heart=start_heart)
    return coordinator

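# A minimal usage sketch (not from the original sources) for the
# coordinator_setup() helper above, assuming the hypothetical lock name
# b'my-resource'. tooz locks support the context-manager protocol, so the
# guarded block only runs while the distributed lock is held.
coordinator = coordinator_setup()
lock = coordinator.get_lock(b'my-resource')
with lock:
    pass  # work that must not run concurrently across members
coordinator.stop()
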
def __init__(self, options):
    self.options = options
    self.rc = 0

    # Configure logging
    self.logger = logging.getLogger('coolock')
    levels = {
        "debug": logging.DEBUG,
        "info": logging.INFO,
        "information": logging.INFO,
        "warning": logging.WARNING,
        "warn": logging.WARNING,
        "error": logging.ERROR,
        "critical": logging.CRITICAL,
        "crit": logging.CRITICAL,
    }
    self.logger.setLevel(levels[options.log_level])
    handler = logging.handlers.RotatingFileHandler(
        options.log_file,
        maxBytes=options.log_max_size,
        backupCount=options.rotate_log_copies)
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s (%(process)d) - %(levelname)s - %(message)s')
    handler.setFormatter(formatter)
    self.logger.addHandler(handler)

    # Configure the coordination client
    self.logger.debug(
        "Initializing coordination client for node: %s. Using: %s"
        % (options.node, options.coordination_backend))
    self.coord_client = coordination.get_coordinator(
        options.coordination_backend, options.node)
    self.coord_client.start()
    self.lock = self.coord_client.get_lock(options.lock)

def coordinator_setup():
    """
    Sets up the client for the coordination service.

    URL examples for connection:
        zake://
        file:///tmp
        redis://username:password@host:port
        mysql://username:password@host:port/dbname
    """
    url = cfg.CONF.coordination.url
    lock_timeout = cfg.CONF.coordination.lock_timeout
    proc_info = system_info.get_process_info()
    member_id = '%s_%d' % (proc_info['hostname'], proc_info['pid'])

    if url:
        coordinator = coordination.get_coordinator(url, member_id,
                                                   lock_timeout=lock_timeout)
    else:
        # Use a no-op backend.
        # Note: We don't use tooz to obtain a reference since for this to
        # work we would need to register a plugin inside a setup.py
        # entry_point and use "python setup.py develop" for tests to work.
        coordinator = NoOpDriver()

    coordinator.start()
    return coordinator

def coordinator_setup():
    """
    Sets up the client for the coordination service.

    URL examples for connection:
        zake://
        file:///tmp
        redis://username:password@host:port
        mysql://username:password@host:port/dbname
    """
    url = cfg.CONF.coordination.url
    lock_timeout = cfg.CONF.coordination.lock_timeout
    proc_info = system_info.get_process_info()
    member_id = six.b('%s_%d' % (proc_info['hostname'], proc_info['pid']))

    if url:
        coordinator = coordination.get_coordinator(url, member_id,
                                                   lock_timeout=lock_timeout)
    else:
        # Use a no-op backend.
        # Note: We don't use tooz to obtain a reference since for this to
        # work we would need to register a plugin inside a setup.py
        # entry_point and use "python setup.py develop" for tests to work.
        coordinator = NoOpDriver(member_id)

    coordinator.start()
    return coordinator

def __init__(self, device_cfg):
    self.config = device_cfg
    self.sw_internal_cfg = {
        "sw_ssh_connect_timeout": 60,
        "sw_ssh_connect_interval": 10,
        "sw_max_connections": CONF.sw_coordination.max_connections}

    device_type = self.config.get('device_type', 'huawei')
    if device_type not in netmiko.platforms:
        raise exceptions.SwitchNetmikoNotSupported(device_type=device_type)
    self.config['device_type'] = device_type

    self.locker = None
    self.session_id = None
    if CONF.sw_coordination.backend_url:
        self.locker = coordination.get_coordinator(
            CONF.sw_coordination.backend_url,
            ('switch-' + self.config['ip']).encode('ascii'))
        self.locker.start()
        self.session_id = hex(self.locker._coord.client_id[0])
        logger.debug("zookeeper client connection[session_id:%s] opened."
                     % self.session_id)

    self.lock_kwargs = {
        'locks_pool_size': int(self.sw_internal_cfg['sw_max_connections']),
        'locks_prefix': self.config['ip'],
        'timeout': CONF.sw_coordination.acquire_lock_timeout}

def __init__(self, worker_id):
    self._worker_id = worker_id
    super(Orchestrator, self).__init__(self._worker_id)

    self.fetcher = driver.DriverManager(
        FETCHERS_NAMESPACE,
        CONF.fetcher.backend,
        invoke_on_load=True,
    ).driver

    transformers = transformer.get_transformers()
    self.collector = collector.get_collector(transformers)
    self.storage = storage.get_storage()
    self._state = state.StateManager()

    # RPC
    self.server = None
    self._rating_endpoint = RatingEndpoint(self)
    self._init_messaging()

    # DLM
    self.coord = coordination.get_coordinator(
        CONF.orchestrator.coordination_url,
        uuidutils.generate_uuid().encode('ascii'))
    self.coord.start(start_heart=True)

def __init__(self, username, password, host, port=23, timeout=10):
    super(CiscoSwitch, self).__init__()
    self.host = host
    self.username = username
    self.password = password
    self.port = port
    self.timeout = timeout
    self.sw_internal_cfg = {
        "sw_telnet_connect_timeout": 60,
        "sw_telnet_connect_interval": 10,
        "sw_max_connections": CONF.sw_coordination.max_connections}

    self.locker = None
    self.session_id = None
    if CONF.sw_coordination.backend_url:
        self.locker = coordination.get_coordinator(
            CONF.sw_coordination.backend_url,
            ('switch-' + self.host).encode('ascii'))
        self.locker.start()
        self.session_id = hex(self.locker._coord.client_id[0])
        logger.debug("zookeeper client connection[session_id:%s] opened."
                     % self.session_id)

    self.lock_kwargs = {
        'locks_pool_size': int(self.sw_internal_cfg['sw_max_connections']),
        'locks_prefix': self.host,
        'timeout': CONF.sw_coordination.acquire_lock_timeout}

def __init__(self, worker_id):
    self._worker_id = worker_id
    super(CloudKittyProcessor, self).__init__(self._worker_id)

    self.tenants = []

    self.fetcher = driver.DriverManager(
        FETCHERS_NAMESPACE,
        CONF.fetcher.backend,
        invoke_on_load=True,
    ).driver

    self.collector = collector.get_collector()
    self.storage = storage.get_storage()
    self._state = state.StateManager()

    # RPC
    self.server = None
    self._rating_endpoint = RatingEndpoint(self)
    self._scope_endpoint = ScopeEndpoint()
    self._init_messaging()

    # DLM
    self.coord = coordination.get_coordinator(
        CONF.orchestrator.coordination_url,
        uuidutils.generate_uuid().encode('ascii'))
    self.coord.start(start_heart=True)

    self.next_timestamp_to_process = functools.partial(
        _check_state, self, CONF.collect.period)

    self.worker_class = Worker
    self.log_worker_initiated()

def _add_members(self, number_of_members, weight=1):
    for _ in six.moves.range(number_of_members):
        m = tests.get_random_uuid()
        coord = coordination.get_coordinator(self.url, m)
        coord.start()
        coord.join_partitioned_group(self.group_id, weight=weight)
        self._extra_coords.append(coord)
    self._coord.run_watchers()

def test_client_failure_heartbeat(self, mock_client_cls):
    mock_client = mock.MagicMock()
    mock_client_cls.return_value = mock_client
    member_id = tests.get_random_uuid()
    coord = coordination.get_coordinator(self.FAKE_URL, member_id)
    coord.start()
    mock_client.set.side_effect = socket.timeout('timed-out')
    self.assertRaises(coordination.ToozConnectionError, coord.heartbeat)

def test_client_failure_heartbeat(self, mock_client_cls):
    mock_client = mock.MagicMock()
    mock_client_cls.return_value = mock_client
    member_id = str(uuid.uuid4()).encode('ascii')
    coord = coordination.get_coordinator(self.FAKE_URL, member_id)
    coord.start()
    mock_client.set.side_effect = socket.timeout('timed-out')
    self.assertRaises(coordination.ToozConnectionError, coord.heartbeat)

def __init__(self, worker_id, conf, namespaces=None):
    namespaces = namespaces or ['compute', 'central']
    group_prefix = conf.polling.partitioning_group_prefix
    super(AgentManager, self).__init__(worker_id)
    self.conf = conf

    if type(namespaces) is not list:
        namespaces = [namespaces]

    # We'll have the default ['compute', 'central'] here if no
    # namespaces are passed.
    extensions = (self._extensions('poll', namespace, self.conf).extensions
                  for namespace in namespaces)
    # Get the extensions from the pollster builder.
    extensions_fb = (self._extensions_from_builder('poll', namespace)
                     for namespace in namespaces)

    # Create dynamic pollsters
    extensions_dynamic_pollsters = self.create_dynamic_pollsters()

    self.extensions = list(itertools.chain(*list(extensions))) + list(
        itertools.chain(*list(extensions_fb))) + list(
        extensions_dynamic_pollsters)

    if not self.extensions:
        LOG.warning('No valid pollsters can be loaded from %s '
                    'namespaces', namespaces)

    discoveries = (self._extensions('discover', namespace,
                                    self.conf).extensions
                   for namespace in namespaces)
    self.discoveries = list(itertools.chain(*list(discoveries)))
    self.polling_periodics = None

    self.hashrings = None
    self.partition_coordinator = None
    if self.conf.coordination.backend_url:
        # XXX uuid4().bytes ought to work, but it requires ascii for now
        coordination_id = str(uuid.uuid4()).encode('ascii')
        self.partition_coordinator = coordination.get_coordinator(
            self.conf.coordination.backend_url, coordination_id)

    # Compose the coordination group prefix.
    # We'll use the namespaces as the basis for this partitioning.
    namespace_prefix = '-'.join(sorted(namespaces))
    self.group_prefix = ('%s-%s' % (namespace_prefix, group_prefix)
                         if group_prefix else namespace_prefix)

    self.notifier = oslo_messaging.Notifier(
        messaging.get_transport(self.conf),
        driver=self.conf.publisher_notifier.telemetry_driver,
        publisher_id="ceilometer.polling")

    self._keystone = None
    self._keystone_last_exception = None

def test_client_failure_leave(self, mock_client_cls):
    mock_client = mock.MagicMock()
    mock_client_cls.return_value = mock_client
    member_id = tests.get_random_uuid()
    coord = coordination.get_coordinator(self.FAKE_URL, member_id)
    coord.start()
    mock_client.gets.side_effect = socket.timeout('timed-out')
    fut = coord.leave_group(tests.get_random_uuid())
    self.assertRaises(coordination.ToozConnectionError, fut.get)

def __init__(self, config, cache_db):
    logger.info("init CFA Selection UA Handler.")
    self.handler_name = 'handler_cfaselectionua'
    super(CFASelectionUAHandler, self).__init__(config, cache_db)
    coordinator_config = config.get('coordinator_config', {})
    self.coordinator = coordination.get_coordinator(
        coordinator_config.get('connection_url', 'redis://'),
        coordinator_config.get('coordinator_name', 'bridge'))
    self.coordinator.start(start_heart=True)

def test_client_run_watchers_mixin(self, mock_client_cls,
                                   mock_run_watchers):
    mock_client = mock.MagicMock()
    mock_client_cls.return_value = mock_client
    member_id = tests.get_random_uuid()
    coord = coordination.get_coordinator(self.FAKE_URL, member_id)
    coord.start()
    coord.run_watchers()
    self.assertTrue(mock_run_watchers.called)

def _get_lock_distributed(name):
    if not LockManager._coordinator:
        LOG.debug("Initializing coordinator with connect string %s",
                  LockManager._connect_string)
        LockManager._coordinator = coordination.get_coordinator(
            LockManager._connect_string, "vmware-neutron-plugin")
    LOG.debug("Retrieved lock for %s", name)
    return LockManager._coordinator.get_lock(name)

def test_client_failure_leave(self, mock_client_cls):
    mock_client = mock.MagicMock()
    mock_client_cls.return_value = mock_client
    member_id = str(uuid.uuid4()).encode('ascii')
    coord = coordination.get_coordinator(self.FAKE_URL, member_id)
    coord.start()
    mock_client.gets.side_effect = socket.timeout('timed-out')
    fut = coord.leave_group(str(uuid.uuid4()).encode('ascii'))
    self.assertRaises(coordination.ToozConnectionError, fut.get)

def __init__(self, config, cache_db):
    logger.info("Init priceQuotation second phase commit handler.")
    self.handler_name = "handler_priceQuotationBot"
    super(PQSecondPhaseCommit, self).__init__(config, cache_db)
    coordinator_config = config.get("coordinator_config", {})
    self.coordinator = coordination.get_coordinator(
        coordinator_config.get("connection_url", "redis://"),
        coordinator_config.get("coordinator_name", "bridge"))
    self.coordinator.start(start_heart=True)

def test_client_run_watchers_mixin(self, mock_client_cls,
                                   mock_run_watchers):
    mock_client = mock.MagicMock()
    mock_client_cls.return_value = mock_client
    member_id = str(uuid.uuid4()).encode('ascii')
    coord = coordination.get_coordinator(self.FAKE_URL, member_id)
    coord.start()
    coord.run_watchers()
    self.assertTrue(mock_run_watchers.called)

def __init__(self, conf):
    super(CarbonaraBasedStorage, self).__init__(conf)
    try:
        self.coord = coordination.get_coordinator(
            conf.coordination_url,
            str(uuid.uuid4()).encode('ascii'))
        self.coord.start(start_heart=True)
    except Exception as e:
        raise storage.StorageError("Unable to start coordinator: %s" % e)
    self.aggregation_workers_number = conf.aggregation_workers_number

def start(self):
    if self.started:
        return

    # NOTE(bluex): Tooz expects member_id as a byte string.
    member_id = (self.prefix + self.agent_id).encode('ascii')
    self.coordinator = coordination.get_coordinator(
        cfg.CONF.coordination.backend_url, member_id)
    self.coordinator.start(start_heart=True)
    self.started = True

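# A hedged lifecycle sketch for the start() pattern above: with
# start_heart=True, tooz runs its own heartbeat loop, so the caller only
# has to stop the coordinator on shutdown. The backend URL and member id
# below are illustrative assumptions, not values from the original code.
from tooz import coordination

coordinator = coordination.get_coordinator('memcached://localhost:11211',
                                           b'agent-1')
coordinator.start(start_heart=True)
try:
    pass  # service main loop; no manual heartbeat() calls needed
finally:
    coordinator.stop()
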
def __init__(self, worker_id, conf, namespaces=None):
    namespaces = namespaces or ['compute', 'central']
    group_prefix = conf.polling.partitioning_group_prefix
    super(AgentManager, self).__init__(worker_id)
    self.conf = conf

    if type(namespaces) is not list:
        namespaces = [namespaces]

    # We'll have the default ['compute', 'central'] here if no
    # namespaces are passed.
    extensions = (self._extensions('poll', namespace, self.conf).extensions
                  for namespace in namespaces)
    # Get the extensions from the pollster builder.
    extensions_fb = (self._extensions_from_builder('poll', namespace)
                     for namespace in namespaces)

    self.extensions = list(itertools.chain(*list(extensions))) + list(
        itertools.chain(*list(extensions_fb)))

    if not self.extensions:
        LOG.warning('No valid pollsters can be loaded from %s '
                    'namespaces', namespaces)

    discoveries = (self._extensions('discover', namespace,
                                    self.conf).extensions
                   for namespace in namespaces)
    self.discoveries = list(itertools.chain(*list(discoveries)))
    self.polling_periodics = None

    self.hashrings = None
    self.partition_coordinator = None
    if self.conf.coordination.backend_url:
        # XXX uuid4().bytes ought to work, but it requires ascii for now
        coordination_id = str(uuid.uuid4()).encode('ascii')
        self.partition_coordinator = coordination.get_coordinator(
            self.conf.coordination.backend_url, coordination_id)

    # Compose the coordination group prefix.
    # We'll use the namespaces as the basis for this partitioning.
    namespace_prefix = '-'.join(sorted(namespaces))
    self.group_prefix = ('%s-%s' % (namespace_prefix, group_prefix)
                         if group_prefix else namespace_prefix)

    self.notifier = oslo_messaging.Notifier(
        messaging.get_transport(self.conf),
        driver=self.conf.publisher_notifier.telemetry_driver,
        publisher_id="ceilometer.polling")

    self._keystone = None
    self._keystone_last_exception = None

def __init__(self):
    super(MonitorService, self).__init__()

    coord_url = ("%s://%s:%s" % (cfg.CONF.taskflow.coord_url,
                                 cfg.CONF.taskflow.zk_hosts,
                                 cfg.CONF.taskflow.zk_port))
    self.coordinator = coordination.get_coordinator(coord_url,
                                                    b'cue-monitor')
    self.coordinator.start()
    # Create a lock
    self.lock = self.coordinator.get_lock(b"status_check")

def test_hashring_weight(self):
    p = self._coord.join_partitioned_group(self.group_id, weight=5)
    self.assertEqual([5], list(p.ring.nodes.values()))
    coord = coordination.get_coordinator(self.url,
                                         tests.get_random_uuid())
    coord.start()
    p2 = coord.join_partitioned_group(self.group_id, weight=10)
    self._extra_coords.append(coord)
    self._coord.run_watchers()
    self.assertEqual(set([5, 10]), set(p.ring.nodes.values()))
    self.assertEqual(set([5, 10]), set(p2.ring.nodes.values()))
    p.stop()
    p2.stop()

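# A small sketch of what the partitioner returned by
# join_partitioned_group() is typically used for: routing objects to
# members via the weighted hash ring. The zake:// URL, member id, group
# name, and object below are illustrative assumptions.
from tooz import coordination

coord = coordination.get_coordinator('zake://', b'member-1')
coord.start()
partitioner = coord.join_partitioned_group(b'my-group')
# members_for_object() hashes the object onto the ring and returns the
# set of member ids responsible for it.
owners = partitioner.members_for_object('some-object')
print(owners)
coord.stop()
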
def __init__(self, worker_id, conf, coordination_id=None):
    super(NotificationService, self).__init__(worker_id)
    self.startup_delay = worker_id
    self.conf = conf

    if self.conf.notification.workload_partitioning:
        # XXX uuid4().bytes ought to work, but it requires ascii for now
        coordination_id = (coordination_id or
                           str(uuid.uuid4()).encode('ascii'))
        self.partition_coordinator = coordination.get_coordinator(
            self.conf.coordination.backend_url, coordination_id)
    else:
        self.partition_coordinator = None

def start(self): """Connect to coordination back end.""" if self.started: return # NOTE(gouthamr): Tooz expects member_id as a byte string. member_id = (self.prefix + self.agent_id).encode('ascii') LOG.info("cfg.CONF.coordination.backend_url=%s, membrid=%s" % (cfg.CONF.coordination.backend_url, member_id)) self.coordinator = coordination.get_coordinator( cfg.CONF.coordination.backend_url, member_id) self.coordinator.start(start_heart=True) self.started = True
def __init__(self, backend_url):
    self.coordinator = None
    self.member_id = uuidutils.generate_uuid()

    if backend_url:
        try:
            self.coordinator = coordination.get_coordinator(
                backend_url, self.member_id)
            self.coordinator.start()
            LOG.info('Coordination backend loaded successfully.')
        except coordination.ToozError:
            LOG.error('Error connecting to coordination backend.')
            raise

def start(self): """Connect to coordination back end.""" if self.started: return # NOTE(gouthamr): Tooz expects member_id as a byte string. member_id = (self.prefix + self.agent_id).encode('ascii') backend_url = _get_redis_backend_url() self.coordinator = coordination.get_coordinator( backend_url, member_id, timeout=CONF.coordination.expiration) self.coordinator.start(start_heart=True) self.started = True
def __init__(self, conf):
    super(CarbonaraBasedStorage, self).__init__(conf)
    self.coord = coordination.get_coordinator(
        conf.coordination_url,
        str(uuid.uuid4()).encode('ascii'))
    self.coord.start()
    if conf.aggregation_workers_number is None:
        try:
            self.aggregation_workers_number = multiprocessing.cpu_count()
        except NotImplementedError:
            self.aggregation_workers_number = 2
    else:
        self.aggregation_workers_number = conf.aggregation_workers_number

def _create_coordinator(self, url):

    def _safe_stop(coord):
        try:
            coord.stop()
        except tooz.ToozError as e:
            message = encodeutils.exception_to_unicode(e)
            if (message != 'Can not stop a driver which has not'
                           ' been started'):
                raise

    coord = coordination.get_coordinator(url, tests.get_random_uuid())
    self.addCleanup(_safe_stop, coord)
    return coord

def start(self) -> None:
    if self.started:
        return

    backend_url = cfg.CONF.coordination.backend_url

    # NOTE(bluex): Tooz expects member_id as a byte string.
    member_id = (self.prefix + self.agent_id).encode('ascii')
    self.coordinator = coordination.get_coordinator(backend_url, member_id)
    assert self.coordinator is not None
    self.coordinator.start(start_heart=True)
    self._file_path = self._get_file_path(backend_url)
    self.started = True

def __init__(self, backend_url):
    self.coordinator = None
    self.member_id = uuidutils.generate_uuid()

    if backend_url:
        try:
            self.coordinator = coordination.get_coordinator(
                backend_url, self.member_id)
            self.coordinator.start()
            LOG.info(_LI('Coordination backend loaded successfully.'))
        except coordination.ToozError:
            LOG.error(_LE('Error connecting to coordination backend.'))
            raise

def _create_coordinator(self, url):

    def _safe_stop(coord):
        try:
            coord.stop()
        except coordination.ToozError as e:
            message = encodeutils.exception_to_unicode(e)
            if (message != 'Can not stop a driver which has not'
                           ' been started'):
                raise

    coord = coordination.get_coordinator(url, tests.get_random_uuid())
    self.addCleanup(_safe_stop, coord)
    return coord

def _create_coordinator(self, url):

    def _safe_stop(coord):
        try:
            coord.stop()
        except coordination.ToozError as e:
            message = utils.exception_message(e)
            if (message != 'Can not stop a driver which has not'
                           ' been started'):
                raise

    coord = coordination.get_coordinator(url,
                                         str(uuid.uuid4()).encode('ascii'))
    self.addCleanup(_safe_stop, coord)
    return coord

def _get_lock_distributed(name):
    if LockManager._coordinator_pid != os.getpid():
        # We should use a per-process coordinator. If the PID is
        # different, start a new coordinator.
        # While the API workers are spawned, we have to re-initialize
        # the coordinator, so we validate that the PID is still the same.
        LockManager._coordinator_pid = os.getpid()
        LOG.debug('Initialized coordinator with connect string %s',
                  LockManager._connect_string)
        LockManager._coordinator = coordination.get_coordinator(
            LockManager._connect_string, 'vmware-neutron-plugin')
        LockManager._coordinator.start()
    LOG.debug('Retrieved lock for %s', name)
    return LockManager._coordinator.get_lock(name)

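# A usage sketch for the _get_lock_distributed() helper above; the
# resource name is a hypothetical example. The returned tooz lock is a
# context manager, so it is released automatically even if the critical
# section raises.
lock = _get_lock_distributed('security-group-update')
with lock:
    pass  # critical section shared across neutron API workers
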
def coordinator_setup():
    """
    Sets up the client for the coordination service.

    URL examples for connection:
        zake://
        file:///tmp
        redis://username:password@host:port
        mysql://username:password@host:port/dbname
    """
    url = cfg.CONF.coordination.url
    lock_timeout = cfg.CONF.coordination.lock_timeout
    proc_info = system_info.get_process_info()
    member_id = '%s_%d' % (proc_info['hostname'], proc_info['pid'])

    coordinator = tooz_coord.get_coordinator(url, member_id,
                                             lock_timeout=lock_timeout)
    coordinator.start()
    return coordinator

def _create_coordinator(self):

    def _safe_stop(coord):
        try:
            coord.stop()
        except coordination.ToozError as e:
            # TODO(harlowja): make this better, so that we don't have to
            # do string checking...
            message = utils.exception_message(e)
            if (message != 'Can not stop a driver which has not'
                           ' been started'):
                raise

    coord = coordination.get_coordinator(self.FAKE_URL,
                                         str(uuid.uuid4()).encode('ascii'))
    self.addCleanup(_safe_stop, coord)
    return coord

def start_tooz(self):
    """Start the tooz client."""
    try:
        local_ip = get_local_ip()
        user_name = getpass.getuser()
        member_name = local_ip + '_' + user_name + '_' + self.__group_name
        self.tooz_client = coordination.get_coordinator(
            MEMCACHE_CFG, member_name.encode())
    except Exception as group_info:
        logging.error('failed to get group info: %s', group_info)
        return

    try:
        self.tooz_client.start()
        create_result = self.__create_group_by_name()
        if create_result is False:
            self.tooz_client.stop()
            logging.error('failed to create group [%s]', self.__group_name)
            return

        def group_joined(event):
            logging.info('%s joined group %s',
                         event.member_id.decode(),
                         event.group_id.decode())
            logging.info('current group leader is: %s',
                         self.tooz_client.get_leader(
                             self.__group_name.encode()).get())

        def group_left(event):
            logging.info('%s left group %s',
                         event.member_id.decode(),
                         event.group_id.decode())

        def when_i_am_elected_leader(event):
            # event is a LeaderElected event
            logging.info('group %s has elected leader %s',
                         event.group_id.decode(),
                         event.member_id.decode())
            self.child_process = subprocess.Popen(self.__start_command,
                                                  shell=True)

        self.tooz_client.watch_join_group(self.__group_name.encode(),
                                          group_joined)
        self.tooz_client.watch_leave_group(self.__group_name.encode(),
                                           group_left)
        self.tooz_client.watch_elected_as_leader(
            self.__group_name.encode(), when_i_am_elected_leader)
        self.tooz_client.join_group(self.__group_name.encode()).get()
    except Exception as e:
        logging.error("ERROR: %s", e)
        return

    self.__while_loop_for_watch()

def __init__(self, conf):
    super(CarbonaraBasedStorage, self).__init__(conf)
    self.coord = coordination.get_coordinator(
        conf.coordination_url,
        str(uuid.uuid4()).encode('ascii'))
    self.coord.start()
    if conf.aggregation_workers_number is None:
        try:
            self.aggregation_workers_number = multiprocessing.cpu_count()
        except NotImplementedError:
            self.aggregation_workers_number = 2
    else:
        self.aggregation_workers_number = conf.aggregation_workers_number
    self.partition = 0
    self.heartbeater = threading.Thread(target=self._heartbeat,
                                        name='heartbeat')
    self.heartbeater.setDaemon(True)
    self.heartbeater.start()

def save_result_to_task():
    global task_uuid
    global out

    parse_func = getattr(base, "parse_%s" % ag)
    # TODO(kun): a file lock is okay on localhost, but a distributed lock
    # (e.g. redis) is needed here instead.
    co = coordination.get_coordinator("file:///tmp", b"localhost")
    co.start()
    lock = co.get_lock("task_update_lock")
    with lock:
        for ret in parse_func(out):
            ret = db_api.result_create(**ret)
            print("[LOG] appending result with id %s" % ret.uuid)
            db_api.task_append_result(task_uuid, ret.uuid)
            db_api.tracer_append_result(ag, ret.uuid)
        print("[LOG] updated task with result %s" % task_uuid)
    time.sleep(2)
    co.stop()

def test_leftover_file(self):
    fixture = self.useFixture(fixtures.TempDir())
    file_path = fixture.path
    url = 'file://%s' % file_path
    coord = coordination.get_coordinator(url, self._FAKE_MEMBER_ID)
    coord.start()
    self.addCleanup(coord.stop)
    coord.create_group(b"my_group").get()
    safe_group_id = coord._make_filesystem_safe(b"my_group")
    with open(os.path.join(file_path, 'groups',
                           safe_group_id, "junk.txt"), "wb"):
        pass
    os.unlink(os.path.join(file_path, 'groups',
                           safe_group_id, '.metadata'))
    self.assertRaises(tooz.ToozError,
                      coord.delete_group(b"my_group").get)

def __init__(self, conf):
    self.swift = swclient.Connection(
        auth_version=conf.swift_auth_version,
        authurl=conf.swift_authurl,
        preauthtoken=conf.swift_preauthtoken,
        user=conf.swift_user,
        key=conf.swift_key,
        tenant_name=conf.swift_tenant_name)
    self.compresslevel = conf.compression_level
    self.coord = coordination.get_coordinator(
        conf.swift_coordination_driver,
        str(uuid.uuid4()))
    self.coord.start()
    # NOTE(jd) So this is a (smart?) optimization: since we're going to
    # lock for each of these aggregation types, if we are running Gnocchi
    # with multiple processes, let's randomize what we iterate over so
    # there's less chance we fight for the same lock!
    self.aggregation_types = list(storage.AGGREGATION_TYPES)
    random.shuffle(self.aggregation_types)

def __init__(self):
    # Tenant fetcher
    self.fetcher = driver.DriverManager(
        FETCHERS_NAMESPACE,
        CONF.tenant_fetcher.backend,
        invoke_on_load=True).driver

    self.transformers = transformer.get_transformers()
    self.collector = collector.get_collector(self.transformers)
    self.storage = storage.get_storage(self.collector)

    # RPC
    self.server = None
    self._rating_endpoint = RatingEndpoint(self)
    self._init_messaging()

    # DLM
    self.coord = coordination.get_coordinator(
        CONF.orchestrator.coordination_url,
        str(uuid.uuid4()).encode('ascii'))
    self.coord.start()

def start(self): """Brings up coordination service online This connects the coordination service to its tooz backend. This involves: - connecting to the cluster - creating the coordination group (if required) - joining the coordination group - registering callbacks to respond to join/leave membership events After the local node has joined the cluster and knows its remote peers, it fires off an initial rebalance event to the workers so they can seed their hash ring with the current membership. """ LOG.info(_LI('Starting RUG coordinator process for host %s on %s'), self.host, self.url) self._coordinator = tz_coordination.get_coordinator( self.url, self.host) self._coordinator.start() try: self._coordinator.create_group(self.group).get() except tooz.coordination.GroupAlreadyExist: pass try: self._coordinator.join_group(self.group).get() self._coordinator.heartbeat() except tooz.coordination.MemberAlreadyExist: pass self._coordinator.watch_join_group(self.group, self.cluster_changed) self._coordinator.watch_leave_group(self.group, self.cluster_changed) self._coordinator.heartbeat() LOG.debug("Sending initial event changed for members: %s" % self.members) self.cluster_changed(event=None, node_bootstrap=True)
def tooz_make_change(driver, url, session, vol_id, initial, destination,
                     attach_status):
    global coordinator
    global lock

    # If coordinator is not the one we want we cannot reuse it
    if not isinstance(coordinator, driver):
        if coordinator:
            coordinator.stop()
        # Create new coordinator and lock
        coordinator = coordination.get_coordinator(url, str(session))
        coordinator.start()
        lock = coordinator.get_lock(vol_id)

    while True:
        with lock, session.begin():
            vol = session.query(db.Volume).with_for_update().get(vol_id)
            if vol.status == initial:
                vol.status = destination
                vol.attach_status = attach_status
                return
        coordinator.heartbeat()
        time.sleep(0.01)

import uuid

import six

from tooz import coordination

coordinator = coordination.get_coordinator('zake://', b'host-1')
coordinator.start()

# Create a group
group = six.binary_type(six.text_type(uuid.uuid4()).encode('ascii'))
request = coordinator.create_group(group)
request.get()

# Join a group
request = coordinator.join_group(group)
request.get()

coordinator.stop()

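# An optional follow-up sketch to the example above: membership can be
# verified with get_members(), which (like the other coordinator calls)
# returns a future. This check would need to run before the
# coordinator.stop() call in the example.
members = coordinator.get_members(group).get()
assert b'host-1' in members
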
import time
import uuid

import six

from tooz import coordination

ALIVE_TIME = 1

coordinator = coordination.get_coordinator('kazoo://localhost', b'host-1')
coordinator.start()

# Create a group
group = six.binary_type(six.text_type(uuid.uuid4()).encode('ascii'))
request = coordinator.create_group(group)
request.get()

# Join a group
request = coordinator.join_group(group)
request.get()


def when_i_am_elected_leader(event):
    # event is a LeaderElected event
    print(event.group_id, event.member_id)


# Propose to be a leader for the group
coordinator.watch_elected_as_leader(group, when_i_am_elected_leader)

start = time.time()
while time.time() - start < ALIVE_TIME:
    # NOTE: the original snippet was truncated at this point; the standard
    # tooz leader-election example keeps the membership alive and fires
    # the election callbacks inside this loop, roughly as follows:
    coordinator.heartbeat()
    coordinator.run_watchers()
    time.sleep(0.1)

coordinator.stop()