def autoscale(group, config_data, args):
    """Execute a scale-up or scale-down policy for a scaling group.

    :param group: group name
    :param config_data: json configuration data
    :param args: user provided arguments
    :returns: enums.ScaleEvent
    """
    try:
        group_config = config_data['autoscale_groups'][group]
    except KeyError:
        return common.exit_with_error(
            'Unable to get scaling group config for group: %s', group)

    scaling_group = ScalingGroup(group_config, group)

    # Read 'cluster' once with .get(): the original log line tolerated a
    # missing key but the branch test used args['cluster'] and could raise
    # KeyError — make the two consistent.
    cluster_mode = args.get('cluster', False)
    logger.info('Cluster Mode Enabled: %s', cluster_mode)
    if cluster_mode:
        # Only the cluster master may drive scaling decisions.
        if scaling_group.is_master in (NodeStatus.Slave, NodeStatus.Unknown):
            return ScaleEvent.NotMaster

    plugin_config = scaling_group.plugin_config
    mgr = NamedExtensionManager(
        namespace='raxas.ext',
        names=plugin_config.keys(),
        invoke_on_load=True,
        invoke_args=(scaling_group,)
    )
    # Lazy %-style logging args instead of eager string interpolation.
    logger.info('Loaded plugins: %s', mgr.names())

    # Each plugin votes on scaling; None means "no opinion".
    results = [result for result in mgr.map_method('make_decision')
               if result is not None]
    # Clamp the aggregate vote into the [-1, 1] range ScaleDirection expects.
    scaling_decision = max(-1, min(1, sum(results)))

    scale = ScaleDirection(scaling_decision)
    if scale is ScaleDirection.Nothing:
        logger.info('Cluster within target parameters')
        return ScaleEvent.NoAction

    logger.info('Threshold reached - Scaling %s', scale.name)
    if not args['dry_run']:
        scaling_group.execute_webhook(scale, HookType.Pre)
        policy_result = scaling_group.execute_policy(scale)
        if policy_result == ScaleEvent.Success:
            # Post-hooks fire only after a successful policy execution.
            scaling_group.execute_webhook(scale, HookType.Post)
            return ScaleEvent.Success
        elif policy_result == ScaleEvent.NoAction:
            # Nothing needed to change; still a successful run.
            return ScaleEvent.Success
        else:
            return ScaleEvent.Error
    else:
        logger.info('Scale %s prevented by --dry-run', scale.name)
        return ScaleEvent.Success
def _load_kuryr_ctrlr_handlers():
    """Load and return the enabled Kuryr controller handler instances.

    Handler names come from CONF.kubernetes.enabled_handlers; missing
    entry points and load failures are reported through the module-level
    callbacks rather than raising here.

    :returns: list of instantiated handler objects
    """
    configured_handlers = CONF.kubernetes.enabled_handlers
    LOG.info('Configured handlers: %s', configured_handlers)
    handlers = NamedExtensionManager(
        'kuryr_kubernetes.controller.handlers',
        configured_handlers,
        invoke_on_load=True,
        on_missing_entrypoints_callback=_handler_not_found,
        on_load_failure_callback=_handler_not_loaded)
    LOG.info('Loaded handlers: %s', handlers.names())
    # Comprehension instead of the manual append loop (same result).
    return [handler.obj for handler in handlers.extensions]
def extensions(self):
    """Return the extension manager, creating it on first access."""
    # Fast path: already built on a previous call.
    if self._extensions is not None:
        return self._extensions
    self._extensions = NamedExtensionManager(
        namespace='extensions', names=self.features, name_order=True)
    return self._extensions
def load_plugins(plugins, load_args=None, load_kwargs=None):
    """Load and invoke the named wazo_call_logd plugins.

    :param plugins: iterable of enabled plugin names, in load order
    :param load_args: positional args forwarded to each plugin's load()
    :param load_kwargs: keyword args forwarded to each plugin's load()
    """
    load_args = load_args or []
    load_kwargs = load_kwargs or {}
    logger.debug('Enabled plugins: %s', plugins)
    # Bind the manager to its own name instead of rebinding the 'plugins'
    # parameter (the original shadowed its own argument).
    manager = NamedExtensionManager(namespace='wazo_call_logd.plugins',
                                    names=plugins,
                                    name_order=True,
                                    on_load_failure_callback=plugins_load_fail,
                                    propagate_map_exceptions=True,
                                    invoke_on_load=True)
    try:
        manager.map(load_plugin, load_args, load_kwargs)
    except RuntimeError as e:
        # map() raises RuntimeError when no plugin could run.
        logger.error("Could not load enabled plugins")
        logger.exception(e)
def _init_extensions(self):
    """Load every enabled notification-handler extension.

    Returns the list of instantiated handlers, or [] when none are
    enabled in the 'service:sink' config section.
    """
    handler_names = cfg.CONF['service:sink'].enabled_notification_handlers
    self.extensions_manager = NamedExtensionManager(
        HANDLER_NAMESPACE, names=handler_names)

    def _instantiate(ext):
        # Each extension's plugin attribute is the handler class itself.
        return ext.plugin()

    try:
        return self.extensions_manager.map(_instantiate)
    except RuntimeError:
        # No handlers enabled. No problem.
        return []
def run():
    """Example application using pangadfs"""
    logging.basicConfig(level=logging.INFO)

    # Genetic-algorithm knobs for this example run.
    ga_settings = {
        'crossover_method': 'uniform',
        'csvpth': Path(__file__).parent / 'appdata' / 'pool.csv',
        'elite_divisor': 5,
        'elite_method': 'fittest',
        'mutation_rate': .05,
        'n_generations': 20,
        'points_column': 'proj',
        'population_size': 30000,
        'position_column': 'pos',
        'salary_column': 'salary',
        'select_method': 'roulette',
        'stop_criteria': 10,
        'verbose': True
    }

    # DFS-site roster constraints.
    site_settings = {
        'flex_positions': ('RB', 'WR', 'TE'),
        'lineup_size': 9,
        'posfilter': {'QB': 14, 'RB': 8, 'WR': 8, 'TE': 5, 'DST': 4, 'FLEX': 8},
        'posmap': {'DST': 1, 'QB': 1, 'TE': 1, 'RB': 2, 'WR': 3, 'FLEX': 7},
        'salary_cap': 50000
    }

    ctx = {'ga_settings': ga_settings, 'site_settings': site_settings}

    # 'validate' runs several plugins at once (extension manager);
    # every other namespace gets a single default driver.
    driver_managers = {}
    extension_managers = {}
    for namespace in GeneticAlgorithm.PLUGIN_NAMESPACES:
        full_namespace = f'pangadfs.{namespace}'
        if namespace == 'validate':
            extension_managers['validate'] = NamedExtensionManager(
                namespace=full_namespace,
                names=['validate_salary', 'validate_duplicates'],
                invoke_on_load=True,
                name_order=True)
            continue
        driver_managers[namespace] = DriverManager(
            namespace=full_namespace,
            name=f'{namespace}_default',
            invoke_on_load=True)

    # Build and run the optimizer; it stops after n_generations or when
    # stop_criteria is reached.
    ga = GeneticAlgorithm(ctx=ctx, driver_managers=driver_managers,
                          extension_managers=extension_managers)
    results = ga.optimize()

    # Show the best lineup and its score.
    print(results['best_lineup'])
    print(f'Lineup score: {results["best_score"]}')
def load(namespace, names, dependencies):
    """Load the enabled plugins from *namespace* and run their load().

    Returns the NamedExtensionManager, or None when no plugin is enabled.
    """
    enabled = enabled_names(names)
    logger.debug('Enabled plugins: %s', enabled)
    if not enabled:
        logger.info('no enabled plugins')
        return None

    manager = NamedExtensionManager(
        namespace,
        enabled,
        name_order=True,
        on_load_failure_callback=on_load_failure,
        on_missing_entrypoints_callback=on_missing_entrypoints,
        invoke_on_load=True)
    manager.map(load_plugin, dependencies)
    return manager
def _init_extensions(self):
    """Load all enabled sink notification handlers.

    :raises exceptions.ConfigurationError: when no handler is enabled
    """
    handler_names = cfg.CONF['service:sink'].enabled_notification_handlers
    self.extensions_manager = NamedExtensionManager(
        HANDLER_NAMESPACE, names=handler_names)

    def _instantiate(ext):
        # ext.plugin is the handler class; instantiate with no args.
        return ext.plugin()

    try:
        return self.extensions_manager.map(_instantiate)
    except RuntimeError:
        # No handlers enabled. Bail!
        raise exceptions.ConfigurationError('No designate-sink handlers '
                                            'enabled')
def run_hooks(recipe_parts, hook_type, extensions=None):
    """Run the named recipe hook extensions over *recipe_parts*.

    :param recipe_parts: recipe data that each hook transforms in turn
    :param hook_type: hook namespace suffix (appended to "recipe.hooks.")
    :param extensions: names of hook extensions to run, in order; a
        falsy value (None/empty) means "no hooks"
    :returns: recipe_parts after every hook's execute() has run
    """
    # None default instead of a mutable [] (shared-default pitfall);
    # the existing falsy check keeps behavior identical for callers.
    if not extensions:
        return recipe_parts
    namespace = "recipe.hooks." + hook_type
    hook_mgr = NamedExtensionManager(namespace, extensions, name_order=True)
    # Each hook receives the output of the previous one.
    for extension in hook_mgr.extensions:
        recipe_parts = extension.plugin(recipe_parts).execute()
    return recipe_parts
def _load_plugins(self): """Loads default plugins for any namespace that doesn't have a plugin""" for ns in self.PLUGIN_NAMESPACES: if ns not in self.driver_managers and ns not in self.extension_managers: if ns == 'validate': self.extension_managers[ns] = NamedExtensionManager( namespace='pangadfs.validate', names=self.VALIDATE_PLUGINS, invoke_on_load=True, name_order=True) else: mgr = DriverManager(namespace=f'pangadfs.{ns}', name=f'{ns}_default', invoke_on_load=True) self.driver_managers[ns] = mgr
def _init_extensions(self):
    """Load all enabled notification handlers, registering their options.

    Returns the list of handler instances, or [] when none are enabled.
    """
    enabled = cfg.CONF.enabled_notification_handlers
    self.extensions_manager = NamedExtensionManager(
        HANDLER_NAMESPACE, names=enabled)

    def _build_handler(ext):
        # Each handler registers its own config options before we
        # instantiate it with a reference back to this service.
        ext.plugin.register_opts(cfg.CONF)
        return ext.plugin(central_service=self)

    try:
        return self.extensions_manager.map(_build_handler)
    except RuntimeError:
        # No handlers enabled. No problem.
        return []
class Service(rpc_service.Service):
    """Central RPC service: server/domain/record CRUD over the storage
    backend, plus optional notification handling via loaded extensions.
    """

    def __init__(self, *args, **kwargs):
        kwargs.update(
            host=cfg.CONF.host,
            topic=cfg.CONF.central_topic
        )

        policy.init_policy()

        super(Service, self).__init__(*args, **kwargs)

        # Get a storage connection
        self.storage_conn = storage.get_connection(cfg.CONF)

        # Initialize extensions
        self.handlers = self._init_extensions()

        if self.handlers:
            # Get a rpc connection only if there are handlers to consume for
            self.rpc_conn = rpc.create_connection()

    def _init_extensions(self):
        """Load and prepare all enabled notification-handler extensions.

        Returns the list of instantiated handlers, or [] when none are
        enabled (map() raises RuntimeError on an empty manager).
        """
        self.extensions_manager = NamedExtensionManager(
            HANDLER_NAMESPACE, names=cfg.CONF.enabled_notification_handlers)

        def _load_extension(ext):
            handler_cls = ext.plugin
            # Each handler registers its own config options before use.
            handler_cls.register_opts(cfg.CONF)
            return handler_cls(central_service=self)

        try:
            return self.extensions_manager.map(_load_extension)
        except RuntimeError:
            # No handlers enabled. No problem.
            return []

    def start(self):
        super(Service, self).start()

        if self.handlers:
            # Setup notification subscriptions and start consuming
            self._setup_subscriptions()
            self.rpc_conn.consume_in_thread_group(self.tg)

    def stop(self):
        if self.handlers:
            # Try to shut the connection down, but if we get any sort of
            # errors, go ahead and ignore them.. as we're shutting down anyway
            try:
                self.rpc_conn.close()
            except Exception:
                pass

        super(Service, self).stop()

    def _setup_subscriptions(self):
        """Subscribe to every exchange+topic combination covered by a
        loaded handler, routing messages to _process_notification.
        """
        for handler in self.handlers:
            exchange, topics = handler.get_exchange_topics()

            for topic in topics:
                queue_name = "moniker.notifications.%s.%s" % (exchange, topic)
                self.rpc_conn.declare_topic_consumer(
                    queue_name=queue_name,
                    topic=topic,
                    exchange_name=exchange,
                    callback=self._process_notification)

    def _process_notification(self, notification):
        """Offer an incoming notification to every registered handler."""
        event_type = notification.get('event_type')

        LOG.debug('Processing notification: %s' % event_type)

        for handler in self.handlers:
            self._process_notification_for_handler(handler, notification)

    def _process_notification_for_handler(self, handler, notification):
        """Hand a notification to *handler* only if the handler declares
        interest in that event type.
        """
        event_type = notification['event_type']
        payload = notification['payload']

        if event_type in handler.get_event_types():
            LOG.debug('Found handler for: %s' % event_type)
            handler.process_notification(event_type, payload)

    # Server Methods
    def create_server(self, context, values):
        server = self.storage_conn.create_server(context, values)

        utils.notify(context, 'api', 'server.create', server)

        return server

    def get_servers(self, context, criterion=None):
        return self.storage_conn.get_servers(context, criterion)

    def get_server(self, context, server_id):
        return self.storage_conn.get_server(context, server_id)

    def update_server(self, context, server_id, values):
        server = self.storage_conn.update_server(context, server_id, values)

        utils.notify(context, 'api', 'server.update', server)

        return server

    def delete_server(self, context, server_id):
        # Fetch first so the notification carries the full record.
        server = self.storage_conn.get_server(context, server_id)

        utils.notify(context, 'api', 'server.delete', server)

        return self.storage_conn.delete_server(context, server_id)

    # Domain Methods
    def create_domain(self, context, values):
        domain = self.storage_conn.create_domain(context, values)

        # Propagate the change to the agent before notifying.
        agent_api.create_domain(context, domain)
        utils.notify(context, 'api', 'domain.create', domain)

        return domain

    def get_domains(self, context, criterion=None):
        return self.storage_conn.get_domains(context, criterion)

    def get_domain(self, context, domain_id):
        return self.storage_conn.get_domain(context, domain_id)

    def update_domain(self, context, domain_id, values):
        domain = self.storage_conn.update_domain(context, domain_id, values)

        agent_api.update_domain(context, domain)
        utils.notify(context, 'api', 'domain.update', domain)

        return domain

    def delete_domain(self, context, domain_id):
        # Fetch first so the agent call and notification see the record.
        domain = self.storage_conn.get_domain(context, domain_id)

        agent_api.delete_domain(context, domain)
        utils.notify(context, 'api', 'domain.delete', domain)

        return self.storage_conn.delete_domain(context, domain_id)

    # Record Methods
    def create_record(self, context, domain_id, values):
        record = self.storage_conn.create_record(context, domain_id, values)

        domain = self.storage_conn.get_domain(context, domain_id)
        agent_api.create_record(context, domain, record)
        utils.notify(context, 'api', 'record.create', record)

        return record

    def get_records(self, context, domain_id, criterion=None):
        return self.storage_conn.get_records(context, domain_id, criterion)

    def get_record(self, context, domain_id, record_id):
        # NOTE(review): domain_id is accepted but unused — records appear
        # to be fetched by record_id alone; confirm this is intentional.
        return self.storage_conn.get_record(context, record_id)

    def update_record(self, context, domain_id, record_id, values):
        record = self.storage_conn.update_record(context, record_id, values)

        domain = self.storage_conn.get_domain(context, domain_id)
        agent_api.update_record(context, domain, record)
        utils.notify(context, 'api', 'record.update', record)

        return record

    def delete_record(self, context, domain_id, record_id):
        record = self.storage_conn.get_record(context, record_id)

        domain = self.storage_conn.get_domain(context, domain_id)
        agent_api.delete_record(context, domain, record)
        utils.notify(context, 'api', 'record.delete', record)

        return self.storage_conn.delete_record(context, record_id)
class Service(service.Service):
    """Sink service: consumes notifications from the message bus and
    dispatches them to the enabled handler extensions.
    """

    def __init__(self, *args, **kwargs):
        super(Service, self).__init__(*args, **kwargs)

        # Initialize extensions
        self.handlers = self._init_extensions()

        # Get a rpc connection
        self.rpc_conn = rpc.create_connection()

    def _init_extensions(self):
        """Load all enabled sink handler extensions.

        :raises exceptions.ConfigurationError: when no handler is enabled
            (map() raises RuntimeError on an empty manager)
        """
        enabled_notification_handlers = \
            cfg.CONF['service:sink'].enabled_notification_handlers

        self.extensions_manager = NamedExtensionManager(
            HANDLER_NAMESPACE, names=enabled_notification_handlers)

        def _load_extension(ext):
            handler_cls = ext.plugin
            return handler_cls()

        try:
            return self.extensions_manager.map(_load_extension)
        except RuntimeError:
            # No handlers enabled. Bail!
            raise exceptions.ConfigurationError('No designate-sink handlers '
                                                'enabled')

    def start(self):
        super(Service, self).start()

        # Setup notification subscriptions and start consuming
        self._setup_subscriptions()
        self.rpc_conn.consume_in_thread()

    def wait(self):
        super(Service, self).wait()
        self.rpc_conn.consumer_thread.wait()

    def stop(self):
        # Try to shut the connection down, but if we get any sort of
        # errors, go ahead and ignore them.. as we're shutting down anyway
        try:
            self.rpc_conn.close()
        except Exception:
            pass

        super(Service, self).stop()

    def _setup_subscriptions(self):
        """Join a consumer pool for every exchange+topic combination
        covered by a loaded handler.
        """
        for handler in self.handlers:
            exchange, topics = handler.get_exchange_topics()

            for topic in topics:
                # One shared queue per handler/exchange/topic combination.
                queue_name = "designate.notifications.%s.%s.%s" % (
                    handler.get_canonical_name(), exchange, topic)

                self.rpc_conn.join_consumer_pool(
                    self._process_notification,
                    queue_name,
                    topic,
                    exchange_name=exchange)

    def _get_handler_event_types(self):
        # Union of every event type any handler is interested in.
        event_types = set()

        for handler in self.handlers:
            for et in handler.get_event_types():
                event_types.add(et)

        return event_types

    def _process_notification(self, notification):
        """Offer an incoming notification to every interested handler."""
        event_type = notification.get('event_type')

        # NOTE(zykes): Only bother to actually do processing if there's any
        # matching events, skips logging of things like compute.exists etc.
        if event_type in self._get_handler_event_types():
            for handler in self.handlers:
                self._process_notification_for_handler(handler, notification)

    def _process_notification_for_handler(self, handler, notification):
        """Hand a notification to *handler* only if the handler declares
        interest in that event type.
        """
        event_type = notification['event_type']
        payload = notification['payload']

        if event_type in handler.get_event_types():
            LOG.debug('Found handler for: %s' % event_type)
            handler.process_notification(event_type, payload)
class Service(service.Service):
    """Sink service: consumes notifications from the message bus and
    dispatches them to the enabled handler extensions.
    """

    def __init__(self, *args, **kwargs):
        super(Service, self).__init__(*args, **kwargs)

        # Initialize extensions
        self.handlers = self._init_extensions()

        # Get a rpc connection
        self.rpc_conn = rpc.create_connection()

    def _init_extensions(self):
        """Load all enabled sink handler extensions.

        :raises exceptions.ConfigurationError: when no handler is enabled
            (map() raises RuntimeError on an empty manager)
        """
        enabled_notification_handlers = \
            cfg.CONF['service:sink'].enabled_notification_handlers

        self.extensions_manager = NamedExtensionManager(
            HANDLER_NAMESPACE, names=enabled_notification_handlers)

        def _load_extension(ext):
            handler_cls = ext.plugin
            return handler_cls()

        try:
            return self.extensions_manager.map(_load_extension)
        except RuntimeError:
            # No handlers enabled. Bail!
            raise exceptions.ConfigurationError('No designate-sink handlers '
                                                'enabled')

    def start(self):
        super(Service, self).start()

        # Setup notification subscriptions and start consuming
        self._setup_subscriptions()
        self.rpc_conn.consume_in_thread()

    def wait(self):
        super(Service, self).wait()
        self.rpc_conn.consumer_thread.wait()

    def stop(self):
        # Try to shut the connection down, but if we get any sort of
        # errors, go ahead and ignore them.. as we're shutting down anyway
        try:
            self.rpc_conn.close()
        except Exception:
            pass

        super(Service, self).stop()

    def _setup_subscriptions(self):
        """Join a consumer pool for every exchange+topic combination
        covered by a loaded handler.
        """
        for handler in self.handlers:
            exchange, topics = handler.get_exchange_topics()

            for topic in topics:
                # One shared queue per handler/exchange/topic combination.
                queue_name = "designate.notifications.%s.%s.%s" % (
                    handler.get_canonical_name(), exchange, topic)

                self.rpc_conn.join_consumer_pool(self._process_notification,
                                                 queue_name,
                                                 topic,
                                                 exchange_name=exchange)

    def _get_handler_event_types(self):
        # Union of every event type any handler is interested in.
        event_types = set()

        for handler in self.handlers:
            for et in handler.get_event_types():
                event_types.add(et)

        return event_types

    def _process_notification(self, notification):
        """Offer an incoming notification to every interested handler."""
        event_type = notification.get('event_type')

        # NOTE(zykes): Only bother to actually do processing if there's any
        # matching events, skips logging of things like compute.exists etc.
        if event_type in self._get_handler_event_types():
            for handler in self.handlers:
                self._process_notification_for_handler(handler, notification)

    def _process_notification_for_handler(self, handler, notification):
        """Hand a notification to *handler* only if the handler declares
        interest in that event type.
        """
        event_type = notification['event_type']
        payload = notification['payload']

        if event_type in handler.get_event_types():
            LOG.debug('Found handler for: %s' % event_type)
            handler.process_notification(event_type, payload)
class Service(rpc_service.Service):
    """Central RPC service: policy-checked server/domain/record CRUD over
    the storage backend, with changes propagated to the DNS backend and
    optional notification handling via loaded extensions.
    """

    def __init__(self, *args, **kwargs):
        backend_driver = cfg.CONF['service:central'].backend_driver
        self.backend = backend.get_backend(backend_driver)

        kwargs.update(
            host=cfg.CONF.host,
            topic=cfg.CONF.central_topic,
        )

        policy.init_policy()

        super(Service, self).__init__(*args, **kwargs)

        # Get a storage connection
        self.storage_conn = storage.get_connection()

        # Initialize extensions
        self.handlers = self._init_extensions()

        if self.handlers:
            # Get a rpc connection only if there are handlers to consume for
            self.rpc_conn = rpc.create_connection()

    def _init_extensions(self):
        """Load and prepare all enabled notification-handler extensions.

        Returns the list of instantiated handlers, or [] when none are
        enabled (map() raises RuntimeError on an empty manager).
        """
        enabled_notification_handlers = \
            cfg.CONF['service:central'].enabled_notification_handlers

        self.extensions_manager = NamedExtensionManager(
            HANDLER_NAMESPACE, names=enabled_notification_handlers)

        def _load_extension(ext):
            handler_cls = ext.plugin
            return handler_cls(central_service=self)

        try:
            return self.extensions_manager.map(_load_extension)
        except RuntimeError:
            # No handlers enabled. No problem.
            return []

    def start(self):
        # The DNS backend must be up before we start serving RPC.
        self.backend.start()

        super(Service, self).start()

        if self.handlers:
            # Setup notification subscriptions and start consuming
            self._setup_subscriptions()
            self.rpc_conn.consume_in_thread_group(self.tg)

    def stop(self):
        if self.handlers:
            # Try to shut the connection down, but if we get any sort of
            # errors, go ahead and ignore them.. as we're shutting down anyway
            try:
                self.rpc_conn.close()
            except Exception:
                pass

        super(Service, self).stop()

        self.backend.stop()

    def _setup_subscriptions(self):
        """Subscribe to every exchange+topic combination covered by a
        loaded handler, routing messages to _process_notification.
        """
        for handler in self.handlers:
            exchange, topics = handler.get_exchange_topics()

            for topic in topics:
                queue_name = "moniker.notifications.%s.%s.%s" % (
                    handler.get_canonical_name(), exchange, topic)

                self.rpc_conn.declare_topic_consumer(
                    queue_name=queue_name,
                    topic=topic,
                    exchange_name=exchange,
                    callback=self._process_notification)

    def _get_handler_event_types(self):
        # Union of every event type any handler is interested in.
        event_types = set()

        for handler in self.handlers:
            for et in handler.get_event_types():
                event_types.add(et)

        return event_types

    def _process_notification(self, notification):
        """Offer an incoming notification to every interested handler."""
        event_type = notification.get('event_type')

        # NOTE(zykes): Only bother to actually do processing if there's any
        # matching events, skips logging of things like compute.exists etc.
        if event_type in self._get_handler_event_types():
            for handler in self.handlers:
                self._process_notification_for_handler(handler, notification)

    def _process_notification_for_handler(self, handler, notification):
        """Hand a notification to *handler* only if the handler declares
        interest in that event type.
        """
        event_type = notification['event_type']
        payload = notification['payload']

        if event_type in handler.get_event_types():
            LOG.debug('Found handler for: %s' % event_type)
            handler.process_notification(event_type, payload)

    def _check_reserved_domain_suffixes(self, context, domain_name):
        """Ensure *domain_name* does not end with a reserved suffix, or
        that the caller is explicitly allowed to use it (policy check).
        """
        suffixes = cfg.CONF['service:central'].reserved_domain_suffixes

        for suffix in suffixes:
            if domain_name.endswith(suffix):
                policy.check('use_reserved_domain_suffix', context,
                             {'suffix': suffix})

    # Server Methods
    def create_server(self, context, values):
        policy.check('create_server', context)

        server = self.storage_conn.create_server(context, values)

        utils.notify(context, 'api', 'server.create', server)

        return server

    def get_servers(self, context, criterion=None):
        policy.check('get_servers', context)

        return self.storage_conn.get_servers(context, criterion)

    def get_server(self, context, server_id):
        policy.check('get_server', context, {'server_id': server_id})

        return self.storage_conn.get_server(context, server_id)

    def update_server(self, context, server_id, values):
        policy.check('update_server', context, {'server_id': server_id})

        server = self.storage_conn.update_server(context, server_id, values)

        utils.notify(context, 'api', 'server.update', server)

        return server

    def delete_server(self, context, server_id):
        policy.check('delete_server', context, {'server_id': server_id})

        # Fetch first so the notification carries the full record.
        server = self.storage_conn.get_server(context, server_id)

        utils.notify(context, 'api', 'server.delete', server)

        return self.storage_conn.delete_server(context, server_id)

    # Domain Methods
    def create_domain(self, context, values):
        # New domains are always owned by the caller's effective tenant.
        values['tenant_id'] = context.effective_tenant_id

        target = {'tenant_id': values['tenant_id']}
        policy.check('create_domain', context, target)

        # Ensure the domain does not end with a reserved suffix.
        self._check_reserved_domain_suffixes(context, values['name'])

        domain = self.storage_conn.create_domain(context, values)

        self.backend.create_domain(context, domain)
        utils.notify(context, 'api', 'domain.create', domain)

        return domain

    def get_domains(self, context, criterion=None):
        target = {'tenant_id': context.effective_tenant_id}
        policy.check('get_domains', context, target)

        # Scope the listing to the caller's tenant.
        if criterion is None:
            criterion = {}

        criterion['tenant_id'] = context.effective_tenant_id

        return self.storage_conn.get_domains(context, criterion)

    def get_domain(self, context, domain_id):
        domain = self.storage_conn.get_domain(context, domain_id)

        target = {'domain_id': domain_id, 'tenant_id': domain['tenant_id']}
        policy.check('get_domain', context, target)

        return domain

    def update_domain(self, context, domain_id, values):
        domain = self.storage_conn.get_domain(context, domain_id)

        target = {'domain_id': domain_id, 'tenant_id': domain['tenant_id']}
        policy.check('update_domain', context, target)

        if 'tenant_id' in values:
            # Changing the owner is equivalent to creating the domain
            # under the new tenant, so apply that policy too.
            target = {'domain_id': domain_id, 'tenant_id': values['tenant_id']}
            policy.check('create_domain', context, target)

        if 'name' in values:
            # Ensure the domain does not end with a reserved suffix.
            self._check_reserved_domain_suffixes(context, values['name'])

        domain = self.storage_conn.update_domain(context, domain_id, values)

        self.backend.update_domain(context, domain)
        utils.notify(context, 'api', 'domain.update', domain)

        return domain

    def delete_domain(self, context, domain_id):
        domain = self.storage_conn.get_domain(context, domain_id)

        target = {'domain_id': domain_id, 'tenant_id': domain['tenant_id']}
        policy.check('delete_domain', context, target)

        self.backend.delete_domain(context, domain)
        utils.notify(context, 'api', 'domain.delete', domain)

        return self.storage_conn.delete_domain(context, domain_id)

    # Record Methods
    def create_record(self, context, domain_id, values):
        domain = self.storage_conn.get_domain(context, domain_id)

        target = {'domain_id': domain_id, 'tenant_id': domain['tenant_id']}
        policy.check('create_record', context, target)

        record = self.storage_conn.create_record(context, domain_id, values)

        self.backend.create_record(context, domain, record)
        utils.notify(context, 'api', 'record.create', record)

        return record

    def get_records(self, context, domain_id, criterion=None):
        domain = self.storage_conn.get_domain(context, domain_id)

        target = {'domain_id': domain_id, 'tenant_id': domain['tenant_id']}
        policy.check('get_records', context, target)

        return self.storage_conn.get_records(context, domain_id, criterion)

    def get_record(self, context, domain_id, record_id):
        domain = self.storage_conn.get_domain(context, domain_id)

        target = {'domain_id': domain_id, 'tenant_id': domain['tenant_id']}
        policy.check('get_record', context, target)

        # NOTE(review): the record is fetched by record_id alone; domain_id
        # is used only for the policy check — confirm this is intentional.
        return self.storage_conn.get_record(context, record_id)

    def update_record(self, context, domain_id, record_id, values):
        domain = self.storage_conn.get_domain(context, domain_id)

        target = {
            'domain_id': domain_id,
            'record_id': record_id,
            'tenant_id': domain['tenant_id']
        }
        policy.check('update_record', context, target)

        record = self.storage_conn.update_record(context, record_id, values)

        self.backend.update_record(context, domain, record)
        utils.notify(context, 'api', 'record.update', record)

        return record

    def delete_record(self, context, domain_id, record_id):
        domain = self.storage_conn.get_domain(context, domain_id)

        target = {
            'domain_id': domain_id,
            'record_id': record_id,
            'tenant_id': domain['tenant_id']
        }
        policy.check('delete_record', context, target)

        record = self.storage_conn.get_record(context, record_id)

        self.backend.delete_record(context, domain, record)
        utils.notify(context, 'api', 'record.delete', record)

        return self.storage_conn.delete_record(context, record_id)