def delete_l7rule(self, l7rule):
    """Deletes an L7 rule.

    :param l7rule: Provider dict of the l7rule to delete
    :returns: None
    :raises L7RuleNotFound: The referenced l7rule was not found
    """
    # Look up the parent policy; the flow also needs its listener and
    # load balancer context.
    db_policy = self._l7policy_repo.get(
        db_apis.get_session(), id=l7rule[constants.L7POLICY_ID])
    provider_policy = provider_utils.db_l7policy_to_provider_l7policy(
        db_policy)
    lb = db_policy.listener.load_balancer
    listener_dicts = (
        provider_utils.db_listeners_to_provider_dicts_list_of_dicts(
            [db_policy.listener]))

    flow_store = {constants.L7RULE: l7rule,
                  constants.L7POLICY: provider_policy.to_dict(),
                  constants.LISTENERS: listener_dicts,
                  constants.L7POLICY_ID: db_policy.id,
                  constants.LOADBALANCER_ID: lb.id}
    delete_flow = self._taskflow_load(
        self._l7rule_flows.get_delete_l7rule_flow(), store=flow_store)
    with tf_logging.DynamicLoggingListener(delete_flow, log=LOG):
        delete_flow.run()
def create_pool(self, pool_id):
    """Creates a node pool.

    :param pool_id: ID of the pool to create
    :returns: None
    :raises NoResultFound: Unable to find the object
    """
    pool = self._pool_repo.get(db_apis.get_session(), id=pool_id)
    if not pool:
        # Raise so the surrounding retry machinery can try again while
        # the API-side DB commit lands.
        LOG.warning('Failed to fetch %s %s from DB. Retrying for up to '
                    '60 seconds.', 'pool', pool_id)
        raise db_exceptions.NoResultFound

    flow_store = {constants.POOL: pool,
                  constants.LISTENERS: pool.listeners,
                  constants.LOADBALANCER: pool.load_balancer}
    create_flow = self._taskflow_load(
        self._pool_flows.get_create_pool_flow(), store=flow_store)
    with tf_logging.DynamicLoggingListener(create_flow, log=LOG):
        create_flow.run()
def create_member(self, member):
    """Creates a pool member.

    :param member: A member provider dictionary to create
    :returns: None
    :raises NoSuitablePool: Unable to find the node pool
    """
    db_pool = self._pool_repo.get(db_apis.get_session(),
                                  id=member[constants.POOL_ID])
    lb = db_pool.load_balancer
    provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
        lb).to_dict()
    listener_dicts = (
        provider_utils.db_listeners_to_provider_dicts_list_of_dicts(
            db_pool.listeners))

    flow_store = {constants.MEMBER: member,
                  constants.LISTENERS: listener_dicts,
                  constants.LOADBALANCER_ID: lb.id,
                  constants.LOADBALANCER: provider_lb,
                  constants.POOL_ID: db_pool.id}
    # Availability-zone metadata only exists when the LB has an AZ set.
    if lb.availability_zone:
        flow_store[constants.AVAILABILITY_ZONE] = (
            self._az_repo.get_availability_zone_metadata_dict(
                db_apis.get_session(), lb.availability_zone))
    else:
        flow_store[constants.AVAILABILITY_ZONE] = {}

    member_flow = self._taskflow_load(
        self._member_flows.get_create_member_flow(), store=flow_store)
    with tf_logging.DynamicLoggingListener(member_flow, log=LOG):
        member_flow.run()
def update_l7policy(self, original_l7policy, l7policy_updates):
    """Updates an L7 policy.

    :param original_l7policy: Provider dict of the l7policy to update
    :param l7policy_updates: Dict containing updated l7policy attributes
    :returns: None
    :raises L7PolicyNotFound: The referenced l7policy was not found
    """
    # Fetch the parent listener; the flow needs it (and its LB id) in
    # the store alongside the policy being updated.
    db_listener = self._listener_repo.get(
        db_apis.get_session(),
        id=original_l7policy[constants.LISTENER_ID])

    listeners_dicts = (
        provider_utils.db_listeners_to_provider_dicts_list_of_dicts(
            [db_listener]))

    update_l7policy_tf = self._taskflow_load(
        self._l7policy_flows.get_update_l7policy_flow(),
        store={constants.L7POLICY: original_l7policy,
               constants.LISTENERS: listeners_dicts,
               constants.LOADBALANCER_ID: db_listener.load_balancer.id,
               constants.UPDATE_DICT: l7policy_updates})
    with tf_logging.DynamicLoggingListener(update_l7policy_tf, log=LOG):
        update_l7policy_tf.run()
def create_health_monitor(self, health_monitor):
    """Creates a health monitor.

    :param health_monitor: Provider health monitor dict
    :returns: None
    :raises NoResultFound: Unable to find the object
    """
    db_hm = self._health_mon_repo.get(
        db_apis.get_session(),
        id=health_monitor[constants.HEALTHMONITOR_ID])
    db_pool = db_hm.pool
    # Attach the monitor so downstream serialization sees it on the pool.
    db_pool.health_monitor = db_hm
    lb = db_pool.load_balancer
    provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
        lb).to_dict()
    listener_dicts = (
        provider_utils.db_listeners_to_provider_dicts_list_of_dicts(
            db_pool.listeners))

    hm_flow = self._taskflow_load(
        self._health_monitor_flows.get_create_health_monitor_flow(),
        store={constants.HEALTH_MON: health_monitor,
               constants.POOL_ID: db_pool.id,
               constants.LISTENERS: listener_dicts,
               constants.LOADBALANCER_ID: lb.id,
               constants.LOADBALANCER: provider_lb})
    with tf_logging.DynamicLoggingListener(hm_flow, log=LOG):
        hm_flow.run()
def create_listener(self, listener):
    """Creates a listener.

    :param listener: A listener provider dictionary.
    :returns: None
    :raises NoResultFound: Unable to find the object
    """
    db_listener = self._listener_repo.get(
        db_apis.get_session(), id=listener[constants.LISTENER_ID])
    if not db_listener:
        LOG.warning(
            'Failed to fetch %s %s from DB. Retrying for up to '
            '60 seconds.', 'listener', listener[constants.LISTENER_ID])
        raise db_exceptions.NoResultFound

    load_balancer = db_listener.load_balancer
    # Comprehension replaces the manual append loop and drops the
    # ambiguous single-letter loop name 'l' (PEP 8 E741).
    dict_listeners = [
        provider_utils.db_listener_to_provider_listener(
            db_list).to_dict()
        for db_list in load_balancer.listeners]
    provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
        load_balancer).to_dict()

    create_listener_tf = self._taskflow_load(
        self._listener_flows.get_create_listener_flow(),
        store={constants.LISTENERS: dict_listeners,
               constants.LOADBALANCER: provider_lb,
               constants.LOADBALANCER_ID: load_balancer.id})
    with tf_logging.DynamicLoggingListener(create_listener_tf, log=LOG):
        create_listener_tf.run()
def _perform_amphora_failover(self, amp, priority):
    """Internal method to perform failover operations for an amphora.

    :param amp: The amphora to failover
    :param priority: The create priority
    :returns: None
    """
    stored_params = {constants.FAILED_AMPHORA: amp,
                     constants.LOADBALANCER_ID: amp.load_balancer_id,
                     constants.BUILD_TYPE_PRIORITY: priority}

    if amp.status == constants.DELETED:
        LOG.warning('Amphora %s is marked DELETED in the database but '
                    'was submitted for failover. Deleting it from the '
                    'amphora health table to exclude it from health '
                    'checks and skipping the failover.', amp.id)
        self._amphora_health_repo.delete(db_apis.get_session(),
                                         amphora_id=amp.id)
        return

    if (CONF.house_keeping.spare_amphora_pool_size == 0) and (
            CONF.nova.enable_anti_affinity is False):
        LOG.warning("Failing over amphora with no spares pool may "
                    "cause delays in failover times while a new "
                    "amphora instance boots.")

    # if we run with anti-affinity we need to set the server group
    # as well
    lb = self._amphora_repo.get_lb_for_amphora(db_apis.get_session(),
                                               amp.id)
    if CONF.nova.enable_anti_affinity and lb:
        stored_params[constants.SERVER_GROUP_ID] = lb.server_group_id

    # Fix: a spare amphora has no load balancer, so ``lb`` may be None.
    # The original dereferenced ``lb.flavor_id`` / ``lb.topology``
    # unconditionally and would raise AttributeError for spares.
    if lb and lb.flavor_id:
        stored_params[constants.FLAVOR] = (
            self._flavor_repo.get_flavor_metadata_dict(
                db_apis.get_session(), lb.flavor_id))
    else:
        stored_params[constants.FLAVOR] = {}

    distributor = None
    if lb and lb.topology == constants.TOPOLOGY_ACTIVE_ACTIVE:
        distributor = getattr(lb, "distributor", None)
    stored_params[constants.DISTRIBUTOR] = distributor

    failover_amphora_tf = self._taskflow_load(
        self._amphora_flows.get_failover_flow(role=amp.role,
                                              load_balancer=lb,
                                              distributor=distributor),
        store=stored_params)
    with tf_logging.DynamicLoggingListener(
            failover_amphora_tf, log=LOG,
            hide_inputs_outputs_of=self._exclude_result_logging_tasks):
        failover_amphora_tf.run()
def update_l7rule(self, l7rule_id, l7rule_updates):
    """Updates an L7 rule.

    :param l7rule_id: ID of the l7rule to update
    :param l7rule_updates: Dict containing updated l7rule attributes
    :returns: None
    :raises L7RuleNotFound: The referenced l7rule was not found
    """
    try:
        l7rule = self._get_db_obj_until_pending_update(
            self._l7rule_repo, l7rule_id)
    except tenacity.RetryError as e:
        # Proceed with the last fetched row; an in-flight upgrade may
        # never flip the status to PENDING_UPDATE.
        LOG.warning('L7 rule did not go into %s in 60 seconds. '
                    'This either due to an in-progress Octavia upgrade '
                    'or an overloaded and failing database. Assuming '
                    'an upgrade is in progress and continuing.',
                    constants.PENDING_UPDATE)
        l7rule = e.last_attempt.result()

    policy = l7rule.l7policy
    flow_store = {constants.L7RULE: l7rule,
                  constants.L7POLICY: policy,
                  constants.LISTENERS: [policy.listener],
                  constants.LOADBALANCER: policy.listener.load_balancer,
                  constants.UPDATE_DICT: l7rule_updates}
    update_flow = self._taskflow_load(
        self._l7rule_flows.get_update_l7rule_flow(), store=flow_store)
    with tf_logging.DynamicLoggingListener(update_flow, log=LOG):
        update_flow.run()
def create_amphora(self):
    """Creates an Amphora.

    This is used to create spare amphora.

    :returns: amphora_id, or None if the create failed
    """
    try:
        create_amp_tf = self._taskflow_load(
            self._amphora_flows.get_create_amphora_flow(),
            store={constants.BUILD_TYPE_PRIORITY:
                   constants.LB_CREATE_SPARES_POOL_PRIORITY,
                   constants.FLAVOR: None})
        with tf_logging.DynamicLoggingListener(
                create_amp_tf, log=LOG,
                hide_inputs_outputs_of=self._exclude_result_logging_tasks):
            create_amp_tf.run()
        return create_amp_tf.storage.fetch('amphora')
    except Exception as e:
        # Best-effort spare creation: log and fall through (returns
        # None). Lazy %-style args defer formatting until the record is
        # actually emitted (the original used eager str.format).
        LOG.error('Failed to create an amphora due to: %s', e)
def create_distributor(self, distributor_id):
    """Creates a distributor.

    :param distributor_id: ID of the distributor to create
    :returns: None
    :raises NoResultFound: Unable to find the object
    """
    distributor = self._distributor_repo.get(db_apis.get_session(),
                                             id=distributor_id)
    if not distributor:
        LOG.warning(
            'Failed to fetch %s %s from DB. Retrying for up to '
            '60 seconds.', 'distributor', distributor_id)
        raise db_exceptions.NoResultFound

    # Load the distributor driver named on the DB record via stevedore.
    driver = stevedore_driver.DriverManager(
        namespace='octavia.distributor.drivers',
        name=distributor.distributor_driver,
        invoke_on_load=True).driver

    dist_flow = self._taskflow_load(
        self._distributor_flows.get_create_distributor_flows(driver),
        store={constants.DISTRIBUTOR: distributor})
    with tf_logging.DynamicLoggingListener(dist_flow, log=LOG):
        dist_flow.run()
def failover_amphora(self, amphora_id):
    """Perform failover operations for an amphora.

    :param amphora_id: ID for amphora to failover
    :returns: None
    :raises AmphoraNotFound: The referenced amphora was not found
    """
    try:
        amp = self._amphora_repo.get(db_apis.get_session(),
                                     id=amphora_id)
        failover_amphora_tf = self._taskflow_load(
            self._amphora_flows.get_failover_flow(role=amp.role),
            store={constants.FAILED_AMPHORA: amp,
                   constants.LOADBALANCER_ID: amp.load_balancer_id})
        with tf_logging.DynamicLoggingListener(
                failover_amphora_tf, log=LOG,
                hide_inputs_outputs_of=self._exclude_result_logging_tasks):
            failover_amphora_tf.run()
    except Exception as e:
        # Re-raise after logging so the caller still sees the failure.
        with excutils.save_and_reraise_exception():
            # Fix: pass the exception as a lazy %-arg instead of
            # eagerly formatting with `_LE(...) % e` inside the call.
            LOG.error("Failover exception: %s", e)
def update_health_monitor(self, pool_id, health_monitor_updates):
    """Updates a health monitor.

    :param pool_id: ID of the pool to have it's health monitor updated
    :param health_monitor_updates: Dict containing updated health monitor
    :returns: None
    :raises HMNotFound: The referenced health monitor was not found
    """
    health_mon = self._health_mon_repo.get(db_apis.get_session(),
                                           pool_id=pool_id)
    # Attach the monitor to its pool so flow tasks can reach it there.
    health_mon.pool.health_monitor = health_mon

    flow_store = {constants.HEALTH_MON: health_mon,
                  constants.LISTENERS: health_mon.pool.listeners,
                  constants.LOADBALANCER: health_mon.pool.load_balancer,
                  constants.UPDATE_DICT: health_monitor_updates}
    hm_flow = self._taskflow_load(
        self._health_monitor_flows.get_update_health_monitor_flow(),
        store=flow_store)
    with tf_logging.DynamicLoggingListener(hm_flow, log=LOG):
        hm_flow.run()
def get_agent_info(self):
    """Runs the backend-info flow to gather agent information."""
    info_flow = self._taskflow_load(
        self._te_flows.get_backend_info_flow(), store={})
    with tf_logging.DynamicLoggingListener(info_flow, log=LOG):
        info_flow.run()
def create_load_balancer(self, load_balancer_id):
    """Creates a load balancer by allocating Amphorae.

    First tries to allocate an existing Amphora in READY state.
    If none are available it will attempt to build one specifically
    for this load balancer.

    :param load_balancer_id: ID of the load balancer to create
    :returns: None
    :raises NoSuitableAmphoraException: Unable to allocate an Amphora.
    """
    topology = CONF.controller_worker.loadbalancer_topology
    store = {
        constants.LOADBALANCER_ID: load_balancer_id,
        constants.BUILD_TYPE_PRIORITY:
            constants.LB_CREATE_NORMAL_PRIORITY,
        # Record the topology on the LB as part of the create.
        constants.UPDATE_DICT: {
            constants.LOADBALANCER_TOPOLOGY: topology,
        },
    }

    lb = self._lb_repo.get(db_apis.get_session(), id=load_balancer_id)
    lb_flow = self._lb_flows.get_create_load_balancer_flow(
        topology=topology, listeners=lb.listeners)

    create_lb_tf = self._taskflow_load(lb_flow, store=store)
    with tf_logging.DynamicLoggingListener(
            create_lb_tf, log=LOG,
            hide_inputs_outputs_of=self._exclude_result_logging_tasks):
        create_lb_tf.run()
def create_health_monitor(self, health_monitor_id):
    """Creates a health monitor.

    :param health_monitor_id: ID of the health monitor to create
    :returns: None
    :raises NoResultFound: Unable to find the object
    """
    health_mon = self._health_mon_repo.get(db_apis.get_session(),
                                           id=health_monitor_id)
    if not health_mon:
        # Raise so the surrounding retry machinery can try again while
        # the API-side DB commit lands.
        LOG.warning('Failed to fetch %s %s from DB. Retrying for up to '
                    '60 seconds.', 'health_monitor', health_monitor_id)
        raise db_exceptions.NoResultFound

    pool = health_mon.pool
    listeners = pool.listeners
    # Attach the monitor to its pool so flow tasks can reach it there.
    pool.health_monitor = health_mon
    load_balancer = pool.load_balancer

    create_hm_tf = self._taskflow_load(
        self._health_monitor_flows.get_create_health_monitor_flow(),
        store={constants.HEALTH_MON: health_mon,
               constants.POOL: pool,
               constants.LISTENERS: listeners,
               constants.LOADBALANCER: load_balancer})
    with tf_logging.DynamicLoggingListener(create_hm_tf, log=LOG):
        create_hm_tf.run()
def create_l7rule(self, l7rule_id):
    """Creates an L7 Rule.

    :param l7rule_id: ID of the l7rule to create
    :returns: None
    :raises NoResultFound: Unable to find the object
    """
    l7rule = self._l7rule_repo.get(db_apis.get_session(), id=l7rule_id)
    if not l7rule:
        LOG.warning('Failed to fetch %s %s from DB. Retrying for up to '
                    '60 seconds.', 'l7rule', l7rule_id)
        raise db_exceptions.NoResultFound

    policy = l7rule.l7policy
    flow_store = {constants.L7RULE: l7rule,
                  constants.L7POLICY: policy,
                  constants.LISTENERS: [policy.listener],
                  constants.LOADBALANCER: policy.listener.load_balancer}
    rule_flow = self._taskflow_load(
        self._l7rule_flows.get_create_l7rule_flow(), store=flow_store)
    with tf_logging.DynamicLoggingListener(rule_flow, log=LOG):
        rule_flow.run()
def update_health_monitor(self, health_monitor_id,
                          health_monitor_updates):
    """Updates a health monitor.

    :param health_monitor_id: ID of the health monitor to update
    :param health_monitor_updates: Dict containing updated health monitor
    :returns: None
    :raises HMNotFound: The referenced health monitor was not found
    """
    health_mon = None
    try:
        health_mon = self._get_db_obj_until_pending_update(
            self._health_mon_repo, health_monitor_id)
    except tenacity.RetryError as e:
        # Proceed with the last fetched row; an in-flight upgrade may
        # never flip the status to PENDING_UPDATE.
        LOG.warning('Health monitor did not go into %s in 60 seconds. '
                    'This either due to an in-progress Octavia upgrade '
                    'or an overloaded and failing database. Assuming '
                    'an upgrade is in progress and continuing.',
                    constants.PENDING_UPDATE)
        health_mon = e.last_attempt.result()

    pool = health_mon.pool
    listeners = pool.listeners
    # Attach the monitor to its pool so flow tasks can reach it there.
    pool.health_monitor = health_mon
    load_balancer = pool.load_balancer

    update_hm_tf = self._taskflow_load(
        self._health_monitor_flows.get_update_health_monitor_flow(),
        store={constants.HEALTH_MON: health_mon,
               constants.POOL: pool,
               constants.LISTENERS: listeners,
               constants.LOADBALANCER: load_balancer,
               constants.UPDATE_DICT: health_monitor_updates})
    with tf_logging.DynamicLoggingListener(update_hm_tf, log=LOG):
        update_hm_tf.run()
def update_amphora_agent_config(self, amphora_id):
    """Update the amphora agent configuration.

    Note: This will update the amphora agent configuration file and
          update the running configuration for mutatable configuration
          items.

    :param amphora_id: ID of the amphora to update.
    :returns: None
    """
    LOG.info("Start amphora agent configuration update, amphora's id "
             "is: %s", amphora_id)
    amp = self._amphora_repo.get(db_apis.get_session(), id=amphora_id)
    lb = self._amphora_repo.get_lb_for_amphora(db_apis.get_session(),
                                               amphora_id)
    flavor = {}
    # Fix: an amphora not attached to a load balancer yields lb=None;
    # the original dereferenced lb.flavor_id unconditionally and would
    # raise AttributeError in that case.
    if lb and lb.flavor_id:
        flavor = self._flavor_repo.get_flavor_metadata_dict(
            db_apis.get_session(), lb.flavor_id)

    update_amphora_tf = self._taskflow_load(
        self._amphora_flows.update_amphora_config_flow(),
        store={constants.AMPHORA: amp.to_dict(),
               constants.FLAVOR: flavor})
    with tf_logging.DynamicLoggingListener(update_amphora_tf, log=LOG):
        update_amphora_tf.run()
def create_listener(self, listener_id):
    """Creates a listener.

    :param listener_id: ID of the listener to create
    :returns: None
    :raises NoResultFound: Unable to find the object
    """
    listener = self._listener_repo.get(db_apis.get_session(),
                                       id=listener_id)
    if not listener:
        LOG.warning('Failed to fetch %s %s from DB. Retrying for up to '
                    '60 seconds.', 'listener', listener_id)
        raise db_exceptions.NoResultFound

    flow_store = {constants.LOADBALANCER: listener.load_balancer,
                  constants.LISTENERS: [listener]}
    listener_flow = self._taskflow_load(
        self._listener_flows.get_create_listener_flow(),
        store=flow_store)
    with tf_logging.DynamicLoggingListener(listener_flow, log=LOG):
        listener_flow.run()
def delete_health_monitor(self, health_monitor):
    """Deletes a health monitor.

    :param health_monitor: Provider health monitor dict
    :returns: None
    :raises HMNotFound: The referenced health monitor was not found
    """
    db_hm = self._health_mon_repo.get(
        db_apis.get_session(),
        id=health_monitor[constants.HEALTHMONITOR_ID])
    db_pool = db_hm.pool
    lb = db_pool.load_balancer
    provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
        lb).to_dict()
    listener_dicts = (
        provider_utils.db_listeners_to_provider_dicts_list_of_dicts(
            db_pool.listeners))

    flow_store = {constants.HEALTH_MON: health_monitor,
                  constants.POOL_ID: db_pool.id,
                  constants.LISTENERS: listener_dicts,
                  constants.LOADBALANCER_ID: lb.id,
                  constants.LOADBALANCER: provider_lb,
                  constants.PROJECT_ID: lb.project_id}
    hm_flow = self._taskflow_load(
        self._health_monitor_flows.get_delete_health_monitor_flow(),
        store=flow_store)
    with tf_logging.DynamicLoggingListener(hm_flow, log=LOG):
        hm_flow.run()
def update_load_balancer(self, load_balancer_id, load_balancer_updates):
    """Updates a load balancer.

    :param load_balancer_id: ID of the load balancer to update
    :param load_balancer_updates: Dict containing updated load balancer
    :returns: None
    :raises LBNotFound: The referenced load balancer was not found
    """
    try:
        lb = self._get_db_obj_until_pending_update(
            self._lb_repo, load_balancer_id)
    except tenacity.RetryError as e:
        # Proceed with the last fetched row; an in-flight upgrade may
        # never flip the status to PENDING_UPDATE.
        LOG.warning('Load balancer did not go into %s in 60 seconds. '
                    'This either due to an in-progress Octavia upgrade '
                    'or an overloaded and failing database. Assuming '
                    'an upgrade is in progress and continuing.',
                    constants.PENDING_UPDATE)
        lb = e.last_attempt.result()

    # get_all returns a tuple; the second element is unused here.
    listeners, _ = self._listener_repo.get_all(
        db_apis.get_session(), load_balancer_id=load_balancer_id)

    flow_store = {constants.LOADBALANCER: lb,
                  constants.LISTENERS: listeners,
                  constants.UPDATE_DICT: load_balancer_updates}
    lb_flow = self._taskflow_load(
        self._lb_flows.get_update_load_balancer_flow(),
        store=flow_store)
    with tf_logging.DynamicLoggingListener(lb_flow, log=LOG):
        lb_flow.run()
def delete_load_balancer(self, load_balancer, cascade=False):
    """Deletes a load balancer by de-allocating Amphorae.

    :param load_balancer: Dict of the load balancer to delete
    :returns: None
    :raises LBNotFound: The referenced load balancer was not found
    """
    db_lb = self._lb_repo.get(
        db_apis.get_session(),
        id=load_balancer[constants.LOADBALANCER_ID])

    flow_store = {}
    if cascade:
        # Cascade delete also tears down child pools and listeners, so
        # seed the store with their per-object sub-stores.
        flow = self._lb_flows.get_cascade_delete_load_balancer_flow(
            load_balancer)
        flow_store.update(self._lb_flows.get_delete_pools_store(db_lb))
        flow_store.update(
            self._lb_flows.get_delete_listeners_store(db_lb))
    else:
        flow = self._lb_flows.get_delete_load_balancer_flow(
            load_balancer)
    flow_store.update({constants.LOADBALANCER: load_balancer,
                       constants.SERVER_GROUP_ID: db_lb.server_group_id,
                       constants.PROJECT_ID: db_lb.project_id})

    delete_flow = self._taskflow_load(flow, store=flow_store)
    with tf_logging.DynamicLoggingListener(delete_flow, log=LOG):
        delete_flow.run()
def create_member(self, member_id):
    """Creates a pool member.

    :param member_id: ID of the member to create
    :returns: None
    :raises NoSuitablePool: Unable to find the node pool
    """
    member = self._member_repo.get(db_apis.get_session(), id=member_id)
    if not member:
        LOG.warning('Failed to fetch %s %s from DB. Retrying for up to '
                    '60 seconds.', 'member', member_id)
        raise db_exceptions.NoResultFound

    pool = member.pool
    flow_store = {constants.MEMBER: member,
                  constants.LISTENERS: pool.listeners,
                  constants.LOADBALANCER: pool.load_balancer,
                  constants.POOL: pool}
    member_flow = self._taskflow_load(
        self._member_flows.get_create_member_flow(), store=flow_store)
    with tf_logging.DynamicLoggingListener(member_flow, log=LOG):
        member_flow.run()
def delete_pool(self, pool):
    """Deletes a node pool.

    :param pool: Provider pool dict to delete
    :returns: None
    :raises PoolNotFound: The referenced pool was not found
    """
    db_pool = self._pool_repo.get(db_apis.get_session(),
                                  id=pool[constants.POOL_ID])
    listener_dicts = (
        provider_utils.db_listeners_to_provider_dicts_list_of_dicts(
            db_pool.listeners))
    lb = db_pool.load_balancer
    provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
        lb).to_dict()

    flow_store = {constants.POOL_ID: pool[constants.POOL_ID],
                  constants.LISTENERS: listener_dicts,
                  constants.LOADBALANCER: provider_lb,
                  constants.LOADBALANCER_ID: lb.id,
                  constants.PROJECT_ID: db_pool.project_id}
    pool_flow = self._taskflow_load(
        self._pool_flows.get_delete_pool_flow(), store=flow_store)
    with tf_logging.DynamicLoggingListener(pool_flow, log=LOG):
        pool_flow.run()
def batch_update_members(self, old_member_ids, new_member_ids,
                         updated_members):
    """Applies a batch of member deletes, creates and updates.

    :param old_member_ids: IDs of the members being removed
    :param new_member_ids: IDs of the members being added
    :param updated_members: Dicts of updated member attributes; each
                            dict carries the member 'id'
    :returns: None
    """
    old_members = [self._member_repo.get(db_apis.get_session(), id=mid)
                   for mid in old_member_ids]
    new_members = [self._member_repo.get(db_apis.get_session(), id=mid)
                   for mid in new_member_ids]
    updated_members = [
        (self._member_repo.get(db_apis.get_session(), id=m.get('id')), m)
        for m in updated_members]

    # All members belong to one pool; take it from the first non-empty
    # batch.
    if old_members:
        pool = old_members[0].pool
    elif new_members:
        pool = new_members[0].pool
    else:
        pool = updated_members[0][0].pool

    batch_flow = self._taskflow_load(
        self._member_flows.get_batch_update_members_flow(
            old_members, new_members, updated_members),
        store={constants.LISTENERS: pool.listeners,
               constants.LOADBALANCER: pool.load_balancer,
               constants.POOL: pool})
    with tf_logging.DynamicLoggingListener(batch_flow, log=LOG):
        batch_flow.run()
def create_l7rule(self, l7rule):
    """Creates an L7 Rule.

    :param l7rule: Provider dict l7rule
    :returns: None
    :raises NoResultFound: Unable to find the object
    """
    db_policy = self._l7policy_repo.get(
        db_apis.get_session(), id=l7rule[constants.L7POLICY_ID])
    lb = db_policy.listener.load_balancer
    listener_dicts = (
        provider_utils.db_listeners_to_provider_dicts_list_of_dicts(
            [db_policy.listener]))
    provider_policy = provider_utils.db_l7policy_to_provider_l7policy(
        db_policy)

    flow_store = {constants.L7RULE: l7rule,
                  constants.L7POLICY: provider_policy.to_dict(),
                  constants.LISTENERS: listener_dicts,
                  constants.L7POLICY_ID: db_policy.id,
                  constants.LOADBALANCER_ID: lb.id}
    rule_flow = self._taskflow_load(
        self._l7rule_flows.get_create_l7rule_flow(), store=flow_store)
    with tf_logging.DynamicLoggingListener(rule_flow, log=LOG):
        rule_flow.run()
def update_pool(self, pool_id, pool_updates):
    """Updates a node pool.

    :param pool_id: ID of the pool to update
    :param pool_updates: Dict containing updated pool attributes
    :returns: None
    :raises PoolNotFound: The referenced pool was not found
    """
    try:
        pool = self._get_db_obj_until_pending_update(
            self._pool_repo, pool_id)
    except tenacity.RetryError as e:
        # Proceed with the last fetched row; an in-flight upgrade may
        # never flip the status to PENDING_UPDATE.
        LOG.warning('Pool did not go into %s in 60 seconds. '
                    'This either due to an in-progress Octavia upgrade '
                    'or an overloaded and failing database. Assuming '
                    'an upgrade is in progress and continuing.',
                    constants.PENDING_UPDATE)
        pool = e.last_attempt.result()

    flow_store = {constants.POOL: pool,
                  constants.LISTENERS: pool.listeners,
                  constants.LOADBALANCER: pool.load_balancer,
                  constants.UPDATE_DICT: pool_updates}
    pool_flow = self._taskflow_load(
        self._pool_flows.get_update_pool_flow(), store=flow_store)
    with tf_logging.DynamicLoggingListener(pool_flow, log=LOG):
        pool_flow.run()
def create_health_monitor(self, health_monitor_id):
    """Creates a health monitor.

    :param health_monitor_id: ID of the health monitor to create
    :returns: None
    :raises NoResultFound: Unable to find the object
    """
    health_mon = self._health_mon_repo.get(db_apis.get_session(),
                                           id=health_monitor_id)
    if not health_mon:
        # Consistency fix: every sibling create_* handler warns and
        # raises NoResultFound when the row has not landed in the DB
        # yet; previously this method crashed with AttributeError on
        # None below instead of triggering the retry path.
        LOG.warning('Failed to fetch %s %s from DB. Retrying for up to '
                    '60 seconds.', 'health_monitor', health_monitor_id)
        raise db_exceptions.NoResultFound

    pool = health_mon.pool
    listeners = pool.listeners
    # Attach the monitor to its pool so flow tasks can reach it there.
    pool.health_monitor = health_mon
    load_balancer = pool.load_balancer

    create_hm_tf = self._taskflow_load(
        self._health_monitor_flows.get_create_health_monitor_flow(),
        store={constants.HEALTH_MON: health_mon,
               constants.POOL: pool,
               constants.LISTENERS: listeners,
               constants.LOADBALANCER: load_balancer})
    with tf_logging.DynamicLoggingListener(create_hm_tf, log=LOG):
        create_hm_tf.run()
def run_flow(self, func, *args, **kwargs):
    """Runs a flow, via the jobboard poster or a local taskflow engine.

    :param func: Callable returning the flow to run
    :param args: Positional arguments forwarded to ``func``
    :param kwargs: Options forwarded to the engine loader (e.g. store)
    """
    if CONF.task_flow.jobboard_enabled:
        # Jobboard mode: hand the flow off to the posting service.
        self.services_controller.run_poster(func, *args, **kwargs)
        return
    engine = self.tf_engine.taskflow_load(func(*args), **kwargs)
    with tf_logging.DynamicLoggingListener(engine, log=LOG):
        engine.run()
def test_dynamic(self):
    """Dynamic listener on success logs only at DEBUG level."""
    flow = lf.Flow("test")
    flow.add(test_utils.TaskNoRequiresNoReturns("test-1"))
    e = self._make_engine(flow)
    log, handler = self._make_logger()
    with logging_listeners.DynamicLoggingListener(e, log=log):
        e.run()
    # Fix: assertGreater(a, b) asserts a > b. The original had the
    # arguments reversed (0 > count), which can never hold for a
    # non-negative counter; the intent is that DEBUG records were
    # emitted.
    self.assertGreater(handler.counts[logging.DEBUG], 0)
    for levelno in _LOG_LEVELS - set([logging.DEBUG]):
        self.assertEqual(0, handler.counts[levelno])
    self.assertEqual([], handler.exc_infos)