def delete(self, id): """Deletes a pool member.""" context = pecan.request.context.get('octavia_context') db_member = self._get_db_member(context.session, id, show_deleted=False) pool = self.repositories.pool.get(context.session, id=db_member.pool_id) project_id, provider = self._get_lb_project_id_provider( context.session, pool.load_balancer_id) self._auth_validate_action(context, project_id, constants.RBAC_DELETE) self._validate_pool_id(id, db_member.pool_id) # Load the driver early as it also provides validation driver = driver_factory.get_driver(provider) with db_api.get_lock_session() as lock_session: self._test_lb_and_listener_and_pool_statuses(lock_session, member=db_member) self.repositories.member.update( lock_session, db_member.id, provisioning_status=constants.PENDING_DELETE) LOG.info("Sending delete Member %s to provider %s", id, driver.name) provider_member = ( driver_utils.db_member_to_provider_member(db_member)) driver_utils.call_provider(driver.name, driver.member_delete, provider_member)
def delete(self, id): """Deletes a l7policy.""" context = pecan.request.context.get('octavia_context') db_l7policy = self._get_db_l7policy(context.session, id, show_deleted=False) load_balancer_id, listener_id = self._get_listener_and_loadbalancer_id( db_l7policy) project_id, provider = self._get_lb_project_id_provider( context.session, load_balancer_id) self._auth_validate_action(context, project_id, constants.RBAC_DELETE) if db_l7policy.provisioning_status == constants.DELETED: return # Load the driver early as it also provides validation driver = driver_factory.get_driver(provider) with db_api.get_lock_session() as lock_session: self._test_lb_and_listener_statuses(lock_session, lb_id=load_balancer_id, listener_ids=[listener_id]) self.repositories.l7policy.update( lock_session, db_l7policy.id, provisioning_status=constants.PENDING_DELETE) LOG.info("Sending delete L7 Policy %s to provider %s", id, driver.name) provider_l7policy = driver_utils.db_l7policy_to_provider_l7policy( db_l7policy) driver_utils.call_provider(driver.name, driver.l7policy_delete, provider_l7policy)
def delete(self, id): """Deletes a pool from a load balancer.""" context = pecan.request.context.get('octavia_context') db_pool = self._get_db_pool(context.session, id, show_deleted=False) if db_pool.l7policies: raise exceptions.PoolInUseByL7Policy( id=db_pool.id, l7policy_id=db_pool.l7policies[0].id) project_id, provider = self._get_lb_project_id_provider( context.session, db_pool.load_balancer_id) self._auth_validate_action(context, project_id, constants.RBAC_DELETE) # Load the driver early as it also provides validation driver = driver_factory.get_driver(provider) with db_api.get_lock_session() as lock_session: self._test_lb_and_listener_statuses( lock_session, lb_id=db_pool.load_balancer_id, listener_ids=self._get_affected_listener_ids(db_pool)) self.repositories.pool.update( lock_session, db_pool.id, provisioning_status=constants.PENDING_DELETE) LOG.info("Sending delete Pool %s to provider %s", id, driver.name) provider_pool = ( driver_utils.db_pool_to_provider_pool(db_pool)) driver_utils.call_provider(driver.name, driver.pool_delete, provider_pool)
def delete(self, id): """Deletes a listener from a load balancer.""" context = pecan.request.context.get('octavia_context') db_listener = self._get_db_listener(context.session, id, show_deleted=False) load_balancer_id = db_listener.load_balancer_id project_id, provider = self._get_lb_project_id_provider( context.session, load_balancer_id) self._auth_validate_action(context, project_id, constants.RBAC_DELETE) # Load the driver early as it also provides validation driver = driver_factory.get_driver(provider) with db_api.get_lock_session() as lock_session: self._test_lb_and_listener_statuses( lock_session, load_balancer_id, id=id, listener_status=constants.PENDING_DELETE) LOG.info("Sending delete Listener %s to provider %s", id, driver.name) provider_listener = ( driver_utils.db_listener_to_provider_listener(db_listener)) driver_utils.call_provider(driver.name, driver.listener_delete, provider_listener)
def delete(self, id, cascade=False):
    """Deletes a load balancer."""
    context = pecan.request.context.get('octavia_context')
    cascade = strutils.bool_from_string(cascade)
    db_lb = self._get_db_lb(context.session, id, show_deleted=False)

    self._auth_validate_action(context, db_lb.project_id,
                               constants.RBAC_DELETE)

    # Load the driver early as it also provides validation
    driver = driver_factory.get_driver(db_lb.provider)

    with db_api.get_lock_session() as lock_session:
        if (db_lb.listeners or db_lb.pools) and not cascade:
            msg = _("Cannot delete Load Balancer %s - "
                    "it has children") % id
            LOG.warning(msg)
            raise exceptions.ValidationException(detail=msg)
        self._test_lb_status(lock_session, id,
                             lb_status=constants.PENDING_DELETE)

        LOG.info("Sending delete Load Balancer %s to provider %s", id,
                 driver.name)
        provider_loadbalancer = (
            driver_utils.db_loadbalancer_to_provider_loadbalancer(db_lb))
        driver_utils.call_provider(driver.name,
                                   driver.loadbalancer_delete,
                                   provider_loadbalancer, cascade)
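# Every handler above hands the actual work to driver_utils.call_provider
# so a driver failure surfaces as one well-known API error instead of an
# unhandled 500. A minimal, self-contained sketch of that dispatch pattern
# (a simplification for illustration; the exception class and message
# format below are assumptions, not Octavia's exact implementation):

import logging

LOG_SKETCH = logging.getLogger(__name__)


class ProviderDriverError(Exception):
    """Raised when a provider driver fails a dispatched call."""


def call_provider(provider_name, driver_method, *args, **kwargs):
    # Invoke the driver entry point; translate any failure into a single
    # API-level exception that names the offending provider.
    try:
        return driver_method(*args, **kwargs)
    except Exception as e:
        LOG_SKETCH.exception("Provider '%s' raised an error: %s",
                             provider_name, e)
        raise ProviderDriverError(
            "Provider '%s' reports error: %s" % (provider_name, e))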
def put(self, id, listener_):
    """Updates a listener on a load balancer."""
    listener = listener_.listener
    context = pecan.request.context.get('octavia_context')
    db_listener = self._get_db_listener(context.session, id,
                                        show_deleted=False)
    load_balancer_id = db_listener.load_balancer_id

    project_id, provider = self._get_lb_project_id_provider(
        context.session, load_balancer_id)

    self._auth_validate_action(context, project_id, constants.RBAC_PUT)

    self._validate_listener_PUT(listener, db_listener)

    self._set_default_on_none(listener)

    if listener.default_pool_id:
        self._validate_pool(context.session, load_balancer_id,
                            listener.default_pool_id, db_listener.protocol)

    # Load the driver early as it also provides validation
    driver = driver_factory.get_driver(provider)

    with db_api.get_lock_session() as lock_session:
        self._test_lb_and_listener_statuses(lock_session,
                                            load_balancer_id, id=id)

        # Prepare the data for the driver data model
        listener_dict = listener.to_dict(render_unsets=False)
        listener_dict['id'] = id
        provider_listener_dict = (
            driver_utils.listener_dict_to_provider_dict(listener_dict))

        # Also prepare the baseline object data
        old_provider_listener = (
            driver_utils.db_listener_to_provider_listener(db_listener))

        # Dispatch to the driver
        LOG.info("Sending update Listener %s to provider %s", id,
                 driver.name)
        driver_utils.call_provider(
            driver.name, driver.listener_update,
            old_provider_listener,
            driver_dm.Listener.from_dict(provider_listener_dict))

        # Update the database to reflect what the driver just accepted
        self.repositories.listener.update(
            lock_session, id, **listener.to_dict(render_unsets=False))

    # Force SQL alchemy to query the DB, otherwise we get inconsistent
    # results
    context.session.expire_all()

    db_listener = self._get_db_listener(context.session, id)
    result = self._convert_db_to_type(db_listener,
                                      listener_types.ListenerResponse)
    return listener_types.ListenerRootResponse(listener=result)
def put(self, id, pool_):
    """Updates a pool on a load balancer."""
    pool = pool_.pool
    context = pecan.request.context.get('octavia_context')
    db_pool = self._get_db_pool(context.session, id, show_deleted=False)

    project_id, provider = self._get_lb_project_id_provider(
        context.session, db_pool.load_balancer_id)

    self._auth_validate_action(context, project_id, constants.RBAC_PUT)

    if (pool.session_persistence and
            not pool.session_persistence.type and
            db_pool.session_persistence and
            db_pool.session_persistence.type):
        pool.session_persistence.type = db_pool.session_persistence.type

    self._validate_pool_PUT(pool, db_pool)

    # Load the driver early as it also provides validation
    driver = driver_factory.get_driver(provider)

    with db_api.get_lock_session() as lock_session:
        # Check the statuses under the lock session so they cannot
        # change out from under us before the update is staged.
        self._test_lb_and_listener_statuses(
            lock_session, lb_id=db_pool.load_balancer_id,
            listener_ids=self._get_affected_listener_ids(db_pool))

        # Prepare the data for the driver data model
        pool_dict = pool.to_dict(render_unsets=False)
        pool_dict['id'] = id
        provider_pool_dict = (
            driver_utils.pool_dict_to_provider_dict(pool_dict))

        # Also prepare the baseline object data
        old_provider_pool = driver_utils.db_pool_to_provider_pool(db_pool)

        # Dispatch to the driver
        LOG.info("Sending update Pool %s to provider %s", id, driver.name)
        driver_utils.call_provider(
            driver.name, driver.pool_update,
            old_provider_pool,
            driver_dm.Pool.from_dict(provider_pool_dict))

        # Update the database to reflect what the driver just accepted
        pool.provisioning_status = constants.PENDING_UPDATE
        db_pool_dict = pool.to_dict(render_unsets=False)
        self.repositories.update_pool_and_sp(lock_session, id, db_pool_dict)

    # Force SQL alchemy to query the DB, otherwise we get inconsistent
    # results
    context.session.expire_all()

    db_pool = self._get_db_pool(context.session, id)
    result = self._convert_db_to_type(db_pool, pool_types.PoolResponse)
    return pool_types.PoolRootResponse(pool=result)
def put(self, id, load_balancer):
    """Updates a load balancer."""
    load_balancer = load_balancer.loadbalancer
    context = pecan.request.context.get('octavia_context')
    db_lb = self._get_db_lb(context.session, id, show_deleted=False)

    self._auth_validate_action(context, db_lb.project_id,
                               constants.RBAC_PUT)

    if not isinstance(load_balancer.vip_qos_policy_id, wtypes.UnsetType):
        network_driver = utils.get_network_driver()
        validate.qos_extension_enabled(network_driver)
        if load_balancer.vip_qos_policy_id is not None:
            if db_lb.vip.qos_policy_id != load_balancer.vip_qos_policy_id:
                validate.qos_policy_exists(load_balancer.vip_qos_policy_id)

    # Load the driver early as it also provides validation
    driver = driver_factory.get_driver(db_lb.provider)

    with db_api.get_lock_session() as lock_session:
        self._test_lb_status(lock_session, id)

        # Prepare the data for the driver data model
        lb_dict = load_balancer.to_dict(render_unsets=False)
        lb_dict['id'] = id
        vip_dict = lb_dict.pop('vip', {})
        lb_dict = driver_utils.lb_dict_to_provider_dict(lb_dict)
        if 'qos_policy_id' in vip_dict:
            lb_dict['vip_qos_policy_id'] = vip_dict['qos_policy_id']

        # Also prepare the baseline object data
        old_provider_lb = (
            driver_utils.db_loadbalancer_to_provider_loadbalancer(db_lb))

        # Dispatch to the driver
        LOG.info("Sending update Load Balancer %s to provider %s", id,
                 driver.name)
        driver_utils.call_provider(
            driver.name, driver.loadbalancer_update,
            old_provider_lb,
            driver_dm.LoadBalancer.from_dict(lb_dict))

        db_lb_dict = load_balancer.to_dict(render_unsets=False)
        if 'vip' in db_lb_dict:
            db_vip_dict = db_lb_dict.pop('vip')
            self.repositories.vip.update(lock_session, id, **db_vip_dict)
        if db_lb_dict:
            self.repositories.load_balancer.update(lock_session, id,
                                                   **db_lb_dict)

    # Force SQL alchemy to query the DB, otherwise we get inconsistent
    # results
    context.session.expire_all()

    db_lb = self._get_db_lb(context.session, id)
    result = self._convert_db_to_type(db_lb, lb_types.LoadBalancerResponse)
    return lb_types.LoadBalancerRootResponse(loadbalancer=result)
def put(self, id, member_):
    """Updates a pool member."""
    member = member_.member
    context = pecan.request.context.get('octavia_context')
    db_member = self._get_db_member(context.session, id,
                                    show_deleted=False)

    pool = self.repositories.pool.get(context.session,
                                      id=db_member.pool_id)
    project_id, provider = self._get_lb_project_id_provider(
        context.session, pool.load_balancer_id)

    self._auth_validate_action(context, project_id, constants.RBAC_PUT)

    self._validate_pool_id(id, db_member.pool_id)

    # Load the driver early as it also provides validation
    driver = driver_factory.get_driver(provider)

    with db_api.get_lock_session() as lock_session:
        self._test_lb_and_listener_and_pool_statuses(lock_session,
                                                     member=db_member)

        # Prepare the data for the driver data model
        member_dict = member.to_dict(render_unsets=False)
        member_dict['id'] = id
        provider_member_dict = (
            driver_utils.member_dict_to_provider_dict(member_dict))

        # Also prepare the baseline object data
        old_provider_member = driver_utils.db_member_to_provider_member(
            db_member)

        # Dispatch to the driver
        LOG.info("Sending update Member %s to provider %s", id,
                 driver.name)
        driver_utils.call_provider(
            driver.name, driver.member_update,
            old_provider_member,
            driver_dm.Member.from_dict(provider_member_dict))

        # Update the database to reflect what the driver just accepted
        member.provisioning_status = constants.PENDING_UPDATE
        db_member_dict = member.to_dict(render_unsets=False)
        self.repositories.member.update(lock_session, id, **db_member_dict)

    # Force SQL alchemy to query the DB, otherwise we get inconsistent
    # results
    context.session.expire_all()

    db_member = self._get_db_member(context.session, id)
    result = self._convert_db_to_type(db_member,
                                      member_types.MemberResponse)
    return member_types.MemberRootResponse(member=result)
def put(self, **kwargs):
    """Fails over a load balancer."""
    context = pecan.request.context.get('octavia_context')
    db_lb = self._get_db_lb(context.session, self.lb_id,
                            show_deleted=False)

    self._auth_validate_action(context, db_lb.project_id,
                               constants.RBAC_PUT_FAILOVER)

    # Load the driver early as it also provides validation
    driver = driver_factory.get_driver(db_lb.provider)

    with db_api.get_lock_session() as lock_session:
        self._test_and_set_failover_prov_status(lock_session, self.lb_id)
        LOG.info("Sending failover request for load balancer %s to the "
                 "provider %s", self.lb_id, driver.name)
        driver_utils.call_provider(driver.name,
                                   driver.loadbalancer_failover,
                                   self.lb_id)
def _send_l7policy_to_handler(self, session, db_l7policy, lb_id):
    try:
        LOG.info("Sending Creation of L7Policy %s to handler",
                 db_l7policy.id)
        self.handler.create(db_l7policy)
    except Exception:
        with excutils.save_and_reraise_exception(
                reraise=False), db_api.get_lock_session() as lock_session:
            self._reset_lb_and_listener_statuses(
                lock_session, lb_id=lb_id,
                listener_id=db_l7policy.listener_id)
            # L7Policy now goes to ERROR
            self.repositories.l7policy.update(
                lock_session, db_l7policy.id,
                provisioning_status=constants.ERROR)
    db_l7policy = self._get_db_l7policy(session, db_l7policy.id)
    result = self._convert_db_to_type(db_l7policy,
                                      l7policy_types.L7PolicyResponse)
    return l7policy_types.L7PolicyRootResponse(l7policy=result)
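# The handler-based (v1-style) methods above and below use oslo.utils to
# run rollback logic from inside an exception handler without letting the
# rollback mask the original error. A small standalone illustration of
# that idiom (the function names here are invented for the sketch; only
# excutils.save_and_reraise_exception is the real API):

from oslo_utils import excutils


def dispatch_with_rollback(send_to_handler, mark_error):
    try:
        send_to_handler()  # e.g. self.handler.create(db_l7policy)
    except Exception:
        # reraise=False captures the exception, runs the block body, and
        # then suppresses the re-raise, so the API can still build a
        # response after flipping provisioning_status to ERROR.
        with excutils.save_and_reraise_exception(reraise=False):
            mark_error()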
def put(self, id, listener_):
    """Updates a listener on a load balancer."""
    listener = listener_.listener
    context = pecan.request.context.get('octavia_context')
    db_listener = self._get_db_listener(context.session, id)
    load_balancer_id = db_listener.load_balancer_id

    self._auth_validate_action(context, db_listener.project_id,
                               constants.RBAC_PUT)

    # TODO(rm_work): Do we need something like this? What do we do on an
    # empty body for a PUT?
    if not listener:
        raise exceptions.ValidationException(
            detail='No listener object supplied.')

    if listener.default_pool_id:
        self._validate_pool(context.session, load_balancer_id,
                            listener.default_pool_id)

    self._test_lb_and_listener_statuses(context.session, load_balancer_id,
                                        id=id)

    try:
        LOG.info("Sending Update of Listener %s to handler", id)
        self.handler.update(db_listener, listener)
    except Exception:
        with excutils.save_and_reraise_exception(
                reraise=False), db_api.get_lock_session() as lock_session:
            self._reset_lb_status(
                lock_session, lb_id=db_listener.load_balancer_id)
            # Listener now goes to ERROR
            self.repositories.listener.update(
                lock_session, db_listener.id,
                provisioning_status=constants.ERROR)
    db_listener = self._get_db_listener(context.session, id)
    result = self._convert_db_to_type(db_listener,
                                      listener_types.ListenerResponse)
    return listener_types.ListenerRootResponse(listener=result)
def put(self, id, pool_):
    """Updates a pool on a load balancer."""
    pool = pool_.pool
    context = pecan.request.context.get('octavia_context')
    db_pool = self._get_db_pool(context.session, id, show_deleted=False)

    self._auth_validate_action(context, db_pool.project_id,
                               constants.RBAC_PUT)

    if pool.session_persistence:
        sp_dict = pool.session_persistence.to_dict(render_unsets=False)
        validate.check_session_persistence(sp_dict)

    self._test_lb_and_listener_statuses(
        context.session, lb_id=db_pool.load_balancer_id,
        listener_ids=self._get_affected_listener_ids(db_pool))
    self.repositories.pool.update(
        context.session, db_pool.id,
        provisioning_status=constants.PENDING_UPDATE)

    try:
        LOG.info("Sending Update of Pool %s to handler", id)
        self.handler.update(db_pool, pool)
    except Exception:
        with excutils.save_and_reraise_exception(
                reraise=False), db_api.get_lock_session() as lock_session:
            self._reset_lb_and_listener_statuses(
                lock_session, lb_id=db_pool.load_balancer_id,
                listener_ids=self._get_affected_listener_ids(db_pool))
            # Pool now goes to ERROR
            self.repositories.pool.update(
                lock_session, db_pool.id,
                provisioning_status=constants.ERROR)
    db_pool = self._get_db_pool(context.session, id)
    result = self._convert_db_to_type(db_pool, pool_types.PoolResponse)
    return pool_types.PoolRootResponse(pool=result)
def put(self, id, l7rule_):
    """Updates a l7rule."""
    l7rule = l7rule_.rule
    context = pecan.request.context.get('octavia_context')
    db_l7rule = self._get_db_l7rule(context.session, id)
    new_l7rule = db_l7rule.to_dict()
    new_l7rule.update(l7rule.to_dict())
    new_l7rule = data_models.L7Rule.from_dict(new_l7rule)

    self._auth_validate_action(context, db_l7rule.project_id,
                               constants.RBAC_PUT)

    try:
        validate.l7rule_data(new_l7rule)
    except Exception as e:
        raise exceptions.L7RuleValidation(error=e)

    self._test_lb_listener_policy_statuses(context.session)

    self.repositories.l7rule.update(
        context.session, db_l7rule.id,
        provisioning_status=constants.PENDING_UPDATE)

    try:
        LOG.info("Sending Update of L7Rule %s to handler", id)
        self.handler.update(db_l7rule, l7rule)
    except Exception:
        with excutils.save_and_reraise_exception(
                reraise=False), db_api.get_lock_session() as lock_session:
            self._reset_lb_listener_policy_statuses(lock_session)
            # L7Rule now goes to ERROR
            self.repositories.l7rule.update(
                lock_session, db_l7rule.id,
                provisioning_status=constants.ERROR)
    db_l7rule = self._get_db_l7rule(context.session, id)
    result = self._convert_db_to_type(db_l7rule,
                                      l7rule_types.L7RuleResponse)
    return l7rule_types.L7RuleRootResponse(rule=result)
def delete(self, id): """Deletes a l7policy.""" context = pecan.request.context.get('octavia_context') db_l7policy = self._get_db_l7policy(context.session, id, show_deleted=False) load_balancer_id, listener_id = self._get_listener_and_loadbalancer_id( db_l7policy) self._auth_validate_action(context, db_l7policy.project_id, constants.RBAC_DELETE) if db_l7policy.provisioning_status == constants.DELETED: return self._test_lb_and_listener_statuses(context.session, lb_id=load_balancer_id, listener_ids=[listener_id]) self.repositories.l7policy.update( context.session, db_l7policy.id, provisioning_status=constants.PENDING_DELETE) try: LOG.info("Sending Deletion of L7Policy %s to handler", db_l7policy.id) self.handler.delete(db_l7policy) except Exception: with excutils.save_and_reraise_exception( reraise=False), db_api.get_lock_session() as lock_session: self._reset_lb_and_listener_statuses( lock_session, lb_id=load_balancer_id, listener_id=db_l7policy.listener_id) # L7Policy now goes to ERROR self.repositories.l7policy.update( lock_session, db_l7policy.id, provisioning_status=constants.ERROR)
def delete(self, id): """Deletes a l7rule.""" context = pecan.request.context.get('octavia_context') db_l7rule = self._get_db_l7rule(context.session, id, show_deleted=False) db_l7policy = self._get_db_l7policy(context.session, self.l7policy_id, show_deleted=False) load_balancer_id, listener_id = self._get_listener_and_loadbalancer_id( db_l7policy) project_id, provider = self._get_lb_project_id_provider( context.session, load_balancer_id) self._auth_validate_action(context, project_id, constants.RBAC_DELETE) if db_l7rule.provisioning_status == constants.DELETED: return # Load the driver early as it also provides validation driver = driver_factory.get_driver(provider) with db_api.get_lock_session() as lock_session: self._test_lb_listener_policy_statuses(lock_session) self.repositories.l7rule.update( lock_session, db_l7rule.id, provisioning_status=constants.PENDING_DELETE) LOG.info("Sending delete L7 Rule %s to provider %s", id, driver.name) provider_l7rule = ( driver_utils.db_l7rule_to_provider_l7rule(db_l7rule)) driver_utils.call_provider(driver.name, driver.l7rule_delete, provider_l7rule)
def delete(self, id): """Deletes a pool from a load balancer.""" context = pecan.request.context.get('octavia_context') db_pool = self._get_db_pool(context.session, id) if len(db_pool.l7policies) > 0: raise exceptions.PoolInUseByL7Policy( id=db_pool.id, l7policy_id=db_pool.l7policies[0].id) self._auth_validate_action(context, db_pool.project_id, constants.RBAC_DELETE) if db_pool.provisioning_status == constants.DELETED: return self._test_lb_and_listener_statuses( context.session, lb_id=db_pool.load_balancer_id, listener_ids=self._get_affected_listener_ids(db_pool)) self.repositories.pool.update( context.session, db_pool.id, provisioning_status=constants.PENDING_DELETE) try: LOG.info("Sending Deletion of Pool %s to handler", db_pool.id) self.handler.delete(db_pool) except Exception: with excutils.save_and_reraise_exception( reraise=False), db_api.get_lock_session() as lock_session: self._reset_lb_and_listener_statuses( lock_session, lb_id=db_pool.load_balancer_id, listener_ids=self._get_affected_listener_ids(db_pool)) # Pool now goes to ERROR self.repositories.pool.update( lock_session, db_pool.id, provisioning_status=constants.ERROR)
def put(self, id, l7policy_):
    """Updates a l7policy."""
    l7policy = l7policy_.l7policy
    context = pecan_request.context.get('octavia_context')
    db_l7policy = self._get_db_l7policy(context.session, id,
                                        show_deleted=False)
    load_balancer_id, listener_id = self._get_listener_and_loadbalancer_id(
        db_l7policy)
    project_id, provider = self._get_lb_project_id_provider(
        context.session, load_balancer_id)

    self._auth_validate_action(context, project_id, constants.RBAC_PUT)

    l7policy_dict = validate.sanitize_l7policy_api_args(
        l7policy.to_dict(render_unsets=False))
    # Reset renamed attributes
    for attr, val in l7policy_types.L7PolicyPUT._type_to_model_map.items():
        if val in l7policy_dict:
            l7policy_dict[attr] = l7policy_dict.pop(val)
    sanitized_l7policy = l7policy_types.L7PolicyPUT(**l7policy_dict)

    listener = self._get_db_listener(context.session,
                                     db_l7policy.listener_id)

    # Make sure any specified redirect_pool_id exists
    if l7policy_dict.get('redirect_pool_id'):
        db_pool = self._get_db_pool(context.session,
                                    l7policy_dict['redirect_pool_id'])
        self._validate_protocol(listener.protocol, db_pool.protocol)

    # Load the driver early as it also provides validation
    driver = driver_factory.get_driver(provider)

    with db_api.get_lock_session() as lock_session:
        self._test_lb_and_listener_statuses(lock_session,
                                            lb_id=load_balancer_id,
                                            listener_ids=[listener_id])

        # Prepare the data for the driver data model
        l7policy_dict = sanitized_l7policy.to_dict(render_unsets=False)
        l7policy_dict['id'] = id
        provider_l7policy_dict = (
            driver_utils.l7policy_dict_to_provider_dict(l7policy_dict))

        # Also prepare the baseline object data
        old_provider_l7policy = (
            driver_utils.db_l7policy_to_provider_l7policy(db_l7policy))

        # Dispatch to the driver
        LOG.info("Sending update L7 Policy %s to provider %s", id,
                 driver.name)
        driver_utils.call_provider(
            driver.name, driver.l7policy_update,
            old_provider_l7policy,
            driver_dm.L7Policy.from_dict(provider_l7policy_dict))

        # Update the database to reflect what the driver just accepted
        sanitized_l7policy.provisioning_status = constants.PENDING_UPDATE
        db_l7policy_dict = sanitized_l7policy.to_dict(render_unsets=False)
        self.repositories.l7policy.update(lock_session, id,
                                          **db_l7policy_dict)

    # Force SQL alchemy to query the DB, otherwise we get inconsistent
    # results
    context.session.expire_all()

    db_l7policy = self._get_db_l7policy(context.session, id)
    result = self._convert_db_to_type(db_l7policy,
                                      l7policy_types.L7PolicyResponse)
    return l7policy_types.L7PolicyRootResponse(l7policy=result)
def delete(self, id): """Deletes a listener from a load balancer.""" context = pecan.request.context.get('octavia_context') db_listener = self._get_db_listener(context.session, id, show_deleted=False) load_balancer_id = db_listener.load_balancer_id project_id, provider = self._get_lb_project_id_provider( context.session, load_balancer_id) self._auth_validate_action(context, project_id, constants.RBAC_DELETE) # Load the driver early as it also provides validation driver = driver_factory.get_driver(provider) with db_api.get_lock_session() as lock_session: self._test_lb_and_listener_statuses( lock_session, load_balancer_id, id=id, listener_status=constants.PENDING_DELETE) LOG.info("Sending delete Listener %s to provider %s", id, driver.name) provider_listener = ( driver_utils.db_listener_to_provider_listener(db_listener)) driver_utils.call_provider(driver.name, driver.listener_delete, provider_listener) # Revoke access of octavia service user to certificates tls_refs = [] for sni in db_listener.sni_containers: filters = {'tls_container_id': sni.tls_container_id} snis = self.repositories.sni.get_all(context.session, **filters)[0] if len(snis) == 1: # referred only once, enqueue for access revoking tls_refs.append(sni.tls_container_id) else: blocking_listeners = [s.listener_id for s in snis if s.listener_id != id] LOG.debug("Listeners %s using TLS ref %s. Access to TLS ref " "will not be revoked.", blocking_listeners, sni.tls_container_id) if db_listener.tls_certificate_id: filters = {'tls_certificate_id': db_listener.tls_certificate_id} # Note get_all returns the list and links. We only want the list. listeners = self.repositories.listener.get_all( context.session, show_deleted=False, **filters)[0] if len(listeners) == 1: # referred only once, enqueue for access revoking tls_refs.append(db_listener.tls_certificate_id) else: blocking_listeners = [l.id for l in listeners if l.id != id] LOG.debug("Listeners %s using TLS ref %s. Access to TLS ref " "will not be revoked.", blocking_listeners, db_listener.tls_certificate_id) for ref in tls_refs: try: self.cert_manager.unset_acls(context, ref) except Exception: # certificate may have been removed already pass
def put(self, id, listener_):
    """Updates a listener on a load balancer."""
    listener = listener_.listener
    context = pecan.request.context.get('octavia_context')
    db_listener = self._get_db_listener(context.session, id,
                                        show_deleted=False)
    load_balancer_id = db_listener.load_balancer_id

    project_id, provider = self._get_lb_project_id_provider(
        context.session, load_balancer_id)

    self._auth_validate_action(context, project_id, constants.RBAC_PUT)

    # TODO(rm_work): Do we need something like this? What do we do on an
    # empty body for a PUT?
    if not listener:
        raise exceptions.ValidationException(
            detail='No listener object supplied.')

    if (db_listener.protocol == constants.PROTOCOL_UDP and
            self._is_tls_or_insert_header(listener)):
        raise exceptions.ValidationException(detail=_(
            "%s protocol listener does not support TLS or header "
            "insertion.") % constants.PROTOCOL_UDP)

    if listener.default_pool_id:
        self._validate_pool(context.session, load_balancer_id,
                            listener.default_pool_id, db_listener.protocol)

    sni_containers = listener.sni_container_refs or []
    tls_refs = [sni for sni in sni_containers]
    if listener.default_tls_container_ref:
        tls_refs.append(listener.default_tls_container_ref)
    self._validate_tls_refs(tls_refs)

    # Load the driver early as it also provides validation
    driver = driver_factory.get_driver(provider)

    with db_api.get_lock_session() as lock_session:
        self._test_lb_and_listener_statuses(lock_session,
                                            load_balancer_id, id=id)

        # Prepare the data for the driver data model
        listener_dict = listener.to_dict(render_unsets=False)
        listener_dict['id'] = id
        provider_listener_dict = (
            driver_utils.listener_dict_to_provider_dict(listener_dict))

        # Also prepare the baseline object data
        old_provider_listener = (
            driver_utils.db_listener_to_provider_listener(db_listener))

        # Dispatch to the driver
        LOG.info("Sending update Listener %s to provider %s", id,
                 driver.name)
        driver_utils.call_provider(
            driver.name, driver.listener_update,
            old_provider_listener,
            driver_dm.Listener.from_dict(provider_listener_dict))

        # Update the database to reflect what the driver just accepted
        self.repositories.listener.update(
            lock_session, id, **listener.to_dict(render_unsets=False))

    # Force SQL alchemy to query the DB, otherwise we get inconsistent
    # results
    context.session.expire_all()

    db_listener = self._get_db_listener(context.session, id)
    result = self._convert_db_to_type(db_listener,
                                      listener_types.ListenerResponse)
    return listener_types.ListenerRootResponse(listener=result)
def put(self, members_):
    """Updates all members."""
    members = members_.members
    context = pecan.request.context.get('octavia_context')

    db_pool = self._get_db_pool(context.session, self.pool_id)
    old_members = db_pool.members

    project_id, provider = self._get_lb_project_id_provider(
        context.session, db_pool.load_balancer_id)

    # Check POST+PUT+DELETE since this operation is all of 'CUD'
    self._auth_validate_action(context, project_id, constants.RBAC_POST)
    self._auth_validate_action(context, project_id, constants.RBAC_PUT)
    self._auth_validate_action(context, project_id, constants.RBAC_DELETE)

    # Validate member subnets
    for member in members:
        if member.subnet_id and not validate.subnet_exists(
                member.subnet_id):
            raise exceptions.NotFound(resource='Subnet',
                                      id=member.subnet_id)

    # Load the driver early as it also provides validation
    driver = driver_factory.get_driver(provider)

    with db_api.get_lock_session() as lock_session:
        self._test_lb_and_listener_and_pool_statuses(lock_session)

        member_count_diff = len(members) - len(old_members)
        if member_count_diff > 0 and self.repositories.check_quota_met(
                context.session, lock_session, data_models.Member,
                db_pool.project_id, count=member_count_diff):
            raise exceptions.QuotaException(
                resource=data_models.Member._name())

        old_member_uniques = {(m.ip_address, m.protocol_port): m.id
                              for m in old_members}
        new_member_uniques = [(m.address, m.protocol_port)
                              for m in members]

        # Find members that are brand new or updated
        new_members = []
        updated_members = []
        for m in members:
            if (m.address, m.protocol_port) not in old_member_uniques:
                validate.ip_not_reserved(m.address)
                new_members.append(m)
            else:
                m.id = old_member_uniques[(m.address, m.protocol_port)]
                updated_members.append(m)

        # Find members that are deleted
        deleted_members = []
        for m in old_members:
            if (m.ip_address, m.protocol_port) not in new_member_uniques:
                deleted_members.append(m)

        provider_members = []
        # Create new members
        for m in new_members:
            m = m.to_dict(render_unsets=False)
            m['project_id'] = db_pool.project_id
            created_member = self._graph_create(lock_session, m)
            provider_member = driver_utils.db_member_to_provider_member(
                created_member)
            provider_members.append(provider_member)
        # Update old members
        for m in updated_members:
            m.provisioning_status = constants.PENDING_UPDATE
            db_member_dict = m.to_dict(render_unsets=False)
            db_member_dict.pop('id')
            self.repositories.member.update(lock_session, m.id,
                                            **db_member_dict)
            m.pool_id = self.pool_id
            provider_members.append(
                driver_utils.db_member_to_provider_member(m))
        # Delete old members
        for m in deleted_members:
            self.repositories.member.update(
                lock_session, m.id,
                provisioning_status=constants.PENDING_DELETE)

        # Dispatch to the driver
        LOG.info("Sending Pool %s batch member update to provider %s",
                 db_pool.id, driver.name)
        driver_utils.call_provider(driver.name, driver.member_batch_update,
                                   provider_members)
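# Note the quota check above only fires on net growth: member_count_diff
# is the requested total minus the current total, and shrinking batches
# (diff <= 0) bypass the quota test entirely. A tiny standalone sketch of
# that guard (toy quota model with illustrative names, not the real
# check_quota_met signature):

def quota_allows(current_count, requested_count, quota_limit):
    diff = requested_count - current_count
    if diff <= 0:
        return True  # shrinking or same size never trips quota
    return current_count + diff <= quota_limit


assert quota_allows(current_count=8, requested_count=10, quota_limit=10)
assert not quota_allows(current_count=8, requested_count=11,
                        quota_limit=10)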
def put(self, id, l7rule_):
    """Updates a l7rule."""
    l7rule = l7rule_.rule
    context = pecan.request.context.get('octavia_context')
    db_l7rule = self._get_db_l7rule(context.session, id,
                                    show_deleted=False)
    new_l7rule = db_l7rule.to_dict()
    new_l7rule.update(l7rule.to_dict())
    new_l7rule = data_models.L7Rule.from_dict(new_l7rule)

    db_l7policy = self._get_db_l7policy(context.session, self.l7policy_id,
                                        show_deleted=False)
    load_balancer_id, listener_id = self._get_listener_and_loadbalancer_id(
        db_l7policy)
    project_id, provider = self._get_lb_project_id_provider(
        context.session, load_balancer_id)

    self._auth_validate_action(context, project_id, constants.RBAC_PUT)

    try:
        validate.l7rule_data(new_l7rule)
    except Exception as e:
        raise exceptions.L7RuleValidation(error=e)

    # Load the driver early as it also provides validation
    driver = driver_factory.get_driver(provider)

    with db_api.get_lock_session() as lock_session:
        self._test_lb_listener_policy_statuses(lock_session)

        # Prepare the data for the driver data model
        l7rule_dict = l7rule.to_dict(render_unsets=False)
        l7rule_dict['id'] = id
        provider_l7rule_dict = (
            driver_utils.l7rule_dict_to_provider_dict(l7rule_dict))

        # Also prepare the baseline object data
        old_provider_l7rule = driver_utils.db_l7rule_to_provider_l7rule(
            db_l7rule)

        # Dispatch to the driver
        LOG.info("Sending update L7 Rule %s to provider %s", id,
                 driver.name)
        driver_utils.call_provider(
            driver.name, driver.l7rule_update,
            old_provider_l7rule,
            driver_dm.L7Rule.from_dict(provider_l7rule_dict))

        # Update the database to reflect what the driver just accepted
        l7rule.provisioning_status = constants.PENDING_UPDATE
        db_l7rule_dict = l7rule.to_dict(render_unsets=False)
        self.repositories.l7rule.update(lock_session, id, **db_l7rule_dict)

    # Force SQL alchemy to query the DB, otherwise we get inconsistent
    # results
    context.session.expire_all()

    db_l7rule = self._get_db_l7rule(context.session, id)
    result = self._convert_db_to_type(db_l7rule,
                                      l7rule_types.L7RuleResponse)
    return l7rule_types.L7RuleRootResponse(rule=result)
def put(self, id, pool_):
    """Updates a pool on a load balancer."""
    pool = pool_.pool
    context = pecan.request.context.get('octavia_context')
    db_pool = self._get_db_pool(context.session, id, show_deleted=False)

    project_id, provider = self._get_lb_project_id_provider(
        context.session, db_pool.load_balancer_id)

    if (pool.session_persistence and
            not pool.session_persistence.type and
            db_pool.session_persistence and
            db_pool.session_persistence.type):
        pool.session_persistence.type = db_pool.session_persistence.type

    self._auth_validate_action(context, project_id, constants.RBAC_PUT)

    if db_pool.protocol == constants.PROTOCOL_UDP:
        self._validate_pool_request_for_udp(pool)
    else:
        if (pool.session_persistence and (
                pool.session_persistence.persistence_timeout or
                pool.session_persistence.persistence_granularity)):
            raise exceptions.ValidationException(
                detail=_("persistence_timeout and persistence_granularity "
                         "are only for UDP protocol pools."))

    if pool.session_persistence:
        sp_dict = pool.session_persistence.to_dict(render_unsets=False)
        validate.check_session_persistence(sp_dict)

    # Load the driver early as it also provides validation
    driver = driver_factory.get_driver(provider)

    with db_api.get_lock_session() as lock_session:
        # Check the statuses under the lock session so they cannot
        # change out from under us before the update is staged.
        self._test_lb_and_listener_statuses(
            lock_session, lb_id=db_pool.load_balancer_id,
            listener_ids=self._get_affected_listener_ids(db_pool))

        # Prepare the data for the driver data model
        pool_dict = pool.to_dict(render_unsets=False)
        pool_dict['id'] = id
        provider_pool_dict = (
            driver_utils.pool_dict_to_provider_dict(pool_dict))

        # Also prepare the baseline object data
        old_provider_pool = driver_utils.db_pool_to_provider_pool(db_pool)

        # Dispatch to the driver
        LOG.info("Sending update Pool %s to provider %s", id, driver.name)
        driver_utils.call_provider(
            driver.name, driver.pool_update,
            old_provider_pool,
            driver_dm.Pool.from_dict(provider_pool_dict))

        # Update the database to reflect what the driver just accepted
        pool.provisioning_status = constants.PENDING_UPDATE
        db_pool_dict = pool.to_dict(render_unsets=False)
        self.repositories.update_pool_and_sp(lock_session, id, db_pool_dict)

    # Force SQL alchemy to query the DB, otherwise we get inconsistent
    # results
    context.session.expire_all()

    db_pool = self._get_db_pool(context.session, id)
    result = self._convert_db_to_type(db_pool, pool_types.PoolResponse)
    return pool_types.PoolRootResponse(pool=result)
def put(self, additive_only=False, members_=None):
    """Updates all members."""
    members = members_.members
    additive_only = strutils.bool_from_string(additive_only)
    context = pecan_request.context.get('octavia_context')

    db_pool = self._get_db_pool(context.session, self.pool_id)
    old_members = db_pool.members

    project_id, provider = self._get_lb_project_id_provider(
        context.session, db_pool.load_balancer_id)

    # Check POST+PUT+DELETE since this operation is all of 'CUD'
    self._auth_validate_action(context, project_id, constants.RBAC_POST)
    self._auth_validate_action(context, project_id, constants.RBAC_PUT)
    if not additive_only:
        self._auth_validate_action(context, project_id,
                                   constants.RBAC_DELETE)

    # Validate member subnets
    for member in members:
        if member.subnet_id and not validate.subnet_exists(
                member.subnet_id, context=context):
            raise exceptions.NotFound(resource='Subnet',
                                      id=member.subnet_id)

    # Load the driver early as it also provides validation
    driver = driver_factory.get_driver(provider)

    with db_api.get_lock_session() as lock_session:
        self._test_lb_and_listener_and_pool_statuses(lock_session)

        old_member_uniques = {(m.ip_address, m.protocol_port): m.id
                              for m in old_members}
        new_member_uniques = [(m.address, m.protocol_port)
                              for m in members]

        # Find members that are brand new or updated
        new_members = []
        updated_members = []
        for m in members:
            if (m.address, m.protocol_port) not in old_member_uniques:
                validate.ip_not_reserved(m.address)
                new_members.append(m)
            else:
                m.id = old_member_uniques[(m.address, m.protocol_port)]
                updated_members.append(m)

        # Find members that are deleted
        deleted_members = []
        for m in old_members:
            if (m.ip_address, m.protocol_port) not in new_member_uniques:
                deleted_members.append(m)

        if not (deleted_members or new_members or updated_members):
            LOG.info("Member batch update is a noop, rolling back and "
                     "returning early.")
            lock_session.rollback()
            return

        if additive_only:
            member_count_diff = len(new_members)
        else:
            member_count_diff = len(new_members) - len(deleted_members)
        if member_count_diff > 0 and self.repositories.check_quota_met(
                context.session, lock_session, data_models.Member,
                db_pool.project_id, count=member_count_diff):
            raise exceptions.QuotaException(
                resource=data_models.Member._name())

        provider_members = []
        # Create new members
        for m in new_members:
            m = m.to_dict(render_unsets=False)
            m['project_id'] = db_pool.project_id
            created_member = self._graph_create(lock_session, m)
            provider_member = driver_utils.db_member_to_provider_member(
                created_member)
            provider_members.append(provider_member)
        # Update old members
        for m in updated_members:
            m.provisioning_status = constants.PENDING_UPDATE
            m.project_id = db_pool.project_id
            db_member_dict = m.to_dict(render_unsets=False)
            db_member_dict.pop('id')
            self.repositories.member.update(lock_session, m.id,
                                            **db_member_dict)
            m.pool_id = self.pool_id
            provider_members.append(
                driver_utils.db_member_to_provider_member(m))
        # Delete old members
        for m in deleted_members:
            if additive_only:
                # Members are appended to provider_members and their
                # status remains unchanged, because they are logically
                # "untouched".
                db_member_dict = m.to_dict(render_unsets=False)
                db_member_dict.pop('id')
                m.pool_id = self.pool_id
                provider_members.append(
                    driver_utils.db_member_to_provider_member(m))
            else:
                # Members are changed to PENDING_DELETE and not passed
                # to the driver.
                self.repositories.member.update(
                    lock_session, m.id,
                    provisioning_status=constants.PENDING_DELETE)

        # Dispatch to the driver
        LOG.info("Sending Pool %s batch member update to provider %s",
                 db_pool.id, driver.name)
        driver_utils.call_provider(driver.name, driver.member_batch_update,
                                   db_pool.id, provider_members)
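# All of the batch-update variants above key members on the
# (address, protocol_port) pair to split a request into creates, updates,
# and deletes. A standalone sketch of that diff logic (plain tuples
# instead of Octavia data models; names are illustrative):

def diff_members(old_members, new_members):
    # old_members: iterable of (member_id, address, port) for the pool.
    # new_members: iterable of (address, port) from the request body.
    old_by_key = {(addr, port): mid for mid, addr, port in old_members}
    new_keys = set(new_members)

    to_create = [k for k in new_members if k not in old_by_key]
    to_update = [old_by_key[k] for k in new_members if k in old_by_key]
    to_delete = [mid for key, mid in old_by_key.items()
                 if key not in new_keys]
    return to_create, to_update, to_delete


# Example: one member added, one kept (updated), one removed.
assert diff_members(
    [('m1', '10.0.0.1', 80), ('m2', '10.0.0.2', 80)],
    [('10.0.0.1', 80), ('10.0.0.3', 80)],
) == ([('10.0.0.3', 80)], ['m1'], ['m2'])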
def put(self, members_):
    """Updates all members."""
    members = members_.members
    context = pecan.request.context.get('octavia_context')

    db_pool = self._get_db_pool(context.session, self.pool_id)
    old_members = db_pool.members

    # Check POST+PUT+DELETE since this operation is all of 'CUD'
    self._auth_validate_action(context, db_pool.project_id,
                               constants.RBAC_POST)
    self._auth_validate_action(context, db_pool.project_id,
                               constants.RBAC_PUT)
    self._auth_validate_action(context, db_pool.project_id,
                               constants.RBAC_DELETE)

    with db_api.get_lock_session() as lock_session:
        self._test_lb_and_listener_and_pool_statuses(lock_session)

        member_count_diff = len(members) - len(old_members)
        if member_count_diff > 0 and self.repositories.check_quota_met(
                context.session, lock_session, data_models.Member,
                db_pool.project_id, count=member_count_diff):
            raise exceptions.QuotaException

        old_member_uniques = {(m.ip_address, m.protocol_port): m.id
                              for m in old_members}
        new_member_uniques = [(m.address, m.protocol_port)
                              for m in members]

        # Find members that are brand new or updated
        new_members = []
        updated_members = []
        for m in members:
            if (m.address, m.protocol_port) not in old_member_uniques:
                new_members.append(m)
            else:
                m.id = old_member_uniques[(m.address, m.protocol_port)]
                updated_members.append(m)

        # Find members that are deleted
        deleted_members = []
        for m in old_members:
            if (m.ip_address, m.protocol_port) not in new_member_uniques:
                deleted_members.append(m)

        # Create new members
        new_members_created = []
        for m in new_members:
            m = m.to_dict(render_unsets=False)
            m['project_id'] = db_pool.project_id
            new_members_created.append(
                self._graph_create(lock_session, m))
        # Update old members
        for m in updated_members:
            self.repositories.member.update(
                lock_session, m.id,
                provisioning_status=constants.PENDING_UPDATE)
        # Delete old members
        for m in deleted_members:
            self.repositories.member.update(
                lock_session, m.id,
                provisioning_status=constants.PENDING_DELETE)

        LOG.info("Sending Full Member Update to handler")
        new_member_ids = [m.id for m in new_members_created]
        old_member_ids = [m.id for m in deleted_members]
        self.handler.batch_update(
            old_member_ids, new_member_ids, updated_members)
def put(self, id, l7policy_):
    """Updates a l7policy."""
    l7policy = l7policy_.l7policy
    l7policy_dict = validate.sanitize_l7policy_api_args(
        l7policy.to_dict(render_unsets=False))
    # Reset renamed attributes
    for attr, val in l7policy_types.L7PolicyPUT._type_to_model_map.items():
        if val in l7policy_dict:
            l7policy_dict[attr] = l7policy_dict.pop(val)
    sanitized_l7policy = l7policy_types.L7PolicyPUT(**l7policy_dict)

    context = pecan.request.context.get('octavia_context')

    # Make sure any specified redirect_pool_id exists
    if l7policy_dict.get('redirect_pool_id'):
        db_pool = self._get_db_pool(context.session,
                                    l7policy_dict['redirect_pool_id'])
        self._escape_l7policy_udp_pool_request(db_pool)
    db_l7policy = self._get_db_l7policy(context.session, id,
                                        show_deleted=False)
    load_balancer_id, listener_id = self._get_listener_and_loadbalancer_id(
        db_l7policy)
    project_id, provider = self._get_lb_project_id_provider(
        context.session, load_balancer_id)

    self._auth_validate_action(context, project_id, constants.RBAC_PUT)

    # Load the driver early as it also provides validation
    driver = driver_factory.get_driver(provider)

    with db_api.get_lock_session() as lock_session:
        self._test_lb_and_listener_statuses(lock_session,
                                            lb_id=load_balancer_id,
                                            listener_ids=[listener_id])

        # Prepare the data for the driver data model
        l7policy_dict = sanitized_l7policy.to_dict(render_unsets=False)
        l7policy_dict['id'] = id
        provider_l7policy_dict = (
            driver_utils.l7policy_dict_to_provider_dict(l7policy_dict))

        # Also prepare the baseline object data
        old_provider_l7policy = (
            driver_utils.db_l7policy_to_provider_l7policy(db_l7policy))

        # Dispatch to the driver
        LOG.info("Sending update L7 Policy %s to provider %s", id,
                 driver.name)
        driver_utils.call_provider(
            driver.name, driver.l7policy_update,
            old_provider_l7policy,
            driver_dm.L7Policy.from_dict(provider_l7policy_dict))

        # Update the database to reflect what the driver just accepted
        sanitized_l7policy.provisioning_status = constants.PENDING_UPDATE
        db_l7policy_dict = sanitized_l7policy.to_dict(render_unsets=False)
        self.repositories.l7policy.update(lock_session, id,
                                          **db_l7policy_dict)

    # Force SQL alchemy to query the DB, otherwise we get inconsistent
    # results
    context.session.expire_all()

    db_l7policy = self._get_db_l7policy(context.session, id)
    result = self._convert_db_to_type(db_l7policy,
                                      l7policy_types.L7PolicyResponse)
    return l7policy_types.L7PolicyRootResponse(l7policy=result)
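# Both l7policy PUT variants sanitize the request and then undo the
# type-to-model attribute renaming with a static map before rebuilding
# the L7PolicyPUT type. A tiny standalone sketch of that rename step (the
# map contents below are made up for illustration; the real map lives on
# l7policy_types.L7PolicyPUT):

_type_to_model_map = {'admin_state_up': 'enabled'}  # hypothetical entry


def reset_renamed_attrs(policy_dict, type_to_model_map):
    # Move values stored under the model attribute name back under the
    # API (type) attribute name.
    for attr, model_attr in type_to_model_map.items():
        if model_attr in policy_dict:
            policy_dict[attr] = policy_dict.pop(model_attr)
    return policy_dict


assert reset_renamed_attrs({'enabled': True}, _type_to_model_map) == (
    {'admin_state_up': True})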