def post(self, member_):
    """Creates a pool member on a pool.

    :param member_: A member POST wrapper; the member payload is under
        ``member_.member``.
    :returns: A MemberRootResponse describing the created member.
    :raises NotFound: When the requested member subnet does not exist.
    :raises ClusterQuotaException: When creating the member would exceed
        the per-pool cluster quota.
    :raises QuotaException: When creating the member would exceed the
        project member quota.
    """
    member = member_.member
    context = pecan.request.context.get('octavia_context')

    # Reject addresses the operator has reserved (e.g. VIPs).
    validate.ip_not_reserved(member.address)

    # Validate member subnet
    if member.subnet_id and not validate.subnet_exists(member.subnet_id):
        raise exceptions.NotFound(resource='Subnet', id=member.subnet_id)

    pool = self.repositories.pool.get(context.session, id=self.pool_id)
    member.project_id, provider = self._get_lb_project_id_provider(
        context.session, pool.load_balancer_id)

    self._auth_validate_action(context, member.project_id,
                               constants.RBAC_POST)

    # Load the driver early as it also provides validation
    driver = driver_factory.get_driver(provider)

    # Manually managed transaction: committed only after the provider
    # driver accepts the create, rolled back on any failure below.
    lock_session = db_api.get_session(autocommit=False)
    try:
        # Cluster quota is scoped to this pool; checked before the
        # per-project quota.
        if self.repositories.check_clusterquota_met(
                lock_session, data_models.Member,
                base_res_id=self.pool_id):
            raise exceptions.ClusterQuotaException(
                resource=data_models.Member._name())
        if self.repositories.check_quota_met(
                context.session, lock_session, data_models.Member,
                member.project_id):
            raise exceptions.QuotaException(
                resource=data_models.Member._name())

        member_dict = db_prepare.create_member(
            member.to_dict(render_unsets=True), self.pool_id,
            bool(pool.health_monitor))

        # Fails if the LB/listener/pool are not in a mutable state.
        self._test_lb_and_listener_and_pool_statuses(lock_session)

        db_member = self._validate_create_member(lock_session, member_dict)

        # Prepare the data for the driver data model
        provider_member = (
            driver_utils.db_member_to_provider_member(db_member))

        # Dispatch to the driver
        LOG.info("Sending create Member %s to provider %s",
                 db_member.id, driver.name)
        driver_utils.call_provider(driver.name, driver.member_create,
                                   provider_member)

        lock_session.commit()
    except Exception:
        with excutils.save_and_reraise_exception():
            lock_session.rollback()

    # Re-read so the response reflects the committed record.
    db_member = self._get_db_member(context.session, db_member.id)
    result = self._convert_db_to_type(db_member,
                                      member_types.MemberResponse)
    return member_types.MemberRootResponse(member=result)
def _graph_create(self, session, lock_session, pool_dict):
    """Create a pool graph: the pool plus any nested health monitor
    and members contained in ``pool_dict``.

    :param session: Database session used for quota accounting reads.
    :param lock_session: Locked session all of the creates run under.
    :param pool_dict: Pool attributes, possibly including nested
        'health_monitor' and 'members' definitions.
    :returns: The created pool data model, with ``health_monitor`` and
        ``members`` attributes populated.
    :raises QuotaException: When the health monitor or member quota
        would be exceeded.
    :raises ValidationException: When a UDP/SCTP-only monitor type is
        requested for a pool that is not UDP/SCTP.
    """
    load_balancer_id = pool_dict['load_balancer_id']
    pool_dict = db_prepare.create_pool(pool_dict, load_balancer_id)
    # Nested sub-resources are popped out and created separately below.
    members = pool_dict.pop('members', []) or []
    hm = pool_dict.pop('health_monitor', None)
    db_pool = self._validate_create_pool(lock_session, pool_dict)

    # Check quotas for healthmonitors
    if hm and self.repositories.check_quota_met(
            session, lock_session, data_models.HealthMonitor,
            db_pool.project_id):
        raise exceptions.QuotaException(
            resource=data_models.HealthMonitor._name())

    # Now possibly create a healthmonitor
    if hm:
        hm[constants.POOL_ID] = db_pool.id
        hm[constants.PROJECT_ID] = db_pool.project_id
        new_hm = health_monitor.HealthMonitorController()._graph_create(
            lock_session, hm)
        if db_pool.protocol in (constants.PROTOCOL_UDP,
                                lib_consts.PROTOCOL_SCTP):
            # UDP/SCTP pools only allow a restricted set of monitor
            # settings; delegated to the health monitor controller.
            health_monitor.HealthMonitorController(
            )._validate_healthmonitor_request_for_udp_sctp(new_hm,
                                                           db_pool)
        else:
            # Conversely, UDP-CONNECT/SCTP monitors are only valid on
            # UDP/SCTP pools.
            if new_hm.type in (constants.HEALTH_MONITOR_UDP_CONNECT,
                               lib_consts.HEALTH_MONITOR_SCTP):
                raise exceptions.ValidationException(detail=_(
                    "The %(type)s type is only supported for pools of "
                    "type %(protocol)s.") % {
                        'type': new_hm.type,
                        'protocol': '/'.join((constants.PROTOCOL_UDP,
                                              lib_consts.PROTOCOL_SCTP))
                })
        db_pool.health_monitor = new_hm

    # Now check quotas for members
    if members and self.repositories.check_quota_met(
            session, lock_session, data_models.Member,
            db_pool.project_id, count=len(members)):
        raise exceptions.QuotaException(
            resource=data_models.Member._name())

    # Now create members
    new_members = []
    for m in members:
        validate.ip_not_reserved(m["ip_address"])

        m['project_id'] = db_pool.project_id
        new_members.append(
            member.MembersController(db_pool.id)._graph_create(
                lock_session, m))
    db_pool.members = new_members
    return db_pool
def test_ip_not_reserved(self):
    """Exercise validate.ip_not_reserved against the reserved_ips option."""
    # Reserve a single IPv4 address.
    self.conf.config(group="networking", reserved_ips=['198.51.100.4'])

    # A non-reserved IPv4 address passes silently.
    validate.ip_not_reserved('203.0.113.5')

    # The reserved IPv4 address is rejected.
    self.assertRaises(exceptions.InvalidOption,
                      validate.ip_not_reserved,
                      '198.51.100.4')

    # Reserve an IPv6 address (fully expanded form in config).
    self.conf.config(
        group="networking",
        reserved_ips=['2001:0DB8:0000:0000:0000:0000:0000:0005'])

    # A non-reserved IPv6 address passes silently.
    validate.ip_not_reserved('2001:0DB8::9')

    # The reserved IPv6 address is rejected in both the expanded and
    # the short-hand notation.
    for reserved_addr in ('2001:0DB8:0000:0000:0000:0000:0000:0005',
                          '2001:0DB8::5'):
        self.assertRaises(exceptions.InvalidOption,
                          validate.ip_not_reserved,
                          reserved_addr)
def _graph_create(self, session, lock_session, pool_dict):
    """Create a pool graph: the pool plus any nested health monitor
    and members contained in ``pool_dict``, enforcing both cluster
    (per-pool) and project quotas.

    :param session: Database session used for quota accounting reads.
    :param lock_session: Locked session all of the creates run under.
    :param pool_dict: Pool attributes, possibly including nested
        'health_monitor' and 'members' definitions.
    :returns: The created pool data model, with ``health_monitor`` and
        ``members`` attributes populated.
    :raises ClusterQuotaException: When a per-pool cluster quota would
        be exceeded.
    :raises QuotaException: When a project quota would be exceeded.
    """
    load_balancer_id = pool_dict['load_balancer_id']
    pool_dict = db_prepare.create_pool(pool_dict, load_balancer_id)
    # Nested sub-resources are popped out and created separately below.
    members = pool_dict.pop('members', []) or []
    hm = pool_dict.pop('health_monitor', None)
    db_pool = self._validate_create_pool(lock_session, pool_dict)

    # Check cluster quotas for healthmonitors
    if hm and self.repositories.check_clusterquota_met(
            lock_session, data_models.HealthMonitor,
            base_res_id=db_pool.id):
        raise exceptions.ClusterQuotaException(
            resource=data_models.HealthMonitor._name())

    # Check quotas for healthmonitors
    if hm and self.repositories.check_quota_met(
            session, lock_session, data_models.HealthMonitor,
            db_pool.project_id):
        raise exceptions.QuotaException(
            resource=data_models.HealthMonitor._name())

    # Now possibly create a healthmonitor
    new_hm = None
    if hm:
        hm['pool_id'] = db_pool.id
        hm['project_id'] = db_pool.project_id
        new_hm = health_monitor.HealthMonitorController()._graph_create(
            lock_session, hm)
        db_pool.health_monitor = new_hm

    # Now check cluster quotas for members (batch-counted)
    if members and self.repositories.check_clusterquota_met(
            lock_session, data_models.Member,
            base_res_id=db_pool.id, count=len(members)):
        raise exceptions.ClusterQuotaException(
            resource=data_models.Member._name())

    # Now check quotas for members
    if members and self.repositories.check_quota_met(
            session, lock_session, data_models.Member,
            db_pool.project_id, count=len(members)):
        raise exceptions.QuotaException(
            resource=data_models.Member._name())

    # Now create members
    new_members = []
    for m in members:
        validate.ip_not_reserved(m["ip_address"])

        m['project_id'] = db_pool.project_id
        new_members.append(
            member.MembersController(db_pool.id)._graph_create(
                lock_session, m))
    db_pool.members = new_members
    return db_pool
def post(self, member_):
    """Creates a pool member on a pool."""
    req_member = member_.member
    context = pecan.request.context.get('octavia_context')

    # Operator-reserved addresses may not be used as members.
    validate.ip_not_reserved(req_member.address)

    # Fail fast when the target subnet does not exist.
    if req_member.subnet_id:
        if not validate.subnet_exists(req_member.subnet_id):
            raise exceptions.NotFound(resource='Subnet',
                                      id=req_member.subnet_id)

    db_pool = self.repositories.pool.get(context.session, id=self.pool_id)
    req_member.project_id, provider_name = (
        self._get_lb_project_id_provider(context.session,
                                         db_pool.load_balancer_id))

    self._auth_validate_action(context, req_member.project_id,
                               constants.RBAC_POST)

    # Resolving the driver up front doubles as provider validation.
    provider_driver = driver_factory.get_driver(provider_name)

    # Manually managed transaction; only committed once the provider
    # driver has accepted the create.
    lock_session = db_api.get_session(autocommit=False)
    try:
        quota_exceeded = self.repositories.check_quota_met(
            context.session, lock_session, data_models.Member,
            req_member.project_id)
        if quota_exceeded:
            raise exceptions.QuotaException(
                resource=data_models.Member._name())

        member_post_dict = db_prepare.create_member(
            req_member.to_dict(render_unsets=True), self.pool_id,
            bool(db_pool.health_monitor))

        # Ensure the LB/listener/pool are in a mutable state.
        self._test_lb_and_listener_and_pool_statuses(lock_session)

        new_db_member = self._validate_create_member(lock_session,
                                                     member_post_dict)

        # Translate the DB record into the provider data model.
        provider_member = driver_utils.db_member_to_provider_member(
            new_db_member)

        # Hand the create off to the provider driver.
        LOG.info("Sending create Member %s to provider %s",
                 new_db_member.id, provider_driver.name)
        driver_utils.call_provider(provider_driver.name,
                                   provider_driver.member_create,
                                   provider_member)

        lock_session.commit()
    except Exception:
        with excutils.save_and_reraise_exception():
            lock_session.rollback()

    # Re-read the committed record for the API response.
    new_db_member = self._get_db_member(context.session, new_db_member.id)
    result = self._convert_db_to_type(new_db_member,
                                      member_types.MemberResponse)
    return member_types.MemberRootResponse(member=result)
def _graph_create(self, session, lock_session, pool_dict):
    """Create a pool together with any nested health monitor and
    members supplied in ``pool_dict``, enforcing project quotas.
    """
    lb_id = pool_dict['load_balancer_id']
    pool_dict = db_prepare.create_pool(pool_dict, lb_id)

    # Nested sub-resources are created separately after the pool.
    member_dicts = pool_dict.pop('members', []) or []
    hm_dict = pool_dict.pop('health_monitor', None)

    new_pool = self._validate_create_pool(lock_session, pool_dict)

    if hm_dict:
        # A nested health monitor counts against the project quota.
        if self.repositories.check_quota_met(
                session, lock_session, data_models.HealthMonitor,
                new_pool.project_id):
            raise exceptions.QuotaException(
                resource=data_models.HealthMonitor._name())

        hm_dict['pool_id'] = new_pool.id
        hm_dict['project_id'] = new_pool.project_id
        new_pool.health_monitor = (
            health_monitor.HealthMonitorController()._graph_create(
                lock_session, hm_dict))

    if member_dicts:
        # Nested members are quota-checked as one batch up front.
        if self.repositories.check_quota_met(
                session, lock_session, data_models.Member,
                new_pool.project_id, count=len(member_dicts)):
            raise exceptions.QuotaException(
                resource=data_models.Member._name())

    created_members = []
    for member_dict in member_dicts:
        validate.ip_not_reserved(member_dict["ip_address"])

        member_dict['project_id'] = new_pool.project_id
        created_members.append(
            member.MembersController(new_pool.id)._graph_create(
                lock_session, member_dict))
    new_pool.members = created_members
    return new_pool
def put(self, members_):
    """Updates all members.

    Batch-replaces the pool's member set with the supplied list:
    unmatched incoming members are created, members matched on
    (address, protocol_port) are updated, and existing members absent
    from the request are deleted.

    :param members_: A members PUT wrapper; the list is under
        ``members_.members``.
    :raises NotFound: When a requested member subnet does not exist.
    :raises QuotaException: When the net member increase would exceed
        the project quota.
    """
    members = members_.members
    context = pecan.request.context.get('octavia_context')

    db_pool = self._get_db_pool(context.session, self.pool_id)
    old_members = db_pool.members

    project_id, provider = self._get_lb_project_id_provider(
        context.session, db_pool.load_balancer_id)

    # Check POST+PUT+DELETE since this operation is all of 'CUD'
    self._auth_validate_action(context, project_id, constants.RBAC_POST)
    self._auth_validate_action(context, project_id, constants.RBAC_PUT)
    self._auth_validate_action(context, project_id, constants.RBAC_DELETE)

    # Validate member subnets
    for member in members:
        if member.subnet_id and not validate.subnet_exists(
                member.subnet_id):
            raise exceptions.NotFound(resource='Subnet',
                                      id=member.subnet_id)

    # Load the driver early as it also provides validation
    driver = driver_factory.get_driver(provider)

    with db_api.get_lock_session() as lock_session:
        # Fails if the LB/listener/pool are not in a mutable state.
        self._test_lb_and_listener_and_pool_statuses(lock_session)

        # Net change in member count; only positive growth is
        # quota-checked.
        member_count_diff = len(members) - len(old_members)
        if member_count_diff > 0 and self.repositories.check_quota_met(
                context.session, lock_session, data_models.Member,
                db_pool.project_id, count=member_count_diff):
            raise exceptions.QuotaException(
                resource=data_models.Member._name())

        # Members are matched on the (address, port) pair, which is
        # unique within a pool.
        old_member_uniques = {(m.ip_address, m.protocol_port): m.id
                              for m in old_members}
        new_member_uniques = [(m.address, m.protocol_port)
                              for m in members]

        # Find members that are brand new or updated
        new_members = []
        updated_members = []
        for m in members:
            if (m.address, m.protocol_port) not in old_member_uniques:
                # Reserved-IP validation only applies to new members.
                validate.ip_not_reserved(m.address)
                new_members.append(m)
            else:
                m.id = old_member_uniques[(m.address, m.protocol_port)]
                updated_members.append(m)

        # Find members that are deleted
        deleted_members = []
        for m in old_members:
            if (m.ip_address, m.protocol_port) not in new_member_uniques:
                deleted_members.append(m)

        provider_members = []
        # Create new members
        for m in new_members:
            m = m.to_dict(render_unsets=False)
            m['project_id'] = db_pool.project_id
            created_member = self._graph_create(lock_session, m)
            provider_member = driver_utils.db_member_to_provider_member(
                created_member)
            provider_members.append(provider_member)
        # Update old members
        for m in updated_members:
            m.provisioning_status = constants.PENDING_UPDATE
            db_member_dict = m.to_dict(render_unsets=False)
            # 'id' is the update key, not an updatable column.
            db_member_dict.pop('id')
            self.repositories.member.update(lock_session, m.id,
                                            **db_member_dict)
            m.pool_id = self.pool_id
            provider_members.append(
                driver_utils.db_member_to_provider_member(m))
        # Delete old members
        for m in deleted_members:
            self.repositories.member.update(
                lock_session, m.id,
                provisioning_status=constants.PENDING_DELETE)

        # Dispatch to the driver
        LOG.info("Sending Pool %s batch member update to provider %s",
                 db_pool.id, driver.name)
        driver_utils.call_provider(driver.name,
                                   driver.member_batch_update,
                                   provider_members)
def put(self, members_):
    """Updates all members.

    Replaces the pool's member set in one batch: incoming members not
    matching an existing (address, protocol_port) pair are created,
    matching ones are updated, and existing members missing from the
    request are marked for deletion.

    :param members_: A members PUT wrapper; the list is under
        ``members_.members``.
    :raises NotFound: When a requested member subnet does not exist.
    :raises QuotaException: When the net member increase would exceed
        the project quota.
    """
    members = members_.members
    context = pecan.request.context.get('octavia_context')

    db_pool = self._get_db_pool(context.session, self.pool_id)
    old_members = db_pool.members

    project_id, provider = self._get_lb_project_id_provider(
        context.session, db_pool.load_balancer_id)

    # Check POST+PUT+DELETE since this operation is all of 'CUD'
    self._auth_validate_action(context, project_id, constants.RBAC_POST)
    self._auth_validate_action(context, project_id, constants.RBAC_PUT)
    self._auth_validate_action(context, project_id, constants.RBAC_DELETE)

    # Validate member subnets
    for member in members:
        if member.subnet_id and not validate.subnet_exists(
                member.subnet_id):
            raise exceptions.NotFound(resource='Subnet',
                                      id=member.subnet_id)

    # Load the driver early as it also provides validation
    driver = driver_factory.get_driver(provider)

    with db_api.get_lock_session() as lock_session:
        # Fails if the LB/listener/pool are not in a mutable state.
        self._test_lb_and_listener_and_pool_statuses(lock_session)

        # Only net growth in member count is quota-checked.
        member_count_diff = len(members) - len(old_members)
        if member_count_diff > 0 and self.repositories.check_quota_met(
                context.session, lock_session, data_models.Member,
                db_pool.project_id, count=member_count_diff):
            raise exceptions.QuotaException(
                resource=data_models.Member._name())

        # (address, port) uniquely identifies a member within a pool.
        old_member_uniques = {
            (m.ip_address, m.protocol_port): m.id for m in old_members}
        new_member_uniques = [
            (m.address, m.protocol_port) for m in members]

        # Find members that are brand new or updated
        new_members = []
        updated_members = []
        for m in members:
            if (m.address, m.protocol_port) not in old_member_uniques:
                # Reserved-IP validation only applies to new members.
                validate.ip_not_reserved(m.address)
                new_members.append(m)
            else:
                m.id = old_member_uniques[(m.address, m.protocol_port)]
                updated_members.append(m)

        # Find members that are deleted
        deleted_members = []
        for m in old_members:
            if (m.ip_address, m.protocol_port) not in new_member_uniques:
                deleted_members.append(m)

        provider_members = []
        # Create new members
        for m in new_members:
            m = m.to_dict(render_unsets=False)
            m['project_id'] = db_pool.project_id
            created_member = self._graph_create(lock_session, m)
            provider_member = driver_utils.db_member_to_provider_member(
                created_member)
            provider_members.append(provider_member)
        # Update old members
        for m in updated_members:
            m.provisioning_status = constants.PENDING_UPDATE
            db_member_dict = m.to_dict(render_unsets=False)
            # 'id' is the update key, not an updatable column.
            db_member_dict.pop('id')
            self.repositories.member.update(
                lock_session, m.id, **db_member_dict)
            m.pool_id = self.pool_id
            provider_members.append(
                driver_utils.db_member_to_provider_member(m))
        # Delete old members
        for m in deleted_members:
            self.repositories.member.update(
                lock_session, m.id,
                provisioning_status=constants.PENDING_DELETE)

        # Dispatch to the driver
        LOG.info("Sending Pool %s batch member update to provider %s",
                 db_pool.id, driver.name)
        driver_utils.call_provider(
            driver.name, driver.member_batch_update, provider_members)
def put(self, additive_only=False, members_=None):
    """Updates all members.

    Batch member update. In the default (full-replace) mode, incoming
    members are created/updated and existing members absent from the
    request are deleted. With ``additive_only`` set, absent members are
    left untouched and DELETE RBAC is not required.

    :param additive_only: Truthy string/bool; when true, never delete
        existing members.
    :param members_: A members PUT wrapper; the list is under
        ``members_.members``. NOTE(review): defaults to None, and
        ``members_.members`` would raise AttributeError if it were ever
        omitted — presumably the WSME/pecan layer always supplies it;
        confirm against the controller's expose decorator.
    :raises NotFound: When a requested member subnet does not exist.
    :raises QuotaException: When the member increase would exceed the
        project quota.
    """
    members = members_.members
    # The query-parameter arrives as a string; normalize to bool.
    additive_only = strutils.bool_from_string(additive_only)
    context = pecan_request.context.get('octavia_context')

    db_pool = self._get_db_pool(context.session, self.pool_id)
    old_members = db_pool.members

    project_id, provider = self._get_lb_project_id_provider(
        context.session, db_pool.load_balancer_id)

    # Check POST+PUT+DELETE since this operation is all of 'CUD'
    self._auth_validate_action(context, project_id, constants.RBAC_POST)
    self._auth_validate_action(context, project_id, constants.RBAC_PUT)
    if not additive_only:
        # Deletes only happen in full-replace mode.
        self._auth_validate_action(context, project_id,
                                   constants.RBAC_DELETE)

    # Validate member subnets
    for member in members:
        if member.subnet_id and not validate.subnet_exists(
                member.subnet_id, context=context):
            raise exceptions.NotFound(resource='Subnet',
                                      id=member.subnet_id)

    # Load the driver early as it also provides validation
    driver = driver_factory.get_driver(provider)

    with db_api.get_lock_session() as lock_session:
        # Fails if the LB/listener/pool are not in a mutable state.
        self._test_lb_and_listener_and_pool_statuses(lock_session)

        # (address, port) uniquely identifies a member within a pool.
        old_member_uniques = {(m.ip_address, m.protocol_port): m.id
                              for m in old_members}
        new_member_uniques = [(m.address, m.protocol_port)
                              for m in members]

        # Find members that are brand new or updated
        new_members = []
        updated_members = []
        for m in members:
            if (m.address, m.protocol_port) not in old_member_uniques:
                # Reserved-IP validation only applies to new members.
                validate.ip_not_reserved(m.address)
                new_members.append(m)
            else:
                m.id = old_member_uniques[(m.address, m.protocol_port)]
                updated_members.append(m)

        # Find members that are deleted
        deleted_members = []
        for m in old_members:
            if (m.ip_address, m.protocol_port) not in new_member_uniques:
                deleted_members.append(m)

        if not (deleted_members or new_members or updated_members):
            # Nothing changed; skip the driver call entirely.
            LOG.info("Member batch update is a noop, rolling back and "
                     "returning early.")
            lock_session.rollback()
            return

        # Quota check uses the net change (or pure additions when
        # additive_only); only positive growth is checked.
        if additive_only:
            member_count_diff = len(new_members)
        else:
            member_count_diff = len(new_members) - len(deleted_members)
        if member_count_diff > 0 and self.repositories.check_quota_met(
                context.session, lock_session, data_models.Member,
                db_pool.project_id, count=member_count_diff):
            raise exceptions.QuotaException(
                resource=data_models.Member._name())

        provider_members = []
        # Create new members
        for m in new_members:
            m = m.to_dict(render_unsets=False)
            m['project_id'] = db_pool.project_id
            created_member = self._graph_create(lock_session, m)
            provider_member = driver_utils.db_member_to_provider_member(
                created_member)
            provider_members.append(provider_member)
        # Update old members
        for m in updated_members:
            m.provisioning_status = constants.PENDING_UPDATE
            m.project_id = db_pool.project_id
            db_member_dict = m.to_dict(render_unsets=False)
            # 'id' is the update key, not an updatable column.
            db_member_dict.pop('id')
            self.repositories.member.update(lock_session, m.id,
                                            **db_member_dict)
            m.pool_id = self.pool_id
            provider_members.append(
                driver_utils.db_member_to_provider_member(m))
        # Delete old members
        for m in deleted_members:
            if additive_only:
                # Members are appended to the dict and their status
                # remains unchanged, because they are logically
                # "untouched".
                db_member_dict = m.to_dict(render_unsets=False)
                db_member_dict.pop('id')
                m.pool_id = self.pool_id
                provider_members.append(
                    driver_utils.db_member_to_provider_member(m))
            else:
                # Members are changed to PENDING_DELETE and not passed.
                self.repositories.member.update(
                    lock_session, m.id,
                    provisioning_status=constants.PENDING_DELETE)

        # Dispatch to the driver
        LOG.info("Sending Pool %s batch member update to provider %s",
                 db_pool.id, driver.name)
        driver_utils.call_provider(driver.name,
                                   driver.member_batch_update,
                                   db_pool.id, provider_members)