def _validate_port_and_fill_or_validate_subnet(load_balancer):
    """Validate a request VIP port and derive network/subnet/QoS from it.

    Confirms the port exists and is unused, records its network on the
    load balancer, inherits any port-level QoS policy, and resolves the
    VIP subnet either from an explicit subnet id, from the requested VIP
    address, or from the port's single fixed IP.

    :raises exceptions.ValidationException: when the subnet cannot be
        determined from the supplied VIP information.
    """
    vip_port = validate.port_exists(port_id=load_balancer.vip_port_id)
    validate.check_port_in_use(vip_port)
    load_balancer.vip_network_id = vip_port.network_id

    # A QoS policy already attached to the VIP port is inherited, but an
    # explicit 'vip_qos_policy_id' in the request always takes priority.
    inherited_qos_id = vip_port.qos_policy_id
    if inherited_qos_id and isinstance(load_balancer.vip_qos_policy_id,
                                       wtypes.UnsetType):
        load_balancer.vip_qos_policy_id = inherited_qos_id

    # Resolve which subnet on the port the VIP lives in.
    if load_balancer.vip_subnet_id:
        validate.subnet_exists(subnet_id=load_balancer.vip_subnet_id)
    elif load_balancer.vip_address:
        # Match the requested address against the port's fixed IPs.
        candidate_subnets = [
            fixed_ip.subnet_id for fixed_ip in vip_port.fixed_ips
            if fixed_ip.ip_address == load_balancer.vip_address]
        if not candidate_subnets:
            raise exceptions.ValidationException(
                detail=_("Specified VIP address not found on the "
                         "specified VIP port."))
        load_balancer.vip_subnet_id = candidate_subnets[0]
    elif len(vip_port.fixed_ips) == 1:
        # Unambiguous: the port has exactly one fixed IP.
        load_balancer.vip_subnet_id = vip_port.fixed_ips[0].subnet_id
    else:
        raise exceptions.ValidationException(detail=_(
            "VIP port's subnet could not be determined. Please "
            "specify either a VIP subnet or address."))
def _validate_port_and_fill_or_validate_subnet(load_balancer):
    """Validate the VIP port on the request and fill in VIP details.

    Checks the port exists and is not already in use, copies its network
    id onto the load balancer, inherits the port's QoS policy when the
    request did not set one, and then determines the VIP subnet.

    :raises exceptions.ValidationException: when no subnet on the port
        matches the requested VIP information.
    """
    port = validate.port_exists(port_id=load_balancer.vip_port_id)
    validate.check_port_in_use(port)
    load_balancer.vip_network_id = port.network_id

    # If the port already carries a QoS policy, adopt it — unless the
    # request explicitly specified 'vip_qos_policy_id', which overrides
    # whatever is applied on the port.
    if port.qos_policy_id and isinstance(load_balancer.vip_qos_policy_id,
                                         wtypes.UnsetType):
        load_balancer.vip_qos_policy_id = port.qos_policy_id

    # Explicit subnet id: just confirm it exists and we are done.
    if load_balancer.vip_subnet_id:
        validate.subnet_exists(subnet_id=load_balancer.vip_subnet_id)
        return

    if load_balancer.vip_address:
        # Find the fixed IP on the port matching the requested address.
        found_subnet_id = next(
            (fixed_ip.subnet_id for fixed_ip in port.fixed_ips
             if fixed_ip.ip_address == load_balancer.vip_address),
            None)
        if found_subnet_id is None:
            raise exceptions.ValidationException(detail=_(
                "Specified VIP address not found on the "
                "specified VIP port."))
        load_balancer.vip_subnet_id = found_subnet_id
    elif len(port.fixed_ips) == 1:
        # Only one fixed IP on the port, so the subnet is unambiguous.
        load_balancer.vip_subnet_id = port.fixed_ips[0].subnet_id
    else:
        raise exceptions.ValidationException(detail=_(
            "VIP port's subnet could not be determined. Please "
            "specify either a VIP subnet or address."))
def post(self, member):
    """Creates a pool member on a pool.

    Checks quota under a dedicated lock session, creates the member in
    the database, then hands the new member to the handler. On handler
    failure, affected listeners are marked ERROR while the exception is
    re-raised to the caller.
    """
    context = pecan.request.context.get('octavia_context')
    member.project_id = self._get_lb_project_id(context.session,
                                                self.load_balancer_id)
    # Validate member subnet
    if member.subnet_id:
        validate.subnet_exists(member.subnet_id)
    # A non-autocommit session so quota check + create commit atomically.
    lock_session = db_api.get_session(autocommit=False)
    if self.repositories.check_quota_met(
            context.session, lock_session, data_models.Member,
            member.project_id):
        lock_session.rollback()
        raise exceptions.QuotaException(
            resource=data_models.Member._name()
        )
    try:
        member_dict = db_prepare.create_member(member.to_dict(
            render_unsets=True), self.pool_id)
        self._test_lb_and_listener_statuses(lock_session)
        db_member = self.repositories.member.create(lock_session,
                                                    **member_dict)
        # Fetch the full member record before committing so the response
        # reflects what was persisted.
        db_new_member = self._get_db_member(lock_session, db_member.id)
        lock_session.commit()
    except oslo_exc.DBDuplicateEntry as de:
        lock_session.rollback()
        # A duplicate on the 'id' column means the caller-supplied id
        # already exists; any other duplicate is a member uniqueness
        # violation (same address/port in the pool).
        if ['id'] == de.columns:
            raise exceptions.IDAlreadyExists()
        raise exceptions.DuplicateMemberEntry(
            ip_address=member_dict.get('ip_address'),
            port=member_dict.get('protocol_port'))
    except Exception:
        with excutils.save_and_reraise_exception():
            lock_session.rollback()
    try:
        LOG.info("Sending Creation of Member %s to handler", db_member.id)
        self.handler.create(db_member)
    except Exception:
        # Handler failed after the DB commit: flag every affected
        # listener as ERROR, but do not re-raise (reraise=False).
        for listener_id in self._get_affected_listener_ids(
                context.session):
            with excutils.save_and_reraise_exception(reraise=False):
                self.repositories.listener.update(
                    context.session, listener_id,
                    operating_status=constants.ERROR)
    return self._convert_db_to_type(db_new_member,
                                    member_types.MemberResponse)
def _validate_port_and_fill_or_validate_subnet(load_balancer, context=None):
    """Validate the request VIP port and fill in VIP address/subnet/QoS.

    Given a load balancer request that specified a VIP port, confirms the
    port exists and is free, inherits the port's QoS policy when the
    request did not set one, and cross-resolves VIP address <-> subnet
    from the port's fixed IPs.

    :param load_balancer: the (mutable) load balancer request object.
    :param context: request context forwarded to the network driver
        lookups for scoped access.
    :raises exceptions.ValidationException: when the VIP address/subnet
        cannot be resolved from the port.
    """
    port = validate.port_exists(port_id=load_balancer.vip_port_id,
                                context=context)
    validate.check_port_in_use(port)
    load_balancer.vip_network_id = port.network_id
    # validate the request vip port whether applied the qos_policy and
    # store the port_qos_policy to loadbalancer obj if possible. The
    # default behavior is that if 'vip_qos_policy_id' is specified in the
    # request, it will override the qos_policy applied on vip_port.
    port_qos_policy_id = port.qos_policy_id
    if (port_qos_policy_id and
            isinstance(load_balancer.vip_qos_policy_id, wtypes.UnsetType)):
        load_balancer.vip_qos_policy_id = port_qos_policy_id
    if load_balancer.vip_subnet_id:
        # If we were provided a subnet_id, validate it exists and that
        # there is a fixed_ip on the port that matches the provided subnet
        validate.subnet_exists(subnet_id=load_balancer.vip_subnet_id,
                               context=context)
        for port_fixed_ip in port.fixed_ips:
            if port_fixed_ip.subnet_id == load_balancer.vip_subnet_id:
                load_balancer.vip_address = port_fixed_ip.ip_address
                break  # Just pick the first address found in the subnet
        if not load_balancer.vip_address:
            raise exceptions.ValidationException(detail=_(
                "No VIP address found on the specified VIP port within "
                "the specified subnet."))
    elif load_balancer.vip_address:
        # Normalize both sides so textually different but equivalent
        # representations (e.g. IPv6 zero-compression) still match.
        normalized_lb_ip = ipaddress.ip_address(
            load_balancer.vip_address).compressed
        for port_fixed_ip in port.fixed_ips:
            normalized_port_ip = ipaddress.ip_address(
                port_fixed_ip.ip_address).compressed
            if normalized_port_ip == normalized_lb_ip:
                load_balancer.vip_subnet_id = port_fixed_ip.subnet_id
                break
        if not load_balancer.vip_subnet_id:
            raise exceptions.ValidationException(detail=_(
                "Specified VIP address not found on the "
                "specified VIP port."))
    elif len(port.fixed_ips) == 1:
        # User provided only a port, get the subnet and address from it
        load_balancer.vip_subnet_id = port.fixed_ips[0].subnet_id
        load_balancer.vip_address = port.fixed_ips[0].ip_address
    else:
        raise exceptions.ValidationException(detail=_(
            "VIP port's subnet could not be determined. Please "
            "specify either a VIP subnet or address."))
def test_subnet_exists_with_bad_subnet(self):
    """subnet_exists() evaluates to False when the driver lookup fails."""
    with mock.patch(
            'octavia.common.utils.get_network_driver') as net_mock:
        driver = net_mock.return_value
        driver.get_subnet = mock.Mock(
            side_effect=network_base.SubnetNotFound('Subnet not found'))
        missing_subnet_id = uuidutils.generate_uuid()
        self.assertEqual(validate.subnet_exists(missing_subnet_id), False)
def post(self, member_):
    """Creates a pool member on a pool.

    Validates the request (reserved IPs, subnet, RBAC), checks cluster
    and project quotas, creates the member in the database, and
    dispatches the create to the pool's provider driver — all inside a
    single lock session that is rolled back on any failure.
    """
    member = member_.member
    context = pecan.request.context.get('octavia_context')
    validate.ip_not_reserved(member.address)
    # Validate member subnet
    if member.subnet_id and not validate.subnet_exists(member.subnet_id):
        raise exceptions.NotFound(resource='Subnet', id=member.subnet_id)
    pool = self.repositories.pool.get(context.session, id=self.pool_id)
    member.project_id, provider = self._get_lb_project_id_provider(
        context.session, pool.load_balancer_id)
    self._auth_validate_action(context, member.project_id,
                               constants.RBAC_POST)
    # Load the driver early as it also provides validation
    driver = driver_factory.get_driver(provider)
    lock_session = db_api.get_session(autocommit=False)
    try:
        # Cluster-wide cap on members per pool, checked before the
        # per-project quota.
        if self.repositories.check_clusterquota_met(
                lock_session, data_models.Member,
                base_res_id=self.pool_id):
            raise exceptions.ClusterQuotaException(
                resource=data_models.Member._name())
        if self.repositories.check_quota_met(context.session,
                                             lock_session,
                                             data_models.Member,
                                             member.project_id):
            raise exceptions.QuotaException(
                resource=data_models.Member._name())
        member_dict = db_prepare.create_member(
            member.to_dict(render_unsets=True), self.pool_id,
            bool(pool.health_monitor))
        self._test_lb_and_listener_and_pool_statuses(lock_session)
        db_member = self._validate_create_member(lock_session,
                                                 member_dict)
        # Prepare the data for the driver data model
        provider_member = (
            driver_utils.db_member_to_provider_member(db_member))
        # Dispatch to the driver
        LOG.info("Sending create Member %s to provider %s", db_member.id,
                 driver.name)
        driver_utils.call_provider(driver.name, driver.member_create,
                                   provider_member)
        # Only commit once the driver accepted the create.
        lock_session.commit()
    except Exception:
        with excutils.save_and_reraise_exception():
            lock_session.rollback()
    db_member = self._get_db_member(context.session, db_member.id)
    result = self._convert_db_to_type(db_member,
                                      member_types.MemberResponse)
    return member_types.MemberRootResponse(member=result)
def test_subnet_exists_with_valid_subnet(self):
    """subnet_exists() returns the subnet when the driver resolves it."""
    subnet_id = uuidutils.generate_uuid()
    expected_subnet = network_models.Subnet(id=subnet_id)
    with mock.patch(
            'octavia.common.utils.get_network_driver') as net_mock:
        net_mock.return_value.get_subnet.return_value = expected_subnet
        result = validate.subnet_exists(subnet_id)
        self.assertEqual(result, expected_subnet)
def post(self, member_):
    """Creates a pool member on a pool.

    Validates subnet and RBAC, enforces the member quota, creates the
    member under a lock session, then forwards the new member to the
    handler for provisioning.
    """
    member = member_.member
    context = pecan.request.context.get('octavia_context')
    # Validate member subnet
    if member.subnet_id and not validate.subnet_exists(member.subnet_id):
        raise exceptions.NotFound(resource='Subnet', id=member.subnet_id)
    pool = self.repositories.pool.get(context.session, id=self.pool_id)
    member.project_id = self._get_lb_project_id(context.session,
                                                pool.load_balancer_id)
    self._auth_validate_action(context, member.project_id,
                               constants.RBAC_POST)
    lock_session = db_api.get_session(autocommit=False)
    try:
        if self.repositories.check_quota_met(context.session,
                                             lock_session,
                                             data_models.Member,
                                             member.project_id):
            # Raising the exception class; Python instantiates it with
            # no arguments.
            raise exceptions.QuotaException
        member_dict = db_prepare.create_member(
            member.to_dict(render_unsets=True), self.pool_id,
            bool(pool.health_monitor))
        self._test_lb_and_listener_and_pool_statuses(lock_session)
        db_member = self._validate_create_member(lock_session,
                                                 member_dict)
        lock_session.commit()
    except Exception:
        with excutils.save_and_reraise_exception():
            lock_session.rollback()
    return self._send_member_to_handler(context.session, db_member)
def post(self, load_balancer): """Creates a load balancer.""" # Validate the subnet id if load_balancer.vip.subnet_id: if not validate.subnet_exists(load_balancer.vip.subnet_id): raise exceptions.NotFound(resource='Subnet', id=load_balancer.vip.subnet_id) context = pecan.request.context.get('octavia_context') if load_balancer.listeners: return self._create_load_balancer_graph(context, load_balancer) lb_dict = db_prepare.create_load_balancer(load_balancer.to_dict( render_unsets=True )) vip_dict = lb_dict.pop('vip', {}) try: db_lb = self.repositories.create_load_balancer_and_vip( context.session, lb_dict, vip_dict) except odb_exceptions.DBDuplicateEntry: raise exceptions.IDAlreadyExists() # Handler will be responsible for sending to controller try: LOG.info(_LI("Sending created Load Balancer %s to the handler"), db_lb.id) self.handler.create(db_lb) except Exception: with excutils.save_and_reraise_exception(reraise=False): self.repositories.load_balancer.update( context.session, db_lb.id, provisioning_status=constants.ERROR) return self._convert_db_to_type(db_lb, lb_types.LoadBalancerResponse)
def test_subnet_exists_with_valid_subnet(self):
    """A resolvable subnet id makes subnet_exists() return the subnet."""
    found_id = uuidutils.generate_uuid()
    found_subnet = network_models.Subnet(id=found_id)
    patch_target = 'octavia.common.utils.get_network_driver'
    with mock.patch(patch_target) as net_mock:
        net_mock.return_value.get_subnet.return_value = found_subnet
        self.assertEqual(validate.subnet_exists(found_id), found_subnet)
def post(self, member_):
    """Creates a pool member on a pool.

    Validates the request (reserved IP, subnet, RBAC), checks the
    project quota, creates the member under a lock session, and
    dispatches the create to the pool's provider driver before
    committing.
    """
    member = member_.member
    context = pecan.request.context.get('octavia_context')
    validate.ip_not_reserved(member.address)
    # Validate member subnet
    if member.subnet_id and not validate.subnet_exists(member.subnet_id):
        raise exceptions.NotFound(resource='Subnet', id=member.subnet_id)
    pool = self.repositories.pool.get(context.session, id=self.pool_id)
    member.project_id, provider = self._get_lb_project_id_provider(
        context.session, pool.load_balancer_id)
    self._auth_validate_action(context, member.project_id,
                               constants.RBAC_POST)
    # Load the driver early as it also provides validation
    driver = driver_factory.get_driver(provider)
    lock_session = db_api.get_session(autocommit=False)
    try:
        if self.repositories.check_quota_met(
                context.session, lock_session, data_models.Member,
                member.project_id):
            raise exceptions.QuotaException(
                resource=data_models.Member._name())
        member_dict = db_prepare.create_member(member.to_dict(
            render_unsets=True), self.pool_id, bool(pool.health_monitor))
        self._test_lb_and_listener_and_pool_statuses(lock_session)
        db_member = self._validate_create_member(lock_session,
                                                 member_dict)
        # Prepare the data for the driver data model
        provider_member = (
            driver_utils.db_member_to_provider_member(db_member))
        # Dispatch to the driver
        LOG.info("Sending create Member %s to provider %s", db_member.id,
                 driver.name)
        driver_utils.call_provider(
            driver.name, driver.member_create, provider_member)
        # Commit only after the driver accepted the create.
        lock_session.commit()
    except Exception:
        with excutils.save_and_reraise_exception():
            lock_session.rollback()
    db_member = self._get_db_member(context.session, db_member.id)
    result = self._convert_db_to_type(db_member,
                                      member_types.MemberResponse)
    return member_types.MemberRootResponse(member=result)
def _validate_port_and_fill_or_validate_subnet(load_balancer):
    """Validate the request VIP port and resolve the VIP subnet from it.

    :raises exceptions.ValidationException: when the VIP subnet cannot
        be determined from the port's fixed IPs.
    """
    port = validate.port_exists(port_id=load_balancer.vip_port_id)
    load_balancer.vip_network_id = port.network_id

    # An explicit subnet id just needs to exist.
    if load_balancer.vip_subnet_id:
        validate.subnet_exists(subnet_id=load_balancer.vip_subnet_id)
        return

    if not load_balancer.vip_address:
        # No address hint: the port must have exactly one fixed IP for
        # the subnet to be unambiguous.
        if len(port.fixed_ips) == 1:
            load_balancer.vip_subnet_id = port.fixed_ips[0].subnet_id
            return
        raise exceptions.ValidationException(detail=_(
            "VIP port's subnet could not be determined. Please "
            "specify either a VIP subnet or address."))

    # Locate the fixed IP on the port matching the requested address.
    for fixed_ip in port.fixed_ips:
        if fixed_ip.ip_address == load_balancer.vip_address:
            load_balancer.vip_subnet_id = fixed_ip.subnet_id
            return
    raise exceptions.ValidationException(
        detail=_("Specified VIP address not found on the "
                 "specified VIP port."))
def _validate_vip_request_object(self, load_balancer, context=None):
    """Validate the VIP fields of a load balancer create request.

    Enforces the deployment's configuration about which VIP objects
    (port/network/subnet id) callers may supply, requires at least one
    of them, then resolves and validates the chosen object, filling in
    derived fields on *load_balancer* as a side effect.

    :param load_balancer: the (mutable) load balancer request object.
    :param context: request context forwarded to network lookups.
    :raises exceptions.ValidationException: when a disallowed VIP object
        is used.
    :raises exceptions.VIPValidationException: when no VIP object was
        supplied at all.
    """
    allowed_network_objects = []
    if CONF.networking.allow_vip_port_id:
        allowed_network_objects.append('vip_port_id')
    if CONF.networking.allow_vip_network_id:
        allowed_network_objects.append('vip_network_id')
    if CONF.networking.allow_vip_subnet_id:
        allowed_network_objects.append('vip_subnet_id')

    msg = _("use of %(object)s is disallowed by this deployment's "
            "configuration.")
    if (load_balancer.vip_port_id and
            not CONF.networking.allow_vip_port_id):
        raise exceptions.ValidationException(
            detail=msg % {'object': 'vip_port_id'})
    if (load_balancer.vip_network_id and
            not CONF.networking.allow_vip_network_id):
        raise exceptions.ValidationException(
            detail=msg % {'object': 'vip_network_id'})
    if (load_balancer.vip_subnet_id and
            not CONF.networking.allow_vip_subnet_id):
        raise exceptions.ValidationException(
            detail=msg % {'object': 'vip_subnet_id'})

    if not (load_balancer.vip_port_id or
            load_balancer.vip_network_id or
            load_balancer.vip_subnet_id):
        raise exceptions.VIPValidationException(
            objects=', '.join(allowed_network_objects))

    # Validate the port id
    if load_balancer.vip_port_id:
        self._validate_port_and_fill_or_validate_subnet(load_balancer,
                                                        context=context)
    # If no port id, validate the network id (and subnet if provided)
    elif load_balancer.vip_network_id:
        self._validate_network_and_fill_or_validate_subnet(load_balancer,
                                                           context=context)
    # Validate just the subnet id
    elif load_balancer.vip_subnet_id:
        # Thread the request context through, consistent with the port
        # and network validation paths above.
        subnet = validate.subnet_exists(
            subnet_id=load_balancer.vip_subnet_id, context=context)
        load_balancer.vip_network_id = subnet.network_id

    if load_balancer.vip_qos_policy_id:
        validate.qos_policy_exists(
            qos_policy_id=load_balancer.vip_qos_policy_id)
def post(self, member):
    """Creates a pool member on a pool.

    Validates the subnet, creates the member in the database, and hands
    it to the handler. Duplicate-entry errors restore LB/listener
    provisioning status before raising; handler failures mark affected
    listeners ERROR and re-raise.
    """
    context = pecan.request.context.get('octavia_context')
    # Validate member subnet
    if member.subnet_id and not validate.subnet_exists(member.subnet_id):
        raise exceptions.NotFound(resource='Subnet', id=member.subnet_id)
    member_dict = db_prepare.create_member(member.to_dict(
        render_unsets=True), self.pool_id)
    self._test_lb_and_listener_statuses(context.session)
    try:
        db_member = self.repositories.member.create(context.session,
                                                    **member_dict)
    except oslo_exc.DBDuplicateEntry as de:
        # Setting LB and Listener back to active because this is just a
        # validation failure
        self.repositories.load_balancer.update(
            context.session, self.load_balancer_id,
            provisioning_status=constants.ACTIVE)
        for listener_id in self._get_affected_listener_ids(
                context.session):
            self.repositories.listener.update(
                context.session, listener_id,
                provisioning_status=constants.ACTIVE)
        # 'id' collision means a caller-supplied duplicate id; the
        # pool/address/port triple collision is a duplicate member.
        if ['id'] == de.columns:
            raise exceptions.IDAlreadyExists()
        elif (set(['pool_id', 'ip_address', 'protocol_port']) ==
                set(de.columns)):
            raise exceptions.DuplicateMemberEntry(
                ip_address=member_dict.get('ip_address'),
                port=member_dict.get('protocol_port'))
    try:
        LOG.info(_LI("Sending Creation of Member %s to handler"),
                 db_member.id)
        self.handler.create(db_member)
    except Exception:
        # Handler failed after the member was stored: mark affected
        # listeners ERROR, then re-raise is suppressed (reraise=False).
        for listener_id in self._get_affected_listener_ids(
                context.session):
            with excutils.save_and_reraise_exception(reraise=False):
                self.repositories.listener.update(
                    context.session, listener_id,
                    operating_status=constants.ERROR)
    db_member = self._get_db_member(context.session, db_member.id)
    return self._convert_db_to_type(db_member,
                                    member_types.MemberResponse)
def _validate_vip_request_object(self, load_balancer):
    """Validate the VIP fields of a load balancer create request.

    Enforces deployment configuration about which VIP objects may be
    supplied, requires at least one, resolves the chosen object (filling
    derived fields on *load_balancer*), validates any QoS policy, and
    finally checks the resolved network is allowed by configuration.
    """
    # (attribute name, whether deployment config allows it)
    vip_rules = (
        ('vip_port_id', CONF.networking.allow_vip_port_id),
        ('vip_network_id', CONF.networking.allow_vip_network_id),
        ('vip_subnet_id', CONF.networking.allow_vip_subnet_id),
    )
    allowed_network_objects = [attr for attr, allowed in vip_rules
                               if allowed]

    msg = _("use of %(object)s is disallowed by this deployment's "
            "configuration.")
    # Reject any supplied VIP object the deployment has disabled.
    for attr, allowed in vip_rules:
        if getattr(load_balancer, attr) and not allowed:
            raise exceptions.ValidationException(
                detail=msg % {'object': attr})

    if not (load_balancer.vip_port_id or
            load_balancer.vip_network_id or
            load_balancer.vip_subnet_id):
        raise exceptions.VIPValidationException(
            objects=', '.join(allowed_network_objects))

    # Validate the port id
    if load_balancer.vip_port_id:
        self._validate_port_and_fill_or_validate_subnet(load_balancer)
    # If no port id, validate the network id (and subnet if provided)
    elif load_balancer.vip_network_id:
        self._validate_network_and_fill_or_validate_subnet(load_balancer)
    # Validate just the subnet id
    elif load_balancer.vip_subnet_id:
        subnet = validate.subnet_exists(
            subnet_id=load_balancer.vip_subnet_id)
        load_balancer.vip_network_id = subnet.network_id

    if load_balancer.vip_qos_policy_id:
        validate.qos_policy_exists(
            qos_policy_id=load_balancer.vip_qos_policy_id)

    validate.network_allowed_by_config(load_balancer.vip_network_id)
def put(self, members_):
    """Updates all members.

    Full batch replace of the pool's member set: members in the request
    are created or updated, members absent from the request are deleted.
    All DB changes happen inside one lock session, then the whole batch
    is dispatched to the provider driver.
    """
    members = members_.members
    context = pecan.request.context.get('octavia_context')
    db_pool = self._get_db_pool(context.session, self.pool_id)
    old_members = db_pool.members
    project_id, provider = self._get_lb_project_id_provider(
        context.session, db_pool.load_balancer_id)
    # Check POST+PUT+DELETE since this operation is all of 'CUD'
    self._auth_validate_action(context, project_id, constants.RBAC_POST)
    self._auth_validate_action(context, project_id, constants.RBAC_PUT)
    self._auth_validate_action(context, project_id,
                               constants.RBAC_DELETE)
    # Validate member subnets
    for member in members:
        if member.subnet_id and not validate.subnet_exists(
                member.subnet_id):
            raise exceptions.NotFound(resource='Subnet',
                                      id=member.subnet_id)
    # Load the driver early as it also provides validation
    driver = driver_factory.get_driver(provider)
    with db_api.get_lock_session() as lock_session:
        self._test_lb_and_listener_and_pool_statuses(lock_session)
        # Quota only matters when the batch grows the member count.
        member_count_diff = len(members) - len(old_members)
        if member_count_diff > 0 and self.repositories.check_quota_met(
                context.session, lock_session, data_models.Member,
                db_pool.project_id, count=member_count_diff):
            raise exceptions.QuotaException(
                resource=data_models.Member._name())
        # Members are identified by their (address, port) pair.
        old_member_uniques = {
            (m.ip_address, m.protocol_port): m.id for m in old_members}
        new_member_uniques = [
            (m.address, m.protocol_port) for m in members]
        # Find members that are brand new or updated
        new_members = []
        updated_members = []
        for m in members:
            if (m.address, m.protocol_port) not in old_member_uniques:
                validate.ip_not_reserved(m.address)
                new_members.append(m)
            else:
                m.id = old_member_uniques[(m.address, m.protocol_port)]
                updated_members.append(m)
        # Find members that are deleted
        deleted_members = []
        for m in old_members:
            if (m.ip_address, m.protocol_port) not in new_member_uniques:
                deleted_members.append(m)
        provider_members = []
        # Create new members
        for m in new_members:
            m = m.to_dict(render_unsets=False)
            m['project_id'] = db_pool.project_id
            created_member = self._graph_create(lock_session, m)
            provider_member = driver_utils.db_member_to_provider_member(
                created_member)
            provider_members.append(provider_member)
        # Update old members
        for m in updated_members:
            m.provisioning_status = constants.PENDING_UPDATE
            db_member_dict = m.to_dict(render_unsets=False)
            # 'id' is the update key, not an updatable column.
            db_member_dict.pop('id')
            self.repositories.member.update(
                lock_session, m.id, **db_member_dict)
            m.pool_id = self.pool_id
            provider_members.append(
                driver_utils.db_member_to_provider_member(m))
        # Delete old members
        for m in deleted_members:
            self.repositories.member.update(
                lock_session, m.id,
                provisioning_status=constants.PENDING_DELETE)
        # Dispatch to the driver
        LOG.info("Sending Pool %s batch member update to provider %s",
                 db_pool.id, driver.name)
        driver_utils.call_provider(
            driver.name, driver.member_batch_update, provider_members)
def put(self, members_):
    """Updates all members.

    Full batch replace of the pool's member set via the handler:
    members in the request are created or marked PENDING_UPDATE,
    members absent from the request are marked PENDING_DELETE, then the
    id lists are sent to the handler in one batch_update call.
    """
    members = members_.members
    context = pecan.request.context.get('octavia_context')
    db_pool = self._get_db_pool(context.session, self.pool_id)
    old_members = db_pool.members
    # Check POST+PUT+DELETE since this operation is all of 'CUD'
    self._auth_validate_action(context, db_pool.project_id,
                               constants.RBAC_POST)
    self._auth_validate_action(context, db_pool.project_id,
                               constants.RBAC_PUT)
    self._auth_validate_action(context, db_pool.project_id,
                               constants.RBAC_DELETE)
    # Validate member subnets
    for member in members:
        if member.subnet_id and not validate.subnet_exists(
                member.subnet_id):
            raise exceptions.NotFound(resource='Subnet',
                                      id=member.subnet_id)
    with db_api.get_lock_session() as lock_session:
        self._test_lb_and_listener_and_pool_statuses(lock_session)
        # Quota only matters when the batch grows the member count.
        member_count_diff = len(members) - len(old_members)
        if member_count_diff > 0 and self.repositories.check_quota_met(
                context.session, lock_session, data_models.Member,
                db_pool.project_id, count=member_count_diff):
            raise exceptions.QuotaException
        # Members are identified by their (address, port) pair.
        old_member_uniques = {
            (m.ip_address, m.protocol_port): m.id for m in old_members}
        new_member_uniques = [
            (m.address, m.protocol_port) for m in members]
        # Find members that are brand new or updated
        new_members = []
        updated_members = []
        for m in members:
            if (m.address, m.protocol_port) not in old_member_uniques:
                new_members.append(m)
            else:
                m.id = old_member_uniques[(m.address, m.protocol_port)]
                updated_members.append(m)
        # Find members that are deleted
        deleted_members = []
        for m in old_members:
            if (m.ip_address, m.protocol_port) not in new_member_uniques:
                deleted_members.append(m)
        # Create new members
        new_members_created = []
        for m in new_members:
            m = m.to_dict(render_unsets=False)
            m['project_id'] = db_pool.project_id
            new_members_created.append(
                self._graph_create(lock_session, m))
        # Update old members
        for m in updated_members:
            self.repositories.member.update(
                lock_session, m.id,
                provisioning_status=constants.PENDING_UPDATE)
        # Delete old members
        for m in deleted_members:
            self.repositories.member.update(
                lock_session, m.id,
                provisioning_status=constants.PENDING_DELETE)
        LOG.info("Sending Full Member Update to handler")
        new_member_ids = [m.id for m in new_members_created]
        old_member_ids = [m.id for m in deleted_members]
        self.handler.batch_update(
            old_member_ids, new_member_ids, updated_members)
def post(self, load_balancer):
    """Creates a load balancer.

    Resolves the project id (admins/noauth may supply one), validates
    the VIP subnet, enforces quota under a lock session, creates either
    a full graph (inline listeners) or a plain LB+VIP, then notifies the
    handler. Handler failures mark the LB ERROR without re-raising.
    """
    context = pecan.request.context.get('octavia_context')
    project_id = context.project_id
    # Only admins (or noauth deployments) may create on behalf of
    # another project.
    if context.is_admin or CONF.auth_strategy == constants.NOAUTH:
        if load_balancer.project_id:
            project_id = load_balancer.project_id
    if not project_id:
        raise exceptions.MissingAPIProjectID()
    load_balancer.project_id = project_id
    # Validate the subnet id
    if load_balancer.vip.subnet_id:
        if not validate.subnet_exists(load_balancer.vip.subnet_id):
            raise exceptions.NotFound(resource='Subnet',
                                      id=load_balancer.vip.subnet_id)
    lock_session = db_api.get_session(autocommit=False)
    if self.repositories.check_quota_met(context.session, lock_session,
                                         data_models.LoadBalancer,
                                         load_balancer.project_id):
        lock_session.rollback()
        raise exceptions.QuotaException
    # Inline listeners mean a graph create, handled by its own path.
    if load_balancer.listeners:
        try:
            db_lb = self._create_load_balancer_graph_db(lock_session,
                                                        load_balancer)
            lock_session.commit()
        except Exception:
            with excutils.save_and_reraise_exception():
                lock_session.rollback()
        return self._load_balancer_graph_to_handler(context, db_lb)
    try:
        lb_dict = db_prepare.create_load_balancer(
            load_balancer.to_dict(render_unsets=True))
        vip_dict = lb_dict.pop('vip', {})
        db_lb = self.repositories.create_load_balancer_and_vip(
            lock_session, lb_dict, vip_dict)
        lock_session.commit()
    except odb_exceptions.DBDuplicateEntry:
        lock_session.rollback()
        raise exceptions.IDAlreadyExists()
    except Exception:
        with excutils.save_and_reraise_exception():
            lock_session.rollback()
    # Handler will be responsible for sending to controller
    try:
        LOG.info(_LI("Sending created Load Balancer %s to the handler"),
                 db_lb.id)
        self.handler.create(db_lb)
    except Exception:
        # Record exists but provisioning failed: flag ERROR, swallow
        # the exception (reraise=False).
        with excutils.save_and_reraise_exception(reraise=False):
            self.repositories.load_balancer.update(
                context.session, db_lb.id,
                provisioning_status=constants.ERROR)
    return self._convert_db_to_type(db_lb, lb_types.LoadBalancerResponse)
def put(self, members_):
    """Updates all members.

    Full batch replace of the pool's member set: request members are
    created or updated, members absent from the request are deleted.
    All DB changes happen inside one lock session and the batch is
    dispatched to the provider driver.
    """
    members = members_.members
    context = pecan.request.context.get('octavia_context')
    db_pool = self._get_db_pool(context.session, self.pool_id)
    old_members = db_pool.members
    project_id, provider = self._get_lb_project_id_provider(
        context.session, db_pool.load_balancer_id)
    # Check POST+PUT+DELETE since this operation is all of 'CUD'
    self._auth_validate_action(context, project_id, constants.RBAC_POST)
    self._auth_validate_action(context, project_id, constants.RBAC_PUT)
    self._auth_validate_action(context, project_id,
                               constants.RBAC_DELETE)
    # Validate member subnets
    for member in members:
        if member.subnet_id and not validate.subnet_exists(
                member.subnet_id):
            raise exceptions.NotFound(resource='Subnet',
                                      id=member.subnet_id)
    # Load the driver early as it also provides validation
    driver = driver_factory.get_driver(provider)
    with db_api.get_lock_session() as lock_session:
        self._test_lb_and_listener_and_pool_statuses(lock_session)
        # Quota only matters when the batch grows the member count.
        member_count_diff = len(members) - len(old_members)
        if member_count_diff > 0 and self.repositories.check_quota_met(
                context.session, lock_session, data_models.Member,
                db_pool.project_id, count=member_count_diff):
            raise exceptions.QuotaException(
                resource=data_models.Member._name())
        # Members are identified by their (address, port) pair.
        old_member_uniques = {(m.ip_address, m.protocol_port): m.id
                              for m in old_members}
        new_member_uniques = [(m.address, m.protocol_port)
                              for m in members]
        # Find members that are brand new or updated
        new_members = []
        updated_members = []
        for m in members:
            if (m.address, m.protocol_port) not in old_member_uniques:
                validate.ip_not_reserved(m.address)
                new_members.append(m)
            else:
                m.id = old_member_uniques[(m.address, m.protocol_port)]
                updated_members.append(m)
        # Find members that are deleted
        deleted_members = []
        for m in old_members:
            if (m.ip_address, m.protocol_port) not in new_member_uniques:
                deleted_members.append(m)
        provider_members = []
        # Create new members
        for m in new_members:
            m = m.to_dict(render_unsets=False)
            m['project_id'] = db_pool.project_id
            created_member = self._graph_create(lock_session, m)
            provider_member = driver_utils.db_member_to_provider_member(
                created_member)
            provider_members.append(provider_member)
        # Update old members
        for m in updated_members:
            m.provisioning_status = constants.PENDING_UPDATE
            db_member_dict = m.to_dict(render_unsets=False)
            # 'id' is the update key, not an updatable column.
            db_member_dict.pop('id')
            self.repositories.member.update(lock_session, m.id,
                                            **db_member_dict)
            m.pool_id = self.pool_id
            provider_members.append(
                driver_utils.db_member_to_provider_member(m))
        # Delete old members
        for m in deleted_members:
            self.repositories.member.update(
                lock_session, m.id,
                provisioning_status=constants.PENDING_DELETE)
        # Dispatch to the driver
        LOG.info("Sending Pool %s batch member update to provider %s",
                 db_pool.id, driver.name)
        driver_utils.call_provider(driver.name,
                                   driver.member_batch_update,
                                   provider_members)
def post(self, load_balancer):
    """Creates a load balancer.

    Resolves the project id (admins/noauth may supply one), validates
    and resolves the VIP (port, network, or subnet), enforces quota
    under a lock session, creates the LB+VIP in the database, and
    notifies the handler. Handler failures mark the LB ERROR without
    re-raising.
    """
    # Unwrap the root request wrapper.
    load_balancer = load_balancer.loadbalancer
    context = pecan.request.context.get('octavia_context')
    project_id = context.project_id
    # Only admins (or noauth deployments) may create on behalf of
    # another project.
    if context.is_admin or CONF.auth_strategy == constants.NOAUTH:
        if load_balancer.project_id:
            project_id = load_balancer.project_id
    if not project_id:
        raise exceptions.ValidationException(detail=_(
            "Missing project ID in request where one is required."))
    load_balancer.project_id = project_id
    if not (load_balancer.vip_port_id or
            load_balancer.vip_network_id or
            load_balancer.vip_subnet_id):
        raise exceptions.ValidationException(detail=_(
            "VIP must contain one of: port_id, network_id, subnet_id."))
    # Validate the port id
    if load_balancer.vip_port_id:
        port = validate.port_exists(port_id=load_balancer.vip_port_id)
        load_balancer.vip_network_id = port.network_id
    # If no port id, validate the network id (and subnet if provided)
    elif load_balancer.vip_network_id:
        self._validate_network_and_fill_or_validate_subnet(load_balancer)
    # Validate just the subnet id
    elif load_balancer.vip_subnet_id:
        subnet = validate.subnet_exists(
            subnet_id=load_balancer.vip_subnet_id)
        load_balancer.vip_network_id = subnet.network_id
    lock_session = db_api.get_session(autocommit=False)
    if self.repositories.check_quota_met(context.session, lock_session,
                                         data_models.LoadBalancer,
                                         load_balancer.project_id):
        lock_session.rollback()
        raise exceptions.QuotaException
    # TODO(blogan): lb graph, look at v1 code
    try:
        lb_dict = db_prepare.create_load_balancer(
            load_balancer.to_dict(render_unsets=True))
        vip_dict = lb_dict.pop('vip', {})
        db_lb = self.repositories.create_load_balancer_and_vip(
            lock_session, lb_dict, vip_dict)
        lock_session.commit()
    except odb_exceptions.DBDuplicateEntry:
        lock_session.rollback()
        raise exceptions.IDAlreadyExists()
    except Exception:
        with excutils.save_and_reraise_exception():
            lock_session.rollback()
    # Handler will be responsible for sending to controller
    try:
        LOG.info(_LI("Sending created Load Balancer %s to the handler"),
                 db_lb.id)
        self.handler.create(db_lb)
    except Exception:
        # Record exists but provisioning failed: flag ERROR, swallow
        # the exception (reraise=False).
        with excutils.save_and_reraise_exception(reraise=False):
            self.repositories.load_balancer.update(
                context.session, db_lb.id,
                provisioning_status=constants.ERROR)
    result = self._convert_db_to_type(db_lb,
                                      lb_types.LoadBalancerResponse)
    return lb_types.LoadBalancerRootResponse(loadbalancer=result)
def put(self, additive_only=False, members_=None):
    """Updates all members.

    Batch member update. In the default (replace) mode, request members
    are created/updated and members absent from the request are deleted.
    With additive_only=True, absent members are left untouched and no
    DELETE RBAC check is made. A no-op batch rolls back and returns
    early; otherwise the batch is dispatched to the provider driver.
    """
    members = members_.members
    # Query-string flag arrives as a string; coerce to bool.
    additive_only = strutils.bool_from_string(additive_only)
    context = pecan_request.context.get('octavia_context')
    db_pool = self._get_db_pool(context.session, self.pool_id)
    old_members = db_pool.members
    project_id, provider = self._get_lb_project_id_provider(
        context.session, db_pool.load_balancer_id)
    # Check POST+PUT+DELETE since this operation is all of 'CUD'
    self._auth_validate_action(context, project_id, constants.RBAC_POST)
    self._auth_validate_action(context, project_id, constants.RBAC_PUT)
    if not additive_only:
        self._auth_validate_action(context, project_id,
                                   constants.RBAC_DELETE)
    # Validate member subnets
    for member in members:
        if member.subnet_id and not validate.subnet_exists(
                member.subnet_id, context=context):
            raise exceptions.NotFound(resource='Subnet',
                                      id=member.subnet_id)
    # Load the driver early as it also provides validation
    driver = driver_factory.get_driver(provider)
    with db_api.get_lock_session() as lock_session:
        self._test_lb_and_listener_and_pool_statuses(lock_session)
        # Members are identified by their (address, port) pair.
        old_member_uniques = {(m.ip_address, m.protocol_port): m.id
                              for m in old_members}
        new_member_uniques = [(m.address, m.protocol_port)
                              for m in members]
        # Find members that are brand new or updated
        new_members = []
        updated_members = []
        for m in members:
            if (m.address, m.protocol_port) not in old_member_uniques:
                validate.ip_not_reserved(m.address)
                new_members.append(m)
            else:
                m.id = old_member_uniques[(m.address, m.protocol_port)]
                updated_members.append(m)
        # Find members that are deleted
        deleted_members = []
        for m in old_members:
            if (m.ip_address, m.protocol_port) not in new_member_uniques:
                deleted_members.append(m)
        if not (deleted_members or new_members or updated_members):
            LOG.info("Member batch update is a noop, rolling back and "
                     "returning early.")
            lock_session.rollback()
            return
        # Quota impact: additive mode never frees slots, so only the
        # additions count; replace mode nets out the deletions.
        if additive_only:
            member_count_diff = len(new_members)
        else:
            member_count_diff = len(new_members) - len(deleted_members)
        if member_count_diff > 0 and self.repositories.check_quota_met(
                context.session, lock_session, data_models.Member,
                db_pool.project_id, count=member_count_diff):
            raise exceptions.QuotaException(
                resource=data_models.Member._name())
        provider_members = []
        # Create new members
        for m in new_members:
            m = m.to_dict(render_unsets=False)
            m['project_id'] = db_pool.project_id
            created_member = self._graph_create(lock_session, m)
            provider_member = driver_utils.db_member_to_provider_member(
                created_member)
            provider_members.append(provider_member)
        # Update old members
        for m in updated_members:
            m.provisioning_status = constants.PENDING_UPDATE
            m.project_id = db_pool.project_id
            db_member_dict = m.to_dict(render_unsets=False)
            # 'id' is the update key, not an updatable column.
            db_member_dict.pop('id')
            self.repositories.member.update(lock_session, m.id,
                                            **db_member_dict)
            m.pool_id = self.pool_id
            provider_members.append(
                driver_utils.db_member_to_provider_member(m))
        # Delete old members
        for m in deleted_members:
            if additive_only:
                # Members are appended to the dict and their status
                # remains unchanged, because they are logically
                # "untouched".
                db_member_dict = m.to_dict(render_unsets=False)
                db_member_dict.pop('id')
                m.pool_id = self.pool_id
                provider_members.append(
                    driver_utils.db_member_to_provider_member(m))
            else:
                # Members are changed to PENDING_DELETE and not passed.
                self.repositories.member.update(
                    lock_session, m.id,
                    provisioning_status=constants.PENDING_DELETE)
        # Dispatch to the driver
        LOG.info("Sending Pool %s batch member update to provider %s",
                 db_pool.id, driver.name)
        driver_utils.call_provider(driver.name,
                                   driver.member_batch_update,
                                   db_pool.id, provider_members)