def __init__(self, configfile=None):
    """Initialize the meta plugin.

    Loads the core and L3 plugin sets from the META config section,
    validates the configured default flavors and builds the
    extension-method -> flavor dispatch map.

    :param configfile: unused, kept for plugin-loader compatibility.
    :raises exc.Invalid: if a configured default flavor is not present
        in the corresponding plugin list.
    """
    LOG.debug(_("Start initializing metaplugin"))
    self.supported_extension_aliases = \
        cfg.CONF.META.supported_extension_aliases.split(',')
    self.supported_extension_aliases += ['flavor', 'external-net',
                                         'router', 'ext-gw-mode',
                                         'extraroute']

    # Ignore config option overlapping: several loaded plugins may try
    # to register the same option, so make re-registration a no-op.
    def _is_opt_registered(opts, opt):
        if opt.dest in opts:
            return True
        else:
            return False

    cfg._is_opt_registered = _is_opt_registered

    # Keep existing tables if multiple plugin use same table name.
    db.model_base.NeutronBase.__table_args__ = {'keep_existing': True}

    self.plugins = {}
    plugin_list = [plugin_set.split(':')
                   for plugin_set in cfg.CONF.META.plugin_list.split(',')]
    for flavor, plugin_provider in plugin_list:
        self.plugins[flavor] = self._load_plugin(plugin_provider)

    self.l3_plugins = {}
    l3_plugin_list = [plugin_set.split(':')
                      for plugin_set
                      in cfg.CONF.META.l3_plugin_list.split(',')]
    for flavor, plugin_provider in l3_plugin_list:
        if flavor in self.plugins:
            self.l3_plugins[flavor] = self.plugins[flavor]
        else:
            # For l3 only plugin
            self.l3_plugins[flavor] = self._load_plugin(plugin_provider)

    self.default_flavor = cfg.CONF.META.default_flavor
    if self.default_flavor not in self.plugins:
        raise exc.Invalid(_('default_flavor %s is not plugin list') %
                          self.default_flavor)

    self.default_l3_flavor = cfg.CONF.META.default_l3_flavor
    if self.default_l3_flavor not in self.l3_plugins:
        raise exc.Invalid(_('default_l3_flavor %s is not plugin list') %
                          self.default_l3_flavor)

    db.configure_db()

    self.extension_map = {}
    if not cfg.CONF.META.extension_map == '':
        extension_list = [method_set.split(':')
                          for method_set
                          in cfg.CONF.META.extension_map.split(',')]
        for method_name, flavor in extension_list:
            self.extension_map[method_name] = flavor
    # NOTE: a redundant trailing re-assignment of self.default_flavor
    # (identical value, already set and validated above) was removed.
def parse_service_provider_opt():
    """Parse service definition opts and returns result.

    Each provider definition has the form
    ``<service_type>:<name>:<driver>[:default]``.  Definitions are read
    from the main neutron config file plus every installed *aas
    module's configuration.

    :returns: list of dicts with keys ``service_type``, ``name``,
        ``driver`` and ``default``.
    :raises n_exc.Invalid: on a malformed definition, an over-long
        name, or a disallowed service type.
    """

    def validate_name(name):
        # Provider names must fit a 255-character column.
        if len(name) > 255:
            raise n_exc.Invalid(
                _("Provider name is limited by 255 characters: %s") % name)

    # Main neutron config file
    try:
        svc_providers_opt = cfg.CONF.service_providers.service_provider
    except cfg.NoSuchOptError:
        svc_providers_opt = []

    # Add in entries from the *aas conf files
    neutron_mods = repos.NeutronModules()
    for x in neutron_mods.installed_list():
        svc_providers_opt += neutron_mods.service_providers(x)

    LOG.debug("Service providers = %s", svc_providers_opt)

    res = []
    for prov_def in svc_providers_opt:
        split = prov_def.split(':')
        try:
            svc_type, name, driver = split[:3]
        except ValueError:
            raise n_exc.Invalid(_("Invalid service provider format"))
        validate_name(name)
        name = normalize_provider_name(name)
        default = False
        # An optional 4th non-empty field may only be 'default'.
        if len(split) == 4 and split[3]:
            if split[3] == 'default':
                default = True
            else:
                msg = (_("Invalid provider format. "
                         "Last part should be 'default' or empty: %s") %
                       prov_def)
                LOG.error(msg)
                raise n_exc.Invalid(msg)
        if svc_type not in constants.ALLOWED_SERVICES:
            msg = (_("Service type '%(svc_type)s' is not allowed, "
                     "allowed types: %(allowed)s") % {
                         'svc_type': svc_type,
                         'allowed': constants.ALLOWED_SERVICES
                     })
            LOG.error(msg)
            raise n_exc.Invalid(msg)
        # Resolve the driver string to an importable class path.
        driver = get_provider_driver_class(driver)
        res.append({
            'service_type': svc_type,
            'name': name,
            'driver': driver,
            'default': default
        })
    return res
def update_pool(self, context, id, pool):
    """Validate and persist a pool update, then notify its driver."""
    pool = pool.get('pool')
    session_info = pool.get('session_persistence', None)
    if session_info:
        try:
            self._check_session_persistence_info(
                pool['session_persistence'])
        except ValueError:
            raise n_exc.Invalid(
                _("Error value for session persistence type."))
    healthmonitor_info = pool.get('healthmonitor', None)
    if healthmonitor_info:
        self._prepare_healthmonitor_info(pool['healthmonitor'])

    old_pool = self.db.get_pool(context, id)
    # Session persistence is only meaningful for HTTP pools.
    if session_info and old_pool.protocol != lb_const.PROTOCOL_HTTP:
        raise n_exc.Invalid(
            _("Can not specify session persistence for TCP protocol."))

    self.db.test_and_set_status(context, models.PoolV2, id,
                                constants.PENDING_UPDATE)
    try:
        updated_pool = self.db.update_pool(context, id, pool)
    except Exception as exc:
        # Roll the status back before propagating the failure.
        self.db.update_status(context, models.PoolV2, id, old_pool.status)
        LOG.info('_update_pool exc: %s', exc)
        raise exc

    # Prefer the updated pool's attachment; fall back to the previous
    # state so the driver that owned the pool is still notified.
    if updated_pool.attached_to_loadbalancer():
        reference = updated_pool
    elif old_pool.attached_to_loadbalancer():
        reference = old_pool
    else:
        reference = None

    if reference is None:
        # Not attached anywhere: no driver to call, just defer.
        self.db.update_status(context, models.PoolV2, id,
                              constants.DEFERRED)
    else:
        if reference.l7policy:
            loadbalancer_id = reference.l7policy.listener.loadbalancer_id
        else:
            loadbalancer_id = reference.listener.loadbalancer_id
        driver = self._get_driver_for_loadbalancer(context, loadbalancer_id)
        self._call_driver_operation(context, driver.pool.update,
                                    updated_pool, old_db_entity=old_pool)
    return self.db.get_pool(context, updated_pool.id).to_dict()
def _get_tenant_id(self, os, tenant_id): if tenant_id not in os['projects']: emsg = "tenant(%(tenant_id)s) is not found" % { 'tenant_id': tenant_id} LOG.debug(emsg) raise exceptions.Invalid(error_message=emsg) if not os['projects'][tenant_id]: emsg = "tenant name(%(tenant_id)s) is empty" % { 'tenant_id': tenant_id} LOG.debug(emsg) raise exceptions.Invalid(error_message=emsg) return os['projects'][tenant_id] + '.' + self.neutron_id
def get_logical_device(self, context, pool_id=None):
    """Assemble the full logical-device description for a pool.

    Returns a dict with the pool, its vip (including the vip port and
    per-fixed-ip subnet details), eligible members, active/pending
    health monitors and the device driver registered for the pool's
    provider.

    :raises n_exc.Invalid: if the pool is not in ACTIVE state.
    """
    with context.session.begin(subtransactions=True):
        qry = context.session.query(loadbalancer_db.Pool)
        qry = qry.filter_by(id=pool_id)
        pool = qry.one()

        # Only active pools may be deployed to a device.
        if pool.status != constants.ACTIVE:
            raise n_exc.Invalid(_('Expected active pool'))
        retval = {}
        retval['pool'] = self.plugin._make_pool_dict(pool)

        if pool.vip:
            retval['vip'] = self.plugin._make_vip_dict(pool.vip)
            retval['vip']['port'] = (
                self.plugin._core_plugin._make_port_dict(pool.vip.port))
            # Inline each fixed ip's subnet so the agent needs no
            # further lookups.
            for fixed_ip in retval['vip']['port']['fixed_ips']:
                fixed_ip['subnet'] = (self.plugin._core_plugin.get_subnet(
                    context, fixed_ip['subnet_id']))

        # Members that are active/pending or administratively inactive.
        retval['members'] = [
            self.plugin._make_member_dict(m) for m in pool.members
            if (m.status in constants.ACTIVE_PENDING
                or m.status == constants.INACTIVE)
        ]
        retval['healthmonitors'] = [
            self.plugin._make_health_monitor_dict(hm.healthmonitor)
            for hm in pool.monitors
            if hm.status in constants.ACTIVE_PENDING
        ]
        # Device driver registered for this pool's provider.
        retval['driver'] = (
            self.plugin.drivers[pool.provider.provider_name].device_driver)
        return retval
def _get_driver_for_provider(self, provider): try: return self.drivers[provider] except KeyError: # raise if not associated (should never be reached) raise n_exc.Invalid(_LE("Error retrieving driver for provider " "%s") % provider)
def _port_action(self, plugin, context, port, action):
    """Perform port operations taking care of concurrency issues.

    :param action: either 'create_port' or 'update_port'.
    :returns: the created/updated port dict, or None when the failure
        is attributed to a tolerated concurrent deletion.
    :raises n_exc.Invalid: for an unrecognized action.
    """
    try:
        if action == 'create_port':
            return p_utils.create_port(plugin, context, port)
        elif action == 'update_port':
            return plugin.update_port(context, port['id'], port)
        else:
            msg = _('Unrecognized action')
            raise n_exc.Invalid(message=msg)
    except (db_exc.DBError,
            n_exc.NetworkNotFound,
            n_exc.SubnetNotFound,
            n_exc.IpAddressGenerationFailure) as e:
        # Swallowed by default (reraise=False): these exceptions are
        # the expected races with concurrent network/subnet deletion.
        with excutils.save_and_reraise_exception(reraise=False) as ctxt:
            if isinstance(e, n_exc.IpAddressGenerationFailure):
                # Check if the subnet still exists and if it does not,
                # this is the reason why the ip address generation failed.
                # In any other unlikely event re-raise
                try:
                    subnet_id = port['port']['fixed_ips'][0]['subnet_id']
                    plugin.get_subnet(context, subnet_id)
                except n_exc.SubnetNotFound:
                    pass
                else:
                    # Subnet still exists, so the failure is
                    # unexplained: propagate the original exception.
                    ctxt.reraise = True
            net_id = port['port']['network_id']
            LOG.warn(_LW("Action %(action)s for network %(net_id)s "
                         "could not complete successfully: %(reason)s"),
                     {"action": action, "net_id": net_id, 'reason': e})
def _port_action(self, plugin, context, port, action):
    """Perform port operations taking care of concurrency issues.

    :param action: either 'create_port' or 'update_port'.
    :returns: the created/updated port dict, or None when the failure
        is attributed to a concurrent subnet deletion.
    :raises n_exc.Invalid: for an unrecognized action.
    """
    try:
        if action == 'create_port':
            return plugin.create_port(context, port)
        elif action == 'update_port':
            return plugin.update_port(context, port['id'], port['port'])
        else:
            msg = _('Unrecognized action')
            raise n_exc.Invalid(message=msg)
    except (db_exc.DBError,
            n_exc.NetworkNotFound,
            n_exc.SubnetNotFound,
            n_exc.IpAddressGenerationFailure) as e:
        if isinstance(e, n_exc.IpAddressGenerationFailure):
            # Check if the subnet still exists and if it does not, this is
            # the reason why the ip address generation failed. In any other
            # unlikely event re-raise
            try:
                subnet_id = port['port']['fixed_ips'][0]['subnet_id']
                plugin.get_subnet(context, subnet_id)
            except n_exc.SubnetNotFound:
                # Subnet is gone: treat as a tolerated race and fall
                # through to the warning below.
                pass
            else:
                raise
        network_id = port['port']['network_id']
        LOG.warn(_("Port for network %(net_id)s could not be created: "
                   "%(reason)s") % {"net_id": network_id, 'reason': e})
def __init__(self, configfile=None):
    """Initialize the Ryu plugin.

    Sets up port-binding defaults, the tunnel-key allocator and the
    Ryu OpenFlow REST clients, then registers reserved networks and
    all existing tenant networks with the controller.

    :param configfile: unused, kept for plugin-loader compatibility.
    :raises n_exc.Invalid: if OVS.openflow_rest_api is not configured.
    """
    super(RyuNeutronPluginV2, self).__init__()
    self.base_binding_dict = {
        portbindings.VIF_TYPE: portbindings.VIF_TYPE_OVS,
        portbindings.VIF_DETAILS: {
            # TODO(rkukura): Replace with new VIF security details
            portbindings.CAP_PORT_FILTER:
            'security-group' in self.supported_extension_aliases,
            portbindings.OVS_HYBRID_PLUG: True
        }
    }
    portbindings_base.register_port_dict_function()
    self.tunnel_key = db_api_v2.TunnelKey(
        cfg.CONF.OVS.tunnel_key_min, cfg.CONF.OVS.tunnel_key_max)
    self.ofp_api_host = cfg.CONF.OVS.openflow_rest_api
    if not self.ofp_api_host:
        raise n_exc.Invalid(_('Invalid configuration. check ryu.ini'))

    self.client = client.OFPClient(self.ofp_api_host)
    self.tun_client = client.TunnelClient(self.ofp_api_host)
    self.iface_client = client.NeutronIfaceClient(self.ofp_api_host)
    # NW_ID_UNKNOWN is deliberately excluded from registration.
    for nw_id in rest_nw_id.RESERVED_NETWORK_IDS:
        if nw_id != rest_nw_id.NW_ID_UNKNOWN:
            self.client.update_network(nw_id)
    self._setup_rpc()

    # register known all network list on startup
    self._create_all_tenant_network()
def _ensure_driver_unique(self, driver): for k, v in self.providers.items(): if v['driver'] == driver: msg = (_("Driver %s is not unique across providers") % driver) LOG.exception(msg) raise n_exc.Invalid(msg)
def _get_driver_for_pool(self, context, pool_id): pool = self.get_pool(context, pool_id) try: return self.drivers[pool['provider']] except KeyError: raise n_exc.Invalid( _("Error retrieving provider for pool %s") % pool_id)
def __init__(self, plugin, collection, resource, attr_info,
             allow_bulk=False, member_actions=None, parent=None,
             allow_pagination=False, allow_sorting=False):
    """Build an API controller for one resource collection.

    :param plugin: backend plugin the controller dispatches to.
    :param collection: plural resource name (dashes allowed).
    :param resource: singular resource name (dashes allowed).
    :param attr_info: attribute map describing the resource's fields.
    :param allow_bulk: enable bulk create requests.
    :param member_actions: extra member-level actions, if any.
    :param parent: dict describing the parent resource for
        sub-resources (uses its 'member_name' key), or None.
    :param allow_pagination: enable paginated list requests.
    :param allow_sorting: enable sorted list requests.
    :raises exceptions.Invalid: if native pagination is requested but
        the plugin lacks native sorting support.
    """
    if member_actions is None:
        member_actions = []
    self._plugin = plugin
    # URL names use dashes; plugin method names use underscores.
    self._collection = collection.replace('-', '_')
    self._resource = resource.replace('-', '_')
    self._attr_info = attr_info
    self._allow_bulk = allow_bulk
    self._allow_pagination = allow_pagination
    self._allow_sorting = allow_sorting
    self._native_bulk = self._is_native_bulk_supported()
    self._native_pagination = self._is_native_pagination_supported()
    self._native_sorting = self._is_native_sorting_supported()
    # Attributes that policy checks must always be able to see.
    self._policy_attrs = [
        name for (name, info) in self._attr_info.items()
        if info.get('required_by_policy')
    ]
    self._notifier = n_rpc.get_notifier('network')
    # use plugin's dhcp notifier, if this is already instantiated
    agent_notifiers = getattr(plugin, 'agent_notifiers', {})
    self._dhcp_agent_notifier = (agent_notifiers.get(
        const.AGENT_TYPE_DHCP) or
        dhcp_rpc_agent_api.DhcpAgentNotifyAPI())
    if cfg.CONF.notify_nova_on_port_data_changes:
        # Imported lazily to avoid a hard dependency when the nova
        # notifier is disabled.
        from neutron.notifiers import nova
        self._nova_notifier = nova.Notifier()
    self._member_actions = member_actions
    self._primary_key = self._get_primary_key()
    if self._allow_pagination and self._native_pagination:
        # Native pagination need native sorting support
        if not self._native_sorting:
            raise exceptions.Invalid(
                _("Native pagination depend on native sorting"))
        if not self._allow_sorting:
            LOG.info(_LI("Allow sorting is enabled because native "
                         "pagination requires native sorting"))
            self._allow_sorting = True

    if parent:
        self._parent_id_name = '%s_id' % parent['member_name']
        parent_part = '_%s' % parent['member_name']
    else:
        self._parent_id_name = None
        parent_part = ''
    # Map controller actions to the plugin methods that serve them.
    self._plugin_handlers = {
        self.LIST: 'get%s_%s' % (parent_part, self._collection),
        self.SHOW: 'get%s_%s' % (parent_part, self._resource)
    }
    for action in [self.CREATE, self.UPDATE, self.DELETE]:
        self._plugin_handlers[action] = '%s%s_%s' % (action, parent_part,
                                                     self._resource)
def update_status(self, context, obj_type, obj_id, status):
    """Set the status of a v1 object, tolerating races with deletes."""
    model_mapping = {
        'pool': loadbalancer_db.Pool,
        'vip': loadbalancer_db.Vip,
        'member': loadbalancer_db.Member,
        'health_monitor': loadbalancer_db.PoolMonitorAssociation,
    }
    model = model_mapping.get(obj_type)
    if model is None:
        raise n_exc.Invalid(_('Unknown object type: %s') % obj_type)
    try:
        if obj_type == 'health_monitor':
            # Monitor status lives on the (monitor, pool) association.
            self.plugin.update_pool_health_monitor(
                context, obj_id['monitor_id'], obj_id['pool_id'], status)
        else:
            self.plugin.update_status(context, model, obj_id, status)
    except n_exc.NotFound:
        # update_status may come from agent on an object which was
        # already deleted from db with other request
        LOG.warning(_('Cannot update status: %(obj_type)s %(obj_id)s '
                      'not found in the DB, it was probably deleted '
                      'concurrently'),
                    {'obj_type': obj_type, 'obj_id': obj_id})
def update_status(self, context, obj_type, obj_id,
                  provisioning_status=None, operating_status=None):
    """Set provisioning/operating status of a v2 object via the DB."""
    if not (provisioning_status or operating_status):
        # Nothing to write; warn so the caller's bug is visible.
        LOG.warning(_LW('update_status for %(obj_type)s %(obj_id)s called '
                        'without specifying provisioning_status or '
                        'operating_status') % {'obj_type': obj_type,
                                               'obj_id': obj_id})
        return
    model_mapping = {
        'loadbalancer': db_models.LoadBalancer,
        'pool': db_models.PoolV2,
        'listener': db_models.Listener,
        'member': db_models.MemberV2,
        'healthmonitor': db_models.HealthMonitorV2,
    }
    model = model_mapping.get(obj_type)
    if model is None:
        raise n_exc.Invalid(_('Unknown object type: %s') % obj_type)
    try:
        self.plugin.db.update_status(
            context, model, obj_id,
            provisioning_status=provisioning_status,
            operating_status=operating_status)
    except n_exc.NotFound:
        # update_status may come from agent on an object which was
        # already deleted from db with other request
        LOG.warning(_LW('Cannot update status: %(obj_type)s %(obj_id)s '
                        'not found in the DB, it was probably deleted '
                        'concurrently'),
                    {'obj_type': obj_type, 'obj_id': obj_id})
def __init__(self, configfile=None):
    """Initialize the Ryu plugin (older variant).

    Configures port-binding defaults, the DB, the tunnel-key range and
    the Ryu REST clients, then registers reserved and existing tenant
    networks with the OpenFlow controller.

    :param configfile: unused, kept for plugin-loader compatibility.
    :raises q_exc.Invalid: if OVS.openflow_rest_api is not configured.
    """
    self.base_binding_dict = {
        portbindings.VIF_TYPE: portbindings.VIF_TYPE_OVS,
        portbindings.CAPABILITIES: {
            portbindings.CAP_PORT_FILTER:
            'security-group' in self.supported_extension_aliases
        }
    }
    portbindings_base.register_port_dict_function()
    db.configure_db()
    self.tunnel_key = db_api_v2.TunnelKey(
        cfg.CONF.OVS.tunnel_key_min, cfg.CONF.OVS.tunnel_key_max)
    self.ofp_api_host = cfg.CONF.OVS.openflow_rest_api
    if not self.ofp_api_host:
        raise q_exc.Invalid(_('Invalid configuration. check ryu.ini'))

    self.client = client.OFPClient(self.ofp_api_host)
    self.tun_client = client.TunnelClient(self.ofp_api_host)
    self.iface_client = client.NeutronIfaceClient(self.ofp_api_host)
    # NW_ID_UNKNOWN is deliberately excluded from registration.
    for nw_id in rest_nw_id.RESERVED_NETWORK_IDS:
        if nw_id != rest_nw_id.NW_ID_UNKNOWN:
            self.client.update_network(nw_id)
    self._setup_rpc()

    # register known all network list on startup
    self._create_all_tenant_network()
def validate_name(name):
    """Reject provider names longer than the model's name column."""
    if len(name) <= attr.NAME_MAX_LEN:
        return
    raise n_exc.Invalid(
        _("Provider name %(name)s is limited by %(len)s characters") % {
            'name': name,
            'len': attr.NAME_MAX_LEN
        })
def _get_driver_for_loadbalancer(self, context, loadbalancer_id): lb = self.db.get_loadbalancer(context, loadbalancer_id) try: return self.drivers[lb.provider.provider_name] except KeyError: raise n_exc.Invalid( _LE("Error retrieving provider for load balancer. Possible " "providers are %s.") % self.drivers.keys())
def _ensure_default_unique(self, type, default): if not default: return for k, v in self.providers.items(): if k[0] == type and v['default']: msg = _("Multiple default providers " "for service %s") % type LOG.exception(msg) raise n_exc.Invalid(msg)
def parse_service_provider_opt(service_module='neutron'):
    """Parse service definition opts and returns result.

    Reads ``service_provider`` entries of the form
    ``<service_type>:<name>:<driver>[:default]`` from the given
    module's configuration.

    :param service_module: name of the neutron module whose config
        supplies the provider definitions.
    :returns: list of dicts with keys ``service_type``, ``name``,
        ``driver`` and ``default``.
    :raises n_exc.Invalid: on a malformed definition or over-long name.
    """

    def validate_name(name):
        # Names must fit the model's name column.
        if len(name) > attr.NAME_MAX_LEN:
            raise n_exc.Invalid(
                _("Provider name %(name)s is limited by %(len)s characters") %
                {'name': name, 'len': attr.NAME_MAX_LEN})

    neutron_mod = NeutronModule(service_module)
    svc_providers_opt = neutron_mod.service_providers()

    LOG.debug("Service providers = %s", svc_providers_opt)

    res = []
    for prov_def in svc_providers_opt:
        split = prov_def.split(':')
        try:
            svc_type, name, driver = split[:3]
        except ValueError:
            raise n_exc.Invalid(_("Invalid service provider format"))
        validate_name(name)
        name = normalize_provider_name(name)
        default = False
        # An optional 4th non-empty field may only be 'default'.
        if len(split) == 4 and split[3]:
            if split[3] == 'default':
                default = True
            else:
                msg = (_("Invalid provider format. "
                         "Last part should be 'default' or empty: %s") %
                       prov_def)
                LOG.error(msg)
                raise n_exc.Invalid(msg)
        # Resolve the driver string to an importable class path.
        driver = get_provider_driver_class(driver)
        res.append({
            'service_type': svc_type,
            'name': name,
            'driver': driver,
            'default': default
        })
    return res
def _get_network(self, os, network_id): network = next((n for n in os['networks'] if n['id'] == network_id), None) if network: return network else: emsg = "network(%(network_id)s) is not found" % { 'network_id': network_id} LOG.debug(emsg) raise exceptions.Invalid(message=emsg)
def parse_service_provider_opt():
    """Parse service definition opts and returns result."""

    def validate_name(name):
        # Provider names must fit a 255-character column.
        if len(name) > 255:
            raise n_exc.Invalid(
                _("Provider name is limited by 255 characters: %s") % name)

    res = []
    for prov_def in cfg.CONF.service_providers.service_provider:
        parts = prov_def.split(':')
        if len(parts) < 3:
            raise n_exc.Invalid(_("Invalid service provider format"))
        svc_type, name, driver = parts[:3]
        validate_name(name)
        name = normalize_provider_name(name)
        default = False
        # An optional 4th non-empty field may only be 'default'.
        if len(parts) == 4 and parts[3]:
            if parts[3] != 'default':
                msg = (_("Invalid provider format. "
                         "Last part should be 'default' or empty: %s") %
                       prov_def)
                LOG.error(msg)
                raise n_exc.Invalid(msg)
            default = True
        if svc_type not in constants.ALLOWED_SERVICES:
            msg = (_("Service type '%(svc_type)s' is not allowed, "
                     "allowed types: %(allowed)s") %
                   {'svc_type': svc_type,
                    'allowed': constants.ALLOWED_SERVICES})
            LOG.error(msg)
            raise n_exc.Invalid(msg)
        res.append({'service_type': svc_type,
                    'name': name,
                    'driver': get_provider_driver_class(driver),
                    'default': default})
    return res
def _update_router_provider(self, resource, event, trigger, context,
                            router_id, router, old_router, router_db,
                            **kwargs):
    """Handle transition between providers.

    The provider can currently be changed only by the caller updating
    'ha' and/or 'distributed' attributes. If we allow updates of
    flavor_id directly in the future those requests will also land here.
    """
    drv = self._get_provider_for_router(context, router_id)
    new_drv = None
    if _flavor_specified(router):
        if router['flavor_id'] != old_router['flavor_id']:
            # TODO(kevinbenton): this is currently disallowed by the API
            # so we shouldn't hit it but this is a placeholder to add
            # support later.
            raise NotImplementedError()

    # the following is to support updating the 'ha' and 'distributed'
    # attributes via the API.
    try:
        _ensure_driver_supports_request(drv, router)
    except n_exc.Invalid:
        # the current driver does not support this request, we need to
        # migrate to a new provider. populate the distributed and ha
        # flags from the previous state if not in the update so we can
        # determine the target provider appropriately.
        # NOTE(kevinbenton): if the router is associated with a flavor
        # we bail because changing the provider without changing
        # the flavor will make things inconsistent. We can probably
        # update the flavor automatically in the future.
        if old_router['flavor_id']:
            raise n_exc.Invalid(
                _("Changing the 'ha' and 'distributed' attributes on a "
                  "router associated with a flavor is not supported."))
        if 'distributed' not in router:
            router['distributed'] = old_router['distributed']
        if 'ha' not in router:
            # Bug fix: default the missing 'ha' flag from the previous
            # 'ha' value. The original copied old_router['distributed']
            # here, which could silently flip a router's HA setting
            # during provider migration.
            router['ha'] = old_router['ha']
        new_drv = self._attrs_to_driver(router)
    if new_drv:
        LOG.debug("Router %(id)s migrating from %(old)s provider to "
                  "%(new)s provider.", {'id': router_id, 'old': drv,
                                        'new': new_drv})
        _ensure_driver_supports_request(new_drv, router)
        # TODO(kevinbenton): notify old driver explicity of driver change
        with context.session.begin(subtransactions=True):
            self._stm.del_resource_associations(context, [router_id])
            self._stm.add_resource_association(
                context, 'L3_ROUTER_NAT', new_drv.name, router_id)
def _ensure_driver_supports_request(drv, router_body): r = router_body for key, attr in (('distributed', 'distributed_support'), ('ha', 'ha_support')): flag = r.get(key) if flag not in [True, False]: continue # not specified in body if not getattr(drv, attr).is_compatible(flag): raise n_exc.Invalid( _("Provider %(name)s does not support %(key)s=%(flag)s") % dict(name=drv.name, key=key, flag=flag))
def __init__(self, plugin, collection, resource, attr_info,
             allow_bulk=False, member_actions=None, parent=None,
             allow_pagination=False, allow_sorting=False):
    """Build an API controller for one resource collection.

    :param plugin: backend plugin the controller dispatches to.
    :param collection: plural resource name (dashes allowed).
    :param resource: singular resource name (dashes allowed).
    :param attr_info: attribute map describing the resource's fields.
    :param allow_bulk: enable bulk create requests.
    :param member_actions: extra member-level actions, if any.
    :param parent: dict describing the parent resource for
        sub-resources (uses its 'member_name' key), or None.
    :param allow_pagination: enable paginated list requests.
    :param allow_sorting: enable sorted list requests.
    :raises exceptions.Invalid: if native pagination is requested but
        the plugin lacks native sorting support.
    """
    if member_actions is None:
        member_actions = []
    self._plugin = plugin
    # URL names use dashes; plugin method names use underscores.
    self._collection = collection.replace('-', '_')
    self._resource = resource.replace('-', '_')
    self._attr_info = attr_info
    self._allow_bulk = allow_bulk
    self._allow_pagination = allow_pagination
    self._allow_sorting = allow_sorting
    self._native_bulk = self._is_native_bulk_supported()
    self._native_pagination = self._is_native_pagination_supported()
    self._native_sorting = self._is_native_sorting_supported()
    # Attributes that policy checks must always be able to see.
    self._policy_attrs = [
        name for (name, info) in self._attr_info.items()
        if info.get('required_by_policy')
    ]
    self._publisher_id = notifier_api.publisher_id('network')
    self._dhcp_agent_notifier = dhcp_rpc_agent_api.DhcpAgentNotifyAPI()
    self._member_actions = member_actions
    self._primary_key = self._get_primary_key()
    if self._allow_pagination and self._native_pagination:
        # Native pagination need native sorting support
        if not self._native_sorting:
            raise exceptions.Invalid(
                _("Native pagination depend on native sorting"))
        if not self._allow_sorting:
            LOG.info(_("Allow sorting is enabled because native "
                       "pagination requires native sorting"))
            self._allow_sorting = True

    if parent:
        self._parent_id_name = '%s_id' % parent['member_name']
        parent_part = '_%s' % parent['member_name']
    else:
        self._parent_id_name = None
        parent_part = ''
    # Map controller actions to the plugin methods that serve them.
    self._plugin_handlers = {
        self.LIST: 'get%s_%s' % (parent_part, self._collection),
        self.SHOW: 'get%s_%s' % (parent_part, self._resource)
    }
    for action in [self.CREATE, self.UPDATE, self.DELETE]:
        self._plugin_handlers[action] = '%s%s_%s' % (action, parent_part,
                                                     self._resource)
def create_pool(self, context, pool):
    """Persist a new pool and mark it DEFERRED until it is attached."""
    pool = pool.get('pool')
    persistence = pool.get('session_persistence', None)
    if persistence:
        # Session persistence is only valid for HTTP pools.
        if pool['protocol'] != lb_const.PROTOCOL_HTTP:
            raise n_exc.Invalid(
                _("Can not specify session persistence for TCP protocol."))
        try:
            self._check_session_persistence_info(persistence)
        except ValueError:
            raise n_exc.Invalid(
                _("Error value for session persistence type."))
    monitor = pool.get('healthmonitor', None)
    if monitor:
        self._prepare_healthmonitor_info(monitor)
    db_pool = self.db.create_pool(context, pool)
    # no need to call driver since on create it cannot be linked to a load
    # balancer, but will still update status to DEFERRED
    self.db.update_status(context, models.PoolV2, db_pool.id,
                          constants.DEFERRED)
    return self.db.get_pool(context, db_pool.id).to_dict()
def add_provider(self, provider):
    """Register a provider after driver/default uniqueness checks."""
    self._ensure_driver_unique(provider['driver'])
    self._ensure_default_unique(provider['service_type'],
                                provider['default'])
    key = (provider['service_type'], provider['name'])
    if key in self.providers:
        msg = (_("Multiple providers specified for service "
                 "%s") % provider['service_type'])
        LOG.exception(msg)
        raise n_exc.Invalid(msg)
    self.providers[key] = {'driver': provider['driver'],
                           'default': provider['default']}
def _get_gateway_ip(self, subnet): if ('gateway_ip' not in subnet) or (not subnet['gateway_ip']): emsg = "subnet(%(subnet_id)s) doensn't have gateway_ip" % { 'subnet_id': subnet['id']} LOG.debug(emsg) raise exceptions.Invalid(message=emsg) if ('cidr' not in subnet) or (not subnet['cidr']): emsg = "subnet(%(subnet_id)s) doensn't have cidr" % { 'subnet_id': subnet['id']} LOG.debug(emsg) raise exceptions.Invalid(message=emsg) ip, mask = subnet['cidr'].split('/') if not mask: emsg = "cidr(%(cidr)s) of subnet(%(subnet_id)s)" \ " is wrong format" % { 'cidr': subnet['cidr'], 'subnet_id': subnet['id']} LOG.debug(emsg) raise exceptions.Invalid(message=emsg) return subnet['gateway_ip'] + '/' + mask
def get_logical_device(self, context, pool_id=None, activate=True,
                       **kwargs):
    """Build the logical-device dict for a pool (v1 agent API).

    :param activate: when True, promote the pool/vip/member/monitor
        statuses from any ACTIVE_PENDING state to ACTIVE before
        building the result.
    :raises q_exc.Invalid: if the pool or its vip is not ACTIVE after
        the optional activation pass.
    """
    with context.session.begin(subtransactions=True):
        qry = context.session.query(loadbalancer_db.Pool)
        qry = qry.filter_by(id=pool_id)
        pool = qry.one()

        if activate:
            # set all resources to active
            if pool.status in ACTIVE_PENDING:
                pool.status = constants.ACTIVE

            if pool.vip.status in ACTIVE_PENDING:
                pool.vip.status = constants.ACTIVE

            for m in pool.members:
                if m.status in ACTIVE_PENDING:
                    m.status = constants.ACTIVE

            for hm in pool.monitors:
                if hm.status in ACTIVE_PENDING:
                    hm.status = constants.ACTIVE

        if (pool.status != constants.ACTIVE
            or pool.vip.status != constants.ACTIVE):
            raise q_exc.Invalid(_('Expected active pool and vip'))

        retval = {}
        retval['pool'] = self.plugin._make_pool_dict(pool)
        retval['vip'] = self.plugin._make_vip_dict(pool.vip)
        retval['vip']['port'] = (self.plugin._core_plugin._make_port_dict(
            pool.vip.port))
        # Inline subnet details for each of the vip port's fixed ips.
        for fixed_ip in retval['vip']['port']['fixed_ips']:
            fixed_ip['subnet'] = (self.plugin._core_plugin.get_subnet(
                context, fixed_ip['subnet_id']))

        # Members in a deployable state (active or admin-down).
        retval['members'] = [
            self.plugin._make_member_dict(m) for m in pool.members
            if m.status in (constants.ACTIVE, constants.INACTIVE)
        ]
        retval['healthmonitors'] = [
            self.plugin._make_health_monitor_dict(hm.healthmonitor)
            for hm in pool.monitors if hm.status == constants.ACTIVE
        ]

        return retval
def update_status(self, context, obj_type, obj_id, status):
    """Update the status column of a LBaaS v1 object."""
    model_mapping = {
        'pool': loadbalancer_db.Pool,
        'vip': loadbalancer_db.Vip,
        'member': loadbalancer_db.Member,
        'health_monitor': loadbalancer_db.PoolMonitorAssociation,
    }
    model = model_mapping.get(obj_type)
    if model is None:
        raise q_exc.Invalid(_('Unknown object type: %s') % obj_type)
    if obj_type == 'health_monitor':
        # Monitor status lives on the (monitor, pool) association row.
        self.plugin.update_pool_health_monitor(
            context, obj_id['monitor_id'], obj_id['pool_id'], status)
    else:
        self.plugin.update_status(context, model, obj_id, status)
def update_status(self, context, obj_type, obj_id, status):
    """Update status for a v2 object and notify on a status change.

    Accepts 'loadbalancer' and 'listener' as object types, but only
    'loadbalancer' is actually written to the DB and notified.
    """
    model_mapping = {
        'loadbalancer': loadbalancer_dbv2.LoadBalancer,
        'listener': loadbalancer_dbv2.Listener,
    }
    if obj_type not in model_mapping:
        raise n_exc.Invalid(_('Unknown object type: %s') % obj_type)
    try:
        if obj_type == 'loadbalancer':
            # Capture the pre-update record so a status transition can
            # be detected below.
            lb = self.plugin.db.get_loadbalancer(context, obj_id)
            self.plugin.db.update_status(context, model_mapping[obj_type],
                                         obj_id, status)
            LOG.debug(_('update status: %(obj_type)s %(obj_id)s %(status)s '),
                      {'obj_type': obj_type, 'obj_id': obj_id,
                       'status': status})
            if (lb and lb.status != status):
                LOG.info(_('update status: %(obj_type)s %(obj_id)s %(status)s notified'),
                         {'obj_type': obj_type, 'obj_id': obj_id,
                          'status': status})
                # Status actually changed: emit an update notification.
                notifier = n_rpc.get_notifier('loadbalancer')
                notifier.info(context, 'loadbalancer.update.end',
                              lb.to_dict())
        else:
            # NOTE(review): 'listener' passes the mapping check above
            # but is never persisted here -- it only logs this warning.
            # Confirm whether listener status updates are expected to
            # be written to the DB.
            LOG.warning(_('Cannot update status: %(obj_type)s %(obj_id)s '
                          'the object type not supported'),
                        {'obj_type': obj_type, 'obj_id': obj_id})
            pass
    except n_exc.NotFound:
        # update_status may come from agent on an object which was
        # already deleted from db with other request
        LOG.warning(_('Cannot update status: %(obj_type)s %(obj_id)s '
                      'not found in the DB, it was probably deleted '
                      'concurrently'),
                    {'obj_type': obj_type, 'obj_id': obj_id})