Example #1
def _check_opt(url):
    if not url:
        raise cfg.RequiredOptError('url', cfg.OptGroup('ml2_odl'))
    required_opts = ('url', 'username', 'password')
    for opt in required_opts:
        if not getattr(cfg.CONF.ml2_odl, opt):
            raise cfg.RequiredOptError(opt, cfg.OptGroup('ml2_odl'))
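
For the getattr(cfg.CONF.ml2_odl, opt) checks above to work, the [ml2_odl] options must be registered first. A minimal registration sketch, assuming illustrative option definitions rather than networking-odl's exact ones:

# Sketch: register the [ml2_odl] options that _check_opt() inspects.
# Help strings and the absence of defaults are assumptions for illustration.
from oslo_config import cfg

odl_group = cfg.OptGroup('ml2_odl')
odl_opts = [
    cfg.StrOpt('url', help='HTTP URL of the OpenDaylight REST interface.'),
    cfg.StrOpt('username', help='HTTP username for OpenDaylight.'),
    cfg.StrOpt('password', secret=True,
               help='HTTP password for OpenDaylight.'),
]
cfg.CONF.register_group(odl_group)
cfg.CONF.register_opts(odl_opts, group=odl_group)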
Example #2
def _get_pod_sgs(pod, project_id):
    sg_list = []

    pod_labels = pod['metadata'].get('labels')
    pod_namespace = pod['metadata']['namespace']

    knp_crds = driver_utils.get_kuryrnetpolicy_crds(namespace=pod_namespace)
    for crd in knp_crds.get('items'):
        pod_selector = crd['spec'].get('podSelector')
        if pod_selector:
            if driver_utils.match_selector(pod_selector, pod_labels):
                LOG.debug("Appending %s", str(crd['spec']['securityGroupId']))
                sg_list.append(str(crd['spec']['securityGroupId']))
        else:
            LOG.debug("Appending %s", str(crd['spec']['securityGroupId']))
            sg_list.append(str(crd['spec']['securityGroupId']))

    # NOTE(maysams) Pods that are not selected by any Networkpolicy
    # are fully accessible. Thus, the default security group is associated.
    if not sg_list:
        sg_list = config.CONF.neutron_defaults.pod_security_groups
        if not sg_list:
            raise cfg.RequiredOptError('pod_security_groups',
                                       cfg.OptGroup('neutron_defaults'))

    return sg_list[:]
Example #3
def start():
    conf = service.prepare_service()

    if conf.statsd.resource_id is None:
        raise cfg.RequiredOptError("resource_id", cfg.OptGroup("statsd"))

    stats = Stats(conf)

    loop = asyncio.get_event_loop()
    # TODO(jd) Add TCP support
    listen = loop.create_datagram_endpoint(
        lambda: StatsdServer(stats),
        local_addr=(conf.statsd.host, conf.statsd.port))

    def _flush():
        loop.call_later(conf.statsd.flush_delay, _flush)
        stats.flush()

    loop.call_later(conf.statsd.flush_delay, _flush)
    transport, protocol = loop.run_until_complete(listen)

    LOG.info("Started on %s:%d", conf.statsd.host, conf.statsd.port)
    LOG.info("Flush delay: %d seconds", conf.statsd.flush_delay)

    try:
        loop.run_forever()
    except KeyboardInterrupt:
        pass

    transport.close()
    loop.close()
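
The _flush closure above is a self-rescheduling timer: it re-arms itself via loop.call_later before flushing, so a slow or failing flush cannot stop the cycle. A standalone sketch of the same pattern, with all names invented for illustration:

import asyncio

def schedule_periodic(loop, interval, callback):
    # Re-arm first, then do the work, mirroring _flush() in start() above.
    def _tick():
        loop.call_later(interval, _tick)
        callback()
    loop.call_later(interval, _tick)

loop = asyncio.new_event_loop()
schedule_periodic(loop, 1.0, lambda: print("flush"))
loop.call_later(3.5, loop.stop)  # end the demo after a few ticks
loop.run_forever()
loop.close()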
Example #4
    def get_project(self, policy):
        project_id = config.CONF.neutron_defaults.project

        if not project_id:
            raise cfg.RequiredOptError('project',
                                       cfg.OptGroup('neutron_defaults'))
        return project_id
Example #5
def _get_pod_sgs(pod):
    sg_list = []

    pod_labels = pod['metadata'].get('labels')
    pod_namespace = pod['metadata']['namespace']

    knp_crds = driver_utils.get_kuryrnetworkpolicy_crds(
        namespace=pod_namespace)
    for crd in knp_crds:
        pod_selector = crd['spec'].get('podSelector')
        if driver_utils.match_selector(pod_selector, pod_labels):
            sg_id = crd['status'].get('securityGroupId')
            if not sg_id:
                # NOTE(dulek): We could just assume KNP handler will apply it,
                #              but it's possible that when it gets this pod it
                #              will have no IP yet and will be skipped.
                LOG.warning('SG for NP %s not created yet, will retry.',
                            utils.get_res_unique_name(crd))
                raise exceptions.ResourceNotReady(pod)
            LOG.debug("Appending %s", crd['status']['securityGroupId'])
            sg_list.append(crd['status']['securityGroupId'])

    # NOTE(maysams) Pods that are not selected by any Networkpolicy
    # are fully accessible. Thus, the default security group is associated.
    if not sg_list:
        sg_list = config.CONF.neutron_defaults.pod_security_groups
        if not sg_list:
            raise cfg.RequiredOptError('pod_security_groups',
                                       cfg.OptGroup('neutron_defaults'))

    return sg_list[:]
Example #6
    def _get_parent_port(self, neutron, pod):
        node_subnet_id = config.CONF.neutron_defaults.worker_nodes_subnet
        if not node_subnet_id:
            raise oslo_cfg.RequiredOptError('worker_nodes_subnet',
                                            'neutron_defaults')

        try:
            # REVISIT(vikasc): Assumption is being made that hostIP is the IP
            #                  of trunk interface on the node(vm).
            node_fixed_ip = pod['status']['hostIP']
        except KeyError:
            if pod['status']['conditions'][0]['type'] != "Initialized":
                LOG.debug("Pod condition type is not 'Initialized'")

            LOG.error(_LE("Failed to get parent vm port ip"))
            raise

        try:
            fixed_ips = [
                'subnet_id=%s' % str(node_subnet_id),
                'ip_address=%s' % str(node_fixed_ip)
            ]
            ports = neutron.list_ports(fixed_ips=fixed_ips)
        except n_exc.NeutronClientException as ex:
            LOG.error(_LE("Parent vm port with fixed ips %s not found!"),
                      fixed_ips)
            raise ex

        if ports['ports']:
            return ports['ports'][0]
        else:
            LOG.error(
                _LE("Neutron port for vm port with fixed ips %s"
                    " not found!"), fixed_ips)
            raise k_exc.K8sNodeTrunkPortFailure
Example #7
    def on_deleted(self, policy):
        LOG.debug("Deleted network policy: %s", policy)
        project_id = self._drv_project.get_project(policy)
        pods_to_update = self._drv_policy.affected_pods(policy)
        netpolicy_crd = self._drv_policy.get_kuryrnetpolicy_crd(policy)
        if netpolicy_crd:
            crd_sg = netpolicy_crd['spec'].get('securityGroupId')
            for pod in pods_to_update:
                if driver_utils.is_host_network(pod):
                    continue
                pod_sgs = self._drv_pod_sg.get_security_groups(pod, project_id)
                if crd_sg in pod_sgs:
                    pod_sgs.remove(crd_sg)
                if not pod_sgs:
                    pod_sgs = (
                        oslo_cfg.CONF.neutron_defaults.pod_security_groups)
                    if not pod_sgs:
                        raise oslo_cfg.RequiredOptError(
                            'pod_security_groups',
                            oslo_cfg.OptGroup('neutron_defaults'))
                self._drv_vif_pool.update_vif_sgs(pod, pod_sgs)

            self._drv_policy.release_network_policy(netpolicy_crd)

            services = driver_utils.get_services(
                policy['metadata']['namespace'])
            for service in services.get('items'):
                if (service['metadata']['name'] == 'kubernetes'
                        or not self._is_service_affected(
                            service, pods_to_update)):
                    continue
                sgs = self._drv_svc_sg.get_security_groups(service, project_id)
                self._drv_lbaas.update_lbaas_sg(service, sgs)
Example #8
def _validate_configuration_group(conf, main_option, value, options, group):
    if conf[group][main_option.replace("-", "_")] != value:
        return

    for opt in options:
        if not conf[group][opt.replace("-", "_")]:
            raise cfg.RequiredOptError(opt, cfg.OptGroup(group))
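
A hypothetical call to the validator above: when the main option selects a particular mode, the options that mode depends on become mandatory. All option and group names below are invented for illustration:

# Hypothetical: when [service] auth-mode = keystone, require the credentials.
_validate_configuration_group(
    conf, 'auth-mode', 'keystone',
    ('auth-url', 'username', 'password'),
    'service')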
Example #9
    def __init__(self, plugin):
        """Initialize service plugin and load backend driver."""
        super(NsxL2GatewayPlugin, self).__init__()
        self._plugin = plugin
        LOG.debug("Starting service plugin for NSX L2Gateway")
        self._nsx_l2gw_driver = cfg.CONF.nsx_l2gw_driver
        if not getattr(self, "_nsx_l2gw_driver"):
            raise cfg.RequiredOptError("nsx_l2gw_driver")
        self._driver = importutils.import_object(self._nsx_l2gw_driver)
Example #10
def _check_for_namespace_opt(conf):
    # NOTE(bnemec): This opt is required, but due to lp#1849518 we need to
    # make it optional while our consumers migrate to the new method of
    # parsing cli args. Making the arg itself optional and explicitly checking
    # for it in the tools will allow us to migrate projects without breaking
    # anything. Once everyone has migrated, we can make the arg required again
    # and remove this check.
    if conf.namespace is None:
        raise cfg.RequiredOptError('namespace', 'DEFAULT')
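
RequiredOptError is an ordinary exception, so a caller that wants a clean failure message can catch it. A minimal sketch building on the function above:

# Sketch: turn a missing required option into a clean exit (illustrative).
try:
    _check_for_namespace_opt(conf)
except cfg.RequiredOptError as exc:
    print('Missing required option: %s' % exc)
    raise SystemExit(2)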
Example #11
    def get_nodes_subnets(self, raise_on_empty=False):
        node_subnet_ids = CONF.pod_vif_nested.worker_nodes_subnets
        if not node_subnet_ids:
            if raise_on_empty:
                raise cfg.RequiredOptError('worker_nodes_subnets',
                                           cfg.OptGroup('pod_vif_nested'))
            else:
                return []

        return node_subnet_ids
Example #12
    def initialize(self):
        self.url = cfg.CONF.ml2_odl.url
        self.timeout = cfg.CONF.ml2_odl.timeout
        self.username = cfg.CONF.ml2_odl.username
        self.password = cfg.CONF.ml2_odl.password
        required_opts = ('url', 'username', 'password')
        for opt in required_opts:
            if not getattr(self, opt):
                raise cfg.RequiredOptError(opt, 'ml2_odl')

        self.odl_drv = OpenDaylightDriver()
Example #13
    def get_subnets(self, service, project_id):
        subnet_id = config.CONF.neutron_defaults.service_subnet

        if not subnet_id:
            # NOTE(ivc): this option is only required for
            # DefaultServiceSubnetDriver and its subclasses, but it may be
            # optional for other drivers (e.g. when each namespace has own
            # subnet)
            raise cfg.RequiredOptError('service_subnet',
                                       cfg.OptGroup('neutron_defaults'))

        return {subnet_id: _get_subnet(subnet_id)}
Example #14
    def get_project(self, service):
        project_id = config.CONF.neutron_defaults.project

        if not project_id:
            # NOTE(ivc): this option is only required for
            # DefaultServiceProjectDriver and its subclasses, but it may be
            # optional for other drivers (e.g. when each namespace has own
            # project)
            raise cfg.RequiredOptError('project',
                                       cfg.OptGroup('neutron_defaults'))

        return project_id
Example #15
    def initialize(self):
        self.url = cfg.CONF.ml2_odl.url
        self.timeout = cfg.CONF.ml2_odl.timeout
        self.username = cfg.CONF.ml2_odl.username
        self.password = cfg.CONF.ml2_odl.password
        required_opts = ('url', 'username', 'password')
        for opt in required_opts:
            if not getattr(self, opt):
                raise cfg.RequiredOptError(opt, 'ml2_odl')
        self.vif_type = portbindings.VIF_TYPE_OVS
        self.vif_details = {portbindings.CAP_PORT_FILTER: True}
        self.odl_drv = mech_driver.OpenDaylightDriver()
Example #16
    def get_security_groups(self, pod, project_id):
        sg_list = config.CONF.neutron_defaults.pod_security_groups

        if not sg_list:
            # NOTE(ivc): this option is only required for
            # Default{Pod,Service}SecurityGroupsDriver and its subclasses,
            # but it may be optional for other drivers (e.g. when each
            # namespace has own set of security groups)
            raise cfg.RequiredOptError('pod_security_groups',
                                       cfg.OptGroup('neutron_defaults'))

        return sg_list[:]
Example #17
def start():
    conf = service.prepare_service()
    if conf.amqp1d.resource_name is None:
        raise cfg.RequiredOptError("resource_name", cfg.OptGroup("amqp1d"))
    try:
        if conf.amqp1d.data_source == "collectd":
            stats = CollectdStats(conf)
            Container(AMQP1Server(conf, stats)).run()
        else:
            raise ValueError(
                "Unknown data source type '%s'" %
                conf.amqp1d.data_source)
    except KeyboardInterrupt:
        pass
Example #18
def _get_lock_path(name, lock_file_prefix, lock_path=None):
    # NOTE(mikal): the lock name cannot contain directory
    # separators
    name = name.replace(os.sep, '_')
    if lock_file_prefix:
        sep = '' if lock_file_prefix.endswith('-') else '-'
        name = '%s%s%s' % (lock_file_prefix, sep, name)

    local_lock_path = lock_path or CONF.oslo_concurrency.lock_path

    if not local_lock_path:
        raise cfg.RequiredOptError('lock_path')

    return os.path.join(local_lock_path, name)
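
Tracing the name mangling above on POSIX: path separators in the lock name become underscores, the prefix gains a trailing dash unless it already ends with one, and an explicit lock_path argument overrides the configured default:

# Illustrative values; both prefixes produce the same result.
assert _get_lock_path('volume/123', 'cinder', lock_path='/var/lock') == \
    '/var/lock/cinder-volume_123'
assert _get_lock_path('volume/123', 'cinder-', lock_path='/var/lock') == \
    '/var/lock/cinder-volume_123'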
Example #19
    def acquire_service_pub_ip_info(self,
                                    spec_type,
                                    spec_lb_ip,
                                    project_id,
                                    port_id_to_be_associated=None):

        if spec_type != 'LoadBalancer':
            return None

        if spec_lb_ip:
            user_specified_ip = spec_lb_ip.format()
            res_id = self._drv_pub_ip.is_ip_available(
                user_specified_ip, port_id_to_be_associated)
            if res_id:
                service_pub_ip_info = (obj_lbaas.LBaaSPubIp(
                    ip_id=res_id,
                    ip_addr=str(user_specified_ip),
                    alloc_method='user'))

                return service_pub_ip_info
            else:
                # user specified IP is not valid
                LOG.error("IP=%s is not available", user_specified_ip)
                return None
        else:
            LOG.debug("Trying to allocate public ip from pool")

        # get public network/subnet ids from kuryr.conf
        public_network_id = config.CONF.neutron_defaults.external_svc_net
        public_subnet_id = config.CONF.neutron_defaults.external_svc_subnet
        if not public_network_id:
            raise cfg.RequiredOptError('external_svc_net',
                                       cfg.OptGroup('neutron_defaults'))
        try:
            res_id, alloc_ip_addr = (self._drv_pub_ip.allocate_ip(
                public_network_id,
                project_id,
                pub_subnet_id=public_subnet_id,
                description='kuryr_lb',
                port_id_to_be_associated=port_id_to_be_associated))
        except Exception:
            LOG.exception("Failed to allocate public IP - net_id:%s",
                          public_network_id)
            return None
        service_pub_ip_info = obj_lbaas.LBaaSPubIp(ip_id=res_id,
                                                   ip_addr=alloc_ip_addr,
                                                   alloc_method='pool')

        return service_pub_ip_info
Example #20
    def initialize(self):
        """Initialize of variables needed by this class."""

        self._parse_physical_networks()
        self._switch = {
            'address': cfg.CONF.fujitsu_cfab.address,
            'username': cfg.CONF.fujitsu_cfab.username,
            'password': cfg.CONF.fujitsu_cfab.password
        }

        if not self._switch['address']:
            raise cfg.RequiredOptError('address',
                                       cfg.OptGroup(ML2_FUJITSU_GROUP))

        self._driver = importutils.import_object(CFAB_DRIVER, cfg.CONF)
Example #21
    def init_profile_id(self):
        """Init the service insertion profile ID

        Initialize the profile id that should be assigned to the redirect
        rules from the nsx configuration and verify that it exists on backend.
        """
        if not cfg.CONF.nsxv.service_insertion_profile_id:
            raise cfg.RequiredOptError("service_profile_id")
        self._profile_id = cfg.CONF.nsxv.service_insertion_profile_id

        # Verify that this moref exists
        if not self._nsxv.vcns.validate_inventory(self._profile_id):
            error = (_("Configured service profile ID: %s not found") %
                     self._profile_id)
            raise nsx_exc.NsxPluginException(err_msg=error)
Example #22
def neutron_to_osvif_vif_ovs(vif_plugin, neutron_port, subnets):
    """Converts Neutron port to VIF object for os-vif 'ovs' plugin.

    :param vif_plugin: name of the os-vif plugin to use (i.e. 'ovs')
    :param neutron_port: dict containing port information as returned by
                         neutron client's 'show_port'
    :param subnets: subnet mapping as returned by PodSubnetsDriver.get_subnets
    :return: os-vif VIF object
    """

    profile = osv_vif.VIFPortProfileOpenVSwitch(
        interface_id=neutron_port['id'])

    details = neutron_port.get('binding:vif_details', {})
    ovs_bridge = details.get('bridge_name',
                             config.CONF.neutron_defaults.ovs_bridge)
    if not ovs_bridge:
        raise oslo_cfg.RequiredOptError('ovs_bridge', 'neutron_defaults')

    network = _make_vif_network(neutron_port, subnets)
    network.bridge = ovs_bridge

    if details.get('ovs_hybrid_plug'):
        vif = osv_vif.VIFBridge(
            id=neutron_port['id'],
            address=neutron_port['mac_address'],
            network=network,
            has_traffic_filtering=details.get('port_filter', False),
            preserve_on_delete=False,
            active=_is_port_active(neutron_port),
            port_profile=profile,
            plugin=vif_plugin,
            vif_name=_get_vif_name(neutron_port),
            bridge_name=_get_ovs_hybrid_bridge_name(neutron_port))
    else:
        vif = osv_vif.VIFOpenVSwitch(id=neutron_port['id'],
                                     address=neutron_port['mac_address'],
                                     network=network,
                                     has_traffic_filtering=details.get(
                                         'port_filter', False),
                                     preserve_on_delete=False,
                                     active=_is_port_active(neutron_port),
                                     port_profile=profile,
                                     plugin=vif_plugin,
                                     vif_name=_get_vif_name(neutron_port),
                                     bridge_name=network.bridge)

    return vif
Example #23
    def acquire_service_pub_ip_info(self, spec_type, spec_lb_ip, project_id):

        if spec_type != 'LoadBalancer':
            return None

        if spec_lb_ip:
            user_specified_ip = spec_lb_ip.format()
            res_id = self._drv_pub_ip.is_ip_available(user_specified_ip)
            if res_id:
                service_pub_ip_info = (obj_lbaas.LBaaSPubIp(
                                       ip_id=res_id,
                                       ip_addr=str(user_specified_ip),
                                       alloc_method='user'))

                return service_pub_ip_info
            else:
                # user specified IP is not valid
                LOG.error("IP=%s is not available", user_specified_ip)
                return None
        else:
            LOG.debug("Trying to allocate public ip from pool")

        # get public subnet id from kuryr.conf
        external_svc_subnet = config.CONF.neutron_defaults.external_svc_subnet
        if not external_svc_subnet:
            raise cfg.RequiredOptError('external_svc_subnet',
                                       cfg.OptGroup('neutron_defaults'))

        neutron = clients.get_neutron_client()
        n_subnet = neutron.show_subnet(external_svc_subnet).get('subnet')
        if not n_subnet:
            LOG.error(
                "No subnet found for external_svc_subnet=%s",
                external_svc_subnet)
            raise kl_exc.NoResourceException

        public_network_id = n_subnet['network_id']

        res_id, alloc_ip_addr = (self._drv_pub_ip.allocate_ip
                                 (public_network_id,
                                  external_svc_subnet,
                                  project_id,
                                  'kuryr_lb'))
        service_pub_ip_info = obj_lbaas.LBaaSPubIp(ip_id=res_id,
                                                   ip_addr=alloc_ip_addr,
                                                   alloc_method='pool')

        return service_pub_ip_info
Example #24
def neutron_to_osvif_vif_ovs(vif_plugin, os_port, subnets):
    """Converts Neutron port to VIF object for os-vif 'ovs' plugin.

    :param vif_plugin: name of the os-vif plugin to use (i.e. 'ovs')
    :param os_port: openstack.network.v2.port.Port object
    :param subnets: subnet mapping as returned by PodSubnetsDriver.get_subnets
    :return: os-vif VIF object
    """
    profile = osv_vif.VIFPortProfileOpenVSwitch(interface_id=os_port.id)

    details = os_port.binding_vif_details or {}
    ovs_bridge = details.get('bridge_name',
                             config.CONF.neutron_defaults.ovs_bridge)
    if not ovs_bridge:
        raise oslo_cfg.RequiredOptError('ovs_bridge', 'neutron_defaults')

    network = _make_vif_network(os_port, subnets)
    network.bridge = ovs_bridge

    if details.get('ovs_hybrid_plug'):
        vif = osv_vif.VIFBridge(
            id=os_port.id,
            address=os_port.mac_address,
            network=network,
            has_traffic_filtering=details.get('port_filter', False),
            preserve_on_delete=False,
            active=_is_port_active(os_port),
            port_profile=profile,
            plugin=vif_plugin,
            vif_name=_get_vif_name(os_port),
            bridge_name=_get_ovs_hybrid_bridge_name(os_port))
    else:
        vif = osv_vif.VIFOpenVSwitch(id=os_port.id,
                                     address=os_port.mac_address,
                                     network=network,
                                     has_traffic_filtering=details.get(
                                         'port_filter', False),
                                     preserve_on_delete=False,
                                     active=_is_port_active(os_port),
                                     port_profile=profile,
                                     plugin=vif_plugin,
                                     vif_name=_get_vif_name(os_port),
                                     bridge_name=network.bridge)

    return vif
Example #25
    def on_deleted(self, policy):
        LOG.debug("Deleted network policy: %s", policy)
        project_id = self._drv_project.get_project(policy)
        pods_to_update = self._drv_policy.affected_pods(policy)
        netpolicy_crd = self._drv_policy.get_kuryrnetpolicy_crd(policy)
        if netpolicy_crd:
            crd_sg = netpolicy_crd['spec'].get('securityGroupId')
            for pod in pods_to_update:
                if driver_utils.is_host_network(pod):
                    continue
                pod_sgs = self._drv_pod_sg.get_security_groups(pod,
                                                               project_id)
                if crd_sg in pod_sgs:
                    pod_sgs.remove(crd_sg)
                if not pod_sgs:
                    pod_sgs = (
                        oslo_cfg.CONF.neutron_defaults.pod_security_groups)
                    if not pod_sgs:
                        raise oslo_cfg.RequiredOptError(
                            'pod_security_groups',
                            oslo_cfg.OptGroup('neutron_defaults'))
                try:
                    self._drv_vif_pool.update_vif_sgs(pod, pod_sgs)
                except os_exc.NotFoundException:
                    LOG.debug("Fail to update pod sgs."
                              " Retrying policy deletion.")
                    raise exceptions.ResourceNotReady(policy)

            # ensure ports at the pool don't have the NP sg associated
            net_id = self._get_policy_net_id(policy)
            self._drv_vif_pool.remove_sg_from_pools(crd_sg, net_id)

            self._drv_policy.release_network_policy(netpolicy_crd)

            if (oslo_cfg.CONF.octavia_defaults.enforce_sg_rules and
                    not self._is_egress_only_policy(policy)):
                services = driver_utils.get_services(
                    policy['metadata']['namespace'])
                for svc in services.get('items'):
                    if (not svc['spec'].get('selector') or not
                            self._is_service_affected(svc, pods_to_update)):
                        continue
                    sgs = self._drv_svc_sg.get_security_groups(svc,
                                                               project_id)
                    self._drv_lbaas.update_lbaas_sg(svc, sgs)
Example #26
def external_lock(name, lock_file_prefix=None):
    with internal_lock(name):
        LOG.debug(_('Attempting to grab external lock "%(lock)s"'),
                  {'lock': name})

        # NOTE(mikal): the lock name cannot contain directory
        # separators
        name = name.replace(os.sep, '_')
        if lock_file_prefix:
            sep = '' if lock_file_prefix.endswith('-') else '-'
            name = '%s%s%s' % (lock_file_prefix, sep, name)

        if not CONF.lock_path:
            raise cfg.RequiredOptError('lock_path')

        lock_file_path = os.path.join(CONF.lock_path, name)

        return InterProcessLock(lock_file_path)
Example #27
def _get_lock_path(name, lock_file_prefix, lock_path=None):
    # NOTE(mikal): the lock name cannot contain directory
    # separators
    name = name.replace(os.sep, '_')
    if lock_file_prefix:
        sep = '' if lock_file_prefix.endswith('-') else '-'
        name = '%s%s%s' % (lock_file_prefix, sep, name)

    local_lock_path = lock_path or CONF.lock_path

    if not local_lock_path:
        # NOTE(bnemec): Create a fake lock path for posix locks so we don't
        # unnecessarily raise the RequiredOptError below.
        if InterProcessLock is not _PosixLock:
            raise cfg.RequiredOptError('lock_path')
        local_lock_path = 'posixlock:/'

    return os.path.join(local_lock_path, name)
Example #28
    def on_deleted(self, policy):
        LOG.debug("Deleted network policy: %s", policy)
        project_id = self._drv_project.get_project(policy)
        pods_to_update = self._drv_policy.affected_pods(policy)
        netpolicy_crd = self._drv_policy.get_kuryrnetpolicy_crd(policy)
        crd_sg = netpolicy_crd['spec'].get('securityGroupId')
        for pod in pods_to_update:
            if driver_utils.is_host_network(pod):
                continue
            pod_sgs = self._drv_pod_sg.get_security_groups(pod, project_id)
            if crd_sg in pod_sgs:
                pod_sgs.remove(crd_sg)
            if not pod_sgs:
                pod_sgs = oslo_cfg.CONF.neutron_defaults.pod_security_groups
                if not pod_sgs:
                    raise oslo_cfg.RequiredOptError(
                        'pod_security_groups',
                        oslo_cfg.OptGroup('neutron_defaults'))
            self._drv_vif_pool.update_vif_sgs(pod, pod_sgs)

        self._drv_policy.release_network_policy(netpolicy_crd)
Example #29
    def _get_parent_port_by_host_ip(self, neutron, node_fixed_ip):
        node_subnet_id = oslo_cfg.CONF.pod_vif_nested.worker_nodes_subnet
        if not node_subnet_id:
            raise oslo_cfg.RequiredOptError(
                'worker_nodes_subnet', oslo_cfg.OptGroup('pod_vif_nested'))

        try:
            fixed_ips = ['subnet_id=%s' % str(node_subnet_id),
                         'ip_address=%s' % str(node_fixed_ip)]
            ports = neutron.list_ports(fixed_ips=fixed_ips)
        except n_exc.NeutronClientException:
            LOG.error("Parent vm port with fixed ips %s not found!",
                      fixed_ips)
            raise

        if ports['ports']:
            return ports['ports'][0]
        else:
            LOG.error("Neutron port for vm port with fixed ips %s"
                      " not found!", fixed_ips)
            raise kl_exc.NoResourceException
Example #30
    def __init__(self, **kwargs):
        """Initialize a new client for the plugin."""
        self.format = 'json'

        # Extract configuration parameters from the configuration file.
        self.n1kv_vsm_ips = cfg.CONF.ml2_cisco_n1kv.n1kv_vsm_ips
        self.username = cfg.CONF.ml2_cisco_n1kv.username
        self.password = cfg.CONF.ml2_cisco_n1kv.password
        self.action_prefix = 'http://%s/api/n1k'
        self.timeout = cfg.CONF.ml2_cisco_n1kv.http_timeout
        required_opts = ('n1kv_vsm_ips', 'username', 'password')
        # Validate whether required options are configured
        for opt in required_opts:
            if not getattr(self, opt):
                raise cfg.RequiredOptError(opt, 'ml2_cisco_n1kv')
        # Validate the configured VSM IP addresses
        # Note: Currently only support IPv4
        for vsm_ip in self.n1kv_vsm_ips:
            if not netaddr.valid_ipv4(vsm_ip):
                raise cfg.Error(
                    _("Cisco Nexus1000V ML2 driver config: "
                      "Invalid format for VSM IP address: %s") % vsm_ip)