def __init__(self, base_url):
    """Initialize the API client with credentials from configuration.

    Reads the [kubernetes] config section: either a bearer token file or
    a client certificate/key pair, plus optional server CA verification.

    :param base_url: root URL of the Kubernetes API server
    :raises RuntimeError: when a configured credential file is missing, or
        server verification is enabled without a usable CA certificate
    """
    self._base_url = base_url
    k8s_conf = config.CONF.kubernetes
    crt_path = k8s_conf.ssl_client_crt_file
    key_path = k8s_conf.ssl_client_key_file
    ca_path = k8s_conf.ssl_ca_crt_file
    self.verify_server = k8s_conf.ssl_verify_server_crt
    token_path = k8s_conf.token_file
    self.token = None
    self.cert = (None, None)

    if token_path:
        # Token auth takes precedence over client certificates.
        if not os.path.exists(token_path):
            raise RuntimeError(
                _("Unable to find token_file : %s") % token_path)
        with open(token_path, 'r') as f:
            self.token = f.readline().rstrip('\n')
    else:
        # Certificate auth: validate each configured file before use.
        if crt_path and not os.path.exists(crt_path):
            raise RuntimeError(
                _("Unable to find ssl cert_file : %s") % crt_path)
        if key_path and not os.path.exists(key_path):
            raise RuntimeError(
                _("Unable to find ssl key_file : %s") % key_path)
        self.cert = (crt_path, key_path)

    if self.verify_server:
        if not ca_path:
            raise RuntimeError(_("ssl_ca_crt_file cannot be None"))
        if not os.path.exists(ca_path):
            raise RuntimeError(
                _("Unable to find ca cert_file : %s") % ca_path)
        # A CA bundle path is a valid 'verify' value for requests.
        self.verify_server = ca_path
def __init__(self, base_url):
    """Build a Kubernetes API client from [kubernetes] configuration.

    Prepares a requests.Session preloaded with the TLS/token credentials
    and a default (connect, read) timeout applied to every request.

    :param base_url: root URL of the Kubernetes API server
    :raises RuntimeError: when a configured credential file is missing, or
        server verification is enabled without a usable CA certificate
    """
    self._base_url = base_url
    cert_file = config.CONF.kubernetes.ssl_client_crt_file
    key_file = config.CONF.kubernetes.ssl_client_key_file
    ca_crt_file = config.CONF.kubernetes.ssl_ca_crt_file
    self.verify_server = config.CONF.kubernetes.ssl_verify_server_crt
    token_file = config.CONF.kubernetes.token_file
    self.token = None
    self.cert = (None, None)
    self.are_events_enabled = config.CONF.kubernetes.use_events
    # Setting higher numbers regarding connection pools as we're running
    # with max of 1000 green threads.
    self.session = requests.Session()
    prefix = '%s://' % parse.urlparse(base_url).scheme
    self.session.mount(prefix, adapters.HTTPAdapter(pool_maxsize=1000))
    if token_file:
        # Token auth takes precedence over client certificates.
        if os.path.exists(token_file):
            with open(token_file, 'r') as f:
                self.token = f.readline().rstrip('\n')
        else:
            raise RuntimeError(
                _("Unable to find token_file : %s") % token_file)
    else:
        # Certificate auth: validate each configured file before use.
        if cert_file and not os.path.exists(cert_file):
            raise RuntimeError(
                _("Unable to find ssl cert_file : %s") % cert_file)
        if key_file and not os.path.exists(key_file):
            raise RuntimeError(
                _("Unable to find ssl key_file : %s") % key_file)
        self.cert = (cert_file, key_file)
    if self.verify_server:
        if not ca_crt_file:
            raise RuntimeError(
                _("ssl_ca_crt_file cannot be None"))
        elif not os.path.exists(ca_crt_file):
            raise RuntimeError(
                _("Unable to find ca cert_file : %s") % ca_crt_file)
        else:
            # A CA bundle path is a valid 'verify' value for requests.
            self.verify_server = ca_crt_file
    # Let's setup defaults for our Session.
    self.session.cert = self.cert
    self.session.verify = self.verify_server
    if self.token:
        self.session.headers['Authorization'] = f'Bearer {self.token}'
    # NOTE(dulek): Seems like this is the only way to set it globally.
    self.session.request = functools.partial(
        self.session.request, timeout=(
            CONF.kubernetes.watch_connection_timeout,
            CONF.kubernetes.watch_read_timeout))
def _get_ssl_configs(use_ssl): if use_ssl: cert_file = config.CONF.ssl_cert_file key_file = config.CONF.ssl_key_file if not os.path.exists(cert_file): raise RuntimeError(_("Unable to find cert_file : %s") % cert_file) if not os.path.exists(key_file): raise RuntimeError(_("Unable to find key_file : %s") % key_file) return cert_file, key_file else: return None
def _make_vif_network(neutron_port, subnets):
    """Get an os-vif Network object for port.

    :param neutron_port: dict containing port information as returned by
                         neutron client's 'show_port'
    :param subnets: subnet mapping as returned by PodSubnetsDriver.get_subnets
    :return: os-vif Network object
    :raises k_exc.IntegrityError: when the port's network is not part of
        the requested subnet mapping
    """
    port_net_id = neutron_port.get('network_id')
    matching = [net for net in subnets.values() if net.id == port_net_id]
    if not matching:
        raise k_exc.IntegrityError(
            _("Port %(port_id)s belongs to network %(network_id)s, "
              "but requested networks are: %(requested_networks)s") % {
                'port_id': neutron_port.get('id'),
                'network_id': port_net_id,
                'requested_networks': [net.id for net in subnets.values()]
            })

    network = matching[0].obj_clone()
    network.subnets = osv_subnet.SubnetList(
        objects=_make_vif_subnets(neutron_port, subnets))
    return network
def _make_vif_subnets(neutron_port, subnets):
    """Gets a list of os-vif Subnet objects for port.

    :param neutron_port: dict containing port information as returned by
                         neutron client's 'show_port'
    :param subnets: subnet mapping as returned by PodSubnetsDriver.get_subnets
    :return: list of os-vif Subnet object
    :raises k_exc.IntegrityError: when none of the port's fixed IPs falls
        into a subnet from the mapping
    """
    vif_subnets = {}
    for fixed_ip in neutron_port.get('fixed_ips', []):
        sid = fixed_ip['subnet_id']
        # Ignore fixed IPs from subnets outside the requested mapping.
        if sid not in subnets:
            continue
        if sid not in vif_subnets:
            vif_subnets[sid] = _make_vif_subnet(subnets, sid)
        vif_subnets[sid].ips.objects.append(
            osv_fixed_ip.FixedIP(address=fixed_ip['ip_address']))

    if not vif_subnets:
        raise k_exc.IntegrityError(
            _("No valid subnets found for port %(port_id)s")
            % {'port_id': neutron_port.get('id')})

    return list(vif_subnets.values())
def _make_vif_network(neutron_port, subnets):
    """Get an os-vif Network object for port.

    :param neutron_port: dict containing port information as returned by
                         neutron client's 'show_port', or
                         openstack.network.v2.port.Port object
    :param subnets: subnet mapping as returned by PodSubnetsDriver.get_subnets
    :return: os-vif Network object
    :raises k_exc.IntegrityError: when the port's network is not part of
        the requested subnet mapping
    """
    # NOTE(gryf): Because we didn't convert macvlan driver, neutron_port can
    # be either a dict or an object
    try:
        network_id = neutron_port.get('network_id')
        port_id = neutron_port.get('id')
    except TypeError:
        network_id = neutron_port.network_id
        port_id = neutron_port.id

    candidates = (net for net in subnets.values() if net.id == network_id)
    base_network = next(candidates, None)
    if base_network is None:
        raise k_exc.IntegrityError(
            _("Port %(port_id)s belongs to network %(network_id)s, "
              "but requested networks are: %(requested_networks)s") % {
                'port_id': port_id,
                'network_id': network_id,
                'requested_networks': [net.id for net in subnets.values()]
            })

    network = base_network.obj_clone()
    network.subnets = osv_subnet.SubnetList(
        objects=_make_vif_subnets(neutron_port, subnets))
    return network
def get_instance(cls):
    """Get an implementing driver instance.

    The stevedore manager for each driver alias is created once and
    cached in _DRIVER_MANAGERS for subsequent calls.

    :raises TypeError: when the loaded driver is not a subclass of *cls*
    """
    alias = cls.ALIAS
    manager = _DRIVER_MANAGERS.get(alias)
    if manager is None:
        name = config.CONF.kubernetes[alias + '_driver']
        manager = stv_driver.DriverManager(
            namespace="%s.%s" % (_DRIVER_NAMESPACE_BASE, alias),
            name=name,
            invoke_on_load=True)
        _DRIVER_MANAGERS[alias] = manager

    driver = manager.driver
    if not isinstance(driver, cls):
        raise TypeError(
            _("Invalid %(alias)r driver type: %(driver)s, "
              "must be a subclass of %(type)s") % {
                'alias': alias,
                'driver': driver.__class__.__name__,
                'type': cls
            })
    return driver
def _get_vf_info(self, pci, driver):
    """Look up VF name/index and PF name plus PCI info for a VF device.

    :param pci: PCI address of the virtual function
    :param driver: name of the driver currently bound to the device
    :return: tuple (vf_name, vf_index, pf_name, pci_info); elements may be
        None when they cannot be determined from sysfs
    :raises OSError: when the device has no netdev name and is not bound
        to a userspace driver
    """
    vf_sys_path = '/sys/bus/pci/devices/{}/net/'.format(pci)
    if not os.path.exists(vf_sys_path):
        # Userspace drivers unbind the kernel netdev, so a missing net/
        # directory is only an error for the remaining (kernel) drivers.
        if driver not in constants.USERSPACE_DRIVERS:
            raise OSError(_("No vf name for device {}").format(pci))
        vf_name = None
    else:
        vf_names = os.listdir(vf_sys_path)
        vf_name = vf_names[0]
    pfysfn_path = '/sys/bus/pci/devices/{}/physfn/net/'.format(pci)
    # If physical function is not specified in VF's directory then
    # this VF belongs to current VM node
    if not os.path.exists(pfysfn_path):
        LOG.info("Current device %s is a virtual function which is "
                 "passed into VM. Getting it's pci info", vf_name)
        pci_info = self._get_vf_pci_info(pci)
        return vf_name, None, None, pci_info
    pf_names = os.listdir(pfysfn_path)
    pf_name = pf_names[0]
    nvfs = self._get_total_vfs(pf_name)
    pf_sys_path = '/sys/class/net/{}/device'.format(pf_name)
    # Scan the PF's virtfn* symlinks to find this VF's index.
    for vf_index in range(nvfs):
        virtfn_path = os.path.join(pf_sys_path,
                                   'virtfn{}'.format(vf_index))
        vf_pci = os.path.basename(os.readlink(virtfn_path))
        if vf_pci == pci:
            pci_info = self._get_pci_info(pf_name, vf_index)
            return vf_name, vf_index, pf_name, pci_info
    # VF not found under its PF's virtfn links.
    return None, None, None, None
def _acquire(self, path):
    """Take the inter-process lock backed by the file at *path*.

    :param path: filesystem path backing the lock
    :return: result of InterProcessLock.acquire()
    :raises RuntimeError: if this object already holds an acquired lock
    """
    if self._lock and self._lock.acquired:
        msg = _("Attempting to lock {} when {} "
                "is already locked.").format(path, self._lock)
        raise RuntimeError(msg)
    self._lock = lockutils.InterProcessLock(path=path)
    return self._lock.acquire()
def _get_ssl_configs(use_ssl): if use_ssl: cert_file = config.CONF.ssl_cert_file key_file = config.CONF.ssl_key_file if not os.path.exists(cert_file): raise RuntimeError( _("Unable to find cert_file : %s") % cert_file) if not os.path.exists(key_file): raise RuntimeError( _("Unable to find key_file : %s") % key_file) return cert_file, key_file else: return None
def osvif_to_neutron_fixed_ips(subnets):
    """Convert an os-vif subnet mapping into Neutron fixed-ip dicts.

    :param subnets: mapping of subnet ID -> os-vif Network holding exactly
        one Subnet object
    :return: list of {'subnet_id': ...} dicts, each with 'ip_address' when
        the subnet carries addresses
    :raises k_exc.IntegrityError: when a network holds more than one subnet
    """
    fixed_ips = []
    for subnet_id, network in subnets.items():
        subnet_objs = network.subnets.objects
        if len(subnet_objs) > 1:
            raise k_exc.IntegrityError(
                _("Network object for subnet %(subnet_id)s is invalid, "
                  "must contain a single subnet, but %(num_subnets)s found")
                % {'subnet_id': subnet_id,
                   'num_subnets': len(subnet_objs)})

        addresses = [str(ip.address)
                     for subnet in subnet_objs
                     if subnet.obj_attr_is_set('ips')
                     for ip in subnet.ips.objects]
        if addresses:
            fixed_ips.extend({'subnet_id': subnet_id, 'ip_address': ip}
                             for ip in addresses)
        else:
            # Let Neutron pick an address from the subnet.
            fixed_ips.append({'subnet_id': subnet_id})
    return fixed_ips
def __init__(self, base_url):
    """Build a Kubernetes API client from [kubernetes] configuration.

    Prepares a requests.Session for connection pooling and collects the
    credential/timeout keyword arguments reused by request calls.

    :param base_url: root URL of the Kubernetes API server
    :raises RuntimeError: when a configured credential file is missing, or
        server verification is enabled without a usable CA certificate
    """
    self._base_url = base_url
    cert_file = config.CONF.kubernetes.ssl_client_crt_file
    key_file = config.CONF.kubernetes.ssl_client_key_file
    ca_crt_file = config.CONF.kubernetes.ssl_ca_crt_file
    self.verify_server = config.CONF.kubernetes.ssl_verify_server_crt
    token_file = config.CONF.kubernetes.token_file
    self.token = None
    self.cert = (None, None)
    # Setting higher numbers regarding connection pools as we're running
    # with max of 1000 green threads.
    self.session = requests.Session()
    prefix = '%s://' % parse.urlparse(base_url).scheme
    self.session.mount(prefix, adapters.HTTPAdapter(pool_maxsize=1000))
    if token_file:
        # Token auth takes precedence over client certificates.
        if os.path.exists(token_file):
            with open(token_file, 'r') as f:
                self.token = f.readline().rstrip('\n')
        else:
            raise RuntimeError(
                _("Unable to find token_file : %s") % token_file)
    else:
        # Certificate auth: validate each configured file before use.
        if cert_file and not os.path.exists(cert_file):
            raise RuntimeError(
                _("Unable to find ssl cert_file : %s") % cert_file)
        if key_file and not os.path.exists(key_file):
            raise RuntimeError(
                _("Unable to find ssl key_file : %s") % key_file)
        self.cert = (cert_file, key_file)
    if self.verify_server:
        if not ca_crt_file:
            raise RuntimeError(_("ssl_ca_crt_file cannot be None"))
        elif not os.path.exists(ca_crt_file):
            raise RuntimeError(
                _("Unable to find ca cert_file : %s") % ca_crt_file)
        else:
            # A CA bundle path is a valid 'verify' value for requests.
            self.verify_server = ca_crt_file
    # Keyword arguments applied to every request made by this client.
    self._rq_params = {
        'cert': self.cert,
        'verify': self.verify_server,
        'timeout': (CONF.kubernetes.watch_connection_timeout,
                    CONF.kubernetes.watch_read_timeout),
    }
def _get_vhostport_type(vif):
    """Map a vhost-user VIF mode to the corresponding OVS port type.

    :param vif: os-vif VIF object with a vhost-user 'mode' field
    :return: OVS interface type string
    :raises k_exc.IntegrityError: for an unrecognized vhost-user mode
    """
    mode_to_type = {
        osv_fields.VIFVHostUserMode.SERVER: 'dpdkvhostuserclient',
        osv_fields.VIFVHostUserMode.CLIENT: 'dpdkvhostuser',
    }
    port_type = mode_to_type.get(vif.mode)
    if port_type is None:
        raise k_exc.IntegrityError(
            _("Unknown vhostuser mode %(mode)s for vif %(vif_id)s")
            % {'mode': vif.mode, 'vif_id': vif.id})
    return port_type
def _get_service_link(self, endpoints): ep_link = endpoints['metadata']['selfLink'] link_parts = ep_link.split('/') if link_parts[-2] != 'endpoints': raise k_exc.IntegrityError( _("Unsupported endpoints link: %(link)s") % {'link': ep_link}) link_parts[-2] = 'services' return "/".join(link_parts)
def _get_endpoints_link_by_route(self, route_link, ep_name):
    """Build the endpoints link corresponding to an OpenShift route link.

    Rebases the link from the OCP API root onto the Kubernetes API root
    and swaps the 'routes' path component for 'endpoints'/<ep_name>.

    :param route_link: selfLink of the route object
    :param ep_name: name of the endpoints object
    :return: link to the endpoints object
    :raises k_exc.IntegrityError: for a link not shaped like .../routes/<name>
    """
    route_link = route_link.replace(ocp_const.OCP_API_BASE,
                                    k_const.K8S_API_BASE)
    parts = route_link.split('/')
    if parts[-2] != 'routes':
        raise k_exc.IntegrityError(
            _("Unsupported route link: %(link)s") % {'link': route_link})
    return "/".join(parts[:-2] + ['endpoints', ep_name])
def _get_endpoints_link(self, service): svc_link = service['metadata']['selfLink'] link_parts = svc_link.split('/') if link_parts[-2] != 'services': raise k_exc.IntegrityError( _("Unsupported service link: %(link)s") % {'link': svc_link}) link_parts[-2] = 'endpoints' return "/".join(link_parts)
def _get_network_id(self, subnets):
    """Return the single Neutron network ID behind a subnet mapping.

    :param subnets: subnet mapping as returned by PodSubnetsDriver.get_subnets
    :return: the network ID
    :raises k_exc.IntegrityError: when the mapping does not resolve to
        exactly one network
    """
    ids = ovu.osvif_to_neutron_network_ids(subnets)
    num_networks = len(ids)
    if num_networks != 1:
        raise k_exc.IntegrityError(
            _("Subnet mapping %(subnets)s is not valid: %(num_networks)s "
              "unique networks found") % {'subnets': subnets,
                                          'num_networks': num_networks})
    return ids[0]
def __init__(self, base_url):
    """Initialize the API client with TLS settings from configuration.

    :param base_url: root URL of the Kubernetes API server
    :raises RuntimeError: when a configured credential file is missing, or
        server verification is enabled without a usable CA certificate
    """
    self._base_url = base_url
    k8s_conf = config.CONF.kubernetes
    crt_path = k8s_conf.ssl_client_crt_file
    key_path = k8s_conf.ssl_client_key_file
    ca_path = k8s_conf.ssl_ca_crt_file
    self.verify_server = k8s_conf.ssl_verify_server_crt

    # Validate each configured client credential file before use.
    if crt_path and not os.path.exists(crt_path):
        raise RuntimeError(
            _("Unable to find ssl cert_file : %s") % crt_path)
    if key_path and not os.path.exists(key_path):
        raise RuntimeError(
            _("Unable to find ssl key_file : %s") % key_path)
    self.cert = (crt_path, key_path)

    if self.verify_server:
        if not ca_path:
            raise RuntimeError(
                _("ssl_ca_crt_file cannot be None"))
        if not os.path.exists(ca_path):
            raise RuntimeError(
                _("Unable to find ca cert_file : %s") % ca_path)
        # A CA bundle path is a valid 'verify' value for requests.
        self.verify_server = ca_path
def _get_subnet_id(self, service, project_id, ip): subnets_mapping = self._drv_subnets.get_subnets(service, project_id) subnet_ids = { subnet_id for subnet_id, network in subnets_mapping.items() for subnet in network.subnets.objects if ip in subnet.cidr} if len(subnet_ids) != 1: raise k_exc.IntegrityError(_( "Found %(num)s subnets for service %(link)s IP %(ip)s") % { 'link': service['metadata']['selfLink'], 'ip': ip, 'num': len(subnet_ids)}) return subnet_ids.pop()
def run(self, env, fin, fout):
    """Execute a single CNI request read from *fin*.

    :param env: mapping of CNI environment variables (CNI_COMMAND, ...)
    :param fin: file-like object carrying the JSON request body
    :param fout: file-like object the result/error is written to
    :return: 0 on success, 1 on failure
    """
    try:
        params = CNIParameters(env, jsonutils.load(fin))
        if params.CNI_COMMAND == 'ADD':
            vif = self._plugin.add(params)
            self._write_vif(fout, vif)
        elif params.CNI_COMMAND == 'DEL':
            self._plugin.delete(params)
        elif params.CNI_COMMAND == 'VERSION':
            self._write_version(fout)
        else:
            raise k_exc.CNIError(_("unknown CNI_COMMAND: %s")
                                 % params.CNI_COMMAND)
        # Return an explicit success code: the result is used as an exit
        # status, so mirror the failure branch instead of implicitly
        # returning None.
        return 0
    except Exception as ex:
        # TODO: wire LOG.exception here once logging is available.
        self._write_exception(fout, str(ex))
        return 1
def run(self, env, fin, fout):
    """Dispatch a single CNI request and report the result as an exit code.

    :param env: mapping of CNI environment variables (CNI_COMMAND, ...)
    :param fin: file-like object carrying the request body
    :param fout: file-like object the result/error is written to
    :return: 0 on success, 1 on failure
    """
    try:
        # Prepare params according to calling Object
        params = self.prepare_env(env, fin)
        command = env.get('CNI_COMMAND')
        if command == 'ADD':
            vif = self._add(params)
            self._write_dict(fout, vif)
        elif command == 'DEL':
            self._delete(params)
        elif command == 'VERSION':
            self._write_version(fout)
        else:
            raise k_exc.CNIError(_("unknown CNI_COMMAND: %s")
                                 % env['CNI_COMMAND'])
        return 0
    except Exception as ex:
        # Report the failure to the caller instead of propagating.
        self._write_exception(fout, str(ex))
        return 1
def _make_vif_subnet(subnets, subnet_id):
    """Makes a copy of an os-vif Subnet from subnets mapping.

    :param subnets: subnet mapping as returned by PodSubnetsDriver.get_subnets
    :param subnet_id: ID of the subnet to extract from 'subnets' mapping
    :return: a copy of an os-vif Subnet object matching 'subnet_id'
    :raises k_exc.IntegrityError: when the network does not hold exactly
        one subnet
    """
    network = subnets[subnet_id]
    subnet_objs = network.subnets.objects
    if len(subnet_objs) != 1:
        raise k_exc.IntegrityError(_(
            "Network object for subnet %(subnet_id)s is invalid, "
            "must contain a single subnet, but %(num_subnets)s found") % {
            'subnet_id': subnet_id,
            'num_subnets': len(subnet_objs)})

    # Clone so callers can attach fixed IPs without mutating the mapping.
    vif_subnet = subnet_objs[0].obj_clone()
    vif_subnet.ips = osv_fixed_ip.FixedIPList(objects=[])
    return vif_subnet
class Checks(upgradecheck.UpgradeCommands):

    """Upgrade checks for the kuryr-status upgrade check command

    Upgrade checks should be added as separate methods in this
    class and added to _upgrade_checks tuple.
    """

    def _check_placeholder(self):
        # This is just a placeholder for upgrade checks, it should be
        # removed when the actual checks are added
        return upgradecheck.Result(upgradecheck.Code.SUCCESS)

    # The format of the check functions is to return an
    # oslo_upgradecheck.upgradecheck.Result
    # object with the appropriate
    # oslo_upgradecheck.upgradecheck.Code and details set.
    # If the check hits warnings or failures then those should be stored
    # in the returned Result's "details" attribute. The
    # summary will be rolled up at the end of the check() method.
    _upgrade_checks = (
        # In the future there should be some real checks added here
        (_('Placeholder'), _check_placeholder),
    )
async def read_headers(self):  # flake8: noqa
    """Returns HTTP status, reason and headers and updates the object

    One can either get the response doing:

      status, reason, hdrs = await response.read_headers()
      assert status == 200

    or check the object after it has been updated:

      await response.read_headers()
      assert response.status == 200

    :return: (status, reason, headers) tuple
    :raises IOError: when no status line is received
    """
    hdrs = {}

    # Read status
    line = await self._reader.readline()
    if not line:
        raise IOError(_('No status received'))
    line = line.decode('ascii').rstrip()
    http_version, status, reason = line.split(' ', maxsplit=2)
    self.status = int(status)

    while True:
        line = await self._reader.readline()
        if not line:
            break
        line = line.decode('ascii').rstrip()
        if line:
            try:
                # maxsplit=1: header values may themselves contain ': '
                # (e.g. quoted strings); without it such headers raised
                # ValueError and were silently dropped.
                key, value = line.split(': ', 1)
                hdrs[key.upper()] = value
            except ValueError:
                LOG.debug('Failed to read header: %s', line)
        else:
            # Blank line terminates the header section.
            break
        if self._reader.at_eof():
            break
    self.headers = hdrs
    return self.status, reason, self.headers
def get_instance(cls, specific_driver=None, scope='default'):
    """Get an implementing driver instance.

    :param specific_driver: Loads a specific driver instead of using conf.
                            Uses separate manager entry so that loading of
                            default/other drivers is not affected.
    :param scope: Loads the driver in the given scope (if independent
                  instances of a driver are required)
    :raises TypeError: when the loaded driver is not a subclass of *cls*
    """
    alias = cls.ALIAS
    # Explicitly requested drivers get their own cache entry so they do
    # not clash with the config-selected one.
    source = specific_driver or '_from_cfg'
    driver_key = '{}:{}:{}'.format(alias, source, scope)

    manager = _DRIVER_MANAGERS.get(driver_key)
    if manager is None:
        driver_name = (specific_driver or
                       config.CONF.kubernetes[alias + '_driver'])
        manager = stv_driver.DriverManager(
            namespace="%s.%s" % (_DRIVER_NAMESPACE_BASE, alias),
            name=driver_name,
            invoke_on_load=True)
        _DRIVER_MANAGERS[driver_key] = manager

    driver = manager.driver
    if not isinstance(driver, cls):
        raise TypeError(
            _("Invalid %(alias)r driver type: %(driver)s, "
              "must be a subclass of %(type)s") % {
                'alias': alias,
                'driver': driver.__class__.__name__,
                'type': cls
            })
    return driver
def _get_vf_info(self, pci, driver):
    """Look up VF name/index and PF name plus PCI info for a VF device.

    :param pci: PCI address of the virtual function
    :param driver: name of the driver currently bound to the device
    :return: tuple (vf_name, vf_index, pf_name, pci_info); all None when
        the VF cannot be matched under its PF
    :raises OSError: when the device has no netdev name and is not bound
        to a userspace driver
    """
    vf_sys_path = '/sys/bus/pci/devices/{}/net/'.format(pci)
    if not os.path.exists(vf_sys_path):
        # Userspace drivers unbind the kernel netdev, so a missing net/
        # directory is only an error for the remaining (kernel) drivers.
        if driver not in constants.USERSPACE_DRIVERS:
            raise OSError(_("No vf name for device {}").format(pci))
        vf_name = None
    else:
        vf_names = os.listdir(vf_sys_path)
        vf_name = vf_names[0]
    pfysfn_path = '/sys/bus/pci/devices/{}/physfn/net/'.format(pci)
    # NOTE(review): assumes physfn/net always exists; for a VF passed
    # into a VM there is no physfn link and this listdir would raise
    # FileNotFoundError -- confirm whether that case can occur here.
    pf_names = os.listdir(pfysfn_path)
    pf_name = pf_names[0]
    nvfs = self._get_total_vfs(pf_name)
    pf_sys_path = '/sys/class/net/{}/device'.format(pf_name)
    # Scan the PF's virtfn* symlinks to find this VF's index.
    for vf_index in range(nvfs):
        virtfn_path = os.path.join(pf_sys_path,
                                   'virtfn{}'.format(vf_index))
        vf_pci = os.path.basename(os.readlink(virtfn_path))
        if vf_pci == pci:
            pci_info = self._get_pci_info(pf_name, vf_index)
            return vf_name, vf_index, pf_name, pci_info
    # VF not found under its PF's virtfn links.
    return None, None, None, None
from kuryr_kubernetes import clients from kuryr_kubernetes import config from kuryr_kubernetes import constants from kuryr_kubernetes.controller.drivers import base from kuryr_kubernetes.controller.drivers import default_subnet from kuryr_kubernetes.controller.managers import pool from kuryr_kubernetes import exceptions from kuryr_kubernetes import os_vif_util as ovu from kuryr_kubernetes import utils LOG = logging.getLogger(__name__) # Moved out from neutron_default group vif_pool_driver_opts = [ oslo_cfg.IntOpt('ports_pool_max', help=_("Set a maximun amount of ports per pool. " "0 to disable"), default=0), oslo_cfg.IntOpt('ports_pool_min', help=_("Set a target minimum size of the pool of ports"), default=5), oslo_cfg.IntOpt('ports_pool_batch', help=_("Number of ports to be created in a bulk request"), default=10), oslo_cfg.IntOpt('ports_pool_update_frequency', help=_("Minimun interval (in seconds) " "between pool updates"), default=20), oslo_cfg.DictOpt('pools_vif_drivers', help=_("Dict with the pool driver and pod driver to be " "used. If not set, it will take them from the " "kubernetes driver options for pool and pod "
import os import sys from kuryr.lib._i18n import _ from kuryr.lib import config as lib_config from oslo_config import cfg from oslo_log import log as logging from kuryr_kubernetes import constants from kuryr_kubernetes import version LOG = logging.getLogger(__name__) kuryr_k8s_opts = [ cfg.StrOpt('pybasedir', help=_('Directory where Kuryr-kubernetes python module is ' 'installed.'), default=os.path.abspath( os.path.join(os.path.dirname(__file__), '../../'))), ] daemon_opts = [ cfg.StrOpt('bind_address', help=_('Bind address for CNI daemon HTTP server. It is ' 'recommened to allow only local connections.'), default='127.0.0.1:5036'), cfg.IntOpt('worker_num', help=_('Maximum number of processes that will be spawned to ' 'process requests from CNI driver.'), default=30), cfg.IntOpt('vif_annotation_timeout', help=_('Time (in seconds) the CNI daemon will wait for VIF '
from kuryr_kubernetes import clients from kuryr_kubernetes import config from kuryr_kubernetes import constants from kuryr_kubernetes.controller.drivers import base from kuryr_kubernetes.controller.drivers import utils as c_utils from kuryr_kubernetes.controller.managers import pool from kuryr_kubernetes import exceptions from kuryr_kubernetes import os_vif_util as ovu from kuryr_kubernetes import utils LOG = logging.getLogger(__name__) # Moved out from neutron_default group vif_pool_driver_opts = [ oslo_cfg.IntOpt('ports_pool_max', help=_("Set a maximum amount of ports per pool. " "0 to disable"), default=0), oslo_cfg.IntOpt('ports_pool_min', help=_("Set a target minimum size of the pool of ports"), default=5), oslo_cfg.IntOpt('ports_pool_batch', help=_("Number of ports to be created in a bulk request"), default=10), oslo_cfg.IntOpt('ports_pool_update_frequency', help=_("Minimum interval (in seconds) " "between pool updates"), default=20), oslo_cfg.DictOpt('pools_vif_drivers', help=_("Dict with the pool driver and pod driver to be " "used. If not set, it will take them from the " "kubernetes driver options for pool and pod "
# under the License. """ Routines for configuring Kuryr """ from keystoneauth1 import loading as ks_loading from oslo_config import cfg from kuryr.lib._i18n import _ core_opts = [ cfg.StrOpt('bindir', default='/usr/libexec/kuryr', help=_('Directory for Kuryr vif binding executables.')), cfg.StrOpt('subnetpool_name_prefix', default='kuryrPool', help=_('Neutron subnetpool name will be prefixed by this.')), ] neutron_group = cfg.OptGroup( 'neutron', title='Neutron Options', help=_('Configuration options for OpenStack Neutron')) neutron_opts = [ cfg.StrOpt('enable_dhcp', default='True', help=_('Enable or Disable dhcp for neutron subnets.')), cfg.StrOpt('default_subnetpool_v4',
# under the License. import os import sys from kuryr.lib._i18n import _ from kuryr.lib import config as lib_config from oslo_config import cfg from oslo_log import log as logging from kuryr_kubernetes import version LOG = logging.getLogger(__name__) kuryr_k8s_opts = [ cfg.StrOpt('pybasedir', help=_('Directory where Kuryr-kubernetes python module is ' 'installed.'), default=os.path.abspath( os.path.join(os.path.dirname(__file__), '../../'))), ] daemon_opts = [ cfg.BoolOpt('daemon_enabled', help=_('Enable CNI Daemon configuration.'), default=True, deprecated_for_removal=True, deprecated_reason="Deployment without kuryr-daemon is now " "deprecated.", deprecated_since="Rocky"), cfg.StrOpt('bind_address', help=_('Bind address for CNI daemon HTTP server. It is '
# License for the specific language governing permissions and limitations # under the License. import os import sys from kuryr.lib._i18n import _, _LI from kuryr.lib import config as lib_config from oslo_config import cfg from oslo_log import log as logging import pbr.version LOG = logging.getLogger(__name__) kuryr_k8s_opts = [ cfg.StrOpt('pybasedir', help=_('Directory where Kuryr-kubernetes python module is ' 'installed.'), default=os.path.abspath( os.path.join(os.path.dirname(__file__), '../../'))), ] k8s_opts = [ cfg.StrOpt('api_root', help=_("The root URL of the Kubernetes API"), default=os.environ.get('K8S_API', 'http://localhost:8080')), cfg.StrOpt( 'pod_project_driver', help=_("The driver to determine OpenStack project for pod ports"), default='default'), cfg.StrOpt( 'service_project_driver', help=_("The driver to determine OpenStack project for services"),
Routines for configuring Kuryr """ import os from oslo_config import cfg from oslo_log import log from kuryr.lib._i18n import _ from kuryr.lib import version core_opts = [ cfg.StrOpt('bindir', default='$pybasedir/usr/libexec/kuryr', help=_('Directory for Kuryr vif binding executables.')), cfg.StrOpt('subnetpool_name_prefix', default='kuryrPool', help=_('Neutron subnetpool name will be prefixed by this.')), ] neutron_opts = [ cfg.StrOpt('neutron_uri', default=os.environ.get('OS_URL', 'http://127.0.0.1:9696'), help=_('Neutron URL for accessing the network service.')), cfg.StrOpt('enable_dhcp', default='True', help=_('Enable or Disable dhcp for neutron subnets.')), cfg.StrOpt('default_subnetpool_v4', default='kuryr', help=_('Name of default subnetpool version 4')), cfg.StrOpt('default_subnetpool_v6',
""" import os from oslo_config import cfg from oslo_log import log from kuryr.lib._i18n import _ from kuryr.lib import version core_opts = [ cfg.StrOpt('pybasedir', default=os.path.abspath(os.path.join(os.path.dirname(__file__), '../../')), help=_('Directory where Kuryr python module is installed.')), cfg.StrOpt('bindir', default='$pybasedir/usr/libexec/kuryr', help=_('Directory for Kuryr vif binding executables.')), cfg.StrOpt('kuryr_uri', default='http://127.0.0.1:2377', help=_('Kuryr URL for accessing Kuryr through json rpc.')), cfg.StrOpt('capability_scope', default=os.environ.get('CAPABILITY_SCOPE', 'local'), choices=['local', 'global'], help=_('Kuryr plugin scope reported to libnetwork.')), cfg.StrOpt('subnetpool_name_prefix', default='kuryrPool', help=_('Neutron subnetpool name will be prefixed by this.')), cfg.StrOpt('local_default_address_space', default='no_address_space',
""" import os from oslo_config import cfg from oslo_log import log import pbr.version from kuryr.lib._i18n import _ from kuryr.lib import config as lib_config core_opts = [ cfg.StrOpt('pybasedir', default=os.path.abspath(os.path.join(os.path.dirname(__file__), '../')), help=_('Directory where Kuryr python module is installed.')), cfg.StrOpt('kuryr_uri', default=os.environ.get('OS_KURYR_URI', 'http://127.0.0.1:23750'), help=_('Kuryr URL for accessing Kuryr through json rpc.')), cfg.StrOpt('capability_scope', default=os.environ.get('CAPABILITY_SCOPE', 'local'), choices=['local', 'global'], help=_('Kuryr plugin scope reported to libnetwork.')), cfg.StrOpt('local_default_address_space', default='no_address_space', help=_('There is no address-space by default in neutron')), cfg.StrOpt('global_default_address_space', default='no_address_space', help=_('There is no address-space by default in neutron')), cfg.StrOpt('port_driver',
# under the License. """ Routines for configuring Kuryr """ from keystoneauth1 import loading as ks_loading from oslo_config import cfg from kuryr.lib._i18n import _ core_opts = [ cfg.StrOpt('bindir', default='/usr/libexec/kuryr', help=_('Directory for Kuryr vif binding executables.')), cfg.StrOpt('subnetpool_name_prefix', default='kuryrPool', help=_('Neutron subnetpool name will be prefixed by this.')), cfg.StrOpt('deployment_type', default='baremetal', help=_("baremetal or nested-containers are the supported" " values.")), ] neutron_group = cfg.OptGroup( 'neutron', title='Neutron Options', help=_('Configuration options for OpenStack Neutron')) neutron_opts = [