def use_dpdk():
    '''Determine whether DPDK should be used'''
    release = os_release('neutron-common', base='icehouse')
    # DPDK support only exists from mitaka onwards.
    if CompareOpenStackReleases(release) < 'mitaka':
        return False
    return config('enable-dpdk')
Example #2
0
def _cinder_endpoint_urls(api_version):
    """Return (public, internal, admin) endpoint URLs for an API version.

    :param api_version: URL path component, e.g. 'v1', 'v2' or 'v3'
    :type api_version: str
    :returns: tuple of (public_url, internal_url, admin_url)
    :rtype: Tuple[str, str, str]
    """
    port = config('api-listening-port')
    path = '{}:{}/' + api_version + '/$(tenant_id)s'
    return (
        path.format(canonical_url(CONFIGS, PUBLIC), port),
        path.format(canonical_url(CONFIGS, INTERNAL), port),
        path.format(canonical_url(CONFIGS, ADMIN), port),
    )


def identity_joined(rid=None):
    """Register cinder endpoints on the identity-service relation.

    Registers the v1 endpoint for releases before pike, the v2 endpoint
    from icehouse onwards and the v3 endpoint from pike onwards.
    Registration is deferred until the unit is clustered when a VIP is
    configured, and skipped entirely when the api service is disabled.

    :param rid: relation id to set data on (current relation if None)
    :type rid: Optional[str]
    """
    if config('vip') and not is_clustered():
        log('Defering registration until clustered', level=DEBUG)
        return

    settings = {}

    if not service_enabled('api'):
        juju_log('api service not enabled; skipping endpoint '
                 'registration')
        return

    cinder_release = os_release('cinder-common')
    if CompareOpenStackReleases(cinder_release) < 'pike':
        public_url, internal_url, admin_url = _cinder_endpoint_urls('v1')
        settings.update({
            # Unversioned keys are explicitly cleared; the cinder_*
            # keys below carry the actual v1 endpoint data.
            'region': None,
            'service': None,
            'public_url': None,
            'internal_url': None,
            'admin_url': None,
            'cinder_region': config('region'),
            'cinder_service': 'cinder',
            'cinder_public_url': public_url,
            'cinder_internal_url': internal_url,
            'cinder_admin_url': admin_url,
        })
    if CompareOpenStackReleases(cinder_release) >= 'icehouse':
        # NOTE(jamespage) register v2 endpoint as well
        public_url, internal_url, admin_url = _cinder_endpoint_urls('v2')
        settings.update({
            'cinderv2_region': config('region'),
            'cinderv2_service': 'cinderv2',
            'cinderv2_public_url': public_url,
            'cinderv2_internal_url': internal_url,
            'cinderv2_admin_url': admin_url,
        })
    if CompareOpenStackReleases(cinder_release) >= 'pike':
        # NOTE(jamespage) register v3 endpoint as well
        public_url, internal_url, admin_url = _cinder_endpoint_urls('v3')
        settings.update({
            'cinderv3_region': config('region'),
            'cinderv3_service': 'cinderv3',
            'cinderv3_public_url': public_url,
            'cinderv3_internal_url': internal_url,
            'cinderv3_admin_url': admin_url,
        })
    relation_set(relation_id=rid, **settings)
def configure_sriov():
    '''Configure SR-IOV devices based on provided configuration options

    NOTE(fnordahl): Boot time configuration is done by init script
    installed by this charm.

    This function only does runtime configuration!

    The ``sriov-numvfs`` option is interpreted in one of three ways:
    'auto' (configure every SR-IOV device to its maximum VFs), a single
    integer (blanket value for all devices, capped per device), or a
    space-separated list of ``<interface>:<numvfs>`` pairs.
    '''
    charm_config = config()
    if not enable_sriov():
        return

    # make sure init script has correct mode and that boot time execution
    # is enabled
    os.chmod(NEUTRON_SRIOV_INIT_SCRIPT, 0o755)
    service('enable', 'neutron-openvswitch-networking-sriov')

    if charm_config.changed('sriov-numvfs'):
        devices = PCINetDevices()
        sriov_numvfs = charm_config.get('sriov-numvfs')

        # automatic configuration of all SR-IOV devices
        if sriov_numvfs == 'auto':
            log('Configuring SR-IOV device VF functions in auto mode')
            for device in devices.pci_devices:
                if device and device.sriov:
                    log("Configuring SR-IOV device"
                        " {} with {} VF's".format(device.interface_name,
                                                  device.sriov_totalvfs))
                    # NOTE(fnordahl): run-time change of numvfs is disallowed
                    # without resetting to 0 first.
                    device.set_sriov_numvfs(0)
                    device.set_sriov_numvfs(device.sriov_totalvfs)
        else:
            # Single int blanket configuration
            try:
                log('Configuring SR-IOV device VF functions'
                    ' with blanket setting')
                for device in devices.pci_devices:
                    if device and device.sriov:
                        # Cap the requested value at the device maximum.
                        numvfs = min(int(sriov_numvfs), device.sriov_totalvfs)
                        if int(sriov_numvfs) > device.sriov_totalvfs:
                            log('Requested value for sriov-numvfs ({}) too '
                                'high for interface {}. Falling back to '
                                'interface totalvfs '
                                'value: {}'.format(sriov_numvfs,
                                                   device.interface_name,
                                                   device.sriov_totalvfs))
                        log("Configuring SR-IOV device {} with {} "
                            "VFs".format(device.interface_name, numvfs))
                        # NOTE(fnordahl): run-time change of numvfs is
                        # disallowed without resetting to 0 first.
                        device.set_sriov_numvfs(0)
                        device.set_sriov_numvfs(numvfs)
            except ValueError:
                # <device>:<numvfs>[ <device>:numvfs] configuration
                sriov_numvfs = sriov_numvfs.split()
                for device_config in sriov_numvfs:
                    log('Configuring SR-IOV device VF functions per interface')
                    interface_name, numvfs = device_config.split(':')
                    device = devices.get_device_from_interface_name(
                        interface_name)
                    if device and device.sriov:
                        if int(numvfs) > device.sriov_totalvfs:
                            # BUGFIX: previously reported the option name
                            # as 'sriov-numfs'.
                            log('Requested value for sriov-numvfs ({}) too '
                                'high for interface {}. Falling back to '
                                'interface totalvfs '
                                'value: {}'.format(numvfs,
                                                   device.interface_name,
                                                   device.sriov_totalvfs))
                            numvfs = device.sriov_totalvfs
                        log("Configuring SR-IOV device {} with {} "
                            "VF's".format(device.interface_name, numvfs))
                        # NOTE(fnordahl): run-time change of numvfs is
                        # disallowed without resetting to 0 first.
                        device.set_sriov_numvfs(0)
                        device.set_sriov_numvfs(int(numvfs))

        # Trigger remote restart in parent application
        remote_restart('neutron-plugin', 'nova-compute')

        # Restart of SRIOV agent is required after changes to system runtime
        # VF configuration
        cmp_release = CompareOpenStackReleases(
            os_release('neutron-common', base='icehouse'))
        if cmp_release >= 'mitaka':
            service_restart('neutron-sriov-agent')
        else:
            service_restart('neutron-plugin-sriov-agent')
Example #4
0
    def __call__(self):
        """Build the template context for neutron-server configuration.

        Collects charm config options, relation data and release-dependent
        settings (SDN plugin options, L3 HA limits, quotas, DNS/QoS/trunk
        extension drivers, MTU handling and service plugins) into a single
        dict used to render neutron configuration files.

        :returns: context dictionary
        :raises ValueError: if max-l3-agents-per-router is configured
            lower than min-l3-agents-per-router while L3 HA is enabled
        """
        from neutron_api_utils import api_port
        ctxt = super(NeutronCCContext, self).__call__()
        # SDN-plugin specific settings (nsx / plumgrid / midonet).
        if config('neutron-plugin') == 'nsx':
            ctxt['nsx_username'] = config('nsx-username')
            ctxt['nsx_password'] = config('nsx-password')
            ctxt['nsx_tz_uuid'] = config('nsx-tz-uuid')
            ctxt['nsx_l3_uuid'] = config('nsx-l3-uuid')
            if 'nsx-controllers' in config():
                # Normalize whitespace-separated list to comma-separated.
                ctxt['nsx_controllers'] = \
                    ','.join(config('nsx-controllers').split())
                ctxt['nsx_controllers_list'] = \
                    config('nsx-controllers').split()
        if config('neutron-plugin') == 'plumgrid':
            ctxt['pg_username'] = config('plumgrid-username')
            ctxt['pg_password'] = config('plumgrid-password')
            ctxt['virtual_ip'] = config('plumgrid-virtual-ip')
        elif config('neutron-plugin') == 'midonet':
            ctxt.update(MidonetContext()())
            identity_context = IdentityServiceContext(service='neutron',
                                                      service_user='******')()
            if identity_context is not None:
                ctxt.update(identity_context)
        ctxt['l2_population'] = self.neutron_l2_population
        ctxt['enable_dvr'] = self.neutron_dvr
        ctxt['l3_ha'] = self.neutron_l3ha
        # Validate L3 HA agent counts before exposing them to templates.
        if self.neutron_l3ha:
            max_agents = config('max-l3-agents-per-router')
            min_agents = config('min-l3-agents-per-router')
            if max_agents < min_agents:
                raise ValueError("max-l3-agents-per-router ({}) must be >= "
                                 "min-l3-agents-per-router "
                                 "({})".format(max_agents, min_agents))

            ctxt['max_l3_agents_per_router'] = max_agents
            ctxt['min_l3_agents_per_router'] = min_agents

        ctxt['allow_automatic_l3agent_failover'] = \
            config('allow-automatic-l3agent-failover')
        ctxt['allow_automatic_dhcp_failover'] = \
            config('allow-automatic-dhcp-failover')

        ctxt['dhcp_agents_per_network'] = config('dhcp-agents-per-network')
        ctxt['tenant_network_types'] = self.neutron_tenant_network_types
        ctxt['overlay_network_type'] = self.neutron_overlay_network_type
        ctxt['external_network'] = config('neutron-external-network')
        release = os_release('neutron-server')
        cmp_release = CompareOpenStackReleases(release)
        # Nuage (vsp) plugin: copy vsd-* config keys and pull the VSD
        # server address / cms id from the vsd-rest-api relation.
        if config('neutron-plugin') in ['vsp']:
            _config = config()
            for k, v in _config.items():
                if k.startswith('vsd'):
                    ctxt[k.replace('-', '_')] = v
            for rid in relation_ids('vsd-rest-api'):
                for unit in related_units(rid):
                    rdata = relation_get(rid=rid, unit=unit)
                    vsd_ip = rdata.get('vsd-ip-address')
                    if cmp_release >= 'kilo':
                        cms_id_value = rdata.get('nuage-cms-id')
                        log('relation data:cms_id required for'
                            ' nuage plugin: {}'.format(cms_id_value))
                        if cms_id_value is not None:
                            ctxt['vsd_cms_id'] = cms_id_value
                    log('relation data:vsd-ip-address: {}'.format(vsd_ip))
                    if vsd_ip is not None:
                        ctxt['vsd_server'] = '{}:8443'.format(vsd_ip)
            # Placeholder address used when no relation data is available.
            if 'vsd_server' not in ctxt:
                ctxt['vsd_server'] = '1.1.1.1:8443'
        ctxt['verbose'] = config('verbose')
        ctxt['debug'] = config('debug')
        ctxt['neutron_bind_port'] = \
            determine_api_port(api_port('neutron-server'),
                               singlenode_mode=True)
        ctxt['quota_security_group'] = config('quota-security-group')
        ctxt['quota_security_group_rule'] = \
            config('quota-security-group-rule')
        ctxt['quota_network'] = config('quota-network')
        ctxt['quota_subnet'] = config('quota-subnet')
        ctxt['quota_port'] = config('quota-port')
        ctxt['quota_vip'] = config('quota-vip')
        ctxt['quota_pool'] = config('quota-pool')
        ctxt['quota_member'] = config('quota-member')
        ctxt['quota_health_monitors'] = config('quota-health-monitors')
        ctxt['quota_router'] = config('quota-router')
        ctxt['quota_floatingip'] = config('quota-floatingip')

        n_api_settings = self.get_neutron_api_rel_settings()
        if n_api_settings:
            ctxt.update(n_api_settings)

        flat_providers = config('flat-network-providers')
        if flat_providers:
            ctxt['network_providers'] = ','.join(flat_providers.split())

        vlan_ranges = config('vlan-ranges')
        if vlan_ranges:
            ctxt['vlan_ranges'] = ','.join(vlan_ranges.split())

        vni_ranges = config('vni-ranges')
        if vni_ranges:
            ctxt['vni_ranges'] = ','.join(vni_ranges.split())

        # The DNS extension driver is enabled either by an explicit
        # dns-domain or (mitaka+) by a related external-dns application.
        enable_dns_extension_driver = False

        dns_domain = get_dns_domain()
        if dns_domain:
            enable_dns_extension_driver = True
            ctxt['dns_domain'] = dns_domain

        if cmp_release >= 'mitaka':
            for rid in relation_ids('external-dns'):
                if related_units(rid):
                    enable_dns_extension_driver = True

            # AZAwareWeightScheduler inherits from WeightScheduler and is
            # available as of mitaka
            ctxt['network_scheduler_driver'] = (
                'neutron.scheduler.dhcp_agent_scheduler.AZAwareWeightScheduler'
            )
            ctxt['dhcp_load_type'] = config('dhcp-load-type')

        extension_drivers = []
        if config('enable-ml2-port-security'):
            extension_drivers.append(EXTENSION_DRIVER_PORT_SECURITY)
        if enable_dns_extension_driver:
            # queens+ uses the dns_domain_ports variant of the driver.
            if cmp_release < 'queens':
                extension_drivers.append(EXTENSION_DRIVER_DNS)
            else:
                extension_drivers.append(EXTENSION_DRIVER_DNS_DOMAIN_PORTS)

        if is_qos_requested_and_valid():
            extension_drivers.append(EXTENSION_DRIVER_QOS)

        if extension_drivers:
            ctxt['extension_drivers'] = ','.join(extension_drivers)

        ctxt['enable_sriov'] = config('enable-sriov')

        # MTU handling is only supported from mitaka onwards; path-mtu
        # falls back to global-physnet-mtu when unset.
        if cmp_release >= 'mitaka':
            if config('global-physnet-mtu'):
                ctxt['global_physnet_mtu'] = config('global-physnet-mtu')
                if config('path-mtu'):
                    ctxt['path_mtu'] = config('path-mtu')
                else:
                    ctxt['path_mtu'] = config('global-physnet-mtu')
                physical_network_mtus = config('physical-network-mtus')
                if physical_network_mtus:
                    ctxt['physical_network_mtus'] = ','.join(
                        physical_network_mtus.split())

        # supported-pci-vendor-devs is only consumed for kilo..mitaka.
        if 'kilo' <= cmp_release <= 'mitaka':
            pci_vendor_devs = config('supported-pci-vendor-devs')
            if pci_vendor_devs:
                ctxt['supported_pci_vendor_devs'] = \
                    ','.join(pci_vendor_devs.split())

        ctxt['mechanism_drivers'] = get_ml2_mechanism_drivers()

        n_load_balancer_settings = NeutronLoadBalancerContext()()
        if n_load_balancer_settings:
            ctxt.update(n_load_balancer_settings)

        # Build the per-release service_plugins list for core plugins;
        # entry points changed from full class paths to aliases at kilo.
        if config('neutron-plugin') in ['ovs', 'ml2', 'Calico']:
            ctxt['service_plugins'] = []
            service_plugins = {
                'icehouse': [
                    ('neutron.services.l3_router.l3_router_plugin.'
                     'L3RouterPlugin'),
                    'neutron.services.firewall.fwaas_plugin.FirewallPlugin',
                    'neutron.services.loadbalancer.plugin.LoadBalancerPlugin',
                    'neutron.services.vpn.plugin.VPNDriverPlugin',
                    ('neutron.services.metering.metering_plugin.'
                     'MeteringPlugin')],
                'juno': [
                    ('neutron.services.l3_router.l3_router_plugin.'
                     'L3RouterPlugin'),
                    'neutron.services.firewall.fwaas_plugin.FirewallPlugin',
                    'neutron.services.loadbalancer.plugin.LoadBalancerPlugin',
                    'neutron.services.vpn.plugin.VPNDriverPlugin',
                    ('neutron.services.metering.metering_plugin.'
                     'MeteringPlugin')],
                'kilo': ['router', 'firewall', 'lbaas', 'vpnaas', 'metering'],
                'liberty': ['router', 'firewall', 'lbaas', 'vpnaas',
                            'metering'],
                'mitaka': ['router', 'firewall', 'lbaas', 'vpnaas',
                           'metering'],
                'newton': ['router', 'firewall', 'vpnaas', 'metering',
                           ('neutron_lbaas.services.loadbalancer.plugin.'
                            'LoadBalancerPluginv2')],
                'ocata': ['router', 'firewall', 'vpnaas', 'metering',
                          ('neutron_lbaas.services.loadbalancer.plugin.'
                           'LoadBalancerPluginv2'), 'segments',
                          ('neutron_dynamic_routing.'
                           'services.bgp.bgp_plugin.BgpPlugin')],
                'pike': ['router', 'firewall', 'metering', 'segments',
                         ('neutron_lbaas.services.loadbalancer.plugin.'
                          'LoadBalancerPluginv2'),
                         ('neutron_dynamic_routing.'
                          'services.bgp.bgp_plugin.BgpPlugin')],
                'queens': ['router', 'firewall', 'metering', 'segments',
                           ('neutron_lbaas.services.loadbalancer.plugin.'
                            'LoadBalancerPluginv2'),
                           ('neutron_dynamic_routing.'
                            'services.bgp.bgp_plugin.BgpPlugin')],
                'rocky': ['router', 'firewall', 'metering', 'segments',
                          ('neutron_dynamic_routing.'
                           'services.bgp.bgp_plugin.BgpPlugin')],
            }
            if cmp_release >= 'rocky':
                if ctxt.get('load_balancer_name', None):
                    # TODO(fnordahl): Remove when ``neutron_lbaas`` is retired
                    service_plugins['rocky'].append('lbaasv2-proxy')
                else:
                    # TODO(fnordahl): Remove fall-back in next charm release
                    service_plugins['rocky'].append(
                        'neutron_lbaas.services.loadbalancer.plugin.'
                        'LoadBalancerPluginv2')

            # Unknown (newer) releases fall back to the rocky list.
            ctxt['service_plugins'] = service_plugins.get(
                release, service_plugins['rocky'])

            if is_nsg_logging_enabled():
                ctxt['service_plugins'].append('log')

            if is_qos_requested_and_valid():
                ctxt['service_plugins'].append('qos')

            if is_vlan_trunking_requested_and_valid():
                ctxt['service_plugins'].append('trunk')

            ctxt['service_plugins'] = ','.join(ctxt['service_plugins'])

        return ctxt
Example #5
0
def config_changed():
    """Handle charm configuration changes.

    Disables neutron-server on juno+, handles IPv6 setup, optional
    openstack upgrades, config file rendering, package installation and
    re-fires dependent relation hooks so changed values propagate.
    """
    # neutron-server runs if < juno. Neutron-server creates mysql tables
    # which will subsequently cause db migrations to fail if >= juno.
    # Disable neutron-server if >= juno
    if CompareOpenStackReleases(os_release('nova-common')) >= 'juno':
        # BUGFIX: open in text mode; writing a str to a file opened with
        # 'wb' raises TypeError on Python 3.
        with open('/etc/init/neutron-server.override', 'w') as out:
            out.write('manual\n')
    if config('prefer-ipv6'):
        status_set('maintenance', 'configuring ipv6')
        setup_ipv6()
        sync_db_with_multi_ipv6_addresses(config('database'),
                                          config('database-user'),
                                          relation_prefix='nova')

    global CONFIGS
    if git_install_requested():
        status_set('maintenance', 'Running Git install')
        if config_value_changed('openstack-origin-git'):
            git_install(config('openstack-origin-git'))
    elif not config('action-managed-upgrade'):
        if openstack_upgrade_available('nova-common'):
            status_set('maintenance', 'Running openstack upgrade')
            CONFIGS = do_openstack_upgrade(CONFIGS)
            for rid in relation_ids('neutron-api'):
                neutron_api_relation_joined(rid=rid, remote_restart=True)
            # NOTE(jamespage): Force re-fire of shared-db joined hook
            # to ensure that nova_api database is setup if required.
            for r_id in relation_ids('shared-db'):
                db_joined(relation_id=r_id)

    save_script_rc()
    configure_https()
    CONFIGS.write_all()

    # NOTE(jamespage): deal with any changes to the console and serial
    #                  console configuration options
    if not git_install_requested():
        filtered = filter_installed_packages(determine_packages())
        if filtered:
            apt_install(filtered, fatal=True)

    for rid in relation_ids('quantum-network-service'):
        quantum_joined(rid=rid)
    for r_id in relation_ids('identity-service'):
        identity_joined(rid=r_id)
    for rid in relation_ids('zeromq-configuration'):
        zeromq_configuration_relation_joined(rid)
    for rid in relation_ids('cluster'):
        cluster_joined(rid)
    for rid in relation_ids('cloud-compute'):
        compute_joined(rid=rid)

    update_nrpe_config()

    # If the region value has changed, notify the cloud-compute relations
    # to ensure the value is propagated to the compute nodes.
    if config_value_changed('region'):
        for rid in relation_ids('cloud-compute'):
            for unit in related_units(rid):
                compute_changed(rid, unit)

    update_nova_consoleauth_config()
    update_aws_compat_services()
Example #6
0
def run_in_apache():
    """Return true if cinder API is run under apache2 with mod_wsgi in
    this release.
    """
    current = CompareOpenStackReleases(os_release('cinder-common'))
    # apache2/mod_wsgi deployment starts with ocata.
    return current >= 'ocata'
    def test_310_pci_alias_config(self):
        """Verify the pci alias data is rendered properly.

        Configures one then two pci-alias entries on the
        nova-cloud-controller application and validates the rendered
        nova.conf via oslo.config. Skipped on releases before kilo.
        """
        u.log.debug('Checking pci aliases in nova config')

        os_release = self._get_openstack_release_string()
        # pci-alias support only tested from kilo onwards.
        if CompareOpenStackReleases(os_release) < 'kilo':
            u.log.info('Skipping test, {} < kilo'.format(os_release))
            return

        _pci_alias1 = {
            "name": "IntelNIC",
            "capability_type": "pci",
            "product_id": "1111",
            "vendor_id": "8086",
            "device_type": "type-PF"
        }

        # The option moved from [DEFAULT] pci_alias to [pci] alias at ocata.
        if CompareOpenStackReleases(os_release) >= 'ocata':
            section = "pci"
            key_name = "alias"
        else:
            section = "DEFAULT"
            key_name = "pci_alias"

        # Register the expected option with oslo.config so the rendered
        # nova.conf can be parsed below.
        CONF = cfg.CONF
        opt_group = cfg.OptGroup(name=section)
        pci_opts = [cfg.MultiStrOpt(key_name)]
        CONF.register_group(opt_group)
        CONF.register_opts(pci_opts, opt_group)

        _pci_alias2 = {
            "name": " Cirrus Logic ",
            "capability_type": "pci",
            "product_id": "0ff2",
            "vendor_id": "10de",
            "device_type": "type-PCI"
        }

        _pci_alias_list = "[{}, {}]".format(
            json.dumps(_pci_alias1, sort_keys=True),
            json.dumps(_pci_alias2, sort_keys=True))

        unit = self.nova_cc_sentry
        conf = '/etc/nova/nova.conf'
        # First pass: a single alias set via charm config.
        u.log.debug('Setting pci-alias to {}'.format(
            json.dumps(_pci_alias1, sort_keys=True)))
        self.d.configure(
            'nova-cloud-controller',
            {'pci-alias': json.dumps(_pci_alias1, sort_keys=True)})

        u.log.debug('Waiting for config change to take effect')
        self.d.sentry.wait()
        ret = u.validate_config_data(
            unit, conf, section, {
                key_name:
                ('{"capability_type": "pci", "device_type": "type-PF", '
                 '"name": "IntelNIC", "product_id": "1111", '
                 '"vendor_id": "8086"}')
            })
        if ret:
            message = "PCI Alias config error in section {}: {}".format(
                section, ret)
            amulet.raise_status(amulet.FAIL, msg=message)

        # Second pass: a JSON list of two aliases.
        u.log.debug('Setting pci-alias to {}'.format(_pci_alias_list))
        self.d.configure('nova-cloud-controller',
                         {'pci-alias': _pci_alias_list})
        u.log.debug('Waiting for config change to take effect')
        self.d.sentry.wait()

        # Copy the remote nova.conf locally so oslo.config can load it.
        # NOTE(review): NamedTemporaryFile defaults to binary mode —
        # assumes file_contents() returns bytes-compatible data; verify
        # under Python 3.
        f = tempfile.NamedTemporaryFile(delete=False)
        f.write(unit.file_contents(conf))
        f.close()
        CONF(default_config_files=[f.name])
        if CompareOpenStackReleases(os_release) >= 'ocata':
            alias_entries = CONF.pci.alias
        else:
            alias_entries = CONF.DEFAULT.pci_alias
        assert alias_entries[0] == (
            '{"capability_type": "pci", "device_type": "type-PF", '
            '"name": "IntelNIC", "product_id": "1111", "vendor_id": "8086"}')
        assert alias_entries[1] == (
            '{"capability_type": "pci", "device_type": "type-PCI", '
            '"name": " Cirrus Logic ", "product_id": "0ff2", '
            '"vendor_id": "10de"}')
        # Reset the option so later tests see a clean deployment.
        self.d.configure('nova-cloud-controller', {'pci-alias': ''})
        self.d.sentry.wait()
    def _decode_password_security_compliance_string(cls, maybe_yaml):
        """Decode string to dict for 'password-security-compliance'

        Perform some validation on the string and return either None,
        if the string is not valid, or a dictionary of the security
        compliance keys and values.

        :param maybe_yaml: the config item that is (hopefully) YAML format
        :type maybe_yaml: str
        :returns: a dictionary of keys: values or None if the value is not
                  valid.
        :rtype: Optional[Dict[str, Union[str, int, bool]]]
        """
        # The option is only meaningful on Newton or later releases.
        cmp_release = CompareOpenStackReleases(os_release('keystone'))
        if cmp_release < 'newton':
            log(
                "'password-security-compliance' isn't valid for releases "
                "before Newton.",
                level='ERROR')
            return None
        try:
            config_items = yaml.safe_load(maybe_yaml)
        except Exception as e:
            log("Couldn't decode config value for "
                "'password-security-compliance': Invalid YAML?: {}".format(
                    str(e)),
                level='ERROR')
            return None
        # ensure that the top level is a dictionary.
        # NOTE: exact type checks via identity ('is not') rather than
        # isinstance() are deliberate: isinstance would e.g. accept a bool
        # where an int is required (bool subclasses int).  'is not' also
        # fixes the PEP 8 E721 '!=' type comparison.
        if type(config_items) is not dict:
            log("Couldn't decode config value for "
                "'password-security-compliance'.  It doesn't appear to be a "
                "dictionary: {}".format(str(config_items)),
                level='ERROR')
            return None
        # check that the keys present are valid ones.
        config_keys = config_items.keys()
        allowed_keys = cls.ALLOWED_SECURITY_COMPLIANCE_SCHEMA.keys()
        invalid_keys = [k for k in config_keys if k not in allowed_keys]
        if invalid_keys:
            log("Invalid config key(s) found in config "
                "'password-security-compliance' setting: {}".format(
                    ", ".join(invalid_keys)),
                level='ERROR')
            return None
        # check that the types are valid
        valid_types = cls.ALLOWED_SECURITY_COMPLIANCE_SCHEMA
        invalid_types = {
            k: (type(v) is not valid_types[k])
            for k, v in config_items.items()
        }
        if any(invalid_types.values()):
            log("Invalid config value type(s) found in config "
                "'password-security-compliance' setting: {}".format(", ".join([
                    "{}: {} -- should be an {}".format(
                        k,
                        type(config_items[k]).__name__,
                        valid_types[k].__name__)
                    for k, v in invalid_types.items()
                ])),
                level='ERROR')
            return None
        return config_items
Example #9
0
def resource_map():
    '''
    Dynamically generate a map of resources that will be managed for a single
    hook execution.

    :returns: map of config file path to a dict with the 'services' to
        restart on change and the 'contexts' used to render it.
    '''
    # TODO: Cache this on first call?
    virt_type = config('virt-type').lower()
    if virt_type in ('lxd', 'ironic'):
        resource_map = deepcopy(BASE_RESOURCE_MAP)
    else:
        resource_map = deepcopy(LIBVIRT_RESOURCE_MAP)

    # if vault deps are not installed it is not yet possible to check the vault
    # context status since it requires the hvac dependency.
    if not vaultlocker_installed():
        # NOTE: checking against the class directly avoids needlessly
        # instantiating a throwaway VaultKVContext just to take its type;
        # the filtered list replaces the in-place remove() loop.
        resource_map[NOVA_CONF]['contexts'] = [
            item for item in resource_map[NOVA_CONF]['contexts']
            if not isinstance(item, vaultlocker.VaultKVContext)]

    net_manager = network_manager()

    # Network manager gets set late by the cloud-compute interface.
    # FlatDHCPManager only requires some extra packages.
    cmp_os_release = CompareOpenStackReleases(os_release('nova-common'))
    if (net_manager in ['flatmanager', 'flatdhcpmanager']
            and config('multi-host').lower() == 'yes'
            and cmp_os_release < 'ocata'):
        resource_map[NOVA_CONF]['services'].extend(
            ['nova-api', 'nova-network'])
    else:
        # nova-network is not in use, so drop its AppArmor profile entries.
        resource_map.pop(NOVA_API_AA_PROFILE_PATH)
        resource_map.pop(NOVA_NETWORK_AA_PROFILE_PATH)

    if virt_type == 'ironic':
        # NOTE(gsamfira): OpenStack versions prior to Victoria do not have a
        # dedicated nova-compute-ironic package which provides a suitable
        # nova-compute.conf file. We use a template to compensate for that.
        if cmp_os_release < 'victoria':
            resource_map[NOVA_COMPUTE_CONF] = {
                "services": ["nova-compute"],
                "contexts": [],
            }

    cmp_distro_codename = CompareHostReleases(
        lsb_release()['DISTRIB_CODENAME'].lower())
    # From yakkety (or ocata) the daemon is named libvirtd, not libvirt-bin.
    if (cmp_distro_codename >= 'yakkety' or cmp_os_release >= 'ocata'):
        for data in resource_map.values():
            if LIBVIRT_BIN_DAEMON in data['services']:
                data['services'].remove(LIBVIRT_BIN_DAEMON)
                data['services'].append(LIBVIRTD_DAEMON)

    # Neutron/quantum requires additional contexts, as well as new resources
    # depending on the plugin used.
    # NOTE(james-page): only required for ovs plugin right now
    if net_manager in ['neutron', 'quantum']:
        resource_map[NOVA_CONF]['contexts'].append(NeutronComputeContext())

    if relation_ids('ceph'):
        # NOTE(review): this mutates the module-level CEPH_RESOURCES mapping
        # as a side effect before merging it into the local map.
        CEPH_RESOURCES[ceph_config_file()] = {
            'contexts': [NovaComputeCephContext()],
            'services': ['nova-compute']
        }
        resource_map.update(CEPH_RESOURCES)

    enable_nova_metadata, _ = nova_metadata_requirement()
    if enable_nova_metadata:
        resource_map[NOVA_CONF]['services'].append('nova-api-metadata')

    # NOTE(james-page): If not on an upstart based system, don't write
    #                   and override file for libvirt-bin.
    if not os.path.exists('/etc/init'):
        if LIBVIRT_BIN_OVERRIDES in resource_map:
            del resource_map[LIBVIRT_BIN_OVERRIDES]

    return resource_map
Example #10
0
    def __call__(self):
        """Assemble the template context for neutron-server configuration.

        Merges plugin-specific settings (nsx, plumgrid, midonet, vsp),
        L3-HA/quota options and network range settings from charm config
        and relation data on top of the parent context.

        :returns: context dict used for rendering neutron configuration.
        :rtype: dict
        :raises ValueError: if max-l3-agents-per-router is smaller than
            min-l3-agents-per-router while L3 HA is enabled.
        """
        from neutron_api_utils import api_port
        ctxt = super(NeutronCCContext, self).__call__()
        if config('neutron-plugin') == 'nsx':
            ctxt['nsx_username'] = config('nsx-username')
            ctxt['nsx_password'] = config('nsx-password')
            ctxt['nsx_tz_uuid'] = config('nsx-tz-uuid')
            ctxt['nsx_l3_uuid'] = config('nsx-l3-uuid')
            if 'nsx-controllers' in config():
                ctxt['nsx_controllers'] = \
                    ','.join(config('nsx-controllers').split())
                ctxt['nsx_controllers_list'] = \
                    config('nsx-controllers').split()
        if config('neutron-plugin') == 'plumgrid':
            ctxt['pg_username'] = config('plumgrid-username')
            ctxt['pg_password'] = config('plumgrid-password')
            ctxt['virtual_ip'] = config('plumgrid-virtual-ip')
        elif config('neutron-plugin') == 'midonet':
            ctxt.update(MidonetContext()())
            identity_context = IdentityServiceContext(service='neutron',
                                                      service_user='******')()
            if identity_context is not None:
                ctxt.update(identity_context)
        ctxt['l2_population'] = self.neutron_l2_population
        ctxt['enable_dvr'] = self.neutron_dvr
        ctxt['l3_ha'] = self.neutron_l3ha
        if self.neutron_l3ha:
            max_agents = config('max-l3-agents-per-router')
            min_agents = config('min-l3-agents-per-router')
            if max_agents < min_agents:
                raise ValueError("max-l3-agents-per-router ({}) must be >= "
                                 "min-l3-agents-per-router "
                                 "({})".format(max_agents, min_agents))

            ctxt['max_l3_agents_per_router'] = max_agents
            ctxt['min_l3_agents_per_router'] = min_agents

        ctxt['dhcp_agents_per_network'] = config('dhcp-agents-per-network')
        ctxt['tenant_network_types'] = self.neutron_tenant_network_types
        ctxt['overlay_network_type'] = self.neutron_overlay_network_type
        ctxt['external_network'] = config('neutron-external-network')
        release = os_release('neutron-server')
        cmp_release = CompareOpenStackReleases(release)
        if config('neutron-plugin') in ['vsp']:
            _config = config()
            # NOTE: use items() here -- iteritems() is a Python 2 only API
            # and raises AttributeError under Python 3.
            for k, v in _config.items():
                if k.startswith('vsd'):
                    ctxt[k.replace('-', '_')] = v
            for rid in relation_ids('vsd-rest-api'):
                for unit in related_units(rid):
                    rdata = relation_get(rid=rid, unit=unit)
                    vsd_ip = rdata.get('vsd-ip-address')
                    if cmp_release >= 'kilo':
                        cms_id_value = rdata.get('nuage-cms-id')
                        log('relation data:cms_id required for'
                            ' nuage plugin: {}'.format(cms_id_value))
                        if cms_id_value is not None:
                            ctxt['vsd_cms_id'] = cms_id_value
                    log('relation data:vsd-ip-address: {}'.format(vsd_ip))
                    if vsd_ip is not None:
                        ctxt['vsd_server'] = '{}:8443'.format(vsd_ip)
            if 'vsd_server' not in ctxt:
                ctxt['vsd_server'] = '1.1.1.1:8443'
        ctxt['verbose'] = config('verbose')
        ctxt['debug'] = config('debug')
        ctxt['neutron_bind_port'] = \
            determine_api_port(api_port('neutron-server'),
                               singlenode_mode=True)
        ctxt['quota_security_group'] = config('quota-security-group')
        ctxt['quota_security_group_rule'] = \
            config('quota-security-group-rule')
        ctxt['quota_network'] = config('quota-network')
        ctxt['quota_subnet'] = config('quota-subnet')
        ctxt['quota_port'] = config('quota-port')
        ctxt['quota_vip'] = config('quota-vip')
        ctxt['quota_pool'] = config('quota-pool')
        ctxt['quota_member'] = config('quota-member')
        ctxt['quota_health_monitors'] = config('quota-health-monitors')
        ctxt['quota_router'] = config('quota-router')
        ctxt['quota_floatingip'] = config('quota-floatingip')

        n_api_settings = self.get_neutron_api_rel_settings()
        if n_api_settings:
            ctxt.update(n_api_settings)

        flat_providers = config('flat-network-providers')
        if flat_providers:
            ctxt['network_providers'] = ','.join(flat_providers.split())

        vlan_ranges = config('vlan-ranges')
        if vlan_ranges:
            ctxt['vlan_ranges'] = ','.join(vlan_ranges.split())

        vni_ranges = config('vni-ranges')
        if vni_ranges:
            ctxt['vni_ranges'] = ','.join(vni_ranges.split())

        ctxt['enable_ml2_port_security'] = config('enable-ml2-port-security')
        ctxt['enable_sriov'] = config('enable-sriov')

        # Hyper-V support exists in kilo and then again from mitaka onwards
        # (not in liberty).
        if cmp_release == 'kilo' or cmp_release >= 'mitaka':
            ctxt['enable_hyperv'] = True
        else:
            ctxt['enable_hyperv'] = False

        if cmp_release >= 'mitaka':
            if config('global-physnet-mtu'):
                ctxt['global_physnet_mtu'] = config('global-physnet-mtu')
                if config('path-mtu'):
                    ctxt['path_mtu'] = config('path-mtu')
                else:
                    # path MTU defaults to the physnet MTU when unset.
                    ctxt['path_mtu'] = config('global-physnet-mtu')

        return ctxt
Example #11
0
    def __call__(self):
        """Build the template context for libvirt/nova configuration.

        Derives settings from charm config, the host distro release and the
        installed OpenStack release; persists a generated host UUID in the
        unit kv store so it is stable across hook invocations.
        """
        # distro defaults
        ctxt = {
            # /etc/libvirt/libvirtd.conf (
            'listen_tls': 0
        }
        cmp_distro_codename = CompareHostReleases(
            lsb_release()['DISTRIB_CODENAME'].lower())
        cmp_os_release = CompareOpenStackReleases(os_release('nova-common'))

        # NOTE(jamespage): deal with switch to systemd
        if cmp_distro_codename < "wily":
            ctxt['libvirtd_opts'] = '-d'
        else:
            ctxt['libvirtd_opts'] = ''

        # NOTE(jamespage): deal with alignment with Debian in
        #                  Ubuntu yakkety and beyond.
        # NOTE(review): the user values below appear redacted ('******') in
        # this copy -- confirm the intended libvirt user names.
        if cmp_distro_codename >= 'yakkety' or cmp_os_release >= 'ocata':
            ctxt['libvirt_user'] = '******'
        else:
            ctxt['libvirt_user'] = '******'

        # get the processor architecture to use in the nova.conf template
        ctxt['arch'] = platform.machine()

        # enable tcp listening if configured for live migration.
        # (appends to the libvirtd_opts value set above)
        if config('enable-live-migration'):
            ctxt['libvirtd_opts'] += ' -l'

        if config('migration-auth-type') in ['none', 'None', 'ssh']:
            ctxt['listen_tls'] = 0

        if config('migration-auth-type') == 'ssh':
            # nova.conf
            ctxt['live_migration_uri'] = 'qemu+ssh://%s/system'

        if config('instances-path') is not None:
            ctxt['instances_path'] = config('instances-path')

        if config('disk-cachemodes'):
            ctxt['disk_cachemodes'] = config('disk-cachemodes')

        # CPU mode: explicit config wins; otherwise pick a per-architecture
        # default (passthrough on ppc64/aarch64, none on s390x).
        if config('cpu-mode'):
            ctxt['cpu_mode'] = config('cpu-mode')
        elif ctxt['arch'] in ('ppc64el', 'ppc64le', 'aarch64'):
            ctxt['cpu_mode'] = 'host-passthrough'
        elif ctxt['arch'] == 's390x':
            ctxt['cpu_mode'] = 'none'

        if config('cpu-model'):
            ctxt['cpu_model'] = config('cpu-model')

        if config('hugepages'):
            ctxt['hugepages'] = True
            ctxt['kvm_hugepages'] = 1
        else:
            ctxt['kvm_hugepages'] = 0

        # KSM: honour an explicit "1"/"0"; otherwise force on for releases
        # before kilo and let the template decide ("AUTO") from kilo onwards.
        if config('ksm') in ("1", "0",):
            ctxt['ksm'] = config('ksm')
        else:
            if cmp_os_release < 'kilo':
                log("KSM set to 1 by default on openstack releases < kilo",
                    level=INFO)
                ctxt['ksm'] = "1"
            else:
                ctxt['ksm'] = "AUTO"

        if config('pci-passthrough-whitelist'):
            ctxt['pci_passthrough_whitelist'] = \
                config('pci-passthrough-whitelist')

        if config('vcpu-pin-set'):
            ctxt['vcpu_pin_set'] = config('vcpu-pin-set')

        ctxt['reserved_host_memory'] = config('reserved-host-memory')

        # Generate a host UUID once and persist it in the unit kv store so
        # subsequent hook executions reuse the same value.
        db = kv()
        if db.get('host_uuid'):
            ctxt['host_uuid'] = db.get('host_uuid')
        else:
            host_uuid = str(uuid.uuid4())
            db.set('host_uuid', host_uuid)
            db.flush()
            ctxt['host_uuid'] = host_uuid

        if config('libvirt-image-backend'):
            ctxt['libvirt_images_type'] = config('libvirt-image-backend')

        return ctxt
    def __call__(self):
        """Build the template context for the swift proxy server.

        Combines listener/worker settings, swauth (pre-Train only) and
        Keystone auth settings from either charm config or the
        identity-service relation.

        :returns: context dict for rendering the proxy configuration.
        :rtype: dict
        """
        bind_port = config('bind-port')
        workers = config('workers')
        if workers == 0:
            # 0 means "one worker per CPU core".
            import multiprocessing
            workers = multiprocessing.cpu_count()
        if config('prefer-ipv6'):
            proxy_ip = ('[{}]'
                        .format(get_ipv6_addr(exc_list=[config('vip')])[0]))
            memcached_ip = 'ip6-localhost'
        else:
            proxy_ip = get_host_ip(unit_get('private-address'))
            memcached_ip = get_host_ip(unit_get('private-address'))

        ctxt = {
            'proxy_ip': proxy_ip,
            'memcached_ip': memcached_ip,
            'bind_port': determine_api_port(bind_port, singlenode_mode=True),
            'workers': workers,
            'operator_roles': config('operator-roles'),
            'delay_auth_decision': config('delay-auth-decision'),
            'node_timeout': config('node-timeout'),
            'recoverable_node_timeout': config('recoverable-node-timeout'),
            'log_headers': config('log-headers'),
            'statsd_host': config('statsd-host'),
            'statsd_port': config('statsd-port'),
            'statsd_sample_rate': config('statsd-sample-rate'),
            'static_large_object_segments': config(
                'static-large-object-segments'),
            'enable_multi_region': config('enable-multi-region'),
            'read_affinity': get_read_affinity(),
            'write_affinity': get_write_affinity(),
            'write_affinity_node_count': get_write_affinity_node_count()
        }

        cmp_openstack = CompareOpenStackReleases(os_release('swift'))
        if cmp_openstack < 'train':
            # swauth is no longer supported for OpenStack Train and later
            admin_key = leader_get('swauth-admin-key')
            if admin_key is not None:
                ctxt['swauth_admin_key'] = admin_key

        if config('debug'):
            ctxt['log_level'] = 'DEBUG'
        else:
            ctxt['log_level'] = 'INFO'

        # Instead of duplicating code lets use charm-helpers to set signing_dir
        # TODO(hopem): refactor this context handler to use charm-helpers
        #              code.
        _ctxt = IdentityServiceContext(service='swift', service_user='******')()
        signing_dir = _ctxt.get('signing_dir')
        if signing_dir:
            ctxt['signing_dir'] = signing_dir

        ctxt['ssl'] = False

        auth_type = config('auth-type')
        ctxt['auth_type'] = auth_type

        auth_host = config('keystone-auth-host')
        admin_user = config('keystone-admin-user')
        # FIX: previously read 'keystone-admin-user' here too (copy-paste),
        # which silently used the username as the password.
        admin_password = config('keystone-admin-password')
        if (auth_type == 'keystone' and auth_host and
                admin_user and admin_password):
            log('Using user-specified Keystone configuration.')
            ks_auth = {
                'auth_type': 'keystone',
                'auth_protocol': config('keystone-auth-protocol'),
                'keystone_host': auth_host,
                'auth_port': config('keystone-auth-port'),
                'service_user': admin_user,
                'service_password': admin_password,
                'service_tenant': config('keystone-admin-tenant-name'),
            }
            ctxt.update(ks_auth)

        # Relation-provided Keystone settings override user-specified ones.
        for relid in relation_ids('identity-service'):
            log('Using Keystone configuration from identity-service.')
            for unit in related_units(relid):
                ks_auth = {
                    'auth_type': 'keystone',
                    'auth_protocol': relation_get('auth_protocol',
                                                  unit, relid) or 'http',
                    'service_protocol': relation_get('service_protocol',
                                                     unit, relid) or 'http',
                    'keystone_host': relation_get('auth_host',
                                                  unit, relid),
                    'service_host': relation_get('service_host',
                                                 unit, relid),
                    'auth_port': relation_get('auth_port',
                                              unit, relid),
                    'service_user': relation_get('service_username',
                                                 unit, relid),
                    'service_password': relation_get('service_password',
                                                     unit, relid),
                    'service_tenant': relation_get('service_tenant',
                                                   unit, relid),
                    'service_port': relation_get('service_port',
                                                 unit, relid),
                    'api_version': relation_get('api_version',
                                                unit, relid) or '2',
                }
                if ks_auth['api_version'] == '3':
                    ks_auth['admin_domain_id'] = relation_get(
                        'admin_domain_id', unit, relid)
                    ks_auth['service_tenant_id'] = relation_get(
                        'service_tenant_id', unit, relid)
                    ks_auth['admin_domain_name'] = relation_get(
                        'service_domain', unit, relid)
                    ks_auth['admin_tenant_name'] = relation_get(
                        'service_tenant', unit, relid)
                ctxt.update(ks_auth)

        if config('prefer-ipv6'):
            for key in ['keystone_host', 'service_host']:
                host = ctxt.get(key)
                if host:
                    ctxt[key] = format_ipv6_addr(host)

        return ctxt
Example #13
0
def resource_map():
    '''
    Dynamically generate a map of resources that will be managed for a single
    hook execution.
    '''
    rmap = deepcopy(BASE_RESOURCE_MAP)
    configs_to_drop = []

    if use_dvr():
        rmap.update(DVR_RESOURCE_MAP)
        rmap.update(METADATA_RESOURCE_MAP)
        rmap[NEUTRON_CONF]['services'] += [
            'neutron-metadata-agent', 'neutron-l3-agent']
    if enable_local_dhcp():
        rmap.update(METADATA_RESOURCE_MAP)
        rmap.update(DHCP_RESOURCE_MAP)
        rmap[NEUTRON_CONF]['services'] += [
            'neutron-metadata-agent', 'neutron-dhcp-agent']

    # Remap any service names as required
    _os_release = os_release('neutron-common', base='icehouse')
    is_mitaka_or_newer = CompareOpenStackReleases(_os_release) >= 'mitaka'
    if is_mitaka_or_newer:
        # ml2_conf.ini -> openvswitch_agent.ini
        configs_to_drop.append(ML2_CONF)
        # drop of -plugin from service name
        services = rmap[NEUTRON_CONF]['services']
        services.remove('neutron-plugin-openvswitch-agent')
        services.append('neutron-openvswitch-agent')
        if not use_dpdk():
            configs_to_drop += [DPDK_INTERFACES, OVS_DEFAULT]
        elif ovs_has_late_dpdk_init():
            configs_to_drop.append(OVS_DEFAULT)
    else:
        configs_to_drop += [OVS_CONF, DPDK_INTERFACES]

    if enable_sriov():
        agent = 'neutron-sriov-agent'
        sriov_map = deepcopy(SRIOV_RESOURCE_MAP)

        if not is_mitaka_or_newer:
            agent = 'neutron-plugin-sriov-agent'
            # Patch resource_map for Kilo and Liberty
            sriov_map[NEUTRON_SRIOV_AGENT_CONF]['services'] = [agent]

        rmap.update(sriov_map)
        rmap[NEUTRON_CONF]['services'].append(agent)

    # Use MAAS1.9 for MTU and external port config on xenial and above
    if CompareHostReleases(lsb_release()['DISTRIB_CODENAME']) >= 'xenial':
        configs_to_drop += [EXT_PORT_CONF, PHY_NIC_MTU_CONF]

    # Drop any configs flagged above; ignore those not present in the map.
    for dropped in configs_to_drop:
        rmap.pop(dropped, None)

    return rmap
def update_image_location_policy(configs=None):
    """Update *_image_location policy to restrict to admin role.

    This is done unconditionally, keeping a record of the original value as
    installed by the package.

    For ussuri, the charm updates/writes the policy.yaml file.  The configs
    param is optional as the caller may already be writing all the configs.
    From ussuri onwards glance is policy-in-code (rather than using a
    policy.json) and, therefore, policy files are essentially all overrides.

    From ussuri, this function deletes the policy.json file and alternatively
    writes the GLANCE_POLICY_YAML file via the configs object.

    :param configs: The configs for the charm
    :type configs: Optional[:class:templating.OSConfigRenderer()]
    """
    release_cmp = CompareOpenStackReleases(os_release('glance-common'))
    if release_cmp < 'kilo':
        # NOTE(hopem): at the time of writing we are unable to do this for
        # earlier than Kilo due to LP: #1502136
        return
    if release_cmp >= 'ussuri':
        # Any policy.json on disk is the packaged version from a previous
        # OpenStack release and is unused; remove it if present.
        if os.path.isfile(GLANCE_POLICY_FILE):
            try:
                os.remove(GLANCE_POLICY_FILE)
            except Exception as e:
                log("Problem removing file: {}: {}".format(
                    GLANCE_POLICY_FILE, str(e)))
        # Re-render GLANCE_POLICY_YAML when the caller supplied its configs.
        if configs is not None:
            configs.write(GLANCE_POLICY_YAML)
        return

    # After kilo and before ussuri: modify the existing policy.json file.
    db = kv()
    policies = ["get_image_location", "set_image_location",
                "delete_image_location"]

    try:
        with open(GLANCE_POLICY_FILE) as f:
            pmap = json.load(f)
    except IOError as e:
        log("Problem opening glance policy file: {}.  Error was:{}".format(
            GLANCE_POLICY_FILE, str(e)),
            level=WARNING)
        return

    # Record each policy's value at first install so it can be reverted.
    for policy_key in policies:
        db_key = "policy_{}".format(policy_key)
        if db.get(db_key) is not None:
            continue
        if policy_key in pmap:
            db.set(db_key, pmap[policy_key])
            db.flush()
        else:
            log("key '{}' not found in policy file".format(policy_key),
                level=INFO)

    policy_value = ('role:admin'
                    if config('restrict-image-location-operations')
                    else '')

    new_policies = {k: policy_value for k in policies}
    for policy_key, policy_value in new_policies.items():
        log("Updating Glance policy file setting policy "
            "'{}': '{}'".format(policy_key, policy_value),
            level=INFO)

    update_json_file(GLANCE_POLICY_FILE, new_policies)
def enable_sriov():
    '''Determine whether SR-IOV is enabled and supported'''
    # SR-IOV support requires at least the Kilo release.
    release = os_release('neutron-common', base='icehouse')
    if CompareOpenStackReleases(release) < 'kilo':
        return False
    return config('enable-sriov')
Example #16
0
def resource_map(release=None):
    '''
    Dynamically generate a map of resources that will be managed for a single
    hook execution.

    :param release: OpenStack release codename; defaults to the release of
        the installed neutron-common package.
    :type release: Optional[str]
    :returns: map of config file path to dicts of managed services/contexts.
    '''
    release = release or os_release('neutron-common')
    # Build the comparison object once instead of at every check site.
    cmp_release = CompareOpenStackReleases(release)

    resource_map = deepcopy(BASE_RESOURCE_MAP)
    if cmp_release >= 'liberty':
        resource_map.update(LIBERTY_RESOURCE_MAP)

    if cmp_release >= 'train':
        resource_map.pop(NEUTRON_LBAAS_CONF)

    if os.path.exists('/etc/apache2/conf-available'):
        resource_map.pop(APACHE_CONF)
    else:
        resource_map.pop(APACHE_24_CONF)

    if manage_plugin():
        # add neutron plugin requirements. nova-c-c only needs the
        # neutron-server associated with configs, not the plugin agent.
        plugin = config('neutron-plugin')
        conf = neutron_plugin_attribute(plugin, 'config', 'neutron')
        ctxts = (neutron_plugin_attribute(plugin, 'contexts', 'neutron') or
                 [])
        services = neutron_plugin_attribute(plugin, 'server_services',
                                            'neutron')
        resource_map[conf] = {}
        resource_map[conf]['services'] = services
        resource_map[conf]['contexts'] = ctxts
        resource_map[conf]['contexts'].append(
            neutron_api_context.NeutronCCContext())

        if ('kilo' <= cmp_release <= 'mitaka' and
                config('enable-sriov')):
            resource_map[ML2_SRIOV_INI] = {}
            resource_map[ML2_SRIOV_INI]['services'] = services
            resource_map[ML2_SRIOV_INI]['contexts'] = []
    else:
        plugin_ctxt_instance = neutron_api_context.NeutronApiSDNContext()
        if (plugin_ctxt_instance.is_default('core_plugin') and
                plugin_ctxt_instance.is_default('neutron_plugin_config')):
            # The default core plugin is ML2.  If the driver provided by plugin
            # subordinate is built on top of ML2, the subordinate will have use
            # for influencing existing template variables as well as injecting
            # sections into the ML2 configuration file.
            conf = neutron_plugin_attribute('ovs', 'config', 'neutron')
            services = neutron_plugin_attribute('ovs', 'server_services',
                                                'neutron')
            if conf not in resource_map:
                resource_map[conf] = {}
                resource_map[conf]['services'] = services
                resource_map[conf]['contexts'] = [
                    neutron_api_context.NeutronCCContext(),
                ]
            resource_map[conf]['contexts'].append(
                neutron_api_context.NeutronApiSDNContext(
                    config_file=conf)
            )

        resource_map[NEUTRON_CONF]['contexts'].append(
            plugin_ctxt_instance,
        )
        resource_map[NEUTRON_DEFAULT]['contexts'] = \
            [neutron_api_context.NeutronApiSDNConfigFileContext()]
    if enable_memcache(release=release):
        resource_map[MEMCACHED_CONF] = {
            'contexts': [context.MemcacheContext()],
            'services': ['memcached']}

    return resource_map
Example #17
0
def resource_map(release=None):
    """
    Dynamically generate a map of resources that will be managed for a single
    hook execution.

    :param release: OpenStack release codename to build the map for; when
        None it is derived from the installed cinder-common package
        (falling back to 'icehouse').
    :returns: dict mapping config file path -> {'services': [...],
        'contexts': [...]} where 'services' are restarted when the file
        changes and 'contexts' render it.
    """
    resource_map = deepcopy(BASE_RESOURCE_MAP)
    release = release or os_release('cinder-common', base='icehouse')
    if relation_ids('backup-backend'):
        # cinder-backup subordinate is related: restart it on changes to
        # cinder.conf and the charm-managed ceph.conf as well.
        resource_map[CINDER_CONF]['services'].append('cinder-backup')
        resource_map[ceph_config_file()]['services'].append('cinder-backup')

    if relation_ids('ceph') and hook_name() != 'ceph-relation-broken':
        # need to create this early, new peers will have a relation during
        # registration # before they've run the ceph hooks to create the
        # directory.
        # !!! FIX: These side effects seem inappropriate for this method
        mkdir(os.path.dirname(CEPH_CONF))
        mkdir(os.path.dirname(ceph_config_file()))

        # Install ceph config as an alternative for co-location with
        # ceph and ceph-osd charm - cinder ceph.conf will be
        # lower priority than both of these but thats OK
        if not os.path.exists(ceph_config_file()):
            # touch file for pre-templated generation
            open(ceph_config_file(), 'w').close()
        install_alternative(os.path.basename(CEPH_CONF), CEPH_CONF,
                            ceph_config_file())
    else:
        # No usable ceph relation: do not manage the ceph config file.
        resource_map.pop(ceph_config_file())

    # Only manage the apache site config variant matching the installed
    # apache2 layout (conf-available exists for apache >= 2.4).
    if os.path.exists('/etc/apache2/conf-available'):
        resource_map.pop(APACHE_SITE_CONF)
    else:
        resource_map.pop(APACHE_SITE_24_CONF)

    # Remove services from map which are not enabled by user config
    for cfg in resource_map.keys():
        resource_map[cfg]['services'] = \
            filter_services(resource_map[cfg]['services'])

    if enable_memcache(source=config()['openstack-origin']):
        resource_map[MEMCACHED_CONF] = {
            'contexts': [context.MemcacheContext()],
            'services': ['memcached']
        }

    if run_in_apache():
        # cinder-api is run under apache2/mod_wsgi: swap the standalone
        # service for apache2 and manage the wsgi vhost config.
        for cfile in resource_map:
            svcs = resource_map[cfile]['services']
            if 'cinder-api' in svcs:
                svcs.remove('cinder-api')
                if 'apache2' not in svcs:
                    svcs.append('apache2')
        wsgi_script = "/usr/bin/cinder-wsgi"
        resource_map[WSGI_CINDER_API_CONF] = {
            'contexts': [
                context.WSGIWorkerConfigContext(name="cinder",
                                                script=wsgi_script),
                cinder_contexts.HAProxyContext()
            ],
            'services': ['apache2']
        }

    if release and CompareOpenStackReleases(release) < 'queens':
        # Policy file is only templated from queens onwards.
        resource_map.pop(CINDER_POLICY_JSON)

    return resource_map
    def __call__(self):
        """Build the template context for nova/libvirt configuration.

        Gathers distro-, OpenStack-release- and charm-config-driven settings
        (libvirtd daemon options, live migration, CPU/memory tuning, PCI
        passthrough, virtio queue sizes, host UUID) into a single dict
        consumed by the nova/libvirt templates.

        :returns: dict of template context values.
        """
        # distro defaults
        ctxt = {
            # /etc/libvirt/libvirtd.conf: TLS listener disabled by default
            'listen_tls': 0
        }
        cmp_distro_codename = CompareHostReleases(
            lsb_release()['DISTRIB_CODENAME'].lower())
        cmp_os_release = CompareOpenStackReleases(os_release('nova-common'))

        # NOTE(jamespage): deal with switch to systemd
        if cmp_distro_codename < "wily":
            ctxt['libvirtd_opts'] = '-d'
        else:
            ctxt['libvirtd_opts'] = ''

        # NOTE(jamespage): deal with alignment with Debian in
        #                  Ubuntu yakkety and beyond.
        # NOTE(review): '******' appears to be a redacted value in this
        # snippet (presumably the libvirt/libvirtd user name) — verify
        # against the upstream charm source.
        if cmp_distro_codename >= 'yakkety' or cmp_os_release >= 'ocata':
            ctxt['libvirt_user'] = '******'
        else:
            ctxt['libvirt_user'] = '******'

        # get the processor architecture to use in the nova.conf template
        ctxt['arch'] = platform.machine()

        # enable tcp listening if configured for live migration.
        if config('enable-live-migration'):
            ctxt['libvirtd_opts'] += ' -l'

        if config('enable-live-migration') and \
                config('migration-auth-type') in ['none', 'None', 'ssh']:
            ctxt['listen_tls'] = 0

        if config('enable-live-migration') and \
                config('migration-auth-type') == 'ssh':
            # nova.conf
            ctxt['live_migration_uri'] = 'qemu+ssh://%s/system'

        if config('enable-live-migration'):
            ctxt['live_migration_permit_post_copy'] = \
                config('live-migration-permit-post-copy')
            ctxt['live_migration_permit_auto_converge'] = \
                config('live-migration-permit-auto-converge')

        # Explicit 'is not None' so an empty string still overrides the
        # default instances path.
        if config('instances-path') is not None:
            ctxt['instances_path'] = config('instances-path')

        if config('disk-cachemodes'):
            ctxt['disk_cachemodes'] = config('disk-cachemodes')

        if config('use-multipath'):
            ctxt['use_multipath'] = config('use-multipath')

        if config('default-ephemeral-format'):
            ctxt['default_ephemeral_format'] = \
                config('default-ephemeral-format')

        # CPU mode: explicit config wins, otherwise pick a sane
        # per-architecture default.
        if config('cpu-mode'):
            ctxt['cpu_mode'] = config('cpu-mode')
        elif ctxt['arch'] in ('ppc64el', 'ppc64le', 'aarch64'):
            ctxt['cpu_mode'] = 'host-passthrough'
        elif ctxt['arch'] == 's390x':
            ctxt['cpu_mode'] = 'none'

        if config('cpu-model'):
            ctxt['cpu_model'] = config('cpu-model')

        if config('cpu-model-extra-flags'):
            # space separated in charm config -> comma separated for libvirt
            ctxt['cpu_model_extra_flags'] = ', '.join(
                config('cpu-model-extra-flags').split(' '))

        if config('hugepages'):
            ctxt['hugepages'] = True
            ctxt['kvm_hugepages'] = 1
        else:
            ctxt['kvm_hugepages'] = 0

        # KSM: honour an explicit "0"/"1"; otherwise releases before kilo
        # default to enabled, later ones to AUTO.
        if config('ksm') in (
                "1",
                "0",
        ):
            ctxt['ksm'] = config('ksm')
        else:
            if cmp_os_release < 'kilo':
                log("KSM set to 1 by default on openstack releases < kilo",
                    level=INFO)
                ctxt['ksm'] = "1"
            else:
                ctxt['ksm'] = "AUTO"

        if config('reserved-huge-pages'):
            # To bypass juju limitation with list of strings, we
            # consider separate the option's values per semicolons.
            ctxt['reserved_huge_pages'] = ([
                o.strip() for o in config('reserved-huge-pages').split(";")
            ])

        if config('pci-passthrough-whitelist'):
            ctxt['pci_passthrough_whitelist'] = \
                config('pci-passthrough-whitelist')

        if config('pci-alias'):
            ctxt['pci_alias'] = config('pci-alias')

        if config('vcpu-pin-set'):
            ctxt['vcpu_pin_set'] = config('vcpu-pin-set')

        if config('cpu-shared-set'):
            ctxt['cpu_shared_set'] = config('cpu-shared-set')

        if config('virtio-net-tx-queue-size'):
            ctxt['virtio_net_tx_queue_size'] = (
                config('virtio-net-tx-queue-size'))
        if config('virtio-net-rx-queue-size'):
            ctxt['virtio_net_rx_queue_size'] = (
                config('virtio-net-rx-queue-size'))

        ctxt['reserved_host_memory'] = config('reserved-host-memory')

        # Stable per-host UUID: generate once, persist in the charm kv store.
        db = kv()
        if db.get('host_uuid'):
            ctxt['host_uuid'] = db.get('host_uuid')
        else:
            host_uuid = str(uuid.uuid4())
            db.set('host_uuid', host_uuid)
            db.flush()
            ctxt['host_uuid'] = host_uuid

        if config('libvirt-image-backend'):
            ctxt['libvirt_images_type'] = config('libvirt-image-backend')

        ctxt['force_raw_images'] = config('force-raw-images')

        return ctxt
Example #19
0
    def __call__(self):
        """Build the template context for nova/libvirt configuration.

        Gathers distro-, OpenStack-release- and charm-config-driven settings
        (libvirtd daemon options, live migration including release-dependent
        inbound address handling, CPU pinning/tuning, PCI alias/passthrough,
        virtio queue sizes, host UUID, password injection) into a single dict
        consumed by the nova/libvirt templates.

        :returns: dict of template context values.
        """
        # distro defaults
        ctxt = {
            # /etc/libvirt/libvirtd.conf: TLS listener disabled by default
            'listen_tls': 0
        }
        cmp_distro_codename = CompareHostReleases(
            lsb_release()['DISTRIB_CODENAME'].lower())
        cmp_os_release = CompareOpenStackReleases(os_release('nova-common'))

        # NOTE(jamespage): deal with switch to systemd
        if cmp_distro_codename < "wily":
            ctxt['libvirtd_opts'] = '-d'
        else:
            ctxt['libvirtd_opts'] = ''

        # NOTE(jamespage): deal with alignment with Debian in
        #                  Ubuntu yakkety and beyond.
        # NOTE(review): '******' appears to be a redacted value in this
        # snippet (presumably the libvirt/libvirtd user name) — verify
        # against the upstream charm source.
        if cmp_distro_codename >= 'yakkety' or cmp_os_release >= 'ocata':
            ctxt['libvirt_user'] = '******'
        else:
            ctxt['libvirt_user'] = '******'

        # get the processor architecture to use in the nova.conf template
        ctxt['arch'] = platform.machine()

        # enable tcp listening if configured for live migration.
        if config('enable-live-migration'):
            ctxt['libvirtd_opts'] += ' -l'

        if config('enable-live-migration') and \
                config('migration-auth-type') in ['none', 'None', 'ssh']:
            ctxt['listen_tls'] = 0

        if config('enable-live-migration') and \
                config('migration-auth-type') == 'ssh':
            # Resolve the address on the 'migration' binding (optionally
            # constrained to the configured migration network).
            migration_address = get_relation_ip(
                'migration', cidr_network=config('libvirt-migration-network'))

            # ocata+ supports scheme/inbound-addr options; older releases
            # fall back to an explicit qemu+ssh migration URI.
            if cmp_os_release >= 'ocata':
                ctxt['live_migration_scheme'] = config('migration-auth-type')
                ctxt['live_migration_inbound_addr'] = migration_address
            else:
                ctxt['live_migration_uri'] = 'qemu+ssh://%s/system'

        if config('enable-live-migration'):
            ctxt['live_migration_completion_timeout'] = \
                config('live-migration-completion-timeout')
            ctxt['live_migration_downtime'] = \
                config('live-migration-downtime')
            ctxt['live_migration_downtime_steps'] = \
                config('live-migration-downtime-steps')
            ctxt['live_migration_downtime_delay'] = \
                config('live-migration-downtime-delay')
            ctxt['live_migration_permit_post_copy'] = \
                config('live-migration-permit-post-copy')
            ctxt['live_migration_permit_auto_converge'] = \
                config('live-migration-permit-auto-converge')

        # Explicit 'is not None' so an empty string still overrides the
        # default instances path.
        if config('instances-path') is not None:
            ctxt['instances_path'] = config('instances-path')

        if config('disk-cachemodes'):
            ctxt['disk_cachemodes'] = config('disk-cachemodes')

        if config('use-multipath'):
            ctxt['use_multipath'] = config('use-multipath')

        if config('default-ephemeral-format'):
            ctxt['default_ephemeral_format'] = \
                config('default-ephemeral-format')

        # CPU mode: explicit config wins, otherwise pick a sane
        # per-architecture default.
        if config('cpu-mode'):
            ctxt['cpu_mode'] = config('cpu-mode')
        elif ctxt['arch'] in ('ppc64el', 'ppc64le', 'aarch64'):
            ctxt['cpu_mode'] = 'host-passthrough'
        elif ctxt['arch'] == 's390x':
            ctxt['cpu_mode'] = 'none'

        if config('cpu-model'):
            ctxt['cpu_model'] = config('cpu-model')

        if config('cpu-model-extra-flags'):
            # space separated in charm config -> comma separated for libvirt
            ctxt['cpu_model_extra_flags'] = ', '.join(
                config('cpu-model-extra-flags').split(' '))

        if config('hugepages'):
            ctxt['hugepages'] = True
            ctxt['kvm_hugepages'] = 1
        else:
            ctxt['kvm_hugepages'] = 0

        # KSM: honour an explicit "0"/"1"; otherwise releases before kilo
        # default to enabled, later ones to AUTO.
        if config('ksm') in (
                "1",
                "0",
        ):
            ctxt['ksm'] = config('ksm')
        else:
            if cmp_os_release < 'kilo':
                log("KSM set to 1 by default on openstack releases < kilo",
                    level=INFO)
                ctxt['ksm'] = "1"
            else:
                ctxt['ksm'] = "AUTO"

        if config('reserved-huge-pages'):
            # To bypass juju limitation with list of strings, we
            # consider separate the option's values per semicolons.
            ctxt['reserved_huge_pages'] = ([
                o.strip() for o in config('reserved-huge-pages').split(";")
            ])

        if config('pci-passthrough-whitelist'):
            ctxt['pci_passthrough_whitelist'] = \
                config('pci-passthrough-whitelist')

        if config('pci-alias'):
            aliases = json.loads(config('pci-alias'))
            # Behavior previous to queens is maintained as it was
            if isinstance(aliases, list) and cmp_os_release >= 'queens':
                # queens+: one serialized alias per list entry
                ctxt['pci_aliases'] = [
                    json.dumps(x, sort_keys=True) for x in aliases
                ]
            else:
                ctxt['pci_alias'] = json.dumps(aliases, sort_keys=True)

        # cpu-dedicated-set supersedes the deprecated vcpu-pin-set when both
        # are configured.
        if config('cpu-dedicated-set'):
            ctxt['cpu_dedicated_set'] = config('cpu-dedicated-set')
        elif config('vcpu-pin-set'):
            ctxt['vcpu_pin_set'] = config('vcpu-pin-set')

        if config('cpu-shared-set'):
            ctxt['cpu_shared_set'] = config('cpu-shared-set')

        if config('virtio-net-tx-queue-size'):
            ctxt['virtio_net_tx_queue_size'] = (
                config('virtio-net-tx-queue-size'))
        if config('virtio-net-rx-queue-size'):
            ctxt['virtio_net_rx_queue_size'] = (
                config('virtio-net-rx-queue-size'))

        if config('num-pcie-ports'):
            ctxt['num_pcie_ports'] = config('num-pcie-ports')

        ctxt['reserved_host_memory'] = config('reserved-host-memory')
        ctxt['reserved_host_disk'] = config('reserved-host-disk')

        # Stable per-host UUID: generate once, persist in the charm kv store.
        db = kv()
        if db.get('host_uuid'):
            ctxt['host_uuid'] = db.get('host_uuid')
        else:
            host_uuid = str(uuid.uuid4())
            db.set('host_uuid', host_uuid)
            db.flush()
            ctxt['host_uuid'] = host_uuid

        if config('libvirt-image-backend'):
            ctxt['libvirt_images_type'] = config('libvirt-image-backend')

        ctxt['force_raw_images'] = config('force-raw-images')
        ctxt['inject_password'] = config('inject-password')
        # if allow the injection of an admin password it depends
        # on value greater or equal to -1 for inject_partition
        # -2 means disable the injection of data
        ctxt['inject_partition'] = -1 if config('inject-password') else -2

        return ctxt
def neutron_plugins():
    """Return metadata for each supported neutron plugin.

    Builds a dict keyed by plugin name where each value describes the
    plugin's config file, core driver, template contexts, and the agent/
    server services and packages it needs.  The base definitions are then
    patched per detected OpenStack release (icehouse, kilo, liberty,
    mitaka, newton).

    :returns: dict of plugin name -> plugin attribute dict.
    """
    release = os_release('nova-common')
    plugins = {
        'ovs': {
            'config':
            '/etc/neutron/plugins/openvswitch/'
            'ovs_neutron_plugin.ini',
            'driver':
            'neutron.plugins.openvswitch.ovs_neutron_plugin.'
            'OVSNeutronPluginV2',
            'contexts': [],
            'services': ['neutron-plugin-openvswitch-agent'],
            'packages':
            [determine_dkms_package(), ['neutron-plugin-openvswitch-agent']],
            'server_packages':
            ['neutron-server', 'neutron-plugin-openvswitch'],
            'server_services': ['neutron-server']
        },
        'nvp': {
            'config': '/etc/neutron/plugins/nicira/nvp.ini',
            'driver': 'neutron.plugins.nicira.nicira_nvp_plugin.'
            'NeutronPlugin.NvpPluginV2',
            'contexts': [],
            'services': [],
            'packages': [],
            'server_packages': ['neutron-server', 'neutron-plugin-nicira'],
            'server_services': ['neutron-server']
        },
        'nsx': {
            'config': '/etc/neutron/plugins/vmware/nsx.ini',
            'driver': 'vmware',
            'contexts': [],
            'services': [],
            'packages': [],
            'server_packages': ['neutron-server', 'neutron-plugin-vmware'],
            'server_services': ['neutron-server']
        },
        'n1kv': {
            'config': '/etc/neutron/plugins/cisco/cisco_plugins.ini',
            'driver': 'neutron.plugins.cisco.network_plugin.PluginV2',
            'contexts': [],
            'services': [],
            'packages': [determine_dkms_package(), ['neutron-plugin-cisco']],
            'server_packages': ['neutron-server', 'neutron-plugin-cisco'],
            'server_services': ['neutron-server']
        },
        'Calico': {
            'config':
            '/etc/neutron/plugins/ml2/ml2_conf.ini',
            'driver':
            'neutron.plugins.ml2.plugin.Ml2Plugin',
            'contexts': [],
            'services': [
                'calico-felix', 'bird', 'neutron-dhcp-agent',
                'nova-api-metadata', 'etcd'
            ],
            'packages': [
                determine_dkms_package(),
                [
                    'calico-compute', 'bird', 'neutron-dhcp-agent',
                    'nova-api-metadata', 'etcd'
                ]
            ],
            'server_packages': ['neutron-server', 'calico-control', 'etcd'],
            'server_services': ['neutron-server', 'etcd']
        },
        'vsp': {
            'config': '/etc/neutron/plugins/nuage/nuage_plugin.ini',
            'driver': 'neutron.plugins.nuage.plugin.NuagePlugin',
            'contexts': [],
            'services': [],
            'packages': [],
            'server_packages': ['neutron-server', 'neutron-plugin-nuage'],
            'server_services': ['neutron-server']
        },
        'plumgrid': {
            'config':
            '/etc/neutron/plugins/plumgrid/plumgrid.ini',
            'driver': ('neutron.plugins.plumgrid.plumgrid_plugin'
                       '.plumgrid_plugin.NeutronPluginPLUMgridV2'),
            'contexts': [],
            'services': [],
            'packages': ['plumgrid-lxc', 'iovisor-dkms'],
            'server_packages': ['neutron-server', 'neutron-plugin-plumgrid'],
            'server_services': ['neutron-server']
        },
        'midonet': {
            'config': '/etc/neutron/plugins/midonet/midonet.ini',
            'driver': 'midonet.neutron.plugin.MidonetPluginV2',
            'contexts': [],
            'services': [],
            'packages': [determine_dkms_package()],
            'server_packages':
            ['neutron-server', 'python-neutron-plugin-midonet'],
            'server_services': ['neutron-server']
        }
    }
    if CompareOpenStackReleases(release) >= 'icehouse':
        # NOTE: patch in ml2 plugin for icehouse onwards
        plugins['ovs']['config'] = '/etc/neutron/plugins/ml2/ml2_conf.ini'
        plugins['ovs']['driver'] = 'neutron.plugins.ml2.plugin.Ml2Plugin'
        plugins['ovs']['server_packages'] = [
            'neutron-server', 'neutron-plugin-ml2'
        ]
        # NOTE: patch in vmware renames nvp->nsx for icehouse onwards
        # NOTE: this makes 'nvp' an alias of the *same* dict as 'nsx', so
        # the mitaka-era edits to plugins['nsx'] below are visible through
        # 'nvp' as well.
        plugins['nvp'] = plugins['nsx']
    if CompareOpenStackReleases(release) >= 'kilo':
        plugins['midonet']['driver'] = (
            'neutron.plugins.midonet.plugin.MidonetPluginV2')
    if CompareOpenStackReleases(release) >= 'liberty':
        # midonet and plumgrid moved to externally-maintained drivers and
        # renamed server packages from liberty onwards.
        plugins['midonet']['driver'] = (
            'midonet.neutron.plugin_v1.MidonetPluginV2')
        plugins['midonet']['server_packages'].remove(
            'python-neutron-plugin-midonet')
        plugins['midonet']['server_packages'].append(
            'python-networking-midonet')
        plugins['plumgrid']['driver'] = ('networking_plumgrid.neutron.plugins'
                                         '.plugin.NeutronPluginPLUMgridV2')
        plugins['plumgrid']['server_packages'].remove(
            'neutron-plugin-plumgrid')
    if CompareOpenStackReleases(release) >= 'mitaka':
        plugins['nsx']['server_packages'].remove('neutron-plugin-vmware')
        plugins['nsx']['server_packages'].append('python-vmware-nsx')
        plugins['nsx']['config'] = '/etc/neutron/nsx.ini'
        plugins['vsp']['driver'] = (
            'nuage_neutron.plugins.nuage.plugin.NuagePlugin')
    if CompareOpenStackReleases(release) >= 'newton':
        # newton+: nuage (vsp) is delivered as an ml2 mechanism driver.
        plugins['vsp']['config'] = '/etc/neutron/plugins/ml2/ml2_conf.ini'
        plugins['vsp']['driver'] = 'neutron.plugins.ml2.plugin.Ml2Plugin'
        plugins['vsp']['server_packages'] = [
            'neutron-server', 'neutron-plugin-ml2'
        ]
    return plugins
Example #21
0
def run_in_apache():
    """Whether the ceilometer API runs under apache2 with mod_wsgi.

    True only for OpenStack releases from ocata up to, but not including,
    queens.
    """
    release = CompareOpenStackReleases(os_release('ceilometer-common'))
    return 'ocata' <= release < 'queens'
Example #22
0
    def ovs_ctxt(self):
        """Build the OVS plugin template context.

        Extends the parent context with the local data-plane IP, settings
        published by neutron-api, charm config (security groups, DPDK,
        SR-IOV, MTU, bridge/VLAN mappings) and the firewall driver.

        :returns: dict of context values, or {} if the parent context is
            empty (prerequisites not yet satisfied).
        """
        # In addition to generating config context, ensure the OVS service
        # is running and the OVS bridge exists. Also need to ensure
        # local_ip points to actual IP, not hostname.
        ovs_ctxt = super(OVSPluginContext, self).ovs_ctxt()
        if not ovs_ctxt:
            return {}

        conf = config()

        fallback = get_host_ip(unit_get('private-address'))
        if config('os-data-network'):
            # NOTE: prefer any existing use of config based networking
            ovs_ctxt['local_ip'] = \
                get_address_in_network(config('os-data-network'),
                                       fallback)
        else:
            # NOTE: test out network-spaces support, then fallback
            try:
                ovs_ctxt['local_ip'] = get_host_ip(
                    network_get_primary_address('data')
                )
            except NotImplementedError:
                ovs_ctxt['local_ip'] = fallback

        # Settings propagated from the neutron-api principle charm.
        neutron_api_settings = NeutronAPIContext()()
        ovs_ctxt['neutron_security_groups'] = self.neutron_security_groups
        ovs_ctxt['l2_population'] = neutron_api_settings['l2_population']
        ovs_ctxt['distributed_routing'] = neutron_api_settings['enable_dvr']
        ovs_ctxt['extension_drivers'] = neutron_api_settings[
            'extension_drivers']
        ovs_ctxt['overlay_network_type'] = \
            neutron_api_settings['overlay_network_type']
        ovs_ctxt['polling_interval'] = neutron_api_settings['polling_interval']
        ovs_ctxt['rpc_response_timeout'] = \
            neutron_api_settings['rpc_response_timeout']
        ovs_ctxt['report_interval'] = neutron_api_settings['report_interval']
        # TODO: We need to sort out the syslog and debug/verbose options as a
        # general context helper
        ovs_ctxt['use_syslog'] = conf['use-syslog']
        ovs_ctxt['verbose'] = conf['verbose']
        ovs_ctxt['debug'] = conf['debug']

        # ARP spoofing protection is deprecated from ocata; warn but still
        # pass the configured value through.
        cmp_release = CompareOpenStackReleases(
            os_release('neutron-common', base='icehouse'))
        if conf['prevent-arp-spoofing'] and cmp_release >= 'ocata':
            log("prevent-arp-spoofing is True yet this feature is deprecated "
                "and no longer has any effect in your version of Openstack",
                WARNING)

        ovs_ctxt['prevent_arp_spoofing'] = conf['prevent-arp-spoofing']
        ovs_ctxt['enable_dpdk'] = conf['enable-dpdk']

        net_dev_mtu = neutron_api_settings.get('network_device_mtu')
        if net_dev_mtu:
            # neutron.conf
            ovs_ctxt['network_device_mtu'] = net_dev_mtu
            # ml2 conf
            ovs_ctxt['veth_mtu'] = net_dev_mtu

        # Space separated charm config -> comma separated config values.
        mappings = config('bridge-mappings')
        if mappings:
            ovs_ctxt['bridge_mappings'] = ','.join(mappings.split())

        sriov_mappings = config('sriov-device-mappings')
        if sriov_mappings:
            ovs_ctxt['sriov_device_mappings'] = (
                ','.join(sriov_mappings.split())
            )

        enable_sriov = config('enable-sriov')
        if enable_sriov:
            ovs_ctxt['enable_sriov'] = True

        # sriov-numvfs may be 'auto', a single number (blanket for all
        # devices), or a per-device list; a non-numeric non-auto value is
        # treated as the per-device list form.
        sriov_numvfs = config('sriov-numvfs')
        if sriov_numvfs:
            try:
                if sriov_numvfs != 'auto':
                    int(sriov_numvfs)
            except ValueError:
                ovs_ctxt['sriov_vfs_list'] = sriov_numvfs
            else:
                ovs_ctxt['sriov_vfs_blanket'] = sriov_numvfs

        flat_providers = config('flat-network-providers')
        if flat_providers:
            ovs_ctxt['network_providers'] = ','.join(flat_providers.split())

        vlan_ranges = config('vlan-ranges')
        if vlan_ranges:
            ovs_ctxt['vlan_ranges'] = ','.join(vlan_ranges.split())

        ovs_ctxt['firewall_driver'] = _get_firewall_driver()

        return ovs_ctxt
def determine_purge_packages():
    """Return the list of packages to purge for the current OS release.

    PURGE_PACKAGES only apply from rocky onwards; earlier releases purge
    nothing.
    """
    release = CompareOpenStackReleases(
        os_release('neutron-common', base='icehouse', reset_cache=True))
    return PURGE_PACKAGES if release >= 'rocky' else []
def determine_purge_packages():
    """Return a list of packages to purge for the current OS release.

    Purging only applies from rocky onwards; earlier releases return an
    empty list.
    """
    nova_release = CompareOpenStackReleases(os_release('nova-common'))
    return PURGE_PACKAGES if nova_release >= 'rocky' else []