Code example #1
def determine_purge_packages():
    """
    Determine the list of packages that were previously installed but are no
    longer needed.

    :returns: list of package names
    """
    pkgs = []
    release = CompareOpenStackReleases(os_release('openstack-dashboard'))
    if release >= 'rocky':
        pkgs = [p for p in BASE_PACKAGES if p.startswith('python-')]
        pkgs.extend([
            'python-django-horizon',
            'python-django-openstack-auth',
            'python-pymysql',
            'python-neutron-lbaas-dashboard',
            'python-designate-dashboard',
            'python-heat-dashboard',
        ])
    if release >= 'train':
        pkgs.append('python3-neutron-lbaas-dashboard')
    # NOTE(ajkavanagh) also ensure that associated plugins can purge on upgrade
    return list(
        set(pkgs).union(set(determine_purge_packages_dashboard_plugin())))
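Note on the shared pattern: all of these snippets compare release codenames via charmhelpers' CompareOpenStackReleases, which works because OpenStack codenames are alphabetical and therefore sort chronologically. A minimal, self-contained sketch of the idea follows; the abbreviated _RELEASES tuple is an assumption, the real helper carries the complete sequence.

from functools import total_ordering

# Assumed, abbreviated subset of the codename sequence; the real helper
# in charmhelpers carries the complete list.
_RELEASES = ('icehouse', 'juno', 'kilo', 'liberty', 'mitaka', 'newton',
             'ocata', 'pike', 'queens', 'rocky', 'stein', 'train')


@total_ordering
class CompareReleasesSketch(object):
    """Order release codenames by their position in _RELEASES."""

    def __init__(self, name):
        self._index = _RELEASES.index(name)

    def __eq__(self, other):
        return self._index == _RELEASES.index(str(other))

    def __lt__(self, other):
        return self._index < _RELEASES.index(str(other))


assert CompareReleasesSketch('rocky') >= 'mitaka'
assert 'ocata' <= CompareReleasesSketch('pike') < 'queens'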
Code example #2
def get_dns_domain():
    if not config('enable-ml2-dns'):
        log('ML2 DNS Extensions are not enabled.', DEBUG)
        return ""

    dns_domain = config('dns-domain')
    if not dns_domain:
        log('No dns-domain has been configured', DEBUG)
        return dns_domain

    release = os_release('neutron-server')
    if CompareOpenStackReleases(release) < 'mitaka':
        log('Internal DNS resolution is not supported before Mitaka')
        return ""

    # Strip any trailing '.'
    if dns_domain[-1] == '.':
        dns_domain = dns_domain[:-1]

    # Ensure that the dns name is a valid name. Valid characters are
    # a-z, A-Z, 0-9, ., and -. No part may be longer than 63 characters,
    # and a part cannot begin or end with a -. Validate this here in
    # order to prevent errors which may prevent neutron services from
    # functioning properly.
    # Note: intentionally not validating the length of the domain name because
    # this is practically difficult to validate reasonably well.
    for level in dns_domain.split('.'):
        if not DOMAIN_NAME_REGEX.match(level):
            msg = "dns-domain '%s' is an invalid domain name." % dns_domain
            log(msg, ERROR)
            raise ValueError(msg)

    # Make sure it ends with a .
    dns_domain += '.'

    return dns_domain
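get_dns_domain() relies on a module-level DOMAIN_NAME_REGEX that is not shown above. A hypothetical pattern consistent with the rules in the comment (alphanumerics and hyphens, no leading or trailing hyphen, at most 63 characters per label) could look like this sketch:

import re

# Hypothetical definition matching the validation rules described above:
# each label is 1-63 characters of [a-zA-Z0-9-] and may not begin or end
# with a hyphen.
DOMAIN_NAME_REGEX = re.compile(
    r'^[a-zA-Z0-9]([a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?$')

assert DOMAIN_NAME_REGEX.match('openstack')
assert DOMAIN_NAME_REGEX.match('cloud-1')
assert not DOMAIN_NAME_REGEX.match('-bad')
assert not DOMAIN_NAME_REGEX.match('a' * 64)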
Code example #3
    def test_keystone_v3(self):
        """Verify that the service is configured and operates correctly when
        using Keystone v3 auth."""
        if self._get_openstack_release() >= self.xenial_queens:
            u.log.info('Skipping keystone v3 test for queens or later')
            return
        os_release = self._get_openstack_release_string()
        if CompareOpenStackReleases(os_release) < 'kilo':
            u.log.info('Skipping test, {} < kilo'.format(os_release))
            return
        u.log.info('Checking that service is configured and operates '
                   'correctly when using Keystone v3 auth...')
        if not self._set_auth_api_version('3'):
            msg = "Unable to set auth_api_version to '3'"
            amulet.raise_status(amulet.FAIL, msg=msg)
            return
        if self._get_openstack_release() >= self.trusty_mitaka:
            # NOTE(jamespage):
            # Re-init tests to create v3 versions of glance, swift and
            # keystone clients for mitaka or later, where glance uses
            # v3 to access backend swift services.  Early v3 deployments
            # still use v2 credentials in glance for swift access.
            self._initialize_tests(api_version=3)
        self.test_400_swift_backed_image_create()
Code example #4
def conditional_neutron_migration():
    """Initialise neutron database if not already done so.

    Runs neutron-manage to initialize a new database or migrate existing and
    restarts services to ensure that the changes are picked up. The first
    (leader) unit to perform this action should have broadcast this information
    to its peers so first we check whether this has already occurred.
    """
    if CompareOpenStackReleases(os_release('neutron-server')) <= 'icehouse':
        log('Not running neutron database migration as migrations are handled '
            'by the neutron-server process.')
        return

    if not is_elected_leader(CLUSTER_RES):
        log('Not running neutron database migration, not leader')
        return

    allowed_units = relation_get('allowed_units')
    if not (allowed_units and local_unit() in allowed_units.split()):
        log('Not running neutron database migration, either no '
            'allowed_units or this unit is not present')
        return

    migrate_neutron_database()
Code example #5
def leader_settings_changed():

    # we always want to write the keys on leader-settings-changed regardless of
    # whether the unit is paused or not.
    if fernet_enabled():
        key_write()

    # If we are paused, defer any config-changed work; it is forced to run
    # on resume.
    if is_unit_paused_set():
        log("Unit is paused or upgrading. Skipping config_changed", "WARN")
        return

    # Since minions are notified of a regime change via the
    # leader-settings-changed hook, rewrite the token flush cron job to make
    # sure only the leader is running the cron job.
    CONFIGS.write(TOKEN_FLUSH_CRON_FILE)

    # Make sure we keep domain and/or project ids used in templates up to date
    if CompareOpenStackReleases(os_release('keystone')) >= 'liberty':
        CONFIGS.write(POLICY_JSON)

    update_all_identity_relation_units()
    inform_peers_if_ready(check_api_unit_ready)
Code example #6
def determine_packages(source=None):
    # currently all packages match service names
    release = get_os_codename_install_source(source)
    cmp_release = CompareOpenStackReleases(release)
    packages = deepcopy(BASE_PACKAGES)
    if cmp_release >= 'rocky':
        packages.extend(PY3_PACKAGES)

    for v in resource_map().values():
        packages.extend(v['services'])
        if manage_plugin():
            pkgs = neutron_plugin_attribute(config('neutron-plugin'),
                                            'server_packages', 'neutron')
            packages.extend(pkgs)

    packages.extend(token_cache_pkgs(release=release))

    if cmp_release < 'rocky':
        if cmp_release >= 'kilo':
            packages.extend(KILO_PACKAGES)
        if cmp_release >= 'ocata':
            packages.append('python-neutron-dynamic-routing')
        if cmp_release >= 'pike':
            packages.remove('python-neutron-vpnaas')

        if release == 'kilo' or cmp_release >= 'mitaka':
            packages.append('python-networking-hyperv')

    if config('neutron-plugin') == 'vsp':
        nuage_pkgs = config('nuage-packages').split()
        packages.extend(nuage_pkgs)

    if cmp_release >= 'rocky':
        packages = [p for p in packages if not p.startswith('python-')]

    return list(set(packages))
Code example #7
def run_in_apache():
    """Return true if cinder API is run under apache2 with mod_wsgi in
    this release.
    """
    return CompareOpenStackReleases(os_release('cinder-common')) >= 'ocata'
Code example #8
    def __call__(self):
        from neutron_api_utils import api_port
        ctxt = super(NeutronCCContext, self).__call__()
        if config('neutron-plugin') == 'nsx':
            ctxt['nsx_username'] = config('nsx-username')
            ctxt['nsx_password'] = config('nsx-password')
            ctxt['nsx_tz_uuid'] = config('nsx-tz-uuid')
            ctxt['nsx_l3_uuid'] = config('nsx-l3-uuid')
            if 'nsx-controllers' in config():
                ctxt['nsx_controllers'] = \
                    ','.join(config('nsx-controllers').split())
                ctxt['nsx_controllers_list'] = \
                    config('nsx-controllers').split()
        if config('neutron-plugin') == 'plumgrid':
            ctxt['pg_username'] = config('plumgrid-username')
            ctxt['pg_password'] = config('plumgrid-password')
            ctxt['virtual_ip'] = config('plumgrid-virtual-ip')
        elif config('neutron-plugin') == 'midonet':
            ctxt.update(MidonetContext()())
            identity_context = IdentityServiceContext(service='neutron',
                                                      service_user='******')()
            if identity_context is not None:
                ctxt.update(identity_context)
        ctxt['l2_population'] = self.neutron_l2_population
        ctxt['enable_dvr'] = self.neutron_dvr
        ctxt['l3_ha'] = self.neutron_l3ha
        if self.neutron_l3ha:
            max_agents = config('max-l3-agents-per-router')
            min_agents = config('min-l3-agents-per-router')
            if max_agents < min_agents:
                raise ValueError("max-l3-agents-per-router ({}) must be >= "
                                 "min-l3-agents-per-router "
                                 "({})".format(max_agents, min_agents))

            ctxt['max_l3_agents_per_router'] = max_agents
            ctxt['min_l3_agents_per_router'] = min_agents

        ctxt['dhcp_agents_per_network'] = config('dhcp-agents-per-network')
        ctxt['tenant_network_types'] = self.neutron_tenant_network_types
        ctxt['overlay_network_type'] = self.neutron_overlay_network_type
        ctxt['external_network'] = config('neutron-external-network')
        release = os_release('neutron-server')
        cmp_release = CompareOpenStackReleases(release)
        if config('neutron-plugin') in ['vsp']:
            _config = config()
            for k, v in _config.items():
                if k.startswith('vsd'):
                    ctxt[k.replace('-', '_')] = v
            for rid in relation_ids('vsd-rest-api'):
                for unit in related_units(rid):
                    rdata = relation_get(rid=rid, unit=unit)
                    vsd_ip = rdata.get('vsd-ip-address')
                    if cmp_release >= 'kilo':
                        cms_id_value = rdata.get('nuage-cms-id')
                        log('relation data:cms_id required for'
                            ' nuage plugin: {}'.format(cms_id_value))
                        if cms_id_value is not None:
                            ctxt['vsd_cms_id'] = cms_id_value
                    log('relation data:vsd-ip-address: {}'.format(vsd_ip))
                    if vsd_ip is not None:
                        ctxt['vsd_server'] = '{}:8443'.format(vsd_ip)
            if 'vsd_server' not in ctxt:
                ctxt['vsd_server'] = '1.1.1.1:8443'
        ctxt['verbose'] = config('verbose')
        ctxt['debug'] = config('debug')
        ctxt['neutron_bind_port'] = \
            determine_api_port(api_port('neutron-server'),
                               singlenode_mode=True)
        ctxt['quota_security_group'] = config('quota-security-group')
        ctxt['quota_security_group_rule'] = \
            config('quota-security-group-rule')
        ctxt['quota_network'] = config('quota-network')
        ctxt['quota_subnet'] = config('quota-subnet')
        ctxt['quota_port'] = config('quota-port')
        ctxt['quota_vip'] = config('quota-vip')
        ctxt['quota_pool'] = config('quota-pool')
        ctxt['quota_member'] = config('quota-member')
        ctxt['quota_health_monitors'] = config('quota-health-monitors')
        ctxt['quota_router'] = config('quota-router')
        ctxt['quota_floatingip'] = config('quota-floatingip')

        n_api_settings = self.get_neutron_api_rel_settings()
        if n_api_settings:
            ctxt.update(n_api_settings)

        flat_providers = config('flat-network-providers')
        if flat_providers:
            ctxt['network_providers'] = ','.join(flat_providers.split())

        vlan_ranges = config('vlan-ranges')
        if vlan_ranges:
            ctxt['vlan_ranges'] = ','.join(vlan_ranges.split())

        vni_ranges = config('vni-ranges')
        if vni_ranges:
            ctxt['vni_ranges'] = ','.join(vni_ranges.split())

        enable_dns_extension_driver = False

        dns_domain = get_dns_domain()
        if dns_domain:
            enable_dns_extension_driver = True
            ctxt['dns_domain'] = dns_domain

        if cmp_release >= 'mitaka':
            for rid in relation_ids('external-dns'):
                if related_units(rid):
                    enable_dns_extension_driver = True

        extension_drivers = []
        if config('enable-ml2-port-security'):
            extension_drivers.append(EXTENSION_DRIVER_PORT_SECURITY)
        if enable_dns_extension_driver:
            extension_drivers.append(EXTENSION_DRIVER_DNS)
        if is_qos_requested_and_valid():
            extension_drivers.append(EXTENSION_DRIVER_QOS)

        if extension_drivers:
            ctxt['extension_drivers'] = ','.join(extension_drivers)

        ctxt['enable_sriov'] = config('enable-sriov')

        if cmp_release >= 'mitaka':
            if config('global-physnet-mtu'):
                ctxt['global_physnet_mtu'] = config('global-physnet-mtu')
                if config('path-mtu'):
                    ctxt['path_mtu'] = config('path-mtu')
                else:
                    ctxt['path_mtu'] = config('global-physnet-mtu')
                physical_network_mtus = config('physical-network-mtus')
                if physical_network_mtus:
                    ctxt['physical_network_mtus'] = ','.join(
                        physical_network_mtus.split())

        if 'kilo' <= cmp_release <= 'mitaka':
            pci_vendor_devs = config('supported-pci-vendor-devs')
            if pci_vendor_devs:
                ctxt['supported_pci_vendor_devs'] = \
                    ','.join(pci_vendor_devs.split())

        ctxt['mechanism_drivers'] = get_ml2_mechanism_drivers()

        if config('neutron-plugin') in ['ovs', 'ml2', 'Calico']:
            ctxt['service_plugins'] = []
            service_plugins = {
                'icehouse':
                [('neutron.services.l3_router.l3_router_plugin.'
                  'L3RouterPlugin'),
                 'neutron.services.firewall.fwaas_plugin.FirewallPlugin',
                 'neutron.services.loadbalancer.plugin.LoadBalancerPlugin',
                 'neutron.services.vpn.plugin.VPNDriverPlugin',
                 ('neutron.services.metering.metering_plugin.'
                  'MeteringPlugin')],
                'juno':
                [('neutron.services.l3_router.l3_router_plugin.'
                  'L3RouterPlugin'),
                 'neutron.services.firewall.fwaas_plugin.FirewallPlugin',
                 'neutron.services.loadbalancer.plugin.LoadBalancerPlugin',
                 'neutron.services.vpn.plugin.VPNDriverPlugin',
                 ('neutron.services.metering.metering_plugin.'
                  'MeteringPlugin')],
                'kilo': ['router', 'firewall', 'lbaas', 'vpnaas', 'metering'],
                'liberty':
                ['router', 'firewall', 'lbaas', 'vpnaas', 'metering'],
                'mitaka':
                ['router', 'firewall', 'lbaas', 'vpnaas', 'metering'],
                'newton': [
                    'router', 'firewall', 'vpnaas', 'metering',
                    ('neutron_lbaas.services.loadbalancer.plugin.'
                     'LoadBalancerPluginv2')
                ],
                'ocata': [
                    'router', 'firewall', 'vpnaas', 'metering',
                    ('neutron_lbaas.services.loadbalancer.plugin.'
                     'LoadBalancerPluginv2')
                ],
                'pike': [
                    'router', 'firewall', 'metering',
                    ('neutron_lbaas.services.loadbalancer.plugin.'
                     'LoadBalancerPluginv2')
                ],
            }
            ctxt['service_plugins'] = service_plugins.get(
                release, service_plugins['pike'])

            if is_qos_requested_and_valid():
                ctxt['service_plugins'].append('qos')
            ctxt['service_plugins'] = ','.join(ctxt['service_plugins'])

        return ctxt
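Note how the service_plugins table resolves releases newer than pike by falling back to the pike entry via dict.get(); the lookup pattern in isolation, with abbreviated plugin lists:

# Fallback lookup sketch: a release newer than the newest key reuses the
# newest known plugin list, mirroring
# service_plugins.get(release, service_plugins['pike']) above.
service_plugins = {
    'mitaka': ['router', 'firewall', 'lbaas', 'vpnaas', 'metering'],
    'pike': ['router', 'firewall', 'metering'],
}
assert (service_plugins.get('rocky', service_plugins['pike'])
        == ['router', 'firewall', 'metering'])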
Code example #9
def resource_map():
    """Get map of resources that will be managed for a single hook execution.

    :returns: map of resources
    :rtype: OrderedDict[str,Dict[str,List[str]]]
    """
    drop_config = []
    resource_map = deepcopy(BASE_RESOURCE_MAP)
    # Remap any service names as required
    _os_release = os_release('neutron-common', base='icehouse')
    if use_dvr():
        resource_map.update(DVR_RESOURCE_MAP)
        resource_map.update(METADATA_RESOURCE_MAP)
        dvr_services = ['neutron-metadata-agent', 'neutron-l3-agent']
        resource_map[NEUTRON_CONF]['services'] += dvr_services
    if enable_local_dhcp():
        resource_map.update(METADATA_RESOURCE_MAP)
        resource_map.update(DHCP_RESOURCE_MAP)
        metadata_services = ['neutron-metadata-agent', 'neutron-dhcp-agent']
        resource_map[NEUTRON_CONF]['services'] += metadata_services
        if use_dpdk() and CompareOpenStackReleases(_os_release) >= 'queens':
            resource_map[OVS_CONF]['services'] += ['neutron-dhcp-agent']
    if CompareOpenStackReleases(_os_release) >= 'mitaka':
        # ml2_conf.ini -> openvswitch_agent.ini
        drop_config.append(ML2_CONF)
        # drop '-plugin' from the service name
        resource_map[NEUTRON_CONF]['services'].remove(
            'neutron-plugin-openvswitch-agent')
        resource_map[NEUTRON_CONF]['services'].append(
            'neutron-openvswitch-agent')
        if use_dpdk():
            resource_map.update(DPDK_RESOURCE_MAP)
            if ovs_has_late_dpdk_init():
                drop_config.append(OVS_DEFAULT)
    else:
        drop_config.append(OVS_CONF)

    if enable_sriov():
        sriov_agent_name = 'neutron-sriov-agent'
        sriov_resource_map = deepcopy(SRIOV_RESOURCE_MAP)

        if CompareOpenStackReleases(_os_release) < 'mitaka':
            sriov_agent_name = 'neutron-plugin-sriov-agent'
            # Patch resource_map for Kilo and Liberty
            sriov_resource_map[NEUTRON_SRIOV_AGENT_CONF]['services'] = \
                [sriov_agent_name]

        resource_map.update(sriov_resource_map)
        resource_map[NEUTRON_CONF]['services'].append(sriov_agent_name)
    if enable_sriov() or use_hw_offload():
        # We do late initialization of this as a call to
        # ``context.SRIOVContext`` requires the ``sriov-netplan-shim`` package
        # to already be installed on the system.
        #
        # Note that we also do not want the charm to manage the service, but
        # only update the configuration for boot-time initialization.
        # LP: #1908351
        try:
            resource_map.update(
                OrderedDict([
                    (
                        SRIOV_NETPLAN_SHIM_CONF,
                        {
                            # We deliberately omit service here as we only want changes
                            # to be applied at boot time.
                            'services': [],
                            'contexts': [SRIOVContext_adapter()],
                        }),
                ]))
        except NameError:
            # The resource_map is built at module import time and as such this
            # function is called multiple times prior to the charm actually
            # being installed. As the SRIOVContext depends on a Python module
            # provided by the ``sriov-netplan-shim`` package gracefully ignore
            # this to allow the package to be installed.
            pass

    # Use MAAS1.9 for MTU and external port config on xenial and above
    if CompareHostReleases(lsb_release()['DISTRIB_CODENAME']) >= 'xenial':
        drop_config.extend([EXT_PORT_CONF, PHY_NIC_MTU_CONF])

    for _conf in drop_config:
        try:
            del resource_map[_conf]
        except KeyError:
            pass

    return resource_map
Code example #10
def _do_openstack_upgrade(new_src):
    enable_policy_rcd()
    # All upgrades to Liberty are forced to step through Kilo. Liberty does
    # not have the migrate_flavor_data option (Bug #1511466) available so it
    # must be done pre-upgrade
    if (CompareOpenStackReleases(os_release('nova-common')) == 'kilo'
            and is_leader()):
        migrate_nova_flavors()

    # 'nova-manage db online_data_migrations' needs to be run before moving to
    # the next release for environments upgraded using old charms where this
    # step was not being executed (LP: #1711209).
    online_data_migrations_if_needed()

    new_os_rel = get_os_codename_install_source(new_src)
    cmp_new_os_rel = CompareOpenStackReleases(new_os_rel)
    log('Performing OpenStack upgrade to %s.' % (new_os_rel))

    configure_installation_source(new_src)
    dpkg_opts = [
        '--option',
        'Dpkg::Options::=--force-confnew',
        '--option',
        'Dpkg::Options::=--force-confdef',
    ]

    apt_update(fatal=True)
    apt_upgrade(options=dpkg_opts, fatal=True, dist=True)
    reset_os_release()
    apt_install(determine_packages(), fatal=True)

    disable_policy_rcd()

    # NOTE(jamespage) upgrade with existing config files as the
    # havana->icehouse migration enables new service_plugins which
    # create issues with db upgrades
    configs = register_configs(release=new_os_rel)
    configs.write_all()

    if cmp_new_os_rel >= 'mitaka' and not database_setup(prefix='novaapi'):
        # NOTE: Defer service restarts and database migrations for now
        #       as nova_api database is not yet created
        if (relation_ids('cluster') and is_leader()):
            # NOTE: reset dbsync state so that migration will complete
            #       when the nova_api database is setup.
            peer_store('dbsync_state', None)
        return configs

    if cmp_new_os_rel >= 'ocata' and not database_setup(prefix='novacell0'):
        # NOTE: Defer service restarts and database migrations for now
        #       as nova_cell0 database is not yet created
        if (relation_ids('cluster') and is_leader()):
            # NOTE: reset dbsync state so that migration will complete
            #       when the novacell0 database is setup.
            peer_store('dbsync_state', None)
        return configs

    if is_leader():
        status_set('maintenance', 'Running nova db migration')
        migrate_nova_databases()

    if not is_unit_paused_set():
        [service_start(s) for s in services()]

    return configs
Code example #11
def placement_api_enabled():
    """Return true if nova-placement-api is enabled in this release"""
    return CompareOpenStackReleases(os_release('nova-common')) >= 'ocata'
Code example #12
def determine_purge_packages():
    '''Return a list of packages to purge for the current OS release'''
    cmp_os_source = CompareOpenStackReleases(os_release('nova-common'))
    if cmp_os_source >= 'rocky':
        return PURGE_PACKAGES
    return []
Code example #13
def enable_sriov_agent():
    '''Determine whether the SR-IOV agent should be used'''
    cmp_release = CompareOpenStackReleases(
        os_release('neutron-common', base='icehouse'))
    return (cmp_release >= 'mitaka' and config('enable-sriov'))
Code example #14
def run_in_apache():
    """Return true if ceilometer API is run under apache2 with mod_wsgi in
    this release.
    """
    os_cmp = CompareOpenStackReleases(os_release('ceilometer-common'))
    return (os_cmp >= 'ocata' and os_cmp < 'queens')
Code example #15
    def test_310_pci_alias_config(self):
        """Verify the pci alias data is rendered properly."""
        u.log.debug('Checking pci aliases in nova config')

        os_release = self._get_openstack_release_string()
        if CompareOpenStackReleases(os_release) < 'kilo':
            u.log.info('Skipping test, {} < kilo'.format(os_release))
            return

        _pci_alias1 = {
            "name": "IntelNIC",
            "capability_type": "pci",
            "product_id": "1111",
            "vendor_id": "8086",
            "device_type": "type-PF"
        }

        if CompareOpenStackReleases(os_release) >= 'ocata':
            section = "pci"
            key_name = "alias"
        else:
            section = "DEFAULT"
            key_name = "pci_alias"

        CONF = cfg.CONF
        opt_group = cfg.OptGroup(name=section)
        pci_opts = [cfg.MultiStrOpt(key_name)]
        CONF.register_group(opt_group)
        CONF.register_opts(pci_opts, opt_group)

        _pci_alias2 = {
            "name": " Cirrus Logic ",
            "capability_type": "pci",
            "product_id": "0ff2",
            "vendor_id": "10de",
            "device_type": "type-PCI"
        }

        _pci_alias_list = "[{}, {}]".format(
            json.dumps(_pci_alias1, sort_keys=True),
            json.dumps(_pci_alias2, sort_keys=True))

        unit = self.nova_cc_sentry
        conf = '/etc/nova/nova.conf'
        u.log.debug('Setting pci-alias to {}'.format(
            json.dumps(_pci_alias1, sort_keys=True)))
        self.d.configure(
            'nova-cloud-controller',
            {'pci-alias': json.dumps(_pci_alias1, sort_keys=True)})

        u.log.debug('Waiting for config change to take effect')
        self.d.sentry.wait()
        ret = u.validate_config_data(
            unit, conf, section, {
                key_name:
                ('{"capability_type": "pci", "device_type": "type-PF", '
                 '"name": "IntelNIC", "product_id": "1111", '
                 '"vendor_id": "8086"}')
            })
        if ret:
            message = "PCI Alias config error in section {}: {}".format(
                section, ret)
            amulet.raise_status(amulet.FAIL, msg=message)

        u.log.debug('Setting pci-alias to {}'.format(_pci_alias_list))
        self.d.configure('nova-cloud-controller',
                         {'pci-alias': _pci_alias_list})
        u.log.debug('Waiting for config change to take effect')
        self.d.sentry.wait()

        f = tempfile.NamedTemporaryFile(delete=False)
        f.write(unit.file_contents(conf))
        f.close()
        CONF(default_config_files=[f.name])
        if CompareOpenStackReleases(os_release) >= 'ocata':
            alias_entries = CONF.pci.alias
        else:
            alias_entries = CONF.DEFAULT.pci_alias
        assert alias_entries[0] == (
            '{"capability_type": "pci", "device_type": "type-PF", '
            '"name": "IntelNIC", "product_id": "1111", "vendor_id": "8086"}')
        assert alias_entries[1] == (
            '{"capability_type": "pci", "device_type": "type-PCI", '
            '"name": " Cirrus Logic ", "product_id": "0ff2", '
            '"vendor_id": "10de"}')
        self.d.configure('nova-cloud-controller', {'pci-alias': ''})
        self.d.sentry.wait()
Code example #16
    def __call__(self):
        from neutron_api_utils import api_port
        ctxt = super(NeutronCCContext, self).__call__()
        if config('neutron-plugin') == 'nsx':
            ctxt['nsx_username'] = config('nsx-username')
            ctxt['nsx_password'] = config('nsx-password')
            ctxt['nsx_tz_uuid'] = config('nsx-tz-uuid')
            ctxt['nsx_l3_uuid'] = config('nsx-l3-uuid')
            if 'nsx-controllers' in config():
                ctxt['nsx_controllers'] = \
                    ','.join(config('nsx-controllers').split())
                ctxt['nsx_controllers_list'] = \
                    config('nsx-controllers').split()
        if config('neutron-plugin') == 'plumgrid':
            ctxt['pg_username'] = config('plumgrid-username')
            ctxt['pg_password'] = config('plumgrid-password')
            ctxt['virtual_ip'] = config('plumgrid-virtual-ip')
        elif config('neutron-plugin') == 'midonet':
            ctxt.update(MidonetContext()())
            identity_context = IdentityServiceContext(service='neutron',
                                                      service_user='******')()
            if identity_context is not None:
                ctxt.update(identity_context)
        ctxt['l2_population'] = self.neutron_l2_population
        ctxt['enable_dvr'] = self.neutron_dvr
        ctxt['l3_ha'] = self.neutron_l3ha
        if self.neutron_l3ha:
            ctxt['max_l3_agents_per_router'] = \
                config('max-l3-agents-per-router')
            ctxt['min_l3_agents_per_router'] = \
                config('min-l3-agents-per-router')
        ctxt['dhcp_agents_per_network'] = config('dhcp-agents-per-network')
        ctxt['tenant_network_types'] = self.neutron_tenant_network_types
        ctxt['overlay_network_type'] = self.neutron_overlay_network_type
        ctxt['external_network'] = config('neutron-external-network')
        release = os_release('neutron-server')
        cmp_release = CompareOpenStackReleases(release)
        if config('neutron-plugin') in ['vsp']:
            _config = config()
            for k, v in _config.items():
                if k.startswith('vsd'):
                    ctxt[k.replace('-', '_')] = v
            for rid in relation_ids('vsd-rest-api'):
                for unit in related_units(rid):
                    rdata = relation_get(rid=rid, unit=unit)
                    vsd_ip = rdata.get('vsd-ip-address')
                    if cmp_release >= 'kilo':
                        cms_id_value = rdata.get('nuage-cms-id')
                        log('relation data:cms_id required for'
                            ' nuage plugin: {}'.format(cms_id_value))
                        if cms_id_value is not None:
                            ctxt['vsd_cms_id'] = cms_id_value
                    log('relation data:vsd-ip-address: {}'.format(vsd_ip))
                    if vsd_ip is not None:
                        ctxt['vsd_server'] = '{}:8443'.format(vsd_ip)
            if 'vsd_server' not in ctxt:
                ctxt['vsd_server'] = '1.1.1.1:8443'
        ctxt['verbose'] = config('verbose')
        ctxt['debug'] = config('debug')
        ctxt['neutron_bind_port'] = \
            determine_api_port(api_port('neutron-server'),
                               singlenode_mode=True)
        ctxt['quota_security_group'] = config('quota-security-group')
        ctxt['quota_security_group_rule'] = \
            config('quota-security-group-rule')
        ctxt['quota_network'] = config('quota-network')
        ctxt['quota_subnet'] = config('quota-subnet')
        ctxt['quota_port'] = config('quota-port')
        ctxt['quota_vip'] = config('quota-vip')
        ctxt['quota_pool'] = config('quota-pool')
        ctxt['quota_member'] = config('quota-member')
        ctxt['quota_health_monitors'] = config('quota-health-monitors')
        ctxt['quota_router'] = config('quota-router')
        ctxt['quota_floatingip'] = config('quota-floatingip')

        n_api_settings = self.get_neutron_api_rel_settings()
        if n_api_settings:
            ctxt.update(n_api_settings)

        flat_providers = config('flat-network-providers')
        if flat_providers:
            ctxt['network_providers'] = ','.join(flat_providers.split())

        vlan_ranges = config('vlan-ranges')
        if vlan_ranges:
            ctxt['vlan_ranges'] = ','.join(vlan_ranges.split())

        vni_ranges = config('vni-ranges')
        if vni_ranges:
            ctxt['vni_ranges'] = ','.join(vni_ranges.split())

        ctxt['enable_ml2_port_security'] = config('enable-ml2-port-security')
        ctxt['enable_sriov'] = config('enable-sriov')

        if cmp_release == 'kilo' or cmp_release >= 'mitaka':
            ctxt['enable_hyperv'] = True
        else:
            ctxt['enable_hyperv'] = False

        if cmp_release >= 'mitaka':
            if config('global-physnet-mtu'):
                ctxt['global_physnet_mtu'] = config('global-physnet-mtu')
                if config('path-mtu'):
                    ctxt['path_mtu'] = config('path-mtu')
                else:
                    ctxt['path_mtu'] = config('global-physnet-mtu')

        return ctxt
Code example #17
    def __call__(self):
        bind_port = config('bind-port')
        workers = config('workers')
        if workers == 0:
            import multiprocessing
            workers = multiprocessing.cpu_count()
        if config('prefer-ipv6'):
            proxy_ip = ('[{}]'.format(
                get_ipv6_addr(exc_list=[config('vip')])[0]))
            memcached_ip = 'ip6-localhost'
        else:
            proxy_ip = get_host_ip(unit_get('private-address'))
            memcached_ip = get_host_ip(unit_get('private-address'))

        ctxt = {
            'proxy_ip': proxy_ip,
            'memcached_ip': memcached_ip,
            'bind_port': determine_api_port(bind_port, singlenode_mode=True),
            'workers': workers,
            'operator_roles': config('operator-roles'),
            'delay_auth_decision': config('delay-auth-decision'),
            'node_timeout': config('node-timeout'),
            'recoverable_node_timeout': config('recoverable-node-timeout'),
            'log_headers': config('log-headers'),
            'statsd_host': config('statsd-host'),
            'statsd_port': config('statsd-port'),
            'statsd_sample_rate': config('statsd-sample-rate'),
            'static_large_object_segments':
            config('static-large-object-segments'),
            'enable_multi_region': config('enable-multi-region'),
            'read_affinity': get_read_affinity(),
            'write_affinity': get_write_affinity(),
            'write_affinity_node_count': get_write_affinity_node_count()
        }

        cmp_openstack = CompareOpenStackReleases(os_release('swift'))
        if cmp_openstack < 'train':
            # swauth is no longer supported for OpenStack Train and later
            admin_key = leader_get('swauth-admin-key')
            if admin_key is not None:
                ctxt['swauth_admin_key'] = admin_key

        if config('debug'):
            ctxt['log_level'] = 'DEBUG'
        else:
            ctxt['log_level'] = 'INFO'

        # Instead of duplicating code, let's use charm-helpers to set
        # signing_dir.
        # TODO(hopem): refactor this context handler to use charm-helpers
        #              code.
        _ctxt = IdentityServiceContext(service='swift', service_user='******')()
        signing_dir = _ctxt.get('signing_dir')
        if signing_dir:
            ctxt['signing_dir'] = signing_dir

        ctxt['ssl'] = False

        auth_type = config('auth-type')
        ctxt['auth_type'] = auth_type

        auth_host = config('keystone-auth-host')
        admin_user = config('keystone-admin-user')
        admin_password = config('keystone-admin-password')
        if (auth_type == 'keystone' and auth_host and admin_user
                and admin_password):
            log('Using user-specified Keystone configuration.')
            ks_auth = {
                'auth_type': 'keystone',
                'auth_protocol': config('keystone-auth-protocol'),
                'keystone_host': auth_host,
                'auth_port': config('keystone-auth-port'),
                'service_user': admin_user,
                'service_password': admin_password,
                'service_tenant': config('keystone-admin-tenant-name'),
            }
            ctxt.update(ks_auth)

        # Sometime during the 20.08 development cycle, keystone changed from
        # every unit setting relation data to just the leader. This means
        # that the charm needs to take data from the first relation that
        # actually has data, or amalgamate the data from all the relations.
        # For this charm, it merges the data from the available relation
        # ids, as the charms.reactive system does.
        _keys = (
            ('auth_protocol', 'auth_protocol', 'http'),
            ('service_protocol', 'service_protocol', 'http'),
            ('keystone_host', 'auth_host', None),
            ('service_host', 'service_host', None),
            ('auth_port', 'auth_port', None),
            ('service_user', 'service_username', None),
            ('service_password', 'service_password', None),
            ('service_tenant', 'service_tenant', None),
            ('service_port', 'service_port', None),
            ('api_version', 'api_version', '2'),
        )
        _keysv3 = (
            ('admin_domain_id', 'admin_domain_id'),
            ('service_tenant_id', 'service_tenant_id'),
            ('admin_domain_name', 'service_domain'),
            ('admin_tenant_name', 'service_tenant'),
        )

        kvs = {}
        relids = relation_ids('identity-service')
        # if we have relids at all, then set the auth_type to keystone
        if relids:
            kvs['auth_type'] = 'keystone'
        # merge the data from the related units
        for (key, source, default) in _keys:
            for relid in relids:
                for unit in related_units(relid):
                    value = relation_get(source, unit, relid)
                    if value is not None:
                        kvs[key] = value
                    else:
                        kvs[key] = kvs.get(key, default)
        # if the api is version 3, also merge the additional keys
        if kvs.get('api_version', None) == '3':
            for (key, source) in _keysv3:
                for relid in relids:
                    for unit in related_units(relid):
                        value = relation_get(source, unit, relid)
                        if value is not None:
                            kvs[key] = value

        # merge in the creds from the relation; which override the config
        ctxt.update(kvs)

        if config('prefer-ipv6'):
            for key in ['keystone_host', 'service_host']:
                host = ctxt.get(key)
                if host:
                    ctxt[key] = format_ipv6_addr(host)

        return ctxt
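The identity-service merge loop above can be exercised on its own; a minimal sketch with the charm-helpers relation calls stubbed out (relation ids, unit names and values below are illustrative only):

# Stubbed relation data; ids, units and values are illustrative only.
_FAKE_RELATIONS = {
    'identity-service:0': {
        'keystone/0': {'auth_protocol': 'https', 'auth_host': '10.0.0.5'},
        'keystone/1': {},  # a unit that set no data
    },
}


def relation_ids(name):
    return [rid for rid in _FAKE_RELATIONS if rid.startswith(name)]


def related_units(rid):
    return list(_FAKE_RELATIONS[rid])


def relation_get(source, unit, rid):
    return _FAKE_RELATIONS[rid][unit].get(source)


_keys = (('auth_protocol', 'auth_protocol', 'http'),
         ('keystone_host', 'auth_host', None))

kvs = {}
for (key, source, default) in _keys:
    for relid in relation_ids('identity-service'):
        for unit in related_units(relid):
            value = relation_get(source, unit, relid)
            if value is not None:
                kvs[key] = value
            else:
                kvs[key] = kvs.get(key, default)

# values from units that did set data win; defaults fill the gaps
assert kvs == {'auth_protocol': 'https', 'keystone_host': '10.0.0.5'}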
Code example #18
def git_post_install(projects_yaml):
    """Perform post-install setup."""
    src_etc = os.path.join(git_src_dir(projects_yaml, 'neutron'), 'etc')
    configs = [
        {
            'src': src_etc,
            'dest': '/etc/neutron'
        },
        {
            'src': os.path.join(src_etc, 'neutron/plugins'),
            'dest': '/etc/neutron/plugins'
        },
        {
            'src': os.path.join(src_etc, 'neutron/rootwrap.d'),
            'dest': '/etc/neutron/rootwrap.d'
        },
    ]

    for c in configs:
        if os.path.exists(c['dest']):
            shutil.rmtree(c['dest'])
        shutil.copytree(c['src'], c['dest'])

    # NOTE(coreycb): Need to find better solution than bin symlinks.
    symlinks = [
        {
            'src':
            os.path.join(git_pip_venv_dir(projects_yaml),
                         'bin/neutron-rootwrap'),
            'link':
            '/usr/local/bin/neutron-rootwrap'
        },
    ]

    for s in symlinks:
        if os.path.lexists(s['link']):
            os.remove(s['link'])
        os.symlink(s['src'], s['link'])

    render('git/neutron_sudoers',
           '/etc/sudoers.d/neutron_sudoers', {},
           perms=0o440)

    bin_dir = os.path.join(git_pip_venv_dir(projects_yaml), 'bin')
    cmp_os_release = CompareOpenStackReleases(os_release('neutron-common'))
    # Use systemd init units/scripts from ubuntu wily onward
    _release = lsb_release()['DISTRIB_CODENAME']
    if CompareHostReleases(_release) >= 'wily':
        templates_dir = os.path.join(charm_dir(), 'templates/git')
        daemons = ['neutron-openvswitch-agent', 'neutron-ovs-cleanup']
        for daemon in daemons:
            neutron_ovs_context = {
                'daemon_path': os.path.join(bin_dir, daemon),
            }
            filename = daemon
            if daemon == 'neutron-openvswitch-agent':
                if cmp_os_release < 'mitaka':
                    filename = 'neutron-plugin-openvswitch-agent'
            template_file = 'git/{}.init.in.template'.format(filename)
            init_in_file = '{}.init.in'.format(filename)
            render(template_file,
                   os.path.join(templates_dir, init_in_file),
                   neutron_ovs_context,
                   perms=0o644)
        git_generate_systemd_init_files(templates_dir)

        for daemon in daemons:
            filename = daemon
            if daemon == 'neutron-openvswitch-agent':
                if cmp_os_release < 'mitaka':
                    filename = 'neutron-plugin-openvswitch-agent'
            service('enable', filename)
    else:
        neutron_ovs_agent_context = {
            'service_description': 'Neutron OpenvSwitch Plugin Agent',
            'charm_name': 'neutron-openvswitch',
            'process_name': 'neutron-openvswitch-agent',
            'executable_name': os.path.join(bin_dir,
                                            'neutron-openvswitch-agent'),
            'cleanup_process_name': 'neutron-ovs-cleanup',
            'plugin_config': '/etc/neutron/plugins/ml2/ml2_conf.ini',
            'log_file': '/var/log/neutron/openvswitch-agent.log',
        }

        neutron_ovs_cleanup_context = {
            'service_description': 'Neutron OpenvSwitch Cleanup',
            'charm_name': 'neutron-openvswitch',
            'process_name': 'neutron-ovs-cleanup',
            'executable_name': os.path.join(bin_dir, 'neutron-ovs-cleanup'),
            'log_file': '/var/log/neutron/ovs-cleanup.log',
        }

        if cmp_os_release < 'mitaka':
            render('git/upstart/neutron-plugin-openvswitch-agent.upstart',
                   '/etc/init/neutron-plugin-openvswitch-agent.conf',
                   neutron_ovs_agent_context,
                   perms=0o644)
        else:
            render('git/upstart/neutron-plugin-openvswitch-agent.upstart',
                   '/etc/init/neutron-openvswitch-agent.conf',
                   neutron_ovs_agent_context,
                   perms=0o644)
        render('git/upstart/neutron-ovs-cleanup.upstart',
               '/etc/init/neutron-ovs-cleanup.conf',
               neutron_ovs_cleanup_context,
               perms=0o644)

    if not is_unit_paused_set():
        service_restart('neutron-plugin-openvswitch-agent')
Code example #19
def determine_endpoints(public_url, internal_url, admin_url):
    '''Generates a dictionary containing all relevant endpoints to be
    passed to keystone as relation settings.'''
    region = config('region')
    os_rel = os_release('nova-common')
    cmp_os_rel = CompareOpenStackReleases(os_rel)

    nova_public_url = ('%s:%s/v2/$(tenant_id)s' %
                       (public_url, api_port('nova-api-os-compute')))
    nova_internal_url = ('%s:%s/v2/$(tenant_id)s' %
                         (internal_url, api_port('nova-api-os-compute')))
    nova_admin_url = ('%s:%s/v2/$(tenant_id)s' %
                      (admin_url, api_port('nova-api-os-compute')))
    ec2_public_url = '%s:%s/services/Cloud' % (public_url,
                                               api_port('nova-api-ec2'))
    ec2_internal_url = '%s:%s/services/Cloud' % (internal_url,
                                                 api_port('nova-api-ec2'))
    ec2_admin_url = '%s:%s/services/Cloud' % (admin_url,
                                              api_port('nova-api-ec2'))

    s3_public_url = '%s:%s' % (public_url, api_port('nova-objectstore'))
    s3_internal_url = '%s:%s' % (internal_url, api_port('nova-objectstore'))
    s3_admin_url = '%s:%s' % (admin_url, api_port('nova-objectstore'))

    if cmp_os_rel >= 'ocata':
        placement_public_url = '%s:%s' % (public_url,
                                          api_port('nova-placement-api'))
        placement_internal_url = '%s:%s' % (internal_url,
                                            api_port('nova-placement-api'))
        placement_admin_url = '%s:%s' % (admin_url,
                                         api_port('nova-placement-api'))

    # the base endpoints
    endpoints = {
        'nova_service': 'nova',
        'nova_region': region,
        'nova_public_url': nova_public_url,
        'nova_admin_url': nova_admin_url,
        'nova_internal_url': nova_internal_url,
        'ec2_service': 'ec2',
        'ec2_region': region,
        'ec2_public_url': ec2_public_url,
        'ec2_admin_url': ec2_admin_url,
        'ec2_internal_url': ec2_internal_url,
        's3_service': 's3',
        's3_region': region,
        's3_public_url': s3_public_url,
        's3_admin_url': s3_admin_url,
        's3_internal_url': s3_internal_url,
    }

    if cmp_os_rel >= 'kilo':
        # NOTE(jamespage) drop endpoints for ec2 and s3
        #  ec2 is deprecated
        #  s3 is insecure and should die in flames
        endpoints.update({
            'ec2_service': None,
            'ec2_region': None,
            'ec2_public_url': None,
            'ec2_admin_url': None,
            'ec2_internal_url': None,
            's3_service': None,
            's3_region': None,
            's3_public_url': None,
            's3_admin_url': None,
            's3_internal_url': None,
        })

    if cmp_os_rel >= 'ocata':
        endpoints.update({
            'placement_service': 'placement',
            'placement_region': region,
            'placement_public_url': placement_public_url,
            'placement_admin_url': placement_admin_url,
            'placement_internal_url': placement_internal_url,
        })

    return endpoints
Code example #20
    def __call__(self):
        # distro defaults
        ctxt = {
            # /etc/libvirt/libvirtd.conf
            'listen_tls': 0
        }
        cmp_distro_codename = CompareHostReleases(
            lsb_release()['DISTRIB_CODENAME'].lower())
        cmp_os_release = CompareOpenStackReleases(os_release('nova-common'))

        # NOTE(jamespage): deal with switch to systemd
        if cmp_distro_codename < "wily":
            ctxt['libvirtd_opts'] = '-d'
        else:
            ctxt['libvirtd_opts'] = ''

        # NOTE(jamespage): deal with alignment with Debian in
        #                  Ubuntu yakkety and beyond.
        if cmp_distro_codename >= 'yakkety' or cmp_os_release >= 'ocata':
            ctxt['libvirt_user'] = '******'
        else:
            ctxt['libvirt_user'] = '******'

        # get the processor architecture to use in the nova.conf template
        ctxt['arch'] = platform.machine()

        # enable tcp listening if configured for live migration.
        if config('enable-live-migration'):
            ctxt['libvirtd_opts'] += ' -l'

        if config('migration-auth-type') in ['none', 'None', 'ssh']:
            ctxt['listen_tls'] = 0

        if config('migration-auth-type') == 'ssh':
            # nova.conf
            ctxt['live_migration_uri'] = 'qemu+ssh://%s/system'

        if config('instances-path') is not None:
            ctxt['instances_path'] = config('instances-path')

        if config('disk-cachemodes'):
            ctxt['disk_cachemodes'] = config('disk-cachemodes')

        if config('cpu-mode'):
            ctxt['cpu_mode'] = config('cpu-mode')
        elif ctxt['arch'] in ['ppc64el', 'ppc64le']:
            ctxt['cpu_mode'] = 'host-passthrough'
        elif ctxt['arch'] == 's390x':
            ctxt['cpu_mode'] = 'none'

        if config('cpu-model'):
            ctxt['cpu_model'] = config('cpu-model')

        if config('hugepages'):
            ctxt['hugepages'] = True
            ctxt['kvm_hugepages'] = 1
        else:
            ctxt['kvm_hugepages'] = 0

        if config('ksm') in ("1", "0"):
            ctxt['ksm'] = config('ksm')
        else:
            if cmp_os_release < 'kilo':
                log("KSM set to 1 by default on openstack releases < kilo",
                    level=INFO)
                ctxt['ksm'] = "1"
            else:
                ctxt['ksm'] = "AUTO"

        if config('pci-passthrough-whitelist'):
            ctxt['pci_passthrough_whitelist'] = \
                config('pci-passthrough-whitelist')

        if config('vcpu-pin-set'):
            ctxt['vcpu_pin_set'] = config('vcpu-pin-set')

        ctxt['reserved_host_memory'] = config('reserved-host-memory')

        db = kv()
        if db.get('host_uuid'):
            ctxt['host_uuid'] = db.get('host_uuid')
        else:
            host_uuid = str(uuid.uuid4())
            db.set('host_uuid', host_uuid)
            db.flush()
            ctxt['host_uuid'] = host_uuid

        if config('libvirt-image-backend'):
            ctxt['libvirt_images_type'] = config('libvirt-image-backend')

        return ctxt
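The host_uuid block above is a write-once cache backed by the charm's local key/value store; the same pattern sketched with a plain dict standing in for charmhelpers' kv():

import uuid

_store = {}  # stand-in for the unitdata kv() store


def get_host_uuid():
    """Generate a host UUID on first use; return the cached value after."""
    if 'host_uuid' not in _store:
        _store['host_uuid'] = str(uuid.uuid4())
    return _store['host_uuid']


assert get_host_uuid() == get_host_uuid()  # stable across calls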
Code example #21
def resource_map(actual_services=True):
    '''
    Dynamically generate a map of resources that will be managed for a single
    hook execution.

    :param actual_services: Whether to return the actual services that run
        on a unit (i.e. apache2) or the services defined in BASE_SERVICES
        (i.e. nova-placement-api).
    '''
    resource_map = deepcopy(BASE_RESOURCE_MAP)

    if os.path.exists('/etc/apache2/conf-available'):
        resource_map.pop(APACHE_CONF)
    else:
        resource_map.pop(APACHE_24_CONF)

    resource_map[NOVA_CONF]['contexts'].append(
        nova_cc_context.NeutronCCContext())

    release = os_release('nova-common')
    cmp_os_release = CompareOpenStackReleases(release)
    if cmp_os_release >= 'mitaka':
        resource_map[NOVA_CONF]['contexts'].append(
            nova_cc_context.NovaAPISharedDBContext(relation_prefix='novaapi',
                                                   database='nova_api',
                                                   ssl_dir=NOVA_CONF_DIR))

    if console_attributes('services'):
        resource_map[NOVA_CONF]['services'] += console_attributes('services')
        # nova-consoleauth will be managed by pacemaker; if
        # single-nova-consoleauth is used then don't monitor for the
        # nova-consoleauth service to be started (LP: #1660244).
        if config('single-nova-consoleauth') and relation_ids('ha'):
            services = resource_map[NOVA_CONF]['services']
            if 'nova-consoleauth' in services:
                services.remove('nova-consoleauth')

    if (config('enable-serial-console') and cmp_os_release >= 'juno'):
        resource_map[NOVA_CONF]['services'] += SERIAL_CONSOLE['services']

    # also manage any configs that are being updated by subordinates.
    vmware_ctxt = context.SubordinateConfigContext(interface='nova-vmware',
                                                   service='nova',
                                                   config_file=NOVA_CONF)
    vmware_ctxt = vmware_ctxt()
    if vmware_ctxt and 'services' in vmware_ctxt:
        for s in vmware_ctxt['services']:
            if s not in resource_map[NOVA_CONF]['services']:
                resource_map[NOVA_CONF]['services'].append(s)

    if enable_memcache(release=release):
        resource_map[MEMCACHED_CONF] = {
            'contexts': [context.MemcacheContext()],
            'services': ['memcached']
        }

    if actual_services and placement_api_enabled():
        for cfile in resource_map:
            svcs = resource_map[cfile]['services']
            if 'nova-placement-api' in svcs:
                svcs.remove('nova-placement-api')
                if 'apache2' not in svcs:
                    svcs.append('apache2')
        wsgi_script = "/usr/bin/nova-placement-api"
        resource_map[WSGI_NOVA_PLACEMENT_API_CONF] = {
            'contexts': [
                context.WSGIWorkerConfigContext(name="nova",
                                                script=wsgi_script),
                nova_cc_context.HAProxyContext()
            ],
            'services': ['apache2']
        }
    elif not placement_api_enabled():
        for cfile in resource_map:
            svcs = resource_map[cfile]['services']
            if 'nova-placement-api' in svcs:
                svcs.remove('nova-placement-api')

    return resource_map
Code example #22
    def __call__(self):
        # distro defaults
        ctxt = {
            # /etc/libvirt/libvirtd.conf
            'listen_tls': 0
        }
        cmp_distro_codename = CompareHostReleases(
            lsb_release()['DISTRIB_CODENAME'].lower())
        cmp_os_release = CompareOpenStackReleases(os_release('nova-common'))

        # NOTE(jamespage): deal with switch to systemd
        if cmp_distro_codename < "wily":
            ctxt['libvirtd_opts'] = '-d'
        else:
            ctxt['libvirtd_opts'] = ''

        # NOTE(jamespage): deal with alignment with Debian in
        #                  Ubuntu yakkety and beyond.
        if cmp_distro_codename >= 'yakkety' or cmp_os_release >= 'ocata':
            ctxt['libvirt_user'] = '******'
        else:
            ctxt['libvirt_user'] = '******'

        # get the processor architecture to use in the nova.conf template
        ctxt['arch'] = platform.machine()

        # enable tcp listening if configured for live migration.
        if config('enable-live-migration'):
            ctxt['libvirtd_opts'] += ' -l'

        if config('enable-live-migration') and \
                config('migration-auth-type') in ['none', 'None', 'ssh']:
            ctxt['listen_tls'] = 0

        if config('enable-live-migration') and \
                config('migration-auth-type') == 'ssh':
            migration_address = get_relation_ip(
                'migration', cidr_network=config('libvirt-migration-network'))

            if cmp_os_release >= 'ocata':
                ctxt['live_migration_scheme'] = config('migration-auth-type')
                ctxt['live_migration_inbound_addr'] = migration_address
            else:
                ctxt['live_migration_uri'] = 'qemu+ssh://%s/system'

        if config('enable-live-migration'):
            ctxt['live_migration_completion_timeout'] = \
                config('live-migration-completion-timeout')
            ctxt['live_migration_downtime'] = \
                config('live-migration-downtime')
            ctxt['live_migration_downtime_steps'] = \
                config('live-migration-downtime-steps')
            ctxt['live_migration_downtime_delay'] = \
                config('live-migration-downtime-delay')
            ctxt['live_migration_permit_post_copy'] = \
                config('live-migration-permit-post-copy')
            ctxt['live_migration_permit_auto_converge'] = \
                config('live-migration-permit-auto-converge')

        if config('instances-path') is not None:
            ctxt['instances_path'] = config('instances-path')

        if config('disk-cachemodes'):
            ctxt['disk_cachemodes'] = config('disk-cachemodes')

        if config('use-multipath'):
            ctxt['use_multipath'] = config('use-multipath')

        if config('default-ephemeral-format'):
            ctxt['default_ephemeral_format'] = \
                config('default-ephemeral-format')

        if config('cpu-mode'):
            ctxt['cpu_mode'] = config('cpu-mode')
        elif ctxt['arch'] in ('ppc64el', 'ppc64le', 'aarch64'):
            ctxt['cpu_mode'] = 'host-passthrough'
        elif ctxt['arch'] == 's390x':
            ctxt['cpu_mode'] = 'none'

        if config('cpu-model'):
            ctxt['cpu_model'] = config('cpu-model')

        if config('cpu-model-extra-flags'):
            ctxt['cpu_model_extra_flags'] = ', '.join(
                config('cpu-model-extra-flags').split(' '))

        if config('hugepages'):
            ctxt['hugepages'] = True
            ctxt['kvm_hugepages'] = 1
        else:
            ctxt['kvm_hugepages'] = 0

        if config('ksm') in ("1", "0"):
            ctxt['ksm'] = config('ksm')
        else:
            if cmp_os_release < 'kilo':
                log("KSM set to 1 by default on openstack releases < kilo",
                    level=INFO)
                ctxt['ksm'] = "1"
            else:
                ctxt['ksm'] = "AUTO"

        if config('reserved-huge-pages'):
            # Juju config options cannot hold a list of strings, so the
            # option's values are separated by semicolons.
            ctxt['reserved_huge_pages'] = ([
                o.strip() for o in config('reserved-huge-pages').split(";")
            ])

        if config('pci-passthrough-whitelist'):
            ctxt['pci_passthrough_whitelist'] = \
                config('pci-passthrough-whitelist')

        if config('pci-alias'):
            aliases = json.loads(config('pci-alias'))
            # Behaviour prior to queens is maintained as it was
            if isinstance(aliases, list) and cmp_os_release >= 'queens':
                ctxt['pci_aliases'] = [
                    json.dumps(x, sort_keys=True) for x in aliases
                ]
            else:
                ctxt['pci_alias'] = json.dumps(aliases, sort_keys=True)

        if config('cpu-dedicated-set'):
            ctxt['cpu_dedicated_set'] = config('cpu-dedicated-set')
        elif config('vcpu-pin-set'):
            ctxt['vcpu_pin_set'] = config('vcpu-pin-set')

        if config('cpu-shared-set'):
            ctxt['cpu_shared_set'] = config('cpu-shared-set')

        if config('virtio-net-tx-queue-size'):
            ctxt['virtio_net_tx_queue_size'] = (
                config('virtio-net-tx-queue-size'))
        if config('virtio-net-rx-queue-size'):
            ctxt['virtio_net_rx_queue_size'] = (
                config('virtio-net-rx-queue-size'))

        ctxt['reserved_host_memory'] = config('reserved-host-memory')

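        # Persist a stable per-host UUID across hook executions using
        # the charm's local unitdata key/value store.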
        db = kv()
        if db.get('host_uuid'):
            ctxt['host_uuid'] = db.get('host_uuid')
        else:
            host_uuid = str(uuid.uuid4())
            db.set('host_uuid', host_uuid)
            db.flush()
            ctxt['host_uuid'] = host_uuid

        if config('libvirt-image-backend'):
            ctxt['libvirt_images_type'] = config('libvirt-image-backend')

        ctxt['force_raw_images'] = config('force-raw-images')
        ctxt['inject_password'] = config('inject-password')
        # Injecting an admin password is only possible when
        # inject_partition is greater than or equal to -1;
        # -2 disables injection of any data into the instance disk.
        ctxt['inject_partition'] = -1 if config('inject-password') else -2

        return ctxt
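
The returned dictionary is consumed by the charm's template layer. As a minimal sketch of that hand-off (the template name and target path are illustrative, not taken from the charm), charmhelpers' render helper applies the context to a template:

from charmhelpers.core.templating import render

def write_nova_conf(ctxt):
    # Render the (hypothetical) templates/nova.conf with the context
    # produced by __call__() above; paths here are illustrative only.
    render(source='nova.conf',
           target='/etc/nova/nova.conf',
           context=ctxt,
           perms=0o640)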
コード例 #23
0
def determine_purge_packages():
    cmp_release = CompareOpenStackReleases(
        os_release('neutron-common', base='icehouse', reset_cache=True))
    if cmp_release >= 'rocky':
        return PURGE_PACKAGES
    return []
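
CompareOpenStackReleases compares release code names in chronological OpenStack order, which is what makes the >= test above read naturally. A couple of illustrative assertions:

from charmhelpers.contrib.openstack.utils import CompareOpenStackReleases

# Comparisons follow release order, and equality is inclusive.
assert CompareOpenStackReleases('rocky') >= 'rocky'
assert CompareOpenStackReleases('queens') < 'rocky'
assert not CompareOpenStackReleases('pike') >= 'rocky'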
コード例 #24
0
def config_changed():
    # neutron-server runs if < juno. Neutron-server creates mysql tables
    # which will subsequently cause db migrations to fail if >= juno.
    # Disable neutron-server if >= juno.
    if CompareOpenStackReleases(os_release('nova-common')) >= 'juno':
        # Open in text mode: the override content is a str, not bytes.
        with open('/etc/init/neutron-server.override', 'w') as out:
            out.write('manual\n')
    if config('prefer-ipv6'):
        status_set('maintenance', 'configuring ipv6')
        setup_ipv6()
        sync_db_with_multi_ipv6_addresses(config('database'),
                                          config('database-user'),
                                          relation_prefix='nova')

    global CONFIGS
    if git_install_requested():
        status_set('maintenance', 'Running Git install')
        if config_value_changed('openstack-origin-git'):
            git_install(config('openstack-origin-git'))
    elif not config('action-managed-upgrade'):
        if openstack_upgrade_available('nova-common'):
            status_set('maintenance', 'Running openstack upgrade')
            CONFIGS = do_openstack_upgrade(CONFIGS)
            [
                neutron_api_relation_joined(rid=rid, remote_restart=True)
                for rid in relation_ids('neutron-api')
            ]
            # NOTE(jamespage): Force re-fire of shared-db joined hook
            # to ensure that nova_api database is setup if required.
            [db_joined(relation_id=r_id) for r_id in relation_ids('shared-db')]

    save_script_rc()
    configure_https()
    CONFIGS.write_all()

    # NOTE(jamespage): deal with any changes to the console and serial
    #                  console configuration options
    if not git_install_requested():
        filtered = filter_installed_packages(determine_packages())
        if filtered:
            apt_install(filtered, fatal=True)

    for rid in relation_ids('quantum-network-service'):
        quantum_joined(rid=rid)
    for r_id in relation_ids('identity-service'):
        identity_joined(rid=r_id)
    for rid in relation_ids('zeromq-configuration'):
        zeromq_configuration_relation_joined(rid)
    [cluster_joined(rid) for rid in relation_ids('cluster')]
    [compute_joined(rid=rid) for rid in relation_ids('cloud-compute')]

    update_nrpe_config()

    # If the region value has changed, notify the cloud-compute relations
    # to ensure the value is propagated to the compute nodes.
    if config_value_changed('region'):
        for rid in relation_ids('cloud-compute'):
            for unit in related_units(rid):
                compute_changed(rid, unit)

    update_nova_consoleauth_config()
    update_aws_compat_services()
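
config_value_changed() detects whether an option differs from the value seen in a previous hook execution. A minimal sketch of that pattern built on unitdata (not the charmhelpers implementation itself):

from charmhelpers.core.hookenv import config
from charmhelpers.core.unitdata import kv

def value_changed(option):
    # Compare the live config value with the last value we persisted,
    # then store the new value as the baseline for the next hook.
    db = kv()
    current = config(option)
    changed = db.get('config.{}'.format(option)) != current
    db.set('config.{}'.format(option), current)
    db.flush()
    return changed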
コード例 #25
0
def use_dpdk():
    '''Determine whether DPDK should be used'''
    cmp_release = CompareOpenStackReleases(
        os_release('neutron-common', base='icehouse', reset_cache=True))
    return (cmp_release >= 'mitaka' and config('enable-dpdk'))
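
Callers typically gate package selection or context generation on this predicate; for example (the package name is illustrative):

def determine_dpdk_packages():
    # Pull in the DPDK-enabled Open vSwitch build only when the
    # release supports it and the operator asked for it.
    if use_dpdk():
        return ['openvswitch-switch-dpdk']
    return []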
コード例 #26
0
    @classmethod
    def _decode_password_security_compliance_string(cls, maybe_yaml):
        """Decode string to dict for 'password-security-compliance'

        Perform some validation on the string and return either None,
        if the string is not valid, or a dictionary of the security
        compliance keys and values.

        :param maybe_yaml: the config item that is (hopefully) YAML format
        :type maybe_yaml: str
        :returns: a dictionary of keys: values or None if the value is not
                  valid.
        :rtype: Optional[Dict[str, Union[str, int, bool]]]
        """
        cmp_release = CompareOpenStackReleases(os_release('keystone'))
        if cmp_release < 'newton':
            log(
                "'password-security-compliance' isn't valid for releases "
                "before Newton.",
                level='ERROR')
            return None
        try:
            config_items = yaml.safe_load(maybe_yaml)
        except Exception as e:
            log("Couldn't decode config value for "
                "'password-security-compliance': Invalid YAML?: {}".format(
                    str(e)),
                level='ERROR')
            return None
        # ensure that the top level is a dictionary.
        if type(config_items) != dict:
            log("Couldn't decode config value for "
                "'password-security-compliance'.  It doesn't appear to be a "
                "dictionary: {}".format(str(config_items)),
                level='ERROR')
            return None
        # check that the keys present are valid ones.
        config_keys = config_items.keys()
        allowed_keys = cls.ALLOWED_SECURITY_COMPLIANCE_SCHEMA.keys()
        invalid_keys = [k for k in config_keys if k not in allowed_keys]
        if invalid_keys:
            log("Invalid config key(s) found in config "
                "'password-security-compliance' setting: {}".format(
                    ", ".join(invalid_keys)),
                level='ERROR')
            return None
        # check that the types are valid
        valid_types = cls.ALLOWED_SECURITY_COMPLIANCE_SCHEMA
        invalid_types = {
            k: (type(v) != valid_types[k])
            for k, v in config_items.items()
        }
        if any(invalid_types.values()):
            log("Invalid config value type(s) found in config "
                "'password-security-compliance' setting: {}".format(", ".join([
                    "{}: {} -- should be an {}".format(
                        k,
                        type(config_items[k]).__name__,
                        valid_types[k].__name__)
                    for k, v in invalid_types.items()
                ])),
                level='ERROR')
            return None
        return config_items
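
Assuming a schema along the lines of the keystone charm's (the keys below are illustrative), the decoder accepts a well-formed YAML mapping and rejects everything else:

# Hypothetical schema and inputs, for illustration only.
ALLOWED_SECURITY_COMPLIANCE_SCHEMA = {
    'lockout_failure_attempts': int,
    'lockout_duration': int,
    'change_password_upon_first_use': bool,
}

# "{lockout_failure_attempts: 5, lockout_duration: 1800}"
#     -> {'lockout_failure_attempts': 5, 'lockout_duration': 1800}
# "just-a-string"          -> None (not a mapping)
# "{unknown_key: 1}"       -> None (key not in the schema)
# "{lockout_duration: no}" -> None (bool where an int is expected)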
コード例 #27
0
def resource_map(release=None):
    """
    Dynamically generate a map of resources that will be managed for a single
    hook execution.
    """
    resource_map = deepcopy(BASE_RESOURCE_MAP)
    release = release or os_release('cinder-common', base='icehouse')
    if relation_ids('backup-backend'):
        resource_map[CINDER_CONF]['services'].append('cinder-backup')
        resource_map[ceph_config_file()]['services'].append('cinder-backup')

    if relation_ids('ceph') and hook_name() != 'ceph-relation-broken':
        # need to create this early; new peers will have a relation during
        # registration, before they've run the ceph hooks to create the
        # directory.
        # !!! FIX: These side effects seem inappropriate for this method
        mkdir(os.path.dirname(CEPH_CONF))
        mkdir(os.path.dirname(ceph_config_file()))

        # Install ceph config as an alternative for co-location with
        # the ceph and ceph-osd charms - cinder's ceph.conf will be
        # lower priority than both of these, but that's OK.
        if not os.path.exists(ceph_config_file()):
            # touch file for pre-templated generation
            open(ceph_config_file(), 'w').close()
        install_alternative(os.path.basename(CEPH_CONF), CEPH_CONF,
                            ceph_config_file())
    else:
        resource_map.pop(ceph_config_file())

    if os.path.exists('/etc/apache2/conf-available'):
        resource_map.pop(APACHE_SITE_CONF)
    else:
        resource_map.pop(APACHE_SITE_24_CONF)

    # Remove services from map which are not enabled by user config
    for cfg in resource_map.keys():
        resource_map[cfg]['services'] = \
            filter_services(resource_map[cfg]['services'])

    if enable_memcache(source=config()['openstack-origin']):
        resource_map[MEMCACHED_CONF] = {
            'contexts': [context.MemcacheContext()],
            'services': ['memcached']
        }

    if not service_enabled('api'):
        # haproxy and apache2 are related to cinder-api
        cfg_files = {
            CINDER_API_CONF,
            HAPROXY_CONF,
            APACHE_SITE_CONF,
            APACHE_SITE_24_CONF,
            APACHE_PORTS_CONF,
        }
        for cfg in cfg_files.intersection(resource_map.keys()):
            resource_map.pop(cfg)
    elif run_in_apache():
        for cfile in resource_map:
            svcs = resource_map[cfile]['services']
            if 'cinder-api' in svcs:
                svcs.remove('cinder-api')
                if 'apache2' not in svcs:
                    svcs.append('apache2')
        wsgi_script = "/usr/bin/cinder-wsgi"
        resource_map[WSGI_CINDER_API_CONF] = {
            'contexts': [
                context.WSGIWorkerConfigContext(name="cinder",
                                                script=wsgi_script),
                cinder_contexts.HAProxyContext()
            ],
            'services': ['apache2']
        }

    if release and CompareOpenStackReleases(release) < 'queens':
        resource_map.pop(CINDER_POLICY_JSON)

    return resource_map
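
Charms built on this pattern usually derive restart_map() directly from resource_map(), so a change to a managed file restarts exactly the services registered for it:

def restart_map():
    # Map each managed config file to the services restarted when
    # that file changes on disk.
    return {cfg: v['services'] for cfg, v in resource_map().items()}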
コード例 #28
0
def resource_map():
    '''
    Dynamically generate a map of resources that will be managed for a single
    hook execution.
    '''
    # TODO: Cache this on first call?
    if config('virt-type').lower() == 'lxd':
        resource_map = deepcopy(BASE_RESOURCE_MAP)
    else:
        resource_map = deepcopy(LIBVIRT_RESOURCE_MAP)

    # If the vault dependencies are not installed, the vault context
    # status cannot be checked yet, since doing so requires hvac.
    if not vaultlocker_installed():
        to_delete = []
        for item in resource_map[NOVA_CONF]['contexts']:
            if isinstance(item, type(vaultlocker.VaultKVContext())):
                to_delete.append(item)

        for item in to_delete:
            resource_map[NOVA_CONF]['contexts'].remove(item)

    net_manager = network_manager()

    # Network manager gets set late by the cloud-compute interface.
    # FlatDHCPManager only requires some extra packages.
    cmp_os_release = CompareOpenStackReleases(os_release('nova-common'))
    if (net_manager in ['flatmanager', 'flatdhcpmanager']
            and config('multi-host').lower() == 'yes'
            and cmp_os_release < 'ocata'):
        resource_map[NOVA_CONF]['services'].extend(
            ['nova-api', 'nova-network'])
    else:
        resource_map.pop(NOVA_API_AA_PROFILE_PATH)
        resource_map.pop(NOVA_NETWORK_AA_PROFILE_PATH)

    cmp_distro_codename = CompareHostReleases(
        lsb_release()['DISTRIB_CODENAME'].lower())
    if (cmp_distro_codename >= 'yakkety' or cmp_os_release >= 'ocata'):
        for data in resource_map.values():
            if LIBVIRT_BIN_DAEMON in data['services']:
                data['services'].remove(LIBVIRT_BIN_DAEMON)
                data['services'].append(LIBVIRTD_DAEMON)

    # Neutron/quantum requires additional contexts, as well as new resources
    # depending on the plugin used.
    # NOTE(james-page): only required for ovs plugin right now
    if net_manager in ['neutron', 'quantum']:
        resource_map[NOVA_CONF]['contexts'].append(NeutronComputeContext())

    if relation_ids('ceph'):
        CEPH_RESOURCES[ceph_config_file()] = {
            'contexts': [NovaComputeCephContext()],
            'services': ['nova-compute']
        }
        resource_map.update(CEPH_RESOURCES)

    enable_nova_metadata, _ = nova_metadata_requirement()
    if enable_nova_metadata:
        resource_map[NOVA_CONF]['services'].append('nova-api-metadata')

    # NOTE(james-page): If not on an upstart based system, don't write
    #                   an override file for libvirt-bin.
    if not os.path.exists('/etc/init'):
        del resource_map[LIBVIRT_BIN_OVERRIDES]

    return resource_map
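
The same map drives template registration. A sketch of the register_configs() idiom used across these charms (templates directory assumed):

from charmhelpers.contrib.openstack import templating

def register_configs():
    # Register every managed config file, with its contexts, against
    # an OpenStack-release-aware template renderer.
    release = os_release('nova-common')
    configs = templating.OSConfigRenderer(templates_dir='templates/',
                                          openstack_release=release)
    for cfg, d in resource_map().items():
        configs.register(cfg, d['contexts'])
    return configs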
コード例 #29
0
 def __call__(self):
     cmp_os_release = CompareOpenStackReleases(os_release('cinder-common'))
     return {'sectional_default_config': cmp_os_release >= "ocata"}
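
Contexts such as this one normally subclass OSContextGenerator so a renderer can call them; a self-contained version might look like this (class name illustrative):

from charmhelpers.contrib.openstack.context import OSContextGenerator
from charmhelpers.contrib.openstack.utils import (
    CompareOpenStackReleases,
    os_release,
)

class SectionalConfigContext(OSContextGenerator):
    """Flag whether cinder.conf may use per-backend sections,
    supported from ocata onwards."""

    def __call__(self):
        cmp_os_release = CompareOpenStackReleases(os_release('cinder-common'))
        return {'sectional_default_config': cmp_os_release >= 'ocata'}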
コード例 #30
0
def identity_joined(rid=None):
    if config('vip') and not is_clustered():
        log('Deferring registration until clustered', level=DEBUG)
        return

    settings = {}

    if not service_enabled('api'):
        juju_log('api service not enabled; skipping endpoint '
                 'registration')
        return

    cinder_release = os_release('cinder-common')
    if CompareOpenStackReleases(cinder_release) < 'pike':
        public_url = '{}:{}/v1/$(tenant_id)s'.format(
            canonical_url(CONFIGS, PUBLIC),
            config('api-listening-port')
        )
        internal_url = '{}:{}/v1/$(tenant_id)s'.format(
            canonical_url(CONFIGS, INTERNAL),
            config('api-listening-port')
        )
        admin_url = '{}:{}/v1/$(tenant_id)s'.format(
            canonical_url(CONFIGS, ADMIN),
            config('api-listening-port')
        )
        settings.update({
            'region': None,
            'service': None,
            'public_url': None,
            'internal_url': None,
            'admin_url': None,
            'cinder_region': config('region'),
            'cinder_service': 'cinder',
            'cinder_public_url': public_url,
            'cinder_internal_url': internal_url,
            'cinder_admin_url': admin_url,
        })
    if CompareOpenStackReleases(cinder_release) >= 'icehouse':
        # NOTE(jamespage) register v2 endpoint as well
        public_url = '{}:{}/v2/$(tenant_id)s'.format(
            canonical_url(CONFIGS, PUBLIC),
            config('api-listening-port')
        )
        internal_url = '{}:{}/v2/$(tenant_id)s'.format(
            canonical_url(CONFIGS, INTERNAL),
            config('api-listening-port')
        )
        admin_url = '{}:{}/v2/$(tenant_id)s'.format(
            canonical_url(CONFIGS, ADMIN),
            config('api-listening-port')
        )
        settings.update({
            'cinderv2_region': config('region'),
            'cinderv2_service': 'cinderv2',
            'cinderv2_public_url': public_url,
            'cinderv2_internal_url': internal_url,
            'cinderv2_admin_url': admin_url,
        })
    if CompareOpenStackReleases(cinder_release) >= 'pike':
        # NOTE(jamespage) register v3 endpoint as well
        public_url = '{}:{}/v3/$(tenant_id)s'.format(
            canonical_url(CONFIGS, PUBLIC),
            config('api-listening-port')
        )
        internal_url = '{}:{}/v3/$(tenant_id)s'.format(
            canonical_url(CONFIGS, INTERNAL),
            config('api-listening-port')
        )
        admin_url = '{}:{}/v3/$(tenant_id)s'.format(
            canonical_url(CONFIGS, ADMIN),
            config('api-listening-port')
        )
        settings.update({
            'cinderv3_region': config('region'),
            'cinderv3_service': 'cinderv3',
            'cinderv3_public_url': public_url,
            'cinderv3_internal_url': internal_url,
            'cinderv3_admin_url': admin_url,
        })
    relation_set(relation_id=rid, **settings)