def openstack_upgrade():
    """Upgrade packages to config-set Openstack version.

    If the charm was installed from source we cannot upgrade it.
    For backwards compatibility a config flag must be set for this
    code to run, otherwise a full service level upgrade will fire
    on config-changed."""

    # If attempting to upgrade from Stein->Train, block until Placement
    # charm is related. Status is set in check_optional_relations().
    release = ch_utils.os_release('nova-common')
    cmp_os_release = ch_utils.CompareOpenStackReleases(release)
    if (cmp_os_release == 'stein' and not hookenv.relation_ids('placement')):
        return

    if (ch_utils.do_action_openstack_upgrade('nova-common',
                                             utils.do_openstack_upgrade,
                                             hooks.CONFIGS)):
        for rid in hookenv.relation_ids('neutron-api'):
            hooks.neutron_api_relation_joined(rid=rid, remote_restart=True)
        # NOTE(thedac): Force re-fire of shared-db joined hook
        # to ensure that the nova_api database is set up if required.
        for r_id in hookenv.relation_ids('shared-db'):
            hooks.db_joined(relation_id=r_id)
        hooks.config_changed()
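
For reference, a rough sketch of the gating the docstring describes; this is an assumption about what ch_utils.do_action_openstack_upgrade checks, not the actual charmhelpers implementation:

def do_action_openstack_upgrade(package, upgrade_callback, configs):
    # Sketch only: the operator must have opted in via the
    # action-managed-upgrade config option, and a newer OpenStack
    # release must actually be available for the given package.
    if not hookenv.config('action-managed-upgrade'):
        return False
    if not ch_utils.openstack_upgrade_available(package):
        return False
    upgrade_callback(configs=configs)
    return True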
    def __call__(self):
        vdata_values = super(NovaMetadataContext, self).__call__()

        release = ch_utils.os_release('nova-common')
        cmp_os_release = ch_utils.CompareOpenStackReleases(release)

        ctxt = {}

        if cmp_os_release >= 'rocky':
            ctxt.update(vdata_values)

            ctxt['metadata_proxy_shared_secret'] = hookenv.leader_get(
                'shared-metadata-secret')
            ctxt['enable_metadata'] = True
        else:
            hookenv.log("Vendor metadata has been configured but is not "
                        "effective in nova-cloud-controller because release "
                        "{} is prior to Rocky.".format(release),
                        level=hookenv.DEBUG)
            ctxt['enable_metadata'] = False

        # NOTE(ganso): always propagate config value for nova-compute since
        # we need to apply it there for all releases, and we cannot determine
        # whether nova-compute is really the one serving the vendor metadata
        for rid in hookenv.relation_ids('cloud-compute'):
            hookenv.relation_set(relation_id=rid,
                                 vendor_data=json.dumps(vdata_values))

        return ctxt
Example #3
def db_joined(relation_id=None):
    cmp_os_release = ch_utils.CompareOpenStackReleases(
        ch_utils.os_release('nova-common'))
    if hookenv.config('prefer-ipv6'):
        ch_utils.sync_db_with_multi_ipv6_addresses(
            hookenv.config('database'),
            hookenv.config('database-user'),
            relation_prefix='nova')

        if cmp_os_release >= 'mitaka':
            # NOTE: mitaka uses a second nova-api database as well
            ch_utils.sync_db_with_multi_ipv6_addresses(
                'nova_api',
                hookenv.config('database-user'),
                relation_prefix='novaapi')

        if cmp_os_release >= 'ocata':
            # NOTE: ocata requires cells v2
            ch_utils.sync_db_with_multi_ipv6_addresses(
                'nova_cell0',
                hookenv.config('database-user'),
                relation_prefix='novacell0')
    else:
        # Check for an access-network setting early to avoid churn
        access_network = None
        for unit in hookenv.related_units(relid=relation_id):
            access_network = hookenv.relation_get(rid=relation_id,
                                                  unit=unit,
                                                  attribute='access-network')
            if access_network:
                break
        host = ch_network_ip.get_relation_ip('shared-db',
                                             cidr_network=access_network)

        hookenv.relation_set(nova_database=hookenv.config('database'),
                             nova_username=hookenv.config('database-user'),
                             nova_hostname=host,
                             relation_id=relation_id)

        if cmp_os_release >= 'mitaka':
            # NOTE: mitaka uses a second nova-api database as well
            hookenv.relation_set(
                novaapi_database='nova_api',
                novaapi_username=hookenv.config('database-user'),
                novaapi_hostname=host,
                relation_id=relation_id)

        if cmp_os_release >= 'ocata':
            # NOTE: ocata requires cells v2
            hookenv.relation_set(
                novacell0_database='nova_cell0',
                novacell0_username=hookenv.config('database-user'),
                novacell0_hostname=host,
                relation_id=relation_id)
def default_enabled_filters():
    """
    Determine the list of default filters for scheduler use

    :returns: list of filters to use
    :rtype: list of str
    """
    os_rel = ch_utils.os_release('nova-common')
    cmp_os_rel = ch_utils.CompareOpenStackReleases(os_rel)
    if cmp_os_rel >= 'pike':
        return _pike_enabled_filters
    return _base_enabled_filters
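
A minimal usage sketch; the option shown below is illustrative of how the returned list typically ends up in nova.conf, not how the charm's templates actually render it:

# Join the selected filters into a comma-separated scheduler option.
enabled_filters = ','.join(default_enabled_filters())
# e.g. [filter_scheduler] enabled_filters = <enabled_filters>   (Pike and later)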
def is_policyd_override_valid_on_this_release(openstack_release):
    """Check that the charm is running on at least Ubuntu Xenial, and at
    least the queens release.

    :param openstack_release: the release codename that is installed.
    :type openstack_release: str
    :returns: True if okay
    :rtype: bool
    """
    # NOTE(ajkavanagh) circular import!  This is because the status message
    # generation code in utils has to call into this module, but this function
    # needs the CompareOpenStackReleases() function.  The only way to solve
    # this is either to put ALL of this module into utils, or refactor one or
    # other of the CompareOpenStackReleases or status message generation code
    # into a 3rd module.
    import charmhelpers.contrib.openstack.utils as ch_utils
    return ch_utils.CompareOpenStackReleases(openstack_release) >= 'queens'
    def __call__(self):
        vdata_values = super(NovaMetadataJSONContext, self).__call__()

        # NOTE(ganso): always propagate config value for nova-compute since
        # we need to apply it there for releases prior to rocky
        for rid in hookenv.relation_ids('cloud-compute'):
            hookenv.relation_set(relation_id=rid,
                                 vendor_json=vdata_values['vendor_data_json'])

        release = ch_utils.os_release('nova-common')
        cmp_os_release = ch_utils.CompareOpenStackReleases(release)

        if cmp_os_release >= 'rocky':
            return vdata_values
        else:
            hookenv.log("Vendor metadata has been configured but is not "
                        "effective in nova-cloud-controller because release "
                        "{} is prior to Rocky.".format(release),
                        level=hookenv.DEBUG)
            return {'vendor_data_json': '{}'}
Example #7
    def __call__(self):
        cmp_os_release = ch_utils.CompareOpenStackReleases(
            ch_utils.os_release('nova-common'))
        ctxt = {}
        if cmp_os_release >= 'rocky':
            ctxt['vendordata_providers'] = []
            vdata = hookenv.config('vendor-data')
            vdata_url = hookenv.config('vendor-data-url')

            if vdata:
                ctxt['vendor_data'] = True
                ctxt['vendordata_providers'].append('StaticJSON')

            if vdata_url:
                ctxt['vendor_data_url'] = vdata_url
                ctxt['vendordata_providers'].append('DynamicJSON')
            ctxt['metadata_proxy_shared_secret'] = hookenv.leader_get(
                'shared-metadata-secret')
            ctxt['enable_metadata'] = True
        else:
            ctxt['enable_metadata'] = False

        return ctxt
    def __call__(self):
        '''
        Extends the main charmhelpers HAProxyContext with a port mapping
        specific to this charm.
        Also used to extend the nova.conf context with the correct
        api_listening_ports.
        '''
        ctxt = super(HAProxyContext, self).__call__()

        os_rel = ch_utils.os_release('nova-common')
        cmp_os_rel = ch_utils.CompareOpenStackReleases(os_rel)
        # determine which port api processes should bind to, depending
        # on existence of haproxy + apache frontends
        compute_api = ch_cluster.determine_api_port(
            common.api_port('nova-api-os-compute'), singlenode_mode=True)
        ec2_api = ch_cluster.determine_api_port(
            common.api_port('nova-api-ec2'), singlenode_mode=True)
        s3_api = ch_cluster.determine_api_port(
            common.api_port('nova-objectstore'), singlenode_mode=True)
        placement_api = ch_cluster.determine_api_port(
            common.api_port('nova-placement-api'), singlenode_mode=True)
        metadata_api = ch_cluster.determine_api_port(
            common.api_port('nova-api-metadata'), singlenode_mode=True)
        # Apache ports
        a_compute_api = ch_cluster.determine_apache_port(
            common.api_port('nova-api-os-compute'), singlenode_mode=True)
        a_ec2_api = ch_cluster.determine_apache_port(
            common.api_port('nova-api-ec2'), singlenode_mode=True)
        a_s3_api = ch_cluster.determine_apache_port(
            common.api_port('nova-objectstore'), singlenode_mode=True)
        a_placement_api = ch_cluster.determine_apache_port(
            common.api_port('nova-placement-api'), singlenode_mode=True)
        a_metadata_api = ch_cluster.determine_apache_port(
            common.api_port('nova-api-metadata'), singlenode_mode=True)
        # Listen ports to be set in nova.conf accordingly.
        listen_ports = {
            'osapi_compute_listen_port': compute_api,
            'ec2_listen_port': ec2_api,
            's3_listen_port': s3_api,
            'placement_listen_port': placement_api,
            'metadata_listen_port': metadata_api,
        }

        port_mapping = {
            'nova-api-os-compute':
            [common.api_port('nova-api-os-compute'), a_compute_api],
            'nova-api-ec2': [common.api_port('nova-api-ec2'), a_ec2_api],
            'nova-objectstore':
            [common.api_port('nova-objectstore'), a_s3_api],
            'nova-placement-api':
            [common.api_port('nova-placement-api'), a_placement_api],
            'nova-api-metadata':
            [common.api_port('nova-api-metadata'), a_metadata_api],
        }

        if cmp_os_rel >= 'kilo':
            del listen_ports['ec2_listen_port']
            del listen_ports['s3_listen_port']
            del port_mapping['nova-api-ec2']
            del port_mapping['nova-objectstore']

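        # The placement API is only hosted by this charm for Ocata through
        # Stein; once a separate placement charm is related (Train onwards)
        # the local placement ports are dropped from both mappings.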
        rids = hookenv.relation_ids('placement')
        if (rids or cmp_os_rel < 'ocata' or cmp_os_rel > 'stein'):
            del listen_ports['placement_listen_port']
            del port_mapping['nova-placement-api']

        # for haproxy.conf
        ctxt['service_ports'] = port_mapping
        # for nova.conf
        ctxt['listen_ports'] = listen_ports
        return ctxt
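
As an illustration of the pruning above, on Train with the placement charm related, the returned context reduces to roughly this shape (the actual port numbers come from api_port() minus the haproxy/apache offsets):

# {
#     'listen_ports': {'osapi_compute_listen_port': ...,
#                      'metadata_listen_port': ...},
#     'service_ports': {'nova-api-os-compute': [...],
#                       'nova-api-metadata': [...]},
#     ... plus the keys inherited from the parent HAProxyContext
# }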
Example #9
    def test_compare_openstack_comparator(self):
        self.assertTrue(utils.CompareOpenStackReleases('mitaka') < 'newton')
        self.assertTrue(utils.CompareOpenStackReleases('pike') > 'essex')
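
The comparator accepts plain codename strings on either side of the comparison, as the test above relies on; a minimal standalone sketch, assuming charmhelpers is available:

from charmhelpers.contrib.openstack.utils import CompareOpenStackReleases

release = CompareOpenStackReleases('queens')
assert release >= 'mitaka'   # Queens is newer than Mitaka
assert release < 'stein'     # and older than Stein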
def sync_compute_availability_zones(args):
    """Sync the nova-compute Juju units' availability zones with the OpenStack
    hypervisors' availability zones."""
    # Due to python3 issues, we do a check here to see which version of
    # OpenStack is installed and gate the availability of the action on
    # that. See note below.
    release = ch_utils.CompareOpenStackReleases(
        ch_utils.os_release('nova-common'))
    if release < 'stein':
        msg = ('The sync_compute_availability_zones action is not available '
               'for the {} release.'.format(release))
        hookenv.action_fail(msg)
        return

    # Note (wolsen): There's a problem with the action script using only
    # python3 (/usr/bin/env python3) above, however on versions lower than
    # rocky, the python2 versions of the following python packages are
    # installed. The imports are moved to here to avoid causing actions
    # to fail outright.
    import hooks.nova_cc_context as ncc_context
    from keystoneauth1 import session
    from keystoneauth1.identity import v3
    from novaclient import client as nova_client
    from novaclient import exceptions as nova_exceptions

    ctxt = ncc_context.IdentityServiceContext()()
    if not ctxt:
        hookenv.action_fail("Identity service context cannot be generated")
        return

    keystone_auth = ctxt['keystone_authtoken']
    keystone_creds = {
        'auth_url': keystone_auth.get('auth_url'),
        'username': keystone_auth.get('username'),
        'password': keystone_auth.get('password'),
        'user_domain_name': keystone_auth.get('user_domain_name'),
        'project_domain_name': keystone_auth.get('project_domain_name'),
        'project_name': keystone_auth.get('project_name'),
    }
    keystone_session = session.Session(auth=v3.Password(**keystone_creds))
    client = nova_client.Client(2, session=keystone_session)
    output_str = ''
    for r_id in hookenv.relation_ids('cloud-compute'):
        units = hookenv.related_units(r_id)
        for unit in units:
            rel_data = hookenv.relation_get(rid=r_id, unit=unit)
            unit_az = rel_data.get('availability_zone')
            if not unit_az:
                continue
            aggregate_name = '{}_az'.format(unit_az)
            try:
                aggregate = client.aggregates.find(name=aggregate_name,
                                                   availability_zone=unit_az)
            except nova_exceptions.NotFound:
                aggregate = client.aggregates.create(aggregate_name,
                                                     availability_zone=unit_az)
            unit_ip = rel_data.get('private-address')
            hypervisor = client.hypervisors.find(host_ip=unit_ip)
            if hypervisor.hypervisor_hostname not in aggregate.hosts:
                client.aggregates.add_host(aggregate,
                                           hypervisor.hypervisor_hostname)
            output_str += \
                "Hypervisor {} added to availability zone {}\n".format(
                    hypervisor.hypervisor_hostname, unit_az)
    hookenv.action_set({'output': output_str})
def config_changed():
    # if we are paused, delay doing any config changed hooks.
    # It is forced on the resume.
    if ch_utils.is_unit_paused_set():
        hookenv.log("Unit is pause or upgrading. Skipping config_changed",
                    hookenv.WARNING)
        return

    # neutron-server only runs on releases prior to Juno. On Juno and later
    # it creates MySQL tables which subsequently cause the db migrations to
    # fail, so disable neutron-server on >= Juno.
    if ch_utils.CompareOpenStackReleases(
            ch_utils.os_release('nova-common')) >= 'juno':
        try:
            ch_host.service_pause('neutron-server')
        except ValueError:
            # neutron-server service not installed, ignore.
            pass
    if hookenv.config('prefer-ipv6'):
        hookenv.status_set('maintenance', 'configuring ipv6')
        ncc_utils.setup_ipv6()
        ch_utils.sync_db_with_multi_ipv6_addresses(
            hookenv.config('database'),
            hookenv.config('database-user'),
            relation_prefix='nova')

    global CONFIGS
    if not hookenv.config('action-managed-upgrade'):
        if ch_utils.openstack_upgrade_available('nova-common'):
            hookenv.status_set('maintenance', 'Running openstack upgrade')
            ncc_utils.do_openstack_upgrade(CONFIGS)
            for rid in hookenv.relation_ids('neutron-api'):
                neutron_api_relation_joined(rid=rid, remote_restart=True)
            # NOTE(jamespage): Force re-fire of shared-db joined hook
            # to ensure that the nova_api database is set up if required.
            for r_id in hookenv.relation_ids('shared-db'):
                db_joined(relation_id=r_id)

    ncc_utils.save_script_rc()
    configure_https()
    CONFIGS.write_all()

    # NOTE(jamespage): deal with any changes to the console and serial
    #                  console configuration options
    filtered = ch_fetch.filter_installed_packages(
        ncc_utils.determine_packages())
    if filtered:
        ch_fetch.apt_install(filtered, fatal=True)

    for r_id in hookenv.relation_ids('identity-service'):
        identity_joined(rid=r_id)
    for rid in hookenv.relation_ids('cluster'):
        cluster_joined(rid)
    update_nova_relation()

    update_nrpe_config()

    # If the region value has changed, notify the cloud-compute relations
    # to ensure the value is propagated to the compute nodes.
    if ch_utils.config_value_changed('region'):
        for rid in hookenv.relation_ids('cloud-compute'):
            set_region_on_relation_from_config(rid)

    ncc_utils.update_aws_compat_services()

    if hookenv.is_leader() and not ncc_utils.get_shared_metadatasecret():
        ncc_utils.set_shared_metadatasecret()
    for rid in hookenv.relation_ids('ha'):
        ha_joined(rid)
    if (not ch_utils.is_unit_paused_set() and
            ncc_utils.is_console_auth_enabled()):
        ch_host.service_resume('nova-consoleauth')
    def configure_sink(self):
        cmp_os_release = ch_utils.CompareOpenStackReleases(self.release)
        return cmp_os_release < 'queens'
Example #13
    def _since_openstack_release(audit_options=None):
        _release = openstack_utils.get_os_codename_package(pkg)
        return openstack_utils.CompareOpenStackReleases(_release) >= release
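
In the excerpt above, pkg and release are free variables; a hypothetical enclosing factory, sketched only to show where they would be captured from:

def since_openstack_release(pkg, release):
    """Return a callable reporting whether `pkg` is at `release` or later."""
    def _since_openstack_release(audit_options=None):
        _release = openstack_utils.get_os_codename_package(pkg)
        return openstack_utils.CompareOpenStackReleases(_release) >= release
    return _since_openstack_release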