def db_joined(relation_id=None):
    if is_relation_made('pgsql-nova-db') or \
            is_relation_made('pgsql-neutron-db'):
        # error, postgresql is used
        e = ('Attempting to associate a mysql database when there is '
             'already an associated postgresql one')
        log(e, level=ERROR)
        raise Exception(e)

    if config('prefer-ipv6'):
        sync_db_with_multi_ipv6_addresses(config('database'),
                                          config('database-user'),
                                          relation_prefix='nova')

        if os_release('nova-common') >= 'mitaka':
            # NOTE: mitaka uses a second nova-api database as well
            sync_db_with_multi_ipv6_addresses('nova_api',
                                              config('database-user'),
                                              relation_prefix='novaapi')

    else:
        host = unit_get('private-address')
        relation_set(nova_database=config('database'),
                     nova_username=config('database-user'),
                     nova_hostname=host,
                     relation_id=relation_id)

        if os_release('nova-common') >= 'mitaka':
            # NOTE: mitaka uses a second nova-api database as well
            relation_set(novaapi_database='nova_api',
                         novaapi_username=config('database-user'),
                         novaapi_hostname=host,
                         relation_id=relation_id)
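# NOTE: a sketch, not charm code - the bare string comparison on
# os_release() above only works because OpenStack codenames are
# alphabetical; later examples in this collection wrap the value instead,
# which is the safer pattern:
#
#     from charmhelpers.contrib.openstack.utils import (
#         CompareOpenStackReleases,
#         os_release,
#     )
#
#     if CompareOpenStackReleases(os_release('nova-common')) >= 'mitaka':
#         ...  # mitaka uses a second nova-api database as well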
    def volume_context(self):
        # provide basic validation that the volume manager is supported on the
        # given openstack release (nova-volume is only supported for E and F)
        # it is up to release templates to set the correct volume driver.

        if not self.volume_service:
            return {}

        os_rel = os_release('nova-common')

        # ensure volume service is supported on specific openstack release.
        if self.volume_service == 'cinder':
            if os_rel == 'essex':
                e = ('Attempting to configure cinder volume manager on '
                     'an unsupported OpenStack release (essex)')
                log(e, level=ERROR)
                raise context.OSContextError(e)
            return 'cinder'
        elif self.volume_service == 'nova-volume':
            if os_rel not in ['essex', 'folsom']:
                e = ('Attempting to configure nova-volume manager on '
                     'an unsupported OpenStack release (%s).' % os_rel)
                log(e, level=ERROR)
                raise context.OSContextError(e)
            return 'nova-volume'
        else:
            e = ('Invalid volume service received via cloud-compute: %s' %
                 self.volume_service)
            log(e, level=ERROR)
            raise context.OSContextError(e)
def config_changed_postupgrade():
    save_script_rc()
    release = os_release('keystone')
    if run_in_apache(release=release):
        # Need to ensure mod_wsgi is installed and apache2 is reloaded
        # immediately as the charm queries its local keystone before the
        # restart decorator can fire
        apt_install(filter_installed_packages(determine_packages()))
        # when deployed from source, init scripts aren't installed
        service_pause('keystone')

        disable_unused_apache_sites()
        if WSGI_KEYSTONE_API_CONF in CONFIGS.templates:
            CONFIGS.write(WSGI_KEYSTONE_API_CONF)
        if not is_unit_paused_set():
            restart_pid_check('apache2')
            stop_manager_instance()

    if enable_memcache(release=release):
        # If charm or OpenStack have been upgraded then the list of required
        # packages may have changed so ensure they are installed.
        apt_install(filter_installed_packages(determine_packages()))

    if is_leader() and fernet_enabled():
        key_setup()
        key_leader_set()

    configure_https()
    open_port(config('service-port'))

    update_nrpe_config()

    CONFIGS.write_all()

    if snap_install_requested() and not is_unit_paused_set():
        service_restart('snap.keystone.*')
        stop_manager_instance()

    if (is_db_initialised() and is_elected_leader(CLUSTER_RES) and not
            is_unit_paused_set()):
        ensure_initial_admin(config)
        if CompareOpenStackReleases(
                os_release('keystone')) >= 'liberty':
            CONFIGS.write(POLICY_JSON)

    update_all_identity_relation_units()
    update_all_domain_backends()
    update_all_fid_backends()

    for r_id in relation_ids('ha'):
        ha_joined(relation_id=r_id)

    notify_middleware_with_release_version()
def resolve_required_interfaces():
    """Helper function to build a map of required interfaces based on the
    OpenStack release being deployed.

    @returns dict - a dictionary keyed by high-level interface type
    """
    required_ints = deepcopy(REQUIRED_INTERFACES)
    required_ints.update(get_optional_relations())
    if CompareOpenStackReleases(os_release('ceilometer-common')) >= 'mitaka':
        required_ints['database'].append('metric-service')
    if CompareOpenStackReleases(os_release('ceilometer-common')) >= 'queens':
        required_ints['database'].remove('mongodb')
        required_ints['identity'] = ['identity-credentials']
    return required_ints
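# A minimal sketch of the structure resolve_required_interfaces() mutates,
# assuming a REQUIRED_INTERFACES map shaped like the ceilometer charm's
# (the exact contents are charm-specific; the 'database' and 'identity'
# keys are implied by the updates above):
#
#     REQUIRED_INTERFACES = {
#         'database': ['mongodb'],
#         'messaging': ['amqp'],
#         'identity': ['identity-service'],
#     }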
def resource_map():
    '''
    Dynamically generate a map of resources that will be managed for a single
    hook execution.
    '''
    # TODO: Cache this on first call?
    if config('virt-type').lower() == 'lxd':
        resource_map = deepcopy(BASE_RESOURCE_MAP)
    else:
        resource_map = deepcopy(LIBVIRT_RESOURCE_MAP)
    net_manager = network_manager()

    # Network manager gets set late by the cloud-compute interface.
    # FlatDHCPManager only requires some extra packages.
    if (net_manager in ['flatmanager', 'flatdhcpmanager'] and
            config('multi-host').lower() == 'yes' and
            os_release('nova-common') < 'ocata'):
        resource_map[NOVA_CONF]['services'].extend(
            ['nova-api', 'nova-network']
        )
    else:
        resource_map.pop(NOVA_API_AA_PROFILE_PATH)
        resource_map.pop(NOVA_NETWORK_AA_PROFILE_PATH)

    distro_codename = lsb_release()['DISTRIB_CODENAME'].lower()
    if (distro_codename >= 'yakkety' or
            os_release('nova-common') >= 'ocata'):
        for data in resource_map.values():
            if LIBVIRT_BIN_DAEMON in data['services']:
                data['services'].remove(LIBVIRT_BIN_DAEMON)
                data['services'].append(LIBVIRTD_DAEMON)

    # Neutron/quantum requires additional contexts, as well as new resources
    # depending on the plugin used.
    # NOTE(james-page): only required for ovs plugin right now
    if net_manager in ['neutron', 'quantum']:
        resource_map[NOVA_CONF]['contexts'].append(NeutronComputeContext())

    if relation_ids('ceph'):
        CEPH_RESOURCES[ceph_config_file()] = {
            'contexts': [NovaComputeCephContext()],
            'services': ['nova-compute']
        }
        resource_map.update(CEPH_RESOURCES)

    enable_nova_metadata, _ = nova_metadata_requirement()
    if enable_nova_metadata:
        resource_map[NOVA_CONF]['services'].append('nova-api-metadata')
    return resource_map
def remove_services():
    load_config_file(os.path.join(os.path.sep, "etc", "cinder", "cinder.conf"))

    host = action_get(key="host")
    services = model_query({}, models.Service, read_deleted="no",
                           session=get_session())

    if host not in ("unused", "",):
        services = services.filter(models.Service.host == host)
    else:
        ands = []
        for service in DEFAULT_SERVICES:
            ands.append(and_(models.Service.host != service))
        services = services.filter(*ands)

    removed_services = []
    ctxt = context.get_admin_context()

    for service in services.all():
        log("Removing service:%d, hostname:%s" % (service.id, service.host))
        try:
            if os_release("cinder") >= "liberty":
                cinder_manage_remove(service.binary, service.host)
            else:
                db.service_destroy(ctxt, service.id)
        except Exception:
            action_set({'traceback': traceback.format_exc()})
            action_fail("Cannot remove service: %s" % service.host)
        else:
            removed_services.append(service.host)

    action_set({'removed': ",".join(removed_services)})
def manage():
    config = hookenv.config()
    release = os_release('neutron-common')
    manager = ServiceManager([
        # onos services setup
        {
            'service': 'onos-setup',
            'data_ready': [
                onos_package.install_packages,
            ],
            'provided_data': [
                onos_relation.BuildSDNRelation(),
            ],
        },
        {
            'service': 'api-render',
            'required_data': [
                onos_relation.ONOSControllerRelation(),
                config,
                onos_relation.ConfigTranslation(),
            ],
            'data_ready': [
                helpers.render_template(
                    source='ml2_conf.ini',
                    template_loader=get_loader('templates/', release),
                    target='/etc/neutron/plugins/ml2/ml2_conf.ini',
                    on_change_action=(partial(remote_restart,
                                              'neutron-plugin-api-subordinate',
                                              'neutron-server')),
                ),
            ],
        },
    ])
    manager.manage()
def keystone_fid_service_provider_broken():
    if CompareOpenStackReleases(os_release('keystone')) < 'ocata':
        log('Ignoring keystone-fid-service-provider relation as it is'
            ' not supported on releases older than Ocata')
        return

    restart_keystone()
def keystone_fid_service_provider_changed():
    if get_api_version() < 3:
        log('Identity federation is only supported with keystone v3')
        return
    if CompareOpenStackReleases(os_release('keystone')) < 'ocata':
        log('Ignoring keystone-fid-service-provider relation as it is'
            ' not supported on releases older than Ocata')
        return
    # for the join case a keystone public-facing hostname and service
    # port need to be set
    update_keystone_fid_service_provider(relation_id=relation_id())

    # handle relation data updates (if any), e.g. remote_id_attribute
    # and a restart will be handled via a nonce, not restart_on_change
    CONFIGS.write(KEYSTONE_CONF)

    # The relation is container-scoped so this keystone unit's unitdata
    # will only contain a nonce of a single fid subordinate for a given
    # fid backend (relation id)
    restart_nonce = relation_get('restart-nonce')
    if restart_nonce:
        nonce = json.loads(restart_nonce)
        # multiplex by relation id for multiple federated identity
        # provider charms
        fid_nonce_key = 'fid-restart-nonce-{}'.format(relation_id())
        db = unitdata.kv()
        if nonce != db.get(fid_nonce_key):
            restart_keystone()
            db.set(fid_nonce_key, nonce)
            db.flush()
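# Sketch of the subordinate side of the nonce handshake above (hypothetical
# snippet, not from the charm): the fid subordinate publishes a fresh
# JSON-encoded 'restart-nonce' whenever keystone needs restarting, and the
# handler above restarts at most once per distinct value:
#
#     import json
#     import uuid
#
#     relation_set(relation_id=rid,
#                  **{'restart-nonce': json.dumps(str(uuid.uuid4()))})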
def leader_init_db_if_ready(use_current_context=False):
    """ Initialise the keystone db if it is ready and mark it as initialised.

    NOTE: this must be idempotent.
    """
    if not is_elected_leader(CLUSTER_RES):
        log("Not leader - skipping db init", level=DEBUG)
        return

    if is_db_initialised():
        log("Database already initialised - skipping db init", level=DEBUG)
        update_all_identity_relation_units(check_db_ready=False)
        return

    # Bugs 1353135 & 1187508. Dbs can appear to be ready before the
    # units acl entry has been added. So, if the db supports passing
    # a list of permitted units then check if we're in the list.
    if not is_db_ready(use_current_context=use_current_context):
        log('Allowed_units list provided and this unit not present',
            level=INFO)
        return

    migrate_database()
    ensure_initial_admin(config)
    if CompareOpenStackReleases(
            os_release('keystone')) >= 'liberty':
        CONFIGS.write(POLICY_JSON)
    # Ensure any existing service entries are updated in the
    # new database backend. Also avoid duplicate db ready check.
    update_all_identity_relation_units(check_db_ready=False)
    update_all_domain_backends()
def update_image_location_policy():
    """Update *_image_location policy to restrict to admin role.

    We do this unconditionally and keep a record of the original as installed
    by the package.
    """
    if CompareOpenStackReleases(os_release('glance-common')) < 'kilo':
        # NOTE(hopem): at the time of writing we are unable to do this for
        # earlier than Kilo due to LP: #1502136
        return

    db = kv()
    policies = ["get_image_location", "set_image_location",
                "delete_image_location"]
    for policy_key in policies:
        # Save original value at time of first install in case we ever need to
        # revert.
        db_key = "policy_{}".format(policy_key)
        if db.get(db_key) is None:
            with open(GLANCE_POLICY_FILE) as f:
                p = json.load(f)
            if policy_key in p:
                db.set(db_key, p[policy_key])
                db.flush()
            else:
                log("key '{}' not found in policy file".format(policy_key),
                    level=INFO)

        if config('restrict-image-location-operations'):
            policy_value = 'role:admin'
        else:
            policy_value = ''

        log("Updating Glance policy file setting policy "
            "'{}':'{}'".format(policy_key, policy_value), level=INFO)
        update_json_file(GLANCE_POLICY_FILE, {policy_key: policy_value})
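# For illustration: with restrict-image-location-operations=True, the
# update_json_file() calls above leave GLANCE_POLICY_FILE with entries
# equivalent to:
#
#     {
#         "get_image_location": "role:admin",
#         "set_image_location": "role:admin",
#         "delete_image_location": "role:admin"
#     }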
def reinstall_paste_ini(force_reinstall=False):
    '''
    Re-install glance-{api,registry}-paste.ini file from packages

    Existing glance-{api,registry}-paste.ini file will be removed
    and the original files provided by the packages will be
    re-installed.

    This will only be performed once per unit unless force_reinstall
    is set to True.
    '''
    db = kv()
    if not db.get(PASTE_INI_MARKER) or force_reinstall:
        for paste_file in [GLANCE_REGISTRY_PASTE,
                           GLANCE_API_PASTE]:
            if os.path.exists(paste_file):
                os.remove(paste_file)
        cmp_release = CompareOpenStackReleases(os_release('glance-common'))
        # glance-registry is deprecated at queens but still
        # installed.
        if cmp_release < 'rocky':
            pkg_list = ['glance-api', 'glance-registry']
        # File is in glance-common for py3 packages.
        else:
            pkg_list = ['glance-common']
        apt_install(packages=pkg_list,
                    options=REINSTALL_OPTIONS,
                    fatal=True)
        db.set(PASTE_INI_MARKER, True)
        db.flush()
    def __call__(self):
        """Used to generate template context to be added to cinder.conf in
        the presence of a ceph relation.
        """
        # TODO: this should call is_relation_made
        if not relation_ids('ceph'):
            return {}
        service = service_name()
        cmp_os_release = CompareOpenStackReleases(os_release('cinder-common'))
        if cmp_os_release >= "icehouse":
            volume_driver = 'cinder.volume.drivers.rbd.RBDDriver'
        else:
            volume_driver = 'cinder.volume.driver.RBDDriver'
        if cmp_os_release >= "ocata":
            driver_key = 'ceph_volume_driver'
        else:
            driver_key = 'volume_driver'
        return {
            driver_key: volume_driver,
            # ensure_ceph_pool() creates pool based on service name.
            'rbd_pool': service,
            'rbd_user': service,
            'host': service,
            'rbd_ceph_conf': ceph_config_file()
        }
def determine_purge_packages():
    cmp_release = CompareOpenStackReleases(
        os_release('neutron-common', base='icehouse',
                   reset_cache=True))
    if cmp_release >= 'rocky':
        return PURGE_PACKAGES
    return []
def do_openstack_upgrade(configs):
    """
    Perform an upgrade.  Takes care of upgrading packages, rewriting
    configs, database migrations and potentially any other post-upgrade
    actions.

    :param configs: The charms main OSConfigRenderer object.
    """
    cur_os_rel = os_release('neutron-common')
    new_src = config('openstack-origin')
    new_os_rel = get_os_codename_install_source(new_src)

    log('Performing OpenStack upgrade to %s.' % (new_os_rel))

    configure_installation_source(new_src)
    dpkg_opts = [
        '--option', 'Dpkg::Options::=--force-confnew',
        '--option', 'Dpkg::Options::=--force-confdef',
    ]
    apt_update(fatal=True)
    apt_upgrade(options=dpkg_opts, fatal=True, dist=True)
    pkgs = determine_packages(new_os_rel)
    # Sort packages just to make unit tests easier
    pkgs.sort()
    apt_install(packages=pkgs,
                options=dpkg_opts,
                fatal=True)

    # set CONFIGS to load templates from new release
    configs.set_release(openstack_release=new_os_rel)
    # Before kilo this is nova-cloud-controller's job
    if is_elected_leader(CLUSTER_RES):
        # stamp_neutron_database(cur_os_rel)
        migrate_neutron_database()
def register_configs(release=None):
    release = release or os_release('neutron-common')
    configs = templating.OSConfigRenderer(templates_dir=TEMPLATES,
                                          openstack_release=release)
    for cfg, rscs in resource_map().iteritems():
        configs.register(cfg, rscs['contexts'])
    return configs
def db_changed():
    rel = os_release('glance-common')

    if 'shared-db' not in CONFIGS.complete_contexts():
        juju_log('shared-db relation incomplete. Peer not ready?')
        return

    CONFIGS.write(GLANCE_REGISTRY_CONF)
    # since folsom, a db connection setting in glance-api.conf is required.
    if rel != "essex":
        CONFIGS.write(GLANCE_API_CONF)

    if is_elected_leader(CLUSTER_RES):
        # Bugs 1353135 & 1187508. Dbs can appear to be ready before the units
        # acl entry has been added. So, if the db supports passing a list of
        # permitted units then check if we're in the list.
        allowed_units = relation_get('allowed_units')
        if allowed_units and local_unit() in allowed_units.split():
            if rel == "essex":
                status = call(['glance-manage', 'db_version'])
                if status != 0:
                    juju_log('Setting version_control to 0')
                    cmd = ["glance-manage", "version_control", "0"]
                    check_call(cmd)

            juju_log('Cluster leader, performing db sync')
            migrate_database()
        else:
            juju_log('allowed_units not present, or local unit '
                     'not in acl list: %s' % allowed_units)
def determine_packages():
    pkgs = []
    plugin_pkgs = neutron_plugin_attribute('ovs', 'packages', 'neutron')
    for plugin_pkg in plugin_pkgs:
        pkgs.extend(plugin_pkg)
    if use_dvr():
        pkgs.extend(DVR_PACKAGES)
    if enable_local_dhcp():
        pkgs.extend(DHCP_PACKAGES)
        pkgs.extend(METADATA_PACKAGES)

    if git_install_requested():
        pkgs.extend(BASE_GIT_PACKAGES)
        # don't include packages that will be installed from git
        for p in GIT_PACKAGE_BLACKLIST:
            if p in pkgs:
                pkgs.remove(p)

    release = os_release('neutron-common', base='icehouse')
    if release >= 'mitaka' and 'neutron-plugin-openvswitch-agent' in pkgs:
        pkgs.remove('neutron-plugin-openvswitch-agent')
        pkgs.append('neutron-openvswitch-agent')

    if use_dpdk():
        pkgs.append('openvswitch-switch-dpdk')

    return pkgs
def determine_packages():
    packages = [] + BASE_PACKAGES

    net_manager = network_manager()
    if (net_manager in ['flatmanager', 'flatdhcpmanager'] and
            config('multi-host').lower() == 'yes' and
            os_release('nova-common') < 'ocata'):
        packages.extend(['nova-api', 'nova-network'])

    if relation_ids('ceph'):
        packages.append('ceph-common')

    virt_type = config('virt-type')
    try:
        packages.extend(VIRT_TYPES[virt_type])
    except KeyError:
        log('Unsupported virt-type configured: %s' % virt_type)
        raise
    enable_nova_metadata, _ = nova_metadata_requirement()
    if enable_nova_metadata:
        packages.append('nova-api-metadata')

    packages.extend(determine_packages_arch())

    if git_install_requested():
        packages = list(set(packages))
        packages.extend(BASE_GIT_PACKAGES)
        # don't include packages that will be installed from git
        for p in GIT_PACKAGE_BLACKLIST:
            if p in packages:
                packages.remove(p)

    return packages
def pgsql_db_changed():
    rel = os_release('glance-common')

    if 'pgsql-db' not in CONFIGS.complete_contexts():
        juju_log('pgsql-db relation incomplete. Peer not ready?')
        return

    CONFIGS.write(GLANCE_REGISTRY_CONF)
    # since folsom, a db connection setting in glance-api.conf is required.
    if rel != "essex":
        CONFIGS.write(GLANCE_API_CONF)

    if is_elected_leader(CLUSTER_RES):
        if rel == "essex":
            status = call(['glance-manage', 'db_version'])
            if status != 0:
                juju_log('Setting version_control to 0')
                cmd = ["glance-manage", "version_control", "0"]
                check_call(cmd)

        juju_log('Cluster leader, performing db sync')
        migrate_database()

    for rid in relation_ids('image-service'):
        image_service_joined(rid)
def config_changed():
    # neutron-server runs if < juno. Neutron-server creates mysql tables
    # which will subsequently cause db migrations to fail if >= juno.
    # Disable neutron-server if >= juno
    if os_release('nova-common') >= 'juno':
        with open('/etc/init/neutron-server.override', 'w') as out:
            out.write('manual\n')
    if config('prefer-ipv6'):
        status_set('maintenance', 'configuring ipv6')
        setup_ipv6()
        sync_db_with_multi_ipv6_addresses(config('database'),
                                          config('database-user'),
                                          relation_prefix='nova')

    global CONFIGS
    if git_install_requested():
        status_set('maintenance', 'Running Git install')
        if config_value_changed('openstack-origin-git'):
            git_install(config('openstack-origin-git'))
    elif not config('action-managed-upgrade'):
        if openstack_upgrade_available('nova-common'):
            status_set('maintenance', 'Running openstack upgrade')
            CONFIGS = do_openstack_upgrade(CONFIGS)
            [neutron_api_relation_joined(rid=rid, remote_restart=True)
                for rid in relation_ids('neutron-api')]
            # NOTE(jamespage): Force re-fire of shared-db joined hook
            # to ensure that nova_api database is setup if required.
            [db_joined(relation_id=r_id)
                for r_id in relation_ids('shared-db')]

    save_script_rc()
    configure_https()
    CONFIGS.write_all()
    if console_attributes('protocol'):
        if not git_install_requested():
            status_set('maintenance', 'Configuring guest console access')
            apt_update()
            packages = console_attributes('packages') or []
            filtered = filter_installed_packages(packages)
            if filtered:
                apt_install(filtered, fatal=True)

        [compute_joined(rid=rid)
            for rid in relation_ids('cloud-compute')]

    for r_id in relation_ids('identity-service'):
        identity_joined(rid=r_id)
    for rid in relation_ids('zeromq-configuration'):
        zeromq_configuration_relation_joined(rid)
    [cluster_joined(rid) for rid in relation_ids('cluster')]
    update_nrpe_config()

    # If the region value has changed, notify the cloud-compute relations
    # to ensure the value is propagated to the compute nodes.
    if config_value_changed('region'):
        for rid in relation_ids('cloud-compute'):
            for unit in related_units(rid):
                compute_changed(rid, unit)

    update_nova_consoleauth_config()
def neutron_plugins():
    from charmhelpers.contrib.openstack import context
    release = os_release('nova-common')
    plugins = {
        'ovs': {
            'config': '/etc/neutron/plugins/openvswitch/'
                      'ovs_neutron_plugin.ini',
            'driver': 'neutron.plugins.openvswitch.ovs_neutron_plugin.'
                      'OVSNeutronPluginV2',
            'contexts': [
                context.SharedDBContext(user=config('neutron-database-user'),
                                        database=config('neutron-database'),
                                        relation_prefix='neutron',
                                        ssl_dir=NEUTRON_CONF_DIR)],
            'services': ['neutron-plugin-openvswitch-agent'],
            'packages': [[headers_package()] + determine_dkms_package(),
                         ['neutron-plugin-openvswitch-agent']],
            'server_packages': ['neutron-server',
                                'neutron-plugin-openvswitch'],
            'server_services': ['neutron-server']
        },
        'nvp': {
            'config': '/etc/neutron/plugins/nicira/nvp.ini',
            'driver': 'neutron.plugins.nicira.nicira_nvp_plugin.'
                      'NeutronPlugin.NvpPluginV2',
            'contexts': [
                context.SharedDBContext(user=config('neutron-database-user'),
                                        database=config('neutron-database'),
                                        relation_prefix='neutron',
                                        ssl_dir=NEUTRON_CONF_DIR)],
            'services': [],
            'packages': [],
            'server_packages': ['neutron-server',
                                'neutron-plugin-nicira'],
            'server_services': ['neutron-server']
        },
        'nsx': {
            'config': '/etc/neutron/plugins/vmware/nsx.ini',
            'driver': 'vmware',
            'contexts': [
                context.SharedDBContext(user=config('neutron-database-user'),
                                        database=config('neutron-database'),
                                        relation_prefix='neutron',
                                        ssl_dir=NEUTRON_CONF_DIR)],
            'services': [],
            'packages': [],
            'server_packages': ['neutron-server',
                                'neutron-plugin-vmware'],
            'server_services': ['neutron-server']
        }
    }
    if release >= 'icehouse':
        # NOTE: patch in ml2 plugin for icehouse onwards
        plugins['ovs']['config'] = '/etc/neutron/plugins/ml2/ml2_conf.ini'
        plugins['ovs']['driver'] = 'neutron.plugins.ml2.plugin.Ml2Plugin'
        plugins['ovs']['server_packages'] = ['neutron-server',
                                             'neutron-plugin-ml2']
        # NOTE: patch in vmware renames nvp->nsx for icehouse onwards
        plugins['nvp'] = plugins['nsx']
    return plugins
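# Usage sketch: this map is what charmhelpers'
# neutron_plugin_attribute(plugin, attr, net_manager) reads from, e.g.:
#
#     plugins = neutron_plugins()
#     server_pkgs = plugins['ovs']['server_packages']
#     # icehouse onwards -> ['neutron-server', 'neutron-plugin-ml2']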
def register_configs():
    '''
    Returns an OSTemplateRenderer object with all required configs registered.
    '''
    release = os_release('nova-common')
    configs = templating.OSConfigRenderer(templates_dir=TEMPLATES,
                                          openstack_release=release)

    if relation_ids('ceph'):
        # Add charm ceph configuration to resources and
        # ensure directory actually exists
        mkdir(os.path.dirname(ceph_config_file()))
        mkdir(os.path.dirname(CEPH_CONF))
        # Install ceph config as an alternative for co-location with
        # ceph and ceph-osd charms - nova-compute ceph.conf will be
        # lower priority than both of these but that's OK
        if not os.path.exists(ceph_config_file()):
            # touch file for pre-templated generation
            open(ceph_config_file(), 'w').close()
        install_alternative(os.path.basename(CEPH_CONF),
                            CEPH_CONF, ceph_config_file())

    for cfg, d in resource_map().iteritems():
        configs.register(cfg, d['contexts'])
    return configs
def remove_services(args):
    host = action_get(key="host")
    services = cinder_manage_service_list()

    if host not in ("unused", "",):
        services = [s for s in services if s.host == host]
    else:
        services = [s for s in services if s.host not in DEFAULT_SERVICES]

    removed_services = []

    for service in services:
        log("Removing binary:{}, hostname:{}"
            .format(service.binary, service.host))
        try:
            if CompareOpenStackReleases(os_release("cinder")) >= "liberty":
                cinder_manage_remove(service.binary, service.host)
            else:
                action_fail("Cannot remove service: {}".format(service.host))
        except Exception:
            action_set({'traceback': traceback.format_exc()})
            action_fail("Cannot remove service: {}".format(service.host))
        else:
            removed_services.append(service.host)

    action_set({'removed': ",".join(removed_services)})
def determine_endpoints(public_url, internal_url, admin_url):
    '''Generates a dictionary containing all relevant endpoints to be
    passed to keystone as relation settings.'''
    region = config('region')
    os_rel = os_release('nova-common')

    nova_public_url = ('%s:%s/v2/$(tenant_id)s' %
                       (public_url, api_port('nova-api-os-compute')))
    nova_internal_url = ('%s:%s/v2/$(tenant_id)s' %
                         (internal_url, api_port('nova-api-os-compute')))
    nova_admin_url = ('%s:%s/v2/$(tenant_id)s' %
                      (admin_url, api_port('nova-api-os-compute')))
    ec2_public_url = '%s:%s/services/Cloud' % (
        public_url, api_port('nova-api-ec2'))
    ec2_internal_url = '%s:%s/services/Cloud' % (
        internal_url, api_port('nova-api-ec2'))
    ec2_admin_url = '%s:%s/services/Cloud' % (admin_url,
                                              api_port('nova-api-ec2'))

    s3_public_url = '%s:%s' % (public_url, api_port('nova-objectstore'))
    s3_internal_url = '%s:%s' % (internal_url, api_port('nova-objectstore'))
    s3_admin_url = '%s:%s' % (admin_url, api_port('nova-objectstore'))

    # the base endpoints
    endpoints = {
        'nova_service': 'nova',
        'nova_region': region,
        'nova_public_url': nova_public_url,
        'nova_admin_url': nova_admin_url,
        'nova_internal_url': nova_internal_url,
        'ec2_service': 'ec2',
        'ec2_region': region,
        'ec2_public_url': ec2_public_url,
        'ec2_admin_url': ec2_admin_url,
        'ec2_internal_url': ec2_internal_url,
        's3_service': 's3',
        's3_region': region,
        's3_public_url': s3_public_url,
        's3_admin_url': s3_admin_url,
        's3_internal_url': s3_internal_url,
    }

    if os_rel >= 'kilo':
        # NOTE(jamespage) drop endpoints for ec2 and s3
        #  ec2 is deprecated
        #  s3 is insecure and should die in flames
        endpoints.update({
            'ec2_service': None,
            'ec2_region': None,
            'ec2_public_url': None,
            'ec2_admin_url': None,
            'ec2_internal_url': None,
            's3_service': None,
            's3_region': None,
            's3_public_url': None,
            's3_admin_url': None,
            's3_internal_url': None,
        })

    return endpoints
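# Worked example (assuming the usual nova-api-os-compute port, 8774, and
# illustrative URLs):
#
#     determine_endpoints('https://10.5.0.10',
#                         'https://10.5.0.11',
#                         'https://10.5.0.12')
#     # -> 'nova_public_url': 'https://10.5.0.10:8774/v2/$(tenant_id)s'
#     # On kilo or later, every ec2_* and s3_* key is set to None so that
#     # keystone drops those endpoints.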
def is_vlan_trunking_requested_and_valid():
    """Check whether VLAN trunking should be enabled by checking whether
       it has been requested and, if it has, is it supported in the current
       configuration.
    """

    if config('enable-vlan-trunking'):
        if VLAN not in _get_tenant_network_types():
            msg = ("Disabling vlan-trunking, the vlan network type must be "
                   "enabled to use vlan-trunking")
            log(msg, ERROR)
            return False

        if config('neutron-plugin') != 'ovs':
            msg = ("Disabling vlan-trunking, implementation only exists "
                   "for the OVS plugin")
            log(msg, ERROR)
            return False

        if CompareOpenStackReleases(os_release('neutron-server')) < 'newton':
            msg = ("The vlan-trunking option is only supported on newton or "
                   "later")
            log(msg, ERROR)
            return False

        return True
    else:
        return False
def resource_map():
    '''
    Dynamically generate a map of resources that will be managed for a single
    hook execution.
    '''
    resource_map = deepcopy(BASE_RESOURCE_MAP)
    if use_dvr():
        resource_map.update(DVR_RESOURCE_MAP)
        resource_map.update(METADATA_RESOURCE_MAP)
        dvr_services = ['neutron-metadata-agent', 'neutron-l3-agent']
        resource_map[NEUTRON_CONF]['services'] += dvr_services
    if enable_local_dhcp():
        resource_map.update(METADATA_RESOURCE_MAP)
        resource_map.update(DHCP_RESOURCE_MAP)
        metadata_services = ['neutron-metadata-agent', 'neutron-dhcp-agent']
        resource_map[NEUTRON_CONF]['services'] += metadata_services
    # Remap any service names as required
    if os_release('neutron-common', base='icehouse') >= 'mitaka':
        # ml2_conf.ini -> openvswitch_agent.ini
        del resource_map[ML2_CONF]
        # drop of -plugin from service name
        resource_map[NEUTRON_CONF]['services'].remove(
            'neutron-plugin-openvswitch-agent'
        )
        resource_map[NEUTRON_CONF]['services'].append(
            'neutron-openvswitch-agent'
        )
    else:
        del resource_map[OVS_CONF]
    return resource_map
def install_packages(servicename):
    if os_release('neutron-common') >= 'kilo':
        output = os.popen('pip install networking-onos')
        print(output.read())
    pkgs = ['neutron-common', 'neutron-plugin-ml2']
    pkgs = filter_installed_packages(pkgs)
    apt_install(pkgs, fatal=True)
def guard_map():
    '''Map of services and required interfaces that must be present before
    the service should be allowed to start'''
    gmap = {}
    nova_services = deepcopy(BASE_SERVICES)
    if os_release('nova-common') not in ['essex', 'folsom']:
        nova_services.append('nova-conductor')

    nova_interfaces = ['identity-service', 'amqp']
    if relation_ids('pgsql-nova-db'):
        nova_interfaces.append('pgsql-nova-db')
    else:
        nova_interfaces.append('shared-db')

    for svc in nova_services:
        gmap[svc] = nova_interfaces

    net_manager = network_manager()
    if net_manager in ['neutron', 'quantum'] and \
            not is_relation_made('neutron-api'):
        neutron_interfaces = ['identity-service', 'amqp']
        if relation_ids('pgsql-neutron-db'):
            neutron_interfaces.append('pgsql-neutron-db')
        else:
            neutron_interfaces.append('shared-db')
        if network_manager() == 'quantum':
            gmap['quantum-server'] = neutron_interfaces
        else:
            gmap['neutron-server'] = neutron_interfaces

    return gmap
def conditional_neutron_migration():
    if os_release('nova-common') <= 'icehouse':
        log('Not running neutron database migration as migrations are handled '
            'by the neutron-server process.')
    elif os_release('nova-common') >= 'kilo':
        log('Not running neutron database migration as migrations are '
            'handled by the neutron-api charm.')
    else:
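        # Only juno reaches this branch: <= icehouse and >= kilo are
        # handled above.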
        status_set('maintenance', 'Running neutron db migration')
        migrate_neutron_database()
        # neutron-api service may have appeared while the migration was
        # running so prod it just in case
        [neutron_api_relation_joined(rid=rid, remote_restart=True)
            for rid in relation_ids('neutron-api')]
        if 'neutron-server' in services():
            service_restart('neutron-server')
def setup_ipv6():
    ubuntu_rel = lsb_release()['DISTRIB_CODENAME'].lower()
    if CompareHostReleases(ubuntu_rel) < "trusty":
        raise Exception("IPv6 is not supported in the charms for Ubuntu "
                        "versions less than Trusty 14.04")

    # Need haproxy >= 1.5.3 for ipv6 so for Trusty if we are <= Kilo we need to
    # use trusty-backports otherwise we can use the UCA.
    this_os_release = os_release('neutron-server')
    if (ubuntu_rel == 'trusty' and
            CompareOpenStackReleases(this_os_release) < 'liberty'):
        add_source('deb http://archive.ubuntu.com/ubuntu trusty-backports '
                   'main')
        apt_update()
        apt_install('haproxy/trusty-backports', fatal=True)
def is_qos_requested_and_valid():
    """Check whether QoS should be enabled by checking whether it has been
       requested and, if it has, is it supported in the current configuration
    """

    if config('enable-qos'):
        if CompareOpenStackReleases(os_release('neutron-server')) < 'mitaka':
            msg = ("The enable-qos option is only supported on mitaka or "
                   "later")
            log(msg, ERROR)
            return False
        else:
            return True
    else:
        return False
def vsd_changed(relation_id=None, remote_unit=None):
    if config('neutron-plugin') == 'vsp':
        vsd_ip_address = relation_get('vsd-ip-address')
        if not vsd_ip_address:
            return
        vsd_address = '{}:8443'.format(vsd_ip_address)
        if os_release('neutron-server') >= 'kilo':
            cms_id = relation_get('nuage-cms-id')
            log("nuage-vsd-api-relation-changed : cms_id:{}".format(cms_id))
        nuage_config_file = neutron_plugin_attribute(config('neutron-plugin'),
                                                     'config', 'neutron')
        log('vsd-rest-api-relation-changed: ip address:{}'.format(vsd_address))
        log('vsd-rest-api-relation-changed:{}'.format(nuage_config_file))

        CONFIGS.write(nuage_config_file)
def register_configs(release=None):
    '''
    Register config files with their respective contexts.

    :param release: string containing the openstack release to use
                    over automatic detection based on installed pkgs.
    '''
    release = release or os_release('neutron-common')
    plugin = config('plugin')
    config_files = resolve_config_files(plugin, release)
    configs = templating.OSConfigRenderer(templates_dir=TEMPLATES,
                                          openstack_release=release)
    for conf in config_files[plugin]:
        configs.register(conf, config_files[plugin][conf]['hook_contexts'])
    return configs
def migrate_nova_database():
    '''Runs nova-manage to initialize a new database or migrate existing'''
    log('Migrating the nova database.', level=INFO)
    cmd = ['nova-manage', 'db', 'sync']
    subprocess.check_output(cmd)
    if os_release('nova-common') >= 'mitaka':
        log('Migrating the nova-api database.', level=INFO)
        cmd = ['nova-manage', 'api_db', 'sync']
        subprocess.check_output(cmd)
    if relation_ids('cluster'):
        log('Informing peers that dbsync is complete', level=INFO)
        peer_store('dbsync_state', 'complete')
    log('Enabling services', level=INFO)
    enable_services()
    cmd_all_services('start')
def do_openstack_upgrade(configs):
    """
    Perform an upgrade.  Takes care of upgrading packages, rewriting
    configs, database migrations and potentially any other post-upgrade
    actions.

    :param configs: The charms main OSConfigRenderer object.
    """
    cur_os_rel = os_release('neutron-common')
    new_src = config('openstack-origin')
    new_os_rel = get_os_codename_install_source(new_src)

    log('Performing OpenStack upgrade to %s.' % (new_os_rel))

    configure_installation_source(new_src)
    dpkg_opts = [
        '--option',
        'Dpkg::Options::=--force-confnew',
        '--option',
        'Dpkg::Options::=--force-confdef',
    ]
    apt_update(fatal=True)
    apt_upgrade(options=dpkg_opts, fatal=True, dist=True)
    pkgs = determine_packages(new_os_rel)
    # Sort packages just to make unit tests easier
    pkgs.sort()
    apt_install(packages=pkgs, options=dpkg_opts, fatal=True)

    # set CONFIGS to load templates from new release
    configs.set_release(openstack_release=new_os_rel)
    # Before kilo this is nova-cloud-controller's job
    if is_elected_leader(CLUSTER_RES):
        # Stamping seems broken and unnecessary in liberty (Bug #1536675)
        if os_release('neutron-common') < 'liberty':
            stamp_neutron_database(cur_os_rel)
        migrate_neutron_database()
def get_packages():
    '''Return a list of packages for install based on the configured plugin'''
    plugin = config('plugin')
    packages = deepcopy(GATEWAY_PKGS[plugin])
    cmp_os_source = CompareOpenStackReleases(os_release('neutron-common'))
    cmp_host_release = CompareHostReleases(lsb_release()['DISTRIB_CODENAME'])
    if plugin == OVS:
        if (cmp_os_source >= 'icehouse' and cmp_os_source < 'mitaka'
                and cmp_host_release < 'utopic'):
            # NOTE(jamespage) neutron-vpn-agent supersedes l3-agent for
            # icehouse but openswan was removed in utopic.
            packages.remove('neutron-l3-agent')
            packages.append('neutron-vpn-agent')
            packages.append('openswan')
        if cmp_os_source >= 'liberty':
            # Switch out mysql driver
            packages.remove('python-mysqldb')
            packages.append('python-pymysql')
        if cmp_os_source >= 'mitaka':
            # Switch out to actual ovs agent package
            packages.remove('neutron-plugin-openvswitch-agent')
            packages.append('neutron-openvswitch-agent')
        if cmp_os_source >= 'kilo':
            packages.append('python-neutron-fwaas')
    if plugin in (OVS, OVS_ODL):
        if cmp_os_source >= 'newton':
            # LBaaS v1 dropped in newton
            packages.remove('neutron-lbaas-agent')
            packages.append('neutron-lbaasv2-agent')
        if cmp_os_source >= 'train':
            # LBaaS v2 dropped in train
            packages.remove('neutron-lbaasv2-agent')

    if disable_nova_metadata(cmp_os_source):
        packages.remove('nova-api-metadata')
    packages.extend(determine_l3ha_packages())

    if cmp_os_source >= 'rocky':
        packages = [p for p in packages if not p.startswith('python-')]
        packages.extend(PY3_PACKAGES)
        if cmp_os_source >= 'train':
            packages.remove('python3-neutron-lbaas')
        # Remove python3-neutron-fwaas from stein release as the package is
        # included as dependency for neutron-l3-agent.
        if cmp_os_source >= 'stein':
            packages.remove('python3-neutron-fwaas')

    return packages
    def disable_mlockall(self):
        '''
        Determine if Open vSwitch use of mlockall() should be disabled

        If the disable-mlockall config option is unset, mlockall will be
        disabled if running in a container and will default to enabled if
        not running in a container.
        '''
        disable_mlockall = config('disable-mlockall')
        if disable_mlockall is None:
            disable_mlockall = False
            if is_container():
                disable_mlockall = True
        cmp_release = CompareOpenStackReleases(
            os_release('neutron-common', base='icehouse'))
        return (cmp_release >= 'mitaka' and disable_mlockall)
def determine_purge_packages():
    '''
    Determine the list of packages that were previously installed but are no
    longer needed.

    :returns: list of package names
    '''
    if CompareOpenStackReleases(os_release('glance')) >= 'rocky':
        pkgs = [p for p in PACKAGES if p.startswith('python-')]
        pkgs.append('python-glance')
        pkgs.append('python-memcache')
        pkgs.extend(["python-cinderclient",
                     "python-os-brick",
                     "python-oslo.rootwrap"])
        return pkgs
    return []
    def __init__(self, release=None, **kwargs):
        """Custom initialiser for the class.

        If no release is passed, the charm determines the release from the
        ch_utils.os_release() function.
        """
        self.src_branch = hookenv.config('source-branch')
        if self.src_branch:
            self.install_dir = "/home/ubuntu/congress"
            init_script = '/etc/init/congress-server.conf'
            self.restart_map[init_script] = ['congress-server']
        if release is None:
            if self.src_branch:
                release = self.src_branch.split('/')[1]
            else:
                release = ch_utils.os_release('python-keystonemiddleware')
        super(CongressCharm, self).__init__(release=release, **kwargs)
def db_migration():
    release = CompareOpenStackReleases(os_release('openstack-dashboard'))
    if release >= 'rocky':
        python = 'python3'
        python_django = 'python3-django'
    else:
        python = 'python2'
        python_django = 'python-django'
    if cmp_pkgrevno(python_django, '1.9') >= 0:
        # syncdb was removed in django 1.9
        subcommand = 'migrate'
    else:
        subcommand = 'syncdb'
    cmd = [python, '/usr/share/openstack-dashboard/manage.py', subcommand,
           '--noinput']
    subprocess.check_call(cmd)
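# cmp_pkgrevno() (from charmhelpers.core.host) compares the installed
# version of a package against a reference version, returning 1, 0 or -1;
# the check above therefore reads "installed python-django is >= 1.9".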
def install():
    execd_preinstall()
    configure_installation_source(config('openstack-origin'))

    apt_update(fatal=True)
    packages = determine_packages()
    _os_release = os_release('openstack-dashboard')
    if CompareOpenStackReleases(_os_release) < 'icehouse':
        packages += ['nodejs', 'node-less']
    if lsb_release()['DISTRIB_CODENAME'] == 'precise':
        # Explicitly upgrade python-six Bug#1420708
        apt_install('python-six', fatal=True)
    packages = filter_installed_packages(packages)
    if packages:
        status_set('maintenance', 'Installing packages')
        apt_install(packages, fatal=True)
def upgrade_charm():
    apt_install(filter_installed_packages(determine_packages()), fatal=True)
    packages_removed = remove_old_packages()
    for rel_id in relation_ids('amqp'):
        amqp_joined(relation_id=rel_id)
    update_nrpe_config()
    scrub_old_style_ceph()
    if packages_removed:
        juju_log("Package purge detected, restarting services")
        for s in services():
            service_restart(s)
    # call the policy overrides handler which will install any policy overrides
    maybe_do_policyd_overrides(
        os_release('cinder-common'),
        'cinder',
        restart_handler=lambda: service_restart('cinder-api'))
def migrate_neutron_database():
    '''Initializes a new database or upgrades an existing database.'''
    log('Migrating the neutron database.')
    if (os_release('neutron-server') == 'juno' and
            config('neutron-plugin') == 'vsp'):
        nuage_vsp_juno_neutron_migration()
    else:
        plugin = config('neutron-plugin')
        cmd = ['neutron-db-manage',
               '--config-file', NEUTRON_CONF,
               '--config-file', neutron_plugin_attribute(plugin,
                                                         'config',
                                                         'neutron'),
               'upgrade',
               'head']
        subprocess.check_output(cmd)
def migrate_nova_databases():
    '''Runs nova-manage to initialize new databases or migrate existing'''
    if CompareOpenStackReleases(os_release('nova-common')) < 'ocata':
        migrate_nova_api_database()
        migrate_nova_database()
        online_data_migrations_if_needed()
        finalize_migrate_nova_databases()

    elif is_cellv2_init_ready():
        migrate_nova_api_database()
        initialize_cell_databases()
        migrate_nova_database()
        online_data_migrations_if_needed()
        add_hosts_to_cell()
        map_instances()
        finalize_migrate_nova_databases()
def resource_map(release=None):
    '''
    Dynamically generate a map of resources that will be managed for a single
    hook execution.
    '''
    release = release or os_release('neutron-common')

    resource_map = deepcopy(BASE_RESOURCE_MAP)
    if CompareOpenStackReleases(release) >= 'liberty':
        resource_map.update(LIBERTY_RESOURCE_MAP)

    if os.path.exists('/etc/apache2/conf-available'):
        resource_map.pop(APACHE_CONF)
    else:
        resource_map.pop(APACHE_24_CONF)

    if manage_plugin():
        # add neutron plugin requirements. nova-c-c only needs the
        # neutron-server associated with configs, not the plugin agent.
        plugin = config('neutron-plugin')
        conf = neutron_plugin_attribute(plugin, 'config', 'neutron')
        ctxts = (neutron_plugin_attribute(plugin, 'contexts', 'neutron') or
                 [])
        services = neutron_plugin_attribute(plugin, 'server_services',
                                            'neutron')
        resource_map[conf] = {}
        resource_map[conf]['services'] = services
        resource_map[conf]['contexts'] = ctxts
        resource_map[conf]['contexts'].append(
            neutron_api_context.NeutronCCContext())

        # update for postgres
        resource_map[conf]['contexts'].append(
            context.PostgresqlDBContext(database=config('database')))

    else:
        resource_map[NEUTRON_CONF]['contexts'].append(
            neutron_api_context.NeutronApiSDNContext()
        )
        resource_map[NEUTRON_DEFAULT]['contexts'] = \
            [neutron_api_context.NeutronApiSDNConfigFileContext()]
    if enable_memcache(release=release):
        resource_map[MEMCACHED_CONF] = {
            'contexts': [context.MemcacheContext()],
            'services': ['memcached']}

    return resource_map
def db_joined(relation_id=None):
    cmp_os_release = CompareOpenStackReleases(os_release('nova-common'))
    if config('prefer-ipv6'):
        sync_db_with_multi_ipv6_addresses(config('database'),
                                          config('database-user'),
                                          relation_prefix='nova')

        if cmp_os_release >= 'mitaka':
            # NOTE: mitaka uses a second nova-api database as well
            sync_db_with_multi_ipv6_addresses('nova_api',
                                              config('database-user'),
                                              relation_prefix='novaapi')

        if cmp_os_release >= 'ocata':
            # NOTE: ocata requires cells v2
            sync_db_with_multi_ipv6_addresses('nova_cell0',
                                              config('database-user'),
                                              relation_prefix='novacell0')
    else:
        # Avoid churn check for access-network early
        access_network = None
        for unit in related_units(relid=relation_id):
            access_network = relation_get(rid=relation_id,
                                          unit=unit,
                                          attribute='access-network')
            if access_network:
                break
        host = get_relation_ip('shared-db', cidr_network=access_network)

        relation_set(nova_database=config('database'),
                     nova_username=config('database-user'),
                     nova_hostname=host,
                     relation_id=relation_id)

        if cmp_os_release >= 'mitaka':
            # NOTE: mitaka uses a second nova-api database as well
            relation_set(novaapi_database='nova_api',
                         novaapi_username=config('database-user'),
                         novaapi_hostname=host,
                         relation_id=relation_id)

        if cmp_os_release >= 'ocata':
            # NOTE: ocata requires cells v2
            relation_set(novacell0_database='nova_cell0',
                         novacell0_username=config('database-user'),
                         novacell0_hostname=host,
                         relation_id=relation_id)
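# On mitaka or later this publishes up to three prefixed database requests
# on the shared-db relation; illustrative resulting relation data:
#
#     nova_database=nova            nova_username=nova  nova_hostname=10.0.0.5
#     novaapi_database=nova_api     novaapi_username=nova  ...
#     novacell0_database=nova_cell0 novacell0_username=nova  ...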
def upgrade_charm():
    packages_to_install = filter_installed_packages(determine_packages())
    if packages_to_install:
        log('Installing apt packages')
        status_set('maintenance', 'Installing apt packages')
        apt_install(packages_to_install)
    packages_removed = remove_old_packages()

    if run_in_apache():
        disable_unused_apache_sites()

    log('Regenerating configuration files')
    status_set('maintenance', 'Regenerating configuration files')
    CONFIGS.write_all()

    # We no longer use the admin_token and need to ensure the charm has
    # credentials.  This call is idempotent and safe to run on existing
    # deployments.
    if is_leader():
        bootstrap_keystone(configs=CONFIGS)

    # See LP bug 1519035
    leader_init_db_if_ready()

    update_nrpe_config()

    if packages_removed:
        status_set('maintenance', 'Restarting services')
        log("Package purge detected, restarting services", "INFO")
        for s in services():
            service_restart(s)
        stop_manager_instance()

    if is_elected_leader(CLUSTER_RES):
        log('Cluster leader - ensuring endpoint configuration is up to '
            'date', level=DEBUG)
        update_all_identity_relation_units()
        # also ensure that the PCI-DSS protection is in place for service
        # accounts.
        ensure_all_service_accounts_protected_for_pci_dss_options()

    # call the policy overrides handler which will install any policy overrides
    maybe_do_policyd_overrides(
        os_release('keystone'),
        'keystone',
        restart_handler=lambda: service_restart('apache2'))
    inform_peers_if_ready(check_api_unit_ready)
def config_changed():
    resolve_CONFIGS()
    if config('prefer-ipv6'):
        setup_ipv6()
        localhost = 'ip6-localhost'
    else:
        localhost = 'localhost'

    if (os_release('openstack-dashboard') == 'icehouse'
            and config('offline-compression') in ['no', 'False']):
        apt_install(filter_installed_packages(['python-lesscpy']), fatal=True)

    # Ensure default role changes are propagated to keystone
    for relid in relation_ids('identity-service'):
        keystone_joined(relid)
    enable_ssl()

    if not config('action-managed-upgrade'):
        if openstack_upgrade_available('openstack-dashboard'):
            status_set('maintenance', 'Upgrading to new OpenStack release')
            do_openstack_upgrade(configs=CONFIGS)
            resolve_CONFIGS(force_update=True)

    env_vars = {
        'OPENSTACK_URL_HORIZON':
            "http://{}:70{}|Login+-+OpenStack".format(localhost,
                                                      config('webroot')),
        'OPENSTACK_SERVICE_HORIZON': "apache2",
        'OPENSTACK_PORT_HORIZON_SSL': 433,
        'OPENSTACK_PORT_HORIZON': 70,
    }
    save_script_rc(**env_vars)
    update_nrpe_config()
    CONFIGS.write_all()
    check_custom_theme()
    open_port(80)
    open_port(443)
    for relid in relation_ids('certificates'):
        for unit in related_units(relid):
            certs_changed(relation_id=relid, unit=unit)
    for relid in relation_ids('ha'):
        ha_relation_joined(relation_id=relid)

    websso_trusted_dashboard_changed()
def _do_openstack_upgrade(new_src):
    enable_policy_rcd()
    # All upgrades to Liberty are forced to step through Kilo. Liberty does
    # not have the migrate_flavor_data option (Bug #1511466) available so it
    # must be done pre-upgrade
    if os_release('nova-common') == 'kilo' and is_elected_leader(CLUSTER_RES):
        migrate_nova_flavors()
    new_os_rel = get_os_codename_install_source(new_src)
    log('Performing OpenStack upgrade to %s.' % (new_os_rel))

    configure_installation_source(new_src)
    dpkg_opts = [
        '--option',
        'Dpkg::Options::=--force-confnew',
        '--option',
        'Dpkg::Options::=--force-confdef',
    ]

    apt_update(fatal=True)
    apt_upgrade(options=dpkg_opts, fatal=True, dist=True)
    apt_install(determine_packages(), fatal=True)

    disable_policy_rcd()

    # NOTE(jamespage) upgrade with existing config files as the
    # havana->icehouse migration enables new service_plugins which
    # create issues with db upgrades
    reset_os_release()
    configs = register_configs(release=new_os_rel)
    configs.write_all()

    if new_os_rel >= 'mitaka' and not database_setup(prefix='novaapi'):
        # NOTE: Defer service restarts and database migrations for now
        #       as nova_api database is not yet created
        if (relation_ids('cluster') and is_elected_leader(CLUSTER_RES)):
            # NOTE: reset dbsync state so that migration will complete
            #       when the nova_api database is setup.
            peer_store('dbsync_state', None)
        return configs

    if is_elected_leader(CLUSTER_RES):
        status_set('maintenance', 'Running nova db migration')
        migrate_nova_database()
    if not is_unit_paused_set():
        for svc in services():
            service_start(svc)

    return configs
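
As context for the dpkg_opts above, here is a minimal sketch of the apt-get invocation those options amount to, assuming (as charmhelpers does) that apt_upgrade shells out to apt-get; the exact command line is illustrative, not the charmhelpers source.

# Rough command-line equivalent of apt_upgrade(options=dpkg_opts,
# fatal=True, dist=True): the Dpkg options keep conffile handling
# non-interactive so the upgrade never blocks on a prompt.
import os
import subprocess

dpkg_opts = [
    '--option', 'Dpkg::Options::=--force-confnew',
    '--option', 'Dpkg::Options::=--force-confdef',
]
subprocess.check_call(
    ['apt-get', '--assume-yes'] + dpkg_opts + ['dist-upgrade'],
    env=dict(os.environ, DEBIAN_FRONTEND='noninteractive'))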
def resource_map():
    '''
    Dynamically generate a map of resources that will be managed for a single
    hook execution.
    '''
    drop_config = []
    resource_map = deepcopy(BASE_RESOURCE_MAP)
    if use_dvr():
        resource_map.update(DVR_RESOURCE_MAP)
        resource_map.update(METADATA_RESOURCE_MAP)
        dvr_services = ['neutron-metadata-agent', 'neutron-l3-agent']
        resource_map[NEUTRON_CONF]['services'] += dvr_services
    if enable_local_dhcp():
        resource_map.update(METADATA_RESOURCE_MAP)
        resource_map.update(DHCP_RESOURCE_MAP)
        metadata_services = ['neutron-metadata-agent', 'neutron-dhcp-agent']
        resource_map[NEUTRON_CONF]['services'] += metadata_services
    # Remap any service names as required
    _os_release = os_release('neutron-common', base='icehouse')
    if CompareOpenStackReleases(_os_release) >= 'mitaka':
        # ml2_conf.ini -> openvswitch_agent.ini
        drop_config.append(ML2_CONF)
        # drop the '-plugin' suffix from the service name
        resource_map[NEUTRON_CONF]['services'].remove(
            'neutron-plugin-openvswitch-agent')
        resource_map[NEUTRON_CONF]['services'].append(
            'neutron-openvswitch-agent')
        if not use_dpdk():
            drop_config.append(DPDK_INTERFACES)
        if enable_sriov_agent():
            resource_map.update(SRIOV_RESOURCE_MAP)
            resource_map[NEUTRON_CONF]['services'].append(
                'neutron-sriov-agent')
    else:
        drop_config.extend([OVS_CONF, DPDK_INTERFACES])

    # Use MAAS 1.9+ for MTU and external port config on xenial and above
    if CompareHostReleases(lsb_release()['DISTRIB_CODENAME']) >= 'xenial':
        drop_config.extend([EXT_PORT_CONF, PHY_NIC_MTU_CONF])

    for _conf in drop_config:
        try:
            del resource_map[_conf]
        except KeyError:
            pass

    return resource_map
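
A resource map like this is commonly inverted into a restart map for the restart_on_change decorator; the helper below is a sketch of that idiom, assuming each value carries a 'services' list as above, not code from this charm.

# Sketch: derive {config_file: [services]} from resource_map() so that a
# change to a rendered template restarts only the services that consume it.
def restart_map():
    return {cfg: rmap['services']
            for cfg, rmap in resource_map().items()
            if rmap['services']}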
    def __call__(self):
        stores = ['glance.store.filesystem.Store', 'glance.store.http.Store']
        store_mapping = {
            'ceph': 'glance.store.rbd.Store',
            'object-store': 'glance.store.swift.Store',
        }
        for store_relation, store_type in store_mapping.items():
            if relation_ids(store_relation):
                stores.append(store_type)
        if ((relation_ids('cinder-volume-service')
             or relation_ids('storage-backend'))
                and CompareOpenStackReleases(
                    os_release('glance-common')) >= 'mitaka'):
            # Even if storage-backend is present with cinder-backend=False,
            # glance should not store images in cinder by default, but it
            # can still read images from cinder.
            stores.append('glance.store.cinder.Store')
        return {'known_stores': ','.join(stores)}
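
A standalone illustration of the pattern above, with the relation state faked: whichever storage relations exist decide which extra stores are appended to the two baseline stores.

# Values here are hypothetical; relation_ids() normally supplies the state.
store_mapping = {
    'ceph': 'glance.store.rbd.Store',
    'object-store': 'glance.store.swift.Store',
}
present_relations = ['ceph']  # pretend only a ceph relation exists
stores = ['glance.store.filesystem.Store', 'glance.store.http.Store']
stores += [store for rel, store in sorted(store_mapping.items())
           if rel in present_relations]
print(','.join(stores))
# glance.store.filesystem.Store,glance.store.http.Store,glance.store.rbd.Store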
    def __call__(self):
        backends = []
        for rid in relation_ids('storage-backend'):
            for unit in related_units(rid):
                backend_name = relation_get('backend_name', unit, rid)
                if backend_name:
                    backends.append(backend_name)
        # From Ocata onwards every backend must live in its own config
        # section.
        if CompareOpenStackReleases(os_release('cinder-common')) >= "ocata":
            if relation_ids('ceph'):
                backends.append('CEPH')
            if enable_lvm():
                backends.append('LVM')
            # Fall back to the package default backend to stop the service
            # flapping.
            if not backends:
                backends = ['LVM']
        return {'active_backends': backends, 'backends': ",".join(backends)}
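
The returned dict is presumably consumed by a cinder.conf template; a hedged sketch of the kind of line it would drive (the output below is illustrative, not the charm's actual template):

# Illustrative only: how the context output could become an
# enabled_backends setting in cinder.conf.
ctx = {'active_backends': ['CEPH', 'LVM'], 'backends': 'CEPH,LVM'}
print('enabled_backends = {}'.format(ctx['backends']))
# enabled_backends = CEPH,LVM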
def register_configs():
    release = os_release('heat-common')
    configs = templating.OSConfigRenderer(templates_dir=TEMPLATES,
                                          openstack_release=release)

    confs = [HEAT_CONF, HEAT_API_PASTE, HAPROXY_CONF, ADMIN_OPENRC]
    for conf in confs:
        configs.register(conf, CONFIG_FILES[conf]['contexts'])

    if os.path.exists('/etc/apache2/conf-available'):
        configs.register(HTTPS_APACHE_24_CONF,
                         CONFIG_FILES[HTTPS_APACHE_24_CONF]['contexts'])
    else:
        configs.register(HTTPS_APACHE_CONF,
                         CONFIG_FILES[HTTPS_APACHE_CONF]['contexts'])

    return configs
def determine_packages():
    """Determine packages to install"""
    packages = deepcopy(BASE_PACKAGES)
    release = CompareOpenStackReleases(os_release('openstack-dashboard'))
    # Really should be handled as a dep in the openstack-dashboard package
    if release >= 'mitaka':
        packages.append('python-pymysql')
    if release >= 'ocata' and release < 'rocky':
        packages.append('python-neutron-lbaas-dashboard')
    if release >= 'queens':
        packages.append('python-designate-dashboard')
        packages.append('python-heat-dashboard')
        packages.append('python-neutron-fwaas-dashboard')
    if release >= 'rocky':
        packages = [p for p in packages if not p.startswith('python-')]
        packages.extend(PY3_PACKAGES)
    return list(set(packages))
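
Callers usually pair determine_packages() with filter_installed_packages so apt only sees what is missing; a minimal sketch of that idiom:

# Assumes the usual charmhelpers imports:
# from charmhelpers.fetch import apt_install, filter_installed_packages
apt_install(filter_installed_packages(determine_packages()), fatal=True)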
def conditional_neutron_migration():
    if CompareOpenStackReleases(os_release('neutron-common')) < 'kilo':
        log('Not running neutron database migration as migrations are handled '
            'by the neutron-server process or nova-cloud-controller charm.')
        return

    if is_elected_leader(CLUSTER_RES):
        allowed_units = relation_get('allowed_units')
        if allowed_units and local_unit() in allowed_units.split():
            migrate_neutron_database()
            service_restart('neutron-server')
        else:
            log('Not running neutron database migration: either no '
                'allowed_units are set or this unit is not in the list')
            return
    else:
        log('Not running neutron database migration, not leader')
def conditional_neutron_migration():
    if CompareOpenStackReleases(os_release('neutron-server')) <= 'icehouse':
        log('Not running neutron database migration as migrations are handled '
            'by the neutron-server process.')
        return
    if is_elected_leader(CLUSTER_RES):
        allowed_units = relation_get('allowed_units')
        if allowed_units and local_unit() in allowed_units.split():
            migrate_neutron_database()
            if not is_unit_paused_set():
                service_restart('neutron-server')
        else:
            log('Not running neutron database migration: either no '
                'allowed_units are set or this unit is not in the list')
            return
    else:
        log('Not running neutron database migration, not leader')
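
The allowed_units gate in both variants relies on the database charm publishing a space-separated list of units that are cleared to run migrations; a small standalone sketch of the check (unit names hypothetical):

# allowed_units normally comes from relation_get(), local from local_unit().
allowed_units = 'neutron-api/0 neutron-api/1'
local = 'neutron-api/0'
if allowed_units and local in allowed_units.split():
    print('this unit may run the database migration')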
def register_configs():
    ''' Register config files with their respective contexts. '''
    release = os_release('openstack-dashboard')
    configs = templating.OSConfigRenderer(templates_dir=TEMPLATES,
                                          openstack_release=release)

    confs = [LOCAL_SETTINGS,
             HAPROXY_CONF,
             PORTS_CONF]

    if CompareOpenStackReleases(release) >= 'mitaka':
        configs.register(KEYSTONEV3_POLICY,
                         CONFIG_FILES[KEYSTONEV3_POLICY]['hook_contexts'])
        CONFIG_FILES[LOCAL_SETTINGS]['hook_contexts'].append(
            context.SharedDBContext(
                user=config('database-user'),
                database=config('database'),
                ssl_dir=DASHBOARD_CONF_DIR))

    for conf in confs:
        configs.register(conf, CONFIG_FILES[conf]['hook_contexts'])

    if os.path.isdir(APACHE_CONF_DIR) and cmp_pkgrevno('apache2', '2.4') >= 0:
        for conf in [APACHE_CONF, APACHE_SSL, APACHE_DEFAULT]:
            if os.path.isfile(conf):
                log('Removing old config %s' % (conf))
                os.remove(conf)
        configs.register(APACHE_24_DEFAULT,
                         CONFIG_FILES[APACHE_24_DEFAULT]['hook_contexts'])
        configs.register(APACHE_24_CONF,
                         CONFIG_FILES[APACHE_24_CONF]['hook_contexts'])
        configs.register(APACHE_24_SSL,
                         CONFIG_FILES[APACHE_24_SSL]['hook_contexts'])
    else:
        configs.register(APACHE_DEFAULT,
                         CONFIG_FILES[APACHE_DEFAULT]['hook_contexts'])
        configs.register(APACHE_CONF,
                         CONFIG_FILES[APACHE_CONF]['hook_contexts'])
        configs.register(APACHE_SSL,
                         CONFIG_FILES[APACHE_SSL]['hook_contexts'])

    if os.path.exists(os.path.dirname(ROUTER_SETTING)):
        configs.register(ROUTER_SETTING,
                         CONFIG_FILES[ROUTER_SETTING]['hook_contexts'])

    return configs
def install():
    status_set('maintenance', 'Executing pre-install')
    execd_preinstall()
    configure_installation_source(config('openstack-origin'))

    status_set('maintenance', 'Installing apt packages')
    apt_update()
    apt_install(determine_packages(), fatal=True)

    # Start migration to agent registration with FQDNs for newly installed
    # units with OpenStack release Stein or newer.
    release = os_release('nova-common')
    if CompareOpenStackReleases(release) >= 'stein':
        db = kv()
        db.set(USE_FQDN_KEY, True)
        db.flush()

    install_vaultlocker()
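
The flag stored via kv() is meant to be read back by later hooks; a minimal read-side sketch, assuming charmhelpers' unitdata store and the same USE_FQDN_KEY constant used in install() above:

# Assumes: from charmhelpers.core.unitdata import kv
db = kv()
use_fqdn = db.get(USE_FQDN_KEY, False)  # defaults to False on older units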
    def write_all(self):
        """Write all of the config files.

        This overrides the parent version of the function so that, when the
        hook is config-changed or upgrade-charm, writing the LOCAL_SETTINGS
        file is deferred until after the policy overrides have been
        processed.
        """
        _hook = hook_name()
        if _hook not in ('upgrade-charm', 'config-changed'):
            return super(HorizonOSConfigRenderer, self).write_all()
        # Otherwise, first do all the other templates
        for k in self.templates.keys():
            if k != LOCAL_SETTINGS:
                self.write(k)
        # Now process the policy overrides
        maybe_handle_policyd_override(os_release('openstack-dashboard'), _hook)
        # Finally, write LOCAL_SETTINGS now that the policy overrides are
        # handled.
        self.write(LOCAL_SETTINGS)
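
A hedged sketch of exercising the deferred-write behaviour; the constructor arguments are illustrative, and on a config-changed hook LOCAL_SETTINGS is written only after the policy overrides step:

# Illustrative only; argument values are hypothetical.
renderer = HorizonOSConfigRenderer(templates_dir=TEMPLATES,
                                   openstack_release='ussuri')
renderer.write_all()  # LOCAL_SETTINGS is rendered after the policyd step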