Example #1
def config_changed():
    configure_deferred_restarts(deferrable_services())
    if not config('action-managed-upgrade'):
        if openstack_upgrade_available(NEUTRON_COMMON):
            status_set('maintenance', 'Running openstack upgrade')
            do_openstack_upgrade(CONFIGS)

    update_nrpe_config()

    module_settings = config('kernel-modules')
    if module_settings:
        if is_container():
            log("Cannot load modules inside of a container", level=WARNING)
        else:
            for module in module_settings.split():
                try:
                    modprobe(module)
                except Exception:
                    message = "Failed to load kernel module '%s'" % module
                    log(message, level=WARNING)

    sysctl_settings = config('sysctl')
    if sysctl_settings:
        if is_container():
            log("Cannot create sysctls inside of a container", level=WARNING)
        else:
            create_sysctl(sysctl_settings,
                          '/etc/sysctl.d/50-quantum-gateway.conf')

    # Re-run joined hooks as config might have changed
    for r_id in relation_ids('amqp'):
        amqp_joined(relation_id=r_id)
    for r_id in relation_ids('amqp-nova'):
        amqp_nova_joined(relation_id=r_id)
    if valid_plugin():
        CONFIGS.write_all()
        configure_ovs()
        configure_apparmor()
    else:
        message = 'Please provide a valid plugin config'
        log(message, level=ERROR)
        status_set('blocked', message)
        sys.exit(1)
    if config('plugin') == 'n1kv':
        if config('enable-l3-agent'):
            status_set('maintenance', 'Installing apt packages')
            apt_install(filter_installed_packages('neutron-l3-agent'))
        else:
            apt_purge('neutron-l3-agent')

    # Setup legacy ha configurations
    update_legacy_ha_files()
    # Disable nova metadata if possible.
    if disable_nova_metadata():
        remove_legacy_nova_metadata()
    if disable_neutron_lbaas():
        remove_legacy_neutron_lbaas()
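
Every example below gates host-level work on charmhelpers' is_container(). For context, here is a minimal sketch of that check, assuming the systemd/upstart detection used by recent charmhelpers releases (charmhelpers.core.host has the authoritative version):

import os
import subprocess

def is_container():
    """Report whether this unit is running inside a container."""
    if os.path.isdir('/run/systemd/system'):
        # on systemd hosts, systemd-detect-virt exits 0 for containers
        return subprocess.call(
            ['systemd-detect-virt', '--container']) == 0
    # upstart-era systems leave a marker file behind
    return os.path.exists('/run/container_type')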
Example #2
def configure_system_for_redis():
    """Configurations for production redis.
    (only applied to non-container based machines)
    """

    if not is_container():
        with open('/etc/sysctl.d/50-redis-charm.conf', 'w') as f:
            f.write("vm.overcommit_memory = 1\n")
            f.write("net.core.somaxconn = {}".format(SOMAXCONN))

        # increase open file limits
        with open("/etc/security/limits.conf", "a") as f:
            f.write("{}{}{}64000\n".format("*".ljust(17), "soft".ljust(8),
                                           "nofile".ljust(16)))
            f.write("{}{}{}64000\n".format("*".ljust(17), "hard".ljust(8),
                                           "nofile".ljust(16)))
        with open("/etc/systemd/system.conf", "a") as f:
            f.write("DefaultLimitNOFILE=65536\n")

        # apply the settings written above; a bare 'sysctl -p' only
        # reads /etc/sysctl.conf, not files under /etc/sysctl.d
        check_call(["sysctl", "-p", "/etc/sysctl.d/50-redis-charm.conf"])

        with open("/etc/rc.local", "a") as f:
            f.write(
                "\necho never > /sys/kernel/mm/transparent_hugepage/enabled")

        with open('/sys/kernel/mm/transparent_hugepage/enabled', 'w') as f:
            f.write('never')

    set_flag('redis.system.configured')
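
For reference, the ljust() padding produces fixed-width columns; a quick interactive check shows the exact lines appended to /etc/security/limits.conf:

>>> "{}{}{}64000".format("*".ljust(17), "soft".ljust(8), "nofile".ljust(16))
'*                soft    nofile          64000'
>>> "{}{}{}64000".format("*".ljust(17), "hard".ljust(8), "nofile".ljust(16))
'*                hard    nofile          64000'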
Example #3
    def custom_status_check(self):
        if ch_host.is_container():
            self.unit.status = ops.model.BlockedStatus(
                "Some charm actions cannot be performed when deployed in a "
                "container")
            return False
        return True
Example #4
def render_bootstrap_config():
    '''Render the bootstrap elasticsearch.yml and restart.
    '''
    ctxt = {
        'extra_ctxt': {
            'xpack_security_enabled': 'xpack.security.enabled: false',
            'bootstrap_memory_lock': kv.get('bootstrap_memory_lock'),
        },
        'elasticsearch_yml_template': 'elasticsearch-bootstrap.yml.j2',
    }

    if is_container():
        ctxt['extra_ctxt']['discovery_type'] = kv.get('discovery_type')
    else:
        ctxt['extra_ctxt']['cluster_initial_master_nodes'] = [
            charms.leadership.leader_get('master_ip')
        ]

    render_elasticsearch_yml(**ctxt)

    sp.call(['systemctl', 'daemon-reload'])
    sp.call(['systemctl', 'enable', 'elasticsearch.service'])

    if start_restart_systemd_service('elasticsearch'):
        sleep(1)
        set_flag('elasticsearch.init.running')
Example #5
def workaround_lxd_kernel_params():
    """
    Workaround for kubelet not starting in LXD when kernel params are not set
    to the desired values.
    """
    if host.is_container():
        hookenv.log("LXD detected, faking kernel params via bind mounts")
        root_dir = "/root/cdk/lxd-kernel-params"
        os.makedirs(root_dir, exist_ok=True)
        # Kernel params taken from:
        # https://github.com/kubernetes/kubernetes/blob/v1.22.0/pkg/kubelet/cm/container_manager_linux.go#L421-L426
        # https://github.com/kubernetes/kubernetes/blob/v1.22.0/pkg/util/sysctl/sysctl.go#L30-L64
        params = {
            "vm.overcommit_memory": 1,
            "vm.panic_on_oom": 0,
            "kernel.panic": 10,
            "kernel.panic_on_oops": 1,
            "kernel.keys.root_maxkeys": 1000000,
            "kernel.keys.root_maxbytes": 1000000 * 25,
        }
        for param, param_value in params.items():
            fake_param_path = root_dir + "/" + param
            with open(fake_param_path, "w") as f:
                f.write(str(param_value))
            real_param_path = "/proc/sys/" + param.replace(".", "/")
            host.fstab_add(fake_param_path, real_param_path, "none", "bind")
        subprocess.check_call(["mount", "-a"])
    else:
        hookenv.log("LXD not detected, not faking kernel params")
Example #6
def configure_vault(context):
    log("Running configure_vault", level=DEBUG)
    context['disable_mlock'] = is_container() or config('disable-mlock')

    context['ssl_available'] = is_state('vault.ssl.available')

    if is_flag_set('etcd.tls.available'):
        etcd = endpoint_from_flag('etcd.available')
        log("Etcd detected, adding to context", level=DEBUG)
        context['etcd_conn'] = etcd.connection_string()
        context['etcd_tls_ca_file'] = '/var/snap/vault/common/etcd-ca.pem'
        context['etcd_tls_cert_file'] = '/var/snap/vault/common/etcd-cert.pem'
        context['etcd_tls_key_file'] = '/var/snap/vault/common/etcd.key'
        save_etcd_client_credentials(etcd,
                                     key=context['etcd_tls_key_file'],
                                     cert=context['etcd_tls_cert_file'],
                                     ca=context['etcd_tls_ca_file'])
        context['api_addr'] = vault.get_api_url()
        context['cluster_addr'] = vault.get_cluster_url()
        log("Etcd detected, setting api_addr to {}".format(
            context['api_addr']))
    else:
        log("Etcd not detected", level=DEBUG)
    log("Rendering vault.hcl.j2", level=DEBUG)
    render('vault.hcl.j2', VAULT_CONFIG, context, perms=0o600)
    log("Rendering vault systemd configuation", level=DEBUG)
    render('vault.service.j2', VAULT_SYSTEMD_CONFIG, {}, perms=0o644)
    service('enable', 'vault')
    log("Opening vault port", level=DEBUG)
    open_port(8200)
    set_flag('configured')
    if any_file_changed([VAULT_CONFIG, VAULT_SYSTEMD_CONFIG]):
        # force a restart if config has changed
        clear_flag('started')
Example #7
def install_packages():
    apt_update()
    # NOTE(jamespage): install neutron-common package so we always
    #                  get a clear signal on which OS release is
    #                  being deployed
    apt_install(filter_installed_packages(['neutron-common']), fatal=True)
    # NOTE(jamespage): ensure early install of dkms related
    #                  dependencies for kernels which need
    #                  openvswitch via dkms (12.04).
    dkms_packages = determine_dkms_package()
    if dkms_packages:
        apt_install([headers_package()] + dkms_packages, fatal=True)
    missing_packages = filter_installed_packages(determine_packages())
    if missing_packages:
        status_set('maintenance', 'Installing packages')
        apt_install(missing_packages, fatal=True)
    if use_dpdk():
        enable_ovs_dpdk()

    # NOTE(tpsilva): if we're using openvswitch driver, we need to explicitly
    #                load the nf_conntrack_ipv4/6 module, since it won't be
    #                loaded automatically in some cases. LP#1834213
    if not is_container() and config('firewall-driver') == 'openvswitch':
        try:
            modprobe('nf_conntrack_ipv4', True)
            modprobe('nf_conntrack_ipv6', True)
        except subprocess.CalledProcessError:
            # Newer kernel versions (4.19+) don't have two modules for that, so
            # only load nf_conntrack
            log("This kernel does not have nf_conntrack_ipv4/6. "
                "Loading nf_conntrack only.")
            modprobe('nf_conntrack', True)
Example #8
def configure_vm_max_heap():
    bootstrap_memory_lock = 'bootstrap.memory_lock: false'
    if is_container():
        kv.set('discovery_type', 'discovery.type: single-node')
        bootstrap_memory_lock = 'bootstrap.memory_lock: true'
    kv.set('bootstrap_memory_lock', bootstrap_memory_lock)
    set_flag('container.check.complete')
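
These kv values are read back by render_bootstrap_config() in Example #4 and render_elasticsearch_yml() in Example #22 when rendering elasticsearch.yml. A minimal sketch of the round trip, assuming kv is charmhelpers' unitdata store:

from charmhelpers.core import unitdata

kv = unitdata.kv()
kv.set('discovery_type', 'discovery.type: single-node')
# ... later, in the render step:
assert kv.get('discovery_type') == 'discovery.type: single-node'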
Example #9
def configure_kube_proxy(configure_prefix,
                         api_servers,
                         cluster_cidr,
                         bind_address=None):
    kube_proxy_opts = {}
    kube_proxy_opts["cluster-cidr"] = cluster_cidr
    kube_proxy_opts["kubeconfig"] = kubeproxyconfig_path
    kube_proxy_opts["logtostderr"] = "true"
    kube_proxy_opts["v"] = "0"
    num_apis = len(api_servers)
    kube_proxy_opts["master"] = api_servers[get_unit_number() % num_apis]
    kube_proxy_opts["hostname-override"] = get_node_name()
    if bind_address:
        kube_proxy_opts["bind-address"] = bind_address
    elif is_ipv6(cluster_cidr):
        kube_proxy_opts["bind-address"] = "::"

    if host.is_container():
        kube_proxy_opts["conntrack-max-per-core"] = "0"

    if is_dual_stack(cluster_cidr):
        kube_proxy_opts["feature-gates"] = "IPv6DualStack=true"

    configure_kubernetes_service(configure_prefix, "kube-proxy",
                                 kube_proxy_opts, "proxy-extra-args")
def config_changed():
    # if we are paused, delay doing any config changed hooks.
    # It is forced on the resume.
    if is_unit_paused_set():
        log("Unit is pause or upgrading. Skipping config_changed", "WARN")
        return

    install_packages()
    install_tmpfilesd()

    # NOTE(jamespage): purge any packages as a result of py3 switch
    #                  at rocky.
    packages_to_purge = determine_purge_packages()
    request_nova_compute_restart = False
    if packages_to_purge:
        purge_packages(packages_to_purge)
        request_nova_compute_restart = True

    sysctl_settings = config('sysctl')
    if not is_container() and sysctl_settings:
        create_sysctl(sysctl_settings,
                      '/etc/sysctl.d/50-openvswitch.conf')

    configure_ovs()
    CONFIGS.write_all()
    # NOTE(fnordahl): configure_sriov must be run after CONFIGS.write_all()
    # to allow us to enable boot time execution of init script
    configure_sriov()
    for rid in relation_ids('neutron-plugin'):
        neutron_plugin_joined(
            relation_id=rid,
            request_restart=request_nova_compute_restart)
Example #12
def neutron_plugin_joined(relation_id=None, request_restart=False):
    secret = None
    if not is_container():
        if enable_local_dhcp():
            install_packages()
        else:
            pkgs = deepcopy(DHCP_PACKAGES)
            # NOTE: only purge metadata packages if dvr is not
            #       in use as this will remove the l3 agent
            #       see https://pad.lv/1515008
            if not use_dvr():
                # NOTE(fnordahl) do not remove ``haproxy``, the principal
                # charm may have use for it. LP: #1832739
                pkgs.extend(set(METADATA_PACKAGES)-set(['haproxy']))
            purge_packages(pkgs)
        secret = get_shared_secret() if enable_nova_metadata() else None
    rel_data = {
        'metadata-shared-secret': secret,
    }
    host_info = os_context.HostInfoContext()()
    if use_fqdn_hint() and host_info.get('host_fqdn'):
        rel_data.update({'host': host_info['host_fqdn']})
    if request_restart:
        rel_data['restart-nonce'] = str(uuid.uuid4())
    relation_set(relation_id=relation_id, **rel_data)
Example #13
def install_packages():
    c = config()
    if c.changed('source') or c.changed('key'):
        add_source(c.get('source'), c.get('key'))
        apt_update(fatal=True)

    if is_container():
        PACKAGES.remove('ntp')

    # NOTE: just use full package list if we're in an upgrade
    #       config-changed execution
    pkgs = (PACKAGES
            if upgrade_available() else filter_installed_packages(PACKAGES))
    if pkgs:
        status_set('maintenance', 'Installing radosgw packages')
        if 'apache2' in pkgs:
            # NOTE(lourot): Apache's default config makes it listen on port 80,
            # which will prevent HAProxy from listening on that same port. We
            # use Apache in this setup however for SSL (different port). We
            # need to let Apache free port 80 before we can install HAProxy
            # otherwise HAProxy will crash. See lp:1904411
            log('Installing Apache')
            apt_install(['apache2'], fatal=True)
            disable_unused_apache_sites()
        apt_install(pkgs, fatal=True)

    pkgs = filter_missing_packages(APACHE_PACKAGES)
    if pkgs:
        apt_purge(pkgs)
Example #15
def _assess_status():
    """Assess status of relations and services for local unit"""
    if is_flag_set('snap.channel.invalid'):
        status_set('blocked',
                   'Invalid snap channel '
                   'configured: {}'.format(config('channel')))
        return
    if is_flag_set('config.dns_vip.invalid'):
        status_set('blocked',
                   'vip and dns-ha-access-record configured')
        return

    health = None
    if service_running('vault'):
        health = vault.get_vault_health()
        application_version_set(health.get('version'))

    _missing_interfaces = []
    _incomplete_interfaces = []

    _assess_interface_groups(REQUIRED_INTERFACES, optional=False,
                             missing_interfaces=_missing_interfaces,
                             incomplete_interfaces=_incomplete_interfaces)

    _assess_interface_groups(OPTIONAL_INTERFACES, optional=True,
                             missing_interfaces=_missing_interfaces,
                             incomplete_interfaces=_incomplete_interfaces)

    if _missing_interfaces or _incomplete_interfaces:
        state = 'blocked' if _missing_interfaces else 'waiting'
        status_set(state, ', '.join(_missing_interfaces +
                                    _incomplete_interfaces))
        return

    if not service_running('vault'):
        status_set('blocked', 'Vault service not running')
        return

    if not health['initialized']:
        status_set('blocked', 'Vault needs to be initialized')
        return

    if health['sealed']:
        status_set('blocked', 'Unit is sealed')
        return

    mlock_disabled = is_container() or config('disable-mlock')

    status_set(
        'active',
        'Unit is ready '
        '(active: {}, mlock: {})'.format(
            str(not health['standby']).lower(),
            'disabled' if mlock_disabled else 'enabled'
        )
    )
Example #16
    def custom_status_check(self):
        if ch_host.is_container():
            self.unit.status = ops.model.BlockedStatus(
                'Charm cannot be deployed into a container')
            return False
        if self.peers.unit_count not in self.ALLOWED_UNIT_COUNTS:
            self.unit.status = ops.model.BlockedStatus(
                '{} is an invalid unit count'.format(self.peers.unit_count))
            return False
        return True
Example #17
def install_packages():
    add_source(config('source'), config('key'))
    apt_update(fatal=True)
    if is_container():
        PACKAGES.remove('ntp')
    pkgs = filter_installed_packages(PACKAGES)
    if pkgs:
        status_set('maintenance', 'Installing radosgw packages')
        apt_install(pkgs, fatal=True)
    apt_purge(APACHE_PACKAGES)
Example #18
def configure_system_for_redis():
    if not is_container():
        with open('/etc/sysctl.conf', 'a') as f:
            f.write("\nvm.overcommit_memory = 1")
        call('sysctl vm.overcommit_memory=1'.split())

        with open('/sys/kernel/mm/transparent_hugepage/enabled', 'w') as f:
            f.write('never')

        with open('/proc/sys/net/core/somaxconn', 'w') as f:
            f.write('1024')

    set_flag('redis.system.configured')
Example #19
    def calculate_threads(self):
        """
        Determine the number of erl vm threads in pool based on cpu resources
        available.

        Number of threads will be limited to MAX_DEFAULT_THREADS in
        container environments where no erl-vm-io-thread-multiplier
        configuration option has been set.

        @returns int: number of io threads to allocate
        """

        try:
            num_cpus = psutil.cpu_count()
        except AttributeError:
            num_cpus = psutil.NUM_CPUS

        multiplier = (config('erl-vm-io-thread-multiplier')
                      or DEFAULT_MULTIPLIER)

        log(
            "Calculating erl vm io thread pool size based on num_cpus={} and "
            "multiplier={}".format(num_cpus, multiplier), DEBUG)

        count = int(num_cpus * multiplier)
        if multiplier > 0 and count == 0:
            count = 1

        if config('erl-vm-io-thread-multiplier') is None and is_container():
            # NOTE(hopem): Limit unconfigured erl-vm-io-thread-multiplier
            #              to MAX_DEFAULT_THREADS to avoid insane pool
            #              configuration in LXD containers on large servers.
            count = min(count, MAX_DEFAULT_THREADS)

        log(
            "erl vm io thread pool size = {} (capped={})".format(
                count, is_container()), DEBUG)

        return count
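
To make the capping concrete, assume illustrative constants DEFAULT_MULTIPLIER = 1.5 and MAX_DEFAULT_THREADS = 24 (the real values live in the charm) and an unset erl-vm-io-thread-multiplier on a 32-core host:

num_cpus, multiplier = 32, 1.5      # psutil.cpu_count(), DEFAULT_MULTIPLIER
count = int(num_cpus * multiplier)  # 48 on bare metal
count = min(count, 24)              # capped at MAX_DEFAULT_THREADS in a container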
Example #20
def install_udev_rules():
    """
    Install and reload udev rules for ceph-volume LV
    permissions
    """
    if is_container():
        log('Skipping udev rule installation '
            'as unit is in a container',
            level=DEBUG)
        return
    for x in glob.glob('files/udev/*'):
        shutil.copy(x, '/lib/udev/rules.d')
    subprocess.check_call(['udevadm', 'control', '--reload-rules'])
Example #22
def render_elasticsearch_yml(elasticsearch_yml_template=None,
                             extra_ctxt=None) -> None:
    '''
    Render /etc/elasticsearch/elasticsearch.yml
    '''

    status_set('maintenance', 'Writing /etc/elasticsearch/elasticsearch.yml')

    ctxt = {
        'cluster_name': config('cluster-name'),
        'cluster_network_ip': ES_CLUSTER_INGRESS_ADDRESS,
        'node_type': NODE_TYPE_MAP[config('node-type')],
        'custom_config': config('custom-config'),
        'xpack_security_enabled': 'xpack.security.enabled: {}'.format(
            'true' if config('xpack-security-enabled') else 'false'),
    }

    if is_container():
        ctxt['bootstrap_memory_lock'] = \
            kv.get('bootstrap_memory_lock')
        ctxt['discovery_type'] = \
            kv.get('discovery_type')

    if config('xpack-security-enabled'):
        ctxt['xpack_security_transport_ssl_enabled'] = (
            'xpack.security.transport.ssl.enabled: true')
        ctxt['xpack_security_transport_ssl_verification_mode'] = (
            'xpack.security.transport.ssl.verification_mode: certificate')
        ctxt['xpack_security_transport_ssl_keystore_path'] = (
            'xpack.security.transport.ssl.keystore.path: '
            'certs/elastic-certificates.p12')
        ctxt['xpack_security_transport_ssl_truststore_path'] = (
            'xpack.security.transport.ssl.truststore.path: '
            'certs/elastic-certificates.p12')

    if extra_ctxt is not None:
        ctxt = {**ctxt, **extra_ctxt}

    if elasticsearch_yml_template is None:
        elasticsearch_yml_tmpl = "elasticsearch.yml.j2"
    else:
        elasticsearch_yml_tmpl = elasticsearch_yml_template

    render_elasticsearch_file(template_name=elasticsearch_yml_tmpl,
                              target=ES_YML_PATH,
                              ctxt=ctxt)
Example #23
def configure_kube_proxy(configure_prefix, api_servers, cluster_cidr):
    kube_proxy_opts = {}
    kube_proxy_opts['cluster-cidr'] = cluster_cidr
    kube_proxy_opts['kubeconfig'] = kubeproxyconfig_path
    kube_proxy_opts['logtostderr'] = 'true'
    kube_proxy_opts['v'] = '0'
    kube_proxy_opts['master'] = random.choice(api_servers)
    kube_proxy_opts['hostname-override'] = get_node_name()

    if host.is_container():
        kube_proxy_opts['conntrack-max-per-core'] = '0'

    configure_kubernetes_service(configure_prefix, 'kube-proxy',
                                 kube_proxy_opts, 'proxy-extra-args')
Example #24
    def custom_status_check(self):
        """Custom status check.

        Inform the operator if the charm has been deployed in a container.

        :returns: Status, with a caveat message when running in a container.
        :rtype: ops.model.ActiveStatus
        """
        if ch_host.is_container():
            return ops.model.ActiveStatus(
                "Some charm actions cannot be performed when deployed in a "
                "container")
        else:
            return ops.model.ActiveStatus()
Example #26
def enable_br_netfilter_module():
    """
    Enable br_netfilter to work around https://github.com/kubernetes/kubernetes/issues/21613.

    :return: None
    """
    try:
        modprobe('br_netfilter', persist=True)
    except Exception:
        log(traceback.format_exc())
        if host.is_container():
            log('LXD detected, ignoring failure to load br_netfilter')
        else:
            log('LXD not detected, will retry loading br_netfilter')
            return
    set_state('containerd.br_netfilter.enabled')
Example #27
    def disable_mlockall(self):
        '''
        Determine if Open vSwitch use of mlockall() should be disabled

        If the disable-mlockall config option is unset, mlockall will be
        disabled if running in a container and will default to enabled if
        not running in a container.
        '''
        disable_mlockall = config('disable-mlockall')
        if disable_mlockall is None:
            disable_mlockall = is_container()
        cmp_release = CompareOpenStackReleases(
            os_release('neutron-common', base='icehouse'))
        return (cmp_release >= 'mitaka' and disable_mlockall)
Example #28
def write_sysctl():
    """
    :return: None
    """
    sysctl_settings = hookenv.config('sysctl')
    if sysctl_settings and not is_container():
        create_sysctl(
            sysctl_settings,
            '/etc/sysctl.d/50-kubernetes-charm.conf',
            # Some keys in the config may not exist in /proc/sys/net/.
            # For example, the conntrack module may not be loaded when
            # using lxd drivers instead of kvm. In these cases, we
            # simply ignore the missing keys, rather than making time
            # consuming calls out to the filesystem to check for their
            # existence.
            ignore=True)
Example #29
def reset_sysctl():
    if host.is_container():
        hookenv.log("In a container, not changing kernel settings")
    else:
        cassandra_sysctl_file = os.path.join('/', 'etc', 'sysctl.d', '99-cassandra.conf')
        contents = b"vm.max_map_count = 131072\n"
        try:
            host.write_file(cassandra_sysctl_file, contents)
            subprocess.check_call(['sysctl', '-p', cassandra_sysctl_file])
        except OSError as e:
            if e.errno == errno.EACCES:
                hookenv.log("Ignoring permission Denied trying to set the "
                            "sysctl settings at {}".format(cassandra_sysctl_file),
                            WARNING)
            else:
                raise
    reactive.set_flag("cassandra.kernelsettings.done")
Example #30
    def on_install(self, event):
        """Event handler on install.

        :param event: Event
        :type event: Operator framework event object
        :returns: This method is called for its side effects
        :rtype: None
        """
        if ch_host.is_container():
            logging.warning("Some charm actions cannot be performed while "
                            "deployed in a container.")
        self.unit.status = ops.model.MaintenanceStatus(
            "Installing packages and snaps")
        self.install_pkgs()
        # Perform install tasks
        snap_path = None
        try:
            snap_path = self.model.resources.fetch(self.SNAP_NAME)
        except ops.model.ModelError:
            self.unit.status = ops.model.BlockedStatus(
                "Upload swift-bench snap resource to proceed")
            logging.warning(
                "No snap resource available, install blocked, deferring event:"
                " {}".format(event.handle))
            self._defer_once(event)

            return
        # Install the resource
        try:
            snap.snap_install(
                str(snap_path), "--dangerous",
                "--classic")  # TODO: Remove devmode when snap is ready
            # Set the snap has been installed
            self._stored.swift_bench_snap_installed = True
        except snap.CouldNotAcquireLockException:
            self.unit.status = ops.model.BlockedStatus(
                "Resource failed to install")
            logging.error(
                "Could not install resource, deferring event: {}".format(
                    event.handle))
            self._defer_once(event)

            return
        self.unit.status = ops.model.MaintenanceStatus("Install complete")
        logging.info("Install of software complete")
        self.state.installed = True
Example #31
def set_io_scheduler(io_scheduler, directory):
    '''Set the block device io scheduler.'''

    if host.is_container():
        return

    hookenv.log("Setting block device of {} to IO scheduler {}"
                "".format(directory, io_scheduler))

    assert os.path.isdir(directory)

    # The block device regex may be a tad simplistic.
    block_regex = re.compile(r'/dev/([a-z]*)', re.IGNORECASE)

    output = subprocess.check_output(['df', directory],
                                     universal_newlines=True)
    try:
        block_dev = re.findall(block_regex, output)[0]
    except IndexError:
        hookenv.log("Unable to locate block device of {}".format(directory))
        return
    sys_file = os.path.join("/", "sys", "block", block_dev,
                            "queue", "scheduler")
    if not os.path.exists(sys_file):
        hookenv.log("Got no such file or directory trying to "
                    "set the IO scheduler at {}. It may be "
                    "this is an LXC, the device name is as "
                    "yet unknown to the charm, or LVM/RAID is "
                    "hiding the underlying device name."
                    "".format(sys_file),
                    WARNING)
        return

    with open(sys_file, 'r') as f:
        available = f.read().split()
    if '[{}]'.format(io_scheduler) in available:
        hookenv.log('{} already {}'.format(sys_file, io_scheduler), DEBUG)
        return

    if io_scheduler not in available:
        hookenv.log('{} is not valid for {}'.format(io_scheduler, sys_file),
                    WARNING)
        return

    host.write_file(sys_file, io_scheduler.encode('ascii'),
                    perms=0o644)
Example #32
def config_changed(check_deferred_restarts=True):
    configure_deferred_restarts(deferrable_services())
    # policy_rcd.remove_policy_file()
    # if we are paused, delay doing any config changed hooks.
    # It is forced on the resume.
    allowed, reason = is_hook_allowed(
        'config-changed', check_deferred_restarts=check_deferred_restarts)
    if not allowed:
        log(reason, "WARN")
        return

    install_packages()
    install_tmpfilesd()

    # NOTE(jamespage): purge any packages as a result of py3 switch
    #                  at rocky.
    packages_to_purge = determine_purge_packages()
    request_nova_compute_restart = False
    if packages_to_purge:
        purge_packages(packages_to_purge)
        request_nova_compute_restart = True

    sysctl_settings = config('sysctl')
    if not is_container() and sysctl_settings:
        create_sysctl(sysctl_settings, '/etc/sysctl.d/50-openvswitch.conf')

    # NOTE(fnordahl): It is important to write config to disk and perhaps
    # restart the openvswitch-switch service prior to attempting to do run-time
    # configuration of OVS as we may have to pass options to `ovs-ctl` for
    # `ovs-vswitchd` to run at all. LP: #1906280
    # TODO: make restart_on_change use contextlib.contextmanager
    @restart_on_change({
        cfg: services
        for cfg, services in restart_map().items() if cfg == OVS_DEFAULT
    })
    def _restart_before_runtime_config_when_required():
        CONFIGS.write_all()

    _restart_before_runtime_config_when_required()
    configure_ovs()

    for rid in relation_ids('neutron-plugin'):
        neutron_plugin_joined(relation_id=rid,
                              request_restart=request_nova_compute_restart)
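
The inner function above exists only so the restart_on_change decorator can compare file hashes before and after CONFIGS.write_all(). The TODO hints at a contextmanager shape; a rough sketch of what that could look like, built on charmhelpers' real path_hash and service_restart helpers (restart_on_change_ctx itself is hypothetical):

from contextlib import contextmanager

from charmhelpers.core.host import path_hash, service_restart

@contextmanager
def restart_on_change_ctx(restart_map_subset):
    """Restart services whose config files changed inside the block."""
    before = {path: path_hash(path) for path in restart_map_subset}
    yield
    for path, services in restart_map_subset.items():
        if path_hash(path) != before[path]:
            for svc in services:
                service_restart(svc)

# usage sketch:
# with restart_on_change_ctx({OVS_DEFAULT: ['openvswitch-switch']}):
#     CONFIGS.write_all()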
Example #33
def configure_lxd_host():
    ubuntu_release = lsb_release()['DISTRIB_CODENAME'].lower()
    cmp_ubuntu_release = CompareHostReleases(ubuntu_release)
    if cmp_ubuntu_release > "vivid":
        log('>= Wily deployment - configuring LXD trust password and address',
            level=INFO)
        cmd = ['lxc', 'config', 'set',
               'core.trust_password', lxd_trust_password()]
        check_call(cmd)
        cmd = ['lxc', 'config', 'set',
               'core.https_address', '[::]']
        check_call(cmd)

        if not is_container():
            # NOTE(jamespage): None of the below is worth doing when running
            #                  within a container on an all-in-one install

            # Configure live migration
            if cmp_ubuntu_release == 'xenial':
                uname = os.uname()[2]
                if uname > '4.4.0-122-generic':
                    pkg = "linux-modules-extra-{}"
                else:
                    pkg = "linux-image-extra-{}"
                apt_install(pkg.format(uname), fatal=True)

            if cmp_ubuntu_release >= 'xenial':
                modprobe('netlink_diag')

            # Enable/disable use of ext4 within nova-lxd containers
            if os.path.exists(EXT4_USERNS_MOUNTS):
                with open(EXT4_USERNS_MOUNTS, 'w') as userns_mounts:
                    userns_mounts.write(
                        'Y\n' if config('enable-ext4-userns') else 'N\n'
                    )

        configure_uid_mapping()
    elif cmp_ubuntu_release == "vivid":
        log('Vivid deployment - loading overlay kernel module', level=INFO)
        cmd = ['modprobe', 'overlay']
        check_call(cmd)
        with open('/etc/modules', 'r+') as modules:
            if 'overlay' not in modules.read():
                modules.write('overlay\n')
Example #35
def install_packages():
    # NOTE(jamespage): install neutron-common package so we always
    #                  get a clear signal on which OS release is
    #                  being deployed
    apt_install(filter_installed_packages(['neutron-common']), fatal=True)
    # NOTE(jamespage):
    # networking-tools-source provides general tooling for configuration
    # of SR-IOV VFs and Mellanox ConnectX switchdev capable adapters
    # The default PPA published packages back to Xenial, which covers
    # all target series for this charm.
    if config('networking-tools-source') and \
       (use_dpdk() or enable_sriov() or use_hw_offload()):
        add_source(config('networking-tools-source'))
    apt_update()
    # NOTE(jamespage): ensure early install of dkms related
    #                  dependencies for kernels which need
    #                  openvswitch via dkms (12.04).
    dkms_packages = determine_dkms_package()
    if dkms_packages:
        apt_install([headers_package()] + dkms_packages, fatal=True)
    missing_packages = filter_installed_packages(determine_packages())
    if missing_packages:
        status_set('maintenance', 'Installing packages')
        apt_install(missing_packages, fatal=True)
    if use_dpdk():
        enable_ovs_dpdk()

    if use_hw_offload():
        enable_hw_offload()

    # NOTE(tpsilva): if we're using openvswitch driver, we need to explicitly
    #                load the nf_conntrack_ipv4/6 module, since it won't be
    #                loaded automatically in some cases. LP#1834213
    if not is_container() and config('firewall-driver') == 'openvswitch':
        try:
            modprobe('nf_conntrack_ipv4', True)
            modprobe('nf_conntrack_ipv6', True)
        except subprocess.CalledProcessError:
            # Newer kernel versions (4.19+) don't have two modules for that, so
            # only load nf_conntrack
            log("This kernel does not have nf_conntrack_ipv4/6. "
                "Loading nf_conntrack only.")
            modprobe('nf_conntrack', True)
Example #36
    def install(self):
        """
        Install the base components of Apache Bigtop.

        You will then need to call `render_site_yaml` to set up the correct
        configuration and `trigger_puppet` to install the desired components.
        """
        if not is_container():
            # Only configure swap in non-containerized envs. Containers inherit
            # the host OS swap config.
            self.install_swap()
        self.install_java()
        self.pin_bigtop_packages()
        self.check_localdomain()
        self.check_reverse_dns()
        self.fetch_bigtop_release()
        self.install_puppet_modules()
        self.apply_patches()
        self.render_hiera_yaml()
Example #37
def create(sysctl_dict, sysctl_file, ignore=False):
    """Creates a sysctl.conf file from a YAML associative array

    :param sysctl_dict: a dict or YAML-formatted string of sysctl
                        options eg "{ 'kernel.max_pid': 1337 }"
    :type sysctl_dict: str or dict
    :param sysctl_file: path to the sysctl file to be saved
    :type sysctl_file: str or unicode
    :param ignore: If True, ignore "unknown variable" errors.
    :type ignore: bool
    :returns: None
    """
    if type(sysctl_dict) is not dict:
        try:
            sysctl_dict_parsed = yaml.safe_load(sysctl_dict)
        except yaml.YAMLError:
            log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict),
                level=ERROR)
            return
    else:
        sysctl_dict_parsed = sysctl_dict

    with open(sysctl_file, "w") as fd:
        for key, value in sysctl_dict_parsed.items():
            fd.write("{}={}\n".format(key, value))

    log("Updating sysctl_file: {} values: {}".format(sysctl_file,
                                                     sysctl_dict_parsed),
        level=DEBUG)

    call = ["sysctl", "-p", sysctl_file]
    if ignore:
        call.append("-e")

    try:
        check_call(call)
    except CalledProcessError as e:
        if is_container():
            log("Error setting some sysctl keys in this container: {}".format(
                e.output),
                level=WARNING)
        else:
            raise e
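
A usage sketch for create(): it accepts either a dict or a YAML string, so both calls below write the same file and apply it with sysctl -p (file path illustrative):

create({'kernel.max_pid': 1337}, '/etc/sysctl.d/50-example.conf')
create("{ 'kernel.max_pid': 1337 }",
       '/etc/sysctl.d/50-example.conf',
       ignore=True)  # adds -e: tolerate keys absent in this kernel/container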
Example #38
def swapoff(fstab_path='/etc/fstab'):
    '''Turn off swapping on the machine, permanently.'''
    # Turn off swap in the current session
    if host.is_container():
        hookenv.log("In a container, not touching swap.")
    else:
        try:
            hookenv.log("Turning off swap (swapoff -a)")
            subprocess.check_call(['swapoff', '-a'])
            hookenv.log("Removing swap entries from {}".format(fstab_path))
            with closing(fstab.Fstab(fstab_path)) as f:
                while True:
                    swap_entry = f.get_entry_by_attr('filesystem', 'swap')
                    if swap_entry is None:
                        break
                    f.remove_entry(swap_entry)
        except Exception as e:
            hookenv.log("Ignoring an error trying to turn off swap: {}".format(e), WARNING)
            return  # cassandra.swapoff.done state not set, will be attempted again.
    reactive.set_flag('cassandra.swapoff.done')
Example #39
def ensure_snapd():
    if not snapd_supported():
        hookenv.log('Snaps do not work in this environment', hookenv.ERROR)
        return

    # I don't use the apt layer, because that would tie this layer
    # too closely to apt packaging. Perhaps this is a snap-only system.
    if not shutil.which('snap'):
        cmd = ['apt', 'install', '-y', 'snapd']
        # LP:1699986: Force install of systemd on Trusty.
        if get_series() == 'trusty':
            cmd.append('systemd')
        subprocess.check_call(cmd, universal_newlines=True)

    # Work around lp:1628289. Remove this stanza once snapd depends
    # on the necessary package and snaps work in lxd xenial containers
    # without the workaround.
    if host.is_container() and not shutil.which('squashfuse'):
        cmd = ['apt', 'install', '-y', 'squashfuse', 'fuse']
        subprocess.check_call(cmd, universal_newlines=True)
Example #40
def install_packages():
    c = config()
    if c.changed('source') or c.changed('key'):
        add_source(c.get('source'), c.get('key'))
        apt_update(fatal=True)

    if is_container():
        PACKAGES.remove('ntp')

    # NOTE: just use full package list if we're in an upgrade
    #       config-changed execution
    pkgs = (
        PACKAGES if upgrade_available() else
        filter_installed_packages(PACKAGES)
    )
    if pkgs:
        status_set('maintenance', 'Installing radosgw packages')
        apt_install(pkgs, fatal=True)

    pkgs = filter_missing_packages(APACHE_PACKAGES)
    if pkgs:
        apt_purge(pkgs)

    disable_unused_apache_sites()
Example #41
def config_changed():

    if is_unit_paused_set():
        log("Do not run config_changed when paused", "WARNING")
        return

    if config('ephemeral-unmount'):
        umount(config('ephemeral-unmount'), persist=True)

    if config('prefer-ipv6'):
        status_set('maintenance', 'configuring ipv6')
        assert_charm_supports_ipv6()

    if (migration_enabled() and
            config('migration-auth-type') not in MIGRATION_AUTH_TYPES):
        message = ("Invalid migration-auth-type")
        status_set('blocked', message)
        raise Exception(message)
    global CONFIGS
    send_remote_restart = False
    if not config('action-managed-upgrade'):
        if openstack_upgrade_available('nova-common'):
            status_set('maintenance', 'Running openstack upgrade')
            do_openstack_upgrade(CONFIGS)
            send_remote_restart = True

    sysctl_settings = config('sysctl')
    if sysctl_settings and not is_container():
        create_sysctl(
            sysctl_settings,
            '/etc/sysctl.d/50-nova-compute.conf',
            # Some keys in the config may not exist in /proc/sys/net/.
            # For example, the conntrack module may not be loaded when
            # using lxd drivers instead of kvm. In these cases, we
            # simply ignore the missing keys, rather than making time
            # consuming calls out to the filesystem to check for their
            # existence.
            ignore=True)

    remove_libvirt_network('default')

    if migration_enabled() and config('migration-auth-type') == 'ssh':
        # Check-in with nova-c-c and register new ssh key, if it has just been
        # generated.
        status_set('maintenance', 'SSH key exchange')
        initialize_ssh_keys()
        import_authorized_keys()

    if config('enable-resize') is True:
        enable_shell(user='nova')
        status_set('maintenance', 'SSH key exchange')
        initialize_ssh_keys(user='nova')
        import_authorized_keys(user='nova', prefix='nova')
    else:
        disable_shell(user='nova')

    if config('instances-path') is not None:
        fp = config('instances-path')
        fix_path_ownership(fp, user='nova')

    for rid in relation_ids('cloud-compute'):
        compute_joined(rid)

    for rid in relation_ids('neutron-plugin'):
        neutron_plugin_joined(rid, remote_restart=send_remote_restart)

    for rid in relation_ids('nova-ceilometer'):
        nova_ceilometer_joined(rid, remote_restart=send_remote_restart)

    if is_relation_made("nrpe-external-master"):
        update_nrpe_config()

    if config('hugepages'):
        install_hugepages()

    # Disable smt for ppc64, required for nova/libvirt/kvm
    arch = platform.machine()
    log('CPU architecture: {}'.format(arch))
    if arch in ['ppc64el', 'ppc64le']:
        set_ppc64_cpu_smt_state('off')

    # NOTE(jamespage): trigger any configuration related changes
    #                  for cephx permissions restrictions and
    #                  keys on disk for ceph-access backends
    for rid in relation_ids('ceph'):
        for unit in related_units(rid):
            ceph_changed(rid=rid, unit=unit)
    for rid in relation_ids('ceph-access'):
        for unit in related_units(rid):
            ceph_access(rid=rid, unit=unit)

    CONFIGS.write_all()

    NovaComputeAppArmorContext().setup_aa_profile()
    if (network_manager() in ['flatmanager', 'flatdhcpmanager'] and
            config('multi-host').lower() == 'yes'):
        NovaAPIAppArmorContext().setup_aa_profile()
        NovaNetworkAppArmorContext().setup_aa_profile()

    install_vaultlocker()
    install_multipath()

    configure_local_ephemeral_storage()
Example #42
def snapd_supported():
    # snaps are not supported in trusty lxc containers.
    if get_series() == 'trusty' and host.is_container():
        return False
    return True  # For all other cases, assume true.
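
A pattern recurs throughout these examples: skip kernel-level work (sysctl, modules, swap, udev rules, IO schedulers) when the unit runs in a container, because that state belongs to the host. A distilled sketch of the pattern as a decorator (host_only is hypothetical, not part of charmhelpers; is_container and log are real charmhelpers functions):

from functools import wraps

from charmhelpers.core.hookenv import WARNING, log
from charmhelpers.core.host import is_container

def host_only(fn):
    """Skip fn inside containers, where kernel state belongs to the host."""
    @wraps(fn)
    def wrapper(*args, **kwargs):
        if is_container():
            log("Skipping {} inside a container".format(fn.__name__),
                level=WARNING)
            return None
        return fn(*args, **kwargs)
    return wrapper

# e.g.:
# @host_only
# def install_udev_rules():
#     ...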