def __call__(self):
        """Assemble the template context for libvirt / nova-compute config.

        Returns a dict of settings derived from charm config options, the
        host's Ubuntu release and the deployed OpenStack release; consumed
        by the libvirtd / qemu / nova.conf templates.
        """
        # distro defaults
        ctxt = {
            # /etc/libvirt/libvirtd.conf (
            'listen_tls': 0
        }
        # Comparators so the context can vary by Ubuntu codename and by
        # OpenStack release (detected from the nova-common package).
        cmp_distro_codename = CompareHostReleases(
            lsb_release()['DISTRIB_CODENAME'].lower())
        cmp_os_release = CompareOpenStackReleases(os_release('nova-common'))

        # NOTE(jamespage): deal with switch to systemd
        if cmp_distro_codename < "wily":
            ctxt['libvirtd_opts'] = '-d'
        else:
            ctxt['libvirtd_opts'] = ''

        # NOTE(jamespage): deal with alignment with Debian in
        #                  Ubuntu yakkety and beyond.
        # NOTE(review): both branches assign the same masked value; the
        # original usernames appear redacted in this copy — confirm the
        # real per-release values against upstream before relying on this.
        if cmp_distro_codename >= 'yakkety' or cmp_os_release >= 'ocata':
            ctxt['libvirt_user'] = '******'
        else:
            ctxt['libvirt_user'] = '******'

        # get the processor architecture to use in the nova.conf template
        ctxt['arch'] = platform.machine()

        # enable tcp listening if configured for live migration.
        if config('enable-live-migration'):
            ctxt['libvirtd_opts'] += ' -l'

        # TLS transport is unnecessary for 'none'/'None'/'ssh' auth types.
        if config('migration-auth-type') in ['none', 'None', 'ssh']:
            ctxt['listen_tls'] = 0

        if config('migration-auth-type') == 'ssh':
            # nova.conf
            ctxt['live_migration_uri'] = 'qemu+ssh://%s/system'

        # 'is not None' rather than truthiness so an explicitly empty
        # string still reaches the template.
        if config('instances-path') is not None:
            ctxt['instances_path'] = config('instances-path')

        if config('disk-cachemodes'):
            ctxt['disk_cachemodes'] = config('disk-cachemodes')

        # cpu-mode: explicit config wins; otherwise pick a per-arch
        # default (host-passthrough on ppc64, 'none' on s390x).
        if config('cpu-mode'):
            ctxt['cpu_mode'] = config('cpu-mode')
        elif ctxt['arch'] in ['ppc64el', 'ppc64le']:
            ctxt['cpu_mode'] = 'host-passthrough'
        elif ctxt['arch'] == 's390x':
            ctxt['cpu_mode'] = 'none'

        if config('cpu-model'):
            ctxt['cpu_model'] = config('cpu-model')

        if config('hugepages'):
            ctxt['hugepages'] = True
            ctxt['kvm_hugepages'] = 1
        else:
            ctxt['kvm_hugepages'] = 0

        # ksm accepts explicit "1"/"0"; any other value falls back to a
        # release-dependent default ("1" pre-kilo, "AUTO" from kilo on).
        if config('ksm') in (
                "1",
                "0",
        ):
            ctxt['ksm'] = config('ksm')
        else:
            if cmp_os_release < 'kilo':
                log("KSM set to 1 by default on openstack releases < kilo",
                    level=INFO)
                ctxt['ksm'] = "1"
            else:
                ctxt['ksm'] = "AUTO"

        if config('pci-passthrough-whitelist'):
            ctxt['pci_passthrough_whitelist'] = \
                config('pci-passthrough-whitelist')

        if config('vcpu-pin-set'):
            ctxt['vcpu_pin_set'] = config('vcpu-pin-set')

        ctxt['reserved_host_memory'] = config('reserved-host-memory')

        # Persist a stable host UUID in the unit kv store: generate one on
        # first call, reuse it on subsequent hook executions.
        db = kv()
        if db.get('host_uuid'):
            ctxt['host_uuid'] = db.get('host_uuid')
        else:
            host_uuid = str(uuid.uuid4())
            db.set('host_uuid', host_uuid)
            db.flush()
            ctxt['host_uuid'] = host_uuid

        if config('libvirt-image-backend'):
            ctxt['libvirt_images_type'] = config('libvirt-image-backend')

        return ctxt
예제 #2
0
    def __call__(self):
        """Assemble the template context for libvirt / nova-compute config.

        Extended variant: adds live-migration tuning, multipath, ephemeral
        format, extra CPU flags, huge-page reservations, pci-alias JSON
        handling and virtio queue sizing on top of the distro defaults.
        Returns a dict consumed by the libvirtd / qemu / nova.conf
        templates.
        """
        # distro defaults
        ctxt = {
            # /etc/libvirt/libvirtd.conf (
            'listen_tls': 0
        }
        # Comparators so the context can vary by Ubuntu codename and by
        # OpenStack release (detected from the nova-common package).
        cmp_distro_codename = CompareHostReleases(
            lsb_release()['DISTRIB_CODENAME'].lower())
        cmp_os_release = CompareOpenStackReleases(os_release('nova-common'))

        # NOTE(jamespage): deal with switch to systemd
        if cmp_distro_codename < "wily":
            ctxt['libvirtd_opts'] = '-d'
        else:
            ctxt['libvirtd_opts'] = ''

        # NOTE(jamespage): deal with alignment with Debian in
        #                  Ubuntu yakkety and beyond.
        # NOTE(review): both branches assign the same masked value; the
        # original usernames appear redacted in this copy — confirm the
        # real per-release values against upstream before relying on this.
        if cmp_distro_codename >= 'yakkety' or cmp_os_release >= 'ocata':
            ctxt['libvirt_user'] = '******'
        else:
            ctxt['libvirt_user'] = '******'

        # get the processor architecture to use in the nova.conf template
        ctxt['arch'] = platform.machine()

        # enable tcp listening if configured for live migration.
        if config('enable-live-migration'):
            ctxt['libvirtd_opts'] += ' -l'

        # TLS transport is unnecessary for 'none'/'None'/'ssh' auth types.
        if config('enable-live-migration') and \
                config('migration-auth-type') in ['none', 'None', 'ssh']:
            ctxt['listen_tls'] = 0

        # SSH-based migration: from ocata on, use the scheme plus an
        # inbound address on the migration network; older releases use
        # the legacy qemu+ssh URI template instead.
        if config('enable-live-migration') and \
                config('migration-auth-type') == 'ssh':
            migration_address = get_relation_ip(
                'migration', cidr_network=config('libvirt-migration-network'))

            if cmp_os_release >= 'ocata':
                ctxt['live_migration_scheme'] = config('migration-auth-type')
                ctxt['live_migration_inbound_addr'] = migration_address
            else:
                ctxt['live_migration_uri'] = 'qemu+ssh://%s/system'

        # Live-migration tuning knobs, passed through verbatim from the
        # charm options.
        if config('enable-live-migration'):
            ctxt['live_migration_completion_timeout'] = \
                config('live-migration-completion-timeout')
            ctxt['live_migration_downtime'] = \
                config('live-migration-downtime')
            ctxt['live_migration_downtime_steps'] = \
                config('live-migration-downtime-steps')
            ctxt['live_migration_downtime_delay'] = \
                config('live-migration-downtime-delay')
            ctxt['live_migration_permit_post_copy'] = \
                config('live-migration-permit-post-copy')
            ctxt['live_migration_permit_auto_converge'] = \
                config('live-migration-permit-auto-converge')

        # 'is not None' rather than truthiness so an explicitly empty
        # string still reaches the template.
        if config('instances-path') is not None:
            ctxt['instances_path'] = config('instances-path')

        if config('disk-cachemodes'):
            ctxt['disk_cachemodes'] = config('disk-cachemodes')

        if config('use-multipath'):
            ctxt['use_multipath'] = config('use-multipath')

        if config('default-ephemeral-format'):
            ctxt['default_ephemeral_format'] = \
                config('default-ephemeral-format')

        # cpu-mode: explicit config wins; otherwise pick a per-arch
        # default (host-passthrough on ppc64/aarch64, 'none' on s390x).
        if config('cpu-mode'):
            ctxt['cpu_mode'] = config('cpu-mode')
        elif ctxt['arch'] in ('ppc64el', 'ppc64le', 'aarch64'):
            ctxt['cpu_mode'] = 'host-passthrough'
        elif ctxt['arch'] == 's390x':
            ctxt['cpu_mode'] = 'none'

        if config('cpu-model'):
            ctxt['cpu_model'] = config('cpu-model')

        # Config option is space separated; the template expects a
        # comma-separated list.
        if config('cpu-model-extra-flags'):
            ctxt['cpu_model_extra_flags'] = ', '.join(
                config('cpu-model-extra-flags').split(' '))

        if config('hugepages'):
            ctxt['hugepages'] = True
            ctxt['kvm_hugepages'] = 1
        else:
            ctxt['kvm_hugepages'] = 0

        # ksm accepts explicit "1"/"0"; any other value falls back to a
        # release-dependent default ("1" pre-kilo, "AUTO" from kilo on).
        if config('ksm') in (
                "1",
                "0",
        ):
            ctxt['ksm'] = config('ksm')
        else:
            if cmp_os_release < 'kilo':
                log("KSM set to 1 by default on openstack releases < kilo",
                    level=INFO)
                ctxt['ksm'] = "1"
            else:
                ctxt['ksm'] = "AUTO"

        if config('reserved-huge-pages'):
            # To bypass juju limitation with list of strings, we
            # consider separate the option's values per semicolons.
            ctxt['reserved_huge_pages'] = ([
                o.strip() for o in config('reserved-huge-pages').split(";")
            ])
        if config('pci-passthrough-whitelist'):
            ctxt['pci_passthrough_whitelist'] = \
                config('pci-passthrough-whitelist')

        # pci-alias holds JSON: a list becomes one serialized alias per
        # entry on queens+; otherwise (or pre-queens) the whole document
        # is re-serialized as a single value.
        if config('pci-alias'):
            aliases = json.loads(config('pci-alias'))
            # Behavior previous to queens is maintained as it was
            if isinstance(aliases, list) and cmp_os_release >= 'queens':
                ctxt['pci_aliases'] = [
                    json.dumps(x, sort_keys=True) for x in aliases
                ]
            else:
                ctxt['pci_alias'] = json.dumps(aliases, sort_keys=True)

        # cpu-dedicated-set supersedes the deprecated vcpu-pin-set; only
        # one of the two is emitted.
        if config('cpu-dedicated-set'):
            ctxt['cpu_dedicated_set'] = config('cpu-dedicated-set')
        elif config('vcpu-pin-set'):
            ctxt['vcpu_pin_set'] = config('vcpu-pin-set')

        if config('cpu-shared-set'):
            ctxt['cpu_shared_set'] = config('cpu-shared-set')

        if config('virtio-net-tx-queue-size'):
            ctxt['virtio_net_tx_queue_size'] = (
                config('virtio-net-tx-queue-size'))
        if config('virtio-net-rx-queue-size'):
            ctxt['virtio_net_rx_queue_size'] = (
                config('virtio-net-rx-queue-size'))

        ctxt['reserved_host_memory'] = config('reserved-host-memory')

        # Persist a stable host UUID in the unit kv store: generate one on
        # first call, reuse it on subsequent hook executions.
        db = kv()
        if db.get('host_uuid'):
            ctxt['host_uuid'] = db.get('host_uuid')
        else:
            host_uuid = str(uuid.uuid4())
            db.set('host_uuid', host_uuid)
            db.flush()
            ctxt['host_uuid'] = host_uuid

        if config('libvirt-image-backend'):
            ctxt['libvirt_images_type'] = config('libvirt-image-backend')

        ctxt['force_raw_images'] = config('force-raw-images')
        ctxt['inject_password'] = config('inject-password')
        # if allow the injection of an admin password it depends
        # on value greater or equal to -1 for inject_partition
        # -2 means disable the injection of data
        ctxt['inject_partition'] = -1 if config('inject-password') else -2

        return ctxt
def resolve_data_dir():
    """Return the MySQL data directory for this Ubuntu release.

    Percona XtraDB Cluster packages moved the datadir from vivid onward.
    """
    codename = lsb_release()['DISTRIB_CODENAME'].lower()
    if CompareHostReleases(codename) >= 'vivid':
        return '/var/lib/percona-xtradb-cluster'
    return '/var/lib/mysql'
def resolve_cnf_file():
    """Return the MySQL configuration file path for this Ubuntu release.

    Percona XtraDB Cluster packages moved the config file from vivid
    onward.
    """
    codename = lsb_release()['DISTRIB_CODENAME'].lower()
    if CompareHostReleases(codename) >= 'vivid':
        return '/etc/mysql/percona-xtradb-cluster.conf.d/mysqld.cnf'
    return '/etc/mysql/my.cnf'
예제 #5
0
def git_post_install(projects_yaml):
    """Perform post-install setup.

    After a git-based install: copy the neutron etc/ config trees into
    /etc/neutron, symlink neutron-rootwrap into /usr/local/bin, render
    sudoers, install init units (systemd on wily+, upstart otherwise) and
    restart the agent unless the unit is paused.
    """
    # Replace the /etc/neutron config trees wholesale with the copies
    # shipped in the git checkout.
    src_etc = os.path.join(git_src_dir(projects_yaml, 'neutron'), 'etc')
    configs = [
        {
            'src': src_etc,
            'dest': '/etc/neutron'
        },
        {
            'src': os.path.join(src_etc, 'neutron/plugins'),
            'dest': '/etc/neutron/plugins'
        },
        {
            'src': os.path.join(src_etc, 'neutron/rootwrap.d'),
            'dest': '/etc/neutron/rootwrap.d'
        },
    ]

    for c in configs:
        if os.path.exists(c['dest']):
            shutil.rmtree(c['dest'])
        shutil.copytree(c['src'], c['dest'])

    # NOTE(coreycb): Need to find better solution than bin symlinks.
    symlinks = [
        {
            'src':
            os.path.join(git_pip_venv_dir(projects_yaml),
                         'bin/neutron-rootwrap'),
            'link':
            '/usr/local/bin/neutron-rootwrap'
        },
    ]

    # lexists() so a dangling symlink is also replaced.
    for s in symlinks:
        if os.path.lexists(s['link']):
            os.remove(s['link'])
        os.symlink(s['src'], s['link'])

    render('git/neutron_sudoers',
           '/etc/sudoers.d/neutron_sudoers', {},
           perms=0o440)

    bin_dir = os.path.join(git_pip_venv_dir(projects_yaml), 'bin')
    cmp_os_release = CompareOpenStackReleases(os_release('neutron-common'))
    # Use systemd init units/scripts from ubuntu wily onward
    _release = lsb_release()['DISTRIB_CODENAME']
    if CompareHostReleases(_release) >= 'wily':
        templates_dir = os.path.join(charm_dir(), 'templates/git')
        daemons = ['neutron-openvswitch-agent', 'neutron-ovs-cleanup']
        for daemon in daemons:
            neutron_ovs_context = {
                'daemon_path': os.path.join(bin_dir, daemon),
            }
            # Pre-mitaka the agent carried a '-plugin-' in its name.
            filename = daemon
            if daemon == 'neutron-openvswitch-agent':
                if cmp_os_release < 'mitaka':
                    filename = 'neutron-plugin-openvswitch-agent'
            template_file = 'git/{}.init.in.template'.format(filename)
            init_in_file = '{}.init.in'.format(filename)
            render(template_file,
                   os.path.join(templates_dir, init_in_file),
                   neutron_ovs_context,
                   perms=0o644)
        git_generate_systemd_init_files(templates_dir)

        for daemon in daemons:
            filename = daemon
            if daemon == 'neutron-openvswitch-agent':
                if cmp_os_release < 'mitaka':
                    filename = 'neutron-plugin-openvswitch-agent'
            service('enable', filename)
    else:
        # Pre-wily: render upstart jobs instead of systemd units.
        neutron_ovs_agent_context = {
            'service_description': 'Neutron OpenvSwitch Plugin Agent',
            'charm_name': 'neutron-openvswitch',
            'process_name': 'neutron-openvswitch-agent',
            'executable_name': os.path.join(bin_dir,
                                            'neutron-openvswitch-agent'),
            'cleanup_process_name': 'neutron-ovs-cleanup',
            'plugin_config': '/etc/neutron/plugins/ml2/ml2_conf.ini',
            'log_file': '/var/log/neutron/openvswitch-agent.log',
        }

        neutron_ovs_cleanup_context = {
            'service_description': 'Neutron OpenvSwitch Cleanup',
            'charm_name': 'neutron-openvswitch',
            'process_name': 'neutron-ovs-cleanup',
            'executable_name': os.path.join(bin_dir, 'neutron-ovs-cleanup'),
            'log_file': '/var/log/neutron/ovs-cleanup.log',
        }

        if cmp_os_release < 'mitaka':
            render('git/upstart/neutron-plugin-openvswitch-agent.upstart',
                   '/etc/init/neutron-plugin-openvswitch-agent.conf',
                   neutron_ovs_agent_context,
                   perms=0o644)
        else:
            render('git/upstart/neutron-plugin-openvswitch-agent.upstart',
                   '/etc/init/neutron-openvswitch-agent.conf',
                   neutron_ovs_agent_context,
                   perms=0o644)
        render('git/upstart/neutron-ovs-cleanup.upstart',
               '/etc/init/neutron-ovs-cleanup.conf',
               neutron_ovs_cleanup_context,
               perms=0o644)

    # NOTE(review): this restarts the pre-mitaka service name
    # unconditionally, even though mitaka+ installs
    # 'neutron-openvswitch-agent' above — confirm whether this is
    # intentional (service_restart may tolerate a missing service).
    if not is_unit_paused_set():
        service_restart('neutron-plugin-openvswitch-agent')
def render_config(hosts=None):
    """Render the Percona XtraDB Cluster MySQL configuration file.

    :param hosts: optional list of cluster peer addresses to include in
                  wsrep_cluster_address (defaults to an empty list).
    """
    if hosts is None:
        hosts = []

    # Ensure the target config directory exists (PXC 5.7 uses a conf.d
    # layout that may not be present yet).
    config_file = resolve_cnf_file()
    if not os.path.exists(os.path.dirname(config_file)):
        os.makedirs(os.path.dirname(config_file))

    context = {
        'cluster_name': 'juju_cluster',
        'private_address': get_cluster_host_ip(),
        'cluster_hosts': ",".join(hosts),
        'sst_method': config('sst-method'),
        'sst_password': sst_password(),
        'innodb_file_per_table': config('innodb-file-per-table'),
        'table_open_cache': config('table-open-cache'),
        'binlogs_path': config('binlogs-path'),
        'enable_binlogs': config('enable-binlogs'),
        'binlogs_max_size': config('binlogs-max-size'),
        'binlogs_expire_days': config('binlogs-expire-days'),
        'performance_schema': config('performance-schema'),
        'is_leader': is_leader(),
        'server_id': get_server_id(),
        'series_upgrade': is_unit_upgrading_set(),
    }

    if config('prefer-ipv6'):
        # NOTE(hopem): this is a kludge to get percona working with ipv6.
        # See lp 1380747 for more info. This is intended as a stop gap until
        # percona package is fixed to support ipv6.
        context['bind_address'] = '::'
        context['ipv6'] = True
    else:
        context['ipv6'] = False

    wsrep_provider_options = get_wsrep_provider_options()
    if wsrep_provider_options:
        context['wsrep_provider_options'] = wsrep_provider_options

    # 'is not None' so an explicit 0 is still honoured.
    if config('wsrep-slave-threads') is not None:
        context['wsrep_slave_threads'] = config('wsrep-slave-threads')

    if CompareHostReleases(lsb_release()['DISTRIB_CODENAME']) < 'bionic':
        # myisam_recover is not valid for PXC 5.7 (introduced in Bionic) so we
        # only set it for PXC 5.6.
        context['myisam_recover'] = 'BACKUP'
        context['wsrep_provider'] = '/usr/lib/libgalera_smm.so'
        if 'wsrep_slave_threads' not in context:
            context['wsrep_slave_threads'] = 1
    elif CompareHostReleases(lsb_release()['DISTRIB_CODENAME']) >= 'bionic':
        # PXC 5.7 (bionic+): galera3 provider and a higher default for
        # wsrep_slave_threads when not explicitly configured.
        context['wsrep_provider'] = '/usr/lib/galera3/libgalera_smm.so'
        context['default_storage_engine'] = 'InnoDB'
        context['wsrep_log_conflicts'] = True
        context['innodb_autoinc_lock_mode'] = '2'
        context['pxc_strict_mode'] = config('pxc-strict-mode')
        if 'wsrep_slave_threads' not in context:
            context['wsrep_slave_threads'] = 48

    if config('databases-to-replicate'):
        context['databases_to_replicate'] = get_databases_to_replicate()

    # NOTE(review): both 'server_id' (above) and 'server-id' are set to
    # the same value — presumably different templates consume different
    # spellings; confirm before removing either.
    context['server-id'] = get_server_id()

    context.update(PerconaClusterHelper().parse_config())
    # 0o444: rendered config is deliberately read-only.
    render(os.path.basename(config_file), config_file, context, perms=0o444)
    def __call__(self):
        """Assemble the template context for libvirt / nova-compute config.

        Intermediate variant: adds SSH live-migration permit flags,
        multipath, extra CPU flags, huge-page reservations and virtio
        queue sizing on top of the distro defaults. Returns a dict
        consumed by the libvirtd / qemu / nova.conf templates.
        """
        # distro defaults
        ctxt = {
            # /etc/libvirt/libvirtd.conf (
            'listen_tls': 0
        }
        # Comparators so the context can vary by Ubuntu codename and by
        # OpenStack release (detected from the nova-common package).
        cmp_distro_codename = CompareHostReleases(
            lsb_release()['DISTRIB_CODENAME'].lower())
        cmp_os_release = CompareOpenStackReleases(os_release('nova-common'))

        # NOTE(jamespage): deal with switch to systemd
        if cmp_distro_codename < "wily":
            ctxt['libvirtd_opts'] = '-d'
        else:
            ctxt['libvirtd_opts'] = ''

        # NOTE(jamespage): deal with alignment with Debian in
        #                  Ubuntu yakkety and beyond.
        # NOTE(review): both branches assign the same masked value; the
        # original usernames appear redacted in this copy — confirm the
        # real per-release values against upstream before relying on this.
        if cmp_distro_codename >= 'yakkety' or cmp_os_release >= 'ocata':
            ctxt['libvirt_user'] = '******'
        else:
            ctxt['libvirt_user'] = '******'

        # get the processor architecture to use in the nova.conf template
        ctxt['arch'] = platform.machine()

        # enable tcp listening if configured for live migration.
        if config('enable-live-migration'):
            ctxt['libvirtd_opts'] += ' -l'

        # TLS transport is unnecessary for 'none'/'None'/'ssh' auth types.
        if config('enable-live-migration') and \
                config('migration-auth-type') in ['none', 'None', 'ssh']:
            ctxt['listen_tls'] = 0

        if config('enable-live-migration') and \
                config('migration-auth-type') == 'ssh':
            # nova.conf
            ctxt['live_migration_uri'] = 'qemu+ssh://%s/system'

        # Live-migration permit flags, passed through from charm options.
        if config('enable-live-migration'):
            ctxt['live_migration_permit_post_copy'] = \
                config('live-migration-permit-post-copy')
            ctxt['live_migration_permit_auto_converge'] = \
                config('live-migration-permit-auto-converge')

        # 'is not None' rather than truthiness so an explicitly empty
        # string still reaches the template.
        if config('instances-path') is not None:
            ctxt['instances_path'] = config('instances-path')

        if config('disk-cachemodes'):
            ctxt['disk_cachemodes'] = config('disk-cachemodes')

        if config('use-multipath'):
            ctxt['use_multipath'] = config('use-multipath')

        # cpu-mode: explicit config wins; otherwise pick a per-arch
        # default (host-passthrough on ppc64/aarch64, 'none' on s390x).
        if config('cpu-mode'):
            ctxt['cpu_mode'] = config('cpu-mode')
        elif ctxt['arch'] in ('ppc64el', 'ppc64le', 'aarch64'):
            ctxt['cpu_mode'] = 'host-passthrough'
        elif ctxt['arch'] == 's390x':
            ctxt['cpu_mode'] = 'none'

        if config('cpu-model'):
            ctxt['cpu_model'] = config('cpu-model')

        # Config option is space separated; the template expects a
        # comma-separated list.
        if config('cpu-model-extra-flags'):
            ctxt['cpu_model_extra_flags'] = ', '.join(
                config('cpu-model-extra-flags').split(' '))

        if config('hugepages'):
            ctxt['hugepages'] = True
            ctxt['kvm_hugepages'] = 1
        else:
            ctxt['kvm_hugepages'] = 0

        # ksm accepts explicit "1"/"0"; any other value falls back to a
        # release-dependent default ("1" pre-kilo, "AUTO" from kilo on).
        if config('ksm') in (
                "1",
                "0",
        ):
            ctxt['ksm'] = config('ksm')
        else:
            if cmp_os_release < 'kilo':
                log("KSM set to 1 by default on openstack releases < kilo",
                    level=INFO)
                ctxt['ksm'] = "1"
            else:
                ctxt['ksm'] = "AUTO"

        if config('reserved-huge-pages'):
            # To bypass juju limitation with list of strings, we
            # consider separate the option's values per semicolons.
            ctxt['reserved_huge_pages'] = ([
                o.strip() for o in config('reserved-huge-pages').split(";")
            ])

        if config('pci-passthrough-whitelist'):
            ctxt['pci_passthrough_whitelist'] = \
                config('pci-passthrough-whitelist')

        if config('pci-alias'):
            ctxt['pci_alias'] = config('pci-alias')

        if config('vcpu-pin-set'):
            ctxt['vcpu_pin_set'] = config('vcpu-pin-set')

        if config('cpu-shared-set'):
            ctxt['cpu_shared_set'] = config('cpu-shared-set')

        if config('virtio-net-tx-queue-size'):
            ctxt['virtio_net_tx_queue_size'] = (
                config('virtio-net-tx-queue-size'))
        if config('virtio-net-rx-queue-size'):
            ctxt['virtio_net_rx_queue_size'] = (
                config('virtio-net-rx-queue-size'))

        ctxt['reserved_host_memory'] = config('reserved-host-memory')

        # Persist a stable host UUID in the unit kv store: generate one on
        # first call, reuse it on subsequent hook executions.
        db = kv()
        if db.get('host_uuid'):
            ctxt['host_uuid'] = db.get('host_uuid')
        else:
            host_uuid = str(uuid.uuid4())
            db.set('host_uuid', host_uuid)
            db.flush()
            ctxt['host_uuid'] = host_uuid

        if config('libvirt-image-backend'):
            ctxt['libvirt_images_type'] = config('libvirt-image-backend')

        ctxt['force_raw_images'] = config('force-raw-images')

        return ctxt
예제 #8
0
def resource_map():
    '''
    Dynamically generate a map of resources that will be managed for a single
    hook execution.

    Starts from BASE_RESOURCE_MAP and layers in DVR, local DHCP, SR-IOV
    and release-specific service/config adjustments, then drops config
    files that do not apply to this deployment.
    '''
    drop_config = []
    resource_map = deepcopy(BASE_RESOURCE_MAP)
    if use_dvr():
        resource_map.update(DVR_RESOURCE_MAP)
        resource_map.update(METADATA_RESOURCE_MAP)
        dvr_services = ['neutron-metadata-agent', 'neutron-l3-agent']
        resource_map[NEUTRON_CONF]['services'] += dvr_services
    if enable_local_dhcp():
        resource_map.update(METADATA_RESOURCE_MAP)
        resource_map.update(DHCP_RESOURCE_MAP)
        metadata_services = ['neutron-metadata-agent', 'neutron-dhcp-agent']
        resource_map[NEUTRON_CONF]['services'] += metadata_services
    # Remap any service names as required
    _os_release = os_release('neutron-common', base='icehouse')
    if CompareOpenStackReleases(_os_release) >= 'mitaka':
        # ml2_conf.ini -> openvswitch_agent.ini
        drop_config.append(ML2_CONF)
        # drop of -plugin from service name
        resource_map[NEUTRON_CONF]['services'].remove(
            'neutron-plugin-openvswitch-agent'
        )
        resource_map[NEUTRON_CONF]['services'].append(
            'neutron-openvswitch-agent'
        )
        # DPDK config only applies when DPDK is in use; OVS_DEFAULT is
        # also dropped when OVS handles DPDK init itself.
        if not use_dpdk():
            drop_config.append(DPDK_INTERFACES)
            drop_config.append(OVS_DEFAULT)
        elif ovs_has_late_dpdk_init():
            drop_config.append(OVS_DEFAULT)
    else:
        drop_config.extend([OVS_CONF, DPDK_INTERFACES])

    if enable_sriov():
        sriov_agent_name = 'neutron-sriov-agent'
        sriov_resource_map = deepcopy(SRIOV_RESOURCE_MAP)

        if CompareOpenStackReleases(_os_release) < 'mitaka':
            sriov_agent_name = 'neutron-plugin-sriov-agent'
            # Patch resource_map for Kilo and Liberty
            sriov_resource_map[NEUTRON_SRIOV_AGENT_CONF]['services'] = \
                [sriov_agent_name]

        resource_map.update(sriov_resource_map)
        resource_map[NEUTRON_CONF]['services'].append(
            sriov_agent_name)

    # Use MAAS1.9 for MTU and external port config on xenial and above
    if CompareHostReleases(lsb_release()['DISTRIB_CODENAME']) >= 'xenial':
        drop_config.extend([EXT_PORT_CONF, PHY_NIC_MTU_CONF])

    # Remove dropped configs; tolerate entries that were never present.
    for _conf in drop_config:
        try:
            del resource_map[_conf]
        except KeyError:
            pass

    return resource_map
def enable_sriov():
    '''Determine whether SR-IOV is enabled and supported'''
    # SR-IOV support requires Ubuntu xenial or later; beyond that the
    # charm option decides.
    release_ok = CompareHostReleases(
        lsb_release()['DISTRIB_CODENAME']) >= 'xenial'
    return release_ok and config('enable-sriov')
예제 #10
0
def update_nrpe_config():
    """Install NRPE plugins/sudoers and register corosync/pacemaker checks.

    Validates the (deprecated) failed-actions options, copies the charm's
    plugin and sudoers files into place, then (re)registers the crm,
    corosync ring and process checks with NRPE. Sets the unit to
    'blocked' and returns early on invalid options.
    """
    # Validate options (DEPRECATED)
    valid_alerts = ['ignore', 'warning', 'critical']
    if config('failed_actions_alert_type').lower() not in valid_alerts:
        status_set(
            'blocked', 'The value of option failed_actions_alert_type must be '
            'among {}'.format(valid_alerts))
        return
    if config('failed_actions_threshold') < 0:
        status_set(
            'blocked',
            'The value of option failed_actions_threshold must be a '
            'positive integer')
        return

    # Copy the charm's nagios plugin scripts into the standard plugin dir.
    scripts_src = os.path.join(os.environ["CHARM_DIR"], "files", "nrpe")

    scripts_dst = "/usr/local/lib/nagios/plugins"
    if not os.path.exists(scripts_dst):
        os.makedirs(scripts_dst)
    for fname in glob.glob(os.path.join(scripts_src, "*")):
        if os.path.isfile(fname):
            shutil.copy2(fname,
                         os.path.join(scripts_dst, os.path.basename(fname)))

    # Sudoers entries allow the nagios user to run the checks.
    sudoers_src = os.path.join(os.environ["CHARM_DIR"], "files", "sudoers")
    sudoers_dst = "/etc/sudoers.d"
    for fname in glob.glob(os.path.join(sudoers_src, "*")):
        if os.path.isfile(fname):
            shutil.copy2(fname,
                         os.path.join(sudoers_dst, os.path.basename(fname)))

    hostname = nrpe.get_nagios_hostname()
    current_unit = nrpe.get_nagios_unit_name()

    nrpe_setup = nrpe.NRPE(hostname=hostname)

    # check_crm depends on python-dbus being present on the host.
    apt_install('python-dbus')

    # Build the check_crm command line from the failed-action options and
    # per-severity resource failcounts.
    check_crm_cmd = 'check_crm -s'
    check_crm_cmd += ' --failedactions={}'.format(
        config('failed_actions_alert_type').lower())
    if config('failed_actions_threshold'):
        check_crm_cmd += ' --failcount={}'.format(
            config('failed_actions_threshold'))
    for err_type in ['warn', 'crit']:
        check_crm_cmd += ' --failcount-{}={}'.format(
            err_type,
            config('res_failcount_{}'.format(err_type)) or 0)

    if nrpe.NRPE.does_nrpe_conf_dir_exist():
        # corosync/crm checks

        # LP #1902919 - corosync version 2.99 changed the ring status output
        # for udp/udpu to hardcode the status to always report 'OK'. This
        # results in the check providing no value over what is provided by the
        # crm_status check. A version check on the package would be more ideal,
        # however populating the apt-cache object is expensive to run on each
        # config-changed hook, so use the faster check of comparing the
        # release name.
        ring_check = {
            'shortname': 'corosync_rings',
            'description': 'Check Corosync rings {}'.format(current_unit),
            'check_cmd': 'check_corosync_rings',
        }
        if CompareHostReleases(get_distrib_codename()) < 'eoan':
            nrpe_setup.add_check(**ring_check)
        else:
            nrpe_setup.remove_check(**ring_check)

        nrpe_setup.add_check(
            shortname='crm_status',
            description='Check crm status {}'.format(current_unit),
            check_cmd=check_crm_cmd)

        # process checks
        nrpe_setup.add_check(
            shortname='corosync_proc',
            description='Check Corosync process {}'.format(current_unit),
            check_cmd='check_procs -c 1:1 -C corosync')
        nrpe_setup.add_check(
            shortname='pacemakerd_proc',
            description='Check Pacemakerd process {}'.format(current_unit),
            check_cmd='check_procs -c 1:1 -C pacemakerd')

        # Persist the accumulated check definitions to the NRPE config.
        nrpe_setup.write()
def resource_map():
    '''
    Dynamically generate a map of resources that will be managed for a single
    hook execution.

    Starts from BASE_RESOURCE_MAP and layers in DVR, local DHCP, SR-IOV /
    hardware-offload and release-specific service/config adjustments,
    then drops config files that do not apply to this deployment.
    '''
    drop_config = []
    resource_map = deepcopy(BASE_RESOURCE_MAP)
    if use_dvr():
        resource_map.update(DVR_RESOURCE_MAP)
        resource_map.update(METADATA_RESOURCE_MAP)
        dvr_services = ['neutron-metadata-agent', 'neutron-l3-agent']
        resource_map[NEUTRON_CONF]['services'] += dvr_services
    if enable_local_dhcp():
        resource_map.update(METADATA_RESOURCE_MAP)
        resource_map.update(DHCP_RESOURCE_MAP)
        metadata_services = ['neutron-metadata-agent', 'neutron-dhcp-agent']
        resource_map[NEUTRON_CONF]['services'] += metadata_services
    # Remap any service names as required
    _os_release = os_release('neutron-common', base='icehouse')
    if CompareOpenStackReleases(_os_release) >= 'mitaka':
        # ml2_conf.ini -> openvswitch_agent.ini
        drop_config.append(ML2_CONF)
        # drop of -plugin from service name
        resource_map[NEUTRON_CONF]['services'].remove(
            'neutron-plugin-openvswitch-agent')
        resource_map[NEUTRON_CONF]['services'].append(
            'neutron-openvswitch-agent')
        if not use_dpdk():
            drop_config.append(DPDK_INTERFACES)
    else:
        drop_config.extend([OVS_CONF, DPDK_INTERFACES])

    if enable_sriov():
        sriov_agent_name = 'neutron-sriov-agent'
        sriov_resource_map = deepcopy(SRIOV_RESOURCE_MAP)

        if CompareOpenStackReleases(_os_release) < 'mitaka':
            sriov_agent_name = 'neutron-plugin-sriov-agent'
            # Patch resource_map for Kilo and Liberty
            sriov_resource_map[NEUTRON_SRIOV_AGENT_CONF]['services'] = \
                [sriov_agent_name]

        resource_map.update(sriov_resource_map)
        resource_map[NEUTRON_CONF]['services'].append(sriov_agent_name)
    if enable_sriov() or use_hw_offload():
        # We do late initialization of this as a call to
        # ``context.SRIOVContext`` requires the ``sriov-netplan-shim`` package
        # to already be installed on the system.
        #
        # Note that we also do not want the charm to manage the service, but
        # only update the configuration for boot-time initialization.
        # LP: #1908351
        try:
            resource_map.update(
                OrderedDict([
                    (
                        SRIOV_NETPLAN_SHIM_CONF,
                        {
                            # We deliberately omit service here as we only want changes
                            # to be applied at boot time.
                            'services': [],
                            'contexts': [SRIOVContext_adapter()],
                        }),
                ]))
        except NameError:
            # The resource_map is built at module import time and as such this
            # function is called multiple times prior to the charm actually
            # being installed. As the SRIOVContext depends on a Python module
            # provided by the ``sriov-netplan-shim`` package gracefully ignore
            # this to allow the package to be installed.
            pass

    # Use MAAS1.9 for MTU and external port config on xenial and above
    if CompareHostReleases(lsb_release()['DISTRIB_CODENAME']) >= 'xenial':
        drop_config.extend([EXT_PORT_CONF, PHY_NIC_MTU_CONF])

    # Remove dropped configs; tolerate entries that were never present.
    for _conf in drop_config:
        try:
            del resource_map[_conf]
        except KeyError:
            pass

    return resource_map
예제 #12
0
    def is_compliant(self, *args, **kwargs):
        """Build the sshd_config pass/fail regex sets and run the audit.

        Populates self.pass_cases / self.fail_cases with line-matching
        patterns for MACs, KexAlgorithms, Ciphers and sftp according to
        the hardening settings and host release (trusty+ vs older), then
        delegates the actual file-content check to the superclass.
        """
        self.pass_cases = []
        self.fail_cases = []
        settings = utils.get_settings('ssh')

        _release = lsb_release()['DISTRIB_CODENAME'].lower()
        if CompareHostReleases(_release) >= 'trusty':
            if not settings['server']['weak_hmac']:
                self.pass_cases.append(r'^MACs.+,hmac-ripemd160$')
            else:
                self.pass_cases.append(r'^MACs.+,hmac-sha1$')

            if settings['server']['weak_kex']:
                self.fail_cases.append(
                    r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha256[,\s]?'
                )  # noqa
                self.pass_cases.append(
                    r'^KexAlgorithms\sdiffie-hellman-group14-sha1[,\s]?'
                )  # noqa
                self.pass_cases.append(
                    r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha1[,\s]?'
                )  # noqa
                self.pass_cases.append(
                    r'^KexAlgorithms\sdiffie-hellman-group1-sha1[,\s]?'
                )  # noqa
            else:
                self.pass_cases.append(
                    r'^KexAlgorithms.+,diffie-hellman-group-exchange-sha256$'
                )  # noqa
                self.fail_cases.append(
                    r'^KexAlgorithms.*diffie-hellman-group14-sha1[,\s]?'
                )  # noqa

            if settings['server']['cbc_required']:
                self.pass_cases.append(r'^Ciphers\s.*-cbc[,\s]?')
                self.fail_cases.append(r'^Ciphers\s.*aes128-ctr[,\s]?')
                self.fail_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?')
                self.fail_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?')
            else:
                self.fail_cases.append(r'^Ciphers\s.*-cbc[,\s]?')
                # BUGFIX: this literal was mangled ("\[email protected]") by
                # address obfuscation; restored to the chacha20-poly1305
                # cipher pattern from upstream charm-helpers.
                self.pass_cases.append(
                    r'^Ciphers\schacha20-poly1305@openssh.com,.+')  # noqa
                self.pass_cases.append(r'^Ciphers\s.*aes128-ctr$')
                self.pass_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?')
                self.pass_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?')
        else:
            # Pre-trusty releases: older OpenSSH, so the expected
            # kex/cipher sets differ (no group-exchange-sha256 chains).
            if not settings['server']['weak_hmac']:
                self.pass_cases.append(r'^MACs.+,hmac-ripemd160$')
            else:
                self.pass_cases.append(r'^MACs.+,hmac-sha1$')

            if settings['server']['weak_kex']:
                self.fail_cases.append(
                    r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha256[,\s]?'
                )  # noqa
                self.pass_cases.append(
                    r'^KexAlgorithms\sdiffie-hellman-group14-sha1[,\s]?'
                )  # noqa
                self.pass_cases.append(
                    r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha1[,\s]?'
                )  # noqa
                self.pass_cases.append(
                    r'^KexAlgorithms\sdiffie-hellman-group1-sha1[,\s]?'
                )  # noqa
            else:
                self.pass_cases.append(
                    r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha256$'
                )  # noqa
                self.fail_cases.append(
                    r'^KexAlgorithms\sdiffie-hellman-group14-sha1[,\s]?'
                )  # noqa
                self.fail_cases.append(
                    r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha1[,\s]?'
                )  # noqa
                self.fail_cases.append(
                    r'^KexAlgorithms\sdiffie-hellman-group1-sha1[,\s]?'
                )  # noqa

            if settings['server']['cbc_required']:
                self.pass_cases.append(r'^Ciphers\s.*-cbc[,\s]?')
                self.fail_cases.append(r'^Ciphers\s.*aes128-ctr[,\s]?')
                self.fail_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?')
                self.fail_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?')
            else:
                self.fail_cases.append(r'^Ciphers\s.*-cbc[,\s]?')
                self.pass_cases.append(r'^Ciphers\s.*aes128-ctr[,\s]?')
                self.pass_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?')
                self.pass_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?')

        if settings['server']['sftp_enable']:
            self.pass_cases.append(r'^Subsystem\ssftp')
        else:
            self.fail_cases.append(r'^Subsystem\ssftp')

        return super(SSHDConfigFileContentAudit,
                     self).is_compliant(*args, **kwargs)
예제 #13
0
def systemd():
    """Return True when the host runs an Ubuntu release (vivid or later)
    that boots with systemd rather than upstart."""
    codename = lsb_release()['DISTRIB_CODENAME']
    return CompareHostReleases(codename) >= 'vivid'
def resource_map():
    '''
    Dynamically generate a map of resources that will be managed for a single
    hook execution.

    :returns: dict mapping a config-file path to a dict with 'services'
        (services to restart when the file changes) and 'contexts'
        (template contexts used to render it).
    '''
    # TODO: Cache this on first call?
    virt_type = config('virt-type').lower()
    if virt_type in ('lxd', 'ironic'):
        resource_map = deepcopy(BASE_RESOURCE_MAP)
    else:
        resource_map = deepcopy(LIBVIRT_RESOURCE_MAP)

    # if vault deps are not installed it is not yet possible to check the vault
    # context status since it requires the hvac dependency.
    if not vaultlocker_installed():
        # NOTE: pass the class itself to isinstance(); the original code
        # instantiated a VaultKVContext purely to recover its type, which
        # is unnecessary and runs the context's constructor for no reason.
        to_delete = [item for item in resource_map[NOVA_CONF]['contexts']
                     if isinstance(item, vaultlocker.VaultKVContext)]

        for item in to_delete:
            resource_map[NOVA_CONF]['contexts'].remove(item)

    net_manager = network_manager()

    # Network manager gets set late by the cloud-compute interface.
    # FlatDHCPManager only requires some extra packages.
    cmp_os_release = CompareOpenStackReleases(os_release('nova-common'))
    if (net_manager in ['flatmanager', 'flatdhcpmanager'] and
            config('multi-host').lower() == 'yes' and
            cmp_os_release < 'ocata'):
        resource_map[NOVA_CONF]['services'].extend(
            ['nova-api', 'nova-network']
        )
    else:
        # NOTE(review): assumes both AppArmor profile paths are always
        # present in the chosen base map — pop() raises KeyError otherwise.
        resource_map.pop(NOVA_API_AA_PROFILE_PATH)
        resource_map.pop(NOVA_NETWORK_AA_PROFILE_PATH)

    if virt_type == 'ironic':
        # NOTE(gsamfira): OpenStack versions prior to Victoria do not have a
        # dedicated nova-compute-ironic package which provides a suitable
        # nova-compute.conf file. We use a template to compensate for that.
        if cmp_os_release < 'victoria':
            resource_map[NOVA_COMPUTE_CONF] = {
                "services": ["nova-compute"],
                "contexts": [],
            }

    # From yakkety/ocata onwards the daemon is named libvirtd rather than
    # libvirt-bin; swap the service name in every resource entry.
    cmp_distro_codename = CompareHostReleases(
        lsb_release()['DISTRIB_CODENAME'].lower())
    if (cmp_distro_codename >= 'yakkety' or cmp_os_release >= 'ocata'):
        for data in resource_map.values():
            if LIBVIRT_BIN_DAEMON in data['services']:
                data['services'].remove(LIBVIRT_BIN_DAEMON)
                data['services'].append(LIBVIRTD_DAEMON)

    # Neutron/quantum requires additional contexts, as well as new resources
    # depending on the plugin used.
    # NOTE(james-page): only required for ovs plugin right now
    if net_manager in ['neutron', 'quantum']:
        resource_map[NOVA_CONF]['contexts'].append(NeutronComputeContext())

    if relation_ids('ceph'):
        CEPH_RESOURCES[ceph_config_file()] = {
            'contexts': [NovaComputeCephContext()],
            'services': ['nova-compute']
        }
        resource_map.update(CEPH_RESOURCES)

    enable_nova_metadata, _ = nova_metadata_requirement()
    if enable_nova_metadata:
        resource_map[NOVA_CONF]['services'].append('nova-api-metadata')

    # NOTE(james-page): If not on an upstart based system, don't write
    #                   and override file for libvirt-bin.
    if not os.path.exists('/etc/init'):
        if LIBVIRT_BIN_OVERRIDES in resource_map:
            del resource_map[LIBVIRT_BIN_OVERRIDES]

    return resource_map
예제 #15
0
    def set_mysql_password(self, username, password, current_password=None):
        """Update a mysql password for the provided username changing the
        leader settings

        To update root's password pass `None` in the username

        :param username: Username to change password of
        :type username: str
        :param password: New password for user.
        :type password: str
        :param current_password: Existing password for user.
        :type current_password: str
        :raises MySQLSetPasswordError: if the current password cannot
            authenticate, the UPDATE fails, or the new password does not
            authenticate afterwards.
        """

        # A None username means "change root's password" (see docstring and
        # the comparison against 'root' below).
        if username is None:
            username = 'root'

        # get root password via leader-get, it may be that in the past (when
        # changes to root-password were not supported) the user changed the
        # password, so leader-get is more reliable source than
        # config.previous('root-password').
        rel_username = None if username == 'root' else username
        if not current_password:
            current_password = self.get_mysql_password(rel_username)

        # password that needs to be set
        new_passwd = password

        # update password for all users (e.g. root@localhost, root@::1, etc)
        try:
            self.connect(user=username, password=current_password)
            cursor = self.connection.cursor()
        except MySQLdb.OperationalError as ex:
            raise MySQLSetPasswordError(('Cannot connect using password in '
                                         'leader settings (%s)') % ex, ex)

        try:
            # NOTE(freyes): Due to skip-name-resolve root@$HOSTNAME account
            # fails when using SET PASSWORD so using UPDATE against the
            # mysql.user table is needed, but changes to this table are not
            # replicated across the cluster, so this update needs to run in
            # all the nodes. More info at
            # http://galeracluster.com/documentation-webpages/userchanges.html
            release = CompareHostReleases(lsb_release()['DISTRIB_CODENAME'])
            if release < 'bionic':
                SQL_UPDATE_PASSWD = ("UPDATE mysql.user SET password = "
                                     "PASSWORD( %s ) WHERE user = %s;")
            else:
                # PXC 5.7 (introduced in Bionic) uses authentication_string
                SQL_UPDATE_PASSWD = ("UPDATE mysql.user SET "
                                     "authentication_string = "
                                     "PASSWORD( %s ) WHERE user = %s;")
            cursor.execute(SQL_UPDATE_PASSWD, (new_passwd, username))
            cursor.execute('FLUSH PRIVILEGES;')
            self.connection.commit()
        except MySQLdb.OperationalError as ex:
            raise MySQLSetPasswordError('Cannot update password: %s' % str(ex),
                                        ex)
        finally:
            cursor.close()

        # check the password was changed
        try:
            self.connect(user=username, password=new_passwd)
            self.execute('select 1;')
        except MySQLdb.OperationalError as ex:
            raise MySQLSetPasswordError(('Cannot connect using new password: '
                                         '%s') % str(ex), ex)

        if not is_leader():
            log('Only the leader can set a new password in the relation',
                level=DEBUG)
            return

        # Propagate the new password into every leader-settings key that
        # currently stores one for this user.
        for key in self.passwd_keys(rel_username):
            _password = leader_get(key)
            if _password:
                log('Updating password for %s (%s)' % (key, rel_username),
                    level=DEBUG)
                leader_set(settings={key: new_passwd})
예제 #16
0
def assert_charm_supports_ipv6():
    """Check whether we are able to support charms ipv6.

    Raises a plain Exception when the host release predates trusty.
    """
    codename = lsb_release()['DISTRIB_CODENAME'].lower()
    if CompareHostReleases(codename) >= "trusty":
        return
    raise Exception("IPv6 is not supported in the charms for Ubuntu "
                    "versions less than Trusty 14.04")
예제 #17
0
def enable_replication():
    """Return True on Ubuntu releases strictly newer than trusty."""
    codename = lsb_release()['DISTRIB_CODENAME'].lower()
    return CompareHostReleases(codename) > "trusty"