def bootstrap_pxc():
    """Bootstrap Percona XtraDB Cluster (PXC).

    On systemd systems 'systemctl bootstrap-pxc mysql' does not work.
    Run 'service mysql bootstrap-pxc' to bootstrap on pre-bionic releases;
    bionic and later provide a dedicated 'mysql@bootstrap' unit.

    :raises Exception: if the bootstrap command fails.
    """
    service('stop', 'mysql')
    bootstrapped = service('bootstrap-pxc', 'mysql')
    if not bootstrapped:
        # Evaluate the release comparison once; the original code queried
        # lsb_release() twice for the same check.
        pre_bionic = CompareHostReleases(
            lsb_release()['DISTRIB_CODENAME']) < 'bionic'
        try:
            if pre_bionic:
                # NOTE(jamespage): execute under systemd-run to ensure
                #                  that the bootstrap-pxc mysqld does
                #                  not end up in the juju unit daemons
                #                  cgroup scope.
                cmd = [
                    'systemd-run', '--service-type=forking', 'service',
                    'mysql', 'bootstrap-pxc'
                ]
                subprocess.check_call(cmd)
            else:
                service('start', 'mysql@bootstrap')
        except subprocess.CalledProcessError as e:
            msg = 'Bootstrap PXC failed'
            error_msg = '{}: {}'.format(msg, e)
            status_set('blocked', msg)
            log(error_msg, ERROR)
            raise Exception(error_msg)
        if pre_bionic:
            # To make systemd aware mysql is running after a bootstrap
            service('start', 'mysql')
    log("Bootstrap PXC Succeeded", DEBUG)
def render_config(hosts=None):
    """Render the PXC configuration file from charm config and peers.

    :param hosts: list of cluster peer addresses for the wsrep cluster
                  address; defaults to an empty list.
    """
    if hosts is None:
        hosts = []

    config_file = resolve_cnf_file()
    if not os.path.exists(os.path.dirname(config_file)):
        os.makedirs(os.path.dirname(config_file))

    context = {
        'cluster_name': 'juju_cluster',
        'private_address': get_cluster_host_ip(),
        'cluster_hosts': ",".join(hosts),
        'sst_method': config('sst-method'),
        'sst_password': sst_password(),
        'innodb_file_per_table': config('innodb-file-per-table'),
        'table_open_cache': config('table-open-cache'),
        'lp1366997_workaround': config('lp1366997-workaround'),
        'binlogs_path': config('binlogs-path'),
        'enable_binlogs': config('enable-binlogs'),
        'binlogs_max_size': config('binlogs-max-size'),
        'binlogs_expire_days': config('binlogs-expire-days'),
        'performance_schema': config('performance-schema'),
        'is_leader': is_leader(),
        'server_id': get_server_id(),
    }

    if config('prefer-ipv6'):
        # NOTE(hopem): this is a kludge to get percona working with ipv6.
        # See lp 1380747 for more info. This is intended as a stop gap until
        # percona package is fixed to support ipv6.
        context['bind_address'] = '::'
        context['ipv6'] = True
    else:
        context['ipv6'] = False

    wsrep_provider_options = get_wsrep_provider_options()
    if wsrep_provider_options:
        context['wsrep_provider_options'] = wsrep_provider_options

    # The two branches below are exhaustive; the original code used a
    # redundant 'elif' that re-queried lsb_release() for the opposite test.
    if CompareHostReleases(lsb_release()['DISTRIB_CODENAME']) < 'bionic':
        # myisam_recover is not valid for PXC 5.7 (introduced in Bionic) so we
        # only set it for PXC 5.6.
        context['myisam_recover'] = 'BACKUP'
        context['wsrep_provider'] = '/usr/lib/libgalera_smm.so'
    else:
        context['wsrep_provider'] = '/usr/lib/galera3/libgalera_smm.so'
        context['default_storage_engine'] = 'InnoDB'
        context['wsrep_log_conflicts'] = True
        context['innodb_autoinc_lock_mode'] = '2'
        context['pxc_strict_mode'] = config('pxc-strict-mode')

    context.update(PerconaClusterHelper().parse_config())
    render(os.path.basename(config_file), config_file, context, perms=0o444)
# Example #3 (score: 0)
def install():
    """Install hook: install the apt packages appropriate to this release."""
    pkgs = copy.deepcopy(PACKAGES)
    release = lsb_release()['DISTRIB_CODENAME'].lower()
    cmp_release = CompareHostReleases(release)
    # use libnagios on anything older than Xenial
    if cmp_release < 'xenial':
        pkgs.remove('libmonitoring-plugin-perl')
        pkgs.append('libnagios-plugin-perl')
    elif cmp_release >= 'bionic':
        pkgs.append('python3-libmaas')
    # NOTE(dosaboy): we currently disallow upgrades due to bug #1382842. This
    # should be removed once the pacemaker package is fixed.
    status_set('maintenance', 'Installing apt packages')
    apt_install(filter_installed_packages(pkgs), fatal=True)
    setup_ocf_files()
# Example #4 (score: 0)
def install():
    """Install hook: configure apt source, create config dirs, install pkgs.

    Works on a copy of the module-level PACKAGES list: previously the code
    aliased PACKAGES and mutated it with ``+=``/``extend``, permanently
    changing the module-level list across hook invocations.
    """
    execd_preinstall()
    add_source(hookenv.config('source'), hookenv.config('key'))
    for directory in [CONF_FILE_DIR, USR_SHARE_DIR]:
        hookenv.log("creating config dir at {}".format(directory))
        if not os.path.isdir(directory):
            if os.path.exists(directory):
                hookenv.log("error: {} exists but is not a directory."
                            " exiting.".format(directory))
                return
            os.mkdir(directory)

    # Copy so the in-place mutations below cannot alter the global list.
    _packages = list(PACKAGES)
    if not hookenv.config("use_swift"):
        hookenv.log('Configuring for local hosting of product stream.')
        _packages += ["apache2"]

    if CompareHostReleases(lsb_release()['DISTRIB_CODENAME']) > 'bionic':
        # Python 2 packages are unavailable after bionic; switch to py3.
        _packages = [pkg for pkg in _packages if not pkg.startswith('python-')]
        _packages.extend(PY3_PACKAGES)

    apt_update(fatal=True)

    apt_install(_packages)

    hookenv.log('end install hook.')
def get_packages():
    '''Return a list of packages for install based on the configured plugin'''
    plugin = config('plugin')
    pkgs = deepcopy(GATEWAY_PKGS[plugin])
    os_cmp = CompareOpenStackReleases(os_release('neutron-common'))
    host_cmp = CompareHostReleases(lsb_release()['DISTRIB_CODENAME'])
    if plugin == OVS:
        if (os_cmp >= 'icehouse' and os_cmp < 'mitaka'
                and host_cmp < 'utopic'):
            # NOTE(jamespage) neutron-vpn-agent supercedes l3-agent for
            # icehouse but openswan was removed in utopic.
            pkgs.remove('neutron-l3-agent')
            pkgs.append('neutron-vpn-agent')
            pkgs.append('openswan')
        if os_cmp >= 'liberty':
            # Switch out mysql driver
            pkgs.remove('python-mysqldb')
            pkgs.append('python-pymysql')
        if os_cmp >= 'mitaka':
            # Switch out to actual ovs agent package
            pkgs.remove('neutron-plugin-openvswitch-agent')
            pkgs.append('neutron-openvswitch-agent')
        if os_cmp >= 'kilo':
            pkgs.append('python-neutron-fwaas')
    if plugin in (OVS, OVS_ODL) and os_cmp >= 'newton':
        # LBaaS v1 dropped in newton
        pkgs.remove('neutron-lbaas-agent')
        pkgs.append('neutron-lbaasv2-agent')
    pkgs.extend(determine_l3ha_packages())

    return pkgs
 def __call__(self):
     """Build the template context for the swift storage server configs."""
     option_keys = (
         'account-server-port', 'account-server-port-rep',
         'container-server-port', 'container-server-port-rep',
         'object-server-port', 'object-server-port-rep',
         'object-server-threads-per-disk', 'account-max-connections',
         'container-max-connections', 'object-max-connections',
         'object-replicator-concurrency', 'object-rsync-timeout',
         'statsd-host', 'statsd-port', 'statsd-sample-rate',
     )
     # Context keys are the charm option names with '-' mapped to '_'.
     ctxt = {key.replace('-', '_'): config(key) for key in option_keys}
     ctxt['local_ip'] = unit_private_ip()
     # Replication runs as a standalone service on releases after trusty.
     release = lsb_release()['DISTRIB_CODENAME'].lower()
     ctxt['standalone_replicator'] = CompareHostReleases(release) > "trusty"
     return ctxt
# Example #7 (score: 0)
def _get_firewall_driver(ovs_ctxt):
    '''
    Determine the firewall driver to use based on configuration,
    OpenStack and Ubuntu releases.

    @returns str: firewall driver to use for OpenvSwitch
    '''
    driver = config('firewall-driver') or IPTABLES_HYBRID
    release = lsb_release()['DISTRIB_CODENAME']

    # Unknown drivers silently fall back to the safe default.
    if driver not in VALID_FIREWALL_DRIVERS:
        return IPTABLES_HYBRID

    if driver == IPTABLES_HYBRID and ovs_ctxt['enable_nsg_logging']:
        log("NSG logging can not be enabled - need to set "
            "firewall driver to 'openvswitch' explicitly", "WARN")

    if driver == OPENVSWITCH and CompareHostReleases(release) < 'xenial':
        # NOTE(jamespage): Switch back to iptables_hybrid for
        #                  Ubuntu releases prior to Xenial due
        #                  to requirements for Linux >= 4.4 and
        #                  Open vSwitch >= 2.5
        return IPTABLES_HYBRID

    return driver
# Example #8 (score: 0)
    def get_kexs(self, allow_weak_kex):
        """Return the sshd KexAlgorithms list for this host.

        :param allow_weak_kex: include legacy/weak key exchange algorithms.
        :returns: comma-separated KexAlgorithms string.
        """
        weak_kex = 'weak' if allow_weak_kex else 'default'

        legacy_default = 'diffie-hellman-group-exchange-sha256'
        legacy = {
            'default': legacy_default,
            'weak': (legacy_default + ',diffie-hellman-group14-sha1,'
                     'diffie-hellman-group-exchange-sha1,'
                     'diffie-hellman-group1-sha1'),
        }

        modern_default = ('[email protected],'
                          'diffie-hellman-group-exchange-sha256')
        modern = {
            'default': modern_default,
            'weak': (modern_default + ',diffie-hellman-group14-sha1,'
                     'diffie-hellman-group-exchange-sha1,'
                     'diffie-hellman-group1-sha1'),
        }

        # Use newer kex on Ubuntu Trusty and above
        _release = lsb_release()['DISTRIB_CODENAME'].lower()
        if CompareHostReleases(_release) >= 'trusty':
            log(
                'Detected Ubuntu 14.04 or newer, using new key exchange '
                'algorithms',
                level=DEBUG)
            kex = modern
        else:
            kex = legacy

        return kex[weak_kex]
# Example #9 (score: 0)
    def get_ciphers(self, cbc_required):
        """Return the sshd Ciphers list for this host.

        :param cbc_required: include the weaker CBC-mode ciphers.
        :returns: comma-separated Ciphers string.
        """
        if cbc_required:
            weak_ciphers = 'weak'
        else:
            weak_ciphers = 'default'

        default = 'aes256-ctr,aes192-ctr,aes128-ctr'
        cipher = {
            'default': default,
            # BUG FIX: a ',' separator was missing after `default`, which
            # fused the list into the invalid token 'aes128-ctraes256-cbc'
            # (the ciphers_66 dict below always had the comma).
            'weak': default + ',aes256-cbc,aes192-cbc,aes128-cbc'
        }

        default = ('[email protected],[email protected],'
                   '[email protected],aes256-ctr,aes192-ctr,aes128-ctr')
        ciphers_66 = {
            'default': default,
            'weak': default + ',aes256-cbc,aes192-cbc,aes128-cbc'
        }

        # Use newer ciphers on ubuntu Trusty and above
        _release = lsb_release()['DISTRIB_CODENAME'].lower()
        if CompareHostReleases(_release) >= 'trusty':
            log('Detected Ubuntu 14.04 or newer, using new ciphers',
                level=DEBUG)
            cipher = ciphers_66

        return cipher[weak_ciphers]
# Example #10 (score: 0)
def _get_keyid_by_gpg_key(key_material):
    """Get a GPG key fingerprint by GPG key material.
    Gets a GPG key fingerprint (40-digit, 160-bit) by the ASCII armor-encoded
    or binary GPG key material. Can be used, for example, to generate file
    names for keys passed via charm options.

    :param key_material: ASCII armor-encoded or binary GPG key material
    :type key_material: bytes
    :raises: GPGKeyError if invalid key material has been provided
    :returns: A GPG key fingerprint
    :rtype: str
    """
    # trusty, xenial and bionic handling differs due to gpg 1.x to 2.x change
    release = get_distrib_codename()
    is_gpgv2_distro = CompareHostReleases(release) >= "bionic"
    if is_gpgv2_distro:
        # --import is mandatory, otherwise fingerprint is not printed
        cmd = 'gpg --with-colons --import-options show-only --import --dry-run'
    else:
        cmd = 'gpg --with-colons --with-fingerprint'
    ps = subprocess.Popen(cmd.split(),
                          stdout=subprocess.PIPE,
                          stderr=subprocess.PIPE,
                          stdin=subprocess.PIPE)
    out, err = ps.communicate(input=key_material)
    if six.PY3:
        out = out.decode('utf-8')
        err = err.decode('utf-8')
    if 'gpg: no valid OpenPGP data found.' in err:
        raise GPGKeyError('Invalid GPG key material provided')
    # from gnupg2 docs: fpr :: Fingerprint (fingerprint is in field 10)
    match = re.search(r"^fpr:{9}([0-9A-F]{40}):$", out, re.MULTILINE)
    if match is None:
        # BUG FIX: previously .group(1) was called on a possible None,
        # raising an opaque AttributeError instead of the documented
        # GPGKeyError when no fingerprint could be parsed.
        raise GPGKeyError('Invalid GPG key material provided')
    return match.group(1)
def resolve_config_files(plugin, release):
    '''
    Resolve configuration files and contexts

    :param plugin: shortname of plugin e.g. ovs
    :param release: openstack release codename
    :returns: dict of configuration files, contexts
              and associated services
    '''
    # Work on a copy: entries are popped from the per-plugin map below.
    config_files = deepcopy(get_config_files())
    drop_config = []
    cmp_os_release = CompareOpenStackReleases(release)
    if plugin == OVS:
        # NOTE: deal with switch to ML2 plugin for >= icehouse
        drop_config = [NEUTRON_OVS_AGENT_CONF]
        if cmp_os_release >= 'mitaka':
            # ml2 -> ovs_agent
            drop_config = [NEUTRON_ML2_PLUGIN_CONF]

    # Use MAAS1.9 for MTU and external port config on xenial and above
    if CompareHostReleases(lsb_release()['DISTRIB_CODENAME']) >= 'xenial':
        drop_config.extend([EXT_PORT_CONF, PHY_NIC_MTU_CONF])

    # Rename to lbaasv2 in newton
    if cmp_os_release < 'newton':
        drop_config.extend([NEUTRON_LBAASV2_AA_PROFILE_PATH])
    else:
        drop_config.extend([NEUTRON_LBAAS_AA_PROFILE_PATH])

    # Drop lbaasv2 at train
    # or drop if disable-lbaas option is true
    if disable_neutron_lbaas():
        if cmp_os_release >= 'newton':
            drop_config.extend([
                NEUTRON_LBAASV2_AA_PROFILE_PATH,
                NEUTRON_LBAAS_AGENT_CONF,
            ])
        else:
            drop_config.extend([
                NEUTRON_LBAAS_AA_PROFILE_PATH,
                NEUTRON_LBAAS_AGENT_CONF,
            ])

    if disable_nova_metadata(cmp_os_release):
        drop_config.extend(get_nova_config_files().keys())
    else:
        # nova metadata is enabled: wire up an AMQP context, preferring a
        # dedicated 'amqp-nova' relation over the shared 'amqp' relation.
        if is_relation_made('amqp-nova'):
            amqp_nova_ctxt = context.AMQPContext(ssl_dir=NOVA_CONF_DIR,
                                                 rel_name='amqp-nova',
                                                 relation_prefix='nova')
        else:
            amqp_nova_ctxt = context.AMQPContext(ssl_dir=NOVA_CONF_DIR,
                                                 rel_name='amqp')
        config_files[plugin][NOVA_CONF]['hook_contexts'].append(amqp_nova_ctxt)

    # Remove every accumulated drop candidate that exists in this
    # plugin's config-file map; unknown entries are silently skipped.
    for _config in drop_config:
        if _config in config_files[plugin]:
            config_files[plugin].pop(_config)
    return config_files
def resource_map():
    '''
    Dynamically generate a map of resources that will be managed for a single
    hook execution.
    '''
    drop_config = []
    resource_map = deepcopy(BASE_RESOURCE_MAP)
    if use_dvr():
        resource_map.update(DVR_RESOURCE_MAP)
        resource_map.update(METADATA_RESOURCE_MAP)
        dvr_services = ['neutron-metadata-agent', 'neutron-l3-agent']
        resource_map[NEUTRON_CONF]['services'] += dvr_services
    if enable_local_dhcp():
        # METADATA_RESOURCE_MAP may be applied twice (also above); the
        # dict update is idempotent so that is harmless.
        resource_map.update(METADATA_RESOURCE_MAP)
        resource_map.update(DHCP_RESOURCE_MAP)
        metadata_services = ['neutron-metadata-agent', 'neutron-dhcp-agent']
        resource_map[NEUTRON_CONF]['services'] += metadata_services
    # Remap any service names as required
    _os_release = os_release('neutron-common', base='icehouse')
    if CompareOpenStackReleases(_os_release) >= 'mitaka':
        # ml2_conf.ini -> openvswitch_agent.ini
        drop_config.append(ML2_CONF)
        # drop of -plugin from service name
        resource_map[NEUTRON_CONF]['services'].remove(
            'neutron-plugin-openvswitch-agent')
        resource_map[NEUTRON_CONF]['services'].append(
            'neutron-openvswitch-agent')
        if not use_dpdk():
            drop_config.append(DPDK_INTERFACES)
            drop_config.append(OVS_DEFAULT)
        elif ovs_has_late_dpdk_init():
            # DPDK in use but initialised late: OVS_DEFAULT is not managed.
            drop_config.append(OVS_DEFAULT)
    else:
        drop_config.extend([OVS_CONF, DPDK_INTERFACES])

    if enable_sriov():
        sriov_agent_name = 'neutron-sriov-agent'
        sriov_resource_map = deepcopy(SRIOV_RESOURCE_MAP)

        if CompareOpenStackReleases(_os_release) < 'mitaka':
            sriov_agent_name = 'neutron-plugin-sriov-agent'
            # Patch resource_map for Kilo and Liberty
            sriov_resource_map[NEUTRON_SRIOV_AGENT_CONF]['services'] = \
                [sriov_agent_name]

        resource_map.update(sriov_resource_map)
        resource_map[NEUTRON_CONF]['services'].append(sriov_agent_name)

    # Use MAAS1.9 for MTU and external port config on xenial and above
    if CompareHostReleases(lsb_release()['DISTRIB_CODENAME']) >= 'xenial':
        drop_config.extend([EXT_PORT_CONF, PHY_NIC_MTU_CONF])

    # Drop candidates may not all be present in the map; ignore misses.
    for _conf in drop_config:
        try:
            del resource_map[_conf]
        except KeyError:
            pass

    return resource_map
    def test_410_rmq_amqp_messages_all_units_ssl_alt_port(self):
        """Send amqp messages with ssl on, to every rmq unit and check
        every rmq unit for messages.  Custom ssl tcp port."""
        # http://pad.lv/1625044
        # Skip the xenial-client/trusty-server combination entirely; the
        # SSL stacks are not mutually compatible (see bug above).
        if (CompareHostReleases(self.client_series) >= 'xenial' and
                CompareHostReleases(self.series) <= 'trusty'):
            u.log.info('SKIP')
            u.log.info('Skipping SSL tests due to client'
                       ' compatibility issues')
            return
        u.log.debug('Checking amqp message publish/get on all units '
                    '(ssl on)...')

        # Publish and consume over TLS on a non-default port (5999).
        sentry_units = self._get_rmq_sentry_units()
        self._test_rmq_amqp_messages_all_units(sentry_units,
                                               ssl=True, port=5999)
        u.log.info('OK\n')
# Example #14 (score: 0)
def assert_charm_supports_ipv6():
    """Check whether we are able to support charms ipv6."""
    release = lsb_release()['DISTRIB_CODENAME'].lower()
    if CompareHostReleases(release) >= "trusty":
        return
    # Pre-trusty hosts cannot run the charm with IPv6: block and bail out.
    msg = ("IPv6 is not supported in the charms for Ubuntu "
           "versions less than Trusty 14.04")
    status_set('blocked', msg)
    raise Exception(msg)
def libvirt_daemon():
    """Resolve the correct name of the libvirt daemon service."""
    codename = lsb_release()['DISTRIB_CODENAME'].lower()
    # Short-circuit: os_release() is only consulted for pre-yakkety hosts.
    if (CompareHostReleases(codename) >= 'yakkety'
            or CompareOpenStackReleases(os_release('nova-common')) >= 'ocata'):
        return LIBVIRTD_DAEMON
    return LIBVIRT_BIN_DAEMON
# Example #16 (score: 0)
 def __call__(self):
     """Build the virtualisation-related template context."""
     ctxt = {
         'resume_guests_state_on_host_boot':
             config('resume-guests-state-on-host-boot'),
     }
     release = lsb_release()['DISTRIB_CODENAME'].lower()
     # virt-type / live-migration only apply on yakkety or newer hosts.
     if CompareHostReleases(release) >= "yakkety":
         ctxt['virt_type'] = config('virt-type')
         ctxt['enable_live_migration'] = config('enable-live-migration')
     return ctxt
def configure_lxd(user='******'):
    ''' Configure lxd use for nova user '''
    codename = lsb_release()['DISTRIB_CODENAME'].lower()
    # LXD requires vivid (15.04) or later; refuse anything older.
    if CompareHostReleases(codename) < "vivid":
        raise Exception("LXD is not supported for Ubuntu "
                        "versions less than 15.04 (vivid)")
    configure_subuid(user)
    lxc_list(user)
def determine_packages_arch():
    '''Generate list of architecture-specific packages'''
    packages = []
    distro_codename = lsb_release()['DISTRIB_CODENAME'].lower()
    if (platform.machine() == 'aarch64'
            and CompareHostReleases(distro_codename) >= 'wily'):
        # AArch64 cloud images require UEFI firmware.
        # BUG FIX: a stray trailing comma previously turned this line into
        # a discarded one-element tuple expression.
        packages.extend(['qemu-efi'])

    return packages
# Example #19 (score: 0)
    def __call__(self):
        """Build the swift storage server template context."""
        # Options whose context key is just the name with '-' -> '_'.
        simple_opts = (
            'account-server-port', 'account-server-port-rep',
            'container-server-port', 'container-server-port-rep',
            'object-server-port', 'object-server-port-rep',
            'object-server-threads-per-disk', 'account-max-connections',
            'container-max-connections', 'object-max-connections',
            'object-replicator-concurrency', 'object-rsync-timeout',
            'object-handoffs-first', 'statsd-host', 'statsd-port',
            'statsd-sample-rate',
        )
        ctxt = {opt.replace('-', '_'): config(opt) for opt in simple_opts}
        ctxt['local_ip'] = unit_private_ip()
        # This context key deliberately differs from its option name.
        ctxt['fallocate_reserve'] = config('file-allocation-reserve')

        # ensure lockup_timeout > rsync_timeout. See bug 1575277
        ctxt['object_lockup_timeout'] = max(config('object-lockup-timeout'),
                                            2 * ctxt['object_rsync_timeout'])

        node_timeout = config('node-timeout')
        if node_timeout:
            ctxt['node_timeout'] = node_timeout
            # docs say this must always be higher
            ctxt['http_timeout'] = max(60, node_timeout + 20)

        # Replication runs as a standalone service after trusty.
        release = lsb_release()['DISTRIB_CODENAME'].lower()
        ctxt['standalone_replicator'] = CompareHostReleases(release) > "trusty"
        return ctxt
def install():
    """Install hook: set up the percona repository/source and install PXC."""
    execd_preinstall()
    codename = lsb_release()['DISTRIB_CODENAME'].lower()
    source = config('source')
    if source is None and CompareHostReleases(codename) < 'trusty':
        # No explicit source on pre-trusty: use the dedicated percona repo.
        setup_percona_repo()
    elif source is not None:
        add_source(source, config('key'))
    apt_update(fatal=True)

    install_percona_xtradb_cluster()
# Example #21 (score: 0)
def install():
    """Install hook: install the required apt packages.

    Works on a copy of the module-level PACKAGES list: the original code
    called remove()/append() directly on PACKAGES, permanently mutating
    the module-level list for any subsequent user in the same process.
    """
    pkgs = list(PACKAGES)
    ubuntu_release = lsb_release()['DISTRIB_CODENAME'].lower()
    if CompareHostReleases(ubuntu_release) >= 'zesty':
        # libnagios-plugin-perl was dropped in zesty.
        pkgs.remove('libnagios-plugin-perl')
        pkgs.append('libnagios-object-perl')
    # NOTE(dosaboy): we currently disallow upgrades due to bug #1382842. This
    # should be removed once the pacemaker package is fixed.
    status_set('maintenance', 'Installing apt packages')
    apt_install(filter_installed_packages(pkgs), fatal=True)
    setup_ocf_files()
def resource_map():
    '''
    Dynamically generate a map of resources that will be managed for a single
    hook execution.
    '''
    # TODO: Cache this on first call?
    if config('virt-type').lower() == 'lxd':
        resource_map = deepcopy(BASE_RESOURCE_MAP)
    else:
        resource_map = deepcopy(LIBVIRT_RESOURCE_MAP)
    net_manager = network_manager()

    # Network manager gets set late by the cloud-compute interface.
    # FlatDHCPManager only requires some extra packages.
    cmp_os_release = CompareOpenStackReleases(os_release('nova-common'))
    if (net_manager in ['flatmanager', 'flatdhcpmanager']
            and config('multi-host').lower() == 'yes'
            and cmp_os_release < 'ocata'):
        resource_map[NOVA_CONF]['services'].extend(
            ['nova-api', 'nova-network'])
    else:
        # Not running nova-api/nova-network: drop their AppArmor profiles.
        resource_map.pop(NOVA_API_AA_PROFILE_PATH)
        resource_map.pop(NOVA_NETWORK_AA_PROFILE_PATH)

    # libvirt-bin was renamed to libvirtd from yakkety / ocata onwards;
    # rewrite the service names accordingly.
    cmp_distro_codename = CompareHostReleases(
        lsb_release()['DISTRIB_CODENAME'].lower())
    if (cmp_distro_codename >= 'yakkety' or cmp_os_release >= 'ocata'):
        for data in resource_map.values():
            if LIBVIRT_BIN_DAEMON in data['services']:
                data['services'].remove(LIBVIRT_BIN_DAEMON)
                data['services'].append(LIBVIRTD_DAEMON)

    # Neutron/quantum requires additional contexts, as well as new resources
    # depending on the plugin used.
    # NOTE(james-page): only required for ovs plugin right now
    if net_manager in ['neutron', 'quantum']:
        resource_map[NOVA_CONF]['contexts'].append(NeutronComputeContext())

    if relation_ids('ceph'):
        # NOTE(review): this mutates the module-level CEPH_RESOURCES dict
        # in place before merging it — presumably intentional; confirm.
        CEPH_RESOURCES[ceph_config_file()] = {
            'contexts': [NovaComputeCephContext()],
            'services': ['nova-compute']
        }
        resource_map.update(CEPH_RESOURCES)

    enable_nova_metadata, _ = nova_metadata_requirement()
    if enable_nova_metadata:
        resource_map[NOVA_CONF]['services'].append('nova-api-metadata')

    # NOTE(james-page): If not on an upstart based system, don't write
    #                   and override file for libvirt-bin.
    if not os.path.exists('/etc/init'):
        del resource_map[LIBVIRT_BIN_OVERRIDES]

    return resource_map
def determine_packages():
    """Return the percona package list appropriate to this Ubuntu release."""
    if CompareHostReleases(lsb_release()['DISTRIB_CODENAME']) < 'wily':
        return [
            'percona-xtradb-cluster-server-5.5',
            'percona-xtradb-cluster-client-5.5',
        ]
    # NOTE(beisner): Use recommended mysql-client package
    # https://launchpad.net/bugs/1476845
    # https://launchpad.net/bugs/1571789
    return [
        'percona-xtradb-cluster-server-5.6',
    ]
def configure_sstuser(sst_password):
    """Create/refresh the SST database user with the required grants."""
    # xtrabackup 2.4 (introduced in Bionic) needs PROCESS privilege for backups
    permissions = ["RELOAD", "LOCK TABLES", "REPLICATION CLIENT"]
    if CompareHostReleases(lsb_release()['DISTRIB_CODENAME']) >= 'bionic':
        permissions.append('PROCESS')

    grants = ','.join(permissions)
    helper = get_db_helper()
    helper.connect(password=helper.get_mysql_root_password())
    # Apply the same grants for both the IPv4 and IPv6 user definitions.
    for statement in (SQL_SST_USER_SETUP, SQL_SST_USER_SETUP_IPV6):
        helper.execute(statement.format(permissions=grants,
                                        password=sst_password))
# Example #25 (score: 0)
def setup_ipv6():
    """Validate IPv6 support and ensure an IPv6-capable haproxy is used."""
    ubuntu_rel = lsb_release()['DISTRIB_CODENAME'].lower()
    if CompareHostReleases(ubuntu_rel) < "trusty":
        raise Exception("IPv6 is not supported in the charms for Ubuntu "
                        "versions less than Trusty 14.04")

    # Need haproxy >= 1.5.3 for ipv6 so for Trusty if we are <= Kilo we need to
    # use trusty-backports otherwise we can use the UCA.
    needs_backports = (
        ubuntu_rel == 'trusty'
        and CompareOpenStackReleases(os_release('glance')) < 'liberty')
    if needs_backports:
        add_source('deb http://archive.ubuntu.com/ubuntu trusty-backports '
                   'main')
        apt_update()
        apt_install('haproxy/trusty-backports', fatal=True)
def determine_packages():
    """Return the percona package list appropriate to this Ubuntu release."""
    if CompareHostReleases(lsb_release()['DISTRIB_CODENAME']) < 'wily':
        return [
            'percona-xtradb-cluster-server-5.5',
            'percona-xtradb-cluster-client-5.5',
        ]
    # NOTE(beisner): Use recommended mysql-client package
    # https://launchpad.net/bugs/1476845
    # https://launchpad.net/bugs/1571789
    # NOTE(coreycb): This will install percona-xtradb-cluster-server-5.6
    # for >= wily and percona-xtradb-cluster-server-5.7 for >= bionic.
    return [
        'percona-xtradb-cluster-server',
    ]
# Example #27 (score: 0)
def determine_packages():
    """Return the LXD charm package list for this host."""
    # De-duplicate the base package set.
    packages = list(set(BASE_PACKAGES))

    # criu package doesn't exist for arm64/s390x prior to artful
    pre_artful = (
        CompareHostReleases(lsb_release()['DISTRIB_CODENAME']) < 'artful')
    if pre_artful and platform.machine() in ('arm64', 's390x'):
        packages.remove('criu')

    extra = LXD_SOURCE_PACKAGES if config('use-source') else LXD_PACKAGES
    packages.extend(extra)
    return packages
# Example #28 (score: 0)
def get_packages():
    '''Return a list of packages for install based on the configured plugin'''
    plugin = config('plugin')
    # Copy the base list: it is mutated in place below.
    packages = deepcopy(GATEWAY_PKGS[plugin])
    cmp_os_source = CompareOpenStackReleases(os_release('neutron-common'))
    cmp_host_release = CompareHostReleases(lsb_release()['DISTRIB_CODENAME'])
    if plugin == OVS:
        if (cmp_os_source >= 'icehouse' and cmp_os_source < 'mitaka'
                and cmp_host_release < 'utopic'):
            # NOTE(jamespage) neutron-vpn-agent supercedes l3-agent for
            # icehouse but openswan was removed in utopic.
            packages.remove('neutron-l3-agent')
            packages.append('neutron-vpn-agent')
            packages.append('openswan')
        if cmp_os_source >= 'liberty':
            # Switch out mysql driver
            packages.remove('python-mysqldb')
            packages.append('python-pymysql')
        if cmp_os_source >= 'mitaka':
            # Switch out to actual ovs agent package
            packages.remove('neutron-plugin-openvswitch-agent')
            packages.append('neutron-openvswitch-agent')
        if cmp_os_source >= 'kilo':
            packages.append('python-neutron-fwaas')
    if plugin in (OVS, OVS_ODL):
        if cmp_os_source >= 'newton':
            # LBaaS v1 dropped in newton
            packages.remove('neutron-lbaas-agent')
            packages.append('neutron-lbaasv2-agent')
        if cmp_os_source >= 'train':
            # LBaaS v2 dropped in train
            packages.remove('neutron-lbaasv2-agent')

    if disable_nova_metadata(cmp_os_source):
        packages.remove('nova-api-metadata')
    packages.extend(determine_l3ha_packages())

    # rocky onwards is Python 3 only: swap all python- packages for py3 ones.
    if cmp_os_source >= 'rocky':
        packages = [p for p in packages if not p.startswith('python-')]
        packages.extend(PY3_PACKAGES)
        if cmp_os_source >= 'train':
            packages.remove('python3-neutron-lbaas')
        # Remove python3-neutron-fwaas from stein release as the package is
        # included as dependency for neutron-l3-agent.
        if cmp_os_source >= 'stein':
            packages.remove('python3-neutron-fwaas')

    return packages
# Example #29 (score: 0)
def update_nrpe_config():
    """Install NRPE plugin dependencies, the ceph-osd check scripts and a
    root cron collector, then register the 'ceph-osd' check with NRPE."""
    # python-dbus is used by check_upstart_job
    # fasteners is used by apt_install collect_ceph_osd_services.py
    pkgs = ['python3-dbus']
    if CompareHostReleases(lsb_release()['DISTRIB_CODENAME']) >= 'bionic':
        pkgs.append('python3-fasteners')
    apt_install(pkgs)

    # copy the check and collect files over to the plugins directory
    charm_dir = os.environ.get('CHARM_DIR', '')
    nagios_plugins = '/usr/local/lib/nagios/plugins'
    # Grab nagios user/group ID's from original source
    _dir = os.stat(nagios_plugins)
    uid = _dir.st_uid
    gid = _dir.st_gid
    for name in ('collect_ceph_osd_services.py', 'check_ceph_osd_services.py'):
        target = os.path.join(nagios_plugins, name)
        shutil.copy(os.path.join(charm_dir, 'files', 'nagios', name), target)
        os.chown(target, uid, gid)

    hostname = nrpe.get_nagios_hostname()
    current_unit = nrpe.get_nagios_unit_name()

    # BUG#1810749 - the nagios user can't access /var/lib/ceph/.. and that's a
    # GOOD THING, as it keeps ceph secure from Nagios.  However, to check
    # whether ceph is okay, the check_systemd.py or 'status ceph-osd' still
    # needs to be called with the contents of ../osd/ceph-*/whoami files.  To
    # get around this conundrum, instead a cron.d job that runs as root will
    # perform the checks every minute, and write to a tempory file the results,
    # and the nrpe check will grep this file and error out (return 2) if the
    # first 3 characters of a line are not 'OK:'.

    # Per-minute root cron job; output goes to syslog tagged 'check-osd'.
    cmd = ('MAILTO=""\n'
           '* * * * * root '
           '/usr/local/lib/nagios/plugins/collect_ceph_osd_services.py'
           ' 2>&1 | logger -t check-osd\n')
    with open(CRON_CEPH_CHECK_FILE, "wt") as f:
        f.write(cmd)

    nrpe_cmd = '/usr/local/lib/nagios/plugins/check_ceph_osd_services.py'

    # Register the check that reads the collector's output file.
    nrpe_setup = nrpe.NRPE(hostname=hostname)
    nrpe_setup.add_check(
        shortname='ceph-osd',
        description='process check {%s}' % current_unit,
        check_cmd=nrpe_cmd
    )
    nrpe_setup.write()
# Example #30 (score: 0)
def setup_ipv6():
    """Validate IPv6 support and ensure haproxy >= 1.5.3 is installed,
    pulling from trusty-backports when the archive version is too old."""
    ubuntu_rel = lsb_release()['DISTRIB_CODENAME'].lower()
    if CompareHostReleases(ubuntu_rel) < "trusty":
        raise Exception("IPv6 is not supported in the charms for Ubuntu "
                        "versions less than Trusty 14.04")

    # Deferred import: apt bindings are only needed (and available) here.
    from apt import apt_pkg
    apt_pkg.init()

    # Need haproxy >= 1.5.3 for ipv6 so for Trusty if we are <= Kilo we need to
    # use trusty-backports otherwise we can use the UCA.
    # version_compare returns -1 when the installed haproxy is older.
    vc = apt_pkg.version_compare(get_pkg_version('haproxy'), '1.5.3')
    if ubuntu_rel == 'trusty' and vc == -1:
        add_source('deb http://archive.ubuntu.com/ubuntu trusty-backports '
                   'main')
        apt_update(fatal=True)
        apt_install('haproxy/trusty-backports', fatal=True)