def bootstrap_pxc():
    """Bootstrap PXC
    On systemd systems systemctl bootstrap-pxc mysql does not work.
    Run service mysql bootstrap-pxc to bootstrap."""
    service('stop', 'mysql')
    bootstrapped = service('bootstrap-pxc', 'mysql')
    if not bootstrapped:
        try:
            cmp_os = CompareHostReleases(
                lsb_release()['DISTRIB_CODENAME']
            )
            if cmp_os < 'bionic':
                # NOTE(jamespage): execute under systemd-run to ensure
                #                  that the bootstrap-pxc mysqld does
                #                  not end up in the juju unit daemons
                #                  cgroup scope.
                cmd = ['systemd-run', '--service-type=forking',
                       'service', 'mysql', 'bootstrap-pxc']
                subprocess.check_call(cmd)
            else:
                service('start', 'mysql@bootstrap')
        except subprocess.CalledProcessError as e:
            msg = 'Bootstrap PXC failed'
            error_msg = '{}: {}'.format(msg, e)
            status_set('blocked', msg)
            log(error_msg, ERROR)
            raise Exception(error_msg)
        if CompareHostReleases(lsb_release()['DISTRIB_CODENAME']) < 'bionic':
            # Make systemd aware that mysql is running after a bootstrap
            service('start', 'mysql')
    log("Bootstrap PXC Succeeded", DEBUG)
    def check_bigtop_repo_package(self, pkg):
        """
        Return the package version from the repo if it differs from the
        version that is currently installed.

        :param: str pkg: package name as known by the package manager
        :returns: str ver_str: version of new repo package, or None
        """
        distro = lsb_release()['DISTRIB_ID'].lower()
        if distro == 'ubuntu':
            # NB: we cannot use the charmhelpers.fetch.apt_cache nor the
            # apt module from the python3-apt deb as they are only available
            # as system packages. Charms with use_system_packages=False in
            # layer.yaml would fail. Shell out to 'apt-cache madison' instead.
            madison_cmd = ['apt-cache', 'madison', pkg]
            grep_cmd = ['grep', self.bigtop_apt]
            p1 = subprocess.Popen(madison_cmd, stdout=subprocess.PIPE)
            p2 = subprocess.Popen(grep_cmd, stdin=p1.stdout,
                                  stdout=subprocess.PIPE)
            p1.stdout.close()
            madison_output = p2.communicate()[0].strip().decode()
            p1.wait()

            # madison_output will look like this:
            #  spark-core |    2.1.1-1 | <repo>
            try:
                ver_str = madison_output.split('|')[1].strip()
            except IndexError:
                hookenv.log(
                    'Could not find {} in the configured repo'.format(pkg),
                    hookenv.WARNING)
                return None
            return ver_str if ver_str != get_package_version(pkg) else None
        else:
            raise BigtopError(u"Repo query is only supported on Ubuntu")
    def update_bigtop_repo(self, remove=False):
        """
        Add or Remove a bigtop repository.

        Typically, the Bigtop repository is configured when running 'puppet
        apply'. However, sometimes the system needs to know about a repo
        outside of puppet. For example, when changing the bigtop version, we
        use this method to add a new repo and query for package updates
        without actually installing anything.

        :param: bool remove: True to remove the repo; False to add it
        """
        distro = lsb_release()['DISTRIB_ID'].lower()
        if distro == 'ubuntu':
            repo = "deb {} bigtop contrib".format(self.bigtop_apt)
            flags = '-yur' if remove else '-yu'
            cmd = ['add-apt-repository', flags, repo]
            try:
                subprocess.check_call(cmd)
            except subprocess.CalledProcessError:
                hookenv.log('Failed to update the bigtop repo with: {}'.format(
                            ' '.join(cmd)), hookenv.ERROR)
            else:
                hookenv.log('Successfully updated the bigtop repo',
                            hookenv.INFO)
Example 4
def rerender_service_template():
    ''' If we change proxy settings, re-render the bootstrap service definition
    and attempt to resume where we left off.  '''

    # Note: At this point, if we hijack the workload daemon, heavy-fisted
    # repercussions will occur, like disruption of services.

    codename = host.lsb_release()['DISTRIB_CODENAME']
    # By default, don't restart the daemon unless we have previously rendered
    # system files.

    # Deterministic method to probe if we actually need to restart the
    # daemon.
    reboot = (os.path.exists('/lib/systemd/system/bootstrap-docker.service') or
              os.path.exists('/etc/init/bootstrap-docker.conf'))

    if codename != "trusty":
        # Handle SystemD
        render('bootstrap-docker.service',
               '/lib/systemd/system/bootstrap-docker.service',
               config(), owner='root', group='root')
        cmd = ["systemctl", "daemon-reload"]
        check_call(cmd)
    else:
        # Handle Upstart
        render('bootstrap-docker.upstart',
               '/etc/init/bootstrap-docker.conf',
               config(), owner='root', group='root')

    if reboot:
        service_restart('bootstrap-docker')
Example 5
def mongodb(ver=None):
    if not ver and installed():
        ver = version()
    if platform.machine() in _arch_map:
        return _arch_map[platform.machine()]('archive')
    if not ver or ver == 'archive':
        distro = lsb_release()['DISTRIB_CODENAME']
        if distro not in _distro_map.keys():
            _msg = 'Unknown distribution: {0}. Please deploy only on: {1}'
            raise Exception(_msg.format(distro, _distro_map.keys()))

        return _distro_map[distro]('archive')

    def subclasses(cls):
        return cls.__subclasses__() + [g for s in cls.__subclasses__()
                                       for g in subclasses(s)]

    def search(version):
        # Does a countdown search of the version until the next lowest match.
        # As long as it doesn't drop below a major version, things should be
        # good.
        major, minor = [c for c in version.replace('.', '')[:2]]
        minor_range = reversed(range(0, int(minor) + 1))
        needles = ['MongoDB{0}{1}'.format(major, v) for v in minor_range]

        for needle in needles:
            for m in subclasses(MongoDB):
                if m.__name__ == needle:
                    return m('upstream', version)

        warnings.warn('No viable major version found')
        return None

    return search(ver)
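# A self-contained sketch of the countdown search above, using a toy class
# hierarchy (class names are illustrative, not the real MongoDB classes):
class Base(object):
    pass

class Base30(Base):
    pass

class Base31(Base30):
    pass

def _subclasses(cls):
    return cls.__subclasses__() + [g for s in cls.__subclasses__()
                                   for g in _subclasses(s)]

def _search(version):
    major, minor = version.replace('.', '')[:2]
    for v in reversed(range(0, int(minor) + 1)):
        for m in _subclasses(Base):
            if m.__name__ == 'Base{}{}'.format(major, v):
                return m
    return None

assert _search('3.2') is Base31  # no Base32, so it counts down to Base31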
Example 6
def install():
    status_set('maintenance', 'Executing pre-install')
    execd_preinstall()
    src = config('openstack-origin')
    if (lsb_release()['DISTRIB_CODENAME'] == 'precise' and
            src == 'distro'):
        src = 'cloud:precise-folsom'
    configure_installation_source(src)
    status_set('maintenance', 'Installing apt packages')
    apt_update(fatal=True)
    apt_install('python-six', fatal=True)  # Force upgrade
    if valid_plugin():
        apt_install(filter_installed_packages(get_early_packages()),
                    fatal=True)
        apt_install(filter_installed_packages(get_packages()),
                    fatal=True)
        status_set('maintenance', 'Git install')
        git_install(config('openstack-origin-git'))
    else:
        message = 'Please provide a valid plugin config'
        log(message, level=ERROR)
        status_set('blocked', message)
        sys.exit(1)

    # Legacy HA for Icehouse
    update_legacy_ha_files()
def get_package_version(pkg):
    """
    Return a version string for an installed package name.

    :param: str pkg: package name as known by the package manager
    :returns: str ver_str: version string from package manager, or empty string
    """
    if pkg:
        distro = lsb_release()['DISTRIB_ID'].lower()
        ver_str = ''
        if distro == 'ubuntu':
            # NB: we cannot use the charmhelpers.fetch.apt_cache nor the
            # apt module from the python3-apt deb as they are only available
            # as system packages. Any charm with use_system_packages=False in
            # layer.yaml would fail. Use dpkg-query instead.
            cmd = ['dpkg-query', '--show', r'--showformat=${Version}', pkg]
            try:
                ver_str = subprocess.check_output(cmd).strip().decode()
            except subprocess.CalledProcessError as e:
                hookenv.log(
                    'Error getting local package version: {}'.format(e.output),
                    hookenv.ERROR)
        return ver_str
    else:
        raise BigtopError(u"Valid package name required")
Example 8
def install():
    """
    Install jenkins-job-builder from an archive, remote git repository or a
    locally bundled copy shipped with the charm.  A locally bundled copy
    overrides the 'jjb-install-source' setting.
    """
    if not os.path.isdir(CONFIG_DIR):
        os.mkdir(CONFIG_DIR)
    src = config('jjb-install-source')
    tarball = os.path.join(charm_dir(), 'files', TARBALL)

    if os.path.isfile(tarball):
        log('Installing jenkins-job-builder from bundled file: %s.' % tarball)
        install_from_file(tarball)
    elif src.startswith('git://'):
        log('Installing jenkins-job-builder from remote git: %s.' % src)
        install_from_git(src)
    elif src == 'distro':
        log('Installing jenkins-job-builder from Ubuntu archive.')
        if lsb_release()['DISTRIB_CODENAME'] in ['precise', 'quantal']:
            m = ('jenkins-job-builder package only available in Ubuntu 13.04 '
                 'and later.')
            raise Exception(m)
        apt_update(fatal=True)
        apt_install(['jenkins-job-builder', 'python-pbr'],
                    fatal=True)
    else:
        m = ('Must specify a git url as install source or bundled source with '
             'the charm.')
        log(m, ERROR)
        raise Exception(m)
Example 9
def install_dockerbeat():
    ''' Installs dockerbeat from resources, with a fallback option
    to fetch it over the network for 1.25.5 hosts. '''

    try:
        bin_path = resource_get('dockerbeat')
    except NotImplementedError:
        # Attempt to fetch and install from configured uri with validation
        bin_path = download_from_upstream()

    full_beat_path = '/usr/local/bin/dockerbeat'

    if not bin_path:
        status_set('blocked', 'Missing dockerbeat binary')
        return

    install(bin_path, full_beat_path)
    os.chmod(full_beat_path, 0o755)

    codename = lsb_release()['DISTRIB_CODENAME']

    # render the appropriate init system's configuration
    if codename == 'trusty':
        render('upstart', '/etc/init/dockerbeat.conf', {})
    else:
        render('systemd', '/etc/systemd/system/dockerbeat.service', {})

    set_state('dockerbeat.installed')
Example 10
def check_for_upgrade():
    release_info = host.lsb_release()
    if release_info['DISTRIB_CODENAME'] != 'trusty':
        log("Invalid upgrade path from {}.  Only trusty is currently "
            "supported".format(release_info['DISTRIB_CODENAME']))
        return

    c = hookenv.config()
    old_version = c.previous('source')
    log('old_version: {}'.format(old_version))
    new_version = hookenv.config('source')
    if new_version:
        # Strip all whitespace
        new_version = new_version.replace(' ', '')
    log('new_version: {}'.format(new_version))

    if old_version in upgrade_paths:
        if new_version == upgrade_paths[old_version]:
            log("{} to {} is a valid upgrade path.  Proceeding.".format(
                old_version, new_version))
            roll_monitor_cluster(new_version)
        else:
            # Log a helpful error message
            log("Invalid upgrade path from {} to {}.  "
                "Valid paths are: {}".format(old_version,
                                             new_version,
                                             pretty_print_upgrade_paths()))
def _get_firewall_driver(ovs_ctxt):
    '''
    Determine the firewall driver to use based on configuration,
    OpenStack and Ubuntu releases.

    @returns str: firewall driver to use for OpenvSwitch
    '''
    driver = config('firewall-driver') or IPTABLES_HYBRID
    release = lsb_release()['DISTRIB_CODENAME']
    if driver not in VALID_FIREWALL_DRIVERS:
        return IPTABLES_HYBRID

    if driver == IPTABLES_HYBRID and ovs_ctxt['enable_nsg_logging']:
        msg = "NSG logging can not be enabled - need to set " \
              "firewall driver to 'openvswitch' explicitly"
        log(msg, "WARN")

    if (driver == OPENVSWITCH and
            CompareHostReleases(release) < 'xenial'):
        # NOTE(jamespage): Switch back to iptables_hybrid for
        #                  Ubuntu releases prior to Xenial due
        #                  to requirements for Linux >= 4.4 and
        #                  Open vSwitch >= 2.5
        return IPTABLES_HYBRID

    return driver
Example 12
def get_packages():
    '''Return a list of packages for install based on the configured plugin'''
    plugin = remap_plugin(config('plugin'))
    packages = deepcopy(GATEWAY_PKGS[networking_name()][plugin])
    source = get_os_codename_install_source(config('openstack-origin'))
    if plugin == 'ovs':
        if (source >= 'icehouse' and
                lsb_release()['DISTRIB_CODENAME'] < 'utopic'):
            # NOTE(jamespage) neutron-vpn-agent supersedes l3-agent for
            # icehouse but openswan was removed in utopic.
            packages.remove('neutron-l3-agent')
            packages.append('neutron-vpn-agent')
            packages.append('openswan')
        if source >= 'kilo':
            packages.append('python-neutron-fwaas')
        if source >= 'liberty':
            # Switch out mysql driver
            packages.remove('python-mysqldb')
            packages.append('python-pymysql')
            # Switch out to actual metering agent package
            packages.remove('neutron-plugin-metering-agent')
            packages.append('neutron-metering-agent')
    packages.extend(determine_l3ha_packages())

    if git_install_requested():
        packages = list(set(packages))
        packages.extend(BASE_GIT_PACKAGES)
        # don't include packages that will be installed from git
        for p in GIT_PACKAGE_BLACKLIST:
            if p in packages:
                packages.remove(p)

    return packages
Example 13
    def __init__(self):
        self.options = layer.options('puppet-base')
        self.puppet_pkg = self.options.get('puppet-srvc')
        self.puppet_base_url = 'http://apt.puppetlabs.com'
        self.puppet_gpg_key = config['puppet-gpg-key']
        self.puppet_exe = '/opt/puppetlabs/bin/puppet'
        self.facter_exe = '/opt/puppetlabs/bin/facter'
        self.puppet_conf_dir = '/etc/puppetlabs/puppet'
        self.modules_dir = '/etc/puppetlabs/code/modules/'
        self.puppet_apt_src = \
            'deb %s %s PC1' % (self.puppet_base_url,
                               lsb_release()['DISTRIB_CODENAME'])
        # Determine puppet apt package
        if self.puppet_pkg == 'master':
            self.puppet_apt_pkg = 'puppetserver'
            self.puppet_srvc = self.puppet_apt_pkg
        elif self.puppet_pkg == 'agent':
            self.puppet_apt_pkg = 'puppet-agent'
            self.puppet_srvc = 'puppet'
        elif self.puppet_pkg == 'db':
            self.puppet_apt_pkg = 'puppetdb'
            self.puppet_srvc = self.puppet_apt_pkg
        elif self.puppet_pkg == 'ca':
            self.puppet_apt_pkg = 'puppetserver'
            self.puppet_srvc = self.puppet_apt_pkg
        elif self.puppet_pkg == 'standalone':
            self.puppet_apt_pkg = 'puppet-agent'
            self.puppet_srvc = None
        else:
            raise PuppetException(
                "puppet-srvc option value '{}' unknown. Please change this "
                "option in the puppet-base layer options.".format(
                    self.puppet_pkg))
def install():
    status_set('maintenance', 'Executing pre-install')
    execd_preinstall()
    src = config('openstack-origin')
    if (lsb_release()['DISTRIB_CODENAME'] == 'precise' and
            src == 'distro'):
        src = 'cloud:precise-icehouse'
    configure_installation_source(src)
    status_set('maintenance', 'Installing apt packages')
    apt_update(fatal=True)
    apt_install('python-six', fatal=True)  # Force upgrade
    if valid_plugin():
        apt_install(filter_installed_packages(get_early_packages()),
                    fatal=True)
        apt_install(filter_installed_packages(get_packages()),
                    fatal=True)
    else:
        message = 'Please provide a valid plugin config'
        log(message, level=ERROR)
        status_set('blocked', message)
        sys.exit(1)

    # Legacy HA for Icehouse
    update_legacy_ha_files()

    # Install systemd overrides to remove service startup race between
    # n-gateway and n-cloud-controller services.
    install_systemd_override()
Example 15
def get_packages():
    """Return a list of packages for install based on the configured plugin"""
    plugin = remap_plugin(config("plugin"))
    packages = deepcopy(GATEWAY_PKGS[networking_name()][plugin])
    source = get_os_codename_install_source(config("openstack-origin"))
    if plugin == "ovs":
        if source >= "icehouse" and lsb_release()["DISTRIB_CODENAME"] < "utopic":
            # NOTE(jamespage) neutron-vpn-agent supersedes l3-agent for
            # icehouse but openswan was removed in utopic.
            packages.remove("neutron-l3-agent")
            packages.append("neutron-vpn-agent")
            packages.append("openswan")
        if source >= "kilo":
            packages.append("python-neutron-fwaas")
        if source >= "liberty":
            # Switch out mysql driver
            packages.remove("python-mysqldb")
            packages.append("python-pymysql")
            # Switch out to actual metering agent package
            packages.remove("neutron-plugin-metering-agent")
            packages.append("neutron-metering-agent")
    packages.extend(determine_l3ha_packages())

    if git_install_requested():
        packages = list(set(packages))
        packages.extend(BASE_GIT_PACKAGES)
        # don't include packages that will be installed from git
        for p in GIT_PACKAGE_BLACKLIST:
            if p in packages:
                packages.remove(p)

    return packages
Example 16
def configure_lxd_host():
    ubuntu_release = lsb_release()['DISTRIB_CODENAME'].lower()
    if ubuntu_release > "vivid":
        log('>= Wily deployment - configuring LXD trust password and address',
            level=INFO)
        cmd = ['lxc', 'config', 'set',
               'core.trust_password', lxd_trust_password()]
        check_call(cmd)
        cmd = ['lxc', 'config', 'set',
               'core.https_address', '[::]']
        check_call(cmd)

        if ubuntu_release == 'xenial':
            apt_install('linux-image-extra-%s' % os.uname()[2],
                        fatal=True)

        if ubuntu_release >= 'xenial':
            modprobe('netlink_diag')
    elif ubuntu_release == "vivid":
        log('Vivid deployment - loading overlay kernel module', level=INFO)
        cmd = ['modprobe', 'overlay']
        check_call(cmd)
        with open('/etc/modules', 'r+') as modules:
            if 'overlay' not in modules.read():
                modules.write('overlay')
Example 17
    def get_kexs(self, allow_weak_kex):
        if allow_weak_kex:
            weak_kex = 'weak'
        else:
            weak_kex = 'default'

        default = 'diffie-hellman-group-exchange-sha256'
        weak = (default + ',diffie-hellman-group14-sha1,'
                'diffie-hellman-group-exchange-sha1,'
                'diffie-hellman-group1-sha1')
        kex = {'default': default,
               'weak': weak}

        default = ('curve25519-sha256@libssh.org,'
                   'diffie-hellman-group-exchange-sha256')
        weak = (default + ',diffie-hellman-group14-sha1,'
                'diffie-hellman-group-exchange-sha1,'
                'diffie-hellman-group1-sha1')
        kex_66 = {'default': default,
                  'weak': weak}

        # Use newer kex on Ubuntu Trusty and above
        if lsb_release()['DISTRIB_CODENAME'].lower() >= 'trusty':
            log('Detected Ubuntu 14.04 or newer, using new key exchange '
                'algorithms', level=DEBUG)
            kex = kex_66

        return kex[weak_kex]
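# Usage sketch: the comma-separated string returned above is intended for the
# KexAlgorithms directive in sshd_config, e.g. (illustrative):
#   KexAlgorithms curve25519-sha256@libssh.org,diffie-hellman-group-exchange-sha256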
Example 18
def get_os_codename_install_source(src):
    '''Derive OpenStack release codename from a given installation source.'''
    ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
    rel = ''
    if src is None:
        return rel
    if src in ['distro', 'distro-proposed']:
        try:
            rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel]
        except KeyError:
            e = 'Could not derive openstack release for '\
                'this Ubuntu release: %s' % ubuntu_rel
            error_out(e)
        return rel

    if src.startswith('cloud:'):
        ca_rel = src.split(':')[1]
        ca_rel = ca_rel.split('%s-' % ubuntu_rel)[1].split('/')[0]
        return ca_rel

    # Best guess match based on deb string provided
    if src.startswith('deb') or src.startswith('ppa'):
        for k, v in six.iteritems(OPENSTACK_CODENAMES):
            if v in src:
                return v
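# A worked example of the 'cloud:' parsing above (sample values, assuming a
# trusty host): 'cloud:trusty-kilo/proposed' reduces to the UCA release 'kilo'.
src = 'cloud:trusty-kilo/proposed'
ubuntu_rel = 'trusty'
ca_rel = src.split(':')[1]
ca_rel = ca_rel.split('%s-' % ubuntu_rel)[1].split('/')[0]
assert ca_rel == 'kilo'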
Example 19
def deploy_docker_bootstrap_daemon():
    ''' This is a nifty trick. We're going to init and start
    a secondary docker engine instance to run applications that
    can modify the "workload docker engine" '''
    # Render static template for init job
    status_set('maintenance', 'Configuring bootstrap docker daemon.')
    codename = host.lsb_release()['DISTRIB_CODENAME']

    # Render static template for daemon options
    render('bootstrap-docker.defaults', '/etc/default/bootstrap-docker', {},
           owner='root', group='root')

    # The templates are static, but running through the templating engine for
    # future modification. This doesn't add much overhead.
    if codename == 'trusty':
        render('bootstrap-docker.upstart', '/etc/init/bootstrap-docker.conf',
               {}, owner='root', group='root')
    else:
        # Render the service definition
        render('bootstrap-docker.service',
               '/lib/systemd/system/bootstrap-docker.service',
               {}, owner='root', group='root')
        # let systemd allocate the unix socket
        render('bootstrap-docker.socket',
               '/lib/systemd/system/bootstrap-docker.socket',
               {}, owner='root', group='root')
        # this creates the proper symlinks in /etc/systemd/system path
        check_call(split('systemctl enable /lib/systemd/system/bootstrap-docker.socket'))  # noqa
        check_call(split('systemctl enable /lib/systemd/system/bootstrap-docker.service'))  # noqa

    # start the bootstrap daemon
    service_restart('bootstrap-docker')
    set_state('bootstrap_daemon.available')
Example 20
    def all_packages(self):
        _packages = self.packages[:]
        if host.lsb_release()['DISTRIB_RELEASE'] > '14.04':
            _packages.append('tox')
        else:
            _packages.append('python-tox')
        return _packages
Example 21
    def memcache(self):
        ctxt = {}
        ctxt['use_memcache'] = False
        if self.charm_instance:
            if (ch_utils.OPENSTACK_RELEASES.index(
                    self.charm_instance.release) >=
                    ch_utils.OPENSTACK_RELEASES.index('mitaka')):
                ctxt['use_memcache'] = True

        if ctxt['use_memcache']:
            # Trusty version of memcached does not support ::1 as a listen
            # address so use host file entry instead
            release = ch_host.lsb_release()['DISTRIB_CODENAME'].lower()
            if ch_ip.is_ipv6_disabled():
                if ch_host.CompareHostReleases(release) > 'trusty':
                    ctxt['memcache_server'] = '127.0.0.1'
                else:
                    ctxt['memcache_server'] = 'localhost'
                ctxt['memcache_server_formatted'] = '127.0.0.1'
                ctxt['memcache_port'] = '11211'
                ctxt['memcache_url'] = '{}:{}'.format(
                    ctxt['memcache_server_formatted'],
                    ctxt['memcache_port'])
            else:
                if ch_host.CompareHostReleases(release) > 'trusty':
                    ctxt['memcache_server'] = '::1'
                else:
                    ctxt['memcache_server'] = 'ip6-localhost'
                ctxt['memcache_server_formatted'] = '[::1]'
                ctxt['memcache_port'] = '11211'
                ctxt['memcache_url'] = 'inet6:{}:{}'.format(
                    ctxt['memcache_server_formatted'],
                    ctxt['memcache_port'])
        return ctxt
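# For illustration, on a xenial host with IPv6 disabled the branches above
# produce this context (values inferred from the code, not captured output):
expected_ctxt = {
    'use_memcache': True,
    'memcache_server': '127.0.0.1',
    'memcache_server_formatted': '127.0.0.1',
    'memcache_port': '11211',
    'memcache_url': '127.0.0.1:11211',
}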
Example 22
def add_source(source, key=None):
    if source is None:
        log('Source is not present. Skipping')
        return

    if (source.startswith('ppa:') or
        source.startswith('http') or
        source.startswith('deb ') or
            source.startswith('cloud-archive:')):
        subprocess.check_call(['add-apt-repository', '--yes', source])
    elif source.startswith('cloud:'):
        apt_install(filter_installed_packages(['ubuntu-cloud-keyring']),
                    fatal=True)
        pocket = source.split(':')[-1]
        if pocket not in CLOUD_ARCHIVE_POCKETS:
            raise SourceConfigError(
                'Unsupported cloud: source option %s' %
                pocket)
        actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket]
        with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt:
            apt.write(CLOUD_ARCHIVE.format(actual_pocket))
    elif source == 'proposed':
        release = lsb_release()['DISTRIB_CODENAME']
        with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
            apt.write(PROPOSED_POCKET.format(release))
    if key:
        subprocess.check_call(['apt-key', 'adv', '--keyserver',
                               'hkp://keyserver.ubuntu.com:80', '--recv',
                               key])
def determine_packages_arch():
    '''Generate list of architecture-specific packages'''
    packages = []
    distro_codename = lsb_release()['DISTRIB_CODENAME'].lower()
    if platform.machine() == 'aarch64' and distro_codename >= 'wily':
        packages.extend(['qemu-efi'])  # AArch64 cloud images require UEFI fw

    return packages
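# Note the plain string comparison above: it happens to hold for the
# wily/xenial window, but lexicographic order is not release order in
# general, which is why other examples here use CompareHostReleases:
assert 'xenial' >= 'wily'        # true, and correct
assert not ('artful' >= 'wily')  # false lexically, though artful (17.10) is newer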
def libvirt_daemon():
    '''Resolve the correct name of the libvirt daemon service'''
    distro_codename = lsb_release()['DISTRIB_CODENAME'].lower()
    if (distro_codename >= 'yakkety' or
            os_release('nova-common') >= 'ocata'):
        return LIBVIRTD_DAEMON
    else:
        return LIBVIRT_BIN_DAEMON
Example 25
    def __init__(self, source, version=None):
        lsb = lsb_release()
        year = lsb['DISTRIB_RELEASE'].split('.')[0]
        if int(year) < 16:
            distrib = lsb['DISTRIB_CODENAME']
            raise Exception('{0} is not deployable on zSeries'.format(distrib))

        super(MongoDBzSeries, self).__init__(source, version)
    def __call__(self):
        ctxt = {}
        if lsb_release()['DISTRIB_CODENAME'].lower() >= "yakkety":
            ctxt['virt_type'] = config('virt-type')
            ctxt['enable_live_migration'] = config('enable-live-migration')
        ctxt['resume_guests_state_on_host_boot'] = \
            config('resume-guests-state-on-host-boot')
        return ctxt
Example 27
def assert_charm_supports_ipv6():
    """Check whether we are able to support charms ipv6."""
    _release = lsb_release()['DISTRIB_CODENAME'].lower()
    if CompareHostReleases(_release) < "trusty":
        msg = "IPv6 is not supported in the charms for Ubuntu " \
              "versions less than Trusty 14.04"
        status_set('blocked', msg)
        raise Exception(msg)
Example 28
def _add_apt_repository(spec):
    """Add the spec using add_apt_repository

    :param spec: the parameter to pass to add_apt_repository
    """
    if '{series}' in spec:
        series = lsb_release()['DISTRIB_CODENAME']
        spec = spec.replace('{series}', series)
    _run_with_retries(['add-apt-repository', '--yes', spec])
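# A usage sketch (hypothetical spec, illustrative URL) showing the '{series}'
# expansion performed above on a bionic host:
spec = 'deb http://ppa.example.com/ubuntu {series} main'
assert (spec.replace('{series}', 'bionic')
        == 'deb http://ppa.example.com/ubuntu bionic main')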
Example 29
def reload_system_daemons():
    ''' Reload the system daemons from on-disk configuration changes '''
    lsb = lsb_release()
    code = lsb['DISTRIB_CODENAME']
    if code != 'trusty':
        command = ['systemctl', 'daemon-reload']
        check_call(command)
    else:
        service_reload('docker')
def configure_lxd(user='******'):
    ''' Configure lxd use for nova user '''
    if not git_install_requested():
        if lsb_release()['DISTRIB_CODENAME'].lower() < "vivid":
            raise Exception("LXD is not supported for Ubuntu "
                            "versions less than 15.04 (vivid)")

    configure_subuid(user)
    lxc_list(user)
def resource_map():
    '''
    Dynamically generate a map of resources that will be managed for a single
    hook execution.
    '''
    # TODO: Cache this on first call?
    if config('virt-type').lower() == 'lxd':
        resource_map = deepcopy(BASE_RESOURCE_MAP)
    else:
        resource_map = deepcopy(LIBVIRT_RESOURCE_MAP)
    net_manager = network_manager()

    # Network manager gets set late by the cloud-compute interface.
    # FlatDHCPManager only requires some extra packages.
    if (net_manager in ['flatmanager', 'flatdhcpmanager']
            and config('multi-host').lower() == 'yes'):
        resource_map[NOVA_CONF]['services'].extend(
            ['nova-api', 'nova-network'])
    else:
        resource_map.pop(NOVA_API_AA_PROFILE_PATH)
        resource_map.pop(NOVA_NETWORK_AA_PROFILE_PATH)

    distro_codename = lsb_release()['DISTRIB_CODENAME'].lower()
    if distro_codename >= 'yakkety':
        for data in resource_map.values():
            if LIBVIRT_BIN_DAEMON in data['services']:
                data['services'].remove(LIBVIRT_BIN_DAEMON)
                data['services'].append(LIBVIRTD_DAEMON)

    # Neutron/quantum requires additional contexts, as well as new resources
    # depending on the plugin used.
    # NOTE(james-page): only required for ovs plugin right now
    if net_manager in ['neutron', 'quantum']:
        resource_map[NOVA_CONF]['contexts'].append(NeutronComputeContext())

    if relation_ids('ceph'):
        CEPH_RESOURCES[ceph_config_file()] = {
            'contexts': [NovaComputeCephContext()],
            'services': ['nova-compute']
        }
        resource_map.update(CEPH_RESOURCES)

    enable_nova_metadata, _ = nova_metadata_requirement()
    if enable_nova_metadata:
        resource_map[NOVA_CONF]['services'].append('nova-api-metadata')
    return resource_map
Example 32
def update_nrpe_config():
    # python-dbus is used by check_upstart_job
    # fasteners is used by collect_ceph_osd_services.py
    pkgs = ['python3-dbus']
    if CompareHostReleases(lsb_release()['DISTRIB_CODENAME']) >= 'bionic':
        pkgs.append('python3-fasteners')
    apt_install(pkgs)

    # copy the check and collect files over to the plugins directory
    charm_dir = os.environ.get('CHARM_DIR', '')
    nagios_plugins = '/usr/local/lib/nagios/plugins'
    # Grab nagios user/group ID's from original source
    _dir = os.stat(nagios_plugins)
    uid = _dir.st_uid
    gid = _dir.st_gid
    for name in ('collect_ceph_osd_services.py', 'check_ceph_osd_services.py'):
        target = os.path.join(nagios_plugins, name)
        shutil.copy(os.path.join(charm_dir, 'files', 'nagios', name), target)
        os.chown(target, uid, gid)

    hostname = nrpe.get_nagios_hostname()
    current_unit = nrpe.get_nagios_unit_name()

    # BUG#1810749 - the nagios user can't access /var/lib/ceph/.. and that's a
    # GOOD THING, as it keeps ceph secure from Nagios.  However, to check
    # whether ceph is okay, check_systemd.py or 'status ceph-osd' still needs
    # to be called with the contents of ../osd/ceph-*/whoami files.  To get
    # around this conundrum, a cron.d job running as root performs the checks
    # every minute and writes the results to a temporary file; the nrpe check
    # then greps this file and errors out (return 2) if the first 3
    # characters of a line are not 'OK:'.

    cmd = ('MAILTO=""\n'
           '* * * * * root '
           '/usr/local/lib/nagios/plugins/collect_ceph_osd_services.py'
           ' 2>&1 | logger -t check-osd\n')
    with open(CRON_CEPH_CHECK_FILE, "wt") as f:
        f.write(cmd)

    nrpe_cmd = '/usr/local/lib/nagios/plugins/check_ceph_osd_services.py'

    nrpe_setup = nrpe.NRPE(hostname=hostname)
    nrpe_setup.add_check(shortname='ceph-osd',
                         description='process check {%s}' % current_unit,
                         check_cmd=nrpe_cmd)
    nrpe_setup.write()
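# A minimal sketch of the nrpe side described in the comment above: read the
# file the cron job wrote and return CRITICAL unless every line starts with
# 'OK:' (simplified stand-in for check_ceph_osd_services.py; the results
# path is hypothetical):
def _check_osd_results(path='/var/lib/nagios/osd-results.txt'):
    with open(path) as f:
        for line in f:
            if not line.startswith('OK:'):
                print(line.strip())
                return 2  # nrpe CRITICAL
    return 0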
Example 33
def install():
    execd_preinstall()
    configure_installation_source(config('openstack-origin'))

    apt_update(fatal=True)
    packages = determine_packages()
    if os_release('openstack-dashboard') < 'icehouse':
        packages += ['nodejs', 'node-less']
    if lsb_release()['DISTRIB_CODENAME'] == 'precise':
        # Explicitly upgrade python-six Bug#1420708
        apt_install('python-six', fatal=True)
    packages = filter_installed_packages(packages)
    if packages:
        status_set('maintenance', 'Installing packages')
        apt_install(packages, fatal=True)

    git_install(config('openstack-origin-git'))
Example 34
def setup_ipv6():
    ubuntu_rel = lsb_release()['DISTRIB_CODENAME'].lower()
    if CompareHostReleases(ubuntu_rel) < "trusty":
        raise Exception("IPv6 is not supported in the charms for Ubuntu "
                        "versions less than Trusty 14.04")

    from apt import apt_pkg
    apt_pkg.init()

    # Need haproxy >= 1.5.3 for ipv6 so for Trusty if we are <= Kilo we need to
    # use trusty-backports otherwise we can use the UCA.
    vc = apt_pkg.version_compare(get_pkg_version('haproxy'), '1.5.3')
    if ubuntu_rel == 'trusty' and vc == -1:
        add_source('deb http://archive.ubuntu.com/ubuntu trusty-backports '
                   'main')
        apt_update(fatal=True)
        apt_install('haproxy/trusty-backports', fatal=True)
Example 35
def install():
    execd_preinstall()
    origin = config('openstack-origin')
    if (lsb_release()['DISTRIB_CODENAME'] == 'precise' and origin == 'distro'):
        origin = 'cloud:precise-grizzly'
    configure_installation_source(origin)
    packages = filter_installed_packages(get_packages())
    if packages:
        status_set('maintenance', 'Installing packages')
        apt_update(fatal=True)
        apt_install(packages, fatal=True)
    if init_is_systemd():
        # NOTE(jamespage): ensure systemd override folder exists prior to
        #                  attempting to write override.conf
        mkdir(os.path.dirname(CEILOMETER_API_SYSTEMD_CONF))
    if run_in_apache():
        disable_package_apache_site()
Example 36
def resolve_config_files(plugin, release):
    '''
    Resolve configuration files and contexts

    :param plugin: shortname of plugin e.g. ovs
    :param release: openstack release codename
    :returns: dict of configuration files, contexts
              and associated services
    '''
    config_files = deepcopy(CONFIG_FILES)
    drop_config = []
    cmp_os_release = CompareOpenStackReleases(release)
    if plugin == OVS:
        # NOTE: deal with switch to ML2 plugin for >= icehouse
        drop_config = [NEUTRON_OVS_AGENT_CONF]
        if cmp_os_release >= 'mitaka':
            # ml2 -> ovs_agent
            drop_config = [NEUTRON_ML2_PLUGIN_CONF]

    # Use MAAS1.9 for MTU and external port config on xenial and above
    if CompareHostReleases(lsb_release()['DISTRIB_CODENAME']) >= 'xenial':
        drop_config.extend([EXT_PORT_CONF, PHY_NIC_MTU_CONF])

    # Rename to lbaasv2 in newton
    if cmp_os_release < 'newton':
        drop_config.extend([NEUTRON_LBAASV2_AA_PROFILE_PATH])
    else:
        drop_config.extend([NEUTRON_LBAAS_AA_PROFILE_PATH])

    if disable_nova_metadata(cmp_os_release):
        drop_config.extend(NOVA_CONFIG_FILES.keys())
    else:
        if is_relation_made('amqp-nova'):
            amqp_nova_ctxt = context.AMQPContext(ssl_dir=NOVA_CONF_DIR,
                                                 rel_name='amqp-nova',
                                                 relation_prefix='nova')
        else:
            amqp_nova_ctxt = context.AMQPContext(ssl_dir=NOVA_CONF_DIR,
                                                 rel_name='amqp')
        config_files[plugin][NOVA_CONF]['hook_contexts'].append(amqp_nova_ctxt)

    for _config in drop_config:
        if _config in config_files[plugin]:
            config_files[plugin].pop(_config)
    return config_files
Example 37
def install_kata():
    """
    Install the Kata container runtime.

    :returns: None
    """
    dist = host.lsb_release()
    release = '{}_{}'.format(
        dist['DISTRIB_ID'],
        dist['DISTRIB_RELEASE']
    )

    arch = check_output(['arch']).decode().strip()

    archive = resource_get('kata-archive')

    if not archive or os.path.getsize(archive) == 0:
        status.maintenance('Installing Kata via apt')
        gpg_key = requests.get(
            'http://download.opensuse.org/repositories/home:/katacontainers:/'
            'releases:/{}:/master/x{}/Release.key'.format(arch, release)).text
        import_key(gpg_key)

        with open('/etc/apt/sources.list.d/kata-containers.list', 'w') as f:
            f.write(
                'deb http://download.opensuse.org/repositories/home:/'
                'katacontainers:/releases:/{}:/master/x{}/ /'
                .format(arch, release)
            )

        apt_update()
        apt_install(KATA_PACKAGES)

    else:
        status.maintenance('Installing Kata via resource')
        unpack = '/tmp/kata-debs'

        if not os.path.isdir(unpack):
            os.makedirs(unpack, exist_ok=True)

        check_call(['tar', '-xvf', archive, '-C', unpack])
        check_call('apt-get install -y {}/*.deb'.format(unpack), shell=True)

    status.active('Kata runtime available')
    set_state('kata.installed')
def _get_firewall_driver():
    '''
    Determine the firewall driver to use based on configuration,
    OpenStack and Ubuntu releases.

    @returns str: firewall driver to use for OpenvSwitch
    '''
    driver = config('firewall-driver') or IPTABLES_HYBRID
    release = lsb_release()['DISTRIB_CODENAME']
    if driver not in VALID_FIREWALL_DRIVERS:
        return IPTABLES_HYBRID
    if (driver == OPENVSWITCH and CompareHostReleases(release) < 'xenial'):
        # NOTE(jamespage): Switch back to iptables_hybrid for
        #                  Ubuntu releases prior to Xenial due
        #                  to requirements for Linux >= 4.4 and
        #                  Open vSwitch >= 2.5
        return IPTABLES_HYBRID
    return driver
Example 39
def install():
    status_set('maintenance', 'Executing pre-install')
    execd_preinstall()
    conf = config()
    src = conf['openstack-origin']
    if (lsb_release()['DISTRIB_CODENAME'] == 'precise' and src == 'distro'):
        src = 'cloud:precise-folsom'
    configure_installation_source(src)

    status_set('maintenance', 'Installing apt packages')
    apt_update()
    apt_install(determine_packages(), fatal=True)

    if run_in_apache():
        disable_package_apache_site()

    status_set('maintenance', 'Git install')
    git_install(config('openstack-origin-git'))
Example 40
def fix_libvirt():
    # do some fixes for libvirt with DPDK
    # it's not required for non-DPDK deployments

    # add apparmor exception for huge pages
    check_output([
        "sed", "-E", "-i", "-e",
        "\!^[[:space:]]*owner \"/run/hugepages/kvm/libvirt/qemu/\*\*\" rw"
        "!a\\\n  owner \"/hugepages/libvirt/qemu/**\" rw,",
        "/etc/apparmor.d/abstractions/libvirt-qemu"
    ])

    if lsb_release()['DISTRIB_CODENAME'] == 'xenial':
        # fix libvirt template for xenial
        render("TEMPLATE.qemu", "/etc/apparmor.d/libvirt/TEMPLATE.qemu",
               dict())

    service_restart("apparmor")
def get_packages():
    '''Return a list of packages for install based on the configured plugin'''
    plugin = config('plugin')
    packages = deepcopy(GATEWAY_PKGS[plugin])
    cmp_os_source = CompareOpenStackReleases(os_release('neutron-common'))
    cmp_host_release = CompareHostReleases(lsb_release()['DISTRIB_CODENAME'])
    if plugin == OVS:
        if (cmp_os_source >= 'icehouse' and cmp_os_source < 'mitaka'
                and cmp_host_release < 'utopic'):
            # NOTE(jamespage) neutron-vpn-agent supersedes l3-agent for
            # icehouse but openswan was removed in utopic.
            packages.remove('neutron-l3-agent')
            packages.append('neutron-vpn-agent')
            packages.append('openswan')
        if cmp_os_source >= 'liberty':
            # Switch out mysql driver
            packages.remove('python-mysqldb')
            packages.append('python-pymysql')
        if cmp_os_source >= 'mitaka':
            # Switch out to actual ovs agent package
            packages.remove('neutron-plugin-openvswitch-agent')
            packages.append('neutron-openvswitch-agent')
        if cmp_os_source >= 'kilo':
            packages.append('python-neutron-fwaas')
    if plugin in (OVS, OVS_ODL):
        if cmp_os_source >= 'newton':
            # LBaaS v1 dropped in newton
            packages.remove('neutron-lbaas-agent')
            packages.append('neutron-lbaasv2-agent')
        if cmp_os_source >= 'train':
            # LBaaS v2 dropped in train
            packages.remove('neutron-lbaasv2-agent')

    if disable_nova_metadata(cmp_os_source):
        packages.remove('nova-api-metadata')
    packages.extend(determine_l3ha_packages())

    if cmp_os_source >= 'rocky':
        packages = [p for p in packages if not p.startswith('python-')]
        packages.extend(PY3_PACKAGES)
        if cmp_os_source >= 'train':
            packages.remove('python3-neutron-lbaas')

    return packages
def configure_nvidia():
    """
    Based on charm config, install and configure Nvidia drivers.

    :return: None
    """
    status.maintenance('Installing Nvidia drivers.')

    dist = host.lsb_release()
    os_release_id = dist['DISTRIB_ID'].lower()
    os_release_version_id = dist['DISTRIB_RELEASE']
    os_release_version_id_no_dot = os_release_version_id.replace('.', '')
    proxies = {
        "http": config('http_proxy'),
        "https": config('https_proxy')
    }
    key_urls = config('nvidia_apt_key_urls').split()
    for key_url in key_urls:
        formatted_key_url = key_url.format(
            id=os_release_id,
            version_id=os_release_version_id,
            version_id_no_dot=os_release_version_id_no_dot
        )
        gpg_key = requests.get(formatted_key_url, proxies=proxies).text
        import_key(gpg_key)

    sources = config('nvidia_apt_sources').splitlines()
    formatted_sources = [
        source.format(
            id=os_release_id,
            version_id=os_release_version_id,
            version_id_no_dot=os_release_version_id_no_dot
        )
        for source in sources
    ]
    with open('/etc/apt/sources.list.d/nvidia.list', 'w') as f:
        f.write('\n'.join(formatted_sources))

    apt_update()
    packages = config('nvidia_apt_packages').split()
    apt_install(packages, fatal=True)

    set_state('containerd.nvidia.ready')
    config_changed()
Example 43
def configure_nvidia():
    """
    Based on charm config, install and configure Nvidia drivers.

    :return: None
    """
    status.maintenance('Installing Nvidia drivers.')

    dist = host.lsb_release()
    release = '{}{}'.format(dist['DISTRIB_ID'].lower(),
                            dist['DISTRIB_RELEASE'])
    proxies = {"http": config('http_proxy'), "https": config('https_proxy')}
    ncr_gpg_key = requests.get(
        'https://nvidia.github.io/nvidia-container-runtime/gpgkey',
        proxies=proxies).text
    import_key(ncr_gpg_key)
    with open('/etc/apt/sources.list.d/nvidia-container-runtime.list',
              'w') as f:
        f.write('deb '
                'https://nvidia.github.io/libnvidia-container/{}/$(ARCH) /\n'.
                format(release))
        f.write(
            'deb '
            'https://nvidia.github.io/nvidia-container-runtime/{}/$(ARCH) /\n'.
            format(release))

    cuda_gpg_key = requests.get(
        'https://developer.download.nvidia.com/'
        'compute/cuda/repos/{}/x86_64/7fa2af80.pub'.format(
            release.replace('.', '')),
        proxies=proxies).text
    import_key(cuda_gpg_key)
    with open('/etc/apt/sources.list.d/cuda.list', 'w') as f:
        f.write('deb '
                'http://developer.download.nvidia.com/'
                'compute/cuda/repos/{}/x86_64 /\n'.format(
                    release.replace('.', '')))

    apt_update()

    apt_install(NVIDIA_PACKAGES, fatal=True)

    set_state('containerd.nvidia.ready')
    config_changed()
Example 44
def configure_lxd_host():
    ubuntu_release = lsb_release()['DISTRIB_CODENAME'].lower()
    cmp_ubuntu_release = CompareHostReleases(ubuntu_release)
    if cmp_ubuntu_release > "vivid":
        log('>= Wily deployment - configuring LXD trust password and address',
            level=INFO)
        cmd = [
            'lxc', 'config', 'set', 'core.trust_password',
            lxd_trust_password()
        ]
        check_call(cmd)
        cmd = ['lxc', 'config', 'set', 'core.https_address', '[::]']
        check_call(cmd)

        if not is_container():
            # NOTE(jamespage): None of the below is worth doing when running
            #                  within a container on an all-in-one install

            # Configure live migration
            if cmp_ubuntu_release == 'xenial':
                uname = os.uname()[2]
                if uname > '4.4.0-122-generic':
                    pkg = "linux-modules-extra-{}"
                else:
                    pkg = "linux-image-extra-{}"
                apt_install(pkg.format(uname), fatal=True)

            if cmp_ubuntu_release >= 'xenial':
                modprobe('netlink_diag')

            # Enable/disable use of ext4 within nova-lxd containers
            if os.path.exists(EXT4_USERNS_MOUNTS):
                with open(EXT4_USERNS_MOUNTS, 'w') as userns_mounts:
                    userns_mounts.write(
                        'Y\n' if config('enable-ext4-userns') else 'N\n')

        configure_uid_mapping()
    elif cmp_ubuntu_release == "vivid":
        log('Vivid deployment - loading overlay kernel module', level=INFO)
        cmd = ['modprobe', 'overlay']
        check_call(cmd)
        with open('/etc/modules', 'r+') as modules:
            if 'overlay' not in modules.read():
                modules.write('overlay')
Example 45
def hanode_relation_changed(relid=None):
    ubuntu_rel = lsb_release()['DISTRIB_CODENAME'].lower()
    log("Checking if any units have notified about a series upgrade", INFO)
    series_notifications = get_series_upgrade_notifications(relid)
    for unit, from_release in series_notifications.items():
        # from_release is the release that the peer is currently on and is
        # therefore upgrading from.
        if CompareHostReleases(ubuntu_rel) <= from_release:
            log("Shutting down services for peer upgrade", INFO)
            disable_ha_services()
            log("Setting waiting-unit-upgrade to True", INFO)
            set_waiting_unit_series_upgrade()
        else:
            log("Already series ahead of peer, no shutdown needed", INFO)
    if is_waiting_unit_series_upgrade_set():
        log("Unit is waiting for upgrade", INFO)
    else:
        log("No units have notified a series upgrade", INFO)
        ha_relation_changed()
Example 46
def check_for_upgrade():
    if not ceph.is_bootstrapped():
        log("Ceph is not bootstrapped, skipping upgrade checks.")
        return

    release_info = host.lsb_release()
    if release_info['DISTRIB_CODENAME'] != 'trusty':
        log("Invalid upgrade path from {}.  Only trusty is currently "
            "supported".format(release_info['DISTRIB_CODENAME']))
        return

    c = hookenv.config()
    old_version = ceph.resolve_ceph_version(c.previous('source') or 'distro')
    log('old_version: {}'.format(old_version))
    new_version = ceph.resolve_ceph_version(
        hookenv.config('source') or 'distro')
    log('new_version: {}'.format(new_version))

    # May be in a previous upgrade that was failed if the directories
    # still need an ownership update. Check this condition.
    resuming_upgrade = ceph.dirs_need_ownership_update('osd')

    if old_version == new_version and not resuming_upgrade:
        log("No new ceph version detected, skipping upgrade.", DEBUG)
        return

    if (ceph.UPGRADE_PATHS.get(old_version) == new_version) or\
       resuming_upgrade:
        if old_version == new_version:
            log('Attempting to resume possibly failed upgrade.', INFO)
        else:
            log("{} to {} is a valid upgrade path. Proceeding.".format(
                old_version, new_version))

        emit_cephconf(upgrading=True)
        ceph.roll_osd_cluster(new_version=new_version,
                              upgrade_key='osd-upgrade')
        emit_cephconf(upgrading=False)
    else:
        # Log a helpful error message
        log("Invalid upgrade path from {} to {}.  "
            "Valid paths are: {}".format(old_version, new_version,
                                         ceph.pretty_print_upgrade_paths()))
Example 47
def install():
    pkgs = copy.deepcopy(PACKAGES)
    ubuntu_release = lsb_release()['DISTRIB_CODENAME'].lower()
    if CompareHostReleases(ubuntu_release) < 'xenial':
        # use libnagios on anything older than Xenial
        pkgs.remove('libmonitoring-plugin-perl')
        pkgs.append('libnagios-plugin-perl')

        pkgs.remove('python3-netaddr')
        pkgs.append('python-netaddr')

    elif CompareHostReleases(ubuntu_release) >= 'bionic':
        pkgs.append('python3-libmaas')

    # NOTE(dosaboy): we currently disallow upgrades due to bug #1382842. This
    # should be removed once the pacemaker package is fixed.
    status_set('maintenance', 'Installing apt packages')
    apt_install(filter_installed_packages(pkgs), fatal=True)
    setup_ocf_files()
Example 48
def install_from_nvidia_apt():
    ''' Install cuda docker from the nvidia apt repository. '''
    status_set('maintenance', 'Installing docker-engine from Nvidia PPA.')
    # Get the server and key in the apt-key management tool.
    add_apt_key("9DC858229FC7DD38854AE2D88D81803C0EBFCD88")
    # Install key for nvidia-docker. This key changes frequently
    # ([expires: 2019-09-20]) so we should do what the official docs say and
    # not try to get it through its fingerprint.
    add_apt_key_url("https://nvidia.github.io/nvidia-container-runtime/gpgkey")

    # Get the package architecture (amd64), not the machine hardware (x86_64)
    architecture = arch()
    # Get the lsb information as a dictionary.
    lsb = host.lsb_release()
    code = lsb['DISTRIB_CODENAME']
    rel = lsb['DISTRIB_RELEASE']
    ubuntu = str(lsb['DISTRIB_ID']).lower()
    dockurl = "https://download.docker.com/linux/ubuntu"
    nvidurl = 'https://nvidia.github.io'
    repo = 'stable'

    deb = list()
    deb.append('deb [arch={0}] {1} {2} {3}'.format(architecture, dockurl, code,
                                                   repo))
    for i in [
            'libnvidia-container', 'nvidia-container-runtime', 'nvidia-docker'
    ]:
        deb.append('deb {0}/{1}/ubuntu{2}/{3} /'.format(
            nvidurl, i, rel, architecture))

    write_docker_sources(deb)

    install_cuda_drivers_repo(architecture, rel, ubuntu)

    apt_update(fatal=True)
    # actually install the required packages docker-ce nvidia-docker2

    docker_ce = hookenv.config('docker-ce-package')
    nvidia_docker2 = hookenv.config('nvidia-docker-package')
    nv_container_runtime = hookenv.config('nvidia-container-runtime-package')
    apt_install(
        ['cuda-drivers', docker_ce, nvidia_docker2, nv_container_runtime],
        fatal=True)
Example 49
def install():
    status_set('maintenance', 'Executing pre-install')
    execd_preinstall()
    conf = config()
    src = conf['openstack-origin']
    if (lsb_release()['DISTRIB_CODENAME'] == 'precise' and src == 'distro'):
        src = 'cloud:precise-folsom'
    configure_installation_source(src)

    status_set('maintenance', 'Installing apt packages')
    apt_update()
    apt_install(determine_packages(), fatal=True)

    if run_in_apache():
        disable_package_apache_site()
    # call the policy overrides handler which will install any policy overrides
    maybe_do_policyd_overrides(
        os_release('cinder-common'),
        'cinder',
        restart_handler=lambda: service_restart('cinder-api'))
Example 50
def configure_sstuser(sst_password):
    # xtrabackup 2.4 (introduced in Bionic) needs PROCESS privilege for backups
    permissions = [
        "RELOAD",
        "LOCK TABLES",
        "REPLICATION CLIENT"
    ]
    if CompareHostReleases(lsb_release()['DISTRIB_CODENAME']) >= 'bionic':
        permissions.append('PROCESS')

    m_helper = get_db_helper()
    m_helper.connect(password=m_helper.get_mysql_root_password())
    m_helper.execute(SQL_SST_USER_SETUP.format(
        permissions=','.join(permissions),
        password=sst_password)
    )
    m_helper.execute(SQL_SST_USER_SETUP_IPV6.format(
        permissions=','.join(permissions),
        password=sst_password)
    )
Example 51
def setup_ipv6():
    """Validate that we can support IPv6 mode.

    This should be called if prefer-ipv6 is True to ensure that we are running
    in an environment that supports ipv6.
    """
    ubuntu_rel = lsb_release()['DISTRIB_CODENAME'].lower()
    if CompareHostReleases(ubuntu_rel) < "trusty":
        msg = ("IPv6 is not supported in the charms for Ubuntu versions less "
               "than Trusty 14.04")
        raise SwiftProxyCharmException(msg)

    # Need haproxy >= 1.5.3 for ipv6 so for Trusty if we are <= Kilo we need to
    # use trusty-backports otherwise we can use the UCA.
    if (ubuntu_rel == 'trusty' and
            CompareOpenStackReleases(os_release('swift-proxy')) < 'liberty'):
        add_source('deb http://archive.ubuntu.com/ubuntu trusty-backports '
                   'main')
        apt_update()
        apt_install('haproxy/trusty-backports', fatal=True)
    def __init__(self, series=None, openstack=None, source=None, stable=False):
        """Deploy the entire test environment."""
        super(RmqBasicDeployment, self).__init__(series, openstack, source,
                                                 stable)
        self.client_series = lsb_release()['DISTRIB_CODENAME']
        self._add_services()
        self._add_relations()
        self._configure_services()
        self._deploy()

        u.log.info('Waiting on extended status checks...')
        exclude_services = ['mysql', 'nrpe']

        # Wait for deployment ready msgs, except exclusions
        self._auto_wait_for_status(exclude_services=exclude_services)

        # Specifically wait for rmq cluster status msgs
        u.rmq_wait_for_cluster(self, init_sleep=0)

        self._initialize_tests()
Example 53
def install():
    status_set("maintenance", "Installing...")

    configure_sources(True, "install-sources", "install-keys")
    apt_upgrade(fatal=True, dist=True)
    packages = list()
    packages.extend(PACKAGES)
    # TODO: support dpdk config option
    packages.extend(PACKAGES_DKMS_INIT)
    apt_install(packages, fatal=True)
    try:
        output = check_output([
            "dpkg-query", "-f", "${Version}\\n", "-W", "contrail-vrouter-agent"
        ])
        version = output.decode('UTF-8').rstrip()
        application_version_set(version)
    except CalledProcessError:
        return None

    status_set("maintenance", "Configuring...")
    os.chmod("/etc/contrail", 0o755)
    os.chown("/etc/contrail", 0, 0)

    # supervisord must be started after installation
    release = lsb_release()["DISTRIB_CODENAME"]
    if release == 'trusty':
        # supervisord
        service_restart("supervisor-vrouter")

    try:
        log("Loading kernel module vrouter")
        modprobe("vrouter")
    except CalledProcessError:
        log("vrouter kernel module failed to load,"
            " clearing pagecache and retrying")
        drop_caches()
        modprobe("vrouter")
    dkms_autoinstall("vrouter")
    configure_vrouter_interface()
    config["vrouter-expected-provision-state"] = False
    status_set("blocked", "Missing relation to contrail-controller")
Example no. 54
def backup(args):
    basedir = (action_get("basedir")).lower()
    compress = action_get("compress")
    incremental = action_get("incremental")
    sstpw = _get_password("sst-password")
    optionlist = []

    # innobackupex will not create recursive dirs that do not already exist,
    # so help it along
    if not os.path.exists(basedir):
        os.makedirs(basedir)

    # Build a list of options to pass to innobackupex
    if compress:
        optionlist.append("--compress")

    if incremental:
        optionlist.append("--incremental")

    # xtrabackup 2.4 (introduced in Bionic) doesn't support compact backups
    if CompareHostReleases(lsb_release()['DISTRIB_CODENAME']) < 'bionic':
        optionlist.append("--compact")

    try:
        subprocess.check_call([
            'innobackupex', '--galera-info', '--rsync', basedir,
            '--user=sstuser', '--password={}'.format(sstpw)
        ] + optionlist)
        action_set({
            'time-completed': (strftime("%Y-%m-%d %H:%M:%S", gmtime())),
            'outcome': 'Success'
        })
    except subprocess.CalledProcessError as e:
        action_set({
            'time-completed': (strftime("%Y-%m-%d %H:%M:%S", gmtime())),
            'output': e.output,
            'return-code': e.returncode,
            'traceback': traceback.format_exc()
        })
        action_fail("innobackupex failed, you should log on to the unit"
                    "and check the status of the database")
Example no. 55
    def install_java(self):
        """
        Possibly install java.
        """
        java_package = self.options.get("install_java")
        if not java_package:
            # noop if we are setting up the openjdk relation.
            return

        if lsb_release()['DISTRIB_CODENAME'] == 'trusty':
            # No Java 8 on trusty
            fetch.add_source("ppa:openjdk-r/ppa")
            fetch.apt_update()
        fetch.apt_install(java_package)

        java_home_ = java_home()
        data_changed('java_home', java_home_)  # Prime data changed

        utils.re_edit_in_place('/etc/environment', {
            r'#? *JAVA_HOME *=.*': 'JAVA_HOME={}'.format(java_home_),
        }, append_non_matches=True)
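java_home() comes from the layer's helpers; one common way to resolve it is
to follow the `java` alternative, sketched below (the paths and the
jre-stripping rule are assumptions, not the layer's actual code):

import os

def java_home():
    java_bin = os.path.realpath('/usr/bin/java')       # follow alternatives
    home = os.path.dirname(os.path.dirname(java_bin))  # strip bin/java
    if home.endswith('/jre'):                          # JDK 8 layouts
        home = os.path.dirname(home)
    return home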
Example no. 56
def add_source(source, key=None):
    if (source.startswith('ppa:') or source.startswith('http:')
            or source.startswith('deb ')
            or source.startswith('cloud-archive:')):
        subprocess.check_call(['add-apt-repository', '--yes', source])
    elif source.startswith('cloud:'):
        apt_install(filter_installed_packages(['ubuntu-cloud-keyring']),
                    fatal=True)
        pocket = source.split(':')[-1]
        if pocket not in CLOUD_ARCHIVE_POCKETS:
            raise SourceConfigError('Unsupported cloud: source option %s' %
                                    pocket)
        actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket]
        with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt:
            apt.write(CLOUD_ARCHIVE.format(actual_pocket))
    elif source == 'proposed':
        release = lsb_release()['DISTRIB_CODENAME']
        with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
            apt.write(PROPOSED_POCKET.format(release))
    if key:
        # NB: apt-key has no 'import' subcommand; 'add' expects the path
        # to a keyring file.
        subprocess.check_call(['apt-key', 'add', key])
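Typical source strings this helper accepts (illustrative values; a 'cloud:'
pocket must exist in CLOUD_ARCHIVE_POCKETS, and 'proposed' uses the unit's
own release):

add_source('ppa:openjdk-r/ppa')
add_source('deb http://archive.ubuntu.com/ubuntu trusty-backports main')
add_source('cloud:trusty-kilo')
add_source('proposed')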
Example no. 57
def setup_eni():
    """Makes sure /etc/network/interfaces.d/ exists and will be parsed.

    When setting up interfaces, Juju removes from
    /etc/network/interfaces the line sourcing interfaces.d/

    WARNING: no longer supported on focal and newer; raises RuntimeError.
    """
    release = CompareHostReleases(lsb_release()['DISTRIB_CODENAME'])
    if release >= 'focal':
        raise RuntimeError("NetworkManager isn't supported anymore")

    if not os.path.exists('/etc/network/interfaces.d'):
        os.makedirs('/etc/network/interfaces.d', mode=0o755)
    with open('/etc/network/interfaces', 'r') as eni:
        for line in eni:
            if re.search(r'^\s*source\s+/etc/network/interfaces.d/\*\s*$',
                         line):
                return
    with open('/etc/network/interfaces', 'a') as eni:
        eni.write('\nsource /etc/network/interfaces.d/*')
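A quick standalone check that the sourcing line the function appends is
matched by the regex it scans with (a sanity test, not charm code):

import re

line = 'source /etc/network/interfaces.d/*\n'
assert re.search(r'^\s*source\s+/etc/network/interfaces.d/\*\s*$', line)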
Example no. 58
def install_from_custom_apt():
    """
    Install docker from custom repository.

    :return: None or False
    """
    status.maintenance("Installing Docker from custom repository.")

    repo_string = config("docker_runtime_repo")
    key_url = config("docker_runtime_key_url")
    package_name = config("docker_runtime_package")

    if not repo_string:
        message = "`docker_runtime_repo` must be set"
        hookenv.log(message)
        status.blocked(message)
        return False

    if not key_url:
        message = "`docker_runtime_key_url` must be set"
        hookenv.log(message)
        status.blocked(message)
        return False

    if not package_name:
        message = "`docker_runtime_package` must be set"
        hookenv.log(message)
        status.blocked(message)
        return False

    lsb = host.lsb_release()

    format_dictionary = {"ARCH": arch(), "CODE": lsb["DISTRIB_CODENAME"]}

    add_apt_key_url(key_url)
    write_docker_sources([repo_string.format(**format_dictionary)])
    apt_update()
    apt_install([package_name])

    return True
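The repo string is expected to carry {ARCH} and {CODE} placeholders that are
filled from the running system; an illustrative value (the upstream Docker
repo below is an example, not charm policy):

repo_string = ('deb [arch={ARCH}] https://download.docker.com/linux/ubuntu '
               '{CODE} stable')
print(repo_string.format(ARCH='amd64', CODE='focal'))
# deb [arch=amd64] https://download.docker.com/linux/ubuntu focal stable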
Example no. 59
def install():
    status_set('maintenance', 'Executing pre-install')
    execd_preinstall()
    src = config('openstack-origin')
    if (lsb_release()['DISTRIB_CODENAME'] == 'precise' and src == 'distro'):
        src = 'cloud:precise-icehouse'
    configure_installation_source(src)
    status_set('maintenance', 'Installing apt packages')
    apt_update(fatal=True)
    apt_install('python-six', fatal=True)  # Force upgrade
    if valid_plugin():
        apt_install(filter_installed_packages(get_early_packages()),
                    fatal=True)
        apt_install(filter_installed_packages(get_packages()), fatal=True)
        status_set('maintenance', 'Git install')
        git_install(config('openstack-origin-git'))
    else:
        message = 'Please provide a valid plugin config'
        log(message, level=ERROR)
        status_set('blocked', message)
        sys.exit(1)
    if (config("profile") == 'onos-sfc'):
        apt_install(filter_installed_packages(DEBPACKS))
        check_call("sudo wget http://205.177.226.237:9999/onosfw\
/package_ovs_debian.tar.gz -O ovs.tar",
                   shell=True)
        check_call("sudo tar xvf ovs.tar", shell=True)
        check_call("sudo dpkg -i openvswitch-common_2.5.90-1_amd64.deb",
                   shell=True)
        check_call("sudo dpkg -i openvswitch-datapath-dkms_2.5.90-1_all.deb",
                   shell=True)
        check_call("sudo dpkg -i openvswitch-switch_2.5.90-1_amd64.deb",
                   shell=True)
        status_set('maintenance', 'openvswitch 2.5.90 installed')
    # Legacy HA for Icehouse
    update_legacy_ha_files()

    # Install systemd overrides to remove service startup race between
    # n-gateway and n-cloud-controller services.
    install_systemd_override()
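install_systemd_override() is defined elsewhere in the charm; a sketch of
the usual systemd drop-in mechanism it would rely on (the unit names, paths
and ordering below are assumptions, not the charm's real override):

import os

OVERRIDE_DIR = '/etc/systemd/system/neutron-gateway.service.d'

def install_systemd_override_sketch():
    # Drop-in that orders this service after its dependency; systemd
    # merges override.conf with the packaged unit file.
    os.makedirs(OVERRIDE_DIR, exist_ok=True)
    with open(os.path.join(OVERRIDE_DIR, 'override.conf'), 'w') as f:
        f.write('[Unit]\nAfter=neutron-server.service\n')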
Example no. 60
    def get_ciphers(self, cbc_required):
        if cbc_required:
            weak_ciphers = 'weak'
        else:
            weak_ciphers = 'default'

        default = 'aes256-ctr,aes192-ctr,aes128-ctr'
        cipher = {'default': default,
                  'weak': default + ',aes256-cbc,aes192-cbc,aes128-cbc'}

        default = ('chacha20-poly1305@openssh.com,aes256-gcm@openssh.com,'
                   'aes128-gcm@openssh.com,aes256-ctr,aes192-ctr,aes128-ctr')
        ciphers_66 = {'default': default,
                      'weak': default + ',aes256-cbc,aes192-cbc,aes128-cbc'}

        # Use newer ciphers on Ubuntu Trusty and above
        if CompareHostReleases(lsb_release()['DISTRIB_CODENAME']) >= 'trusty':
            log('Detected Ubuntu 14.04 or newer, using new ciphers',
                level=DEBUG)
            cipher = ciphers_66

        return cipher[weak_ciphers]
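The leading comma in the 'weak' entries matters: without it, concatenation
fuses two cipher names into a single invalid token.

default = 'aes256-ctr,aes192-ctr,aes128-ctr'
assert (default + 'aes256-cbc').endswith('aes128-ctraes256-cbc')    # fused
assert (default + ',aes256-cbc').endswith('aes128-ctr,aes256-cbc')  # correct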