Example #1
    def setup_init_scripts(self):
        if host.init_is_systemd():
            template_path = '/etc/systemd/system/zeppelin.service'
            template_name = 'systemd.conf'
        else:
            template_path = '/etc/init/zeppelin.conf'
            template_name = 'upstart.conf'
        if os.path.exists(template_path):
            template_path_backup = "{}.backup".format(template_path)
            if os.path.exists(template_path_backup):
                os.remove(template_path_backup)
            os.rename(template_path, template_path_backup)

        render(
            template_name,
            template_path,
            context={
                'zeppelin_home': self.dist_config.path('zeppelin'),
                'zeppelin_conf': self.dist_config.path('zeppelin_conf')
            },
        )

        if host.init_is_systemd():
            utils.run_as('root', 'systemctl', 'enable', 'zeppelin.service')
            utils.run_as('root', 'systemctl', 'daemon-reload')
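
Most of the examples on this page share one idiom: probe the init system with init_is_systemd(), pick a template and target path accordingly, then enable the unit and reload systemd. A minimal sketch of that idiom, assuming charmhelpers is available (the service name 'myapp' and template names are placeholders, not from any charm shown here):

# Shared idiom behind these examples; 'myapp' and the template names
# are hypothetical.
from charmhelpers.core import host
from charmhelpers.core.templating import render


def install_init_config(context, service='myapp'):
    if host.init_is_systemd():
        template = 'systemd.conf'
        target = '/etc/systemd/system/{}.service'.format(service)
    else:
        template = 'upstart.conf'
        target = '/etc/init/{}.conf'.format(service)
    render(template, target, context=context)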
Example #2
def install():
    status_set("maintenance", "Installing...")

    configure_crashes()
    configure_sources(True, "install-sources", "install-keys")
    apt_upgrade(fatal=True, dist=True)
    packages = list()
    packages.extend(PACKAGES)
    if not config.get("dpdk"):
        packages.extend(PACKAGES_DKMS_INIT)
    else:
        # services must not be started before config files creation
        if not init_is_systemd():
            with open("/etc/init/supervisor-vrouter.override", "w") as conf:
                conf.write("manual\n")
        else:
            # and another way with systemd
            for srv in ("contrail-vrouter-agent", "contrail-vrouter-dpdk"):
                try:
                    os.remove("/etc/systemd/system/{}.sevice".format(srv))
                except OSError:
                    pass
                os.symlink("/dev/null",
                           "/etc/systemd/system/{}.sevice".format(srv))
        packages.extend(PACKAGES_DPDK_INIT)
        # apt-get upgrade can install new kernel so we need to re-install
        # packages with dpdk drivers
        kver = check_output(["uname", "-r"]).decode("utf-8").rstrip()
        packages.append("linux-image-extra-" + kver)
    apt_install(packages, fatal=True)
    try:
        output = check_output([
            "dpkg-query", "-f", "${Version}\\n", "-W", "contrail-vrouter-agent"
        ])
        version = output.decode('UTF-8').rstrip()
        application_version_set(version)
    except CalledProcessError:
        return None

    status_set("maintenance", "Configuring...")
    os.chmod("/etc/contrail", 0o755)
    os.chown("/etc/contrail", 0, 0)

    if config.get("dpdk"):
        install_dpdk()
    else:
        # supervisord must be started after installation
        if not init_is_systemd():
            # supervisord
            service_restart("supervisor-vrouter")
        install_dkms()
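
The /dev/null symlinks above reproduce systemd's unit masking by hand, so the packages' postinst scripts cannot start the services before their config files exist. A hedged sketch of the same effect using systemctl directly (unit names illustrative):

# Masking a unit is equivalent to symlinking /etc/systemd/system/<unit>
# to /dev/null, as install() does above.
import subprocess


def mask_unit(unit):
    subprocess.check_call(['systemctl', 'mask', unit])


def unmask_unit(unit):
    subprocess.check_call(['systemctl', 'unmask', unit])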
Example #3
def set_dpdk_coremask():
    mask = config.get("dpdk-coremask")
    service = "/usr/bin/contrail-vrouter-dpdk"
    mask_arg = mask if mask.startswith("0x") else "-c " + mask
    if not init_is_systemd():
        check_call([
            "sed", "-i", "-e", "s!^command=.*{service}!"
            "command=taskset {mask} {service}!".format(service=service,
                                                       mask=mask_arg),
            "/etc/contrail/supervisord_vrouter_files"
            "/contrail-vrouter-dpdk.ini"
        ])
        return

    # systemd magic
    srv_orig = "/lib/systemd/system/contrail-vrouter-dpdk.service"
    with open(srv_orig, "r") as f:
        for line in f:
            if line.startswith("ExecStart="):
                args = line.split(service)[1]
                break
        else:
            args = " --no-daemon --socket-mem 1024"

    srv_dir = "/etc/systemd/system/contrail-vrouter-dpdk.service.d/"
    try:
        os.mkdir(srv_dir)
    except OSError:
        # directory may already exist
        pass
    with open(srv_dir + "/override.conf", "w") as f:
        f.write("[Service]\nExecStart=\n")
        f.write("ExecStart=/usr/bin/taskset {mask} {service} {args}".format(
            service=service, mask=mask_arg, args=args))
    check_call(["systemctl", "daemon-reload"])
Example #4
def install_review_queue():
    status_set('maintenance', 'Installing Review Queue')

    with tempfile.TemporaryDirectory() as tmp_dir:
        install_dir = install_remote(config['repo'], dest=tmp_dir)
        contents = os.listdir(install_dir)
        if install_dir == tmp_dir and len(contents) == 1:
            # unlike the git handler, the archive handler just returns tmp_dir
            # even if the archive contents are nested in a folder as they
            # should be, so we have to normalize for that here
            install_dir = os.path.join(install_dir, contents[0])
        shutil.rmtree(APP_DIR, ignore_errors=True)
        log('Moving app source from {} to {}'.format(
            install_dir, APP_DIR))
        shutil.move(install_dir, APP_DIR)
    subprocess.check_call('make .venv'.split(), cwd=APP_DIR)
    if init_is_systemd():
        shutil.copyfile(SYSTEMD_SRC, SYSTEMD_DEST)
        shutil.copyfile(SYSTEMD_TASK_SRC, SYSTEMD_TASK_DEST)
        subprocess.check_call(['systemctl', 'daemon-reload'])
    else:
        shutil.copyfile(UPSTART_SRC, UPSTART_DEST)
        shutil.copyfile(UPSTART_TASK_SRC, UPSTART_TASK_DEST)
        subprocess.check_call(['initctl', 'reload-configuration'])
    shutil.copyfile(LP_CREDS_SRC, LP_CREDS_DEST)
    shutil.copyfile(APP_INI_SRC, APP_INI_DEST)
    chownr(APP_DIR, APP_USER, APP_GROUP)

    set_state('reviewqueue.installed')
    change_config()
    update_db()
    update_amqp()
    update_secret()
    set_state('reviewqueue.restart')
Example #5
def install_dpdk():
    modprobe(config["dpdk-driver"])
    try:
        modprobe("vfio-pci")
    except Exception:
        # vfio-pci may be unavailable on this kernel
        pass
    dkms_autoinstall()
    pages = get_hugepages()
    if pages:
        hugepage_support("root", group="root", nr_hugepages=pages,
                         mnt_point="/hugepages")
        service_restart("libvirt-bin")

    configure_vrouter_interface()
    set_dpdk_coremask()
    write_configs()

    if not init_is_systemd():
        os.remove("/etc/init/supervisor-vrouter.override")
        service_start("supervisor-vrouter")
        service_restart("contrail-vrouter-agent")
    else:
        service("enable", "contrail-vrouter-dpdk")
        service_start("contrail-vrouter-dpdk")
        service("enable", "contrail-vrouter-agent")
        service_start("contrail-vrouter-agent")

    fix_libvirt()
Example #6
def install_ntpmon():
    """
    Install package dependencies, source files, and startup configuration.
    """
    install_dir = layer.options.get('ntpmon', 'install-dir')
    service_name = layer.options.get('ntpmon', 'service-name')
    using_systemd = host.init_is_systemd()
    if install_dir:
        log('installing ntpmon')
        host.mkdir(os.path.dirname(install_dir))
        host.rsync('src/', '{}/'.format(install_dir))

        if service_name:
            if using_systemd:
                systemd_config = '/etc/systemd/system/' + service_name + '.service'
                log('installing systemd service: {}'.format(service_name))
                with open(systemd_config, 'w') as conffile:
                    conffile.write(
                        templating.render('src/' + service_name + '.systemd',
                                          layer.options.get('ntpmon')))
                subprocess.call(['systemctl', 'daemon-reload'])
            else:
                upstart_config = '/etc/init/' + service_name + '.conf'
                log('installing upstart service: {}'.format(service_name))
                with open(upstart_config, 'w') as conffile:
                    conffile.write(
                        templating.render('src/' + service_name + '.upstart',
                                          layer.options.get('ntpmon')))

    set_flag('ntpmon.installed')
    clear_flag('ntpmon.configured')
Example #7
def emit_systemd_overrides_file():
    """Generate the systemd overrides file
    With Start and Stop timeout values
    Note: (David Ames) Bug#1654403 Work around
    May be removed if bug is resolved
    If timeout value is set to -1 pass infinity
    """
    if not init_is_systemd():
        return

    stop_timeout = int(config('service_stop_timeout'))
    if stop_timeout < 0:
        stop_timeout = 'infinity'
    start_timeout = int(config('service_start_timeout'))
    if start_timeout < 0:
        start_timeout = 'infinity'

    systemd_overrides_context = {'service_stop_timeout': stop_timeout,
                                 'service_start_timeout': start_timeout,
                                 }

    for service in ['corosync', 'pacemaker']:
        overrides_dir = SYSTEMD_OVERRIDES_DIR.format(service)
        overrides_file = SYSTEMD_OVERRIDES_FILE.format(overrides_dir)
        if not os.path.isdir(overrides_dir):
            os.mkdir(overrides_dir)

        write_file(path=overrides_file,
                   content=render_template('systemd-overrides.conf',
                                           systemd_overrides_context))

    # Update systemd with the new information
    subprocess.check_call(['systemctl', 'daemon-reload'])
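
The -1-to-'infinity' mapping is done twice above; a small pure function captures it (a refactoring sketch, not part of the charm):

def _systemd_timeout(value):
    # systemd accepts 'infinity' for TimeoutStartSec/TimeoutStopSec;
    # the charm encodes that as a negative config value.
    value = int(value)
    return 'infinity' if value < 0 else value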
Example #8
def install_ntpmon():
    """
    Install package dependencies, source files, and startup configuration.
    """
    install_dir = layer.options.get('ntpmon', 'install-dir')
    service_name = layer.options.get('ntpmon', 'service-name')
    using_systemd = host.init_is_systemd()
    if install_dir:
        log('installing ntpmon')
        host.mkdir(os.path.dirname(install_dir))
        host.rsync('src/', '{}/'.format(install_dir))

        if service_name:
            if using_systemd:
                systemd_config = '/etc/systemd/system/' + service_name + '.service'
                log('installing systemd service: {}'.format(service_name))
                with open(systemd_config, 'w') as conffile:
                    conffile.write(templating.render('src/' + service_name + '.systemd', layer.options.get('ntpmon')))
                subprocess.call(['systemctl', 'daemon-reload'])
            else:
                upstart_config = '/etc/init/' + service_name + '.conf'
                log('installing upstart service: {}'.format(service_name))
                with open(upstart_config, 'w') as conffile:
                    conffile.write(templating.render('src/' + service_name + '.upstart', layer.options.get('ntpmon')))

    set_flag('ntpmon.installed')
    clear_flag('ntpmon.configured')
Example #9
def install_dpdk():
    modprobe(config["dpdk-driver"])
    try:
        modprobe("vfio-pci")
    except Exception:
        # vfio-pci may be unavailable on this kernel
        pass
    dkms_autoinstall()
    pages = get_hugepages()
    if pages:
        hugepage_support("root", group="root", nr_hugepages=pages,
                         mnt_point="/hugepages")
        service_restart("libvirt-bin")

    configure_vrouter_interface()
    set_dpdk_options()

    if not init_is_systemd():
        os.remove("/etc/init/supervisor-vrouter.override")
        service_start("supervisor-vrouter")
        service_restart("contrail-vrouter-agent")
    else:
        # unmask them first
        for srv in ("contrail-vrouter-agent", "contrail-vrouter-dpdk"):
            try:
                os.remove("/etc/systemd/system/{}.service".format(srv))
            except OSError:
                pass
        service("enable", "contrail-vrouter-dpdk")
        service_start("contrail-vrouter-dpdk")
        service("enable", "contrail-vrouter-agent")
        service_start("contrail-vrouter-agent")

    fix_libvirt()
Example #10
def install_tmpfilesd():
    '''Install systemd-tmpfiles configuration for ovs vhost-user sockets'''
    # NOTE(jamespage): Only do this if libvirt is actually installed
    if (init_is_systemd() and user_exists('libvirt-qemu')
            and group_exists('kvm')):
        shutil.copy('files/nova-ovs-vhost-user.conf', '/etc/tmpfiles.d')
        subprocess.check_call(['systemd-tmpfiles', '--create'])
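
files/nova-ovs-vhost-user.conf is shipped with the charm and not shown here. For illustration only, a rule of the shape it plausibly contains could also be generated in code (the path and file name below are guesses, not the charm's actual rule):

# tmpfiles.d lines are "type path mode user group age".
import subprocess


def add_vhostuser_rule(path='/run/libvirt-vhost-user'):
    rule = 'd {} 0770 libvirt-qemu kvm -\n'.format(path)
    with open('/etc/tmpfiles.d/nova-ovs-vhost-user.conf', 'w') as f:
        f.write(rule)
    subprocess.check_call(['systemd-tmpfiles', '--create'])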
Example #11
def emit_systemd_overrides_file():
    """Generate the systemd overrides file
    With Start and Stop timeout values
    Note: (David Ames) Bug#1654403 Work around
    May be removed if bug is resolved
    If timeout value is set to -1 pass infinity
    """
    if not init_is_systemd():
        return

    stop_timeout = int(config('service_stop_timeout'))
    if stop_timeout < 0:
        stop_timeout = 'infinity'
    start_timeout = int(config('service_start_timeout'))
    if start_timeout < 0:
        start_timeout = 'infinity'

    systemd_overrides_context = {
        'service_stop_timeout': stop_timeout,
        'service_start_timeout': start_timeout,
    }

    for service in ['corosync', 'pacemaker']:
        overrides_dir = SYSTEMD_OVERRIDES_DIR.format(service)
        overrides_file = SYSTEMD_OVERRIDES_FILE.format(overrides_dir)
        if not os.path.isdir(overrides_dir):
            os.mkdir(overrides_dir)

        write_file(path=overrides_file,
                   content=render_template('systemd-overrides.conf',
                                           systemd_overrides_context))

    # Update systemd with the new information
    subprocess.check_call(['systemctl', 'daemon-reload'])
Example #12
def post_series_upgrade():
    log("Running complete series upgrade hook", "INFO")
    if init_is_systemd():
        # NOTE(ajkavangh): ensure systemd override folder exists prior to
        #                  attempting to write override.conf
        #                  See bug: #1838634
        mkdir(os.path.dirname(CEILOMETER_API_SYSTEMD_CONF))
    series_upgrade_complete(
        resume_unit_helper, CONFIGS)
Example #13
def post_series_upgrade():
    postgresql.clear_version_cache()  # PG version upgrades should work on the master, but will break standbys
    config = hookenv.config()
    if config["pgdg"]:
        add_pgdg_source()
    if host.init_is_systemd():
        reactive.set_flag("postgresql.upgrade.systemd")
    reactive.clear_flag("postgresql.cluster.support-scripts")
    reactive.clear_flag("postgresql.cluster.configured")
Example #14
def set_dpdk_options():
    mask = config.get("dpdk-coremask")
    service = "/usr/bin/contrail-vrouter-dpdk"
    mask_arg = mask if mask.startswith("0x") else "-c " + mask
    if not init_is_systemd():
        srv = "/etc/contrail/supervisord_vrouter_files/contrail-vrouter-dpdk.ini"
        with open(srv, "r") as f:
            data = f.readlines()
        for index, line in enumerate(data):
            if not (line.startswith("command=") and service in line):
                continue
            original_args = line.split(service)[1].rstrip()
            command_args_dict, other_args = _get_args_from_command_string(
                original_args)
            config_args_dict = _dpdk_args_from_config_to_dict()
            command_args_dict.update(config_args_dict)
            dpdk_args_string = " ".join(" ".join(_)
                                        for _ in command_args_dict.items())
            args = dpdk_args_string + other_args
            newline = 'command=taskset ' + mask_arg + ' ' + service + ' ' + args + '\n'
            data[index] = newline

        with open(srv, "w") as f:
            f.writelines(data)
        service_restart("contrail-vrouter-dpdk")
        return

    # systemd magic
    srv_orig = "/lib/systemd/system/contrail-vrouter-dpdk.service"
    with open(srv_orig, "r") as f:
        data = f.readlines()
    for line in data:
        if not line.startswith("ExecStart="):
            continue
        original_args = line.split(service)[1].rstrip()
        dpdk_args_dict, other_args = _get_args_from_command_string(
            original_args)
        config_args_dict = _dpdk_args_from_config_to_dict()
        dpdk_args_dict.update(config_args_dict)
        break
    else:
        dpdk_args_dict = _dpdk_args_from_config_to_dict()
        other_args = " --no-daemon --socket-mem 1024"
    dpdk_args_string = " ".join(" ".join(_) for _ in dpdk_args_dict.items())
    args = dpdk_args_string + other_args

    srv_dir = "/etc/systemd/system/contrail-vrouter-dpdk.service.d/"
    try:
        os.mkdir(srv_dir)
    except OSError:
        # directory may already exist
        pass
    with open(srv_dir + "/override.conf", "w") as f:
        f.write("[Service]\nExecStart=\n")
        f.write("ExecStart=/usr/bin/taskset {mask} {service} {args}".format(
            service=service, mask=mask_arg, args=args))
    check_call(["systemctl", "daemon-reload"])
    service_restart("contrail-vrouter-dpdk")
Example #15
def install_tmpfilesd():
    '''Install systemd-tmpfiles configuration for ovs vhost-user sockets'''
    # NOTE(jamespage): Only do this if libvirt is actually installed
    if (init_is_systemd() and
            user_exists('libvirt-qemu') and
            group_exists('kvm')):
        shutil.copy('files/nova-ovs-vhost-user.conf',
                    '/etc/tmpfiles.d')
        subprocess.check_call(['systemd-tmpfiles', '--create'])
Example #16
def register_configs():
    """
    Register config files with their respective contexts.
    Registration of some configs may not be required depending on
    the existence of certain relations.
    """
    # if called without anything installed (eg during install hook)
    # just default to earliest supported release. configs don't get touched
    # till post-install, anyway.

    release = get_os_codename_package('ceilometer-common', fatal=False)
    configs = templating.OSConfigRenderer(templates_dir=TEMPLATES,
                                          openstack_release=release)
    if not release:
        log("Not installed yet, no way to determine the OS release. "
            "Skipping register configs", DEBUG)
        return configs

    if CompareOpenStackReleases(release) >= 'queens':
        for conf in QUEENS_CONFIG_FILES:
            configs.register(conf, QUEENS_CONFIG_FILES[conf]['hook_contexts'])
        configs.register(PIPELINE_CONF, [RemoteSinksContext()])
    else:
        for conf in (CEILOMETER_CONF, HAPROXY_CONF):
            configs.register(conf, CONFIG_FILES[conf]['hook_contexts'])

        if init_is_systemd():
            configs.register(
                CEILOMETER_API_SYSTEMD_CONF,
                CONFIG_FILES[CEILOMETER_API_SYSTEMD_CONF]['hook_contexts']
            )

        if os.path.exists('/etc/apache2/conf-available'):
            configs.register(
                HTTPS_APACHE_24_CONF,
                CONFIG_FILES[HTTPS_APACHE_24_CONF]['hook_contexts']
            )
        else:
            configs.register(
                HTTPS_APACHE_CONF,
                CONFIG_FILES[HTTPS_APACHE_CONF]['hook_contexts']
            )
        if enable_memcache(release=release):
            configs.register(MEMCACHED_CONF, [context.MemcacheContext()])

        if run_in_apache():
            wsgi_script = "/usr/share/ceilometer/app.wsgi"
            configs.register(
                WSGI_CEILOMETER_API_CONF,
                [context.WSGIWorkerConfigContext(name="ceilometer",
                                                 script=wsgi_script),
                 CeilometerContext(),
                 HAProxyContext()]
            )
        if CompareOpenStackReleases(release) >= 'mitaka':
            configs.register(PIPELINE_CONF, [RemoteSinksContext()])
    return configs
Example #17
def add_init_service_checks(nrpe, services, unit_name, immediate_check=True):
    """
    Add checks for each service in list

    :param NRPE nrpe: NRPE object to add check to
    :param list services: List of services to check
    :param str unit_name: Unit name to use in check description
    :param bool immediate_check: For sysv init, run the service check immediately
    """
    for svc in services:
        # Don't add a check for these services from neutron-gateway
        if svc in ['ext-port', 'os-charm-phy-nic-mtu']:
            continue

        upstart_init = '/etc/init/%s.conf' % svc
        sysv_init = '/etc/init.d/%s' % svc

        if host.init_is_systemd(service_name=svc):
            nrpe.add_check(
                shortname=svc,
                description='process check {%s}' % unit_name,
                check_cmd='check_systemd.py %s' % svc
            )
        elif os.path.exists(upstart_init):
            nrpe.add_check(
                shortname=svc,
                description='process check {%s}' % unit_name,
                check_cmd='check_upstart_job %s' % svc
            )
        elif os.path.exists(sysv_init):
            cronpath = '/etc/cron.d/nagios-service-check-%s' % svc
            checkpath = '%s/service-check-%s.txt' % (nrpe.homedir, svc)
            croncmd = (
                '/usr/local/lib/nagios/plugins/check_exit_status.pl '
                '-e -s /etc/init.d/%s status' % svc
            )
            cron_file = '*/5 * * * * root %s > %s\n' % (croncmd, checkpath)
            with open(cronpath, 'w') as f:
                f.write(cron_file)
            nrpe.add_check(
                shortname=svc,
                description='service check {%s}' % unit_name,
                check_cmd='check_status_file.py -f %s' % checkpath,
            )
            # if /var/lib/nagios doesn't exist open(checkpath, 'w') will fail
            # (LP: #1670223).
            if immediate_check and os.path.isdir(nrpe.homedir):
                with open(checkpath, 'w') as f:
                    subprocess.call(
                        croncmd.split(),
                        stdout=f,
                        stderr=subprocess.STDOUT
                    )
                os.chmod(checkpath, 0o644)
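
A hypothetical call site for add_init_service_checks(), built from the same nrpe helpers that appear in Example #30 below ('my-service' is illustrative):

# Usage sketch, not from any of the charms on this page.
from charmhelpers.contrib.charmsupport import nrpe

hostname = nrpe.get_nagios_hostname()
unit_name = nrpe.get_nagios_unit_name()
checks = nrpe.NRPE(hostname=hostname)
add_init_service_checks(checks, ['my-service'], unit_name)
checks.write()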
Example #18
def add_init_service_checks(nrpe, services, unit_name, immediate_check=True):
    """
    Add checks for each service in list

    :param NRPE nrpe: NRPE object to add check to
    :param list services: List of services to check
    :param str unit_name: Unit name to use in check description
    :param bool immediate_check: For sysv init, run the service check immediately
    """
    for svc in services:
        # Don't add a check for these services from neutron-gateway
        if svc in ['ext-port', 'os-charm-phy-nic-mtu']:
            continue

        upstart_init = '/etc/init/%s.conf' % svc
        sysv_init = '/etc/init.d/%s' % svc

        if host.init_is_systemd():
            nrpe.add_check(
                shortname=svc,
                description='process check {%s}' % unit_name,
                check_cmd='check_systemd.py %s' % svc
            )
        elif os.path.exists(upstart_init):
            nrpe.add_check(
                shortname=svc,
                description='process check {%s}' % unit_name,
                check_cmd='check_upstart_job %s' % svc
            )
        elif os.path.exists(sysv_init):
            cronpath = '/etc/cron.d/nagios-service-check-%s' % svc
            checkpath = '%s/service-check-%s.txt' % (nrpe.homedir, svc)
            croncmd = (
                '/usr/local/lib/nagios/plugins/check_exit_status.pl '
                '-e -s /etc/init.d/%s status' % svc
            )
            cron_file = '*/5 * * * * root %s > %s\n' % (croncmd, checkpath)
            with open(cronpath, 'w') as f:
                f.write(cron_file)
            nrpe.add_check(
                shortname=svc,
                description='service check {%s}' % unit_name,
                check_cmd='check_status_file.py -f %s' % checkpath,
            )
            # if /var/lib/nagios doesn't exist open(checkpath, 'w') will fail
            # (LP: #1670223).
            if immediate_check and os.path.isdir(nrpe.homedir):
                with open(checkpath, 'w') as f:
                    subprocess.call(
                        croncmd.split(),
                        stdout=f,
                        stderr=subprocess.STDOUT
                    )
                os.chmod(checkpath, 0o644)
Example #19
def install_systemd_override():
    '''
    Install systemd override files for nova-api-metadata
    and reload systemd daemon if required.
    '''
    if init_is_systemd() and not os.path.exists(SYSTEMD_NOVA_OVERRIDE):
        mkdir(os.path.dirname(SYSTEMD_NOVA_OVERRIDE))
        shutil.copy(
            os.path.join('files', os.path.basename(SYSTEMD_NOVA_OVERRIDE)),
            SYSTEMD_NOVA_OVERRIDE)
        subprocess.check_call(['systemctl', 'daemon-reload'])
Example #20
    def setup_init_scripts(self):
        templates_list = ['history', 'master', 'slave']
        for template in templates_list:
            if host.init_is_systemd():
                template_path = '/etc/systemd/system/spark-{}.service'.format(
                    template)
            else:
                template_path = '/etc/init/spark-{}.conf'.format(template)
            if os.path.exists(template_path):
                os.remove(template_path)

        self.stop()

        mode = hookenv.config()['spark_execution_mode']
        templates_list = ['history']
        if mode == 'standalone':
            templates_list.append('master')
            templates_list.append('slave')

        for template in templates_list:
            template_name = '{}-upstart.conf'.format(template)
            template_path = '/etc/init/spark-{}.conf'.format(template)
            if host.init_is_systemd():
                template_name = '{}-systemd.conf'.format(template)
                template_path = '/etc/systemd/system/spark-{}.service'.format(
                    template)

            render(
                template_name,
                template_path,
                context={
                    'spark_bin': self.dist_config.path('spark'),
                    'master': self.get_master()
                },
            )
            if host.init_is_systemd():
                utils.run_as('root', 'systemctl', 'enable',
                             'spark-{}.service'.format(template))

        if host.init_is_systemd():
            utils.run_as('root', 'systemctl', 'daemon-reload')
Example #21
    def setup_init_script(self, user, servicename):
        daemon = "yarn"
        if user == "hdfs":
            daemon = "hadoop"
        elif user == "mapred":
            daemon = "mr-jobhistory"

        template_name = 'templates/upstart.conf'
        target_template_path = '/etc/init/{}.conf'.format(servicename)
        if host.init_is_systemd():
            template_name = 'templates/systemd.conf'
            target_template_path = '/etc/systemd/system/{}.service'.format(
                servicename)

        d = os.path.dirname(sys.modules['jujubigdata'].__file__)
        source_template_path = os.path.join(d, template_name)

        if os.path.exists(target_template_path):
            os.remove(target_template_path)

        render(
            source_template_path,
            target_template_path,
            templates_dir="/",
            context={
                'service': servicename,
                'user': user,
                'hadoop_path': self.dist_config.path('hadoop'),
                'hadoop_conf': self.dist_config.path('hadoop_conf'),
                'daemon': daemon,
            },
        )
        if host.init_is_systemd():
            utils.run_as('root', 'systemctl', 'enable',
                         '{}.service'.format(servicename))

        if host.init_is_systemd():
            utils.run_as('root', 'systemctl', 'daemon-reload')
Example #22
    def setup_init_scripts(self):
        templates_list = ['history', 'master', 'slave']
        for template in templates_list:
            if host.init_is_systemd():
                template_path = '/etc/systemd/system/spark-{}.service'.format(template)
            else:
                template_path = '/etc/init/spark-{}.conf'.format(template)
            if os.path.exists(template_path):
                os.remove(template_path)

        self.stop()

        mode = hookenv.config()['spark_execution_mode']
        templates_list = ['history']
        if mode == 'standalone':
            templates_list.append('master')
            templates_list.append('slave')

        for template in templates_list:
            template_name = '{}-upstart.conf'.format(template)
            template_path = '/etc/init/spark-{}.conf'.format(template)
            if host.init_is_systemd():
                template_name = '{}-systemd.conf'.format(template)
                template_path = '/etc/systemd/system/spark-{}.service'.format(template)

            render(
                template_name,
                template_path,
                context={
                    'spark_bin': self.dist_config.path('spark'),
                    'master': self.get_master()
                },
            )
            if host.init_is_systemd():
                utils.run_as('root', 'systemctl', 'enable', 'spark-{}.service'.format(template))

        if host.init_is_systemd():
            utils.run_as('root', 'systemctl', 'daemon-reload')
Example #23
    def setup_init_script(self, user, servicename):
        daemon = "yarn"
        if user == "hdfs":
            daemon = "hadoop"
        elif user == "mapred":
            daemon = "mr-jobhistory"

        template_name = 'templates/upstart.conf'
        target_template_path = '/etc/init/{}.conf'.format(servicename)
        if host.init_is_systemd():
            template_name = 'templates/systemd.conf'
            target_template_path = '/etc/systemd/system/{}.service'.format(servicename)

        d = os.path.dirname(sys.modules['jujubigdata'].__file__)
        source_template_path = os.path.join(d, template_name)

        if os.path.exists(target_template_path):
            os.remove(target_template_path)

        render(
            source_template_path,
            target_template_path,
            templates_dir="/",
            context={
                'service': servicename,
                'user': user,
                'hadoop_path': self.dist_config.path('hadoop'),
                'hadoop_conf': self.dist_config.path('hadoop_conf'),
                'daemon': daemon,
            },
        )
        if host.init_is_systemd():
            utils.run_as('root', 'systemctl', 'enable', '{}.service'.format(servicename))

        if host.init_is_systemd():
            utils.run_as('root', 'systemctl', 'daemon-reload')
Example #24
def install():
    execd_preinstall()
    origin = config('openstack-origin')
    if (lsb_release()['DISTRIB_CODENAME'] == 'precise' and origin == 'distro'):
        origin = 'cloud:precise-grizzly'
    configure_installation_source(origin)
    packages = filter_installed_packages(get_packages())
    if packages:
        status_set('maintenance', 'Installing packages')
        apt_update(fatal=True)
        apt_install(packages, fatal=True)
    if init_is_systemd():
        # NOTE(jamespage): ensure systemd override folder exists prior to
        #                  attempting to write override.conf
        mkdir(os.path.dirname(CEILOMETER_API_SYSTEMD_CONF))
    if run_in_apache():
        disable_package_apache_site()
Example #25
def add_init_service_checks(nrpe, services, unit_name):
    """
    Add checks for each service in list

    :param NRPE nrpe: NRPE object to add check to
    :param list services: List of services to check
    :param str unit_name: Unit name to use in check description
    """
    for svc in services:
        # Don't add a check for these services from neutron-gateway
        if svc in ['ext-port', 'os-charm-phy-nic-mtu']:
            continue

        upstart_init = '/etc/init/%s.conf' % svc
        sysv_init = '/etc/init.d/%s' % svc

        if host.init_is_systemd():
            nrpe.add_check(
                shortname=svc,
                description='process check {%s}' % unit_name,
                check_cmd='check_systemd.py %s' % svc
            )
        elif os.path.exists(upstart_init):
            nrpe.add_check(
                shortname=svc,
                description='process check {%s}' % unit_name,
                check_cmd='check_upstart_job %s' % svc
            )
        elif os.path.exists(sysv_init):
            cronpath = '/etc/cron.d/nagios-service-check-%s' % svc
            cron_file = ('*/5 * * * * root '
                         '/usr/local/lib/nagios/plugins/check_exit_status.pl '
                         '-s /etc/init.d/%s status > '
                         '/var/lib/nagios/service-check-%s.txt\n' % (svc,
                                                                     svc)
                         )
            with open(cronpath, 'w') as f:
                f.write(cron_file)
            nrpe.add_check(
                shortname=svc,
                description='process check {%s}' % unit_name,
                check_cmd='check_status_file.py -f '
                          '/var/lib/nagios/service-check-%s.txt' % svc,
            )
Example #26
def install():
    if config.get("install-sources"):
        configure_sources(update=True, sources_var="install-sources",
                          keys_var="install-keys")

    # install packages
    apt_install(PACKAGES, fatal=True)

    install_url = config["install-url"]
    if install_url:
        # install opendaylight from tarball

        # this extracts the archive too
        install_remote(install_url, dest="/opt")
        # The extracted dirname. Look at what's on disk instead of mangling, so
        # the distribution tar.gz's name doesn't matter.
        install_dir_name = [
            f for f in os.listdir("/opt")
            if f.startswith("distribution-karaf")][0]
        if not os.path.exists("/opt/opendaylight-karaf"):
            os.symlink(install_dir_name, "/opt/opendaylight-karaf")
    else:
        apt_install([KARAF_PACKAGE], fatal=True)
        install_dir_name = "opendaylight-karaf"

    if init_is_systemd():
        shutil.copy("files/odl-controller.service", "/lib/systemd/system")
        service('enable', 'odl-controller')
    else:
        shutil.copy("files/odl-controller.conf", "/etc/init")

    adduser("opendaylight", system_user=True)
    mkdir("/home/opendaylight", owner="opendaylight", group="opendaylight",
          perms=0o755)
    check_call(
        ["chown", "-R", "opendaylight:opendaylight",
         os.path.join("/opt", install_dir_name)])
    mkdir("/var/log/opendaylight", owner="opendaylight", group="opendaylight",
          perms=0o755)

    # install features
    write_mvn_config()
    service_start("odl-controller")
Example #27
def install_ntpmon():
    """
    Install package dependencies, source files, and startup configuration.
    """
    hookenv.log('installing ntpmon dependencies')
    apt_install(['python3-psutil'])

    hookenv.log('installing ntpmon')
    host.rsync('src/', ntpmon_dir)

    if host.init_is_systemd():
        hookenv.log('installing ntpmon systemd configuration')
        host.rsync('src/' + service_name + '.systemd', systemd_config)
        subprocess.call(['systemctl', 'daemon-reload'])
    else:
        hookenv.log('installing ntpmon upstart configuration')
        host.rsync('src/' + service_name + '.upstart', upstart_config)
    set_state('ntpmon.installed')
    remove_state('ntpmon.configured')
Example #28
    def install_swap(self):
        """
        Setup swap space if needed.

        Big Data apps can be memory intensive and lead to kernel OOM errors. To
        provide a safety net against these, add swap space if none exists.
        """
        try:
            swap_out = subprocess.check_output(['cat', '/proc/swaps']).decode()
        except subprocess.CalledProcessError as e:
            hookenv.log('Could not inspect /proc/swaps: {}'.format(e),
                        hookenv.INFO)
            swap_out = None
        lines = swap_out.splitlines() if swap_out else []
        if len(lines) < 2:
            # /proc/swaps has a header row; if we don't have at least 2 lines,
            # we don't have swap space. Install dphys-swapfile to create some.
            hookenv.log('No swap space found in /proc/swaps', hookenv.INFO)
            try:
                subprocess.check_call(['apt-get', 'install', '-qy', 'dphys-swapfile'])
            except subprocess.CalledProcessError as e:
                hookenv.log('Proceeding with no swap due to an error '
                            'installing dphys-swapfile: {}'.format(e),
                            hookenv.ERROR)

            # Always include dphys-swapfile status in the log
            if init_is_systemd():
                cmd = ['systemctl', 'status', 'dphys-swapfile.service']
            else:
                cmd = ['service', 'dphys-swapfile', 'status']
            try:
                systemd_out = subprocess.check_output(cmd)
            except subprocess.CalledProcessError as e:
                hookenv.log('Failed to get dphys-swapfile status: {}'.format(e.output),
                            hookenv.ERROR)
            else:
                hookenv.log('Status of dphys-swapfile: {}'.format(systemd_out),
                            hookenv.INFO)
        else:
            # Log the fact that we already have swap space.
            hookenv.log('Swap space exists; skipping dphys-swapfile install',
                        hookenv.INFO)
Example #29
    def install_swap(self):
        """
        Setup swap space if needed.

        Big Data apps can be memory intensive and lead to kernel OOM errors. To
        provide a safety net against these, add swap space if none exists.
        """
        try:
            swap_out = subprocess.check_output(['cat', '/proc/swaps']).decode()
        except subprocess.CalledProcessError as e:
            hookenv.log('Could not inspect /proc/swaps: {}'.format(e),
                        hookenv.INFO)
            swap_out = None
        lines = swap_out.splitlines() if swap_out else []
        if len(lines) < 2:
            # /proc/swaps has a header row; if we don't have at least 2 lines,
            # we don't have swap space. Install dphys-swapfile to create some.
            hookenv.log('No swap space found in /proc/swaps', hookenv.INFO)
            try:
                subprocess.check_call(['apt-get', 'install', '-qy', 'dphys-swapfile'])
            except subprocess.CalledProcessError as e:
                hookenv.log('Proceeding with no swap due to an error '
                            'installing dphys-swapfile: {}'.format(e),
                            hookenv.ERROR)

            # Always include dphys-swapfile status in the log
            if init_is_systemd():
                cmd = ['systemctl', 'status', 'dphys-swapfile.service']
            else:
                cmd = ['service', 'dphys-swapfile', 'status']
            try:
                systemd_out = subprocess.check_output(cmd)
            except subprocess.CalledProcessError as e:
                hookenv.log('Failed to get dphys-swapfile status: {}'.format(e.output),
                            hookenv.ERROR)
            else:
                hookenv.log('Status of dphys-swapfile: {}'.format(systemd_out),
                            hookenv.INFO)
        else:
            # Log the fact that we already have swap space.
            hookenv.log('Swap space exists; skipping dphys-swapfile install',
                        hookenv.INFO)
Example #30
def update_nrpe_config():
    # python-dbus is used by check_upstart_job
    apt_install('python3-dbus')
    hostname = nrpe.get_nagios_hostname()
    current_unit = nrpe.get_nagios_unit_name()

    # create systemd or upstart check
    cmd = '/bin/cat /var/lib/ceph/osd/ceph-*/whoami |'
    if init_is_systemd():
        cmd += 'xargs -I_@ /usr/local/lib/nagios/plugins/check_systemd.py'
        cmd += ' ceph-osd@_@'
    else:
        cmd += 'xargs -I@ status ceph-osd id=@'
    cmd += ' && exit 0 || exit 2'

    nrpe_setup = nrpe.NRPE(hostname=hostname)
    nrpe_setup.add_check(shortname='ceph-osd',
                         description='process check {%s}' % current_unit,
                         check_cmd=cmd)
    nrpe_setup.write()
Example #31
def add_init_service_checks(nrpe, services, unit_name):
    """
    Add checks for each service in list

    :param NRPE nrpe: NRPE object to add check to
    :param list services: List of services to check
    :param str unit_name: Unit name to use in check description
    """
    for svc in services:
        # Don't add a check for these services from neutron-gateway
        if svc in ['ext-port', 'os-charm-phy-nic-mtu']:
            continue

        upstart_init = '/etc/init/%s.conf' % svc
        sysv_init = '/etc/init.d/%s' % svc

        if host.init_is_systemd():
            nrpe.add_check(shortname=svc,
                           description='process check {%s}' % unit_name,
                           check_cmd='check_systemd.py %s' % svc)
        elif os.path.exists(upstart_init):
            nrpe.add_check(shortname=svc,
                           description='process check {%s}' % unit_name,
                           check_cmd='check_upstart_job %s' % svc)
        elif os.path.exists(sysv_init):
            cronpath = '/etc/cron.d/nagios-service-check-%s' % svc
            cron_file = ('*/5 * * * * root '
                         '/usr/local/lib/nagios/plugins/check_exit_status.pl '
                         '-s /etc/init.d/%s status > '
                         '/var/lib/nagios/service-check-%s.txt\n' % (svc, svc))
            with open(cronpath, 'w') as f:
                f.write(cron_file)
            nrpe.add_check(
                shortname=svc,
                description='process check {%s}' % unit_name,
                check_cmd='check_status_file.py -f '
                '/var/lib/nagios/service-check-%s.txt' % svc,
            )
Example #32
def reload_systemd():
    """Reload systemd configuration on systemd based installs
    """
    if init_is_systemd():
        subprocess.check_call(['systemctl', 'daemon-reload'])
Example #33
def reload_and_restart():
    if ch_host.init_is_systemd():
        subprocess.check_call(['systemctl', 'daemon-reload'])
Example #34
def install():
    reactive.set_state("config.changed.pgdg")
    if host.init_is_systemd():
        reactive.set_flag("postgresql.upgrade.systemd")
Example #35
    def setup_kafka_config(self):
        '''
        copy the default configuration files to kafka_conf property
        defined in dist.yaml
        '''
        default_conf = self.dist_config.path('kafka') / 'config'
        kafka_conf = self.dist_config.path('kafka_conf')
        kafka_conf.rmtree_p()
        default_conf.copytree(kafka_conf)
        # Now remove the conf included in the tarball and symlink our real conf
        # dir. we've seen issues where kafka still looks for config in
        # KAFKA_HOME/config.
        default_conf.rmtree_p()
        kafka_conf.symlink(default_conf)

        # Similarly, we've seen issues where kafka wants to write to
        # KAFKA_HOME/logs regardless of the LOG_DIR, so make a symlink.
        default_logs = self.dist_config.path('kafka') / 'logs'
        kafka_logs = self.dist_config.path('kafka_app_logs')
        default_logs.rmtree_p()
        kafka_logs.symlink(default_logs)

        # Configure environment
        kafka_bin = self.dist_config.path('kafka') / 'bin'
        with utils.environment_edit_in_place('/etc/environment') as env:
            if kafka_bin not in env['PATH']:
                env['PATH'] = ':'.join([env['PATH'], kafka_bin])
            env['LOG_DIR'] = self.dist_config.path('kafka_app_logs')

        # Configure server.properties
        # NB: We set the advertised.host.name below to our short hostname
        # instead of our private ip so external (non-Juju) clients can connect
        # to kafka (admin will still have to expose kafka and ensure the
        # external client can resolve the short hostname to our public ip).
        network_interface = hookenv.config('network_interface')
        if network_interface:
            short_host = get_ip_for_interface(network_interface)
        else:
            short_host = check_output(['hostname', '-s']).decode('utf8').strip()
        kafka_port = self.dist_config.port('kafka')
        kafka_server_conf = self.dist_config.path('kafka_conf') / 'server.properties'
        service, unit_num = os.environ['JUJU_UNIT_NAME'].split('/', 1)
        utils.re_edit_in_place(kafka_server_conf, {
            r'^broker.id=.*': 'broker.id=%s' % unit_num,
            r'^port=.*': 'port=%s' % kafka_port,
            r'^log.dirs=.*': 'log.dirs=%s' % self.dist_config.path('kafka_data_logs'),
            r'^#?advertised.host.name=.*': 'advertised.host.name=%s' % short_host,
        })

        # Configure producer.properties
        # note: we set the broker list to whatever we advertise our broker to
        # be (advertised.host.name from above, which is our short hostname).
        kafka_producer_conf = self.dist_config.path('kafka_conf') / 'producer.properties'
        utils.re_edit_in_place(kafka_producer_conf, {
            r'^#?metadata.broker.list=.*': 'metadata.broker.list=%s:%s' % (short_host, kafka_port),
        })

        # Configure log properties
        kafka_log4j = self.dist_config.path('kafka_conf') / 'log4j.properties'
        utils.re_edit_in_place(kafka_log4j, {
            r'^kafka.logs.dir=.*': 'kafka.logs.dir=%s' % self.dist_config.path('kafka_app_logs'),
        })

        # Configure init script
        template_name = 'upstart.conf'
        template_path = '/etc/init/kafka.conf'
        if host.init_is_systemd():
            template_name = 'systemd.conf'
            template_path = '/etc/systemd/system/kafka.service'

        templating.render(
            template_name,
            template_path,
            context={
                'kafka_conf': self.dist_config.path('kafka_conf'),
                'kafka_bin': '{}/bin'.format(self.dist_config.path('kafka'))
            },
        )
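
utils.re_edit_in_place above comes from the charm's helper library (jujubigdata); for readers without it, a minimal stand-in with the same call shape (behavior may differ in details from the real helper):

# Minimal stand-in: replace any line matching a pattern with the given
# replacement. Assumes patterns are line-anchored, as in the example.
import re


def re_edit_in_place(path, subs):
    with open(path) as f:
        lines = f.readlines()
    for i, line in enumerate(lines):
        for pattern, repl in subs.items():
            if re.match(pattern, line):
                lines[i] = repl + '\n'
                break
    with open(path, 'w') as f:
        f.writelines(lines)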
Example #36
def request_per_unit_key():
    """Determine if a per-unit cephx key should be requested"""
    return (cmp_pkgrevno('radosgw', '12.2.0') >= 0 and init_is_systemd())
Example #37
    def setup_kafka_config(self):
        '''
        copy the default configuration files to kafka_conf property
        defined in dist.yaml
        '''
        default_conf = self.dist_config.path('kafka') / 'config'
        kafka_conf = self.dist_config.path('kafka_conf')
        kafka_conf.rmtree_p()
        default_conf.copytree(kafka_conf)
        # Now remove the conf included in the tarball and symlink our real conf
        # dir. we've seen issues where kafka still looks for config in
        # KAFKA_HOME/config.
        default_conf.rmtree_p()
        kafka_conf.symlink(default_conf)

        # Similarly, we've seen issues where kafka wants to write to
        # KAFKA_HOME/logs regardless of the LOG_DIR, so make a symlink.
        default_logs = self.dist_config.path('kafka') / 'logs'
        kafka_logs = self.dist_config.path('kafka_app_logs')
        default_logs.rmtree_p()
        kafka_logs.symlink(default_logs)

        # Configure environment
        kafka_bin = self.dist_config.path('kafka') / 'bin'
        with utils.environment_edit_in_place('/etc/environment') as env:
            if kafka_bin not in env['PATH']:
                env['PATH'] = ':'.join([env['PATH'], kafka_bin])
            env['LOG_DIR'] = self.dist_config.path('kafka_app_logs')

        # Configure server.properties
        # NB: We set the advertised.host.name below to our short hostname
        # instead of our private ip so external (non-Juju) clients can connect
        # to kafka (admin will still have to expose kafka and ensure the
        # external client can resolve the short hostname to our public ip).
        short_host = get_ip_for_interface(hookenv.config('network_interface'))
        if not short_host:
            short_host = hookenv.config().get('hostname')
        if not short_host:
            short_host = check_output(['hostname',
                                       '-s']).decode('utf8').strip()
        kafka_port = self.dist_config.port('kafka')
        kafka_server_conf = self.dist_config.path(
            'kafka_conf') / 'server.properties'
        service, unit_num = os.environ['JUJU_UNIT_NAME'].split('/', 1)
        utils.re_edit_in_place(
            kafka_server_conf, {
                r'^broker.id=.*':
                'broker.id=%s' % unit_num,
                r'^port=.*':
                'port=%s' % kafka_port,
                r'^log.dirs=.*':
                'log.dirs=%s' % self.dist_config.path('kafka_data_logs'),
                r'^#?advertised.host.name=.*':
                'advertised.host.name=%s' % short_host,
            })

        # Configure producer.properties
        # note: we set the broker list to whatever we advertise our broker to
        # be (advertised.host.name from above, which is our short hostname).
        kafka_producer_conf = self.dist_config.path(
            'kafka_conf') / 'producer.properties'
        utils.re_edit_in_place(
            kafka_producer_conf, {
                r'^#?metadata.broker.list=.*':
                'metadata.broker.list=%s:%s' % (short_host, kafka_port),
            })

        # Configure log properties
        kafka_log4j = self.dist_config.path('kafka_conf') / 'log4j.properties'
        utils.re_edit_in_place(
            kafka_log4j, {
                r'^kafka.logs.dir=.*':
                'kafka.logs.dir=%s' % self.dist_config.path('kafka_app_logs'),
            })

        # Configure init script
        template_name = 'upstart.conf'
        template_path = '/etc/init/kafka.conf'
        if host.init_is_systemd():
            template_name = 'systemd.conf'
            template_path = '/etc/systemd/system/kafka.service'

        templating.render(
            template_name,
            template_path,
            context={
                'kafka_conf': self.dist_config.path('kafka_conf'),
                'kafka_bin': '{}/bin'.format(self.dist_config.path('kafka'))
            },
        )
Example #38
def reload_and_restart():
    if ch_host.init_is_systemd():
        subprocess.check_call(['systemctl', 'daemon-reload'])
Example #39
def request_per_unit_key():
    """Determine if a per-unit cephx key should be requested"""
    return (cmp_pkgrevno('radosgw', '12.2.0') >= 0 and init_is_systemd())
Example #40
def upgrade_charm():
    workloadstatus.status_set("maintenance", "Upgrading charm")

    rels = context.Relations()

    # The master is now appointed by the leader.
    if hookenv.is_leader():
        master = replication.get_master()
        if not master:
            master = hookenv.local_unit()
            peer_rel = helpers.get_peer_relation()
            if peer_rel:
                for peer_relinfo in peer_rel.values():
                    if peer_relinfo.get("state") == "master":
                        master = peer_relinfo.unit
                        break
            hookenv.log("Discovered {} is the master".format(master))
            leadership.leader_set(master=master)

    # The name of this crontab has changed. It will get regenerated.
    if os.path.exists("/etc/cron.d/postgresql"):
        hookenv.log("Removing old crontab")
        os.unlink("/etc/cron.d/postgresql")

    # Older generated usernames were generated from the relation id,
    # and really old ones contained random components. This made it
    # problematic to restore a database into a fresh environment,
    # because the new usernames would not match the old usernames and
    # none of the database permissions would match. We now generate
    # usernames using just the client service name, so restoring a
    # database into a fresh environment will work provided the service
    # names match. We want to update the old usernames in upgraded
    # services to the new format to improve their disaster recovery
    # story.
    for relname, superuser in [("db", False), ("db-admin", True)]:
        for client_rel in rels[relname].values():
            hookenv.log("Migrating database users for {}".format(client_rel))
            password = client_rel.local.get("password", host.pwgen())
            old_username = client_rel.local.get("user")
            new_username = postgresql.username(client_rel.service, superuser,
                                               False)
            if old_username and old_username != new_username:
                migrate_user(old_username, new_username, password, superuser)
                client_rel.local["user"] = new_username
                client_rel.local["password"] = password

            old_username = client_rel.local.get("schema_user")
            if old_username and old_username != new_username:
                migrate_user(old_username, new_username, password, superuser)
                client_rel.local["schema_user"] = new_username
                client_rel.local["schema_password"] = password

    # Admin relations used to get 'all' published as the database name,
    # which was bogus.
    for client_rel in rels["db-admin"].values():
        if client_rel.local.get("database") == "all":
            client_rel.local["database"] = client_rel.service

    # Reconfigure PostgreSQL and republish client relations.
    reactive.remove_state("postgresql.cluster.configured")
    reactive.remove_state("postgresql.client.published")

    # Don't recreate the cluster.
    reactive.set_state("postgresql.cluster.created")

    # Set the postgresql.replication.cloned flag, so we don't rebuild
    # standbys when upgrading the charm from a pre-reactive version.
    reactive.set_state("postgresql.replication.cloned")

    # Publish which node we are following
    peer_rel = helpers.get_peer_relation()
    if peer_rel and "following" not in peer_rel.local:
        following = unitdata.kv().get("postgresql.replication.following")
        if following is None and not replication.is_master():
            following = replication.get_master()
        peer_rel.local["following"] = following

    # Ensure storage that was attached but ignored is no longer ignored.
    if not reactive.is_state("postgresql.storage.pgdata.attached"):
        if hookenv.storage_list("pgdata"):
            storage.attach()

    # Ensure client usernames and passwords match leader settings.
    for relname in ("db", "db-admin"):
        for rel in rels[relname].values():
            del rel.local["user"]
            del rel.local["password"]

    # Ensure the configured version is cached.
    postgresql.version()

    # Skip checks for pre-existing databases, as that has already happened.
    reactive.set_state("postgresql.cluster.initial-check")

    # Reinstall support scripts
    reactive.remove_state("postgresql.cluster.support-scripts")

    # Ensure that systemd is managing the PostgreSQL process
    if host.init_is_systemd(
    ) and not reactive.is_flag_set("postgresql.upgrade.systemd"):
        reactive.set_flag("postgresql.upgrade.systemd")
        if reactive.is_flag_set("postgresql.cluster.is_running"):
            hookenv.log("Restarting PostgreSQL under systemd", hookenv.WARNING)
            reactive.clear_flag("postgresql.cluster.is_running")
            postgresql.stop_pgctlcluster()

    # Update the PGDG source, in case the signing key has changed.
    config = hookenv.config()
    if config["pgdg"]:
        service.add_pgdg_source()
Example #41
def configure_ovs():
    """Configure the OVS plugin.

    This function uses the config.yaml parameters ext-port, data-port and
    bridge-mappings to configure the bridges and ports on the ovs on the
    unit.

    Note that the ext-port is deprecated and data-port/bridge-mappings are
    preferred.

    Thus, if data-port is set, then ext-port is ignored (and if set, then
    it is removed from the set of bridges unless it is defined in
    bridge-mappings/data-port).  A warning is issued if both data-port and
    ext-port are set.
    """
    status_set('maintenance', 'Configuring ovs')
    if not service_running('openvswitch-switch'):
        full_restart()

    # all bridges use the same datapath_type
    brdata = {
        'datapath-type': determine_datapath_type(),
    }
    brdata.update(generate_external_ids())

    add_bridge(INT_BRIDGE, brdata=brdata)
    add_bridge(EXT_BRIDGE, brdata=brdata)

    # If data-port is defined in the config, we can ignore ext-port value
    # and log an error to the unit's log
    if config('data-port') and config('ext-port'):
        log(
            "Both ext-port and data-port are set. ext-port is deprecated"
            " and is not used when data-port is set.",
            level=ERROR)

    ext_port_ctx = None
    if use_dvr():
        ext_port_ctx = ExternalPortContext()()
    # Set ext-port only if data-port isn't defined.
    if not config('data-port') and ext_port_ctx and ext_port_ctx['ext_port']:
        add_bridge_port(EXT_BRIDGE,
                        ext_port_ctx['ext_port'],
                        ifdata=generate_external_ids(EXT_BRIDGE),
                        portdata=generate_external_ids(EXT_BRIDGE))

    modern_ovs = ovs_has_late_dpdk_init()

    bridgemaps = None
    portmaps = None
    if not use_dpdk():
        # NOTE(jamespage):
        # It's possible to support both hardware offloaded 'direct' ports
        # and default 'openvswitch' ports on the same hypervisor, so
        # configure bridge mappings in addition to any hardware offload
        # enablement.
        portmaps = DataPortContext()()
        bridgemaps = parse_bridge_mappings(config('bridge-mappings'))
        for br in bridgemaps.values():
            add_bridge(br, brdata=brdata)
            if not portmaps:
                continue

            for port, _br in portmaps.items():
                if _br == br:
                    if not is_linuxbridge_interface(port):
                        add_bridge_port(br,
                                        port,
                                        promisc=True,
                                        ifdata=generate_external_ids(br),
                                        portdata=generate_external_ids(br))
                    else:
                        log('{} is a Linux bridge: using Linux bridges in the '
                            'data-port config is deprecated for removal after '
                            '21.10 release of OpenStack charms.'.format(port),
                            level=WARNING)
                        add_ovsbridge_linuxbridge(
                            br,
                            port,
                            ifdata=generate_external_ids(br),
                            portdata=generate_external_ids(br))

    # NOTE(jamespage):
    # hw-offload and dpdk are mutually exclusive, so log an error
    # and skip any subsequent DPDK configuration
    if use_dpdk() and use_hw_offload():
        log(
            'DPDK and Hardware offload are mutually exclusive, '
            'please disable enable-dpdk or enable-hardware-offload',
            level=ERROR)
    elif use_dpdk():
        log('Configuring bridges with DPDK', level=DEBUG)

        # TODO(sahid): We should also take into account the
        # "physical-network-mtus" in case different MTUs are
        # configured based on physical networks.
        global_mtu = (
            neutron_ovs_context.NeutronAPIContext()()['global_physnet_mtu'])

        dpdk_context = OVSDPDKDeviceContext()
        devices = dpdk_context.devices()

        portmaps = parse_data_port_mappings(config('data-port'))
        bridgemaps = parse_bridge_mappings(config('bridge-mappings'))

        bridge_port_interface_map = BridgePortInterfaceMap()
        bond_config = BondConfig()

        for br, port_iface_map in bridge_port_interface_map.items():
            log('Adding DPDK bridge: {}:{}'.format(br, brdata), level=DEBUG)
            add_bridge(br, brdata=brdata)
            if modern_ovs:
                for port in port_iface_map.keys():
                    ifdatamap = bridge_port_interface_map.get_ifdatamap(
                        br, port)
                    # set external-ids for all interfaces
                    for iface in ifdatamap:
                        ifdatamap[iface].update(generate_external_ids(br))
                    # NOTE: DPDK bonds are referenced by name and can be found
                    #       in the data-port config, regular DPDK ports are
                    #       referenced by MAC addresses and their names should
                    #       never be found in data-port
                    if port in portmaps.keys():
                        portdata = bond_config.get_ovs_portdata(port)
                        portdata.update(generate_external_ids(br))
                        log('Adding DPDK bond: {}({}) to bridge: {}'.format(
                            port, list(ifdatamap.keys()), br),
                            level=DEBUG)
                        add_bridge_bond(br,
                                        port,
                                        list(ifdatamap.keys()),
                                        portdata=portdata,
                                        ifdatamap=ifdatamap)
                    else:
                        log('Adding DPDK port: {} to bridge: {}'.format(
                            port, br),
                            level=DEBUG)
                        ifdata = ifdatamap[port]
                        add_bridge_port(br,
                                        port,
                                        ifdata=ifdata,
                                        portdata=generate_external_ids(br),
                                        linkup=False,
                                        promisc=None)
        if not modern_ovs:
            # port enumeration in legacy OVS-DPDK must follow alphabetic order
            # of the PCI addresses
            dev_idx = 0
            for pci, mac in sorted(devices.items()):
                # if mac.entity is a bridge, then the port can be added
                # directly; otherwise it is a bond (supported only in
                # modern_ovs) or a misconfiguration
                if mac.entity in bridgemaps.values():
                    ifdata = {'type': 'dpdk', 'mtu-request': global_mtu}
                    ifdata.update(generate_external_ids(mac.entity))
                    ifname = 'dpdk{}'.format(dev_idx)
                    log('Adding DPDK port {}:{} to bridge {}'.format(
                        ifname, ifdata, mac.entity),
                        level=DEBUG)
                    add_bridge_port(mac.entity,
                                    ifname,
                                    ifdata=ifdata,
                                    portdata=generate_external_ids(mac.entity),
                                    linkup=False,
                                    promisc=None)
                else:
                    log('DPDK device {} skipped, {} is not a bridge'.format(
                        pci, mac.entity),
                        level=WARNING)
                dev_idx += 1

    target = config('ipfix-target')
    bridges = [INT_BRIDGE, EXT_BRIDGE]
    if bridgemaps:
        bridges.extend(bridgemaps.values())
    elif portmaps:
        bridges.extend([bridge_mac.entity for bridge_mac in portmaps.values()])

    if target:
        for bridge in bridges:
            disable_ipfix(bridge)
            enable_ipfix(bridge, target)
    else:
        # NOTE: removing ipfix setting from a bridge is idempotent and
        #       will pass regardless of the existence of the setting
        for bridge in bridges:
            disable_ipfix(bridge)

    # Ensure this runs so that mtu is applied to data-port interfaces if
    # provided.
    # NOTE(ajkavanagh) for pause/resume we don't gate this as it's not a
    # running service, but rather running a few commands.
    if not init_is_systemd():
        service_restart('os-charm-phy-nic-mtu')
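
The docstring's precedence rules reduce to a small decision: data-port wins, ext-port applies only when data-port is unset, and setting both deserves a logged error. A standalone sketch of that decision (the config dict and log callable are illustrative, not the charm's own API):

def choose_external_port(config, log):
    """Return the port to plug into the external bridge, or None.

    data-port takes precedence; ext-port is deprecated and only used
    when data-port is unset. Illustrative reduction of the rules above.
    """
    if config.get('data-port') and config.get('ext-port'):
        log("Both ext-port and data-port are set. ext-port is deprecated"
            " and is not used when data-port is set.")
    if config.get('data-port'):
        return None  # external ports come from data-port/bridge-mappings
    return config.get('ext-port')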
Example #42
0
    def setup_kafka_config(self):
        '''
        Copy the default configuration files to the kafka_conf directory
        defined in dist.yaml.
        '''
        default_conf = self.dist_config.path('kafka') / 'config'
        kafka_conf = self.dist_config.path('kafka_conf')
        kafka_conf.rmtree_p()
        default_conf.copytree(kafka_conf)
        # Now remove the conf included in the tarball and symlink our real
        # conf dir. We've seen issues where Kafka still looks for config in
        # KAFKA_HOME/config.
        default_conf.rmtree_p()
        kafka_conf.symlink(default_conf)

        # Similarly, we've seen issues where Kafka wants to write to
        # KAFKA_HOME/logs regardless of LOG_DIR, so make a symlink.
        default_logs = self.dist_config.path('kafka') / 'logs'
        kafka_logs = self.dist_config.path('kafka_app_logs')
        default_logs.rmtree_p()
        kafka_logs.symlink(default_logs)

        # Configure environment
        kafka_bin = self.dist_config.path('kafka') / 'bin'
        with utils.environment_edit_in_place('/etc/environment') as env:
            if kafka_bin not in env['PATH']:
                env['PATH'] = ':'.join([env['PATH'], kafka_bin])
            env['LOG_DIR'] = self.dist_config.path('kafka_app_logs')

        # Configure server.properties
        # note: we set the advertised.host.name below to the public_address
        # to ensure that external (non-Juju) clients can connect to Kafka
        public_address = hookenv.unit_get('public-address')
        private_ip = utils.resolve_private_address(hookenv.unit_get('private-address'))
        kafka_port = self.dist_config.port('kafka')
        kafka_server_conf = self.dist_config.path('kafka_conf') / 'server.properties'
        service, unit_num = os.environ['JUJU_UNIT_NAME'].split('/', 1)
        utils.re_edit_in_place(kafka_server_conf, {
            r'^broker.id=.*': 'broker.id=%s' % unit_num,
            r'^port=.*': 'port=%s' % kafka_port,
            r'^log.dirs=.*': 'log.dirs=%s' % self.dist_config.path('kafka_data_logs'),
            r'^#?advertised.host.name=.*': 'advertised.host.name=%s' % public_address,
        })

        # Configure producer.properties
        # note: we set the broker list host below to the public_address
        # to ensure that external (non-Juju) clients can connect to Kafka.
        # It must match our advertised.host.name from above.
        kafka_producer_conf = self.dist_config.path('kafka_conf') / 'producer.properties'
        utils.re_edit_in_place(kafka_producer_conf, {
            r'^#?metadata.broker.list=.*': 'metadata.broker.list=%s:%s' % (public_address, kafka_port),
        })

        # Configure log properties
        kafka_log4j = self.dist_config.path('kafka_conf') / 'log4j.properties'
        utils.re_edit_in_place(kafka_log4j, {
            r'^kafka.logs.dir=.*': 'kafka.logs.dir=%s' % self.dist_config.path('kafka_app_logs'),
        })

        template_name = 'upstart.conf'
        template_path = '/etc/init/kafka.conf'
        if host.init_is_systemd():
            template_name = 'systemd.conf'
            template_path = '/etc/systemd/system/kafka.service'

        templating.render(
            template_name,
            template_path,
            context={
                'kafka_conf': self.dist_config.path('kafka_conf'),
                'kafka_bin': '{}/bin'.format(self.dist_config.path('kafka'))
            },
        )

        # Fix for LXC containers and some corner cases in the manual
        # provider: ensure that public_address is resolvable internally by
        # mapping it to the private IP.
        utils.update_kv_host(private_ip, public_address)
        utils.manage_etc_hosts()
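
setup_kafka_config leans on utils.re_edit_in_place to rewrite individual key=value lines in the .properties files. A minimal sketch of how such a helper can be built on the standard re module (this implementation is illustrative, not the charm library's own):

import re

def re_edit_in_place(path, subs):
    """Rewrite lines of a file in place using {pattern: replacement}.

    A line matching a pattern is replaced wholesale by the replacement;
    unmatched lines pass through untouched. Illustrative only.
    """
    with open(path) as f:
        lines = f.read().splitlines()
    for i, line in enumerate(lines):
        for pattern, replacement in subs.items():
            if re.match(pattern, line):
                lines[i] = replacement
                break
    with open(path, 'w') as f:
        f.write('\n'.join(lines) + '\n')

Applied with the server.properties mapping above, this rewrites the broker.id=, port= and log.dirs= lines while leaving the rest of the file untouched.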
Example #43
0
def git_post_install(projects_yaml):
    """Perform post-install setup."""
    http_proxy = git_yaml_value(projects_yaml, 'http_proxy')
    if http_proxy:
        pip_install('libvirt-python', proxy=http_proxy,
                    venv=git_pip_venv_dir(projects_yaml))
    else:
        pip_install('libvirt-python',
                    venv=git_pip_venv_dir(projects_yaml))

    src_etc = os.path.join(git_src_dir(projects_yaml, 'nova'), 'etc/nova')
    configs = [
        {'src': src_etc,
         'dest': '/etc/nova'},
    ]

    for c in configs:
        if os.path.exists(c['dest']):
            shutil.rmtree(c['dest'])
        shutil.copytree(c['src'], c['dest'])

    # NOTE(coreycb): Need to find better solution than bin symlinks.
    symlinks = [
        {'src': os.path.join(git_pip_venv_dir(projects_yaml),
                             'bin/nova-rootwrap'),
         'link': '/usr/local/bin/nova-rootwrap'},
    ]

    for s in symlinks:
        if os.path.lexists(s['link']):
            os.remove(s['link'])
        os.symlink(s['src'], s['link'])

    virt_type = VIRT_TYPES[config('virt-type')][0]
    nova_compute_conf = 'git/{}.conf'.format(virt_type)
    render(nova_compute_conf, '/etc/nova/nova-compute.conf', {}, perms=0o644)
    render('git/nova_sudoers', '/etc/sudoers.d/nova_sudoers', {}, perms=0o440)

    service_name = 'nova-compute'
    nova_user = 'nova'  # assumed; the source listing masks this literal
    start_dir = '/var/lib/nova'
    bin_dir = os.path.join(git_pip_venv_dir(projects_yaml), 'bin')
    nova_conf = '/etc/nova/nova.conf'
    nova_api_metadata_context = {
        'service_description': 'Nova Metadata API server',
        'service_name': service_name,
        'user_name': nova_user,
        'start_dir': start_dir,
        'process_name': 'nova-api-metadata',
        'executable_name': os.path.join(bin_dir, 'nova-api-metadata'),
        'config_files': [nova_conf],
    }
    nova_api_context = {
        'service_description': 'Nova API server',
        'service_name': service_name,
        'user_name': nova_user,
        'start_dir': start_dir,
        'process_name': 'nova-api',
        'executable_name': os.path.join(bin_dir, 'nova-api'),
        'config_files': [nova_conf],
    }
    # Use systemd init units/scripts from Ubuntu Wily onward
    if init_is_systemd():
        activate_path = os.path.join(git_pip_venv_dir(projects_yaml), 'bin',
                                     'activate')
        nova_compute_context = {
            'daemon_path': os.path.join(bin_dir, 'nova-compute'),
            'activate_path': activate_path,
        }
        templates_dir = os.path.join(charm_dir(), 'templates/git')
        render('git/nova-compute.system.in.template',
               '/lib/systemd/system/nova-compute.service',
               nova_compute_context, perms=0o644)
    else:
        nova_compute_context = {
            'service_description': 'Nova compute worker',
            'service_name': service_name,
            'user_name': nova_user,
            'process_name': 'nova-compute',
            'executable_name': os.path.join(bin_dir, 'nova-compute'),
            'config_files': [nova_conf, '/etc/nova/nova-compute.conf'],
        }
        render('git/upstart/nova-compute.upstart',
               '/etc/init/nova-compute.conf',
               nova_compute_context, perms=0o644)

    nova_network_context = {
        'service_description': 'Nova network worker',
        'service_name': service_name,
        'user_name': nova_user,
        'start_dir': start_dir,
        'process_name': 'nova-network',
        'executable_name': os.path.join(bin_dir, 'nova-network'),
        'config_files': [nova_conf],
    }

    # NOTE(coreycb): Needs systemd support
    templates_dir = 'hooks/charmhelpers/contrib/openstack/templates'
    templates_dir = os.path.join(charm_dir(), templates_dir)
    render('git.upstart', '/etc/init/nova-api-metadata.conf',
           nova_api_metadata_context, perms=0o644, templates_dir=templates_dir)
    render('git.upstart', '/etc/init/nova-api.conf',
           nova_api_context, perms=0o644, templates_dir=templates_dir)
    render('git.upstart', '/etc/init/nova-network.conf',
           nova_network_context, perms=0o644, templates_dir=templates_dir)

    apt_update()
    apt_install(LATE_GIT_PACKAGES, fatal=True)
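
The symlink loop in git_post_install removes any pre-existing link before re-creating it, because os.symlink raises if the name already exists, and os.path.lexists (unlike os.path.exists) also catches dangling links. That step is worth capturing as a tiny idempotent helper (illustrative, not part of the charm):

import os

def force_symlink(src, link):
    """Create or replace a symlink, tolerating stale or dangling links."""
    if os.path.lexists(link):  # true even for a broken existing symlink
        os.remove(link)
    os.symlink(src, link)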
Example #44
0
def reload_systemd():
    """Reload systemd configuration on systemd based installs
    """
    if init_is_systemd():
        subprocess.check_call(['systemctl', 'daemon-reload'])
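
Every example in this collection hinges on init_is_systemd(). The usual implementation is a simple filesystem probe: systemd creates /run/systemd/system at boot, so the directory's presence identifies the init system. A minimal sketch along those lines (mirroring, but not copied from, the charm-helpers implementation):

import os

SYSTEMD_SYSTEM = '/run/systemd/system'  # created by systemd at boot

def init_is_systemd():
    """Return True if the running init system is systemd."""
    return os.path.isdir(SYSTEMD_SYSTEM)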