Example #1
def main(cluster_data={}):

    # Grab the boilerplate config entries
    cluster_data['unit_name'] = environ['JUJU_UNIT_NAME'].replace('/', '')
    cluster_data['private_address'] = private_address
    cluster_data['public_address'] = public_address
    cluster_data['cluster_state'] = 'new'

    if not leader_status:
        cluster_data['cluster_state'] = hookenv.relation_get('cluster-state')
        leader_address = hookenv.relation_get('leader-address')

        # do self registration
        if not db.get('registered'):
            cmd = "/opt/etcd/etcdctl -C http://{}:4001 member add {}" \
                  " http://{}:7001".format(leader_address,
                                           cluster_data['unit_name'],
                                           private_address)
            print(cmd)
            check_call(shlex.split(cmd))
            db.set('registered', True)

    # introspect the cluster, and form the cluster string.
    # https://github.com/coreos/etcd/blob/master/Documentation/configuration.md#-initial-cluster

    templating.render('etcd.conf.jinja2', '/etc/init/etcd.conf',
                      cluster_data, owner='root', group='root')

    host.service('restart', 'etcd')
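The comment above points at etcd's --initial-cluster option, but the excerpt never shows the cluster string being built. As a hedged illustration (the helper name and addresses below are assumptions, not part of the charm), the string rendered into the template is assembled roughly like this:

def build_initial_cluster_string(members):
    # members maps unit name -> peer URL, e.g.
    # {'etcd0': 'http://10.0.0.10:7001', 'etcd1': 'http://10.0.0.11:7001'}
    # result: 'etcd0=http://10.0.0.10:7001,etcd1=http://10.0.0.11:7001'
    return ','.join('{}={}'.format(name, url)
                    for name, url in sorted(members.items()))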
Example #2
def restart():
    open_port(80)
    if service_running("traefik"):
        service("restart", "traefik")
    else:
        service("start", "traefik")
    status_set("active", "")
Example #3
def restart():
    open_port(port())
    if service_running('omnivector-challenge'):
        service('restart', 'omnivector-challenge')
    else:
        service('start', 'omnivector-challenge')
    status_set("active", "")
Example #4
def maybe_handle_policyd_override(openstack_release, hook):
    """Handle the use-policy-override config flag and resource file.

    This function checks that policy overrides are supported on this release,
    that the config flag is enabled, and then processes the resources, copies
    the package policies to the config area, loads the override files.  In the
    case where the config flag is false, it removes the policy overrides by
    deleting the config area policies.  Note that the template for
    `local_settings.py` controls where the horizon service actually reads the
    policies from.

    Note that for the 'config-changed' hook, the function is only interested in
    whether the config value of `use-policy-override` matches the current
    status of the policy overrides success file.  If it doesn't, either the
    config area policies are removed (i.e. False) or the policy overrides file
    is processed.

    :param openstack_release: The release of OpenStack installed.
    :type openstack_release: str
    :param hook: The hook name
    :type hook: str
    """
    log("Seeing if policyd overrides need doing", level=INFO)
    if not policyd.is_policyd_override_valid_on_this_release(
            openstack_release):
        log("... policy overrides not valid on this release: {}".format(
            openstack_release),
            level=INFO)
        return
    # if policy config is not set, then remove the entire directory
    _config = config()
    if not _config.get(policyd.POLICYD_CONFIG_NAME, False):
        _dir = policyd.policyd_dir_for('openstack-dashboard')
        if os.path.exists(_dir):
            log("... config is cleared, and removing {}".format(_dir), INFO)
            shutil.rmtree(_dir)
        else:
            log("... nothing to do", INFO)
        policyd.remove_policy_success_file()
        return
    # config-changed and the policyd overrides have already been performed, so just return
    if hook == "config-changed" and policyd.is_policy_success_file_set():
        log("... already setup, so skipping.", level=INFO)
        return
    # from now on it should succeed; if it doesn't, the status line will show
    # broken.
    resource_filename = policyd.get_policy_resource_filename()
    restart = policyd.process_policy_resource_file(
        resource_filename,
        'openstack-dashboard',
        blacklist_paths=blacklist_policyd_paths(),
        preserve_topdir=True,
        preprocess_filename=policyd_preprocess_name,
        user='******',
        group='horizon')
    copy_conf_to_policyd()
    if restart:
        service('stop', 'apache2')
        service('start', 'apache2')
    log("Policy override processing complete.", level=INFO)
Example #5
def render_nimsoft_robot_config():
    """Create the nimbus.conf config file.

    Renders the appropriate template for the Nimbus Robot
    """
    # The v5 template is compatible with all versions < 6
    cfg_original_hash = file_hash(NIMBUS_ROBOT_CONFIG)
    context = {
        'hub': config("hub"),
        'domain': config("domain"),
        'hubip': config("hubip"),
        'hub_robot_name': config("hub-robot-name"),
        'secondary_domain': config("secondary-domain"),
        'secondary_hubip': config("secondary-hubip"),
        'secondary_hub': config("secondary-hub"),
        'secondary_hub_robot_name': config("secondary-hub-robot-name"),
        'private_address': unit_private_ip(),
        'hostname': os.uname()[1]
    }
    render('robot.cfg', NIMBUS_ROBOT_CONFIG, context=context)
    cfg_new_hash = file_hash(NIMBUS_ROBOT_CONFIG)

    rsync(charm_dir() + '/files/request_linux_prod.cfg',
          '/opt/nimsoft/request.cfg')

    # Install the nimbus service
    rsync(charm_dir() + '/files/nimbus.service',
          '/lib/systemd/system/nimbus.service')

    if cfg_original_hash != cfg_new_hash:
        service('restart', 'nimbus')
        status.active('nimbus ready.')
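The hash-before/hash-after check used above generalises to any rendered config file. A minimal sketch of that pattern, assuming charmhelpers' file_hash, render and service helpers:

from charmhelpers.core.host import file_hash, service
from charmhelpers.core.templating import render

def render_and_restart_if_changed(template, target, context, svc):
    # Restart the service only when the rendered file actually changed.
    before = file_hash(target)
    render(template, target, context=context)
    if file_hash(target) != before:
        service('restart', svc)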
Example #6
    def write(self):
        try:
            nagios_uid = pwd.getpwnam('nagios').pw_uid
            nagios_gid = grp.getgrnam('nagios').gr_gid
        except Exception:
            log("Nagios user not set up, nrpe checks not updated")
            return

        if not os.path.exists(NRPE.nagios_logdir):
            os.mkdir(NRPE.nagios_logdir)
            os.chown(NRPE.nagios_logdir, nagios_uid, nagios_gid)

        nrpe_monitors = {}
        monitors = {"monitors": {"remote": {"nrpe": nrpe_monitors}}}
        for nrpecheck in self.checks:
            nrpecheck.write(self.nagios_context, self.hostname,
                            self.nagios_servicegroups)
            nrpe_monitors[nrpecheck.shortname] = {
                "command": nrpecheck.command,
            }

        # update-status hooks are configured to fire every 5 minutes by
        # default. When nagios-nrpe-server is restarted, the nagios server
        # reports checks failing, causing unnecessary alerts. Let's not restart
        # on update-status hooks.
        if not hook_name() == 'update-status':
            service('restart', 'nagios-nrpe-server')

        monitor_ids = relation_ids("local-monitors") + \
            relation_ids("nrpe-external-master")
        for rid in monitor_ids:
            relation_set(relation_id=rid, monitors=yaml.dump(monitors))
Example #7
    def write(self):
        try:
            nagios_uid = pwd.getpwnam('nagios').pw_uid
            nagios_gid = grp.getgrnam('nagios').gr_gid
        except Exception:
            log("Nagios user not set up, nrpe checks not updated")
            return

        if not os.path.exists(NRPE.nagios_logdir):
            os.mkdir(NRPE.nagios_logdir)
            os.chown(NRPE.nagios_logdir, nagios_uid, nagios_gid)

        nrpe_monitors = {}
        monitors = {"monitors": {"remote": {"nrpe": nrpe_monitors}}}

        # check that the charm can write to the conf dir.  If not, then nagios
        # probably isn't installed, and we can defer.
        if not self.does_nrpe_conf_dir_exist():
            return

        for nrpecheck in self.checks:
            nrpecheck.write(self.nagios_context, self.hostname,
                            self.nagios_servicegroups)
            nrpe_monitors[nrpecheck.shortname] = {
                "command": nrpecheck.command,
            }
            # If we were passed max_check_attempts, add that to the relation data
            try:
                nrpe_monitors[nrpecheck.shortname]['max_check_attempts'] = nrpecheck.max_check_attempts
            except AttributeError:
                pass

        # update-status hooks are configured to fire every 5 minutes by
        # default. When nagios-nrpe-server is restarted, the nagios server
        # reports checks failing, causing unnecessary alerts. Let's not restart
        # on update-status hooks.
        if not hook_name() == 'update-status':
            service('restart', 'nagios-nrpe-server')

        monitor_ids = relation_ids("local-monitors") + \
            relation_ids("nrpe-external-master")
        for rid in monitor_ids:
            reldata = relation_get(unit=local_unit(), rid=rid)
            if 'monitors' in reldata:
                # update the existing set of monitors with the new data
                old_monitors = yaml.safe_load(reldata['monitors'])
                old_nrpe_monitors = old_monitors['monitors']['remote']['nrpe']
                # remove keys that are in the remove_check_queue
                old_nrpe_monitors = {k: v for k, v in old_nrpe_monitors.items()
                                     if k not in self.remove_check_queue}
                # update/add nrpe_monitors
                old_nrpe_monitors.update(nrpe_monitors)
                old_monitors['monitors']['remote']['nrpe'] = old_nrpe_monitors
                # write back to the relation
                relation_set(relation_id=rid, monitors=yaml.dump(old_monitors))
            else:
                # write a brand new set of monitors, as no existing ones.
                relation_set(relation_id=rid, monitors=yaml.dump(monitors))

        self.remove_check_queue.clear()
Example #8
def restart():
    open_port(9000)
    if service_running("minio"):
        service("restart", "minio")
    else:
        service("start", "minio")
    status_set("active", "")
Example #9
def install_dpdk():
    modprobe(config["dpdk-driver"])
    try:
        modprobe("vfio-pci")
    except:
        pass
    dkms_autoinstall()
    pages = get_hugepages()
    if pages:
        hugepage_support("root", group="root", nr_hugepages=pages,
                         mnt_point="/hugepages")
        service_restart("libvirt-bin")

    configure_vrouter_interface()
    set_dpdk_options()

    if not init_is_systemd():
        os.remove("/etc/init/supervisor-vrouter.override")
        service_start("supervisor-vrouter")
        service_restart("contrail-vrouter-agent")
    else:
        # unmask them first
        for srv in ("contrail-vrouter-agent", "contrail-vrouter-dpdk"):
            try:
                os.remove("/etc/systemd/system/{}.service".format(srv))
            except OSError:
                pass
        service("enable", "contrail-vrouter-dpdk")
        service_start("contrail-vrouter-dpdk")
        service("enable", "contrail-vrouter-agent")
        service_start("contrail-vrouter-agent")

    fix_libvirt()
Example #10
def main(cluster_data={}):

    # Grab the boilerplate config entries
    cluster_data['unit_name'] = environ['JUJU_UNIT_NAME'].replace('/', '')
    cluster_data['private_address'] = private_address
    cluster_data['public_address'] = public_address
    cluster_data['cluster_state'] = 'new'

    if not leader_status:
        cluster_data['cluster_state'] = hookenv.relation_get('cluster-state')
        leader_address = hookenv.relation_get('leader-address')

        # do self registration
        if not db.get('registered'):
            cmd = "etcdctl -C http://{}:2379 member add {}" \
                  " http://{}:2380".format(leader_address,
                                           cluster_data['unit_name'],
                                           private_address)
            print(cmd)
            check_call(shlex.split(cmd))
            db.set('registered', True)

    # introspect the cluster, and form the cluster string.
    # https://github.com/coreos/etcd/blob/master/Documentation/configuration.md#-initial-cluster

    templating.render('etcd.default.jinja2', '/etc/default/etcd',
                      cluster_data, owner='root', group='root')

    host.service('stop', 'etcd')
    check_output(['rm', '-Rf', '/var/lib/etcd/default'])
    host.service('start', 'etcd')
    if leader_status:
        status_set('active', 'Etcd leader running')
    else:
        status_set('active', 'Etcd follower running')
Example #11
    def write(self):
        try:
            nagios_uid = pwd.getpwnam('nagios').pw_uid
            nagios_gid = grp.getgrnam('nagios').gr_gid
        except:
            log("Nagios user not set up, nrpe checks not updated")
            return

        if not os.path.exists(NRPE.nagios_logdir):
            os.mkdir(NRPE.nagios_logdir)
            os.chown(NRPE.nagios_logdir, nagios_uid, nagios_gid)

        nrpe_monitors = {}
        monitors = {"monitors": {"remote": {"nrpe": nrpe_monitors}}}
        for nrpecheck in self.checks:
            nrpecheck.write(self.nagios_context, self.hostname,
                            self.nagios_servicegroups)
            nrpe_monitors[nrpecheck.shortname] = {
                "command": nrpecheck.command,
            }

        service('restart', 'nagios-nrpe-server')

        monitor_ids = relation_ids("local-monitors") + \
            relation_ids("nrpe-external-master")
        for rid in monitor_ids:
            relation_set(relation_id=rid, monitors=yaml.dump(monitors))
Example #12
def configure_vault(context):
    log("Running configure_vault", level=DEBUG)
    context['disable_mlock'] = is_container() or config('disable-mlock')

    context['ssl_available'] = is_state('vault.ssl.available')

    if is_flag_set('etcd.tls.available'):
        etcd = endpoint_from_flag('etcd.available')
        log("Etcd detected, adding to context", level=DEBUG)
        context['etcd_conn'] = etcd.connection_string()
        context['etcd_tls_ca_file'] = '/var/snap/vault/common/etcd-ca.pem'
        context['etcd_tls_cert_file'] = '/var/snap/vault/common/etcd-cert.pem'
        context['etcd_tls_key_file'] = '/var/snap/vault/common/etcd.key'
        save_etcd_client_credentials(etcd,
                                     key=context['etcd_tls_key_file'],
                                     cert=context['etcd_tls_cert_file'],
                                     ca=context['etcd_tls_ca_file'])
        context['api_addr'] = vault.get_api_url()
        context['cluster_addr'] = vault.get_cluster_url()
        log("Etcd detected, setting api_addr to {}".format(
            context['api_addr']))
    else:
        log("Etcd not detected", level=DEBUG)
    log("Rendering vault.hcl.j2", level=DEBUG)
    render('vault.hcl.j2', VAULT_CONFIG, context, perms=0o600)
    log("Rendering vault systemd configuation", level=DEBUG)
    render('vault.service.j2', VAULT_SYSTEMD_CONFIG, {}, perms=0o644)
    service('enable', 'vault')
    log("Opening vault port", level=DEBUG)
    open_port(8200)
    set_flag('configured')
    if any_file_changed([VAULT_CONFIG, VAULT_SYSTEMD_CONFIG]):
        # force a restart if config has changed
        clear_flag('started')
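The configured/started flags set and cleared above are typically consumed by a separate reactive handler that performs the actual (re)start. A hedged sketch of such a handler (the handler name and flag wiring are assumptions, not part of this excerpt):

from charms.reactive import when, when_not, set_flag
from charmhelpers.core.host import service

@when('configured')
@when_not('started')
def start_vault():
    # Sketch: (re)start vault once its configuration has been rendered.
    if service('restart', 'vault'):
        set_flag('started')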
Example #13
def restart_services():
    """Determine which service names are required to be restarted based on
    Ubuntu release and restart them.
    """
    ubuntu_release = host.lsb_release()['DISTRIB_CODENAME']
    for service in QUAGGA_SERVICES[ubuntu_release]:
        host.service('restart', service)
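For illustration only, the QUAGGA_SERVICES mapping consulted above would be keyed by Ubuntu codename; an assumed shape (service names are guesses, not taken from the charm):

QUAGGA_SERVICES = {
    'xenial': ['quagga'],           # single umbrella service on older releases
    'bionic': ['zebra', 'bgpd'],    # illustrative per-daemon units
}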
Example #14
def main(cluster_data={}):

    # Grab the boilerplate config entries
    cluster_data['unit_name'] = environ['JUJU_UNIT_NAME'].replace('/', '')
    cluster_data['private_address'] = private_address
    cluster_data['public_address'] = public_address
    cluster_data['cluster_state'] = 'new'

    if not leader_status:
        cluster_data['cluster_state'] = hookenv.relation_get('cluster-state')
        leader_address = hookenv.relation_get('leader-address')

        # do self registration
        if not db.get('registered'):
            cmd = "/opt/etcd/etcdctl -C http://{}:4001 member add {}" \
                  " http://{}:7001".format(leader_address,
                                           cluster_data['unit_name'],
                                           private_address)
            print(cmd)
            check_call(shlex.split(cmd))
            db.set('registered', True)

    # introspect the cluster, and form the cluster string.
    # https://github.com/coreos/etcd/blob/master/Documentation/configuration.md#-initial-cluster

    templating.render('etcd.conf.jinja2',
                      '/etc/init/etcd.conf',
                      cluster_data,
                      owner='root',
                      group='root')

    host.service('restart', 'etcd')
Example #15
    def write(self):
        try:
            nagios_uid = pwd.getpwnam('nagios').pw_uid
            nagios_gid = grp.getgrnam('nagios').gr_gid
        except:
            log("Nagios user not set up, nrpe checks not updated")
            return

        if not os.path.exists(NRPE.nagios_logdir):
            os.mkdir(NRPE.nagios_logdir)
            os.chown(NRPE.nagios_logdir, nagios_uid, nagios_gid)

        nrpe_monitors = {}
        monitors = {"monitors": {"remote": {"nrpe": nrpe_monitors}}}
        for nrpecheck in self.checks:
            nrpecheck.write(self.nagios_context, self.hostname,
                            self.nagios_servicegroups)
            nrpe_monitors[nrpecheck.shortname] = {
                "command": nrpecheck.command,
            }

        service('restart', 'nagios-nrpe-server')

        for rid in relation_ids("local-monitors"):
            relation_set(relation_id=rid, monitors=yaml.dump(monitors))
Example #16
def restart():
    open_port(port())
    if service_running('hello-juju'):
        service('restart', 'hello-juju')
    else:
        service('start', 'hello-juju')
    status_set("active", "")
Example #17
    def write(self):
        try:
            nagios_uid = pwd.getpwnam('nagios').pw_uid
            nagios_gid = grp.getgrnam('nagios').gr_gid
        except Exception:
            log("Nagios user not set up, nrpe checks not updated")
            return

        if not os.path.exists(NRPE.nagios_logdir):
            os.mkdir(NRPE.nagios_logdir)
            os.chown(NRPE.nagios_logdir, nagios_uid, nagios_gid)

        nrpe_monitors = {}
        monitors = {"monitors": {"remote": {"nrpe": nrpe_monitors}}}
        for nrpecheck in self.checks:
            nrpecheck.write(self.nagios_context, self.hostname,
                            self.nagios_servicegroups)
            nrpe_monitors[nrpecheck.shortname] = {
                "command": nrpecheck.command,
            }

        # update-status hooks are configured to fire every 5 minutes by
        # default. When nagios-nrpe-server is restarted, the nagios server
        # reports checks failing, causing unnecessary alerts. Let's not restart
        # on update-status hooks.
        if not hook_name() == 'update-status':
            service('restart', 'nagios-nrpe-server')

        monitor_ids = relation_ids("local-monitors") + \
            relation_ids("nrpe-external-master")
        for rid in monitor_ids:
            relation_set(relation_id=rid, monitors=yaml.dump(monitors))
Example #18
def install_calico_service():
    ''' Install the calico-node systemd service. '''
    status.maintenance('Installing calico-node service.')

    # keep track of our etcd connections so we can detect when it changes later
    etcd = endpoint_from_flag('etcd.available')
    etcd_connections = etcd.get_connection_string()
    data_changed('calico_etcd_connections', etcd_connections)
    data_changed('calico_etcd_cert', etcd.get_client_credentials())

    service_path = os.path.join(os.sep, 'lib', 'systemd', 'system',
                                'calico-node.service')
    render(
        'calico-node.service',
        service_path,
        {
            'connection_string': etcd_connections,
            'etcd_key_path': ETCD_KEY_PATH,
            'etcd_ca_path': ETCD_CA_PATH,
            'etcd_cert_path': ETCD_CERT_PATH,
            'nodename': gethostname(),
            # specify IP so calico doesn't grab a silly one from, say, lxdbr0
            'ip': get_bind_address(),
            'calico_node_image': hookenv.config('calico-node-image'),
            'ignore_loose_rpf': hookenv.config('ignore-loose-rpf'),
            'lc_all': os.environ.get('LC_ALL', 'C.UTF-8'),
            'lang': os.environ.get('LANG', 'C.UTF-8')
        })
    check_call(['systemctl', 'daemon-reload'])
    service_restart('calico-node')
    service('enable', 'calico-node')
    set_state('calico.service.installed')
Example #19
def install_dpdk():
    modprobe(config["dpdk-driver"])
    try:
        modprobe("vfio-pci")
    except:
        pass
    dkms_autoinstall()
    pages = get_hugepages()
    if pages:
        hugepage_support("root", group="root", nr_hugepages=pages,
                         mnt_point="/hugepages")
        service_restart("libvirt-bin")

    configure_vrouter_interface()
    set_dpdk_coremask()
    write_configs()

    if not init_is_systemd():
        os.remove("/etc/init/supervisor-vrouter.override")
        service_start("supervisor-vrouter")
        service_restart("contrail-vrouter-agent")
    else:
        service("enable", "contrail-vrouter-dpdk")
        service_start("contrail-vrouter-dpdk")
        service("enable", "contrail-vrouter-agent")
        service_start("contrail-vrouter-agent")

    fix_libvirt()
Example #20
def install_calico_service():
    ''' Install the calico-node systemd service. '''
    status.maintenance('Installing calico-node service.')
    etcd = endpoint_from_flag('etcd.available')
    service_path = os.path.join(os.sep, 'lib', 'systemd', 'system',
                                'calico-node.service')

    registry = hookenv.config('registry') or DEFAULT_REGISTRY
    image = hookenv.config('calico-node-image')
    uri = os.path.join(registry, image)

    render(
        'calico-node.service',
        service_path,
        {
            'connection_string': etcd.get_connection_string(),
            'etcd_key_path': ETCD_KEY_PATH,
            'etcd_ca_path': ETCD_CA_PATH,
            'etcd_cert_path': ETCD_CERT_PATH,
            'nodename': gethostname(),
            # specify IP so calico doesn't grab a silly one from, say, lxdbr0
            'ip': get_bind_address(),
            'cnx_node_image': uri,
            'ignore_loose_rpf': hookenv.config('ignore-loose-rpf'),
            'lc_all': os.environ.get('LC_ALL', 'C.UTF-8'),
            'lang': os.environ.get('LANG', 'C.UTF-8')
        })
    service_restart('calico-node')
    service('enable', 'calico-node')
    set_state('calico.service.installed')
Example #21
def bootstrap_pxc():
    """Bootstrap PXC
    On systemd systems systemctl bootstrap-pxc mysql does not work.
    Run service mysql bootstrap-pxc to bootstrap."""
    service('stop', 'mysql')
    bootstrapped = service('bootstrap-pxc', 'mysql')
    if not bootstrapped:
        try:
            # NOTE(jamespage): execute under systemd-run to ensure
            #                  that the bootstrap-pxc mysqld does
            #                  not end up in the juju unit daemons
            #                  cgroup scope.
            cmd = [
                'systemd-run', '--service-type=forking', 'service', 'mysql',
                'bootstrap-pxc'
            ]
            subprocess.check_call(cmd)
        except subprocess.CalledProcessError as e:
            msg = 'Bootstrap PXC failed'
            error_msg = '{}: {}'.format(msg, e)
            status_set('blocked', msg)
            log(error_msg, ERROR)
            raise Exception(error_msg)
        # To make systemd aware mysql is running after a bootstrap
        service('start', 'mysql')
    log("Bootstrap PXC Succeeded", DEBUG)
Example #22
def render_filebeat_template():
    """Create the filebeat.yaml config file.

    Renders the appropriate template for the major version of filebeat that
    is installed.
    """
    # kube_logs requires access to k8s-related filesystem data. If configured,
    # don't try to start filebeat until that data is present.
    if config().get('kube_logs') and not os.path.exists(KUBE_CONFIG):
        status.maint('Waiting for: {}'.format(KUBE_CONFIG))
        return

    version = charms.apt.get_package_version('filebeat')[0]
    cfg_original_hash = file_hash(FILEBEAT_CONFIG)
    connections = render_without_context('filebeat-{}.yml'.format(version),
                                         FILEBEAT_CONFIG)
    cfg_new_hash = file_hash(FILEBEAT_CONFIG)

    # Ensure ssl files match config each time we render a new template
    manage_filebeat_logstash_ssl()
    remove_state('beat.render')

    if connections:
        if cfg_original_hash != cfg_new_hash:
            service('restart', 'filebeat')
        status.active('Filebeat ready.')
    else:
        # Stop the service when not connected to any log handlers.
        # NB: beat base layer will handle setting a waiting status
        service('stop', 'filebeat')
Example #23
def remove_legacy_nova_metadata():
    """Remove nova metadata files."""
    service_name = 'nova-api-metadata'
    service_stop(service_name)
    service('disable', service_name)
    service('mask', service_name)
    for f in NOVA_CONFIG_FILES.keys():
        remove_file(f)
Example #24
def cmd_all_services(cmd):
    if cmd == 'start':
        for svc in services():
            if not service_running(svc):
                service_start(svc)
    else:
        for svc in services():
            service(cmd, svc)
Example #25
def remove_legacy_nova_metadata():
    """Remove nova metadata files."""
    service_name = 'nova-api-metadata'
    service_stop(service_name)
    service('disable', service_name)
    service('mask', service_name)
    for f in get_nova_config_files().keys():
        remove_file(f)
Example #26
def cmd_all_services(cmd):
    if cmd == 'start':
        for svc in services():
            if not service_running(svc):
                service_start(svc)
    else:
        for svc in services():
            service(cmd, svc)
Example #27
 def upgrade(self):
     """Install or upgrade the GitLab runner packages, adding APT sources as needed."""
     self.add_sources()
     apt_update()
     apt_install("gitlab-runner")
     self.set_global_config()
     service("enable", "gitlab-runner")
     service("start", "gitlab-runner")
     return True
Example #28
def start_ftb(java):
    """ start instance """
    log('ftb-infinity: start_ftb')
    service('enable', CHARM_NAME)
    service_start(CHARM_NAME)

    open_port(conf['server_port'])
    set_state(CHARM_STATE_STARTED)
    status_set('active', 'ftb started')
Example #29
def render_config_restart_on_changed(clustered, hosts, bootstrap=False):
    """Render mysql config and restart mysql service if file changes as a
    result.

    If bootstrap is True we do a bootstrap-pxc in order to bootstrap the
    percona cluster. This should only be performed once at cluster creation
    time.

    If percona is already bootstrapped we can get away with just ensuring that
    it is started so long as the new node to be added is guaranteed to have
    been restarted so as to apply the new config.
    """
    pre_hash = file_hash(resolve_cnf_file())
    render_config(clustered, hosts)
    create_binlogs_directory()
    update_db_rels = False
    if file_hash(resolve_cnf_file()) != pre_hash or bootstrap:
        if bootstrap:
            service('stop', 'mysql')
            service('bootstrap-pxc', 'mysql')
            # NOTE(dosaboy): this will not actually do anything if no cluster
            # relation id exists yet.
            notify_bootstrapped()
            update_db_rels = True
        else:
            delay = 1
            attempts = 0
            max_retries = 5
            # NOTE(dosaboy): avoid unnecessary restarts. Once mysql is started
            # it needn't be restarted when new units join the cluster since the
            # new units will join and apply their own config.
            if not seeded():
                action = service_restart
            else:
                action = service_start

            while not action('mysql'):
                if attempts == max_retries:
                    raise Exception("Failed to start mysql (max retries "
                                    "reached)")

                log("Failed to start mysql - retrying in %ss" % (delay),
                    WARNING)
                time.sleep(delay)
                delay += 2
                attempts += 1

        # If we get here we assume prior actions have succeeded and ensure
        # this unit is marked as seeded so that subsequent calls don't result
        # in a restart.
        mark_seeded()

        if update_db_rels:
            update_shared_db_rels()
    else:
        log("Config file '%s' unchanged", level=DEBUG)
Example #30
def render_config_restart_on_changed(clustered, hosts, bootstrap=False):
    """Render mysql config and restart mysql service if file changes as a
    result.

    If bootstrap is True we do a bootstrap-pxc in order to bootstrap the
    percona cluster. This should only be performed once at cluster creation
    time.

    If percona is already bootstrapped we can get away with just ensuring that
    it is started so long as the new node to be added is guaranteed to have
    been restarted so as to apply the new config.
    """
    pre_hash = file_hash(resolve_cnf_file())
    render_config(clustered, hosts)
    update_db_rels = False
    if file_hash(resolve_cnf_file()) != pre_hash or bootstrap:
        if bootstrap:
            service('bootstrap-pxc', 'mysql')
            # NOTE(dosaboy): this will not actually do anything if no cluster
            # relation id exists yet.
            notify_bootstrapped()
            update_db_rels = True
        else:
            delay = 1
            attempts = 0
            max_retries = 5
            # NOTE(dosaboy): avoid unnecessary restarts. Once mysql is started
            # it needn't be restarted when new units join the cluster since the
            # new units will join and apply their own config.
            if not seeded():
                action = service_restart
            else:
                action = service_start

            while not action('mysql'):
                if attempts == max_retries:
                    raise Exception("Failed to start mysql (max retries "
                                    "reached)")

                log("Failed to start mysql - retrying in %ss" % (delay),
                    WARNING)
                time.sleep(delay)
                delay += 2
                attempts += 1

        # If we get here we assume prior actions have succeeded and ensure
        # this unit is marked as seeded so that subsequent calls don't result
        # in a restart.
        mark_seeded()

        if update_db_rels:
            update_shared_db_rels()
    else:
        log("Config file '%s' unchanged", level=DEBUG)
Example #31
def install_flannel_service(etcd):
    ''' Install the flannel service. '''
    status_set('maintenance', 'Installing flannel service.')
    iface = config('iface') or get_bind_address_interface()
    context = {'iface': iface,
               'connection_string': etcd.get_connection_string(),
               'cert_path': ETCD_PATH}
    render('flannel.service', '/lib/systemd/system/flannel.service', context)
    service('enable', 'flannel')
    set_state('flannel.service.installed')
    remove_state('flannel.service.started')
Example #32
 def setup_systemd(self):
     context = {'user': self.user,
                'group': self.user,
                'mono': self.mono_path,
                'radarr': self.executable
                }
     templating.render(source="{}.j2".format(self.service_name),
                       target=self.service_file,
                       context=context)
     host.service('enable', self.service_name)
     host.service('start', self.service_name)
Example #33
def remove_legacy_neutron_lbaas():
    """Remove neutron lbaas files."""
    cmp_os_source = CompareOpenStackReleases(os_release('neutron-common'))
    service_name = 'neutron-lbaas-agent'
    if cmp_os_source >= 'train':
        return
    if cmp_os_source >= 'newton':
        service_name = 'neutron-lbaasv2-agent'
    service_stop(service_name)
    service('disable', service_name)
    service('mask', service_name)
Example #34
def cmd_all_services(cmd):
    if is_unit_paused_set():
        log('Unit is in paused state, not issuing {} to all '
            'services'.format(cmd))
        return
    if cmd == 'start':
        for svc in services():
            if not service_running(svc):
                service_start(svc)
    else:
        for svc in services():
            service(cmd, svc)
Example #35
def cmd_all_services(cmd):
    if is_unit_paused_set():
        log('Unit is in paused state, not issuing {} to all '
            'services'.format(cmd))
        return
    if cmd == 'start':
        for svc in services():
            if not service_running(svc):
                service_start(svc)
    else:
        for svc in services():
            service(cmd, svc)
Example #36
    def _mon_relation():
        key_name = 'rgw.{}'.format(socket.gethostname())
        if request_per_unit_key():
            relation_set(relation_id=rid,
                         key_name=key_name)
        # NOTE: prefer zone name if in use over pool-prefix.
        rq = ceph.get_create_rgw_pools_rq(
            prefix=config('zone') or config('pool-prefix'))
        if is_request_complete(rq, relation='mon'):
            log('Broker request complete', level=DEBUG)
            CONFIGS.write_all()
            # New style per unit keys
            key = relation_get(attribute='{}_key'.format(key_name),
                               rid=rid, unit=unit)
            if not key:
                # Fallback to old style global key
                key = relation_get(attribute='radosgw_key',
                                   rid=rid, unit=unit)
                key_name = None

            if key:
                new_keyring = ceph.import_radosgw_key(key,
                                                      name=key_name)
                # NOTE(jamespage):
                # Deal with switch from radosgw init script to
                # systemd named units for radosgw instances by
                # stopping and disabling the radosgw unit
                if systemd_based_radosgw():
                    service_stop('radosgw')
                    service('disable', 'radosgw')
                    # Update the nrpe config. If we wait for the below
                    # to be called elsewhere, there exists a period
                    # where nagios will report the radosgw service as
                    # down, and also not be monitoring the per
                    # host services.
                    update_nrpe_config(checks_to_remove=['radosgw'])

                service('enable', service_name())
                # NOTE(jamespage):
                # Multi-site deployments need to defer restart as the
                # zone is not created until the master relation is
                # joined; restarting here will cause a restart burst
                # in systemd and stop the process restarting once
                # zone configuration is complete.
                if (not is_unit_paused_set() and
                        new_keyring and
                        not multisite_deployment()):
                    service_restart(service_name())

            process_multisite_relations()
        else:
            send_request_if_needed(rq, relation='mon')
Example #37
def configure_gunicorn():
    status_set("maintenance", "Configuring gunicorn service")
    render('omnivector-challenge.service.j2',
           '/etc/systemd/system/omnivector-challenge.service',
           perms=0o755,
           context={
               'port': port(),
               'project_root': '/srv/omnivector-challenge/app',
               'user': '******',
               'group': 'www-data',
           })
    service("enable", "omnivector-challenge")
    status_set("active", "Serving HTTP from gunicorn")
Example #38
    def _mon_relation():
        key_name = 'rgw.{}'.format(socket.gethostname())
        if request_per_unit_key():
            relation_set(relation_id=rid, key_name=key_name)
        # NOTE: prefer zone name if in use over pool-prefix.
        rq = ceph.get_create_rgw_pools_rq(
            prefix=config('zone') or config('pool-prefix'))
        if is_request_complete(rq, relation='mon'):
            log('Broker request complete', level=DEBUG)
            CONFIGS.write_all()
            # New style per unit keys
            key = relation_get(attribute='{}_key'.format(key_name),
                               rid=rid,
                               unit=unit)
            if not key:
                # Fallback to old style global key
                key = relation_get(attribute='radosgw_key', rid=rid, unit=unit)
                key_name = None

            if key:
                new_keyring = ceph.import_radosgw_key(key, name=key_name)
                # NOTE(jamespage):
                # Deal with switch from radosgw init script to
                # systemd named units for radosgw instances by
                # stopping and disabling the radosgw unit
                if systemd_based_radosgw():
                    service_stop('radosgw')
                    service('disable', 'radosgw')
                    # Update the nrpe config. If we wait for the below
                    # to be called elsewhere, there exists a period
                    # where nagios will report the radosgw service as
                    # down, and also not be monitoring the per
                    # host services.
                    update_nrpe_config(checks_to_remove=['radosgw'])

                # NOTE(jamespage):
                # Multi-site deployments need to defer restart as the
                # zone is not created until the master relation is
                # joined; restarting here will cause a restart burst
                # in systemd and stop the process restarting once
                # zone configuration is complete.
                if (not is_unit_paused_set() and new_keyring
                        and not multisite_deployment()):
                    log('Resume service "{}" as we now have keys for it.'.
                        format(service_name()),
                        level=DEBUG)
                    service_resume(service_name())

            process_multisite_relations()
        else:
            send_request_if_needed(rq, relation='mon')
Example #39
def config_changed():

    ls_tgz = os.path.join(os.environ['CHARM_DIR'], 'files', LS_FNAME)

    # sorry if you reboot the server, we're fetching it again unless
    # you fatpacked the charm. YOLO DORITOS
    if not os.path.exists(ls_tgz):
        link = cfg['logstash_url']
        sum = cfg['logstash_sum']
        tpath = download_and_validate(link, sum)

        cmd = 'tar -xvz --strip-components=1 -C /opt/logstash -f {}'
        subprocess.check_call(shlex.split(cmd.format(tpath)))

    service('restart', 'logstash-indexer')
Example #40
 def wrapped_f(*args, **kwargs):
     checksums = {path: path_hash(path) for path in restart_map}
     f(*args, **kwargs)
     restarts = []
     for path in restart_map:
         if path_hash(path) != checksums[path]:
             restarts += restart_map[path]
     services_list = list(OrderedDict.fromkeys(restarts))
     if not stopstart:
         for service_name in services_list:
             service('restart', service_name)
     else:
         for action in ['stop', 'start']:
             for service_name in services_list:
                 service(action, service_name)
                 if action == 'stop' and sleep:
                     time.sleep(sleep)
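The closure above is the body of a typical restart_on_change-style decorator: it hashes each file in restart_map, runs the wrapped function, and restarts the services mapped to any file whose hash changed. A hedged usage sketch (decorator name, file path, service name and build_context are assumptions for illustration):

@restart_on_change({'/etc/haproxy/haproxy.cfg': ['haproxy']}, stopstart=False)
def write_haproxy_config():
    # Services mapped to files whose hash changed are restarted afterwards.
    render('haproxy.cfg', '/etc/haproxy/haproxy.cfg', build_context())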
Example #41
def start_builtin_server(
    ssl_cert_path,
    serve_tests,
    sandbox,
    builtin_server_logging,
    insecure,
    charmworld_url,
    env_password=None,
    env_uuid=None,
    juju_version=None,
    debug=False,
    port=None,
    bundleservice_url=None,
    interactive_login=False,
    gzip=True,
    gtm_enabled=False,
    gisf_enabled=False,
    charmstore_url=None,
):
    """Start the builtin server."""
    if (port is not None) and not port_in_range(port):
        # Do not use the user provided port if it is not valid.
        port = None
    write_builtin_server_startup(
        ssl_cert_path,
        serve_tests=serve_tests,
        sandbox=sandbox,
        builtin_server_logging=builtin_server_logging,
        insecure=insecure,
        charmworld_url=charmworld_url,
        env_password=env_password,
        env_uuid=env_uuid,
        juju_version=juju_version,
        debug=debug,
        port=port,
        bundleservice_url=bundleservice_url,
        interactive_login=interactive_login,
        gzip=gzip,
        gtm_enabled=gtm_enabled,
        gisf_enabled=gisf_enabled,
        charmstore_url=charmstore_url,
    )
    log("Starting the builtin server.")
    with su("root"):
        service(RESTART, GUISERVER)
Example #42
def bootstrap_pxc():
    """Bootstrap PXC
    On systemd systems systemctl bootstrap-pxc mysql does not work.
    Run service mysql bootstrap-pxc to bootstrap."""
    service('stop', 'mysql')
    bootstrapped = service('bootstrap-pxc', 'mysql')
    if not bootstrapped:
        try:
            cmp_os = CompareHostReleases(
                lsb_release()['DISTRIB_CODENAME']
            )
            if cmp_os < 'bionic':
                # NOTE(jamespage): execute under systemd-run to ensure
                #                  that the bootstrap-pxc mysqld does
                #                  not end up in the juju unit daemons
                #                  cgroup scope.
                cmd = ['systemd-run', '--service-type=forking',
                       'service', 'mysql', 'bootstrap-pxc']
                subprocess.check_call(cmd)
            else:
                service('start', 'mysql@bootstrap')
        except subprocess.CalledProcessError as e:
            msg = 'Bootstrap PXC failed'
            error_msg = '{}: {}'.format(msg, e)
            status_set('blocked', msg)
            log(error_msg, ERROR)
            raise Exception(error_msg)
        if CompareHostReleases(lsb_release()['DISTRIB_CODENAME']) < 'bionic':
            # To make systemd aware mysql is running after a bootstrap
            service('start', 'mysql')
    log("Bootstrap PXC Succeeded", DEBUG)
Example #43
def install_etcd():
    ''' Attempt resource get on the "etcd" and "etcdctl" resources. If no
    resources are provided attempt to install from the archive only on the
    16.04 (xenial) series. '''

    status_set('maintenance', 'Installing etcd from apt.')
    pkg_list = ['etcd']
    apt_update()
    apt_install(pkg_list, fatal=True)
    # Stop the service and remove the defaults
    # I hate that I have to do this. Sorry short-lived local data #RIP
    # State control is to prevent upgrade-charm from nuking cluster
    # data.
    if not is_state('etcd.package.adjusted'):
        host.service('stop', 'etcd')
        if os.path.exists('/var/lib/etcd/default'):
            shutil.rmtree('/var/lib/etcd/default')
        set_state('etcd.package.adjusted')
    set_state('etcd.installed')
Example #44
def services():
    """Return a list of services that are managed by this charm.

    @returns [services] - list of strings that are service names.
    """
    # NOTE(jamespage): Native systemd variants of the packaging
    #                  use mysql@bootstrap to seed the cluster;
    #                  however, this is cleared after a reboot,
    #                  so dynamically check to see if it is active
    if service('is-active', 'mysql@bootstrap'):
        return ['mysql@bootstrap']
    return ['mysql']
Example #45
def stop_builtin_server():
    """Stop the builtin server."""
    log("Stopping the builtin server.")
    with su("root"):
        service(STOP, GUISERVER)
    cmd_log(run("rm", "-f", GUISERVER_INIT_PATH))
Example #46
def configure_sriov():
    '''Configure SR-IOV devices based on provided configuration options

    NOTE(fnordahl): Boot time configuration is done by init script
    installed by this charm.

    This function only does runtime configuration!
    '''
    charm_config = config()
    if not enable_sriov():
        return

    # make sure init script has correct mode and that boot time execution
    # is enabled
    os.chmod(NEUTRON_SRIOV_INIT_SCRIPT, 0o755)
    service('enable', 'neutron-openvswitch-networking-sriov')

    devices = PCINetDevices()
    sriov_numvfs = charm_config.get('sriov-numvfs')

    # automatic configuration of all SR-IOV devices
    if sriov_numvfs == 'auto':
        log('Configuring SR-IOV device VF functions in auto mode')
        for device in devices.pci_devices:
            if device and device.sriov:
                log("Configuring SR-IOV device"
                    " {} with {} VF's".format(device.interface_name,
                                              device.sriov_totalvfs))
                device.set_sriov_numvfs(device.sriov_totalvfs)
    else:
        # Single int blanket configuration
        try:
            log('Configuring SR-IOV device VF functions'
                ' with blanket setting')
            for device in devices.pci_devices:
                if device and device.sriov:
                    numvfs = min(int(sriov_numvfs), device.sriov_totalvfs)
                    if int(sriov_numvfs) > device.sriov_totalvfs:
                        log('Requested value for sriov-numvfs ({}) too '
                            'high for interface {}. Falling back to '
                            'interface totalvfs '
                            'value: {}'.format(sriov_numvfs,
                                               device.interface_name,
                                               device.sriov_totalvfs))
                    log("Configuring SR-IOV device {} with {} "
                        "VFs".format(device.interface_name, numvfs))
                    device.set_sriov_numvfs(numvfs)
        except ValueError:
            # <device>:<numvfs>[ <device>:numvfs] configuration
            sriov_numvfs = sriov_numvfs.split()
            for device_config in sriov_numvfs:
                log('Configuring SR-IOV device VF functions per interface')
                interface_name, numvfs = device_config.split(':')
                device = devices.get_device_from_interface_name(
                    interface_name)
                if device and device.sriov:
                    if int(numvfs) > device.sriov_totalvfs:
                        log('Requested value for sriov-numvfs ({}) too '
                            'high for interface {}. Falling back to '
                            'interface totalvfs '
                            'value: {}'.format(numvfs,
                                               device.interface_name,
                                               device.sriov_totalvfs))
                        numvfs = device.sriov_totalvfs
                    log("Configuring SR-IOV device {} with {} "
                        "VF's".format(device.interface_name, numvfs))
                    device.set_sriov_numvfs(int(numvfs))

    # Trigger remote restart in parent application
    remote_restart('neutron-plugin', 'nova-compute')

    # Restart of SRIOV agent is required after changes to system runtime
    # VF configuration
    cmp_release = CompareOpenStackReleases(
        os_release('neutron-common', base='icehouse'))
    if cmp_release >= 'mitaka':
        service_restart('neutron-sriov-agent')
    else:
        service_restart('neutron-plugin-sriov-agent')
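For reference, the three accepted forms of the sriov-numvfs option handled above could be expressed as follows (illustrative values only, not taken from a real deployment):

# sriov-numvfs: auto              -> every SR-IOV device gets its maximum VFs
# sriov-numvfs: "4"               -> one blanket integer, capped at each device's maximum
# sriov-numvfs: "eth2:8 eth3:4"   -> per-interface <device>:<numvfs> pairs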
Example #47
def full_restart():
    ''' Full restart and reload of openvswitch '''
    if os.path.exists('/etc/init/openvswitch-force-reload-kmod.conf'):
        service('start', 'openvswitch-force-reload-kmod')
    else:
        service('force-reload-kmod', 'openvswitch-switch')
Example #48
def stop_builtin_server():
    """Stop the builtin server."""
    log('Stopping the builtin server.')
    with su('root'):
        service(STOP, GUISERVER)
    cmd_log(run('rm', '-f', GUISERVER_INIT_PATH))
Example #49
def git_post_install(projects_yaml):
    """Perform post-install setup."""
    src_etc = os.path.join(git_src_dir(projects_yaml, 'neutron'), 'etc')
    configs = [
        {'src': src_etc,
         'dest': '/etc/neutron'},
        {'src': os.path.join(src_etc, 'neutron/plugins'),
         'dest': '/etc/neutron/plugins'},
        {'src': os.path.join(src_etc, 'neutron/rootwrap.d'),
         'dest': '/etc/neutron/rootwrap.d'},
    ]

    for c in configs:
        if os.path.exists(c['dest']):
            shutil.rmtree(c['dest'])
        shutil.copytree(c['src'], c['dest'])

    # NOTE(coreycb): Need to find better solution than bin symlinks.
    symlinks = [
        {'src': os.path.join(git_pip_venv_dir(projects_yaml),
                             'bin/neutron-rootwrap'),
         'link': '/usr/local/bin/neutron-rootwrap'},
    ]

    for s in symlinks:
        if os.path.lexists(s['link']):
            os.remove(s['link'])
        os.symlink(s['src'], s['link'])

    render('git/neutron_sudoers', '/etc/sudoers.d/neutron_sudoers', {},
           perms=0o440)

    bin_dir = os.path.join(git_pip_venv_dir(projects_yaml), 'bin')
    # Use systemd init units/scripts from ubuntu wily onward
    if lsb_release()['DISTRIB_RELEASE'] >= '15.10':
        templates_dir = os.path.join(charm_dir(), 'templates/git')
        daemons = ['neutron-openvswitch-agent', 'neutron-ovs-cleanup']
        for daemon in daemons:
            neutron_ovs_context = {
                'daemon_path': os.path.join(bin_dir, daemon),
            }
            filename = daemon
            if daemon == 'neutron-openvswitch-agent':
                if os_release('neutron-common') < 'mitaka':
                    filename = 'neutron-plugin-openvswitch-agent'
            template_file = 'git/{}.init.in.template'.format(filename)
            init_in_file = '{}.init.in'.format(filename)
            render(template_file, os.path.join(templates_dir, init_in_file),
                   neutron_ovs_context, perms=0o644)
        git_generate_systemd_init_files(templates_dir)

        for daemon in daemons:
            filename = daemon
            if daemon == 'neutron-openvswitch-agent':
                if os_release('neutron-common') < 'mitaka':
                    filename = 'neutron-plugin-openvswitch-agent'
            service('enable', filename)
    else:
        neutron_ovs_agent_context = {
            'service_description': 'Neutron OpenvSwitch Plugin Agent',
            'charm_name': 'neutron-openvswitch',
            'process_name': 'neutron-openvswitch-agent',
            'executable_name': os.path.join(bin_dir,
                                            'neutron-openvswitch-agent'),
            'cleanup_process_name': 'neutron-ovs-cleanup',
            'plugin_config': '/etc/neutron/plugins/ml2/ml2_conf.ini',
            'log_file': '/var/log/neutron/openvswitch-agent.log',
        }

        neutron_ovs_cleanup_context = {
            'service_description': 'Neutron OpenvSwitch Cleanup',
            'charm_name': 'neutron-openvswitch',
            'process_name': 'neutron-ovs-cleanup',
            'executable_name': os.path.join(bin_dir, 'neutron-ovs-cleanup'),
            'log_file': '/var/log/neutron/ovs-cleanup.log',
        }

        if os_release('neutron-common') < 'mitaka':
            render('git/upstart/neutron-plugin-openvswitch-agent.upstart',
                   '/etc/init/neutron-plugin-openvswitch-agent.conf',
                   neutron_ovs_agent_context, perms=0o644)
        else:
            render('git/upstart/neutron-plugin-openvswitch-agent.upstart',
                   '/etc/init/neutron-openvswitch-agent.conf',
                   neutron_ovs_agent_context, perms=0o644)
        render('git/upstart/neutron-ovs-cleanup.upstart',
               '/etc/init/neutron-ovs-cleanup.conf',
               neutron_ovs_cleanup_context, perms=0o644)

    if not is_unit_paused_set():
        service_restart('neutron-plugin-openvswitch-agent')
Example #50
def install_git():
    apt_install('git')
    configure_sshd()
    host.service('restart', 'ssh')
Example #51
def full_restart():
    ''' Full restart and reload of openvswitch '''
    service('force-reload-kmod', 'openvswitch-switch')
Example #52
def ftb_systemd_remove():
    service('disable', CHARM_NAME)
    os.unlink(SYSTEMD_SVC_PATH)
    check_call(['systemctl', 'daemon-reload'])