Example No. 1
def snap_install():
    channel = config('channel') or 'stable'
    if validate_snap_channel(channel):
        clear_flag('snap.channel.invalid')
        snap.install('vault', channel=channel)
    else:
        set_flag('snap.channel.invalid')
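The snap.channel.invalid flag set above is presumably consumed elsewhere in the charm, for example by a status handler; a minimal sketch of such a consumer, assuming a hypothetical handler name and status message (only the flag name comes from the example):

from charms.reactive import when
from charmhelpers.core.hookenv import status_set

@when('snap.channel.invalid')
def report_invalid_channel():
    # Surface the misconfiguration to the operator via workload status.
    status_set('blocked', 'Invalid snap channel configured')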
Example No. 2
def cluster_connected(hacluster):
    """Configure HA resources in corosync"""
    dns_record = config('dns-ha-access-record')
    vips = config('vip') or None
    if vips and dns_record:
        set_flag('config.dns_vip.invalid')
        log("Unsupported configuration. vip and dns-ha cannot both be set",
            level=ERROR)
        return
    else:
        clear_flag('config.dns_vip.invalid')

    if vips:
        vips = vips.split()
        for vip in vips:
            if vip == vault.get_vip(binding='external'):
                hacluster.add_vip('vault-ext', vip)
            else:
                hacluster.add_vip('vault', vip)
    elif dns_record:
        try:
            ip = network_get_primary_address('access')
        except NotImplementedError:
            ip = unit_private_ip()
        hacluster.add_dnsha('vault', ip, dns_record, 'access')
    hacluster.bind_resources()
Example No. 3
def disable_pki(*args):
    if not hookenv.is_leader():
        hookenv.action_fail('Please run action on lead unit')
        return
    vault_pki.disable_pki_backend()
    clear_flag('charm.vault.ca.ready')
    clear_flag('pki.backend.tuned')
Example No. 4
def upgrade():
    """An upgrade has been triggered."""
    pki_directory = os.path.join(easyrsa_directory, "pki")
    if os.path.isdir(pki_directory):
        # specific handling if the upgrade is from a previous version
        # where certificate_authority_serial is not set at install
        serial_file = "serial"
        with chdir(pki_directory):
            # If the CA and CA key are set but the serial is not,
            # set it from the serial file in the pki directory.
            if (os.path.isfile(serial_file)
                    and leader_get("certificate_authority")
                    and leader_get("certificate_authority_key")
                    and not leader_get("certificate_authority_serial")):
                with open(serial_file, "r") as stream:
                    ca_serial = stream.read()
                # set the previously unset certificate authority serial
                leader_set({"certificate_authority_serial": ca_serial})

        charm_pki_directory = os.path.join(charm_directory, "pki")
        # When the charm pki directory exists, it is stale, remove it.
        if os.path.isdir(charm_pki_directory):
            shutil.rmtree(charm_pki_directory)
        # Copy the EasyRSA/pki to the charm pki directory.
        shutil.copytree(pki_directory, charm_pki_directory, symlinks=True)
    clear_flag("easyrsa.installed")
    clear_flag("easyrsa.configured")
Example No. 5
def create_certs():
    reissue_requested = is_flag_set('certificates.reissue.requested')
    tls = endpoint_from_flag('certificates.available')
    requests = tls.all_requests if reissue_requested else tls.new_requests
    if reissue_requested:
        log('Reissuing all certs')
    processed_applications = []
    for request in requests:
        log('Processing certificate request from {} for {}'.format(
            request.unit_name, request.common_name))
        if request.cert_type == 'application':
            cert_type = 'server'
            # When an application cert is published, all units receive the
            # same data, so only one request per application needs processing.
            if request.application_name in processed_applications:
                log('Already done {}'.format(request.application_name))
                continue
            else:
                processed_applications.append(request.application_name)
        else:
            cert_type = request.cert_type
        try:
            ttl = config()['default-ttl']
            max_ttl = config()['max-ttl']
            bundle = vault_pki.generate_certificate(cert_type,
                                                    request.common_name,
                                                    request.sans, ttl, max_ttl)
            request.set_cert(bundle['certificate'], bundle['private_key'])
        except vault.VaultInvalidRequest as e:
            log(str(e), level=ERROR)
            continue  # TODO: report failure back to client
    clear_flag('certificates.reissue.requested')
Example No. 6
    def controllers_changed(self):
        """Assess related controllers and only take relation data from the
        active one"""
        self._active_data = self._controller_config()

        if self._controller_config_ready(self._active_data):
            flags.set_flag(
                self.expand_name('endpoint.{endpoint_name}.active.available'))
            log('Set {} flag'.format(
                self.expand_name('endpoint.{endpoint_name}.active.available')))
            flags.set_flag(
                self.expand_name('endpoint.{endpoint_name}.active.changed'))
            log('Set {} flag'.format(
                self.expand_name('endpoint.{endpoint_name}.active.changed')))
        else:
            log('Controller config not ready, clearing active.available'
                ' and active.changed flags')
            self.controller_broken()
            # TODO: JSON is not serializable => need to either remove
            # this and execute more or solve the problem
            # if helpers.data_changed('active_data', self._active_data):
            #    flags.set_flag(self.expand_name(
            #        'endpoint.{endpoint_name}.active.changed'))

        # processed the relation changed event - can clear this flag now
        flags.clear_flag(self.expand_name('changed'))
        log('Cleared {} flag'.format(self.expand_name('changed')))
Example No. 7
    def _manage_flags(self):
        """
        Manage automatic relation flags.
        """
        already_joined = is_flag_set(self.expand_name('joined'))
        hook_name = hookenv.hook_name()
        rel_hook = hook_name.startswith(self.endpoint_name + '-relation-')
        departed_hook = rel_hook and hook_name.endswith('-departed')

        toggle_flag(self.expand_name('joined'), self.is_joined)

        if departed_hook:
            set_flag(self.expand_name('departed'))
        elif self.is_joined:
            clear_flag(self.expand_name('departed'))

        if already_joined and not rel_hook:
            # skip checking relation data outside hooks for this relation
            # to save on API calls to the controller (unless we didn't have
            # the joined flag before, since then we might be migrating to Endpoints)
            return

        for unit in self.all_units:
            for key, value in unit.received.items():
                data_key = 'endpoint.{}.{}.{}.{}'.format(self.endpoint_name,
                                                         unit.relation.relation_id,
                                                         unit.unit_name,
                                                         key)
                if data_changed(data_key, value):
                    set_flag(self.expand_name('changed'))
                    set_flag(self.expand_name('changed.{}'.format(key)))
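The joined/changed/departed flags managed above follow the endpoint.{endpoint_name}.* naming convention seen throughout these examples; a minimal sketch of a charm-side handler consuming them (the endpoint name 'db' and the handler body are assumptions for illustration):

from charms.reactive import when, clear_flag

@when('endpoint.db.changed')
def handle_db_data_change():
    # React to new relation data, then clear the flag so the handler is not
    # invoked again until data_changed() detects another change.
    clear_flag('endpoint.db.changed')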
Example No. 8
    def remove_state(self, state):
        """
        Remove this conversation from the given state, and potentially
        deactivate the state if no more conversations are in it.

        The relation name will be interpolated in the state name, and it is
        recommended that it be included to avoid conflicts with states from
        other relations.  For example::

            conversation.remove_state('{relation_name}.state')

        If called from a conversation handling the relation "foo", this will
        remove the conversation from the "foo.state" state, and, if no more
        conversations are in this state, will deactivate it.
        """
        state = state.format(relation_name=self.relation_name)
        value = _get_flag_value(state)
        if not value:
            return
        if self.key in value['conversations']:
            value['conversations'].remove(self.key)
        if value['conversations']:
            set_flag(state, value)
        else:
            clear_flag(state)
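The flag value read by _get_flag_value() above is a dict carrying a 'conversations' list; the set_state counterpart presumably mirrors that structure. A minimal sketch under that assumption (not the verified upstream implementation):

    def set_state(self, state):
        # Interpolate the relation name, e.g. '{relation_name}.state' -> 'foo.state'.
        state = state.format(relation_name=self.relation_name)
        value = _get_flag_value(state, {'conversations': []})
        # Record this conversation as a participant in the state.
        if self.key not in value['conversations']:
            value['conversations'].append(self.key)
        set_flag(state, value)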
Example No. 9
def configure_vault(context):
    log("Running configure_vault", level=DEBUG)
    context['disable_mlock'] = is_container() or config('disable-mlock')

    context['ssl_available'] = is_state('vault.ssl.available')

    if is_flag_set('etcd.tls.available'):
        etcd = endpoint_from_flag('etcd.available')
        log("Etcd detected, adding to context", level=DEBUG)
        context['etcd_conn'] = etcd.connection_string()
        context['etcd_tls_ca_file'] = '/var/snap/vault/common/etcd-ca.pem'
        context['etcd_tls_cert_file'] = '/var/snap/vault/common/etcd-cert.pem'
        context['etcd_tls_key_file'] = '/var/snap/vault/common/etcd.key'
        save_etcd_client_credentials(etcd,
                                     key=context['etcd_tls_key_file'],
                                     cert=context['etcd_tls_cert_file'],
                                     ca=context['etcd_tls_ca_file'])
        context['api_addr'] = vault.get_api_url()
        context['cluster_addr'] = vault.get_cluster_url()
        log("Etcd detected, setting api_addr to {}".format(
            context['api_addr']))
    else:
        log("Etcd not detected", level=DEBUG)
    log("Rendering vault.hcl.j2", level=DEBUG)
    render('vault.hcl.j2', VAULT_CONFIG, context, perms=0o600)
    log("Rendering vault systemd configuation", level=DEBUG)
    render('vault.service.j2', VAULT_SYSTEMD_CONFIG, {}, perms=0o644)
    service('enable', 'vault')
    log("Opening vault port", level=DEBUG)
    open_port(8200)
    set_flag('configured')
    if any_file_changed([VAULT_CONFIG, VAULT_SYSTEMD_CONFIG]):
        # force a restart if config has changed
        clear_flag('started')
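Clearing the 'started' flag presumably re-triggers a start handler gated on it; a minimal sketch of such a handler (decorators, handler name and body are assumptions, not the charm's actual code):

from charms.reactive import when, when_not, set_flag
from charmhelpers.core.host import service_restart

@when('configured')
@when_not('started')
def start_vault():
    # (Re)start the service whenever it is configured but not yet marked as
    # started, e.g. after configure_vault() cleared 'started' above.
    service_restart('vault')
    set_flag('started')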
Example No. 10
def configure_node(cluster_changed, cluster_joined):
    status_set('maintenance', 'Configuring slurm-node')

    controller_data = cluster_changed.active_data
    create_spool_dir(context=controller_data)

    render_munge_key(context=controller_data)
    # If the munge.key has been changed on the controller and munge is
    # running, the service must be restarted to use the new key
    if (flags.is_flag_set('endpoint.slurm-cluster.changed.munge_key')
            and service_running(MUNGE_SERVICE)):
        log('Restarting munge due to key change on slurm-controller')
        service_restart(MUNGE_SERVICE)

    render_slurm_config(context=controller_data)

    # Make sure munge is running
    if not service_running(MUNGE_SERVICE):
        service_start(MUNGE_SERVICE)
    # Make sure slurmd is running
    if not service_running(SLURMD_SERVICE):
        service_start(SLURMD_SERVICE)

    flags.set_flag('slurm-node.configured')
    log('Set {} flag'.format('slurm-node.configured'))

    flags.clear_flag('endpoint.slurm-cluster.active.changed')
    log('Cleared {} flag'.format('endpoint.slurm-cluster.active.changed'))

    # Clear this flag so that a later munge_key change from the controller
    # can be signalled again.
    flags.clear_flag('endpoint.slurm-cluster.changed.munge_key')
    log('Cleared {} flag'.format('endpoint.slurm-cluster.changed.munge_key'))
Example No. 11
def publish_global_client_cert():
    """
    This is for backwards compatibility with older tls-certificate clients
    only.  Obviously, it's not good security / design to have clients sharing
    a certificate, but it seems that there are clients that depend on this
    (though some, like etcd, only block on the flag that it triggers but don't
    actually use the cert), so we have to set it for now.
    """
    if not client_approle_authorized():
        log("Vault not authorized: Skipping publish_global_client_cert",
            "WARNING")
        return
    cert_created = is_flag_set('charm.vault.global-client-cert.created')
    reissue_requested = is_flag_set('certificates.reissue.global.requested')
    tls = endpoint_from_flag('certificates.available')
    if not cert_created or reissue_requested:
        ttl = config()['default-ttl']
        max_ttl = config()['max-ttl']
        bundle = vault_pki.generate_certificate('client', 'global-client', [],
                                                ttl, max_ttl)
        unitdata.kv().set('charm.vault.global-client-cert', bundle)
        set_flag('charm.vault.global-client-cert.created')
        clear_flag('certificates.reissue.global.requested')
    else:
        bundle = unitdata.kv().get('charm.vault.global-client-cert')
    tls.set_client_cert(bundle['certificate'], bundle['private_key'])
Example No. 12
def upgrade_charm():
    clear_flag('prometheus-virtfs-exporter.version')
    apply_playbook(playbook='ansible/playbook.yaml',
                   extra_vars=dict(
                       exp_port=config.get('port'),
                       exp_host=get_ip()[0],
                   ))
    status_set('active', 'ready')
Example No. 13
def handle_munge_change():
    '''
    A trigger sets needs_restart when munge.configured goes from unset to set
    after a change. Handle this by restarting the slurmctld service.
    '''
    hookenv.status_set('maintenance', 'Munge key changed, restarting service')
    host.service_restart(helpers.SLURMCTLD_SERVICE)
    flags.clear_flag('slurm-controller.needs_restart')
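The trigger referred to in the docstring is presumably registered with charms.reactive's register_trigger(); a minimal sketch under that assumption (flag names are taken from the handler above, the actual registration in the charm may differ):

from charms.reactive import register_trigger

# When munge.configured becomes set, also set the restart flag that
# handle_munge_change() reacts to and later clears.
register_trigger(when='munge.configured',
                 set_flag='slurm-controller.needs_restart')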
Example No. 14
def missing_controller():
    status_set('blocked', 'Missing a relation to slurm-controller')
    # Stop slurmd
    service_stop(SLURMD_SERVICE)

    for f in ['slurm-node.configured', 'slurm-node.info.sent']:
        flags.clear_flag(f)
        log('Cleared {} flag'.format(f))
Example No. 15
def keystone_departed():
    """
    Service restart should be handled on the keystone side
    in this case.
    """
    flags.clear_flag('domain-name-configured')
    with charm.provide_charm_instance() as kldap_charm:
        kldap_charm.remove_config()
Example No. 16
def check_really_is_update_status():
    """Clear the is-update-status-hook if the hook is not assess-status.

    This is in case the previous update-status hook execution died for some
    reason and the flag never got cleared.
    """
    if hook_name() != 'update-status':
        clear_flag('is-update-status-hook')
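The is-update-status-hook flag is presumably set by a counterpart handler when the update-status hook begins; a minimal sketch (the @hook wiring is an assumption, only the flag name comes from the example):

from charms.reactive import hook, set_flag

@hook('update-status')
def mark_update_status():
    # Mark that we are inside update-status so other handlers can skip
    # expensive work; check_really_is_update_status() clears stale copies.
    set_flag('is-update-status-hook')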
Example No. 17
def send_clustername():
    clustername = hookenv.config().get('clustername')
    hookenv.log("ready to send %s on endpoint" % clustername)
    endpoint = endpoint_from_flag('endpoint.slurm-dbd-consumer.joined')
    endpoint.configure_dbd(clustername)
    flags.set_flag('slurm-controller.dbdname-requested')
    # clear the changed flag on endpoint, or clustername will be requested
    # on every hook run
    flags.clear_flag('endpoint.slurm-dbd-consumer.changed')
Example No. 18
def consume_munge_key(munge_consumer):
    '''Consume a munge key if a relation to a provider has been made via
    the consumer interface, regardless of whether one has already been
    generated. Store it in leader settings to propagate to other units.'''
    munge_key = munge_consumer.munge_key
    # do not do anything unless there is actually a key available
    # otherwise, keep using whatever was there before
    if munge_key:
        leadership.leader_set(munge_key=munge_key)
    flags.clear_flag('endpoint.munge-consumer.munge_key_updated')
Example No. 19
def configure_dbd(mysql_endpoint):
    '''A dbd is only configured after leader election is
    performed and a database is believed to be configured'''
    hookenv.status_set('maintenance', 'Configuring slurm-dbd')

    is_active = dbd.is_active_dbd()

    role = dbd.ROLES[is_active]
    peer_role = dbd.ROLES[not is_active]

    dbd_conf = copy.deepcopy(hookenv.config())
    dbd_conf.update({
        'db_hostname': mysql_endpoint.db_host(),
        'db_port': dbd.MYSQL_DB_PORT,
        'db_password': mysql_endpoint.password(),
        'db_name': mysql_endpoint.database(),
        'db_username': mysql_endpoint.username(),
    })

    ha_endpoint = relations.endpoint_from_flag('endpoint.slurm-dbd-ha.joined')
    if ha_endpoint:
        net_details = dbd.add_key_prefix(ha_endpoint.network_details(), role)
        dbd_conf.update(net_details)

        # add prefixed peer data
        peer_data = dbd.add_key_prefix(ha_endpoint.peer_data, peer_role)
        dbd_conf.update(peer_data)
    else:
        # if running in standalone mode, just use network-get with HA endpoint
        # name to get an ingress address and a hostname
        net_details = dbd.add_key_prefix(dbd.network_details(), role)
        dbd_conf.update(net_details)
        peer_data = None

    # a dbd service is configurable if it is an active dbd
    # or a backup dbd that knows about an active dbd
    is_configurable = is_active or (not is_active and peer_data)
    if is_configurable:
        hookenv.log('dbd is configurable ({})'.format(role))
        # Setup slurm dirs and config
        dbd.render_slurmdbd_config(context=dbd_conf)
        # Render a minimal dummy slurm.conf
        dbd.render_slurm_config(context=dbd_conf)
        # Make sure slurmdbd is running
        if not host.service_running(dbd.SLURMDBD_SERVICE):
            host.service_start(dbd.SLURMDBD_SERVICE)
        flags.set_flag('slurm-dbd.configured')
        flags.clear_flag('slurm-dbd.standalone_startup')
        host.service_restart(dbd.SLURMDBD_SERVICE)
    else:
        hookenv.log('dbd is NOT configurable ({})'.format(role))
        if not is_active:
            hookenv.status_set('maintenance',
                               'Backup dbd is waiting for peer data')
Example No. 20
def send_config():
    layer.status.maintenance("Sending NBI configuration")
    try:
        nbi = endpoint_from_flag("nbi.joined")
        if nbi:
            service_ip = get_service_ip("nbi")
            if service_ip:
                nbi.send_connection(service_ip, get_nbi_port())
                clear_flag("nbi.joined")
    except Exception as e:
        log("Fail sending NBI configuration: {}".format(e))
Example No. 21
def snap_refresh():
    channel = config('channel') or 'stable'
    if validate_snap_channel(channel):
        clear_flag('snap.channel.invalid')
        snap.refresh('vault', channel=channel)
        if vault.can_restart():
            log("Restarting vault", level=DEBUG)
            service_restart('vault')
            if config('totally-unsecure-auto-unlock'):
                vault.prepare_vault()
    else:
        set_flag('snap.channel.invalid')
Example No. 22
def upgrade():
    '''An upgrade has been triggered.'''
    pki_directory = os.path.join(easyrsa_directory, 'pki')
    if os.path.isdir(pki_directory):
        charm_pki_directory = os.path.join(charm_directory, 'pki')
        # When the charm pki directory exists, it is stale, remove it.
        if os.path.isdir(charm_pki_directory):
            shutil.rmtree(charm_pki_directory)
        # Copy the EasyRSA/pki to the charm pki directory.
        shutil.copytree(pki_directory, charm_pki_directory, symlinks=True)
    clear_flag('easyrsa.installed')
    clear_flag('easyrsa.configured')
Example No. 23
def get_set_redis_uri(redis):
    """Get set redis connection details
    """
    status_set('maintenance', 'Acquiring Redis URI')
    redis_data = redis.redis_data()
    kv.set('redis_uri', redis_data['uri'])
    kv.set('redis_host', redis_data['host'])
    kv.set('redis_port', redis_data['port'])
    kv.set('redis_password', redis_data['password'])
    status_set('active', 'Redis URI acquired')
    clear_flag('django.redis.settings.available')
    set_flag('django.redis.available')
Example No. 24
def send_config():
    layer.status.maintenance("Sending RO configuration")
    try:
        ro = endpoint_from_flag("ro.joined")
        if ro:
            service_ip = get_service_ip("ro")
            if service_ip:
                ro.send_connection(
                    service_ip, get_ro_port(),
                )
                clear_flag("ro.joined")
    except Exception as e:
        log("Fail sending RO configuration: {}".format(e))
Example No. 25
def consume_dbd_host_change(dbd_consumer):
    # TODO: Tuples are better?
    dbd_host = dbd_consumer.dbd_host
    dbd_port = dbd_consumer.dbd_port
    dbd_ipaddr = dbd_consumer.dbd_ipaddr
    if dbd_host:
        leadership.leader_set(dbd_host=dbd_host)
    if dbd_port:
        leadership.leader_set(dbd_port=dbd_port)
    if dbd_ipaddr:
        leadership.leader_set(dbd_ipaddr=dbd_ipaddr)
    flags.clear_flag('endpoint.slurm-dbd-consumer.dbd_host_updated')
    # Announce to configure_controller that the nodes need new information
    flags.set_flag('slurm.dbd_host_updated')
Example No. 26
def ensure_etcd_connections():
    '''Ensure etcd connection strings are accurate.

    Etcd connection info is written to config files when various install/config
    handlers are run. Watch this data for changes, and when changed, remove
    relevant flags to make sure accurate config is regenerated.
    '''
    etcd = endpoint_from_flag('etcd.available')
    if data_changed('flannel_etcd_connections', etcd.get_connection_string()):
        clear_flag('flannel.service.installed')

        # Clearing the above flag will change config that the flannel
        # service depends on. Set ourselves up to (re)invoke the start handler.
        clear_flag('flannel.service.started')
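data_changed() backs this check by caching a hash of the supplied value in the unit's key/value store and returning True only when the value differs from the one recorded on the previous call. A minimal illustration (the cache key and connection string are arbitrary):

from charms.reactive.helpers import data_changed

# The first call records the value and returns True; later calls with the
# same value return False, so dependent flags are only cleared on change.
if data_changed('example.etcd_conn', 'https://10.0.0.1:2379'):
    pass  # regenerate config / clear flags here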
Example No. 27
    def test_when_not_set_clear(self):
        flags.register_trigger(when_not='foo',
                               set_flag='bar',
                               clear_flag='qux')
        flags.set_flag('noop')
        flags.clear_flag('noop')
        assert not flags.is_flag_set('bar')

        flags.set_flag('foo')
        flags.set_flag('qux')
        assert not flags.is_flag_set('bar')
        flags.clear_flag('foo')
        assert flags.is_flag_set('bar')
        assert not flags.is_flag_set('qux')
Example No. 28
    def provide_munge_key(self):
        '''Provide an exposed munge key. The changed flag is set when new
        units join, as the relation lifecycle includes a joined and a
        subsequent changed event.'''
        hookenv.log(
            'provide_munge_key() triggered, sending out munge key on all relations!!'
        )
        for rel in self.relations:
            # rel.to_publish.update({'munge_key': self._exposed_munge_key})
            rel.to_publish.update(
                {'munge_key': leadership.leader_get('munge_key')})
        flags.clear_flag(self.expand_name('endpoint.{endpoint_name}.changed'))
        hookenv.log(
            'provide_munge_key(): munge key published on endpoint in MungeAuthProvides'
        )
Example No. 29
    def check_key(self):
        rel = self._munge_auth_relation()
        joined_units = rel.joined_units()
        hookenv.log('Joined munge provider units: {}'.format(joined_units))
        remote_unit = hookenv.remote_unit()
        hookenv.log('Remote unit: {}'.format(remote_unit))
        remote_munge_key = joined_units[remote_unit].get('munge_key')
        # Update the munge key if it differs from the local one.  Juju had
        # not implemented per-application endpoint data buckets at the time
        # of writing, so the key is read from the remote unit's data.
        if remote_munge_key and remote_munge_key != self._cached_munge_key:
            self._cached_munge_key = remote_munge_key
            flags.set_flag(
                self.expand_name('endpoint.{endpoint_name}.munge_key_updated'))
        flags.clear_flag('endpoint.munge-consumer.changed.munge_key')
Example No. 30
def config_changed():
    hookenv.log('config_changed(): leader detected charm config change')
    charmconf = hookenv.config()
    # We are only interested if the munge_key has changed
    if charmconf.changed('munge_key'):
        hookenv.log('config_changed(): munge key has changed')
        prev_key = charmconf.previous('munge_key')
        munge_key = charmconf['munge_key']
        hookenv.log('config_changed(): previous key: %s' % prev_key)
        hookenv.log('config_changed(): new key: %s' % munge_key)
        leadership.leader_set(munge_key=munge_key)
        # clear munge.configured and munge.exposed
        flags.clear_flag('munge.configured')
        flags.clear_flag('munge.exposed')
        flags.set_flag('munge.changed_key_file')