Example #1
def configure():
    """Configure wordpress-k8s pod

    Conditions:
        - wordpress-image.available
        - Not wordpress-k8s.configured
    """
    layer.status.maintenance('Configuring wordpress container')
    try:
        wordpressdb = endpoint_from_flag('wordpressdb.available')

        spec = make_pod_spec(
            wordpressdb.host(),
            wordpressdb.user(),
            wordpressdb.password(),
        )

        log('set pod spec: {}'.format(spec))
        success = pod_spec_set(spec)
        if success:
            set_flag('wordpress-k8s.configured')
            layer.status.active('configured')
        else:
            layer.status.blocked('k8s spec failed to deploy')

    except Exception as e:
        layer.status.blocked('k8s spec failed to deploy: {}'.format(e))
Example #2
def install_slurm():
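    """Install the slurmdbd package and report its upstream version to Juju."""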
    hookenv.status_set('maintenance', 'installing slurmdbd packages')
    packages = [dbd.SLURMDBD_PACKAGE]
    ch_fetch.apt_install(packages)
    hookenv.application_version_set(
        ch_fetch.get_upstream_version(dbd.SLURMDBD_PACKAGE))
    flags.set_flag('slurmdbd.installed')
Example #3
def install_slurm():
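    """Install the munge package."""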
    hookenv.status_set('maintenance', 'installing munge package')

    packages = [MUNGE_PACKAGE]
    apt_install(packages)

    flags.set_flag('munge.installed')
Example #4
def snap_install():
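    # Install the vault snap from the configured channel; if the channel
    # value is invalid, flag it instead of attempting the install.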
    channel = config('channel') or 'stable'
    if validate_snap_channel(channel):
        clear_flag('snap.channel.invalid')
        snap.install('vault', channel=channel)
    else:
        set_flag('snap.channel.invalid')
Example #5
    def _controller_config(self):
        rel = self._controller_relation()

        log('Joined controller units: {}'.format(rel.joined_units))
        partitions = None
        recv_active = None
        for u in rel.joined_units:
            recv = u.received
            log('Received from {}: {}'.format(u.unit_name, recv))
            # the expectation is that backup controller units will not
            # post any config and there will only be one config posted
            # by the active controller
            cpartitions = recv.get('partitions')
            log('partitions: {}'.format(repr(partitions)))
            log('cpartitions: {}'.format(repr(cpartitions)))
            if partitions and cpartitions:
                log('Two controllers presenting active data: split-brain')
            # catch a split-brain condition when two controllers
            # advertise possibly conflicting config data, which means
            # the active controller may have changed
                # TODO: error/status handling for this
                flags.set_flag(
                    self.expand_name('endpoint.{endpoint_name}.split-brain'))
            elif not partitions:
                log('Controller partitions: {}'.format(cpartitions))
                partitions = cpartitions
                recv_active = recv
        log('Active controller partitions: {}'.format(partitions))
        return recv_active if partitions else {}
Example #6
def configure():
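    # Build and apply the pod spec once the kafka, mongo and prometheus
    # endpoints are all available.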
    layer.status.maintenance("Configuring mon container")
    try:
        kafka = endpoint_from_flag("kafka.ready")
        mongo = endpoint_from_flag("mongo.ready")
        prometheus = endpoint_from_flag("endpoint.prometheus.available")

        if kafka and mongo and prometheus:
            kafka_units = kafka.kafkas()
            kafka_unit = kafka_units[0]

            mongo_uri = mongo.connection_string()
            log("Mongo URI: {}".format(mongo_uri))

            prometheus_url = prometheus.targets()[0]["targets"][0]

            if mongo_uri and kafka_unit["host"]:

                spec = make_pod_spec(kafka_unit["host"], kafka_unit["port"],
                                     mongo_uri, prometheus_url)

                log("set pod spec:\n{}".format(spec))
                pod_spec_set(spec)
                set_flag("mon-k8s.configured")
    except Exception as e:
        layer.status.blocked("k8s spec failed to deploy: {}".format(e))
Example #7
def cluster_connected(hacluster):
    """Configure HA resources in corosync"""
    dns_record = config('dns-ha-access-record')
    vips = config('vip') or None
    if vips and dns_record:
        set_flag('config.dns_vip.invalid')
        log("Unsupported configuration. vip and dns-ha cannot both be set",
            level=ERROR)
        return
    else:
        clear_flag('config.dns_vip.invalid')

    if vips:
        vips = vips.split()
        for vip in vips:
            if vip == vault.get_vip(binding='external'):
                hacluster.add_vip('vault-ext', vip)
            else:
                hacluster.add_vip('vault', vip)
    elif dns_record:
        try:
            ip = network_get_primary_address('access')
        except NotImplementedError:
            ip = unit_private_ip()
        hacluster.add_dnsha('vault', ip, dns_record, 'access')
    hacluster.bind_resources()
Example #8
def send_node_info(cluster_endpoint):
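    # Publish this node's hostname, partition, 'default' setting and
    # hardware inventory on the slurm-cluster endpoint.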
    cluster_endpoint.send_node_info(hostname=gethostname(),
                                    partition=config('partition'),
                                    default=config('default'),
                                    inventory=get_inventory())
    flags.set_flag('slurm-node.info.sent')
    log('Set {} flag'.format('slurm-node.info.sent'))
Example #9
def configure_node(cluster_changed, cluster_joined):
    status_set('maintenance', 'Configuring slurm-node')

    controller_data = cluster_changed.active_data
    create_spool_dir(context=controller_data)

    render_munge_key(context=controller_data)
    # If the munge.key has been changed on the controller and munge is
    # running, the service must be restarted to use the new key
    if flags.is_flag_set('endpoint.slurm-cluster.changed.munge_key'
                         ) and service_running(MUNGE_SERVICE):
        log('Restarting munge due to key change on slurm-controller')
        service_restart(MUNGE_SERVICE)

    render_slurm_config(context=controller_data)

    # Make sure munge is running
    if not service_running(MUNGE_SERVICE):
        service_start(MUNGE_SERVICE)
    # Make sure slurmd is running
    if not service_running(SLURMD_SERVICE):
        service_start(SLURMD_SERVICE)

    flags.set_flag('slurm-node.configured')
    log('Set {} flag'.format('slurm-node.configured'))

    flags.clear_flag('endpoint.slurm-cluster.active.changed')
    log('Cleared {} flag'.format('endpoint.slurm-cluster.active.changed'))

    # Clear this flag so that a munge_key change from the controller can be
    # signalled again.
    flags.clear_flag('endpoint.slurm-cluster.changed.munge_key')
    log('Cleared {} flag'.format('endpoint.slurm-cluster.changed.munge_key'))
Example #10
def request_lb():
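    # Request a TCP load balancer for port 8220 from the related lb-provider.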
    lb_provider = endpoint_from_name('lb-provider')
    req = lb_provider.get_request('vault')
    req.protocol = req.protocols.tcp
    req.port_mapping = {8220: 8220}
    lb_provider.send_request(req)
    set_flag('vault.requested-lb')
Example #11
def configure_vault(context):
    log("Running configure_vault", level=DEBUG)
    context['disable_mlock'] = is_container() or config('disable-mlock')

    context['ssl_available'] = is_state('vault.ssl.available')

    if is_flag_set('etcd.tls.available'):
        etcd = endpoint_from_flag('etcd.available')
        log("Etcd detected, adding to context", level=DEBUG)
        context['etcd_conn'] = etcd.connection_string()
        context['etcd_tls_ca_file'] = '/var/snap/vault/common/etcd-ca.pem'
        context['etcd_tls_cert_file'] = '/var/snap/vault/common/etcd-cert.pem'
        context['etcd_tls_key_file'] = '/var/snap/vault/common/etcd.key'
        save_etcd_client_credentials(etcd,
                                     key=context['etcd_tls_key_file'],
                                     cert=context['etcd_tls_cert_file'],
                                     ca=context['etcd_tls_ca_file'])
        context['api_addr'] = vault.get_api_url()
        context['cluster_addr'] = vault.get_cluster_url()
        log("Etcd detected, setting api_addr to {}".format(
            context['api_addr']))
    else:
        log("Etcd not detected", level=DEBUG)
    log("Rendering vault.hcl.j2", level=DEBUG)
    render('vault.hcl.j2', VAULT_CONFIG, context, perms=0o600)
    log("Rendering vault systemd configuation", level=DEBUG)
    render('vault.service.j2', VAULT_SYSTEMD_CONFIG, {}, perms=0o644)
    service('enable', 'vault')
    log("Opening vault port", level=DEBUG)
    open_port(8200)
    set_flag('configured')
    if any_file_changed([VAULT_CONFIG, VAULT_SYSTEMD_CONFIG]):
        # force a restart if config has changed
        clear_flag('started')
Example #12
    def get_clustername_ack(self):
        epunit = hookenv.remote_unit()
        hookenv.log("get_clustername_ack(): remote unit: %s" % epunit)
        joined_units = self.all_joined_units

        # also pick up the ip-address etc. of the dbd here
        if epunit is not None:
            namerequest = joined_units[epunit].received.get('requested_clustername')
            nameresult = joined_units[epunit].received.get('accepted_clustername')
            dbd_host = joined_units[epunit].received.get('dbd_host')
            if nameresult:
                hookenv.log("get_clustername_ack(): name %s was accepted by %s on %s" % (nameresult, epunit, dbd_host))
                # all is fine
                flags.set_flag('slurm-controller.dbdname-accepted')
            else:
                status_set('blocked', 'Cluster name %s rejected by DBD on %s: name already taken. Run juju config <slurm-controller-charm> clustername=New_Name' % (namerequest, epunit))
                hookenv.log("get_clustername_ack(): request for %s was rejected by %s" % (namerequest, epunit))
                flags.clear_flag('slurm-controller.dbdname-requested')

            """
            TODO: raise some flag so that layer-slurm-controller reconfigures
            itself+peers and updates config on all nodes
            """
        # clear all the flags that were sent in changed() on the provider side
        flags.clear_flag('endpoint.slurm-dbd-consumer.changed.requested_clustername')
        flags.clear_flag('endpoint.slurm-dbd-consumer.changed.accepted_clustername')
Example #13
def publish_global_client_cert():
    """
    This is for backwards compatibility with older tls-certificate clients
    only.  Obviously, it's not good security / design to have clients sharing
    a certificate, but it seems that there are clients that depend on this
    (though some, like etcd, only block on the flag that it triggers but don't
    actually use the cert), so we have to set it for now.
    """
    if not client_approle_authorized():
        log("Vault not authorized: Skipping publish_global_client_cert",
            "WARNING")
        return
    cert_created = is_flag_set('charm.vault.global-client-cert.created')
    reissue_requested = is_flag_set('certificates.reissue.global.requested')
    tls = endpoint_from_flag('certificates.available')
    if not cert_created or reissue_requested:
        ttl = config()['default-ttl']
        max_ttl = config()['max-ttl']
        bundle = vault_pki.generate_certificate('client', 'global-client', [],
                                                ttl, max_ttl)
        unitdata.kv().set('charm.vault.global-client-cert', bundle)
        set_flag('charm.vault.global-client-cert.created')
        clear_flag('certificates.reissue.global.requested')
    else:
        bundle = unitdata.kv().get('charm.vault.global-client-cert')
    tls.set_client_cert(bundle['certificate'], bundle['private_key'])
Example #14
def tune_pki_backend():
    """Ensure Vault PKI backend is correctly tuned
    """
    ttl = config()['default-ttl']
    max_ttl = config()['max-ttl']
    vault_pki.tune_pki_backend(ttl=ttl, max_ttl=max_ttl)
    set_flag('pki.backend.tuned')
Example #15
def configure():
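    # Build and apply the pod spec once the kafka, mongo and RO endpoints
    # are all available.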
    layer.status.maintenance("Configuring lcm container")
    try:
        kafka = endpoint_from_flag("kafka.ready")
        mongo = endpoint_from_flag("mongo.ready")
        osm_ro = endpoint_from_flag("ro.ready")

        if kafka and mongo and osm_ro:
            kafka_units = kafka.kafkas()
            kafka_unit = kafka_units[0]

            mongo_uri = mongo.connection_string()
            log("Mongo URI: {}".format(mongo_uri))

            ros = osm_ro.ros()
            ro_unit = ros[0]

            if (mongo_uri and kafka_unit["host"] and kafka_unit["port"]
                    and ro_unit["host"] and ro_unit["port"]):
                spec = make_pod_spec(
                    ro_unit["host"],
                    ro_unit["port"],
                    kafka_unit["host"],
                    kafka_unit["port"],
                    mongo_uri,
                )

                log("set pod spec:\n{}".format(spec))
                pod_spec_set(spec)
                layer.status.active("creating container")
                set_flag("lcm-k8s.configured")
    except Exception as e:
        layer.status.blocked("k8s spec failed to deploy: {}".format(e))
Example #16
    def set_state(self, state):
        """
        Activate and put this conversation into the given state.

        The relation name will be interpolated in the state name, and it is
        recommended that it be included to avoid conflicts with states from
        other relations.  For example::

            conversation.set_state('{relation_name}.state')

        If called from a conversation handling the relation "foo", this will
        activate the "foo.state" state, and will add this conversation to
        that state.

        Note: This uses :mod:`charmhelpers.core.unitdata` and requires that
        :meth:`~charmhelpers.core.unitdata.Storage.flush` be called.
        """
        state = state.format(relation_name=self.relation_name)
        value = _get_flag_value(state, {
            'relation': self.relation_name,
            'conversations': [],
        })
        if self.key not in value['conversations']:
            value['conversations'].append(self.key)
        set_flag(state, value)
Example #17
    def remove_state(self, state):
        """
        Remove this conversation from the given state, and potentially
        deactivate the state if no more conversations are in it.

        The relation name will be interpolated in the state name, and it is
        recommended that it be included to avoid conflicts with states from
        other relations.  For example::

            conversation.remove_state('{relation_name}.state')

        If called from a conversation handling the relation "foo", this will
        remove the conversation from the "foo.state" state and, if no more
        conversations are in the state, deactivate it.
        """
        state = state.format(relation_name=self.relation_name)
        value = _get_flag_value(state)
        if not value:
            return
        if self.key in value['conversations']:
            value['conversations'].remove(self.key)
        if value['conversations']:
            set_flag(state, value)
        else:
            clear_flag(state)
Example #18
    def controllers_changed(self):
        """Assess related controllers and only take relation data from the
        active one"""
        self._active_data = self._controller_config()

        if self._controller_config_ready(self._active_data):
            flags.set_flag(
                self.expand_name('endpoint.{endpoint_name}.active.available'))
            log('Set {} flag'.format(
                self.expand_name('endpoint.{endpoint_name}.active.available')))
            flags.set_flag(
                self.expand_name('endpoint.{endpoint_name}.active.changed'))
            log('Set {} flag'.format(
                self.expand_name('endpoint.{endpoint_name}.active.changed')))
        else:
            log('Controller config not ready, clearing active.available'
                ' and active.changed flags')
            self.controller_broken()
            # TODO: JSON is not serializable => need to either remove
            # this and execute more or solve the problem
            # if helpers.data_changed('active_data', self._active_data):
            #    flags.set_flag(self.expand_name(
            #        'endpoint.{endpoint_name}.active.changed'))

        # processed the relation changed event - can clear this flag now
        flags.clear_flag(self.expand_name('changed'))
        log('Cleared {} flag'.format(self.expand_name('changed')))
Example #19
def configure_easyrsa():
    """A transitional state to allow modifications to configuration before
    generating the certificates and working with PKI."""
    hookenv.log("Configuring OpenSSL to copy extensions.")
    configure_copy_extensions()
    hookenv.log("Configuring X509 server extensions with clientAuth.")
    configure_client_authorization()
    set_flag("easyrsa.configured")
Example #20
def render_wsgi_py():
    """Write out settings.py
    """
    status_set('maintenance', "Rendering wsgi.py")
    secrets = {'project_name': config('django-project-name')}
    render_settings_py(settings_filename="wsgi.py", secrets=secrets)
    status_set('active', "Django wsgi.py rendered")
    set_flag('django.wsgi.available')
Example #21
def provide_munge_key_to_interface(munge_provider):
    '''Provide the munge key if any consumers are related.'''
    munge_key = leadership.leader_get('munge_key')
    hookenv.log('provide_munge_key_to_interface(): exposing munge key: %s' %
                munge_key)
    munge_provider.expose_munge_key(munge_key)
    munge_provider.provide_munge_key()
    flags.set_flag('munge.exposed')
Example #22
def configure_munge_key():
    munge_key = leadership.leader_get('munge_key')
    munge.render_munge_key(context={'munge_key': munge_key})
    hookenv.log(
        'configure_munge_key(): leadership detected new munge key, rendered new file'
    )
    # set a flag confirming that munge key is rendered
    flags.set_flag('munge.configured')
Example #23
def render_s3_storage_config():
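    # Render storage.py from the AWS settings stored under the 'aws' prefix
    # in the unit's key-value store.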
    status_set('maintenance', "Configuring S3 storage")

    render_settings_py(settings_filename="storage.py",
                       secrets=kv.getrange('aws'))

    status_set('active', "S3 storage available")
    set_flag('s3.storage.settings.available')
Example #24
def config_mysql():
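    # Build the mysql pod spec and submit it with pod_spec_set.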
    status_set('maintenance', 'Configuring mysql container')

    spec = make_pod_spec()
    log('set pod spec:\n{}'.format(spec))
    layer.caas_base.pod_spec_set(spec)

    set_flag('mysql.configured')
Example #25
def initial_setup():
    hookenv.status_set('maintenance', 'Setting up munge key')
    # use leader-get here as this is executed on both active and
    # backup controllers
    munge_key = hookenv.leader_get('munge_key')
    # Disable slurmd on controller
    host.service_pause(helpers.SLURMD_SERVICE)
    helpers.render_munge_key(context={'munge_key': munge_key})
    flags.set_flag('munge.configured')
Example #26
def config_gitlab():
    status_set('maintenance', 'Configuring gitlab container')

    spec = make_pod_spec()
    log('set pod spec:\n{}'.format(spec))
    pod_spec_set(spec)

    set_flag('gitlab.configured')
    status_set('maintenance', 'Creating gitlab container')
Example #27
def configure_cinder_backup():
    # don't always have a relation context - obtain from the flag
    endp = endpoint_from_flag('endpoint.backup-backend.joined')
    with charms_openstack.charm.provide_charm_instance() as charm_instance:
        # publish config options for all remote units of a given rel
        name, config = charm_instance.get_swift_backup_config()
        endp.publish(name, config)
        charm_instance.configure_ca()
        flags.set_flag('config.complete')
Example #28
def authorize_charm_action(*args):
    """Create a role allowing the charm to perform certain vault actions.
    """
    if not hookenv.is_leader():
        hookenv.action_fail('Please run action on lead unit')
        return
    action_config = hookenv.action_get()
    role_id = vault.setup_charm_vault_access(action_config['token'])
    hookenv.leader_set({vault.CHARM_ACCESS_ROLE_ID: role_id})
    set_flag('secrets.refresh')
Example #29
def install_deps():
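    # Run the ansible/playbook.yaml playbook, passing the exporter port and
    # host IP as extra variables.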
    status_set('maintenance', 'installing dependencies')
    apply_playbook(playbook='ansible/playbook.yaml',
                   extra_vars=dict(
                       exp_port=config.get('port'),
                       exp_host=get_ip()[0],
                   ))
    status_set('active', 'ready')
    set_flag('prometheus-virtfs-exporter.installed')
Example #30
def send_clustername():
    clustername = hookenv.config().get('clustername')
    hookenv.log("ready to send %s on endpoint" % clustername)
    endpoint = endpoint_from_flag('endpoint.slurm-dbd-consumer.joined')
    endpoint.configure_dbd(clustername)
    flags.set_flag('slurm-controller.dbdname-requested')
    # clear the changed flag on endpoint, or clustername will be requested
    # on every hook run
    flags.clear_flag('endpoint.slurm-dbd-consumer.changed')