Example #1
def configure_sources():
    """Add user specified package sources from the service configuration.

    See charmhelpers.fetch.configure_sources for details.
    """
    hookenv.log('Initializing Apt Layer')
    config = hookenv.config()

    # We don't have enums, so we need to validate this ourselves.
    package_status = config.get('package_status')
    if package_status not in ('hold', 'install'):
        charms.apt.status_set('blocked',
                              'Unknown package_status {}'
                              ''.format(package_status))
        # Die before further hooks are run. This isn't very nice, but
        # there is no other way to inform the operator that they have
        # invalid configuration.
        raise SystemExit(0)

    sources = config.get('install_sources')
    keys = config.get('install_keys')
    if reactive.helpers.data_changed('apt.configure_sources', (sources, keys)):
        fetch.configure_sources(update=False,
                                sources_var='install_sources',
                                keys_var='install_keys')
        reactive.set_state('apt.needs_update')

    extra_packages = sorted(config.get('extra_packages', '').split())
    if extra_packages:
        queue_install(extra_packages)
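
The data_changed() guard above re-runs fetch.configure_sources only when the (sources, keys) tuple differs from the previous hook invocation. A minimal sketch of how such a helper can work, assuming a hash recorded in a key/value store (illustration only, not charms.reactive's actual implementation):

import hashlib
import json

_kv = {}  # stand-in for the unit's persistent key/value store


def data_changed(key, data):
    """Return True if `data` differs from what was recorded under `key` last time."""
    digest = hashlib.sha256(
        json.dumps(data, sort_keys=True).encode('utf-8')).hexdigest()
    changed = _kv.get(key) != digest
    _kv[key] = digest
    return changed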
Example #2
def install_dockerbeat():
    '''Install dockerbeat from resources, falling back to fetching it over
    the network for 1.25.5 hosts.'''

    try:
        bin_path = resource_get('dockerbeat')
    except NotImplementedError:
        # Attempt to fetch and install from configured uri with validation
        bin_path = download_from_upstream()

    full_beat_path = '/usr/local/bin/dockerbeat'

    if not bin_path:
        status_set('blocked', 'Missing dockerbeat binary')
        return

    install(bin_path, full_beat_path)
    os.chmod(full_beat_path, 0o755)

    codename = lsb_release()['DISTRIB_CODENAME']

    # render the appropriate init system configuration
    if codename == 'trusty':
        render('upstart', '/etc/init/dockerbeat.conf', {})
    else:
        render('systemd', '/etc/systemd/system/dockerbeat.service', {})

    set_state('dockerbeat.installed')
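
download_from_upstream() is the fallback for 1.25.5 hosts where resource_get() is unavailable. A hedged sketch of a fetch-with-checksum helper; the config keys and checksum scheme below are assumptions, not taken from the charm:

import hashlib
import urllib.request

from charmhelpers.core import hookenv


def download_from_upstream():
    """Fetch the dockerbeat binary from a configured URI and verify it.

    Returns the local path on success, or None so the caller can block.
    """
    config = hookenv.config()
    uri = config.get('install_from_upstream')    # assumed config option
    sha256 = config.get('install_sha256', '')    # assumed config option
    if not uri:
        return None
    local_path = '/tmp/dockerbeat'
    urllib.request.urlretrieve(uri, local_path)
    if sha256:
        with open(local_path, 'rb') as f:
            if hashlib.sha256(f.read()).hexdigest() != sha256:
                return None  # checksum mismatch: treat as missing
    return local_path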
def set_upgrade_needed():
    set_state('kubernetes-worker.snaps.upgrade-needed')
    config = hookenv.config()
    previous_channel = config.previous('channel')
    require_manual = config.get('require-manual-upgrade')
    if previous_channel is None or not require_manual:
        set_state('kubernetes-worker.snaps.upgrade-specified')
Example #4
def setup_mysql(db):
    apt_install(['php5-mysql', 'mysql-client'])
    reset_wallabag()
    setup('mysql', db)
    remove_state('mysql.available')
    remove_state('wallabag.connected.sqlite')
    set_state('wallabag.connected.mysql')
Example #5
def install():
    """Install REST2JFed"""
    try:
        # update needed because of weird error
        hookenv.log("Installing dependencies")
        subprocess.check_output(['apt-get', 'update'])
        subprocess.check_output(['pip2', 'install', 'Jinja2', 'Flask', 'pyyaml', 'click', 'python-dateutil'])
    except subprocess.CalledProcessError as exception:
        hookenv.log(exception.output)
        exit(1)
    hookenv.log("Extracting and moving required files and folders")
    mergecopytree(charm_dir() + '/files/jfedS4', "/opt/jfedS4")
    mergecopytree(charm_dir() + '/files/rest2jfed', "/opt/rest2jfed")
    hookenv.log("Generating upstart file")
    with open(charm_dir()+'/templates/upstart.conf', 'r') as upstart_t_file:
        upstart_template = upstart_t_file.read()
    with open('/etc/init/rest2jfed.conf', 'w') as upstart_file:
        upstart_file.write(upstart_template)
    hookenv.log("Starting rest2jfed service")
    try:
        subprocess.check_output(['service', 'rest2jfed', 'start'])
    except subprocess.CalledProcessError as exception:
        hookenv.log(exception.output)
        exit(1)
    open_port(5000)
    status_set('active', 'Ready')
    set_state('rest2jfed.installed')
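
mergecopytree() copies a directory tree into a destination that may already exist, which shutil.copytree (pre-3.8) refuses to do. A minimal sketch of that behavior:

import os
import shutil


def mergecopytree(src, dst):
    """Recursively copy `src` into `dst`, merging with any existing contents."""
    os.makedirs(dst, exist_ok=True)
    for entry in os.listdir(src):
        s = os.path.join(src, entry)
        d = os.path.join(dst, entry)
        if os.path.isdir(s):
            mergecopytree(s, d)
        else:
            shutil.copy2(s, d)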
def upgrade_charm():
    # Trigger removal of PPA docker installation if it was previously set.
    set_state('config.changed.install_from_upstream')
    hookenv.atexit(remove_state, 'config.changed.install_from_upstream')

    cleanup_pre_snap_services()
    check_resources_for_upgrade_needed()

    # Remove the RC for nginx ingress if it exists
    if hookenv.config().get('ingress'):
        kubectl_success('delete', 'rc', 'nginx-ingress-controller')

    # Remove gpu.enabled state so we can reconfigure gpu-related kubelet flags,
    # since they can differ between k8s versions
    if is_state('kubernetes-worker.gpu.enabled'):
        remove_state('kubernetes-worker.gpu.enabled')
        try:
            disable_gpu()
        except ApplyNodeLabelFailed:
            # Removing node label failed. Probably the master is unavailable.
            # Proceed with the upgrade in hope GPUs will still be there.
            hookenv.log('Failed to remove GPU labels. Proceed with upgrade.')

    remove_state('kubernetes-worker.cni-plugins.installed')
    remove_state('kubernetes-worker.config.created')
    remove_state('kubernetes-worker.ingress.available')
    remove_state('worker.auth.bootstrapped')
    set_state('kubernetes-worker.restart-needed')
def enable_gpu():
    """Enable GPU usage on this node.

    """
    if get_version('kubelet') < (1, 9):
        hookenv.status_set(
            'active',
            'Upgrade to snap channel >= 1.9/stable to enable GPU support.'
        )
        return

    hookenv.log('Enabling gpu mode')
    try:
        # Not sure why this is necessary, but if you don't run this, k8s will
        # think that the node has 0 gpus (as shown by the output of
        # `kubectl get nodes -o yaml`).
        check_call(['nvidia-smi'])
    except CalledProcessError as cpe:
        hookenv.log('Unable to communicate with the NVIDIA driver.')
        hookenv.log(cpe)
        return

    set_label('gpu', 'true')
    set_label('cuda', 'true')

    set_state('kubernetes-worker.gpu.enabled')
    set_state('kubernetes-worker.restart-needed')
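
set_label() replaces the _apply_node_label() helper used in the older variant (Example #14 below). A sketch assuming it shells out to kubectl to label this node; the node-name lookup and kubeconfig are assumptions:

import socket
from subprocess import check_call


def set_label(label, value):
    """Label this node via kubectl (sketch; kubeconfig and node name assumed)."""
    check_call(['kubectl', 'label', 'node', socket.gethostname(),
                '{}={}'.format(label, value), '--overwrite'])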
def start_worker(kube_api, kube_control, auth_control, cni):
    ''' Start kubelet using the provided API and DNS info.'''
    servers = get_kube_api_servers(kube_api)
    # Note that the DNS server doesn't necessarily exist at this point. We know
    # what its IP will eventually be, though, so we can go ahead and configure
    # kubelet with that info. This ensures that early pods are configured with
    # the correct DNS even though the server isn't ready yet.

    dns = kube_control.get_dns()
    ingress_ip = get_ingress_address(kube_control)
    cluster_cidr = cni.get_config()['cidr']

    if cluster_cidr is None:
        hookenv.log('Waiting for cluster cidr.')
        return

    creds = db.get('credentials')
    data_changed('kube-control.creds', creds)

    create_config(random.choice(servers), creds)
    configure_kubelet(dns, ingress_ip)
    configure_kube_proxy(servers, cluster_cidr)
    set_state('kubernetes-worker.config.created')
    restart_unit_services()
    update_kubelet_status()
    set_state('kubernetes-worker.label-config-required')
    remove_state('kubernetes-worker.restart-needed')
def run_docker_login():
    """Login to a docker registry with configured credentials."""
    config = hookenv.config()

    previous_logins = config.previous('docker-logins')
    logins = config['docker-logins']
    logins = json.loads(logins)

    if previous_logins:
        previous_logins = json.loads(previous_logins)
        next_servers = {login['server'] for login in logins}
        previous_servers = {login['server'] for login in previous_logins}
        servers_to_logout = previous_servers - next_servers
        for server in servers_to_logout:
            cmd = ['docker', 'logout', server]
            subprocess.check_call(cmd)

    for login in logins:
        server = login['server']
        username = login['username']
        password = login['password']
        cmd = ['docker', 'login', server, '-u', username, '-p', password]
        subprocess.check_call(cmd)

    remove_state('kubernetes-worker.docker-login')
    set_state('kubernetes-worker.restart-needed')
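
The docker-logins option is expected to hold a JSON list of server/username/password objects. An illustrative value (hypothetical registry and credentials):

import json

example_logins = json.dumps([
    {'server': 'registry.example.com',
     'username': 'alice',
     'password': 's3cret'},
])
# e.g. juju config kubernetes-worker docker-logins="$example_logins"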
def request_integration():
    hookenv.status_set('maintenance', 'requesting cloud integration')
    kube_control = endpoint_from_flag('kube-control.cluster_tag.available')
    cluster_tag = kube_control.get_cluster_tag()
    if is_state('endpoint.aws.joined'):
        cloud = endpoint_from_flag('endpoint.aws.joined')
        cloud.tag_instance({
            'kubernetes.io/cluster/{}'.format(cluster_tag): 'owned',
        })
        cloud.tag_instance_security_group({
            'kubernetes.io/cluster/{}'.format(cluster_tag): 'owned',
        })
        cloud.tag_instance_subnet({
            'kubernetes.io/cluster/{}'.format(cluster_tag): 'owned',
        })
        cloud.enable_object_storage_management(['kubernetes-*'])
    elif is_state('endpoint.gcp.joined'):
        cloud = endpoint_from_flag('endpoint.gcp.joined')
        cloud.label_instance({
            'k8s-io-cluster-name': cluster_tag,
        })
        cloud.enable_object_storage_management()
    cloud.enable_instance_inspection()
    cloud.enable_dns_management()
    set_state('kubernetes-worker.cloud-request-sent')
    hookenv.status_set('waiting', 'waiting for cloud integration')
def restart_for_cloud():
    if is_state('endpoint.gcp.ready'):
        _write_gcp_snap_config('kubelet')
    elif is_state('endpoint.openstack.ready'):
        _write_openstack_snap_config('kubelet')
    set_state('kubernetes-worker.restarted-for-cloud')
    set_state('kubernetes-worker.restart-needed')
def install_resourcemanager(namenode):
    """Install if the namenode has sent its FQDN.

    We only need the namenode FQDN to perform the RM install, so poll for
    namenodes() data whenever we have a namenode relation. This allows us to
    install asap, even if 'namenode.ready' is not set yet.
    """
    if namenode.namenodes():
        hookenv.status_set('maintenance', 'installing resourcemanager')
        nn_host = namenode.namenodes()[0]
        rm_host = get_fqdn()
        bigtop = Bigtop()
        hosts = {'namenode': nn_host, 'resourcemanager': rm_host}
        bigtop.render_site_yaml(hosts=hosts, roles='resourcemanager')
        bigtop.trigger_puppet()

        # /etc/hosts entries from the KV are not currently used for bigtop,
        # but a hosts_map attribute is required by some interfaces (e.g.
        # mapred-slave) to signify RM's readiness. Set our RM info in the KV
        # to fulfill this requirement.
        utils.initialize_kv_host()

        # Add our ubuntu user to the hadoop and mapred groups.
        get_layer_opts().add_users()

        set_state('apache-bigtop-resourcemanager.installed')
        hookenv.status_set('maintenance', 'resourcemanager installed')
    else:
        hookenv.status_set('waiting', 'waiting for namenode fqdn')
Example #13
def send_nm_all_info(nodemanager):
    """Send nodemanagers all mapred-slave relation data.

    At this point, the resourcemanager is ready to serve nodemanagers. Send all
    mapred-slave relation data so that our 'resourcemanager.ready' state becomes set.
    """
    bigtop = Bigtop()
    rm_host = get_fqdn()
    rm_ipc = get_layer_opts().port('resourcemanager')
    jh_ipc = get_layer_opts().port('jobhistory')
    jh_http = get_layer_opts().port('jh_webapp_http')

    nodemanager.send_resourcemanagers([rm_host])
    nodemanager.send_spec(bigtop.spec())
    nodemanager.send_ports(rm_ipc, jh_http, jh_ipc)

    # hosts_map and ssh_key are required by the mapred-slave interface to signify
    # RM's readiness. Send them, even though they are not utilized by bigtop.
    # NB: update KV hosts with all nodemanagers prior to sending the hosts_map
    # because mapred-slave gates readiness on a NM's presence in the hosts_map.
    utils.update_kv_hosts(nodemanager.hosts_map())
    nodemanager.send_hosts_map(utils.get_kv_hosts())
    nodemanager.send_ssh_key('invalid')

    # update status with slave count and report ready for hdfs
    num_slaves = len(nodemanager.nodes())
    hookenv.status_set('active', 'ready ({count} nodemanager{s})'.format(
        count=num_slaves,
        s='' if num_slaves == 1 else 's',
    ))
    set_state('apache-bigtop-resourcemanager.ready')
Example #14
def enable_gpu():
    """Enable GPU usage on this node.

    """
    config = hookenv.config()
    if config['allow-privileged'] == "false":
        hookenv.status_set(
            'active',
            'GPUs available. Set allow-privileged="auto" to enable.'
        )
        return

    hookenv.log('Enabling gpu mode')
    try:
        # Not sure why this is necessary, but if you don't run this, k8s will
        # think that the node has 0 gpus (as shown by the output of
        # `kubectl get nodes -o yaml`).
        check_call(['nvidia-smi'])
    except CalledProcessError as cpe:
        hookenv.log('Unable to communicate with the NVIDIA driver.')
        hookenv.log(cpe)
        return

    # Apply node labels
    _apply_node_label('gpu=true', overwrite=True)
    _apply_node_label('cuda=true', overwrite=True)

    set_state('kubernetes-worker.gpu.enabled')
    set_state('kubernetes-worker.restart-needed')
Example #15
def configure_with_remote_db(db):
    hookenv.status_set('maintenance', 'configuring external database')
    hive = Hive()
    hive.configure_remote_db(db)
    hive.restart()
    set_state('hive.db.configured')
    report_status()
Example #16
def prometheus_client(prometheus):
    template = """
[[outputs.prometheus_client]]
  listen = "{{ listen }}"
"""
    if get_prometheus_port():
        hookenv.log("Prometheus configured globally, skipping plugin setup")
        prometheus.configure(get_prometheus_port())
        # bail out, nothing more needs to be configured here
        return
    port = 9126
    extra_options = get_extra_options()
    options = extra_options['outputs'].get('prometheus-client', {})
    listen = options.pop('listen', None)
    if listen is not None:
        hookenv.log("Configuring prometheus_client plugin to listen on: '{}'".format(listen))
        port = int(listen.split(":", 1)[1])
    else:
        listen = ":{}".format(port)
    check_port("prometheus_output", get_prometheus_port())
    prometheus.configure(port)
    config_path = '{}/{}.conf'.format(get_configs_dir(), 'prometheus-client')
    hookenv.log("Updating {} plugin config file".format('prometheus-client'))
    context = {"listen": listen}
    content = render_template(template, context) + \
        render_extra_options("outputs", "prometheus_client",
                             extra_options=extra_options)
    host.write_file(config_path, content.encode('utf-8'))
    set_state('plugins.prometheus-client.configured')
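
render_template() merges the inline template above with a context dict. A minimal sketch assuming Jinja2 (the charm's actual helper may differ):

from jinja2 import Template


def render_template(template, context):
    """Render an inline Jinja2 template string with the given context."""
    return Template(template).render(**context)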
def install_nodemanager(namenode, resourcemanager):
    """Install if we have FQDNs.

    We only need the master FQDNs to perform the nodemanager install, so poll
    for master host data from the appropriate relation. This allows us to
    install asap, even if '<master>.ready' is not set.
    """
    namenodes = namenode.namenodes()
    resourcemanagers = resourcemanager.resourcemanagers()
    masters = namenodes + resourcemanagers
    if namenodes and resourcemanagers and data_changed('nm.masters', masters):
        installed = is_state('apache-bigtop-nodemanager.installed')
        action = 'installing' if not installed else 'configuring'
        hookenv.status_set('maintenance', '%s nodemanager' % action)
        bigtop = Bigtop()
        bigtop.render_site_yaml(
            hosts={
                'namenode': namenodes[0],
                'resourcemanager': resourcemanagers[0],
            },
            roles=[
                'nodemanager',
                'mapred-app',
            ],
        )
        bigtop.queue_puppet()
        set_state('apache-bigtop-nodemanager.pending')
Example #18
def apache_input(apache):
    template = """
[[inputs.apache]]
  urls = {{ urls }}
"""
    config_path = '{}/{}.conf'.format(get_configs_dir(), 'apache')
    port = '8080'
    vhost = render(source='apache-server-status.tmpl',
                   templates_dir=get_templates_dir(),
                   target=None,
                   context={'port': port})
    relation_info = {"ports": port,
                     "domain": "apache-status",
                     "enabled": True,
                     "site_config": vhost,
                     "site_modules": "status"}
    urls = []
    rels = hookenv.relations_of_type('apache')
    for rel in rels:
        hookenv.relation_set(rel['__relid__'], relation_settings=relation_info)
        addr = rel['private-address']
        url = 'http://{}:{}/server-status?auto'.format(addr, port)
        urls.append(url)
    if urls:
        context = {"urls": json.dumps(urls)}
        input_config = render_template(template, context) + \
            render_extra_options("inputs", "apache")
        hookenv.log("Updating {} plugin config file".format('apache'))
        host.write_file(config_path, input_config.encode('utf-8'))
        set_state('plugins.apache.configured')
    elif os.path.exists(config_path):
        os.unlink(config_path)
Example #19
def influxdb_api_output(influxdb):
    required_keys = ['hostname', 'port', 'user', 'password']
    rels = hookenv.relations_of_type('influxdb-api')
    endpoints = []
    user = None
    password = None
    for rel in rels:
        if all([rel.get(key) for key in required_keys]):
            endpoints.append("http://{}:{}".format(rel['hostname'], rel['port']))
            if user is None:
                user = rel['user']
            if password is None:
                password = rel['password']
    config_path = '{}/{}.conf'.format(get_configs_dir(), 'influxdb-api')
    if endpoints:
        hookenv.log("Updating {} plugin config file".format('influxdb-api'))
        content = render(source='influxdb-api.conf.tmpl', target=None,
                         templates_dir=get_templates_dir(),
                         context={'urls': json.dumps(endpoints),
                                  'username': user,
                                  'password': password})
        extra_opts = render_extra_options("outputs", "influxdb")
        host.write_file(config_path, '\n'.join([content, extra_opts]).encode('utf-8'))
        set_state('plugins.influxdb-api.configured')
    elif os.path.exists(config_path):
        os.unlink(config_path)
Example #20
def elasticsearch_input(es):
    template = """
[[inputs.elasticsearch]]
  servers = {{ servers }}
"""
    hosts = []
    rels = hookenv.relations_of_type('elasticsearch')
    for rel in rels:
        es_host = rel.get('host')
        port = rel.get('port')
        if not es_host or not port:
            hookenv.log('No host/port received for relation: {}.'.format(rel))
            continue
        hosts.append("http://{}:{}".format(es_host, port))
    config_path = '{}/{}.conf'.format(get_configs_dir(), 'elasticsearch')
    if hosts:
        context = {"servers": json.dumps(hosts)}
        input_config = render_template(template, context) + \
            render_extra_options("inputs", "elasticsearch")
        hookenv.log("Updating {} plugin config file".format('elasticsearch'))
        host.write_file(config_path, input_config.encode('utf-8'))
        set_state('plugins.elasticsearch.configured')
    elif os.path.exists(config_path):
        os.unlink(config_path)
        remove_state('plugins.elasticsearch.configured')
Example #21
def mongodb_input(mongodb):
    template = """
[[inputs.mongodb]]
  servers = {{ servers }}
"""
    rels = hookenv.relations_of_type('mongodb')
    mongo_addresses = []
    for rel in rels:
        addr = rel['private-address']
        port = rel.get('port', None)
        if port:
            mongo_address = '{}:{}'.format(addr, port)
        else:
            mongo_address = addr
        mongo_addresses.append(mongo_address)
    config_path = '{}/{}.conf'.format(get_configs_dir(), 'mongodb')
    if mongo_addresses:
        context = {"servers": json.dumps(mongo_addresses)}
        input_config = render_template(template, context) + \
            render_extra_options("inputs", "mongodb")
        hookenv.log("Updating {} plugin config file".format('mongodb'))
        host.write_file(config_path, input_config.encode('utf-8'))
        set_state('plugins.mongodb.configured')
    elif os.path.exists(config_path):
        os.unlink(config_path)
Example #22
def configure_zookeeper(zks):
    hookenv.status_set('maintenance', 'Configuring Hue for Zookeeper')
    hue = Hue(get_dist_config())
    hue.configure_zookeeper(zks.zookeepers())
    hue.update_apps()
    hue.restart()
    set_state('zookeeper.configured')
def configure():
    dist_dir = node_dist_dir()
    user = get_user()

    if is_systemd():
        conf_path = SYSTEMD_PATH
        template_type = 'systemd'
    else:
        conf_path = UPSTART_PATH
        template_type = 'upstart'

    with maintenance_status('Generating {} configuration'.format(
                                template_type),
                            '{} configuration generated'.format(template_type)):
        config_ctx = hookenv.config()
        config_ctx['working_dir'] = dist_dir
        config_ctx['user'] = user
        config_ctx['npm_cache_path'] = get_cache(dist_dir, user)
        config_ctx['bin_path'] = get_bin_path(dist_dir)
        config_ctx['enable_failover'] = str(
            config_ctx['enable_failover']).lower()
        config_ctx['local_registry_or_host_uri'] = get_local_registry_or_host(
            uri=True)

        render(source='npm-offline-registry_{}.j2'.format(template_type),
               target=conf_path,
               owner='root',
               perms=0o744,
               context=config_ctx)
        set_state('npm-offline-registry.available')
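
maintenance_status() brackets the render with status messages. A sketch of such a context manager (assumed implementation; the real helper may instead restore the previous workload status on exit):

from contextlib import contextmanager

from charmhelpers.core import hookenv


@contextmanager
def maintenance_status(begin_msg, end_msg):
    """Set a maintenance status around a block of work (sketch)."""
    hookenv.status_set('maintenance', begin_msg)
    try:
        yield
    finally:
        hookenv.status_set('maintenance', end_msg)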
def restart_scheduler():
    prev_state, prev_msg = hookenv.status_get()
    hookenv.status_set('maintenance', 'Restarting kube-scheduler')
    host.service_restart('snap.kube-scheduler.daemon')
    hookenv.status_set(prev_state, prev_msg)
    remove_state('kube-scheduler.do-restart')
    set_state('kube-scheduler.started')
Example #25
def install_presentation():
    """Install and build the presentation."""

    opts = layer.options('git-deploy')

    # Clone repo
    hookenv.status_set('maintenance',
                       'Installing and building the presentation.')

    # Build and install
    with chdir(opts.get('target')):
        with open('requirements.txt', 'r') as f:
            for requirement in (line.strip() for line in f):
                if requirement:
                    pip_install(requirement)

        sphinx_build_cmd = 'sphinx-build -b html source %s' % opts.get('target')
        subprocess.call(sphinx_build_cmd.split(), shell=False)
    present_chown_cmd = 'chown -R www-data:www-data %s' % opts.get('target')
    subprocess.call(present_chown_cmd.split(), shell=False)

    # Configure nginx vhost
    configure_site('present', 'present.vhost', app_path=opts.get('target'))

    # Open presentation front-end port
    hookenv.open_port(config['port'])

    # Set status
    hookenv.status_set('active',
                       'Presentation is active on port %s' % config['port'])
    # Set state
    set_state('presentation.available')
Example #26
def send_principal_yarn_info(principal, resourcemanager):
    """Send YARN data when the resourcemanager becomes ready."""
    principal.set_installed(get_hadoop_version())
    principal.set_yarn_ready(
        resourcemanager.resourcemanagers(), resourcemanager.port(),
        resourcemanager.hs_http(), resourcemanager.hs_ipc())
    set_state('apache-bigtop-plugin.yarn.ready')
Example #27
def run_che():
    status_set('maintenance', 'Installing Eclipse Che')
    # Start and stop Che so Che's config is generated
    start_che()
    stop_che()
    # Add Juju stuff to Che config
    json_add_object_to_array(
        "{}/templates/stack-juju-charm.json".format(charm_dir()),
        "/home/ubuntu/instance/data/stacks/stacks.json"
    )
    copyfile(
        "{}/templates/type-juju.svg".format(charm_dir()),
        "/home/ubuntu/instance/data/stacks/images/type-juju.svg"
    )
    json_add_object_to_array(
        "{}/templates/project-template-charms.json".format(charm_dir()),
        "/home/ubuntu/instance/data/templates/samples.json"
    )
    json_add_object_to_array(
        "{}/templates/project-template-interface.json".format(charm_dir()),
        "/home/ubuntu/instance/data/templates/samples.json"
    )
    json_add_object_to_array(
        "{}/templates/project-template-layer.json".format(charm_dir()),
        "/home/ubuntu/instance/data/templates/samples.json"
    )
    # Start Che for real
    start_che()
    # Opened ports are used by `juju expose`, so it's important to open all
    # ports a user connects to.
    open_port('8080', protocol="TCP")           # Port to the UI
    open_port('32768-65535', protocol="TCP")    # Ports to the workspaces
    status_set('active', 'Ready (eclipse/che)')
    set_state('che.available')
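
json_add_object_to_array() merges each template object into an existing JSON array on disk. A minimal sketch of that helper:

import json


def json_add_object_to_array(obj_path, array_path):
    """Append the JSON object stored in `obj_path` to the array in `array_path`."""
    with open(obj_path) as f:
        obj = json.load(f)
    with open(array_path) as f:
        array = json.load(f)
    array.append(obj)
    with open(array_path, 'w') as f:
        json.dump(array, f, indent=2)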
Example #28
def install():
    hookenv.log('Installing isc-dhcp')
    fetch.apt_update()
    fetch.apt_install(fetch.filter_installed_packages(
        ['isc-dhcp-server', 'iptables-persistent']
    ))
    set_state('dhcp-server.installed')
Example #29
def setup_non_leader_authentication():

    service_key = '/root/cdk/serviceaccount.key'
    basic_auth = '/root/cdk/basic_auth.csv'
    known_tokens = '/root/cdk/known_tokens.csv'

    keys = [service_key, basic_auth, known_tokens]
    # The source of truth for non-leaders is the leader.
    # Therefore we overwrite_local with whatever the leader has.
    if not get_keys_from_leader(keys, overwrite_local=True):
        # the keys were not retrieved. Non-leaders have to retry.
        return

    if not any_file_changed(keys) and is_state('authentication.setup'):
        # No change detected and we have already setup the authentication
        return

    hookenv.status_set('maintenance', 'Rendering authentication templates.')
    api_opts = FlagManager('kube-apiserver')
    api_opts.add('basic-auth-file', basic_auth)
    api_opts.add('token-auth-file', known_tokens)
    api_opts.add('service-account-key-file', service_key)

    controller_opts = FlagManager('kube-controller-manager')
    controller_opts.add('service-account-private-key-file', service_key)

    remove_state('kubernetes-master.components.started')
    set_state('authentication.setup')
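
any_file_changed() is what lets non-leaders skip re-rendering when the leader's key material is unchanged. A sketch using content hashes compared against the previous run (charms.reactive persists these in the unit's kv store; a module-level dict stands in here):

import hashlib
import os

_hashes = {}  # stand-in for persistent storage


def any_file_changed(paths):
    """Return True if any of `paths` changed (or appeared/vanished) since last call."""
    changed = False
    for path in paths:
        if os.path.exists(path):
            with open(path, 'rb') as f:
                digest = hashlib.sha256(f.read()).hexdigest()
        else:
            digest = None
        if _hashes.get(path) != digest:
            changed = True
        _hashes[path] = digest
    return changed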
Example #30
def configure_extra_plugins():
    config = hookenv.config()
    plugins = config['extra_plugins']
    if plugins:
        config_path = '{}/extra_plugins.conf'.format(get_configs_dir())
        host.write_file(config_path, plugins.encode('utf-8'))
        set_state('extra_plugins.configured')
def stop_handler():

    # Set the user defined "stopping" state when this hook event occurs.
    set_state('tvault-contego.stopping')
Example #32
def handle_labels_changed():
    set_state('kubernetes-worker.label-config-required')
Example #33
def push_filebeat_index(elasticsearch):
    hosts = elasticsearch.list_unit_data()
    for host in hosts:
        host_string = "{}:{}".format(host['host'], host['port'])
        push_beat_index(host_string, 'filebeat')
    set_state('filebeat.index.pushed')
Example #34
def enlist_filebeat():
    enable_beat_on_boot('filebeat')
    set_state('filebeat.autostarted')
Example #35
def install():
    if not os.path.isdir(dhparam_dir):
        os.mkdir(dhparam_dir)
    shutil.copyfile(os.path.join(charm_dir(), 'files', dhparam),
                    os.path.join(dhparam_dir, dhparam))
    set_state('lets-encrypt-nginx.installed')
Example #36
def initial_nrpe_config(nagios=None):
    set_state('nrpe-external-master.initial-config')
    update_nrpe_config(nagios)
def stop_handler():

    # Set the user defined "stopping" state when this hook event occurs.
    set_state('trilio-horizon-plugin.stopping')
Example #38
def render_stuff(*args):
    hookenv.log("about to call the render_configs with {}".format(args))
    with charm.provide_charm_instance() as magnum_charm:
        magnum_charm.render_with_interfaces(charm.optional_interfaces(args))
        magnum_charm.assess_status()
    reactive.set_state('config.complete')
def puppet_masterless_ready():
    '''
    Set the `puppet.available` state so that other layers can
    gate puppet operations for masterless puppet state (unconfigured)
    '''
    set_state('puppet.available')
Example #40
def run_db_migration():
    magnum.db_sync()
    magnum.restart_all()
    reactive.set_state('db.synced')
    magnum.assess_status()
Example #41
def stop_handler():

    # Set the user defined "stopping" state when this hook event occurs.
    set_state('trilio-data-mover-api.stopping')
def puppet_agent_ready():
    '''
    Set the `puppet-agent.available` state to indicate the puppet agent
    is installed, configured, and started in master-client mode.
    '''
    set_state('puppet-agent.available')
def on_config_allow_privileged_change():
    """React to changed 'allow-privileged' config value.

    """
    set_state('kubernetes-worker.restart-needed')
    remove_state('config.changed.allow-privileged')
Example #44
def start_oozie(hadoop):
    oozie = Oozie(get_dist_config())
    oozie.open_ports()
    oozie.start()
    set_state('oozie.started')
    hookenv.status_set('active', 'Ready')
def extra_args_changed():
    set_state('kubernetes-worker.restart-needed')
Example #46
def install_package():
    """
    Install the base StorPool packages.
    """
    rdebug('the common repo has become available and '
           'we do have the configuration')

    rdebug('checking the kernel command line')
    with open('/proc/cmdline', mode='r') as f:
        ln = f.readline()
        if not ln:
            sputils.err('Could not read a single line from /proc/cmdline')
            return
        words = ln.split()

        # OK, so this is a bit naive, but it will do the job
        missing = list(
            filter(lambda param: param not in words, KERNEL_REQUIRED_PARAMS))
        if missing:
            if sputils.bypassed('kernel_parameters'):
                hookenv.log(
                    'The "kernel_parameters" bypass is meant FOR '
                    'DEVELOPMENT ONLY!  DO NOT run a StorPool cluster '
                    'in production with it!', hookenv.WARNING)
            else:
                sputils.err('Missing kernel parameters: {missing}'.format(
                    missing=' '.join(missing)))
                return

    spstatus.npset('maintenance', 'obtaining the requested StorPool version')
    spver = spconfig.m().get('storpool_version', None)
    if spver is None or spver == '':
        rdebug('no storpool_version key in the charm config yet')
        return

    spstatus.npset('maintenance', 'installing the StorPool common packages')
    (err, newly_installed) = sprepo.install_packages({
        'storpool-cli': spver,
        'storpool-common': spver,
        'storpool-etcfiles': spver,
        'kmod-storpool-' + os.uname().release: spver,
        'python-storpool': spver,
    })
    if err is not None:
        rdebug('oof, we could not install packages: {err}'.format(err=err))
        rdebug('removing the package-installed state')
        return

    if newly_installed:
        rdebug('it seems we managed to install some packages: {names}'.format(
            names=newly_installed))
        sprepo.record_packages('storpool-common', newly_installed)
    else:
        rdebug('it seems that all the packages were installed already')

    rdebug('updating the kernel module dependencies')
    spstatus.npset('maintenance', 'updating the kernel module dependencies')
    subprocess.check_call(['depmod', '-a'])

    rdebug('gathering CPU information for the cgroup configuration')
    with open('/proc/cpuinfo', mode='r') as f:
        lns = f.readlines()
        all_cpus = sorted(
            map(
                lambda lst: int(lst[2]),
                filter(lambda lst: lst and lst[0] == 'processor',
                       map(lambda s: s.split(), lns))))
    if sputils.bypassed('very_few_cpus'):
        hookenv.log(
            'The "very_few_cpus" bypass is meant '
            'FOR DEVELOPMENT ONLY!  DO NOT run a StorPool cluster in '
            'production with it!', hookenv.WARNING)
        last_cpu = all_cpus[-1]
        all_cpus.extend([last_cpu, last_cpu, last_cpu])
    if len(all_cpus) < 4:
        sputils.err('Not enough CPUs, need at least 4')
        return
    tdata = {
        'cpu_rdma': str(all_cpus[0]),
        'cpu_beacon': str(all_cpus[1]),
        'cpu_block': str(all_cpus[2]),
        'cpu_rest': '{min}-{max}'.format(min=all_cpus[3], max=all_cpus[-1]),
    }

    rdebug('gathering system memory information for the cgroup configuration')
    with open('/proc/meminfo', mode='r') as f:
        while True:
            line = f.readline()
            if not line:
                sputils.err('Could not find MemTotal in /proc/meminfo')
                return
            words = line.split()
            if words[0] == 'MemTotal:':
                mem_total = int(words[1])
                unit = words[2].upper()
                if unit.startswith('K'):
                    mem_total = int(mem_total / 1024)
                elif unit.startswith('M'):
                    pass
                elif unit.startswith('G'):
                    mem_total = mem_total * 1024
                else:
                    sputils.err('Could not parse the "{u}" unit for '
                                'MemTotal in /proc/meminfo'.format(u=words[2]))
                    return
                break
    mem_system = 4 * 1024
    mem_user = 4 * 1024
    mem_storpool = 1 * 1024
    mem_kernel = 10 * 1024
    if sputils.bypassed('very_little_memory'):
        hookenv.log(
            'The "very_little_memory" bypass is meant '
            'FOR DEVELOPMENT ONLY!  DO NOT run a StorPool cluster in '
            'production with it!', hookenv.WARNING)
        mem_system = 1 * 1900
        mem_user = 1 * 512
        mem_storpool = 1 * 1024
        mem_kernel = 1 * 512
    mem_reserved = mem_system + mem_user + mem_storpool + mem_kernel
    if mem_total <= mem_reserved:
        sputils.err(
            'Not enough memory, only have {total}M, need {mem}M'.format(
                mem=mem_reserved, total=mem_total))
        return
    mem_machine = mem_total - mem_reserved
    tdata.update({
        'mem_system': mem_system,
        'mem_user': mem_user,
        'mem_storpool': mem_storpool,
        'mem_machine': mem_machine,
    })

    rdebug('generating the cgroup configuration: {tdata}'.format(tdata=tdata))
    if not os.path.isdir('/etc/cgconfig.d'):
        os.mkdir('/etc/cgconfig.d', mode=0o755)
    cgconfig_dir = '/usr/share/doc/storpool/examples/cgconfig/ubuntu1604'
    for (path, _, files) in os.walk(cgconfig_dir):
        for fname in files:
            src = path + '/' + fname
            dst = src.replace(cgconfig_dir, '')
            dstdir = os.path.dirname(dst)
            if not os.path.isdir(dstdir):
                os.makedirs(dstdir, mode=0o755)

            if fname in (
                    'machine.slice.conf',
                    'storpool.slice.conf',
                    'system.slice.conf',
                    'user.slice.conf',
                    'machine-cgsetup.conf',
            ):
                with tempfile.NamedTemporaryFile(dir='/tmp',
                                                 mode='w+t',
                                                 delete=True) as tempf:
                    rdebug('- generating {tempf} for {dst}'.format(
                        dst=dst, tempf=tempf.name))
                    templating.render(
                        source=fname,
                        target=tempf.name,
                        owner='root',
                        perms=0o644,
                        context=tdata,
                    )
                    rdebug('- generating {dst}'.format(dst=dst))
                    txn.install('-o', 'root', '-g', 'root', '-m', '644', '--',
                                tempf.name, dst)
            else:
                mode = '{:o}'.format(os.stat(src).st_mode & 0o777)
                rdebug('- installing {src} as {dst}'.format(src=src, dst=dst))
                txn.install('-o', 'root', '-g', 'root', '-m', mode, '--', src,
                            dst)

    rdebug('starting the cgconfig service')
    rdebug('- refreshing the systemctl service database')
    subprocess.check_call(['systemctl', 'daemon-reload'])
    rdebug('- starting the cgconfig service')
    try:
        host.service_resume('cgconfig')
    except Exception:
        pass

    rdebug('setting the package-installed state')
    reactive.set_state('storpool-common.package-installed')
    spstatus.npset('maintenance', '')
Example #47
def start():
    host.service_start('tengu-api')
    set_state('tengu-api.started')
def launch_default_ingress_controller():
    ''' Launch the Kubernetes ingress controller & default backend (404) '''
    config = hookenv.config()

    # Check the config in case we got here from a config change to the image.
    if not config.get('ingress'):
        return

    context = {}
    context['arch'] = arch()
    addon_path = '/root/cdk/addons/{}'

    context['defaultbackend_image'] = config.get('default-backend-image')
    if (context['defaultbackend_image'] == "" or
       context['defaultbackend_image'] == "auto"):
        if context['arch'] == 's390x':
            context['defaultbackend_image'] = \
                "k8s.gcr.io/defaultbackend-s390x:1.4"
        else:
            context['defaultbackend_image'] = \
                "k8s.gcr.io/defaultbackend:1.4"

    # Render the default http backend (404) replicationcontroller manifest
    manifest = addon_path.format('default-http-backend.yaml')
    render('default-http-backend.yaml', manifest, context)
    hookenv.log('Creating the default http backend.')
    try:
        kubectl('apply', '-f', manifest)
    except CalledProcessError as e:
        hookenv.log(e)
        hookenv.log('Failed to create default-http-backend. Will attempt again next update.')  # noqa
        hookenv.close_port(80)
        hookenv.close_port(443)
        return

    # Render the ingress daemon set controller manifest
    context['ingress_image'] = config.get('nginx-image')
    if context['ingress_image'] == "" or context['ingress_image'] == "auto":
        if context['arch'] == 's390x':
            context['ingress_image'] = \
                "docker.io/cdkbot/nginx-ingress-controller-s390x:0.9.0-beta.13"
        else:
            context['ingress_image'] = \
                "k8s.gcr.io/nginx-ingress-controller:0.9.0-beta.15" # noqa
    context['juju_application'] = hookenv.service_name()
    manifest = addon_path.format('ingress-daemon-set.yaml')
    render('ingress-daemon-set.yaml', manifest, context)
    hookenv.log('Creating the ingress daemon set.')
    try:
        kubectl('apply', '-f', manifest)
    except CalledProcessError as e:
        hookenv.log(e)
        hookenv.log('Failed to create ingress controller. Will attempt again next update.')  # noqa
        hookenv.close_port(80)
        hookenv.close_port(443)
        return

    set_state('kubernetes-worker.ingress.available')
    hookenv.open_port(80)
    hookenv.open_port(443)
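
The try/except blocks above rely on kubectl() raising CalledProcessError on failure. A sketch of such a wrapper; the kubeconfig path is an assumption:

from subprocess import check_output


def kubectl(*args):
    """Run kubectl with the charm's kubeconfig and return its output.

    Raises CalledProcessError on a non-zero exit (sketch; path assumed).
    """
    cmd = ['kubectl', '--kubeconfig=/root/.kube/config'] + list(args)
    return check_output(cmd)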
Example #49
def make_default_cluster_available_handler():
    """Set the default cluster.available state so that the default handler in
    layer-openstack-api can run.
    """
    reactive.set_state('charms.openstack.do-default-cluster.available')
Example #50
def install():
    print('install')
    set_state('layered-charm.installed')
Example #51
def configure(pgsql, cache):
    db_name = hookenv.config('db_name')
    if pgsql.master is None or pgsql.master.dbname != db_name:
        hookenv.log('Database context not available yet; skipping')
        return
    environment = hookenv.config('environment')
    base_url = hookenv.config('base_url')
    session_secret = hookenv.config('session_secret')
    memcache_session_secret = hookenv.config('memcache_session_secret')
    sentry_dsn = hookenv.config('sentry_dsn')
    lp_api_username = hookenv.config('lp_api_username') or ''
    lp_api_consumer_key = hookenv.config('lp_api_consumer_key') or ''
    lp_api_token = hookenv.config('lp_api_token') or ''
    lp_api_token_secret = hookenv.config('lp_api_token_secret') or ''
    github_auth_client_id = hookenv.config('github_auth_client_id') or ''
    github_auth_client_secret = (hookenv.config('github_auth_client_secret')
                                 or '')
    github_webhook_secret = hookenv.config('github_webhook_secret') or ''
    http_proxy = hookenv.config('http_proxy') or ''
    trusted_networks = (hookenv.config('trusted_networks') or '').split()
    if session_secret and memcache_session_secret:
        render(source='knexfile.js.j2',
               target=KNEXFILE_NORMAL,
               context={
                   'node_env': get_node_env(environment),
                   'db_conn': pgsql.master.uri,
               })
        # XXX cjwatson 2017-03-08: Set NODE_ENV from here instead of in .env
        # files?  This may make more sense as part of entirely getting rid
        # of {staging,production}.env
        # (https://github.com/canonical-ols/build.snapcraft.io/issues/276).
        render(source='snap-build_systemd.j2',
               target=SYSTEMD_CONFIG,
               context={
                   'working_dir': code_dir(),
                   'user': user(),
                   'base_url': base_url,
                   'session_secret': session_secret,
                   'logs_path': logs_dir(),
                   'environment': environment,
                   'cache_hosts': sorted(cache.memcache_hosts()),
                   'memcache_session_secret': memcache_session_secret,
                   'sentry_dsn': sentry_dsn,
                   'lp_api_username': lp_api_username,
                   'lp_api_consumer_key': lp_api_consumer_key,
                   'lp_api_token': lp_api_token,
                   'lp_api_token_secret': lp_api_token_secret,
                   'github_auth_client_id': github_auth_client_id,
                   'github_auth_client_secret': github_auth_client_secret,
                   'github_webhook_secret': github_webhook_secret,
                   'knex_config_path': KNEXFILE_NORMAL,
                   'http_proxy': http_proxy,
                   'trusted_networks': trusted_networks,
               })
        check_call(['systemctl', 'enable', basename(SYSTEMD_CONFIG)])
        check_call(['systemctl', 'daemon-reload'])
        check_port('ols.{}.express'.format(service_name()), port())
        set_state('service.configured')
        hookenv.status_set('active', 'systemd unit configured')
    else:
        hookenv.status_set(
            'blocked', 'Service requires session_secret and '
            'memcache_session_secret to be set')
Example #52
def install():
    install_tengu_api()
    set_state('tengu-api.installed')
Example #53
def install_hive(hadoop):
    '''
    Anytime our dependencies are available, check to see if we have a valid
    reason to (re)install. These include:
    - initial install
    - HBase has joined/departed
    '''
    # Hive cannot handle - in the metastore db name and
    # mysql uses the service name to name the db
    if "-" in hookenv.service_name():
        hookenv.status_set('blocked', "application name may not contain '-'; "
                                      "redeploy with a different name")
        return

    # Get hbase connection dict if it's available
    if is_state('hbase.ready'):
        hbase = RelationBase.from_state('hbase.ready')
        hbserver = hbase.hbase_servers()[0]
    else:
        hbserver = None

    # Get zookeeper connection dict if it's available
    if is_state('zookeeper.ready'):
        zk = RelationBase.from_state('zookeeper.ready')
        zks = zk.zookeepers()
    else:
        zks = None

    # Use this to determine if we need to reinstall
    deployment_matrix = {
        'hbase': hbserver,
        'zookeepers': zks
    }

    # Handle nuances when installing versus re-installing
    if not is_state('hive.installed'):
        prefix = "installing"

        # On initial install, prime our kv with the current deployment matrix.
        # Subsequent calls will use this to determine if a reinstall is needed.
        data_changed('deployment_matrix', deployment_matrix)
    else:
        prefix = "configuring"

        # Return if our matrix has not changed
        if not data_changed('deployment_matrix', deployment_matrix):
            return

    hookenv.status_set('maintenance', '{} hive'.format(prefix))
    hookenv.log("{} hive with: {}".format(prefix, deployment_matrix))

    hive = Hive()
    hive.install(hbase=hbserver, zk_units=zks)
    hive.restart()
    hive.open_ports()
    set_state('hive.installed')
    report_status()

    # set app version string for juju status output
    hive_version = get_package_version('hive') or 'unknown'
    hookenv.application_version_set(hive_version)
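
get_package_version() feeds the application version string shown in juju status. A sketch assuming the helper resolves it via dpkg-query:

import subprocess


def get_package_version(package):
    """Return the installed version of a deb package, or None if not installed."""
    try:
        out = subprocess.check_output(
            ['dpkg-query', '-W', '-f=${Version}', package])
        return out.decode('utf-8').strip() or None
    except subprocess.CalledProcessError:
        return None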
Example #54
def make_default_config_rendered_handler():
    """Set the default config.rendered state so that the reactive handler runs.
    """
    reactive.set_state('charms.openstack.do-default-config-rendered')
Example #55
def nvidia_departed():
    """CUDA has departed, probably because the docker layer switched to a
    non-nvidia-docker runtime."""
    disable_gpu()
    remove_state('kubernetes-worker.gpu.enabled')
    set_state('kubernetes-worker.restart-needed')
Example #56
def render_config(*args):
    with charms_openstack.charm.provide_charm_instance() as watcher_charm:
        watcher_charm.render_with_interfaces(args)
        watcher_charm.assess_status()
    reactive.set_state('config.rendered')
def configure_apiserver(etcd_connection_string, leader_etcd_version):
    api_opts = {}

    # Get the tls paths from the layer data.
    layer_options = layer.options('tls-client')
    ca_cert_path = layer_options.get('ca_certificate_path')
    client_cert_path = layer_options.get('client_certificate_path')
    client_key_path = layer_options.get('client_key_path')
    server_cert_path = layer_options.get('server_certificate_path')
    server_key_path = layer_options.get('server_key_path')

    # At one point in time, this code would set client-ca-file to the CA
    # cert, but that was removed. This was before configure_kubernetes_service
    # kept track of old arguments and removed them, so client-ca-file was
    # able to hang around forever, stored in the snap configuration. Setting
    # it to 'null' removes that stale configuration from the snap if it
    # still exists.
    api_opts['client-ca-file'] = 'null'

    if is_privileged():
        api_opts['allow-privileged'] = 'true'
        set_state('kubernetes-master.privileged')
    else:
        api_opts['allow-privileged'] = 'false'
        remove_state('kubernetes-master.privileged')

    # Handle static options for now
    api_opts['service-cluster-ip-range'] = service_cidr()
    api_opts['min-request-timeout'] = '300'
    api_opts['v'] = '4'
    api_opts['tls-cert-file'] = server_cert_path
    api_opts['tls-private-key-file'] = server_key_path
    api_opts['kubelet-certificate-authority'] = ca_cert_path
    api_opts['kubelet-client-certificate'] = client_cert_path
    api_opts['kubelet-client-key'] = client_key_path
    api_opts['logtostderr'] = 'true'
    api_opts['insecure-bind-address'] = '127.0.0.1'
    api_opts['insecure-port'] = '8080'
    api_opts['storage-backend'] = leader_etcd_version
    api_opts['basic-auth-file'] = '/root/cdk/basic_auth.csv'
    api_opts['token-auth-file'] = '/root/cdk/known_tokens.csv'
    api_opts['service-account-key-file'] = '/root/cdk/serviceaccount.key'
    api_opts['kubelet-preferred-address-types'] = \
        '[InternalIP,Hostname,InternalDNS,ExternalDNS,ExternalIP]'

    etcd_dir = '/root/cdk/etcd'
    etcd_ca = os.path.join(etcd_dir, 'client-ca.pem')
    etcd_key = os.path.join(etcd_dir, 'client-key.pem')
    etcd_cert = os.path.join(etcd_dir, 'client-cert.pem')

    api_opts['etcd-cafile'] = etcd_ca
    api_opts['etcd-keyfile'] = etcd_key
    api_opts['etcd-certfile'] = etcd_cert
    api_opts['etcd-servers'] = etcd_connection_string

    admission_control_pre_1_9 = [
        'Initializers', 'NamespaceLifecycle', 'LimitRanger', 'ServiceAccount',
        'ResourceQuota', 'DefaultTolerationSeconds'
    ]

    admission_control = [
        'NamespaceLifecycle', 'LimitRanger', 'ServiceAccount',
        'PersistentVolumeLabel', 'DefaultStorageClass',
        'DefaultTolerationSeconds', 'MutatingAdmissionWebhook',
        'ValidatingAdmissionWebhook', 'ResourceQuota'
    ]

    auth_mode = hookenv.config('authorization-mode')
    if 'Node' in auth_mode:
        admission_control.append('NodeRestriction')

    api_opts['authorization-mode'] = auth_mode

    kube_version = get_version('kube-apiserver')
    if kube_version < (1, 6):
        hookenv.log('Removing DefaultTolerationSeconds from admission-control')
        admission_control_pre_1_9.remove('DefaultTolerationSeconds')
    if kube_version < (1, 7):
        hookenv.log('Removing Initializers from admission-control')
        admission_control_pre_1_9.remove('Initializers')
    if kube_version < (1, 9):
        api_opts['admission-control'] = ','.join(admission_control_pre_1_9)
    else:
        api_opts['admission-control'] = ','.join(admission_control)

    configure_kubernetes_service('kube-apiserver', api_opts, 'api-extra-args')
    restart_apiserver()
def start_zookeeper():
    zk = Zookeeper()
    zk.start()
    zk.open_ports()
    set_state('zookeeper.started')
    hookenv.status_set('active', 'Ready')
Example #59
def configure_apiserver():
    # TODO: investigate if it's possible to use config file to store args
    # https://github.com/juju-solutions/bundle-canonical-kubernetes/issues/315
    # Handle api-extra-args config option
    to_add, to_remove = get_config_args()

    api_opts = FlagManager('kube-apiserver')

    # Remove arguments that are no longer provided as config option
    # this allows them to be reverted to charm defaults
    for arg in to_remove:
        hookenv.log('Removing option: {}'.format(arg))
        api_opts.destroy(arg)
        # We need to "unset" options by settig their value to "null" string
        cmd = ['snap', 'set', 'kube-apiserver', '{}=null'.format(arg)]
        check_call(cmd)

    # Get the tls paths from the layer data.
    layer_options = layer.options('tls-client')
    ca_cert_path = layer_options.get('ca_certificate_path')
    client_cert_path = layer_options.get('client_certificate_path')
    client_key_path = layer_options.get('client_key_path')
    server_cert_path = layer_options.get('server_certificate_path')
    server_key_path = layer_options.get('server_key_path')

    if is_privileged():
        api_opts.add('allow-privileged', 'true', strict=True)
        set_state('kubernetes-master.privileged')
    else:
        api_opts.add('allow-privileged', 'false', strict=True)
        remove_state('kubernetes-master.privileged')

    # Handle static options for now
    api_opts.add('service-cluster-ip-range', service_cidr())
    api_opts.add('min-request-timeout', '300')
    api_opts.add('v', '4')
    api_opts.add('tls-cert-file', server_cert_path)
    api_opts.add('tls-private-key-file', server_key_path)
    api_opts.add('kubelet-certificate-authority', ca_cert_path)
    api_opts.add('kubelet-client-certificate', client_cert_path)
    api_opts.add('kubelet-client-key', client_key_path)
    api_opts.add('logtostderr', 'true')
    api_opts.add('insecure-bind-address', '127.0.0.1')
    api_opts.add('insecure-port', '8080')
    api_opts.add('storage-backend', 'etcd2')  # FIXME: add etcd3 support

    admission_control = [
        'Initializers', 'NamespaceLifecycle', 'LimitRanger', 'ServiceAccount',
        'ResourceQuota', 'DefaultTolerationSeconds'
    ]

    auth_mode = hookenv.config('authorization-mode')
    if 'Node' in auth_mode:
        admission_control.append('NodeRestriction')

    api_opts.add('authorization-mode', auth_mode, strict=True)

    if get_version('kube-apiserver') < (1, 6):
        hookenv.log('Removing DefaultTolerationSeconds from admission-control')
        admission_control.remove('DefaultTolerationSeconds')
    if get_version('kube-apiserver') < (1, 7):
        hookenv.log('Removing Initializers from admission-control')
        admission_control.remove('Initializers')
    api_opts.add('admission-control', ','.join(admission_control), strict=True)

    # Add operator-provided arguments, this allows operators
    # to override defaults
    for arg in to_add:
        hookenv.log('Adding option: {} {}'.format(arg[0], arg[1]))
        # Make sure old value is gone
        api_opts.destroy(arg[0])
        api_opts.add(arg[0], arg[1])

    cmd = ['snap', 'set', 'kube-apiserver'] + api_opts.to_s().split(' ')
    check_call(cmd)
    set_state('kube-apiserver.do-restart')
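
FlagManager accumulates daemon arguments that to_s() flattens into the `snap set` call above. A minimal in-memory sketch inferred from this usage; the real class also persists flags between hook invocations:

class FlagManager:
    """Collect key/value daemon options (sketch inferred from usage above)."""

    def __init__(self, daemon):
        self.daemon = daemon
        self.data = {}

    def add(self, key, value, strict=False):
        # `strict` governs replacement semantics in the real class; this
        # sketch simply overwrites any existing value.
        self.data[key] = value

    def destroy(self, key):
        self.data.pop(key, None)

    def to_s(self):
        # Render as the space-separated key=value list passed to `snap set`.
        return ' '.join(
            '{}={}'.format(k, v) for k, v in sorted(self.data.items()))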
def ceph_storage(ceph_admin):
    '''Ceph on kubernetes will require a few things - namely a ceph
    configuration, and the ceph secret key file used for authentication.
    This method will install the client package, and render the requisite
    files in order to consume the ceph-storage relation.'''
    ceph_context = {
        'mon_hosts': ceph_admin.mon_hosts(),
        'fsid': ceph_admin.fsid(),
        'auth_supported': ceph_admin.auth(),
        'use_syslog': "true",
        'ceph_public_network': '',
        'ceph_cluster_network': '',
        'loglevel': 1,
        'hostname': socket.gethostname(),
    }
    # Install the ceph common utilities.
    apt_install(['ceph-common'], fatal=True)

    etc_ceph_directory = '/etc/ceph'
    if not os.path.isdir(etc_ceph_directory):
        os.makedirs(etc_ceph_directory)
    charm_ceph_conf = os.path.join(etc_ceph_directory, 'ceph.conf')
    # Render the ceph configuration from the ceph conf template
    render('ceph.conf', charm_ceph_conf, ceph_context)

    # The key can rotate independently of other ceph config, so validate it
    admin_key = os.path.join(etc_ceph_directory, 'ceph.client.admin.keyring')
    try:
        with open(admin_key, 'w') as key_file:
            key_file.write("[client.admin]\n\tkey = {}\n".format(
                ceph_admin.key()))
    except IOError as err:
        hookenv.log("IOError writing admin.keyring: {}".format(err))

    # Enlist the ceph-admin key as a kubernetes secret
    if ceph_admin.key():
        encoded_key = base64.b64encode(ceph_admin.key().encode('utf-8'))
    else:
        # We didn't have a key, and cannot proceed. Do not set state and
        # allow this method to re-execute
        return

    context = {'secret': encoded_key.decode('ascii')}
    render('ceph-secret.yaml', '/tmp/ceph-secret.yaml', context)
    try:
        # At first glance this is deceptive. The apply stanza will create if
        # it doesn't exist, otherwise it will update the entry, ensuring our
        # ceph-secret is always reflective of what we have in /etc/ceph
        # assuming we have invoked this anytime that file would change.
        cmd = ['kubectl', 'apply', '-f', '/tmp/ceph-secret.yaml']
        check_call(cmd)
        os.remove('/tmp/ceph-secret.yaml')
    except:  # NOQA
        # the enlistment in kubernetes failed, return and prepare for re-exec
        return

    # when complete, set a state relating to configuration of the storage
    # backend that will allow other modules to hook into this and verify we
    # have performed the necessary pre-req steps to interface with a ceph
    # deployment.
    set_state('ceph-storage.configured')