Example #1
def check_running(java=None):
    if data_changed('pdi.url', hookenv.config('pdi_url')):
        stop()
        remove()
        install()

    if data_changed('carte_password', hookenv.config('carte_password')):
        change_carte_password(hookenv.config('carte_password'))

    # Evaluate data_changed once; calling it twice with the same key would
    # update the stored hash on the first call and mask the change.
    pdi_config_changed = data_changed('pdi.config', hookenv.config())
    if pdi_config_changed and hookenv.config('run_carte'):
        log("config changed, carte needs to be restarted")
        restart(None)
    elif pdi_config_changed and hookenv.config('run_carte') is False:
        log("config changed, carte needs to be stopped if running")
        stop()
        status_set('active', 'PDI Installed. Carte Server Disabled.')
    elif hookenv.config('run_carte'):
        log("carte should be running")
        start()
    elif hookenv.config('run_carte') is False:
        log("carte should be stopped")
        stop()
        status_set('active', 'PDI Installed. Carte Server Disabled.')
def log_java_details(java):
    """Log pertinent Java-related details."""
    if (data_changed('java.version', java.java_version())
            or data_changed('java.home', java.java_home())):

        log("Java (%s) using JAVA_HOME: '%s'" %
            (java.java_version(), java.java_home()))
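
Every handler on this page leans on the same contract from charms.reactive: data_changed(key, value) returns True only when the value differs from whatever was last recorded for that key, which is also why several examples call it once just to "prime" the stored value. The snippet below is a minimal sketch of that behaviour, not the actual charms.reactive.helpers source; the function name, the kv key prefix, and the fixed md5 hash are assumptions made for illustration.

# Simplified sketch of the data_changed contract assumed by these examples.
# NOT the charms.reactive.helpers implementation; the real helper also lets
# callers choose the hash algorithm.
import hashlib
import json

from charmhelpers.core import unitdata


def data_changed_sketch(data_id, data):
    """Return True when `data` differs from the value last seen for `data_id`.

    The hash of the JSON-serialised value is persisted in unitdata.kv(), so
    the comparison survives across hook invocations. Calling this once with
    the current value ("priming") records it without acting on it.
    """
    key = 'sketch.data_changed.%s' % data_id  # assumed key prefix
    new_hash = hashlib.md5(
        json.dumps(data, sort_keys=True).encode('utf8')).hexdigest()
    kv = unitdata.kv()
    old_hash = kv.get(key)
    kv.set(key, new_hash)
    return old_hash != new_hash
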
Example #3
def persist_state():
    """Fake persistent state by calling helpers that modify unitdata.kv"""
    states = [k for k in bus.get_states().keys()
              if k.startswith('plugins') or k.startswith('extra_plugins')]
    helpers.any_file_changed(telegraf.list_config_files())
    if states:
        helpers.data_changed('active_plugins', states)
Example #4
def start_worker(kube_api, kube_control, auth_control, cni):
    ''' Start kubelet using the provided API and DNS info.'''
    servers = get_kube_api_servers(kube_api)
    # Note that the DNS server doesn't necessarily exist at this point. We know
    # what its IP will eventually be, though, so we can go ahead and configure
    # kubelet with that info. This ensures that early pods are configured with
    # the correct DNS even though the server isn't ready yet.

    dns = kube_control.get_dns()
    ingress_ip = get_ingress_address(kube_control)
    cluster_cidr = cni.get_config()['cidr']

    if cluster_cidr is None:
        hookenv.log('Waiting for cluster cidr.')
        return

    creds = db.get('credentials')
    data_changed('kube-control.creds', creds)

    create_config(random.choice(servers), creds)
    configure_kubelet(dns, ingress_ip)
    configure_kube_proxy(servers, cluster_cidr)
    set_state('kubernetes-worker.config.created')
    restart_unit_services()
    update_kubelet_status()
    set_state('kubernetes-worker.label-config-required')
    remove_state('kubernetes-worker.restart-needed')
def start_worker(kube_api, kube_control, cni):
    ''' Start kubelet using the provided API and DNS info.'''
    config = hookenv.config()
    servers = get_kube_api_servers(kube_api)
    # Note that the DNS server doesn't necessarily exist at this point. We know
    # what its IP will eventually be, though, so we can go ahead and configure
    # kubelet with that info. This ensures that early pods are configured with
    # the correct DNS even though the server isn't ready yet.

    dns = kube_control.get_dns()

    if (data_changed('kube-api-servers', servers) or
            data_changed('kube-dns', dns)):

        # Create FlagManager for kubelet and add dns flags
        opts = FlagManager('kubelet')
        opts.add('--cluster-dns', dns['sdn-ip'])  # FIXME sdn-ip needs a rename
        opts.add('--cluster-domain', dns['domain'])

        # Create FlagManager for KUBE_MASTER and add api server addresses
        kube_master_opts = FlagManager('KUBE_MASTER')
        kube_master_opts.add('--master', ','.join(servers))

        # set --allow-privileged flag for kubelet
        set_privileged(
            "true" if config['allow-privileged'] == "true" else "false",
            render_config=False)

        create_config(servers[0])
        render_init_scripts()
        set_state('kubernetes-worker.config.created')
        restart_unit_services()
        update_kubelet_status()
Example #6
def start_worker(kube_api, kube_control, auth_control, cni):
    ''' Start kubelet using the provided API and DNS info.'''
    servers = get_kube_api_servers(kube_api)
    # Note that the DNS server doesn't necessarily exist at this point. We know
    # what its IP will eventually be, though, so we can go ahead and configure
    # kubelet with that info. This ensures that early pods are configured with
    # the correct DNS even though the server isn't ready yet.

    dns = kube_control.get_dns()
    cluster_cidr = cni.get_config()['cidr']

    if cluster_cidr is None:
        hookenv.log('Waiting for cluster cidr.')
        return

    nodeuser = '******'.format(gethostname())
    creds = kube_control.get_auth_credentials(nodeuser)
    data_changed('kube-control.creds', creds)

    # set --allow-privileged flag for kubelet
    set_privileged()

    create_config(random.choice(servers), creds)
    configure_worker_services(servers, dns, cluster_cidr)
    set_state('kubernetes-worker.config.created')
    restart_unit_services()
    update_kubelet_status()
    apply_node_labels()
    remove_state('kubernetes-worker.restart-needed')
Example #7
def unconfigure_spark():
    hookenv.status_set("maintenance", "removing spark relation")
    zeppelin = Zeppelin()
    # Yarn / Hadoop may not actually be available, but that is the default
    # value and nothing else would reasonably work here either without Spark.
    zeppelin.configure_spark("yarn-client")
    data_changed("spark.master", "yarn-client")  # ensure updated if re-added
    remove_state("zeppelin.spark.configured")
    update_status()
Example #8
def disable_zookeepers():
    hookenv.status_set('maintenance', 'Disabling high availability')
    data_changed('available.zookeepers', None)
    spark = Spark(get_dist_config())
    spark.stop()
    spark.disable_ha()
    spark.configure()
    spark.start()
    remove_state('zookeeper.configured')
    report_status(spark)
Example #9
def watch_for_changes(kube_api, kube_control, cni):
    ''' Watch for configuration changes and signal if we need to restart the
    worker services '''
    servers = get_kube_api_servers(kube_api)
    dns = kube_control.get_dns()
    cluster_cidr = cni.get_config()['cidr']

    if (data_changed('kube-api-servers', servers) or
            data_changed('kube-dns', dns) or
            data_changed('cluster-cidr', cluster_cidr)):

        set_state('kubernetes-worker.restart-needed')
Example #10
def install_hive(hadoop):
    '''
    Anytime our dependencies are available, check to see if we have a valid
    reason to (re)install. These include:
    - initial install
    - HBase has joined/departed
    '''
    # Hive cannot handle - in the metastore db name and
    # mysql uses the service name to name the db
    if "-" in hookenv.service_name():
        hookenv.status_set('blocked', "application name may not contain '-'; "
                                      "redeploy with a different name")
        return

    # Get hbase connection dict if it's available
    if is_state('hbase.ready'):
        hbase = RelationBase.from_state('hbase.ready')
        hbserver = hbase.hbase_servers()[0]
    else:
        hbserver = None

    # Use this to determine if we need to reinstall
    deployment_matrix = {
        'hbase': hbserver,
    }

    # Handle nuances when installing versus re-installing
    if not is_state('hive.installed'):
        prefix = "installing"

        # On initial install, prime our kv with the current deployment matrix.
        # Subsequent calls will use this to determine if a reinstall is needed.
        data_changed('deployment_matrix', deployment_matrix)
    else:
        prefix = "configuring"

        # Return if our matrix has not changed
        if not data_changed('deployment_matrix', deployment_matrix):
            return

    hookenv.status_set('maintenance', '{} hive'.format(prefix))
    hookenv.log("{} hive with: {}".format(prefix, deployment_matrix))
    hive = Hive()
    hive.install(hbase=hbserver)
    hive.restart()
    hive.open_ports()
    set_state('hive.installed')
    report_status()

    # set app version string for juju status output
    hive_version = get_package_version('hive') or 'unknown'
    hookenv.application_version_set(hive_version)
Example #11
def configure_kafka(zk):
    hookenv.status_set('maintenance', 'setting up kafka')
    data_changed(  # Prime data changed for network interface
        'kafka.network_interface', hookenv.config().get('network_interface'))
    kafka = Kafka()
    zks = zk.zookeepers()
    kafka.configure_kafka(zks)
    kafka.open_ports()
    set_state('kafka.started')
    hookenv.status_set('active', 'ready')
    # set app version string for juju status output
    kafka_version = get_package_version('kafka') or 'unknown'
    hookenv.application_version_set(kafka_version)
Example #12
def install_zookeeper():
    '''
    After Bigtop has done the initial setup, trigger a puppet install
    via our Zookeeper library.

    Puppet will start the service as a side effect.

    '''
    hookenv.status_set('maintenance', 'installing zookeeper')
    zookeeper = Zookeeper()
    # Prime data changed
    data_changed('zkpeer.nodes', zookeeper.read_peers())
    data_changed(
        'zk.network_interface',
        hookenv.config().get('network_interface'))
    data_changed(
        'zk.autopurge_purge_interval',
        hookenv.config().get('autopurge_purge_interval'))
    data_changed(
        'zk.autopurge_snap_retain_count',
        hookenv.config().get('autopurge_snap_retain_count'))
    zookeeper.install()
    zookeeper.open_ports()
    set_state('zookeeper.installed')
    set_state('zookeeper.started')
    hookenv.status_set('active', 'ready {}'.format(zookeeper.quorum_check()))
    # set app version string for juju status output
    zoo_version = get_package_version('zookeeper') or 'unknown'
    hookenv.application_version_set(zoo_version)
def send_info(datanode):
    hadoop = get_hadoop_base()
    hdfs = HDFS(hadoop)
    local_hostname = hookenv.local_unit().replace("/", "-")
    hdfs_port = hadoop.dist_config.port("namenode")
    webhdfs_port = hadoop.dist_config.port("nn_webapp_http")

    utils.update_kv_hosts({node["ip"]: node["host"] for node in datanode.nodes()})
    utils.manage_etc_hosts()

    datanode.send_spec(hadoop.spec())
    datanode.send_namenodes([local_hostname])
    datanode.send_ports(hdfs_port, webhdfs_port)
    datanode.send_ssh_key(utils.get_ssh_key("hdfs"))
    datanode.send_hosts_map(utils.get_kv_hosts())

    slaves = [node["host"] for node in datanode.nodes()]
    if data_changed("namenode.slaves", slaves):
        unitdata.kv().set("namenode.slaves", slaves)
        hdfs.register_slaves(slaves)

    hookenv.status_set(
        "active", "Ready ({count} DataNode{s})".format(count=len(slaves), s="s" if len(slaves) > 1 else "")
    )
    set_state("namenode.ready")
def restart_zookeeper_if_config_changed():
    """Restart Zookeeper if zoo.cfg has changed.

    As peers come and go, zoo.cfg will be updated. When that file changes,
    restart the Zookeeper service and set an appropriate status message.
    """

    # Possibly update bind address
    network_interface = hookenv.config().get('network_interface')
    if data_changed("zookeeper.bind_address", network_interface):
        zk = Zookeeper()
        zk.update_bind_address()

    zoo_cfg = DistConfig().path('zookeeper_conf') / 'zoo.cfg'
    if any_file_changed([zoo_cfg]):
        hookenv.status_set('maintenance', 'Server config changed: restarting Zookeeper')
        zk = Zookeeper()
        zk.stop()
        zk.start()
        zk_count = int(zk.get_zk_count())
        extra_status = ""
        if zk_count < 3:
            extra_status = ": less than 3 is suboptimal"
        elif (zk_count % 2 == 0):
            extra_status = ": even number is suboptimal"
        hookenv.status_set('active', 'Ready (%d zk units%s)' % (zk_count, extra_status))
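
The handler above pairs data_changed with its file-based sibling, any_file_changed, which the other examples also use to persist file hashes across hooks. A minimal sketch of that idea follows, again assuming the helper behaves the way its callers here expect; the function name, kv key prefix, and "missing" sentinel are invented for the example.

# Simplified sketch of an any_file_changed-style helper; not the actual
# charms.reactive implementation. A missing file hashes to a sentinel value
# so that a file appearing or disappearing also counts as a change.
import hashlib
import os

from charmhelpers.core import unitdata


def any_file_changed_sketch(filenames):
    """Return True if any of the given files changed since the last call."""
    kv = unitdata.kv()
    changed = False
    for filename in filenames:
        key = 'sketch.file_changed.%s' % filename  # assumed key prefix
        if os.path.exists(filename):
            with open(filename, 'rb') as f:
                new_hash = hashlib.md5(f.read()).hexdigest()
        else:
            new_hash = 'missing'
        if kv.get(key) != new_hash:
            changed = True
        kv.set(key, new_hash)
    return changed
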
Example #15
def install_spark_standalone(zks, peers):
    """
    Called in local/standalone mode after Juju has elected a leader.
    """
    hosts = {
        'spark-master': leadership.leader_get('master-fqdn'),
    }

    # If zks have changed and we are not handling a departed spark peer,
    # give the ensemble time to settle. Otherwise we might try to start
    # spark master with data from the wrong zk leader. Doing so will cause
    # spark-master to shutdown:
    #  https://issues.apache.org/jira/browse/SPARK-15544
    if (zks and data_changed('zks', zks) and not is_state('sparkpeers.departed')):
        hookenv.status_set('maintenance',
                           'waiting for zookeeper ensemble to settle')
        hookenv.log("Waiting 2m to ensure zk ensemble has settled: {}".format(zks))
        time.sleep(120)

    # Let spark know if we have cuda libs installed.
    # NB: spark packages prereq hadoop (boo), so even in standalone mode, we'll
    # have hadoop libs installed. May as well include them in our lib path.
    extra_libs = ["/usr/lib/hadoop/lib/native"]
    if is_state('cuda.installed'):
        extra_libs.append("/usr/local/cuda/lib64")

    spark = Spark()
    spark.configure(hosts, zk_units=zks, peers=peers, extra_libs=extra_libs)
    set_deployment_mode_state('spark.standalone.installed')
Example #16
def configure_jupyter_notebook():
    conf = hookenv.config()
    jupyter_dir = '/opt/jupyter'
    port = conf['open-port']
    # Get or create and get password
    kv_store = unitdata.kv()
    password = kv_store.get('password')
    if not password:
        password = generate_password()
        kv_store.set('password', password)
    # Convert to string because some functions can't handle kv object type.
    password = str(password)
    password_hash = generate_hash(password)
    context = {
        'port': port,
        'password_hash': password_hash,
    }
    if data_changed('jupyter-conf', context):
        # Create config directory and render config file
        host.mkdir(jupyter_dir)
        templating.render(
            source='jupyter_notebook_config.py.jinja2',
            target=jupyter_dir + '/jupyter_notebook_config.py',
            context=context
        )
        # Generate upstart template / service file
        context = {}
        if lsb_release.get_lsb_information()['RELEASE'] == "14.04":
            render_api_upstart_template(context)
        else:
            render_api_systemd_template(context)
        restart_notebook()
    chownr(jupyter_dir, 'ubuntu', 'ubuntu', chowntopdir=True)
def send_info(nodemanager):
    hadoop = get_hadoop_base()
    yarn = YARN(hadoop)
    local_hostname = hookenv.local_unit().replace('/', '-')
    port = hadoop.dist_config.port('resourcemanager')
    hs_http = hadoop.dist_config.port('jh_webapp_http')
    hs_ipc = hadoop.dist_config.port('jobhistory')

    utils.update_kv_hosts(nodemanager.hosts_map())
    utils.manage_etc_hosts()

    nodemanager.send_spec(hadoop.spec())
    nodemanager.send_resourcemanagers([local_hostname])
    nodemanager.send_ports(port, hs_http, hs_ipc)
    nodemanager.send_ssh_key(utils.get_ssh_key('yarn'))
    nodemanager.send_hosts_map(utils.get_kv_hosts())

    slaves = nodemanager.nodes()
    if data_changed('resourcemanager.slaves', slaves):
        unitdata.kv().set('resourcemanager.slaves', slaves)
        yarn.register_slaves(slaves)

    hookenv.status_set('active', 'Ready ({count} NodeManager{s})'.format(
        count=len(slaves),
        s='s' if len(slaves) > 1 else '',
    ))
    set_state('resourcemanager.ready')
def send_info(datanode):
    hadoop = get_hadoop_base()
    hdfs = HDFS(hadoop)
    local_hostname = hookenv.local_unit().replace('/', '-')
    hdfs_port = hadoop.dist_config.port('namenode')
    webhdfs_port = hadoop.dist_config.port('nn_webapp_http')

    utils.update_kv_hosts(datanode.hosts_map())
    utils.manage_etc_hosts()

    datanode.send_spec(hadoop.spec())
    datanode.send_namenodes([local_hostname])
    datanode.send_ports(hdfs_port, webhdfs_port)
    datanode.send_ssh_key(utils.get_ssh_key('hdfs'))
    datanode.send_hosts_map(utils.get_kv_hosts())

    slaves = datanode.nodes()
    if data_changed('namenode.slaves', slaves):
        unitdata.kv().set('namenode.slaves', slaves)
        hdfs.register_slaves(slaves)
        hdfs.refresh_slaves()

    hookenv.status_set('active', 'Ready ({count} DataNode{s})'.format(
        count=len(slaves),
        s='s' if len(slaves) > 1 else '',
    ))
    set_state('namenode.ready')
def install_nodemanager(namenode, resourcemanager):
    """Install if we have FQDNs.

    We only need the master FQDNs to perform the nodemanager install, so poll
    for master host data from the appropriate relation. This allows us to
    install asap, even if '<master>.ready' is not set.
    """
    namenodes = namenode.namenodes()
    resourcemanagers = resourcemanager.resourcemanagers()
    masters = namenodes + resourcemanagers
    if namenodes and resourcemanagers and data_changed('nm.masters', masters):
        installed = is_state('apache-bigtop-nodemanager.installed')
        action = 'installing' if not installed else 'configuring'
        hookenv.status_set('maintenance', '%s nodemanager' % action)
        bigtop = Bigtop()
        bigtop.render_site_yaml(
            hosts={
                'namenode': namenodes[0],
                'resourcemanager': resourcemanagers[0],
            },
            roles=[
                'nodemanager',
                'mapred-app',
            ],
        )
        bigtop.queue_puppet()
        set_state('apache-bigtop-nodemanager.pending')
Example #20
def build_kubeconfig(server):
    '''Gather the relevant data for Kubernetes configuration objects and create
    a config object with that information.'''
    # Get the options from the tls-client layer.
    layer_options = layer.options('tls-client')
    # Get all the paths to the tls information required for kubeconfig.
    ca = layer_options.get('ca_certificate_path')
    ca_exists = ca and os.path.isfile(ca)
    key = layer_options.get('client_key_path')
    key_exists = key and os.path.isfile(key)
    cert = layer_options.get('client_certificate_path')
    cert_exists = cert and os.path.isfile(cert)
    # Do we have everything we need?
    if ca_exists and key_exists and cert_exists:
        # Cache last server string to know if we need to regenerate the config.
        if not data_changed('kubeconfig.server', server):
            return
        # The final destination of the kubeconfig and kubectl.
        destination_directory = '/home/ubuntu'
        # Create an absolute path for the kubeconfig file.
        kubeconfig_path = os.path.join(destination_directory, 'config')
        # Create the kubeconfig on this system so users can access the cluster.
        create_kubeconfig(kubeconfig_path, server, ca, key, cert)
        # Copy the kubectl binary to the destination directory.
        cmd = ['install', '-v', '-o', 'ubuntu', '-g', 'ubuntu',
               '/usr/local/bin/kubectl', destination_directory]
        check_call(cmd)
        # Make the config file readable by the ubuntu users so juju scp works.
        cmd = ['chown', 'ubuntu:ubuntu', kubeconfig_path]
        check_call(cmd)
Example #21
def update_snap_proxy():
    # This is a hack based on
    # https://bugs.launchpad.net/layer-snap/+bug/1533899/comments/1
    # Do it properly when Bug #1533899 is addressed.
    # Note we can't do this in a standard reactive handler as we need
    # to ensure proxies are configured before attempting installs or
    # updates.
    proxy = proxy_settings()

    path = '/etc/systemd/system/snapd.service.d/snap_layer_proxy.conf'
    if not proxy and not os.path.exists(path):
        return  # No proxy asked for and proxy never configured.

    if not data_changed('snap.proxy', proxy):
        return  # Short circuit avoids unnecessary restarts.

    if proxy:
        create_snap_proxy_conf(path, proxy)
    else:
        remove_snap_proxy_conf(path)
    subprocess.check_call(['systemctl', 'daemon-reload'],
                          universal_newlines=True)
    time.sleep(2)
    subprocess.check_call(['systemctl', 'restart', 'snapd.service'],
                          universal_newlines=True)
Example #22
def reinstall_spark():
    spark_master_host = leadership.leader_get('master-fqdn')
    peers = []
    zks = []
    if is_state('zookeeper.ready'):
        # If ZK is available we are in HA. We do not want reconfigurations
        # if a leader fails; HA takes care of this.
        spark_master_host = ''
        zk = RelationBase.from_state('zookeeper.ready')
        zks = zk.zookeepers()
        # We need to reconfigure Spark when in HA and peers change;
        # ignore otherwise.
        peers = get_spark_peers()

    deployment_matrix = {
        'spark_master': spark_master_host,
        'yarn_ready': is_state('hadoop.yarn.ready'),
        'hdfs_ready': is_state('hadoop.hdfs.ready'),
        'zookeepers': zks,
        'peers': peers,
    }

    if not data_changed('deployment_matrix', deployment_matrix):
        return

    hookenv.status_set('maintenance', 'configuring spark')
    hadoop = (RelationBase.from_state('hadoop.yarn.ready') or
              RelationBase.from_state('hadoop.hdfs.ready'))
    if install_spark(hadoop, zks):
        if is_state('hadoop.yarn.ready'):
            set_deployment_mode_state('spark.yarn.installed')
        else:
            set_deployment_mode_state('spark.standalone.installed')

        report_status()
Example #23
def kick_api_server(tls):
    # need to be idempotent and don't want to kick the api server
    # without need
    if data_changed('cert', tls.get_server_cert()):
        # certificate changed, so restart the api server
        hookenv.log("Certificate information changed, restarting api server")
        set_state('kube-apiserver.do-restart')
def update_ha_config(datanode):
    cluster_nodes = get_cluster_nodes()
    jn_nodes = sorted(datanode.nodes())
    jn_port = datanode.jn_port()
    started = is_state('namenode.started')
    new_cluster_config = data_changed('namenode.cluster-nodes', cluster_nodes)
    new_jn_config = data_changed('namenode.jn.config', (jn_nodes, jn_port))

    hadoop = get_hadoop_base()
    hdfs = HDFS(hadoop)
    hdfs.configure_namenode(cluster_nodes)
    hdfs.register_journalnodes(jn_nodes, jn_port)

    if started and new_cluster_config:
        hdfs.restart_namenode()
    elif started and new_jn_config:
        hdfs.reload_slaves()  # is this actually necessary?
Example #25
def kick_api_server(tls):
    # need to be idempotent and don't want to kick the api server
    # without need
    if data_changed('cert', tls.get_server_cert()):
        # certificate changed, so restart the api server
        hookenv.log("Certificate information changed, restarting api server")
        restart_apiserver()
    tls_client.reset_certificate_write_flag('server')
Example #26
def unconfigure_spark():
    '''
    Remove remote Spark; reconfigure Zeppelin to use embedded Spark.
    '''
    hookenv.status_set('maintenance', 'removing spark relation')
    zeppelin = Zeppelin()

    # Zepp includes the spark-client role, so reconfigure our built-in spark
    # if our related spark has gone away.
    if is_state('zeppelin.hadoop.configured'):
        local_master = 'yarn-client'
    else:
        local_master = 'local[*]'
    zeppelin.configure_spark(local_master)
    data_changed('spark.master', local_master)  # ensure updated if re-added
    remove_state('zeppelin.spark.configured')
    update_status()
def update_zk_config(zookeeper):
    hadoop = get_hadoop_base()
    hdfs = HDFS(hadoop)
    zk_nodes = sorted(zookeeper.zookeepers(), key=itemgetter('host'))
    zk_started = is_state('namenode.zk.started')
    hdfs.configure_zookeeper(zk_nodes)
    if zk_started and data_changed('namenode.zk', zk_nodes):
        hdfs.restart_zookeeper()
Example #28
def kick_nginx(tls):
    # we are just going to sighup it, but still want to avoid kicking it
    # without need
    if data_changed('cert', tls.get_server_cert()):
        # certificate changed, so sighup nginx
        hookenv.log("Certificate information changed, sending SIGHUP to nginx")
        host.service_restart('nginx')
    tls_client.reset_certificate_write_flag('server')
Example #29
def configure_sources(relation):
    sources = relation.datasources()
    if not data_changed('grafana.sources', sources):
        return
    for ds in sources:
        hookenv.log('Found datasource: {}'.format(str(ds)))
        # Ensure datasource is configured
        check_datasource(ds)
Example #30
def configure_spark(spark):
    master_url = spark.get_master_url()
    if data_changed("spark.master", master_url):
        hookenv.status_set("maintenance", "configuring spark")
        zeppelin = Zeppelin()
        zeppelin.configure_spark(master_url)
        set_state("zeppelin.spark.configured")
        update_status()
Example #31
def install_zookeeper():
    '''
    After Bigtop has done the initial setup, trigger a puppet install
    via our Zookeeper library.

    Puppet will start the service as a side effect.

    '''
    hookenv.status_set('maintenance', 'installing zookeeper')
    zookeeper = Zookeeper()
    # Prime data changed
    data_changed('zkpeer.nodes', zookeeper.read_peers())
    data_changed('zk.network_interface',
                 hookenv.config().get('network_interface'))
    zookeeper.install()
    zookeeper.open_ports()
    set_state('zookeeper.installed')
    set_state('zookeeper.started')
    hookenv.status_set('active', 'ready {}'.format(zookeeper.quorum_check()))
    # set app version string for juju status output
    zoo_version = get_package_version('zookeeper') or 'unknown'
    hookenv.application_version_set(zoo_version)
def write_config():
    config = hookenv.config()
    hosts = elasticsearch_servers()
    if data_changed('elasticsearch_servers', hosts) or data_changed(
            'template', config['template']):
        log("Writing config")
        if hosts:
            from jinja2 import Template
            app_name = hookenv.service_name()
            template = Template(config['template'])
            hosts_str = "[{}]".format(', '.join(
                map(lambda x: "'{}'".format(x), hosts)))
            with open('/etc/logstash/conf.d/{}.conf'.format(app_name),
                      'w') as conf_file:
                conf_file.write(str(template.render({'hosts': hosts_str})))
        else:
            log("No elasticsearch servers connected. Removing config.")
            try:
                app_name = hookenv.service_name()
                os.remove('/etc/logstash/conf.d/{}.conf'.format(app_name))
            except FileNotFoundError:
                pass
Example #33
def check_cluster(zkpeer):
    '''
    Checkup on the state of the cluster. Start a rolling restart if
    the peers have changed.

    '''
    zk = Zookeeper()
    if data_changed('zkpeer.nodes', zk.read_peers()):
        peers = _ip_list(zk.sort_peers(zkpeer))
        nonce = time.time()
        hookenv.log('Quorum changed. Restart queue: {}'.format(peers))
        leader_set(restart_queue=json.dumps(peers),
                   restart_nonce=json.dumps(nonce))
Example #34
def catch_change_in_creds(kube_control):
    """Request a service restart in case credential updates were detected."""
    nodeuser = '******'.format(gethostname().lower())
    creds = kube_control.get_auth_credentials(nodeuser)
    if creds \
            and data_changed('kube-control.creds', creds) \
            and creds['user'] == nodeuser:
        # We need to cache the credentials here because if the
        # master changes (master leader dies and replaced by a new one)
        # the new master will have no recollection of our certs.
        db.set('credentials', creds)
        set_state('worker.auth.bootstrapped')
        set_state('kubernetes-worker.restart-needed')
Example #35
def etcd_data_change(etcd):
    ''' Etcd scale events block master reconfiguration due to the
        kubernetes-master.components.started state. We need a way to
        handle these events consistently only when the number of etcd
        units has actually changed '''

    # key off of the connection string
    connection_string = etcd.get_connection_string()

    # If the connection string changes, remove the started state to trigger
    # handling of the master components
    if data_changed('etcd-connect', connection_string):
        remove_state('kubernetes-master.components.started')
Example #36
def set_requested_certificates(requests):
    """takes a list of requests which has the following format:
        [{
            'fqdn': ['example.com', 'blog.example.com'],
            'contact_email': '*****@*****.**'
        }]
        each list item will request one certificate.
    """
    if not data_changed('cert.requests', requests) and not requests:
        return
    unitdata.kv().set('certificate.requests', requests)
    remove_state('lets-encrypt.registered')
    set_state('lets-encrypt.certificate-requested')
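
A hypothetical caller of the handler above might build the request list exactly as the docstring describes; the domain names and contact address below are placeholders invented for illustration, not values from any real deployment.

# Hypothetical usage of set_requested_certificates(); fqdn entries and the
# contact email are placeholder values only.
requests = [{
    'fqdn': ['example.com', 'blog.example.com'],
    'contact_email': 'admin@example.com',
}]
set_requested_certificates(requests)
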
Example #37
def configure():
    cfg = hookenv.config()
    zookeeper = Zookeeper()
    changed = any((
        data_changed('zkpeer.nodes', zookeeper.read_peers()),
        data_changed('zk.autopurge_purge_interval',
                     cfg.get('autopurge_purge_interval')),
        data_changed('zk.autopurge_snap_retain_count',
                     cfg.get('autopurge_snap_retain_count')),
        data_changed('zk.storage.data_dir',
                     unitdata.kv().get('zookeeper.storage.data_dir')),
    ))
    if changed or is_flag_set('zookeeper.force-reconfigure'):
        zookeeper.install()
        zookeeper.open_ports()
    clear_flag('zookeeper.force-reconfigure')
    set_flag('zookeeper.started')
    set_flag('zookeeper.configured')
    hookenv.status_set('active', 'ready {}'.format(zookeeper.quorum_check()))
    # set app version string for juju status output
    zoo_version = get_installed_version(SNAP_NAME) or 'unknown'
    hookenv.application_version_set(zoo_version)
Example #38
def ensure_etcd_connections():
    '''Ensure etcd connection strings are accurate.

    Etcd connection info is written to config files when various install/config
    handlers are run. Watch this info for changes, and when changed, remove
    relevant flags to make sure accurate config is regenerated.
    '''
    etcd = endpoint_from_flag('etcd.available')
    connection_changed = data_changed('calico_etcd_connections',
                                      etcd.get_connection_string())
    cert_changed = data_changed('calico_etcd_cert',
                                etcd.get_client_credentials())
    if connection_changed or cert_changed:
        etcd.save_client_credentials(ETCD_KEY_PATH, ETCD_CERT_PATH,
                                     ETCD_CA_PATH)
        # NB: dont bother guarding clear_flag with is_flag_set; it's safe to
        # clear an unset flag.
        clear_flag('calico.service.installed')
        clear_flag('calico.npc.deployed')

        # Canal config (from ./canal.py) is dependent on calico; if etcd
        # changed, set ourselves up to (re)configure those canal bits.
        clear_flag('canal.cni.configured')
Example #39
def host_connected(dh_relation):
    conf = hookenv.config()
    if not data_changed('image', conf.get('image')):
        print("same, skipping")
        return
    print("Different")
    log('config.changed.image, generating new UUID')
    uuid = str(uuid4())
    container_request = {
        'image': conf.get('image'),
    }
    unitdata.kv().set('image', container_request)
    dh_relation.send_container_requests({uuid: container_request})
    status_set('waiting', 'Waiting for image to come online.')
Example #40
def configure_kafka_zookeepers(zk):
    """Configure ready zookeepers and restart kafka if needed.

    As zks come and go, server.properties will be updated. When that file
    changes, restart Kafka and set appropriate status messages.
    """
    zks = zk.zookeepers()
    if not data_changed('zookeepers', zks):
        return

    hookenv.log('Checking Zookeeper configuration')
    kafka = Kafka()
    kafka.configure_kafka(zks)
    hookenv.status_set('active', 'Ready')
def restart_services():
    dc = get_dist_config()
    spark = Spark(dc)
    peers = RelationBase.from_state('sparkpeers.joined')
    is_scaled = peers and len(peers.get_nodes()) > 0
    is_master = spark.is_master()
    is_slave = not is_master or not is_scaled
    master_url = spark.get_master()
    master_ip = spark.get_master_ip()
    if data_changed('insightedge.master_url', master_url):
        stop_datagrid_services()
        start_datagrid_services(master_url, master_ip, is_master, is_slave)
    set_state('insightedge.ready')
    hookenv.status_set('active', 'ready')
Example #42
def ensure_etcd_connections():
    '''Ensure etcd connection strings are accurate.

    Etcd connection info is written to config files when various install/config
    handlers are run. Watch this data for changes, and when changed, remove
    relevant flags to make sure accurate config is regenerated.
    '''
    etcd = endpoint_from_flag('etcd.available')
    if data_changed('flannel_etcd_connections', etcd.get_connection_string()):
        clear_flag('flannel.service.installed')

        # Clearing the above flag will change config that the flannel
        # service depends on. Set ourselves up to (re)invoke the start handler.
        clear_flag('flannel.service.started')
Example #43
def docker_logins_changed():
    """Set a flag to handle new docker login options.

    If docker daemon options have also changed, set a flag to ensure the
    daemon is restarted prior to running docker login.
    """
    config = hookenv.config()

    if data_changed('docker-opts', config['docker-opts']):
        hookenv.log('Found new docker daemon options. Requesting a restart.')
        # State will be removed by layer-docker after restart
        set_state('docker.restart')

    set_state('kubernetes-worker.docker-login')
Example #44
    def test_set_java_home(self, mock_relation_base, mock_utils):
        '''
        Verify that we attempt to call out to the system to set java home,
        only when the data has changed.

        '''
        mock_java = mock.Mock()
        mock_java.java_home.return_value = 'foo'
        mock_java.java_version.return_value = 'bar'
        mock_relation_base.from_state.return_value = mock_java
        remove_state('bigtop.available')  # This may be set by previous tests.

        data_changed('java_home', 'foo')  # Prime data changed

        set_java_home()

        # Data did not change, so we should not call edit_in_place.
        self.assertFalse(mock_utils.re_edit_in_place.called)

        mock_java.java_home.return_value = 'baz'

        # Data did change, so now we should call edit_in_place
        set_java_home()

        self.assertTrue(mock_utils.re_edit_in_place.called)

        # Verify that we set the bigtop.java.changed flag when appropriate.

        # Bigtop is available, but java home not changed
        set_state('bigtop.available')
        set_java_home()
        self.assertFalse(is_state('bigtop.java.changed'))

        # Bigtop is available, and java home has changed
        mock_java.java_home.return_value = 'qux'
        set_java_home()
        self.assertTrue(is_state('bigtop.java.changed'))
Example #45
def container_sdn_setup(sdn):
    """
    Receive the information from the SDN plugin, and render the docker
    engine options.

    :param sdn: SDNPluginProvider
    :return: None
    """
    sdn_config = sdn.get_sdn_config()
    bind_ip = sdn_config['subnet']
    mtu = sdn_config['mtu']
    if data_changed('bip', bind_ip) or data_changed('mtu', mtu):
        status_set('maintenance', 'Configuring container runtime with SDN.')
        opts = DockerOpts()
        # This is a great way to misconfigure a docker daemon. Remove the
        # existing bind ip and mtu values of the SDN
        if opts.exists('bip'):
            opts.pop('bip')
        if opts.exists('mtu'):
            opts.pop('mtu')
        opts.add('bip', bind_ip)
        opts.add('mtu', mtu)
        _remove_docker_network_bridge()
        set_state('docker.sdn.configured')
Example #46
def prepare_kubeconfig_certificates(master):
    ''' Prepare the data to feed to create the kubeconfig file. '''

    layer_options = layer.options('tls-client')
    # Get all the paths to the tls information required for kubeconfig.
    ca = layer_options.get('ca_certificate_path')
    creds = db.get('credentials')
    data_changed('kube-control.creds', creds)

    servers = get_kube_api_servers(master)

    # pedantry
    kubeconfig_path = '/home/ubuntu/.kube/config'

    # Create kubernetes configuration in the default location for ubuntu.
    create_kubeconfig('/root/.kube/config', servers[0], ca,
                      token=creds['client_token'], user='******')
    create_kubeconfig(kubeconfig_path, servers[0], ca,
                      token=creds['client_token'], user='******')
    # Set permissions on the ubuntu users kubeconfig to ensure a consistent UX
    cmd = ['chown', 'ubuntu:ubuntu', kubeconfig_path]
    check_call(cmd)
    messaging()
    set_state('kubeconfig.ready')
Example #47
def store_client(tls):
    '''Read the client certificate and client key from the relation object
    and copy them to the certificate directory.'''
    client_cert, client_key = tls.get_client_cert()
    chain = tls.get_chain()
    if chain:
        client_cert = client_cert + '\n' + chain
    if client_cert and client_key:
        layer_options = layer.options('tls-client')
        cert_path = layer_options.get('client_certificate_path')
        key_path = layer_options.get('client_key_path')
        cert_changed = data_changed('client_certificate', client_cert)
        key_changed = data_changed('client_key', client_key)
        if cert_path:
            if cert_changed or not os.path.exists(cert_path):
                log('Writing client certificate to {0}'.format(cert_path))
                _write_file(cert_path, client_cert)
                set_state('tls_client.client.certificate.written')
            set_state('tls_client.client.certificate.saved')
        if key_path:
            if key_changed or not os.path.exists(key_path):
                log('Writing client key to {0}'.format(key_path))
                _write_file(key_path, client_key)
            set_state('tls_client.client.key.saved')
Example #48
def store_server(tls):
    '''Read the server certificate and server key from the relation object
    and save them to the certificate directory.'''
    server_cert, server_key = tls.get_server_cert()
    chain = tls.get_chain()
    if chain:
        server_cert = server_cert + '\n' + chain
    if server_cert and server_key:
        layer_options = layer.options('tls-client')
        cert_path = layer_options.get('server_certificate_path')
        key_path = layer_options.get('server_key_path')
        cert_changed = data_changed('server_certificate', server_cert)
        key_changed = data_changed('server_key', server_key)
        if cert_path:
            if cert_changed or not os.path.exists(cert_path):
                log('Writing server certificate to {0}'.format(cert_path))
                _write_file(cert_path, server_cert)
                set_state('tls_client.server.certificate.written')
            set_state('tls_client.server.certificate.saved')
        if key_path:
            if key_changed or not os.path.exists(key_path):
                log('Writing server key to {0}'.format(key_path))
                _write_file(key_path, server_key)
            set_state('tls_client.server.key.saved')
Example #49
def start_worker(kube_api, kube_dns, cni):
    ''' Start kubelet using the provided API and DNS info.'''
    servers = get_kube_api_servers(kube_api)
    # Note that the DNS server doesn't necessarily exist at this point. We know
    # what its IP will eventually be, though, so we can go ahead and configure
    # kubelet with that info. This ensures that early pods are configured with
    # the correct DNS even though the server isn't ready yet.

    dns = kube_dns.details()

    if (data_changed('kube-api-servers', servers)
            or data_changed('kube-dns', dns)):
        # Initialize a FlagManager object to add flags to unit data.
        opts = FlagManager('kubelet')
        # Append the DNS flags + data to the FlagManager object.

        opts.add('--cluster-dns', dns['sdn-ip'])  # FIXME sdn-ip needs a rename
        opts.add('--cluster-domain', dns['domain'])

        create_config(servers[0])
        render_init_scripts(servers)
        set_state('kubernetes-worker.config.created')
        restart_unit_services()
        update_kubelet_status()
Example #50
def postInt(dcosmaster):
    status_set('maintenance', 'Running installer')
    services = dcosmaster.services()
    set_state('dcos-agent.ready')
    if not data_changed('reverseproxy.services', services):
        return
    for service in services:
        for host in service['hosts']:
            setupMasterConfigs(host['hostname'])
            hookenv.log('{} has a unit {}'.format(service['service_name'],
                                                  host['hostname']))
    process = check_output(["./pkgpanda", "setup"],
                           cwd=basedir + "bin",
                           env=setupEnvVars())
    status_set('active', 'DC/OS Agent Running')
Example #51
def write_ceph_exporter_config_def():
    config = hookenv.config()
    if config.get('ceph.config'):
        runtime_args(
            'CEPH_CONFIG', '\'{}\'     # path to ceph config file'.format(
                config['ceph.config']))
    if config.get('ceph.user'):
        runtime_args(
            'CEPH_USER',
            '\'{}\'       # Ceph user to connect to cluster. (default "admin")'
            .format(config['ceph.user']))
    if config.get('exporter.config'):
        runtime_args(
            'EXPORTER_CONFIG',
            '\'{}\' # Path to ceph exporter config. (default "/etc/ceph/exporter.yml")'
            .format(config['exporter.config']))
    if config.get('port', False):
        if config.get('telemetry.addr'):
            runtime_args(
                'TELEMETRY_ADDR',
                '\'{}:{}\'  # host:port for ceph exporter (default ":9128")'.
                format(config['telemetry.addr'], config['port']))
        else:
            runtime_args(
                'TELEMETRY_ADDR',
                '\':{}\'  # host:port for ceph exporter (default ":9128")'.
                format(config['port']))
    if config.get('telemetry.path'):
        runtime_args(
            'TELEMETRY_PATH',
            '\'{}\'  # URL path for surfacing collected metrics (default "/metrics")'
            .format(config['telemetry.path']))
    args = runtime_args()
    hookenv.log('runtime_args: {}'.format(args))
    if args:
        render(
            source=CONFIG_DEF_TMPL,
            target=CONFIG_DEF,
            context={'args': args},
        )
    set_state('ceph-exporter.do-restart')
    if any((
            data_changed('ceph-exporter.args', args),
            templates_changed([CONFIG_DEF_TMPL]),
    )):
        set_state('ceph-exporter.do-reconfig-def')

    remove_state('ceph-exporter.do-reconfig-def')
Example #52
def import_srv_crt_to_keystore():
    for cert_type in ('server', 'client'):
        password = keystore_password()
        crt_path = crtPath(cert_type)
        key_path = keyPath(cert_type)

        if os.path.isfile(crt_path) and os.path.isfile(key_path):
            with open(crt_path, 'rt') as f:
                cert = f.read()
                loaded_cert = crypto.load_certificate(crypto.FILETYPE_PEM,
                                                      cert)
                if not data_changed('kafka_{}_certificate'.format(cert_type),
                                    cert):
                    log('{} certificate unchanged, '
                        'skipping keystore import'.format(cert_type))
                    return

            with open(key_path, 'rt') as f:
                loaded_key = crypto.load_privatekey(crypto.FILETYPE_PEM,
                                                    f.read())

            with tempfile.NamedTemporaryFile() as tmp:
                log('server certificate changed')

                keystore_path = keystore(cert_type)

                pkcs12 = crypto.PKCS12Type()
                pkcs12.set_certificate(loaded_cert)
                pkcs12.set_privatekey(loaded_key)
                pkcs12_data = pkcs12.export(password)
                log('opening tmp file {}'.format(tmp.name))

                # write cert and private key to the pkcs12 file
                tmp.write(pkcs12_data)
                tmp.flush()

                log('importing pkcs12')
                # import the pkcs12 into the keystore
                check_call([
                    'keytool', '-v', '-importkeystore', '-srckeystore',
                    str(tmp.name), '-srcstorepass', password, '-srcstoretype',
                    'PKCS12', '-destkeystore', keystore_path, '-deststoretype',
                    'JKS', '-deststorepass', password, '--noprompt'
                ])
                os.chmod(keystore_path, 0o440)

                remove_state('tls_client.certs.changed')
                set_state('kafka.{}.keystore.saved'.format(cert_type))
                remove_state('kafka.started')
Example #53
def store_ca(tls):
    '''Read the certificate authority from the relation object and install
    the ca on this system.'''
    # Get the CA from the relationship object.
    certificate_authority = tls.get_ca()
    if certificate_authority:
        layer_options = layer.options('tls-client')
        ca_path = layer_options.get('ca_certificate_path')
        changed = data_changed('certificate_authority', certificate_authority)
        if ca_path:
            if changed or not os.path.exists(ca_path):
                log('Writing CA certificate to {0}'.format(ca_path))
                _write_file(ca_path, certificate_authority)
            set_state('tls_client.ca.saved')
        if changed:
            # Update /etc/ssl/certs and generate ca-certificates.crt
            install_ca(certificate_authority)
Example #54
def configure_kafka_connect_base():
    kafka = endpoint_from_flag('kafka.ready')
    kubernetes = endpoint_from_flag('endpoint.kubernetes.available')

    kafka_brokers = []
    for kafka_unit in kafka.kafkas():
        kafka_brokers.append(kafka_unit['host'] + ':' + kafka_unit['port'])

    worker_config = generate_worker_config()
    worker_config['bootstrap.servers'] = ','.join(kafka_brokers)
    port = worker_config['rest.port'] if 'rest.port' in worker_config else 8083

    uuid = kubernetes.get_uuid()

    resource_context = {
        'configmap_name': 'cfgmap-{}'.format(uuid),
        'label': uuid,
        'properties': worker_config,
        'service_name': 'svc-{}'.format(uuid),
        'port': port,
        'deployment_name': 'depl-{}'.format(uuid),
        'replicas': conf.get('workers', 1),
        'container_name': uuid,
        'image': unitdata.kv().get('docker-image'),
        'containerport': port,
    }

    if data_changed('resource-context', resource_context):
        # Trigger a rolling update by setting a new annotation in the deployment
        resource_context['configmap_annotation'] = hashlib.sha1(datetime.datetime.now()
                                                                .isoformat()
                                                                .encode('utf-8')).hexdigest()
        templating.render(source="resources.j2",
                          target="/etc/kafka-connect/resources.yaml",
                          context=resource_context)

        
        resources = []
        with open('/etc/kafka-connect/resources.yaml', 'r') as f:
            docs = yaml.load_all(f)
            for doc in docs:
                resources.append(doc)
        kubernetes.send_create_request(resources)

    status.waiting('Waiting for k8s deployment (will happen in next hook)')
    set_flag('kafka-connect-base.configured')
Example #55
def ceph_state_control(ceph_admin):
    ''' Determine if we should remove the state that controls the re-render
    and execution of the ceph-relation-changed event because there
    are changes in the relationship data, and we should re-render any
    configs, keys, and/or service pre-reqs '''

    ceph_relation_data = {
        'mon_hosts': ceph_admin.mon_hosts(),
        'fsid': ceph_admin.fsid(),
        'auth_supported': ceph_admin.auth(),
        'hostname': socket.gethostname(),
        'key': ceph_admin.key()
    }

    # Re-execute the rendering if the data has changed.
    if data_changed('ceph-config', ceph_relation_data):
        remove_state('ceph-storage.configured')
Example #56
def installing_hbase(zk, hdfs):
    zks = zk.zookeepers()
    if is_state('hbase.installed') and (not data_changed('zks', zks)):
        return

    msg = "configuring hbase" if is_state(
        'hbase.installed') else "installing hbase"
    hookenv.status_set('maintenance', msg)

    hbase = HBase()
    hosts = {}
    nns = hdfs.namenodes()
    hosts['namenode'] = nns[0]
    hbase.configure(hosts, zks)
    hbase.open_ports()
    set_state('hbase.installed')
    hookenv.status_set('active', 'ready')
Example #57
def get_certificate_requests():
    endpoint = endpoint_from_flag('endpoint.ssl-termination.update')
    clear_flag('endpoint.ssl-termination.update')
    cert_requests = endpoint.get_cert_requests()
    if data_changed('sslterm.requests', cert_requests) and cert_requests:
        old_requests = unitdata.kv().get('sslterm.cert-requests', [])
        delete_old_certs(old_requests, cert_requests)
        unitdata.kv().set('sslterm.cert-requests', cert_requests)
        pre_cert_requests = prepare_cert_requests(cert_requests)
        lets_encrypt.set_requested_certificates(pre_cert_requests)
        NginxConfig().delete_all_config(NginxModule.HTTP, 'ssl-termination')
        set_flag('ssl-termination.waiting')
    elif not cert_requests:  # If no more cert_requests remove all configs
        unitdata.kv().set('sslterm.cert-requests', [])
        NginxConfig().delete_all_config(NginxModule.HTTP, 'ssl-termination')
        NginxConfig().validate_nginx().reload_nginx()
        set_flag('ssl-termination.report')
def configure_confluent_schema_registry_zookeepers(zk):
    """Configure ready zookeepers and restart kafka if needed.
    As zks come and go, server.properties will be updated. When that file
    changes, restart Kafka and set appropriate status messages.
    """
    zks = zk.zookeepers()
    if not data_changed('zookeepers', zks):
        return

    hookenv.log('Checking Zookeeper configuration')
    hookenv.status_set('maintenance', 'updating zookeeper instances')
    schemareg = ConfluentSchemaRegistry()
    if schemareg.is_running():
        schemareg.stop()
    schemareg.install(zk_units=zks)
    if not schemareg.is_running():
        schemareg.start()
    hookenv.status_set('active', 'ready')
Example #59
def set_java_home():
    java = (RelationBase.from_state('java.ready')
            or RelationBase.from_state('hadoop-plugin.java.ready'))
    java_home = java.java_home()
    unitdata.kv().set('java_home', java_home)
    unitdata.kv().set('java_version', java.java_version())
    if data_changed('java_home', java_home):
        utils.re_edit_in_place(
            '/etc/environment', {
                r'#? *JAVA_HOME *=.*': 'JAVA_HOME={}'.format(java_home),
            },
            append_non_matches=True)

        # If we've potentially setup services with the previous
        # version of Java, set a flag that a layer can use to trigger
        # a restart of those services.
        if is_state('bigtop.available'):
            set_state('bigtop.java.changed')
Example #60
def import_ca_crt_to_keystore():
    ca_path = caPath()

    if os.path.isfile(ca_path):
        with open(ca_path, 'rt') as f:
            changed = data_changed('ca_certificate', f.read())

        if changed:
            ca_keystore = caKeystore()
            check_call([
                'keytool', '-import', '-trustcacerts', '-noprompt',
                '-keystore', ca_keystore, '-storepass',
                keystore_password(), '-file', ca_path
            ])
            os.chmod(ca_keystore, 0o444)

            remove_state('tls_client.ca_installed')
            set_state('kafka.ca.keystore.saved')