Code example #1
File: k8s.py Project: mbruzek/layer-k8s
def launch_dns():
    '''Create the "kube-system" namespace, the kubedns resource controller,
    and the kubedns service. '''
    hookenv.log('Creating kubernetes kubedns on the master node.')
    # Only launch and track this state on the leader.
    # Launching duplicate kubeDNS rc will raise an error
    # Run a command to check if the apiserver is responding.
    return_code = call(split('kubectl cluster-info'))
    if return_code != 0:
        hookenv.log('kubectl command failed, waiting for apiserver to start.')
        remove_state('kubedns.available')
        # Return without setting kubedns.available so this method will retry.
        return
    # Check for the "kube-system" namespace.
    return_code = call(split('kubectl get namespace kube-system'))
    if return_code != 0:
        # Create the kube-system namespace that is used by the kubedns files.
        check_call(split('kubectl create namespace kube-system'))
    # Check for the kubedns replication controller.
    return_code = call(split('kubectl get -f files/manifests/kubedns-rc.yaml'))
    if return_code != 0:
        # Create the kubedns replication controller from the rendered file.
        check_call(split('kubectl create -f files/manifests/kubedns-rc.yaml'))
    # Check for the kubedns service.
    return_code = call(split('kubectl get -f files/manifests/kubedns-svc.yaml'))
    if return_code != 0:
        # Create the kubedns service from the rendered file.
        check_call(split('kubectl create -f files/manifests/kubedns-svc.yaml'))
    set_state('kubedns.available')
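
The decorators that register these handlers with charms.reactive are not shown in this listing. As a minimal sketch (the prerequisite state name is assumed for illustration), a handler like launch_dns() would typically be gated so it keeps retrying until it can set kubedns.available:

from charms.reactive import when, when_not

@when('kubeconfig.created')       # assumed prerequisite state, for illustration
@when_not('kubedns.available')    # cleared on failure above, so the handler retries
def launch_dns():
    ...  # body as in the example above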
Code example #2
def upgrade_charm():
    # Trigger removal of PPA docker installation if it was previously set.
    set_state('config.changed.install_from_upstream')
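    # hookenv.atexit() runs this when the hook finishes, clearing the flag again
    # so handlers gated on it only fire during this upgrade.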
    hookenv.atexit(remove_state, 'config.changed.install_from_upstream')

    cleanup_pre_snap_services()
    check_resources_for_upgrade_needed()

    # Remove the RC for nginx ingress if it exists
    if hookenv.config().get('ingress'):
        kubectl_success('delete', 'rc', 'nginx-ingress-controller')

    # Remove gpu.enabled state so we can reconfigure gpu-related kubelet flags,
    # since they can differ between k8s versions
    if is_state('kubernetes-worker.gpu.enabled'):
        remove_state('kubernetes-worker.gpu.enabled')
        try:
            disable_gpu()
        except ApplyNodeLabelFailed:
            # Removing node label failed. Probably the master is unavailable.
            # Proceed with the upgrade in hope GPUs will still be there.
            hookenv.log('Failed to remove GPU labels. Proceed with upgrade.')

    remove_state('kubernetes-worker.cni-plugins.installed')
    remove_state('kubernetes-worker.config.created')
    remove_state('kubernetes-worker.ingress.available')
    remove_state('worker.auth.bootstrapped')
    set_state('kubernetes-worker.restart-needed')
Code example #3
def start_worker(kube_api, kube_control, auth_control, cni):
    ''' Start kubelet using the provided API and DNS info.'''
    servers = get_kube_api_servers(kube_api)
    # Note that the DNS server doesn't necessarily exist at this point. We know
    # what its IP will eventually be, though, so we can go ahead and configure
    # kubelet with that info. This ensures that early pods are configured with
    # the correct DNS even though the server isn't ready yet.

    dns = kube_control.get_dns()
    ingress_ip = get_ingress_address(kube_control)
    cluster_cidr = cni.get_config()['cidr']

    if cluster_cidr is None:
        hookenv.log('Waiting for cluster cidr.')
        return

    creds = db.get('credentials')
    data_changed('kube-control.creds', creds)

    create_config(random.choice(servers), creds)
    configure_kubelet(dns, ingress_ip)
    configure_kube_proxy(servers, cluster_cidr)
    set_state('kubernetes-worker.config.created')
    restart_unit_services()
    update_kubelet_status()
    set_state('kubernetes-worker.label-config-required')
    remove_state('kubernetes-worker.restart-needed')
Code example #4
File: k8s.py Project: mbruzek/layer-k8s
def config_changed():
    '''If the configuration values change, remove the available states.'''
    config = hookenv.config()
    if any(config.changed(key) for key in config.keys()):
        hookenv.log('The configuration options have changed.')
        # Use the Compose class that encapsulates the docker-compose commands.
        compose = Compose('files/kubernetes')
        if is_leader():
            hookenv.log('Removing master container and kubelet.available state.')  # noqa
            # Stop and remove the Kubernetes kubelet container.
            compose.kill('master')
            compose.rm('master')
            compose.kill('proxy')
            compose.rm('proxy')
            # Remove the state so the code can react to restarting kubelet.
            remove_state('kubelet.available')
        else:
            hookenv.log('Removing kubelet container and kubelet.available state.')  # noqa
            # Stop and remove the Kubernetes kubelet container.
            compose.kill('kubelet')
            compose.rm('kubelet')
            # Remove the state so the code can react to restarting kubelet.
            remove_state('kubelet.available')
            hookenv.log('Removing proxy container and proxy.available state.')
            # Stop and remove the Kubernetes proxy container.
            compose.kill('proxy')
            compose.rm('proxy')
            # Remove the state so the code can react to restarting proxy.
            remove_state('proxy.available')

    if config.changed('version'):
        hookenv.log('The version changed removing the states so the new '
                    'version of kubectl will be downloaded.')
        remove_state('kubectl.downloaded')
        remove_state('kubeconfig.created')
Code example #5
def run_docker_login():
    """Login to a docker registry with configured credentials."""
    config = hookenv.config()

    previous_logins = config.previous('docker-logins')
    logins = config['docker-logins']
    logins = json.loads(logins)

    if previous_logins:
        previous_logins = json.loads(previous_logins)
        next_servers = {login['server'] for login in logins}
        previous_servers = {login['server'] for login in previous_logins}
        servers_to_logout = previous_servers - next_servers
        for server in servers_to_logout:
            cmd = ['docker', 'logout', server]
            subprocess.check_call(cmd)

    for login in logins:
        server = login['server']
        username = login['username']
        password = login['password']
        cmd = ['docker', 'login', server, '-u', username, '-p', password]
        subprocess.check_call(cmd)

    remove_state('kubernetes-worker.docker-login')
    set_state('kubernetes-worker.restart-needed')
Code example #6
File: telegraf.py Project: cmars/juju-telegraf-charm
def exec_input_departed():
    config_path = '{}/{}.conf'.format(get_configs_dir(), 'exec')
    rels = hookenv.relations_of_type('exec')
    if not rels:
        remove_state('plugins.exec.configured')
        if os.path.exists(config_path):
            os.unlink(config_path)
Code example #7
def cleanup_pre_snap_services():
    # remove old states
    remove_state('kubernetes-worker.components.installed')

    # disable old services
    services = ['kubelet', 'kube-proxy']
    for service in services:
        hookenv.log('Stopping {0} service.'.format(service))
        service_stop(service)

    # cleanup old files
    files = [
        "/lib/systemd/system/kubelet.service",
        "/lib/systemd/system/kube-proxy.service",
        "/etc/default/kube-default",
        "/etc/default/kubelet",
        "/etc/default/kube-proxy",
        "/srv/kubernetes",
        "/usr/local/bin/kubectl",
        "/usr/local/bin/kubelet",
        "/usr/local/bin/kube-proxy",
        "/etc/kubernetes"
    ]
    for file in files:
        if os.path.isdir(file):
            hookenv.log("Removing directory: " + file)
            shutil.rmtree(file)
        elif os.path.isfile(file):
            hookenv.log("Removing file: " + file)
            os.remove(file)
Code example #8
File: apt.py Project: IBCNServices/tengu-charms
def purge(packages):
    """Purge one or more deb packages from the system"""
    fetch.apt_purge(packages, fatal=True)
    store = unitdata.kv()
    store.unsetrange(packages, prefix='apt.install_queue.')
    for package in packages:
        reactive.remove_state('apt.installed.{}'.format(package))
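A minimal sketch of how the per-package 'apt.installed.<package>' states managed here are typically consumed elsewhere, assuming the standard charms.reactive decorators; the package and charm state names are hypothetical:

from charms.reactive import when, when_not, set_state

@when('apt.installed.nginx')       # hypothetical package name
@when_not('myapp.configured')      # hypothetical charm state
def configure_app():
    # render configuration, open ports, etc.
    set_state('myapp.configured')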
Code example #9
File: telegraf.py Project: cmars/juju-telegraf-charm
def elasticsearch_input(es):
    template = """
[[inputs.elasticsearch]]
  servers = {{ servers }}
"""
    hosts = []
    rels = hookenv.relations_of_type('elasticsearch')
    for rel in rels:
        es_host = rel.get('host')
        port = rel.get('port')
        if not es_host or not port:
            hookenv.log('No host received for relation: {}.'.format(rel))
            continue
        hosts.append("http://{}:{}".format(es_host, port))
    config_path = '{}/{}.conf'.format(get_configs_dir(), 'elasticsearch')
    if hosts:
        context = {"servers": json.dumps(hosts)}
        input_config = render_template(template, context) + \
            render_extra_options("inputs", "elasticsearch")
        hookenv.log("Updating {} plugin config file".format('elasticsearch'))
        host.write_file(config_path, input_config.encode('utf-8'))
        set_state('plugins.elasticsearch.configured')
    elif os.path.exists(config_path):
        os.unlink(config_path)
        remove_state('plugins.elasticsearch.configured')
Code example #10
File: node.py Project: battlemidget/juju-layer-node
def version_check():
    url = config.get("install_sources")
    key = config.get("install_keys")

    if url != kv.get("nodejs.url") or key != kv.get("nodejs.key"):
        apt.purge(["nodejs"])
        remove_state("nodejs.available")
Code example #11
File: docker.py Project: juju-solutions/layer-docker
def scrub_sdn_config():
    ''' If this scenario of states is true, we have likely broken a
    relationship to our once configured SDN provider. This necessitates a
    cleanup of the Docker Options for BIP and MTU of the presumed dead SDN
    interface. '''

    opts = DockerOpts()
    try:
        opts.pop('bip')
    except KeyError:
        hookenv.log('Unable to locate bip in Docker config.')
        hookenv.log('Assuming no action required.')

    try:
        opts.pop('mtu')
    except KeyError:
        hookenv.log('Unable to locate mtu in Docker config.')
        hookenv.log('Assuming no action required.')

    # This method does everything we need to ensure the bridge configuration
    # has been removed. restarting the daemon restores docker with its default
    # networking mode.
    _remove_docker_network_bridge()
    recycle_daemon()
    remove_state('docker.sdn.configured')
Code example #12
File: couchdb.py Project: petevg/layer-couchdb
def stop():
    """
    Halt couch.

    """
    subprocess.check_call(['service', 'couchdb', 'stop'])
    remove_state('couchdb.running')
Code example #13
File: couchdb.py Project: petevg/layer-couchdb
def end_admin_party(config_path='/etc/couchdb'):
    """
    Couch starts out in 'admin party' mode, which means that anyone
    can create and edit databases. This routine secures couch, and
    flags us to restart.

    @param str config_path: The location of the config files in the system.

    """
    log("Ending the admin party.", DEBUG)
    _maybe_generate_passwords()

    passwords = json.loads(leader_get('passwords'))

    entries = [
        {'section': 'admins', 'key': 'admin', 'value': passwords['admin_pass']},
        {'section': 'admins', 'key': 'replication', 'value': passwords['repl_pass']},
        {'section': 'couch_httpd_auth', 'key': 'require_valid_user', 'value': 'true'},
    ]
    if config("human-auditable-creds"):
        entries += [
            {'section': 'juju_notes', 'key': 'admin_pass', 'value': passwords['admin_pass']},
            {'section': 'juju_notes', 'key': 'repl_pass', 'value': passwords['repl_pass']},
        ]
    _write_config(config_path, 'local', entries)

    remove_state('couchdb.admin_party')
Code example #14
def create_service_configs(kube_control):
    """Create the users for kubelet"""
    should_restart = False
    # generate the username/pass for the requesting unit
    proxy_token = get_token('system:kube-proxy')
    if not proxy_token:
        setup_tokens(None, 'system:kube-proxy', 'kube-proxy')
        proxy_token = get_token('system:kube-proxy')
        should_restart = True

    client_token = get_token('admin')
    if not client_token:
        setup_tokens(None, 'admin', 'admin', "system:masters")
        client_token = get_token('admin')
        should_restart = True

    requests = kube_control.auth_user()
    for request in requests:
        username = request[1]['user']
        group = request[1]['group']
        kubelet_token = get_token(username)
        if not kubelet_token and username and group:
            # Usernames have to be in the form of system:node:<nodeName>
            userid = "kubelet-{}".format(request[0].split('/')[1])
            setup_tokens(None, username, userid, group)
            kubelet_token = get_token(username)
            kube_control.sign_auth_request(request[0], username,
                                           kubelet_token, proxy_token,
                                           client_token)
            should_restart = True

    if should_restart:
        host.service_restart('snap.kube-apiserver.daemon')
        remove_state('authentication.setup')
Code example #15
def restart_scheduler():
    prev_state, prev_msg = hookenv.status_get()
    hookenv.status_set('maintenance', 'Restarting kube-scheduler')
    host.service_restart('snap.kube-scheduler.daemon')
    hookenv.status_set(prev_state, prev_msg)
    remove_state('kube-scheduler.do-restart')
    set_state('kube-scheduler.started')
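
The status_get()/status_set() bracketing above is a common way to restart a service without losing the unit's workload status. A minimal, hypothetical helper sketching the same pattern as a context manager:

from contextlib import contextmanager
from charmhelpers.core import hookenv

@contextmanager
def maintenance_status(message):
    # Remember the current workload status, show 'maintenance' while working,
    # then restore whatever was there before.
    prev_state, prev_msg = hookenv.status_get()
    hookenv.status_set('maintenance', message)
    try:
        yield
    finally:
        hookenv.status_set(prev_state, prev_msg)

# Usage, equivalent to the status handling in restart_scheduler above:
# with maintenance_status('Restarting kube-scheduler'):
#     host.service_restart('snap.kube-scheduler.daemon')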
Code example #16
File: wallabag.py Project: cmars/juju-charm-wallabag
def setup_mysql(db):
    apt_install(['php5-mysql', 'mysql-client'])
    reset_wallabag()
    setup('mysql', db)
    remove_state('mysql.available')
    remove_state('wallabag.connected.sqlite')
    set_state('wallabag.connected.mysql')
Code example #17
def setup_non_leader_authentication():

    service_key = '/root/cdk/serviceaccount.key'
    basic_auth = '/root/cdk/basic_auth.csv'
    known_tokens = '/root/cdk/known_tokens.csv'

    keys = [service_key, basic_auth, known_tokens]
    # The source of truth for non-leaders is the leader.
    # Therefore we overwrite_local with whatever the leader has.
    if not get_keys_from_leader(keys, overwrite_local=True):
        # the keys were not retrieved. Non-leaders have to retry.
        return

    if not any_file_changed(keys) and is_state('authentication.setup'):
        # No change detected and we have already setup the authentication
        return

    hookenv.status_set('maintenance', 'Rendering authentication templates.')
    api_opts = FlagManager('kube-apiserver')
    api_opts.add('basic-auth-file', basic_auth)
    api_opts.add('token-auth-file', known_tokens)
    api_opts.add('service-account-key-file', service_key)

    controller_opts = FlagManager('kube-controller-manager')
    controller_opts.add('service-account-private-key-file', service_key)

    remove_state('kubernetes-master.components.started')
    set_state('authentication.setup')
Code example #18
File: kafka.py Project: Guavus/bigtop
def stop_kafka_waiting_for_zookeeper_ready():
    hookenv.status_set('maintenance', 'zookeeper not ready, stopping kafka')
    kafka = Kafka()
    kafka.close_ports()
    kafka.stop()
    remove_state('kafka.started')
    hookenv.status_set('waiting', 'waiting for zookeeper to become ready')
Code example #19
File: node.py Project: johnsca/juju-layer-node
def version_check():
    url = config.get('install_sources')
    key = config.get('install_keys')

    if url != kv.get('nodejs.url') or key != kv.get('nodejs.key'):
        apt.purge(['nodejs'])
        remove_state('nodejs.available')
Code example #20
def configure_interface():
    """
    Configure an ethernet interface
    """
    iface_name = action_get('iface-name')
    cidr = action_get('cidr')

    # cidr is optional
    if cidr:
        try:
            # Add may fail, but change seems to add or update
            router.ip('address', 'change', cidr, 'dev', iface_name)
        except subprocess.CalledProcessError as e:
            action_fail('Command failed: %s (%s)' %
                        (' '.join(e.cmd), str(e.output)))
            return
        finally:
            remove_state('vpe.configure-interface')
            status_set('active', 'ready!')

    try:
        router.ip('link', 'set', 'dev', iface_name, 'up')
    except subprocess.CalledProcessError as e:
        action_fail('Command failed: %s (%s)' %
                    (' '.join(e.cmd), str(e.output)))
    finally:
        remove_state('vpe.configure-interface')
        status_set('active', 'ready!')
Code example #21
def remove_dashboard_addons():
    ''' Removes dashboard addons if they are disabled in config '''
    if not hookenv.config('enable-dashboard-addons'):
        hookenv.log('Removing kubernetes dashboard.')
        for template in dashboard_templates:
            delete_addon(template)
        remove_state('kubernetes.dashboard.available')
Code example #22
File: bsbstorage.py Project: stub42/postgresql-charm
def remount():
    if reactive.is_state('postgresql.cluster.is_running'):
        # Attempting this while PostgreSQL is live would be really, really bad.
        service.stop()

    old_data_dir = postgresql.data_dir()
    new_data_dir = os.path.join(external_volume_mount, 'postgresql',
                                postgresql.version(), 'main')
    backup_data_dir = '{}-{}'.format(old_data_dir, int(time.time()))

    if os.path.isdir(new_data_dir):
        hookenv.log('Remounting existing database at {}'.format(new_data_dir),
                    WARNING)
    else:
        status_set('maintenance',
                   'Migrating data from {} to {}'.format(old_data_dir,
                                                         new_data_dir))
        helpers.makedirs(new_data_dir, mode=0o770,
                         user='******', group='postgres')
        try:
            rsync_cmd = ['rsync', '-av',
                         old_data_dir + '/',
                         new_data_dir + '/']
            hookenv.log('Running {}'.format(' '.join(rsync_cmd)), DEBUG)
            subprocess.check_call(rsync_cmd)
        except subprocess.CalledProcessError:
            status_set('blocked',
                       'Failed to sync data from {} to {}'
                       ''.format(old_data_dir, new_data_dir))
            return

    os.replace(old_data_dir, backup_data_dir)
    os.symlink(new_data_dir, old_data_dir)
    fix_perms(new_data_dir)
    reactive.remove_state('postgresql.storage.needs_remount')
Code example #23
def remove_images(relation):
    container_requests = relation.container_requests
    log(container_requests)
    for uuid in container_requests:
        remove(uuid)
    print("wololo")
    remove_state('dockerhost.broken')
Code example #24
File: spark.py Project: johnsca/layer-apache-spark
def reconfigure_spark():
    config = hookenv.config()
    maintenance = config['maintenance_mode']
    if maintenance:
        remove_state('not.upgrading')
        spark = Spark(get_dist_config())
        report_status(spark)
        spark.stop()
        current_version = spark.get_current_version()
        if config['upgrade_immediately'] and config['spark_version'] != current_version:
            upgrade_spark()
        return
    else:
        set_state('not.upgrading')

    mode = hookenv.config()['spark_execution_mode']
    hookenv.status_set('maintenance', 'Configuring Apache Spark')
    spark = Spark(get_dist_config())
    spark.stop()
    if is_state('hadoop.ready') and mode.startswith('yarn') and (not is_state('yarn.configured')):
        # was in a mode other than yarn, going to yarn
        hookenv.status_set('maintenance', 'Setting up Apache Spark for YARN')
        spark.configure_yarn_mode()
        set_state('yarn.configured')

    if is_state('hadoop.ready') and (not mode.startswith('yarn')) and is_state('yarn.configured'):
        # was in a yarn mode and going to another mode
        hookenv.status_set('maintenance', 'Disconnecting Apache Spark from YARN')
        spark.disable_yarn_mode()
        remove_state('yarn.configured')

    spark.configure()
    spark.start()
    report_status(spark)
Code example #25
def stop():
    """ Stop hook """
    log('ftb-infinity: stop')
    remove_state(CHARM_STATE_STARTED)
    close_port(conf['server_port'])
    service_stop(CHARM_NAME)
    ftb_systemd_remove()
Code example #26
File: postfix.py Project: cmars/mail-layers
def config_postfix():
    config = hookenv.config()
    if is_configured(config):
        set_state('postfix.configured')
    else:
        remove_state('postfix.configured')
        remove_state('postfix.start')
Code example #27
File: pki.py Project: cmars/mail-layers
def config_postfix():
    config = hookenv.config()
    if config.changed('hostname'):
        remove_state('pki.cert.issued')
    if config.get('hostname'):
        issue_cert(config['hostname'])
        set_state('pki.cert.issued')
Code example #28
def reset_states_for_delivery():
    '''An upgrade charm event was triggered by Juju, react to that here.'''
    migrate_from_pre_snaps()
    install_snaps()
    add_rbac_roles()
    set_state('reconfigure.authentication.setup')
    remove_state('authentication.setup')
Code example #29
File: apt.py Project: galgalesh/tengu-charms
def install_queued():
    '''Installs queued deb packages.

    Removes the apt.queued_installs state and sets the apt.installed state.

    On failure, sets the unit's workload state to 'blocked' and returns
    False. Package installs remain queued.

    On success, sets the apt.installed.{packagename} state for each
    installed package and returns True.
    '''
    store = unitdata.kv()
    queue = sorted((options, package)
                   for package, options in store.getrange('apt.install_queue.',
                                                          strip=True).items())

    installed = set()
    for options, batch in itertools.groupby(queue, lambda x: x[0]):
        packages = [b[1] for b in batch]
        try:
            status_set(None, 'Installing {}'.format(','.join(packages)))
            fetch.apt_install(packages, options, fatal=True)
            store.unsetrange(packages, prefix='apt.install_queue.')
            installed.update(packages)
        except subprocess.CalledProcessError:
            status_set('blocked',
                       'Unable to install packages {}'
                       .format(','.join(packages)))
            return False # Without setting reactive state.

    for package in installed:
        reactive.set_state('apt.installed.{}'.format(package))

    reactive.remove_state('apt.queued_installs')
    return True
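
A small standalone illustration (not charm code) of the batching step above: itertools.groupby() only merges adjacent items, which is why the queue is sorted by options first, so packages sharing the same apt options end up installed in a single call.

import itertools

queue = sorted([('--no-install-recommends', 'vim'),
                ('', 'git'),
                ('--no-install-recommends', 'curl')])
for options, batch in itertools.groupby(queue, lambda x: x[0]):
    print(repr(options), [package for _, package in batch])
# ''                        ['git']
# '--no-install-recommends' ['curl', 'vim']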
Code example #30
def start_worker(kube_api, kube_control, auth_control, cni):
    ''' Start kubelet using the provided API and DNS info.'''
    servers = get_kube_api_servers(kube_api)
    # Note that the DNS server doesn't necessarily exist at this point. We know
    # what its IP will eventually be, though, so we can go ahead and configure
    # kubelet with that info. This ensures that early pods are configured with
    # the correct DNS even though the server isn't ready yet.

    dns = kube_control.get_dns()
    cluster_cidr = cni.get_config()['cidr']

    if cluster_cidr is None:
        hookenv.log('Waiting for cluster cidr.')
        return

    nodeuser = '******'.format(gethostname())
    creds = kube_control.get_auth_credentials(nodeuser)
    data_changed('kube-control.creds', creds)

    # set --allow-privileged flag for kubelet
    set_privileged()

    create_config(random.choice(servers), creds)
    configure_worker_services(servers, dns, cluster_cidr)
    set_state('kubernetes-worker.config.created')
    restart_unit_services()
    update_kubelet_status()
    apply_node_labels()
    remove_state('kubernetes-worker.restart-needed')
Code example #31
def remove_states():
    remove_state('tls_client.ca.saved')
    remove_state('tls_client.server.certificate.saved')
    remove_state('tls_client.server.key.saved')
    remove_state('tls_client.client.certificate.saved')
    remove_state('tls_client.client.key.saved')
Code example #32
def ssl_ca_changed():
    remove_state('vault.ssl.configured')
Code example #33
def database_not_ready():
    remove_state('vault.schema.created')
Code example #34
def upgrade_charm():
    remove_state('configured')
    remove_state('vault.nrpe.configured')
    remove_state('vault.ssl.configured')
Code example #35
def disable_mlock_changed():
    remove_state('configured')
Code example #36
def nagios_servicegroups_changed():
    remove_state('vault.nrpe.configured')
Code example #37
def nagios_context_changed():
    remove_state('vault.nrpe.configured')
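
The small vault handlers above are typically attached to the 'config.changed.<option>' flags that layer:basic manages automatically; the decorators are omitted in this listing. A minimal sketch, with the option name assumed to match the charm's config:

from charms.reactive import when, remove_state

@when('config.changed.nagios_context')   # flag set by layer:basic when the option changes
def nagios_context_changed():
    remove_state('vault.nrpe.configured')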
Code example #38
def registry_changed():
    remove_state('calico.service.installed')
    remove_state('calico.npc.deployed')
Code example #39
def reconfigure_calico_pool():
    ''' Reconfigure the Calico IP pool '''
    remove_state('calico.pool.configured')
Code example #40
def watch_for_api_endpoint_changes():
    apiserver_ips = get_apiserver_ips()
    old_apiserver_ips = db.get('tigera.apiserver_ips_used')
    if apiserver_ips != old_apiserver_ips:
        log('apiserver endpoints changed, preparing to reapply templates')
        remove_state('calico.npc.deployed')
Code example #41
# Note: getContainerRuntimeCtl comes from the charm's own helper code and is
# not imported in this excerpt; the standard imports it relies on are:
import os
import traceback

from charmhelpers.core import unitdata
from charmhelpers.core.hookenv import log
from charmhelpers.core.templating import render
from charmhelpers.core.host import (arch, service, service_restart,
                                    service_running)
from charms.layer import status
from charms.reactive import hook, set_state, remove_state

# TODO:
#   - Handle the 'stop' hook by stopping and uninstalling all the things.

os.environ['PATH'] += os.pathsep + os.path.join(os.sep, 'snap', 'bin')

try:
    CTL = getContainerRuntimeCtl()
    set_state('calico.ctl.ready')
except RuntimeError:
    log(traceback.format_exc())
    remove_state('calico.ctl.ready')

DEFAULT_REGISTRY = 'quay.io'
CALICOCTL_PATH = '/opt/calicoctl'
ETCD_KEY_PATH = os.path.join(CALICOCTL_PATH, 'etcd-key')
ETCD_CERT_PATH = os.path.join(CALICOCTL_PATH, 'etcd-cert')
ETCD_CA_PATH = os.path.join(CALICOCTL_PATH, 'etcd-ca')
CALICO_CIDR = '192.168.0.0/16'

db = unitdata.kv()


@hook('upgrade-charm')
def upgrade_charm():
    remove_state('calico.binaries.installed')
    remove_state('calico.cni.configured')
Code example #42
def registry_credentials_changed():
    remove_state('calico.image.pulled')
Code example #43
def unconfigure_hive():
    hookenv.status_set('maintenance', 'removing hive relation')
    zeppelin = Zeppelin()
    zeppelin.configure_hive('jdbc:hive2://:')
    remove_state('zeppelin.hive.configured')
    update_status()
Code example #44
def ignore_loose_rpf_changed():
    remove_state('calico.service.installed')
Code example #45
def on_config_allow_privileged_change():
    """React to changed 'allow-privileged' config value.

    """
    remove_state('kubernetes-master.components.started')
    remove_state('config.changed.allow-privileged')
Code example #46
def unconfigure_hadoop():
    zeppelin = Zeppelin()
    zeppelin.remove_hadoop_notebooks()
    remove_state('zeppelin.hadoop.configured')
Code example #47
def configure_apiserver():
    # TODO: investigate if it's possible to use config file to store args
    # https://github.com/juju-solutions/bundle-canonical-kubernetes/issues/315
    # Handle api-extra-args config option
    to_add, to_remove = get_config_args()

    api_opts = FlagManager('kube-apiserver')

    # Remove arguments that are no longer provided as config option
    # this allows them to be reverted to charm defaults
    for arg in to_remove:
        hookenv.log('Removing option: {}'.format(arg))
        api_opts.destroy(arg)
        # We need to "unset" options by settig their value to "null" string
        cmd = ['snap', 'set', 'kube-apiserver', '{}=null'.format(arg)]
        check_call(cmd)

    # Get the tls paths from the layer data.
    layer_options = layer.options('tls-client')
    ca_cert_path = layer_options.get('ca_certificate_path')
    client_cert_path = layer_options.get('client_certificate_path')
    client_key_path = layer_options.get('client_key_path')
    server_cert_path = layer_options.get('server_certificate_path')
    server_key_path = layer_options.get('server_key_path')

    if is_privileged():
        api_opts.add('allow-privileged', 'true', strict=True)
        set_state('kubernetes-master.privileged')
    else:
        api_opts.add('allow-privileged', 'false', strict=True)
        remove_state('kubernetes-master.privileged')

    # Handle static options for now
    api_opts.add('service-cluster-ip-range', service_cidr())
    api_opts.add('min-request-timeout', '300')
    api_opts.add('v', '4')
    api_opts.add('tls-cert-file', server_cert_path)
    api_opts.add('tls-private-key-file', server_key_path)
    api_opts.add('kubelet-certificate-authority', ca_cert_path)
    api_opts.add('kubelet-client-certificate', client_cert_path)
    api_opts.add('kubelet-client-key', client_key_path)
    api_opts.add('logtostderr', 'true')
    api_opts.add('insecure-bind-address', '127.0.0.1')
    api_opts.add('insecure-port', '8080')
    api_opts.add('storage-backend', 'etcd2')  # FIXME: add etcd3 support

    admission_control = [
        'Initializers', 'NamespaceLifecycle', 'LimitRanger', 'ServiceAccount',
        'ResourceQuota', 'DefaultTolerationSeconds'
    ]

    auth_mode = hookenv.config('authorization-mode')
    if 'Node' in auth_mode:
        admission_control.append('NodeRestriction')

    api_opts.add('authorization-mode', auth_mode, strict=True)

    if get_version('kube-apiserver') < (1, 6):
        hookenv.log('Removing DefaultTolerationSeconds from admission-control')
        admission_control.remove('DefaultTolerationSeconds')
    if get_version('kube-apiserver') < (1, 7):
        hookenv.log('Removing Initializers from admission-control')
        admission_control.remove('Initializers')
    api_opts.add('admission-control', ','.join(admission_control), strict=True)

    # Add operator-provided arguments, this allows operators
    # to override defaults
    for arg in to_add:
        hookenv.log('Adding option: {} {}'.format(arg[0], arg[1]))
        # Make sure old value is gone
        api_opts.destroy(arg[0])
        api_opts.add(arg[0], arg[1])

    cmd = ['snap', 'set', 'kube-apiserver'] + api_opts.to_s().split(' ')
    check_call(cmd)
    set_state('kube-apiserver.do-restart')
Code example #48
def disable_gpu_mode():
    """We were in gpu mode, but the operator has set allow-privileged="false",
    so we can't run in gpu mode anymore.

    """
    remove_state('kubernetes-master.gpu.enabled')
Code example #49
File: snap.py Project: lazypower/layer-snap
def remove(snapname):
    hookenv.log('Removing snap {}'.format(snapname))
    subprocess.check_call(['snap', 'remove', snapname],
                          universal_newlines=True)
    reactive.remove_state('snap.installed.{}'.format(snapname))
Code example #50
def switch_auth_mode():
    config = hookenv.config()
    mode = config.get('authorization-mode')
    if data_changed('auth-mode', mode):
        remove_state('kubernetes-master.components.started')
Code example #51
def upgrade_charm():
    # migrate to new flags
    if is_state('kubernetes-worker.restarted-for-cloud'):
        remove_state('kubernetes-worker.restarted-for-cloud')
        set_state('kubernetes-worker.cloud.ready')
    if is_state('kubernetes-worker.cloud-request-sent'):
        # minor change, just for consistency
        remove_state('kubernetes-worker.cloud-request-sent')
        set_state('kubernetes-worker.cloud.request-sent')

    set_state('config.changed.install_from_upstream')
    hookenv.atexit(remove_state, 'config.changed.install_from_upstream')

    cleanup_pre_snap_services()
    migrate_resource_checksums(checksum_prefix, snap_resources)
    if check_resources_for_upgrade_needed(checksum_prefix, snap_resources):
        set_upgrade_needed()

    # Remove the RC for nginx ingress if it exists
    if hookenv.config().get('ingress'):
        set_state('kubernetes-worker.remove-old-ingress')

    # Remove gpu.enabled state so we can reconfigure gpu-related kubelet flags,
    # since they can differ between k8s versions
    if is_state('kubernetes-worker.gpu.enabled'):
        remove_state('kubernetes-worker.gpu.enabled')
        try:
            disable_gpu()
        except ApplyNodeLabelFailed:
            # Removing node label failed. Probably the master is unavailable.
            # Proceed with the upgrade in hope GPUs will still be there.
            hookenv.log('Failed to remove GPU labels. Proceed with upgrade.')

    if hookenv.config('ingress'):
        set_state('kubernetes-worker.ingress.enabled')
    else:
        remove_state('kubernetes-worker.ingress.enabled')

    # force certs to be updated
    if is_state('certificates.available') and \
       is_state('kube-control.connected'):
        send_data()

    if is_state('kubernetes-worker.registry.configured'):
        set_state('kubernetes-master-worker-base.registry.configured')
        remove_state('kubernetes-worker.registry.configured')

    remove_state('kubernetes-worker.cni-plugins.installed')
    remove_state('kubernetes-worker.config.created')
    remove_state('kubernetes-worker.ingress.available')
    remove_state('worker.auth.bootstrapped')
    set_state('kubernetes-worker.restart-needed')
Code example #52
File: jenkins.py Project: sfeole/jenkins-charm
def configure_tools():
    remove_state("jenkins.configured.tools")
    status_set("maintenance", "Installing tools")
    packages = Packages()
    packages.install_tools()
    set_state("jenkins.configured.tools")
Code example #53
def restart_for_certs():
    set_state('kubernetes-worker.restart-needed')
    remove_state('tls_client.certs.changed')
    remove_state('tls_client.ca.written')
Code example #54
def nvidia_departed():
    """Cuda departed."""
    disable_gpu()
    remove_state('kubernetes-worker.gpu.enabled')
    set_state('kubernetes-worker.restart-needed')
Code example #55
def clear_cloud_flags():
    remove_state('kubernetes-worker.cloud.pending')
    remove_state('kubernetes-worker.cloud.request-sent')
    remove_state('kubernetes-worker.cloud.blocked')
    remove_state('kubernetes-worker.cloud.ready')
    set_state('kubernetes-worker.restart-needed')  # force restart
Code example #56
def reconfigure_ingress():
    remove_state('kubernetes-worker.ingress.available')
Code example #57
def on_config_allow_privileged_change():
    """React to changed 'allow-privileged' config value.

    """
    set_state('kubernetes-worker.restart-needed')
    remove_state('config.changed.allow-privileged')
Code example #58
def update_registry_location():
    registry_location = get_registry_location()

    if data_changed('registry-location', registry_location):
        remove_state('nfs.configured')
        remove_state('kubernetes-worker.ingress.available')
Code example #59
def sdn_changed():
    '''The Software Defined Network changed on the container so restart the
    kubernetes services.'''
    restart_unit_services()
    update_kubelet_status()
    remove_state('docker.sdn.configured')
Code example #60
def upgrade_charm():
    print("Upgrading mattermost setup.")
    if service_running("mattermost"):
        service_stop("mattermost")
    remove_state('mattermost.installed')
    remove_state('mattermost.backend.started')