def nfs_storage(mount):
    '''Deploy the nfs-client provisioner onto the cluster.

    Kubernetes needs the NFS mount details rendered into a deployment of
    the nfs client provisioner, which then services persistent volume
    claims without a backing persistent volume.
    '''
    # Nothing to do until the relation provides a usable mount.
    context = get_first_mount(mount)
    if not context:
        return

    # Point the provisioner image at the configured registry, if any.
    registry = get_registry_location()
    if registry:
        context['registry'] = registry

    # Render the NFS provisioner deployment and apply it to the cluster.
    manifest = '/root/cdk/addons/{}'.format('nfs-provisioner.yaml')
    render('nfs-provisioner.yaml', manifest, context)
    hookenv.log('Creating the nfs provisioner.')
    try:
        kubectl('apply', '-f', manifest)
    except CalledProcessError as e:
        # apiserver may not be ready yet; a later hook will retry.
        hookenv.log(e)
        hookenv.log(
            'Failed to create nfs provisioner. Will attempt again next update.'
        )  # noqa
        return

    set_state('nfs.configured')
def shutdown():
    '''Tear down this worker unit: unregister its node from the API
    server (best effort), then stop the worker daemons.'''
    if os.path.isfile(kubeconfig_path):
        try:
            kubectl('delete', 'node', get_node_name())
        except CalledProcessError:
            # apiserver may be unreachable; removal is best-effort.
            hookenv.log('Failed to unregister node.')
    for daemon in ('snap.kubelet.daemon', 'snap.kube-proxy.daemon'):
        service_stop(daemon)
# --- Example 3 (scraped-listing separator) ---
def shutdown():
    """Destroy-time teardown for this worker unit.

    Removes the node from the cluster when possible (best effort), then
    stops the kubelet and kube-proxy services.
    """
    if os.path.isfile(kubelet_kubeconfig_path):
        try:
            kubectl("delete", "node", get_node_name())
        except CalledProcessError:
            # apiserver may already be gone; log and continue teardown.
            hookenv.log("Failed to unregister node.")
    for daemon in ("snap.kubelet.daemon", "snap.kube-proxy.daemon"):
        service_stop(daemon)
def disable_ingress():
    '''Remove the ingress daemon set and default http backend, closing
    the web ports this unit advertised.'''
    hookenv.log('Deleting the http backend and ingress.')
    hookenv.close_port(80)
    hookenv.close_port(443)
    manifests = (
        '/root/cdk/addons/default-http-backend.yaml',
        '/root/cdk/addons/ingress-daemon-set.yaml',
    )
    try:
        for manifest in manifests:
            kubectl('delete', '--ignore-not-found', '-f', manifest)
    except CalledProcessError:
        # Leave the state flag set so a later hook retries the delete.
        traceback.print_exc()
        hookenv.log('Failed to disable ingress, waiting to retry')
        return
    remove_state('kubernetes-worker.ingress.available')
# --- Example 5 (scraped-listing separator) ---
def pre_series_upgrade():
    """Drain this node and pause the worker daemons before a series upgrade."""
    # --force: unmanaged pods will die with the node anyway, so it's better
    # to let drain terminate them cleanly. --delete-local-data: emptyDir
    # data (used at least by the dashboard) is documented as ephemeral, so
    # discarding it should be ok.
    drain_args = [
        "drain",
        get_node_name(),
        "--ignore-daemonsets",
        "--force",
        "--delete-local-data",
    ]
    kubectl(*drain_args)
    for daemon in ("snap.kubelet.daemon", "snap.kube-proxy.daemon"):
        service_pause(daemon)
def get_secret_names():
    """Return a dict of 'username: secret_id' for Charmed Kubernetes users."""
    try:
        raw = kubernetes_common.kubectl(
            "get",
            "secrets",
            "-n",
            AUTH_SECRET_NS,
            "--field-selector",
            "type={}".format(AUTH_SECRET_TYPE),
            "-o",
            "json",
        ).decode("UTF-8")
    except (CalledProcessError, FileNotFoundError):
        # The api server may not be up, or we may be trying to run kubelet
        # before the snap is installed. Send back an empty dict.
        hookenv.log("Unable to get existing secrets", level=hookenv.WARNING)
        return {}

    names = {}
    payload = json.loads(raw)
    for item in payload.get("items", []):
        try:
            secret_id = item["metadata"]["name"]
            user_b64 = item["data"]["username"].encode("UTF-8")
        except (KeyError, TypeError):
            # CK secrets will have populated 'data', but not all secrets do
            continue
        names[b64decode(user_b64).decode("UTF-8")] = secret_id
    return names
# --- Example 7 (scraped-listing separator) ---
def deploy_network_policy_controller():
    ''' Deploy the Calico network policy controller. '''
    status.maintenance('Deploying network policy controller.')
    etcd = endpoint_from_flag('etcd.available')
    # NOTE(review): the cert mtime is part of the template context,
    # presumably so the rendered manifest changes when certs rotate —
    # confirm against the template.
    context = {
        'connection_string': etcd.get_connection_string(),
        'etcd_key_path': ETCD_KEY_PATH,
        'etcd_cert_path': ETCD_CERT_PATH,
        'etcd_ca_path': ETCD_CA_PATH,
        'calico_policy_image': charm_config('calico-policy-image'),
        'etcd_cert_last_modified': os.path.getmtime(ETCD_CERT_PATH),
    }
    manifest = '/tmp/policy-controller.yaml'
    render('policy-controller.yaml', manifest, context)
    try:
        kubectl('apply', '-f', manifest)
        set_state('calico.npc.deployed')
    except CalledProcessError as e:
        # apiserver not ready yet; we'll retry on a later hook.
        status.waiting('Waiting for kubernetes')
        log(str(e))
# --- Example 8 (scraped-listing separator) ---
def disable_ingress():
    """Remove the ingress daemon set and default http backend, then close
    ports 80/443."""
    hookenv.log("Deleting the http backend and ingress.")
    hookenv.close_port(80)
    hookenv.close_port(443)
    addons = (
        "/root/cdk/addons/default-http-backend.yaml",
        "/root/cdk/addons/ingress-daemon-set.yaml",
    )
    try:
        for addon in addons:
            kubectl("delete", "--ignore-not-found", "-f", addon)
    except CalledProcessError:
        # Keep the state flag so a later hook retries the delete.
        traceback.print_exc()
        hookenv.log("Failed to disable ingress, waiting to retry")
        return
    remove_state("kubernetes-worker.ingress.available")
def get_secret_password(username):
    """Get the password for the given user from the secret that CK created.

    Args:
        username: Charmed Kubernetes username to look up.

    Returns:
        The decoded password string; for 'admin' when the apiserver is
        unreachable, the token read from /root/.kube/config; otherwise None.
    """
    try:
        output = kubernetes_common.kubectl(
            "get",
            "secrets",
            "-n",
            AUTH_SECRET_NS,
            "--field-selector",
            "type={}".format(AUTH_SECRET_TYPE),
            "-o",
            "json",
        ).decode("UTF-8")
    except CalledProcessError:
        # NB: apiserver probably isn't up. This can happen on bootstrap or upgrade
        # while trying to build kubeconfig files. If we need the 'admin' token during
        # this time, pull it directly out of the kubeconfig file if possible.
        token = None
        if username == "admin":
            admin_kubeconfig = Path("/root/.kube/config")
            if admin_kubeconfig.exists():
                with admin_kubeconfig.open("r") as f:
                    data = safe_load(f)
                    try:
                        token = data["users"][0]["user"]["token"]
                    except (KeyError, IndexError, TypeError, ValueError):
                        # The kubeconfig may be empty (safe_load -> None,
                        # raising TypeError), have no users (IndexError),
                        # or lack a token (KeyError); fall back to None.
                        pass
        return token
    except FileNotFoundError:
        # New deployments may ask for a token before the kubectl snap is installed.
        # Give them nothing!
        return None

    secrets = json.loads(output)
    if "items" in secrets:
        for secret in secrets["items"]:
            try:
                data_b64 = secret["data"]
                password_b64 = data_b64["password"].encode("UTF-8")
                username_b64 = data_b64["username"].encode("UTF-8")
            except (KeyError, TypeError):
                # CK authn secrets will have populated 'data', but not all secrets do
                continue

            password = b64decode(password_b64).decode("UTF-8")
            secret_user = b64decode(username_b64).decode("UTF-8")
            if username == secret_user:
                return password
    return None
# --- Example 10 (scraped-listing separator) ---
def get_secret_password(username):
    """Get the password for the given user from the secret that CK created.

    Args:
        username: Charmed Kubernetes username to look up.

    Returns:
        The decoded password string; for 'admin' when the apiserver is
        unreachable, the token read from /root/.kube/config; otherwise None.
    """
    try:
        output = kubernetes_common.kubectl(
            'get', 'secrets', '-n', AUTH_SECRET_NS,
            '--field-selector', 'type={}'.format(AUTH_SECRET_TYPE),
            '-o', 'json').decode('UTF-8')
    except CalledProcessError:
        # NB: apiserver probably isn't up. This can happen on bootstrap or upgrade
        # while trying to build kubeconfig files. If we need the 'admin' token during
        # this time, pull it directly out of the kubeconfig file if possible.
        token = None
        if username == 'admin':
            admin_kubeconfig = Path('/root/.kube/config')
            if admin_kubeconfig.exists():
                with admin_kubeconfig.open('r') as f:
                    data = safe_load(f)
                    try:
                        token = data['users'][0]['user']['token']
                    except (KeyError, IndexError, TypeError, ValueError):
                        # The kubeconfig may be empty (safe_load -> None,
                        # raising TypeError), have no users (IndexError),
                        # or lack a token (KeyError); fall back to None.
                        pass
        return token
    except FileNotFoundError:
        # New deployments may ask for a token before the kubectl snap is installed.
        # Give them nothing!
        return None

    secrets = json.loads(output)
    if 'items' in secrets:
        for secret in secrets['items']:
            try:
                data_b64 = secret['data']
                password_b64 = data_b64['password'].encode('UTF-8')
                username_b64 = data_b64['username'].encode('UTF-8')
            except (KeyError, TypeError):
                # CK authn secrets will have populated 'data', but not all secrets do
                continue

            password = b64decode(password_b64).decode('UTF-8')
            secret_user = b64decode(username_b64).decode('UTF-8')
            if username == secret_user:
                return password
    return None
def render_and_launch_ingress():
    ''' Launch the Kubernetes ingress controller & default backend (404) '''
    config = hookenv.config()

    # need to test this in case we get in
    # here from a config change to the image
    if not config.get('ingress'):
        return

    context = {}
    context['arch'] = arch()
    addon_path = '/root/cdk/addons/{}'
    context['juju_application'] = hookenv.service_name()

    # If present, workers will get the ingress containers from the configured
    # registry. Otherwise, we'll set an appropriate upstream image registry.
    registry_location = get_registry_location()

    # Resolve the default-backend image: "" or "auto" means pick an
    # arch-appropriate image from the chosen registry.
    context['defaultbackend_image'] = config.get('default-backend-image')
    if (context['defaultbackend_image'] == ""
            or context['defaultbackend_image'] == "auto"):
        if registry_location:
            backend_registry = registry_location
        else:
            backend_registry = 'k8s.gcr.io'
        # s390x only has a 1.4 image published; other arches use 1.5.
        if context['arch'] == 's390x':
            context['defaultbackend_image'] = \
                "{}/defaultbackend-s390x:1.4".format(backend_registry)
        elif context['arch'] == 'arm64':
            context['defaultbackend_image'] = \
                "{}/defaultbackend-arm64:1.5".format(backend_registry)
        else:
            context['defaultbackend_image'] = \
                "{}/defaultbackend-amd64:1.5".format(backend_registry)

    # Render the ingress daemon set controller manifest
    context['ssl_chain_completion'] = config.get(
        'ingress-ssl-chain-completion')
    # Resolve the nginx controller image the same way; s390x/ppc64el images
    # lag behind on 0.20.0 while amd64/arm64 are on 0.22.0.
    context['ingress_image'] = config.get('nginx-image')
    if context['ingress_image'] == "" or context['ingress_image'] == "auto":
        if registry_location:
            nginx_registry = registry_location
        else:
            nginx_registry = 'quay.io'
        images = {
            'amd64':
            'kubernetes-ingress-controller/nginx-ingress-controller-amd64:0.22.0',  # noqa
            'arm64':
            'kubernetes-ingress-controller/nginx-ingress-controller-arm64:0.22.0',  # noqa
            's390x':
            'kubernetes-ingress-controller/nginx-ingress-controller-s390x:0.20.0',  # noqa
            'ppc64el':
            'kubernetes-ingress-controller/nginx-ingress-controller-ppc64le:0.20.0',  # noqa
        }
        # Unknown architectures fall back to the amd64 image.
        context['ingress_image'] = '{}/{}'.format(
            nginx_registry, images.get(context['arch'], images['amd64']))

    # Older kubelets only support the beta DaemonSet API group.
    if get_version('kubelet') < (1, 9):
        context['daemonset_api_version'] = 'extensions/v1beta1'
    else:
        context['daemonset_api_version'] = 'apps/v1beta2'
    manifest = addon_path.format('ingress-daemon-set.yaml')
    render('ingress-daemon-set.yaml', manifest, context)
    hookenv.log('Creating the ingress daemon set.')
    try:
        kubectl('apply', '-f', manifest)
    except CalledProcessError as e:
        # Close the web ports until the controller actually exists.
        hookenv.log(e)
        hookenv.log(
            'Failed to create ingress controller. Will attempt again next update.'
        )  # noqa
        hookenv.close_port(80)
        hookenv.close_port(443)
        return

    # Render the default http backend (404) deployment manifest
    # needs to happen after ingress-daemon-set since that sets up the namespace
    manifest = addon_path.format('default-http-backend.yaml')
    render('default-http-backend.yaml', manifest, context)
    hookenv.log('Creating the default http backend.')
    try:
        kubectl('apply', '-f', manifest)
    except CalledProcessError as e:
        hookenv.log(e)
        hookenv.log(
            'Failed to create default-http-backend. Will attempt again next update.'
        )  # noqa
        hookenv.close_port(80)
        hookenv.close_port(443)
        return

    # Both manifests applied: mark ingress available and open the ports.
    set_state('kubernetes-worker.ingress.available')
    hookenv.open_port(80)
    hookenv.open_port(443)
def remove_old_ingress():
    '''Delete ingress resources left behind by older charm revisions.

    Every delete passes --ignore-not-found, so repeat runs are harmless.
    If any call fails, the state flag is left set and we try again on the
    next hook.
    '''
    app = hookenv.service_name()
    leftovers = [
        ('rc', 'nginx-ingress-controller'),
        # these moved into a different namespace for 1.12
        ('rc', 'default-http-backend'),
        ('svc', 'default-http-backend'),
        ('ds', 'nginx-ingress-{}-controller'.format(app)),
        ('serviceaccount', 'nginx-ingress-{}-serviceaccount'.format(app)),
        ('clusterrolebinding',
         'nginx-ingress-clusterrole-nisa-{}-binding'.format(app)),
        ('configmap', 'nginx-load-balancer-{}-conf'.format(app)),
    ]
    try:
        for kind, name in leftovers:
            kubectl('delete', kind, name, '--ignore-not-found')
    except CalledProcessError:
        # try again next time
        return

    remove_state('kubernetes-worker.remove-old-ingress')
# --- Example 13 (scraped-listing separator) ---
def render_and_launch_ingress():
    """Launch the Kubernetes ingress controller & default backend (404)"""
    config = hookenv.config()

    # need to test this in case we get in
    # here from a config change to the image
    if not config.get("ingress"):
        return

    context = {}
    context["arch"] = arch()
    addon_path = "/root/cdk/addons/{}"
    context["juju_application"] = hookenv.service_name()

    # If present, workers will get the ingress containers from the configured
    # registry. Otherwise, we'll set an appropriate upstream image registry.
    registry_location = get_registry_location()

    # Resolve the default-backend image: "" or "auto" means pick an
    # arch-appropriate image from the chosen registry.
    context["defaultbackend_image"] = config.get("default-backend-image")
    if (
        context["defaultbackend_image"] == ""
        or context["defaultbackend_image"] == "auto"
    ):
        if registry_location:
            backend_registry = registry_location
        else:
            backend_registry = "k8s.gcr.io"
        # s390x only has a 1.4 image; ppc64el uses the 'ppc64le' image name.
        if context["arch"] == "s390x":
            context["defaultbackend_image"] = "{}/defaultbackend-s390x:1.4".format(
                backend_registry
            )
        elif context["arch"] == "ppc64el":
            context["defaultbackend_image"] = "{}/defaultbackend-ppc64le:1.5".format(
                backend_registry
            )
        else:
            context["defaultbackend_image"] = "{}/defaultbackend-{}:1.5".format(
                backend_registry, context["arch"]
            )

    # Render the ingress daemon set controller manifest
    context["ssl_chain_completion"] = config.get("ingress-ssl-chain-completion")
    context["enable_ssl_passthrough"] = config.get("ingress-ssl-passthrough")
    context["default_ssl_certificate_option"] = None
    # A default TLS cert/key pair is only wired up when BOTH are configured;
    # they are base64-encoded for embedding in the rendered secret.
    if config.get("ingress-default-ssl-certificate") and config.get(
        "ingress-default-ssl-key"
    ):
        context["default_ssl_certificate"] = b64encode(
            config.get("ingress-default-ssl-certificate").encode("utf-8")
        ).decode("utf-8")
        context["default_ssl_key"] = b64encode(
            config.get("ingress-default-ssl-key").encode("utf-8")
        ).decode("utf-8")
        default_certificate_option = (
            "- --default-ssl-certificate=" "$(POD_NAMESPACE)/default-ssl-certificate"
        )
        context["default_ssl_certificate_option"] = default_certificate_option
    # Resolve the nginx controller image; "" or "auto" means choose per arch.
    context["ingress_image"] = config.get("nginx-image")
    if context["ingress_image"] == "" or context["ingress_image"] == "auto":
        if context["arch"] == "ppc64el":
            # multi-arch image doesn't include ppc64le, have to use an older version
            image = "nginx-ingress-controller-ppc64le"
            context["ingress_uid"] = "33"
            context["ingress_image"] = "/".join(
                [
                    registry_location or "quay.io",
                    "kubernetes-ingress-controller/{}:0.20.0".format(image),
                ]
            )
        else:
            context["ingress_uid"] = "101"
            context["ingress_image"] = "/".join(
                [
                    registry_location or "us.gcr.io",
                    "k8s-artifacts-prod/ingress-nginx/controller:v1.2.0",
                ]
            )

    # Select API groups that match what this kubelet version supports.
    kubelet_version = get_version("kubelet")
    if kubelet_version < (1, 9):
        context["daemonset_api_version"] = "extensions/v1beta1"
        context["deployment_api_version"] = "extensions/v1beta1"
    elif kubelet_version < (1, 16):
        context["daemonset_api_version"] = "apps/v1beta2"
        context["deployment_api_version"] = "extensions/v1beta1"
    else:
        context["daemonset_api_version"] = "apps/v1"
        context["deployment_api_version"] = "apps/v1"
    context["use_forwarded_headers"] = (
        "true" if config.get("ingress-use-forwarded-headers") else "false"
    )

    manifest = addon_path.format("ingress-daemon-set.yaml")
    render("ingress-daemon-set.yaml", manifest, context)
    hookenv.log("Creating the ingress daemon set.")
    try:
        kubectl("apply", "-f", manifest)
    except CalledProcessError as e:
        # Close the web ports until the controller actually exists.
        hookenv.log(e)
        hookenv.log(
            "Failed to create ingress controller. Will attempt again next update."
        )  # noqa
        hookenv.close_port(80)
        hookenv.close_port(443)
        return

    # Render the default http backend (404) deployment manifest
    # needs to happen after ingress-daemon-set since that sets up the namespace
    manifest = addon_path.format("default-http-backend.yaml")
    render("default-http-backend.yaml", manifest, context)
    hookenv.log("Creating the default http backend.")
    try:
        kubectl("apply", "-f", manifest)
    except CalledProcessError as e:
        hookenv.log(e)
        hookenv.log(
            "Failed to create default-http-backend. Will attempt again next update."
        )  # noqa
        hookenv.close_port(80)
        hookenv.close_port(443)
        return

    # Both manifests applied: mark ingress available and open the ports.
    set_state("kubernetes-worker.ingress.available")
    hookenv.open_port(80)
    hookenv.open_port(443)
# --- Example 14 (scraped-listing separator) ---
def remove_old_ingress():
    """Delete ingress resources left behind by older charm revisions.

    All deletes pass --ignore-not-found so repeat runs are harmless; on
    any failure the state flag stays set and we retry on the next hook.
    """
    app = hookenv.service_name()
    leftovers = [
        ("rc", "nginx-ingress-controller"),
        # these moved into a different namespace for 1.12
        ("rc", "default-http-backend"),
        ("svc", "default-http-backend"),
        ("ds", "nginx-ingress-{}-controller".format(app)),
        ("serviceaccount", "nginx-ingress-{}-serviceaccount".format(app)),
        (
            "clusterrolebinding",
            "nginx-ingress-clusterrole-nisa-{}-binding".format(app),
        ),
        ("configmap", "nginx-load-balancer-{}-conf".format(app)),
    ]
    try:
        for kind, name in leftovers:
            kubectl("delete", kind, name, "--ignore-not-found")
    except CalledProcessError:
        # try again next time
        return

    remove_state("kubernetes-worker.remove-old-ingress")
# --- Example 15 (scraped-listing separator) ---
def post_series_upgrade():
    """Resume the worker daemons and make this node schedulable again."""
    for daemon in ("snap.kubelet.daemon", "snap.kube-proxy.daemon"):
        service_resume(daemon)
    # Undo the drain performed in pre_series_upgrade.
    kubectl("uncordon", get_node_name())
# --- Example 16 (scraped-listing separator) ---
def launch_default_ingress_controller():
    ''' Launch the Kubernetes ingress controller & default backend (404) '''
    config = hookenv.config()

    # need to test this in case we get in
    # here from a config change to the image
    if not config.get('ingress'):
        return

    context = {}
    context['arch'] = arch()
    addon_path = '/root/cdk/addons/{}'

    # Resolve the default-backend image: "" or "auto" means pick the
    # arch-appropriate upstream image.
    context['defaultbackend_image'] = config.get('default-backend-image')
    if (context['defaultbackend_image'] == ""
            or context['defaultbackend_image'] == "auto"):
        if context['arch'] == 's390x':
            context['defaultbackend_image'] = \
                "k8s.gcr.io/defaultbackend-s390x:1.4"
        elif context['arch'] == 'arm64':
            context['defaultbackend_image'] = \
                "k8s.gcr.io/defaultbackend-arm64:1.4"
        else:
            context['defaultbackend_image'] = \
                "k8s.gcr.io/defaultbackend:1.4"

    # Render the default http backend (404) replicationcontroller manifest
    # NOTE(review): unlike render_and_launch_ingress, this legacy variant
    # applies the backend BEFORE the daemon set.
    manifest = addon_path.format('default-http-backend.yaml')
    render('default-http-backend.yaml', manifest, context)
    hookenv.log('Creating the default http backend.')
    try:
        kubectl('apply', '-f', manifest)
    except CalledProcessError as e:
        # Close the web ports until both pieces are actually deployed.
        hookenv.log(e)
        hookenv.log(
            'Failed to create default-http-backend. Will attempt again next update.'
        )  # noqa
        hookenv.close_port(80)
        hookenv.close_port(443)
        return

    # Render the ingress daemon set controller manifest
    context['ssl_chain_completion'] = config.get(
        'ingress-ssl-chain-completion')
    # Resolve the nginx controller image; "" or "auto" selects a 0.16.1
    # image for the unit's arch, defaulting to amd64 for unknown arches.
    context['ingress_image'] = config.get('nginx-image')
    if context['ingress_image'] == "" or context['ingress_image'] == "auto":
        images = {
            'amd64':
            'quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.16.1',  # noqa
            'arm64':
            'quay.io/kubernetes-ingress-controller/nginx-ingress-controller-arm64:0.16.1',  # noqa
            's390x':
            'quay.io/kubernetes-ingress-controller/nginx-ingress-controller-s390x:0.16.1',  # noqa
            'ppc64el':
            'quay.io/kubernetes-ingress-controller/nginx-ingress-controller-ppc64le:0.16.1',  # noqa
        }
        context['ingress_image'] = images.get(context['arch'], images['amd64'])
    # Older kubelets only support the beta DaemonSet API group.
    if get_version('kubelet') < (1, 9):
        context['daemonset_api_version'] = 'extensions/v1beta1'
    else:
        context['daemonset_api_version'] = 'apps/v1beta2'
    context['juju_application'] = hookenv.service_name()
    manifest = addon_path.format('ingress-daemon-set.yaml')
    render('ingress-daemon-set.yaml', manifest, context)
    hookenv.log('Creating the ingress daemon set.')
    try:
        kubectl('apply', '-f', manifest)
    except CalledProcessError as e:
        hookenv.log(e)
        hookenv.log(
            'Failed to create ingress controller. Will attempt again next update.'
        )  # noqa
        hookenv.close_port(80)
        hookenv.close_port(443)
        return

    # Both manifests applied: mark ingress available and open the ports.
    set_state('kubernetes-worker.ingress.available')
    hookenv.open_port(80)
    hookenv.open_port(443)