def create_namespace(v1: CoreV1Api, body) -> str:
    """
    Create a namespace based on a dict.

    :param v1: CoreV1Api
    :param body: a dict
    :return: str
    """
    print("Create a namespace:")
    v1.create_namespace(body)
    print(f"Namespace created with name '{body['metadata']['name']}'")
    return body['metadata']['name']
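# A minimal usage sketch for create_namespace() above (assumptions: a reachable cluster
# with a loadable kubeconfig; the namespace name "example-ns" is illustrative). It shows
# the dict shape expected in `body`.
from kubernetes import client, config

config.load_kube_config()
core_v1 = client.CoreV1Api()
namespace_body = {
    "apiVersion": "v1",
    "kind": "Namespace",
    "metadata": {"name": "example-ns"},
}
create_namespace(core_v1, namespace_body)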
def create_service_account(v1: CoreV1Api, namespace, body) -> None:
    """
    Create a ServiceAccount based on a dict.

    :param v1: CoreV1Api
    :param namespace: namespace name
    :param body: a dict
    :return:
    """
    print("Create a SA:")
    v1.create_namespaced_service_account(namespace, body)
    print(f"Service account created with name '{body['metadata']['name']}'")
def create_secret(v1: CoreV1Api, namespace, body) -> str:
    """
    Create a secret based on a dict.

    :param v1: CoreV1Api
    :param namespace: namespace
    :param body: a dict
    :return: str
    """
    print("Create a secret:")
    v1.create_namespaced_secret(namespace, body)
    print(f"Secret created: {body['metadata']['name']}")
    return body['metadata']['name']
def create_configmap(v1: CoreV1Api, namespace, body) -> str:
    """
    Create a config-map based on a dict.

    :param v1: CoreV1Api
    :param namespace: namespace name
    :param body: a dict
    :return: str
    """
    print("Create a configMap:")
    v1.create_namespaced_config_map(namespace, body)
    print(f"Config map created with name '{body['metadata']['name']}'")
    return body["metadata"]["name"]
def replace_configmap(v1: CoreV1Api, name, namespace, body) -> None:
    """
    Replace a config-map based on a dict.

    :param v1: CoreV1Api
    :param name: config map name
    :param namespace: namespace name
    :param body: a dict
    :return:
    """
    print(f"Replace a configMap: '{name}'")
    v1.replace_namespaced_config_map(name, namespace, body)
    print("ConfigMap replaced")
def delete_namespace(v1: CoreV1Api, namespace) -> None:
    """
    Delete a namespace.

    :param v1: CoreV1Api
    :param namespace: namespace name
    :return:
    """
    print(f"Delete a namespace: {namespace}")
    delete_options = client.V1DeleteOptions()
    delete_options.grace_period_seconds = 0
    delete_options.propagation_policy = 'Foreground'
    v1.delete_namespace(namespace, delete_options)
    ensure_item_removal(v1.read_namespace, namespace)
    print(f"Namespace was removed with name '{namespace}'")
def is_secret_present(v1: CoreV1Api, name, namespace) -> bool:
    """
    Check if a namespace has a secret.

    :param v1: CoreV1Api
    :param name: secret name
    :param namespace: namespace name
    :return: bool
    """
    try:
        v1.read_namespaced_secret(name, namespace)
    except ApiException as ex:
        if ex.status == 404:
            print(f"No secret '{name}' found.")
            return False
    return True
def replace_configmap_from_yaml(v1: CoreV1Api, name, namespace, yaml_manifest) -> None:
    """
    Replace a config-map based on a yaml file.

    :param v1: CoreV1Api
    :param name: config map name
    :param namespace: namespace name
    :param yaml_manifest: an absolute path to file
    :return:
    """
    print(f"Replace a configMap: '{name}'")
    with open(yaml_manifest) as f:
        dep = yaml.safe_load(f)
    v1.replace_namespaced_config_map(name, namespace, dep)
    print("ConfigMap replaced")
def create_namespace_with_name_from_yaml(v1: CoreV1Api, name, yaml_manifest) -> str:
    """
    Create a namespace with a specific name based on a yaml manifest.

    :param v1: CoreV1Api
    :param name: namespace name
    :param yaml_manifest: an absolute path to file
    :return: str
    """
    print(f"Create a namespace with specific name:")
    with open(yaml_manifest) as f:
        dep = yaml.safe_load(f)
        f.close()
        dep['metadata']['name'] = name
        v1.create_namespace(dep)
        print(f"Namespace created with name '{str(dep['metadata']['name'])}'")
        return dep['metadata']['name']
def replace_secret(v1: CoreV1Api, name, namespace, yaml_manifest) -> str:
    """
    Replace a secret based on yaml file.

    :param v1: CoreV1Api
    :param name: secret name
    :param namespace: namespace name
    :param yaml_manifest: an absolute path to file
    :return: str
    """
    print(f"Replace a secret: '{name}'' in a namespace: '{namespace}'")
    with open(yaml_manifest) as f:
        dep = yaml.safe_load(f)
        f.close()
        v1.replace_namespaced_secret(name, namespace, dep)
        print("Secret replaced")
    return name
def wait_for_public_ip(v1: CoreV1Api, namespace: str) -> str:
    """
    Wait for LoadBalancer to get the public ip.

    :param v1: CoreV1Api
    :param namespace: namespace
    :return: str
    """
    resp = v1.list_namespaced_service(namespace)
    counter = 0
    while resp.items[0].status.load_balancer.ingress is None and counter < 20:
        time.sleep(5)
        resp = v1.list_namespaced_service(namespace)
        counter = counter + 1
    if counter == 20:
        pytest.fail("After 100 seconds the LB still doesn't have a Public IP. Exiting...")
    print(f"Public IP ='{resp.items[0].status.load_balancer.ingress[0].ip}'")
    return str(resp.items[0].status.load_balancer.ingress[0].ip)
def get_first_pod_name(v1: CoreV1Api, namespace) -> str:
    """
    Return the name of the first pod in a namespace.

    :param v1: CoreV1Api
    :param namespace: namespace name
    :return: str
    """
    resp = v1.list_namespaced_pod(namespace)
    return resp.items[0].metadata.name
def get_service_node_ports(v1: CoreV1Api, name, namespace) -> (int, int):
    """
    Get the node_ports allocated to a service.

    :param v1: CoreV1Api
    :param name: service name
    :param namespace: namespace name
    :return: (plain_port, ssl_port)
    """
    resp = v1.read_namespaced_service(name, namespace)
    assert len(resp.spec.ports) == 2, "Expected exactly 2 ports to be assigned to the service"
    return resp.spec.ports[0].node_port, resp.spec.ports[1].node_port
def create_service(v1: CoreV1Api, namespace, body) -> str:
    """
    Create a service based on a dict.

    :param v1: CoreV1Api
    :param namespace: namespace
    :param body: a dict
    :return: str
    """
    print("Create a Service:")
    resp = v1.create_namespaced_service(namespace, body)
    print(f"Service created with name '{body['metadata']['name']}'")
    return resp.metadata.name
Example #15
    def stop(self, wait=False, timeout=0):
        # stop isolation
        stop_isolation()

        from kubernetes.client import CoreV1Api
        api = CoreV1Api(self._api_client)
        api.delete_namespace(self._namespace)
        if wait:
            start_time = time.time()
            while True:
                try:
                    api.read_namespace(self._namespace)
                except K8SApiException as ex:
                    if ex.status != 404:  # pragma: no cover
                        raise
                    break
                else:
                    time.sleep(1)
                    if timeout and time.time() - start_time > timeout:  # pragma: no cover
                        raise TimeoutError
def main():
    """Continuously flush and delete detached persistent volumes."""
    args = parser.parse_args()
    load_incluster_config()
    api = CoreV1Api()
    batch_api = BatchV1Api()
    tasks = [
        partial(
            flush_released_pvs_and_delete_complete_jobs,
            api,
            batch_api,
            args.command,
            args.env,
            args.image,
            args.namespace,
        ),
        partial(delete_detached_pvcs, api, args.namespace, args.claim_prefix),
        partial(delete_unschedulable_pods, api, args.namespace),
    ]
    with ThreadPool(len(tasks)) as pool:
        pool.map(run_task, tasks, chunksize=1)
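# A hedged sketch of the worker loop run_task could implement for the partials above.
# run_task is not shown in this snippet; this stand-in is illustrative, not the original
# implementation (the 10-second retry delay and the use of logging/time are assumptions).
def run_task(task) -> None:
    while True:
        try:
            task()
        except Exception:
            logging.exception("Task failed; retrying")
        time.sleep(10)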
def are_all_pods_in_ready_state(v1: CoreV1Api, namespace) -> bool:
    """
    Check if all the pods have Ready condition.

    :param v1: CoreV1Api
    :param namespace: namespace
    :return: bool
    """
    pods = v1.list_namespaced_pod(namespace)
    if not pods.items:
        return False
    pod_ready_amount = 0
    for pod in pods.items:
        if pod.status.conditions is None:
            return False
        for condition in pod.status.conditions:
            # wait for 'Ready' state instead of 'ContainersReady' for backwards compatibility with k8s 1.10
            if condition.type == 'Ready' and condition.status == 'True':
                pod_ready_amount = pod_ready_amount + 1
                break
    return pod_ready_amount == len(pods.items)
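# A minimal polling sketch built on are_all_pods_in_ready_state() above (assumptions:
# the 60-second budget and 1-second poll interval are illustrative; time, pytest and
# CoreV1Api are imported as in the surrounding snippets).
def wait_until_all_pods_ready(v1: CoreV1Api, namespace, timeout_seconds=60) -> None:
    deadline = time.time() + timeout_seconds
    while not are_all_pods_in_ready_state(v1, namespace):
        if time.time() > deadline:
            pytest.fail(f"Pods in '{namespace}' are not Ready after {timeout_seconds} seconds")
        time.sleep(1)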
Example #18
def get_pod(core_api: k8s_client.CoreV1Api, pod_name: str,
            namespace: str) -> Optional[k8s_client.V1Pod]:
  """Get a pod from Kubernetes metadata API.

  Args:
    core_api: Client of Core V1 API of Kubernetes API.
    pod_name: The name of the Pod.
    namespace: The namespace of the Pod.

  Returns:
    The found Pod object. None if it's not found.
  Raises:
    RuntimeError: When it sees unexpected errors from Kubernetes API.
  """
  try:
    return core_api.read_namespaced_pod(name=pod_name, namespace=namespace)
  except k8s_client.rest.ApiException as e:
    if e.status != 404:
      raise RuntimeError('Unknown error! \nReason: %s\nBody: %s' %
                         (e.reason, e.body))
    return None
def are_all_pods_in_ready_state(v1: CoreV1Api, namespace) -> bool:
    """
    Check if all the pods have ContainersReady condition.

    :param v1: CoreV1Api
    :param namespace: namespace
    :return: bool
    """
    pods = v1.list_namespaced_pod(namespace)
    if not pods.items:
        return False
    pod_ready_amount = 0
    for pod in pods.items:
        if pod.status.conditions is None:
            return False
        for condition in pod.status.conditions:
            # wait for the 'ContainersReady' condition (all containers in the pod are ready)
            if condition.type == 'ContainersReady' and condition.status == 'True':
                pod_ready_amount = pod_ready_amount + 1
                break
    return pod_ready_amount == len(pods.items)
Example #20
def get_username_password(ref: SecretRef, api: CoreV1Api):

    secret = api.read_namespaced_secret(namespace=ref.namespace,
                                        name=ref.name,
                                        exact=True,
                                        export=True)

    raw = None

    if ref.password_ref is not None:
        raw = secret.data[ref.password_ref]
    elif "mongodb-root-password" in secret.data:
        raw = secret.data["mongodb-root-password"]
    elif "mongodb-password" in secret.data:
        raw = secret.data["mongodb-password"]
    elif "password" in secret.data:
        raw = secret.data["passsword"]

    if raw is None:
        raise RuntimeError(
            "Invalid secret, must have key mongodb-root-password, mongodb-password or passsword"
        )

    password = b64decode(raw).decode("utf-8")
    raw = None

    if ref.user_ref is not None:
        raw = secret.data[ref.user_ref]
    if "mongodb-root-user" in secret.data:
        raw = secret.data["mongodb-root-user"]
    elif "mongodb-user" in secret.data:
        raw = secret.data["mongodb-user"]
    elif "user" in secret.data:
        raw = secret.data["user"]

    if raw is None:
        return (None, password)
        # raise RuntimeError("Invalid secret, must have key mongodb-root-user, mongodb-user or user")
    user = b64decode(raw).decode("utf-8")
    return (user, password)
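# A minimal sketch of creating a secret that get_username_password() above can consume
# (assumptions: the secret name "mongodb-credentials", the namespace "default" and the
# credential values are illustrative; string_data lets the API server apply the base64
# encoding that the .data fields carry).
from kubernetes import client

def create_mongodb_credentials_secret(api: client.CoreV1Api, namespace: str = "default"):
    body = client.V1Secret(
        metadata=client.V1ObjectMeta(name="mongodb-credentials"),
        string_data={"mongodb-user": "app", "mongodb-password": "change-me"},
    )
    return api.create_namespaced_secret(namespace=namespace, body=body)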
Example #21
def nodes_epc_usage() -> Dict[str, int]:
    """
    Fetches the EPC usage for all nodes in the cluster.
    Takes the bigger value between measured usage and sum of requests.
    """
    k8s_api = CoreV1Api()
    pods = k8s_api.list_pod_for_all_namespaces(
        field_selector="spec.nodeName!="
    ).items
    nodes_pods_usage = (
        (x.spec.node_name, pod_sum_resources_requests(x, "intel.com/sgx")) for x in pods if
        x.status.phase in ("Pending", "Running") and pod_requests_sgx(x)
    )
    usage_per_node = defaultdict(lambda: 0)
    for (node_name, usage) in nodes_pods_usage:
        usage_per_node[node_name] += usage

    influx_results = influx_client.query(
        'SELECT SUM(epc) AS epc FROM (SELECT MAX(value) AS epc FROM "sgx/epc" WHERE value <> 0 AND time >= now() - 25s'
        ' GROUP BY pod_name, nodename) GROUP BY nodename'
    )
    return {k[1]["nodename"]: max(next(v)["epc"], usage_per_node[k[1]["nodename"]]) for k, v in influx_results.items()}
Example #22
def nodes_memory_usage() -> Dict[str, float]:
    """
    Fetches the memory usage for all nodes in the cluster.
    Takes the bigger value between measured usage and sum of requests.
    """
    k8s_api = CoreV1Api()
    pods = k8s_api.list_pod_for_all_namespaces(
        field_selector="spec.nodeName!="
    ).items
    nodes_pods_usage = (
        (x.spec.node_name, pod_sum_resources_requests(x, "memory")) for x in pods if
        x.status.phase in ("Pending", "Running")
    )
    usage_per_node = defaultdict(lambda: 0)
    for (node_name, usage) in nodes_pods_usage:
        usage_per_node[node_name] += usage

    influx_results = influx_client.query(
        'SELECT MEAN(value) AS memory FROM "memory/usage" WHERE time >= now() - 2m AND type=\'node\' GROUP BY nodename'
    )
    return {k[1]["nodename"]: max(next(v)["memory"], usage_per_node[k[1]["nodename"]]) for k, v in
            influx_results.items()}
Example #23
def ensure_pull_secret(secret,
                       namespace=get_execution_namespace(),
                       api=None,
                       log=None):
    if not secret:
        return
    if not api:
        load_k8s_config()
        api = CoreV1Api()
    try:
        name = secret.metadata.name
        # Give secret new metadata; lots of read-only stuff in the existing
        #  secret metadata.
        secret.metadata = client.V1ObjectMeta(name=name, namespace=namespace)
        api.create_namespaced_secret(namespace=namespace, body=secret)
    except ApiException as e:
        if not log:
            log = make_logger()
        if e.status != 409:
            log.error("Failed to create pull secret: {}".format(e))
            raise
        log.info(f"Pull secret already exists in namespace {namespace}")
def get_service_node_ports(v1: CoreV1Api, name, namespace) -> (int, int, int, int, int, int):
    """
    Get the node_ports allocated to a service.

    :param v1: CoreV1Api
    :param name: service name
    :param namespace: namespace name
    :return: (plain_port, ssl_port, api_port, exporter_port)
    """
    resp = v1.read_namespaced_service(name, namespace)
    if len(resp.spec.ports) != 6:
        print("An unexpected number of ports in a service. Check the configuration")
    print(f"Service with an API port: {resp.spec.ports[2].node_port}")
    print(f"Service with an Exporter port: {resp.spec.ports[3].node_port}")
    return (
        resp.spec.ports[0].node_port,
        resp.spec.ports[1].node_port,
        resp.spec.ports[2].node_port,
        resp.spec.ports[3].node_port,
        resp.spec.ports[4].node_port,
        resp.spec.ports[5].node_port,
    )
Example #25
    def exec_command(self, namespace: str, pod_name: str, container_name: str, command: List):
        """
        在指定 Pod 容器中执行命令

        :param namespace: 命名空间
        :param pod_name: Pod 名称
        :param container_name: 容器名称
        :param command: 待执行指令,argv array 格式
        :return: 指令执行结果(stdout,stderr)
        """
        api_instance = CoreV1Api(self.dynamic_client.client)
        return stream(
            api_instance.connect_get_namespaced_pod_exec,
            name=pod_name,
            namespace=namespace,
            command=command,
            container=container_name,
            stderr=True,
            stdin=False,
            stdout=True,
            tty=False,
        )
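# For reference, a standalone sketch of the same exec pattern without the wrapper class
# (assumptions: a loadable kubeconfig, and a pod "my-pod" in namespace "default" with a
# container named "app"; all three names are illustrative).
from kubernetes import client, config
from kubernetes.stream import stream

config.load_kube_config()
core_v1 = client.CoreV1Api()
output = stream(
    core_v1.connect_get_namespaced_pod_exec,
    name="my-pod",
    namespace="default",
    command=["/bin/sh", "-c", "echo hello"],
    container="app",
    stderr=True,
    stdin=False,
    stdout=True,
    tty=False,
)
print(output)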
Example #26
    def _snapshot_status(self, core: kubeclient.CoreV1Api, etcd_app_name: str, tries: int):
        for t in range(tries):
            r = core.list_namespaced_pod("backup", label_selector="etcd=%s" % etcd_app_name)
            for p in r.items:
                ip = p.status.host_ip
                if p.status.phase != "Succeeded":
                    display("%d/%d pod %s status.phase: %s" % (t, tries, p.metadata.name, p.status.phase))
                    continue
                try:
                    stdout = subprocess.check_output([
                        "ssh", "-o", "StrictHostKeyChecking=no",
                        "-o", "UserKnownHostsFile=/dev/null",
                        "-o", "ConnectTimeout=1",
                        "-i", self.ssh_private_key,
                        "-lcore", ip,
                        'sudo /opt/bin/etcdctl3 snapshot status /var/lib/backup/etcd3/%s.snap -w json' % etcd_app_name
                    ])
                    return json.loads(stdout.decode())
                except Exception as e:
                    display(e)

            time.sleep(self.testing_sleep_seconds)
Example #27
def list_storage_pvc():
    load_kube_config()
    api_instance = CoreV1Api()
    props = get_properties()
    params = dict()
    team_name = props["AWS_ORBIT_TEAM_SPACE"]
    params["namespace"] = os.environ.get("AWS_ORBIT_USER_SPACE", team_name)
    params["_preload_content"] = False
    try:
        api_response = api_instance.list_namespaced_persistent_volume_claim(
            **params)
        res = json.loads(api_response.data)
    except ApiException as e:
        _logger.info(
            "Exception when calling CoreV1Api->list persistent volume claims: %s\n"
            % e)
        raise e

    if "items" not in res:
        return []

    return res["items"]
Example #28
    def delete_vnf(self, uuid, tenant=None):

        if uuid:
            todelete_uuids = [uuid]
        else:
            todelete_uuids = [
                uuid for uuid in self.vnfs if self.vnfs[uuid].tenant == tenant
            ]

        for todelete_uuid in todelete_uuids:
            vnf = self.vnfs[todelete_uuid]
            handle_yaml(k8s_client=ApiClient(),
                        yaml_file=vnf.get_k8s_desc_file().name,
                        mode="delete",
                        namespace=tenant)
            del self.vnfs[todelete_uuid]

        if not [vnf for vnf in self.vnfs.values() if vnf.tenant == tenant]:
            v1 = CoreV1Api()
            try:
                v1.delete_namespace(tenant)
            except:
                pass
Example #29
def list_current_pods(label_selector: str = None):
    props = get_properties()
    team_name = props["AWS_ORBIT_TEAM_SPACE"]
    namespace = os.environ.get("AWS_ORBIT_USER_SPACE", team_name)
    load_kube_config()
    api_instance = CoreV1Api()
    try:
        params: Dict[str, Any] = {}
        params["namespace"] = namespace
        params["_preload_content"] = False
        if label_selector:
            params["label_selector"] = label_selector
        api_response = api_instance.list_namespaced_pod(**params)
        res = json.loads(api_response.data)
    except ApiException as e:
        _logger.info(
            "Exception when calling CoreV1Api->list_namespace_pod: %s\n" % e)
        raise e

    if "items" not in res:
        return []

    return res["items"]
def set_args(command_arg_file_path,
             config_map_lookup,
             arg_name,
             kubernetes_api: client.CoreV1Api,
             key=None):
    try:
        config_map = kubernetes_api.read_namespaced_config_map(
            name=config_map_lookup, namespace="default")
        multiaddress_data = config_map.data
        if key:
            multiaddresses = [multiaddress_data[key]]
        else:
            multiaddresses = multiaddress_data.values()
        if multiaddresses:
            command_line_args_list = [
                f"--{arg_name}={x}".rstrip() for x in multiaddresses
            ]
            command_line_args = " " + " ".join(command_line_args_list)
            with open(command_arg_file_path, 'a') as command_arg_file:
                command_arg_file.write(command_line_args)
    except client.rest.ApiException as api_exception:
        if api_exception.status != 404:
            raise
Example #31
def _create_token_for_sa(
    core_api: CoreV1Api,
    service_account: V1ServiceAccount,
) -> str:
    service_account_name = service_account.metadata.name
    service_account_namespace = service_account.metadata.namespace
    token = core_api.create_namespaced_secret(
        namespace=service_account_namespace,
        body=V1Secret(
            api_version='v1',
            kind='Secret',
            metadata=V1ObjectMeta(
                generate_name=f'{service_account_name}-token-',
                annotations={
                    'kubernetes.io/service-account.name': service_account_name
                },
            ),
            type='kubernetes.io/service-account-token',
        ),
    )
    # not all required values are set on the returned object yet. Return only name so that we can
    # fetch it later (name will be generated by the kube-apiserver)
    return token.metadata.name
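# A minimal follow-up sketch for reading the generated token back once the token
# controller has populated the secret (assumptions: the 30x1-second polling budget is
# illustrative; time and b64decode are imported as in the other snippets here).
def _read_token_for_sa(core_api: CoreV1Api, secret_name: str, namespace: str) -> str:
    for _ in range(30):
        secret = core_api.read_namespaced_secret(name=secret_name, namespace=namespace)
        if secret.data and 'token' in secret.data:
            return b64decode(secret.data['token']).decode('utf-8')
        time.sleep(1)
    raise RuntimeError(f"Token was not populated for secret '{secret_name}'")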
Example #32
    def create_vnf(self, uuid, **params):

        v1 = CoreV1Api()
        namespaces = [ns.metadata.name for ns in v1.list_namespace().items]

        tenant = params["tenant"]
        if tenant not in namespaces:
            ns_file = open("/etc/lightmano/k8s/_internal/namespace.yaml")
            ns = ns_file.read()
            ns_file.close()
            ns = ns.replace("-NAME-", tenant)
            ns = yaml.safe_load(ns)
            v1.create_namespace(ns)

            rbac_api = RbacAuthorizationV1Api()

            with open("/etc/lightmano/k8s/_internal/ns_role.yaml") as role_file:
                role = role_file.read()
            role = yaml.safe_load(role)
            rbac_api.create_namespaced_role(namespace=tenant, body=role)

            with open("/etc/lightmano/k8s/_internal/ns_role_binding.yaml") as rolebind_file:
                rolebinding = rolebind_file.read()
            rolebinding = yaml.safe_load(rolebinding)
            rbac_api.create_namespaced_role_binding(namespace=tenant,
                                                    body=rolebinding)

        vnf = VNF(uuid, **params)

        handle_yaml(k8s_client=ApiClient(),
                    yaml_file=vnf.get_k8s_desc_file().name,
                    mode="create",
                    namespace=tenant)
        self.vnfs[uuid] = vnf
Example #33
def watch_pod_events():
    V1_CLIENT = CoreV1Api()
    while True:
        try:
            logger.info("Checking for pod events....")
            try:
                watcher = watch.Watch()
                for event in watcher.stream(V1_CLIENT.list_pod_for_all_namespaces, label_selector=SCHEDULE_STRATEGY, timeout_seconds=20):
                    logger.info(f"Event: {event['type']} {event['object'].kind}, {event['object'].metadata.namespace}, {event['object'].metadata.name}, {event['object'].status.phase}")
                    if event["object"].status.phase == "Pending":
                        try:
                            logger.info(f'{event["object"].metadata.name} needs scheduling...')
                            pod_namespace = event["object"].metadata.namespace
                            pod_name = event["object"].metadata.name
                            service_name = event["object"].metadata.labels["serviceName"]
                            logger.info("Processing for Pod: %s/%s", pod_namespace, pod_name)
                            node_name = _get_schedulable_node(V1_CLIENT)
                            if node_name:
                                logger.info("Namespace %s, PodName %s , Node Name: %s  Service Name: %s",
                                            pod_namespace, pod_name, node_name, service_name)
                                res = schedule_pod(V1_CLIENT, pod_name, node_name, pod_namespace)
                                logger.info("Response %s ", res)
                            else:
                                logger.error(f"Found no valid node to schedule {pod_name} in {pod_namespace}")
                        except ApiException as e:
                            logger.error(json_loads(e.body)["message"])
                        except ValueError as e:
                            logger.error("Value Error %s", e)
                        except:
                            logger.exception("Ignoring Exception")
                logger.info("Resetting k8s watcher...")
            except:
                logger.exception("Ignoring Exception")
            finally:
                del watcher
        except:
            logger.exception("Ignoring Exception & listening for pod events")
Example #34
def tail_pod_log(namespace: str, pod: V1Pod, color_idx: int,
                 seconds: Optional[int]) -> None:
    v1 = CoreV1Api()
    watch = Watch()
    print(
        f"{fg(color_idx)}Showing the logs for server {pod.metadata.name} of your service{attr(0)}"
    )
    retry_count = 0
    while True:
        try:
            for logline in watch.stream(
                    v1.read_namespaced_pod_log,
                    name=pod.metadata.name,
                    namespace=namespace,
                    container="k8s-service",
                    since_seconds=seconds,
            ):
                print(f"{fg(color_idx)}{pod.metadata.name} {logline}{attr(0)}")
        except Exception as e:
            if isinstance(e, ApiException):
                if e.status == 404:  # type: ignore
                    print(
                        f"{fg(color_idx)}Server {pod.metadata.name} has been terminated{attr(0)}"
                    )
                    return

            if retry_count < 15:
                print(
                    f"{fg(color_idx)}Couldn't get logs, waiting a bit and retrying{attr(0)}"
                )
                time.sleep(retry_count)
                retry_count += 1
            else:
                logger.error(
                    f"Got the following error while trying to fetch the logs for pod {pod.metadata.name} in namespace {namespace}: {e}"
                )
                return
Example #35
def tail_module_log(
        layer: "Layer",
        module_name: str,
        since_seconds: Optional[int] = None,
        earliest_pod_start_time: Optional[datetime.datetime] = None,
        start_color_idx: int = 15,  # White Color
) -> None:
    current_pods_monitored: Set[str] = set()
    load_opta_kube_config()
    v1 = CoreV1Api()
    watch = Watch()
    count = 0
    """Using the UTC Time stamp as the Kubernetes uses the UTC Timestamps."""
    for event in watch.stream(
            v1.list_namespaced_pod,
            namespace=layer.name,
            label_selector=
            f"app.kubernetes.io/instance={layer.name}-{module_name}",
    ):
        pod: V1Pod = event["object"]
        color_idx = count % (256 - start_color_idx) + start_color_idx
        if color_idx in REDS:
            count += 1
            color_idx = count % (256 - start_color_idx) + start_color_idx
        if (earliest_pod_start_time is not None
                and pod.metadata.creation_timestamp < earliest_pod_start_time):
            continue

        if pod.metadata.name not in current_pods_monitored:
            current_pods_monitored.add(pod.metadata.name)
            new_thread = Thread(
                target=tail_pod_log,
                args=(layer.name, pod, color_idx, since_seconds),
                daemon=True,
            )
            new_thread.start()
            count += 1
Example #36
    def _get_pod_infos_ns(self, namespace: str) -> Dict[Any, Dict[str, Any]]:
        v1 = CoreV1Api(self.api)
        results = {}
        pods: V1PodList = v1.list_namespaced_pod(namespace)
        for pod in pods.items:
            md = pod.metadata
            status = pod.status
            containers = {}
            for statuses in (status.container_statuses,
                             status.init_container_statuses):
                if statuses is None:
                    continue
                for container_status in statuses:
                    for location in (
                            container_status,
                            container_status.last_state.terminated,
                            container_status.state.terminated,
                    ):
                        if location is not None and location.container_id is not None:
                            containers[location.container_id.replace(
                                "docker://", "")] = container_status.name
            results[md.uid] = {
                "namespace": md.namespace,
                "release": md.labels.get("release",
                                         md.labels.get("app.kubernetes.io/instance")),
                "service": md.labels.get("service",
                                         md.labels.get("app.kubernetes.io/name")),
                "pod_name": md.name,
                "containers": containers,
            }
        return results
Example #37
def list_running_pods(namespace: str):
    load_kube_config()
    api_instance = CoreV1Api()

    app_list = ",".join(APP_LABEL_SELECTOR)
    label_selector = f"app in ({app_list})"
    _logger.debug("using job selector %s", label_selector)
    try:
        api_response = api_instance.list_namespaced_pod(
            namespace=namespace,
            _preload_content=False,
            label_selector=label_selector,
            watch=False,
        )
        res = json.loads(api_response.data)
    except ApiException as e:
        _logger.info(
            "Exception when calling CoreV1Api->list_namespaced_job: %s\n" % e)
        raise e

    if "items" not in res:
        return []

    return res["items"]
Example #38
    def provide_core_v1_api(self) -> CoreV1Api:
        self._load_config()
        return CoreV1Api()
Example #39
    def __init__(self, knowledge: Knowledge, kubeconfig_file: str):
        super().__init__(knowledge)
        config.load_kube_config(config_file=kubeconfig_file)
        self.basic_api = CoreV1Api()
        self.extensions_api = AppsV1Api()
Example #40
def update_namespace(api: client.CoreV1Api, name: str, body: dict):
    return api.patch_namespace(name, body)
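# A minimal usage sketch for update_namespace() (assumptions: the namespace "example-ns"
# and the label are illustrative; the patch body is a strategic-merge-style dict).
from kubernetes import client

patch_body = {"metadata": {"labels": {"team": "platform"}}}
update_namespace(client.CoreV1Api(), "example-ns", patch_body)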
Example #41
def create_pod(api: client.CoreV1Api, configmap: Resource,
               cro_spec: ResourceChunk, ns: str, name_suffix: str,
               cro_meta: ResourceChunk):
    logger = logging.getLogger('kopf.objects')

    pod_spec = cro_spec.get("pod", {})

    # did the user supply their own pod spec?
    tpl = pod_spec.get("template")

    # if not, let's use the default one
    if not tpl:
        tpl = yaml.safe_load(configmap.data['chaostoolkit-pod.yaml'])
        image_name = pod_spec.get("image", "chaostoolkit/chaostoolkit")
        env_cm_name = pod_spec.get("env", {}).get(
            "configMapName", "chaostoolkit-env")
        env_cm_enabled = pod_spec.get("env", {}).get("enabled", True)
        settings_secret_enabled = pod_spec.get("settings", {}).get(
            "enabled", False)
        settings_secret_name = pod_spec.get("settings", {}).get(
            "secretName", "chaostoolkit-settings")
        experiment_as_file = pod_spec.get(
            "experiment", {}).get("asFile", True)
        experiment_config_map_name = pod_spec.get("experiment", {}).get(
            "configMapName", "chaostoolkit-experiment")
        cmd_args = pod_spec.get("chaosArgs", [])

        set_image_name(tpl, image_name)

        if not env_cm_enabled:
            logger.info("Removing default env configmap volume")
            remove_env_config_map(tpl)
        elif env_cm_name:
            logger.info(f"Env config map named '{env_cm_name}'")
            set_env_config_map_name(tpl, env_cm_name)

        if not settings_secret_enabled:
            logger.info("Removing default settings secret volume")
            remove_settings_secret(tpl)
        elif settings_secret_name:
            logger.info(
                f"Settings secret volume named '{settings_secret_name}'")
            set_settings_secret_name(tpl, settings_secret_name)

        if experiment_as_file:
            logger.info(
                f"Experiment config map named '{experiment_config_map_name}'")
            set_experiment_config_map_name(tpl, experiment_config_map_name)
        else:
            logger.info("Removing default experiment config map volume")
            remove_experiment_volume(tpl)
            remove_env_path_config_map(tpl)

        if cmd_args:
            logger.info(
                f"Override default chaos command arguments: "
                f"$ chaos {' '.join(cmd_args)}")
            set_chaos_cmd_args(tpl, cmd_args)

    set_ns(tpl, ns)
    set_pod_name(tpl, name_suffix=name_suffix)
    set_sa_name(tpl, name_suffix=name_suffix)
    label(tpl, labels=cro_meta.get('labels', {}))

    logger.debug(f"Creating pod with template:\n{tpl}")
    pod = api.create_namespaced_pod(body=tpl, namespace=ns)
    logger.info(f"Pod {pod.metadata.self_link} created in ns '{ns}'")

    return tpl
Example #42
def get(client: CoreV1Api, log: BoundLogger,
        namespace: V1Namespace) -> Optional[V1Namespace]:
    return common_k8s.get_resource(lambda: client.list_namespace(), log,
                                   'namespace', namespace.metadata.name)
Example #43
def upsert(client: CoreV1Api, log: BoundLogger,
           namespace: V1Namespace) -> V1Namespace:
    return common_k8s.upsert_resource(
        get(client, log, namespace), namespace, log, 'namespace',
        lambda: client.create_namespace(body=namespace),
        lambda: client.patch_namespace(namespace.metadata.name, body=namespace))
Example #44
def test_user_cluster_owner_access_to_pool(admin_mc, user_factory,
                                           remove_resource,
                                           wait_remove_resource):
    """Test that a cluster created by the admin is accessible by another user
    added as a cluster-owner, validate nodepool changing and switching
    nodetemplate"""

    # make an admin and user client
    admin_client = admin_mc.client
    k8sclient = CoreV1Api(admin_mc.k8s_client)
    user = user_factory()

    # make a cluster
    cluster = admin_client.create_cluster(
        name=random_str(), rancherKubernetesEngineConfig={"accessKey": "junk"})
    remove_resource(cluster)

    # wait for the namespace created by the cluster
    def _check_namespace(cluster):
        for n in k8sclient.list_namespace().items:
            if n.metadata.name == cluster.id:
                return True
        return False

    wait_for(lambda: _check_namespace(cluster))

    # add user as cluster-owner to the cluster
    crtb = admin_client.create_cluster_role_template_binding(
        userId=user.user.id,
        roleTemplateId="cluster-owner",
        clusterId=cluster.id,
    )
    remove_resource(crtb)

    # admin creates a node template and assigns to a pool
    admin_node_template, admin_cloud_credential = create_node_template(
        admin_client, "admincloudcred-" + random_str())
    admin_pool = admin_client.create_node_pool(
        nodeTemplateId=admin_node_template.id,
        hostnamePrefix="test",
        clusterId=cluster.id)
    wait_remove_resource(admin_pool)
    remove_resource(admin_cloud_credential)
    remove_resource(admin_node_template)

    # create a template for the user to try and assign
    user_node_template, user_cloud_credential = create_node_template(
        user.client, "usercloudcred-" + random_str())
    remove_resource(user_cloud_credential)
    remove_resource(user_node_template)

    # will pass, cluster owner user can change pool quantity
    user.client.update(admin_pool, quantity=2)
    # will pass, can set to a template owned by the user
    user.client.update(admin_pool, nodeTemplateId=user_node_template.id)

    # will fail, can not update nodepool template,
    # if no access to the original template
    with pytest.raises(ApiError) as e:
        user.client.update(admin_pool, nodeTemplateId=admin_node_template.id)
    assert e.value.error.status == 404
    assert e.value.error.message == "unable to find node template [%s]" % \
                                    admin_node_template.id

    # delete this by hand and the rest will cleanup
    admin_client.delete(admin_pool)
    def __init__(self, env: ApplicationVersion, config: dict):
        super().__init__(env, config)

        self.vault_name, self.vault_client = KeyVaultClient.vault_and_client(
            self.config, self.env)
        self.core_v1_api = CoreV1Api()