Example #1
 def test_stateful_sets(self):
     v1 = client.AppsV1Api()
     res = v1.list_namespaced_stateful_set("metal-control-plane")
     for i in res.items:
         self.assertEqual(i.status.current_replicas, i.status.replicas,
                          "not all stateful set replicas running")
Example #2
    def __init__(self,
                 namespace=None,
                 service_type=None,
                 gs_image=None,
                 etcd_image=None,
                 zookeeper_image=None,
                 gie_graph_manager_image=None,
                 coordinator_name=None,
                 coordinator_service_name=None,
                 etcd_cpu=None,
                 etcd_mem=None,
                 zookeeper_cpu=None,
                 zookeeper_mem=None,
                 gie_graph_manager_cpu=None,
                 gie_graph_manager_mem=None,
                 engine_cpu=None,
                 engine_mem=None,
                 vineyard_cpu=None,
                 vineyard_mem=None,
                 vineyard_shared_mem=None,
                 image_pull_policy=None,
                 image_pull_secrets=None,
                 volumes=None,
                 num_workers=None,
                 instance_id=None,
                 log_level=None,
                 timeout_seconds=None,
                 waiting_for_delete=None,
                 delete_namespace=None,
                 **kwargs):
        try:
            kube_config.load_incluster_config()
        except:  # noqa: E722
            kube_config.load_kube_config()
        self._api_client = kube_client.ApiClient()
        self._core_api = kube_client.CoreV1Api(self._api_client)
        self._app_api = kube_client.AppsV1Api(self._api_client)

        self._instance_id = instance_id

        # randomized suffixes allow multiple clusters to coexist in the same namespace
        self._engine_name = self._engine_name_prefix + self._instance_id
        self._etcd_name = self._etcd_name_prefix + self._instance_id
        self._etcd_service_name = self._etcd_service_name_prefix + self._instance_id

        self._gie_graph_manager_name = (self._gie_graph_manager_name_prefix +
                                        self._instance_id)
        self._gie_graph_manager_service_name = (
            self._gie_graph_manager_service_name_prefix + self._instance_id)
        self._vineyard_service_name = (self._vineyard_service_name_prefix +
                                       self._instance_id)

        self._namespace = namespace
        self._service_type = service_type
        self._num_workers = num_workers

        self._coordinator_name = coordinator_name
        self._coordinator_service_name = coordinator_service_name

        self._resource_object = ResourceManager(self._api_client)

        # engine container info
        self._gs_image = gs_image
        self._engine_cpu = engine_cpu
        self._engine_mem = engine_mem

        # vineyard container info
        self._vineyard_cpu = vineyard_cpu
        self._vineyard_mem = vineyard_mem
        self._vineyard_shared_mem = vineyard_shared_mem

        # etcd pod info
        self._etcd_image = etcd_image
        self._etcd_cpu = etcd_cpu
        self._etcd_mem = etcd_mem

        # zookeeper pod info
        self._zookeeper_image = zookeeper_image
        self._zookeeper_cpu = zookeeper_cpu
        self._zookeeper_mem = zookeeper_mem

        # interactive engine graph manager info
        self._gie_graph_manager_image = gie_graph_manager_image
        self._gie_graph_manager_cpu = gie_graph_manager_cpu
        self._gie_graph_manager_mem = gie_graph_manager_mem

        self._image_pull_policy = image_pull_policy

        self._etcd_endpoint = None

        # image pull secrets
        if image_pull_secrets is not None:
            self._image_pull_secrets = image_pull_secrets.split(",")
        else:
            self._image_pull_secrets = []

        self._volumes = json.loads(volumes)

        self._host0 = None
        self._pod_name_list = None
        self._pod_ip_list = None
        self._pod_host_ip_list = None

        self._analytical_engine_endpoint = None
        self._vineyard_service_endpoint = None

        self._closed = False
        self._glog_level = parse_as_glog_level(log_level)
        self._timeout_seconds = timeout_seconds
        self._waiting_for_delete = waiting_for_delete
        self._delete_namespace = delete_namespace

        self._analytical_engine_process = None

        # ports 8000 ~ 9000 are exposed
        self._learning_engine_ports_usage = 8000
        self._graphlearn_services = dict()
        self._learning_instance_processes = {}
Example #3
 def __init__(self):
     config.load_kube_config()
     self._client = client.CoreV1Api()
     self._apps = client.AppsV1Api()
Example #4
    def __init__(self,
                 api_client=None,
                 k8s_namespace=None,
                 k8s_service_type=None,
                 num_workers=None,
                 preemptive=None,
                 k8s_gs_image=None,
                 k8s_etcd_image=None,
                 k8s_image_pull_policy=None,
                 k8s_image_pull_secrets=None,
                 k8s_vineyard_daemonset=None,
                 k8s_vineyard_cpu=None,
                 k8s_vineyard_mem=None,
                 vineyard_shared_mem=None,
                 k8s_engine_cpu=None,
                 k8s_engine_mem=None,
                 k8s_coordinator_cpu=None,
                 k8s_coordinator_mem=None,
                 etcd_addrs=None,
                 k8s_etcd_num_pods=None,
                 k8s_etcd_cpu=None,
                 k8s_etcd_mem=None,
                 k8s_mars_worker_cpu=None,
                 k8s_mars_worker_mem=None,
                 k8s_mars_scheduler_cpu=None,
                 k8s_mars_scheduler_mem=None,
                 with_mars=None,
                 k8s_volumes=None,
                 timeout_seconds=None,
                 dangling_timeout_seconds=None,
                 k8s_waiting_for_delete=None,
                 mount_dataset=None,
                 k8s_dataset_image=None,
                 **kwargs):
        self._api_client = api_client
        self._core_api = kube_client.CoreV1Api(api_client)
        self._app_api = kube_client.AppsV1Api(api_client)
        self._rbac_api = kube_client.RbacAuthorizationV1Api(api_client)

        self._saved_locals = locals()

        self._namespace = self._saved_locals["k8s_namespace"]
        self._image_pull_secrets = self._saved_locals["k8s_image_pull_secrets"]
        if self._image_pull_secrets is None:
            self._image_pull_secrets = []
        elif not isinstance(self._image_pull_secrets, list):
            self._image_pull_secrets = [self._image_pull_secrets]
        self._image_pull_secrets_str = ",".join(self._image_pull_secrets)

        self._instance_id = random_string(6)
        self._role_name = self._role_name_prefix + self._instance_id
        self._role_binding_name = self._role_binding_name_prefix + self._instance_id
        self._cluster_role_name = ""
        self._cluster_role_binding_name = ""

        # all resources created inside the namespace
        self._resource_object = []

        self._coordinator_name = self._coordinator_name_prefix + self._instance_id
        self._coordinator_service_name = (
            self._coordinator_service_name_prefix + self._instance_id)
        # environment variable
        self._coordinator_envs = kwargs.pop("coordinator_envs", dict())

        if "GS_COORDINATOR_MODULE_NAME" in os.environ:
            self._coordinator_module_name = os.environ[
                "GS_COORDINATOR_MODULE_NAME"]
        else:
            self._coordinator_module_name = "gscoordinator"

        self._closed = False

        # pods watcher
        self._coordinator_pods_watcher = []
        self._logs = []

        self._delete_namespace = False
Example #5
    def __init__(self, configuration):

        k8s_client.Configuration.set_default(configuration)

        self.core_api = k8s_client.CoreV1Api()
        self.apps_api = k8s_client.AppsV1Api()
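The constructor above expects a fully populated Configuration object. A hedged usage sketch, with the wrapper's class name (KubeApis) assumed for illustration:

from kubernetes import client as k8s_client, config as k8s_config

# Build a Configuration from the local kubeconfig and hand it to the wrapper.
configuration = k8s_client.Configuration()
k8s_config.load_kube_config(client_configuration=configuration)
kube = KubeApis(configuration)  # class name is assumed; any wrapper with this __init__ works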
Example #6
def list_deployments():
    api_instance = client.AppsV1Api()
    return api_instance.list_namespaced_deployment(namespace, pretty=True)
    print("Deployment updated. status='%s'" % str(api_response.status))


def delete_deployment(api_instance, deployment_name):
    # Delete deployment
    api_response = api_instance.delete_namespaced_deployment(
        name=deployment_name,
        namespace="default",
        body=client.V1DeleteOptions(propagation_policy='Foreground',
                                    grace_period_seconds=5))
    print("Deployment deleted. status='%s'" % str(api_response.status))


# Create an instance of the API class
config.load_kube_config()
apps_v1 = client.AppsV1Api()

# Getting the deployment
deployment_name = "kube-znn"
deployment_image = "cmendes/znn:100k"
deployment_replicas = 2

current_deployment = apps_v1.read_namespaced_deployment("kube-znn", "default")
#current_deployment.spec.replicas = 0
#update_deployment(apps_v1, current_deployment, deployment_name, deployment_image)
#current_deployment.spec.replicas = deployment_replicas
update_deployment(apps_v1, current_deployment, deployment_name,
                  deployment_image)

#delete_deployment(apps_v1, deployment_name)
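The script calls an update_deployment helper that is not shown here. A plausible sketch, assuming it patches the first container's image in the default namespace:

def update_deployment(api_instance, deployment, deployment_name, deployment_image):
    # Assumed helper: swap the container image and push the change to the cluster.
    deployment.spec.template.spec.containers[0].image = deployment_image
    api_response = api_instance.patch_namespaced_deployment(
        name=deployment_name,
        namespace="default",
        body=deployment)
    print("Deployment updated. status='%s'" % str(api_response.status))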
Example #7
 def init_kubernetes_client(self):
     from kubernetes import client as kubernetes_client, config as kubernetes_config
     kubernetes_config.load_incluster_config()
     self.v1core = kubernetes_client.CoreV1Api()
     self.v1apps = kubernetes_client.AppsV1Api()
     self.v1batch = kubernetes_client.BatchV1Api()
Example #9
 def connect_to_cluster(self):
     """Connect to the cluster. Set API attributes."""
     config.load_kube_config(self.kubeconfig)
     self.apps_v1_api = client.AppsV1Api()
     self.core_v1_api = client.CoreV1Api()
Example #10
 def list_deployments_in_namespace(self, namespace):
     api = client.AppsV1Api()
     output = api.list_namespaced_deployment(namespace)
     return [deployment.metadata for deployment in output.items]
Example #11
 def __init__(self, deployment_res: DeploymentResource):
     self.deployment_res = deployment_res
     # Load kube config
     config.load_incluster_config()
     self.apps_v1 = client.AppsV1Api()
Example #12
 def list_statefulsets_in_namespace(self, namespace):
     api = client.AppsV1Api()
     output = api.list_namespaced_stateful_set(namespace)
     return [statefulset.metadata for statefulset in output.items]
Example #13
def main():

    if os.environ.get('RD_CONFIG_DEBUG') == 'true':
        log.setLevel(logging.DEBUG)
        log.debug("Log level configured for DEBUG")

    data = {}

    data["type"] = os.environ.get('RD_CONFIG_TYPE')
    data["yaml"] = os.environ.get('RD_CONFIG_YAML')
    data["namespace"] = os.environ.get('RD_CONFIG_NAMESPACE')

    common.connect()

    try:
        if data["type"] == "Deployment":
            dep = yaml.safe_load(data["yaml"])
            api_instance = client.AppsV1Api()
            resp = api_instance.create_namespaced_deployment(
                body=dep, namespace=data["namespace"], pretty="true")

            print(common.parseJson(resp.status))

        if data["type"] == "ConfigMap":
            api_instance = client.CoreV1Api()
            dep = yaml.safe_load(data["yaml"])
            resp = api_instance.create_namespaced_config_map(
                namespace=data["namespace"], body=dep, pretty="true")

            print(common.parseJson(resp.metadata))

        if data["type"] == "StatefulSet":
            dep = yaml.safe_load(data["yaml"])
            k8s_beta = client.AppsV1Api()
            resp = k8s_beta.create_namespaced_stateful_set(
                body=dep, namespace=data["namespace"], pretty="true")

            print(common.parseJson(resp.status))

        if data["type"] == "Service":
            api_instance = client.CoreV1Api()
            dep = yaml.safe_load(data["yaml"])
            resp = api_instance.create_namespaced_service(
                namespace=data["namespace"], body=dep, pretty="true")

            print(common.parseJson(resp.status))

        if data["type"] == "Ingress":
            dep = yaml.safe_load(data["yaml"])
            k8s_beta = client.ExtensionsV1beta1Api()
            resp = k8s_beta.create_namespaced_ingress(
                body=dep, namespace=data["namespace"], pretty="true")

            print(common.parseJson(resp.status))

        if data["type"] == "Job":
            api_instance = client.BatchV1Api()
            dep = yaml.safe_load(data["yaml"])
            resp = api_instance.create_namespaced_job(
                namespace=data["namespace"], body=dep, pretty="true")

            print(common.parseJson(resp.status))

        if data["type"] == "StorageClass":
            dep = yaml.safe_load(data["yaml"])
            api_instance = client.StorageV1Api()

            resp = api_instance.create_storage_class(body=dep, pretty="true")

            print(common.parseJson(resp.metadata))

        if data["type"] == "PersistentVolumeClaim":
            dep = yaml.safe_load(data["yaml"])
            api_instance = client.CoreV1Api()

            resp = api_instance.create_namespaced_persistent_volume_claim(
                namespace=data["namespace"], body=dep, pretty="true")

            print(common.parseJson(resp.status))

        if data["type"] == "Secret":
            dep = yaml.safe_load(data["yaml"])
            api_instance = client.CoreV1Api()

            resp = api_instance.create_namespaced_secret(
                namespace=data["namespace"], body=dep, pretty="true")

            print(common.parseJson(resp.metadata))

        if data["type"] == "PersistentVolume":
            dep = yaml.safe_load(data["yaml"])
            api_instance = client.CoreV1Api()

            resp = api_instance.create_persistent_volume(body=dep,
                                                         pretty="true")

            print(common.parseJson(resp.status))

    except ApiException:
        log.exception("Exception error creating:")
        sys.exit(1)
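Note that client.ExtensionsV1beta1Api() has been removed from recent releases of the Python client, since the extensions/v1beta1 Ingress API was dropped in Kubernetes 1.22. With a current client the Ingress branch would presumably look like this instead:

        if data["type"] == "Ingress":
            dep = yaml.safe_load(data["yaml"])
            # networking.k8s.io/v1 replaces the removed extensions/v1beta1 group
            api_instance = client.NetworkingV1Api()
            resp = api_instance.create_namespaced_ingress(
                body=dep, namespace=data["namespace"], pretty="true")

            print(common.parseJson(resp.status))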
Example #14
 def setUp(self) -> None:
     self._core_v1_api = client.CoreV1Api()
     self._networking_api = client.NetworkingV1beta1Api()
     self._apps_api = client.AppsV1Api()
Example #15
async def get_deployment(name):
    api_instance = client.AppsV1Api()
    res = api_instance.read_namespaced_deployment(name, 'default')
    return res
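The AppsV1Api call above is synchronous and blocks the event loop even though the function is declared async. A non-blocking variant, assuming the separate kubernetes_asyncio package, might look like:

from kubernetes_asyncio import client, config

async def get_deployment(name):
    # In kubernetes_asyncio, load_kube_config is itself a coroutine.
    await config.load_kube_config()
    async with client.ApiClient() as api:
        apps = client.AppsV1Api(api)
        return await apps.read_namespaced_deployment(name, 'default')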
Example #16
def main():

    if os.environ.get('RD_CONFIG_DEBUG') == 'true':
        log.setLevel(logging.DEBUG)
        log.debug("Log level configured for DEBUG")

    data = {}

    data["api_version"] = os.environ.get('RD_CONFIG_API_VERSION')
    data["name"] = os.environ.get('RD_CONFIG_NAME')
    data["container_name"] = os.environ.get('RD_CONFIG_CONTAINER_NAME')
    data["image"] = os.environ.get('RD_CONFIG_IMAGE')
    if os.environ.get('RD_CONFIG_PORTS'):
        data["ports"] = os.environ.get('RD_CONFIG_PORTS')

    data["replicas"] = os.environ.get('RD_CONFIG_REPLICAS')
    data["namespace"] = os.environ.get('RD_CONFIG_NAMESPACE')

    if os.environ.get('RD_CONFIG_LABELS'):
        data["labels"] = os.environ.get('RD_CONFIG_LABELS')

    if os.environ.get('RD_CONFIG_ENVIRONMENTS'):
        data["environments"] = os.environ.get('RD_CONFIG_ENVIRONMENTS')

    if os.environ.get('RD_CONFIG_ENVIRONMENTS_SECRETS'):
        evs = os.environ.get('RD_CONFIG_ENVIRONMENTS_SECRETS')
        data["environments_secrets"] = evs

    if os.environ.get('RD_CONFIG_LIVENESS_PROBE'):
        data["liveness_probe"] = os.environ.get('RD_CONFIG_LIVENESS_PROBE')

    if os.environ.get('RD_CONFIG_READINESS_PROBE'):
        data["readiness_probe"] = os.environ.get('RD_CONFIG_READINESS_PROBE')

    if os.environ.get('RD_CONFIG_CONTAINER_COMMAND'):
        cc = os.environ.get('RD_CONFIG_CONTAINER_COMMAND')
        data["container_command"] = cc

    if os.environ.get('RD_CONFIG_CONTAINER_ARGS'):
        data["container_args"] = os.environ.get('RD_CONFIG_CONTAINER_ARGS')

    if os.environ.get('RD_CONFIG_RESOURCES_REQUESTS'):
        rr = os.environ.get('RD_CONFIG_RESOURCES_REQUESTS')
        data["resources_requests"] = rr

    log.debug("Updating Deployment data:")
    log.debug(data)

    common.connect()

    try:
        apiV1 = client.AppsV1Api()
        deployment = create_deployment_object(data)

        log.debug("deployment object: ")
        log.debug(deployment)

        update_deployment(apiV1, deployment, data)
    except ApiException as e:
        log.error("Exception updating deployment: %s\n" % e)
        sys.exit(1)
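create_deployment_object is not defined in this snippet. A simplified sketch of what it presumably builds from the collected values, covering only name, image and replicas:

def create_deployment_object(data):
    # Build a minimal V1Deployment from the values gathered above.
    labels = {"app": data["name"]}
    container = client.V1Container(name=data["container_name"],
                                   image=data["image"])
    template = client.V1PodTemplateSpec(
        metadata=client.V1ObjectMeta(labels=labels),
        spec=client.V1PodSpec(containers=[container]))
    spec = client.V1DeploymentSpec(
        replicas=int(data["replicas"]),
        selector=client.V1LabelSelector(match_labels=labels),
        template=template)
    return client.V1Deployment(api_version="apps/v1",
                               kind="Deployment",
                               metadata=client.V1ObjectMeta(name=data["name"]),
                               spec=spec)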
Example #17
def deployment_is_ready(name):
    api_instance = client.AppsV1Api()
    ready_replicas = api_instance.read_namespaced_deployment(
        name, 'default').status.ready_replicas
    return ready_replicas is not None
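A hypothetical caller that polls this check until the deployment becomes ready or a timeout expires:

import time

def wait_for_deployment(name, timeout=120, interval=5):
    # Poll deployment_is_ready() until it returns True or the deadline passes.
    deadline = time.time() + timeout
    while time.time() < deadline:
        if deployment_is_ready(name):
            return True
        time.sleep(interval)
    return False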
Example #18
async def publish(loop):

    # Client to list namespaces
    CoreV1Api = client.CoreV1Api()

    # Client to list Deployments and StatefulSets
    AppsV1Api = client.AppsV1Api()

    async def get_deployments():
        for ns in CoreV1Api.list_namespace(label_selector=args.selector).items:
            for deploy in AppsV1Api.list_namespaced_deployment(
                    ns.metadata.name).items:
                logger.info("Namespace: %s Deployment: %s Replica: %s" %
                            (deploy.metadata.namespace, deploy.metadata.name,
                             deploy.spec.replicas))
                msg = {
                    'namespace': deploy.metadata.namespace,
                    'name': deploy.metadata.name,
                    'kind': 'deployment',
                    'replicas': deploy.spec.replicas,
                    'labels': deploy.spec.template.metadata.labels
                }

                logger.debug("Publishing Deployment: %s" % (json.dumps(msg)))

                if deploy.spec.replicas > 0 and not deploy.metadata.name == args.exclude:
                    try:
                        output['data'].append(msg)
                    except ErrConnectionClosed as e:
                        print("Connection closed prematurely.")
                        break
                    except ErrTimeout as e:
                        print("Timeout occured when publishing msg i={}: {}".
                              format(deploy, e))

    async def get_statefulsets():
        for ns in CoreV1Api.list_namespace(label_selector=args.selector).items:
            for sts in AppsV1Api.list_namespaced_stateful_set(
                    ns.metadata.name).items:
                logger.info("Namespace: %s StatefulSet: %s Replica: %s" %
                            (sts.metadata.namespace, sts.metadata.name,
                             sts.spec.replicas))
                msg = {
                    'namespace': sts.metadata.namespace,
                    'name': sts.metadata.name,
                    'kind': 'statefulset',
                    'replicas': sts.spec.replicas,
                    'labels': sts.spec.template.metadata.labels
                }

                logger.debug("Publishing Statefulset: %s" % (json.dumps(msg)))

                if sts.spec.replicas > 1:
                    try:
                        output['data'].append(msg)
                    except ErrConnectionClosed as e:
                        print("Connection closed prematurely.")
                        break
                    except ErrTimeout as e:
                        print("Timeout occured when publishing msg i={}: {}".
                              format(sts, e))

    await asyncio.gather(get_deployments(), get_statefulsets())
Example #19
def delete_deployment(name):
    api_instance = client.AppsV1Api()
    return api_instance.delete_namespaced_deployment(name,
                                                     namespace,
                                                     pretty=True)
Example #20
from kubernetes import client
from kubernetes.client.rest import ApiException
from .load_kube_config import kubeConfig

kubeConfig.load_kube_config()
apps = client.AppsV1Api()

class K8sDaemonSet:
    def get_damemonsets(ns):
        try:
            if ns != 'all':
                print("\n[INFO] Fetching {} namespace daemonsets data...".format(ns))
                namespace = ns
                damemonsets = apps.list_namespaced_daemon_set(namespace, timeout_seconds=10)
            else:
                print("\n[INFO] Fetching daemonsets data for all namespaces...")
                damemonsets = apps.list_daemon_set_for_all_namespaces(timeout_seconds=10)
            return damemonsets
        except ApiException as e:
            print("Exception when calling AppsV1Api->list_namespaced_daemon_set: %s\n" % e)
Example #21
 def create_apps_api(self):
     cfg = self.get_kubecfg()
     return client.AppsV1Api(cfg)
Example #22
 def __init__(self):
     self.core_api = client.CoreV1Api()
     self.apis_api = client.AppsV1Api()
Example #23
 def get(kind):
     client = ClientFactory.get()
     if kind == 'pod':
         return ResourceProxy(
             kind='pod',
             list_fn=client.CoreV1Api().list_pod_for_all_namespaces)
     elif kind == 'service':
         return ResourceProxy(
             kind='service',
             list_fn=client.CoreV1Api().list_service_for_all_namespaces)
     elif kind == 'endpoints':
         return ResourceProxy(
             kind='endpoints',
             list_fn=client.CoreV1Api().list_endpoints_for_all_namespaces)
     elif kind == 'config_map':
         return ResourceProxy(
             kind='config_map',
             list_fn=client.CoreV1Api().list_config_map_for_all_namespaces)
     elif kind == 'secret':
         return ResourceProxy(
             kind='secret',
             list_fn=client.CoreV1Api().list_secret_for_all_namespaces)
     elif kind == 'node':
         return ResourceProxy(kind='node',
                              list_fn=client.CoreV1Api().list_node)
     elif kind == 'deployment':
         return ResourceProxy(
             kind='deployment',
             list_fn=client.AppsV1Api().list_deployment_for_all_namespaces)
     elif kind == 'stateful_set':
         return ResourceProxy(kind='stateful_set',
                              list_fn=client.AppsV1Api().
                              list_stateful_set_for_all_namespaces)
     elif kind == 'daemon_set':
         return ResourceProxy(
             kind='daemon_set',
             list_fn=client.AppsV1Api().list_daemon_set_for_all_namespaces)
     elif kind == 'replica_set':
         return ResourceProxy(kind='replica_set',
                              list_fn=client.ExtensionsV1beta1Api().
                              list_replica_set_for_all_namespaces)
     elif kind == 'storage_class':
         return ResourceProxy(
             kind='storage_class',
             list_fn=client.StorageV1Api().list_storage_class)
     elif kind == 'persistent_volume':
         return ResourceProxy(
             kind='persistent_volume',
             list_fn=client.CoreV1Api().list_persistent_volume)
     elif kind == 'persistent_volume_claim':
         return ResourceProxy(
             kind='persistent_volume_claim',
             list_fn=client.CoreV1Api(
             ).list_persistent_volume_claim_for_all_namespaces)
     elif kind == 'namespace':
         return ResourceProxy(kind='namespace',
                              list_fn=client.CoreV1Api().list_namespace)
     elif kind == 'horizontal_pod_autoscaler':
         return ResourceProxy(
             kind='horizontal_pod_autoscaler',
             list_fn=client.AutoscalingV1Api(
             ).list_horizontal_pod_autoscaler_for_all_namespaces)
     else:
         raise Exception("Unknown kind %s" % kind)
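A hedged usage sketch of the factory above; the enclosing class is not shown, so its name is invented here, and ResourceProxy is assumed to expose list_fn as an attribute:

# Hypothetical caller: list every Deployment in the cluster through the proxy.
proxy = ResourceProxyFactory.get('deployment')  # enclosing class name assumed
for item in proxy.list_fn().items:
    print(item.metadata.namespace, item.metadata.name)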
Example #24
    def __init__(self,
                 api_client=None,
                 namespace=None,
                 service_type=None,
                 minikube_vm_driver=None,
                 num_workers=None,
                 gs_image=None,
                 etcd_image=None,
                 gie_graph_manager_image=None,
                 zookeeper_image=None,
                 image_pull_policy=None,
                 image_pull_secrets=None,
                 vineyard_cpu=None,
                 vineyard_mem=None,
                 vineyard_shared_mem=None,
                 engine_cpu=None,
                 engine_mem=None,
                 coordinator_cpu=None,
                 coordinator_mem=None,
                 timeout_seconds=None,
                 waiting_for_delete=None,
                 **kwargs):
        self._api_client = api_client
        self._core_api = kube_client.CoreV1Api(api_client)
        self._app_api = kube_client.AppsV1Api(api_client)
        self._rbac_api = kube_client.RbacAuthorizationV1Api(api_client)

        self._namespace = namespace
        self._service_type = service_type
        self._minikube_vm_driver = minikube_vm_driver
        self._gs_image = gs_image
        self._num_workers = num_workers
        self._etcd_image = etcd_image
        self._gie_graph_manager_image = gie_graph_manager_image
        self._zookeeper_image = zookeeper_image

        self._image_pull_policy = image_pull_policy
        self._image_pull_secrets = image_pull_secrets
        if self._image_pull_secrets is None:
            self._image_pull_secrets = []
        elif not isinstance(self._image_pull_secrets, list):
            self._image_pull_secrets = [self._image_pull_secrets]

        self._vineyard_cpu = vineyard_cpu
        self._vineyard_mem = vineyard_mem
        self._vineyard_shared_mem = vineyard_shared_mem
        self._engine_cpu = engine_cpu
        self._engine_mem = engine_mem
        self._waiting_for_delete = waiting_for_delete

        self._cluster_role_name = ""
        self._cluster_role_binding_name = ""

        # all resources created inside the namespace
        self._resource_object = []

        self._coordinator_name = self._coordinator_name_prefix + random_string(
            6)
        self._coordinator_service_name = (
            self._coordinator_service_name_prefix + random_string(6))
        self._coordinator_cpu = coordinator_cpu
        self._coordinator_mem = coordinator_mem
        # environment variable
        self._coordinator_envs = kwargs.pop("coordinator_envs", dict())

        self._closed = False
        self._timeout_seconds = timeout_seconds

        # pods watcher
        self._coordinator_pods_watcher = []
        self._logs = []

        self._delete_namespace = False
Example #25
    def test_np(object):
        tutils.tcLog("Verify GW flows are present")
        tutils.checkGwFlows("11.3.0.1")
        createNs("prod")
        # create a deployment in default namespace
        k8s_api = utils.create_from_yaml(k8s_client,
                                         "yamls/hostnames-dep.yaml")
        v1 = client.CoreV1Api()

        # check deployment is ready
        svc_hosts = []

        def depChecker():
            svc_hosts = getPodNames(v1, "default", "app=hostnames")
            if len(svc_hosts) >= 3:
                return ""
            return "Need 3 hosts, have {}".format(len(svc_hosts))

        tutils.assertEventually(depChecker, 1, 180)
        createNsPod("default", "client-pod")

        tutils.tcLog("Verify access without netpol")
        podIPs = tutils.getPodIPs("default", "app=hostnames")
        assert len(podIPs) == 3
        verifyAccess('default', 'open', podIPs)
        # apply networkpolicy allowing access only to prod
        tutils.tcLog("Create k8s network policy")
        createNsNetPol("default", "np/hostnames-deny-all")
        tutils.tcLog("Verify access denied")
        verifyAccess('default', 'timed out', podIPs)

        tutils.tcLog("Setup a netpol to allow access from prod")
        createNsNetPol("default", "hostnames-allow-prod")
        createNsPod("prod", "client-pod")
        tutils.tcLog("Verify allow from prod")
        verifyAccess('prod', 'open', podIPs)
        tutils.tcLog("Verify deny from default")
        verifyAccess('default', 'timed out', podIPs)

        tutils.tcLog("Delete deny-all")
        deleteNsNetPol('default', 'hostnames-deny-all')
        tutils.tcLog("Verify allow from prod")
        verifyAccess('prod', 'open', podIPs)
        tutils.tcLog("Verify deny from default")
        verifyAccess('default', 'timed out', podIPs)
        tutils.tcLog("Delete allow-prod")
        deleteNsNetPol('default', 'hostnames-allow-prod')
        tutils.tcLog("Verify allow from default")
        verifyAccess('default', 'open', podIPs)
        tutils.tcLog("Verify allow from prod")
        verifyAccess('prod', 'open', podIPs)

        tutils.tcLog("Allow port 9000 from prod")
        createNsNetPol("default", "np/hostnames-allow-prod9000")
        tutils.tcLog("Verify deny from default")
        verifyAccess('default', 'timed out', podIPs)
        tutils.tcLog("Verify deny 9376 from prod")
        verifyAccess('prod', 'timed out', podIPs)
        tutils.tcLog("Delete allow-prod9000")
        deleteNsNetPol('default', 'hostnames-allow-prod9000')

        tutils.tcLog("Allow port 9376 from prod")
        createNsNetPol("default", "np/hostnames-allow-prod9376")
        tutils.tcLog("Verify allow from prod")
        verifyAccess('prod', 'open', podIPs)
        tutils.tcLog("Verify deny from default")
        verifyAccess('default', 'timed out', podIPs)

        tutils.tcLog("Delete allow-prod9376")
        deleteNsNetPol('default', 'hostnames-allow-prod9376')
        tutils.tcLog("Verify allow from default")
        verifyAccess('default', 'open', podIPs)
        tutils.tcLog("Verify allow from prod")
        verifyAccess('prod', 'open', podIPs)

        tutils.scaleDep("default", "hostnames-dep", 0)
        v1.delete_namespace(
            'prod', client.V1DeleteOptions())  # deletes the client-pod too
        av1 = client.AppsV1Api()
        av1.delete_namespaced_deployment("hostnames-dep", "default",
                                         client.V1DeleteOptions())
        tutils.deletePod("default", "client-pod")

        def delChecker():
            dList = av1.list_namespaced_deployment("default")
            for dep in dList.items:
                if dep.metadata.name == "hostnames-dep":
                    return "hostnames-dep still present"
            if tutils.namespaceExists('prod'):
                return "prod exists"
            return ""

        tutils.tcLog("Verify cleanup")
        tutils.assertEventually(delChecker, 2, 40)
Example #26
def perform_cloud_ops():
    # set GOOGLE_APPLICATION_CREDENTIALS env to credentials file
    # set GOOGLE_CLOUD_PROJECT env to project id

    domain = os.getenv('DOMAIN')
    assert domain
    logger.info(f'using domain: {domain}')

    static_ip = os.getenv('STATIC_IP')
    assert static_ip
    logger.info(f'using static IP: {static_ip}')

    admin_email = os.getenv('ADMIN_EMAIL')
    assert admin_email
    logger.info(f'using ACME admin email: {admin_email}')

    oauth_client_id = os.getenv('OAUTH_CLIENT_ID')
    assert oauth_client_id
    logger.info(f'using oauth client id: {oauth_client_id}')

    oauth_client_secret = os.getenv('OAUTH_CLIENT_SECRET')
    assert oauth_client_secret
    logger.info(f'using oauth client secret: {oauth_client_secret}')

    oauth_secret = os.getenv('OAUTH_SECRET')
    assert oauth_secret
    logger.info(f'using oauth secret: {oauth_secret}')

    oauth_domain = os.getenv('OAUTH_DOMAIN')
    assert oauth_domain
    logger.info(f'using domain: {oauth_domain}')

    django_secret_key = os.getenv('DJANGO_SECRET_KEY')
    assert django_secret_key
    logger.info(f'using DJANGO_SECRET_KEY: {django_secret_key}')

    credentials, project = google.auth.default()
    gcloud_client = container_v1.ClusterManagerClient(credentials=credentials)

    scan_clusters(gcloud_client, project)

    # FIXME add the k8s cert to a trust store
    urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

    auth_gcloud_k8s(credentials)

    api_core_v1 = client.CoreV1Api()
    api_apps_v1 = client.AppsV1Api()
    api_storage_v1 = client.StorageV1Api()
    api_custom = client.CustomObjectsApi()
    api_extensions_v1_beta1 = client.ExtensionsV1beta1Api()
    api_ext_v1_beta1 = client.ApiextensionsV1beta1Api()
    api_rbac_auth_v1_b1 = client.RbacAuthorizationV1beta1Api()

    ensure_traefik(api_core_v1, api_ext_v1_beta1, api_apps_v1, api_custom,
                   api_rbac_auth_v1_b1, admin_email, domain, static_ip,
                   oauth_client_id, oauth_client_secret, oauth_domain,
                   oauth_secret)

    with open(os.getenv('GOOGLE_APPLICATION_CREDENTIALS'), 'rb') as f:
        gcloud_credentials_b64 = b64encode(f.read()).decode('UTF-8')

    ensure_secret(api=api_core_v1,
                  name='webui-credentials',
                  namespace='default',
                  secret=V1Secret(
                      metadata=client.V1ObjectMeta(name='webui-credentials'),
                      data={'gcloud-credentials': gcloud_credentials_b64}))
    webui_volume_paths = [
        ('data', '/opt/nipyapi/data', '20Gi', 'standard'),
    ]
    webui_volume_mounts = [
        V1VolumeMount(name=path[0], mount_path=path[1])
        for path in webui_volume_paths
    ]
    webui_volume_mounts.append(
        V1VolumeMount(name='webui-credentials',
                      mount_path='/root/webui',
                      read_only=True))

    dind_volume_paths = [
        ('docker', '/var/lib/docker', '200Gi', 'standard'),
    ]
    dind_volume_mounts = [
        V1VolumeMount(name=path[0], mount_path=path[1])
        for path in dind_volume_paths
    ]
    shared_volume_mounts = [
        V1VolumeMount(name='dind-socket', mount_path='/var/run-shared')
    ]
    ensure_statefulset_with_containers(
        api_apps_v1=api_apps_v1,
        name='admin',
        namespace='default',
        replicas=1,
        containers=[
            V1Container(
                name='webui',
                image='aichrist/nipyapi-ds:latest',
                env=[
                    # FIXME use k8s secrets for these values
                    V1EnvVar(name='DOMAIN', value=domain),
                    V1EnvVar(name='STATIC_IP', value=static_ip),
                    V1EnvVar(name='ADMIN_EMAIL', value=admin_email),
                    V1EnvVar(name='OAUTH_CLIENT_ID', value=oauth_client_id),
                    V1EnvVar(name='OAUTH_CLIENT_SECRET',
                             value=oauth_client_secret),
                    V1EnvVar(name='OAUTH_SECRET', value=oauth_secret),
                    V1EnvVar(name='OAUTH_DOMAIN', value=oauth_domain),
                    V1EnvVar(name='DJANGO_SECRET_KEY',
                             value=django_secret_key),
                    V1EnvVar(name='GOOGLE_APPLICATION_CREDENTIALS',
                             value='/root/webui/gcloud_credentials.json'),
                    V1EnvVar(name='CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE',
                             value='/root/webui/gcloud_credentials.json'),
                    V1EnvVar(name='GOOGLE_CLOUD_PROJECT',
                             value=os.getenv('GOOGLE_CLOUD_PROJECT')),
                    V1EnvVar(name='DOCKER_HOST',
                             value='unix:///var/run-shared/docker.sock'),
                ],
                ports=[V1ContainerPort(container_port=8000)],
                volume_mounts=webui_volume_mounts + shared_volume_mounts),
            V1Container(
                name='dind',
                image='docker:19-dind',
                security_context=V1SecurityContext(privileged=True),
                command=[
                    'dockerd', '-H', 'unix:///var/run-shared/docker.sock'
                ],
                volume_mounts=dind_volume_mounts + shared_volume_mounts)
        ],
        volumes=[
            V1Volume(name='dind-socket', empty_dir={}),
            V1Volume(name='webui-credentials',
                     projected=V1ProjectedVolumeSource(sources=[
                         V1VolumeProjection(secret=V1SecretProjection(
                             name='webui-credentials',
                             items=[
                                 V1KeyToPath(key='gcloud-credentials',
                                             path='gcloud_credentials.json')
                             ]))
                     ]))
        ],
        volume_paths=webui_volume_paths + dind_volume_paths)
    ensure_ingress_routed_svc(api_core_v1=api_core_v1,
                              api_custom=api_custom,
                              domain=domain,
                              hostname='admin',
                              name='admin',
                              target_name='admin',
                              namespace='default',
                              port_name='web',
                              svc_port=80,
                              target_port=8000)
    reg_volume_paths = [
        ('database', '/opt/nifi-registry/nifi-registry-current/database',
         '10Gi', 'standard'),
        ('flow-storage',
         '/opt/nifi-registry/nifi-registry-current/flow_storage', '20Gi',
         'standard'),
    ]
    reg_volume_mounts = [
        V1VolumeMount(name=path[0], mount_path=path[1])
        for path in reg_volume_paths
    ]
    ensure_statefulset_with_containers(
        api_apps_v1=api_apps_v1,
        name='registry',
        namespace='default',
        replicas=1,
        containers=[
            V1Container(name='registry',
                        image='apache/nifi-registry:latest',
                        env=[
                            V1EnvVar(name='NIFI_REGISTRY_WEB_HTTP_PORT',
                                     value='19090'),
                        ],
                        ports=[V1ContainerPort(container_port=19090)],
                        volume_mounts=reg_volume_mounts),
        ],
        init_containers=[
            V1Container(
                name='init-permissions',
                image='busybox',
                command=[
                    'sh', '-c',
                    'chown -R 1000:1000 /opt/nifi-registry/nifi-registry-current'
                ],
                volume_mounts=[
                    V1VolumeMount(name=path[0], mount_path=path[1])
                    for path in reg_volume_paths
                ])
        ],
        volumes=[],
        volume_paths=reg_volume_paths)
    ensure_ingress_routed_svc(api_core_v1=api_core_v1,
                              api_custom=api_custom,
                              domain=domain,
                              hostname='registry',
                              name='registry',
                              target_name='registry',
                              namespace='default',
                              port_name='web',
                              svc_port=80,
                              target_port=19090)

    perform_nifi_ops(api_apps_v1, api_core_v1, api_custom, domain)

    perform_build_ops_bg()
    perform_mirror_ops_bg()
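perform_cloud_ops leans on several ensure_* helpers that are not shown. As one example, a plausible sketch of ensure_secret as a create-or-replace wrapper (the real helper may differ):

from kubernetes.client.rest import ApiException

def ensure_secret(api, name, namespace, secret):
    # Create the secret; if it already exists (HTTP 409), replace it.
    try:
        api.create_namespaced_secret(namespace=namespace, body=secret)
    except ApiException as e:
        if e.status == 409:
            api.replace_namespaced_secret(name=name, namespace=namespace, body=secret)
        else:
            raise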
Example #27
    def test_delete(self):
        kubernetes_model = create_kubernetes_model()
        application_model = create_application_model()
        service_model = create_service_model()
        namespace = service_model.service_level

        # Confirm each components exist -> no exception raises
        k8s_config.load_kube_config(
            get_full_config_path(kubernetes_model.config_path))
        apps_v1_api = k8s_client.AppsV1Api()
        core_v1 = k8s_client.CoreV1Api()
        autoscaling_v1_api = k8s_client.AutoscalingV1Api()
        custom_object_api = k8s_client.CustomObjectsApi()
        core_v1.read_namespaced_service(
            name=WorkerConfiguration.service['metadata']['name'],
            namespace=namespace)
        self.wait_worker_ready(
            insecure_host=service_model.insecure_host,
            insecure_port=service_model.insecure_port,
            application_name=application_model.application_name,
            service_level=service_model.service_level,
            rekcurd_grpc_version=service_model.version)

        try:
            apps_v1_api.read_namespaced_deployment(
                name=WorkerConfiguration.deployment['metadata']['name'],
                namespace=namespace)
            core_v1.read_namespaced_service(
                name=WorkerConfiguration.service['metadata']['name'],
                namespace=namespace)
            autoscaling_v1_api.read_namespaced_horizontal_pod_autoscaler(
                name=WorkerConfiguration.autoscaling['metadata']['name'],
                namespace=namespace)
            custom_object_api.get_namespaced_custom_object(
                group="networking.istio.io",
                version="v1alpha3",
                namespace=namespace,
                plural="virtualservices",
                name=WorkerConfiguration.virtualservice['metadata']['name'])
        except ApiException:
            self.fail('Some components of worker are not running.')

        self.client.delete(self.__URL)
        # Make sure every thing deleted
        # Reading deleted deployment will raise ApiException
        self.assertRaises(
            ApiException,
            apps_v1_api.read_namespaced_deployment,
            name=WorkerConfiguration.deployment['metadata']['name'],
            namespace=namespace)
        self.assertRaises(ApiException,
                          core_v1.read_namespaced_service,
                          name=WorkerConfiguration.service['metadata']['name'],
                          namespace=namespace)
        self.assertRaises(
            ApiException,
            autoscaling_v1_api.read_namespaced_horizontal_pod_autoscaler,
            name=WorkerConfiguration.autoscaling['metadata']['name'],
            namespace=namespace)
        self.assertRaises(
            ApiException,
            custom_object_api.get_namespaced_custom_object,
            group="networking.istio.io",
            version="v1alpha3",
            namespace=namespace,
            plural="virtualservices",
            name=WorkerConfiguration.virtualservice['metadata']['name'])
Example #28
 def post(self):
     arg = self.request.arguments
     print(arg)
     # get the namespace, the keyword, and every deploy whose rollback-version field is not empty
     namespace = arg['namespace'][0].decode('utf-8')
     keyword = arg['keyword'][0].decode('utf-8')
     v2 = client.AppsV1Api()
     v3 = client.AppsV1beta1Api()
     rollback_dict = {}
     for deploy in arg:
         if deploy != 'namespace' and deploy != 'keyword':
             version = arg[deploy][0].decode('utf-8')
             if version.strip():
                 rollback_dict[deploy]=version
     # if rollback_dict is not empty, roll back either to the previous revision or to a specific version
     if rollback_dict:
         for deploy_name in rollback_dict:
             if rollback_dict[deploy_name] == '上一个版本':  # literal means "previous version"
                 # v3 = client.AppsV1beta1Api()
                 rollback = client.AppsV1beta1DeploymentRollback(name=deploy_name,rollback_to=client.AppsV1beta1RollbackConfig(revision=0))
                 v3.create_namespaced_deployment_rollback(name=deploy_name, namespace=namespace, body=rollback)
             else:
                 # v2 = client.AppsV1Api()
                 api_response = v2.read_namespaced_deployment(name=deploy_name, namespace=namespace)
                 image = api_response.spec.template.spec.containers[0].image
                 # print(image[:-12]+rollback_dict[deploy_name])
                 image_roll = image[:-12]+rollback_dict[deploy_name]
                 api_response.spec.template.spec.containers[0].image = image_roll
                 v2.patch_namespaced_deployment(name=deploy_name, namespace=namespace, body=api_response)
     else:
         print('no deploy need rollback')
     # return the updated deploy info to the caller
     data = {'status': True, 'error': "", 'message': '','data': ''}
     deploy_list=[]
     # fetch the deploys matching namespace and keyword; if keyword is empty, list every deploy in the namespace
     # v2 = client.AppsV1Api()
     api_response = v2.list_namespaced_deployment(namespace)
     for deploy in api_response.items:
         if keyword in deploy.metadata.name:
             deploy_dict = {}
             deploy_dict['deploy'] = deploy.metadata.name
             deploy_dict['image'] = deploy.spec.template.spec.containers[0].image
             deploy_dict['replicas'] = deploy.status.replicas
             deploy_dict['available'] = deploy.status.available_replicas
             if not deploy_dict['available']:
                 deploy_dict['available'] = 0
             # get the 5 most recent version tags for deploy_dict['deploy']
             # sql='''
             #     select k8s_project.project_name,k8s_project_version.version_id from
             #     k8s_project,k8s_project_version
             #     where k8s_project.project_id = k8s_project_version.project_id
             #     and k8s_project.project_name=%s order by k8s_project_version.gmt_create desc limit 3
             # '''
             # cursor.execute(sql,[deploy_dict['deploy']])
             # result=cursor.fetchall()
             # fetch version info via the Harbor API: extract the repository name from the image with a regex; if it matches, take the 5 most recent tags
             repository_name_re = re.findall('/.+:',deploy_dict['image'])
             if repository_name_re:
                 repository_name = repository_name_re[0][1:-1]
                 harbor_api = HarborApi(settings.harbor_dict["host"],settings.harbor_dict["user"],settings.harbor_dict["passwd"])
                 harbor_api.login_get_session_id()
                 result = harbor_api.tags_info_list(repository_name)[-5:]
                 result.reverse()
             else:
                 result=[]
             deploy_dict['version'] = result
             deploy_list.append(deploy_dict)
     print(deploy_list)
     data['data'] = deploy_list
     self.write(json.dumps(data))
Example #29
from kubernetes import client, config, watch
from timeloop import Timeloop
from datetime import timedelta

RUN_AS_K8S_DEPLOYMENT = 0
CUSTOM_KUBE_SCHEDULE_INTERVAL = 10

if RUN_AS_K8S_DEPLOYMENT:
    config.load_incluster_config()
else:
    config.load_kube_config()

# doing this computation within a k8s cluster
#k8s.config.load_incluster_config()
core_api = client.CoreV1Api()
apis_api = client.AppsV1Api()
#sdclient = SdcClient(<Your Sysdig API token>)
sysdig_metric = "net.http.request.time"
metrics = [{
    "id": sysdig_metric,
    "aggregations": {
        "time": "timeAvg",
        "group": "avg"
    }
}]

CustomSchedulerName = 'K8SCustomScheduler'

ureg = UnitRegistry()
ureg.load_definitions('kubernetes_units.txt')
Example #30
 def test_deployment(self):
     v1 = client.AppsV1Api()
     res = v1.list_namespaced_deployment("metal-control-plane")
     for i in res.items:
         self.assertIsNone(i.status.unavailable_replicas,
                           "not all deployment replicas running")