Example #1
def predict(service_name, input_json):
    isvc = KFServing.get(service_name, namespace=KFSERVING_TEST_NAMESPACE)
    # temporary sleep until this is fixed https://github.com/kubeflow/kfserving/issues/604
    time.sleep(10)
    api_instance = client.CoreV1Api(client.ApiClient())
    service = api_instance.read_namespaced_service("istio-ingressgateway",
                                                   "istio-system",
                                                   exact='true')
    if service.status.load_balancer.ingress is None:
        cluster_ip = service.spec.cluster_ip
    else:
        cluster_ip = service.status.load_balancer.ingress[0].ip
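    # Send the request to the ingress gateway address, but set the Host header
    # to the InferenceService hostname so Istio routes it to the right backend.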
    host = urlparse(isvc['status']['url']).netloc
    url = "http://{}/v1/models/{}:predict".format(cluster_ip, service_name)
    headers = {'Host': host}
    with open(input_json) as json_file:
        data = json.load(json_file)
        logging.info("Sending request data: %s", json.dumps(data))
        response = requests.post(url, json.dumps(data), headers=headers)
        logging.info("Got response code %s, content %s", response.status_code,
                     response.content)
        probs = json.loads(response.content.decode('utf-8'))["predictions"]
        return probs
Example #2
    def get_api_client(self):
        """
        Create a Kubernetes API client from the cluster's kubeconfig.

        Returns:
            client.ApiClient: Configured Kubernetes API client

        """

        # This awkward construction works around the config-loading design in
        # https://github.com/kubernetes-client/python: calling type.__call__
        # bypasses Configuration's default-instance machinery and returns a
        # fresh Configuration object.
        client_config = type.__call__(client.Configuration)
        kubeconfig = self.cluster.get_kubeconfig()
        if kubeconfig is None:
            raise ValueError(
                "Could not create Kubernetes API client: kubeconfig not found"
            )
        kcl = KubeConfigLoader(config_dict=kubeconfig)

        kcl.load_and_set(client_config)
        if self.cluster.provisioner.engine == 'kqueen.engines.OpenstackKubesprayEngine':
            client_config.assert_hostname = False
        return client.ApiClient(configuration=client_config)
Example #3
def process_file(file):
    base_folder = file[:file.rindex('/')]
    with open(file, "r") as a_file:
        capture_metadata = json.loads(a_file.read())
        build_service_map(capture_metadata["services"]["items"])
        build_pod_map(capture_metadata["pods"]["items"])
        build_rs_map(capture_metadata["rs"]["items"])
        build_deployments_map(capture_metadata["deployments"]["items"])
        for pod_metadata in capture_metadata["pod_metadata"]:
            handle_capture_per_pod(pod_metadata["pod"],
                                   base_folder + '/' + pod_metadata["file"],
                                   pod_metadata["IP"])

    print_graph()
    result = create_network_policy()
    policy_folder = base_folder + '/network-policies'
    create_clean_policies_directory(policy_folder)
    for policy in result:
        policy_content = client.ApiClient().sanitize_for_serialization(policy)
        with open(
                policy_folder + '/network_policy_{}.json'.format(
                    policy_content["metadata"]["name"]), "w") as policy_file:
            policy_file.write(json.dumps(policy_content, indent=4))
Example #4
    def __init__(self, config):
        """ Initialize connection to Kubernetes. 
        
            Connects to Kubernetes configuration using an environment appropriate method.
        """
        super(KubernetesCompute, self).__init__()
        self.config = config
        if os.getenv('KUBERNETES_SERVICE_HOST'):
            # We're running inside K8s; load the in-cluster config.
            k8s_config.load_incluster_config()
        else:
            # We're running outside of K8s; load the local kubeconfig.
            k8s_config.load_kube_config()
        api_client = k8s_client.ApiClient()
        self.api = k8s_client.CoreV1Api(api_client)
        self.rbac_api = k8s_client.RbacAuthorizationV1Api(api_client)
#        self.extensions_api = k8s_client.ExtensionsV1beta1Api(api_client) 
        self.extensions_api = k8s_client.AppsV1Api(api_client)
        self.networking_api = k8s_client.NetworkingV1Api(api_client)
        self.try_minikube = True
        self.namespace = self.get_namespace(
            namespace=os.environ.get("NAMESPACE", self.get_namespace()))
        logger.debug(f"-- using namespace: {self.namespace}")
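The get_namespace helper is used above but not shown; a minimal sketch, assuming it prefers an explicit value and otherwise reads the in-cluster service account namespace file (the fallback behavior is an assumption):

    def get_namespace(self, namespace=None):
        # Hypothetical helper: explicit value, else in-cluster namespace file, else "default".
        if namespace:
            return namespace
        path = "/var/run/secrets/kubernetes.io/serviceaccount/namespace"
        if os.path.exists(path):
            with open(path) as ns_file:
                return ns_file.read().strip()
        return "default"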
Example #5
def check_backend(backend):
    if backend == "kubernetes":
        try:
            from kubernetes import client as k8sclient
            from kubernetes import config as k8sconfig

            k8sconfig.load_kube_config()
            _, rc, _ = k8sclient.ApiClient().call_api(
                "/apis/yadage.github.io/v1/namespaces/default/workflows",
                "GET")
            return rc == 200
        except ImportError:
            pass
        except k8sclient.rest.ApiException:
            pass
        return False
    if backend == "local":
        try:
            import yadage

            assert yadage
            rc = subprocess.check_call(["docker", "info"],
                                       stdout=subprocess.PIPE,
                                       stderr=subprocess.PIPE)
            return rc == 0
        except Exception:
            pass
        return False
    if backend == "docker":
        try:
            rc = subprocess.check_call(["docker", "info"],
                                       stdout=subprocess.PIPE,
                                       stderr=subprocess.PIPE)
            return rc == 0
        except Exception:
            pass
        return False
Example #6
async def test_build_and_deploy(ops_test):
    charm = await ops_test.build_charm(".")

    role_binding_file = Path("/tmp/k8s-rolebinding.yaml")

    model_name = ops_test._default_model_name

    with open("docs/gatekeeper-rb.yaml.template", "r") as fh:
        template = Template(fh.read())
        role_binding_file.write_text(
            template.render(
                service_account_user=f"system:serviceaccount:{model_name}:kubernetes-operator"))
    role_binding = yaml.load_all(role_binding_file.read_text(),
                                 Loader=yaml.FullLoader)
    with client.ApiClient() as api_client:
        api_instance = client.RbacAuthorizationV1Api(api_client)
        try:
            for k8s_obj in role_binding:
                if k8s_obj["kind"] == "ClusterRoleBinding":
                    api_instance.create_cluster_role_binding(body=k8s_obj)
                if k8s_obj["kind"] == "ClusterRole":
                    api_instance.create_cluster_role(body=k8s_obj)
        except ApiException as err:
            if err.status == 409:
                # ignore "already exists" errors so that we can recover from
                # partially failed setups
                pass
            else:
                raise
    resources = {"gatekeeper-image": "openpolicyagent/gatekeeper:v3.2.3"}
    for series in meta["series"]:
        await ops_test.model.deploy(charm,
                                    application_name="opa-audit-test",
                                    series=series,
                                    resources=resources)
    await ops_test.model.wait_for_idle(wait_for_active=True, timeout=60 * 60)
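The meta variable is referenced above but not defined in this excerpt; presumably it is the parsed charm metadata, along the lines of:

meta = yaml.safe_load(Path("metadata.yaml").read_text())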
Example #7
def main():
    # Define the bearer token we are going to use to authenticate.
    # See here to create the token:
    # https://kubernetes.io/docs/tasks/access-application-cluster/access-cluster/
    aToken = ""

    # disable warnings
    #urllib3.disable_warnings()
    yaml.warnings({'YAMLLoadWarning': False})

    # Create a configuration object
    # aConfiguration = client.Configuration()
    aConfiguration = config.load_kube_config()

    # Specify the endpoint of your Kube cluster
    # aConfiguration.host = "https://172.26.138.146:6443"
    # aConfiguration.host = str(apiserver)
    # aConfiguration.api_key = {"Authorization": "Bearer " + str(aToken)}

    # Create a ApiClient with our config
    aApiClient = client.ApiClient(aConfiguration)

    # Do calls
    v1 = client.CoreV1Api(aApiClient)
    print("Name\tType\tCluster IP\tExternal IP\tPorts")
    namespaces = {"default", "kube-system"}
    for namespace in namespaces:
        print("namespace: %s" % namespace)
        ret = v1.list_namespaced_service(namespace)
        for i in ret.items:
            #print("pods: %s"%i)
            portRef = ""
            for p in i.spec.ports:
                portRef = portRef + str(p.port) + "/" + p.protocol + ","
            print("%s\t%s\t%s\t%s\t%s" %
                  (i.metadata.name, i.spec.type, i.spec.cluster_ip,
                   i.spec.external_i_ps, portRef.strip(",")))
Example #8
    def deploy(self, pod_spec):
        self.job_id = str(uuid.uuid1())
        self.labels['fairing-id'] = self.job_id
        pod_template_spec = self.generate_pod_template_spec(pod_spec)
        pod_template_spec.spec.containers[0].command = [
            "seldon-core-microservice", self.serving_class, "REST",
            "--service-type=MODEL", "--persistence=0"
        ]
        self.deployment_spec = self.generate_deployment_spec(pod_template_spec)
        self.service_spec = self.generate_service_spec()

        if self.output:
            api = k8s_client.ApiClient()
            job_output = api.sanitize_for_serialization(self.deployment_spec)
            logger.warning(json.dumps(job_output))
            service_output = api.sanitize_for_serialization(self.service_spec)
            logger.warning(json.dumps(service_output))

        v1_api = k8s_client.CoreV1Api()
        apps_v1 = k8s_client.AppsV1Api()
        self.deployment = apps_v1.create_namespaced_deployment(
            self.namespace, self.deployment_spec)
        self.service = v1_api.create_namespaced_service(
            self.namespace, self.service_spec)

        if self.service_type == "LoadBalancer":
            url = self.backend.get_service_external_endpoint(
                self.service.metadata.name, self.service.metadata.namespace,
                self.service.metadata.labels)
        else:
            # TODO(jlewi): The suffix won't always be cluster.local since
            # it's configurable. Is there a way to get it programmatically?
            url = "http://{0}.{1}.svc.cluster.local".format(
                self.service.metadata.name, self.service.metadata.namespace)

        logging.info("Cluster endpoint: %s", url)
        return url
Example #9
def modify_k8s_autoscaler(action):
    """
    Pauses or resumes the Kubernetes autoscaler
    """

    try:
        config.load_incluster_config()
    except config.ConfigException:
        try:
            config.load_kube_config()
        except config.ConfigException:
            raise Exception("Could not configure kubernetes python client")

    # Configure API key authorization: BearerToken
    configuration = client.Configuration()
    # create an instance of the API class
    k8s_api = client.AppsV1Api(client.ApiClient(configuration))
    if action == 'pause':
        logger.info('Pausing k8s autoscaler...')
        body = {'spec': {'replicas': 0}}
    elif action == 'resume':
        logger.info('Resuming k8s autoscaler...')
        body = {'spec': {'replicas': app_config['K8S_AUTOSCALER_REPLICAS']}}
    else:
        logger.info('Invalid k8s autoscaler option')
        sys.exit(1)
    try:
        k8s_api.patch_namespaced_deployment(
            app_config['K8S_AUTOSCALER_DEPLOYMENT'],
            app_config['K8S_AUTOSCALER_NAMESPACE'], body)
        logger.info('K8s autoscaler modified to replicas: {}'.format(
            body['spec']['replicas']))
    except ApiException as e:
        logger.info(
            'Scaling of k8s autoscaler failed. Error code was {}, {}. Exiting.'
            .format(e.reason, e.body))
        sys.exit(1)
Example #10
    def pass_to_scheduler(name_,
                          namespace_,
                          scheduler_name_='default-scheduler'):
        """
        Pass deployment to be scheduled by different scheduler
        :param str scheduler_name_: name of new scheduler, which will
            schedule this deployment
        :param str name_: name of deployment
        :param str namespace_: namespace of deployment
        :return int: HTTP response status code
        """
        url = '/apis/extensions/v1beta1/namespaces/' + namespace_ + '/deployments/' + name_
        headers = {
            'Accept': 'application/json',
            'Content-Type': 'application/strategic-merge-patch+json'
        }
        body = {
            "spec": {
                "template": {
                    "spec": {
                        "schedulerName": scheduler_name_
                    }
                }
            }
        }

        api_client = client.ApiClient()
        response = []
        try:
            response = api_client.call_api(url,
                                           'PATCH',
                                           header_params=headers,
                                           body=body)
        except Exception as e:
            return int(str(e)[1:4])

        return response[1]
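Note that the extensions/v1beta1 deployments endpoint used above was removed in Kubernetes 1.16; on newer clusters the same strategic-merge patch would be sent to the apps/v1 path instead, e.g.:

        url = '/apis/apps/v1/namespaces/' + namespace_ + '/deployments/' + name_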
Example #11
def main():
    config.load_kube_config()
    v1 = client.CoreV1Api()
    k8s_client = client.ApiClient()
    k8s_api = client.ExtensionsV1beta1Api(k8s_client)
    pp = pprint.PrettyPrinter(indent=4)
    try:
        # checks if deployment, service, configmap already created
        check = k8s_api.read_namespaced_deployment_status(name="login-node-n",
                                                          namespace="default")
        print("deployment already exists")
    except Exception:
        # deployment not found; render the template and create the configmap
        config_data = yaml.load(open('vals.yaml'), Loader=yaml.FullLoader)
        #itemp_up = render_template('condor_config.local.j2', request_name = "request",inventory_hostname = "hostname")
        env = Environment(loader=FileSystemLoader('./templates'),
                          trim_blocks=True,
                          lstrip_blocks=True)
        template = env.get_template('condor_config.local.j2')
        temp_up = template.render(config_data)
        name = 'temcon'
        namespace = 'default'
        body = kubernetes.client.V1ConfigMap()
        body.data = dict([("condor_config.local", temp_up)])
        body.metadata = kubernetes.client.V1ObjectMeta()
        body.metadata.name = name
        configuration = kubernetes.client.Configuration()
        api_instance = kubernetes.client.CoreV1Api(
            kubernetes.client.ApiClient(configuration))
        try:
            api_response = api_instance.create_namespaced_config_map(
                namespace, body)
        except ApiException as e:
            print(
                "Exception when calling CoreV1Api->create_namespaced_config_map: %s\n"
                % e)
Example #12
    def wait_for_jobs(self, namespace, label_filter):
        """Wait for all the jobs with the specified label to finish.

    Args:
      label_filter: A label filter expression e.g. "group=mygroup"
    """
        if not util.is_in_cluster():
            util.load_kube_config(persist_config=False)
        else:
            config.load_incluster_config()

        # Create an API client object to talk to the K8s master.
        api_client = k8s_client.ApiClient()
        jobs = util.wait_for_jobs_with_label(api_client, namespace,
                                             label_filter)

        done = 0
        succeeded = 0
        for job in jobs.items:
            project = job.metadata.labels.get("project", "")
            if not job.status.conditions:
                logging.info("Project %s Job %s.%s missing condition", project,
                             job.metadata.namespace, job.metadata.name)
                continue

            last_condition = job.status.conditions[-1]
            if last_condition.type in ["Failed", "Complete"]:
                logging.info("Project %s Job %s.%s has condition %s", project,
                             job.metadata.namespace, job.metadata.name,
                             last_condition.type)
                done += 1
                if last_condition.type in ["Complete"]:
                    succeeded += 1

        logging.info("%s of %s jobs finished", done, len(jobs.items))
        logging.info("%s of %s jobs finished successfully", succeeded,
                     len(jobs.items))
Example #13
    def run(self):
        """Runs the Tekton pipeline async.
    """
        client = k8s_client.ApiClient()
        crd_api = k8s_client.CustomObjectsApi(client)

        group, version = self.config["apiVersion"].split("/")
        try:
            result = crd_api.create_namespaced_custom_object(
                group=group,
                version=version,
                namespace=self.namespace,
                plural=PLURAL,
                body=self.config)
            logging.info("Created workflow:\n%s", yaml.safe_dump(result))
        except rest.ApiException as e:
            logging.error("Could not create workflow: %s")
            if e.body:
                body = None
                if isinstance(e.body, six.string_types):
                    body = {}
                    try:
                        logging.info("Parsing ApiException body: %s", e.body)
                        body = json.loads(e.body)
                    except json.JSONDecodeError as json_e:
                        logging.error("Error parsing body: %s", json_e)
                else:
                    body = e.body
                logging.error("Could not create workflow; %s", body)
            else:
                logging.error("Could not create workflow: %s", e)
            raise

        self.name = result.get("metadata", {}).get("name")
        logging.info("Submitted Tekton Pipeline %s.%s", self.namespace,
                     self.name)
        return result
Example #14
def main():
    api_token = "eyJhbGciOiJSUzI1NiIsImtpZCI6IjBRZDIyWUhqQmJQLThHM2pndl93djBuazRucXVkZVh6U1BiclNwQVJrdWMifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJkZWZhdWx0Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZWNyZXQubmFtZSI6ImRlZmF1bHQtdG9rZW4tem13Z2giLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC5uYW1lIjoiZGVmYXVsdCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50LnVpZCI6IjI3YTAzMmEwLTNjM2UtNGNlOC05MmY1LWYxMTBkNWM1MGVjNiIsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDpkZWZhdWx0OmRlZmF1bHQifQ.u0mDrFD3kPit7FaUIeTnjh1Ph9EGk-s_T4Rn5CRub_WCZ9M31Y4uWkFFK4Sdibez4Lea7maW4J9nq2SP0zoLWxxGgD1aR4ZK2qqyD2QmwfOzUdPdkK1ubII2GZMld5DPbrelA2F58DoFOrwkj_YDNmdZ-pSe1etTWcG4Q_KCoaMSUyDDNa-ZFeKjn8LAoWk1VckW-pSYIq2RcUbbdgHeURfY_qXr0G42eyC7tWnTGZK-k3ZxWneRFAc6fAF0JQD8x6QTMsxH2fc9XFM5yAnniSW2T_oaPF1HgzuMJxATQlTZZT25CwFQcgQwhExH81p8RDjzCLCAP_APRq7fiItyRQ"
    configuration = kubernetes.client.Configuration()
    configuration.api_key = {"authorization": "Bearer " + api_token}
    configuration.host = "https://192.168.1.123:6443"
    configuration.verify_ssl = False
    # configuration.assert_hostname = False
    configuration.debug = True
    ApiClient = client.ApiClient(configuration)

    # Configs can be set in Configuration class directly or using helper
    # utility. If no argument provided, the config will be loaded from
    # default location.
    #    config.load_kube_config(ApiClient)
    apps_v1 = client.AppsV1Api(ApiClient)

    # Uncomment the following lines to enable debug logging
    # c = client.Configuration()
    # c.debug = True
    # apps_v1 = client.AppsV1Api(api_client=client.ApiClient(configuration=c))

    # Create a deployment object with client-python API. The deployment we
    # created is same as the `nginx-deployment.yaml` in the /examples folder.
    deployment = create_deployment_object()
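    # The excerpt ends after building the deployment object; a plausible next
    # step (the "default" namespace is an assumption) is to submit it:
    resp = apps_v1.create_namespaced_deployment(namespace="default",
                                                body=deployment)
    print("Created deployment %s" % resp.metadata.name)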
Example #15
def main():
    """Main"""
    config.load_incluster_config()

    core_v1_client = client.CoreV1Api()
    k8s_client = client.ApiClient()

    # ConfigMap
    deploy_config_map(core_v1_client)

    # CSI Pods
    deploy_csi_pods(core_v1_client)

    # Storage Class
    deploy_storage_class()

    # Send Analytics Tracker
    # The information from this tracker helps developers understand usage
    # and improve the project
    send_analytics_tracker("operator")

    # Watch CRD
    crd_watch(core_v1_client, k8s_client)
Example #16
    def test_create_from_list_in_multi_resource_yaml(self):
        """
        Should be able to create the items in the PodList and a deployment
        specified in the multi-resource file
        """
        result = []
        # Create a configuration object
        aConfiguration = config.load_kube_config()

        # Create a ApiClient with our config
        aApiClient = client.ApiClient(aConfiguration)
        core_v1 = client.CoreV1Api(aApiClient)

        utils.create_from_yaml(
            aApiClient,
            filepath + "cluster/files/" + "multi-resource-with-list.yaml")
        core_api = client.CoreV1Api(aApiClient)
        app_api = client.AppsV1Api(aApiClient)
        pod_0 = core_api.read_namespaced_pod(name="mock-pod-0",
                                             namespace="default")
        self.assertIsNotNone(pod_0)
        pod_1 = core_api.read_namespaced_pod(name="mock-pod-1",
                                             namespace="default")
        self.assertIsNotNone(pod_1)
        dep = app_api.read_namespaced_deployment(name="mock",
                                                 namespace="default")
        self.assertIsNotNone(dep)
        core_api.delete_namespaced_pod(name="mock-pod-0",
                                       namespace="default",
                                       body={})
        core_api.delete_namespaced_pod(name="mock-pod-1",
                                       namespace="default",
                                       body={})
        app_api.delete_namespaced_deployment(name="mock",
                                             namespace="default",
                                             body={})
Example #17
def check_async(name, backend):
    assert backend == "kubernetes"
    from kubernetes import client as k8sclient
    from kubernetes import config as k8sconfig

    k8sconfig.load_kube_config()
    a, rc, d = k8sclient.ApiClient().call_api(
        "/apis/yadage.github.io/v1/namespaces/default/workflows/{}".format(
            name),
        "GET",
        _preload_content=False,
    )
    try:
        status = json.loads(a.read())["status"]["workflow"]
    except Exception:
        return {"status": "UNKNOWN"}

    if status.get("succeeded") == 1:
        return {"status": "SUCCEEDED"}
    if status.get("active") == 1:
        return {"status": "INPROGRESS"}
    if status.get("failed") == 1:
        return {"status": "FAILED"}
    return {"status": "UNKNOWN"}
Example #18
def bind_role_with_api(name,
                       namespace,
                       labels,
                       subject_name,
                       subject_kind='ServiceAccount'):
    # Using API because of bug https://github.com/canonical/operator/issues/390
    logging.info('Creating role binding with K8s API')
    _load_kube_config()

    with client.ApiClient() as api_client:
        api_instance = client.RbacAuthorizationV1Api(api_client)
        body = client.V1RoleBinding(metadata=client.V1ObjectMeta(
            name=name, namespace=namespace, labels=labels),
                                    role_ref=client.V1RoleRef(
                                        api_group='rbac.authorization.k8s.io',
                                        kind='Role',
                                        name=name,
                                    ),
                                    subjects=[
                                        client.V1Subject(kind=subject_kind,
                                                         name=subject_name),
                                    ])
        try:
            api_instance.create_namespaced_role_binding(namespace,
                                                        body,
                                                        pretty=True)
            return True
        except ApiException as err:
            logging.exception(
                "Exception when calling RbacAuthorizationV1Api->create_namespaced_role_binding."
            )
            if err.status == 409:
                # ignore 409 (AlreadyExists) errors
                return True
            return False
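The _load_kube_config helper is referenced above but not defined in this excerpt; a minimal sketch, assuming the usual in-cluster/kubeconfig fallback (the body is an assumption):

def _load_kube_config():
    # Prefer the in-cluster service account config; fall back to ~/.kube/config.
    try:
        config.load_incluster_config()
    except config.ConfigException:
        config.load_kube_config()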
Example #19
def _run_sql_on_maildb(api_url, api_token, namespace, label, sql):
    configuration = _get_k8s_conf(api_url, api_token)
    v1 = client.CoreV1Api(client.ApiClient(configuration))
    sql_command = f"mysql -u postfix -p$MYSQL_PASSWORD -D postfix -e '{sql}'"
    print(sql_command)
    exec_command = [
        '/bin/bash',
        '-c',
        sql_command,
        ]

    c = configuration
    c.assert_hostname = False

    name = _get_pod_name(api_url, api_token, namespace, label, search='mariadb')

    resp = stream(v1.connect_get_namespaced_pod_exec,
                  name,
                  namespace,
                  command=exec_command,
                  stderr=True, stdin=False,
                  stdout=True, tty=False)

    return resp
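The _get_k8s_conf helper is not shown either; a plausible sketch, assuming it builds a Configuration from the API URL and bearer token passed in (compare Example #14):

def _get_k8s_conf(api_url, api_token):
    # Hypothetical helper: configure access to a remote API server with a bearer token.
    configuration = client.Configuration()
    configuration.host = api_url
    configuration.api_key = {"authorization": "Bearer " + api_token}
    return configuration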
Example #20
    def login_pending(self, request):
        config.load_kube_config(config_file='/etc/kubernetes/admin.conf')
        v1 = client.CoreV1Api()
        k8s_client = client.ApiClient()
        k8s_api = client.ExtensionsV1beta1Api(k8s_client)
        configuration = kubernetes.client.Configuration()
        api_instance = kubernetes.client.CoreV1Api(
            kubernetes.client.ApiClient(configuration))
        deps = k8s_api.read_namespaced_deployment_status(
            name="login-node-n-" + str(request.name), namespace="default")
        while deps.status.available_replicas != 1:
            k8s_api = client.ExtensionsV1beta1Api(k8s_client)
            deps = k8s_api.read_namespaced_deployment_status(
                name="login-node-n-" + str(request.name), namespace="default")
        self.log.info("LOGIN POD CREATED")
        deps.metadata.name = deps.metadata.name + "-" + request.name
        service = v1.read_namespaced_service(
            name="login-node-service-" + str(request.name), namespace="default")
        config1 = api_instance.read_namespaced_config_map(
            name="new-config-" + str(request.name), namespace="default")
        return login_info(self, request)
Example #21
def ensure_namespace_cleanup(configuration):
    api_instance = kube_client.CoreV1Api(kube_client.ApiClient(configuration))
    timeout = time.time() + 180
    while True:
        if time.time() > timeout:
            telemetry.set_user_fault()
            logger.warning(
                "Namespace 'azure-arc' still in terminating state. Please ensure that you delete the 'azure-arc' namespace before onboarding the cluster again."
            )
            return
        try:
            api_response = api_instance.list_namespace(
                field_selector='metadata.name=azure-arc')
            if not api_response.items:
                return
            time.sleep(5)
        except Exception as e:  # pylint: disable=broad-except
            logger.warning("Error while retrieving namespace information: " +
                           str(e))
            kubernetes_exception_handler(
                e,
                consts.Get_Kubernetes_Namespace_Fault_Type,
                'Unable to fetch kubernetes namespace',
                raise_error=False)
Example #22
    def test_node_status(self):
        """
        verify that all nodes are online
        """
        result = ""
        # Create a configuration object
        aConfiguration = config.load_kube_config()

        # Create a ApiClient with our config
        aApiClient = client.ApiClient(aConfiguration)

        core_v1 = client.CoreV1Api(aApiClient)

        response = core_v1.list_node(watch=False)
        for i in response.items:
            for cond in i.status.conditions:
                if "KubeletReady" in cond.reason:
                    stat = str(cond.type)
            node = i.metadata.name
            # check whether node is notready
            if "None" in stat:
                result = "Not Ready"
            #self.assertEqual("Ready", stat, msg="Node %s is not ready!"%node)
        self.assertIsNotNone(result)
Example #23
    def test_node_role(self):
        """
        verify that all nodes have a node-role label
        """
        result = ""
        # Create a configuration object
        aConfiguration = config.load_kube_config()

        # Create a ApiClient with our config
        aApiClient = client.ApiClient(aConfiguration)

        core_v1 = client.CoreV1Api(aApiClient)

        response = core_v1.list_node(watch=False)
        for i in response.items:
            for label in i.metadata.labels:
                if "node-role" in label:
                    nodelabel = label.split("/")
                    noderole = nodelabel[1].strip()
            node = i.metadata.name
            if "None" in noderole:
                result = "None"
            #self.assertEqual("master", noderole, msg="Node %s does not have a role!"%node)
        self.assertIsNotNone(result)
Example #24
def apply_kfp(modify, cop, runtime):
    modify(cop)

    # Have to do it here to avoid circular dependencies
    from .pod import AutoMountType

    if AutoMountType.is_auto_modifier(modify):
        runtime.spec.disable_auto_mount = True

    api = client.ApiClient()
    for k, v in cop.pod_labels.items():
        runtime.metadata.labels[k] = v
    for k, v in cop.pod_annotations.items():
        runtime.metadata.annotations[k] = v
    if cop.container.env:
        env_names = [
            e.name if hasattr(e, "name") else e["name"]
            for e in runtime.spec.env
        ]
        for e in api.sanitize_for_serialization(cop.container.env):
            name = e["name"]
            if name in env_names:
                runtime.spec.env[env_names.index(name)] = e
            else:
                runtime.spec.env.append(e)
                env_names.append(name)
        cop.container.env.clear()

    if cop.volumes and cop.container.volume_mounts:
        vols = api.sanitize_for_serialization(cop.volumes)
        mounts = api.sanitize_for_serialization(cop.container.volume_mounts)
        runtime.spec.update_vols_and_mounts(vols, mounts)
        cop.volumes.clear()
        cop.container.volume_mounts.clear()

    return runtime
Example #25
def get_more_info(driver, user):
    try:
        WebDriverWait(driver, 280).until(
            EC.presence_of_element_located((By.ID, 'refresh_notebook_list')))
        print('{} notebook spawned successfully'.format(user['username']))
    except TimeoutException as e:  # notebook didn't spin up
        print('What happened here {} '.format(driver.page_source))
        with open('/run/secrets/kubernetes.io/serviceaccount/token') as f:
            token = f.read()
        with open('/run/secrets/kubernetes.io/serviceaccount/namespace') as f:
            namespace = f.read()

        configuration = client.Configuration()
        configuration.api_key['authorization'] = 'Bearer {}'.format(token)
        configuration.host = 'https://kubernetes.default'
        configuration.ssl_ca_cert = '/run/secrets/kubernetes.io/serviceaccount/ca.crt'

        api_instance = client.CoreV1Api(client.ApiClient(configuration))

        try:
            api_response = api_instance.read_namespaced_pod(
                'jupyter-{}'.format(user['username']), namespace)
            pprint('😡 read_namespaced_pod {}'.format(str(api_response)))
        except Exception as e:
            pprint(
                "Exception when calling CoreV1Api->read_namespaced_pod: %s\n" %
                e)

        try:
            api_response = api_instance.read_namespaced_pod_log(
                'jupyter-{}'.format(user['username']), namespace)
            pprint('😡 {} read_namespaced_pod_log'.format(str(api_response)))
        except Exception as e:
            pprint(
                "Exception when calling CoreV1Api->read_namespaced_pod_log: %s\n"
                % e)
Example #26
def crud_crd_object(namespace, obj, action):
    """Create or delete a Custom Resource Definition object"""
    # Using the API because of LP:1886694
    logging.info("Creating CRD object with K8s API")
    _load_kube_config()

    body = client.V1beta1CustomResourceDefinition(**obj)

    with client.ApiClient() as api_client:
        api_instance = client.ApiextensionsV1beta1Api(api_client)
        try:
            if action.lower() == "create":
                api_instance.create_custom_resource_definition(body,
                                                               pretty=True)
            elif action.lower() == "delete":
                api_instance.delete_custom_resource_definition(
                    name=obj["metadata"]["name"], pretty=True)
        except ApiException as err:
            if err.status == 409:
                # ignore "already exists" errors so that we can recover from
                # partially failed setups
                return
            else:
                raise
Example #27
    def __init__(self, k8s_config=None, k8s_namespace=None):
        from kubernetes import config, client
        from gevent.threadpool import ThreadPool

        if k8s_config is not None:
            self._k8s_config = k8s_config
        elif os.environ.get('KUBE_API_ADDRESS'):
            self._k8s_config = client.Configuration()
            self._k8s_config.host = os.environ['KUBE_API_ADDRESS']
        else:
            self._k8s_config = config.load_incluster_config()

        verify_ssl = bool(int(os.environ.get('KUBE_VERIFY_SSL', '1').strip('"')))
        if not verify_ssl:
            c = client.Configuration()
            c.verify_ssl = False
            client.Configuration.set_default(c)

        self._k8s_namespace = k8s_namespace or os.environ.get('MARS_K8S_POD_NAMESPACE') or 'default'
        self._full_label_selector = None
        self._client = client.CoreV1Api(client.ApiClient(self._k8s_config))
        self._pool = ThreadPool(1)

        self._service_pod_to_ep = dict()
Example #28
    def warmup(self):
        kube_configs = self._get_config_files()
        if kube_configs:
            self._kube_config = os.path.join(self._kube_dir, kube_configs[0])
        try:
            configuration = client.Configuration()
            config.load_kube_config(self._kube_config,
                                    client_configuration=configuration)
            if os.environ.get('HTTPS_PROXY'):
                configuration.proxy = os.environ['HTTPS_PROXY']
            api_client = client.ApiClient(configuration)
            self._set_all_clients(api_client)
        except Exception as e:
            if 'Invalid kube-config file. No configuration found.' not in str(e):
                self.services.logging_service.warning(
                    'Experienced the following error while attempting to create kubernetes client from cluster configuration:'
                    '\n%s'
                    '\nDisconnecting and reconnecting may resolve the issue.'
                    '\nPlease try running:'
                    f'\n\t{CLI_NAME} cluster disconnect -a',
                    str(e),
                )
            self._set_all_clients(None)
Example #29
def handle_order():
    orderid = request.json['orderid']
    weight = int(request.json['products'][0]['amount']) * 700
    prodid = create_random_prod()

    logging.warning('Received Order with ID {}'.format(orderid))

    f = open("job_{}.yaml".format(prodid), "a")
    f.write(
        job_template.render(prodid=prodid.lower(),
                            registry="mimaurer",
                            version="v1",
                            orderid=orderid,
                            weight=weight))

    f.close()

    ns = load_env_file("NAMESPACE")

    config.load_incluster_config()
    k8s_client = client.ApiClient()

    utils.create_from_yaml(k8s_client,
                           './job_{}.yaml'.format(prodid),
                           namespace=ns)

    os.remove('./job_{}.yaml'.format(prodid))

    # SEND REPLY
    reply = {'orderid': orderid, 'prodid': prodid, 'status': 'success'}

    time.sleep(
        random.randint(int(load_env_file("MIN_RANDOM")),
                       int(load_env_file("MAX_RANDOM"))) / 1000)

    return json.dumps(reply)
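The load_env_file helper is not defined in this excerpt; a minimal sketch, assuming it simply reads values injected as environment variables (the helper body is an assumption):

def load_env_file(name):
    # Hypothetical helper: read a required configuration value from the environment.
    return os.environ[name]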
Example #30
def handler(event, context):

    # Get Token
    token = get_token(CLUSTER_NAME)
    print("TOKEN: " + token)
    # Configure
    config.load_kube_config(KUBE_FILEPATH)

    configuration = client.Configuration()
    configuration.api_key['authorization'] = token
    configuration.api_key_prefix['authorization'] = 'Bearer'
    # API
    api = client.ApiClient(configuration)
    v1 = client.CoreV1Api(api)

    # Get all the pods
    ret = v1.list_namespaced_pod("default")

    if not ret.items:
        print("No Pods Found in namespace default")

    for i in ret.items:
        print("%s\t%s\t%s" %
              (i.status.pod_ip, i.metadata.namespace, i.metadata.name))
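The get_token helper and CLUSTER_NAME are not shown in this excerpt; a minimal sketch of the helper, assuming the token is fetched for an EKS cluster via the AWS CLI (the approach and names are assumptions):

import json
import subprocess

def get_token(cluster_name):
    # Hypothetical helper: obtain an EKS bearer token with "aws eks get-token".
    out = subprocess.check_output(
        ["aws", "eks", "get-token", "--cluster-name", cluster_name])
    return json.loads(out)["status"]["token"]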