Example #1
def create_namespace(name):
    v1 = client.CoreV1Api()
    body = client.V1Namespace(metadata=client.V1ObjectMeta(name=name))
    v1.create_namespace(body=body)
Example #2
def create_namespace(name):
    api = kube_client.CoreV1Api()
    return api.create_namespace(body=kube_client.V1Namespace(metadata=kube_client.V1ObjectMeta(name=name)))
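# Both create_namespace snippets above assume the cluster configuration has
# already been loaded; a minimal sketch of that setup (the namespace name
# "demo" is illustrative):
from kubernetes import client, config

config.load_kube_config()  # or config.load_incluster_config() inside a pod
create_namespace("demo")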
Example #3
def run(namespace,
        tmpVolumeSize,
        outputVolumeSize,
        volumeName,
        storage_class_name=None,
        imagepullsecrets=None,
        ades_namespace=None,
        state=None):
    print(
        f"Preparing {namespace} tmpVolumeSize: {tmpVolumeSize} outputVolumeSize: {outputVolumeSize}  volumeName: {volumeName}"
    )

    apiclient = helpers.get_api_client()
    api_instance = client.RbacAuthorizationV1Api(apiclient)
    v1 = client.CoreV1Api(api_client=apiclient)

    print("####################################")
    print("######### Checking if namespace already exists")
    try:
        v1.read_namespace(namespace, pretty=True)
        print("Namespace already exists")
        return {"status": "success"}
    except ApiException as e:
        if e.status == 404:
            print("Namespace does not exists and will be created")
        else:
            print("Exception when creating namespace: %s\n" % e,
                  file=sys.stderr)
            raise e

    ### Creating namespace
    print("####################################")
    print("######### Creating namespace")
    try:
        body = client.V1Namespace(metadata=client.V1ObjectMeta(name=namespace))
        namespace_json = v1.create_namespace(body=body, async_req=False)
        print(str(namespace_json))
    except ApiException as e:
        print("Exception when creating namespace: %s\n" % e, file=sys.stderr)
        raise e

    #### Creating pod manager role
    print("####################################")
    print("######### Creating pod_manager_role")
    metadata = client.V1ObjectMeta(name='pod-manager-role',
                                   namespace=namespace)
    rule = client.V1PolicyRule(
        api_groups=['*'],
        resources=['pods', 'pods/log'],
        verbs=['create', 'patch', 'delete', 'list', 'watch'])
    rules = []
    rules.append(rule)
    body = client.V1Role(metadata=metadata, rules=rules)
    pretty = True

    try:
        api_response = api_instance.create_namespaced_role(namespace,
                                                           body,
                                                           pretty=pretty)
        pprint(api_response)
    except ApiException as e:
        print("Exception when creating pod-manager-role: %s\n" % e,
              file=sys.stderr)
        raise e

    #### Creating log-reader-role
    print("####################################")
    print("######### Creating log-reader-role")
    metadata = client.V1ObjectMeta(name='log-reader-role', namespace=namespace)
    rule = client.V1PolicyRule(
        api_groups=['*'],
        resources=['pods', 'pods/log'],
        verbs=['create', 'patch', 'delete', 'list', 'watch'])
    # verbs=['get', 'list'])
    rules = []
    rules.append(rule)
    body = client.V1Role(metadata=metadata, rules=rules)
    pretty = True

    try:
        api_response = api_instance.create_namespaced_role(namespace,
                                                           body,
                                                           pretty=pretty)
        pprint(api_response)
    except ApiException as e:
        print("Exception when creating pod-manager-role: %s\n" % e,
              file=sys.stderr)
        raise e

    print("####################################")
    print("######### Creating pod-manager-default-binding")
    metadata = client.V1ObjectMeta(name='pod-manager-default-binding',
                                   namespace=namespace)

    role_ref = client.V1RoleRef(api_group='rbac.authorization.k8s.io',
                                kind='Role',
                                name='pod-manager-role')

    subject = client.models.V1Subject(api_group='',
                                      kind='ServiceAccount',
                                      name='default',
                                      namespace=namespace)
    subjects = []
    subjects.append(subject)

    body = client.V1RoleBinding(metadata=metadata,
                                role_ref=role_ref,
                                subjects=subjects)
    pretty = True
    try:
        api_response = api_instance.create_namespaced_role_binding(
            namespace, body, pretty=pretty)
        pprint(api_response)
    except ApiException as e:
        print("Exception when creating pod-manager-default-binding: %s\n" % e,
              file=sys.stderr)
        raise e

    print("####################################")
    print("######### Creating log-reader-default-binding")
    metadata = client.V1ObjectMeta(name='log-reader-default-binding',
                                   namespace=namespace)

    role_ref = client.V1RoleRef(api_group='rbac.authorization.k8s.io',
                                kind='Role',
                                name='log-reader-role')

    subject = client.models.V1Subject(api_group='',
                                      kind='ServiceAccount',
                                      name='default',
                                      namespace=namespace)
    subjects = []
    subjects.append(subject)

    body = client.V1RoleBinding(metadata=metadata,
                                role_ref=role_ref,
                                subjects=subjects)
    pretty = True
    try:
        api_response = api_instance.create_namespaced_role_binding(
            namespace, body, pretty=pretty)
        pprint(api_response)
    except ApiException as e:
        print("Exception when creating log-reader-default-binding: %s\n" % e,
              file=sys.stderr)
        raise e

    print("####################################")
    print("######### Creating cluster-role-binding")
    metadata = client.V1ObjectMeta(name=f"{namespace}-rbac",
                                   namespace=namespace)

    role_ref = client.V1RoleRef(api_group='rbac.authorization.k8s.io',
                                kind='ClusterRole',
                                name='cluster-admin')

    subject = client.models.V1Subject(api_group='',
                                      kind='ServiceAccount',
                                      name='default',
                                      namespace=namespace)
    subjects = []
    subjects.append(subject)

    body = client.V1ClusterRoleBinding(metadata=metadata,
                                       role_ref=role_ref,
                                       subjects=subjects)
    pretty = True
    try:
        api_response = api_instance.create_cluster_role_binding(body=body,
                                                                pretty=pretty)
        pprint(api_response)
    except ApiException as e:
        if e.status == 409:
            print(
                f"cluster-role-binding {namespace}-rbac has already been installed"
            )
        else:
            print("Exception when creating cluster-role-binding: %s\n" % e,
                  file=sys.stderr)
            raise e

    print("####################################")
    print("######### Creating Persistent Volume Claims")

    # metadata1 = client.V1ObjectMeta(name=f"{volumeName}-input-data", namespace=namespace)
    # spec1 = client.V1PersistentVolumeClaimSpec(
    #     # must be ReadWriteOnce for EBS
    #     # access_modes=["ReadWriteOnce", "ReadOnlyMany"],
    #     access_modes=["ReadWriteMany"],
    #     resources=client.V1ResourceRequirements(
    #         requests={"storage": inputVolumeSize}
    #     )
    # )
    #
    # if storage_class_name:
    #     spec1.storage_class_name = storage_class_name
    #
    # body1 = client.V1PersistentVolumeClaim(metadata=metadata1, spec=spec1)

    metadata2 = client.V1ObjectMeta(name=f"{volumeName}-tmpout",
                                    namespace=namespace)
    spec2 = client.V1PersistentVolumeClaimSpec(
        access_modes=["ReadWriteMany"],
        resources=client.V1ResourceRequirements(
            requests={"storage": tmpVolumeSize}))
    if storage_class_name:
        spec2.storage_class_name = storage_class_name

    body2 = client.V1PersistentVolumeClaim(metadata=metadata2, spec=spec2)

    metadata3 = client.V1ObjectMeta(name=f"{volumeName}-output-data",
                                    namespace=namespace)
    spec3 = client.V1PersistentVolumeClaimSpec(
        access_modes=["ReadWriteMany"],
        resources=client.V1ResourceRequirements(
            requests={"storage": outputVolumeSize}))
    if storage_class_name:
        spec3.storage_class_name = storage_class_name

    body3 = client.V1PersistentVolumeClaim(metadata=metadata3, spec=spec3)

    pretty = True
    try:
        #    api_response1 = v1.create_namespaced_persistent_volume_claim(namespace, body1, pretty=pretty)
        api_response2 = v1.create_namespaced_persistent_volume_claim(
            namespace, body2, pretty=pretty)
        api_response3 = v1.create_namespaced_persistent_volume_claim(
            namespace, body3, pretty=pretty)
        #    pprint(api_response1)
        pprint(api_response2)
        pprint(api_response3)
    except ApiException as e:
        print("Exception when creating persistent_volume_claim: %s\n" % e,
              file=sys.stderr)
        raise e

    # we copy the secret from ades namespace to the new job namespace
    if imagepullsecrets is not None and ades_namespace is not None:
        for imagepullsecret in imagepullsecrets:
            # Create an instance of the API class
            secretname = imagepullsecret["name"]
            pretty = True  # str | If 'true', then the output is pretty printed. (optional)
            exact = False  # bool | Should the export be exact.  Exact export maintains cluster-specific fields like 'Namespace'. Deprecated. Planned for removal in 1.18. (optional)
            export = True  # bool | Should this value be exported.  Export strips fields that a user can not specify. Deprecated. Planned for removal in 1.18. (optional)

            secret_export = None
            try:
                secret_export = v1.read_namespaced_secret(secretname,
                                                          ades_namespace,
                                                          pretty=pretty,
                                                          exact=exact,
                                                          export=export)
            except ApiException as e:
                print(
                    "Exception when retrieving image pull secret from eoepca: %s\n"
                    % e)

            time.sleep(5)
            try:
                api_response = v1.create_namespaced_secret(namespace,
                                                           secret_export,
                                                           pretty=pretty)
            except ApiException as e:
                print("Exception when creating image pull secret: %s\n" % e)

            time.sleep(5)

            name = 'default'
            try:
                service_account_body = v1.read_namespaced_service_account(
                    name, namespace, pretty=True)
                pprint(service_account_body)
                time.sleep(5)

                if service_account_body.secrets is None:
                    service_account_body.secrets = []

                if service_account_body.image_pull_secrets is None:
                    service_account_body.image_pull_secrets = []

                service_account_body.secrets.append({"name": secretname})
                service_account_body.image_pull_secrets.append(
                    {"name": secretname})

                api_response = v1.patch_namespaced_service_account(
                    name, namespace, service_account_body, pretty=True)
                pprint(api_response)
            except ApiException as e:
                print(
                    "Exception when calling CoreV1Api->patch_namespaced_service_account: %s\n"
                    % e)

    return {"status": "success"}
Example #4
    def set_namespace_label(self, namespace, label_dict):
        metadata = {'labels': label_dict}
        body = client.V1Namespace(metadata=self._get_metadata(metadata))
        return self.v1_h.patch_namespace(namespace, body)
Example #5
def test_namespace_routing(setup, test_input, expected):
    '''
    Test the namespace routing feature. This test creates an index in Splunk named after the namespace, starts a pod to generate
    some logs, then checks that the logs are indexed as events in Splunk. The test handles cleanup of the index and its events.
    '''
    # Splunk index and namespace are assumed to be the same
    index = test_input
    namespace = test_input

    # Handle special cases of default namespaces kube-system and kube-public
    if test_input == "kube-system" or test_input == "kube-public":
        search_query = "index={0}".format(test_input)
        events = check_events_from_splunk(
            index=index,
            start_time="-1h@h",
            url=setup["splunkd_url"],
            user=setup["splunk_user"],
            query=["search {0}".format(search_query)],
            password=setup["splunk_password"])
        logging.getLogger().info("Received {0} events in the index {1}".format(
            len(events), index))
        assert len(events) >= 0
        pytest.skip(
            "Test successful, skipping rest of the test for special cases")

    # Initialize kubernetes python client
    config.load_kube_config()
    v1 = client.CoreV1Api()
    found = False

    # Search for namespace
    for ns in v1.list_namespace().items:
        if test_input == ns.metadata.name:
            found = True

    # Create namespace
    if not found:
        logging.getLogger().info("creating namespace")
        try:
            v1.create_namespace(
                client.V1Namespace(metadata=client.V1ObjectMeta(
                    name=test_input)))
        except ApiException as e:
            logging.getLogger().info(
                "Exception when calling CoreV1Api create_namespace: {0}".
                format(e))

    search_query = "index={0} | delete".format(test_input)
    events = check_events_from_splunk(
        index=index,
        start_time="-1h@h",
        url=setup["splunkd_url"],
        user=setup["splunk_user"],
        query=["search {0}".format(search_query)],
        password=setup["splunk_password"])
    logging.getLogger().info("Received {0} events in the index {1}".format(
        len(events), index))
    assert len(events) == 0

    # Data generator image metadata
    image_name = "cp-data-gen"
    image_address = "chaitanyaphalak/kafkadatagen:1.0-4-gca7f6d8"
    image_pull_policy = "IfNotPresent"

    # Create pod in the test namespace to generate logs
    pod = client.V1Pod()
    pod.metadata = client.V1ObjectMeta(name=image_name)

    container = client.V1Container(name=image_name,
                                   image=image_address,
                                   image_pull_policy=image_pull_policy)

    spec = client.V1PodSpec(containers=[container])
    pod.spec = spec
    try:
        v1.create_namespaced_pod(namespace=namespace, body=pod)
    except ApiException as e:
        logging.getLogger().info(
            "Exception when calling CoreV1Api create_namespaced_pod: {0}".
            format(e))

    logging.getLogger().info("Sleeping for 60 seconds")
    time.sleep(60)

    # Check if we have those generated logs from kubernetes in Splunk
    v1.delete_namespaced_pod(name=image_name, namespace=namespace, body=pod)

    search_query = "index={0}".format(test_input)
    events = check_events_from_splunk(
        index=index,
        start_time="-1h@h",
        url=setup["splunkd_url"],
        user=setup["splunk_user"],
        query=["search {0}".format(search_query)],
        password=setup["splunk_password"])
    logging.getLogger().info(
        "Splunk received {0} events in the last minute in the index {1}".
        format(len(events), index))

    assert len(events) > 0
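# A hedged alternative to the fixed time.sleep(60) above: poll the pod phase
# with the same CoreV1Api client until it reports Running (the timeout and
# interval values are illustrative, not part of the original test):
def wait_for_pod_running(v1, name, namespace, timeout=60, interval=2):
    deadline = time.time() + timeout
    while time.time() < deadline:
        pod = v1.read_namespaced_pod(name=name, namespace=namespace)
        if pod.status.phase == "Running":
            return True
        time.sleep(interval)
    return False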
Example #6
    def setUpClass(cls):
        '''
        Deploy operator to a "kind" cluster created by run.sh using examples from /manifests.
        This operator deployment is to be shared among all tests.

        run.sh deletes the 'kind' cluster after successful run along with all operator-related entities.
        In the case of test failure the cluster will stay to enable manual examination;
        next invocation of "make test" will re-create it.
        '''
        print("Test Setup being executed")

        # set a single K8s wrapper for all tests
        k8s = cls.k8s = K8s()

        # remove existing local storage class and create hostpath class
        try:
            k8s.api.storage_v1_api.delete_storage_class("standard")
        except ApiException as e:
            print(
                "Failed to delete the 'standard' storage class: {0}".format(e))

        # operator deploys pod service account there on start up
        # needed for test_multi_namespace_support()
        cls.test_namespace = "test"
        try:
            v1_namespace = client.V1Namespace(metadata=client.V1ObjectMeta(
                name=cls.test_namespace))
            k8s.api.core_v1.create_namespace(v1_namespace)
        except ApiException as e:
            print("Failed to create the '{0}' namespace: {1}".format(
                cls.test_namespace, e))

        # submit the most recent operator image built on the Docker host
        with open("manifests/postgres-operator.yaml", 'r+') as f:
            operator_deployment = yaml.safe_load(f)
            operator_deployment["spec"]["template"]["spec"]["containers"][0][
                "image"] = os.environ['OPERATOR_IMAGE']

        with open("manifests/postgres-operator.yaml", 'w') as f:
            yaml.dump(operator_deployment, f, Dumper=yaml.Dumper)

        with open("manifests/configmap.yaml", 'r+') as f:
            configmap = yaml.safe_load(f)
            configmap["data"]["workers"] = "1"

        with open("manifests/configmap.yaml", 'w') as f:
            yaml.dump(configmap, f, Dumper=yaml.Dumper)

        for filename in [
                "operator-service-account-rbac.yaml", "postgresteam.crd.yaml",
                "configmap.yaml", "postgres-operator.yaml", "api-service.yaml",
                "infrastructure-roles.yaml", "infrastructure-roles-new.yaml",
                "e2e-storage-class.yaml"
        ]:
            result = k8s.create_with_kubectl("manifests/" + filename)
            print("stdout: {}, stderr: {}".format(result.stdout,
                                                  result.stderr))

        k8s.wait_for_operator_pod_start()

        # reset taints and tolerations
        k8s.api.core_v1.patch_node("postgres-operator-e2e-tests-worker",
                                   {"spec": {
                                       "taints": []
                                   }})
        k8s.api.core_v1.patch_node("postgres-operator-e2e-tests-worker2",
                                   {"spec": {
                                       "taints": []
                                   }})

        # make sure we start a new operator on every new run,
        # this tackles the problem when kind is reused
        # and the Docker image is in fact changed (dirty one)

        k8s.update_config({}, step="TestSuite Startup")

        actual_operator_image = k8s.api.core_v1.list_namespaced_pod(
            'default', label_selector='name=postgres-operator'
        ).items[0].spec.containers[0].image
        print("Tested operator image: {}".format(
            actual_operator_image))  # shows up after tests finish

        result = k8s.create_with_kubectl(
            "manifests/minimal-postgres-manifest.yaml")
        print('stdout: {}, stderr: {}'.format(result.stdout, result.stderr))
        try:
            k8s.wait_for_pod_start('spilo-role=master')
            k8s.wait_for_pod_start('spilo-role=replica')
        except timeout_decorator.TimeoutError:
            print('Operator log: {}'.format(k8s.get_operator_log()))
            raise
Example #7
from lib.exp import Client
import uuid
from kubernetes import client as k8sclient

test_namespace = k8sclient.V1Namespace()
test_namespace.metadata = k8sclient.V1ObjectMeta(name='ns' + str(uuid.uuid4()))


def setup():
    v1_api = k8sclient.CoreV1Api()
    v1_api.create_namespace(test_namespace)

    c = Client(namespace=test_namespace.metadata.name)
    c.create_crds()


def teardown():
    v1_api = k8sclient.CoreV1Api()

    v1_api.delete_namespace(name=test_namespace.metadata.name,
                            body=k8sclient.models.V1DeleteOptions())
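# delete_namespace only starts an asynchronous deletion; if a subsequent run
# reuses the name, the teardown may need to wait until the namespace is
# really gone. A hedged sketch of such a wait (timeout values illustrative):
from kubernetes.client.rest import ApiException
import time

def wait_for_namespace_gone(v1_api, name, timeout=120, interval=2):
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            v1_api.read_namespace(name=name)
        except ApiException as e:
            if e.status == 404:  # namespace fully removed
                return True
            raise
        time.sleep(interval)
    return False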
Example #8
    def create_namespace(self, ns_name):
        self.cluster.create_namespace(
            client.V1Namespace(metadata=client.V1ObjectMeta(name=ns_name)))
Example #9
    def CreateNameSpace(self, name):
        body = client.V1Namespace()
        body.metadata = client.V1ObjectMeta(name=name)
        return self.Connect.create_namespace(body=body)
Example #10
def V1Namespace():
    v1Namespace = client.V1Namespace()
    return v1Namespace
Example #11
def online_install(ctx):
    click.echo(
        'Installing WALKOFF to Kubernetes cluster with Internet access.')
    try:
        config_dir = os.environ.get(
            'KUBECONFIG',
            os.path.join(os.path.expanduser("~"), ".kube", "config"))
        config_dir = click.prompt("Enter location of kubernetes config",
                                  default=config_dir)

        contexts, current = config.list_kube_config_contexts(
            config_file=config_dir)
        contexts = [context["name"] for context in contexts]
        current = current["name"]

        context = click.prompt(
            "Available contexts: {}\nEnter context to install WALKOFF to".
            format(contexts),
            default=current)

        config.load_kube_config(config_file=config_dir, context=context)
        k8s_api = k8s_client.CoreV1Api()
        k8s_custom_api = k8s_client.CustomObjectsApi()
    except IOError as e:
        print("Could not open config: {}".format(e))
        return

    namespaces = k8s_api.list_namespace()
    namespaces = [ns.metadata.name for ns in namespaces.items]
    namespace = click.prompt(
        "Available namespaces: {}\nEnter namespace to install WALKOFF in".
        format(namespaces),
        default="default")

    if namespace not in namespaces:
        if click.confirm("{} does not exist - do you want to create it now?"):
            new_namespace = k8s_client.V1Namespace(
                metadata={'name': namespace})
            try:
                k8s_api.create_namespace(new_namespace)
            except k8s_client.rest.ApiException as e:
                click.echo("Error creating namespace:\n{}".format(str(e)))
                click.echo(
                    'You should use the uninstall command to rollback changes made by this installer.'
                )
                return

    tiller_namespace = click.prompt(
        'Enter the namespace your Tiller service resides in',
        default='kube-system')

    click.echo("Generating ZMQ certificates for WALKOFF.")
    if subprocess.call(['python', 'scripts/generate_certificates.py']) != 0:
        click.echo("Error generating ZMQ certificates.")
        return

    click.echo("Adding ZMQ certificates to Kubernetes secrets.")
    kubectl_command([
        'create', 'secret', 'generic', 'walkoff-zmq-private-keys',
        '--from-file=server.key_secret=./.certificates/private_keys/server.key_secret',
        '--from-file=client.key_secret=./.certificates/private_keys/client.key_secret'
    ], namespace)

    kubectl_command([
        'create', 'secret', 'generic', 'walkoff-zmq-public-keys',
        '--from-file=server.key=./.certificates/public_keys/server.key',
        '--from-file=client.key=./.certificates/public_keys/client.key'
    ], namespace)

    existing_secrets = [
        secret.metadata.name
        for secret in k8s_api.list_namespaced_secret(namespace).items
    ]
    redis_secret_name = None
    redis_hostname = None
    if click.confirm(
            'Is there an existing Redis instance WALKOFF should use?'):
        redis_hostname = click.prompt(
            'Enter the Redis hostname (if it is not in the same Kubernetes namespace '
            'as WALKOFF, enter a fully qualified domain name)')
        if click.confirm(
                "Is the Redis password already stored in a Kubernetes secret?"
        ):
            redis_secret_name = click.prompt(
                'Available secrets: {}\nEnter the name of the secret the Redis password '
                'is stored in with a key of "redis-password" (leave blank for none): ',
                default="")
            if redis_secret_name not in existing_secrets:
                redis_secret_name = None
                click.echo(
                    'No secret with that name in this namespace. Creating a new secret to store password.'
                )

    if not redis_secret_name:
        redis_secret_name = "walkoff-redis-secret"
        new_pass = click.prompt('Enter a password for the Redis instance',
                                hide_input=True,
                                confirmation_prompt=True,
                                default='walkoff')
        redis_secret_obj = k8s_client.V1Secret(
            metadata={'name': redis_secret_name},
            data={
                'redis-password':
                b64encode(new_pass.encode('utf-8')).decode('utf-8')
            })
        try:
            k8s_api.create_namespaced_secret(namespace, redis_secret_obj)
        except k8s_client.rest.ApiException as e:
            click.echo("Error creating secret:\n{}".format(str(e)))
            click.echo(
                'You should use the uninstall command to rollback changes made by this installer.'
            )
            return

    with open("k8s_manifests/setupfiles/redis-helm-values.yaml", 'r+') as f:
        try:
            y = yaml.safe_load(f)
            y['existingSecret'] = redis_secret_name
            f.seek(0)
            f.truncate()
            yaml.dump(y, f, default_flow_style=False)
        except yaml.YAMLError as e:
            click.echo(
                "Error reading k8s_manifests/setupfiles/redis-helm-values.yaml"
            )
            click.echo(
                'You should use the uninstall command to rollback changes made by this installer.'
            )
            return

    if not redis_hostname:
        redis_hostname = 'walkoff-redis'
        helm_command([
            'install', 'stable/redis', '--name', redis_hostname, '--values',
            'k8s_manifests/setupfiles/redis-helm-values.yaml', '--set',
            'existingSecret={}'.format(redis_secret_name)
        ], tiller_namespace)

    execution_secret_name = None
    execution_db_hostname = None
    if click.confirm(
            'Do you have an existing PostgreSQL database to store WALKOFF execution data in?'
    ):
        execution_db_hostname = click.prompt(
            'Enter the database hostname (if it is not in the same Kubernetes '
            'namespace as WALKOFF, enter a fully qualified domain name)')
        execution_db_username = click.prompt(
            'Enter a username that is able to create/read/write/update databases'
        )
        if click.confirm(
                "Is the PostgreSQL password already stored in a Kubernetes secret?"
        ):
            execution_secret_name = click.prompt(
                'Available secrets: {}\nEnter the name of the secret the PostgreSQL '
                'password is stored in with a key of "postgres-password" '
                '(leave blank for none): '.format(existing_secrets),
                default="")
            if execution_secret_name not in existing_secrets:
                execution_secret_name = None
                click.echo(
                    'No secret with that name in this namespace. Creating a new secret to store password.'
                )

    if not execution_secret_name:
        execution_secret_name = "walkoff-postgres-execution-secret"
        execution_db_username = click.prompt('Enter a username to create',
                                             default='walkoff')
        execution_db_password = click.prompt(
            'Enter a password for the PostgreSQL instance',
            hide_input=True,
            confirmation_prompt=True,
            default='walkoff')
        execution_secret_obj = k8s_client.V1Secret(
            metadata={'name': execution_secret_name},
            data={
                'postgres-password':
                b64encode(
                    execution_db_password.encode('utf-8')).decode('utf-8')
            })
        try:
            k8s_api.create_namespaced_secret(namespace, execution_secret_obj)
        except k8s_client.rest.ApiException as e:
            click.echo("Error creating secret:\n{}".format(str(e)))
            click.echo(
                'You should use the uninstall command to rollback changes made by this installer.'
            )
            return

    with open("k8s_manifests/setupfiles/execution-postgres-helm-values.yaml",
              'r+') as f:
        try:
            y = yaml.safe_load(f)
            y['postgresqlUsername'] = execution_db_username
            y['postgresqlPassword'] = execution_db_password
            f.seek(0)
            f.truncate()
            yaml.dump(y, f, default_flow_style=False)
        except yaml.YAMLError as e:
            click.echo(
                "Error reading k8s_manifests/setupfiles/execution-postgres-helm-values.yaml"
            )
            click.echo(
                'You should use the uninstall command to rollback changes made by this installer.'
            )
            return

    if not execution_db_hostname:
        helm_command([
            'install', 'stable/postgresql', '--name', 'execution-db',
            '--values',
            'k8s_manifests/setupfiles/execution-postgres-helm-values.yaml'
        ], tiller_namespace)
        execution_db_hostname = 'execution-db-postgresql'

    walkoff_db_secret_name = None
    walkoff_db_hostname = None
    if click.confirm(
            'Do you have an existing PostgreSQL database to store WALKOFF application data in? '
            '(This can be the same or different as the previous)'):
        walkoff_db_hostname = click.prompt(
            'Enter the database hostname (if it is not in the same Kubernetes namespace '
            'as WALKOFF, enter a fully qualified domain name)')
        walkoff_db_username = click.prompt(
            'Enter a username that is able to create/read/write/update databases'
        )
        if click.confirm(
                "Is the PostgreSQL password already stored in a Kubernetes secret?"
        ):
            walkoff_db_secret_name = click.prompt(
                'Available secrets: {}\nEnter the name of the secret the PostgreSQL '
                'password is stored in with a key of "postgres-password" '
                '(leave blank for none): '.format(existing_secrets),
                default="")
            if walkoff_db_secret_name not in existing_secrets:
                walkoff_db_secret_name = None
                click.echo(
                    'No secret with that name in this namespace. Creating a new secret to store password.'
                )

    if not walkoff_db_secret_name:
        walkoff_db_secret_name = "walkoff-postgres-secret"
        walkoff_db_username = click.prompt('Enter a username to create',
                                           default='walkoff')
        walkoff_db_password = click.prompt(
            'Enter a password for the PostgreSQL instance',
            hide_input=True,
            confirmation_prompt=True,
            default='walkoff')
        walkoff_db_secret_obj = k8s_client.V1Secret(
            metadata={'name': walkoff_db_secret_name},
            data={
                'postgres-password':
                b64encode(walkoff_db_password.encode('utf-8')).decode('utf-8')
            })
        try:
            k8s_api.create_namespaced_secret(namespace, walkoff_db_secret_obj)
        except k8s_client.rest.ApiException as e:
            click.echo("Error creating secret:\n{}".format(str(e)))
            click.echo(
                'You should use the uninstall command to rollback changes made by this installer.'
            )
            return

    with open("k8s_manifests/setupfiles/walkoff-postgres-helm-values.yaml",
              'r+') as f:
        try:
            y = yaml.safe_load(f)
            y['postgresqlUsername'] = walkoff_db_username
            y['postgresqlPassword'] = walkoff_db_password
            f.seek(0)
            f.truncate()
            yaml.dump(y, f, default_flow_style=False)
        except yaml.YAMLError as e:
            click.echo(
                "Error reading k8s_manifests/setupfiles/walkoff-postgres-helm-values.yaml"
            )
            click.echo(
                'You should use the uninstall command to rollback changes made by this installer.'
            )
            return

    if not walkoff_db_hostname:
        helm_command([
            'install', 'stable/postgresql', '--name', 'walkoff-db', '--values',
            'k8s_manifests/setupfiles/walkoff-postgres-helm-values.yaml'
        ], tiller_namespace)
        walkoff_db_hostname = 'walkoff-db-postgresql'

    walkoff_ca_key_pair = None
    if click.confirm(
            'Do you have an existing CA signing key pair stored in Kubernetes secrets?'
    ):
        walkoff_ca_key_pair = click.prompt(
            'Available secrets: {}\nEnter the name of the secret the key pair is stored in (leave blank for none): '.format(existing_secrets),
            default="")
        if walkoff_ca_key_pair not in existing_secrets:
            walkoff_ca_key_pair = None
            click.echo(
                'No secret with that name in this namespace. Creating a new secret to store keypair.'
            )

    if not walkoff_ca_key_pair:
        crt = None
        key = None
        if click.confirm('Do you have existing CA signing key pair files?'):
            while not crt:
                crt = click.prompt('Enter the path to a cert (.crt) file: ')
                try:
                    with open(crt, 'rb') as f:
                        crt = b64encode(f.read()).decode('ascii')
                        click.echo('Successfully loaded cert')
                except IOError as e:
                    click.echo('Error reading {}: {}'.format(crt, e))
                    crt = None

            while not key:
                key = click.prompt(
                    'Enter the path to the matching private key (.key) file: ')
                try:
                    with open(key, 'rb') as f:
                        key = b64encode(f.read()).decode('ascii')
                        click.echo('Successfully loaded key.')
                except IOError as e:
                    click.echo('Error reading {}: {}'.format(key, e))
                    key = None

        if not all((crt, key)):
            private_key = rsa.generate_private_key(public_exponent=65537,
                                                   key_size=2048,
                                                   backend=default_backend())
            public_key = private_key.public_key()
            builder = x509.CertificateBuilder()
            builder = builder.subject_name(
                x509.Name(
                    [x509.NameAttribute(NameOID.COMMON_NAME, u'walkoff')]))
            builder = builder.issuer_name(
                x509.Name([
                    x509.NameAttribute(NameOID.COMMON_NAME, u'walkoff'),
                ]))
            builder = builder.not_valid_before(datetime.datetime.today() -
                                               datetime.timedelta(days=1))
            builder = builder.not_valid_after(datetime.datetime.today() +
                                              datetime.timedelta(days=3650))
            builder = builder.serial_number(int(uuid.uuid4()))
            builder = builder.public_key(public_key)

            builder = builder.add_extension(
                x509.SubjectKeyIdentifier.from_public_key(public_key),
                critical=False)
            builder = builder.add_extension(
                x509.AuthorityKeyIdentifier.from_issuer_public_key(public_key),
                critical=False)
            builder = builder.add_extension(x509.BasicConstraints(
                ca=True, path_length=None),
                                            critical=True)
            certificate = builder.sign(private_key=private_key,
                                       algorithm=hashes.SHA256(),
                                       backend=default_backend())

            with open("ca.key", "wb") as f:
                byte_cert = private_key.private_bytes(
                    encoding=serialization.Encoding.PEM,
                    format=serialization.PrivateFormat.TraditionalOpenSSL,
                    encryption_algorithm=serialization.NoEncryption())
                key = b64encode(byte_cert).decode('ascii')
                f.write(byte_cert)

            with open("ca.crt", "wb") as f:
                byte_key = certificate.public_bytes(
                    encoding=serialization.Encoding.PEM, )
                crt = b64encode(byte_key).decode('ascii')
                f.write(byte_key)

        tls_secret = k8s_client.V1Secret(
            metadata={'name': 'walkoff-ca-key-pair'},
            data={
                'tls.crt': crt,
                'tls.key': key
            },
            type='kubernetes.io/tls')
        try:
            k8s_api.create_namespaced_secret('default', tls_secret)
        except k8s_client.rest.ApiException as e:
            click.echo("Error creating secret:\n{}".format(str(e)))
            click.echo(
                'You should use the uninstall command to rollback changes made by this installer.'
            )
            return

        walkoff_ca_key_pair = 'walkoff-ca-key-pair'

    helm_command(
        ['install', 'stable/cert-manager', '--name', 'walkoff-cert-manager'],
        tiller_namespace)

    with open("k8s_manifests/setupfiles/cert-issuer.yaml", 'r+') as f:
        try:
            y = yaml.safe_load(f)
            y['spec']['ca']['secretName'] = walkoff_ca_key_pair
            f.seek(0)
            f.truncate()
            yaml.dump(y, f, default_flow_style=False)
        except yaml.YAMLError as e:
            click.echo(
                "Error reading k8s_manifests/setupfiles/cert-issuer.yaml")
            click.echo(
                'You should use the uninstall command to rollback changes made by this installer.'
            )
            return

    kubectl_command(
        ['apply', '-f', 'k8s_manifests/setupfiles/cert-issuer.yaml'],
        namespace)
    kubectl_command(['apply', '-f', 'k8s_manifests/setupfiles/cert.yaml'],
                    namespace)

    with open("k8s_manifests/setupfiles/walkoff-values.yaml", 'r+') as f:
        try:
            y = yaml.safe_load(f)
            y['namespace'] = namespace
            y['resources']['redis']['service_name'] = redis_hostname
            y['resources']['redis']['secret_name'] = redis_secret_name
            y['resources']['execution_db'][
                'service_name'] = execution_db_hostname
            y['resources']['execution_db'][
                'secret_name'] = execution_secret_name
            y['resources']['execution_db']['username'] = execution_db_username
            y['resources']['walkoff_db']['service_name'] = walkoff_db_hostname
            y['resources']['walkoff_db'][
                'secret_name'] = walkoff_db_secret_name
            y['resources']['walkoff_db']['username'] = walkoff_db_username
            f.seek(0)
            f.truncate()
            yaml.dump(y, f, default_flow_style=False)
        except yaml.YAMLError as e:
            click.echo(
                "Error reading k8s_manifests/setupfiles/walkoff-values.yaml")
            click.echo(
                'You should use the uninstall command to rollback changes made by this installer.'
            )
            return

    helm_command([
        'install', 'k8s_manifests/helm_charts/walkoff', '--name',
        'walkoff-deployment'
    ], tiller_namespace)
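# kubectl_command and helm_command are not defined in this snippet; one
# plausible shape for them, assuming they shell out and append the target
# namespace (an assumption, not the installer's actual helpers; Helm 2's
# --tiller-namespace flag matches the Tiller prompt above):
import subprocess

def kubectl_command(args, namespace):
    return subprocess.call(['kubectl'] + args + ['--namespace', namespace])

def helm_command(args, tiller_namespace):
    return subprocess.call(
        ['helm'] + args + ['--tiller-namespace', tiller_namespace])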
Example #12
def _sync_namespaces(request, core_v1, rbac_v1):
    # K8S namespaces -> portal namespaces
    success_count_pull = 0
    k8s_ns_list = None
    try:
        k8s_ns_list = core_v1.list_namespace()
    except Exception as e:
        logger.error("Exception: {0}".format(e))
        messages.error(request, "Sync failed, error while fetching list of namespaces: {0}.".format(e))
        return
    k8s_ns_uids = []
    for k8s_ns in k8s_ns_list.items:
        try:
            k8s_ns_name = k8s_ns.metadata.name
            k8s_ns_uid = k8s_ns.metadata.uid
            # remember for later use
            k8s_ns_uids.append(k8s_ns_uid)
            portal_ns, created = KubernetesNamespace.objects.get_or_create(
                name=k8s_ns_name, uid=k8s_ns_uid)
            if created:
                # Create missing namespace record
                logger.info(
                    "Creating record for Kubernetes namespace '{0}'".format(k8s_ns_name))
                if k8s_ns_name in HIDDEN_NAMESPACES:
                    portal_ns.visible = False
                else:
                    portal_ns.visible = True
                portal_ns.save()
                messages.info(request,
                              "Found new Kubernetes namespace '{0}'.".format(k8s_ns_name))
            else:
                # No action needed
                logger.debug(
                    "Found existing record for Kubernetes namespace '{0}'".format(k8s_ns_name))
                success_count_pull += 1
        except Exception as e:
            logger.error("Exception: {0}".format(e))
            messages.error(request, "Sync from Kubernetes for namespace {0} failed: {1}.".format(k8s_ns_name, e))

    # portal namespaces -> K8S namespaces
    success_count_push = 0
    for portal_ns in KubernetesNamespace.objects.all():
        try:
            if portal_ns.uid:
                # Portal namespace records with UID must exist in K8S, or they
                # are stale and should be deleted
                if portal_ns.uid in k8s_ns_uids:
                    # No action needed
                    logger.debug(
                        "Found existing Kubernetes namespace for record '{0}'".format(portal_ns.name))
                    success_count_push += 1
                else:
                    # Remove stale namespace record
                    logger.warning(
                        "Removing stale record for Kubernetes namespace '{0}'".format(portal_ns.name))
                    portal_ns.delete()
                    messages.info(
                        request, "Namespace '{0}' no longer exists in Kubernetes and was removed.".format(portal_ns.name))
            else:
                # Portal namespaces without UID are new and should be created in K8S
                logger.info(
                    "Creating Kubernetes namespace '{0}'".format(portal_ns.name))
                k8s_ns = client.V1Namespace(
                    api_version="v1", kind="Namespace", metadata=client.V1ObjectMeta(name=portal_ns.name))
                core_v1.create_namespace(k8s_ns)
                # Fetch UID and store it in portal record
                created_k8s_ns = core_v1.read_namespace(name=portal_ns.name)
                portal_ns.uid = created_k8s_ns.metadata.uid
                portal_ns.save()
                messages.success(
                    request, "Created namespace '{0}' in Kubernetes.".format(portal_ns.name))
        except Exception as e:
            logger.error("Exception: {0}".format(e))
            messages.error(request, "Sync to Kubernetes for namespace {0} failed: {1}.".format(portal_ns, e))

    if success_count_push == success_count_pull:
        messages.success(
            request, "All valid namespaces are in sync.")

    # check role bindings of namespaces
    # We only consider visible namespaces here, to prevent hitting
    # special namespaces and giving them (most likely unnecessary)
    # additional role bindings
    for portal_ns in KubernetesNamespace.objects.filter(visible=True):
        # Get role bindings in the current namespace
        try:
            rolebindings = rbac_v1.list_namespaced_role_binding(portal_ns.name)
        except Exception as e:
            logger.error("Exception: {0}".format(e))
            messages.error(request, "Could not fetch role bindings for namespace '{0}': {1}.".format(portal_ns, e))
            continue
        # Get all cluster roles this namespace is currently bound to
        clusterroles_active = [rolebinding.role_ref.name for rolebinding in rolebindings.items if rolebinding.role_ref.kind == 'ClusterRole']
        logger.debug("Namespace '{0}' is bound to cluster roles {1}".format(portal_ns, clusterroles_active))
        # Check list of default cluster roles from settings
        for clusterrole in settings.NAMESPACE_CLUSTERROLES:
            if clusterrole not in clusterroles_active:
                try:
                    logger.info("Namespace '{0}' is not bound to cluster role '{1}', fixing this ...".format(portal_ns, clusterrole))
                    role_ref = client.V1RoleRef(name=clusterrole, kind="ClusterRole", api_group="rbac.authorization.k8s.io")
                    # Subject for the cluster role are all service accounts in the namespace
                    subject = client.V1Subject(name="system:serviceaccounts:" + portal_ns.name, kind="Group", api_group="rbac.authorization.k8s.io")
                    metadata = client.V1ObjectMeta(name=clusterrole)
                    new_rolebinding = client.V1RoleBinding(role_ref=role_ref, metadata=metadata, subjects=[subject, ])
                    rbac_v1.create_namespaced_role_binding(portal_ns.name, new_rolebinding)
                except Exception as e:
                    logger.exception(e)
                    messages.error(request, "Could not create binding of namespace '{0}' to cluster role '{1}': {2}.".format(portal_ns.name, clusterrole, e))
                    continue
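# NOTE: newer releases of the kubernetes Python client renamed the RBAC
# subject model from V1Subject to RbacV1Subject, so the V1Subject lookup
# above may need a version-tolerant fallback (an assumption to verify
# against the installed client version):
try:
    RbacSubject = client.RbacV1Subject  # newer clients
except AttributeError:
    RbacSubject = client.V1Subject  # older clients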
Example #13
def create_k8s_namespace(namespace_name: str):
    _api_client = _get_k8s_api_client()
    return client.CoreV1Api(_api_client).create_namespace(
        client.V1Namespace(metadata=client.V1ObjectMeta(name=namespace_name)))
Example #14
def create_user_namespace(
    api: client.CoreV1Api,
    userspace_dc: dynamic.DynamicClient,
    user_name: str,
    user_email: str,
    expected_user_namespaces: Dict[str, str],
    namespaces: List[str],
) -> None:
    env = os.environ.get("ORBIT_ENV", "")
    if not env:
        raise ValueError("Orbit Environment ORBIT_ENV is required")
    for team, user_ns in expected_user_namespaces.items():
        try:
            team_namespace = api.read_namespace(name=team).to_dict()
            team_uid = team_namespace.get("metadata", {}).get("uid", None)
            logger.info(f"Retrieved Team Namespace uid: {team_uid}")
        except Exception:
            logger.exception("Error retrieving Team Namespace")
            team_uid = None
        if user_ns not in namespaces:
            logger.info(f"User namespace {user_ns} doesnt exist. Creating...")
            kwargs = {
                "name": user_ns,
                "annotations": {"owner": user_email},
                "labels": {
                    "orbit/efs-id": EFS_FS_ID,
                    "orbit/env": os.environ.get("ORBIT_ENV"),
                    "orbit/space": "user",
                    "orbit/team": team,
                    "orbit/user": user_name,
                    # "istio-injection": "enabled",
                },
            }
            if team_uid:
                kwargs["owner_references"] = [
                    client.V1OwnerReference(api_version="v1", kind="Namespace", name=team, uid=team_uid)
                ]

            body = client.V1Namespace()
            body.metadata = client.V1ObjectMeta(**kwargs)

            try:
                # create userspace namespace resource
                api.create_namespace(body=body)
                logger.info(f"Created namespace {user_ns}")
            except ApiException as ae:
                logger.warning(f"Exception when trying to create user namespace {user_ns}")
                logger.warning(ae.body)

            try:
                # create userspace custom resource for the given user namespace
                logger.info(f"Creating userspace custom resource {user_ns}")
                create_userspace(
                    userspace_dc=userspace_dc,
                    name=user_ns,
                    env=env,
                    space="user",
                    team=team,
                    user=user_name,
                    team_efsid=EFS_FS_ID,
                    user_email=user_email,
                )
                logger.info(f"Created userspace custom resource {user_ns}")
            except ApiException as ae:
                logger.warning(f"Exception when trying to create userspace custom resource {user_ns}")
                logger.warning(ae.body)
Example #15
class TestCastValue:
    """Tests for kubetest.manifest.cast_value"""
    @pytest.mark.parametrize(
        'value,t,expected',
        [
            # builtin types
            (11, 'int', int(11)),
            ('11', 'int', int(11)),
            (11.0, 'int', int(11)),
            (11, 'float', float(11)),
            (11, 'str', '11'),

            # casting to object should result in no change
            (11, 'object', 11),
            ('11', 'object', '11'),

            # kubernetes types
            ({
                'apiVersion': 'apps/v1',
                'kind': 'Namespace'
            }, 'V1Namespace',
             client.V1Namespace(kind='Namespace', api_version='apps/v1')),
            ({
                'fieldRef': {
                    'apiVersion': 'apps/v1beta1',
                    'fieldPath': 'foobar'
                }
            }, 'V1EnvVarSource',
             client.V1EnvVarSource(field_ref=client.V1ObjectFieldSelector(
                 api_version='apps/v1beta1', field_path='foobar'))),
            ({
                'finalizers': ['a', 'b', 'c']
            }, 'V1ObjectMeta',
             client.V1ObjectMeta(finalizers=['a', 'b', 'c'])),
        ])
    def test_ok(self, value, t, expected):
        """Test casting values to the specified type successfully."""

        actual = manifest.cast_value(value, t)
        assert type(actual) == type(expected)
        assert actual == expected

    @pytest.mark.parametrize(
        'value,t,error',
        [
            # builtin types
            ({
                'foo': 'bar'
            }, 'int', TypeError),
            ([1, 3, 5], 'float', TypeError),
            (1.0, 'set', TypeError),

            # kubernetes types
            (11, 'V1Namespace', AttributeError),
            ('foo', 'V1Deployment', AttributeError),
            (['a', 'b', 'c'], 'V1Service', AttributeError),
            ({1, 2, 3, 4}, 'V1Pod', AttributeError),

            # unknown type
            (11, 'NotARealType', ValueError),
        ])
    def test_error(self, value, t, error):
        """Test casting values to the specified type unsuccessfully."""

        with pytest.raises(error):
            manifest.cast_value(value, t)
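# A rough sketch of how a cast helper like manifest.cast_value could work (an
# assumption about kubetest's internals, not its actual code): builtin type
# names are resolved from the builtins module, and kubernetes model names are
# handed to the official client's deserializer, which accepts REST-style
# (camelCase) dicts such as the test inputs above.
import builtins
from kubernetes import client

def cast_value_sketch(value, type_name):
    builtin_type = getattr(builtins, type_name, None)
    if isinstance(builtin_type, type):
        return builtin_type(value)
    if hasattr(client, type_name):
        # private helper of the official client: resolves the model class by
        # name and maps camelCase keys onto model attributes
        return client.ApiClient()._ApiClient__deserialize(value, type_name)
    raise ValueError("unknown type: " + type_name)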
Example #16
def namespace():
    configure_kube()
    return client.V1Namespace(metadata=client.V1ObjectMeta(name="helloworld"))
Example #17
import base64
from kubernetes import client, config
namespace = "test-dev001"

# Configs can be set in Configuration class directly or using helper utility
config.load_kube_config('./kube-config')

v1 = client.CoreV1Api()

# create K8s namespace
ret = v1.create_namespace(
    client.V1Namespace(metadata=client.V1ObjectMeta(name=namespace)))
print("create K8s namespace :" + namespace)

# create K8s service account
ret = v1.create_namespaced_service_account(
    "account",
    client.V1ServiceAccount(metadata=client.V1ObjectMeta(name="dev001")))
print("create K8s service account 'dev001'")

# delete K8s service account
# delete_namespaced_service_account("ServerAccount","Namespace")
ret = v1.delete_namespaced_service_account("dev001", "account")
print("delete K8s namespace 'account' / service account 'dev001'")

# create K8s ResourceQuota
resource_quota = client.V1ResourceQuota(spec=client.V1ResourceQuotaSpec(
    hard={
        "cpu": "10",
        "memory": "10G",
        "pods": "20",
    }))
Example #18
def apply_rekcurd_to_kubernetes(project_id: int,
                                application_id: str,
                                service_level: str,
                                version: str,
                                insecure_host: str,
                                insecure_port: int,
                                replicas_default: int,
                                replicas_minimum: int,
                                replicas_maximum: int,
                                autoscale_cpu_threshold: str,
                                policy_max_surge: int,
                                policy_max_unavailable: int,
                                policy_wait_seconds: int,
                                container_image: str,
                                resource_request_cpu: str,
                                resource_request_memory: str,
                                resource_limit_cpu: str,
                                resource_limit_memory: str,
                                commit_message: str,
                                service_model_assignment: int,
                                service_git_url: str = "",
                                service_git_branch: str = "",
                                service_boot_script: str = "",
                                debug_mode: bool = False,
                                service_id: str = None,
                                is_creation_mode: bool = False,
                                display_name: str = None,
                                description: str = None,
                                kubernetes_models=None,
                                **kwargs) -> str:
    """
    kubectl apply
    :param project_id:
    :param application_id:
    :param service_level:
    :param version:
    :param insecure_host:
    :param insecure_port:
    :param replicas_default:
    :param replicas_minimum:
    :param replicas_maximum:
    :param autoscale_cpu_threshold:
    :param policy_max_surge:
    :param policy_max_unavailable:
    :param policy_wait_seconds:
    :param container_image:
    :param resource_request_cpu:
    :param resource_request_memory:
    :param resource_limit_cpu:
    :param resource_limit_memory:
    :param commit_message:
    :param service_model_assignment:
    :param service_git_url:
    :param service_git_branch:
    :param service_boot_script:
    :param debug_mode:
    :param service_id:
    :param is_creation_mode:
    :param display_name:
    :param description:
    :param kubernetes_models:
    :param kwargs:
    :return:
    """
    __num_retry = 5
    progress_deadline_seconds = \
        int(__num_retry*policy_wait_seconds*replicas_maximum/(policy_max_surge+policy_max_unavailable))
    if service_id is None:
        is_creation_mode = True
        service_id = uuid.uuid4().hex
    if kubernetes_models is None:
        kubernetes_models = db.session.query(KubernetesModel).filter(
            KubernetesModel.project_id == project_id).all()
    data_server_model: DataServerModel = db.session.query(
        DataServerModel).filter(
            DataServerModel.project_id == project_id).first_or_404()
    application_model: ApplicationModel = db.session.query(
        ApplicationModel).filter(
            ApplicationModel.application_id == application_id).first_or_404()
    application_name = application_model.application_name
    model_model: ModelModel = db.session.query(ModelModel).filter(
        ModelModel.model_id == service_model_assignment).first_or_404()

    from kubernetes import client
    try:
        git_secret = load_secret(project_id, application_id, service_level,
                                 GIT_SECRET_PREFIX)
    except Exception:
        git_secret = None
    volume_mounts = dict()
    volumes = dict()
    if git_secret:
        connector_name = "sec-git-name"
        secret_name = "sec-{}-{}".format(GIT_SECRET_PREFIX, application_id)
        volume_mounts = {
            'volume_mounts': [
                client.V1VolumeMount(name=connector_name,
                                     mount_path=GIT_SSH_MOUNT_DIR,
                                     read_only=True)
            ]
        }
        volumes = {
            'volumes': [
                client.V1Volume(name=connector_name,
                                secret=client.V1SecretVolumeSource(
                                    secret_name=secret_name,
                                    items=[
                                        client.V1KeyToPath(key=GIT_ID_RSA,
                                                           path=GIT_ID_RSA,
                                                           mode=GIT_SSH_MODE),
                                        client.V1KeyToPath(key=GIT_CONFIG,
                                                           path=GIT_CONFIG,
                                                           mode=GIT_SSH_MODE)
                                    ]))
            ]
        }

    for kubernetes_model in kubernetes_models:
        full_config_path = get_full_config_path(kubernetes_model.config_path)
        from kubernetes import config
        config.load_kube_config(full_config_path)
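        # NOTE (editor): each KubernetesModel carries the kubeconfig of one
        # target cluster; loading it here re-points the default kubernetes
        # client, so the API objects below are applied to every cluster in turn.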

        pod_env = [
            client.V1EnvVar(name="REKCURD_SERVICE_UPDATE_FLAG",
                            value=commit_message),
            client.V1EnvVar(name="REKCURD_KUBERNETES_MODE", value="True"),
            client.V1EnvVar(name="REKCURD_DEBUG_MODE", value=str(debug_mode)),
            client.V1EnvVar(name="REKCURD_APPLICATION_NAME",
                            value=application_name),
            client.V1EnvVar(name="REKCURD_SERVICE_INSECURE_HOST",
                            value=insecure_host),
            client.V1EnvVar(name="REKCURD_SERVICE_INSECURE_PORT",
                            value=str(insecure_port)),
            client.V1EnvVar(name="REKCURD_SERVICE_ID", value=service_id),
            client.V1EnvVar(name="REKCURD_SERVICE_LEVEL", value=service_level),
            client.V1EnvVar(name="REKCURD_GRPC_PROTO_VERSION", value=version),
            client.V1EnvVar(name="REKCURD_MODEL_MODE",
                            value=data_server_model.data_server_mode.value),
            client.V1EnvVar(name="REKCURD_MODEL_FILE_PATH",
                            value=model_model.filepath),
            client.V1EnvVar(name="REKCURD_CEPH_ACCESS_KEY",
                            value=str(data_server_model.ceph_access_key
                                      or "xxx")),
            client.V1EnvVar(name="REKCURD_CEPH_SECRET_KEY",
                            value=str(data_server_model.ceph_secret_key
                                      or "xxx")),
            client.V1EnvVar(name="REKCURD_CEPH_HOST",
                            value=str(data_server_model.ceph_host or "xxx")),
            client.V1EnvVar(name="REKCURD_CEPH_PORT",
                            value=str(data_server_model.ceph_port or "1234")),
            client.V1EnvVar(name="REKCURD_CEPH_IS_SECURE",
                            value=str(data_server_model.ceph_is_secure
                                      or "False")),
            client.V1EnvVar(name="REKCURD_CEPH_BUCKET_NAME",
                            value=str(data_server_model.ceph_bucket_name
                                      or "xxx")),
            client.V1EnvVar(name="REKCURD_AWS_ACCESS_KEY",
                            value=str(data_server_model.aws_access_key
                                      or "xxx")),
            client.V1EnvVar(name="REKCURD_AWS_SECRET_KEY",
                            value=str(data_server_model.aws_secret_key
                                      or "xxx")),
            client.V1EnvVar(name="REKCURD_AWS_BUCKET_NAME",
                            value=str(data_server_model.aws_bucket_name
                                      or "xxx")),
            client.V1EnvVar(name="REKCURD_GCS_ACCESS_KEY",
                            value=str(data_server_model.gcs_access_key
                                      or "xxx")),
            client.V1EnvVar(name="REKCURD_GCS_SECRET_KEY",
                            value=str(data_server_model.gcs_secret_key
                                      or "xxx")),
            client.V1EnvVar(name="REKCURD_GCS_BUCKET_NAME",
                            value=str(data_server_model.gcs_bucket_name
                                      or "xxx")),
            client.V1EnvVar(name="REKCURD_SERVICE_GIT_URL",
                            value=service_git_url),
            client.V1EnvVar(name="REKCURD_SERVICE_GIT_BRANCH",
                            value=service_git_branch),
            client.V1EnvVar(name="REKCURD_SERVICE_BOOT_SHELL",
                            value=service_boot_script),
        ]
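        # NOTE (editor): these env vars are the Rekcurd worker's runtime
        # configuration (model location and data-server credentials); "xxx"
        # above acts as a placeholder for backends that are not configured.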
        """Namespace registration."""
        core_v1_api = client.CoreV1Api()
        try:
            core_v1_api.read_namespace(name=service_level)
        except client.rest.ApiException:
            api.logger.info("\"{}\" namespace created".format(service_level))
            v1_namespace = client.V1Namespace(
                api_version="v1",
                kind="Namespace",
                metadata=client.V1ObjectMeta(name=service_level))
            core_v1_api.create_namespace(body=v1_namespace)
        """Create/patch Deployment."""
        v1_deployment = client.V1Deployment(
            api_version="apps/v1",
            kind="Deployment",
            metadata=client.V1ObjectMeta(name="deploy-{0}".format(service_id),
                                         namespace=service_level,
                                         labels={
                                             "rekcurd-worker": "True",
                                             "id": application_id,
                                             "name": application_name,
                                             "sel": service_id
                                         }),
            spec=client.V1DeploymentSpec(
                min_ready_seconds=policy_wait_seconds,
                progress_deadline_seconds=progress_deadline_seconds,
                replicas=replicas_default,
                revision_history_limit=3,
                selector=client.V1LabelSelector(
                    match_labels={"sel": service_id}),
                strategy=client.V1DeploymentStrategy(
                    type="RollingUpdate",
                    rolling_update=client.V1RollingUpdateDeployment(
                        max_surge=policy_max_surge,
                        max_unavailable=policy_max_unavailable)),
                template=client.V1PodTemplateSpec(
                    metadata=client.V1ObjectMeta(
                        labels={
                            "rekcurd-worker": "True",
                            "id": application_id,
                            "name": application_name,
                            "sel": service_id
                        }),
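                    # NOTE (editor): the preferred (soft) anti-affinity below
                    # asks the scheduler to spread the service's replicas
                    # across nodes when capacity allows.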
                    spec=client.V1PodSpec(
                        affinity=client.V1Affinity(
                            pod_anti_affinity=client.V1PodAntiAffinity(
                                preferred_during_scheduling_ignored_during_execution=[
                                    client.V1WeightedPodAffinityTerm(
                                        pod_affinity_term=client.V1PodAffinityTerm(
                                            label_selector=client.V1LabelSelector(
                                                match_expressions=[
                                                    client.V1LabelSelectorRequirement(
                                                        key="id",
                                                        operator="In",
                                                        values=[service_id])
                                                ]),
                                            topology_key="kubernetes.io/hostname"),
                                        weight=100)
                                ])),
                        containers=[
                            client.V1Container(
                                env=pod_env,
                                image=container_image,
                                image_pull_policy="Always",
                                name=service_id,
                                ports=[
                                    client.V1ContainerPort(
                                        container_port=insecure_port)
                                ],
                                resources=client.V1ResourceRequirements(
                                    limits={
                                        "cpu": str(resource_limit_cpu),
                                        "memory": resource_limit_memory
                                    },
                                    requests={
                                        "cpu": str(resource_request_cpu),
                                        "memory": resource_request_memory
                                    }),
                                security_context=client.V1SecurityContext(
                                    privileged=True),
                                **volume_mounts)
                        ],
                        node_selector={"host": service_level},
                        **volumes))))
        apps_v1_api = client.AppsV1Api()
        if is_creation_mode:
            api.logger.info("Deployment created.")
            apps_v1_api.create_namespaced_deployment(body=v1_deployment,
                                                     namespace=service_level)
        else:
            api.logger.info("Deployment patched.")
            apps_v1_api.patch_namespaced_deployment(
                body=v1_deployment,
                name="deploy-{0}".format(service_id),
                namespace=service_level)
        """Create/patch Service."""
        v1_service = client.V1Service(
            api_version="v1",
            kind="Service",
            metadata=client.V1ObjectMeta(name="svc-{0}".format(service_id),
                                         namespace=service_level,
                                         labels={
                                             "rekcurd-worker": "True",
                                             "id": application_id,
                                             "name": application_name,
                                             "sel": service_id
                                         }),
            spec=client.V1ServiceSpec(ports=[
                client.V1ServicePort(name="grpc-backend",
                                     port=insecure_port,
                                     protocol="TCP",
                                     target_port=insecure_port)
            ],
                                      selector={"sel": service_id}))
        core_v1_api = client.CoreV1Api()
        if is_creation_mode:
            api.logger.info("Service created.")
            core_v1_api.create_namespaced_service(namespace=service_level,
                                                  body=v1_service)
        else:
            api.logger.info("Service patched.")
            core_v1_api.patch_namespaced_service(
                namespace=service_level,
                name="svc-{0}".format(service_id),
                body=v1_service)
        """Create/patch Autoscaler."""
        v1_horizontal_pod_autoscaler = client.V1HorizontalPodAutoscaler(
            api_version="autoscaling/v1",
            kind="HorizontalPodAutoscaler",
            metadata=client.V1ObjectMeta(name="hpa-{0}".format(service_id),
                                         namespace=service_level,
                                         labels={
                                             "rekcurd-worker": "True",
                                             "id": application_id,
                                             "name": application_name,
                                             "sel": service_id
                                         }),
            spec=client.V1HorizontalPodAutoscalerSpec(
                max_replicas=replicas_maximum,
                min_replicas=replicas_minimum,
                scale_target_ref=client.V1CrossVersionObjectReference(
                    api_version="apps/v1",
                    kind="Deployment",
                    name="deploy-{0}".format(service_id)),
                # the threshold arrives as str; the HPA spec expects an integer
                target_cpu_utilization_percentage=int(autoscale_cpu_threshold)))
        autoscaling_v1_api = client.AutoscalingV1Api()
        if is_creation_mode:
            api.logger.info("Autoscaler created.")
            autoscaling_v1_api.create_namespaced_horizontal_pod_autoscaler(
                namespace=service_level, body=v1_horizontal_pod_autoscaler)
        else:
            api.logger.info("Autoscaler patched.")
            autoscaling_v1_api.patch_namespaced_horizontal_pod_autoscaler(
                namespace=service_level,
                name="hpa-{0}".format(service_id),
                body=v1_horizontal_pod_autoscaler)
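        # NOTE (editor): create_* calls fail with 409 Conflict if the object
        # already exists, while patch_* merges the body into the live object;
        # is_creation_mode picks between the two for all three resources above.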
        """Create Istio ingress if this is the first application."""
        custom_object_api = client.CustomObjectsApi()
        try:
            custom_object_api.get_namespaced_custom_object(
                group="networking.istio.io",
                version="v1alpha3",
                namespace=service_level,
                plural="virtualservices",
                name="ing-vs-{0}".format(application_id),
            )
        except client.rest.ApiException:
            # not found: this is the application's first service, so create it
            ingress_virtual_service_body = {
                "apiVersion": "networking.istio.io/v1alpha3",
                "kind": "VirtualService",
                "metadata": {
                    "labels": {
                        "rekcurd-worker": "True",
                        "id": application_id,
                        "name": application_name
                    },
                    "name": "ing-vs-{0}".format(application_id),
                    "namespace": service_level
                },
                "spec": {
                    "hosts": ["*"],
                    "gateways": ["rekcurd-ingress-gateway"],
                    "http": [{
                        "match": [{
                            "headers": {
                                "x-rekcurd-application-name": {
                                    "exact": application_name
                                },
                                "x-rekcurd-sevice-level": {
                                    "exact": service_level
                                },
                                "x-rekcurd-grpc-version": {
                                    "exact": version
                                },
                            }
                        }],
                        "route": [{
                            "destination": {
                                "port": {
                                    "number": insecure_port
                                },
                                "host": "svc-{0}".format(service_id)
                            },
                            "weight": 100
                        }],
                        "retries": {
                            "attempts": 25,
                            "perTryTimeout": "1s"
                        }
                    }]
                }
            }
            api.logger.info("Istio created.")
            custom_object_api.create_namespaced_custom_object(
                group="networking.istio.io",
                version="v1alpha3",
                namespace=service_level,
                plural="virtualservices",
                body=ingress_virtual_service_body)
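        # NOTE (editor): VirtualService is an Istio CRD, so it is managed via
        # CustomObjectsApi with group/version/plural coordinates instead of a
        # typed client such as AppsV1Api.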
        """Add service model."""
        if is_creation_mode:
            if display_name is None:
                display_name = "{0}-{1}".format(service_level, service_id)
            service_model = ServiceModel(service_id=service_id,
                                         application_id=application_id,
                                         display_name=display_name,
                                         description=description,
                                         service_level=service_level,
                                         version=version,
                                         model_id=service_model_assignment,
                                         insecure_host=insecure_host,
                                         insecure_port=insecure_port)
            db.session.add(service_model)
            db.session.flush()
    """Finish."""
    return service_id
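A hedged sketch (editor's addition) of invoking the apply function above. The
function name is hypothetical (the actual def sits above this excerpt) and all
keyword values are illustrative:

# hypothetical name; the actual def sits above this excerpt
service_id = apply_kubernetes_service(
    project_id=1,
    application_id="app-0001",
    service_level="development",
    version="v2",
    insecure_host="[::]",
    insecure_port=5000,
    replicas_default=1,
    replicas_minimum=1,
    replicas_maximum=2,
    autoscale_cpu_threshold="80",
    policy_max_surge=1,
    policy_max_unavailable=0,
    policy_wait_seconds=300,
    container_image="rekcurd/rekcurd:latest",
    resource_request_cpu="1",
    resource_request_memory="512Mi",
    resource_limit_cpu="1",
    resource_limit_memory="512Mi",
    commit_message="initial deploy",
    service_model_assignment=1,
    is_creation_mode=True)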
    def create_namespace(self, user):
        """
        Creates a namespace for the given user if it doesn't exist
        """
        namestr = "tool-{}".format(user)
        try:
            _ = self.core.create_namespace(body=client.V1Namespace(
                api_version="v1",
                kind="Namespace",
                metadata=client.V1ObjectMeta(
                    name=namestr,
                    labels={
                        "name": namestr,
                        "tenancy": "tool"
                    },
                ),
            ))
        except ApiException as api_ex:
            if api_ex.status == 409 and "AlreadyExists" in api_ex.body:
                logging.info("Namespace tool-%s already exists", user)
                return

            logging.error("Could not create namespace for %s", user)
            raise

        # The except above will short-circuit this function before altering quotas.
        # Define default quotas for new namespaces only.
        _ = self.core.create_namespaced_resource_quota(
            namespace=namestr,
            body=client.V1ResourceQuota(
                api_version="v1",
                kind="ResourceQuota",
                metadata=client.V1ObjectMeta(name=namestr),
                spec=client.V1ResourceQuotaSpec(
                    hard={
                        "requests.cpu": "2",
                        "requests.memory": "6Gi",
                        "limits.cpu": "2",
                        "limits.memory": "8Gi",
                        "pods": "4",
                        "services": "1",
                        "services.nodeports": "0",
                        "replicationcontrollers": "1",
                        "secrets": "10",
                        "configmaps": "10",
                        "persistentvolumeclaims": "3",
                    }),
            ),
        )
        _ = self.core.create_namespaced_limit_range(
            namespace=namestr,
            body=client.V1LimitRange(
                api_version="v1",
                kind="LimitRange",
                metadata=client.V1ObjectMeta(name=namestr),
                spec=client.V1LimitRangeSpec(limits=[
                    client.V1LimitRangeItem(
                        default={
                            "cpu": "500m",
                            "memory": "512Mi"
                        },
                        default_request={
                            "cpu": "150m",
                            "memory": "256Mi"
                        },
                        type="Container",
                        max={
                            "cpu": "1",
                            "memory": "4Gi"
                        },
                        min={
                            "cpu": "50m",
                            "memory": "100Mi"
                        },
                    )
                ]),
            ),
        )
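A small sketch (editor's addition) of reading back the quota created above,
assuming an authenticated CoreV1Api client; the user name is illustrative:

from kubernetes import client, config

config.load_kube_config()
core = client.CoreV1Api()
# the method above names the quota after the namespace itself ("tool-<user>")
quota = core.read_namespaced_resource_quota(name="tool-alice",
                                            namespace="tool-alice")
print(quota.spec.hard["pods"])  # expected: '4'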
Example #20
0
from kubernetes import client, config


def main():
    # build an API client from the local kubeconfig
    config.load_kube_config()
    v1 = client.CoreV1Api()
    # create an instance of V1Namespace named "test1"
    body = client.V1Namespace(metadata=client.V1ObjectMeta(name="test1"))
    v1.create_namespace(body)
Example #21
0
import time
import os
import random
import string

from kubernetes import client, config

# usage: docker run --rm --net=host  -v `pwd`/.kube/config:/root/.kube/config --env SERVICE_NUM=500  hub.c.163.com/qingzhou/istio/loadgen


def get_random_string(length):
    return ''.join(
        random.choice(string.ascii_lowercase) for i in range(length))


config.load_kube_config()
v1 = client.CoreV1Api()

ns = "loadgen"
nsObject = client.V1Namespace(metadata={"name": "loadgen"})

appsv1 = client.AppsV1Api()
selector = client.V1LabelSelector(match_labels={"app": "loadgen"})
deployment = client.V1Deployment(metadata={
    "name": "backend",
    "namespace": ns
},
                                 spec={
                                     "replicas": 1,
                                     "selector": selector,
                                     "strategy": {
                                         "rollingUpdate": {
                                             "maxSurge": 1,
                                             "maxUnavailable": 1
                                         }
Example #22
0
    def _create_namespace(self):
        # https://github.com/kubernetes-client/python/issues/613#issuecomment-429425777
        with KubeApi() as ka:
            self.namespace = ka.create_namespace(
                client.V1Namespace(metadata=client.V1ObjectMeta(
                    name="{0}{1}".format(self.PREFIX, str(uuid.uuid4())))))
        self.namespace_name = self.namespace.metadata.name
        print(self.namespace_name)
Example #23
0
    def test_generate_action_plan(self):
        mock_input = {
            'all_nodes': [
                client.V1Node(metadata=client.V1ObjectMeta(
                    name='node-1',
                    creation_timestamp=(now - datetime.timedelta(days=30.1)),
                    annotations={}
                ), spec=client.V1NodeSpec(unschedulable=False)),

                client.V1Node(metadata=client.V1ObjectMeta(
                    name='node-2',
                    creation_timestamp=(now - datetime.timedelta(days=60)),
                    annotations={}
                ), spec=client.V1NodeSpec(unschedulable=False)),

                client.V1Node(metadata=client.V1ObjectMeta(
                    name='node-3',
                    creation_timestamp=(now - datetime.timedelta(days=30.2)),
                    annotations={}
                ), spec=client.V1NodeSpec(unschedulable=True)),

                client.V1Node(metadata=client.V1ObjectMeta(
                    name='node-4',
                    creation_timestamp=(now - datetime.timedelta(days=30.3)),
                    annotations={
                        main.annotation('cordoned'): '',
                    }
                ), spec=client.V1NodeSpec(unschedulable=False)),

                client.V1Node(metadata=client.V1ObjectMeta(
                    name='node-4',
                    creation_timestamp=(now - datetime.timedelta(days=30.4)),
                    annotations={
                        main.annotation('cordoned'): '',
                    }
                ), spec=client.V1NodeSpec(
                    unschedulable=True,
                    taints=[
                        client.V1Taint(
                            key='node.kubernetes.io/unschedulable',
                            effect='NoSchedule',
                            time_added=(now - datetime.timedelta(hours=1)),
                        )
                    ]
                )),

                client.V1Node(metadata=client.V1ObjectMeta(
                    name='node-5',
                    creation_timestamp=(now - datetime.timedelta(days=32.5)),
                    annotations={
                        main.annotation('cordoned'): '',
                    }
                ), spec=client.V1NodeSpec(
                    unschedulable=True,
                    taints=[
                        client.V1Taint(
                            key='node.kubernetes.io/unschedulable',
                            effect='NoSchedule',
                            time_added=(now - datetime.timedelta(days=1.2)),
                        )
                    ]
                )),

                client.V1Node(metadata=client.V1ObjectMeta(
                    name='node-6',
                    creation_timestamp=(now - datetime.timedelta(days=35)),
                    annotations={
                        main.annotation('cordoned'): '',
                    }
                ), spec=client.V1NodeSpec(
                    unschedulable=True,
                    taints=[
                        client.V1Taint(
                            key='node.kubernetes.io/unschedulable',
                            effect='NoSchedule',
                            time_added=(now - datetime.timedelta(days=2)),
                        )
                    ]
                )),

                client.V1Node(metadata=client.V1ObjectMeta(
                    name='node-7',
                    creation_timestamp=(now - datetime.timedelta(days=35)),
                    annotations={
                        main.annotation('cordoned'): '',
                        main.annotation('notifications-sent'): str(int((datetime.datetime.utcnow() - datetime.timedelta(days=2.5)).timestamp())),
                    }
                ), spec=client.V1NodeSpec(
                    unschedulable=True,
                    taints=[
                        client.V1Taint(
                            key='node.kubernetes.io/unschedulable',
                            effect='NoSchedule',
                            time_added=(now - datetime.timedelta(days=4)),
                        )
                    ]
                )),
            ],
            'all_namespaces': [
                client.V1Namespace(metadata=client.V1ObjectMeta(
                    name='ns-1',
                    annotations={
                        'annotation-1': 'bla',
                    })),
                client.V1Namespace(metadata=client.V1ObjectMeta(
                    name='ns-2',
                    annotations={
                        'annotation-2': 'blub',
                    }))
            ],
            'all_pods': [
                client.V1Pod(
                    metadata=client.V1ObjectMeta(namespace='ns-1', name='pod-1', annotations={
                        'annotation-3': '123',
                    }),
                    spec=client.V1PodSpec(node_name='node-5', containers=[])),
                client.V1Pod(
                    metadata=client.V1ObjectMeta(namespace='ns-2', name='pod-2', annotations={
                        'annotation-4': '456',
                    }),
                    spec=client.V1PodSpec(node_name='node-6', containers=[])),
                client.V1Pod(
                    metadata=client.V1ObjectMeta(namespace='ns-2', name='pod-3', annotations={
                        'annotation-5': '789',
                    }),
                    spec=client.V1PodSpec(node_name='node-7', containers=[])),
            ],
            'args': args,
        }
        expected_result = {
            'cordon': {
                'nodes': ['node-1', 'node-2', 'node-4'],
                'affected_pods': []
            },
            'notify': {
                'nodes': ['node-5', 'node-6'],
                'affected_pods': [
                    {
                        'namespace': 'ns-1',
                        'name': 'pod-1',
                        'annotations': {
                            'annotation-1': 'bla',
                            'annotation-3': '123',
                        },
                        'eviction_time': '2 days from now',
                    },
                    {
                        'namespace': 'ns-2',
                        'name': 'pod-2',
                        'annotations': {
                            'annotation-2': 'blub',
                            'annotation-4': '456',
                        },
                        'eviction_time': '2 days from now',
                    },
                ]
            },
            'drain': {
                'nodes': ['node-7'],
                'affected_pods': [
                    {
                        'namespace': 'ns-2',
                        'name': 'pod-3',
                        'annotations': {
                            'annotation-2': 'blub',
                            'annotation-5': '789',
                        },
                        'eviction_time': None,
                    },
                ]
            },
        }
        self.assertEqual(expected_result, main.generate_action_plan(**mock_input))
    def __init__(self,
                 kubernetes_proxy_addr=None,
                 redis_ip=None,
                 redis_port=6379,
                 useInternalIP=False,
                 namespace='default',
                 create_namespace_if_not_exists=False):
        """

        Parameters
        ----------
        kubernetes_proxy_addr : str, optional
            The proxy address if you are proxying connections locally using ``kubectl proxy``.
            If this argument is provided, Clipper will construct the appropriate proxy
            URLs for accessing Clipper's Kubernetes services, rather than using the API server
            address provided in your kube config.
        redis_ip : str, optional
            The address of a running Redis cluster. If set to None, Clipper will start
            a Redis deployment for you.
        redis_port : int, optional
            The Redis port. If ``redis_ip`` is set to None, Clipper will start Redis on this port.
            If ``redis_ip`` is provided, Clipper will connect to Redis on this port.
        useInternalIP : bool, optional
            Use the internal IP of the K8S nodes. If ``useInternalIP`` is set to False, Clipper
            will throw an exception if none of the nodes have ExternalDNS.
            If ``useInternalIP`` is set to True, Clipper will use the internal IP of the K8S node
            if no ExternalDNS exists for any of the nodes.
        namespace : str, optional
            The Kubernetes namespace to use.
            If this argument is provided, all Clipper artifacts and resources will be created in
            this k8s namespace. If not, the "default" namespace is used.
        create_namespace_if_not_exists : bool, optional
            Create the k8s namespace if it doesn't already exist.
            If this argument is provided and the k8s namespace does not exist, a new k8s
            namespace will be created.

        Note
        ----
        Clipper stores all persistent configuration state (such as registered application and model
        information) in Redis. If you want Clipper to be durable and able to recover from failures,
        we recommend configuring your own persistent and replicated Redis cluster rather than
        letting Clipper launch one for you.
        """

        if kubernetes_proxy_addr is not None:
            self.kubernetes_proxy_addr = kubernetes_proxy_addr
            self.use_k8s_proxy = True
        else:
            self.use_k8s_proxy = False

        self.redis_ip = redis_ip
        self.redis_port = redis_port
        self.useInternalIP = useInternalIP
        config.load_kube_config()
        configuration.assert_hostname = False
        self._k8s_v1 = client.CoreV1Api()
        self._k8s_beta = client.ExtensionsV1beta1Api()

        # Check if the namespace exists; create it if the create flag is set,
        # otherwise raise an error.
        namespaces = [
            ns.metadata.name for ns in self._k8s_v1.list_namespace().items
        ]

        if namespace in namespaces:
            self.k8s_namespace = namespace
        elif create_namespace_if_not_exists:
            body = client.V1Namespace()
            body.metadata = client.V1ObjectMeta(name=namespace)
            try:
                self._k8s_v1.create_namespace(body)
            except ApiException as e:
                logger.error(
                    "Exception creating Kubernetes namespace: {}".format(e))
                raise ClipperException(
                    "Could not create Kubernetes namespace. "
                    "Reason: {}".format(e.reason))
            self.k8s_namespace = namespace
        else:
            msg = "Error connecting to Kubernetes cluster. Namespace does not exist"
            logger.error(msg)
            raise ClipperException(msg)
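A hedged usage sketch (editor's addition): this __init__ matches the signature
of Clipper's KubernetesContainerManager; assuming that class, it could be
constructed against a local ``kubectl proxy`` like so (names are illustrative):

from clipper_admin import KubernetesContainerManager

manager = KubernetesContainerManager(
    kubernetes_proxy_addr="127.0.0.1:8001",
    namespace="clipper-demo",
    create_namespace_if_not_exists=True)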