def test_object_factory_raises_for_unknown_kind():
    """The factory must raise ValueError for a kind the API does not list."""
    mock_api = MagicMock()
    mock_api.resource_list.return_value = {
        "resources": [
            {"kind": "ExampleObject", "namespaced": True, "name": "exampleobjects"}
        ]
    }
    with pytest.raises(ValueError):
        pykube.object_factory(mock_api, "example.org/v1", "OtherObject")
def test_object_factory_raises_for_unknown_kind():
    """The factory must raise ValueError for a kind the API does not list."""
    mock_api = MagicMock()
    mock_api.resource_list.return_value = {
        'resources': [
            {'kind': 'ExampleObject', 'namespaced': True, 'name': 'exampleobjects'}
        ]
    }
    with pytest.raises(ValueError):
        pykube.object_factory(mock_api, 'example.org/v1', 'OtherObject')
def execute(self, obj):
    """Create on the cluster the Kubernetes resource described by *obj*."""
    client = self.payload.get("_k8s_api_client")
    if client is None:
        # Lazily build the API client from the pod's service account and
        # memoize it in the payload for subsequent calls.
        config = pykube.KubeConfig.from_service_account()
        client = pykube.HTTPClient(config)
        self.payload["_k8s_api_client"] = client
    factory = pykube.object_factory(client, obj.get("apiVersion"), obj.get("kind"))
    factory(client, obj).create()
def deploy_cluster_role(api, manifest, version, update):
    """Create (or, when *update* is truthy, update) a ClusterRole from *manifest*."""
    logging.info('Deploying cluster role')
    factory = pykube.object_factory(api, manifest['apiVersion'], manifest['kind'])
    role = factory(api, manifest)
    # Record the deploying version so rollout history shows the cause.
    role.annotations['kubernetes.io/change-cause'] = 'Deploying version %s' % version
    if 'metadata' in role.obj and 'namespace' in role.obj['metadata']:
        check_namespace(api, role.obj['metadata']['namespace'])
    if not role.exists():
        logging.info('Creating ClusterRole')
        role.create()
    elif update:
        logging.info('Updating ClusterRole')
        role.update()
    else:
        logging.info('Not updating ClusterRole')
    return role
def deploy_daemon_set(api, manifest, version, update):
    """Create (or, when *update* is truthy, update) a DaemonSet from *manifest*."""
    logging.info('Deploying daemonset')
    factory = pykube.object_factory(api, manifest['apiVersion'], manifest['kind'])
    daemon_set = factory(api, manifest)
    # Record the deploying version so rollout history shows the cause.
    daemon_set.annotations['kubernetes.io/change-cause'] = 'Deploying version %s' % version
    if 'metadata' in daemon_set.obj and 'namespace' in daemon_set.obj['metadata']:
        check_namespace(api, daemon_set.obj['metadata']['namespace'])
    if not daemon_set.exists():
        logging.info('Creating DaemonSet')
        daemon_set.create()
    elif update:
        logging.info('Updating DaemonSet')
        daemon_set.update()
    else:
        logging.info('Not updating DaemonSet')
    return daemon_set
def deploy_config_map(api, manifest, version, timeout, update):
    """Create (or, when *update* is truthy, update) a ConfigMap from *manifest*.

    *timeout* is accepted for signature parity with the other deploy helpers
    but is not used here.
    """
    logging.info('Deploying configmap')
    factory = pykube.object_factory(api, manifest['apiVersion'], manifest['kind'])
    configmap = factory(api, manifest)
    # Record the deploying version so rollout history shows the cause.
    configmap.annotations['kubernetes.io/change-cause'] = 'Deploying version %s' % version
    if 'metadata' in configmap.obj and 'namespace' in configmap.obj['metadata']:
        check_namespace(api, configmap.obj['metadata']['namespace'])
    if not configmap.exists():
        logging.info('Creating ConfigMap')
        configmap.create()
    elif update:
        logging.info('Updating ConfigMap')
        configmap.update()
    else:
        logging.info('Not updating ConfigMap')
    return configmap
def execute(self, apiversion=None, kind=None, filters=None, foreach=None,
            returns=None):
    """List objects of the given apiversion/kind, optionally applying a
    callback to each and/or a reducer over the query set.

    Falls back to the payload (or the resource fetched from the subject's
    URL) for apiversion/kind when neither explicit arguments nor filters
    are supplied. Returns ``returns(qobjs)`` when given, else the count.
    """
    # Bug fix: the default was a shared mutable dict ({}) that was mutated
    # via filters.update(), leaking state across calls. Copy defensively.
    filters = dict(filters) if filters else {}
    api = self.payload.get("_k8s_api_client")
    if api is None:
        config = pykube.KubeConfig.from_service_account()
        api = pykube.HTTPClient(config)
        self.payload["_k8s_api_client"] = api
    use_context = apiversion is None and kind is None and not filters
    context = self.payload
    if use_context and context.get("metadata", {}).get("name") is None:
        # Subject name is "k8s:<path>"; fetch the resource to learn its type.
        resp = api.session.get(url=f"{api.url}{self.subject.name[len('k8s:'):]}")
        resp.raise_for_status()
        context = resp.json()
    if use_context:
        # Bug fix: the key was misspelled "apiversione", raising KeyError
        # whenever the context path was taken. Kubernetes manifests use
        # "apiVersion" (see the sibling execute() implementations).
        apiversion = context["apiVersion"]
        kind = context["kind"]
    obj = pykube.object_factory(api, apiversion, kind)
    if "namespace" not in filters:
        filters["namespace"] = self.subject.get_ext("namespace")
    qobjs = obj.objects(api).filter(**filters)
    if foreach is not None:
        for obj in qobjs:
            foreach(obj)
    if returns is not None:
        return returns(qobjs)
    return len(qobjs)
def _deploy_generic_manifest(self, manifest, version, update, timeout):
    """Create or update the object described by *manifest*.

    Returns ``(k8s_object, updated)``; *updated* is False only when an
    existing object was left untouched because *update* is falsy.
    *timeout* is accepted for signature parity but not used here.
    """
    kind = manifest['kind']
    logging.info('Deploying manifest')
    factory = pykube.object_factory(self.api, manifest['apiVersion'],
                                    manifest['kind'])
    k8s_object = factory(self.api, manifest)
    # Record the deploying version so rollout history shows the cause.
    k8s_object.annotations['kubernetes.io/change-cause'] = 'Deploying version %s' % version
    if 'metadata' in k8s_object.obj and 'namespace' in k8s_object.obj['metadata']:
        self._check_namespace(k8s_object.obj['metadata']['namespace'])
    updated = True
    if not k8s_object.exists():
        logging.info('Creating %s' % kind)
        k8s_object.create()
    elif update:
        logging.info('Updating %s' % kind)
        k8s_object.update()
    else:
        logging.info('Not updating %s' % kind)
        updated = False
    return k8s_object, updated
def deploy_service_account(api, manifest, version, update):
    """Create (or, when *update* is truthy, update) a ServiceAccount from *manifest*."""
    logging.info('Deploying service account')
    factory = pykube.object_factory(api, manifest['apiVersion'], manifest['kind'])
    service_account = factory(api, manifest)
    # Record the deploying version so rollout history shows the cause.
    service_account.annotations['kubernetes.io/change-cause'] = 'Deploying version %s' % version
    if 'metadata' in service_account.obj and 'namespace' in service_account.obj['metadata']:
        check_namespace(api, service_account.obj['metadata']['namespace'])
    if not service_account.exists():
        logging.info('Creating ServiceAccount')
        service_account.create()
    elif update:
        logging.info('Updating ServiceAccount')
        service_account.update()
    else:
        logging.info('Not updating ServiceAccount')
    return service_account
def deploy_generic_manifest(api, manifest, version, update):
    """Create (or, when *update* is truthy, update) any kind of object from *manifest*."""
    logging.info('Deploying generic manifest')
    factory = pykube.object_factory(api, manifest['apiVersion'], manifest['kind'])
    k8s_object = factory(api, manifest)
    # Record the deploying version so rollout history shows the cause.
    k8s_object.annotations['kubernetes.io/change-cause'] = 'Deploying version %s' % version
    if 'metadata' in k8s_object.obj and 'namespace' in k8s_object.obj['metadata']:
        check_namespace(api, k8s_object.obj['metadata']['namespace'])
    if not k8s_object.exists():
        logging.info('Creating %s' % manifest['kind'])
        k8s_object.create()
    elif update:
        logging.info('Updating %s' % manifest['kind'])
        k8s_object.update()
    else:
        logging.info('Not updating %s' % manifest['kind'])
    return k8s_object
def execute(self, apiversion=None, kind=None, filters=None, foreach=None,
            returns=None):
    """List objects, resolving apiversion/kind from the subject's extended
    properties when not given explicitly.

    Returns ``returns(qobjs)`` when a reducer is supplied, else the count.
    """
    # Bug fix: the default was a shared mutable dict ({}) that was mutated
    # via filters.update(), leaking state across calls. Copy defensively.
    filters = dict(filters) if filters else {}
    api = self.payload.get("_k8s_api_client")
    if api is None:
        config = pykube.KubeConfig.from_service_account()
        api = pykube.HTTPClient(config)
        # NOTE(review): unlike sibling implementations, the client is
        # deliberately NOT memoized in the payload here (the caching line
        # was commented out upstream) — preserved as-is.
    if apiversion is None:
        apiversion = self.subject.get_ext("apiversion")
        if "group" in self.subject.get_ext_props():
            apiversion = "{}/{}".format(self.subject.get_ext("group"), apiversion)
    if kind is None:
        kind = self.subject.get_ext("kind")
    obj = pykube.object_factory(api, apiversion, kind)
    if "namespace" not in filters:
        filters["namespace"] = self.subject.get_ext("namespace")
    qobjs = obj.objects(api).filter(**filters)
    if foreach is not None:
        for obj in qobjs:
            foreach(obj)
    if returns is not None:
        return returns(qobjs)
    return len(qobjs)
def _undeploy_manifest(self, manifest, version, update, timeout):
    """Delete the object described by *manifest*, cascading for Deployments
    (their ReplicaSets) and StatefulSets (their Pods).

    *version* and *update* are accepted for signature parity but not used.
    """
    logging.info('Deleting k8s object')
    factory = pykube.object_factory(self.api, manifest['apiVersion'],
                                    manifest['kind'])
    k8s_object = factory(self.api, manifest)
    if not k8s_object.exists():
        logging.info('Object not found')
        return
    logging.info('Found object, deleting: %s', k8s_object.name)
    k8s_object.delete()
    self._wait_for_object_removal(k8s_object, timeout)
    if manifest['kind'] == 'Deployment':
        # Older clusters may leave ReplicaSets behind; delete them explicitly.
        logging.info('Object is Deployment, cascading delete of ReplicaSets')
        self._delete_replica_sets(manifest['apiVersion'],
                                  k8s_object.obj['metadata']['namespace'],
                                  k8s_object.obj['metadata']['labels']['app'],
                                  timeout)
    elif manifest['kind'] == 'StatefulSet':
        logging.info('Object is StatefulSet, cascading delete of Pods')
        self._delete_pods(k8s_object.obj['metadata']['namespace'],
                          k8s_object.obj['metadata']['labels']['app'],
                          timeout)
    return
def test_object_factory_succeeds():
    """A kind present in the resource list yields a namespaced class with the
    expected kind/endpoint/version."""
    mock_api = MagicMock()
    mock_api.resource_list.return_value = {
        'resources': [
            {'kind': 'ExampleObject', 'namespaced': True, 'name': 'exampleobjects'}
        ]
    }
    ExampleObject = pykube.object_factory(mock_api, 'example.org/v1', 'ExampleObject')
    assert ExampleObject.kind == 'ExampleObject'
    assert ExampleObject.endpoint == 'exampleobjects'
    assert ExampleObject.version == 'example.org/v1'
    assert NamespacedAPIObject in ExampleObject.mro()
def _load_kafkauser(namespace, name):
    """Fetch the named KafkaUser from the cluster and return it, reloaded."""
    user_cls = object_factory(state.api, "kafka.strimzi.io/v1beta1", "KafkaUser")
    user = user_cls(
        state.api, {"metadata": {"namespace": namespace, "name": name}}
    )
    user.reload()
    return user
def execute(self, obj):
    """Create the Kubernetes object described by *obj*, retrying on HTTP 409."""
    api = self.payload.get("_k8s_api_client")
    if api is None:
        # Lazily build the API client from the local kube context and
        # memoize it in the payload for subsequent calls.
        config = pykube.KubeConfig.from_env()
        api = pykube.HTTPClient(config)
        self.payload["_k8s_api_client"] = api
    apiversion = obj.get("apiVersion")
    kind = obj.get("kind")
    while True:
        try:
            pykube.object_factory(api, apiversion, kind)(api, obj).create()
            break
        except pykube.exceptions.HTTPError as ex:
            # NOTE(review): a 409 on *create* usually means the object already
            # exists, so retrying the same create may loop forever — confirm
            # whether this retry is intended for transient conflicts only.
            if ex.code == 409:
                continue
            else:
                raise ex
def object_factory(api, api_version, kind):
    """Dynamically builds kubernetes objects python class.

    Resolution order:
    1. Objects from openstack_operator.pykube.KUBE_OBJECTS
    2. Objects from pykube.objects
    3. Generic kubernetes object (via pykube.object_factory)
    """
    resource = KUBE_OBJECTS.get((api_version, kind))
    if resource is None:
        # Only fall back to API discovery on a cache miss. The previous form
        # passed pykube.object_factory(...) as dict.get's default argument,
        # which is evaluated eagerly — issuing a resource_list API request
        # even for locally registered kinds.
        resource = pykube.object_factory(api, api_version, kind)
    return resource
def _copy_kafkatopic(body, namespace, name):
    """Clone a KafkaTopic manifest into the configured destination namespace,
    annotating it with its origin."""
    target_ns = globalconf.kafka_user_topic_destination_namespace
    clone = _copy_object(body)
    clone["metadata"]["namespace"] = target_ns
    KafkaTopic = object_factory(state.api, "kafka.strimzi.io/v1beta1", "KafkaTopic")
    topic = KafkaTopic(state.api, clone)
    # Track where the topic came from and that we created it.
    topic.annotations["knuto.niradynamics.se/source"] = f"{namespace}/{name}"
    topic.annotations["knuto.niradynamics.se/created"] = "true"
    return topic
def _delete_replica_sets(self, api_version, namespace, app, timeout):
    """Delete every ReplicaSet labelled app=*app* in *namespace*, waiting for
    each removal, then cascade to the matching Pods."""
    rs_class = pykube.object_factory(self.api, api_version, 'ReplicaSet')
    matching = rs_class.objects(self.api).filter(
        namespace=namespace, selector={'app__in': [app]})
    for rs in matching:
        logging.info('Deleting rs: %s', rs.name)
        rs.delete()
        self._wait_for_object_removal(rs, timeout)
    self._delete_pods(namespace, app, timeout)
def execute(self, func, subresource=None, name=None, apiversion=None,
            kind=None, filters=None):
    """Load one object, let *func* mutate its manifest in place, and update
    it, retrying on HTTP 409 optimistic-concurrency conflicts.

    When no explicit arguments are given, the target's apiVersion/kind/name
    are taken from the payload (or from the resource fetched via the
    subject's "k8s:<path>" name).
    """
    # Bug fix: the default was a shared mutable dict ({}) that was mutated
    # via filters.update(), leaking state across calls. Copy defensively.
    filters = dict(filters) if filters else {}
    api = self.payload.get("_k8s_api_client")
    if api is None:
        config = pykube.KubeConfig.from_service_account()
        api = pykube.HTTPClient(config)
        self.payload["_k8s_api_client"] = api
    use_context = (subresource is None and name is None and apiversion is None
                   and kind is None and not filters)
    context = self.payload
    if use_context and context.get("metadata", {}).get("name") is None:
        resp = api.session.get(
            url=f"{api.url}{self.subject.name[len('k8s:'):]}")
        resp.raise_for_status()
        context = resp.json()
    if use_context:
        apiversion = context["apiVersion"]
        kind = context["kind"]
        name = context["metadata"]["name"]
    factory = pykube.object_factory(api, apiversion, kind)
    # obj.update() fails with 409 if the resource was modified between
    # loading and updating; reload and retry in that case.
    # Reference: https://pykube.readthedocs.io/en/latest/howtos/update-deployment-image.html
    while True:
        obj = factory.objects(api).filter(**filters).get(name=name)
        func(obj.obj)
        try:
            obj.update(subresource=subresource)
            break
        except pykube.exceptions.HTTPError as ex:
            print(str(ex))
            if ex.code == 409:
                continue
            else:
                raise ex
def _resolve_broker(api, ksvc_sink, namespace):
    """Resolve a "broker:<name>" sink spec to the broker's address URL.

    Non-broker sinks pass through unchanged. Exits the process if the
    named broker does not exist.
    """
    global logger
    if ksvc_sink.startswith("broker:"):
        broker = ksvc_sink.split(":")[1]
        broker_cls = pykube.object_factory(api, "eventing.knative.dev/v1", "Broker")
        try:
            obj = broker_cls.objects(api).filter(namespace=namespace).get(name=broker)
        except pykube.exceptions.ObjectDoesNotExist:
            logger.critical(f"broker {broker} not found")
            sys.exit(1)
        ksvc_sink = obj.obj.get("status").get("address").get("url")
    logger.debug(f"ksvc_sink: {ksvc_sink}")
    return ksvc_sink
def test_object_factory_succeeds():
    """A kind present in the resource list yields a namespaced class with the
    expected kind/endpoint/version."""
    mock_api = MagicMock()
    mock_api.resource_list.return_value = {
        "resources": [
            {"kind": "ExampleObject", "namespaced": True, "name": "exampleobjects"}
        ]
    }
    ExampleObject = pykube.object_factory(mock_api, "example.org/v1",
                                          "ExampleObject")
    assert ExampleObject.kind == "ExampleObject"
    assert ExampleObject.endpoint == "exampleobjects"
    assert ExampleObject.version == "example.org/v1"
    assert NamespacedAPIObject in ExampleObject.mro()
def execute(self, apiversion=None, kind=None, foreach=None, returns=None,
            **filters):
    """List objects of the given kind, optionally applying *foreach* to each
    and/or a *returns* reducer over the query set (with 409 retry on both).

    apiversion/kind fall back to the subject's extended properties when
    not supplied. Returns ``returns(qobjs)`` when given, else the count.
    """
    api = self.payload.get("_k8s_api_client")
    if api is None:
        # Lazily build the API client from the local kube context and
        # memoize it in the payload for subsequent calls.
        config = pykube.KubeConfig.from_env()
        api = pykube.HTTPClient(config)
        self.payload["_k8s_api_client"] = api
    if apiversion is None and "apiversion" in self.subject.get_ext_props():
        apiversion = self.subject.get_ext("apiversion")
    if kind is None and "kind" in self.subject.get_ext_props():
        kind = self.subject.get_ext("kind")
    obj = pykube.object_factory(api, apiversion, kind)
    # Default the namespace filter from the subject when it carries one.
    if "namespace" not in filters and "namespace" in self.subject.get_ext_props() \
            and self.subject.ext_namespace is not None:
        filters.update({"namespace": self.subject.get_ext("namespace")})
    qobjs = obj.objects(api).filter(**filters)
    if foreach is not None:
        for obj in qobjs:
            # Retry the callback on optimistic-concurrency conflicts (409);
            # any other HTTP error propagates.
            while True:
                try:
                    foreach(obj)
                    break
                except pykube.exceptions.HTTPError as ex:
                    if ex.code == 409:
                        continue
                    else:
                        raise ex
    if returns is not None:
        # Same 409-retry policy around the reducer.
        while True:
            try:
                return returns(qobjs)
            except pykube.exceptions.HTTPError as ex:
                if ex.code == 409:
                    continue
                else:
                    raise ex
    return len(qobjs)
def execute(self, func, name=None, apiversion=None, kind=None,
            subresource=None, **filters):
    """Load one object by name, let *func* mutate its manifest in place, and
    update it, retrying on HTTP 409 conflicts."""
    if name is None:
        name = self.subject.get_ext("name")
    api = self.payload.get("_k8s_api_client")
    if api is None:
        # Lazily build the API client from the local kube context and
        # memoize it in the payload for subsequent calls.
        config = pykube.KubeConfig.from_env()
        api = pykube.HTTPClient(config)
        self.payload["_k8s_api_client"] = api
    if apiversion is None and "apiversion" in self.subject.get_ext_props():
        apiversion = self.subject.get_ext("apiversion")
    if kind is None and "kind" in self.subject.get_ext_props():
        kind = self.subject.get_ext("kind")
    obj = pykube.object_factory(api, apiversion, kind)
    # Default the namespace filter from the subject when it carries one.
    if "namespace" not in filters and "namespace" in self.subject.get_ext_props() \
            and self.subject.ext_namespace is not None:
        filters.update({"namespace": self.subject.get_ext("namespace")})
    obj = obj.objects(api).filter(**filters).get(name=name)
    func(obj.obj)
    while True:
        try:
            obj.update(subresource=subresource)
            break
        except pykube.exceptions.HTTPError as ex:
            # NOTE(review): on 409 the object is NOT re-fetched before
            # retrying, so the update loops with the same stale manifest —
            # compare the sibling implementation that reloads inside the
            # loop; confirm intent.
            if ex.code == 409:
                continue
            else:
                raise ex
def deploy_deployment(api, manifest, version, timeout, update):
    """Create (or, when *update* is truthy, update) a Deployment from
    *manifest* and wait up to *timeout* for the rollout to finish.

    Returns the Deployment object. When an existing Deployment is left
    untouched (update falsy), it is returned without waiting.
    """
    logging.info('Deploying deployment')
    object_class = pykube.object_factory(api, manifest['apiVersion'],
                                         manifest['kind'])
    deployment = object_class(api, manifest)
    # Record the deploying version so rollout history shows the cause.
    deployment.annotations[
        'kubernetes.io/change-cause'] = 'Deploying version %s' % version
    if 'metadata' in deployment.obj and 'namespace' in deployment.obj[
            'metadata']:
        check_namespace(api, deployment.obj['metadata']['namespace'])
    if not deployment.exists():
        logging.info('Creating deployment')
        deployment.create()
    elif update:
        logging.info('Updating deployment')
        deployment.update()
    else:
        logging.info('Not updating deployment')
        # Consistency fix: return the object (previously a bare `return`,
        # i.e. None) to match every other deploy_* helper; the rollout
        # wait below is still skipped since nothing changed.
        return deployment
    # We wait for deployments finish
    app_label = deployment.obj['metadata']['labels']['app']
    if 'metadata' in deployment.obj and 'namespace' in deployment.obj[
            'metadata']:
        namespace = deployment.obj['metadata']['namespace']
    else:
        namespace = 'default'
    revision = get_revision(api, app_label, version, timeout, namespace)
    # Hack to make sure deployment has a chance to start - Need a better way
    # to detect this
    time.sleep(3)
    wait_for_deployment(deployment, revision, timeout)
    return deployment
def _get_kafkausers(api, namespace):
    """Return all KafkaUser objects in *namespace* as a list."""
    user_cls = pykube.object_factory(api, "kafka.strimzi.io/v1beta1", "KafkaUser")
    query = user_cls.objects(api, namespace=namespace)
    return list(query)
import asyncio import functools import random import time import kopf import pykube N_HANDLERS = 5 api = pykube.HTTPClient(pykube.KubeConfig.from_env()) KopfChild = pykube.object_factory(api, "zalando.org/v1", "KopfChild") # @kopf.on.resume("zalando.org", "v1", "kopfexamples", labels={"foo": "bar"}) # @kopf.on.update("zalando.org", "v1", "kopfexamples", labels={"foo": "bar"}) # @kopf.on.create("zalando.org", "v1", "kopfexamples", labels={"foo": "bar"}) # @kopf.on.resume("zalando.org", "v1", "kopfexamples") @kopf.on.update("zalando.org", "v1", "kopfexamples") @kopf.on.create("zalando.org", "v1", "kopfexamples") async def ensure(body, logger, event, **kwargs): fns = {s: functools.partial(sleepy, s) for s in range(N_HANDLERS)} await kopf.execute(fns=fns) return {"message": f"{event}d"} async def sleepy(s, logger, event, namespace, name, body, spec, **kwargs): logger.info(f"Handler {s} for event {event} with field {spec['field']}") # snooze = 10 * random.choice((0, 1)) if event == "update" and s == 0 and spec[ 'field'] == 'value1' and random.choice((True, False)):
def _get_kafkatopics(api, namespace):
    """Return all KafkaTopic objects in *namespace* as a list."""
    topic_cls = pykube.object_factory(api, "kafka.strimzi.io/v1beta1", "KafkaTopic")
    query = topic_cls.objects(api, namespace=namespace)
    return list(query)
def create(name, uid, namespace, spec, logger, **_):
    """Create all resources backing a notebook instance: a ConfigMap with the
    startup script, a Deployment, a Service, an Ingress, and optionally a
    PersistentVolumeClaim.

    Returns the status dict to store on the custom resource (notebook URL,
    generated password, interface, deployment and storage settings).
    """
    # Generate a random password and hash it as "<alg>:<salt>:<hexdigest>",
    # the format the startup script embeds for the notebook server.
    # NOTE(review): `random` is not a CSPRNG and sample() draws without
    # replacement (reduced entropy); consider the `secrets` module for
    # credential generation.
    algorithm = "sha1"
    salt_len = 12
    characters = string.ascii_letters + string.digits
    password = "".join(random.sample(characters, 16))
    h = hashlib.new(algorithm)
    salt = ("%0" + str(salt_len) + "x") % random.getrandbits(4 * salt_len)
    h.update(bytes(password, "utf-8") + salt.encode("ascii"))
    password_hash = ":".join((algorithm, salt, h.hexdigest()))

    config_map_body = {
        "apiVersion": "v1",
        "kind": "ConfigMap",
        "metadata": {
            "name": name,
            "namespace": namespace,
            "labels": {"app": name},
        },
        "data": {
            "setup-environment.sh":
                notebook_startup % dict(password_hash=password_hash)
        },
    }
    kopf.adopt(config_map_body)
    K8SConfigMap = pykube.object_factory(api, "v1", "ConfigMap")
    config_map_resource = K8SConfigMap(api, config_map_body)
    config_map_resource.create()

    notebook_interface = spec.get("notebook", {}).get("interface", "lab")
    image = spec.get("deployment", {}).get("image",
                                           "jupyter/minimal-notebook:latest")
    service_account = spec.get("deployment", {}).get("serviceAccountName",
                                                     "default")
    memory_limit = spec.get("deployment", {}).get("resources", {}).get(
        "limits", {}).get("memory", "512Mi")
    memory_request = spec.get("deployment", {}).get("resources", {}).get(
        "requests", {}).get("memory", memory_limit)

    deployment_body = {
        "apiVersion": "apps/v1",
        "kind": "Deployment",
        "metadata": {
            "name": name,
            "namespace": namespace,
            "labels": {"app": name},
        },
        "spec": {
            "replicas": 1,
            "selector": {"matchLabels": {"deployment": name}},
            "strategy": {"type": "Recreate"},
            "template": {
                "metadata": {"labels": {"deployment": name}},
                "spec": {
                    "serviceAccountName": service_account,
                    "containers": [{
                        "name": "notebook",
                        "image": image,
                        "imagePullPolicy": "Always",
                        "resources": {
                            "requests": {"memory": memory_request},
                            "limits": {"memory": memory_limit},
                        },
                        "ports": [{
                            "name": "8888-tcp",
                            "containerPort": 8888,
                            "protocol": "TCP",
                        }],
                        "env": [],
                        "volumeMounts": [{
                            "name": "startup",
                            "mountPath": "/usr/local/bin/before-notebook.d",
                        }],
                    }],
                    "securityContext": {"fsGroup": 0},
                    # NOTE(review): the ConfigMap above is created with
                    # name=`name`, yet this volume references the literal
                    # "notebook" — confirm which is intended.
                    "volumes": [{
                        "name": "startup",
                        "configMap": {"name": "notebook"},
                    }],
                },
            },
        },
    }

    if notebook_interface != "classic":
        deployment_body["spec"]["template"]["spec"]["containers"][0][
            "env"].append({"name": "JUPYTER_ENABLE_LAB", "value": "true"})

    storage_request = ""
    storage_limit = ""
    storage_claim_name = spec.get("storage", {}).get("claimName", "")
    # Bug fix: the sub path was read from the "claimName" key (copy-paste),
    # so it always duplicated the claim name. The volume mount and the
    # returned status dict both expect the "subPath" key.
    storage_sub_path = spec.get("storage", {}).get("subPath", "")

    if not storage_claim_name:
        storage_request = spec.get("deployment", {}).get("resources", {}).get(
            "requests", {}).get("storage", "")
        storage_limit = spec.get("deployment", {}).get("resources", {}).get(
            "limits", {}).get("storage", "")
        if storage_request or storage_limit:
            # NOTE(review): hardcoded claim name "notebook" although the PVC
            # below is created with name=`name` — confirm which is intended.
            volume = {
                "name": "data",
                "persistentVolumeClaim": {"claimName": "notebook"},
            }
            deployment_body["spec"]["template"]["spec"]["volumes"].append(
                volume)
            storage_mount = {"name": "data", "mountPath": "/home/jovyan"}
            deployment_body["spec"]["template"]["spec"]["containers"][0][
                "volumeMounts"].append(storage_mount)
            persistent_volume_claim_body = {
                "apiVersion": "v1",
                "kind": "PersistentVolumeClaim",
                "metadata": {
                    "name": name,
                    "namespace": namespace,
                    "labels": {"app": name},
                },
                "spec": {
                    "accessModes": ["ReadWriteOnce"],
                    "resources": {"requests": {}, "limits": {}},
                },
            }
            if storage_request:
                persistent_volume_claim_body["spec"]["resources"]["requests"][
                    "storage"] = storage_request
            if storage_limit:
                persistent_volume_claim_body["spec"]["resources"]["limits"][
                    "storage"] = storage_limit
            kopf.adopt(persistent_volume_claim_body)
            K8SPersistentVolumeClaim = pykube.object_factory(
                api, "v1", "PersistentVolumeClaim")
            persistent_volume_claim_resource = K8SPersistentVolumeClaim(
                api, persistent_volume_claim_body)
            persistent_volume_claim_resource.create()
    else:
        # Mount the user-supplied existing claim instead of creating one.
        volume = {
            "name": "data",
            "persistentVolumeClaim": {"claimName": storage_claim_name},
        }
        deployment_body["spec"]["template"]["spec"]["volumes"].append(volume)
        storage_mount = {
            "name": "data",
            "mountPath": "/home/jovyan",
            "subPath": storage_sub_path,
        }
        deployment_body["spec"]["template"]["spec"]["containers"][0][
            "volumeMounts"].append(storage_mount)

    kopf.adopt(deployment_body)
    K8SDeployment = pykube.object_factory(api, "apps/v1", "Deployment")
    deployment_resource = K8SDeployment(api, deployment_body)
    deployment_resource.create()

    service_body = {
        "apiVersion": "v1",
        "kind": "Service",
        "metadata": {
            "name": name,
            "namespace": namespace,
            "labels": {"app": name},
        },
        "spec": {
            "type": "ClusterIP",
            "ports": [{
                "name": "8888-tcp",
                "port": 8888,
                "protocol": "TCP",
                "targetPort": 8888,
            }],
            "selector": {"deployment": name},
        },
    }
    kopf.adopt(service_body)
    K8SService = pykube.object_factory(api, "v1", "Service")
    service_resource = K8SService(api, service_body)
    service_resource.create()

    ingress_domain = os.environ.get("INGRESS_DOMAIN")
    ingress_hostname = f"notebook-{namespace}.{ingress_domain}"
    ingress_body = {
        "apiVersion": "extensions/v1beta1",
        "kind": "Ingress",
        "metadata": {
            "name": name,
            "namespace": namespace,
            "labels": {"app": name},
            # Contour: allow websocket upgrade on all routes.
            "annotations": {"projectcontour.io/websocket-routes": "/"},
        },
        "spec": {
            "rules": [{
                "host": ingress_hostname,
                "http": {
                    "paths": [{
                        "path": "/",
                        "backend": {
                            "serviceName": name,
                            "servicePort": 8888,
                        },
                    }],
                },
            }],
        },
    }
    kopf.adopt(ingress_body)
    K8SIngress = pykube.object_factory(api, "extensions/v1beta1", "Ingress")
    ingress_resource = K8SIngress(api, ingress_body)
    ingress_resource.create()

    return {
        "notebook": {
            "url": f"http://{ingress_hostname}",
            "password": password,
            "interface": notebook_interface,
        },
        "deployment": {
            "image": image,
            "serviceAccountName": service_account,
            "resources": {
                "requests": {
                    "memory": memory_request,
                    "storage": storage_request,
                },
                "limits": {
                    "memory": memory_limit,
                    "storage": storage_limit,
                },
            },
        },
        "storage": {
            "claimName": storage_claim_name,
            "subPath": storage_sub_path,
        },
    }
def cmd_deploy(spec_module, path, namespace, registry):
    """Build and push the ruleset container image, then create or update the
    corresponding Knative Service and its eventing Triggers.

    Exits the process on any fatal precondition failure (missing registry,
    missing project ConfigMap/imageBase, docker build/push/inspect errors).
    """
    global logger
    global script_module
    logger.debug("cmd_deploy for ruleset {} in {}".format(
        spec_module.name, path))
    if registry is None:
        logger.critical("DOCKER_REGISTRY not set")
        sys.exit(1)
    api = pykube.HTTPClient(pykube.KubeConfig.from_env())
    if namespace is None:
        namespace = api.config.namespace
    logger.debug(f"using namespace {namespace}")
    # Get docker base image
    try:
        cm = pykube.ConfigMap.objects(api).filter(namespace=namespace).get(
            name="config-krules-project")
    except pykube.exceptions.ObjectDoesNotExist:
        logger.critical("config-krules-project configmap not found")
        sys.exit(1)
    try:
        image_base = cm.obj.get("data", {})["imageBase"]
    except KeyError:
        logger.critical(
            "imageBase not found in config-krules-project configmap")
        sys.exit(1)
    logger.debug(f"imageBase: {image_base}")
    # Build and push the ruleset image
    add_section = io.StringIO("")
    for f in getattr(spec_module, "add_files", ()):
        logger.debug(f"adding file '{f}' to resulting contaner")
        print(f"ADD {f} /app/{f}", file=add_section)
    add_modules = getattr(spec_module, "add_modules", True)
    if add_modules:
        # Every directory with an __init__.py is treated as a module to ship.
        for mdir in [
                mdir for mdir in os.listdir(path)
                if os.path.exists(os.path.join(path, mdir, "__init__.py"))
        ]:
            logger.debug(f"adding module '{mdir}' to resulting container")
            print(f"ADD {mdir}/ /app/{mdir}/", file=add_section)
    extra_commands = io.StringIO("")
    for c in getattr(spec_module, "extra_commands", ()):
        logger.debug(f"adding '{c[0]}' command to resulting container")
        print(" ".join(c), file=extra_commands)
    logger.debug("build image locally")
    tag = "{}/{}".format(registry, spec_module.name)
    dockerfile = script_module.dockerfile_skel.format(
        image_base=image_base,
        add_section=add_section.getvalue(),
        extra_commands=extra_commands.getvalue())
    logger.debug(f"Dockerfile:\n{dockerfile}")
    # Dockerfile is piped on stdin ("-f-") so nothing is written to disk.
    ret = subprocess.run(("docker", "build", path, f"-t{tag}", "-f-"),
                         input=bytes(dockerfile, encoding='utf-8'))
    if ret.returncode > 0:
        logger.critical("docker build failed")
        sys.exit(ret.returncode)
    logger.debug(f"push {tag} image to registry")
    ret = subprocess.run(("docker", "push", f"{tag}"))
    if ret.returncode > 0:
        logger.critical("docker push failed")
        sys.exit(ret.returncode)
    # retrieve pushed image digest
    ret = subprocess.run(
        ("docker", "inspect", "--format='{{index .RepoDigests 0}}'", tag),
        capture_output=True)
    if ret.returncode > 0:
        logger.critical("failed to fetch image digest")
        sys.exit(ret.returncode)
    # SECURITY(review): eval() on external command output — the inspect
    # format wraps the digest in single quotes, so eval strips them, but
    # ast.literal_eval (or a plain strip) would be safer. Flagged, not
    # changed, to keep this diff focused.
    digest = eval(ret.stdout.decode("utf-8"))
    # Create/Update Knative service
    labels = getattr(
        spec_module, "labels", {
            "serving.knative.dev/visibility": "cluster-local",
            "krules.airspot.dev/type": "ruleset"
        })
    template_annotations = getattr(spec_module, "template_annotations", {})
    ksvc_sink = _resolve_broker(
        api, getattr(spec_module, "ksvc_sink", "broker:default"), namespace)
    ksvc_procevents_sink = _resolve_broker(
        api, getattr(spec_module, "ksvc_procevents_sink", "broker:procevents"),
        namespace)
    service_account = getattr(spec_module, "service_account", "")
    extra_environ = getattr(spec_module, "environ", {})
    environ = [{
        "name": "K_SINK",
        "value": ksvc_sink,
    }, {
        "name": "K_PROCEVENTS_SINK",
        "value": ksvc_procevents_sink
    }]
    for k, v in extra_environ.items():
        environ.append({
            "name": k,
            "value": v,
        })
    logger.debug(f"service account: {service_account}")
    # Revision name is a hash of everything that should force a new revision.
    # NOTE(review): service_account is passed twice — looks accidental but is
    # preserved since the hash only needs to be deterministic.
    revision_name = _hashed(
        spec_module.name,
        template_annotations,
        ksvc_sink,
        ksvc_procevents_sink,
        service_account,
        digest,
        service_account)
    logger.info(f"Revision name: {revision_name}")
    obj_ref = pykube.object_factory(
        api, "serving.knative.dev/v1",
        "Service").objects(api).get_or_none(name=spec_module.name)
    if obj_ref is None:
        logger.debug("creating object..")
        obj = {
            "apiVersion": "serving.knative.dev/v1",
            "kind": "Service",
            "metadata": {
                "name": spec_module.name,
                "labels": labels
            },
            "spec": {
                "template": {
                    "metadata": {
                        "name": revision_name,
                        "annotations": template_annotations,
                    },
                    "spec": {
                        "containers": [{
                            "name": "ruleset",
                            "image": digest,
                            "env": environ
                        }]
                    }
                },
            }
        }
        if service_account != "":
            obj["spec"]["template"]["spec"][
                "serviceAccountName"] = service_account
        pykube.object_factory(api, "serving.knative.dev/v1",
                              "Service")(api, obj).create()
        logger.info(f"Ruleset '{spec_module.name}' created")
    else:
        logger.debug("object exists..")
        obj_ref.obj["metadata"]["labels"] = labels
        obj_ref.obj["spec"]["template"]["metadata"]["name"] = revision_name
        # Bug fix: the key was misspelled "anntations", so this branch was
        # unreachable and existing template annotations were always replaced
        # wholesale instead of merged.
        if "annotations" in obj_ref.obj["spec"]["template"]["metadata"]:
            obj_ref.obj["spec"]["template"]["metadata"]["annotations"].update(
                template_annotations)
        else:
            obj_ref.obj["spec"]["template"]["metadata"][
                "annotations"] = template_annotations
        containers = obj_ref.obj["spec"]["template"]["spec"]["containers"]
        for container in containers:
            if container["name"] == "ruleset":
                container["image"] = digest
                # Merge the computed sinks and extra env over the existing env.
                envs = {e["name"]: e["value"] for e in container["env"]}
                envs["K_SINK"] = ksvc_sink
                envs["K_PROCEVENTS_SINK"] = ksvc_procevents_sink
                envs.update(extra_environ)
                container["env"] = [{
                    "name": k,
                    "value": v
                } for k, v in envs.items()]
        if service_account != "":
            obj_ref.obj["spec"]["template"]["spec"][
                "serviceAccountName"] = service_account
        obj_ref.update()
        logger.info(f"Ruleset '{spec_module.name}' updated")
    # Create/Update triggers
    triggers = getattr(spec_module, "triggers", ())
    for trigger in triggers:
        try:
            obj_ref = pykube.object_factory(
                api, "eventing.knative.dev/v1",
                "Trigger").objects(api).get_or_none(name=trigger.get("name"))
            if obj_ref is None:
                name = trigger.pop('name')
                logging.debug(f"creating trigger {name}")
                trigger.update({
                    "subscriber": {
                        "ref": {
                            "apiVersion": "serving.knative.dev/v1",
                            "kind": "Service",
                            "name": spec_module.name,
                            "namespace": namespace,
                        }
                    }
                })
                obj = {
                    "apiVersion": "eventing.knative.dev/v1",
                    "kind": "Trigger",
                    "metadata": {
                        "name": name,
                        "namespace": namespace,
                        "labels": {
                            "krules.airspot.dev/owned-by": spec_module.name,
                        },
                    },
                    "spec": trigger,
                }
                if obj["spec"].get("broker") is None:
                    obj["spec"]["broker"] = getattr(
                        spec_module, "triggers_default_broker", "default")
                pykube.object_factory(api, "eventing.knative.dev/v1",
                                      "Trigger")(api, obj).create()
                logger.info(f"Trigger {obj['metadata']['name']} created")
            else:
                name = trigger.pop("name")
                # Keep the existing subscriber; only merge the declared spec.
                trigger["subscriber"] = obj_ref.obj["spec"]["subscriber"]
                obj_ref.obj["spec"].update(trigger)
                obj_ref.update()
                logger.info(f"Trigger {name} updated")
        except Exception as ex:
            # Best-effort per trigger: log and continue with the next one.
            logger.error(f"Error processing trigger:\n{trigger}")
            logger.exception(ex)