def get_issuer_from_resource(resource):
    """Return the EstIssuer/EstClusterIssuer referenced by *resource*, if Ready.

    Args:
        resource: custom object whose ``spec.issuerRef`` names the issuer.

    Returns:
        The issuer custom object (dict).

    Raises:
        kopf.TemporaryError: if the API lookup fails or the issuer is not Ready.
    """
    import json  # local: only needed to decode API error bodies

    issuer_ref = resource["spec"]["issuerRef"]
    kwargs = dict(
        group=issuer_ref["group"],
        version=VERSION,
        plural=issuer_ref["kind"].lower() + "s",
        name=issuer_ref["name"],
    )
    # EstIssuers are namespaced; EstClusterIssuers are cluster-scoped.
    if kwargs["plural"] == "estissuers":
        kwargs["namespace"] = resource["metadata"]["namespace"]
    try:
        api = k8s.CustomObjectsApi()
        if kwargs.get("namespace"):
            issuer = api.get_namespaced_custom_object(**kwargs)
        else:
            issuer = api.get_cluster_custom_object(**kwargs)
    except k8s.exceptions.OpenApiException as err:
        # Fix: err.body is a JSON document — parse it; never eval() API data.
        raise kopf.TemporaryError(json.loads(err.body)["message"]) from err
    # The issuer is usable only once its create-handler reported Ready=True.
    if ((issuer.get("status") is None)
            or (issuer["status"].get("estissuer_create") is None)
            or (issuer["status"]["estissuer_create"]["Ready"] != "True")):
        raise kopf.TemporaryError(f"{issuer_ref['name']} not ready")
    kopf.info(
        resource,
        reason="Debugging",
        message=f"get_issuer_from_resource: {issuer['metadata']['name']}",
    )
    return issuer
def create_1(body, meta, spec, status, **kwargs):
    """Create the step-1 children and report the creation via kopf events."""
    children = _create_children(owner=body)

    # Shortcut: Normal-type event on the parent object.
    kopf.info(body, reason='AnyReason')

    # Explicit event types on the parent and on the created children.
    kopf.event(body, type='Warning', reason='SomeReason',
               message="Cannot do something")
    kopf.event(children, type='Normal', reason='SomeReason',
               message="Created as part of the job1step")

    return {'job1-status': 100}
def create_fn(body, **kwargs):
    """Build secret data from the resource spec and create the Secret.

    Emits a kopf event after each of the two steps (spec processing,
    secret creation).
    """
    spec = body["spec"]
    name = body["metadata"]["name"]
    namespace = body["metadata"]["namespace"]
    secret_data = process_spec(spec)
    # Fix: event reason was misspelled "SpecProccessed".
    kopf.info(body, reason="SpecProcessed", message="Processed Secret Items")
    create_secret(namespace, name, secret_data)
    kopf.info(body, reason="SecretCreated", message="Created Secret Object")
def estissuer_certrequest_handler(namespace, spec, meta, body, patch, **_):
    """Reconcile CertificateRequests: create an EstOrder and mark the
    request as Pending.

    Args:
        namespace: namespace of the CertificateRequest.
        spec/meta/body: the CertificateRequest parts supplied by kopf.
        patch: kopf patch object used to set status conditions.

    Raises:
        kopf.TemporaryError: if the EstOrder cannot be created.
    """
    import json  # local: only needed to decode API error bodies

    # gather resources
    issuer = get_issuer_from_resource(body)
    cert = get_owner_by_kind(body, ["Certificate"])
    cert_secret = get_secret_from_resource(cert)
    # Create an EstOrder for it in request namespace.
    # An existing TLS secret for the certificate means this is a renewal.
    renewal = (False if cert_secret is None else
               (cert_secret.type == "kubernetes.io/tls"))
    resource = ESTORDER_TEMPLATE.format(
        ordername=meta["name"] + "-order",
        issuername=issuer["metadata"]["name"],
        issuerkind=issuer["kind"],
        request=spec["request"],
        renewal=renewal,
    )
    resource = yaml.safe_load(resource)
    # Set EstOrder owner to CertificateRequest (garbage-collected together).
    kopf.adopt(resource)
    # create the resource
    try:
        api = k8s.CustomObjectsApi()
        _ = api.create_namespaced_custom_object(
            group=GROUP,
            version=VERSION,
            namespace=namespace,
            plural="estorders",
            body=resource,
        )
    except k8s.exceptions.OpenApiException as err:
        # Fix: err.body is JSON — parse it instead of eval() on API data.
        raise kopf.TemporaryError(json.loads(err.body)["message"]) from err
    # log event
    message = f"Created new EstOrder {resource['metadata']['name']}"
    kopf.info(
        body,
        reason="Ordered",
        message=message,
    )
    # set certificate request status to False,Pending
    # utcnow()+"Z" b/c python datetime doesn't do Zulu
    # timespec='seconds' b/c cert-manager webhook will trim to seconds
    # (causing the API to warn about the inconsistency)
    condition = dict(
        lastTransitionTime=
        f"{datetime.utcnow().isoformat(timespec='seconds')}Z",
        type="Ready",
        status="False",
        reason="Pending",
        message=message,
    )
    if patch.status.get("conditions") is None:
        patch.status["conditions"] = []
    patch.status["conditions"].append(condition)
def create_workflow(**kwargs):
    """Run the workflow: configure, algorithm, and publish jobs in sequence.

    Each phase creates a job and then polls until the corresponding pod
    reports completion.
    """
    body = kwargs['body']
    attributes = body['spec']['metadata']['service'][0]['attributes']

    # The workflow cannot proceed without its attributes.
    if not attributes:
        raise kopf.HandlerFatalError(f"Workflow error. Got {attributes}.")

    # Pod template
    kopf.info(body, reason='workflow with type {}'.format(
        attributes['main']['type']))
    for stage in attributes['workflow']['stages']:
        logger.info(
            f"Stage {stage['index']} with stageType {stage['stageType']}")
        logger.info(
            f"Running container {stage['requirements']['container']['image']}"
            f":{stage['requirements']['container']['tag']}")

    # Configmap for workflow
    create_configmap_workflow(body, logger)

    # Volume
    create_pvc(body, logger)

    # Configure phase: launch, then poll until the pod finishes.
    create_configure_job(body, logger)
    while not wait_finish_job(body['metadata']['namespace'],
                              f"{body['metadata']['name']}-configure-job"):
        logger.info("Waiting configure pod to finish")
        time.sleep(10.0)

    # Algorithm phase: launch, then poll until the pod finishes.
    create_algorithm_job(body, logger)
    while not wait_finish_job(body['metadata']['namespace'],
                              f"{body['metadata']['name']}-algorithm-job"):
        logger.info("Waiting algorithm pod to finish")
        time.sleep(10.0)

    # Publish phase: launch, then poll until the pod finishes.
    create_publish_job(body, logger)
    while not wait_finish_job(body['metadata']['namespace'],
                              f"{body['metadata']['name']}-publish-job"):
        logger.info("Waiting publish pod to finish")
        time.sleep(10.0)

    return {'message': "Creating workflow finished"}
def create_fn(body, **kwargs):
    """Demonstrate the kopf event-reporting helpers on the created object."""
    # The all-purpose function for the event creation.
    kopf.event(body, type="SomeType", reason="SomeReason",
               message="Some message")

    # The shortcuts for the conventional events and common cases.
    kopf.info(body, reason="SomeReason", message="Some message")
    kopf.warn(body, reason="SomeReason", message="Some message")

    try:
        raise RuntimeError("Exception text.")
    except RuntimeError:
        # Fix: a bare `except:` would also swallow SystemExit and
        # KeyboardInterrupt; catch only the exception raised above.
        kopf.exception(body, reason="SomeReason", message="Some exception:")
def get_secret_from_resource(resource):
    """Return the tls or basic-auth secret named by spec.secretName, or None."""
    secretName = resource["spec"]["secretName"]
    namespace = resource["metadata"].get("namespace")
    # Cluster-scoped issuers keep their secret in the operator's namespace.
    if not namespace and resource["kind"] == "EstClusterIssuer":
        namespace = os.getenv("CLUSTER_SCOPE_NAMESPACE", "est-operator")
    try:
        secret = k8s.CoreV1Api().read_namespaced_secret(secretName, namespace)
    except k8s.exceptions.OpenApiException:
        secret = None
    # Only basic-auth and tls secret types are meaningful here.
    allowed = ("kubernetes.io/basic-auth", "kubernetes.io/tls")
    if secret is not None and secret.type not in allowed:
        secret = None
    kopf.info(
        resource,
        reason="Debugging",
        message=
        f"get_secret_from_resource: {namespace}:{secretName} {secret is not None}",
    )
    return secret
def update_handler(spec, name, **kwargs):
    """Update an existing Grafana dashboard, reporting progress via events.

    Raises:
        kopf.PermanentError: if the underlying API call fails.
    """
    logger.info("Updating existing Grafana dashboard object: %s", name)
    kopf.info(spec, reason="UpdatingDashboard",
              message="Updating Grafana dashboard.")
    # Fix: "udpating" typo in the debug message.
    logger.debug("Got the following keyword args for updating the object: %s",
                 kwargs)
    try:
        create_or_update_handler(spec, name, **kwargs)
        kopf.info(spec, reason="UpdatedDashboard",
                  message="Finished updating Grafana dashboard: %s." % name)
        logger.info("Finished updating Grafana dashboard: %s", name)
    except Exception as e:
        logger.error(("Failed to update Grafana dashboard due to the "
                      "following exception: %s"), e)
        kopf.exception(spec, reason="APIError",
                       message=("Failed to update dashboard due to API "
                                "error: %s" % e))
        # Fix: copy-pasted "creating" message in the update handler.
        raise kopf.PermanentError("Failed updating the dashboard") from e
def handler(spec, name, **kwargs):
    """Create or update a Grafana dashboard depending on the triggering event.

    kwargs['event'] selects the action: 'create' creates; anything else
    updates. Returns the API response from create_or_update_handler.

    Raises:
        kopf.PermanentError: if the underlying API call fails.
    """
    logger.debug("Event: %s", kwargs['event'])
    action = kwargs["event"]
    try:
        if action == 'create':
            logger.info("Creating new Grafana dashboard: %s", name)
            kopf.info(
                spec, reason="CreatingDashboard",
                message="Creating new grafana-dashboard."
            )
            logger.debug(
                "Got the following keyword args for creating the object: %s",
                kwargs)
            resp = create_or_update_handler(spec, name, **kwargs)
            kopf.info(
                spec, reason="CreatedDashboard",
                message=("Finished creating dashboard "
                         "at %s." % resp["grafana_url"])
            )
            logger.info("Finished creating Grafana dashboard: %s", name)
        else:
            logger.info("Updating existing Grafana dashboard object: %s", name)
            kopf.info(
                spec, reason="UpdatingDashboard",
                message="Updating Grafana dashboard."
            )
            logger.debug(
                "Got the following keyword args for updating the object: %s",
                kwargs)
            resp = create_or_update_handler(spec, name, **kwargs)
            kopf.info(
                spec, reason="UpdatedDashboard",
                message="Finished updating Grafana dashboard: %s." % name
            )
            logger.info("Finished updating Grafana dashboard: %s", name)
        return resp
    except Exception as e:
        logger.error(
            ("Failed to %s Grafana dashboard due to the "
             "following exception: %s"),
            action, e,
        )
        # Fix: the message was a tuple ("...%s...", action, e), not a
        # formatted string — the event text showed the raw tuple.
        kopf.exception(
            spec,
            reason="APIError",
            message=("Failed to %s dashboard due to API "
                     "error: %s" % (action, e)),
        )
        raise kopf.PermanentError("Failed creating the dashboard") from e
def update_psswd(body, spec, diff, status, logger, **kwargs):
    """Handle password changes on a MySQL custom resource.

    Scans the kopf diff for a change to spec.password; when found, runs a
    password-change Job and patches the Deployment with the re-rendered
    manifest containing the new credentials.
    """
    name = body['metadata']['name']
    image = body['spec']['image']
    password = body['spec']['password']
    database = body['spec']['database']
    # Re-render the deployment manifest with the (possibly new) credentials.
    deployment_patch = render_template('mysql-deployment.yml.j2', {
        'name': name,
        'image': image,
        'password': password,
        'database': database
    })
    # kopf diff entries are (op, path, old, new); look for a "change" on
    # the field whose path ends in "password".
    for diff_entry in diff:
        if diff_entry[0] == "change":
            if diff_entry[1][1] == "password":
                old_pass = diff_entry[2]
                new_pass = diff_entry[3]
                pass_change_job = render_template(
                    'pass-change-job.yml.j2', {
                        'name': name,
                        'image': image,
                        'old_password': old_pass,
                        'new_password': new_pass
                    })
                api = kubernetes.client.BatchV1Api()
                api.create_namespaced_job('default', pass_change_job)
                # Record the event once the password-change job has finished.
                if wait_until_job_end(f"pass-change-{name}-job"):
                    kopf.info(body, reason="PassChange",
                              message="Password for " + name +
                              " has been changed.")
                # NOTE(review): original formatting was lost; the deployment
                # patch appears to run regardless of the job outcome —
                # confirm the intended nesting against the upstream source.
                api = kubernetes.client.AppsV1Api()
                api.patch_namespaced_deployment(name, 'default',
                                                deployment_patch)
async def delete_fn(body, spec, meta, new, diff, old, logger, **kwargs):
    """On resource deletion, destroy the terraform plan when destroyOnDelete
    is enabled and a planId is recorded; otherwise just log the decision."""
    terrayaml = spec.get('terrayaml')
    destroyOnDelete = spec.get('destroyOnDelete', False)
    planId = spec.get('planId')
    name = meta.get('name')
    team = meta.get('team')
    env = meta.get('environment')
    app = meta.get('application')

    # Same detail string for both event branches.
    details = (f"planid: {planId} name: {name} team: {team} "
               f"environment: {env} app: {app}")

    if destroyOnDelete is True and planId:
        kopf.info(body, reason='destroyOnDelete is True', message=details)
        process_destroy(planId=planId, logger=logger)
    else:
        kopf.info(body, reason='destroyOnDelete is False', message=details)
def patch_namespaced_secret(name, namespace, patch):
    """Apply *patch* to the named secret, reporting the outcome via kopf.

    Raises:
        kopf.PermanentError: if the Kubernetes API rejects the patch.
    """
    try:
        core_api = kubernetes.client.CoreV1Api()
        core_api.patch_namespaced_secret(name=name,
                                         namespace=namespace,
                                         body=patch)
        kopf.info(patch,
                  reason="UPDATED",
                  message=f"Updated secret {name} in namespace {namespace}")
    except ApiException as e:
        message = f"Failed to patch secret {name}: {e.reason} | {e.status}"
        kopf.exception(patch, reason=e.reason, message=message)
        raise kopf.PermanentError(message)
def create_namespaced_secret(instrumentation_key, name, namespace):
    """Create a secret from the template, reporting the outcome via kopf.

    Raises:
        kopf.PermanentError: if the Kubernetes API rejects the creation.
    """
    try:
        core_api = kubernetes.client.CoreV1Api()
        body = parse_secret_template(name, instrumentation_key)
        core_api.create_namespaced_secret(namespace=namespace, body=body)
        kopf.info(body,
                  reason="CREATED",
                  message=f"Created secret {name} in namespace {namespace}")
    except ApiException as e:
        message = f"Failed to create secret {name}: {e.reason} | {e.status}"
        kopf.exception(body, reason=e.reason, message=message)
        raise kopf.PermanentError(message)
def get_owner_by_kind(resource, kind_list):
    """Return the first owner of *resource* whose kind is in *kind_list*.

    Args:
        resource: custom object whose metadata.ownerReferences is scanned.
        kind_list: list of acceptable owner kinds (e.g. ["Certificate"]).

    Returns:
        The owner custom object (dict), or None when no owner matches.

    Raises:
        kopf.TemporaryError: if the owner lookup fails in the API.
    """
    import json  # local: only needed to decode API error bodies

    ownerReferences = resource["metadata"].get("ownerReferences", [])
    matches = [
        owner for owner in ownerReferences if owner["kind"] in kind_list
    ]
    # Fix: the original `(owner,) = [...]` raised ValueError when there was
    # no match (or several), so the documented None branch was unreachable.
    if not matches:
        kopf.info(
            resource,
            reason="Debugging",
            message=f"get_owner_by_kind: {kind_list} not found",
        )
        return None
    owner = matches[0]
    group, version = owner["apiVersion"].split("/")
    namespace = resource["metadata"]["namespace"]
    kwargs = dict(
        group=group,
        version=version,
        plural=owner["kind"].lower() + "s",
        name=owner["name"],
    )
    try:
        api = k8s.CustomObjectsApi()
        # ownerRefs don't have namespace attributes, so we have to try both.
        # Most resources are namespaced, so do that first. Namespaced owners
        # have to be in the same namespace.
        try:
            owner = api.get_namespaced_custom_object(namespace=namespace,
                                                     **kwargs)
        except k8s.exceptions.OpenApiException:
            owner = api.get_cluster_custom_object(**kwargs)
    except k8s.exceptions.OpenApiException as err:
        # Fix: err.body is JSON — parse it instead of eval() on API data.
        raise kopf.TemporaryError(json.loads(err.body)["message"]) from err
    kopf.info(resource, reason="Debugging",
              message=f"get_owner_by_kind: {kwargs}")
    return owner
def create_handler(spec, name, **kwargs):
    """Create a new Grafana dashboard and emit progress events.

    Returns the API response; raises kopf.PermanentError on failure.
    """
    logger.info("Creating new Grafana dashboard: %s", name)
    kopf.info(spec, reason="CreatingDashboard",
              message="Creating new grafana-dashboard.")
    logger.debug("Got the following keyword args for creating the object: %s",
                 kwargs)
    try:
        resp = create_or_update_handler(spec, name, **kwargs)
        kopf.info(spec,
                  reason="CreatedDashboard",
                  message=("Finished creating dashboard "
                           "at %s." % resp["grafana_url"]))
        logger.info("Finished creating Grafana dashboard: %s", name)
        return resp
    except Exception as exc:
        logger.error(("Failed to create Grafana dashboard due to the "
                      "following exception: %s"), exc)
        kopf.exception(spec,
                       reason="APIError",
                       message=("Failed to create dashboard due to API "
                                "error: %s" % exc))
        raise kopf.PermanentError("Failed creating the dashboard")
def delete_handler(spec, name, body, **kwargs):
    """Delete the Grafana dashboard recorded in the object's status.

    The Grafana API is only called when the creation handler previously
    stored a dashboard uid in status; otherwise deletion is a no-op that
    just reports success.
    """
    logger.info("Deleting Grafana dashboard: %s", name)
    kopf.info(spec, reason="DeletingDashboard",
              message="Deleting grafana dashboard.")
    logger.debug("Got the following keyword args for deleting the object: %s",
                 kwargs)
    # Fetch the uid / try deleting the dashboard only if the object creation
    # was successful earlier.
    if "status" in body:
        status = body["status"]
        if status.get("handler"):
            uid = body["status"]["handler"]["uid"]
            try:
                legend_config = load_legend_config()
                status = delete_dashboard(legend_config, uid)
                kopf.info(spec,
                          reason="DeletedDashboard",
                          message="Finished deleting dashboard: %s." % name)
                logger.info("Finished deleting Grafana dashboard: %s", name)
                return {"status": status}
            except Exception as exc:
                logger.error(("Failed to delete dashboard due to the "
                              "following exception: %s"), exc)
                kopf.exception(spec,
                               reason="APIError",
                               message=("Failed to delete dashboard due to "
                                        "API error: %s" % exc))
                raise kopf.PermanentError("Failed deleting the dashboard")
        else:
            # No uid was recorded: nothing to delete in Grafana.
            kopf.info(spec,
                      reason="DeletedDashboard",
                      message="Finished deleting dashboard: %s." % name)
            logger.info("Finished deleting Grafana dashboard: %s", name)
            return {"status": "Deleted"}
def create_fn(body: TestEngineBody):  # pylint: disable=too-many-statements
    """Orchestrate a test-engine run.

    Steps: check dependent pods; run an io_utility pod to pull tests onto
    the PVC; run the engine pod; run an io_utility pod to push reports off
    the PVC. Returns a dict of the created pods keyed by step.

    Raises:
        kopf.PermanentError: for unfixable spec/dependency problems.
        kopf.TemporaryError: for retryable API failures.
    """
    name = body["metadata"]["name"]
    namespace = body["metadata"]["namespace"]
    spec = body["spec"]

    # Validate the spec shape up front; these cannot be retried.
    # NOTE(review): assert is stripped under `python -O`; this validation
    # pattern relies on asserts being enabled.
    try:
        assert "tests" in spec, "spec missing 'tests' section"
        # Fix: message previously said 'tests' for the 'reports' check.
        assert "reports" in spec, "spec missing 'reports' section"
        assert "pvc" in spec["tests"], "'tests' section must have 'pvc'"
        assert "pvc" in spec["reports"], "'reports' section must have 'pvc'"
    except AssertionError as err:
        raise kopf.PermanentError(f"Invalid spec: {err}")

    # Wait for any declared dependency pods to reach the required statuses.
    if "dependencies" in spec:
        try:
            for dependency in spec["dependencies"]:
                pod_name = dependency.get("name")
                pod_labels = dependency.get("labels")
                pod_statuses = dependency.get("statuses")
                pods.check_dependent_pods(
                    namespace=namespace,
                    name=pod_name,
                    statuses=pod_statuses,
                    labels=pod_labels,
                )
        except (ApiException, RuntimeError) as err:
            raise kopf.TemporaryError(
                f"Encountered problem checking dependent pods: {err}"
            )
        except AssertionError as err:
            raise kopf.PermanentError(f"Must resolve issue with dependencies: {err}")

    created_pods = {}

    # Pre-transfer: pull test assets from the remote path onto the tests PVC.
    if "remotePath" in spec["tests"]:
        try:
            tests = spec["tests"]
            pvc_name = tests["pvc"]
            local_path = tests.get("localPath", os.path.join("/", pvc_name))
            remote_path = tests["remotePath"]
            transfer_path = f"{remote_path}==file://{local_path}"
            pre_transfer_pod = pods.run_io_utility(
                test_engine_name=name,
                namespace=namespace,
                pvc_name=pvc_name,
                io_config=spec.get("ioConfig", {}),
                transfer_path=transfer_path,
            )
            # Fix: message said "post transfer pod" for the pre-transfer step.
            kopf.info(
                body,
                reason="StepComplete",
                message=f"Completed running pre transfer pod (name: {pre_transfer_pod.metadata.name})",
            )
            created_pods["pre_transfer"] = pre_transfer_pod
        except ApiException as err:
            raise kopf.TemporaryError(f"Encountered problem with io utility: {err}")
        except (AssertionError, RuntimeError) as err:
            raise kopf.PermanentError(f"Must resolve issue with io utility: {err}")

    # Engine: run the tests against the mounted PVCs.
    try:
        engine_pod = pods.run_engine(
            test_engine_name=name,
            namespace=namespace,
            tests_pvc=spec["tests"]["pvc"],
            reports_pvc=spec["reports"]["pvc"],
            tests_mount_path=spec["tests"].get("mountPath", "/tests"),
            reports_mount_path=spec["reports"].get("mountPath", "/reports"),
            engine_service_account=spec.get("engineServiceAccount"),
            engine_config=spec.get("engineConfig"),
        )
        # Fix: message said "post transfer pod" for the engine step.
        kopf.info(
            body,
            reason="StepComplete",
            message=f"Completed running engine pod (name: {engine_pod.metadata.name})",
        )
        created_pods["engine"] = engine_pod
    except ApiException as err:
        raise kopf.TemporaryError(f"Encountered problem with engine: {err}")
    except RuntimeError as err:
        raise kopf.PermanentError(f"Must resolve issue with engine: {err}")

    # Post-transfer: push the generated reports from the PVC to remote storage.
    if "remotePath" in spec["reports"]:
        try:
            reports = spec["reports"]
            pvc_name = reports["pvc"]
            local_path = reports.get("localPath", os.path.join("/", pvc_name))
            remote_path = reports["remotePath"]
            transfer_path = f"file://{local_path}=={remote_path}"
            post_transfer_pod = pods.run_io_utility(
                test_engine_name=name,
                namespace=namespace,
                pvc_name=pvc_name,
                io_config=spec.get("ioConfig", {}),
                transfer_path=transfer_path,
            )
            kopf.info(
                body,
                reason="StepComplete",
                message=f"Completed running post transfer pod (name: {post_transfer_pod.metadata.name})",
            )
            created_pods["post_transfer"] = post_transfer_pod
        except ApiException as err:
            raise kopf.TemporaryError(f"Encountered problem with io utility: {err}")
        except (AssertionError, RuntimeError) as err:
            raise kopf.PermanentError(f"Must resolve issue with io utility: {err}")

    return created_pods
def create_event_types(body, spec, name, namespace, logger, **kwargs):
    """Create the Argo EventSource, Gateway, and Sensor for a webhook
    EventType custom resource; non-webhook types are ignored."""
    logger.info(f"{body}")
    logger.info(f"{spec}")
    event_type = spec.get('type')
    event_type_name = spec.get('event_type_name')

    if event_type != 'webhook':
        return

    # webhook example
    # apiVersion: kubeaction.spaceone.dev/v1alpha1
    # kind: EventType
    # metadata:
    #   name: webhook-event-sourc
    # spec:
    #   type: webhook
    #   event_type_name: ci-webhook
    #   gateway_replica: 1
    #   sensor_port: 9300
    #   events:
    #     intergraion:
    #       port: 12000
    #       endpoint: /intergraion
    #       method: POST
    #     etc:
    #       port: 12001
    #       endpoint: /etc
    #       method: POST
    raw_events = spec.get('events', {})
    sensor_port = spec.get('sensor_port')
    events = {}
    service_ports = []
    event_names = []
    for event_name, event_cfg in raw_events.items():
        port = event_cfg.get('port')
        service_ports.append(port)
        event_names.append(event_name)
        events[event_name] = event_cfg
        # event source port must string type
        events[event_name]['port'] = f"{port}"

    event_source = ArgoWebHookEventSource(namespace, name, events)
    event_source_obj = event_source.to_dict()
    ArgoEventSourceAPI(namespace).create(body=event_source_obj)
    logger.info(event_source_obj)
    kopf.info(event_source_obj, reason='Created', message='Gateway created')

    gateway = ArgoWebHookGateway(namespace, name, service_ports, sensor_port,
                                 replica=spec.get('gateway_replica'))
    gateway_obj = gateway.to_dict()
    pprint(gateway_obj)  # debug dump of the generated gateway manifest
    ArgoGatewayAPI(namespace).create(body=gateway_obj)
    logger.info(gateway_obj)
    kopf.info(gateway_obj, reason='Created', message='Gateway created')

    sensor = ArgoWebHookSensor(
        namespace, name,
        event_names=event_names,
        sensor_port=sensor_port,
        triggers=[
            make_trigger_template(KUBEACTION_API, event_type_name,
                                  event_names)
        ])
    sensor_obj = sensor.to_dict()
    logger.info(sensor_obj)
    ArgoSensorsAPI(namespace).create(body=sensor_obj)
    kopf.info(sensor_obj, reason='Created', message='Sensor created')