async def add_kafka_topic_schema(spec, patch, **kwargs):
    def add_status(status: dict):
        patch.setdefault("status", {}).update(status)

    def add_result(msg: str):
        add_status({"result": msg})

    topic = spec.get("topic")
    if not topic:
        add_result("Topic not specified")
        raise kopf.PermanentError("Type not specified")

    key_schema = spec.get("keySchema")
    if key_schema:
        try:
            keyID = await add_key_schema(topic, key_schema)
        except Exception as e:
            add_result(f"Cannot add key schema: {e}")
            raise kopf.PermanentError(f"Cannot add key schema: {e}")
        else:
            add_status({"keySchemaID": keyID})

    value_schema = spec.get("valueSchema")
    if value_schema:
        try:
            valueID = await add_value_schema(topic, value_schema)
        except Exception as e:
            add_result(f"Cannot add value schema: {e}")
            raise kopf.PermanentError(f"Cannot add key schema: {e}")
        else:
            add_status({"valueSchemaID": valueID})

    add_result("OK")
Example 2
def create_fn(meta, spec, namespace, logger, **kwargs):

    name = meta.get('name')
    group_name = spec.get('groupName')
    rbac_role = spec.get('rbacRole')

    if not group_name:
        raise kopf.PermanentError(
            f"groupName must be set. Got {group_name!r}.")
    if not rbac_role:
        raise kopf.PermanentError(f"rbacRole must be set. Got {rbac_role!r}.")

    users_arns = get_group_membership(group_name)
    if isinstance(users_arns, Exception):
        raise Exception(
            "The group does not exist or the group membership is empty.")
    else:
        aws_auth_users = get_aws_auth_users()
        if aws_auth_users is not None:
            configmap_data = create_patch(users_arns,
                                          rbac_role,
                                          data=aws_auth_users)
        else:
            configmap_data = create_patch(users_arns, rbac_role)
        configmap_obj = create_configmap_object(configmap_data)

    try:
        api.patch_namespaced_config_map(name="aws-auth",
                                        namespace="kube-system",
                                        body=configmap_obj)
    except ApiException as e:
        print(
            "Exception when calling CoreV1API->patch_namespaced_config_map: %s\n"
            % e)
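Example 3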
def securityClientDelete(meta, spec, status, body, namespace, labels, name,
                         **kwargs):
    """
    Handler to delete component from Keycloak
    """

    try:  # to authenticate and get a token
        token = kc.getToken(username, password)
    except requests.HTTPError as e:
        logger.error(
            formatCloudEvent(str(e), "secCon couldn't GET Keycloak token"))
        raise kopf.TemporaryError(
            "Could not get Keycloak token. Will retry.", delay=10)
    except requests.URLRequired as e:
        logger.error(
            formatCloudEvent(str(e), "secCon couldn't determine Keycloak URL"))
        raise kopf.PermanentError(
            "Could not determine Keycloak URL. Will NOT retry.")

    try:  # to delete the client from Keycloak
        kc.delClient(name, token, kcRealm)
    except requests.HTTPError as e:
        logger.error(
            formatCloudEvent(
                str(e),
                f"secCon couldn't DELETE client {name} in realm {kcRealm}"))
        raise kopf.TemporaryError(
            "Could not delete component from Keycloak. Will retry.", delay=10)
    except requests.URLRequired as e:
        logger.error(
            formatCloudEvent(str(e), "secCon couldn't determine Keycloak URL"))
        raise kopf.PermanentError(
            "Could not determine Keycloak URL. Will NOT retry.")
Example 4
def _create_pod(namespace, complex_job, pod_def, logger):
    api = client.CoreV1Api()
    complex_job_name = complex_job['metadata']['name']

    pod_name = pod_def.get('name')
    if not pod_name:
        raise kopf.PermanentError("'spec.pods.pod.name' must be set.")

    pod_spec = pod_def.get('spec')
    if not pod_spec:
        raise kopf.PermanentError("'spec.pods.pod.spec' must be set.")

    pod = client.V1Pod()
    pod.metadata = client.V1ObjectMeta(name=f'{complex_job_name}-{pod_name}')
    pod.spec = pod_spec

    pod.metadata.labels = {
        "mytracks4mac.info/complexJob": complex_job_name,
        "mytracks4mac.info/podName": pod_name
    }

    # Create resource in cluster
    pod_dict = pod.to_dict()
    kopf.adopt(pod_dict, owner=complex_job)

    pod_obj = api.create_namespaced_pod(namespace=namespace, body=pod_dict)

    logger.info(f"POD child is created: %s", pod)
Example 5
def update_fn(logger, spec, old, new, diff, **kwargs):
    if not new or "spec" not in new:
        return get_result_message(f"invalid schema {new}")
    if "mappings" not in new["spec"]:
        new_role_mappings = AuthMappingList()
    else:
        new_role_mappings = AuthMappingList(new["spec"]["mappings"])
    if not old or "spec" not in old or "mappings" not in old["spec"]:
        old_role_mappings = AuthMappingList()
    else:
        old_role_mappings = AuthMappingList(old["spec"]["mappings"])

    if overwrites_protected_mapping(logger, new_role_mappings):
        return get_result_message("overwriting protected mapping not possible")
    try:
        auth_config_map = get_config_map()
        current_config_mapping = AuthMappingList(data=auth_config_map.data)
        # save current config before change
        write_last_handled_mapping(logger, current_config_mapping.get_values())

        # remove old stuff first
        current_config_mapping.remove_mappings(old_role_mappings)
        # add new values
        current_config_mapping.merge_mappings(new_role_mappings)
        auth_config_map = update_config_map(auth_config_map,
                                            current_config_mapping.get_data())
        response = write_config_map(auth_config_map)
        response_data = AuthMappingList(data=response.data)
        if len(new_role_mappings) > 0 and new_role_mappings not in response_data:
            raise kopf.PermanentError("Update Roles failed")
    except ApiException as e:
        raise kopf.PermanentError(f"Exception: {e}")
    return get_result_message("All good")
Example 6
def on_ac_delete(status: dict, logger, **_):
    try:  # making sure to catch all exceptions here to prevent blocking deletion
        identifier = get_ac_identifier(status)
        delete_ac(logger, identifier)
    except KeyError as error:
        raise kopf.PermanentError(
            "was not able to determine the AC ID for deletion") from error
    except Exception as error:
        raise kopf.PermanentError(f"deleting AC failed: {error}") from error
Example 7
def create_backup(spec, name, namespace, logger, **kwargs):

    label_selector = f"app={spec.get('instance')}"
    instance_name = spec.get('instance')
    database_name = spec.get('databaseName')
    s3_bucket = spec.get('s3Bucket')

    if not instance_name:
        raise kopf.PermanentError("Instance does not exist")

    if not database_name:
        raise kopf.PermanentError("Instance does not exist")

    if not s3_bucket:
        raise kopf.PermanentError("You should specify S3 Bucket")

    api = kubernetes.client.CoreV1Api()

    resp = api.list_namespaced_pod(namespace=namespace,
                                   label_selector=label_selector)

    for x in resp.items:
        pod_name = x.metadata.name
        logger.info(pod_name)

        resp = api.read_namespaced_pod(name=pod_name, namespace=namespace)

        exec_command = [
            '/bin/sh', '-c',
            "mysqldump -u root -p${MYSQL_ROOT_PASSWORD} %s > dump.sql" %
            (database_name, )
        ]

        resp = kubernetes.stream.stream(api.connect_get_namespaced_pod_exec,
                                        pod_name,
                                        namespace,
                                        command=exec_command,
                                        stderr=True,
                                        stdin=False,
                                        stdout=True,
                                        tty=False)

        exportSql = f"kubectl exec {pod_name} -- cat dump.sql > local_dump.sql"
        process = subprocess.Popen(exportSql.split(), stdout=subprocess.PIPE)
        output, error = process.communicate()
        s3_client = boto3.client('s3')
        temp_file = open("dump.sql", "w")
        temp_file.write(output.decode("utf-8"))
        temp_file.close()
        try:
            response = s3_client.upload_file("dump.sql", s3_bucket,
                                             f"{database_name}-dump.sql")
        except ClientError as e:
            logger.error(e)

        logger.info(resp)
Example 8
def create_permissions(spec, name, namespace, logger, **kwargs):

    label_selector = f"app={spec.get('instance')}"
    instance_name = spec.get('instance')
    user_name = spec.get('userName')
    permissions = spec.get('permissions')

    if not instance_name:
        raise kopf.PermanentError("Instance does not exist")
    if not user_name:
        raise kopf.PermanentError("You should define user name")
    if not permissions:
        raise kopf.PermanentError("You should define permissions")

    mysql_permissions = ""

    if "read" in permissions:
        mysql_permissions += "select"

    if "write" in permissions:
        if mysql_permissions:
            mysql_permissions += ","
        mysql_permissions += "insert,update,delete"

    api = kubernetes.client.CoreV1Api()

    resp = api.list_namespaced_pod(namespace=namespace,
                                   label_selector=label_selector)

    for x in resp.items:
        pod_name = x.metadata.name
        logger.info(pod_name)

        resp = api.read_namespaced_pod(name=pod_name, namespace=namespace)

        exec_command = [
            '/bin/sh', '-c', 'mysql -p${MYSQL_ROOT_PASSWORD} -e "GRANT ' +
            mysql_permissions + ' ON *.* TO \'' + user_name + '\'@\'%\'"'
        ]

        resp = kubernetes.stream.stream(api.connect_get_namespaced_pod_exec,
                                        pod_name,
                                        namespace,
                                        command=exec_command,
                                        stderr=True,
                                        stdin=False,
                                        stdout=True,
                                        tty=False)

        logger.info(resp)
Example 9
def run_workflow(task):
    results = luigi.build([task], detailed_summary=True)
    if task.temporary_error:
        raise kopf.TemporaryError(
            "Temporary Error: {}".format(task.error), delay=task.retry_delay)
    elif task.permanent_error:
        raise kopf.PermanentError(
            "Permanent Error: {}".format(task.error))
    elif results.status == LuigiStatusCode.FAILED:
        raise kopf.PermanentError(
            "Unknown Error: {}".format(results.summary_text))
Example 10
def create_role_binding(api: client.RbacAuthorizationV1Api,
                        configmap: Resource, cro_spec: ResourceChunk, ns: str,
                        name_suffix: str):
    logger = logging.getLogger('kopf.objects')
    role_bind_name = cro_spec.get("role", {}).get("bind")
    cluster_role_bind_namespaces = cro_spec.get("clusterRoleBindNamespaces",
                                                [])
    if not role_bind_name:
        tpl = yaml.safe_load(configmap.data['chaostoolkit-role-binding.yaml'])
        role_binding_name = tpl["metadata"]["name"]
        role_binding_name = f"{role_binding_name}-{name_suffix}"
        tpl["metadata"]["name"] = role_binding_name

        # change sa subject name
        sa_name = tpl["subjects"][0]["name"]
        sa_name = f"{sa_name}-{name_suffix}"
        tpl["subjects"][0]["name"] = sa_name

        # change sa subject namespace
        tpl["subjects"][0]["namespace"] = ns

        # change role name
        role_name = tpl["roleRef"]["name"]
        role_name = f"{role_name}-{name_suffix}"
        tpl["roleRef"]["name"] = role_name

        logger.debug(f"Creating role binding with template:\n{tpl}")

        if len(cluster_role_bind_namespaces) > 0:
            for namespace in cluster_role_bind_namespaces:
                # Deep-copy so per-namespace edits don't mutate the shared
                # template (requires `import copy`).
                cluster_tpl = copy.deepcopy(tpl)
                set_ns(cluster_tpl, namespace)
                try:
                    api.create_namespaced_role_binding(body=cluster_tpl,
                                                       namespace=namespace)
                except ApiException as e:
                    if e.status == 409:
                        logger.info(f"Role binding '{role_binding_name}' \
                                      already exists in {namespace}.")
                    else:
                        raise kopf.PermanentError(
                            f"Failed to bind to role: {str(e)}")

        set_ns(tpl, ns)
        try:
            api.create_namespaced_role_binding(body=tpl, namespace=ns)
            return tpl
        except ApiException as e:
            if e.status == 409:
                logger.info(
                    f"Role binding '{role_binding_name}' already exists.")
            else:
                raise kopf.PermanentError(f"Failed to bind to role: {str(e)}")
Example 11
def set_planId(old, new, meta, logger, **kwargs):
    logger.debug(f"planId new: {new}")
    logger.debug(f"planId old: {old}")
    if new and new != old:
        run_path = Path(new)
        plan_path = Path(new, 'plan')
        if not run_path.exists():
            raise kopf.PermanentError(f"Path to planId does not exist: {new}.")
        if not plan_path.exists():
            raise kopf.PermanentError(
                f"planId file does not exist: {plan_path.name}.")

        logger.info(f"planId path and plan file exist: {plan_path.name}")
Example 12
def wait_for_job_completion(job, namespace, timeout=300):

    start = time.time()
    while time.time() - start < timeout:
        time.sleep(2)
        response = client.BatchV1Api().read_namespaced_job_status(
            job, namespace)
        if response.status.completion_time is not None:
            return "Database init job done."
        if response.status.failed is not None:
            raise kopf.PermanentError('Job %s failed' % job)
    raise kopf.PermanentError('Waiting timeout for job %s' % job)
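Example 13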
def load_config_map(config_map):
    metadata = config_map['metadata']
    name = metadata['name']
    if 'data' not in config_map or 'config' not in config_map['data']:
        raise kopf.PermanentError('Config map must include config data')
    try:
        config = yaml.safe_load(config_map['data']['config'])
    except yaml.parser.ParserError as e:
        raise kopf.PermanentError('Unable to load config YAML: {0}'.format(str(e)))
    if 'secretNames' not in config:
        raise kopf.PermanentError('Config data must include secretNames')
    if not isinstance(config['secretNames'], list):
        raise kopf.PermanentError('Config data secretNames must be a list')
    return config
Example 14
    def _should_run_on_failed_dependency(self, annotations: dict,
                                         handler_name: str,
                                         logger: logging.Logger) -> bool:
        """
        There is no way in kopf to say if a certain handler has failed or not.

        What we are doing instead is peeking into kopf's internal state storage -
        the annotations on the CrateDB objects to check if the handler has failed.

        Slightly naughty, but there is no better way at the time of writing.
        """
        # Handler names have dots instead of slashes in annotations
        normalized_name = handler_name.replace("/", ".")
        key = f"{KOPF_STATE_STORE_PREFIX}/{normalized_name}"
        status_str = annotations.get(key)
        if not status_str:
            return False
        status = json.loads(status_str)
        if not status["success"] and status["failure"]:
            if self.run_on_dep_failures:
                logger.warning(
                    f"Our dependency ({handler_name}) has failed but we'll still run."
                )
                return True
            else:
                raise kopf.PermanentError(
                    f"A dependency ({handler_name}) has failed. Giving up.")

        return False
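For illustration, the annotation this method peeks at might look like the following. This is a sketch: "kopf.zalando.org" is kopf's default state-store annotation prefix, the handler name and timestamp are hypothetical, and only the "success"/"failure" keys are actually relied on above.

import json

# Hypothetical CrateDB object annotations as stored by kopf's progress storage.
annotations = {
    "kopf.zalando.org/sub_handler.update_cratedb": json.dumps({
        "started": "2021-01-01T00:00:00",
        "retries": 3,
        "success": False,
        "failure": True,
    })
}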
Example 15
def relabel(spec, status, namespace, old, new, logger, **kwargs):

    try:
        s = {}
        for k, v in spec.items():
            s[k] = v

        name = status.get('create').get('name')
        resource_group = s.pop('resourcegroup')
        location = s.pop('location')
        patch = {'metadata': {'labels': new}}

        kubernetes.patch_namespaced_secret(name, namespace, patch)

        tags = {}
        for k, v in new.items():
            tags[f"k8s_{k}"] = v

        logger.info(f"Updating app insights resource tags: {tags}.")
        resourceid, instrumentation_key = app_insights.create_or_update(
            resource_name=name,
            resource_group_name=resource_group,
            location=location,
            tags=tags,
            spec=s)

    except RuntimeError as e:
        raise kopf.PermanentError(
            f"Failed to relabel app insights resource {name}: {e}")
Example 16
def update(name, spec, status, meta, namespace, logger, **kwargs):

    try:
        logger.info(f"status {status}")
        logger.info(f"meta {meta}")

        s = {}
        for k, v in spec.items():
            s[k] = v

        resource_group = s.pop('resourcegroup')
        location = s.pop('location')

        labels = meta.get('labels', {})
        tags = {'k8s_namespace': namespace}
        for k, v in labels.items():
            tags[f"k8s_{k}"] = v

        logger.info(f"Updating app insights resource: {name}.")
        resourceid, instrumentation_key = app_insights.create_or_update(
            resource_name=name,
            resource_group_name=resource_group,
            location=location,
            tags=tags,
            spec=s)

    except RuntimeError as e:
        raise kopf.PermanentError(
            f"Failed to update app insights resource {name}: {e}")
Example 17
    def reconcile(meta, *args, **kwargs):
        gen = meta["generation"]
        # skip the last self-write
        # TBD for parallel reconciliation may need to lock rc.gen before patch
        if gen == rc.skip_gen:
            logger.info(f"Skipping gen {gen}")
            return

        spec = rc.run(*args, **kwargs)
        _, resp, e = util.check_gen_and_patch_spec(g, v, r, n, ns,
                                                   spec, gen=gen)
        if e is not None:
            if e.status == util.DriverError.GEN_OUTDATED:
                # retry s.t. the diff object contains the past changes
                # TBD(@kopf) non-zero delay fix
                raise kopf.TemporaryError(e, delay=0)
            else:
                raise kopf.PermanentError(e.status)

        # if the model didn't get updated do not
        # increment the counter
        new_gen = resp["metadata"]["generation"]
        if gen + 1 == new_gen:
            rc.skip_gen = new_gen
        logger.info(f"Done reconciliation")
Example 18
def process_middleglue(mg, namespace):
    logger.info(f'[{namespace}/{mg}] Processing MiddleGlue')
    cidrs = []

    source_mws = middleglues[namespace][mg]['sourceMiddlewares']
    managed_mw = middleglues[namespace][mg]['managedMiddleware']
    depth = middleglues[namespace][mg]['depth']
    cidrs.extend(middleglues[namespace][mg]['ips'])

    for source_mw in source_mws:
        try:
            if '/' in source_mw:
                source_namespace, source_name = source_mw.split('/')
            else:
                source_name = source_mw
                source_namespace = namespace
            ns_mw = common.Middleware.objects(api)
            ns_mw.namespace = source_namespace
            source_obj = ns_mw.get(name=source_name).obj
            cidrs.extend(source_obj['spec']['ipWhiteList']['sourceRange'])
        except pykube.ObjectDoesNotExist:
            logger.error(f'[{source_namespace}/{source_name}] Source Middleware does not exist')

    obj = common.gen_middleware(managed_mw, namespace, depth, cidrs, mg=mg)
    if common.Middleware(api, obj).exists():
        logger.info(f'[{namespace}/{managed_mw}] Updating managed MiddleWare')
        if not check_middleware_ownership(mg, obj):
            logger.error(f'[{namespace}/{managed_mw}] Managed MiddleWare is not handled by us. Not updating')
            raise kopf.PermanentError("Managed MiddleWare is not handled by us. Not updating")
        common.Middleware(api, obj).update()
    else:
        logger.info(f'[{namespace}/{managed_mw}] Creating managed MiddleWare')
        common.Middleware(api, obj).create()
Example 19
    def create_security_group(self):
        """Create an open security group for private network RFCs"""
        client = boto3.client('ec2', region_name=self.region)

        try:
            response = client.create_security_group(  # noqa
                Description='Generic security group for private network RFCs',
                GroupName=f'{self.name}-generic',
                VpcId=self.vpc_id,
                DryRun=False,
            )
        except ClientError as e:
            raise kopf.PermanentError(
                f"Error creating generic securit group: {e}")

        try:
            logging.info(f"Tagging resource_id: {response['GroupId']}")
            client.create_tags(DryRun=False,
                               Resources=[response['GroupId']],
                               Tags=[
                                   {
                                       'Key': 'Name',
                                       'Value': f"{self.name}-{self.namespace}"
                                   },
                               ])
        except ClientError as e:
            raise kopf.TemporaryError(
                f"Failed to tag resouce: {response['GroupId']}: {e}")

        return response['GroupId']
Example 20
def init_uptimerobot_api(logger):
    global uptime_robot
    try:
        uptime_robot = uptimerobot.create_uptimerobot_api()
    except Exception as error:
        logger.error('failed to create UptimeRobot API')
        raise kopf.PermanentError(error)
Example 21
def create_fn(spec, name, namespace, ilogger, **kwargs):

    size = spec.get('size')
    if not size:
        raise kopf.PermanentError(f"Size must be set. Got {size!r}.")

    path = os.path.join(os.path.dirname(__file__), 'pvc.yaml')
    print("path: ", path)
    tmpl = open(path, 'rt').read()
    print("templ type: ", type(tmpl))
    text = tmpl.format(name=name, size=size)
    data = yaml.safe_load(text)
    print("Data: ", str(data))

    # Set the hierarchy so the pvc will be owned by the evc, so when evc is deleted the pvc is deleted too!
    kopf.adopt(data)

    #ilogger(f"Data:\n%s",str(data))
    kubernetes.config.load_kube_config(config_file="/root/.kube/config",
                                       context="k8sb1")
    api = kubernetes.client.CoreV1Api()
    print("********")
    obj = api.create_namespaced_persistent_volume_claim(
        namespace=namespace,
        body=data,
    )

    ilogger.info(f"PVC child is created: %s", obj)
Example 22
async def fn(**kwargs):
    dummy.mock()
    dummy.kwargs = kwargs
    dummy.steps['called'].set()
    kwargs['stopped']._stopper.set(
        reason=kopf.DaemonStoppingReason.NONE)  # to exit the cycle
    raise kopf.PermanentError("boo!")
Example 23
    def create_k8s_service(self):
        """load svc.yaml and input the data that"""
        path = os.path.join(os.path.dirname(__file__), 'svc.yaml')

        try:
            tmpl = open(path, 'rt').read()
            text = tmpl.format(name=self.name,
                               namespace=self.namespace,
                               vpce_dns=self.vpce_dns)
        except OSError as err:
            logging.error(f"OS error: {err}")
            raise kopf.PermanentError(f"Could not read svc.yaml: {err}")

        try:
            data = yaml.safe_load(text)
        except yaml.YAMLError as e:
            logging.error(f"Yaml load error: {e}")
            raise kopf.PermanentError(f"Could not parse svc.yaml: {e}")

        try:
            logging.info(f"SVC body to be sent to k8s: {data}")
            api = kubernetes.client.CoreV1Api()
            obj = api.create_namespaced_service(
                namespace=self.namespace,
                body=data,
            )
            logging.info(f"SVC child is created: %s", obj)
        except ApiException as e:
            raise kopf.PermanentError(f"Error creating svc endpoint: {e}")

        return {'svc_endpoint_status': "created"}
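Example 24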
def create_role_binding(api: client.RbacAuthorizationV1Api,
                        configmap: Resource, cro_spec: ResourceChunk, ns: str,
                        name_suffix: str, logger: logging.Logger):
    role_bind_name = cro_spec.get("role", {}).get("bind")
    if not role_bind_name:
        tpl = yaml.safe_load(configmap.data['chaostoolkit-role-binding.yaml'])
        role_binding_name = tpl["metadata"]["name"]
        role_binding_name = f"{role_binding_name}-{name_suffix}"
        tpl["metadata"]["name"] = role_binding_name

        # change sa subject name
        sa_name = tpl["subjects"][0]["name"]
        sa_name = f"{sa_name}-{name_suffix}"
        tpl["subjects"][0]["name"] = sa_name

        # change role name
        role_name = tpl["roleRef"]["name"]
        role_name = f"{role_name}-{name_suffix}"
        tpl["roleRef"]["name"] = role_name

        set_ns(tpl, ns)
        try:
            api.create_namespaced_role_binding(body=tpl, namespace=ns)
            return tpl
        except ApiException as e:
            if e.status == 409:
                logger.info(
                    f"Role binding '{role_binding_name}' already exists.")
            else:
                raise kopf.PermanentError(
                    f"Failed to bind to role: {str(e)}")
Example 25
async def create_host_certificate(name, namespace, body: HostCertificate, meta,
                                  spec: HostCertificateSpec, retry, patch,
                                  logger, **_):

    log.info('create_host_certificate: %s/%s %s', namespace, name, retry)

    assert isinstance(body, HostCertificate)

    group, version = body.apiVersion.split('/')
    issuer_name = spec.issuerRef.name
    issuer_kind = spec.issuerRef.kind
    issuer_namespace = None
    if issuer_kind == 'Issuer':
        issuer_namespace = namespace

    try:
        issuer = get_issuer_from_cache(issuer_kind, issuer_name,
                                       issuer_namespace)
    except ResourceNotFound as e:
        if retry < 5:
            raise kopf.TemporaryError('Issuer not found in cache.',
                                      delay=10) from e
        else:
            raise kopf.PermanentError(
                f'Issuer not found in cache after {retry} tries. Giving up.')

    print(f'hostcertificate: {body}')
    print(f'issuer: {issuer}')
Example 26
def create_role(api: client.RbacAuthorizationV1Api, configmap: Resource,
                cro_spec: ResourceChunk, ns: str, name_suffix: str,
                psp: client.PolicyV1beta1PodSecurityPolicy = None):
    logger = logging.getLogger('kopf.objects')
    role_name = cro_spec.get("role", {}).get("name")
    if not role_name:
        tpl = yaml.safe_load(configmap.data['chaostoolkit-role.yaml'])
        role_name = tpl["metadata"]["name"]
        role_name = f"{role_name}-{name_suffix}"
        tpl["metadata"]["name"] = role_name
        set_ns(tpl, ns)

        # when a PSP is defined, we add a rule to use that PSP
        if psp:
            logger.info(
                f"Adding pod security policy {psp.metadata.name} use to role")
            psp_rule = yaml.safe_load(
                configmap.data['chaostoolkit-role-psp-rule.yaml'])

            set_rule_psp_name(psp_rule, psp.metadata.name)
            tpl["rules"].append(psp_rule)

        logger.debug(f"Creating role with template:\n{tpl}")
        try:
            api.create_namespaced_role(body=tpl, namespace=ns)
            return tpl
        except ApiException as e:
            if e.status == 409:
                logger.info(f"Role '{role_name}' already exists.")
            else:
                raise kopf.PermanentError(
                    f"Failed to create role: {str(e)}")
Example 27
def _create_k8s_svc(name, namespace, vpce_dns):
    try:
        svc = aws.K8s(name, namespace, vpce_dns=vpce_dns)
        svc_result = svc.create_k8s_service()
    except Exception as e:  # noqa
        raise kopf.PermanentError(f"Error creating svc endpoint: {e}") from e

    return svc_result
Example 28
def get_dns_name(service_name):
    domain_name = os.getenv('domain_name')
    if domain_name is None:
        raise kopf.PermanentError(f"domain_name needed as System ENV.")
    domain_prefix = os.getenv('domain_prefix', 'kube')
    # construct the dns_name
    dns_name = f'{domain_prefix}-{service_name}.{domain_name}'
    return dns_name
Example 29
def create_fn(retry, **kwargs):
    time.sleep(0.1)  # for different timestamps of the events
    if not retry:
        raise kopf.TemporaryError("First failure.", delay=1)
    elif retry == 1:
        raise MyException("Second failure.")
    else:
        raise kopf.PermanentError("Third failure, the final one.")
Example 30
def estissuer_create(spec, patch, body, **_):
    """validate and mark issuers as ready"""
    # Secret must exist and be the correct type
    secret = get_secret_from_resource(body)
    if secret is None:
        raise kopf.TemporaryError(f"{spec['secretName']} not found")
    baseUrl = f"https://{spec['host']}:{spec.get('port', 443)}"
    path = "/".join(
        i for i in [WELLKNOWN, spec.get("label"), "cacerts"] if i is not None)
    # fetch /cacerts using explicit TA
    cacert = base64.b64decode(spec["cacert"])
    with tempfile.NamedTemporaryFile(suffix=".pem") as cafile:
        cafile.write(cacert)
        cafile.seek(0)
        session = requests.Session()
        session.mount(baseUrl, SSLContextAdapter())
        try:
            response = session.get(baseUrl + path, verify=cafile.name)
        except (
                requests.exceptions.SSLError,
                requests.exceptions.RequestException,
        ) as err:
            patch.metadata.annotations["estoperator-perm-fail"] = "yes"
            raise kopf.PermanentError(err) from err
    # 200 OK is good, anything else is an error
    if response.status_code != 200:
        raise kopf.TemporaryError(
            f"Unexpected response: {response.status}, {response.reason}", )
    # configured cacert must be in EST portal bundle
    explicit = pem.parse(cacert)
    store = X509Store()
    for cert in explicit:
        store.add_cert(load_certificate(FILETYPE_PEM, cert.as_text()))
    try:
        for leaf in pkcs7.load_der_pkcs7_certificates(
                base64.b64decode(response.content)):
            context = X509StoreContext(
                store,
                load_certificate(FILETYPE_PEM,
                                 leaf.public_bytes(Encoding.PEM)),
            )
            context.verify_certificate()
    except X509StoreContextError as err:
        raise kopf.PermanentError(
            f"Unable to verify /cacerts content: {err}") from err
    return {"Ready": "True"}