Code example #1
async def adopt_service(meta, spec, body, namespace, labels, name, **kwargs):
    """Handler function for new services
    
    If the service has an oda.tmforum.org/componentName label, it makes the service a child of the named component.
    This helps with navigating the different resources that belong to the component, and it ensures that Kubernetes
    garbage collection deletes these resources automatically if the component is deleted.

    Args:
        * meta (Dict): The metadata from the yaml service definition 
        * spec (Dict): The spec from the yaml service definition showing the intent (or desired state) 
    * status (Dict): The status from the yaml service definition showing the actual state (received via **kwargs).
        * body (Dict): The entire yaml service definition
        * namespace (String): The namespace for the service
    * labels (Dict): The labels attached to the service. All ODA Components (and their children) should have an oda.tmforum.org/componentName label
        * name (String): The name of the service

    Returns:
        No return value.

    :meta public:
    """
    logger.debug(
        f"[adopt_service/{namespace}/{name}] handler called with spec: {spec}")
    logger.debug("adopt_service called for service - if it is part of a component (oda.tmforum.org/componentName as a label) then make it a child ")

    if 'oda.tmforum.org/componentName' in labels:

        # get the parent component object
        # str | the custom object's name
        component_name = labels['oda.tmforum.org/componentName']
        try:
            custom_objects_api = kubernetes.client.CustomObjectsApi()
            parent_component = custom_objects_api.get_namespaced_custom_object(
                GROUP, VERSION, namespace, COMPONENTS_PLURAL, component_name)
        except ApiException as e:
            # Can't find the parent component (if the component is in the same chart as
            # other kubernetes resources, it may not be created yet)
            if e.status == HTTP_NOT_FOUND:
                raise kopf.TemporaryError(
                    "Cannot find parent component " + component_name)
            else:
                logger.error(
                    "Exception when calling custom_objects_api.get_namespaced_custom_object: %s", e)
                raise  # parent_component would be undefined below

        # append owner reference to parent component
        newBody = dict(body)  # cast the service body to a dict
        kopf.append_owner_reference(newBody, owner=parent_component)
        core_api_instance = kubernetes.client.CoreV1Api()
        try:
            api_response = core_api_instance.patch_namespaced_service(
                newBody['metadata']['name'], newBody['metadata']['namespace'], newBody)
            logger.debug(
                'Patch service with owner. api_response = %s', api_response)
            logger.info(
                f'[adopt_service/{namespace}/{name}] Adding component {component_name} as parent of service')
        except ApiException as e:
            if e.status == HTTP_CONFLICT:  # Conflict = try again
                raise kopf.TemporaryError("Conflict updating service.")
            else:
                logger.error(
                    "Exception when calling core_api_instance.patch_namespaced_service: %s", e)
Code example #2
    def create_custom_resource(self, resource_object):
        api_group_version = resource_object['apiVersion']
        api_group = ApiGroup.get(api_group_version)
        if not api_group:
            raise kopf.TemporaryError(
                f"Unable to find information about apiVersion {api_group_version}",
                delay=60,
            )

        kind = resource_object['kind']
        api_resource = api_group.get_resource(kind=kind)
        if not api_resource:
            raise kopf.TemporaryError(
                f"Unable to find resource kind {kind} in apiVersion {api_group_version}",
                delay=60,
            )

        if api_resource.namespaced:
            return custom_objects_api.create_namespaced_custom_object(
                api_group.name, api_group.version, self.name, api_resource.plural, resource_object
            )
        else:
            return custom_objects_api.create_cluster_custom_object(
                api_group.name, api_group.version, api_resource.plural, resource_object
            )
Code example #3
def get_issuer_from_resource(resource):
    """return the named EstIssuer/EstClusterIssuer if Ready"""
    issuer_ref = resource["spec"]["issuerRef"]
    kwargs = dict(
        group=issuer_ref["group"],
        version=VERSION,
        plural=issuer_ref["kind"].lower() + "s",
        name=issuer_ref["name"],
    )
    if kwargs["plural"] == "estissuers":
        kwargs["namespace"] = resource["metadata"]["namespace"]
    try:
        api = k8s.CustomObjectsApi()
        if kwargs.get("namespace"):
            issuer = api.get_namespaced_custom_object(**kwargs)
        else:
            issuer = api.get_cluster_custom_object(**kwargs)
    except k8s.exceptions.OpenApiException as err:
        # err.body is a JSON string; parse it instead of eval()-ing it
        raise kopf.TemporaryError(json.loads(err.body)["message"])
    if ((issuer.get("status") is None)
            or (issuer["status"].get("estissuer_create") is None)
            or (issuer["status"]["estissuer_create"]["Ready"] != "True")):
        raise kopf.TemporaryError(f"{issuer_ref['name']} not ready")
    kopf.info(
        resource,
        reason="Debugging",
        message=f"get_issuer_from_resource: {issuer['metadata']['name']}",
    )
    return issuer
Code example #4
def delete(spec, **_):
    # Initialize k8s client
    kubernetes.config.load_incluster_config()
    api = kubernetes.client.CoreV1Api()

    # Get info from the trigger object
    exchange_topic = spec['topic']

    try:
        # Read secret
        secret = api.read_namespaced_secret(TRIGGERS_STORE_SECRET,
                                            TRIGGERS_NAMESPACE)
    except ApiException:
        raise kopf.TemporaryError(
            f'Secret {TRIGGERS_STORE_SECRET} not found in the namespace {TRIGGERS_NAMESPACE}',
            delay=30)

    try:
        # Update secret contents
        updated_store = json.dumps(
            remove_trigger_store(
                exchange_topic,
                json.loads(secret.body[TRIGGERS_SECRET_KEY])))
        secret.body = {TRIGGERS_SECRET_KEY: updated_store}
        api.replace_namespaced_secret(TRIGGERS_STORE_SECRET,
                                      TRIGGERS_NAMESPACE, secret)
    except MissingTriggerStore:
        raise kopf.TemporaryError(
            f'Trigger (topic {exchange_topic}) not found in the trigger secret store {TRIGGERS_STORE_SECRET}',
            delay=30)
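
A sketch of how such a delete handler might be wired up; the resource coordinates and constants below are illustrative, not taken from the project:

import kopf

TRIGGERS_NAMESPACE = 'triggers'            # hypothetical
TRIGGERS_STORE_SECRET = 'trigger-store'    # hypothetical
TRIGGERS_SECRET_KEY = 'triggers'           # hypothetical

@kopf.on.delete('example.org', 'v1', 'triggers')  # hypothetical CRD
def delete(spec, **_):
    ...  # body as in the example above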
Code example #5
async def check_nodes_present_or_gone(
    connection_factory,
    old_replicas: int,
    new_replicas: int,
    node_prefix: str,
    logger: logging.Logger,
):
    """
    :param connection_factory: A callable that allows the operator to connect
        to the database. We regularly need to reconnect to ensure the
        connection wasn't closed because it was opened to a CrateDB node that
        was shut down since the connection was opened.
    :param old_replicas: The number of replicas in a StatefulSet before
        scaling.
    :param new_replicas: The number of replicas in a StatefulSet after scaling.
    :param node_prefix: The prefix of the node names in CrateDB.
    :raises: A :class:`kopf.TemporaryError` when nodes are missing (scale up)
        or still available (scale down).
    """
    full_node_list = [
        f"{node_prefix}-{i}" for i in range(max(old_replicas, new_replicas))
    ]
    async with connection_factory() as conn:
        async with conn.cursor() as cursor:
            await cursor.execute(
                """
                SELECT name FROM sys.nodes WHERE name = ANY(%s)
                """,
                (full_node_list,),
            )
            rows = await cursor.fetchall()
            available_nodes = {r[0] for r in rows} if rows else set()
            candidate_node_names = {
                f"{node_prefix}-{i}"
                for i in range(
                    min(old_replicas, new_replicas), max(old_replicas, new_replicas)
                )
            }
            if old_replicas < new_replicas:
                # scale up. Wait for missing nodes
                if not candidate_node_names.issubset(available_nodes):
                    missing_nodes = ", ".join(
                        sorted(candidate_node_names - available_nodes))
                    raise kopf.TemporaryError(
                        f"Waiting for nodes {missing_nodes} to be present.", delay=15
                    )
            elif old_replicas > new_replicas:
                # scale down. Wait for the removed nodes to disappear
                if candidate_node_names & available_nodes:
                    excess_nodes = ", ".join(
                        sorted(candidate_node_names & available_nodes))
                    raise kopf.TemporaryError(
                        f"Waiting for nodes {excess_nodes} to be gone.", delay=15
                    )
            else:
                logger.info(
                    "No need to wait for nodes with prefix '%s', since the "
                    "number of replicas didn't change.",
                    node_prefix,
                )
Code example #6
File: kube.py Project: amadev/open4k
def wait_for_resource(klass, name, namespace=None, delay=60):
    try:
        find(klass, name, namespace)
    except pykube.exceptions.ObjectDoesNotExist:
        raise kopf.TemporaryError(
            f"The object: {klass.kind} with name '{name}' is not found yet.",
            delay=delay,
        )
    except Exception:
        raise kopf.TemporaryError(
            f"Unknown error occurred while getting object: {klass.kind}.",
            delay=delay,
        )
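
Usage sketch: calling wait_for_resource from inside a handler makes kopf re-run the whole handler after the given delay until the dependency appears. The resource coordinates below are illustrative:

import kopf
import pykube

@kopf.on.create('example.org', 'v1', 'instances')  # hypothetical CRD
def on_instance_create(name, namespace, **_):
    # Raises kopf.TemporaryError until the ConfigMap exists,
    # so kopf retries this handler every 10 seconds.
    wait_for_resource(pykube.ConfigMap, name, namespace, delay=10)
    # ... continue once the dependency exists ...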
Code example #7
 def set_owner(self, runtime):
     '''
     When an AnarchyAction is added or modified, verify that an
     ownerReference exists and create it if it does not.
     '''
     subject = self.get_subject(runtime)
     if not subject:
         raise kopf.TemporaryError(
             'Cannot find subject of the action "%s"' % self.action)
     governor = subject.get_governor(runtime)
     if not governor:
         raise kopf.TemporaryError(
             'Cannot find governor of the action "%s"' % self.action)
     runtime.custom_objects_api.patch_namespaced_custom_object(
         runtime.operator_domain, 'v1', runtime.operator_namespace,
         'anarchyactions', self.name, {
             'metadata': {
                 'labels': {
                     runtime.operator_domain + '/action': self.action,
                     runtime.operator_domain + '/governor': governor.name,
                     runtime.operator_domain + '/subject': subject.name,
                 },
                 'ownerReferences':
                 [{
                     'apiVersion': runtime.operator_domain + '/v1',
                     'controller': True,
                     'kind': 'AnarchySubject',
                     'name': subject.name,
                     'uid': subject.uid
                 }]
             },
             'spec': {
                 'governorRef': {
                     'apiVersion': runtime.operator_domain + '/v1',
                     'kind': 'AnarchyGovernor',
                     'name': governor.name,
                     'namespace': governor.namespace,
                     'uid': governor.uid
                 },
                 'subjectRef': {
                     'apiVersion': runtime.operator_domain + '/v1',
                     'kind': 'AnarchySubject',
                     'name': subject.name,
                     'namespace': subject.namespace,
                     'uid': subject.uid
                 }
             }
         })
Code example #8
File: node.py Project: amadev/open4k
async def node_status_update_handler(name, body, old, new, event, **kwargs):
    LOG.debug(f"Handling node status {event} event.")
    LOG.debug(f"The new state is {new}")

    # NOTE(vsaienko): get conditions from the object to avoid fake reporting by
    # calico when kubelet is down on the node.
    # Do not remove pods from a flapping node.
    node = kube.Node(kube.api, body)
    if node.ready:
        return True

    not_ready_delta = datetime.timedelta(
        seconds=settings.OSCTL_NODE_NOT_READY_FLAPPING_TIMEOUT)

    now = last_transition_time = datetime.datetime.utcnow()

    for cond in node.obj["status"]["conditions"]:
        if cond["type"] == "Ready":
            last_transition_time = datetime.datetime.strptime(
                cond["lastTransitionTime"], "%Y-%m-%dT%H:%M:%SZ")
    not_ready_for = now - last_transition_time
    if now - not_ready_delta < last_transition_time:
        # The node went NotReady less than the flapping timeout ago;
        # retry later instead of evicting pods from a flapping node.
        raise kopf.TemporaryError(
            f"The node is not ready for {not_ready_for.seconds}s")
    LOG.info(f"The node: {name} is not ready for {not_ready_for.seconds}s. "
             f"Removing pods...")
    node.remove_pods(settings.OSCTL_OS_DEPLOYMENT_NAMESPACE)
Code example #9
File: apiOperatorApig.py Project: tmforum-oda/oda-ca
def restCall(host, path, spec):
    APIG_MOCK = os.getenv('APIG_MOCK', "")
    if APIG_MOCK != "":
        return {"res_code": "00000", "res_message": APIG_MOCK}

    hConn = HTTPConnection(host)
    respBody = None
    try:
        data = json.dumps(spec)
        headers = {"Content-type": "application/json"}
        hConn.request('POST', path, data.encode('utf-8'), headers)
        logging.info("host: %s, path: %s, body: %s" % (host, path, data))
        resp = hConn.getresponse()
        data = resp.read()
        if data:
            respStr = data.decode('utf-8')
            logging.info("Rest api response code: %s, body: %s" % (resp.status, respStr))
            respBody = json.loads(respStr)
        hConn.close()
        if resp.status != 200:
            raise kopf.TemporaryError("Exception when calling rest api, return code: %s\n" % resp.status)
        return respBody
    except kopf.TemporaryError:
        raise  # don't let the generic handler below swallow the retry signal
    except Exception as StrError:
        hConn.close()
        logging.warning("Exception when calling restful api: %s\n" % StrError)
        time.sleep(2)
Code example #10
File: aws.py Project: silvermullet/vpce-operator
    def create_security_group(self):
        """Create an open security group for private network RFCs"""
        client = boto3.client('ec2', region_name=self.region)

        try:
            response = client.create_security_group(  # noqa
                Description='Generic security group for private network RFCs',
                GroupName=f'{self.name}-generic',
                VpcId=self.vpc_id,
                DryRun=False,
            )
        except ClientError as e:
            raise kopf.PermanentError(
                f"Error creating generic security group: {e}")

        try:
            logging.info(f"Tagging resource_id: {response['GroupId']}")
            client.create_tags(DryRun=False,
                               Resources=[response['GroupId']],
                               Tags=[
                                   {
                                       'Key': 'Name',
                                       'Value': f"{self.name}-{self.namespace}"
                                   },
                               ])
        except ClientError as e:
            raise kopf.TemporaryError(
                f"Failed to tag resource: {response['GroupId']}: {e}")

        return response['GroupId']
Code example #11
File: apiOperatorApig.py Project: tmforum-oda/oda-ca
def apigUnBind(meta, spec, status, body, namespace, labels, name, **kwargs):

    logging.debug(f"api has name: {meta['name']}")
    logging.debug(f"api has status: {status}")
    logging.debug(f"api is called with body: {spec}")
    
    MOCK_ALL = os.getenv('MOCK_ALL', "")
    if MOCK_ALL != "":
        return {"response": "success", "spec": MOCK_ALL }
    
    namespace = meta.get('namespace')
    apigEndpoint = os.getenv('APIG_ENDPOINT', "apig-operator-uportal.%s:8080" % namespace)
    apigUnBindPath = "/operator/AutoCreation/removeAPIFromSwagger"

    apiSpec = {
        "path": spec['path'],
        "name": meta['name'],
        "specification": spec['specification'],
        "implementation": spec['implementation'],
        "port": spec['port']
    }
    resp = restCall(apigEndpoint, apigUnBindPath, apiSpec)

    if not resp or resp['res_code'] != "00000":
        raise kopf.TemporaryError("UnBind apig failed, returned %s" % resp)
    return {"response": resp}
Code example #12
def rating_rules_creation(body: Dict, spec: Dict, logger: Logger,
                          **kwargs: Dict):
    """
    Create and validate the RatingRules through rating-api after creation in kubernetes.

    :body (Dict) A dictionary containing the created kubernetes object.
    :spec (Dict) A smaller version of body.
    :logger (Logger) A Logger object to log information.
    :kwargs (Dict) A dictionary holding unused parameters.
    """
    timestamp = body['metadata']['creationTimestamp']
    rules_name = body['metadata']['name']
    data = {
        'rules': spec.get('rules', {}),
        'metrics': spec.get('metrics', {}),
        'timestamp': timestamp
    }
    try:
        utils.post_for_rating_api(endpoint='/ratingrules/add', payload=data)
    except utils.ConfigurationException as exc:
        logger.error(f'RatingRules {rules_name} is invalid. Reason: {exc}')
    except requests.exceptions.RequestException:
        raise kopf.TemporaryError(
            f'Request for RatingRules {rules_name} update failed, retrying in 30s',
            delay=30)
    else:
        logger.info(
            f'RatingRule {rules_name} created, valid from {timestamp}.')
Code example #13
    def reconcile(meta, *args, **kwargs):
        gen = meta["generation"]
        # skip the last self-write
        # TBD for parallel reconciliation may need to lock rc.gen before patch
        if gen == rc.skip_gen:
            logger.info(f"Skipping gen {gen}")
            return

        spec = rc.run(*args, **kwargs)
        _, resp, e = util.check_gen_and_patch_spec(g, v, r, n, ns,
                                                   spec, gen=gen)
        if e is not None:
            if e.status == util.DriverError.GEN_OUTDATED:
                # retry s.t. the diff object contains the past changes
                # TBD(@kopf) non-zero delay fix
                raise kopf.TemporaryError(e, delay=0)
            else:
                raise kopf.PermanentError(e.status)

        # if the model didn't get updated do not
        # increment the counter
        new_gen = resp["metadata"]["generation"]
        if gen + 1 == new_gen:
            rc.skip_gen = new_gen
        logger.info(f"Done reconciliation")
Code example #14
async def create_host_certificate(name, namespace, body: HostCertificate, meta,
                                  spec: HostCertificateSpec, retry, patch,
                                  logger, **_):

    log.info('create_host_certificate: %s/%s %s', namespace, name, retry)

    assert isinstance(body, HostCertificate)

    group, version = body.apiVersion.split('/')
    issuer_name = spec.issuerRef.name
    issuer_kind = spec.issuerRef.kind
    issuer_namespace = None
    if issuer_kind == 'Issuer':
        issuer_namespace = namespace

    try:
        issuer = get_issuer_from_cache(issuer_kind, issuer_name,
                                       issuer_namespace)
    except ResourceNotFound as e:
        if retry < 5:
            raise kopf.TemporaryError('Issuer not found in cache.',
                                      delay=10) from e
        else:
            raise kopf.PermanentError(
                f'Issuer not found in cache after {retry} tries. Giving up.')

    print(f'hostcertificate: {body}')
    print(f'issuer: {issuer}')
Code example #15
async def check_for_deallocation(
    cursor: Cursor, node_names: List[str], logger: logging.Logger
):
    """
    Wait until the nodes ``node_names`` have no more shards.

    :param cursor: A database cursor to a current and open database connection.
    :param node_names: A list of CrateDB node names. These are the names that
        are known to CrateDB, e.g. ``data-hot-2`` or ``master-1``.
    """
    logger.info(
        "Waiting for deallocation of CrateDB nodes %s ...", ", ".join(node_names)
    )
    # We select the node names and the number of shards for all nodes that
    # will be torn down. As long as the rowcount is > 0 the nodes in question
    # still have shards allocated and thus can't be decommissioned.
    await cursor.execute(
        """
        SELECT node['name'], count(*)
        FROM sys.shards
        WHERE node['name'] = ANY(%s)
        GROUP BY 1
        """,
        (node_names,),
    )
    rows = await cursor.fetchall()
    if rows:
        allocations = ", ".join(f"{row[0]}={row[1]}" for row in rows)
        logger.info("Current pending allocation %s", allocations)
        raise kopf.TemporaryError("Pending allocation")
Code example #16
async def exec_add_rule(s, pod, _old, action, logger):
    rname = action['name']
    pname = pod['metadata']['name']
    logger.info(f'configuring pod:{pname} for rule:{rname}')
    l7mp_instance = get_l7mp_instance(pod)

    spec = action['spec']
    spec = convert_to_old_api(logger, 'rules', spec)
    rulelist = spec['rulelist']
    position = spec['position']
    rspec = deepcopy(spec['rule'])

    rspec['name'] = rname
    body = {'rule': rspec}

    #logger.debug(f'request: {request}')
    try:
        l7mp_instance.add_rule_to_rule_list(rulelist, position, body)
    except l7mp_client.exceptions.ApiException as e:
        content = json.loads(e.body).get('content', '')
        if e.status == 400 and content.endswith(' already defined'):
            # FIXME: cannot update, so this will be ok for now.
            logger.warning('already defined')
        else:
            logger.warning(f'request: {rulelist}, {position}, body:{body}')
            raise e
    except urllib3.exceptions.MaxRetryError as e:
        raise kopf.TemporaryError(f'{e}', delay=5)
    await set_owner_status(s, 'rules', rname, logger)
Code example #17
    async def _ensure_no_backup_cronjobs_running(self, namespace: str,
                                                 name: str,
                                                 logger: logging.Logger):
        async with ApiClient() as api_client:
            batch = BatchV1Api(api_client)

            jobs: V1JobList = await call_kubeapi(batch.list_namespaced_job,
                                                 logger,
                                                 namespace=namespace)
            for job in jobs.items:
                job_name = job.metadata.name
                labels = job.metadata.labels
                job_status: V1JobStatus = job.status
                if (labels.get("app.kubernetes.io/component") == "backup"
                        and labels.get("app.kubernetes.io/name") == name
                        and job_status.active is not None):
                    await kopf.execute(
                        fns={
                            "notify_backup_running":
                            subhandler_partial(self._notify_backup_running,
                                               logger)
                        })
                    raise kopf.TemporaryError(
                        "A snapshot k8s job is currently running, "
                        f"waiting for it to finish: {job_name}",
                        delay=30,
                    )
Code example #18
async def exec_add_dynamic_endpoint(s, pod, _old, action, logger):
    ename = action['name']
    pname = pod['metadata']['name']
    cname = action['target']

    logger.info(f'configuring pod:{pname} for d_endpoint:{ename}')

    l7mp_instance = get_l7mp_instance(pod)

    ep = {
        'name': action['name'],
        'spec': action['spec'],
    }
    endpoint_obj = l7mp_client.IoL7mpApiV1Cluster(**ep)
    request = l7mp_client.IoL7mpApiV1EndPointRequest(endpoint=endpoint_obj)
    try:
        l7mp_instance.add_end_point(cname, request)
    except l7mp_client.exceptions.ApiException as e:
        content = json.loads(e.body).get('content', '')
        if e.status == 400 and content.endswith(' already defined'):
            # FIXME: cannot update, so this will be ok for now.
            logger.warning('already defined')
        else:
            logger.warning(f'request: {request}')
            raise e
    except urllib3.exceptions.MaxRetryError as e:
        raise kopf.TemporaryError(f'{e}', delay=5)
Code example #19
async def exec_add_target(s, pod, _old, action, logger):
    tname = action['name']
    pname = pod['metadata']['name']
    tspec = action['spec']
    tspec = convert_to_old_api(logger, 'targets', tspec)
    logger.info(f'configuring pod:{pname} for target:{tname}')

    l7mp_instance = get_l7mp_instance(pod)

    cluster = deepcopy(tspec['cluster'])
    cluster['name'] = tname

    cluster_obj = l7mp_client.IoL7mpApiV1Cluster(**cluster)
    request = l7mp_client.IoL7mpApiV1ClusterRequest(cluster=cluster_obj)
    try:
        l7mp_instance.add_cluster(request)
    except l7mp_client.exceptions.ApiException as e:
        content = json.loads(e.body).get('content', '')
        if e.status == 400 and content.endswith(' already defined'):
            # FIXME: cannot update, so this will be ok for now.
            logger.warning('already defined')
        else:
            logger.warning(f"request:\n{request}")
            raise e
    except urllib3.exceptions.MaxRetryError as e:
        raise kopf.TemporaryError(f'{e}', delay=5)
    await set_owner_status(s, 'targets', tname, logger)
Code example #20
async def exec_add_vsvc(s, pod, _old, action, logger):
    vname = action['name']
    pname = pod['metadata']['name']
    vsvc_spec = action['spec']
    vsvc_spec = convert_to_old_api(logger, 'virtualservices', vsvc_spec)
    logger.info(f'configuring pod:{pname} for vsvc:{vname}')

    l7mp_instance = get_l7mp_instance(pod)

    listener = l7mp_client.IoL7mpApiV1Listener(
        name=vname,
        spec=vsvc_spec.get('listener', {}).get('spec'),
        rules=vsvc_spec.get('listener', {}).get('rules'))
    request = l7mp_client.IoL7mpApiV1ListenerRequest(listener=listener)
    logger.warning(f'request: {request}')
    try:
        l7mp_instance.add_listener(request)
    except l7mp_client.exceptions.ApiException as e:
        content = json.loads(e.body).get('content', '')
        if e.status == 400 and content.endswith(' already defined'):
            # FIXME: cannot update, so this will be ok for now.
            logger.warning('already defined')
        else:
            logger.warning(f'request: {request}')
            raise e
    except urllib3.exceptions.MaxRetryError as e:
        raise kopf.TemporaryError(f'{e}', delay=5)
    await set_owner_status(s, 'virtualservices', vname, logger)
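
Examples #16, #18, #19 and #20 repeat the same error handling around each l7mp call. A sketch of a shared helper (hypothetical, not part of the project) that captures the pattern:

import json

import kopf
import l7mp_client
import urllib3

def call_l7mp(logger, fn, *args, **kwargs):
    """Run an l7mp API call, tolerating 'already defined' responses and
    turning connection failures into kopf retries (hypothetical helper)."""
    try:
        fn(*args, **kwargs)
    except l7mp_client.exceptions.ApiException as e:
        content = json.loads(e.body).get('content', '')
        if e.status == 400 and content.endswith(' already defined'):
            # FIXME: cannot update, so this is accepted for now.
            logger.warning('already defined')
        else:
            raise
    except urllib3.exceptions.MaxRetryError as e:
        raise kopf.TemporaryError(f'{e}', delay=5)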
Code example #21
def securityClientDelete(meta, spec, status, body, namespace, labels, name,
                         **kwargs):
    """
    Handler to delete component from Keycloak
    """

    try:  # to authenticate and get a token
        token = kc.getToken(username, password)
    except requests.HTTPError as e:
        logger.error(
            formatCloudEvent(str(e), "secCon couldn't GET Keycloak token"))
        # without a token the delete below would fail with a NameError
        raise kopf.TemporaryError(
            "Could not get Keycloak token. Will retry.", delay=10)
    except requests.URLRequired as e:
        logger.error(
            formatCloudEvent(str(e), "secCon couldn't determine Keycloak URL"))
        raise kopf.PermanentError(
            "Could not determine Keycloak URL. Will NOT retry.")

    try:  # to delete the client from Keycloak
        kc.delClient(name, token, kcRealm)
    except requests.HTTPError as e:
        logger.error(
            formatCloudEvent(
                str(e),
                f"secCon couldn't DELETE client {name} in realm {kcRealm}"))
        raise kopf.TemporaryError(
            "Could not delete component from Keycloak. Will retry.", delay=10)
    except requests.URLRequired as e:
        logger.error(
            formatCloudEvent(str(e), "secCon couldn't determine Keycloak URL"))
        raise kopf.PermanentError(
            "Could not determine Keycloak URL. Will NOT retry.")
Code example #22
async def sleepy(s, logger, event, namespace, name, body, spec, **kwargs):
    logger.info(f"Handler {s} for event {event} with field {spec['field']}")
    # snooze = 10 * random.choice((0, 1))
    if event == "update" and s == 0 and spec[
            'field'] == 'value1' and random.choice((True, False)):
        # logger.info(f"Will sleep for {snooze}s")
        # await asyncio.sleep(snooze)
        # time.sleep(snooze)
        raise kopf.TemporaryError("BOOM!")

    child_def = {
        "kind": "KopfChild",
        "apiVersion": "zalando.org/v1",
        "spec": body["spec"],
        "metadata": {
            "name": f"{name}.{s}",
        },
    }
    logger.info(f"Applying spec with value {spec['field']}")
    try:
        child = KopfChild.objects(api).filter(namespace=namespace).get(
            name=child_def["metadata"]["name"])
        kopf.adopt(child_def, body)
        child.set_obj(child_def)
        child.update()
    except pykube.ObjectDoesNotExist:
        child = KopfChild(api, child_def)
        kopf.adopt(child_def, body)
        child.create()
Code example #23
def post_for_rating_api(endpoint: AnyStr, payload: Dict) -> Dict:
    """
    Send a POST request to the given endpoint of the rating-api.

    :endpoint (AnyStr) The endpoint to which to send the request.
    :payload (Dict) A dictionary containing everything to be embedded in the request.

    Return the results of the requests, as a dictionary.
    """
    api_url = envvar('RATING_API_URL')
    headers = {'content-type': 'application/json'}
    response = requests.post(url=f'{api_url}{endpoint}',
                             headers=headers,
                             json=payload)
    if response.status_code == 400:  # When ratingrule is wrong
        raise ConfigurationException(response.content.decode("utf-8"))
    elif response.status_code == 404:  # When object is not found
        raise ApiException
    try:
        response.raise_for_status()
    except requests.exceptions.RequestException:
        raise kopf.TemporaryError(
            'rated data failed to be transmitted (connection error), retrying in 5s..',
            delay=5)
    return response.json()
Code example #24
def security_client_delete(meta, spec, status, body, namespace, labels, name,
                           **kwargs):
    """
    Handler to delete component from Keycloak
    """

    # del unused-arguments for linting
    del meta, spec, status, body, namespace, labels, kwargs

    try:  # to authenticate and get a token
        token = kc.get_token(username, password)
    except RuntimeError as e:
        logger.error(
            format_cloud_event(str(e), 'secCon could not GET Keycloak token'))
        # without a token the delete below would fail with a NameError
        raise kopf.TemporaryError(
            'Could not get Keycloak token. Will retry.', delay=10)

    try:  # to delete the client from Keycloak
        kc.del_client(name, token, kcRealm)
    except RuntimeError as e:
        logger.error(
            format_cloud_event(
                str(e),
                f'secCon could not DELETE client {name} in realm {kcRealm}'))
        raise kopf.TemporaryError(
            'Could not delete component from Keycloak. Will retry.', delay=10)
    else:
        logger.info(
            format_cloud_event(f'oda.tmforum.org component {name} deleted',
                               'secCon: component deleted'))
Code example #25
async def background_async(spec, logger, retry, **_):
    if retry < 3:
        raise kopf.TemporaryError("Simulated failure.", delay=1)

    while True:
        logger.info(f"=> Ping from an async daemon: field={spec['field']!r}")
        await asyncio.sleep(5.0)
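
The retry argument and the infinite loop suggest this function is a kopf daemon. A sketch of the registration, with illustrative resource coordinates:

import asyncio
import kopf

@kopf.daemon('zalando.org', 'v1', 'kopfexamples')  # hypothetical resource
async def background_async(spec, logger, retry, **_):
    # kopf restarts a daemon that raises TemporaryError, incrementing `retry`.
    if retry < 3:
        raise kopf.TemporaryError("Simulated failure.", delay=1)
    while True:
        logger.info(f"=> Ping from an async daemon: field={spec['field']!r}")
        await asyncio.sleep(5.0)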
Code example #26
def process_vm(name, namespace, spec, operation='create', timeout=60):
    config = Kconfig(quiet=True)
    exists = config.k.exists(name)
    if operation == "delete" and exists:
        print("Deleting vm %s" % name)
        return config.k.delete(name)
    if operation == "create":
        if not exists:
            profile = spec.get("profile")
            if profile is None:
                if 'image' in spec:
                    profile = spec['image']
                else:
                    profile = name
            print("Creating vm %s" % name)
            if profile is not None:
                result = config.create_vm(name, profile, overrides=spec)
                if result['result'] != 'success':
                    return result
        info = config.k.info(name)
        image = info.get('image')
        if image is not None and 'ip' not in info:
            raise kopf.TemporaryError("Waiting to populate ip", delay=10)
        newspec = {'spec': {'info': info}}
        return update_vm_cr(name, namespace, newspec)
Code example #27
File: operator.py Project: redhat-cop/babylon
    def manage_resource_claims(self, logger):
        logger.debug(
            f"Manage ResourceClaims for {self.name} in namespace {self.namespace}"
        )

        try:
            workshop = self.get_workshop()
        except kubernetes.client.rest.ApiException as e:
            if e.status == 404:
                raise kopf.TemporaryError(
                    f"Workshop {self.workshop_name} was not found.", delay=30)
            else:
                raise

        resource_claim_count = 0
        provisioning_count = 0
        for resource_claim in self.list_resource_claims():
            resource_claim_count += 1
            if resource_claim.provision_complete:
                workshop.update_resource_claim(
                    logger=logger,
                    resource_claim=resource_claim,
                )
            else:
                provisioning_count += 1
        if resource_claim_count < self.count and provisioning_count < self.concurrency:
            self.create_resource_claim(logger=logger, workshop=workshop)
Code example #28
def handle_new_trigger(spec, **_):
    # Initialize k8s client
    kubernetes.config.load_incluster_config()
    api = kubernetes.client.CoreV1Api()

    # Get info from the trigger object
    destination_function = spec['function']
    exchange_topic = spec['topic']

    try:
        # Read secret
        secret = api.read_namespaced_secret(TRIGGERS_STORE_SECRET,
                                            TRIGGERS_NAMESPACE)

        # Update secret contents
        updated_store = json.dumps({
            exchange_topic: destination_function,
            **json.loads(secret.body[TRIGGERS_SECRET_KEY])
        })
        secret.body = {TRIGGERS_SECRET_KEY: updated_store}
        api.replace_namespaced_secret(TRIGGERS_STORE_SECRET,
                                      TRIGGERS_NAMESPACE, secret)
    except ApiException:
        raise kopf.TemporaryError(
            f'Secret {TRIGGERS_STORE_SECRET} not found in the namespace {TRIGGERS_NAMESPACE}',
            delay=30)
Code example #29
File: operator.py Project: redhat-cop/babylon
 def set_owner_references(self, logger):
     try:
         workshop = self.get_workshop()
     except kubernetes.client.rest.ApiException as e:
         if e.status == 404:
             raise kopf.TemporaryError(
                 f"Workshop {self.workshop_name} was not found.", delay=30)
         else:
             raise
     if not self.owner_references or workshop_label not in self.labels:
         logger.info(
             f"Setting ownerReferences and workshop label for WorkshopProvision {self.name} in namespace {self.namespace}"
         )
         custom_objects_api.patch_namespaced_custom_object(
             babylon_domain, babylon_api_version, self.namespace,
             'workshopprovisions', self.name, {
                 "metadata": {
                     "labels": {
                         workshop_label: workshop.name,
                     },
                     "ownerReferences": [{
                         "apiVersion":
                         f"{babylon_domain}/{babylon_api_version}",
                         "controller": True,
                         "kind": "Workshop",
                         "name": workshop.name,
                         "uid": workshop.uid,
                     }]
                 }
             })
Code example #30
 async def fn(**kwargs):
     dummy.mock()
     dummy.kwargs = kwargs
     dummy.steps['called'].set()
     if dummy.mock.call_count >= 5:
         kwargs['stopped']._stopper.set(
             reason=kopf.DaemonStoppingReason.NONE)  # to exit the cycle
     raise kopf.TemporaryError("boo!", delay=1.0)