async def test_skipping_hidden_levels(caplog, logstream, logfn, event_queue, event_queue_loop):
    logger = ObjectLogger(body=OBJ1)
    logger_fn = getattr(logger, logfn)
    logger_fn("hello %s", "world")
    logger.info("must be here")

    assert event_queue.qsize() == 1  # not 2!
    assert caplog.messages == ["hello world", "must be here"]
def terraform_destroy(planId: str, logger: KopfObjectLogger) -> tuple:
    logger.info(f"PLANID: {planId}")
    # TODO: check if planId exists; raise a kopf exception if not.
    ptf = python_terraform.Terraform(working_dir=planId)
    return_code, stdout, stderr = ptf.destroy(dir_or_plan=f"{planId}/plan",
                                              refresh=True,
                                              auto_approve=True)
    # return_code, stdout, stderr = 0, 'all destroyed', ''
    response = stdout if not stderr else stderr
    logger.info(f"TERRAFORM DESTROY COMPLETE: {return_code} {response}")
    return response, return_code
async def test_skipping_below_config(caplog, logstream, logfn, event_queue, event_queue_loop, mocker):
    logger = ObjectLogger(body=OBJ1)
    logger_fn = getattr(logger, logfn)
    mocker.patch.object(EventsConfig, 'events_loglevel', 666)
    logger_fn("hello %s", "world")
    mocker.patch.object(EventsConfig, 'events_loglevel', 0)
    logger.info("must be here")

    assert event_queue.qsize() == 1  # not 2!
    assert caplog.messages == ["hello world", "must be here"]
async def test_skipping_below_config(settings, caplog, logstream, logfn, event_queue, event_queue_loop, mocker):
    logger = ObjectLogger(body=OBJ1, settings=settings)
    logger_fn = getattr(logger, logfn)
    settings.posting.level = 666
    logger_fn("hello %s", "world")
    settings.posting.level = 0
    logger.info("must be here")

    assert event_queue.qsize() == 1  # not 2!
    assert caplog.messages == ["hello world", "must be here"]
def test_skipping_when_local_with_all_level(caplog, logstream, logfn):
    queue = asyncio.Queue()
    logger = ObjectLogger(body=OBJ1, event_queue=queue)
    getattr(logger, logfn)("hello %s", "world", local=True)

    assert queue.empty()
    assert caplog.messages == ["hello world"]
async def test_posting_normal_levels(caplog, logstream, logfn, event_type, event_queue, event_queue_loop):
    logger = ObjectLogger(body=OBJ1)
    logger_fn = getattr(logger, logfn)
    logger_fn("hello %s", "world")

    assert event_queue.qsize() == 1
    event1 = event_queue.get_nowait()
    assert event1.ref == REF1
    assert event1.type == event_type
    assert event1.reason == "Logging"
    assert event1.message == "hello world"
    assert caplog.messages == ["hello world"]
def test_posting_normal_levels(caplog, logstream, logfn, event_type):
    queue = asyncio.Queue()
    logger = ObjectLogger(body=OBJ1, event_queue=queue)
    getattr(logger, logfn)("hello %s", "world")

    assert queue.qsize() == 1
    event1 = queue.get_nowait()
    assert event1.ref == REF1
    assert event1.type == event_type
    assert event1.reason == "Logging"
    assert event1.message == "hello world"
    assert caplog.messages == ["hello world"]
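# A minimal sketch of the test scaffolding the ObjectLogger tests above rely on.
# The names here (OBJ1, REF1, logfn, logstream) are assumptions inferred from the tests;
# the project's real conftest.py may define them differently, and the remaining fixtures
# (event_type, event_queue, event_queue_loop, settings) are likewise assumed to come from it.
import logging
import pytest

OBJ1 = {'apiVersion': 'group1/version1', 'kind': 'Kind1',
        'metadata': {'uid': 'uid1', 'name': 'name1', 'namespace': 'ns1'}}

REF1 = {'apiVersion': 'group1/version1', 'kind': 'Kind1',
        'uid': 'uid1', 'name': 'name1', 'namespace': 'ns1'}


@pytest.fixture(params=['info', 'warning', 'error'])
def logfn(request):
    # The name of the ObjectLogger method under test.
    return request.param


@pytest.fixture()
def logstream(caplog):
    # Capture all records so that ``caplog.messages`` sees every log call.
    caplog.set_level(logging.DEBUG)
    return caplog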
def test_garbage_collection_of_log_handlers():
    event_queue = asyncio.Queue()
    native_logger = logging.getLogger(f'kopf.objects.{id(event_queue)}')
    assert len(native_logger.handlers) == 0

    object_logger = ObjectLogger(body=OBJ1, event_queue=event_queue)
    assert object_logger.logger is native_logger
    assert len(native_logger.handlers) == 1

    object_logger_ref = weakref.ref(object_logger)
    del object_logger
    gc.collect()  # triggers ObjectLogger.__del__()

    assert object_logger_ref() is None  # garbage-collected indeed.
    assert len(native_logger.handlers) == 0
async def test_posting_above_config(caplog, logstream, logfn, event_type, min_levelno, event_queue, event_queue_loop, mocker):
    logger = ObjectLogger(body=OBJ1)
    logger_fn = getattr(logger, logfn)
    mocker.patch.object(EventsConfig, 'events_loglevel', min_levelno)
    logger_fn("hello %s", "world")
    mocker.patch.object(EventsConfig, 'events_loglevel', min_levelno + 1)
    logger_fn("must not be posted")

    assert event_queue.qsize() == 1
    event1 = event_queue.get_nowait()
    assert event1.ref == REF1
    assert event1.type == event_type
    assert event1.reason == "Logging"
    assert event1.message == "hello world"
    assert caplog.messages == ["hello world", "must not be posted"]
async def test_posting_above_config(settings, caplog, logstream, logfn, event_type, min_levelno, event_queue, event_queue_loop, mocker):
    logger = ObjectLogger(body=OBJ1, settings=settings)
    logger_fn = getattr(logger, logfn)
    settings.posting.level = min_levelno
    logger_fn("hello %s", "world")
    settings.posting.level = min_levelno + 1
    logger_fn("must not be posted")

    assert event_queue.qsize() == 1
    event1 = event_queue.get_nowait()
    assert event1.ref == REF1
    assert event1.type == event_type
    assert event1.reason == "Logging"
    assert event1.message == "hello world"
    assert caplog.messages == ["hello world", "must not be posted"]
def terraform(working_dir: str, data: str, logger: KopfObjectLogger, apply: bool = False, planId: str = '') -> tuple:
    logger.info(f"WORKING IN DIR: {working_dir}")
    Path(f"{working_dir}/main.tf.json").write_text(data)

    ptf = python_terraform.Terraform(working_dir=working_dir)
    return_code, stdout, stderr = ptf.init()
    assert return_code != 1, f"Terraform Init Failed {stderr}"
    logger.info('TERRAFORM INIT COMPLETE')

    return_code, stdout, stderr = ptf.plan(refresh=True, out='plan')
    response = stdout if not stderr else stderr
    logger.info(f"TERRAFORM PLAN COMPLETE {response}")
    return response, return_code
async def handle_event(
        registry: registries.BaseRegistry,
        resource: registries.Resource,
        logger: logging_engine.ObjectLogger,
        patch: dict,
        event: dict,
):
    """
    Handle a received event, log but ignore all errors.

    This is a lightweight version of the cause handling, but for the raw events,
    without any progress persistence. Multi-step calls are also not supported.
    If the handler fails, it fails and is never retried.

    Note: K8s-event posting is skipped for `kopf.on.event` handlers,
    as they should be silent. Still, the messages are logged normally.
    """
    handlers = registry.get_event_handlers(resource=resource, event=event)
    for handler in handlers:
        # The exceptions are handled locally and are not re-raised, to keep the operator running.
        try:
            logger.debug(f"Invoking handler {handler.id!r}.")

            # TODO: also set the context-vars, even though most of them make no sense here.
            result = await invocation.invoke(
                handler.fn,
                event=event,
                patch=patch,
                logger=logger,
            )
        except Exception:
            logger.exception(f"Handler {handler.id!r} failed with an exception. Will ignore.", local=True)
        else:
            logger.info(f"Handler {handler.id!r} succeeded.", local=True)
            status.store_result(patch=patch, handler=handler, result=result)
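# For context, a raw-event handler of the kind that handle_event() above invokes.
# This is a hypothetical illustration: the resource names ('example.com', 'v1', 'examples')
# are placeholders. Errors raised here are logged and ignored, and nothing is posted
# as a K8s event (the framework's logger calls use local=True for these handlers).
import kopf


@kopf.on.event('example.com', 'v1', 'examples')
def event_fn(event, logger, **kwargs):
    # The handler receives the raw watch-event dict and the per-object logger.
    logger.debug(f"Raw event type: {event.get('type')}")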
async def apply_reaction_outcomes(
        *,
        resource: resources.Resource,
        body: bodies.Body,
        patch: patches.Patch,
        delays: Collection[float],
        logger: logging_engine.ObjectLogger,
        replenished: asyncio.Event,
) -> None:
    delay = min(delays) if delays else None

    if patch:
        logger.debug("Patching with: %r", patch)
        await patching.patch_obj(resource=resource, patch=patch, body=body)

    # Sleep strictly after patching, never before -- to keep the status proper.
    # The patching above, if done, interrupts the sleep instantly, so we skip it at all.
    # Note: a zero-second or negative sleep is still a sleep, it will trigger a dummy patch.
    if delay and patch:
        logger.debug(f"Sleeping was skipped because of the patch, {delay} seconds left.")
    elif delay is None and not patch:
        logger.debug(f"Handling cycle is finished, waiting for new changes since now.")
    elif delay is not None:
        if delay > WAITING_KEEPALIVE_INTERVAL:
            limit = WAITING_KEEPALIVE_INTERVAL
            logger.debug(f"Sleeping for {delay} (capped {limit}) seconds for the delayed handlers.")
            unslept_delay = await sleeping.sleep_or_wait(limit, replenished)
        elif delay > 0:
            logger.debug(f"Sleeping for {delay} seconds for the delayed handlers.")
            unslept_delay = await sleeping.sleep_or_wait(delay, replenished)
        else:
            unslept_delay = None  # no need to sleep? means: slept in full.

        if unslept_delay is not None:
            logger.debug(f"Sleeping was interrupted by new changes, {unslept_delay} seconds left.")
        else:
            # Any unique always-changing value will work; not necessarily a timestamp.
            dummy_value = datetime.datetime.utcnow().isoformat()
            dummy_patch = patches.Patch({'status': {'kopf': {'dummy': dummy_value}}})
            logger.debug("Provoking reaction with: %r", dummy_patch)
            await patching.patch_obj(resource=resource, patch=dummy_patch, body=body)
def process(terrayaml: str, metadata: dict, logger: KopfObjectLogger) -> str:
    #
    # User input YAML
    #
    env = jinja2.Environment()
    env.filters['random_password'] = random_password
    env.filters['random_name'] = random_name
    template = T(template_text=terrayaml, environment=env).render(**metadata)
    provision = yaml.load(template, Loader=yaml.FullLoader)
    logger.info(f"provision this template {provision}")

    #
    # Start terraform
    #
    meta = provision.pop('meta', {})
    team = meta.get('team', 'oss')
    profile = meta.get('profile', PROFILE)
    environment = meta.get('environment', 'testing')
    application = meta.get('application', 'wurkflow')
    statefile_region = meta.get('statefile_region', 'eu-west-1')

    ts = Terrascript()
    ts += Terraform(required_version=">= 0.12.7").backend(
        "s3",
        bucket=REMOTE_STATE_S3_BUCKET,
        key=f"k8/terrayaml-operator/{team}/{environment}/{application}-terraform.tfstate",
        region=statefile_region,
        profile=ROOT_PROFILE)

    #
    # Extract the notify component
    #
    recipients = []  # keep defined even when no 'notify' block is given
    notify = provision.pop('notify', None)
    if notify:
        # tuple of email, key
        recipient_emails = notify.get('email', [])
        # append our infra provisioner email
        recipient_emails.append('*****@*****.**')
        recipients = get_recipients_from_pgp(recipient_emails=recipient_emails)
        logger.info(f"notify these emails: {recipient_emails}")

    #
    # Parse the yaml
    #
    for provider in provision:
        # print(f"----- output for provider: {provider.upper()} -----")
        for resource, data in provision.get(provider).items():
            # print(f"----- output for resource: {resource} -----")
            for item in data.get('items', []):
                api = TF_YAML_MAP.get(resource)
                outputs = item.pop('outputs', [])
                item_name = item.pop('name', random_name(value=resource))
                tf_resource = api(item_name, **item)
                ts.add(tf_resource)

                # handle terraform outputs
                for opt in outputs:
                    assert getattr(tf_resource, opt.get('value')), \
                        f"{tf_resource} has no attribute {opt.get('value')}"
                    ts.add(Output(opt.get('name'),
                                  value=getattr(tf_resource, opt.get('value'))))

    # Add a provider (+= syntax)
    ts += Provider('aws',
                   skip_metadata_api_check=True,
                   profile=profile,
                   region=REGION)

    data = ts.dump()

    # Plan
    working_dir = tempfile.mkdtemp(dir='./runs')

    crd_api = kubernetes.client.CustomObjectsApi()
    selfLink = metadata.get('selfLink').split('/')

    # update with planId
    logger.info(f"planId: {working_dir}")
    crd_api.patch_namespaced_custom_object(
        group=selfLink[2],
        version=selfLink[3],
        name=selfLink[7],
        namespace=selfLink[5],
        plural=selfLink[6],
        body={"spec": {"planId": working_dir}})

    tf_response, tf_code = terraform(working_dir=working_dir, data=data, logger=logger)
    logger.info(f"Terraform Plan result: {tf_response}")

    if recipients:
        logger.info(f"Send email to {recipients}")
        send_email(to=recipients,
                   attachment=tf_response,
                   message_type='success' if tf_code != 1 else 'error')
    else:
        logger.info('No recipients defined')

    logger.info(f"PlanId is {working_dir}")
    return f"{working_dir}"
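# A hypothetical wiring of process() into a kopf creation handler, only to illustrate where
# the per-object logger and metadata come from. The group/version/plural
# ('terrayaml-operator.example.com', 'v1', 'terrayamls') and the 'terrayaml' spec field are
# assumptions for illustration, not taken from this repository.
import kopf


@kopf.on.create('terrayaml-operator.example.com', 'v1', 'terrayamls')
def create_fn(spec, meta, logger, **kwargs):
    # kopf injects a per-object logger; process() reports its progress through it.
    plan_id = process(terrayaml=spec.get('terrayaml', ''), metadata=dict(meta), logger=logger)
    return {'planId': plan_id}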
def process_apply(planId: str, logger: KopfObjectLogger) -> None:
    tf_response, tf_code = terraform_apply(planId=planId, logger=logger)
    logger.info(f"Terraform Apply result: {tf_response}")
async def apply_reaction_outcomes(
        *,
        settings: configuration.OperatorSettings,
        resource: resources.Resource,
        body: bodies.Body,
        patch: patches.Patch,
        delays: Collection[float],
        logger: logging_engine.ObjectLogger,
        replenished: asyncio.Event,
) -> None:
    delay = min(delays) if delays else None

    # Delete dummies on occasion, but don't trigger special patching for them [discussable].
    if patch:  # TODO: LATER: and the dummies are there (without additional methods?)
        settings.persistence.progress_storage.touch(body=body, patch=patch, value=None)

    # Actually patch if it contained payload originally or after dummies removal.
    if patch:
        logger.debug("Patching with: %r", patch)
        await patching.patch_obj(resource=resource, patch=patch, body=body)

    # Sleep strictly after patching, never before -- to keep the status proper.
    # The patching above, if done, interrupts the sleep instantly, so we skip it at all.
    # Note: a zero-second or negative sleep is still a sleep, it will trigger a dummy patch.
    if delay and patch:
        logger.debug(f"Sleeping was skipped because of the patch, {delay} seconds left.")
    elif delay is None and not patch:
        logger.debug(f"Handling cycle is finished, waiting for new changes since now.")
    elif delay is not None:
        if delay > WAITING_KEEPALIVE_INTERVAL:
            limit = WAITING_KEEPALIVE_INTERVAL
            logger.debug(f"Sleeping for {delay} (capped {limit}) seconds for the delayed handlers.")
            unslept_delay = await sleeping.sleep_or_wait(limit, replenished)
        elif delay > 0:
            logger.debug(f"Sleeping for {delay} seconds for the delayed handlers.")
            unslept_delay = await sleeping.sleep_or_wait(delay, replenished)
        else:
            unslept_delay = None  # no need to sleep? means: slept in full.

        # Exclude cases when touching immediately after patching (including: ``delay == 0``).
        if patch and not delay:
            pass
        elif unslept_delay is not None:
            logger.debug(f"Sleeping was interrupted by new changes, {unslept_delay} seconds left.")
        else:
            # Any unique always-changing value will work; not necessarily a timestamp.
            value = datetime.datetime.utcnow().isoformat()
            touch_patch = patches.Patch()
            settings.persistence.progress_storage.touch(body=body, patch=touch_patch, value=value)
            if touch_patch:
                logger.debug("Provoking reaction with: %r", touch_patch)
                await patching.patch_obj(resource=resource, patch=touch_patch, body=body)
def process_destroy(planId: str, logger: KopfObjectLogger) -> None:
    tf_response, tf_code = terraform_destroy(planId=planId, logger=logger)
    logger.info(f"Destroy complete: Terraform Destroy result: {tf_response}")
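# Similarly hypothetical wiring for the apply and destroy paths: an update handler that reacts
# to an assumed 'apply' flag in the spec, and a deletion handler that tears the plan down.
# The resource names and spec fields are assumptions for illustration only.
import kopf


@kopf.on.update('terrayaml-operator.example.com', 'v1', 'terrayamls')
def update_fn(spec, logger, **kwargs):
    # Apply a previously planned run once the user flips the (assumed) 'apply' flag.
    if spec.get('apply') and spec.get('planId'):
        process_apply(planId=spec['planId'], logger=logger)


@kopf.on.delete('terrayaml-operator.example.com', 'v1', 'terrayamls')
def delete_fn(spec, logger, **kwargs):
    # Destroy the provisioned resources when the custom object is deleted.
    if spec.get('planId'):
        process_destroy(planId=spec['planId'], logger=logger)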