async def test_skipping_hidden_levels(settings, caplog, logstream, logfn, event_queue, event_queue_loop):
    """Hidden-level log calls go to Python logging only, never to the K8s event queue."""
    obj_logger = ObjectLogger(body=OBJ1, settings=settings)
    getattr(obj_logger, logfn)("hello %s", "world")
    obj_logger.info("must be here")
    # Only the info() call is posted as a K8s event; the hidden-level call is suppressed.
    assert event_queue.qsize() == 1
    # Both calls still reach the regular Python logging machinery.
    assert caplog.messages == ["hello world", "must be here"]
async def test_skipping_below_config(settings, caplog, logstream, logfn, event_queue, event_queue_loop, mocker):
    """Log calls below the configured posting level are not posted as K8s events."""
    obj_logger = ObjectLogger(body=OBJ1, settings=settings)
    log_call = getattr(obj_logger, logfn)

    settings.posting.level = 666  # far above any real level: nothing gets posted
    log_call("hello %s", "world")
    settings.posting.level = 0  # back to "post everything"
    obj_logger.info("must be here")

    # Only the second call was posted as a K8s event.
    assert event_queue.qsize() == 1
    # Both calls still reach the regular Python logging machinery.
    assert caplog.messages == ["hello world", "must be here"]
async def test_posting_normal_levels(settings, caplog, logstream, logfn, event_type, event_queue, event_queue_loop):
    """Regular-level log calls are both logged locally and posted as K8s events."""
    obj_logger = ObjectLogger(body=OBJ1, settings=settings)
    getattr(obj_logger, logfn)("hello %s", "world")

    assert event_queue.qsize() == 1
    posted = event_queue.get_nowait()
    # The posted event carries the object reference, the level-mapped type,
    # a fixed "Logging" reason, and the %-formatted message.
    assert posted.ref == REF1
    assert posted.type == event_type
    assert posted.reason == "Logging"
    assert posted.message == "hello world"
    assert caplog.messages == ["hello world"]
async def test_posting_above_config(settings, caplog, logstream, logfn, event_type, min_levelno, event_queue, event_queue_loop, mocker):
    """Only calls at or above the configured posting level are posted as K8s events."""
    obj_logger = ObjectLogger(body=OBJ1, settings=settings)
    log_call = getattr(obj_logger, logfn)

    settings.posting.level = min_levelno  # exactly at the threshold: posted
    log_call("hello %s", "world")
    settings.posting.level = min_levelno + 1  # just above the threshold: skipped
    log_call("must not be posted")

    # Only the first call made it into the K8s event queue.
    assert event_queue.qsize() == 1
    posted = event_queue.get_nowait()
    assert posted.ref == REF1
    assert posted.type == event_type
    assert posted.reason == "Logging"
    assert posted.message == "hello world"
    # Both calls still reach the regular Python logging machinery.
    assert caplog.messages == ["hello world", "must not be posted"]
async def process_resource_causes(
        lifecycle: execution.LifeCycleFn,
        indexers: indexing.OperatorIndexers,
        registry: registries.OperatorRegistry,
        settings: configuration.OperatorSettings,
        resource: references.Resource,
        raw_event: bodies.RawEvent,
        body: bodies.Body,
        patch: patches.Patch,
        memory: inventory.ResourceMemory,
        local_logger: loggers.ObjectLogger,
        event_logger: loggers.ObjectLogger,
) -> Tuple[Collection[float], bool]:
    """
    Interpret one watch-event for an object and run every applicable handler kind.

    The routine detects up to three "causes" (low-level watching, daemon
    spawning, high-level changing), manages the finalizer that blocks or allows
    the object's deletion, and dispatches to the per-cause processors.

    Returns a 2-tuple of:
    * the delays requested by the handlers before the next processing cycle;
    * a flag telling whether a high-level (changing) cause was processed.
    """
    finalizer = settings.persistence.finalizer

    # Fields used by filters/callbacks must be included into the diff-base essence,
    # or the handlers would not be able to see their changes.
    extra_fields = (
        # NB: indexing handlers are useless here, they are handled on their own.
        registry._watching.get_extra_fields(resource=resource) |
        registry._changing.get_extra_fields(resource=resource) |
        registry._spawning.get_extra_fields(resource=resource))

    # Compare the last-handled essence with the current one; progress annotations
    # are stripped from both sides so that they never show up in the diff.
    old = settings.persistence.diffbase_storage.fetch(body=body)
    new = settings.persistence.diffbase_storage.build(body=body, extra_fields=extra_fields)
    old = settings.persistence.progress_storage.clear(essence=old) if old is not None else None
    new = settings.persistence.progress_storage.clear(essence=new) if new is not None else None
    diff = diffs.diff(old, new)

    # Detect what we are going to do on this processing cycle.
    # Each cause is detected only if there are any handlers of that kind at all.
    watching_cause = causes.detect_watching_cause(
        raw_event=raw_event,
        resource=resource,
        indices=indexers.indices,
        logger=local_logger,
        patch=patch,
        body=body,
        memo=memory.memo,
    ) if registry._watching.has_handlers(resource=resource) else None

    spawning_cause = causes.detect_spawning_cause(
        resource=resource,
        indices=indexers.indices,
        logger=event_logger,
        patch=patch,
        body=body,
        memo=memory.memo,
        reset=bool(diff),  # only essential changes reset idling, not every event
    ) if registry._spawning.has_handlers(resource=resource) else None

    changing_cause = causes.detect_changing_cause(
        finalizer=finalizer,
        raw_event=raw_event,
        resource=resource,
        indices=indexers.indices,
        logger=event_logger,
        patch=patch,
        body=body,
        old=old,
        new=new,
        diff=diff,
        memo=memory.memo,
        initial=memory.noticed_by_listing and not memory.fully_handled_once,
    ) if registry._changing.has_handlers(resource=resource) else None

    # If there are any handlers for this resource kind in general, but not for this specific object
    # due to filters, then be blind to it, store no state, and log nothing about the handling cycle.
    if changing_cause is not None and not registry._changing.prematch(cause=changing_cause):
        changing_cause = None

    # Block the object from deletion if we have anything to do in its end of life:
    # specifically, if there are daemons to kill or mandatory on-deletion handlers to call.
    # The high-level handlers are prevented if this event cycle is dedicated to the finalizer.
    # The low-level handlers (on-event spying & daemon spawning) are still executed asap.
    deletion_is_ongoing = finalizers.is_deletion_ongoing(body=body)
    deletion_is_blocked = finalizers.is_deletion_blocked(body=body, finalizer=finalizer)
    deletion_must_be_blocked = (
        (spawning_cause is not None and registry._spawning.requires_finalizer(
            cause=spawning_cause,
            excluded=memory.daemons_memory.forever_stopped,
        ))
        or
        (changing_cause is not None and registry._changing.requires_finalizer(
            cause=changing_cause,
        )))

    if deletion_must_be_blocked and not deletion_is_blocked and not deletion_is_ongoing:
        local_logger.debug("Adding the finalizer, thus preventing the actual deletion.")
        finalizers.block_deletion(body=body, patch=patch, finalizer=finalizer)
        changing_cause = None  # prevent further high-level processing this time

    if not deletion_must_be_blocked and deletion_is_blocked:
        local_logger.debug("Removing the finalizer, as there are no handlers requiring it.")
        finalizers.allow_deletion(body=body, patch=patch, finalizer=finalizer)
        changing_cause = None  # prevent further high-level processing this time

    # Invoke all the handlers that should or could be invoked at this processing cycle.
    # The low-level spies go ASAP always. However, the daemons are spawned before the high-level
    # handlers and killed after them: the daemons should live throughout the full object lifecycle.
    if watching_cause is not None:
        await process_watching_cause(
            lifecycle=lifecycles.all_at_once,
            registry=registry,
            settings=settings,
            cause=watching_cause,
        )

    spawning_delays: Collection[float] = []
    if spawning_cause is not None:
        spawning_delays = await process_spawning_cause(
            registry=registry,
            settings=settings,
            memory=memory,
            cause=spawning_cause,
        )

    changing_delays: Collection[float] = []
    if changing_cause is not None:
        changing_delays = await process_changing_cause(
            lifecycle=lifecycle,
            registry=registry,
            settings=settings,
            memory=memory,
            cause=changing_cause,
        )

    # Release the object if everything is done, and it is marked for deletion.
    # But not when it has already gone.
    if deletion_is_ongoing and deletion_is_blocked and not spawning_delays and not changing_delays:
        local_logger.debug("Removing the finalizer, thus allowing the actual deletion.")
        finalizers.allow_deletion(body=body, patch=patch, finalizer=finalizer)

    delays = list(spawning_delays) + list(changing_delays)
    return (delays, changing_cause is not None)
async def apply(
        *,
        settings: configuration.OperatorSettings,
        resource: references.Resource,
        body: bodies.Body,
        patch: patches.Patch,
        delays: Collection[float],
        logger: loggers.ObjectLogger,
        stream_pressure: Optional[asyncio.Event] = None,  # None for tests
) -> bool:
    """
    Apply the accumulated patch to the object and sleep for the requested delays.

    The ordering is the contract here: the patch is always applied first, and
    only then the sleeping happens -- so the object's status stays up to date.
    A fully slept-through delay is followed by a "touch" patch (a dummy change)
    to re-trigger a new watch-event for the delayed handlers.

    Returns whether the handling cycle is fully finished (no patch, no delays).
    """
    delay = min(delays) if delays else None

    # Delete dummies on occasion, but don't trigger special patching for them [discussable].
    if patch:  # TODO: LATER: and the dummies are there (without additional methods?)
        settings.persistence.progress_storage.touch(body=body, patch=patch, value=None)

    # Actually patch if it was not empty originally or after the dummies removal.
    await patch_and_check(
        settings=settings,
        resource=resource,
        logger=logger,
        patch=patch,
        body=body,
    )

    # Sleep strictly after patching, never before -- to keep the status proper.
    # The patching above, if done, interrupts the sleep instantly, so we skip it at all.
    # Note: a zero-second or negative sleep is still a sleep, it will trigger a dummy patch.
    applied = False
    if delay and patch:
        logger.debug(
            f"Sleeping was skipped because of the patch, {delay} seconds left."
        )
    elif delay is not None:
        # Cap overly long sleeps so that a keep-alive touch happens periodically.
        if delay > WAITING_KEEPALIVE_INTERVAL:
            limit = WAITING_KEEPALIVE_INTERVAL
            logger.debug(
                f"Sleeping for {delay} (capped {limit}) seconds for the delayed handlers."
            )
            unslept_delay = await aiotime.sleep(limit, wakeup=stream_pressure)
        elif delay > 0:
            logger.debug(
                f"Sleeping for {delay} seconds for the delayed handlers.")
            unslept_delay = await aiotime.sleep(delay, wakeup=stream_pressure)
        else:
            unslept_delay = None  # no need to sleep? means: slept in full.

        # Exclude cases when touching immediately after patching (including: ``delay == 0``).
        if patch and not delay:
            pass
        elif unslept_delay is not None:
            # The sleep was interrupted (stream pressure): a new event will arrive anyway.
            logger.debug(
                f"Sleeping was interrupted by new changes, {unslept_delay} seconds left."
            )
        else:
            # Any unique always-changing value will work; not necessarily a timestamp.
            # NOTE(review): utcnow() returns a naive datetime and is deprecated in 3.12+;
            # uniqueness is the only property used here -- confirm before changing.
            value = datetime.datetime.utcnow().isoformat()
            touch = patches.Patch()
            settings.persistence.progress_storage.touch(body=body, patch=touch, value=value)
            await patch_and_check(
                settings=settings,
                resource=resource,
                logger=logger,
                patch=touch,
                body=body,
            )
    elif not patch:  # no patch/touch and no delay
        applied = True
    return applied