Example #1
    def __exit__(self, exc_type, exc_val, exc_tb):
        _src = self._src_view
        self._dot_view = self._dot_view.to_dict()
        _diffs = diff(self._dot_view_old, self._dot_view)
        for op, path, old, new in _diffs:
            # Map dotted-view attribute names back to their source keys.
            path = [self._attr_map.get(p, p) for p in path]
            deep_set(_src, path, new)
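
This and the later __exit__ examples (#5, #6) rely on a deep_set helper that is not shown. A minimal sketch of what they appear to assume (hypothetical, not the real helper), accepting either a dotted string or a sequence of keys:

def deep_set(obj, path, value):
    # Hypothetical helper: accepts "a.b.c" or ["a", "b", "c"] and
    # creates intermediate dicts as needed.
    keys = path.split(".") if isinstance(path, str) else list(path)
    for key in keys[:-1]:
        obj = obj.setdefault(key, {})
    obj[keys[-1]] = value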
Example #2
def test_custom_mappings_are_recursed():
    class SampleMapping(collections.abc.Mapping):
        def __init__(self, data=(), **kwargs) -> None:
            super().__init__()
            self._items = dict(data, **kwargs)

        def __len__(self) -> int:
            return len(self._items)

        def __iter__(self):
            return iter(self._items)

        def __getitem__(self, item: str) -> str:
            return self._items[item]

    class MappingA(SampleMapping):
        pass

    class MappingB(SampleMapping):
        pass

    a = MappingA(a=100, b=200)
    b = MappingB(b=300, c=400)
    d = diff(a, b)
    assert (DiffOperation.REMOVE, ('a', ), 100, None) in d
    assert (DiffOperation.CHANGE, ('b', ), 200, 300) in d
    assert (DiffOperation.ADD, ('c', ), None, 400) in d
    assert (DiffOperation.CHANGE, (), a, b) not in d
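
Note that this test compares diff items against DiffOperation members, while other tests compare against plain strings such as 'change'. Both can only pass together if DiffOperation is a string-valued enum; a sketch of that assumption:

import enum

class DiffOperation(str, enum.Enum):  # assumed; inferred from the tests
    ADD = 'add'
    CHANGE = 'change'
    REMOVE = 'remove'

assert DiffOperation.CHANGE == 'change'  # str-enum members equal their values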
Example #3
def test_dicts_different_items_handled():
    body_before_items_update = {'spec': {'items': ['task1', 'task2']}}
    body_after_items_update = {'spec': {'items': ['task3', 'task4']}}

    d = diff(body_before_items_update, body_after_items_update)
    assert d == (('change', ('spec', 'items'),
                  ['task1', 'task2'], ['task3', 'task4']), )
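
The test also shows that lists are diffed as opaque values: a change inside a list is reported as a single 'change' of the whole list at its parent path, never as per-item operations (the same behavior as in test_lists_unequal below):

assert diff(['task1', 'task2'], ['task3', 'task4']) == \
    (('change', (), ['task1', 'task2'], ['task3', 'task4']), )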
Example #4
def get_essential_diffs(
    body: bodies.Body,
    extra_fields: Optional[Iterable[dicts.FieldSpec]] = None,
) -> Tuple[Optional[bodies.BodyEssence], Optional[bodies.BodyEssence],
           diffs.Diff]:
    old: Optional[bodies.BodyEssence] = retrieve_essence(body)
    new: Optional[bodies.BodyEssence] = get_essence(body,
                                                    extra_fields=extra_fields)
    return old, new, diffs.diff(old, new)
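
A typical call site then branches on whether anything essential changed; an empty diff is falsy (hypothetical usage, the consumer name is illustrative):

old, new, d = get_essential_diffs(body, extra_fields=extra_fields)
if d:
    handle_essential_changes(old=old, new=new, diff=d)  # hypothetical consumer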
Example #5
    def __exit__(self, typ, value, traceback):
        _root = self._root_view
        _diffs = diff(self._old, self._new)

        for op, path, old, new in _diffs:
            kind = path[0]  # do not shadow the exception-type argument `typ`
            if kind == "root":
                deep_set(_root, ".".join(path[1:]), new)
            else:
                kind = self._typ_full_typ[kind]
                nsn = util.normalized_nsn(path[1])
                path = ["mount", kind, nsn, "spec"] + list(path[2:])
                deep_set(_root, path, new)
Example #6
    def __exit__(self, typ, value, traceback):
        # Diff the old and new snapshots, then apply the changes to the root view.
        _root = self._root_view
        _diffs = diff(self._old, self._new)
        for op, path, old, new in _diffs:
            nsn = path[0]
            if nsn == "root":
                deep_set(_root, ".".join(path[1:]), new)
            else:
                gvr = self._nsn_gvr[nsn]  # do not shadow the exception-type argument `typ`
                nsn = util.normalized_nsn(nsn)
                path = ["mount", gvr, nsn, "spec"] + list(path[1:])
                deep_set(_root, path, new)
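
Combined with the hypothetical deep_set sketch from Example #1, the non-root branch above builds a nested mount structure (illustrative values):

root = {}
deep_set(root, ["mount", "v1.Pod", "default/my-pod", "spec", "replicas"], 3)
assert root == {"mount": {"v1.Pod": {"default/my-pod": {"spec": {"replicas": 3}}}}}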
Example #7
async def patch_and_check(
    *,
    resource: references.Resource,
    body: bodies.Body,
    patch: patches.Patch,
    logger: Union[logging.Logger, logging.LoggerAdapter],
) -> None:
    """
    Apply a patch and verify that it is applied correctly.

    The inconsistencies are checked only against what was in the patch.
    Other unexpected changes in the body are ignored, including system
    fields such as generations and resource versions, and unrelated fields
    such as other statuses, spec, labels, annotations, etc.

    Selected false-positive inconsistencies are explicitly ignored
    for K8s-managed fields, such as finalizers, labels or annotations:
    whenever an empty list/dict is stored, such fields are completely removed.
    For normal fields (e.g. in spec/status), an empty list/dict is still
    a value and is persisted in the object and matches with the patch.
    """
    if patch:
        logger.debug(f"Patching with: {patch!r}")
        resulting_body = await patching.patch_obj(
            resource=resource,
            namespace=body.metadata.namespace,
            name=body.metadata.name,
            patch=patch,
        )
        inconsistencies = diffs.diff(patch,
                                     resulting_body,
                                     scope=diffs.DiffScope.LEFT)
        inconsistencies = diffs.Diff(
            diffs.DiffItem(op, field, old, new)
            for op, field, old, new in inconsistencies
            if old or new or field not in KNOWN_INCONSISTENCIES)
        if resulting_body is None:
            logger.debug(
                "Patching was skipped: the object does not exist anymore.")
        elif inconsistencies:
            logger.warning(
                f"Patching failed with inconsistencies: {inconsistencies}")
Example #8
def test_scalars_unequal():
    a = 100
    b = 200
    d = diff(a, b)
    assert d == (('change', (), 100, 200), )
Example #9
def test_strings_equal():
    a = 'hello'
    b = 'hello'
    d = diff(a, b)
    assert d == ()
Example #10
def test_scalars_equal():
    a = 100
    b = 100
    d = diff(a, b)
    assert d == ()
Example #11
def test_none_for_old():
    a = None
    b = object()
    d = diff(a, b)
    assert d == (('add', (), None, b), )
Example #12
def test_dicts_with_keys_added_and_noticed(scope):
    a = {'hello': 'world'}
    b = {'hello': 'world', 'key': 'val'}
    d = diff(a, b, scope=scope)
    assert d == (('add', ('key', ), None, 'val'), )
Example #13
def test_nones_for_both():
    a = None
    b = None
    d = diff(a, b)
    assert d == ()
Example #14
def test_dicts_with_keys_changed():
    a = {'hello': 'world', 'key': 'old'}
    b = {'hello': 'world', 'key': 'new'}
    d = diff(a, b)
    assert d == (('change', ('key', ), 'old', 'new'), )
Example #15
def test_dicts_with_keys_added_but_ignored(scope):
    a = {'hello': 'world'}
    b = {'hello': 'world', 'key': 'val'}
    d = diff(a, b, scope=scope)
    assert d == ()
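
The scope argument in the two tests above (and in test_lists_equal below) is injected by pytest. A plausible parametrization, assuming the scope enum has FULL, RIGHT, and LEFT members as Example #7 suggests (the exact marks in the real suite may differ):

import pytest

@pytest.mark.parametrize('scope', [DiffScope.FULL, DiffScope.RIGHT])
def test_dicts_with_keys_added_and_noticed(scope): ...

@pytest.mark.parametrize('scope', [DiffScope.LEFT])
def test_dicts_with_keys_added_but_ignored(scope): ...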
Example #16
def test_lists_unequal():
    a = [100, 200, 300]
    b = [100, 666, 300]
    d = diff(a, b)
    assert d == (('change', (), [100, 200, 300], [100, 666, 300]), )
Example #17
def test_dicts_with_keys_added():
    a = {'hello': 'world'}
    b = {'hello': 'world', 'key': 'val'}
    d = diff(a, b)
    assert d == (('add', ('key', ), None, 'val'), )
Example #18
def get_state_diffs(body, extra_fields=None):
    old = retrieve_state(body)
    new = get_state(body, extra_fields=extra_fields)
    return old, new, diffs.diff(old, new)
Example #19
async def process_resource_causes(
    lifecycle: lifecycles.LifeCycleFn,
    indexers: indexing.OperatorIndexers,
    registry: registries.OperatorRegistry,
    settings: configuration.OperatorSettings,
    resource: references.Resource,
    raw_event: bodies.RawEvent,
    body: bodies.Body,
    patch: patches.Patch,
    logger: loggers.ObjectLogger,
    memory: containers.ResourceMemory,
) -> Tuple[Collection[float], bool]:

    finalizer = settings.persistence.finalizer
    extra_fields = (
        # NB: indexing handlers are useless here, they are handled on their own.
        registry._resource_watching.get_extra_fields(resource=resource)
        | registry._resource_changing.get_extra_fields(resource=resource)
        | registry._resource_spawning.get_extra_fields(resource=resource))
    old = settings.persistence.diffbase_storage.fetch(body=body)
    new = settings.persistence.diffbase_storage.build(
        body=body, extra_fields=extra_fields)
    old = settings.persistence.progress_storage.clear(
        essence=old) if old is not None else None
    new = settings.persistence.progress_storage.clear(
        essence=new) if new is not None else None
    diff = diffs.diff(old, new)

    # Detect what we are going to do on this processing cycle.
    resource_watching_cause = causation.detect_resource_watching_cause(
        raw_event=raw_event,
        resource=resource,
        indices=indexers.indices,
        logger=logger,
        patch=patch,
        body=body,
        memo=memory.memo,
    ) if registry._resource_watching.has_handlers(resource=resource) else None

    resource_spawning_cause = causation.detect_resource_spawning_cause(
        resource=resource,
        indices=indexers.indices,
        logger=logger,
        patch=patch,
        body=body,
        memo=memory.memo,
        reset=bool(diff),  # only essential changes reset idling, not every event
    ) if registry._resource_spawning.has_handlers(resource=resource) else None

    resource_changing_cause = causation.detect_resource_changing_cause(
        finalizer=finalizer,
        raw_event=raw_event,
        resource=resource,
        indices=indexers.indices,
        logger=logger,
        patch=patch,
        body=body,
        old=old,
        new=new,
        diff=diff,
        memo=memory.memo,
        initial=memory.noticed_by_listing and not memory.fully_handled_once,
    ) if registry._resource_changing.has_handlers(resource=resource) else None

    # If there are any handlers for this resource kind in general, but not for this specific object
    # due to filters, then be blind to it, store no state, and log nothing about the handling cycle.
    if (resource_changing_cause is not None
            and not registry._resource_changing.prematch(
                cause=resource_changing_cause)):
        resource_changing_cause = None

    # Block the object from deletion if we have anything to do in its end of life:
    # specifically, if there are daemons to kill or mandatory on-deletion handlers to call.
    # The high-level handlers are prevented if this event cycle is dedicated to the finalizer.
    # The low-level handlers (on-event spying & daemon spawning) are still executed asap.
    deletion_is_ongoing = finalizers.is_deletion_ongoing(body=body)
    deletion_is_blocked = finalizers.is_deletion_blocked(body=body,
                                                         finalizer=finalizer)
    deletion_must_be_blocked = (
        (resource_spawning_cause is not None
         and registry._resource_spawning.requires_finalizer(
             cause=resource_spawning_cause,
             excluded=memory.forever_stopped,
         )) or (resource_changing_cause is not None
                and registry._resource_changing.requires_finalizer(
                    cause=resource_changing_cause)))

    if deletion_must_be_blocked and not deletion_is_blocked and not deletion_is_ongoing:
        logger.debug(
            "Adding the finalizer, thus preventing the actual deletion.")
        finalizers.block_deletion(body=body, patch=patch, finalizer=finalizer)
        resource_changing_cause = None  # prevent further high-level processing this time

    if not deletion_must_be_blocked and deletion_is_blocked:
        logger.debug(
            "Removing the finalizer, as there are no handlers requiring it.")
        finalizers.allow_deletion(body=body, patch=patch, finalizer=finalizer)
        resource_changing_cause = None  # prevent further high-level processing this time

    # Invoke all the handlers that should or could be invoked at this processing cycle.
    # The low-level spies go ASAP always. However, the daemons are spawned before the high-level
    # handlers and killed after them: the daemons should live throughout the full object lifecycle.
    if resource_watching_cause is not None:
        await process_resource_watching_cause(
            lifecycle=lifecycles.all_at_once,
            registry=registry,
            settings=settings,
            cause=resource_watching_cause,
        )

    resource_spawning_delays: Collection[float] = []
    if resource_spawning_cause is not None:
        resource_spawning_delays = await process_resource_spawning_cause(
            registry=registry,
            settings=settings,
            memory=memory,
            cause=resource_spawning_cause,
        )

    resource_changing_delays: Collection[float] = []
    if resource_changing_cause is not None:
        resource_changing_delays = await process_resource_changing_cause(
            lifecycle=lifecycle,
            registry=registry,
            settings=settings,
            memory=memory,
            cause=resource_changing_cause,
        )

    # Release the object if everything is done, and it is marked for deletion.
    # But not when it has already gone.
    if deletion_is_ongoing and deletion_is_blocked \
            and not resource_spawning_delays \
            and not resource_changing_delays:
        logger.debug(
            "Removing the finalizer, thus allowing the actual deletion.")
        finalizers.allow_deletion(body=body, patch=patch, finalizer=finalizer)

    delays = list(resource_spawning_delays) + list(resource_changing_delays)
    return (delays, resource_changing_cause is not None)
Example #20
async def process_resource_event(
    lifecycle: lifecycles.LifeCycleFn,
    registry: registries.OperatorRegistry,
    settings: configuration.OperatorSettings,
    memories: containers.ResourceMemories,
    resource: resources.Resource,
    raw_event: bodies.RawEvent,
    replenished: asyncio.Event,
    event_queue: posting.K8sEventQueue,
) -> None:
    """
    Handle a single custom object low-level watch-event.

    Convert the low-level events, as provided by the watching/queueing tasks,
    to the high-level causes, and then call the cause-handling logic.

    All the internally provoked changes are intercepted: they do not create
    causes and therefore do not invoke the handling logic.
    """
    finalizer = settings.persistence.finalizer

    # Recall what is stored about that object. Share it in little portions with the consumers.
    # And immediately forget it if the object is deleted from the cluster (but keep in memory).
    raw_type, raw_body = raw_event['type'], raw_event['object']
    memory = await memories.recall(raw_body,
                                   noticed_by_listing=raw_type is None)
    if memory.live_fresh_body is not None:
        memory.live_fresh_body._replace_with(raw_body)
    if raw_type == 'DELETED':
        await memories.forget(raw_body)

    # Convert to a heavy mapping-view wrapper only now, when heavy processing begins.
    # Raw-event streaming, queueing, and batching use regular lightweight dicts.
    # Why here? 1. Before it splits into multiple causes & handlers for the same object's body;
    # 2. After it is batched (queueing); 3. While the "raw" parsed JSON is still known;
    # 4. Same as where a patch object of a similar wrapping semantics is created.
    body = (memory.live_fresh_body if memory.live_fresh_body is not None
            else bodies.Body(raw_body))
    patch = patches.Patch()

    # Each object has its own prefixed logger, to distinguish parallel handling.
    logger = logging_engine.ObjectLogger(body=body, settings=settings)
    posting.event_queue_loop_var.set(asyncio.get_running_loop())
    posting.event_queue_var.set(event_queue)  # till the end of this object's task.

    extra_fields = registry.resource_changing_handlers[resource].get_extra_fields()
    old = settings.persistence.diffbase_storage.fetch(body=body)
    new = settings.persistence.diffbase_storage.build(
        body=body, extra_fields=extra_fields)
    old = settings.persistence.progress_storage.clear(
        essence=old) if old is not None else None
    new = settings.persistence.progress_storage.clear(
        essence=new) if new is not None else None
    diff = diffs.diff(old, new)

    # Detect what we are going to do on this processing cycle.
    resource_watching_cause = causation.detect_resource_watching_cause(
        raw_event=raw_event,
        resource=resource,
        logger=logger,
        patch=patch,
        body=body,
        memo=memory.memo,
    ) if registry.resource_watching_handlers[resource] else None

    resource_spawning_cause = causation.detect_resource_spawning_cause(
        resource=resource,
        logger=logger,
        patch=patch,
        body=body,
        memo=memory.memo,
        reset=bool(diff),  # only essential changes reset idling, not every event
    ) if registry.resource_spawning_handlers[resource] else None

    resource_changing_cause = causation.detect_resource_changing_cause(
        finalizer=finalizer,
        raw_event=raw_event,
        resource=resource,
        logger=logger,
        patch=patch,
        body=body,
        old=old,
        new=new,
        diff=diff,
        memo=memory.memo,
        initial=memory.noticed_by_listing and not memory.fully_handled_once,
    ) if registry.resource_changing_handlers[resource] else None

    # Block the object from deletion if we have anything to do in its end of life:
    # specifically, if there are daemons to kill or mandatory on-deletion handlers to call.
    # The high-level handlers are prevented if this event cycle is dedicated to the finalizer.
    # The low-level handlers (on-event spying & daemon spawning) are still executed asap.
    deletion_is_ongoing = finalizers.is_deletion_ongoing(body=body)
    deletion_is_blocked = finalizers.is_deletion_blocked(body=body,
                                                         finalizer=finalizer)
    deletion_must_be_blocked = (
        (resource_spawning_cause is not None
         and registry.resource_spawning_handlers[resource].requires_finalizer(
             cause=resource_spawning_cause,
             excluded=memory.forever_stopped,
         )) or
        (resource_changing_cause is not None
         and registry.resource_changing_handlers[resource].requires_finalizer(
             cause=resource_changing_cause)))

    if deletion_must_be_blocked and not deletion_is_blocked and not deletion_is_ongoing:
        logger.debug(
            "Adding the finalizer, thus preventing the actual deletion.")
        finalizers.block_deletion(body=body, patch=patch, finalizer=finalizer)
        resource_changing_cause = None  # prevent further high-level processing this time

    if not deletion_must_be_blocked and deletion_is_blocked:
        logger.debug(
            "Removing the finalizer, as there are no handlers requiring it.")
        finalizers.allow_deletion(body=body, patch=patch, finalizer=finalizer)
        resource_changing_cause = None  # prevent further high-level processing this time

    # Invoke all the handlers that should or could be invoked at this processing cycle.
    # The low-level spies go ASAP always. However, the daemons are spawned before the high-level
    # handlers and killed after them: the daemons should live throughout the full object lifecycle.
    if resource_watching_cause is not None:
        await process_resource_watching_cause(
            lifecycle=lifecycles.all_at_once,
            registry=registry,
            settings=settings,
            cause=resource_watching_cause,
        )

    resource_spawning_delays: Collection[float] = []
    if resource_spawning_cause is not None:
        resource_spawning_delays = await process_resource_spawning_cause(
            registry=registry,
            settings=settings,
            memory=memory,
            cause=resource_spawning_cause,
        )

    resource_changing_delays: Collection[float] = []
    if resource_changing_cause is not None:
        resource_changing_delays = await process_resource_changing_cause(
            lifecycle=lifecycle,
            registry=registry,
            settings=settings,
            memory=memory,
            cause=resource_changing_cause,
        )

    # Release the object if everything is done, and it is marked for deletion.
    # But not when it has already gone.
    if deletion_is_ongoing and deletion_is_blocked \
            and not resource_spawning_delays \
            and not resource_changing_delays:
        logger.debug(
            "Removing the finalizer, thus allowing the actual deletion.")
        finalizers.allow_deletion(body=body, patch=patch, finalizer=finalizer)

    # Whatever was done, apply the accumulated changes to the object, or sleep-n-touch for delays.
    # But only once, to reduce the number of API calls and the generated irrelevant events.
    # And only if the object is at least supposed to exist (not "GONE"),
    # even if it actually does not.
    if raw_event['type'] != 'DELETED':
        await apply_reaction_outcomes(
            settings=settings,
            resource=resource,
            body=body,
            patch=patch,
            logger=logger,
            delays=list(resource_spawning_delays) + list(resource_changing_delays),
            replenished=replenished,
        )
Example #21
def test_strings_unequal():
    a = 'hello'
    b = 'world'
    d = diff(a, b)
    assert d == (('change', (), 'hello', 'world'), )
Example #22
def test_lists_equal(scope):
    a = [100, 200, 300]
    b = [100, 200, 300]
    d = diff(a, b, scope=scope)
    assert d == ()
Example #23
def test_lists_equal():
    a = [100, 200, 300]
    b = [100, 200, 300]
    d = diff(a, b)
    assert d == ()
Example #24
def get_state_diffs(body):
    old = retrieve_state(body)
    new = get_state(body)
    return old, new, diffs.diff(old, new)
Example #25
def test_dicts_equal():
    a = {'hello': 'world', 'key': 'val'}
    b = {'key': 'val', 'hello': 'world'}
    d = diff(a, b)
    assert d == ()
Example #26
def test_none_for_new():
    a = object()
    b = None
    d = diff(a, b)
    assert d == (('remove', (), a, None), )
Example #27
def test_dicts_with_keys_removed():
    a = {'hello': 'world', 'key': 'val'}
    b = {'hello': 'world'}
    d = diff(a, b)
    assert d == (('remove', ('key', ), 'val', None), )
Example #28
def test_dicts_adding_label():
    body_before_labelling = {'metadata': {}}
    body_after_labelling = {'metadata': {'labels': 'LABEL'}}

    d = diff(body_before_labelling, body_after_labelling)
    assert d == (('add', ('metadata', 'labels'), None, 'LABEL'), )
Example #29
def test_dicts_with_subkeys_changed():
    a = {'main': {'hello': 'world', 'key': 'old'}}
    b = {'main': {'hello': 'world', 'key': 'new'}}
    d = diff(a, b)
    assert d == (('change', ('main', 'key'), 'old', 'new'), )
Example #30
def test_dicts_updating_storage_size():
    body_before_storage_size_update = {'spec': {'size': '42G'}}
    body_after_storage_size_update = {'spec': {'size': '76G'}}

    d = diff(body_before_storage_size_update, body_after_storage_size_update)
    assert d == (('change', ('spec', 'size'), '42G', '76G'), )