async def test_reporting_on_resource_readiness(resource, settings, registry, indexers, caplog, event_type, handlers, timer):
    """The resource's readiness toggle is reported almost instantly, then dropped."""
    caplog.set_level(logging.DEBUG)
    ready_set = ToggleSet(all)
    this_resource_ready = await ready_set.make_toggle()
    async with timer, async_timeout.timeout(0.5) as timeout:
        await process_resource_event(
            lifecycle=all_at_once,
            registry=registry,
            settings=settings,
            resource=resource,
            indexers=indexers,
            memories=ResourceMemories(),
            memobase=Memo(),
            raw_event={'type': event_type, 'object': {}},
            event_queue=asyncio.Queue(),
            operator_indexed=ready_set,
            resource_indexed=this_resource_ready,
        )
    assert not timeout.expired
    assert timer.seconds < 0.2  # asap, nowait
    assert ready_set.is_on()
    assert set(ready_set) == set()  # the toggle is removed to save RAM
    assert handlers.event_mock.called
async def test_removed_on_filters_mismatch(resource, settings, registry, indexers, index, caplog, event_type, handlers, mocker):
    """An object disappears from the index once no indexing handler matches it."""
    # Simulate the indexing handler going out of scope (one of several possible ways).
    mocker.patch.object(registry._indexing, 'get_handlers', return_value=[])
    caplog.set_level(logging.DEBUG)
    handlers.index_mock.return_value = 123
    obj_body = {'metadata': {'namespace': 'ns1', 'name': 'name1'}}
    await process_resource_event(
        lifecycle=all_at_once,
        registry=registry,
        settings=settings,
        resource=resource,
        indexers=indexers,
        memories=ResourceMemories(),
        memobase=Memo(),
        raw_event={'type': event_type, 'object': obj_body},
        event_queue=asyncio.Queue(),
        resource_indexed=Toggle(),  # used! only to enable indexing.
    )
    assert set(index) == set()
async def test_supersession_is_logged(
        registry, settings, resource, handlers, cause_types, cause_mock, caplog, assert_logs):
    """When a new cause arrives over a pending one, the supersession is logged."""
    caplog.set_level(logging.DEBUG)
    settings.persistence.progress_storage = StatusProgressStorage()
    old_purpose, new_purpose = cause_types
    # All known handlers are pre-persisted as being in progress for the old cause.
    progress = {
        handler_id: {'purpose': old_purpose}
        for handler_id in ('create_fn', 'update_fn', 'resume_fn', 'delete_fn')
    }
    body = {'status': {'kopf': {'progress': progress}}}
    cause_mock.reason = new_purpose
    event_type = None if new_purpose == Reason.RESUME else 'irrelevant'
    await process_resource_event(
        lifecycle=kopf.lifecycles.all_at_once,
        registry=registry,
        settings=settings,
        resource=resource,
        indexers=OperatorIndexers(),
        memories=ResourceMemories(),
        memobase=Memo(),
        raw_event={'type': event_type, 'object': body},
        event_queue=asyncio.Queue(),
    )
    assert_logs([
        "(Creation|Updating|Resuming|Deletion) is superseded by (creation|updating|resuming|deletion): ",
        "(Creation|Updating|Resuming|Deletion) is in progress: ",
        "(Creation|Updating|Resuming|Deletion) is processed: ",
    ])
async def test_diffs_logged_if_present(registry, settings, resource, handlers, cause_type, cause_mock, caplog, assert_logs, diff):
    """A non-empty diff must be logged alongside the change-processing messages."""
    caplog.set_level(logging.DEBUG)
    event_type = None if cause_type == Reason.RESUME else 'irrelevant'
    cause_mock.reason = cause_type
    cause_mock.diff = diff
    # Fix: the values were swapped relative to their names (old='new', new='old').
    # Only `not None` matters for the logging branch, but the fixtures
    # should not mislead the reader about which state is which.
    cause_mock.old = {'field': 'old'}  # checked for `not None`, and JSON-serialised
    cause_mock.new = {'field': 'new'}  # checked for `not None`, and JSON-serialised
    await process_resource_event(
        lifecycle=kopf.lifecycles.all_at_once,
        registry=registry,
        settings=settings,
        resource=resource,
        indexers=OperatorIndexers(),
        memories=ResourceMemories(),
        memobase=Memo(),
        raw_event={'type': event_type, 'object': {}},
        event_queue=asyncio.Queue(),
    )
    assert_logs([
        "(Creation|Updating|Resuming|Deletion) is in progress: ",
        "(Creation|Updating|Resuming|Deletion) diff: "
    ])
async def test_preserved_on_logical_deletion(resource, settings, registry, indexers, index, caplog, event_type, handlers):
    """An object merely marked for deletion (deletionTimestamp) stays in the index."""
    caplog.set_level(logging.DEBUG)
    handlers.index_mock.return_value = 456
    obj_body = {'metadata': {'namespace': 'ns1',
                             'name': 'name1',
                             'deletionTimestamp': '...'}}
    await process_resource_event(
        lifecycle=all_at_once,
        registry=registry,
        settings=settings,
        resource=resource,
        indexers=indexers,
        memories=ResourceMemories(),
        memobase=Memo(),
        raw_event={'type': event_type, 'object': obj_body},
        event_queue=asyncio.Queue(),
        resource_indexed=Toggle(),  # used! only to enable indexing.
    )
    assert set(index) == {None}
    assert set(index[None]) == {456}
async def test_noop(registry, settings, handlers, resource, cause_mock, event_type, caplog, assert_logs, k8s_mocked):
    """An uninteresting change triggers no handlers, no patching, no sleeping."""
    caplog.set_level(logging.DEBUG)
    cause_mock.reason = Reason.NOOP
    queue = asyncio.Queue()
    await process_resource_event(
        lifecycle=kopf.lifecycles.all_at_once,
        registry=registry,
        settings=settings,
        resource=resource,
        indexers=OperatorIndexers(),
        memories=ResourceMemories(),
        memobase=Memo(),
        raw_event={'type': event_type, 'object': {}},
        event_queue=queue,
    )
    for mocked in (handlers.create_mock, handlers.update_mock, handlers.delete_mock):
        assert not mocked.called
    assert not k8s_mocked.sleep.called
    assert not k8s_mocked.patch.called
    assert queue.empty()
    assert_logs([
        "Something has changed, but we are not interested",
    ])
async def test_unblocking_once_operator_is_ready(
        resource, settings, registry, indexers, caplog, event_type, handlers, timer):
    """Processing unblocks as soon as the whole operator becomes ready."""
    caplog.set_level(logging.DEBUG)
    operator_indexed = ToggleSet(all)
    resource_listed = await operator_indexed.make_toggle()
    resource_indexed = await operator_indexed.make_toggle()

    async def become_listed_after(delay: float) -> None:
        await asyncio.sleep(delay)
        await resource_listed.turn_to(True)

    with timer:
        asyncio.create_task(become_listed_after(0.2))
        await process_resource_event(
            lifecycle=all_at_once,
            registry=registry,
            settings=settings,
            resource=resource,
            indexers=indexers,
            memories=ResourceMemories(),
            memobase=Memo(),
            raw_event={'type': event_type, 'object': {}},
            event_queue=asyncio.Queue(),
            operator_indexed=operator_indexed,
            resource_indexed=resource_indexed,
        )
    assert 0.2 < timer.seconds < 0.4
    assert operator_indexed.is_on()
    assert set(operator_indexed) == {resource_listed}
    assert handlers.event_mock.called
async def test_blocking_when_operator_is_not_ready(
        resource, settings, registry, indexers, caplog, event_type, handlers, timer):
    """With the operator not fully listed, processing blocks until the timeout."""
    caplog.set_level(logging.DEBUG)
    operator_indexed = ToggleSet(all)
    resource_listed = await operator_indexed.make_toggle()
    resource_indexed = await operator_indexed.make_toggle()
    with pytest.raises(asyncio.TimeoutError), timer:
        processing = process_resource_event(
            lifecycle=all_at_once,
            registry=registry,
            settings=settings,
            resource=resource,
            indexers=indexers,
            memories=ResourceMemories(),
            memobase=Memo(),
            raw_event={'type': event_type, 'object': {}},
            event_queue=asyncio.Queue(),
            operator_indexed=operator_indexed,
            resource_indexed=resource_indexed,
        )
        await asyncio.wait_for(processing, timeout=0.2)
    assert 0.2 < timer.seconds < 0.4
    assert operator_indexed.is_off()
    assert set(operator_indexed) == {resource_listed}
    assert not handlers.event_mock.called
async def test_errors_are_ignored(registry, settings, handlers, extrahandlers, resource, cause_mock, cause_type, caplog, assert_logs, k8s_mocked):
    """A failing event-handler does not prevent the remaining handlers from running."""
    caplog.set_level(logging.DEBUG)
    cause_mock.reason = cause_type
    handlers.event_mock.side_effect = Exception("oops")
    await process_resource_event(
        lifecycle=kopf.lifecycles.all_at_once,
        registry=registry,
        settings=settings,
        resource=resource,
        indexers=OperatorIndexers(),
        memories=ResourceMemories(),
        memobase=Memo(),
        raw_event={'type': 'ev-type', 'object': {}},
        event_queue=asyncio.Queue(),
    )
    assert handlers.event_mock.called
    assert extrahandlers.event_mock.called
    assert_logs([
        "Handler 'event_fn' is invoked.",
        "Handler 'event_fn' failed with an exception. Will ignore.",
        "Handler 'event_fn2' is invoked.",
        "Handler 'event_fn2' succeeded.",
    ])
async def test_1st_step_stores_progress_by_patching(registry, settings, handlers, extrahandlers, resource, cause_mock, cause_type, k8s_mocked, deletion_ts):
    # Purpose: after the 1st handling cycle (asap lifecycle => one handler per
    # cycle), the per-handler progress must be persisted via a patch: the
    # executed handler is finished, the 2nd one is only started; finalizers
    # must remain untouched at this stage.
    name1 = f'{cause_type}_fn'
    name2 = f'{cause_type}_fn2'
    # RESUME causes come only from the initial listing (raw event type None).
    event_type = None if cause_type == Reason.RESUME else 'irrelevant'
    event_body = {
        'metadata': {
            'finalizers': [settings.persistence.finalizer]
        },
    }
    # Parametrized: the body is tested both with and without a deletion timestamp.
    event_body['metadata'].update(deletion_ts)
    cause_mock.reason = cause_type
    await process_resource_event(
        lifecycle=kopf.lifecycles.asap,
        registry=registry,
        settings=settings,
        resource=resource,
        indexers=OperatorIndexers(),
        memories=ResourceMemories(),
        memobase=Memo(),
        raw_event={
            'type': event_type,
            'object': event_body
        },
        event_queue=asyncio.Queue(),
    )
    # Only the handler matching the simulated cause is invoked, exactly once.
    assert handlers.create_mock.call_count == (1 if cause_type == Reason.CREATE else 0)
    assert handlers.update_mock.call_count == (1 if cause_type == Reason.UPDATE else 0)
    assert handlers.delete_mock.call_count == (1 if cause_type == Reason.DELETE else 0)
    assert handlers.resume_mock.call_count == (1 if cause_type == Reason.RESUME else 0)
    assert not k8s_mocked.sleep.called
    assert k8s_mocked.patch_obj.called
    patch = k8s_mocked.patch_obj.call_args_list[0][1]['patch']
    assert patch['status']['kopf']['progress'] is not None
    # The 1st handler is done (retried & succeeded); the 2nd is pending but started.
    assert patch['status']['kopf']['progress'][name1]['retries'] == 1
    assert patch['status']['kopf']['progress'][name1]['success'] is True
    assert patch['status']['kopf']['progress'][name2]['retries'] == 0
    assert patch['status']['kopf']['progress'][name2]['success'] is False
    assert patch['status']['kopf']['progress'][name1]['started']
    assert patch['status']['kopf']['progress'][name2]['started']
    # Premature removal of finalizers can prevent the 2nd step for deletion handlers.
    # So, the finalizers must never be removed on the 1st step.
    assert 'finalizers' not in patch['metadata']
async def test_forgetting_deletes_when_present():
    """After forgetting, recalling the same body yields a brand-new memory."""
    memories = ResourceMemories()
    remembered = await memories.recall(BODY)
    await memories.forget(BODY)
    # Check by recalling -- it should be a new one.
    recalled_again = await memories.recall(BODY)
    assert remembered is not recalled_again
async def test_stealth_mode_with_mismatching_handlers(
        registry, settings, selector, resource, cause_mock, cause_type,
        caplog, assert_logs, k8s_mocked, annotations, labels, when, deleted, initial):
    # Purpose: if the only registered handler does not match the object by its
    # filters (parametrized via annotations/labels/when/deleted/initial),
    # the operator stays fully silent: no sleeping, no patching, no log lines.
    caplog.set_level(logging.DEBUG)
    event_type = None
    event_body = {'metadata': {'finalizers': []}}
    cause_mock.reason = cause_type
    assert not registry._changing.has_handlers(
        resource=resource)  # prerequisite
    registry._changing.append(
        ChangingHandler(
            reason=None,
            fn=lambda **_: None,
            id='id',
            param=None,
            errors=None,
            timeout=None,
            retries=None,
            backoff=None,
            selector=selector,
            # The parametrized filters that make the handler mismatch the object:
            annotations=annotations,
            labels=labels,
            when=when,
            field=None,
            value=None,
            old=None,
            new=None,
            field_needs_change=None,
            deleted=deleted,
            initial=initial,
            requires_finalizer=None,
        ))
    await process_resource_event(
        lifecycle=kopf.lifecycles.all_at_once,
        registry=registry,
        settings=settings,
        resource=resource,
        indexers=OperatorIndexers(),
        memories=ResourceMemories(),
        memobase=Memo(),
        raw_event={
            'type': event_type,
            'object': event_body
        },
        event_queue=asyncio.Queue(),
    )
    assert not k8s_mocked.sleep.called
    assert not k8s_mocked.patch_obj.called
    assert not caplog.messages  # total stealth mode!
async def test_memo_is_shallow_copied():
    """The base memo is shallow-copied (via __copy__) exactly once per new memory."""
    copy_spy = Mock()

    class TracingMemo(Memo):
        def __copy__(self):
            copy_spy()
            return TracingMemo()

    memobase = TracingMemo()
    memories = ResourceMemories()
    memory = await memories.recall(BODY, memobase=memobase)
    assert copy_spy.call_count == 1
    assert memory.memo is not memobase
async def test_delayed_handlers_progress(registry, settings, handlers, resource, cause_mock, cause_reason, caplog, assert_logs, k8s_mocked, now, delayed_iso, delay):
    """A temporary error stores the computed `delayed` timestamp into the progress."""
    caplog.set_level(logging.DEBUG)
    for mocked in (handlers.create_mock, handlers.update_mock,
                   handlers.delete_mock, handlers.resume_mock):
        mocked.side_effect = TemporaryError("oops", delay=delay)
    cause_mock.reason = cause_reason
    event_type = None if cause_reason == Reason.RESUME else 'irrelevant'
    with freezegun.freeze_time(now):
        await process_resource_event(
            lifecycle=kopf.lifecycles.all_at_once,
            registry=registry,
            settings=settings,
            resource=resource,
            indexers=OperatorIndexers(),
            memories=ResourceMemories(),
            memobase=Memo(),
            raw_event={'type': event_type, 'object': {}},
            event_queue=asyncio.Queue(),
        )
    # Only the handler matching the simulated cause is invoked, exactly once.
    per_reason = {
        Reason.CREATE: handlers.create_mock,
        Reason.UPDATE: handlers.update_mock,
        Reason.DELETE: handlers.delete_mock,
        Reason.RESUME: handlers.resume_mock,
    }
    for reason, mocked in per_reason.items():
        assert mocked.call_count == (1 if cause_reason == reason else 0)
    assert not k8s_mocked.sleep.called
    assert k8s_mocked.patch.called
    fname = f'{cause_reason}_fn'
    payload = k8s_mocked.patch.call_args_list[0][1]['payload']
    assert payload['status']['kopf']['progress'][fname]['delayed'] == delayed_iso
    assert_logs([
        "Handler .+ is invoked",
        "Handler .+ failed temporarily: oops",
    ])
async def test_retry_error_delays_handler(registry, settings, handlers, extrahandlers, resource, cause_mock, cause_type, caplog, assert_logs, k8s_mocked):
    """A TemporaryError marks the handler as delayed -- neither failed nor succeeded."""
    caplog.set_level(logging.DEBUG)
    handler_name = f'{cause_type}_fn'
    cause_mock.reason = cause_type
    event_type = None if cause_type == Reason.RESUME else 'irrelevant'
    for mocked in (handlers.create_mock, handlers.update_mock,
                   handlers.delete_mock, handlers.resume_mock):
        mocked.side_effect = TemporaryError("oops")
    await process_resource_event(
        lifecycle=kopf.lifecycles.one_by_one,
        registry=registry,
        settings=settings,
        resource=resource,
        indexers=OperatorIndexers(),
        memories=ResourceMemories(),
        memobase=Memo(),
        raw_event={'type': event_type, 'object': {}},
        event_queue=asyncio.Queue(),
    )
    # Only the handler matching the simulated cause is invoked, exactly once.
    per_reason = {
        Reason.CREATE: handlers.create_mock,
        Reason.UPDATE: handlers.update_mock,
        Reason.DELETE: handlers.delete_mock,
        Reason.RESUME: handlers.resume_mock,
    }
    for reason, mocked in per_reason.items():
        assert mocked.call_count == (1 if cause_type == reason else 0)
    assert not k8s_mocked.sleep.called
    assert k8s_mocked.patch_obj.called
    progress = k8s_mocked.patch_obj.call_args_list[0][1]['patch']['status']['kopf']['progress']
    assert progress is not None
    assert progress[handler_name]['failure'] is False
    assert progress[handler_name]['success'] is False
    assert progress[handler_name]['delayed']
    assert_logs([
        "Handler .+ failed temporarily: oops",
    ])
async def test_delete(registry, settings, handlers, resource, cause_mock, event_type, caplog, assert_logs, k8s_mocked):
    """Deletion runs only the delete-handlers once, then removes the finalizer."""
    caplog.set_level(logging.DEBUG)
    cause_mock.reason = Reason.DELETE
    queue = asyncio.Queue()
    body = {'metadata': {'deletionTimestamp': '...',
                         'finalizers': [settings.persistence.finalizer]}}
    await process_resource_event(
        lifecycle=kopf.lifecycles.all_at_once,
        registry=registry,
        settings=settings,
        resource=resource,
        indexers=OperatorIndexers(),
        memories=ResourceMemories(),
        memobase=Memo(),
        raw_event={'type': event_type, 'object': body},
        event_queue=queue,
    )
    assert not handlers.create_mock.called
    assert not handlers.update_mock.called
    assert handlers.delete_mock.call_count == 1
    assert k8s_mocked.sleep.call_count == 0
    assert k8s_mocked.patch_obj.call_count == 1
    assert not queue.empty()
    assert_logs([
        "Deletion is in progress:",
        "Handler 'delete_fn' is invoked",
        "Handler 'delete_fn' succeeded",
        "Deletion is processed:",
        "Removing the finalizer",
        "Patching with",
    ])
async def test_update(registry, settings, handlers, resource, cause_mock, event_type, caplog, assert_logs, k8s_mocked):
    """An update runs only the update-handlers and patches the last-seen annotation."""
    caplog.set_level(logging.DEBUG)
    cause_mock.reason = Reason.UPDATE
    queue = asyncio.Queue()
    await process_resource_event(
        lifecycle=kopf.lifecycles.all_at_once,
        registry=registry,
        settings=settings,
        resource=resource,
        indexers=OperatorIndexers(),
        memories=ResourceMemories(),
        memobase=Memo(),
        raw_event={'type': event_type, 'object': {}},
        event_queue=queue,
    )
    assert not handlers.create_mock.called
    assert handlers.update_mock.call_count == 1
    assert not handlers.delete_mock.called
    assert k8s_mocked.sleep.call_count == 0
    assert k8s_mocked.patch_obj.call_count == 1
    assert not queue.empty()
    applied = k8s_mocked.patch_obj.call_args_list[0][1]['patch']
    assert 'metadata' in applied
    assert 'annotations' in applied['metadata']
    assert LAST_SEEN_ANNOTATION in applied['metadata']['annotations']
    assert_logs([
        "Updating is in progress:",
        "Handler 'update_fn' is invoked",
        "Handler 'update_fn' succeeded",
        "Updating is processed:",
        "Patching with",
    ])
async def test_all_logs_are_prefixed(registry, settings, resource, handlers, logstream, cause_type, cause_mock):
    """Every emitted log line must carry the object's namespace/name prefix."""
    cause_mock.reason = cause_type
    event_type = None if cause_type == Reason.RESUME else 'irrelevant'
    body = {'metadata': {'namespace': 'ns1', 'name': 'name1'}}
    await process_resource_event(
        lifecycle=kopf.lifecycles.all_at_once,
        registry=registry,
        settings=settings,
        resource=resource,
        indexers=OperatorIndexers(),
        memories=ResourceMemories(),
        memobase=Memo(),
        raw_event={'type': event_type, 'object': body},
        event_queue=asyncio.Queue(),
    )
    lines = logstream.getvalue().splitlines()
    assert lines  # no messages means that we cannot test it
    for line in lines:
        assert line.startswith('prefix [ns1/name1] ')
async def test_handlers_called_always(registry, settings, handlers, extrahandlers, resource, cause_mock, cause_type, caplog, assert_logs, k8s_mocked):
    """Event-handlers run for every event regardless of the change-cause type."""
    caplog.set_level(logging.DEBUG)
    cause_mock.reason = cause_type
    await process_resource_event(
        lifecycle=kopf.lifecycles.all_at_once,
        registry=registry,
        settings=settings,
        resource=resource,
        indexers=OperatorIndexers(),
        memories=ResourceMemories(),
        memobase=Memo(),
        raw_event={'type': 'ev-type', 'object': {'field': 'value'}},
        event_queue=asyncio.Queue(),
    )
    assert handlers.event_mock.call_count == 1
    assert extrahandlers.event_mock.call_count == 1
    delivered = handlers.event_mock.call_args_list[0][1]['event']
    assert delivered['type'] == 'ev-type'
    assert 'field' in delivered['object']
    assert delivered['object']['field'] == 'value'
    assert_logs([
        "Handler 'event_fn' is invoked.",
        "Handler 'event_fn' succeeded.",
        "Handler 'event_fn2' is invoked.",
        "Handler 'event_fn2' succeeded.",
    ])
async def test_parameter_is_passed_even_if_not_specified(resource, cause_mock, registry, settings):
    """Handlers registered without an explicit `param` still receive `param=None`."""
    invocation = Mock()

    # One handler suffices: if the kwarg reaches this one, it reaches all of them.
    # Triggering the actual invocation directly would be much harder.
    @kopf.on.event(*resource)
    def fn(**kwargs):
        invocation(**kwargs)

    await process_resource_event(
        lifecycle=kopf.lifecycles.all_at_once,
        registry=registry,
        settings=settings,
        resource=resource,
        indexers=OperatorIndexers(),
        memories=ResourceMemories(),
        memobase=Memo(),
        raw_event={'type': None, 'object': {}},
        event_queue=asyncio.Queue(),
    )
    assert invocation.called
    assert invocation.call_args_list[0][1]['param'] is None
async def test_diffs_not_logged_if_absent(registry, settings, resource, handlers, cause_type, cause_mock, caplog, assert_logs, diff):
    """An absent/empty diff must not produce a '... diff:' log line."""
    caplog.set_level(logging.DEBUG)
    cause_mock.reason = cause_type
    cause_mock.diff = diff
    event_type = None if cause_type == Reason.RESUME else 'irrelevant'
    await process_resource_event(
        lifecycle=kopf.lifecycles.all_at_once,
        registry=registry,
        settings=settings,
        resource=resource,
        indexers=OperatorIndexers(),
        memories=ResourceMemories(),
        memobase=Memo(),
        raw_event={'type': event_type, 'object': {}},
        event_queue=asyncio.Queue(),
    )
    assert_logs(
        ["(Creation|Updating|Resuming|Deletion) is in progress: "],
        prohibited=[" diff: "],
    )
async def test_delayed_handlers_sleep(registry, settings, handlers, resource, cause_mock, cause_reason, caplog, assert_logs, k8s_mocked, now, delayed_iso, delay): caplog.set_level(logging.DEBUG) # Simulate the original persisted state of the resource. # Make sure the finalizer is added since there are mandatory deletion handlers. started_dt = datetime.datetime.fromisoformat( '2000-01-01T00:00:00') # long time ago is fine. delayed_dt = datetime.datetime.fromisoformat(delayed_iso) event_type = None if cause_reason == Reason.RESUME else 'irrelevant' event_body = { 'metadata': { 'finalizers': [settings.persistence.finalizer] }, 'status': { 'kopf': { 'progress': { 'create_fn': HandlerState(started=started_dt, delayed=delayed_dt).as_in_storage(), 'update_fn': HandlerState(started=started_dt, delayed=delayed_dt).as_in_storage(), 'delete_fn': HandlerState(started=started_dt, delayed=delayed_dt).as_in_storage(), 'resume_fn': HandlerState(started=started_dt, delayed=delayed_dt).as_in_storage(), } } } } cause_mock.reason = cause_reason with freezegun.freeze_time(now): await process_resource_event( lifecycle=kopf.lifecycles.all_at_once, registry=registry, settings=settings, resource=resource, indexers=OperatorIndexers(), memories=ResourceMemories(), memobase=Memo(), raw_event={ 'type': event_type, 'object': event_body }, event_queue=asyncio.Queue(), ) assert not handlers.create_mock.called assert not handlers.update_mock.called assert not handlers.delete_mock.called assert not handlers.resume_mock.called # The dummy patch is needed to trigger the further changes. The value is irrelevant. assert k8s_mocked.patch.called assert 'dummy' in k8s_mocked.patch.call_args_list[-1][1]['payload'][ 'status']['kopf'] # The duration of sleep should be as expected. assert k8s_mocked.sleep.called assert k8s_mocked.sleep.call_args_list[0][0][0] == delay assert_logs([ r"Sleeping for ([\d\.]+|[\d\.]+ \(capped [\d\.]+\)) seconds", ])
async def test_skipped_with_no_handlers(registry, settings, selector, resource, cause_mock, cause_type, caplog, assert_logs, k8s_mocked):
    # Purpose: when the registered handler's reason never matches, no handler
    # runs, and the resulting patch contains ONLY the last-seen annotation --
    # nothing else may leak into it.
    caplog.set_level(logging.DEBUG)
    event_type = None
    event_body = {'metadata': {'finalizers': []}}
    cause_mock.reason = cause_type
    assert not registry._changing.has_handlers(
        resource=resource)  # prerequisite
    registry._changing.append(
        ChangingHandler(
            # A reason that can never match any real cause => the handler is never selected.
            reason='a-non-existent-cause-type',
            fn=lambda **_: None,
            id='id',
            param=None,
            errors=None,
            timeout=None,
            retries=None,
            backoff=None,
            selector=selector,
            annotations=None,
            labels=None,
            when=None,
            field=None,
            value=None,
            old=None,
            new=None,
            field_needs_change=None,
            deleted=None,
            initial=None,
            requires_finalizer=None,
        ))
    await process_resource_event(
        lifecycle=kopf.lifecycles.all_at_once,
        registry=registry,
        settings=settings,
        resource=resource,
        indexers=OperatorIndexers(),
        memories=ResourceMemories(),
        memobase=Memo(),
        raw_event={
            'type': event_type,
            'object': event_body
        },
        event_queue=asyncio.Queue(),
    )
    assert not k8s_mocked.sleep.called
    assert k8s_mocked.patch_obj.called
    # The patch must contain ONLY the last-seen update, and nothing else.
    patch = k8s_mocked.patch_obj.call_args_list[0][1]['patch']
    assert set(patch.keys()) == {'metadata'}
    assert set(patch['metadata'].keys()) == {'annotations'}
    assert set(
        patch['metadata']['annotations'].keys()) == {LAST_SEEN_ANNOTATION}
    assert_logs([
        "(Creation|Updating|Resuming|Deletion) is in progress:",
        "Patching with:",
    ], prohibited=[
        "(Creation|Updating|Resuming|Deletion) is processed:",
    ])
async def test_recalling_reuses_when_present():
    """Two recalls of the same body return the very same memory object."""
    memories = ResourceMemories()
    first = await memories.recall(BODY)
    second = await memories.recall(BODY)
    assert first is second
async def test_recalling_creates_when_absent():
    """Recalling an unknown body lazily creates a ResourceMemory for it."""
    memories = ResourceMemories()
    recalled = await memories.recall(BODY)
    assert isinstance(recalled, ResourceMemory)
async def test_forgetting_ignores_when_absent():
    """Forgetting a never-recalled body is a no-op and must not raise."""
    memories = ResourceMemories()
    await memories.forget(BODY)
async def test_memo_is_autocreated():
    """When no memobase is given, the memory gets a fresh Memo automatically."""
    memories = ResourceMemories()
    recalled = await memories.recall(BODY)
    assert isinstance(recalled.memo, Memo)
def memories():
    """Provide a fresh container of per-object memories for each test."""
    return ResourceMemories()
async def test_retries_limited_handler_fails(registry, settings, handlers, extrahandlers, resource, cause_mock, cause_type, caplog, assert_logs, k8s_mocked):
    # Purpose: when the persisted progress shows the retries limit is already
    # exceeded, the handler is not invoked again and is marked as failed.
    caplog.set_level(logging.DEBUG)
    name1 = f'{cause_type}_fn'
    # RESUME causes come only from the initial listing (raw event type None).
    event_type = None if cause_type == Reason.RESUME else 'irrelevant'
    # Simulate the persisted state with the retry counters already exhausted.
    event_body = {
        'status': {
            'kopf': {
                'progress': {
                    'create_fn': {
                        'retries': 100
                    },
                    'update_fn': {
                        'retries': 100
                    },
                    'delete_fn': {
                        'retries': 100
                    },
                    'resume_fn': {
                        'retries': 100
                    },
                }
            }
        }
    }
    cause_mock.reason = cause_type
    await process_resource_event(
        lifecycle=kopf.lifecycles.one_by_one,
        registry=registry,
        settings=settings,
        resource=resource,
        indexers=OperatorIndexers(),
        memories=ResourceMemories(),
        memobase=Memo(),
        raw_event={
            'type': event_type,
            'object': event_body
        },
        event_queue=asyncio.Queue(),
    )
    # No handler may run again once its retries are exhausted.
    assert not handlers.create_mock.called
    assert not handlers.update_mock.called
    assert not handlers.delete_mock.called
    assert not handlers.resume_mock.called
    # Progress is reset, as the handler is not going to retry.
    assert not k8s_mocked.sleep.called
    assert k8s_mocked.patch_obj.called
    patch = k8s_mocked.patch_obj.call_args_list[0][1]['patch']
    assert patch['status']['kopf']['progress'] is not None
    assert patch['status']['kopf']['progress'][name1]['failure'] is True
    assert_logs([
        r"Handler .+ has exceeded \d+ retries",
    ])
async def test_consistent_awakening(registry, settings, resource, k8s_mocked, mocker):
    """
    A special case to ensure that "now" is consistent during the handling.

    Previously, "now" of `handler.awakened` and "now" of `state.delay`
    were different (maybe for less than 1 ms). If the scheduled awakening
    time was unlucky to be between these two points in time, the operator
    stopped reacting on this object until any other events or changes arrive.

    Implementation-wise, the operator neither selected the handlers (because
    it was "1ms too early", as per `handler.awakened`), nor did it sleep
    (because it was "1ms too late", as per `state.delay`), nor did it produce
    even a dummy patch (because zero-sleep meant "no sleep").

    After the fix, zero-sleep produces a dummy patch to trigger the reaction
    cycle after the sleep is over (as if it was an actual zero-time sleep).

    In the test, the time granularity is intentionally that low -- 1 µs.
    The time is anyway frozen and does not progress unless explicitly ticked.

    See also: #284
    """
    # Simulate that the object is scheduled to be awakened between the watch-event and sleep.
    ts0 = datetime.datetime(2019, 12, 30, 10, 56, 43)
    tsA_triggered = "2019-12-30T10:56:42.999999"  # 1 µs before the scheduled time
    ts0_scheduled = "2019-12-30T10:56:43.000000"
    tsB_delivered = "2019-12-30T10:56:43.000001"  # 1 µs after the scheduled time

    # A dummy handler: it will not be selected for execution anyway, we just need to have it.
    @kopf.on.create(*resource, id='some-id')
    def handler_fn(**_):
        pass

    # Simulate the ticking of time, so that it goes beyond the scheduled awakening time.
    # Any hook point between handler selection and delay calculation is fine,
    # but State.store() also prevents other status-fields from being added and the patch populated.
    def move_to_tsB(*_, **__):
        frozen_dt.move_to(tsB_delivered)

    state_store = mocker.patch('kopf._core.actions.progression.State.store', side_effect=move_to_tsB)
    # The persisted state: the only handler is delayed exactly to the scheduled time.
    body = {
        'status': {
            'kopf': {
                'progress': {
                    'some-id': {
                        'delayed': ts0_scheduled
                    }
                }
            }
        }
    }

    # Simulate the call as if the event has just arrived on the watch-stream.
    # Another way (the same effect): process_changing_cause() and its result.
    with freezegun.freeze_time(tsA_triggered) as frozen_dt:
        assert datetime.datetime.utcnow() < ts0  # extra precaution
        await process_resource_event(
            lifecycle=kopf.lifecycles.all_at_once,
            registry=registry,
            settings=settings,
            resource=resource,
            indexers=OperatorIndexers(),
            memories=ResourceMemories(),
            memobase=Memo(),
            raw_event={
                'type': 'ADDED',
                'object': body
            },
            event_queue=asyncio.Queue(),
        )
        assert datetime.datetime.utcnow() > ts0  # extra precaution
    assert state_store.called

    # Without "now"-time consistency, neither sleep() would be called, nor a patch applied.
    # Verify that the patch was actually applied, so that the reaction cycle continues.
    assert k8s_mocked.patch.called
    assert 'dummy' in k8s_mocked.patch.call_args_list[-1][1]['payload'][
        'status']['kopf']