def owner(request, resource):
    """
    Pytest fixture: yield an owner ``Body`` while a matching cause is active.

    Depending on ``request.param``, the cause context-var is populated either
    with a ``ResourceChangingCause`` or a ``ResourceWatchingCause`` built over
    a deep copy of the ``OWNER`` body, so that owner-reference helpers under
    test can pick the cause up from the context.
    """
    body = Body(copy.deepcopy(OWNER))
    if request.param == 'state-changing-cause':
        cause = ResourceChangingCause(
            logger=logging.getLogger('kopf.test.fake.logger'),
            indices=OperatorIndexers().indices,
            resource=resource,
            patch=Patch(),
            memo=Memo(),
            body=body,
            initial=False,
            reason=Reason.NOOP,
        )
        # The cause is only visible to the code under test via the context var.
        with context([(cause_var, cause)]):
            yield body
    elif request.param == 'event-watching-cause':
        cause = ResourceWatchingCause(
            logger=logging.getLogger('kopf.test.fake.logger'),
            indices=OperatorIndexers().indices,
            resource=resource,
            patch=Patch(),
            memo=Memo(),
            body=body,
            type='irrelevant',
            raw=RawEvent(type='irrelevant', object=OWNER),
        )
        with context([(cause_var, cause)]):
            yield body
    else:
        raise RuntimeError(
            f"Wrong param for `owner` fixture: {request.param!r}")
async def liveness_url(settings, liveness_registry, aiohttp_unused_port):
    """
    Pytest fixture: run a health-reporting server and yield its probe URL.

    The server task is started in the background, awaited until it signals
    readiness, and reliably cancelled on teardown.
    """
    # The server startup is not instant, so we need a readiness flag.
    ready_flag = asyncio.Event()
    port = aiohttp_unused_port()
    server = asyncio.create_task(
        health_reporter(
            endpoint=f'http://:{port}/xyz',
            registry=liveness_registry,
            settings=settings,
            ready_flag=ready_flag,
            indices=OperatorIndexers().indices,
            memo=Memo(),
        ))
    try:
        await ready_flag.wait()
        yield f'http://localhost:{port}/xyz'
    finally:
        # Cancel the background server and swallow only the cancellation itself.
        server.cancel()
        try:
            await server
        except asyncio.CancelledError:
            pass
async def test_diffs_logged_if_present(registry, settings, resource, handlers,
                                       cause_type, cause_mock, caplog, assert_logs, diff):
    """A non-empty diff (with old/new present) must be logged during processing."""
    caplog.set_level(logging.DEBUG)

    event_type = None if cause_type == Reason.RESUME else 'irrelevant'
    cause_mock.reason = cause_type
    cause_mock.diff = diff
    cause_mock.new = {'field': 'old'}  # checked for `not None`, and JSON-serialised
    cause_mock.old = {'field': 'new'}  # checked for `not None`, and JSON-serialised

    await process_resource_event(
        lifecycle=kopf.lifecycles.all_at_once,
        registry=registry,
        settings=settings,
        resource=resource,
        indexers=OperatorIndexers(),
        memories=ResourceMemories(),
        memobase=Memo(),
        raw_event={'type': event_type, 'object': {}},
        event_queue=asyncio.Queue(),
    )

    assert_logs([
        "(Creation|Updating|Resuming|Deletion) is in progress: ",
        "(Creation|Updating|Resuming|Deletion) diff: "
    ])
async def test_single_credentials_provided_to_vault(settings):
    """
    A single login handler's credentials must land in the vault under its id.

    BUGFIX: the original loop re-used the name ``info`` as the loop variable,
    shadowing the expected ``ConnectionInfo`` — the final identity assertion
    then compared the stored item to itself and could never fail. The loop
    variable is renamed so the assertion actually checks the stored object.
    """
    info = ConnectionInfo(server='https://expected/')
    vault = Vault()
    registry = OperatorRegistry()

    def login_fn(**_):
        return info

    # NB: id auto-detection does not work, as it is local to the test function.
    registry._activities.append(ActivityHandler(
        fn=login_fn, id='login_fn',
        activity=Activity.AUTHENTICATION,
        param=None, errors=None, timeout=None, retries=None, backoff=None,
    ))
    await authenticate(
        registry=registry,
        settings=settings,
        vault=vault,
        memo=Memo(),
        indices=OperatorIndexers().indices,
    )

    assert vault
    items = []
    async for key, stored_info in vault:  # renamed: do not shadow `info`
        items.append((key, stored_info))
    assert len(items) == 1
    assert items[0][0] == 'login_fn'
    assert items[0][1] is info  # now genuinely checks the handler's result
async def test_special_kwargs_added(fn, resource): body = {'metadata': {'uid': 'uid', 'name': 'name', 'namespace': 'ns'}, 'spec': {'field': 'value'}, 'status': {'info': 'payload'}} # Values can be any. cause = ResourceChangingCause( logger=logging.getLogger('kopf.test.fake.logger'), indices=OperatorIndexers().indices, resource=resource, patch=Patch(), initial=False, reason=Reason.NOOP, memo=object(), body=Body(body), diff=object(), old=object(), new=object(), ) fn = MagicMock(fn) await invoke(fn, cause=cause) assert fn.called assert fn.call_count == 1 # Only check that kwargs are passed at all. The exact kwargs per cause are tested separately. assert 'logger' in fn.call_args[1] assert 'resource' in fn.call_args[1]
async def test_parameter_is_passed_even_if_not_specified( resource, cause_mock, registry, settings): mock = Mock() # If it works for this handler, we assume it works for all of them. # Otherwise, it is too difficult to trigger the actual invocation. @kopf.on.event(*resource) def fn(**kwargs): mock(**kwargs) event_queue = asyncio.Queue() await process_resource_event( lifecycle=kopf.lifecycles.all_at_once, registry=registry, settings=settings, resource=resource, indexers=OperatorIndexers(), memories=ResourceMemories(), memobase=Memo(), raw_event={ 'type': None, 'object': {} }, event_queue=event_queue, ) assert mock.called assert mock.call_args_list[0][1]['param'] is None
async def test_retries_are_simulated(settings, activity, mocker):
    """A persistently failing handler is retried up to its ``retries`` limit."""
    mock = mocker.MagicMock()

    def sample_fn(**_):
        mock()
        raise TemporaryError('to be retried', delay=0)

    registry = OperatorRegistry()
    registry._activities.append(ActivityHandler(
        fn=sample_fn, id='id', activity=activity,
        param=None, errors=None, timeout=None, retries=3, backoff=None,
    ))
    with pytest.raises(ActivityError) as e:
        await run_activity(
            registry=registry,
            settings=settings,
            activity=activity,
            lifecycle=all_at_once,
            indices=OperatorIndexers().indices,
            memo=Memo(),
        )
    # After the retries are exhausted, the outcome is escalated as permanent.
    assert isinstance(e.value.outcomes['id'].exception, PermanentError)
    assert mock.call_count == 3
async def test_errors_are_cascaded_from_one_of_the_originals(
        settings, activity):
    """The raised ``ActivityError`` must chain the handler's own exception."""

    def failing_handler(**_):
        raise PermanentError("boo!")

    registry = OperatorRegistry()
    handler = ActivityHandler(
        fn=failing_handler, id='id', activity=activity,
        param=None, errors=None, timeout=None, retries=None, backoff=None,
    )
    registry._activities.append(handler)

    with pytest.raises(ActivityError) as e:
        await run_activity(
            registry=registry,
            settings=settings,
            activity=activity,
            lifecycle=all_at_once,
            indices=OperatorIndexers().indices,
            memo=Memo(),
        )

    # The original error must be available via the exception-chaining cause.
    original = e.value.__cause__
    assert original
    assert type(original) is PermanentError
    assert str(original) == "boo!"
async def test_diffs_not_logged_if_absent(registry, settings, resource, handlers,
                                          cause_type, cause_mock, caplog, assert_logs, diff):
    """An absent/empty diff must not produce a "diff:" log line."""
    caplog.set_level(logging.DEBUG)

    event_type = None if cause_type == Reason.RESUME else 'irrelevant'
    cause_mock.reason = cause_type
    cause_mock.diff = diff

    await process_resource_event(
        lifecycle=kopf.lifecycles.all_at_once,
        registry=registry,
        settings=settings,
        resource=resource,
        indexers=OperatorIndexers(),
        memories=ResourceMemories(),
        memobase=Memo(),
        raw_event={'type': event_type, 'object': {}},
        event_queue=asyncio.Queue(),
    )

    assert_logs([
        "(Creation|Updating|Resuming|Deletion) is in progress: ",
    ], prohibited=[" diff: "])
async def test_noop(registry, settings, handlers, resource, cause_mock, event_type,
                    caplog, assert_logs, k8s_mocked):
    """A NOOP cause must call no handlers, sleep nothing, and patch nothing."""
    caplog.set_level(logging.DEBUG)
    cause_mock.reason = Reason.NOOP

    event_queue = asyncio.Queue()
    await process_resource_event(
        lifecycle=kopf.lifecycles.all_at_once,
        registry=registry,
        settings=settings,
        resource=resource,
        indexers=OperatorIndexers(),
        memories=ResourceMemories(),
        memobase=Memo(),
        raw_event={'type': event_type, 'object': {}},
        event_queue=event_queue,
    )

    assert not handlers.create_mock.called
    assert not handlers.update_mock.called
    assert not handlers.delete_mock.called
    assert not k8s_mocked.sleep_or_wait.called
    assert not k8s_mocked.patch_obj.called
    assert event_queue.empty()
    assert_logs([
        "Something has changed, but we are not interested",
    ])
async def test_noreturn_handler_produces_no_credentials(settings):
    """A login handler returning None must leave the vault empty (LoginError)."""
    vault = Vault()
    registry = OperatorRegistry()

    def login_fn(**_):
        pass

    # NB: id auto-detection does not work, as it is local to the test function.
    registry._activities.append(ActivityHandler(
        fn=login_fn, id='login_fn',
        activity=Activity.AUTHENTICATION,
        param=None, errors=None, timeout=None, retries=None, backoff=None,
    ))
    await authenticate(
        registry=registry,
        settings=settings,
        vault=vault,
        memo=Memo(),
        indices=OperatorIndexers().indices,
    )

    assert not vault
    # Iterating an empty vault escalates to a login error.
    with pytest.raises(LoginError):
        async for _, _ in vault:
            pass
async def test_errors_are_ignored(registry, settings, handlers, extrahandlers,
                                  resource, cause_mock, cause_type, caplog, assert_logs, k8s_mocked):
    """A failing event handler is logged and ignored; later handlers still run."""
    caplog.set_level(logging.DEBUG)
    cause_mock.reason = cause_type
    handlers.event_mock.side_effect = Exception("oops")

    await process_resource_event(
        lifecycle=kopf.lifecycles.all_at_once,
        registry=registry,
        settings=settings,
        resource=resource,
        indexers=OperatorIndexers(),
        memories=ResourceMemories(),
        memobase=Memo(),
        raw_event={'type': 'ev-type', 'object': {}},
        event_queue=asyncio.Queue(),
    )

    assert handlers.event_mock.called
    assert extrahandlers.event_mock.called
    assert_logs([
        "Handler 'event_fn' is invoked.",
        "Handler 'event_fn' failed with an exception. Will ignore.",
        "Handler 'event_fn2' is invoked.",
        "Handler 'event_fn2' succeeded.",
    ])
async def test_1st_step_stores_progress_by_patching(registry, settings, handlers, extrahandlers,
                                                    resource, cause_mock, cause_type,
                                                    k8s_mocked, deletion_ts):
    """
    After the 1st handler of a multi-handler cycle, progress is stored via a patch.

    With the ``asap`` lifecycle, only the first handler runs; the resulting
    patch must record its success and the second handler's pending state,
    and must NOT remove the finalizers yet.
    """
    name1 = f'{cause_type}_fn'
    name2 = f'{cause_type}_fn2'
    event_type = None if cause_type == Reason.RESUME else 'irrelevant'
    event_body = {
        'metadata': {'finalizers': [settings.persistence.finalizer]},
    }
    event_body['metadata'].update(deletion_ts)
    cause_mock.reason = cause_type

    await process_resource_event(
        lifecycle=kopf.lifecycles.asap,
        registry=registry,
        settings=settings,
        resource=resource,
        indexers=OperatorIndexers(),
        memories=ResourceMemories(),
        memobase=Memo(),
        raw_event={'type': event_type, 'object': event_body},
        event_queue=asyncio.Queue(),
    )

    # Only the handler matching the cause's reason is called, exactly once.
    assert handlers.create_mock.call_count == (1 if cause_type == Reason.CREATE else 0)
    assert handlers.update_mock.call_count == (1 if cause_type == Reason.UPDATE else 0)
    assert handlers.delete_mock.call_count == (1 if cause_type == Reason.DELETE else 0)
    assert handlers.resume_mock.call_count == (1 if cause_type == Reason.RESUME else 0)

    assert not k8s_mocked.sleep_or_wait.called
    assert k8s_mocked.patch_obj.called

    patch = k8s_mocked.patch_obj.call_args_list[0][1]['patch']
    assert patch['status']['kopf']['progress'] is not None
    assert patch['status']['kopf']['progress'][name1]['retries'] == 1
    assert patch['status']['kopf']['progress'][name1]['success'] is True
    assert patch['status']['kopf']['progress'][name2]['retries'] == 0
    assert patch['status']['kopf']['progress'][name2]['success'] is False
    assert patch['status']['kopf']['progress'][name1]['started']
    assert patch['status']['kopf']['progress'][name2]['started']

    # Premature removal of finalizers can prevent the 2nd step for deletion handlers.
    # So, the finalizers must never be removed on the 1st step.
    assert 'finalizers' not in patch['metadata']
async def test_stealth_mode_with_mismatching_handlers(
        registry, settings, selector, resource, cause_mock, cause_type,
        caplog, assert_logs, k8s_mocked, annotations, labels, when, deleted, initial):
    """
    Handlers whose filters do not match must cause zero side effects.

    No sleeping, no patching, and — notably — not a single log message.
    """
    caplog.set_level(logging.DEBUG)
    event_type = None
    event_body = {'metadata': {'finalizers': []}}
    cause_mock.reason = cause_type

    assert not registry._resource_changing.has_handlers(resource=resource)  # prerequisite
    registry._resource_changing.append(ResourceChangingHandler(
        reason=None,
        fn=lambda **_: None,
        id='id',
        param=None,
        errors=None,
        timeout=None,
        retries=None,
        backoff=None,
        selector=selector,
        annotations=annotations,
        labels=labels,
        when=when,
        field=None,
        value=None,
        old=None,
        new=None,
        field_needs_change=None,
        deleted=deleted,
        initial=initial,
        requires_finalizer=None,
    ))

    await process_resource_event(
        lifecycle=kopf.lifecycles.all_at_once,
        registry=registry,
        settings=settings,
        resource=resource,
        indexers=OperatorIndexers(),
        memories=ResourceMemories(),
        memobase=Memo(),
        raw_event={'type': event_type, 'object': event_body},
        event_queue=asyncio.Queue(),
    )

    assert not k8s_mocked.sleep_or_wait.called
    assert not k8s_mocked.patch_obj.called
    assert not caplog.messages  # total stealth mode!
async def test_errors_are_raised_aggregated(settings, activity):
    """Failures of multiple handlers are aggregated into one ActivityError."""

    def sample_fn1(**_):
        raise PermanentError("boo!123")

    def sample_fn2(**_):
        raise PermanentError("boo!456")

    registry = OperatorRegistry()
    registry._activities.append(ActivityHandler(
        fn=sample_fn1, id='id1', activity=activity,
        param=None, errors=None, timeout=None, retries=None, backoff=None,
    ))
    registry._activities.append(ActivityHandler(
        fn=sample_fn2, id='id2', activity=activity,
        param=None, errors=None, timeout=None, retries=None, backoff=None,
    ))
    with pytest.raises(ActivityError) as e:
        await run_activity(
            registry=registry,
            settings=settings,
            activity=activity,
            lifecycle=all_at_once,
            indices=OperatorIndexers().indices,
            memo=Memo(),
        )

    # Both failed outcomes are kept, each final (non-retriable), with its own exception.
    assert set(e.value.outcomes.keys()) == {'id1', 'id2'}
    assert e.value.outcomes['id1'].final
    assert e.value.outcomes['id1'].delay is None
    assert e.value.outcomes['id1'].result is None
    assert e.value.outcomes['id1'].exception is not None
    assert e.value.outcomes['id2'].final
    assert e.value.outcomes['id2'].delay is None
    assert e.value.outcomes['id2'].result is None
    assert e.value.outcomes['id2'].exception is not None
    assert str(e.value.outcomes['id1'].exception) == "boo!123"
    assert str(e.value.outcomes['id2'].exception) == "boo!456"
async def test_delayed_handlers_progress(registry, settings, handlers, resource,
                                         cause_mock, cause_reason, caplog, assert_logs,
                                         k8s_mocked, now, delayed_iso, delay):
    """A TemporaryError with a delay must store the 'delayed' timestamp in progress."""
    caplog.set_level(logging.DEBUG)
    handlers.create_mock.side_effect = TemporaryError("oops", delay=delay)
    handlers.update_mock.side_effect = TemporaryError("oops", delay=delay)
    handlers.delete_mock.side_effect = TemporaryError("oops", delay=delay)
    handlers.resume_mock.side_effect = TemporaryError("oops", delay=delay)

    event_type = None if cause_reason == Reason.RESUME else 'irrelevant'
    cause_mock.reason = cause_reason

    # Freeze time so that the computed "delayed until" timestamp is deterministic.
    with freezegun.freeze_time(now):
        await process_resource_event(
            lifecycle=kopf.lifecycles.all_at_once,
            registry=registry,
            settings=settings,
            resource=resource,
            indexers=OperatorIndexers(),
            memories=ResourceMemories(),
            memobase=Memo(),
            raw_event={'type': event_type, 'object': {}},
            event_queue=asyncio.Queue(),
        )

    assert handlers.create_mock.call_count == (1 if cause_reason == Reason.CREATE else 0)
    assert handlers.update_mock.call_count == (1 if cause_reason == Reason.UPDATE else 0)
    assert handlers.delete_mock.call_count == (1 if cause_reason == Reason.DELETE else 0)
    assert handlers.resume_mock.call_count == (1 if cause_reason == Reason.RESUME else 0)

    assert not k8s_mocked.sleep_or_wait.called
    assert k8s_mocked.patch_obj.called

    fname = f'{cause_reason}_fn'
    patch = k8s_mocked.patch_obj.call_args_list[0][1]['patch']
    assert patch['status']['kopf']['progress'][fname]['delayed'] == delayed_iso

    assert_logs([
        "Handler .+ is invoked",
        "Handler .+ failed temporarily: oops",
    ])
async def test_retry_error_delays_handler(registry, settings, handlers, extrahandlers,
                                          resource, cause_mock, cause_type,
                                          caplog, assert_logs, k8s_mocked):
    """A TemporaryError keeps the handler neither failed nor succeeded, only delayed."""
    caplog.set_level(logging.DEBUG)
    name1 = f'{cause_type}_fn'
    event_type = None if cause_type == Reason.RESUME else 'irrelevant'
    cause_mock.reason = cause_type
    handlers.create_mock.side_effect = TemporaryError("oops")
    handlers.update_mock.side_effect = TemporaryError("oops")
    handlers.delete_mock.side_effect = TemporaryError("oops")
    handlers.resume_mock.side_effect = TemporaryError("oops")

    await process_resource_event(
        lifecycle=kopf.lifecycles.one_by_one,
        registry=registry,
        settings=settings,
        resource=resource,
        indexers=OperatorIndexers(),
        memories=ResourceMemories(),
        memobase=Memo(),
        raw_event={'type': event_type, 'object': {}},
        event_queue=asyncio.Queue(),
    )

    assert handlers.create_mock.call_count == (1 if cause_type == Reason.CREATE else 0)
    assert handlers.update_mock.call_count == (1 if cause_type == Reason.UPDATE else 0)
    assert handlers.delete_mock.call_count == (1 if cause_type == Reason.DELETE else 0)
    assert handlers.resume_mock.call_count == (1 if cause_type == Reason.RESUME else 0)

    assert not k8s_mocked.sleep_or_wait.called
    assert k8s_mocked.patch_obj.called

    patch = k8s_mocked.patch_obj.call_args_list[0][1]['patch']
    assert patch['status']['kopf']['progress'] is not None
    assert patch['status']['kopf']['progress'][name1]['failure'] is False
    assert patch['status']['kopf']['progress'][name1]['success'] is False
    assert patch['status']['kopf']['progress'][name1]['delayed']

    assert_logs([
        "Handler .+ failed temporarily: oops",
    ])
async def test_supersession_is_logged(registry, settings, resource, handlers,
                                      cause_types, cause_mock, caplog, assert_logs):
    """Pre-existing progress for one purpose is superseded (and logged) by a new cause."""
    caplog.set_level(logging.DEBUG)
    settings.persistence.progress_storage = StatusProgressStorage()

    # Stored progress simulates a previous, different cause in mid-flight.
    body = {'status': {'kopf': {'progress': {
        'create_fn': {'purpose': cause_types[0]},
        'update_fn': {'purpose': cause_types[0]},
        'resume_fn': {'purpose': cause_types[0]},
        'delete_fn': {'purpose': cause_types[0]},
    }}}}
    cause_mock.reason = cause_types[1]
    event_type = None if cause_types[1] == Reason.RESUME else 'irrelevant'

    await process_resource_event(
        lifecycle=kopf.lifecycles.all_at_once,
        registry=registry,
        settings=settings,
        resource=resource,
        indexers=OperatorIndexers(),
        memories=ResourceMemories(),
        memobase=Memo(),
        raw_event={'type': event_type, 'object': body},
        event_queue=asyncio.Queue(),
    )

    assert_logs([
        "(Creation|Updating|Resuming|Deletion) is superseded by (creation|updating|resuming|deletion): ",
        "(Creation|Updating|Resuming|Deletion) is in progress: ",
        "(Creation|Updating|Resuming|Deletion) is processed: ",
    ])
async def test_empty_registry_produces_no_credentials(settings):
    """With no login handlers registered, the vault stays empty and fails on use."""
    registry = OperatorRegistry()
    vault = Vault()

    await authenticate(
        registry=registry,
        settings=settings,
        vault=vault,
        memo=Memo(),
        indices=OperatorIndexers().indices,
    )

    assert not vault

    # Consuming an empty vault must escalate to a login error.
    with pytest.raises(LoginError):
        async for _, _ in vault:
            pass
async def test_timed_out_handler_fails(
        registry, settings, handlers, extrahandlers,
        resource, cause_mock, cause_type,
        caplog, assert_logs, k8s_mocked, now, ts):
    """A handler whose start time exceeds the timeout fails without being invoked."""
    caplog.set_level(logging.DEBUG)
    name1 = f'{cause_type}_fn'
    event_type = None if cause_type == Reason.RESUME else 'irrelevant'
    event_body = {
        'status': {'kopf': {'progress': {
            'create_fn': {'started': ts},
            'update_fn': {'started': ts},
            'delete_fn': {'started': ts},
            'resume_fn': {'started': ts},
        }}}
    }
    cause_mock.reason = cause_type

    with freezegun.freeze_time(now):
        await process_resource_event(
            lifecycle=kopf.lifecycles.one_by_one,
            registry=registry,
            settings=settings,
            resource=resource,
            indexers=OperatorIndexers(),
            memories=ResourceMemories(),
            memobase=Memo(),
            raw_event={'type': event_type, 'object': event_body},
            event_queue=asyncio.Queue(),
        )

    assert not handlers.create_mock.called
    assert not handlers.update_mock.called
    assert not handlers.delete_mock.called
    assert not handlers.resume_mock.called

    # Progress is reset, as the handler is not going to retry.
    assert not k8s_mocked.sleep_or_wait.called
    assert k8s_mocked.patch_obj.called
    patch = k8s_mocked.patch_obj.call_args_list[0][1]['patch']
    assert patch['status']['kopf']['progress'] is not None
    assert patch['status']['kopf']['progress'][name1]['failure'] is True

    assert_logs([
        "Handler .+ has timed out after",
    ])
async def test_delete(registry, settings, handlers, resource, cause_mock, event_type,
                      caplog, assert_logs, k8s_mocked):
    """Deletion calls only the delete handler, then removes the finalizer via a patch."""
    caplog.set_level(logging.DEBUG)
    cause_mock.reason = Reason.DELETE

    finalizer = settings.persistence.finalizer
    event_body = {'metadata': {'deletionTimestamp': '...', 'finalizers': [finalizer]}}
    event_queue = asyncio.Queue()
    await process_resource_event(
        lifecycle=kopf.lifecycles.all_at_once,
        registry=registry,
        settings=settings,
        resource=resource,
        indexers=OperatorIndexers(),
        memories=ResourceMemories(),
        memobase=Memo(),
        raw_event={'type': event_type, 'object': event_body},
        event_queue=event_queue,
    )

    assert not handlers.create_mock.called
    assert not handlers.update_mock.called
    assert handlers.delete_mock.call_count == 1

    assert k8s_mocked.sleep_or_wait.call_count == 0
    assert k8s_mocked.patch_obj.call_count == 1
    assert not event_queue.empty()

    assert_logs([
        "Deletion is in progress:",
        "Handler 'delete_fn' is invoked",
        "Handler 'delete_fn' succeeded",
        "Deletion is processed:",
        "Removing the finalizer",
        "Patching with",
    ])
async def test_results_are_returned_on_success(settings, activity):
    """Successful activity handlers have their return values collected per id."""

    def sample_fn1(**_):
        return 123

    def sample_fn2(**_):
        return 456

    registry = OperatorRegistry()
    registry._activities.append(ActivityHandler(
        fn=sample_fn1, id='id1', activity=activity,
        param=None, errors=None, timeout=None, retries=None, backoff=None,
    ))
    registry._activities.append(ActivityHandler(
        fn=sample_fn2, id='id2', activity=activity,
        param=None, errors=None, timeout=None, retries=None, backoff=None,
    ))
    results = await run_activity(
        registry=registry,
        settings=settings,
        activity=activity,
        lifecycle=all_at_once,
        indices=OperatorIndexers().indices,
        memo=Memo(),
    )

    assert set(results.keys()) == {'id1', 'id2'}
    assert results['id1'] == 123
    assert results['id2'] == 456
async def test_update(registry, settings, handlers, resource, cause_mock, event_type,
                      caplog, assert_logs, k8s_mocked):
    """An UPDATE cause calls only the update handler and patches the last-seen state."""
    caplog.set_level(logging.DEBUG)
    cause_mock.reason = Reason.UPDATE

    event_queue = asyncio.Queue()
    await process_resource_event(
        lifecycle=kopf.lifecycles.all_at_once,
        registry=registry,
        settings=settings,
        resource=resource,
        indexers=OperatorIndexers(),
        memories=ResourceMemories(),
        memobase=Memo(),
        raw_event={'type': event_type, 'object': {}},
        event_queue=event_queue,
    )

    assert not handlers.create_mock.called
    assert handlers.update_mock.call_count == 1
    assert not handlers.delete_mock.called

    assert k8s_mocked.sleep_or_wait.call_count == 0
    assert k8s_mocked.patch_obj.call_count == 1
    assert not event_queue.empty()

    # The last-seen state is persisted via an annotation in the patch.
    patch = k8s_mocked.patch_obj.call_args_list[0][1]['patch']
    assert 'metadata' in patch
    assert 'annotations' in patch['metadata']
    assert LAST_SEEN_ANNOTATION in patch['metadata']['annotations']

    assert_logs([
        "Updating is in progress:",
        "Handler 'update_fn' is invoked",
        "Handler 'update_fn' succeeded",
        "Updating is processed:",
        "Patching with",
    ])
async def test_protocol_invocation(lifecycle, resource): """ To be sure that all kwargs are accepted properly. Especially when the new kwargs are added or an invocation protocol changed. """ # The values are irrelevant, they can be anything. state = State.from_scratch() cause = ResourceChangingCause( logger=logging.getLogger('kopf.test.fake.logger'), indices=OperatorIndexers().indices, resource=resource, patch=Patch(), memo=Memo(), body=Body({}), initial=False, reason=Reason.NOOP, ) handlers = [] selected = await invoke(lifecycle, handlers, cause=cause, state=state) assert isinstance(selected, (tuple, list)) assert len(selected) == 0
async def _simulate_cycle(event_object: RawBody):
    """
    Process one watch-event and fold the resulting patches back into the object.

    NOTE(review): this is a closure helper — ``mocker``, ``registry``,
    ``settings``, ``resource``, ``memories``, ``k8s_mocked``, and
    ``_merge_dicts`` come from the enclosing scope (presumably a fixture);
    confirm against the surrounding file.
    """
    mocker.resetall()
    await process_resource_event(
        lifecycle=kopf.lifecycles.all_at_once,
        registry=registry,
        settings=settings,
        resource=resource,
        memories=memories,
        memobase=Memo(),
        indexers=OperatorIndexers(),
        raw_event={'type': 'irrelevant', 'object': event_object},
        event_queue=asyncio.Queue(),
    )

    # Do the same as k8s does: merge the patches into the object.
    for call in k8s_mocked.patch_obj.call_args_list:
        _merge_dicts(call[1]['patch'], event_object)
async def test_delays_are_simulated(settings, activity, mocker):
    """
    Retry delays must be slept through via ``sleep_or_wait`` (time is frozen).

    The sleep is patched to tick the frozen clock by the requested delay, so
    the retries happen instantly while still appearing to be "in time".

    BUGFIX: the final check on the last sleep argument lacked ``assert`` and
    was a no-op expression; the assertion is now actually enforced.
    """
    def sample_fn(**_):
        raise TemporaryError('to be retried', delay=123)

    registry = OperatorRegistry()
    registry._activities.append(ActivityHandler(
        fn=sample_fn, id='id', activity=activity,
        param=None, errors=None, timeout=None, retries=3, backoff=None,
    ))

    with freezegun.freeze_time() as frozen:

        # Instead of sleeping, advance the frozen clock by the delay amount.
        async def sleep_or_wait_substitute(*_, **__):
            frozen.tick(123)

        sleep_or_wait = mocker.patch('kopf.structs.primitives.sleep_or_wait',
                                     wraps=sleep_or_wait_substitute)

        with pytest.raises(ActivityError):
            await run_activity(
                registry=registry,
                settings=settings,
                activity=activity,
                lifecycle=all_at_once,
                indices=OperatorIndexers().indices,
                memo=Memo(),
            )

    assert sleep_or_wait.call_count >= 3  # 3 retries, 1 sleep each
    assert sleep_or_wait.call_count <= 4  # 3 retries, 1 final success (delay=None), not more
    if sleep_or_wait.call_count > 3:
        assert sleep_or_wait.call_args_list[-1][0][0] is None
async def test_handlers_called_always(registry, settings, handlers, extrahandlers,
                                      resource, cause_mock, cause_type,
                                      caplog, assert_logs, k8s_mocked):
    """Event handlers are invoked for any cause type, receiving the raw event."""
    caplog.set_level(logging.DEBUG)
    cause_mock.reason = cause_type

    await process_resource_event(
        lifecycle=kopf.lifecycles.all_at_once,
        registry=registry,
        settings=settings,
        resource=resource,
        indexers=OperatorIndexers(),
        memories=ResourceMemories(),
        memobase=Memo(),
        raw_event={'type': 'ev-type', 'object': {'field': 'value'}},
        event_queue=asyncio.Queue(),
    )

    assert handlers.event_mock.call_count == 1
    assert extrahandlers.event_mock.call_count == 1

    # The raw event is passed through to the handler unchanged.
    event = handlers.event_mock.call_args_list[0][1]['event']
    assert 'field' in event['object']
    assert event['object']['field'] == 'value'
    assert event['type'] == 'ev-type'

    assert_logs([
        "Handler 'event_fn' is invoked.",
        "Handler 'event_fn' succeeded.",
        "Handler 'event_fn2' is invoked.",
        "Handler 'event_fn2' succeeded.",
    ])
async def test_all_logs_are_prefixed(registry, settings, resource, handlers,
                                     logstream, cause_type, cause_mock):
    """Every log line emitted during processing carries the object's ns/name prefix."""
    event_type = None if cause_type == Reason.RESUME else 'irrelevant'
    event_body = {'metadata': {'namespace': 'ns1', 'name': 'name1'}}
    cause_mock.reason = cause_type

    await process_resource_event(
        lifecycle=kopf.lifecycles.all_at_once,
        registry=registry,
        settings=settings,
        resource=resource,
        indexers=OperatorIndexers(),
        memories=ResourceMemories(),
        memobase=Memo(),
        raw_event={'type': event_type, 'object': event_body},
        event_queue=asyncio.Queue(),
    )

    lines = logstream.getvalue().splitlines()
    assert lines  # no messages means that we cannot test it
    assert all(line.startswith('prefix [ns1/name1] ') for line in lines)
async def test_consistent_awakening(registry, settings, resource, k8s_mocked, mocker):
    """
    A special case to ensure that "now" is consistent during the handling.

    Previously, "now" of `handler.awakened` and "now" of `state.delay` were
    different (maybe for less than 1 ms). If the scheduled awakening time was
    unlucky to be between these two points in time, the operator stopped
    reacting on this object until any other events or changes arrive.

    Implementation-wise, the operator neither selected the handlers (because
    it was "1ms too early", as per `handler.awakened`), nor did it sleep
    (because it was "1ms too late", as per `state.delay`), nor did it produce
    even a dummy patch (because zero-sleep meant "no sleep").

    After the fix, zero-sleep produces a dummy patch to trigger the reaction
    cycle after the sleep is over (as if it was an actual zero-time sleep).

    In the test, the time granularity is intentionally that low -- 1 µs.
    The time is anyway frozen and does not progress unless explicitly ticked.

    See also: #284
    """
    # Simulate that the object is scheduled to be awakened between the watch-event and sleep.
    ts0 = datetime.datetime(2019, 12, 30, 10, 56, 43)
    tsA_triggered = "2019-12-30T10:56:42.999999"
    ts0_scheduled = "2019-12-30T10:56:43.000000"
    tsB_delivered = "2019-12-30T10:56:43.000001"

    # A dummy handler: it will not be selected for execution anyway, we just need to have it.
    @kopf.on.create(*resource, id='some-id')
    def handler_fn(**_):
        pass

    # Simulate the ticking of time, so that it goes beyond the scheduled awakening time.
    # Any hook point between handler selection and delay calculation is fine,
    # but State.store() also prevents other status-fields from being added and the patch populated.
    # NOTE: `frozen_dt` is late-bound — it is assigned below, before this is ever called.
    def move_to_tsB(*_, **__):
        frozen_dt.move_to(tsB_delivered)

    state_store = mocker.patch('kopf.storage.states.State.store', side_effect=move_to_tsB)
    body = {'status': {'kopf': {'progress': {'some-id': {'delayed': ts0_scheduled}}}}}

    # Simulate the call as if the event has just arrived on the watch-stream.
    # Another way (same effect): handle_resource_changing_cause() and its result.
    with freezegun.freeze_time(tsA_triggered) as frozen_dt:
        assert datetime.datetime.utcnow() < ts0  # extra precaution
        await process_resource_event(
            lifecycle=kopf.lifecycles.all_at_once,
            registry=registry,
            settings=settings,
            resource=resource,
            indexers=OperatorIndexers(),
            memories=ResourceMemories(),
            memobase=Memo(),
            raw_event={'type': 'ADDED', 'object': body},
            event_queue=asyncio.Queue(),
        )
        assert datetime.datetime.utcnow() > ts0  # extra precaution

    assert state_store.called

    # Without "now"-time consistency, neither sleep() would be called, nor a patch applied.
    # Verify that the patch was actually applied, so that the reaction cycle continues.
    assert k8s_mocked.patch_obj.called
    assert 'dummy' in k8s_mocked.patch_obj.call_args_list[-1][1]['patch']['status']['kopf']
def indices():
    """Build an indices mapping backed by two named (empty) indexers."""
    indexers = OperatorIndexers()
    for index_name in ('index1', 'index2'):
        indexers[index_name] = OperatorIndexer()
    return indexers.indices