def owner(request, resource):
    """Fixture: yield a fake owner body while a matching cause is in context.

    The parametrization selects which kind of cause (state-changing or
    event-watching) is installed into the contextvar for the duration
    of the test.
    """
    body = Body(copy.deepcopy(OWNER))
    logger = logging.getLogger('kopf.test.fake.logger')
    if request.param == 'state-changing-cause':
        cause = ChangingCause(
            logger=logger,
            indices=OperatorIndexers().indices,
            resource=resource,
            patch=Patch(),
            memo=Memo(),
            body=body,
            initial=False,
            reason=Reason.NOOP,
        )
    elif request.param == 'event-watching-cause':
        cause = WatchingCause(
            logger=logger,
            indices=OperatorIndexers().indices,
            resource=resource,
            patch=Patch(),
            memo=Memo(),
            body=body,
            type='irrelevant',
            event=RawEvent(type='irrelevant', object=OWNER),
        )
    else:
        raise RuntimeError(f"Wrong param for `owner` fixture: {request.param!r}")
    # Both branches share the same context-installation and teardown.
    with context([(cause_var, cause)]):
        yield body
async def liveness_url(settings, liveness_registry, aiohttp_unused_port):
    """Fixture: run a health-reporting server in the background, yield its URL."""
    # The server startup is not instant, so we need a readiness flag.
    ready_flag = asyncio.Event()
    port = aiohttp_unused_port()
    task = asyncio.create_task(health_reporter(
        endpoint=f'http://:{port}/xyz',
        registry=liveness_registry,
        settings=settings,
        ready_flag=ready_flag,
        indices=OperatorIndexers().indices,
        memo=Memo(),
    ))
    try:
        await ready_flag.wait()
        yield f'http://localhost:{port}/xyz'
    finally:
        # Stop the background server and swallow only its cancellation.
        task.cancel()
        try:
            await task
        except asyncio.CancelledError:
            pass
async def test_special_kwargs_added(fn, resource):
    """The invocation machinery must inject cause-derived kwargs into handlers."""
    # Values can be any.
    body = {'metadata': {'uid': 'uid', 'name': 'name', 'namespace': 'ns'},
            'spec': {'field': 'value'},
            'status': {'info': 'payload'}}
    cause = ChangingCause(
        logger=logging.getLogger('kopf.test.fake.logger'),
        indices=OperatorIndexers().indices,
        resource=resource,
        patch=Patch(),
        initial=False,
        reason=Reason.NOOP,
        memo=object(),
        body=Body(body),
        diff=object(),
        old=object(),
        new=object(),
    )
    mocked_fn = MagicMock(fn)
    await invoke(mocked_fn, kwargsrc=cause)

    assert mocked_fn.called
    assert mocked_fn.call_count == 1

    # Only check that kwargs are passed at all.
    # The exact kwargs per cause are tested separately.
    passed_kwargs = mocked_fn.call_args[1]
    assert 'logger' in passed_kwargs
    assert 'resource' in passed_kwargs
async def test_delays_are_simulated(settings, activity, mocker):
    """Retry delays must be slept through via the patched sleep, not real time.

    The sleep is replaced with a frozen-clock tick, so the declared 123-second
    delay passes instantly while still being observable via the mock.
    """

    def sample_fn(**_):
        raise TemporaryError('to be retried', delay=123)

    registry = OperatorRegistry()
    registry._activities.append(ActivityHandler(
        fn=sample_fn, id='id', activity=activity,
        param=None, errors=None, timeout=None, retries=3, backoff=None,
    ))

    with freezegun.freeze_time() as frozen:

        async def sleep_substitute(*_, **__):
            frozen.tick(123)

        sleep = mocker.patch('kopf._cogs.aiokits.aiotime.sleep',
                             wraps=sleep_substitute)

        with pytest.raises(ActivityError):
            await run_activity(
                registry=registry,
                settings=settings,
                activity=activity,
                lifecycle=all_at_once,
                indices=OperatorIndexers().indices,
                memo=Memo(),
            )

    assert sleep.call_count >= 3  # 3 retries, 1 sleep each
    assert sleep.call_count <= 4  # 3 retries, 1 final success (delay=None), not more
    if sleep.call_count > 3:
        # BUGFIX: this line was a bare expression without `assert`,
        # so the final-delay check was never actually performed.
        assert sleep.call_args_list[-1][0][0] is None
async def test_noreturn_handler_produces_no_credentials(settings):
    """A login handler returning nothing must leave the vault empty."""
    vault = Vault()
    registry = OperatorRegistry()

    def login_fn(**_):
        pass

    # NB: id auto-detection does not work, as it is local to the test function.
    handler = ActivityHandler(
        fn=login_fn, id='login_fn', activity=Activity.AUTHENTICATION,
        param=None, errors=None, timeout=None, retries=None, backoff=None,
    )
    registry._activities.append(handler)

    await authenticate(
        registry=registry,
        settings=settings,
        vault=vault,
        memo=Memo(),
        indices=OperatorIndexers().indices,
    )

    assert not vault
    with pytest.raises(LoginError):
        async for _, _ in vault:
            pass
async def test_supersession_is_logged(
        registry, settings, resource, handlers, cause_types, cause_mock,
        caplog, assert_logs):
    """When a new cause supersedes a stored one, the supersession is logged."""
    caplog.set_level(logging.DEBUG)
    settings.persistence.progress_storage = StatusProgressStorage()

    # Pre-populate the progress of all handlers with the *old* purpose.
    handler_names = ['create_fn', 'update_fn', 'resume_fn', 'delete_fn']
    progress = {name: {'purpose': cause_types[0]} for name in handler_names}
    body = {'status': {'kopf': {'progress': progress}}}

    cause_mock.reason = cause_types[1]
    event_type = None if cause_types[1] == Reason.RESUME else 'irrelevant'
    await process_resource_event(
        lifecycle=kopf.lifecycles.all_at_once,
        registry=registry,
        settings=settings,
        resource=resource,
        indexers=OperatorIndexers(),
        memories=ResourceMemories(),
        memobase=Memo(),
        raw_event={'type': event_type, 'object': body},
        event_queue=asyncio.Queue(),
    )
    assert_logs([
        "(Creation|Updating|Resuming|Deletion) is superseded by (creation|updating|resuming|deletion): ",
        "(Creation|Updating|Resuming|Deletion) is in progress: ",
        "(Creation|Updating|Resuming|Deletion) is processed: ",
    ])
async def test_results_are_returned_on_success(settings, activity):
    """Each activity handler's return value must be keyed by its handler id."""

    def sample_fn1(**_):
        return 123

    def sample_fn2(**_):
        return 456

    registry = OperatorRegistry()
    for handler_id, handler_fn in [('id1', sample_fn1), ('id2', sample_fn2)]:
        registry._activities.append(ActivityHandler(
            fn=handler_fn, id=handler_id, activity=activity,
            param=None, errors=None, timeout=None, retries=None, backoff=None,
        ))

    results = await run_activity(
        registry=registry,
        settings=settings,
        activity=activity,
        lifecycle=all_at_once,
        indices=OperatorIndexers().indices,
        memo=Memo(),
    )

    assert set(results.keys()) == {'id1', 'id2'}
    assert results['id1'] == 123
    assert results['id2'] == 456
async def test_diffs_logged_if_present(registry, settings, resource, handlers,
                                       cause_type, cause_mock, caplog,
                                       assert_logs, diff):
    """A non-empty diff on the cause must appear in the change-detection logs."""
    caplog.set_level(logging.DEBUG)

    cause_mock.reason = cause_type
    cause_mock.diff = diff
    # Both values are only checked for `not None` and are JSON-serialised.
    cause_mock.new = {'field': 'old'}
    cause_mock.old = {'field': 'new'}

    event_type = None if cause_type == Reason.RESUME else 'irrelevant'
    await process_resource_event(
        lifecycle=kopf.lifecycles.all_at_once,
        registry=registry,
        settings=settings,
        resource=resource,
        indexers=OperatorIndexers(),
        memories=ResourceMemories(),
        memobase=Memo(),
        raw_event={'type': event_type, 'object': {}},
        event_queue=asyncio.Queue(),
    )
    assert_logs([
        "(Creation|Updating|Resuming|Deletion) is in progress: ",
        "(Creation|Updating|Resuming|Deletion) diff: "
    ])
async def test_errors_are_ignored(registry, settings, handlers, extrahandlers,
                                  resource, cause_mock, cause_type, caplog,
                                  assert_logs, k8s_mocked):
    """A failing low-level event handler must not block the other handlers."""
    caplog.set_level(logging.DEBUG)
    cause_mock.reason = cause_type
    handlers.event_mock.side_effect = Exception("oops")

    await process_resource_event(
        lifecycle=kopf.lifecycles.all_at_once,
        registry=registry,
        settings=settings,
        resource=resource,
        indexers=OperatorIndexers(),
        memories=ResourceMemories(),
        memobase=Memo(),
        raw_event={'type': 'ev-type', 'object': {}},
        event_queue=asyncio.Queue(),
    )

    # Both the failing and the healthy handlers were invoked regardless.
    assert handlers.event_mock.called
    assert extrahandlers.event_mock.called
    assert_logs([
        "Handler 'event_fn' is invoked.",
        "Handler 'event_fn' failed with an exception. Will ignore.",
        "Handler 'event_fn2' is invoked.",
        "Handler 'event_fn2' succeeded.",
    ])
async def test_noop(registry, settings, handlers, resource, cause_mock,
                    event_type, caplog, assert_logs, k8s_mocked):
    """A NOOP cause must trigger no handlers, no sleeps, no patches, no events."""
    caplog.set_level(logging.DEBUG)
    cause_mock.reason = Reason.NOOP

    event_queue = asyncio.Queue()
    await process_resource_event(
        lifecycle=kopf.lifecycles.all_at_once,
        registry=registry,
        settings=settings,
        resource=resource,
        indexers=OperatorIndexers(),
        memories=ResourceMemories(),
        memobase=Memo(),
        raw_event={'type': event_type, 'object': {}},
        event_queue=event_queue,
    )

    for mock_ in (handlers.create_mock, handlers.update_mock, handlers.delete_mock):
        assert not mock_.called
    assert not k8s_mocked.sleep.called
    assert not k8s_mocked.patch.called
    assert event_queue.empty()
    assert_logs([
        "Something has changed, but we are not interested",
    ])
def test_watching_kwargs(resource, attr):
    """A watching cause must expose exactly the expected kwargs, by identity."""
    body = {'metadata': {'uid': 'uid1', 'name': 'name1', 'namespace': 'ns1',
                         'labels': {'l1': 'v1'}, 'annotations': {'a1': 'v1'}},
            'spec': {'field': 'value'},
            'status': {'info': 'payload'}}
    cause = WatchingCause(
        logger=logging.getLogger('kopf.test.fake.logger'),
        indices=OperatorIndexers().indices,
        resource=resource,
        patch=Patch(),
        memo=Memo(),
        body=Body(body),
        type='ADDED',
        event={'type': 'ADDED', 'object': {}},
    )
    kwargs = getattr(cause, attr)  # cause.kwargs / cause.sync_kwargs / cause.async_kwargs

    # These kwargs must be the very same objects as on the cause/body.
    identical = {
        'resource': cause.resource,
        'logger': cause.logger,
        'patch': cause.patch,
        'event': cause.event,
        'memo': cause.memo,
        'type': cause.type,
        'body': cause.body,
        'spec': cause.body.spec,
        'meta': cause.body.metadata,
        'status': cause.body.status,
        'labels': cause.body.metadata.labels,
        'annotations': cause.body.metadata.annotations,
    }
    # These are plain values, compared by equality.
    equal = {
        'uid': cause.body.metadata.uid,
        'name': cause.body.metadata.name,
        'namespace': cause.body.metadata.namespace,
    }
    assert set(kwargs) == set(identical) | set(equal)
    for key, value in identical.items():
        assert kwargs[key] is value
    for key, value in equal.items():
        assert kwargs[key] == value
async def test_retries_are_simulated(settings, activity, mocker):
    """A temporary error must be retried exactly up to the retries limit."""
    mock = mocker.MagicMock()

    def sample_fn(**_):
        mock()
        raise TemporaryError('to be retried', delay=0)

    registry = OperatorRegistry()
    registry._activities.append(ActivityHandler(
        fn=sample_fn, id='id', activity=activity,
        param=None, errors=None, timeout=None, retries=3, backoff=None,
    ))

    with pytest.raises(ActivityError) as e:
        await run_activity(
            registry=registry,
            settings=settings,
            activity=activity,
            lifecycle=all_at_once,
            indices=OperatorIndexers().indices,
            memo=Memo(),
        )

    # After exhausting the retries, the error is escalated as permanent.
    assert isinstance(e.value.outcomes['id'].exception, PermanentError)
    assert mock.call_count == 3
def test_daemon_kwargs(resource, attr):
    """A daemon cause must expose the expected kwargs and hide the stopper."""
    body = {'metadata': {'uid': 'uid1', 'name': 'name1', 'namespace': 'ns1',
                         'labels': {'l1': 'v1'}, 'annotations': {'a1': 'v1'}},
            'spec': {'field': 'value'},
            'status': {'info': 'payload'}}
    cause = DaemonCause(
        logger=logging.getLogger('kopf.test.fake.logger'),
        indices=OperatorIndexers().indices,
        resource=resource,
        patch=Patch(),
        memo=Memo(),
        body=Body(body),
        stopper=DaemonStopper(),
    )
    kwargs = getattr(cause, attr)  # cause.kwargs

    # These kwargs must be the very same objects as on the cause/body.
    identical = {
        'resource': cause.resource,
        'logger': cause.logger,
        'patch': cause.patch,
        'memo': cause.memo,
        'body': cause.body,
        'spec': cause.body.spec,
        'meta': cause.body.metadata,
        'status': cause.body.status,
        'labels': cause.body.metadata.labels,
        'annotations': cause.body.metadata.annotations,
    }
    # These are plain values, compared by equality.
    equal = {
        'uid': cause.body.metadata.uid,
        'name': cause.body.metadata.name,
        'namespace': cause.body.metadata.namespace,
    }
    assert set(kwargs) == set(identical) | set(equal)
    for key, value in identical.items():
        assert kwargs[key] is value
    for key, value in equal.items():
        assert kwargs[key] == value

    # The raw stopper must never leak into the handler kwargs.
    assert 'stopper' not in kwargs
    assert 'stopped' not in kwargs
async def test_1st_step_stores_progress_by_patching(registry, settings, handlers,
                                                    extrahandlers, resource,
                                                    cause_mock, cause_type,
                                                    k8s_mocked, deletion_ts):
    """The 1st cycle must persist the handlers' progress via a patch."""
    name1 = f'{cause_type}_fn'
    name2 = f'{cause_type}_fn2'
    event_type = None if cause_type == Reason.RESUME else 'irrelevant'
    event_body = {'metadata': {'finalizers': [settings.persistence.finalizer]}}
    event_body['metadata'].update(deletion_ts)
    cause_mock.reason = cause_type

    await process_resource_event(
        lifecycle=kopf.lifecycles.asap,
        registry=registry,
        settings=settings,
        resource=resource,
        indexers=OperatorIndexers(),
        memories=ResourceMemories(),
        memobase=Memo(),
        raw_event={'type': event_type, 'object': event_body},
        event_queue=asyncio.Queue(),
    )

    # Exactly one handler fires -- the one matching the cause's reason.
    for mock_, reason in [(handlers.create_mock, Reason.CREATE),
                          (handlers.update_mock, Reason.UPDATE),
                          (handlers.delete_mock, Reason.DELETE),
                          (handlers.resume_mock, Reason.RESUME)]:
        assert mock_.call_count == (1 if cause_type == reason else 0)

    assert not k8s_mocked.sleep.called
    assert k8s_mocked.patch_obj.called

    applied = k8s_mocked.patch_obj.call_args_list[0][1]['patch']
    progress = applied['status']['kopf']['progress']
    assert progress is not None
    assert progress[name1]['retries'] == 1
    assert progress[name1]['success'] is True
    assert progress[name2]['retries'] == 0
    assert progress[name2]['success'] is False
    assert progress[name1]['started']
    assert progress[name2]['started']

    # Premature removal of finalizers can prevent the 2nd step for deletion handlers.
    # So, the finalizers must never be removed on the 1st step.
    assert 'finalizers' not in applied['metadata']
def test_changing_kwargs(resource, attr):
    """A changing cause must expose exactly the expected kwargs, by identity."""
    body = {'metadata': {'uid': 'uid1', 'name': 'name1', 'namespace': 'ns1',
                         'labels': {'l1': 'v1'}, 'annotations': {'a1': 'v1'}},
            'spec': {'field': 'value'},
            'status': {'info': 'payload'}}
    cause = ChangingCause(
        logger=logging.getLogger('kopf.test.fake.logger'),
        indices=OperatorIndexers().indices,
        resource=resource,
        patch=Patch(),
        initial=False,
        reason=Reason.NOOP,
        memo=Memo(),
        body=Body(body),
        diff=Diff([]),
        old=BodyEssence(),
        new=BodyEssence(),
    )
    kwargs = getattr(cause, attr)  # cause.kwargs / cause.sync_kwargs / cause.async_kwargs

    # These kwargs must be the very same objects as on the cause/body.
    identical = {
        'resource': cause.resource,
        'reason': cause.reason,
        'logger': cause.logger,
        'patch': cause.patch,
        'memo': cause.memo,
        'diff': cause.diff,
        'old': cause.old,
        'new': cause.new,
        'body': cause.body,
        'spec': cause.body.spec,
        'meta': cause.body.metadata,
        'status': cause.body.status,
        'labels': cause.body.metadata.labels,
        'annotations': cause.body.metadata.annotations,
    }
    # These are plain values, compared by equality.
    equal = {
        'uid': cause.body.metadata.uid,
        'name': cause.body.metadata.name,
        'namespace': cause.body.metadata.namespace,
    }
    assert set(kwargs) == set(identical) | set(equal)
    for key, value in identical.items():
        assert kwargs[key] is value
    for key, value in equal.items():
        assert kwargs[key] == value
async def test_stealth_mode_with_mismatching_handlers(
        registry, settings, selector, resource, cause_mock, cause_type,
        caplog, assert_logs, k8s_mocked, annotations, labels, when,
        deleted, initial):
    """With only mismatching handlers, nothing is done and nothing is logged."""
    caplog.set_level(logging.DEBUG)
    event_type = None
    event_body = {'metadata': {'finalizers': []}}
    cause_mock.reason = cause_type

    assert not registry._changing.has_handlers(resource=resource)  # prerequisite
    mismatching_handler = ChangingHandler(
        reason=None,
        fn=lambda **_: None,
        id='id',
        param=None,
        errors=None,
        timeout=None,
        retries=None,
        backoff=None,
        selector=selector,
        annotations=annotations,
        labels=labels,
        when=when,
        field=None,
        value=None,
        old=None,
        new=None,
        field_needs_change=None,
        deleted=deleted,
        initial=initial,
        requires_finalizer=None,
    )
    registry._changing.append(mismatching_handler)

    await process_resource_event(
        lifecycle=kopf.lifecycles.all_at_once,
        registry=registry,
        settings=settings,
        resource=resource,
        indexers=OperatorIndexers(),
        memories=ResourceMemories(),
        memobase=Memo(),
        raw_event={'type': event_type, 'object': event_body},
        event_queue=asyncio.Queue(),
    )

    assert not k8s_mocked.sleep.called
    assert not k8s_mocked.patch_obj.called
    assert not caplog.messages  # total stealth mode!
def test_indices_overwrite_kwargs(cls: Type[BaseCause], name, attr):
    """Registered indices must shadow same-named cause kwargs."""
    index_names = ('index1', 'index2', name)
    indexers = OperatorIndexers()
    for index_name in index_names:
        indexers[index_name] = OperatorIndexer()

    # Build the cause with every dataclass field mocked, except real indices.
    mocks = {field.name: Mock() for field in dataclasses.fields(cls)}
    mocks['indices'] = indexers.indices
    cause = cls(**mocks)

    kwargs = getattr(cause, attr)  # cause.kwargs / cause.sync_kwargs / cause.async_kwargs
    for index_name in index_names:
        assert kwargs[index_name] is indexers[index_name].index
def test_admission_kwargs(resource, attr):
    """A webhook cause must expose exactly the expected admission kwargs."""
    body = {'metadata': {'uid': 'uid1', 'name': 'name1', 'namespace': 'ns1',
                         'labels': {'l1': 'v1'}, 'annotations': {'a1': 'v1'}},
            'spec': {'field': 'value'},
            'status': {'info': 'payload'}}
    cause = WebhookCause(
        logger=logging.getLogger('kopf.test.fake.logger'),
        indices=OperatorIndexers().indices,
        resource=resource,
        patch=Patch(),
        memo=Memo(),
        body=Body(body),
        dryrun=False,
        headers={'k1': 'v1'},
        sslpeer={'k2': 'v2'},
        userinfo={'k3': 'v3'},
        warnings=['w1'],
        webhook=None,
        reason=None,
        operation=None,
        subresource=None,
        new=BodyEssence(body),
        old=None,
        # NOTE(review): the arg order looks reversed vs old=None/new=...;
        # presumably irrelevant here since only kwarg passing is tested -- confirm.
        diff=diffs.diff(BodyEssence(body), None),
    )
    kwargs = getattr(cause, attr)  # cause.kwargs / cause.sync_kwargs / cause.async_kwargs

    # These kwargs must be the very same objects as on the cause/body.
    identical = {
        'resource': cause.resource,
        'logger': cause.logger,
        'dryrun': cause.dryrun,
        'headers': cause.headers,
        'sslpeer': cause.sslpeer,
        'userinfo': cause.userinfo,
        'warnings': cause.warnings,
        'patch': cause.patch,
        'memo': cause.memo,
        'body': cause.body,
        'spec': cause.body.spec,
        'meta': cause.body.metadata,
        'status': cause.body.status,
        'labels': cause.body.metadata.labels,
        'annotations': cause.body.metadata.annotations,
    }
    # These are plain values, compared by equality.
    equal = {
        'uid': cause.body.metadata.uid,
        'name': cause.body.metadata.name,
        'namespace': cause.body.metadata.namespace,
        'operation': cause.operation,
        'new': cause.new,
        'old': cause.old,
        'diff': cause.diff,
    }
    # `subresource` is present in the kwargs but has no value assertion.
    assert set(kwargs) == set(identical) | set(equal) | {'subresource'}
    for key, value in identical.items():
        assert kwargs[key] is value
    for key, value in equal.items():
        assert kwargs[key] == value
def test_activity_kwargs(resource, activity, attr):
    """An activity cause must expose memo/logger/activity/settings kwargs."""
    cause = ActivityCause(
        memo=Memo(),
        logger=logging.getLogger('kopf.test.fake.logger'),
        indices=OperatorIndexers().indices,
        activity=activity,
        settings=OperatorSettings(),
    )
    kwargs = getattr(cause, attr)  # cause.kwargs / cause.sync_kwargs / cause.async_kwargs

    assert set(kwargs) == {'memo', 'logger', 'activity', 'settings'}
    assert kwargs['logger'] is cause.logger
    assert kwargs['activity'] is activity
    assert kwargs['settings'] is cause.settings
def test_daemon_async_stopper(resource, attr):
    """Async daemon kwargs expose the async waiter as `stopped`, not the stopper."""
    cause = DaemonCause(
        logger=logging.getLogger('kopf.test.fake.logger'),
        indices=OperatorIndexers().indices,
        resource=resource,
        patch=Patch(),
        memo=Memo(),
        body=Body({}),
        stopper=DaemonStopper(),
    )
    kwargs = getattr(cause, attr)  # cause.async_kwargs

    assert 'stopper' not in kwargs
    assert kwargs['stopped'] is cause.stopper.async_waiter
async def test_delayed_handlers_progress(registry, settings, handlers, resource,
                                         cause_mock, cause_reason, caplog,
                                         assert_logs, k8s_mocked,
                                         now, delayed_iso, delay):
    """A temporary error's delay must be persisted as an ISO 'delayed' time."""
    caplog.set_level(logging.DEBUG)

    # Every reason-specific handler fails temporarily with the same delay.
    for mock_ in (handlers.create_mock, handlers.update_mock,
                  handlers.delete_mock, handlers.resume_mock):
        mock_.side_effect = TemporaryError("oops", delay=delay)

    event_type = None if cause_reason == Reason.RESUME else 'irrelevant'
    cause_mock.reason = cause_reason

    with freezegun.freeze_time(now):
        await process_resource_event(
            lifecycle=kopf.lifecycles.all_at_once,
            registry=registry,
            settings=settings,
            resource=resource,
            indexers=OperatorIndexers(),
            memories=ResourceMemories(),
            memobase=Memo(),
            raw_event={'type': event_type, 'object': {}},
            event_queue=asyncio.Queue(),
        )

    # Exactly one handler fires -- the one matching the cause's reason.
    for mock_, reason in [(handlers.create_mock, Reason.CREATE),
                          (handlers.update_mock, Reason.UPDATE),
                          (handlers.delete_mock, Reason.DELETE),
                          (handlers.resume_mock, Reason.RESUME)]:
        assert mock_.call_count == (1 if cause_reason == reason else 0)

    assert not k8s_mocked.sleep.called
    assert k8s_mocked.patch.called

    fname = f'{cause_reason}_fn'
    applied = k8s_mocked.patch.call_args_list[0][1]['payload']
    assert applied['status']['kopf']['progress'][fname]['delayed'] == delayed_iso

    assert_logs([
        "Handler .+ is invoked",
        "Handler .+ failed temporarily: oops",
    ])
async def test_retry_error_delays_handler(registry, settings, handlers,
                                          extrahandlers, resource, cause_mock,
                                          cause_type, caplog, assert_logs,
                                          k8s_mocked):
    """A temporary error must mark the handler delayed, not failed/succeeded."""
    caplog.set_level(logging.DEBUG)
    name1 = f'{cause_type}_fn'
    event_type = None if cause_type == Reason.RESUME else 'irrelevant'
    cause_mock.reason = cause_type

    # Every reason-specific handler fails temporarily.
    for mock_ in (handlers.create_mock, handlers.update_mock,
                  handlers.delete_mock, handlers.resume_mock):
        mock_.side_effect = TemporaryError("oops")

    await process_resource_event(
        lifecycle=kopf.lifecycles.one_by_one,
        registry=registry,
        settings=settings,
        resource=resource,
        indexers=OperatorIndexers(),
        memories=ResourceMemories(),
        memobase=Memo(),
        raw_event={'type': event_type, 'object': {}},
        event_queue=asyncio.Queue(),
    )

    # Exactly one handler fires -- the one matching the cause's reason.
    for mock_, reason in [(handlers.create_mock, Reason.CREATE),
                          (handlers.update_mock, Reason.UPDATE),
                          (handlers.delete_mock, Reason.DELETE),
                          (handlers.resume_mock, Reason.RESUME)]:
        assert mock_.call_count == (1 if cause_type == reason else 0)

    assert not k8s_mocked.sleep.called
    assert k8s_mocked.patch_obj.called

    applied = k8s_mocked.patch_obj.call_args_list[0][1]['patch']
    progress = applied['status']['kopf']['progress']
    assert progress is not None
    assert progress[name1]['failure'] is False
    assert progress[name1]['success'] is False
    assert progress[name1]['delayed']

    assert_logs([
        "Handler .+ failed temporarily: oops",
    ])
async def test_empty_registry_produces_no_credentials(settings):
    """With no login handlers registered, the vault must stay empty."""
    vault = Vault()
    registry = OperatorRegistry()

    await authenticate(
        registry=registry,
        settings=settings,
        vault=vault,
        memo=Memo(),
        indices=OperatorIndexers().indices,
    )

    assert not vault
    with pytest.raises(LoginError):
        async for _, _ in vault:
            pass
async def _simulate_cycle(event_object: RawBody) -> None:
    # Simulate one full processing cycle for the given raw object.
    # NOTE(review): this is a closure -- `mocker`, `registry`, `settings`,
    # `resource`, `memories`, `k8s_mocked`, and `_merge_dicts` come from the
    # enclosing scope (not visible in this chunk); confirm against the fixture.
    mocker.resetall()
    await process_resource_event(
        lifecycle=kopf.lifecycles.all_at_once,
        registry=registry,
        settings=settings,
        resource=resource,
        memories=memories,
        memobase=Memo(),
        indexers=OperatorIndexers(),
        raw_event={'type': 'irrelevant', 'object': event_object},
        event_queue=asyncio.Queue(),
    )

    # Do the same as k8s does: merge the patches into the object.
    for call in k8s_mocked.patch.call_args_list:
        _merge_dicts(call[1]['payload'], event_object)
async def test_delete(registry, settings, handlers, resource, cause_mock,
                      event_type, caplog, assert_logs, k8s_mocked):
    """Deletion must call only the delete handler and remove the finalizer."""
    caplog.set_level(logging.DEBUG)
    cause_mock.reason = Reason.DELETE

    finalizer = settings.persistence.finalizer
    event_body = {'metadata': {'deletionTimestamp': '...',
                               'finalizers': [finalizer]}}
    event_queue = asyncio.Queue()
    await process_resource_event(
        lifecycle=kopf.lifecycles.all_at_once,
        registry=registry,
        settings=settings,
        resource=resource,
        indexers=OperatorIndexers(),
        memories=ResourceMemories(),
        memobase=Memo(),
        raw_event={'type': event_type, 'object': event_body},
        event_queue=event_queue,
    )

    assert not handlers.create_mock.called
    assert not handlers.update_mock.called
    assert handlers.delete_mock.call_count == 1

    assert k8s_mocked.sleep.call_count == 0
    assert k8s_mocked.patch_obj.call_count == 1
    assert not event_queue.empty()

    assert_logs([
        "Deletion is in progress:",
        "Handler 'delete_fn' is invoked",
        "Handler 'delete_fn' succeeded",
        "Deletion is processed:",
        "Removing the finalizer",
        "Patching with",
    ])
async def test_update(registry, settings, handlers, resource, cause_mock,
                      event_type, caplog, assert_logs, k8s_mocked):
    """Updating must call only the update handler and refresh the last-seen state."""
    caplog.set_level(logging.DEBUG)
    cause_mock.reason = Reason.UPDATE

    event_queue = asyncio.Queue()
    await process_resource_event(
        lifecycle=kopf.lifecycles.all_at_once,
        registry=registry,
        settings=settings,
        resource=resource,
        indexers=OperatorIndexers(),
        memories=ResourceMemories(),
        memobase=Memo(),
        raw_event={'type': event_type, 'object': {}},
        event_queue=event_queue,
    )

    assert not handlers.create_mock.called
    assert handlers.update_mock.call_count == 1
    assert not handlers.delete_mock.called

    assert k8s_mocked.sleep.call_count == 0
    assert k8s_mocked.patch_obj.call_count == 1
    assert not event_queue.empty()

    # The last-seen state annotation must be refreshed in the applied patch.
    applied = k8s_mocked.patch_obj.call_args_list[0][1]['patch']
    assert 'metadata' in applied
    assert 'annotations' in applied['metadata']
    assert LAST_SEEN_ANNOTATION in applied['metadata']['annotations']

    assert_logs([
        "Updating is in progress:",
        "Handler 'update_fn' is invoked",
        "Handler 'update_fn' succeeded",
        "Updating is processed:",
        "Patching with",
    ])
async def test_all_logs_are_prefixed(registry, settings, resource, handlers,
                                     logstream, cause_type, cause_mock):
    """Every log line produced during processing must carry the object prefix."""
    event_type = None if cause_type == Reason.RESUME else 'irrelevant'
    event_body = {'metadata': {'namespace': 'ns1', 'name': 'name1'}}
    cause_mock.reason = cause_type

    await process_resource_event(
        lifecycle=kopf.lifecycles.all_at_once,
        registry=registry,
        settings=settings,
        resource=resource,
        indexers=OperatorIndexers(),
        memories=ResourceMemories(),
        memobase=Memo(),
        raw_event={'type': event_type, 'object': event_body},
        event_queue=asyncio.Queue(),
    )

    lines = logstream.getvalue().splitlines()
    assert lines  # no messages means that we cannot test it
    for line in lines:
        assert line.startswith('prefix [ns1/name1] ')
async def test_errors_are_raised_aggregated(settings, activity):
    """Multiple failed handlers must be aggregated into one ActivityError."""

    def sample_fn1(**_):
        raise PermanentError("boo!123")

    def sample_fn2(**_):
        raise PermanentError("boo!456")

    registry = OperatorRegistry()
    for handler_id, handler_fn in [('id1', sample_fn1), ('id2', sample_fn2)]:
        registry._activities.append(ActivityHandler(
            fn=handler_fn, id=handler_id, activity=activity,
            param=None, errors=None, timeout=None, retries=None, backoff=None,
        ))

    with pytest.raises(ActivityError) as e:
        await run_activity(
            registry=registry,
            settings=settings,
            activity=activity,
            lifecycle=all_at_once,
            indices=OperatorIndexers().indices,
            memo=Memo(),
        )

    assert set(e.value.outcomes.keys()) == {'id1', 'id2'}
    for handler_id in ('id1', 'id2'):
        outcome = e.value.outcomes[handler_id]
        assert outcome.final
        assert outcome.delay is None
        assert outcome.result is None
        assert outcome.exception is not None
    assert str(e.value.outcomes['id1'].exception) == "boo!123"
    assert str(e.value.outcomes['id2'].exception) == "boo!456"
async def test_handlers_called_always(registry, settings, handlers,
                                      extrahandlers, resource, cause_mock,
                                      cause_type, caplog, assert_logs,
                                      k8s_mocked):
    """Low-level event handlers fire on every event and receive the raw event."""
    caplog.set_level(logging.DEBUG)
    cause_mock.reason = cause_type

    await process_resource_event(
        lifecycle=kopf.lifecycles.all_at_once,
        registry=registry,
        settings=settings,
        resource=resource,
        indexers=OperatorIndexers(),
        memories=ResourceMemories(),
        memobase=Memo(),
        raw_event={'type': 'ev-type', 'object': {'field': 'value'}},
        event_queue=asyncio.Queue(),
    )

    assert handlers.event_mock.call_count == 1
    assert extrahandlers.event_mock.call_count == 1

    # The handlers must receive the raw event, body and type included.
    event = handlers.event_mock.call_args_list[0][1]['event']
    assert 'field' in event['object']
    assert event['object']['field'] == 'value'
    assert event['type'] == 'ev-type'

    assert_logs([
        "Handler 'event_fn' is invoked.",
        "Handler 'event_fn' succeeded.",
        "Handler 'event_fn2' is invoked.",
        "Handler 'event_fn2' succeeded.",
    ])
async def test_single_credentials_provided_to_vault(settings):
    """A login handler's ConnectionInfo must land in the vault as-is."""
    info = ConnectionInfo(server='https://expected/')
    vault = Vault()
    registry = OperatorRegistry()

    def login_fn(**_):
        return info

    # NB: id auto-detection does not work, as it is local to the test function.
    registry._activities.append(
        ActivityHandler(
            fn=login_fn, id='login_fn', activity=Activity.AUTHENTICATION,
            param=None, errors=None, timeout=None, retries=None, backoff=None,
        ))

    await authenticate(
        registry=registry,
        settings=settings,
        vault=vault,
        memo=Memo(),
        indices=OperatorIndexers().indices,
    )

    assert vault
    items = []
    # BUGFIX: the loop variable was previously named `info`, shadowing the
    # original ConnectionInfo and making the `is info` assertion below compare
    # the stored value against itself (i.e. vacuously true).
    async for key, stored_info in vault:
        items.append((key, stored_info))
    assert len(items) == 1
    assert items[0][0] == 'login_fn'
    assert items[0][1] is info