async def test_waiting_until_off_fails_when_not_turned_off():
    """A toggle that nobody turns off keeps ``wait_for_off()`` pending forever."""
    toggle = Toggle(True)
    with pytest.raises(asyncio.TimeoutError):
        await asyncio.wait_for(toggle.wait_for_off(), timeout=0.1)
    # The timeout fired; nothing has flipped the toggle off.
    assert toggle
async def test_turning_off():
    """``turn_off()`` flips the truthiness and both predicate views."""
    toggle = Toggle(True)
    await toggle.turn_off()
    assert toggle.is_off()
    assert not toggle.is_on()
    assert not toggle
async def test_waiting_until_off_fails_when_not_turned_off():
    """Waiting for the off-state times out if the toggle stays on."""
    toggle = Toggle(True)
    with pytest.raises(asyncio.TimeoutError):
        async with async_timeout.timeout(0.1) as timeout:
            await toggle.wait_for(False)
    assert timeout.expired
    assert toggle.is_on()
async def test_waiting_until_off_wakes_when_turned_off(timer):
    """``wait_for_off()`` wakes up promptly once another task turns the toggle off."""
    toggle = Toggle(True)

    async def flip_off_later(delay: float):
        await asyncio.sleep(delay)
        await toggle.turn_off()

    with timer:
        asyncio.create_task(flip_off_later(0.05))
        await asyncio.wait_for(toggle.wait_for_off(), timeout=1.0)

    assert not toggle
    assert timer.seconds < 0.5  # approx. 0.05 plus some code overhead
async def process_peering_event(
        *,
        raw_event: bodies.RawEvent,
        freeze_mode: primitives.Toggle,
        ourselves: Peer,
        autoclean: bool = True,
        replenished: asyncio.Event,
) -> None:
    """
    Handle a single update of the peers by us or by other operators.

    When an operator with a higher priority appears, switch to the freeze-mode.
    When these operators disappear or become presumably dead, resume the event handling.

    The freeze object is passed both to the peers handler to set/clear it,
    and to all the resource handlers to check its value when the events arrive
    (see `create_tasks` and `run` functions).
    """

    # Silently ignore the peering objects which are not ours to worry.
    body = raw_event['object']
    name = body.get('metadata', {}).get('name', None)
    namespace = body.get('metadata', {}).get('namespace', None)
    if namespace != ourselves.namespace or name != ourselves.name or name is None:
        return

    # Find if we are still the highest priority operator.
    pairs = cast(Mapping[str, Mapping[str, object]], body.get('status', {}))
    peers = [Peer(id=opid, name=name, **opinfo) for opid, opinfo in pairs.items()]
    dead_peers = [peer for peer in peers if peer.is_dead]
    prio_peers = [peer for peer in peers
                  if not peer.is_dead and peer.priority > ourselves.priority]
    same_peers = [peer for peer in peers
                  if not peer.is_dead and peer.priority == ourselves.priority
                  and peer.id != ourselves.id]

    if autoclean and dead_peers:
        # NB: sync and blocking, but this is fine.
        await apply_peers(dead_peers, name=ourselves.name,
                          namespace=ourselves.namespace, legacy=ourselves.legacy)

    if prio_peers:
        # A higher-priority operator exists: stand down until it goes away.
        if freeze_mode.is_off():
            logger.info(f"Freezing operations in favour of {prio_peers}.")
            await freeze_mode.turn_on()
    elif same_peers:
        # An equal-priority conflict: freeze everyone (including self) to be safe.
        logger.warning(f"Possibly conflicting operators with the same priority: {same_peers}.")
        if freeze_mode.is_off():
            logger.warning(f"Freezing all operators, including self: {peers}")
            await freeze_mode.turn_on()
    else:
        # No blocking peers remain: resume normal operation.
        if freeze_mode.is_on():
            # FIX: this log message literal was broken by a raw line break
            # inside the f-string (a syntax error); reconstructed as one message.
            logger.info("Resuming operations after the freeze. "
                        "Conflicting operators with the same priority are gone.")
            await freeze_mode.turn_off()
async def test_waiting_until_off_wakes_when_turned_off(timer):
    """``wait_for(False)`` returns promptly once another task flips the toggle."""
    toggle = Toggle(True)

    async def flip_off_later(delay: float):
        await asyncio.sleep(delay)
        await toggle.turn_to(False)

    async with timer, async_timeout.timeout(1.0) as timeout:
        asyncio.create_task(flip_off_later(0.05))
        await toggle.wait_for(False)

    assert not timeout.expired
    assert toggle.is_off()
    assert timer.seconds < 0.5  # approx. 0.05 plus some code overhead
async def test_removed_on_filters_mismatch(
        resource, settings, registry, indexers, index, caplog, event_type, handlers, mocker):
    """If the indexing handler no longer matches, the object's index entries go away."""
    # Simulate the indexing handler is gone out of scope (this is only one of the ways to do it):
    mocker.patch.object(registry._resource_indexing, 'get_handlers', return_value=[])
    caplog.set_level(logging.DEBUG)

    handlers.index_mock.return_value = 123
    body = {'metadata': {'namespace': 'ns1', 'name': 'name1'}}
    await process_resource_event(
        lifecycle=all_at_once,
        registry=registry,
        settings=settings,
        resource=resource,
        indexers=indexers,
        memories=ResourceMemories(),
        memobase=Memo(),
        raw_event={'type': event_type, 'object': body},
        event_queue=asyncio.Queue(),
        resource_indexed=Toggle(),  # used! only to enable indexing.
    )
    assert set(index) == set()
async def test_freezing_waits_forever_if_not_resumed(
        resource, stream, namespace, timer, caplog, assert_logs):
    """A frozen watch-stream yields nothing and never exits on its own."""
    stream.feed(STREAM_WITH_NORMAL_EVENTS, namespace=namespace)
    stream.close(namespace=namespace)

    freeze_mode = Toggle(True)
    seen = []

    async def drain_stream():
        async for event in streaming_watch(
                resource=resource, namespace=namespace, freeze_mode=freeze_mode):
            seen.append(event)

    caplog.set_level(logging.DEBUG)
    with timer:
        with pytest.raises(asyncio.TimeoutError):
            await asyncio.wait_for(drain_stream(), timeout=0.5)

    assert timer.seconds >= 0.5
    assert len(seen) == 0
    assert_logs([
        r"Freezing the watch-stream for",
    ], prohibited=[
        r"Resuming the watch-stream for",
    ])
async def test_created_empty(fn, expected):
    """A freshly created toggle-set contains nothing and reports the checker's neutral state."""
    toggleset = ToggleSet(fn)
    assert len(toggleset) == 0
    assert set(toggleset) == set()
    assert Toggle() not in toggleset
    assert toggleset.is_off() == (not expected)
    assert toggleset.is_on() == expected
async def test_preserved_on_logical_deletion(
        resource, settings, registry, indexers, index, caplog, event_type, handlers):
    """An object only marked for deletion (still present) keeps its index entry."""
    caplog.set_level(logging.DEBUG)
    handlers.index_mock.return_value = 456
    body = {'metadata': {'namespace': 'ns1', 'name': 'name1', 'deletionTimestamp': '...'}}
    await process_resource_event(
        lifecycle=all_at_once,
        registry=registry,
        settings=settings,
        resource=resource,
        indexers=indexers,
        memories=ResourceMemories(),
        memobase=Memo(),
        raw_event={'type': event_type, 'object': body},
        event_queue=asyncio.Queue(),
        resource_indexed=Toggle(),  # used! only to enable indexing.
    )
    assert set(index) == {None}
    assert set(index[None]) == {456}
async def test_freezing_is_ignored_if_turned_off(
        resource, stream, namespace, timer, caplog, assert_logs):
    """With the freeze-toggle off, the watch-stream flows through unimpeded."""
    stream.feed(STREAM_WITH_NORMAL_EVENTS, namespace=namespace)
    stream.close(namespace=namespace)

    freeze_mode = Toggle(False)
    seen = []

    async def drain_stream():
        async for event in streaming_watch(
                resource=resource, namespace=namespace, freeze_mode=freeze_mode):
            seen.append(event)

    caplog.set_level(logging.DEBUG)
    with timer:
        await asyncio.wait_for(drain_stream(), timeout=0.5)

    assert timer.seconds < 0.2  # no waits, exits as soon as possible
    assert len(seen) == 2
    assert_logs([], prohibited=[
        r"Freezing the watch-stream for",
        r"Resuming the watch-stream for",
    ])
async def test_created_as_off():
    """A toggle-set with no checker function starts in the off state."""
    toggleset = ToggleSet()
    assert len(toggleset) == 0
    assert set(toggleset) == set()
    assert Toggle() not in toggleset
    assert toggleset.is_off()
    assert not toggleset.is_on()
async def test_removed_and_remembered_on_temporary_errors(
        resource, settings, registry, memories, indexers, index, caplog,
        event_type, handlers, delay_kwargs, expected_delayed):
    """A temporary indexing error removes the value but remembers the retry state."""
    caplog.set_level(logging.DEBUG)
    body = {'metadata': {'namespace': 'ns1', 'name': 'name1'}}
    memory = await memories.recall(raw_body=body)
    handlers.index_mock.side_effect = TemporaryError("boo!", **delay_kwargs)
    await process_resource_event(
        lifecycle=all_at_once,
        registry=registry,
        settings=settings,
        resource=resource,
        indexers=indexers,
        memories=memories,
        memobase=Memo(),
        raw_event={'type': event_type, 'object': body},
        event_queue=asyncio.Queue(),
        resource_indexed=Toggle(),  # used! only to enable indexing.
    )
    assert set(index) == set()
    # Note: `== False` (not `not ...`) — the fields can be tri-state (None vs False).
    state = memory.indexing_state['index_fn']
    assert state.finished == False
    assert state.failure == False
    assert state.success == False
    assert state.message == 'boo!'
    assert state.delayed == expected_delayed
async def test_freezing_waits_until_resumed(
        resource, stream, namespace, timer, caplog, assert_logs):
    """A frozen stream resumes (and delivers events) once the toggle is turned off."""
    stream.feed(STREAM_WITH_NORMAL_EVENTS, namespace=namespace)
    stream.close(namespace=namespace)

    freeze_mode = Toggle(True)
    seen = []

    async def resume_later(delay: float):
        await asyncio.sleep(delay)
        await freeze_mode.turn_off()

    async def drain_stream():
        async for event in streaming_watch(
                resource=resource, namespace=namespace, freeze_mode=freeze_mode):
            seen.append(event)

    caplog.set_level(logging.DEBUG)
    with timer:
        asyncio.create_task(resume_later(0.2))
        await asyncio.wait_for(drain_stream(), timeout=1.0)

    assert 0.2 <= timer.seconds <= 0.5
    assert len(seen) == 2
    assert_logs([
        r"Freezing the watch-stream for",
        r"Resuming the watch-stream for",
    ])
async def test_making_a_turned_off_toggle(fn):
    """A toggle made in the off state registers in the set but keeps the set off."""
    toggleset = ToggleSet(fn)
    toggle = await toggleset.make_toggle(False)
    assert len(toggleset) == 1
    assert set(toggleset) == {toggle}
    assert toggle in toggleset
    assert Toggle() not in toggleset
    # Note: `== True/False` intentionally pins the exact boolean values.
    assert toggleset.is_off() == True
    assert toggleset.is_on() == False
async def test_dropping_an_unexistent_toggle():
    """Dropping a toggle that was never added is a harmless no-op."""
    toggleset = ToggleSet()
    stray = Toggle()
    await toggleset.drop_toggle(stray)
    assert len(toggleset) == 0
    assert set(toggleset) == set()
    assert stray not in toggleset
    assert toggleset.is_off()
    assert not toggleset.is_on()
async def test_dropping_an_unexistent_toggle(fn, expected):
    """Dropping a never-added toggle keeps the set empty and its state unchanged."""
    toggleset = ToggleSet(fn)
    stray = Toggle()
    await toggleset.drop_toggle(stray)
    assert len(toggleset) == 0
    assert set(toggleset) == set()
    assert stray not in toggleset
    assert toggleset.is_off() == (not expected)
    assert toggleset.is_on() == expected
async def test_making_a_turned_on_toggle():
    """A toggle made in the on state turns the whole set on."""
    toggleset = ToggleSet()
    toggle = await toggleset.make_toggle(True)
    assert len(toggleset) == 1
    assert set(toggleset) == {toggle}
    assert toggle in toggleset
    assert Toggle() not in toggleset
    assert not toggleset.is_off()
    assert toggleset.is_on()
async def test_dropping_multiple_toggles():
    """Dropping a mix of owned and foreign toggles empties the set cleanly."""
    toggleset = ToggleSet()
    owned = await toggleset.make_toggle(True)
    foreign = Toggle()
    await toggleset.drop_toggles([owned, foreign])
    assert len(toggleset) == 0
    assert set(toggleset) == set()
    assert owned not in toggleset
    assert foreign not in toggleset
    assert toggleset.is_off()
    assert not toggleset.is_on()
async def test_permanent_failures_are_not_reindexed(
        resource, settings, registry, memories, indexers, index, caplog, event_type, handlers):
    """A handler remembered as permanently failed is never invoked again."""
    caplog.set_level(logging.DEBUG)
    body = {'metadata': {'namespace': 'ns1', 'name': 'name1'}}
    memory = await memories.recall(raw_body=body)
    memory.indexing_state = State({'index_fn': HandlerState(failure=True)})
    await process_resource_event(
        lifecycle=all_at_once,
        registry=registry,
        settings=settings,
        resource=resource,
        indexers=indexers,
        memories=memories,
        memobase=Memo(),
        raw_event={'type': event_type, 'object': body},
        event_queue=asyncio.Queue(),
        resource_indexed=Toggle(),  # used! only to enable indexing.
    )
    assert handlers.index_mock.call_count == 0
async def test_temporary_failures_with_expired_delays_are_reindexed(
        resource, settings, registry, memories, indexers, index, caplog, event_type, handlers):
    """A temporarily failed handler whose retry delay has passed is invoked again."""
    caplog.set_level(logging.DEBUG)
    body = {'metadata': {'namespace': 'ns1', 'name': 'name1'}}
    # A deadline firmly in the past, so the delay is certainly expired.
    delayed = datetime.datetime(2020, 12, 31, 23, 59, 59, 0)
    memory = await memories.recall(raw_body=body)
    memory.indexing_state = State({'index_fn': HandlerState(delayed=delayed)})
    await process_resource_event(
        lifecycle=all_at_once,
        registry=registry,
        settings=settings,
        resource=resource,
        indexers=indexers,
        memories=memories,
        memobase=Memo(),
        raw_event={'type': event_type, 'object': body},
        event_queue=asyncio.Queue(),
        resource_indexed=Toggle(),  # used! only to enable indexing.
    )
    assert handlers.index_mock.call_count == 1
async def test_preserved_on_ignored_errors(
        resource, settings, registry, memories, indexers, index, caplog, event_type, handlers):
    """An arbitrary (ignored) handler error keeps the previous index value and clears the state."""
    caplog.set_level(logging.DEBUG)
    body = {'metadata': {'namespace': 'ns1', 'name': 'name1'}}
    memory = await memories.recall(raw_body=body)
    handlers.index_mock.side_effect = Exception("boo!")
    await process_resource_event(
        lifecycle=all_at_once,
        registry=registry,
        settings=settings,
        resource=resource,
        indexers=indexers,
        memories=memories,
        memobase=Memo(),
        raw_event={'type': event_type, 'object': body},
        event_queue=asyncio.Queue(),
        resource_indexed=Toggle(),  # used! only to enable indexing.
    )
    assert set(index) == {None}
    assert set(index[None]) == {123}
    assert memory.indexing_state is None
async def test_successes_are_removed_from_the_indexing_state(
        resource, settings, registry, memories, indexers, caplog, event_type, handlers):
    """
    A successful (re-)indexing purges the persisted indexing state.

    The mock must simulate a *successful* handler, i.e. yield a value.
    FIX: the original assigned ``side_effect = 123`` — a plain int is neither
    callable, iterable, nor an exception, so calling the mock would raise
    ``TypeError`` instead of returning 123. ``return_value`` is what the
    sibling tests use for the success case.
    """
    caplog.set_level(logging.DEBUG)
    body = {'metadata': {'namespace': 'ns1', 'name': 'name1'}}
    memory = await memories.recall(raw_body=body)
    memory.indexing_state = State({'unrelated': HandlerState(success=True)})
    handlers.index_mock.return_value = 123  # was: side_effect = 123 (TypeError at call time)
    await process_resource_event(
        lifecycle=all_at_once,
        registry=registry,
        settings=settings,
        resource=resource,
        indexers=indexers,
        memories=memories,
        memobase=Memo(),
        raw_event={'type': event_type, 'object': body},
        event_queue=asyncio.Queue(),
        resource_indexed=Toggle(),  # used! only to enable indexing.
    )
    assert handlers.index_mock.call_count == 1
    assert memory.indexing_state is None
async def test_creation_with_explicit_loop():
    """
    An explicitly passed event loop is stored on the toggle as-is.

    FIX: the manually created loop was never closed, leaking its selector /
    file descriptors and emitting a ResourceWarning on GC; close it in
    ``finally`` (the sibling variant of this test uses ``contextlib.closing``).
    """
    loop = asyncio.new_event_loop()
    try:
        toggle = Toggle(loop=loop)
        assert toggle.loop is loop
    finally:
        loop.close()
async def test_creation_with_default_loop():
    """With no explicit loop, the toggle binds to the currently running loop."""
    running_loop = asyncio.get_running_loop()
    toggle = Toggle()
    assert toggle.loop is running_loop
async def process_peering_event(
        *,
        raw_event: bodies.RawEvent,
        namespace: Optional[str],
        identity: Identity,
        settings: configuration.OperatorSettings,
        autoclean: bool = True,
        replenished: asyncio.Event,
        freeze_toggle: primitives.Toggle,
) -> None:
    """
    Handle a single update of the peers by us or by other operators.

    When an operator with a higher priority appears, switch to the freeze-mode.
    When these operators disappear or become presumably dead, resume the event handling.

    The freeze object is passed both to the peers handler to set/clear it,
    and to all the resource handlers to check its value when the events arrive
    (see `create_tasks` and `run` functions).
    """
    body: bodies.RawBody = raw_event['object']
    meta: bodies.RawMeta = raw_event['object']['metadata']

    # Silently ignore the peering objects which are not ours to worry.
    if meta.get('namespace') != namespace or meta.get(
            'name') != settings.peering.name:
        return

    # Find if we are still the highest priority operator.
    # The peers' statuses are stored keyed by operator id in the object's status.
    pairs = cast(Mapping[str, Mapping[str, object]], body.get('status', {}))
    peers = [
        Peer(identity=Identity(opid), **opinfo)
        for opid, opinfo in pairs.items()
    ]
    dead_peers = [peer for peer in peers if peer.is_dead]
    # Live peers other than ourselves; split by relative priority.
    live_peers = [
        peer for peer in peers
        if not peer.is_dead and peer.identity != identity
    ]
    prio_peers = [
        peer for peer in live_peers
        if peer.priority > settings.peering.priority
    ]
    same_peers = [
        peer for peer in live_peers
        if peer.priority == settings.peering.priority
    ]

    # Remove the stale records of the dead peers from the peering object.
    if autoclean and dead_peers:
        await clean(peers=dead_peers, settings=settings, namespace=namespace)

    if prio_peers:
        # A higher-priority operator exists: stand down until it goes away.
        if freeze_toggle.is_off():
            logger.info(f"Freezing operations in favour of {prio_peers}.")
            await freeze_toggle.turn_to(True)
    elif same_peers:
        # Equal-priority conflict: freeze everyone (including self) to be safe.
        logger.warning(
            f"Possibly conflicting operators with the same priority: {same_peers}."
        )
        if freeze_toggle.is_off():
            logger.warning(f"Freezing all operators, including self: {peers}")
            await freeze_toggle.turn_to(True)
    else:
        # No blocking peers remain: resume normal operation.
        if freeze_toggle.is_on():
            logger.info(
                f"Resuming operations after the freeze. Conflicting operators with the same priority are gone."
            )
            await freeze_toggle.turn_to(False)

    # Either wait for external updates (and exit when they arrive), or until the blocking peers
    # are expected to expire, and force the immediate re-evaluation by a certain change of self.
    # This incurs an extra PATCH request besides usual keepalives, but in the complete silence
    # from other peers that existed a moment earlier, this should not be a problem.
    now = datetime.datetime.utcnow()
    delay = max([0] + [(peer.deadline - now).total_seconds()
                       for peer in same_peers + prio_peers])
    if delay:
        try:
            await asyncio.wait_for(replenished.wait(), timeout=delay)
        except asyncio.TimeoutError:
            # Nobody replenished in time: touch self to trigger a re-evaluation.
            await touch(identity=identity, settings=settings, namespace=namespace)
async def test_creation_with_explicit_loop():
    """A toggle created with an explicit loop exposes that exact loop."""
    own_loop = asyncio.new_event_loop()
    try:
        toggle = Toggle(loop=own_loop)
        assert toggle.loop is own_loop
    finally:
        own_loop.close()  # equivalent of contextlib.closing: do not leak the loop
async def test_initialised_as_on():
    """A toggle initialised with ``True`` starts in the on state."""
    toggle = Toggle(True)
    assert toggle.is_on()
    assert not toggle.is_off()
    assert toggle
async def process_peering_event(
        *,
        raw_event: bodies.RawEvent,
        freeze_mode: primitives.Toggle,
        namespace: Optional[str],
        identity: Identity,
        settings: configuration.OperatorSettings,
        autoclean: bool = True,
        replenished: asyncio.Event,
) -> None:
    """
    Handle a single update of the peers by us or by other operators.

    When an operator with a higher priority appears, switch to the freeze-mode.
    When these operators disappear or become presumably dead, resume the event handling.

    The freeze object is passed both to the peers handler to set/clear it,
    and to all the resource handlers to check its value when the events arrive
    (see `create_tasks` and `run` functions).
    """
    body: bodies.RawBody = raw_event['object']
    meta: bodies.RawMeta = raw_event['object']['metadata']

    # Silently ignore the peering objects which are not ours to worry.
    if meta.get('namespace') != namespace or meta.get(
            'name') != settings.peering.name:
        return

    # Find if we are still the highest priority operator.
    # The peers' statuses are stored keyed by operator id in the object's status.
    pairs = cast(Mapping[str, Mapping[str, object]], body.get('status', {}))
    peers = [
        Peer(identity=Identity(opid), **opinfo)
        for opid, opinfo in pairs.items()
    ]
    dead_peers = [peer for peer in peers if peer.is_dead]
    # Live peers other than ourselves; split by relative priority.
    live_peers = [
        peer for peer in peers
        if not peer.is_dead and peer.identity != identity
    ]
    prio_peers = [
        peer for peer in live_peers
        if peer.priority > settings.peering.priority
    ]
    same_peers = [
        peer for peer in live_peers
        if peer.priority == settings.peering.priority
    ]

    # Remove the stale records of the dead peers from the peering object.
    if autoclean and dead_peers:
        await clean(peers=dead_peers, settings=settings, namespace=namespace)

    if prio_peers:
        # A higher-priority operator exists: stand down until it goes away.
        if freeze_mode.is_off():
            logger.info(f"Freezing operations in favour of {prio_peers}.")
            await freeze_mode.turn_on()
    elif same_peers:
        # Equal-priority conflict: freeze everyone (including self) to be safe.
        logger.warning(
            f"Possibly conflicting operators with the same priority: {same_peers}."
        )
        if freeze_mode.is_off():
            logger.warning(f"Freezing all operators, including self: {peers}")
            await freeze_mode.turn_on()
    else:
        # No blocking peers remain: resume normal operation.
        if freeze_mode.is_on():
            logger.info(
                f"Resuming operations after the freeze. Conflicting operators with the same priority are gone."
            )
            await freeze_mode.turn_off()
async def test_created_as_off():
    """A toggle created with no initial state starts off."""
    toggle = Toggle()
    assert toggle.is_off()
    assert not toggle.is_on()
    assert not toggle