Example No. 1
async def test_other_peering_objects_are_ignored(mocker, k8s_mocked, settings,
                                                 replenished, peering_resource,
                                                 peering_namespace):

    status = mocker.Mock()
    status.items.side_effect = Exception("This should not be called.")
    event = bodies.RawEvent(
        type='ADDED',  # irrelevant
        object={
            'metadata': {
                'name': 'their-name'
            },
            'status': status,
        })

    wait_for = mocker.patch('asyncio.wait_for')

    settings.peering.name = 'our-name'
    await process_peering_event(
        raw_event=event,
        freeze_toggle=primitives.Toggle(),
        replenished=replenished,
        autoclean=False,
        identity='id',
        settings=settings,
        namespace=peering_namespace,
        resource=peering_resource,
    )
    assert not status.items.called
    assert not k8s_mocked.patch_obj.called
    assert wait_for.call_count == 0
Example No. 2
async def test_ignored_for_higher_priority_peer_when_already_on(caplog, assert_logs):
    event = bodies.RawEvent(
        type='ADDED',  # irrelevant
        object={
            'metadata': {'name': 'name', 'namespace': 'namespace'},  # for matching
            'status': {
                'higher-prio': {
                    'priority': 101,
                    'lifetime': 10,
                    'lastseen': '2020-12-31T23:59:59'
                },
            },
        })

    replenished = asyncio.Event()
    freeze_mode = primitives.Toggle(True)
    ourselves = Peer(id='id', name='name', namespace='namespace', priority=100)

    caplog.set_level(0)
    assert freeze_mode.is_on()
    await process_peering_event(
        raw_event=event,
        freeze_mode=freeze_mode,
        ourselves=ourselves,
        replenished=replenished,
        autoclean=False,
    )
    assert freeze_mode.is_on()
    assert_logs([], prohibited=[
        "Possibly conflicting operators",
        "Freezing all operators, including self",
        "Freezing operations in favour of",
        "Resuming operations after the freeze",
    ])
Example No. 3
async def test_other_peering_objects_are_ignored(mocker, settings, our_name,
                                                 our_namespace, their_name,
                                                 their_namespace):

    status = mocker.Mock()
    status.items.side_effect = Exception("This should not be called.")
    event = bodies.RawEvent(
        type='ADDED',  # irrelevant
        object={
            'metadata': {
                'name': their_name,
                'namespace': their_namespace
            },
            'status': status,
        })

    settings.peering.name = our_name
    await process_peering_event(
        raw_event=event,
        freeze_mode=primitives.Toggle(),
        replenished=asyncio.Event(),
        autoclean=False,
        identity='id',
        settings=settings,
        namespace=our_namespace,
    )
    assert not status.items.called
Example No. 4
async def test_ignored_for_same_priority_peer_when_already_on(
        mocker, k8s_mocked, replenished, caplog, assert_logs, settings,
        peering_resource, peering_namespace):

    event = bodies.RawEvent(
        type='ADDED',  # irrelevant
        object={
            'metadata': {
                'name': 'name',
                'namespace': peering_namespace
            },  # for matching
            'status': {
                'higher-prio': {
                    'priority': 100,
                    'lifetime': 10,
                    'lastseen': '2020-12-31T23:59:59'
                },
            },
        })
    settings.peering.name = 'name'
    settings.peering.priority = 100

    freeze_toggle = primitives.Toggle(True)
    wait_for = mocker.patch('asyncio.wait_for')

    caplog.set_level(0)
    assert freeze_toggle.is_on()
    await process_peering_event(
        raw_event=event,
        freeze_toggle=freeze_toggle,
        replenished=replenished,
        autoclean=False,
        namespace=peering_namespace,
        resource=peering_resource,
        identity='id',
        settings=settings,
    )
    assert freeze_toggle.is_on()
    assert wait_for.call_count == 1
    assert 9 < wait_for.call_args[1]['timeout'] < 10
    assert not k8s_mocked.patch_obj.called
    assert_logs([
        "Possibly conflicting operators",
    ], prohibited=[
        "Freezing all operators, including self",
        "Freezing operations in favour of",
        "Resuming operations after the freeze",
    ])
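The asserted timeout of just under ten seconds follows from the peer record above: the operator stays frozen until the blocking peer's record expires at `lastseen + lifetime`. Assuming the test clock is frozen a fraction of a second after the `lastseen` timestamp (the fixtures are not shown here), the remaining time lands just inside the asserted 9..10 second window. A minimal sketch of that arithmetic, for illustration only:

import datetime

def remaining_freeze(lastseen: str, lifetime: int, now: datetime.datetime) -> float:
    # The blocking peer expires at lastseen + lifetime; we only need to wait that long.
    seen = datetime.datetime.fromisoformat(lastseen)
    deadline = seen + datetime.timedelta(seconds=lifetime)
    return max(0.0, (deadline - now).total_seconds())

# With the clock frozen half a second after `lastseen`, the remainder is ~9.5 s:
now = datetime.datetime(2020, 12, 31, 23, 59, 59, 500000)
assert 9 < remaining_freeze('2020-12-31T23:59:59', 10, now) < 10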
Example No. 5
    def __init__(
        self,
        __src: Optional[Mapping[str, object]] = None,
    ) -> None:
        super().__init__()
        self._current = {}
        self._invalid = collections.defaultdict(list)
        self._lock = asyncio.Lock()

        if __src is not None:
            self._update_converted(__src)

        # Mark a pre-populated vault to be usable instantly,
        # or trigger the initial authentication for an empty vault.
        self._ready = primitives.Toggle(bool(self))
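The last line gates the vault's usability on `primitives.Toggle(bool(self))`: a pre-populated vault is usable immediately, while an empty one stays "not ready" until the first authentication fills it. The class below is a simplified stand-in for such a two-state async toggle, written for illustration; the name `ReadinessToggle` and the `turn_to()`/`wait_for()` methods are assumptions, not the project's actual API:

import asyncio

class ReadinessToggle:
    # A boolean state plus a condition variable, so coroutines can wait for a flip.
    def __init__(self, state: bool = False) -> None:
        self._state = bool(state)
        self._condition = asyncio.Condition()

    def is_on(self) -> bool:
        return self._state

    def is_off(self) -> bool:
        return not self._state

    async def turn_to(self, state: bool) -> None:
        async with self._condition:
            self._state = bool(state)
            self._condition.notify_all()

    async def wait_for(self, state: bool) -> None:
        async with self._condition:
            await self._condition.wait_for(lambda: self._state == bool(state))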
Example No. 6
async def test_ignored_for_same_priority_peer_when_already_on(
        k8s_mocked, caplog, assert_logs, settings, peering_resource,
        peering_namespace):

    event = bodies.RawEvent(
        type='ADDED',  # irrelevant
        object={
            'metadata': {
                'name': 'name',
                'namespace': peering_namespace
            },  # for matching
            'status': {
                'higher-prio': {
                    'priority': 100,
                    'lifetime': 10,
                    'lastseen': '2020-12-31T23:59:59'
                },
            },
        })
    settings.peering.name = 'name'
    settings.peering.priority = 100

    conflicts_found = primitives.Toggle(True)
    k8s_mocked.sleep_or_wait.return_value = 1  # as if interrupted by stream pressure

    caplog.set_level(0)
    assert conflicts_found.is_on()
    await process_peering_event(
        raw_event=event,
        conflicts_found=conflicts_found,
        autoclean=False,
        namespace=peering_namespace,
        resource=peering_resource,
        identity='id',
        settings=settings,
    )
    assert conflicts_found.is_on()
    assert k8s_mocked.sleep_or_wait.call_count == 1
    assert 9 < k8s_mocked.sleep_or_wait.call_args[0][0][0] < 10
    assert not k8s_mocked.patch_obj.called
    assert_logs([
        "Possibly conflicting operators",
    ], prohibited=[
        "Pausing all operators, including self",
        "Pausing operations in favour of",
        "Resuming operations after the pause",
    ])
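In this variant the freeze machinery sleeps via the mocked `k8s_mocked.sleep_or_wait` instead of `asyncio.wait_for`. Its implied contract, judging by the comments and the asserted return values, is: sleep for the smallest of the requested delays, wake up early if prodded, and report the unslept remainder (or None if the sleep ran to completion). A minimal sketch of such a helper under those assumptions, not the project's actual implementation:

import asyncio
from typing import Collection, Optional

async def sleep_or_wait(
        delays: Collection[float],
        wakeup: Optional[asyncio.Event] = None,
) -> Optional[float]:
    # Sleep for the smallest delay, but wake up early if the event is set.
    delay = min(delays)
    loop = asyncio.get_running_loop()
    started = loop.time()
    try:
        if wakeup is None:
            await asyncio.sleep(delay)
            return None
        await asyncio.wait_for(wakeup.wait(), timeout=delay)
    except asyncio.TimeoutError:
        return None  # slept the full delay, uninterrupted
    else:
        return max(0.0, delay - (loop.time() - started))  # interrupted early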
Example No. 7
async def test_toggled_on_for_same_priority_peer_when_initially_off(
        caplog, assert_logs, settings):

    event = bodies.RawEvent(
        type='ADDED',  # irrelevant
        object={
            'metadata': {
                'name': 'name',
                'namespace': 'namespace'
            },  # for matching
            'status': {
                'higher-prio': {
                    'priority': 100,
                    'lifetime': 10,
                    'lastseen': '2020-12-31T23:59:59'
                },
            },
        })
    settings.peering.name = 'name'
    settings.peering.priority = 100

    replenished = asyncio.Event()
    freeze_mode = primitives.Toggle(False)

    caplog.set_level(0)
    assert freeze_mode.is_off()
    await process_peering_event(
        raw_event=event,
        freeze_mode=freeze_mode,
        replenished=replenished,
        autoclean=False,
        namespace='namespace',
        identity='id',
        settings=settings,
    )
    assert freeze_mode.is_on()
    assert_logs([
        "Possibly conflicting operators",
        "Freezing all operators, including self",
    ], prohibited=[
        "Freezing operations in favour of",
        "Resuming operations after the freeze",
    ])
Example No. 8
async def test_resumes_immediately_on_expiration_of_blocking_peers(
        mocker, k8s_mocked, replenished, caplog, assert_logs, settings,
        priority, peering_resource, peering_namespace):

    event = bodies.RawEvent(
        type='ADDED',  # irrelevant
        object={
            'metadata': {
                'name': 'name',
                'namespace': peering_namespace
            },  # for matching
            'status': {
                'higher-prio': {
                    'priority': priority,
                    'lifetime': 10,
                    'lastseen': '2020-12-31T23:59:59'
                },
            },
        })
    settings.peering.name = 'name'
    settings.peering.priority = 100

    freeze_toggle = primitives.Toggle(True)
    wait_for = mocker.patch('asyncio.wait_for',
                            side_effect=asyncio.TimeoutError)

    caplog.set_level(0)
    assert freeze_toggle.is_on()
    await process_peering_event(
        raw_event=event,
        freeze_toggle=freeze_toggle,
        replenished=replenished,
        autoclean=False,
        namespace=peering_namespace,
        resource=peering_resource,
        identity='id',
        settings=settings,
    )
    assert freeze_toggle.is_on()
    assert wait_for.call_count == 1
    assert 9 < wait_for.call_args[1]['timeout'] < 10
    assert k8s_mocked.patch_obj.called
Example No. 9
async def test_resumes_immediately_on_expiration_of_blocking_peers(
        k8s_mocked, caplog, assert_logs, settings, priority, peering_resource,
        peering_namespace):

    event = bodies.RawEvent(
        type='ADDED',  # irrelevant
        object={
            'metadata': {
                'name': 'name',
                'namespace': peering_namespace
            },  # for matching
            'status': {
                'higher-prio': {
                    'priority': priority,
                    'lifetime': 10,
                    'lastseen': '2020-12-31T23:59:59'
                },
            },
        })
    settings.peering.name = 'name'
    settings.peering.priority = 100

    conflicts_found = primitives.Toggle(True)
    k8s_mocked.sleep_or_wait.return_value = None  # as if finished sleeping uninterrupted

    caplog.set_level(0)
    assert conflicts_found.is_on()
    await process_peering_event(
        raw_event=event,
        conflicts_found=conflicts_found,
        autoclean=False,
        namespace=peering_namespace,
        resource=peering_resource,
        identity='id',
        settings=settings,
    )
    assert conflicts_found.is_on()
    assert k8s_mocked.sleep_or_wait.call_count == 1
    assert 9 < k8s_mocked.sleep_or_wait.call_args[0][0][0] < 10
    assert k8s_mocked.patch_obj.called
Example No. 10
async def spawn_tasks(
        *,
        lifecycle: Optional[lifecycles.LifeCycleFn] = None,
        registry: Optional[registries.OperatorRegistry] = None,
        memories: Optional[containers.ResourceMemories] = None,
        standalone: bool = False,
        priority: int = 0,
        peering_name: Optional[str] = None,
        liveness_endpoint: Optional[str] = None,
        namespace: Optional[str] = None,
        stop_flag: Optional[primitives.Flag] = None,
        ready_flag: Optional[primitives.Flag] = None,
        vault: Optional[credentials.Vault] = None,
) -> Tasks:
    """
    Spawn all the tasks needed to run the operator.

    The tasks are properly inter-connected with the synchronisation primitives.
    """
    loop = asyncio.get_running_loop()

    # The freezer and the registry are scoped to this whole task-set, to sync them all.
    lifecycle = lifecycle if lifecycle is not None else lifecycles.get_default_lifecycle()
    registry = registry if registry is not None else registries.get_default_registry()
    memories = memories if memories is not None else containers.ResourceMemories()
    vault = vault if vault is not None else global_vault
    vault = vault if vault is not None else credentials.Vault()
    event_queue: posting.K8sEventQueue = asyncio.Queue()
    freeze_mode: primitives.Toggle = primitives.Toggle()
    signal_flag: asyncio_Future = asyncio.Future()
    ready_flag = ready_flag if ready_flag is not None else asyncio.Event()
    tasks: MutableSequence[asyncio_Task] = []

    # Global credentials store for this operator, also for CRD-reading & peering mode detection.
    auth.vault_var.set(vault)

    # A few common background forever-running infrastructural tasks (irregular root tasks).
    tasks.extend([
        loop.create_task(_stop_flag_checker(
            signal_flag=signal_flag,
            stop_flag=stop_flag,
        )),
        loop.create_task(_startup_cleanup_activities(
            root_tasks=tasks,  # used as a "live" view, populated later.
            ready_flag=ready_flag,
            registry=registry,
            vault=vault,  # to purge & finalize the caches in the end.
        )),
    ])

    # Keeping the credentials fresh and valid via the authentication handlers on demand.
    tasks.extend([
        loop.create_task(_root_task_checker(
            name="credentials retriever", ready_flag=ready_flag,
            coro=activities.authenticator(
                registry=registry,
                vault=vault))),
    ])

    # K8s-event posting. Events are queued in-memory and posted in the background.
    # NB: currently, it is a global task, but can be made per-resource or per-object.
    tasks.extend([
        loop.create_task(_root_task_checker(
            name="poster of events", ready_flag=ready_flag,
            coro=posting.poster(
                event_queue=event_queue))),
    ])

    # Liveness probing -- so that Kubernetes would know that the operator is alive.
    if liveness_endpoint:
        tasks.extend([
            loop.create_task(_root_task_checker(
                name="health reporter", ready_flag=ready_flag,
                coro=probing.health_reporter(
                    registry=registry,
                    endpoint=liveness_endpoint))),
        ])

    # Monitor the peers, unless explicitly disabled.
    ourselves: Optional[peering.Peer] = await peering.Peer.detect(
        id=peering.detect_own_id(), priority=priority,
        standalone=standalone, namespace=namespace, name=peering_name,
    )
    if ourselves:
        tasks.extend([
            loop.create_task(peering.peers_keepalive(
                ourselves=ourselves)),
            loop.create_task(_root_task_checker(
                name="watcher of peering", ready_flag=ready_flag,
                coro=queueing.watcher(
                    namespace=namespace,
                    resource=ourselves.resource,
                    processor=functools.partial(peering.process_peering_event,
                                                ourselves=ourselves,
                                                freeze_mode=freeze_mode)))),
        ])

    # Resource event handling, only once for every known resource (de-duplicated).
    for resource in registry.resources:
        tasks.extend([
            loop.create_task(_root_task_checker(
                name=f"watcher of {resource.name}", ready_flag=ready_flag,
                coro=queueing.watcher(
                    namespace=namespace,
                    resource=resource,
                    freeze_mode=freeze_mode,
                    processor=functools.partial(processing.process_resource_event,
                                                lifecycle=lifecycle,
                                                registry=registry,
                                                memories=memories,
                                                resource=resource,
                                                event_queue=event_queue)))),
        ])

    # On Ctrl+C or pod termination, cancel all tasks gracefully.
    if threading.current_thread() is threading.main_thread():
        # Handle NotImplementedError when run on Windows, since asyncio only supports Unix signals.
        try:
            loop.add_signal_handler(signal.SIGINT, signal_flag.set_result, signal.SIGINT)
            loop.add_signal_handler(signal.SIGTERM, signal_flag.set_result, signal.SIGTERM)
        except NotImplementedError:
            logger.warning("OS signals are ignored: can't add signal handler in Windows.")

    else:
        logger.warning("OS signals are ignored: running not in the main thread.")

    return tasks
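The returned tasks are meant to be driven by the framework's own runner, which is not shown in these examples. As a rough, hedged sketch of how they could be awaited in isolation (the gather-and-cancel pattern below is an assumption, and a configured cluster connection is presumed):

import asyncio

async def run_operator() -> None:
    # Spawn the root tasks and wait until any of them exits (e.g. the stop-flag checker).
    tasks = await spawn_tasks(standalone=True)
    done, pending = await asyncio.wait(tasks, return_when=asyncio.FIRST_COMPLETED)
    # Cancel whatever is still running and wait for the cancellations to settle.
    for task in pending:
        task.cancel()
    await asyncio.gather(*pending, return_exceptions=True)

# asyncio.run(run_operator())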
Example No. 11
async def spawn_tasks(
    *,
    lifecycle: Optional[lifecycles.LifeCycleFn] = None,
    registry: Optional[registries.OperatorRegistry] = None,
    settings: Optional[configuration.OperatorSettings] = None,
    memories: Optional[containers.ResourceMemories] = None,
    standalone: Optional[bool] = None,
    priority: Optional[int] = None,
    peering_name: Optional[str] = None,
    liveness_endpoint: Optional[str] = None,
    namespace: Optional[str] = None,
    stop_flag: Optional[primitives.Flag] = None,
    ready_flag: Optional[primitives.Flag] = None,
    vault: Optional[credentials.Vault] = None,
) -> Collection[aiotasks.Task]:
    """
    Spawn all the tasks needed to run the operator.

    The tasks are properly inter-connected with the synchronisation primitives.
    """
    loop = asyncio.get_running_loop()

    # The freezer and the registry are scoped to this whole task-set, to sync them all.
    lifecycle = lifecycle if lifecycle is not None else lifecycles.get_default_lifecycle()
    registry = registry if registry is not None else registries.get_default_registry()
    settings = settings if settings is not None else configuration.OperatorSettings()
    memories = memories if memories is not None else containers.ResourceMemories()
    vault = vault if vault is not None else global_vault
    vault = vault if vault is not None else credentials.Vault()
    event_queue: posting.K8sEventQueue = asyncio.Queue()
    freeze_mode: primitives.Toggle = primitives.Toggle()
    signal_flag: aiotasks.Future = asyncio.Future()
    started_flag: asyncio.Event = asyncio.Event()
    tasks: MutableSequence[aiotasks.Task] = []

    # Map kwargs into the settings object.
    if peering_name is not None:
        settings.peering.mandatory = True
        settings.peering.name = peering_name
    if standalone is not None:
        settings.peering.standalone = standalone
    if priority is not None:
        settings.peering.priority = priority

    # Global credentials store for this operator, also for CRD-reading & peering mode detection.
    auth.vault_var.set(vault)

    # Special case: pass the settings container through the user-side handlers (no explicit args).
    # Toolkits have to keep the original operator context somehow, and the only way is contextvars.
    posting.settings_var.set(settings)

    # A few common background forever-running infrastructural tasks (irregular root tasks).
    tasks.append(
        aiotasks.create_task(name="stop-flag checker",
                             coro=_stop_flag_checker(signal_flag=signal_flag,
                                                     stop_flag=stop_flag)))
    tasks.append(
        aiotasks.create_task(name="ultimate termination",
                             coro=_ultimate_termination(settings=settings,
                                                        stop_flag=stop_flag)))
    tasks.append(
        aiotasks.create_task(
            name="startup/cleanup activities",
            coro=_startup_cleanup_activities(
                root_tasks=tasks,  # used as a "live" view, populated later.
                ready_flag=ready_flag,
                started_flag=started_flag,
                registry=registry,
                settings=settings,
                vault=vault)))  # to purge & finalize the caches in the end.

    # Kill all the daemons gracefully when the operator exits (so that they are not "hung").
    tasks.append(
        aiotasks.create_guarded_task(
            name="daemon killer",
            flag=started_flag,
            logger=logger,
            coro=daemons.daemon_killer(settings=settings, memories=memories)))

    # Keeping the credentials fresh and valid via the authentication handlers on demand.
    tasks.append(
        aiotasks.create_guarded_task(name="credentials retriever",
                                     flag=started_flag,
                                     logger=logger,
                                     coro=activities.authenticator(
                                         registry=registry,
                                         settings=settings,
                                         vault=vault)))

    # K8s-event posting. Events are queued in-memory and posted in the background.
    # NB: currently, it is a global task, but can be made per-resource or per-object.
    tasks.append(
        aiotasks.create_guarded_task(
            name="poster of events",
            flag=started_flag,
            logger=logger,
            coro=posting.poster(event_queue=event_queue)))

    # Liveness probing -- so that Kubernetes would know that the operator is alive.
    if liveness_endpoint:
        tasks.append(
            aiotasks.create_guarded_task(name="health reporter",
                                         flag=started_flag,
                                         logger=logger,
                                         coro=probing.health_reporter(
                                             registry=registry,
                                             settings=settings,
                                             endpoint=liveness_endpoint)))

    # Monitor the peers, unless explicitly disabled.
    if await peering.detect_presence(namespace=namespace, settings=settings):
        identity = peering.detect_own_id(manual=False)
        tasks.append(
            aiotasks.create_guarded_task(name="peering keepalive",
                                         flag=started_flag,
                                         logger=logger,
                                         coro=peering.keepalive(
                                             namespace=namespace,
                                             settings=settings,
                                             identity=identity)))
        tasks.append(
            aiotasks.create_guarded_task(
                name="watcher of peering",
                flag=started_flag,
                logger=logger,
                coro=queueing.watcher(
                    namespace=namespace,
                    settings=settings,
                    resource=peering.guess_resource(namespace=namespace),
                    processor=functools.partial(peering.process_peering_event,
                                                namespace=namespace,
                                                settings=settings,
                                                identity=identity,
                                                freeze_mode=freeze_mode))))

    # Resource event handling, only once for every known resource (de-duplicated).
    for resource in registry.resources:
        tasks.append(
            aiotasks.create_guarded_task(
                name=f"watcher of {resource.name}",
                flag=started_flag,
                logger=logger,
                coro=queueing.watcher(namespace=namespace,
                                      settings=settings,
                                      resource=resource,
                                      freeze_mode=freeze_mode,
                                      processor=functools.partial(
                                          processing.process_resource_event,
                                          lifecycle=lifecycle,
                                          registry=registry,
                                          settings=settings,
                                          memories=memories,
                                          resource=resource,
                                          event_queue=event_queue))))

    # On Ctrl+C or pod termination, cancel all tasks gracefully if possible.
    if threading.current_thread() is threading.main_thread():
        # Handle NotImplementedError when run on Windows, since asyncio only supports Unix signals.
        try:
            loop.add_signal_handler(signal.SIGINT, signal_flag.set_result,
                                    signal.SIGINT)
            loop.add_signal_handler(signal.SIGTERM, signal_flag.set_result,
                                    signal.SIGTERM)
        except NotImplementedError:
            logger.warning(
                "OS signals are ignored: can't add signal handler in Windows.")

    else:
        logger.warning(
            "OS signals are ignored: running not in the main thread.")

    return tasks
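The `aiotasks.create_guarded_task()` helper itself is not shown in these examples; its call sites only reveal that it takes a name, a start flag, a logger, and a coroutine. A hypothetical stand-in with those semantics might look like the sketch below (the wrapper is an assumption for illustration, not the project's actual implementation):

import asyncio
import logging
from typing import Any, Coroutine

def create_guarded_task(
        *, name: str, flag: asyncio.Event, logger: logging.Logger,
        coro: Coroutine[Any, Any, Any]) -> asyncio.Task:
    # Delay the wrapped coroutine until the flag is set, and log unexpected failures
    # instead of letting them vanish inside a dangling task.
    async def _guard() -> None:
        await flag.wait()
        try:
            await coro
        except asyncio.CancelledError:
            raise
        except Exception:
            logger.exception(f"Root task {name!r} has failed unexpectedly.")
    return asyncio.create_task(_guard(), name=name)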