def test_good_aliases_over_good_addresses__symmetric(mocker, good1, good2):
    mocker.patch('socket.gethostname', return_value='localhost')
    mocker.patch('socket.gethostbyaddr', side_effect=lambda fqdn: (fqdn, [good1], [good2]))
    own_id = detect_own_id(manual=True)
    assert own_id == f'some-user@{good1}'

    mocker.patch('socket.gethostname', return_value='localhost')
    mocker.patch('socket.gethostbyaddr', side_effect=lambda fqdn: (fqdn, [good2], [good1]))
    own_id = detect_own_id(manual=True)
    assert own_id == f'some-user@{good2}'
def freeze(
        id: Optional[str],
        message: Optional[str],
        lifetime: int,
        namespaces: Collection[references.NamespacePattern],
        clusterwide: bool,
        peering_name: str,
        priority: int,
) -> None:
    """ Pause the resource handling in the operator(s). """
    identity = peering.Identity(id) if id else peering.detect_own_id(manual=True)
    insights = references.Insights()
    settings = configuration.OperatorSettings()
    settings.peering.name = peering_name
    settings.peering.priority = priority
    return running.run(
        clusterwide=clusterwide,
        namespaces=namespaces,
        insights=insights,
        identity=identity,
        settings=settings,
        _command=peering.touch_command(
            insights=insights,
            identity=identity,
            settings=settings,
            lifetime=lifetime))
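# A hypothetical direct invocation of the command above -- normally it is wired
# into the command-line interface rather than called like this. All values below
# are illustrative, not defaults taken from the real CLI:
#
#     freeze(
#         id=None,                  # None => autodetect the identity as user@host
#         message=None,             # accepted for interface compatibility; unused here
#         lifetime=60,              # seconds before the pause expires on its own
#         namespaces=['default'],   # namespace patterns to serve (if not cluster-wide)
#         clusterwide=False,
#         peering_name='example',   # an existing peering object, illustrative name
#         priority=100,             # typically higher than the running operators'
#     )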
def test_from_a_pod_id(mocker, manual):
    mocker.patch('socket.gethostname', return_value='some-host')
    mocker.patch('socket.gethostbyaddr', side_effect=lambda fqdn: (fqdn, [], []))
    mocker.patch.dict(os.environ, POD_ID='some-pod-1')
    own_id = detect_own_id(manual=manual)
    assert own_id == 'some-pod-1'
def test_suffixes_appended(mocker):
    mocker.patch('random.choices', return_value='random-str')
    mocker.patch('socket.gethostname', return_value='some-host')
    mocker.patch('socket.gethostbyaddr', side_effect=lambda fqdn: (fqdn, [], []))
    with freezegun.freeze_time('2020-12-31T23:59:59.123456'):
        own_id = detect_own_id(manual=False)
    assert own_id == 'some-user@some-host/20201231235959/random-str'
async def spawn_tasks(
        *,
        lifecycle: Optional[execution.LifeCycleFn] = None,
        indexers: Optional[indexing.OperatorIndexers] = None,
        registry: Optional[registries.OperatorRegistry] = None,
        settings: Optional[configuration.OperatorSettings] = None,
        memories: Optional[inventory.ResourceMemories] = None,
        insights: Optional[references.Insights] = None,
        identity: Optional[peering.Identity] = None,
        standalone: Optional[bool] = None,
        priority: Optional[int] = None,
        peering_name: Optional[str] = None,
        liveness_endpoint: Optional[str] = None,
        clusterwide: bool = False,
        namespaces: Collection[references.NamespacePattern] = (),
        namespace: Optional[references.NamespacePattern] = None,  # deprecated
        stop_flag: Optional[aioadapters.Flag] = None,
        ready_flag: Optional[aioadapters.Flag] = None,
        vault: Optional[credentials.Vault] = None,
        memo: Optional[object] = None,
        _command: Optional[Coroutine[None, None, None]] = None,
) -> Collection[aiotasks.Task]:
    """
    Spawn all the tasks needed to run the operator.

    The tasks are properly inter-connected with the synchronisation primitives.
    """
    loop = asyncio.get_running_loop()

    if namespaces and namespace:
        raise TypeError("Either namespaces= or namespace= can be passed. Got both.")
    elif namespace:
        warnings.warn("namespace= is deprecated; use namespaces=[...]", DeprecationWarning)
        namespaces = [namespace]

    if clusterwide and namespaces:
        raise TypeError("The operator can be either cluster-wide or namespaced, not both.")
    if not clusterwide and not namespaces:
        warnings.warn("Absence of either namespaces or cluster-wide flag will become an error soon."
                      " For now, switching to the cluster-wide mode for backward compatibility.",
                      FutureWarning)
        clusterwide = True

    # All tasks of the operator are synced via these primitives and structures:
    lifecycle = lifecycle if lifecycle is not None else lifecycles.get_default_lifecycle()
    registry = registry if registry is not None else registries.get_default_registry()
    settings = settings if settings is not None else configuration.OperatorSettings()
    memories = memories if memories is not None else inventory.ResourceMemories()
    indexers = indexers if indexers is not None else indexing.OperatorIndexers()
    insights = insights if insights is not None else references.Insights()
    identity = identity if identity is not None else peering.detect_own_id(manual=False)
    vault = vault if vault is not None else credentials.Vault()
    memo = memo if memo is not None else ephemera.Memo()
    memo = ephemera.AnyMemo(memo)  # type-casted
    event_queue: posting.K8sEventQueue = asyncio.Queue()
    signal_flag: aiotasks.Future = asyncio.Future()
    started_flag: asyncio.Event = asyncio.Event()
    operator_paused = aiotoggles.ToggleSet(any)
    tasks: MutableSequence[aiotasks.Task] = []

    # Map kwargs into the settings object.
    settings.peering.clusterwide = clusterwide
    if peering_name is not None:
        settings.peering.mandatory = True
        settings.peering.name = peering_name
    if standalone is not None:
        settings.peering.standalone = standalone
    if priority is not None:
        settings.peering.priority = priority

    # Prepopulate indexers with empty indices -- to be available for the startup handlers.
    indexers.ensure(registry._indexing.get_all_handlers())

    # Global credentials store for this operator, also for CRD-reading & peering mode detection.
    auth.vault_var.set(vault)

    # Special case: pass the settings container through the user-side handlers (no explicit args).
    # Toolkits have to keep the original operator context somehow, and the only way is contextvars.
    posting.settings_var.set(settings)

    # A few common background forever-running infrastructural tasks (irregular root tasks).
    tasks.append(aiotasks.create_task(
        name="stop-flag checker",
        coro=_stop_flag_checker(
            signal_flag=signal_flag,
            stop_flag=stop_flag)))
    tasks.append(aiotasks.create_task(
        name="ultimate termination",
        coro=_ultimate_termination(
            settings=settings,
            stop_flag=stop_flag)))
    tasks.append(aiotasks.create_task(
        name="startup/cleanup activities",
        coro=_startup_cleanup_activities(
            root_tasks=tasks,  # used as a "live" view, populated later.
            ready_flag=ready_flag,
            started_flag=started_flag,
            registry=registry,
            settings=settings,
            indices=indexers.indices,
            vault=vault,
            memo=memo)))  # to purge & finalize the caches in the end.

    # Kill all the daemons gracefully when the operator exits (so that they are not "hung").
    tasks.append(aiotasks.create_guarded_task(
        name="daemon killer", flag=started_flag, logger=logger,
        coro=daemons.daemon_killer(
            settings=settings,
            memories=memories,
            operator_paused=operator_paused)))

    # Keeping the credentials fresh and valid via the authentication handlers on demand.
    tasks.append(aiotasks.create_guarded_task(
        name="credentials retriever", flag=started_flag, logger=logger,
        coro=activities.authenticator(
            registry=registry,
            settings=settings,
            indices=indexers.indices,
            vault=vault,
            memo=memo)))

    # K8s-event posting. Events are queued in-memory and posted in the background.
    # NB: currently, it is a global task, but can be made per-resource or per-object.
    tasks.append(aiotasks.create_guarded_task(
        name="poster of events", flag=started_flag, logger=logger,
        coro=posting.poster(
            backbone=insights.backbone,
            event_queue=event_queue)))

    # Liveness probing -- so that Kubernetes would know that the operator is alive.
    if liveness_endpoint:
        tasks.append(aiotasks.create_guarded_task(
            name="health reporter", flag=started_flag, logger=logger,
            coro=probing.health_reporter(
                registry=registry,
                settings=settings,
                endpoint=liveness_endpoint,
                indices=indexers.indices,
                memo=memo)))

    # Admission webhooks run as either a server or a tunnel or a fixed config.
    # The webhook manager automatically adjusts the cluster configuration at runtime.
    container: aiovalues.Container[reviews.WebhookClientConfig] = aiovalues.Container()
    tasks.append(aiotasks.create_guarded_task(
        name="admission insights chain", flag=started_flag, logger=logger,
        coro=aiobindings.condition_chain(
            source=insights.revised, target=container.changed)))
    tasks.append(aiotasks.create_guarded_task(
        name="admission validating configuration manager", flag=started_flag, logger=logger,
        coro=admission.validating_configuration_manager(
            container=container, settings=settings, registry=registry, insights=insights)))
    tasks.append(aiotasks.create_guarded_task(
        name="admission mutating configuration manager", flag=started_flag, logger=logger,
        coro=admission.mutating_configuration_manager(
            container=container, settings=settings, registry=registry, insights=insights)))
    tasks.append(aiotasks.create_guarded_task(
        name="admission webhook server", flag=started_flag, logger=logger,
        coro=admission.admission_webhook_server(
            container=container,
            settings=settings,
            registry=registry,
            insights=insights,
            webhookfn=functools.partial(admission.serve_admission_request,
                                        settings=settings,
                                        registry=registry,
                                        insights=insights,
                                        memories=memories,
                                        memobase=memo,
                                        indices=indexers.indices))))

    # Permanent observation of what resource kinds and namespaces are available in the cluster.
    # Spawn and cancel dimensional tasks as they come and go; dimensions = resources x namespaces.
    tasks.append(aiotasks.create_guarded_task(
        name="resource observer", flag=started_flag, logger=logger,
        coro=observation.resource_observer(
            insights=insights,
            registry=registry,
            settings=settings)))
    tasks.append(aiotasks.create_guarded_task(
        name="namespace observer", flag=started_flag, logger=logger,
        coro=observation.namespace_observer(
            clusterwide=clusterwide,
            namespaces=namespaces,
            insights=insights,
            settings=settings)))

    # An explicit command is a hack for the CLI to run coroutines in an operator-like environment.
    # If not specified, the normal resource processing is used. It is not exposed publicly (yet).
    if _command is not None:
        tasks.append(aiotasks.create_guarded_task(
            name="the command", flag=started_flag, logger=logger, finishable=True,
            coro=_command))
    else:
        tasks.append(aiotasks.create_guarded_task(
            name="multidimensional multitasker", flag=started_flag, logger=logger,
            coro=orchestration.ochestrator(
                settings=settings,
                insights=insights,
                identity=identity,
                operator_paused=operator_paused,
                processor=functools.partial(processing.process_resource_event,
                                            lifecycle=lifecycle,
                                            registry=registry,
                                            settings=settings,
                                            indexers=indexers,
                                            memories=memories,
                                            memobase=memo,
                                            event_queue=event_queue))))

    # Ensure that all guarded tasks got control for a moment to enter the guard.
    await asyncio.sleep(0)

    # On Ctrl+C or pod termination, cancel all tasks gracefully.
    if threading.current_thread() is threading.main_thread():
        # Handle NotImplementedError when run on Windows, since asyncio supports signals on Unix only.
        try:
            loop.add_signal_handler(signal.SIGINT, signal_flag.set_result, signal.SIGINT)
            loop.add_signal_handler(signal.SIGTERM, signal_flag.set_result, signal.SIGTERM)
        except NotImplementedError:
            logger.warning("OS signals are ignored: cannot add signal handlers on Windows.")
    else:
        logger.warning("OS signals are ignored: running not in the main thread.")

    return tasks
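# A minimal usage sketch for spawn_tasks() -- illustrative, not the canonical entry
# point. The usual way to run an operator is running.run() (as in the freeze command
# above), which also guards the spawned tasks and cancels them properly on failures;
# the plain gather() below is a simplification for demonstration only:
#
#     import asyncio
#
#     async def _demo() -> None:
#         tasks = await spawn_tasks(clusterwide=True)  # spawn the operator's root tasks
#         await asyncio.gather(*tasks)                 # wait until all the root tasks exit
#
#     if __name__ == '__main__':
#         asyncio.run(_demo())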
def test_good_aliases_over_bad_hostnames(mocker, good, bad):
    mocker.patch('socket.gethostname', return_value=bad)
    mocker.patch('socket.gethostbyaddr', side_effect=lambda fqdn: (fqdn, [good], []))
    own_id = detect_own_id(manual=True)
    assert own_id == f'some-user@{good}'
def test_suffixes_ignored(mocker):
    mocker.patch('socket.gethostname', return_value='some-host')
    mocker.patch('socket.gethostbyaddr', side_effect=lambda fqdn: (fqdn, [], []))
    own_id = detect_own_id(manual=True)
    assert own_id == 'some-user@some-host'
def test_useless_suffixes_removed(mocker, fqdn):
    mocker.patch('socket.gethostname', return_value=fqdn)
    mocker.patch('socket.gethostbyaddr', side_effect=lambda fqdn: (fqdn, [], []))
    own_id = detect_own_id(manual=True)
    assert own_id == 'some-user@my-host'
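# The test arguments above ('fqdn', 'good', 'bad', 'good1', 'good2', 'manual') are
# supplied by parametrisation defined elsewhere in the test module. A hypothetical
# sketch of such wiring for the last test; the suffix values are illustrative, and
# the expected result assumes getpass.getuser() is mocked to 'some-user', as in the
# tests above:
#
#     import pytest
#
#     @pytest.mark.parametrize('fqdn', ['my-host.local', 'my-host.localdomain'])
#     def test_useless_suffixes_removed_sketch(mocker, fqdn):
#         mocker.patch('socket.gethostname', return_value=fqdn)
#         mocker.patch('socket.gethostbyaddr', side_effect=lambda fqdn: (fqdn, [], []))
#         assert detect_own_id(manual=True) == 'some-user@my-host'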