Example #1
def test_activity_kwargs(resource, activity):
    cause = ActivityCause(
        logger=logging.getLogger('kopf.test.fake.logger'),
        activity=activity,
        settings=OperatorSettings(),
    )
    kwargs = build_kwargs(cause=cause, extrakwarg=123)
    assert set(kwargs) == {'extrakwarg', 'logger', 'activity'}
    assert kwargs['extrakwarg'] == 123
    assert kwargs['logger'] is cause.logger
    assert kwargs['activity'] is activity
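For reference, the snippet relies on names imported elsewhere in the test module, and on the `resource`/`activity` pytest fixtures defined in the suite's conftest. A minimal, hedged sketch of the assumed imports is shown below; the module paths are assumptions and have moved between Kopf releases.

# A hedged sketch of the imports assumed by the snippet above.
# The module paths are assumptions: they differ between Kopf versions
# (newer releases moved these modules into private kopf._core.* packages).
import logging

from kopf.reactor.invocation import build_kwargs          # assumed path
from kopf.structs.causation import ActivityCause          # assumed path
from kopf.structs.configuration import OperatorSettings   # assumed path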
Example #2
def test_registry_and_settings_are_propagated(mocker):
    operator_mock = mocker.patch('kopf.reactor.running.operator')
    registry = OperatorRegistry()
    settings = OperatorSettings()
    with KopfRunner(['run', '--standalone'],
                    registry=registry,
                    settings=settings) as runner:
        pass
    assert runner.exit_code == 0
    assert runner.exception is None
    assert operator_mock.called
    assert operator_mock.call_args[1]['registry'] is registry
    assert operator_mock.call_args[1]['settings'] is settings
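Outside of unit tests that mock the operator itself, `KopfRunner` from `kopf.testing` is normally used as a context manager that runs the operator in a background thread while the test body interacts with the cluster. A minimal sketch of that pattern (the operator script path is a hypothetical placeholder):

from kopf.testing import KopfRunner

def test_operator_runs():
    # 'examples/operator.py' is a hypothetical path to an operator script.
    with KopfRunner(['run', '--standalone', 'examples/operator.py']) as runner:
        # Interact with the cluster (create/patch objects) while the operator runs.
        pass
    assert runner.exit_code == 0
    assert runner.exception is None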
Example #3
def test_activity_kwargs(resource, activity, indices):
    cause = ActivityCause(
        memo=Memo(),
        logger=logging.getLogger('kopf.test.fake.logger'),
        indices=indices,
        activity=activity,
        settings=OperatorSettings(),
    )
    kwargs = build_kwargs(cause=cause, extrakwarg=123)
    assert set(kwargs) == {
        'extrakwarg', 'memo', 'logger', 'index1', 'index2', 'activity'
    }
    assert kwargs['extrakwarg'] == 123
    assert kwargs['index1'] is indices['index1']
    assert kwargs['index2'] is indices['index2']
    assert kwargs['logger'] is cause.logger
    assert kwargs['activity'] is activity
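Compared to Example #1, this variant also passes a `Memo` (publicly re-exported as `kopf.Memo`) and an `indices` mapping, which `build_kwargs` unpacks into per-index keyword arguments. The real suite builds `indices` from the operator's indexers; the sketch below is a hypothetical stand-in that only mimics the shape the assertions rely on.

import pytest

@pytest.fixture()
def indices():
    # Hypothetical stand-in: the real fixture exposes the operator's
    # in-memory indices under these names; a plain mapping is used here
    # purely to illustrate the expected name-to-index shape.
    return {'index1': {}, 'index2': {}}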
Example #4
def settings():
    return OperatorSettings()
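This one-liner is presumably a pytest fixture whose decorator was stripped by the listing: it hands each test a fresh, default-configured `OperatorSettings`. A hedged sketch of how it is typically declared and consumed, with the batching attributes borrowed from Example #5 (defaults vary by version):

import pytest
import kopf  # OperatorSettings is re-exported publicly as kopf.OperatorSettings

@pytest.fixture()
def settings():
    # A fresh settings object per test, so mutations do not leak between tests.
    return kopf.OperatorSettings()

def test_batching_can_be_tuned(settings):
    settings.batching.batch_window = 0
    settings.batching.idle_timeout = 1
    assert settings.batching.batch_window == 0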
Example #5
async def code_overhead(
    resource,
    stream,
    aresponses,
    watcher_limited,
    timer,
    _code_overhead_cache,
) -> CodeOverhead:
    """
    Estimate the overhead of synchronous code in the watching routines.

    The code overhead is caused by Kopf's and tests' own low-level activities:
    the code of ``watcher()``/``worker()`` itself, including a job scheduler,
    the local ``aresponses`` server, the API communication with that server
    in ``aiohttp``, serialization/deserialization in ``kopf.clients``, etc.

    The actual aspects being tested are the ``watcher()``/``worker()`` routines:
    their input/output and their timing regarding the blocking queue operations
    or explicit sleeps, not the timing of underlying low-level activities.
    So, the expected values for the durations of the call are adjusted for
    the estimated code overhead before asserting them.

    .. note::

        The tests are designed with small timeouts to run fast, so that
        the whole test-suite with thousands of tests is not delayed much.
        Once there is a way to simulate asyncio time like with ``freezegun``,
        or ``freezegun`` supports asyncio time, the problem can be solved by
        using the lengthy timeouts and ignoring the code overhead.

    The overhead is estimated by running a single-event cycle,
    which means one worker only, but with batching of events disabled. This
    ensures that only the fastest way is executed: no explicit or implicit
    sleeps are used (e.g. as in getting from an empty queue with timeouts).

    An extra 10-30% is added to the measured overhead to ensure that future
    code executions fit into the estimate despite the variations.

    Empirically, the overhead usually remains within the range of 50-150 ms.
    It does not depend on the number of events or unique uids in the stream.
    It does depend on the hardware used, or containers in the CI systems.

    Several dummy runs are used to average the values, to avoid fluctuation.
    The estimation happens only once per session, and is reused for all tests.
    """
    if not _code_overhead_cache:

        # Collect a few data samples to make the estimation realistic.
        overheads: List[float] = []
        for _ in range(10):

            # We feed and consume the stream here, before the tests run,
            # since the tests can feed the stream with their own events.
            stream.feed([
                {
                    'type': 'ADDED',
                    'object': {
                        'metadata': {
                            'uid': 'uid'
                        }
                    }
                },
            ])
            stream.close()

            # We use our own fixtures so as not to collide with the tests' fixtures.
            processor = CoroutineMock()
            settings = OperatorSettings()
            settings.batching.batch_window = 0
            settings.batching.idle_timeout = 1
            settings.batching.exit_timeout = 1

            with timer:
                await watcher(
                    namespace=None,
                    resource=resource,
                    settings=settings,
                    processor=processor,
                )

            # Ensure that everything worked as expected, i.e. the worker is not mocked,
            # and the whole code is actually executed down to the processor callback.
            assert processor.awaited, "The processor is not called for code overhead measurement."
            overheads.append(timer.seconds)

        # Reserve extra 10-30% from both sides for occasional variations.
        _code_overhead_cache.append(
            CodeOverhead(
                min=min(overheads) * 0.9,
                avg=sum(overheads) / len(overheads),
                max=max(overheads) * 1.1,
            ))

        # Cleanup our own endpoints, if something is left.
        aresponses._responses[:] = []

    # Uncomment for debugging of the actual timing: visible only with the -s pytest option.
    # print(f"The estimated code overhead is {_code_overhead_cache[0]}.")

    return _code_overhead_cache[0]
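As the docstring describes, the returned bounds are meant to be added to the expected sleeps in timing assertions instead of hard-coding margins. A hypothetical illustration of that usage, reusing the suite's `timer` fixture; the 0.1 s sleep and the test name are illustrative assumptions:

import asyncio

# A hypothetical timing test consuming the fixture's bounds (names are illustrative):
async def test_sleep_is_bounded(timer, code_overhead):
    with timer:
        await asyncio.sleep(0.1)
    # The pure waiting time is ~0.1s; anything above it must fit into the overhead.
    assert 0.1 <= timer.seconds <= 0.1 + code_overhead.max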