Example #1
async def monitor_sync(user_fs, event_bus, task_status):
    ctxs = SyncContextStore(user_fs)
    early_wakeup = trio.Event()

    def _trigger_early_wakeup():
        early_wakeup.set()
        # Change the status without waiting for the *actual* wakeup, to
        # avoid a window of time when the wakeup is scheduled but not yet
        # reported to task_status
        task_status.awake()

    def _on_entry_updated(event, id, workspace_id=None):
        if workspace_id is None:
            # User manifest
            assert id == user_fs.user_manifest_id
            ctx = ctxs.get(id)
        else:
            ctx = ctxs.get(workspace_id)
        if ctx and ctx.set_local_change(id):
            _trigger_early_wakeup()

    def _on_realm_vlobs_updated(sender, realm_id, checkpoint, src_id,
                                src_version):
        ctx = ctxs.get(realm_id)
        if ctx and ctx.set_remote_change(src_id):
            _trigger_early_wakeup()

    def _on_sharing_updated(sender, new_entry, previous_entry):
        # If the role has changed we have to reset the sync context, given
        # the behavior may have changed a lot (e.g. switching to/from read-only)
        ctxs.discard(new_entry.id)
        if new_entry.role is not None:
            ctx = ctxs.get(new_entry.id)
            if ctx:
                # Change the due_time so the context understands the early
                # wakeup is meant for it
                ctx.due_time = timestamp()
                _trigger_early_wakeup()

    def _on_entry_confined(event, entry_id, cause_id, workspace_id):
        ctx = ctxs.get(workspace_id)
        if ctx is not None:
            ctx.set_confined_entry(entry_id, cause_id)

    async def _ctx_action(ctx, meth):
        try:
            return await getattr(ctx, meth)()
        except BackendNotAvailable:
            raise
        except Exception:
            logger.exception("Sync monitor has crashed", workspace_id=ctx.id)
            # Reset sync context which is now in an undefined state
            ctxs.discard(ctx.id)
            ctx = ctxs.get(ctx.id)
            if ctx:
                # Add a small cooldown to make sure we don't end up in a busy error loop
                ctx.due_time = timestamp() + TICK_CRASH_COOLDOWN
                return ctx.due_time
            else:
                return math.inf

    with event_bus.connect_in_context(
        (CoreEvent.FS_ENTRY_UPDATED, _on_entry_updated),
        (CoreEvent.BACKEND_REALM_VLOBS_UPDATED, _on_realm_vlobs_updated),
        (CoreEvent.SHARING_UPDATED, _on_sharing_updated),
        (CoreEvent.FS_ENTRY_CONFINED, _on_entry_confined),
    ):
        due_times = []
        # Init userfs sync context
        ctx = ctxs.get(user_fs.user_manifest_id)
        due_times.append(await _ctx_action(ctx, "bootstrap"))
        # Init workspaces sync context
        user_manifest = user_fs.get_user_manifest()
        for entry in user_manifest.workspaces:
            if entry.role is not None:
                ctx = ctxs.get(entry.id)
                if ctx:
                    due_times.append(await _ctx_action(ctx, "bootstrap"))

        task_status.started()
        while True:
            next_due_time = min(due_times)
            if next_due_time == math.inf:
                task_status.idle()
            with trio.move_on_at(next_due_time) as cancel_scope:
                await early_wakeup.wait()
                early_wakeup = trio.Event()
            # In case of early wakeup, `_trigger_early_wakeup` is responsible
            # for calling `task_status.awake()`
            if cancel_scope.cancelled_caught:
                task_status.awake()
            due_times.clear()
            await freeze_sync_monitor_mockpoint()
            for ctx in ctxs.iter():
                due_times.append(await _ctx_action(ctx, "tick"))
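
The loop above touches only a small surface of the objects it is handed. A hypothetical interface sketch, inferred purely from the calls made in monitor_sync (names, signatures, and return types are assumptions for illustration, not the real Parsec definitions):

# Hypothetical protocols inferred from monitor_sync's usage above;
# these are assumptions, not the real Parsec classes.
from typing import Iterable, Optional, Protocol


class SyncContextInterface(Protocol):
    id: object
    due_time: float  # absolute trio time of the next scheduled action

    def set_local_change(self, entry_id) -> bool: ...   # True means "wake up early"
    def set_remote_change(self, entry_id) -> bool: ...  # True means "wake up early"
    def set_confined_entry(self, entry_id, cause_id) -> None: ...
    async def bootstrap(self) -> float: ...             # returns the next due time
    async def tick(self) -> float: ...                  # returns the next due time


class SyncContextStoreInterface(Protocol):
    def get(self, id) -> Optional[SyncContextInterface]: ...
    def discard(self, id) -> None: ...
    def iter(self) -> Iterable[SyncContextInterface]: ...


class TaskStatusInterface(Protocol):
    def started(self) -> None: ...
    def idle(self) -> None: ...   # nothing is due before math.inf
    def awake(self) -> None: ...  # sync work is scheduled or running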
Example #2

import math
from typing import AsyncIterator, Callable, Optional, TypeVar

import trio

T = TypeVar("T")


async def subscription_transform(
    stream: AsyncIterator[T],
    timeout: Optional[float] = None,
    granularity: Optional[Callable[[T, T], bool]] = None,
    granularity_timeout: Optional[float] = None,
) -> AsyncIterator[T]:
    """\
    Implements the stream transformation described in `subscription.proto`.
    The identity transform would be `subscription_transform(stream, granularity=lambda a, b: True)`:
    no timing behavior is added, and all values are treated as distinct and are thus emitted.

    If `granularity` is not given, values are compared for equality, so from `[0, 0, 2, 1, 1, 0, 0, 1]`,
    elements 1, 4, and 6 (0-indexed) would be discarded as duplicates of their previous values.
    A typical example granularity measure for numbers is a lower bound on value difference,
    e.g. `lambda a, b: abs(a-b) > THRESHOLD`.

    The `timeout` parameter specifies a minimum time to pass between subsequent emitted values.
    After the timeout has passed, the most recently received value (if any) will be considered
    as if it had just arrived on the input stream,
    and then all subsequent values are considered until the next emission.
    Suppose the input is `[0, 1, 0, 1, 0]` and the timeout is just enough to skip one value completely.
    After emitting `0`, the first `1` is skipped, and the second `0` is not emitted because it's not a new value.
    The second `1` is emitted, because at that time no timeout is active (the last emission was too long ago).
    Immediately after the emission the timeout starts again; the last `0` arrives and the input stream ends.
    Because the `0` should be emitted, the stream awaits the timeout a final time, emits the value, and then terminates.
    Had the last value been a `1`, the output stream would have terminated immediately,
    as the value would not be emitted.

    The `granularity_timeout` parameter specifies a maximum time to pass between subsequent emitted values,
    as long as there were input values at all.
    The `granularity` may discard values of the input stream,
    leading in the most extreme case to no emitted values at all.
    If a `granularity_timeout` is given, then the most recent input value is emitted after that time,
    restarting both the ordinary and granularity timeout in the process.
    Suppose the input is `[0, 0, 0, 1, 1, 0, 0]` and the granularity timeout is just enough to skip one value completely.
    After emitting `0` and skipping the next one, another `0` is emitted:
    although the default granularity discarded the unchanged value, the granularity timeout forces its emission.
    Then, the first `1` and next `0` are emitted as normal, as changed values appeared before the timeout ran out.
    After the last `0`, the input ends.
    The stream waits a final time for the granularity timeout, outputs the value, and then terminates.

    Suppose the input is `[0, 0]` and the granularity timeout is so low that it runs out before the second zero.
    The first zero is the last value seen before the granularity timeout ran out,
    but once emitted it is out of the picture. The second zero is simply emitted as soon as it arrives.
    """
    if timeout is None:
        timeout = -math.inf
    if granularity is None:
        granularity = lambda a, b: a != b
    if granularity_timeout is None:
        granularity_timeout = math.inf

    async with _skipping_stream(stream) as anext:
        # we need a first value for our granularity checks
        value, eof = await anext()
        if eof:
            return

        while True:
            last_emit_at = trio.current_time()
            yield value
            if eof:
                return

            last_value = value
            has_value = False

            with trio.move_on_at(last_emit_at + granularity_timeout):
                with trio.move_on_at(last_emit_at + timeout):
                    while not eof:
                        # wait until there's news, save a value if there is
                        _value, eof = await anext()
                        if not eof:
                            # there's a value
                            value = _value
                            has_value = True

                # if we get here, either the timeout ran out or EOF was reached
                if eof:
                    if not has_value:
                        # no value at all
                        return
                    elif granularity(last_value, value):
                        # a good value! Wait for the timeout, then emit that value
                        await trio.sleep_until(last_emit_at + timeout)
                        continue
                    elif granularity_timeout < math.inf:
                        # there's still a chance to send the value after the granularity timeout
                        await trio.sleep_forever()
                    else:
                        # again, nothing to send
                        return
                    # note that no branch above falls through to the code below

                # not EOF, so do regular waiting for values
                while not eof and (not has_value or not granularity(last_value, value)):
                    # wait until there's news, save a value if there is
                    _value, eof = await anext()
                    if not eof:
                        # there's a value
                        value = _value
                        has_value = True

                if eof:
                    # EOF was reached.
                    # If there is a value, we know that the granularity did not break the loop;
                    # no need to check that again.
                    if not has_value:
                        # no value at all
                        return
                    elif granularity_timeout < math.inf:
                        # there's still a chance to send the value after the granularity timeout
                        await trio.sleep_forever()
                    else:
                        # again, nothing to send
                        return
                    # note that no branch above falls through to the code below

            # after the granularity timeout, we're fine with any value
            if has_value:
                continue

            # wait for the next event
            value, eof = await anext()
            if eof:
                return
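
The `_skipping_stream` helper is not shown in this example. From its usage above, it is an async context manager that eagerly drains the input in the background and yields an `anext` callable returning `(value, eof)` pairs, so a slow consumer skips intermediate values. A minimal sketch under those assumptions (not the original implementation):

from contextlib import asynccontextmanager


@asynccontextmanager
async def _skipping_stream(stream: AsyncIterator[T]):
    # Consume `stream` eagerly in a background task, keeping only the most
    # recent unseen value; intermediate values are silently skipped.
    latest: Optional[T] = None
    has_pending = False
    done = False
    fresh = trio.Event()  # set whenever there is unseen news (value or EOF)

    async def _pump():
        nonlocal latest, has_pending, done
        async for item in stream:
            latest = item
            has_pending = True
            fresh.set()
        done = True
        fresh.set()

    async def anext():
        nonlocal has_pending, fresh
        await fresh.wait()
        if has_pending:
            has_pending = False
            fresh = trio.Event()
            if done:
                fresh.set()  # EOF still has to be delivered on the next call
            return latest, False
        return None, True

    async with trio.open_nursery() as nursery:
        nursery.start_soon(_pump)
        try:
            yield anext
        finally:
            nursery.cancel_scope.cancel()

A hypothetical driver, feeding integers through a trio memory channel; the values and timings are made up for illustration:

async def demo():
    send, receive = trio.open_memory_channel(0)

    async def produce():
        async with send:
            for v in [0, 0, 2, 1, 1, 0, 0, 1]:
                await send.send(v)
                await trio.sleep(0.01)

    async with trio.open_nursery() as nursery:
        nursery.start_soon(produce)
        # Deduplicate consecutive values, but force an emission at least
        # every 50 ms while input keeps arriving.
        async for v in subscription_transform(receive, granularity_timeout=0.05):
            print("emitted:", v)

trio.run(demo)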