Example #1
0
async def watch_schedule(ctx, trigger, *, loop=None):
    """Continually yields the next backup to be created.

    Watches two input sources — the rules defined through Kubernetes
    resources and the snapshots already present in Google Cloud — and
    schedules a new backup whenever either of them changes.
    """
    loop = loop or asyncio.get_event_loop()
    log = _logger.new()

    rule_stream = get_rules(ctx)
    snapshot_stream = get_snapshots(ctx, trigger)

    log.debug('watch_schedule.start')

    merged = combine_latest(
        rules=rule_stream,
        snapshots=snapshot_stream,
        defaults={'snapshots': None, 'rules': None}
    )

    current_rules = None

    heartbeat_interval_seconds = ctx.config.get(
        'schedule_heartbeat_interval_seconds'
    )

    async def heartbeat():
        # Emit a periodic liveness event carrying the most recent rules.
        _logger.info(
            events.Rule.HEARTBEAT,
            rules=current_rules,
        )

        # Re-arm: schedule the next heartbeat after the configured delay.
        loop.call_later(
            heartbeat_interval_seconds,
            asyncio.ensure_future,
            heartbeat()
        )

    if heartbeat_interval_seconds:
        asyncio.ensure_future(heartbeat())

    async for state in merged:
        current_rules = state.get('rules')
        snapshots = state.get('snapshots')

        # Never schedule before both inputs have produced data.
        if current_rules is None or snapshots is None:
            log.debug(
                'watch_schedule.wait-for-both',
            )
            continue

        yield determine_next_snapshot(snapshots, current_rules)
Example #2
0
async def get_snapshots(ctx: Context, rulesgen, reload_trigger):
    """Query the existing snapshots from the cloud provider backend(s).

    "rules" are all the disk rules we know about; through them we know
    the set of backends that are in play and that need to be verified.

    Whenever the channel "reload_trigger" produces a value, the list of
    snapshots is refreshed, which in turn causes the next backup to be
    scheduled.
    """

    merged = combine_latest(
        rules=debounce(rulesgen, 4),
        reload=reload_trigger,
    )

    async for update in merged:
        # Collect the distinct backends referenced by the current rules,
        # then load and yield the snapshots for that set of backends.
        backends = {get_backend_for_rule(ctx, rule) for rule in update['rules']}
        yield await load_snapshots(ctx, backends)
Example #3
0
async def watch_schedule(ctx, trigger):
    """Continually yields the next backup to be created.

    Watches two input sources — the rules defined through Kubernetes
    resources and the snapshots already present in Google Cloud — and
    schedules a new backup whenever either of them changes.
    """
    log = _logger.new()

    rule_stream = get_rules(ctx)
    snapshot_stream = get_snapshots(ctx, trigger)

    log.debug('watch_schedule.start')

    merged = combine_latest(
        rules=rule_stream,
        snapshots=snapshot_stream,
        defaults={'snapshots': None, 'rules': None}
    )

    async for state in merged:
        rules = state.get('rules')
        snapshots = state.get('snapshots')

        # Hold off until both inputs have produced at least one value.
        if rules is None or snapshots is None:
            log.debug(
                'watch_schedule.wait-for-both',
            )
            continue

        yield determine_next_snapshot(snapshots, rules)