Example #1
def _handle_sequential_dumps(previous_snaps, current_snapname, individuals, backlog_num=None):
    """Dump according to a sequential scheme.

    Keep last backlog_num snaps, and dump with this algorithm:
        step 1) FULL         0
        step 2) INCR        0-1
        step 3) INCR        1-2
        ..
    If backlog_num is specified, keep that many most-recent snapshots; if it is 0,
    keep all of them; if not specified, remove all but the last."""
    global _opts
    if backlog_num is None and _opts:
        backlog_num = _opts.backlog_num
    num_previous_snaps = len(previous_snaps)
    if not individuals:
        incremental_send(previous_snaps[-1], current_snapname, num_previous_snaps)
    else:
        for ds in individuals:
            incremental_send(previous_snaps[-1], current_snapname, num_previous_snaps, dataset=ds)
    # clean up the old series
    if backlog_num == 0 or backlog_num > len(previous_snaps):
        return
    if backlog_num is None:
        backlog_num = 1
    for snap in previous_snaps[: len(previous_snaps) - backlog_num + 1]:
        zfs.destroy_snapshot(snap)
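
For reference, a small standalone sketch (the helper name is hypothetical, not part of the tool) of the retention arithmetic used by the sequential scheme above; it only reproduces the cleanup slice from _handle_sequential_dumps so the backlog semantics can be checked in isolation:

def _sequential_snaps_to_destroy(previous_snaps, backlog_num=None):
    # Mirror of the cleanup slice: with backlog_num=N, the oldest
    # len(previous_snaps) - N + 1 snapshots are removed, leaving N snapshots
    # once the freshly taken one is counted.
    if backlog_num == 0 or (backlog_num is not None and backlog_num > len(previous_snaps)):
        return []
    if backlog_num is None:
        backlog_num = 1  # keep only the current snapshot
    return previous_snaps[: len(previous_snaps) - backlog_num + 1]

# _sequential_snaps_to_destroy(["s0", "s1", "s2", "s3"], backlog_num=2)
# -> ["s0", "s1", "s2"], leaving "s3" plus the snapshot just taken.
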
Example #2
def _handle_alternate_dumps(previous_snaps, current_snapname, individuals, backlog_num=None):
    """Dump according to an alternate scheme.

    Keep last backlog_num snaps, and dump with this algorithm:
        step 1) FULL      0
        step 2) INCR     0-1
        step 3) INCR     0-2
        step 4) INCR     1-3
        step 5) INCR     2-4
        ...
    If backlog_num is specified, keep that many most-recent snapshots; if it is 0,
    keep all of them; if not specified, remove all but the last."""
    num_previous_snaps = len(previous_snaps)
    if backlog_num and len(previous_snaps) >= backlog_num:
        # time to start from scratch
        print "Starting over after %d steps." % len(previous_snaps)
        if not individuals:
            full_send(current_snapname)
        else:
            for ds in individuals:
                full_send(current_snapname, dataset=ds)
        # prune the old series, if any
        print "Cleaning up snapshots from old series."
        for snap in previous_snaps:
            zfs.destroy_snapshot(snap)
        _done()
    # go incremental from 2 steps ago
    assert num_previous_snaps > 0
    if num_previous_snaps == 1:
        # backup from base snap
        if not individuals:
            incremental_send(previous_snaps[0], current_snapname, num_previous_snaps)
        else:
            for ds in individuals:
                incremental_send(previous_snaps[0], current_snapname, num_previous_snaps, dataset=ds)
    else:
        # backup from 2 steps before
        if not individuals:
            incremental_send(previous_snaps[-2], current_snapname, num_previous_snaps)
        else:
            for ds in individuals:
                incremental_send(previous_snaps[-2], current_snapname, num_previous_snaps, dataset=ds)
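
As a standalone illustration (hypothetical helper, not part of the tool), the base-snapshot selection the alternate scheme uses once the full dump of step 1 exists:

def _alternate_base(previous_snaps):
    # Snapshot to send incrementally from: the only previous snapshot when
    # there is just one, otherwise the one two steps back.
    if len(previous_snaps) == 1:
        return previous_snaps[0]
    return previous_snaps[-2]

# Walking the docstring's table after the FULL of snap 0:
# _alternate_base(["s0"])                -> "s0"   (step 2: INCR 0-1)
# _alternate_base(["s0", "s1"])          -> "s0"   (step 3: INCR 0-2)
# _alternate_base(["s0", "s1", "s2"])    -> "s1"   (step 4: INCR 1-3)
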
Example #3
def main():
    # get user options
    global _opts
    _opts, args = get_option_parser().parse_args()
    # normalize dataset options (strip trailing slashes)
    if _opts.exclude_datasets:
        _opts.exclude_datasets = [ds.rstrip("/") for ds in _opts.exclude_datasets]
    if _opts.only_datasets:
        _opts.only_datasets = [ds.rstrip("/") for ds in _opts.only_datasets]
    if _opts.individual_dump_ds:
        _opts.individual_dump_ds = [ds.rstrip("/") for ds in _opts.individual_dump_ds]
    # get context
    snapctx = zsnapman.SnapshotContext(_opts.context)

    if _opts.individual_dump_ds:
        # pick the first as representative
        operating_dataset = _opts.individual_dump_ds[0]
    else:
        # default to root
        operating_dataset = ""

    # some manual handling?
    if _opts.list_snapshots:
        if _opts.context == "*":
            print "Contexts available:"
            for c in zsnapman.existing_contexts():
                print c
            for ctx in zsnapman.existing_contexts():
                _list_context(ctx, operating_dataset)
        else:
            _list_context(_opts.context, operating_dataset)
        _done()
    elif _opts.prune_exceeding_minutes is not None:
        print "Pruning '%s' snapshots older than '%d' minutes" % (_opts.context, _opts.prune_exceeding_minutes)
        for snap in snapctx.get_outdated_snapshots(
            backlog_minutes=_opts.prune_exceeding_minutes, dataset=operating_dataset
        ):
            zfs.destroy_snapshot(snap)
        _done()

    ## done with manual handling

    # kill outdated snapshots
    for snap in snapctx.get_outdated_snapshots(
        backlog_num=_opts.backlog_num, backlog_minutes=_opts.maxminutes, dataset=operating_dataset
    ):
        zfs.destroy_snapshot(snap)
    # get survived snaps in this context
    previous_snaps = snapctx.get_snapshots(dataset=operating_dataset)
    # take new snapshot
    # which datasets should be dumped individually?
    if _opts.only_datasets:
        ids = _opts.only_datasets
    elif _opts.individual_dump_ds:
        # dump these individually
        ids = _opts.individual_dump_ds
    else:
        # none specified; dump the root recursively in one pass
        ids = None
    # proceed taking the snapshot for the current session
    if not _opts.nosnap:
        current_snapname = snapctx.make_snap_name()
        zfs.take_snapshot(current_snapname, restrictdatasets=ids, nodatasets=_opts.exclude_datasets)
    else:
        if not previous_snaps:
            print "No existing snapshots in '%s'. Cannot proceed." % _opts.context
            return
        current_snapname = previous_snaps.pop()

    if not _opts.send:
        _done()
    # dump is required
    if (
        _opts.fulldump
        or len(previous_snaps) == 0
        or (_opts.backlog_num is not None and len(previous_snaps) >= _opts.backlog_num)
    ):
        # full dump
        if not ids:
            bkfname = full_send(current_snapname)
        else:
            for ds in ids:
                bkfname = full_send(current_snapname, dataset=ds)
        _done()
    # pick the incremental algorithm the user asked for
    if _opts.alternate_dumps:
        _handle_alternate_dumps(previous_snaps, current_snapname, individuals=ids)
    else:
        _handle_sequential_dumps(previous_snaps, current_snapname, individuals=ids)
    _done()
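
main() relies on a get_option_parser() helper that is not shown here. A minimal sketch of what it might look like with optparse follows; only the dest names are taken from the code above, while every flag spelling, type and help string is an assumption:

from optparse import OptionParser

def get_option_parser():
    # Hypothetical reconstruction: dest names match the _opts attributes used
    # in main(); the actual flags of the tool may differ.
    parser = OptionParser()
    parser.add_option("-c", "--context", dest="context",
                      help="snapshot context to operate on ('*' lists all contexts)")
    parser.add_option("--exclude-dataset", dest="exclude_datasets", action="append",
                      help="dataset to exclude (repeatable)")
    parser.add_option("--only-dataset", dest="only_datasets", action="append",
                      help="restrict the snapshot to this dataset (repeatable)")
    parser.add_option("--individual-dump", dest="individual_dump_ds", action="append",
                      help="dataset to snapshot and dump individually (repeatable)")
    parser.add_option("-l", "--list", dest="list_snapshots", action="store_true",
                      help="list snapshots and exit")
    parser.add_option("--prune-exceeding-minutes", dest="prune_exceeding_minutes",
                      type="int", help="destroy snapshots older than this many minutes, then exit")
    parser.add_option("-n", "--backlog-num", dest="backlog_num", type="int",
                      help="number of snapshots to keep")
    parser.add_option("-m", "--max-minutes", dest="maxminutes", type="int",
                      help="destroy snapshots older than this many minutes before proceeding")
    parser.add_option("--no-snap", dest="nosnap", action="store_true",
                      help="do not take a new snapshot; reuse the most recent one")
    parser.add_option("-s", "--send", dest="send", action="store_true",
                      help="send (dump) the snapshot after taking it")
    parser.add_option("--full", dest="fulldump", action="store_true",
                      help="force a full dump")
    parser.add_option("--alternate", dest="alternate_dumps", action="store_true",
                      help="use the alternate incremental scheme")
    return parser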