Example #1
def get_latest(self):
    if len(self._snapshots) == 0:
        cfg = get_config()
        raise SoftError(
            f"Nothing to backup for filesystem '{cfg.get('FILESYSTEM')}'. Are you sure "
            f"SNAPSHOT_PREFIX='{cfg.get('SNAPSHOT_PREFIX')}' is correct?")
    return list(self._snapshots.values())[-1]
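
The [-1] lookup assumes self._snapshots is an ordered mapping, oldest snapshot first. A minimal sketch of that assumption (names hypothetical):

from collections import OrderedDict

# Hypothetical: snapshots are inserted in creation order, so the last
# value is always the most recent snapshot.
snapshots = OrderedDict()
snapshots['zfs-auto-snap:daily-2024-01-01'] = 'older snapshot'
snapshots['zfs-auto-snap:daily-2024-01-02'] = 'latest snapshot'
assert list(snapshots.values())[-1] == 'latest snapshot'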
Example #2
def parse_args():
    cfg = get_config()
    parser = argparse.ArgumentParser(
        description='list zfs3backup snapshots',
    )
    parser.add_argument('--s3-prefix',
                        dest='s3_prefix',
                        default=cfg.get('S3_PREFIX', 'zfs3backup/'),
                        help='S3 key prefix, defaults to zfs3backup/')
    parser.add_argument('--filesystem', '--dataset',
                        dest='filesystem',
                        default=cfg.get('FILESYSTEM'),
                        help='the zfs dataset/filesystem to operate on')
    parser.add_argument('--snapshot-prefix',
                        dest='snapshot_prefix',
                        default=None,
                        help=('Only operate on snapshots that start with this prefix. '
                              'Defaults to zfs-auto-snap:daily.'))
    parser.add_argument('--aws-profile',
                        dest='aws_profile',
                        default='default',
                        help='choose a non-default ~/.aws/config profile')
    parser.add_argument('--endpoint',
                        dest='s3_endpoint_url',
                        default='aws',
                        help='choose a non-AWS S3 endpoint (e.g. Wasabi)')

    subparsers = parser.add_subparsers(help='sub-command help', dest='subcommand')

    backup_parser = subparsers.add_parser(
        'backup', help='backup local zfs snapshots to an s3 bucket')
    backup_parser.add_argument('--snapshot', dest='snapshot', default=None,
                               help='Snapshot to backup. Defaults to latest.')
    backup_parser.add_argument('--dry-run', dest='dry', default=False, action='store_true',
                               help='Dry run.')
    backup_parser.add_argument('--compressor', dest='compressor', default=None,
                               choices=(['none'] + sorted(COMPRESSORS.keys())),
                               help=('Specify the compressor. Defaults to pigz1. '
                                     'Use "none" to disable.'))
    backup_parser.add_argument('--parseable', dest='parseable', action='store_true',
                               help='Machine readable output')
    incremental_group = backup_parser.add_mutually_exclusive_group()
    incremental_group.add_argument(
        '--full', dest='full', action='store_true', help='Perform full backup')
    incremental_group.add_argument(
        '--incremental', dest='incremental', default=True, action='store_true',
        help='Perform incremental backup; this is the default')

    restore_parser = subparsers.add_parser('restore', help='not implemented')
    restore_parser.add_argument(
        'snapshot', help='Snapshot to restore.')
    restore_parser.add_argument('--dry-run', dest='dry', default=False, action='store_true',
                                help='Dry run.')
    restore_parser.add_argument('--force', dest='force', default=False, action='store_true',
                                help='Force rollback of the filesystem (zfs recv -F).')
    subparsers.add_parser('status', help='show status of current backups')
    return parser.parse_args()
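
COMPRESSORS is referenced by the --compressor choices above but not shown in these examples; a hypothetical sketch of its shape:

# Hypothetical: maps a compressor name to the commands used to compress
# the stream on backup and decompress it on restore.
COMPRESSORS = {
    'pigz1': {'compress': ['pigz', '-1'], 'decompress': ['pigz', '-d']},
    'pigz4': {'compress': ['pigz', '-4'], 'decompress': ['pigz', '-d']},
}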
Example #3
def write_s3_data():
    """Takes the default data from FakeBucket and writes it to S3.
    Allows running the same tests against fakes and the boto api.
    """
    cfg = get_config()
    bucket = boto3.resource('s3').Bucket(cfg['BUCKET'])
    for name, metadata in FakeBucket.fake_data.items():
        key = bucket.Object(os.path.join(FakeBucket.rand_prefix, name))
        key.put(Body="spam", Metadata=metadata)
    return bucket
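
For context, a hypothetical minimal shape of FakeBucket consistent with its use above (the real test fake has more behavior):

import uuid

class FakeBucket(object):
    # Hypothetical sketch: one metadata dict per fake key, plus a random
    # prefix so repeated runs don't collide in the real bucket.
    rand_prefix = "zfs3backup_test_" + uuid.uuid4().hex[:8]
    fake_data = {
        "pool@zfs-auto-snap:daily-1": {"isfull": "true"},
        "pool@zfs-auto-snap:daily-2": {"parent": "pool@zfs-auto-snap:daily-1"},
    }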
Example #4
def main():
    cfg = get_config()
    parser = argparse.ArgumentParser(
        description='Read a key from s3 and write the content to stdout', )
    parser.add_argument('name', help='name of S3 key')
    args = parser.parse_args()

    s3 = boto3.resource('s3')
    bucket = s3.Bucket(cfg['BUCKET'])

    download(bucket, args.name)
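
download is defined elsewhere; a minimal sketch, assuming it streams the object body to stdout in chunks (Python 3):

import sys

def download(bucket, name, chunk_size=64 * 1024):
    # Stream the S3 object to stdout without holding it all in memory.
    body = bucket.Object(name).get()['Body']
    for chunk in iter(lambda: body.read(chunk_size), b''):
        sys.stdout.buffer.write(chunk)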
Example #5
def test_integration(sample_data):
    cfg = get_config()
    stream_handler = StreamHandler(sample_data)
    bucket = boto3.resource('s3').Bucket(cfg['BUCKET'])
    key_name = "zfs3backup_test_" + datetime.now().strftime("%Y%m%d_%H-%M-%S")
    sup = UploadSupervisor(stream_handler,
                           key_name,
                           bucket=bucket,
                           metadata=parse_metadata(
                               ["ana=are+mere", "dana=are=pere"]))
    etag = sup.main_loop()
    uploaded = bucket.Object(key_name)
    assert etag == '"d229c1fc0e509475afe56426c89d2724-2"'
    assert etag == uploaded.e_tag
    assert uploaded.metadata == {"ana": "are+mere", "dana": "are=pere"}
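
The assertions pin down parse_metadata's contract: each "key=value" pair splits on the first '=' only. A sketch consistent with that:

def parse_metadata(pairs):
    # Split on the first '=' only, so values may themselves contain '='
    # ("dana=are=pere" -> {"dana": "are=pere"}).
    return dict(pair.split('=', 1) for pair in pairs)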
Example #6
def main():
    cfg = get_config()
    parser = argparse.ArgumentParser(
        description='Read a key from s3 and write the content to stdout',
    )
    parser.add_argument('name', help='name of S3 key')
    args = parser.parse_args()

    # boto3 picks the right default URL for AWS itself; only override
    # the endpoint for non-AWS, S3-compatible services.
    if cfg['ENDPOINT'] == 'aws':
        s3 = boto3.Session(profile_name=cfg['PROFILE']).resource('s3')
    else:
        s3 = boto3.Session(profile_name=cfg['PROFILE']).resource(
            's3', endpoint_url=cfg['ENDPOINT'])

    bucket = s3.Bucket(cfg['BUCKET'])

    download(bucket, args.name)
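
The same endpoint selection reappears in Example #7; a small helper could factor it out (a sketch, assuming the same cfg keys):

def get_s3_resource(cfg):
    # Pass endpoint_url only for non-AWS, S3-compatible services;
    # boto3 picks the correct default URL for AWS itself.
    session = boto3.Session(profile_name=cfg['PROFILE'])
    if cfg['ENDPOINT'] == 'aws':
        return session.resource('s3')
    return session.resource('s3', endpoint_url=cfg['ENDPOINT'])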
Example #7
def main():
    cfg = get_config()
    args = parse_args()

    try:
        bucket_name = cfg['BUCKET']
    except KeyError as err:
        sys.stderr.write(f"Configuration error! {err} is not set.\n")
        sys.exit(1)

    # boto3 makes the right endpoint decision for AWS itself; only
    # override the endpoint for non-AWS services.
    if cfg['ENDPOINT'] == 'aws':
        s3 = boto3.Session(profile_name=cfg['PROFILE']).resource('s3')
    else:
        s3 = boto3.Session(profile_name=cfg['PROFILE']).resource(
            's3', endpoint_url=cfg['ENDPOINT'])

    bucket = s3.Bucket(bucket_name)

    fs_section = f"fs:{args.filesystem}"
    if args.snapshot_prefix is None:
        snapshot_prefix = cfg.get("SNAPSHOT_PREFIX", section=fs_section)
    else:
        snapshot_prefix = args.snapshot_prefix
    if args.subcommand == 'status':
        list_snapshots(bucket, s3_prefix=args.s3_prefix, snapshot_prefix=snapshot_prefix,
                       filesystem=args.filesystem)
    elif args.subcommand == 'backup':
        if args.compressor is None:
            compressor = cfg.get('COMPRESSOR', section=fs_section)
        else:
            compressor = args.compressor
        if compressor is not None and compressor.lower() == 'none':
            compressor = None

        do_backup(bucket, s3_prefix=args.s3_prefix, snapshot_prefix=snapshot_prefix,
                  filesystem=args.filesystem, full=args.full, snapshot=args.snapshot,
                  dry=args.dry, compressor=compressor, parseable=args.parseable)
    elif args.subcommand == 'restore':
        restore(bucket, s3_prefix=args.s3_prefix, snapshot_prefix=snapshot_prefix,
                filesystem=args.filesystem, snapshot=args.snapshot, dry=args.dry,
                force=args.force)
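
The section=fs_section lookups suggest per-filesystem overrides in INI sections named fs:<filesystem>. A hypothetical sketch of such a config object:

import configparser

class Config(object):
    # Hypothetical: a value in the 'fs:<filesystem>' section overrides
    # the same key in the 'main' section.
    def __init__(self, path):
        self._parser = configparser.ConfigParser()
        self._parser.read(path)

    def get(self, key, default=None, section='main'):
        if self._parser.has_option(section, key):
            return self._parser.get(section, key)
        if self._parser.has_option('main', key):
            return self._parser.get('main', key)
        return default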
Example #8
def main():
    cfg = get_config()
    parser = argparse.ArgumentParser(
        description='Cleanup hanging multipart s3 uploads', )
    parser.add_argument('--max-age',
                        dest='max_days',
                        default=1,
                        type=int,
                        help='maximum age in days')
    parser.add_argument('--dry',
                        dest='dry_run',
                        action='store_true',
                        help='Don\'t cancel any upload')
    args = parser.parse_args()
    bucket = boto.connect_s3(cfg['S3_KEY_ID'],
                             cfg['S3_SECRET']).get_bucket(cfg['BUCKET'])
    cleanup_multipart(
        bucket,
        max_days=args.max_days,
        dry_run=args.dry_run,
    )
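
cleanup_multipart is defined elsewhere; a sketch under the assumption that it uses the legacy boto API seen above:

from datetime import datetime, timedelta

def cleanup_multipart(bucket, max_days=1, dry_run=False):
    # Cancel multipart uploads initiated more than max_days ago.
    cutoff = datetime.utcnow() - timedelta(days=max_days)
    for upload in bucket.get_all_multipart_uploads():
        started = datetime.strptime(upload.initiated, "%Y-%m-%dT%H:%M:%S.%fZ")
        if started < cutoff:
            print("cancelling upload of", upload.key_name)
            if not dry_run:
                upload.cancel_upload()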
Example #9
def main():
    global quiet
    cfg = get_config()
    parser = argparse.ArgumentParser(
        description='send zfs3backup snapshots over ssh', )
    parser.add_argument('--filesystem',
                        '--dataset',
                        dest='filesystem',
                        default=cfg.get('FILESYSTEM'),
                        help='the zfs dataset/filesystem to operate on')
    parser.add_argument(
        '--remote-filesystem',
        '--remote-dataset',
        dest='remote_filesystem',
        default=None,
        help='the target zfs dataset/filesystem to send snapshots to')
    parser.add_argument(
        '--snapshot-prefix',
        dest='snapshot_prefix',
        default=cfg.get('SNAPSHOT_PREFIX', 'zfs-auto-snap:daily'),
        help='only operate on snapshots that start with this prefix')
    parser.add_argument('--pull',
                        dest='pull',
                        default=False,
                        action='store_true',
                        help='pull snapshots from remote')
    parser.add_argument('--quiet',
                        dest='quiet',
                        default=False,
                        action='store_true',
                        help='suppress output')
    parser.add_argument(
        '--dry-run',
        dest='dry_run',
        default=False,
        action='store_true',
        help='call zfs recv with -nv flags to test if snapshot can be sent')
    parser.add_argument('remote', help='hostname/address of remote server')
    args = parser.parse_args()
    quiet = args.quiet
    local_mgr = ZFSSnapshotManager(args.filesystem, args.snapshot_prefix)
    remote_fs = args.remote_filesystem or args.filesystem
    remote_mgr = RemoteZFSSnapshotManager(args.remote, remote_fs,
                                          args.snapshot_prefix)
    local_snaps = [
        s.name[len(args.filesystem) + 1:]  # strip fs name
        for s in local_mgr.list()
    ]
    remote_snaps = [
        s.name[len(remote_fs) + 1:]  # strip fs name
        for s in remote_mgr.list()
    ]
    if args.pull:
        pair = snapshots_to_send(source_snaps=remote_snaps,
                                 dest_snaps=local_snaps)
    else:
        pair = snapshots_to_send(source_snaps=local_snaps,
                                 dest_snaps=remote_snaps)
    cmd_pair = sync_snapshots(pair,
                              args.filesystem,
                              remote_fs,
                              args.remote,
                              args.pull,
                              dry_run=args.dry_run)
    if cmd_pair is None:
        return
    send_cmd, recv_cmd = cmd_pair
    executor = CommandExecutor()
    executor.pipe(send_cmd, recv_cmd, quiet=quiet)
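
snapshots_to_send is not shown; a hypothetical sketch, assuming it picks the newest snapshot both sides share as the incremental base and the newest source snapshot as the target:

def snapshots_to_send(source_snaps, dest_snaps):
    # Hypothetical: returns a (base, target) pair for an incremental
    # send; base is None when a full send is needed.
    dest = set(dest_snaps)
    common = [s for s in source_snaps if s in dest]
    base = common[-1] if common else None
    target = source_snaps[-1] if source_snaps else None
    return base, target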
Example #10
import argparse
import base64
import binascii
import functools
import hashlib
import logging
import json
import os
import sys
from collections import namedtuple

import boto3

from zfs3backup.config import get_config

Result = namedtuple('Result', ['success', 'traceback', 'index', 'md5', 'etag'])
CFG = get_config()
VERB_QUIET = 0
VERB_NORMAL = 1
VERB_PROGRESS = 2

s3 = boto3.resource('s3')


def multipart_etag(digests):
    """
    Computes etag for multipart uploads
    :type digests: list of hex-encoded md5 sums (string)
    :param digests: The list of digests for each individual chunk.

    :rtype: string
    :returns: The etag computed from the individual chunks.
    """
    # A sketch of the standard S3 multipart ETag: the md5 of the
    # concatenated binary part digests, then '-' and the part count
    # (matching the '"...-2"' shape asserted in Example #5).
    digest = hashlib.md5()
    for d in digests:
        digest.update(binascii.unhexlify(d))
    return '"{}-{}"'.format(digest.hexdigest(), len(digests))
Example #11
from io import StringIO
from datetime import datetime
from queue import Queue
from uuid import uuid4
import hashlib

import boto3
import pytest

from zfs3backup.pput import (UploadSupervisor, UploadWorker, StreamHandler,
                             Result, WorkerCrashed, multipart_etag,
                             parse_metadata, retry, UploadException)
from zfs3backup.config import get_config

cfg = get_config()
_cached_sample_data = None


class ReadOnlyFile(object):
    """A read-only file like object.
    Helps ensure we don't accidentally mutate the fixture between test runs.
    """
    def __init__(self, fd, allowed=('read', 'seek')):
        self._fd = fd
        self._allowed = set(allowed)

    def __getattr__(self, name):
        if name in self._allowed:
            return getattr(self._fd, name)
        raise AssertionError(
            "this file-like-object is readonly, {} is not allowed".format(
                name))