Example #1
def main():
    cfg = get_config()
    args = parse_args()
    try:
        s3_key_id, s3_secret, bucket = cfg['S3_KEY_ID'], cfg['S3_SECRET'], cfg['BUCKET']
    except KeyError as err:
        sys.stderr.write("Configuration error! {} is not set.\n".format(err))
        sys.exit(1)
    bucket = boto.connect_s3(s3_key_id, s3_secret).get_bucket(bucket)
    fs_section = "fs:{}".format(args.filesystem)
    if args.snapshot_prefix is None:
        snapshot_prefix = cfg.get("SNAPSHOT_PREFIX", section=fs_section)
    else:
        snapshot_prefix = args.snapshot_prefix
    if args.subcommand == 'status':
        list_snapshots(bucket, s3_prefix=args.s3_prefix, snapshot_prefix=snapshot_prefix,
                       filesystem=args.filesystem)
    elif args.subcommand == 'backup':
        if args.compressor is None:
            compressor = cfg.get('COMPRESSOR', section=fs_section)
        else:
            compressor = args.compressor
        if compressor.lower() == 'none':
            compressor = None

        do_backup(bucket, s3_prefix=args.s3_prefix, snapshot_prefix=snapshot_prefix,
                  filesystem=args.filesystem, full=args.full, snapshot=args.snapshot,
                  dry=args.dry, compressor=compressor, parseable=args.parseable)
    elif args.subcommand == 'restore':
        restore(bucket, s3_prefix=args.s3_prefix, snapshot_prefix=snapshot_prefix,
                filesystem=args.filesystem, snapshot=args.snapshot, dry=args.dry,
                force=args.force)
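
A note on the pattern above: an explicit CLI flag always wins, and only when it is absent is the per-filesystem "fs:<name>" config section consulted. The same fallback recurs for SNAPSHOT_PREFIX and COMPRESSOR, so it could be factored out; a minimal sketch, where get_option is a hypothetical helper and not part of z3:

def get_option(cfg, args, name, section):
    # hypothetical helper: explicit CLI flag wins, otherwise fall
    # back to the per-filesystem section of the config
    cli_value = getattr(args, name.lower(), None)
    if cli_value is not None:
        return cli_value
    return cfg.get(name, section=section)

# e.g.: snapshot_prefix = get_option(cfg, args, 'SNAPSHOT_PREFIX', fs_section)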
Example #2
 def get_latest(self):
     if len(self._snapshots) == 0:
         cfg = get_config()
         raise SoftError(
             'Nothing to backup for filesystem "{}". Are you sure '
             'SNAPSHOT_PREFIX="{}" is correct?'.format(
                 self._fs_name, self._snapshot_prefix))
     return self.list()[-1]
Example #3
File: snap.py Project: numerant/z3
 def get_latest(self):
     if len(self._snapshots) == 0:
         cfg = get_config()
         raise SoftError(
             'Nothing to backup for filesystem "{}". Are you sure '
             'SNAPSHOT_PREFIX="{}" is correct?'.format(
                 cfg.get('FILESYSTEM'), cfg.get('SNAPSHOT_PREFIX')))
     return self._snapshots.values()[-1]
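
SoftError evidently marks expected, user-facing failures (a misconfigured prefix, nothing to back up) as opposed to genuine crashes. A caller would presumably catch it near the entry point and exit cleanly; a sketch, with the import path and handler both being assumptions:

import sys
from z3.snap import SoftError  # assumed import path

def run():
    # hypothetical wrapper: report soft errors without a traceback
    try:
        main()
    except SoftError as err:
        sys.stderr.write("{}\n".format(err))
        sys.exit(1)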
Example #4
def main():
    cfg = get_config()
    parser = argparse.ArgumentParser(
        description='Read a key from s3 and write the content to stdout', )
    parser.add_argument('name', help='name of S3 key')
    args = parser.parse_args()
    bucket = boto.connect_s3(cfg['S3_KEY_ID'],
                             cfg['S3_SECRET']).get_bucket(cfg['BUCKET'])
    download(bucket, args.name)
Example #5
File: get.py Project: PressLabs/z3
def main():
    cfg = get_config()
    parser = argparse.ArgumentParser(
        description='Read a key from s3 and write the content to stdout',
    )
    parser.add_argument('name', help='name of S3 key')
    args = parser.parse_args()
    bucket = boto.connect_s3(
        cfg['S3_KEY_ID'], cfg['S3_SECRET']).get_bucket(cfg['BUCKET'])
    download(bucket, args.name)
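
The download helper itself is not among these excerpts. A plausible implementation streams the key straight to stdout instead of buffering it in memory; a sketch under that assumption, using only standard boto Key methods:

import sys

def download(bucket, name):
    # hypothetical body; the real z3 helper may differ
    key = bucket.get_key(name)
    if key is None:
        sys.stderr.write("no such key: {}\n".format(name))
        sys.exit(1)
    key.get_contents_to_file(sys.stdout)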
Example #6
def main():
    global quiet
    cfg = get_config()
    parser = argparse.ArgumentParser(
        description='send z3 snapshots over ssh',
    )
    parser.add_argument('--filesystem', '--dataset',
                        dest='filesystem',
                        default=cfg.get('FILESYSTEM'),
                        help='the zfs dataset/filesystem to operate on')
    parser.add_argument('--remote-filesystem', '--remote-dataset',
                        dest='remote_filesystem',
                        default=None,
                        help='the target zfs dataset/filesystem to send snapshots to')
    parser.add_argument('--snapshot-prefix',
                        dest='snapshot_prefix',
                        default=cfg.get('SNAPSHOT_PREFIX', 'zfs-auto-snap:daily'),
                        help='only operate on snapshots that start with this prefix')
    parser.add_argument('--pull',
                        dest='pull',
                        default=False,
                        action='store_true',
                        help='pull snapshots from remote')
    parser.add_argument('--quiet',
                        dest='quiet',
                        default=False,
                        action='store_true',
                        help='suppress output')
    parser.add_argument('--dry-run',
                        dest='dry_run',
                        default=False,
                        action='store_true',
                        help='call zfs recv with -nv flags to test if snapshot can be sent')
    parser.add_argument('remote', help='hostname/address of remote server')
    args = parser.parse_args()
    quiet = args.quiet
    local_mgr = ZFSSnapshotManager(args.filesystem, args.snapshot_prefix)
    remote_fs = args.remote_filesystem or args.filesystem
    remote_mgr = RemoteZFSSnapshotManager(args.remote, remote_fs, args.snapshot_prefix)
    local_snaps = [s.name[len(args.filesystem)+1:]  # strip fs name
                   for s in local_mgr.list()]
    remote_snaps = [s.name[len(remote_fs)+1:]  # strip fs name
                    for s in remote_mgr.list()]
    if args.pull:
        pair = snapshots_to_send(source_snaps=remote_snaps, dest_snaps=local_snaps)
    else:
        pair = snapshots_to_send(source_snaps=local_snaps, dest_snaps=remote_snaps)
    cmd_pair = sync_snapshots(
        pair, args.filesystem, remote_fs, args.remote, args.pull, dry_run=args.dry_run)
    if cmd_pair is None:
        return
    send_cmd, recv_cmd = cmd_pair
    executor = CommandExecutor()
    executor.pipe(send_cmd, recv_cmd, quiet=quiet)
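
snapshots_to_send has to choose an incremental base: the most recent snapshot both sides already share, plus the newest snapshot on the source. A sketch of that selection logic (the call signature matches the code above; the body is an assumption):

def snapshots_to_send(source_snaps, dest_snaps):
    # hypothetical body: the latest common snapshot is the incremental
    # base, the newest source snapshot is the send target
    dest_set = set(dest_snaps)
    common = [s for s in source_snaps if s in dest_set]
    base = common[-1] if common else None   # None would mean a full send
    latest = source_snaps[-1] if source_snaps else None
    return base, latest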
Example #7
def write_s3_data():
    """Takes the default data from FakeBucket and writes it to S3.
    Allows running the same tests against fakes and the boto api.
    """
    cfg = get_config()
    bucket = boto.connect_s3(cfg['S3_KEY_ID'],
                             cfg['S3_SECRET']).get_bucket(cfg['BUCKET'])
    for name, metadata in FakeBucket.fake_data.iteritems():
        key = bucket.new_key(os.path.join(FakeBucket.rand_prefix, name))
        headers = {("x-amz-meta-" + k): v for k, v in metadata.iteritems()}
        key.set_contents_from_string("spam", headers=headers)
    return bucket
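
In a test suite this would usually sit behind a fixture so the bucket is populated once and shared; a sketch (fixture name and scope are assumptions):

import pytest

@pytest.fixture(scope='session')
def s3_bucket():
    # hypothetical fixture wrapping write_s3_data
    return write_s3_data()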
Example #8
def write_s3_data():
    """Takes the default data from FakeBucket and writes it to S3.
    Allows running the same tests against fakes and the boto api.
    """
    cfg = get_config()
    bucket = boto.connect_s3(
        cfg['S3_KEY_ID'], cfg['S3_SECRET']).get_bucket(cfg['BUCKET'])
    for name, metadata in FakeBucket.fake_data.iteritems():
        key = bucket.new_key(os.path.join(FakeBucket.rand_prefix, name))
        headers = {("x-amz-meta-" + k): v for k, v in metadata.iteritems()}
        key.set_contents_from_string("spam", headers=headers)
    return bucket
Example #9
def main():
    cfg = get_config()
    args = parse_args()

    extra_config = {}
    if 'HOST' in cfg:
        extra_config['host'] = cfg['HOST']
    if 'S3_KEY_ID' in cfg:
        s3_key_id, s3_secret, bucket = (
            cfg['S3_KEY_ID'], cfg['S3_SECRET'], cfg['BUCKET'])
        bucket = boto.connect_s3(s3_key_id, s3_secret,
                                 **extra_config).get_bucket(bucket)
    else:
        bucket = cfg['BUCKET']
        bucket = boto.connect_s3(**extra_config).get_bucket(bucket)

    fs_section = "fs:{}".format(args.filesystem)
    if args.snapshot_prefix is None:
        snapshot_prefix = cfg.get("SNAPSHOT_PREFIX", section=fs_section)
    else:
        snapshot_prefix = args.snapshot_prefix
    if args.subcommand == 'status':
        list_snapshots(bucket,
                       s3_prefix=args.s3_prefix,
                       snapshot_prefix=snapshot_prefix,
                       filesystem=args.filesystem)
    elif args.subcommand == 'backup':
        if args.compressor is None:
            compressor = cfg.get('COMPRESSOR', section=fs_section)
        else:
            compressor = args.compressor
        if compressor.lower() == 'none':
            compressor = None

        do_backup(bucket,
                  s3_prefix=args.s3_prefix,
                  snapshot_prefix=snapshot_prefix,
                  filesystem=args.filesystem,
                  full=args.full,
                  snapshot=args.snapshot,
                  dry=args.dry,
                  compressor=compressor,
                  parseable=args.parseable)
    elif args.subcommand == 'restore':
        restore(bucket,
                s3_prefix=args.s3_prefix,
                snapshot_prefix=snapshot_prefix,
                filesystem=args.filesystem,
                snapshot=args.snapshot,
                dry=args.dry,
                force=args.force)
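
The optional HOST key lets boto target an S3-compatible endpoint other than AWS, such as a local fake-s3 during tests. For reference, the config-driven branch above boils down to a direct call like this, with the hostname and bucket name purely illustrative:

# equivalent direct call, assuming HOST=s3.example.internal in the config
conn = boto.connect_s3(s3_key_id, s3_secret, host='s3.example.internal')
bucket = conn.get_bucket('my-backups')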
Example #10
File: snap.py Project: rciorba/z3
def parse_args():
    cfg = get_config()
    parser = argparse.ArgumentParser(
        description='list z3 snapshots',
    )
    parser.add_argument('--s3-prefix',
                        dest='s3_prefix',
                        default=cfg.get('S3_PREFIX', 'z3-backup/'),
                        help='S3 key prefix, defaults to z3-backup/')
    parser.add_argument('--filesystem', '--dataset',
                        dest='filesystem',
                        default=cfg.get('FILESYSTEM'),
                        help='the zfs dataset/filesystem to operate on')
    parser.add_argument('--snapshot-prefix',
                        dest='snapshot_prefix',
                        default=None,
                        help=('Only operate on snapshots that start with this prefix. '
                              'Defaults to zfs-auto-snap:daily.'))
    subparsers = parser.add_subparsers(help='sub-command help', dest='subcommand')

    backup_parser = subparsers.add_parser(
        'backup', help='backup local zfs snapshots to an s3 bucket')
    backup_parser.add_argument('--snapshot', dest='snapshot', default=None,
                               help='Snapshot to backup. Defaults to latest.')
    backup_parser.add_argument('--dry-run', dest='dry', default=False, action='store_true',
                               help='Dry run.')
    backup_parser.add_argument('--compressor', dest='compressor', default=None,
                               choices=(['none'] + sorted(COMPRESSORS.keys())),
                               help=('Specify the compressor. Defaults to pigz1. '
                                     'Use "none" to disable.'))
    backup_parser.add_argument('--parseable', dest='parseable', action='store_true',
                               help='Machine readable output')
    incremental_group = backup_parser.add_mutually_exclusive_group()
    incremental_group.add_argument(
        '--full', dest='full', action='store_true', help='Perform full backup')
    incremental_group.add_argument(
        '--incremental', dest='incremental', default=True, action='store_true',
        help='Perform incremental backup; this is the default')

    restore_parser = subparsers.add_parser('restore', help='not implemented')
    restore_parser.add_argument(
        'snapshot', help='Snapshot to restore.')
    restore_parser.add_argument('--dry-run', dest='dry', default=False, action='store_true',
                                help='Dry run.')
    restore_parser.add_argument('--force', dest='force', default=False, action='store_true',
                                help='Force rollback of the filesystem (zfs recv -F).')
    subparsers.add_parser('status', help='show status of current backups')
    return parser.parse_args()
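
To make the resulting namespace concrete, here is an illustrative invocation exercising the backup sub-parser directly:

import sys

# illustrative: simulate "z3 backup --dry-run --compressor none"
sys.argv = ['z3', 'backup', '--dry-run', '--compressor', 'none']
args = parse_args()
assert args.subcommand == 'backup'
assert args.dry and args.compressor == 'none'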
Example #11
def test_integration(sample_data):
    cfg = get_config()
    stream_handler = StreamHandler(sample_data)
    bucket = boto.connect_s3(
        cfg['S3_KEY_ID'], cfg['S3_SECRET']).get_bucket(cfg['BUCKET'])
    key_name = "z3_test_" + datetime.now().strftime("%Y%m%d_%H-%M-%S")
    sup = UploadSupervisor(
        stream_handler,
        key_name,
        bucket=bucket,
        headers=parse_metadata(["ana=are+mere", "dana=are=pere"])
    )
    etag = sup.main_loop()
    uploaded = bucket.get_key(key_name)
    assert etag == '"d229c1fc0e509475afe56426c89d2724-2"'
    assert etag == uploaded.etag
    assert uploaded.metadata == {"ana": "are+mere", "dana": "are=pere"}
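
The assertions pin down parse_metadata's behaviour: each pair is split on the first '=' only, which is why "dana=are=pere" keeps its second '='. A sketch consistent with that (whether the function itself adds the x-amz-meta- prefix is an assumption):

def parse_metadata(pairs):
    # split on the first '=' only; prefix handling is assumed
    headers = {}
    for pair in pairs:
        name, _, value = pair.partition('=')
        headers['x-amz-meta-' + name] = value
    return headers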
Example #12
def main():
    cfg = get_config()
    parser = argparse.ArgumentParser(
        description='Read a key from s3 and write the content to stdout',
    )
    parser.add_argument('name', help='name of S3 key')
    args = parser.parse_args()
    config = TransferConfig(
        max_concurrency=int(cfg['CONCURRENCY']),
        multipart_chunksize=int(re.sub('M', '', cfg['CHUNK_SIZE'])) * MB)
    if 'S3_KEY_ID' in cfg:
        s3 = boto3.client('s3', aws_access_key_id=cfg['S3_KEY_ID'], aws_secret_access_key=cfg['S3_SECRET'])
    else:
        s3 = boto3.client('s3')
    try:
        s3.download_fileobj(cfg['BUCKET'], args.name, sys.stdout, Config=config)
    except botocore.exceptions.ClientError as e:
        if e.response['Error']['Code'] == "404":
            print("The object does not exist.")
        else:
            raise
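
CHUNK_SIZE is evidently a human-readable size such as 64M: the regex strips the suffix and the MB constant scales it to bytes. A minimal sketch of the assumed constant and the arithmetic:

import re

MB = 1024 ** 2  # assumed definition

# e.g. CHUNK_SIZE=64M gives 64 * MB = 67108864 bytes per multipart chunk
chunk_bytes = int(re.sub('M', '', '64M')) * MB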
Example #13
def main():
    cfg = get_config()
    parser = argparse.ArgumentParser(
        description='Cleanup hanging multipart s3 uploads', )
    parser.add_argument('--max-age',
                        dest='max_days',
                        default=1,
                        type=int,
                        help='maximum age in days')
    parser.add_argument('--dry',
                        dest='dry_run',
                        action='store_true',
                        help='Don\'t cancel any upload')
    args = parser.parse_args()
    bucket = boto.connect_s3(cfg['S3_KEY_ID'],
                             cfg['S3_SECRET']).get_bucket(cfg['BUCKET'])
    cleanup_multipart(
        bucket,
        max_days=args.max_days,
        dry_run=args.dry_run,
    )
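
cleanup_multipart is not shown in these excerpts; presumably it lists the bucket's in-progress multipart uploads and cancels those older than the cutoff, honouring dry_run. A sketch under that assumption, using boto's multipart listing API:

from datetime import datetime, timedelta

def cleanup_multipart(bucket, max_days=1, dry_run=False):
    # hypothetical body; the real z3 implementation may differ
    cutoff = datetime.utcnow() - timedelta(days=max_days)
    for upload in bucket.get_all_multipart_uploads():
        started = datetime.strptime(upload.initiated,
                                    '%Y-%m-%dT%H:%M:%S.%fZ')  # assumed format
        if started < cutoff:
            print("cancelling {} from {}".format(upload.key_name, upload.initiated))
            if not dry_run:
                upload.cancel_upload()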
Example #14
def main():
    cfg = get_config()
    parser = argparse.ArgumentParser(
        description='Cleanup hanging multipart s3 uploads',
    )
    parser.add_argument('--max-age',
                        dest='max_days',
                        default=1,
                        type=int,
                        help='maximum age in days')
    parser.add_argument('--dry',
                        dest='dry_run',
                        action='store_true',
                        help='Don\'t cancel any upload')
    args = parser.parse_args()
    bucket = boto.connect_s3(
        cfg['S3_KEY_ID'], cfg['S3_SECRET']).get_bucket(cfg['BUCKET'])
    cleanup_multipart(
        bucket,
        max_days=args.max_days,
        dry_run=args.dry_run,
    )
Example #15
File: pput.py Project: rciorba/z3
import argparse
import binascii
import functools
import hashlib
import logging
import json
import os
import sys
from collections import namedtuple

import boto.s3.multipart

from z3.config import get_config


Result = namedtuple('Result', ['success', 'traceback', 'index', 'md5'])
CFG = get_config()
VERB_QUIET = 0
VERB_NORMAL = 1
VERB_PROGRESS = 2


def multipart_etag(digests):
    """
    Computes etag for multipart uploads
    :type digests: list of hex-encoded md5 sums (string)
    :param digests: The list of digests for each individual chunk.

    :rtype: string
    :returns: The etag computed from the individual chunks.
    """
    etag = hashlib.md5()
    count = 0
    for dig in digests:
        count += 1
        etag.update(binascii.unhexlify(dig))
    # md5 of the concatenated binary part digests, quoted, with the
    # part count appended -- the S3 multipart etag scheme
    return '"{}-{}"'.format(etag.hexdigest(), count)
Example #16
File: snap.py Project: numerant/z3
def main():
    cfg = get_config()
    args = parse_args()

    try:
        s3_key_id, s3_secret, bucket = (
            cfg['S3_KEY_ID'], cfg['S3_SECRET'], cfg['BUCKET'])

        extra_config = {}
    except KeyError as err:
        sys.stderr.write("Configuration error! {} is not set.\n".format(err))
        sys.exit(1)

    if not ValidateCipher(args.cipher):
        raise AssertionError("Invalid cipher {}".format(args.cipher))
    if args.cipher and (not args.password_file
                        or args.password_file.lower() == "none"):
        # For now, treat this as an error
        raise AssertionError(
            "Cannot specify an encryption cipher without a password file")

    bucket = boto.connect_s3(s3_key_id, s3_secret,
                             **extra_config).get_bucket(bucket)

    fs_section = "fs:{}".format(args.filesystem)
    if args.snapshot_prefix is None:
        snapshot_prefix = cfg.get("SNAPSHOT_PREFIX", section=fs_section)
    else:
        snapshot_prefix = args.snapshot_prefix
    if args.subcommand == 'status':
        list_snapshots(bucket,
                       s3_prefix=args.s3_prefix,
                       snapshot_prefix=snapshot_prefix,
                       filesystem=args.filesystem)
    elif args.subcommand == 'backup':
        if args.compressor is None:
            compressor = cfg.get('COMPRESSOR', section=fs_section)
        else:
            compressor = args.compressor
        if compressor.lower() == 'none':
            compressor = None

        do_backup(bucket,
                  s3_prefix=args.s3_prefix,
                  snapshot_prefix=snapshot_prefix,
                  filesystem=args.filesystem,
                  full=args.full,
                  snapshot=args.snapshot,
                  dry=args.dry,
                  compressor=compressor,
                  parseable=args.parseable,
                  cipher=args.cipher,
                  password_file=args.password_file)
    elif args.subcommand == 'restore':
        restore(bucket,
                s3_prefix=args.s3_prefix,
                snapshot_prefix=snapshot_prefix,
                filesystem=args.filesystem,
                snapshot=args.snapshot,
                dry=args.dry,
                force=args.force)
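
ValidateCipher is not among these excerpts. From its use above it must accept a falsy cipher (encryption disabled) and vet anything else against a known list; a sketch, with the supported set purely illustrative:

SUPPORTED_CIPHERS = ('aes-128-cbc', 'aes-256-cbc')  # illustrative set

def ValidateCipher(cipher):
    # hypothetical body: no cipher simply means encryption is off
    if not cipher:
        return True
    return cipher in SUPPORTED_CIPHERS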
Example #17
from cStringIO import StringIO
from datetime import datetime
from Queue import Queue
from uuid import uuid4
import hashlib

import boto
import pytest

from z3.pput import (UploadSupervisor, UploadWorker, StreamHandler,
                     Result, WorkerCrashed, multipart_etag, parse_metadata,
                     retry, UploadException)
from z3.config import get_config


cfg = get_config()
_cached_sample_data = None


class ReadOnlyFile(object):
    """A read-only file like object.
    Helps ensure we don't accidentally mutate the fixture between test runs.
    """
    def __init__(self, fd, allowed=('read', 'seek')):
        self._fd = fd
        self._allowed = set(allowed)

    def __getattr__(self, name):
        if name in self._allowed:
            return getattr(self._fd, name)
        raise AssertionError("this file-like-object is readonly, {} is now allowed".format(name))
Example #18
from threading import Thread
import argparse
import binascii
import functools
import hashlib
import logging
import json
import os
import sys
from collections import namedtuple

import boto.s3.multipart

from z3.config import get_config

Result = namedtuple('Result', ['success', 'traceback', 'index', 'md5'])
CFG = get_config()
VERB_QUIET = 0
VERB_NORMAL = 1
VERB_PROGRESS = 2


def multipart_etag(digests):
    """
    Computes etag for multipart uploads
    :type digests: list of hex-encoded md5 sums (string)
    :param digests: The list of digests for each individual chunk.

    :rtype: string
    :returns: The etag computed from the individual chunks.
    """
    etag = hashlib.md5()
    count = 0
    for dig in digests:
        count += 1
        etag.update(binascii.unhexlify(dig))
    # md5 of the concatenated binary part digests, quoted, with the
    # part count appended -- the S3 multipart etag scheme
    return '"{}-{}"'.format(etag.hexdigest(), count)
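
As a sanity check, the etag asserted in the integration test earlier (ending in "-2") is exactly this computation over two part digests; the digests below are illustrative:

digests = [hashlib.md5(b'part1').hexdigest(),
           hashlib.md5(b'part2').hexdigest()]
print(multipart_etag(digests))  # quoted md5-of-digests plus "-2"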