# Example #1 (score: 0)
def run_backup(args):
    """Back up the cluster to S3.

    Takes a brand-new snapshot when --new-snapshot was given or when no
    matching snapshot already exists; otherwise uploads incremental
    backups into the existing snapshot.
    """
    _set_user_env_vars(args)
    env.hosts = args.hosts.split(',')
    env.keyspaces = args.keyspaces.split(',') if args.keyspaces else None

    existing_snapshot = None
    if not args.new_snapshot:
        # Look for a snapshot that already covers these hosts/keyspaces/table.
        collection = SnapshotCollection(
            args.aws_access_key_id,
            args.aws_secret_access_key,
            args.s3_base_path,
            args.s3_bucket_name,
            get_s3_connection_host(args.s3_bucket_region)
        )
        existing_snapshot = collection.get_snapshot_for(
            hosts=env.hosts,
            keyspaces=env.keyspaces,
            table=args.table,
            name=Snapshot.make_snapshot_name()
        )
    create_snapshot = args.new_snapshot or existing_snapshot is None

    worker = BackupWorker(
        aws_access_key_id=args.aws_access_key_id,
        aws_secret_access_key=args.aws_secret_access_key,
        s3_bucket_region=args.s3_bucket_region,
        s3_ssenc=args.s3_ssenc,
        s3_connection_host=get_s3_connection_host(args.s3_bucket_region),
        cassandra_conf_path=args.cassandra_conf_path,
        cassandra_tools_bin_dir=args.cassandra_tools_bin_dir,
        cqlsh_user=args.cqlsh_user,
        cqlsh_password=args.cqlsh_password,
        backup_schema=args.backup_schema,
        buffer_size=args.buffer_size,
        use_sudo=args.use_sudo,
        use_local=args.use_local,
        connection_pool_size=args.connection_pool_size,
        exclude_tables=args.exclude_tables,
        reduced_redundancy=args.reduced_redundancy,
        rate_limit=args.rate_limit,
        quiet=args.quiet,
        nice=int(args.nice)
    )

    if create_snapshot:
        logging.info("Make a new snapshot")
        new_snapshot = Snapshot(
            base_path=args.s3_base_path,
            s3_bucket=args.s3_bucket_name,
            hosts=env.hosts,
            keyspaces=env.keyspaces,
            table=args.table
        )
        worker.snapshot(new_snapshot)
    else:
        logging.info("Add incrementals to snapshot {!s}".format(
            existing_snapshot))
        worker.update_snapshot(existing_snapshot)
# Example #2 (score: 0)
def restore_backup(args):
    """Restore one keyspace from an S3 snapshot onto the target host."""
    _set_user_env_vars(args)
    env.host_string = args.host

    collection = SnapshotCollection(
        args.aws_access_key_id, args.aws_secret_access_key, args.s3_base_path,
        args.s3_bucket_name, get_s3_connection_host(args.s3_bucket_region))

    # 'LATEST' is a sentinel meaning "most recent snapshot in the bucket".
    snapshot = (collection.get_latest()
                if args.snapshot_name == 'LATEST'
                else collection.get_snapshot_by_name(args.snapshot_name))

    worker = RestoreWorker(
        aws_access_key_id=args.aws_access_key_id,
        aws_secret_access_key=args.aws_secret_access_key,
        s3_bucket_region=args.s3_bucket_region,
        snapshot=snapshot,
        cassandra_tools_bin_dir=args.cassandra_tools_bin_dir,
        restore_dir=args.restore_dir,
        use_sudo=args.use_sudo,
        use_local=args.use_local)
    worker.restore(args.keyspace)
def main():
    """CLI entry point: register the subcommands and dispatch on the one parsed."""
    subparsers = base_parser.add_subparsers(title='subcommands',
                                            dest='subcommand')
    base_parser.add_argument('--incremental_backups',
                             action='store_true',
                             default=False)

    put_cmd = subparsers.add_parser('put',
                                    help='put files on s3 from a manifest')
    manifest_cmd = subparsers.add_parser(
        'create-upload-manifest', help='put files on s3 from a manifest')

    # Arguments for the 'put' subcommand.
    put_cmd = add_s3_arguments(put_cmd)
    put_cmd.add_argument(
        '--manifest', required=True,
        help='The manifest containing the files to put on s3')
    put_cmd.add_argument(
        '--concurrency', required=False, default=DEFAULT_CONCURRENCY,
        type=int, help='Compress and upload concurrent processes')

    # Arguments for the 'create-upload-manifest' subcommand.
    manifest_cmd.add_argument('--snapshot_name', required=True, type=str)
    manifest_cmd.add_argument('--snapshot_keyspaces', default='',
                              required=False, type=str)
    manifest_cmd.add_argument('--snapshot_table', required=False,
                              default='', type=str)
    manifest_cmd.add_argument('--data_path', required=True, type=str)
    manifest_cmd.add_argument('--manifest_path', required=True, type=str)

    args = base_parser.parse_args()

    if args.subcommand == 'create-upload-manifest':
        create_upload_manifest(args.snapshot_name, args.snapshot_keyspaces,
                               args.snapshot_table, args.data_path,
                               args.manifest_path, args.incremental_backups)
    elif args.subcommand == 'put':
        check_lzop()
        put_from_manifest(args.s3_bucket_name,
                          get_s3_connection_host(args.s3_bucket_region),
                          args.s3_ssenc, args.s3_base_path,
                          args.aws_access_key_id, args.aws_secret_access_key,
                          args.manifest, args.concurrency,
                          args.incremental_backups)
def run_backup(args):
    """Back up the cluster to S3: a fresh snapshot, or incrementals
    appended to an existing matching snapshot."""
    # Optional SSH/fabric connection settings.
    if args.user:
        env.user = args.user
    if args.password:
        env.password = args.password
    if args.sshport:
        env.port = args.sshport

    env.hosts = args.hosts.split(',')

    existing_snapshot = None
    if not args.new_snapshot:
        # Reuse a snapshot that already covers these hosts/keyspaces/table.
        collection = SnapshotCollection(
            args.aws_access_key_id,
            args.aws_secret_access_key,
            args.s3_base_path,
            args.s3_bucket_name
        )
        existing_snapshot = collection.get_snapshot_for(
            hosts=env.hosts,
            keyspaces=args.keyspaces,
            table=args.table
        )
    create_snapshot = args.new_snapshot or existing_snapshot is None

    worker = BackupWorker(
        aws_access_key_id=args.aws_access_key_id,
        aws_secret_access_key=args.aws_secret_access_key,
        s3_bucket_region=args.s3_bucket_region,
        s3_ssenc=args.s3_ssenc,
        s3_connection_host=get_s3_connection_host(args.s3_bucket_region),
        cassandra_data_path=args.cassandra_data_path,
        nodetool_path=args.nodetool_path,
        cassandra_bin_dir=args.cassandra_bin_dir,
        backup_schema=args.backup_schema,
        connection_pool_size=args.connection_pool_size
    )

    if create_snapshot:
        logging.info('make a new snapshot')
        new_snapshot = Snapshot(
            base_path=args.s3_base_path,
            s3_bucket=args.s3_bucket_name,
            hosts=env.hosts,
            keyspaces=args.keyspaces,
            table=args.table
        )
        worker.snapshot(new_snapshot)
    else:
        logging.info('add incrementals to snapshot %s' % existing_snapshot)
        worker.update_snapshot(existing_snapshot)
def main():
    """CLI entry point: build the argument parser and run the chosen subcommand."""
    subparsers = base_parser.add_subparsers(title='subcommands',
                                            dest='subcommand')
    base_parser.add_argument('--incremental_backups',
                             action='store_true',
                             default=False)

    put_p = subparsers.add_parser('put',
                                  help='put files on s3 from a manifest')
    create_p = subparsers.add_parser('create-upload-manifest',
                                     help='put files on s3 from a manifest')

    # 'put' subcommand arguments.
    put_p = add_s3_arguments(put_p)
    put_p.add_argument(
        '--manifest', required=True,
        help='The manifest containing the files to put on s3')
    put_p.add_argument(
        '--concurrency', required=False, default=DEFAULT_CONCURRENCY,
        type=int, help='Compress and upload concurrent processes')

    # 'create-upload-manifest' subcommand arguments.
    create_p.add_argument('--snapshot_name', required=True, type=str)
    create_p.add_argument('--snapshot_keyspaces', default='',
                          required=False, type=str)
    create_p.add_argument('--snapshot_table', required=False,
                          default='', type=str)
    create_p.add_argument('--data_path', required=True, type=str)
    create_p.add_argument('--manifest_path', required=True, type=str)

    args = base_parser.parse_args()
    subcommand = args.subcommand

    if subcommand == 'create-upload-manifest':
        create_upload_manifest(args.snapshot_name,
                               args.snapshot_keyspaces,
                               args.snapshot_table,
                               args.data_path,
                               args.manifest_path,
                               args.incremental_backups)

    if subcommand == 'put':
        check_lzop()
        put_from_manifest(args.s3_bucket_name,
                          get_s3_connection_host(args.s3_bucket_region),
                          args.s3_ssenc,
                          args.s3_base_path,
                          args.aws_access_key_id,
                          args.aws_secret_access_key,
                          args.manifest,
                          args.concurrency,
                          args.incremental_backups)
# Example #6 (score: 0)
def run_backup(args):
    """Back up the cluster to S3: take a fresh snapshot, or append
    incremental backups to an existing matching snapshot.

    Bug fix: the SSH key option was read as ``args.sshkey - filename`` — a
    subtraction against an undefined name ``filename``, which raised at
    runtime. The argparse dest for an ``--sshkey-filename`` option is
    ``sshkey_filename``, so that attribute is used instead.
    """
    # Optional SSH/fabric connection settings.
    if args.user:
        env.user = args.user

    if args.password:
        env.password = args.password

    # NOTE(review): attribute assumed to be the argparse dest
    # 'sshkey_filename' — confirm against the parser definition.
    if args.sshkey_filename:
        env.key_filename = args.sshkey_filename

    if args.sshport:
        env.port = args.sshport

    env.hosts = args.hosts.split(',')

    if args.new_snapshot:
        create_snapshot = True
    else:
        # Reuse a snapshot that already covers these hosts/keyspaces/table.
        existing_snapshot = SnapshotCollection(
            args.aws_access_key_id, args.aws_secret_access_key,
            args.s3_base_path,
            args.s3_bucket_name).get_snapshot_for(hosts=env.hosts,
                                                  keyspaces=args.keyspaces,
                                                  table=args.table)
        create_snapshot = existing_snapshot is None

    worker = BackupWorker(aws_access_key_id=args.aws_access_key_id,
                          aws_secret_access_key=args.aws_secret_access_key,
                          s3_bucket_region=args.s3_bucket_region,
                          s3_ssenc=args.s3_ssenc,
                          s3_connection_host=get_s3_connection_host(
                              args.s3_bucket_region),
                          cassandra_data_path=args.cassandra_data_path,
                          nodetool_path=args.nodetool_path,
                          cassandra_bin_dir=args.cassandra_bin_dir,
                          backup_schema=args.backup_schema,
                          connection_pool_size=args.connection_pool_size)

    if create_snapshot:
        logging.info('make a new snapshot')
        snapshot = Snapshot(base_path=args.s3_base_path,
                            s3_bucket=args.s3_bucket_name,
                            hosts=env.hosts,
                            keyspaces=args.keyspaces,
                            table=args.table)
        worker.snapshot(snapshot)
    else:
        logging.info('add incrementals to snapshot %s' % existing_snapshot)
        worker.update_snapshot(existing_snapshot)
# Example #7 (score: 0)
def list_backups(args):
    """Print every snapshot stored under the configured S3 path,
    grouped by the snapshot's parent base path.

    Bug fixes:
      * ``os.path.join`` was called with the split list itself, which
        either raises ``TypeError`` or produces an unusable (unhashable)
        key — the components must be unpacked as separate arguments.
      * ``dict.iteritems()`` is Python-2-only; ``items()`` works on both
        Python 2 and 3 (the rest of the snippet already uses ``print(...)``).
    """
    snapshots = SnapshotCollection(
        args.aws_access_key_id, args.aws_secret_access_key, args.s3_base_path,
        args.s3_bucket_name, get_s3_connection_host(args.s3_bucket_region))
    path_snapshots = defaultdict(list)

    for snapshot in snapshots:
        # Group by the parent path (everything before the last '/' segment).
        components = snapshot.base_path.split('/')[:-1]
        # os.path.join needs at least one argument; fall back to '' for a
        # base_path with no '/' separator.
        base_path = os.path.join(*components) if components else ''
        path_snapshots[base_path].append(snapshot)

    for path, grouped in path_snapshots.items():
        for snapshot in grouped:
            print("\t {!r} hosts:{!r} keyspaces:{!r} table:{!r}".format(
                snapshot, snapshot.hosts, snapshot.keyspaces, snapshot.table))
        print("------------------------{}".format('-' * len(path)))
def run_backup(args):
    """Back up the cluster to S3, creating a new snapshot or appending
    incremental backups to the most recent matching one.

    Fixes over the original:
      * ``logging.warn`` is a deprecated alias — use ``logging.warning``.
      * warning messages now name the actual flags (no trailing space after
        ``--keep-new-snapshot``; ``--delete-incremental-backups`` spelled
        like the real option).
    """
    # Optional SSH/fabric connection settings.
    if args.user:
        env.user = args.user

    if args.password:
        env.password = args.password

    if args.sshport:
        env.port = args.sshport

    if args.sshkey:
        env.key_filename = args.sshkey

    # Without an explicit host list, operate on the local machine only and
    # route commands through fabric's local() instead of run().
    if not args.hosts:
        env.hosts = [socket.gethostname()]
        env.run = lambda cmd: local(cmd, capture=True)
    else:
        env.hosts = args.hosts.split(',')
        env.run = run

    keep_new_snapshot = args.keep_new_snapshot
    delete_old_snapshots = args.delete_old_snapshots
    delete_backups = args.delete_incremental_backups

    if args.new_snapshot:
        create_snapshot = True
        existing_snapshot = None
    else:
        # The cleanup flags only apply when a new snapshot is taken.
        if args.keep_new_snapshot:
            logging.warning('--new-snapshot not set. Ignoring --keep-new-snapshot')
            keep_new_snapshot = False
        if args.delete_old_snapshots:
            logging.warning('--new-snapshot not set. Ignoring --delete-old-snapshots')
            delete_old_snapshots = False
        if args.delete_incremental_backups:
            logging.warning('--new-snapshot not set. Ignoring --delete-incremental-backups')
            delete_backups = False

        existing_snapshot = SnapshotCollection(
            args.aws_access_key_id,
            args.aws_secret_access_key,
            args.s3_base_path,
            args.s3_bucket_name
        ).get_snapshot_for(
            hosts=env.hosts,
            keyspaces=args.keyspaces,
            table=args.table
        )
        create_snapshot = existing_snapshot is None

    worker = BackupWorker(
        aws_access_key_id=args.aws_access_key_id,
        aws_secret_access_key=args.aws_secret_access_key,
        s3_bucket_region=args.s3_bucket_region,
        s3_ssenc=args.s3_ssenc,
        s3_connection_host=get_s3_connection_host(args.s3_bucket_region),
        cassandra_data_path=args.cassandra_data_path,
        nodetool_path=args.nodetool_path,
        cassandra_bin_dir=args.cassandra_bin_dir,
        backup_schema=args.backup_schema,
        connection_pool_size=args.connection_pool_size,
        agent_path=args.agent_path,
        agent_virtualenv=args.agent_virtualenv,
        use_sudo=(not args.no_sudo)
    )

    if create_snapshot:
        logging.info('make a new snapshot')
        snapshot = Snapshot(
            base_path=args.s3_base_path,
            s3_bucket=args.s3_bucket_name,
            hosts=env.hosts,
            keyspaces=args.keyspaces,
            table=args.table
        )
        worker.snapshot(snapshot,
                        keep_new_snapshot=keep_new_snapshot,
                        delete_old_snapshots=delete_old_snapshots,
                        delete_backups=delete_backups)
    else:
        logging.info('add incrementals to snapshot %s' % existing_snapshot)
        worker.update_snapshot(existing_snapshot)