Example #1
def zpools():
    """Creates two temporary zpools to be called from test functions. Yields the two pool names
    and destroys them after testing."""

    zpool = '/sbin/zpool'
    _word = randomword(8)
    pool0 = 'pyznap_source_' + _word
    pool1 = 'pyznap_dest_' + _word

    # Create temporary files on which the zpools are created
    with NamedTemporaryFile() as file0, NamedTemporaryFile() as file1:
        filename0 = file0.name
        filename1 = file1.name

        # Fix size to 100 MiB
        file0.seek(100 * 1024**2 - 1)
        file0.write(b'0')
        file0.seek(0)
        file1.seek(100 * 1024**2 - 1)
        file1.write(b'0')
        file1.seek(0)

        # Create temporary test pools
        for pool, filename in zip([pool0, pool1], [filename0, filename1]):
            try:
                sp.check_call([zpool, 'create', pool, filename])
            except sp.CalledProcessError as err:
                logger.error(err)
                return

        try:
            fs0 = zfs.open(pool0)
            fs1 = zfs.open(pool1)
            assert fs0.name == pool0
            assert fs1.name == pool1
        except (DatasetNotFoundError, AssertionError, Exception) as err:
            logger.error(err)
        else:
            yield fs0, fs1

        # Destroy temporary test pools
        for pool in [pool0, pool1]:
            try:
                sp.check_call([zpool, 'destroy', pool])
            except sp.CalledProcessError as err:
                logger.error(err)
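
A minimal usage sketch for this fixture, assuming the generator above is registered with pytest (e.g. decorated with @pytest.fixture); the test name and assertions are illustrative only:

def test_fresh_pools_have_no_snapshots(zpools):
    """pytest injects the fixture above by parameter name."""
    fs0, fs1 = zpools
    # Freshly created pools should not carry any snapshots yet
    assert fs0.snapshots() == []
    assert fs1.snapshots() == []
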
Example #2
def zpools():
    """Creates two temporary zpools to be called from test functions. Yields the two pool names
    and destroys them after testing."""

    # Create temporary files on which the zpools are created
    with NamedTemporaryFile() as file0, NamedTemporaryFile() as file1:
        filename0 = file0.name
        filename1 = file1.name

        # Fix size to 100 MiB
        file0.seek(100 * 1024**2 - 1)
        file0.write(b'0')
        file0.seek(0)
        file1.seek(100 * 1024**2 - 1)
        file1.write(b'0')
        file1.seek(0)

        # Create temporary test pools
        for pool, filename in zip([POOL0, POOL1], [filename0, filename1]):
            try:
                sp.check_call([ZPOOL, 'create', pool, filename])
            except sp.CalledProcessError as err:
                logger.error(err)
                return

        try:
            fs0 = zfs.open(POOL0)
            fs1 = zfs.open(POOL1)
            assert fs0.name == POOL0
            assert fs1.name == POOL1
        except (DatasetNotFoundError, AssertionError, Exception) as err:
            logger.error(err)
        else:
            yield fs0, fs1

        # Destroy temporary test pools
        for pool in [POOL0, POOL1]:
            try:
                sp.check_call([ZPOOL, 'destroy', pool])
            except sp.CalledProcessError as err:
                logger.error(err)
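
Example #2 differs from Example #1 mainly in relying on module-level constants. A hypothetical setup block, with names inferred from their use above (paths and pool names are placeholders):

import logging
import subprocess as sp
from tempfile import NamedTemporaryFile
# zfs helpers and DatasetNotFoundError come from pyznap (imports omitted here)

ZPOOL = '/sbin/zpool'          # path to the zpool binary, as in Example #1
POOL0 = 'pyznap_test_source'   # source pool name (placeholder)
POOL1 = 'pyznap_test_dest'     # dest pool name (placeholder)
logger = logging.getLogger(__name__)
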
Example #3
File: send.py Project: orgoj/pyznap
def send_config(config, settings={}):
    """Tries to sync all entries in the config to their dest. Finds all children of the filesystem
    and calls send_filesystem on each of them.

    Parameters:
    ----------
    config : {list of dict}
        Full config list containing all strategies for different filesystems
    """

    logger = logging.getLogger(__name__)
    logger.info('Sending snapshots...')

    for conf in config:
        if not conf.get('dest', None):
            logger.debug('Ignoring config {} for send (no dest)...'.format(conf['name']))
            continue
        logger.debug('Processing config {}...'.format(conf['name']))

        backup_source = conf['name']
        try:
            _type, source_name, user, host, port = parse_name(backup_source)
        except ValueError as err:
            logger.error('Could not parse {:s}: {}...'.format(
                backup_source, err))
            continue

        # if source is remote, open ssh connection
        if _type == 'ssh':
            key = conf['key'] if conf.get('key', None) else None
            compress = conf['compress'].pop(0) if conf.get('compress',
                                                           None) else 'lzop'
            try:
                ssh_source = SSH(user,
                                 host,
                                 port=port,
                                 key=key,
                                 compress=compress)
            except (FileNotFoundError, SSHException):
                continue
            source_name_log = '{:s}@{:s}:{:s}'.format(user, host, source_name)
        else:
            ssh_source = None
            source_name_log = source_name

        try:
            # Children includes the base filesystem (named 'source_name')
            source_children = zfs.find_exclude(conf,
                                               config,
                                               ssh=ssh_source,
                                               matching=settings.get('matching'))
        except DatasetNotFoundError as err:
            logger.error(
                'Source {:s} does not exist...'.format(source_name_log))
            continue
        except ValueError as err:
            logger.error(err)
            continue
        except CalledProcessError as err:
            logger.error('Error while opening source {:s}: \'{:s}\'...'.format(
                source_name_log, err.stderr.rstrip()))
            continue

        send_exclude_property = conf.get('send_exclude_property')

        # Send to every backup destination
        for backup_dest in conf['dest']:
            # get exclude rules
            exclude = conf['exclude'].pop(0) if conf.get('exclude',
                                                         None) else []
            # check if raw send was requested
            raw = conf['raw_send'].pop(0) if conf.get('raw_send',
                                                      None) else False
            # check if we need to retry
            retries = conf['retries'].pop(0) if conf.get('retries',
                                                         None) else 0
            retry_interval = conf['retry_interval'].pop(0) if conf.get(
                'retry_interval', None) else 10
            # check if resumable send was requested
            resume = conf['resume'].pop(0) if conf.get('resume',
                                                       None) else False
            # check if send_last_snapshot was requested
            send_last_snapshot = conf['send_last_snapshot'].pop(0) if conf.get(
                'send_last_snapshot', None) else False
            if send_last_snapshot == 'no':
                send_last_snapshot = False
            # check if we should create dataset if it doesn't exist
            dest_auto_create = conf['dest_auto_create'].pop(0) if conf.get(
                'dest_auto_create', None) else False

            try:
                _type, dest_name, user, host, port = parse_name(backup_dest)
            except ValueError as err:
                logger.error('Could not parse {:s}: {}...'.format(
                    backup_dest, err))
                continue

            # if dest is remote, open ssh connection
            if _type == 'ssh':
                dest_key = conf['dest_keys'].pop(0) if conf.get(
                    'dest_keys', None) else None
                # if 'ssh_source' is set, then 'compress' is already set and we use same compression for both source and dest
                # if not then we take the next entry in config
                if not ssh_source:
                    compress = conf['compress'].pop(0) if conf.get(
                        'compress', None) else 'lzop'
                try:
                    ssh_dest = SSH(user,
                                   host,
                                   port=port,
                                   key=dest_key,
                                   compress=compress)
                except (FileNotFoundError, SSHException):
                    continue
                dest_name_log = '{:s}@{:s}:{:s}'.format(user, host, dest_name)
            else:
                ssh_dest = None
                dest_name_log = dest_name

            # check if dest exists
            try:
                zfs.open(dest_name, ssh=ssh_dest)
            except DatasetNotFoundError:
                if dest_auto_create:
                    logger.info(
                        'Destination {:s} does not exist, will create it...'.
                        format(dest_name_log))
                    if create_dataset(dest_name, dest_name_log, ssh=ssh_dest):
                        continue
                else:
                    logger.error(
                        'Destination {:s} does not exist, manually create it or use "dest-auto-create" option...'
                        .format(dest_name_log))
                    continue
            except ValueError as err:
                logger.error(err)
                continue
            except CalledProcessError as err:
                logger.error(
                    'Error while opening dest {:s}: \'{:s}\'...'.format(
                        dest_name_log, err.stderr.rstrip()))
                continue

            # Match children on source to children on dest
            if source_name == '':
                dest_children_names = [
                    dest_name + '/' + child.name for child in source_children
                ]
            else:
                dest_children_names = [
                    child.name.replace(source_name, dest_name)
                    for child in source_children
                ]
            # Send all children to corresponding children on dest
            for source_fs, dest_name in zip(source_children,
                                            dest_children_names):
                # exclude filesystems from rules
                if any(
                        fnmatch(source_fs.name, pattern)
                        for pattern in exclude):
                    logger.debug(
                        'Matched {} in exclude rules, not sending...'.format(
                            source_fs))
                    continue
                # check exclude attribute
                if send_exclude_property and source_fs.ispropval(
                        send_exclude_property, check='false'):
                    logger.debug(
                        'Not sending {}, have property {:s}=false'.format(
                            source_fs, send_exclude_property))
                    continue
                # TODO: create missing skipped filesystem on destination
                # send not excluded filesystems
                for retry in range(1, retries + 2):
                    rc = send_filesystem(source_fs,
                                         dest_name,
                                         ssh_dest=ssh_dest,
                                         raw=raw,
                                         resume=resume,
                                         send_last_snapshot=send_last_snapshot,
                                         dest_auto_create=dest_auto_create)
                    if rc == 2 and retry <= retries:
                        logger.info(
                            'Retrying send in {:d}s (retry {:d} of {:d})...'.
                            format(retry_interval, retry, retries))
                        sleep(retry_interval)
                    else:
                        break

            if ssh_dest:
                ssh_dest.close()

        if ssh_source:
            ssh_source.close()
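
For illustration, a hypothetical config entry covering the keys send_config() reads above. Per-destination options are lists consumed with pop(0), one element per entry in 'dest' (dest_keys and, for a local source, compress are popped only for remote destinations); all values below are placeholders, and the 'ssh:port:user@host:dataset' form is what parse_name() is assumed to accept:

conf = {
    'name': 'tank/data',                          # local source dataset
    'dest': ['backup/data',                       # local destination
             'ssh:22:user@host:pool/data'],       # remote destination
    'dest_keys': ['/root/.ssh/id_rsa'],           # one ssh key per remote dest
    'compress': ['lzop'],                         # ssh transport compression
    'exclude': [[], ['tank/data/tmp*']],          # fnmatch patterns per dest
    'raw_send': [False, True],
    'retries': [0, 3],
    'retry_interval': [10, 60],                   # seconds between retries
    'resume': [False, True],
    'send_last_snapshot': [False, False],
    'dest_auto_create': [True, True],
    'send_exclude_property': 'pyznap:send_exclude',
}
send_config([conf], settings={'matching': None})  # 'matching' is forwarded to zfs.find_exclude()
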
Example #4
File: send.py Project: orgoj/pyznap
def send_filesystem(source_fs,
                    dest_name,
                    ssh_dest=None,
                    raw=False,
                    resume=False,
                    send_last_snapshot=False,
                    dest_auto_create=False):
    """Checks for common snapshots between source and dest.
    If none are found, send the oldest snapshot, then update with the most recent one.
    If there are common snaps, update destination with the most recent one.

    Parameters:
    ----------
    source_fs : {ZFSFilesystem}
        Source zfs filesystem from where to send
    dest_name : {str}
        Name of the location to send to
    ssh_dest : {ssh.SSH}, optional
        Open ssh connection for remote backup (the default is None, meaning local backup)
    raw : {bool}, optional
        Send raw zfs streams
    resume : {bool}, optional
        Use resumable sends; a resume token found on dest resumes the previous transfer
    send_last_snapshot : {bool or str}, optional
        When no common snapshot exists, start from the most recent snapshot, or from the
        first snapshot whose name contains the given string, instead of the oldest
    dest_auto_create : {bool}, optional
        Create the destination dataset if it does not exist

    Returns
    -------
    int
        0 if success, 1 if not, 2 for ssh errors
    """

    logger = logging.getLogger(__name__)
    dest_name_log = '{:s}@{:s}:{:s}'.format(
        ssh_dest.user, ssh_dest.host, dest_name) if ssh_dest else dest_name

    logger.debug('Sending {} to {:s}...'.format(source_fs, dest_name_log))

    resume_token = None
    # Check if dest already has a 'zfs receive' ongoing
    if check_recv(dest_name, ssh=ssh_dest):
        return 1

    # get snapshots on source, catch exception if dataset was destroyed since pyznap was started
    try:
        snapshots = source_fs.snapshots()[::-1]
    except (DatasetNotFoundError, DatasetBusyError) as err:
        logger.error('Error while opening source {}: {}...'.format(
            source_fs, err))
        return 1
    except CalledProcessError as err:
        message = err.stderr.rstrip()
        if message.startswith('ssh: '):
            logger.error(
                'Connection issue while opening source {}: \'{:s}\'...'.format(
                    source_fs, message))
            return 2
        else:
            logger.error('Error while opening source {}: \'{:s}\'...'.format(
                source_fs, message))
            return 1
    snapnames = [snap.name.split('@')[1] for snap in snapshots]

    try:
        snapshot = snapshots[0]  # Most recent snapshot
        base = snapshots[-1]  # Oldest snapshot
    except IndexError:
        logger.error('No snapshots on {}, cannot send...'.format(source_fs))
        return 1

    try:
        dest_fs = zfs.open(dest_name, ssh=ssh_dest)
    except DatasetNotFoundError:
        if dest_auto_create:
            logger.info(
                'Destination {:s} does not exist, will create it...'.format(
                    dest_name_log))
            if create_dataset(dest_name, dest_name_log, ssh=ssh_dest):
                return 1
        else:
            logger.error(
                'Destination {:s} does not exist, manually create it or use "dest-auto-create" option...'
                .format(dest_name_log))
            return 1
        dest_snapnames = []
        common = set()
    except CalledProcessError as err:
        message = err.stderr.rstrip()
        if message.startswith('ssh: '):
            logger.error(
                'Connection issue while opening dest {:s}: \'{:s}\'...'.format(
                    dest_name_log, message))
            return 2
        else:
            logger.error('Error while opening dest {:s}: \'{:s}\'...'.format(
                dest_name_log, message))
            return 1
    else:
        # if dest exists, check for resume token
        resume_token = dest_fs.getprops().get('receive_resume_token',
                                              (None, None))[0]
        # find common snapshots between source & dest
        dest_snapnames = [
            snap.name.split('@')[1] for snap in dest_fs.snapshots()
        ]
        common = set(snapnames) & set(dest_snapnames)

    # if not resume and resume_token is not None:
    #     if not abort:
    #         logger.error('{:s} contains partially-complete state from "zfs receive -s" (~{:s}), '
    #                      'but neither resume nor abort option is given...'
    #                      .format(dest_name_log, bytes_fmt(base.stream_size(raw=raw, resume_token=resume_token))))
    #         return 1
    #     else:
    #         logger.info('{:s} contains partially-complete state from "zfs receive -s" (~{:s}), '
    #                     'will abort it...'
    #                     .format(dest_name_log, bytes_fmt(base.stream_size(raw=raw, resume_token=resume_token))))
    #         if abort_resume(dest_fs):
    #             return 1

    zfs.STATS.add('zfs_send_filesystem_count')

    was_transfer = False
    if resume_token is not None:
        logger.info(
            'Found resume token. Resuming last transfer of {:s} (~{:s})...'.
            format(
                dest_name_log,
                bytes_fmt(base.stream_size(raw=raw,
                                           resume_token=resume_token))))
        was_transfer = True
        rc = send_snap(base,
                       dest_name,
                       base=None,
                       ssh_dest=ssh_dest,
                       raw=raw,
                       resume=True,
                       resume_token=resume_token)
        if rc:
            return rc
        # we need to update common snapshots after finishing the resumable send
        dest_snapnames = [
            snap.name.split('@')[1] for snap in dest_fs.snapshots()
        ]
        common = set(snapnames) & set(dest_snapnames)

    if not common:
        if dest_snapnames:
            logger.error(
                'No common snapshots on {:s}, but snapshots exist. Not sending...'
                .format(dest_name_log))
            return 1
        else:
            if send_last_snapshot:
                base = snapshot
                for snap in snapshots:
                    if send_last_snapshot in snap.name.split('@')[1]:
                        base = snap
                        break
                logger.info(
                    'No common snapshots on {:s}, sending last snapshot {} (~{:s})...'
                    .format(dest_name_log, base,
                            bytes_fmt(base.stream_size(raw=raw))))
            else:
                logger.info(
                    'No common snapshots on {:s}, sending oldest snapshot {} (~{:s})...'
                    .format(dest_name_log, base,
                            bytes_fmt(base.stream_size(raw=raw))))
            was_transfer = True
            rc = send_snap(base,
                           dest_name,
                           base=None,
                           ssh_dest=ssh_dest,
                           raw=raw,
                           resume=resume)
            if rc:
                return rc
    else:
        # If there are common snapshots, get the most recent one
        base = next(
            filter(lambda x: x.name.split('@')[1] in common, snapshots), None)

    if base.name != snapshot.name:
        logger.info(
            'Updating {:s} with recent snapshot {} from {} (~{:s})...'.format(
                dest_name_log, snapshot,
                base.name.split('@')[1],
                bytes_fmt(snapshot.stream_size(base, raw=raw))))
        was_transfer = True
        rc = send_snap(snapshot,
                       dest_name,
                       base=base,
                       ssh_dest=ssh_dest,
                       raw=raw,
                       resume=resume)
        if rc:
            return rc

    if was_transfer:
        zfs.STATS.add('zfs_send_changed_count')
    else:
        zfs.STATS.add('zfs_send_unchanged_count')
    logger.info('{:s} is up to date...'.format(dest_name_log))
    return 0
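
A direct-call sketch consuming the documented return codes; source_fs, ssh_dest, logger and sleep are assumed to exist as in the surrounding module, and the dest name is a placeholder:

rc = send_filesystem(source_fs, 'backup/data', ssh_dest=ssh_dest,
                     raw=False, resume=True, dest_auto_create=True)
if rc == 2:
    # rc 2 signals an ssh/connection problem, so a retry can make sense
    sleep(30)
    rc = send_filesystem(source_fs, 'backup/data', ssh_dest=ssh_dest,
                         raw=False, resume=True, dest_auto_create=True)
if rc:
    logger.warning('send of {} failed with rc={}'.format(source_fs, rc))
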
Example #5
def zpools():
    """Creates two temporary zpools to be called from test functions, source is local and dest on
    remote ssh location. Yields the two pool names and destroys them after testing."""

    sftp_filename = '/tmp/' + randomword(10)

    # ssh arguments for zfs functions
    ssh = open_ssh(USER, HOST, port=PORT, key=KEY)
    sftp = ssh.open_sftp()

    # Create temporary file on which the source zpool is created. Manually create sftp file
    with NamedTemporaryFile() as file0, sftp.open(sftp_filename, 'w') as file1:
        filename0 = file0.name
        filename1 = sftp_filename

        # Fix size to 100 MiB
        file0.seek(100 * 1024**2 - 1)
        file0.write(b'0')
        file0.seek(0)
        file1.seek(100 * 1024**2 - 1)
        file1.write(b'0')
        file1.seek(0)

        # Create temporary test pools
        try:
            sp.check_output([ZPOOL, 'create', POOL0, filename0])
        except sp.CalledProcessError as err:
            logger.error(err)
            return

        try:
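            # NOTE: the stdlib subprocess.check_output takes no 'ssh' argument;
            # this call (and the remote 'destroy' below) assumes an ssh-aware
            # wrapper, like run(..., ssh=ssh) in Example #7.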
            sp.check_output([ZPOOL, 'create', POOL1, filename1], ssh=ssh)
        except sp.CalledProcessError as err:
            logger.error(err)
            return

        try:
            fs0 = zfs.open(POOL0)
            fs1 = zfs.open(POOL1, ssh=ssh)
            assert fs0.name == POOL0
            assert fs1.name == POOL1
        except (DatasetNotFoundError, AssertionError, Exception) as err:
            logger.error(err)
        else:
            yield fs0, fs1

        # Destroy temporary test pools
        try:
            sp.check_output([ZPOOL, 'destroy', POOL0])
        except sp.CalledProcessError as err:
            logger.error(err)

        try:
            sp.check_output([ZPOOL, 'destroy', POOL1], ssh=ssh)
        except sp.CalledProcessError as err:
            logger.error(err)

    # Delete tempfile on dest
    sftp.remove(sftp_filename)
    sftp.close()
    ssh.close()
Example #6
def status_filesystem(filesystem,
                      conf,
                      output='log',
                      show_all=False,
                      main_fs=False,
                      values=None,
                      filter=None,
                      filter_values=None,
                      filter_exclude=None):
    """Deletes snapshots of a single filesystem according to conf.

    Parameters:
    ----------
    filesystem : {ZFSFilesystem}
        Filesystem to report status for
    conf : {dict}
        Config entry with snapshot strategy
    main_fs : {bool}
        Marks the configured (top-level) filesystem, for which the exclude zfs properties are ignored
    """

    global OUTPUT

    logger = logging.getLogger(__name__)

    fs_name = str(filesystem)
    if filter_exclude:
        if any(fnmatch(fs_name, pattern) for pattern in filter_exclude):
            logger.debug('Excluding filesystem {} due to --exclude'.format(fs_name))
            return

    logger.debug('Checking snapshots on {}...'.format(fs_name))
    zfs.STATS.add('checked_count')

    snap = conf.get('snap', False)
    clean = conf.get('clean', False)
    send = bool(conf.get('dest', False))
    excluded = False
    snap_exclude_property = conf['snap_exclude_property']
    if not main_fs and snap_exclude_property and filesystem.ispropval(
            snap_exclude_property, check='false'):
        zfs.STATS.add('snap_excluded_count')
        logger.debug(
            'Ignoring dataset {:s} for snap, property {:s}=false'.format(
                filesystem.name, snap_exclude_property))
        snap = False
        clean = False
    send_exclude_property = conf['send_exclude_property']
    if not main_fs and send_exclude_property and filesystem.ispropval(
            send_exclude_property, check='false'):
        zfs.STATS.add('send_excluded_count')
        logger.debug(
            'Ignoring dataset {:s} for send, property {:s}=false'.format(
                filesystem.name, send_exclude_property))
        send = False
    if not (snap or clean or send):
        if show_all:
            zfs.STATS.add('excluded_count')
            excluded = True
        else:
            return

    # increase stats count and check excludes in send
    if snap:
        zfs.STATS.add('snap_count')
    if clean:
        zfs.STATS.add('clean_count')
    if send:
        dest = conf.get('dest', False)
        if dest and conf['exclude']:
            # check excluded
            sending = []
            for exclude, dst in zip(conf['exclude'], dest):
                if exclude and any(
                        fnmatch(filesystem.name, pattern)
                        for pattern in exclude):
                    zfs.STATS.add('dest_excluded_count')
                    logger.debug('Excluded from send {} -> {}...'.format(
                        filesystem, dst))
                    sending.append(False)
                else:
                    sending.append(dst)
            dest = sending
        send = send and dest and any([x for x in dest if bool(x)])
    else:
        dest = None

    if send:
        zfs.STATS.add('send_count')

    snapshots = {t: [] for t in SNAPSHOT_TYPES}
    # catch exception if dataset was destroyed since pyznap was started
    try:
        fs_snapshots = filesystem.snapshots()
    except (DatasetNotFoundError, DatasetBusyError) as err:
        logger.error('Error while opening {}: {}...'.format(filesystem, err))
        return 1
    have_snapshots = bool(fs_snapshots)
    # categorize snapshots
    for snaps in fs_snapshots:
        # Ignore snapshots not taken with pyznap
        if not snaps.name.split('@')[1].startswith('pyznap'):
            continue
        try:
            snap_type = snaps.name.split('_')[-1]
            snapshots[snap_type].append(snaps)
        except (ValueError, KeyError):
            continue

    # Reverse sort by time taken
    for snaps in snapshots.values():
        snaps.reverse()

    level = logging.INFO

    # prepare data for status
    counts = {}
    for s in snapshots.keys():
        counts[s] = conf.get(s, 0) or 0 if clean else 0
    pyznap_snapshots = sum(len(s) for s in snapshots.values())

    # TODO: remote uptodate check
    # TODO: T/F oversnapshot/undersnapshot/other snapshots/unwanted snapshot on excluded fs

    # check needed snapshots count
    missing_snapshots = any(
        [len(snapshots[t]) < counts[t] for t in SNAPSHOT_TYPES])
    extra_snapshots = any(
        [len(snapshots[t]) > counts[t] for t in SNAPSHOT_TYPES])
    if missing_snapshots:
        level = logging.WARNING

    # make status data
    status = OrderedDict()
    status['hostname'] = os.uname()[1]
    status['name'] = fs_name
    status['conf'] = conf['name']
    status['excluded'] = excluded
    status['do-snap'] = snap
    status['do-clean'] = clean
    status['do-send'] = send
    status['conf-snap_exclude_property'] = snap_exclude_property
    status['conf-send_exclude_property'] = send_exclude_property
    status['snapshot-have'] = have_snapshots
    status['snapshot-missing'] = missing_snapshots
    status['snapshot-extra'] = extra_snapshots
    status['snapshot-count-all'] = len(fs_snapshots)
    status['snapshot-count-pyznap'] = pyznap_snapshots
    status['snapshot-count-nopyznap'] = len(fs_snapshots) - pyznap_snapshots
    for stype in SNAPSHOT_TYPES:
        status['snapshot-types-' +
               stype] = str(len(snapshots[stype])) + '/' + str(counts[stype])

    def bytes_fmt_no_raw(nbytes):
        return nbytes if output == 'jsonl' else bytes_fmt(nbytes)

    status['dest'] = dest
    if dest:
        i = 0
        snapnames = [snap.name.split('@')[1] for snap in fs_snapshots]
        for d in dest:
            if d:
                _prefix = 'dest-' + str(i) + '-'
                _type, _dest_name, _user, _host, _port = parse_name(d)
                status[_prefix + 'type'] = _type
                status[_prefix + 'host'] = _host
                if conf['name']:
                    dest_name = fs_name.replace(conf['name'], _dest_name)
                else:
                    dest_name = _dest_name + '/' + fs_name
                status[_prefix + 'name'] = dest_name
                # check snapshots on dest
                common_snapshots = []
                ssh_dest = get_ssh_for_dest(d, conf)
                try:
                    dest_fs = zfs.open(dest_name, ssh=ssh_dest)
                except DatasetNotFoundError:
                    dest_snapshots = []
                    dest_snapnames = []
                    common = set()
                except CalledProcessError as err:
                    message = err.stderr.rstrip()
                    if message.startswith('ssh: '):
                        logger.error(
                            'Connection issue while opening dest {:s}: \'{:s}\'...'
                            .format(dest_name, message))
                        return 2
                    else:
                        logger.error(
                            'Error while opening dest {:s}: \'{:s}\'...'.
                            format(dest_name, message))
                        return 1
                else:
                    # find common snapshots between source & dest
                    dest_snapshots = dest_fs.snapshots()
                    dest_snapnames = [
                        snap.name.split('@')[1] for snap in dest_snapshots
                    ]
                    common = set(snapnames) & set(dest_snapnames)
                    if common:
                        common_snapshots = [
                            s for s in snapnames if s in common
                        ]
                status[_prefix + 'snapshot-count'] = len(dest_snapnames)
                status[_prefix +
                       'snapshot-count-common'] = len(common_snapshots)
                if common_snapshots:
                    status[_prefix +
                           'snapshot-common-first'] = common_snapshots[0]
                    status[_prefix +
                           'snapshot-common-last'] = common_snapshots[-1]
                if dest_snapnames:
                    status[_prefix + 'snapshot-dest-first'] = dest_snapnames[0]
                    status[_prefix + 'snapshot-dest-last'] = dest_snapnames[-1]
            i += 1

    def add_snapshot_status(snapshot, label):
        props = snapshot.getprops()
        status['snapshot-info-' +
               label + '-timestamp'] = datetime.fromtimestamp(
                   int(props['creation'][0])).isoformat()
        status['snapshot-info-' + label + '-referenced'] = bytes_fmt_no_raw(
            int(props['referenced'][0]))
        status['snapshot-info-' + label +
               '-logicalreferenced'] = bytes_fmt_no_raw(
                   int(props['logicalreferenced'][0]))

    if fs_snapshots:
        add_snapshot_status(fs_snapshots[0], 'first')
        add_snapshot_status(fs_snapshots[-1], 'last')

    props = filesystem.getprops()
    for p in ZFS_SIZE_PROPERTIES:
        status['zfs-' + p] = bytes_fmt_no_raw(int(props[p][0]))
    for p in ZFS_OTHER_PROPERTIES:
        status['zfs-' + p] = props[p][0] if p in props else '---'

    if filter_values:
        for f, v in filter_values.items():
            if status[f] != v:
                return

    if values:
        fstatus = {}
        for v in values:
            for k in tuple(status.keys()):
                if fnmatch(k, v):
                    fstatus[k] = status[k]
                    del status[k]
        status = fstatus

    if output == 'jsonl':
        print(json.dumps(status))
    elif output == 'html':
        OUTPUT.append(status)
    else:
        logger.log(level, 'STATUS: ' + json.dumps(status))
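
A hypothetical invocation: emit one JSON line per dataset, restricted to a few status keys. conf must carry at least the keys read above ('name', 'snap_exclude_property', 'send_exclude_property', 'exclude' and 'dest'); filesystem and conf themselves are assumed to come from the caller:

status_filesystem(filesystem, conf,
                  output='jsonl',
                  show_all=True,
                  values=['name', 'snapshot-missing', 'snapshot-count-*'],
                  filter_exclude=['tank/tmp*'])
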
Example #7
def zpools():
    """Creates two temporary zpools to be called from test functions, source is local and dest on
    remote ssh location. Yields the two pool names and destroys them after testing."""

    zpool = '/sbin/zpool'
    _word = randomword(8)
    pool0 = 'pyznap_source_' + _word
    pool1 = 'pyznap_dest_' + _word

    sftp_filename = '/tmp/' + randomword(10)

    # ssh arguments for zfs functions
    ssh = SSH(USER, HOST, port=PORT, key=KEY)
    # need paramiko for sftp file
    sshclient = open_ssh(USER, HOST, port=PORT, key=KEY)
    sftp = sshclient.open_sftp()

    # Create temporary file on which the source zpool is created. Manually create sftp file
    with NamedTemporaryFile() as file0, sftp.open(sftp_filename, 'w') as file1:
        filename0 = file0.name
        filename1 = sftp_filename

        # Fix size to 100 MiB
        file0.seek(100 * 1024**2 - 1)
        file0.write(b'0')
        file0.seek(0)
        file1.seek(100 * 1024**2 - 1)
        file1.write(b'0')
        file1.seek(0)

        # Create temporary test pools
        try:
            run([zpool, 'create', pool0, filename0])
        except sp.CalledProcessError as err:
            logger.error(err)
            return

        try:
            run([zpool, 'create', pool1, filename1], ssh=ssh)
        except sp.CalledProcessError as err:
            logger.error(err)
            return

        try:
            fs0 = zfs.open(pool0)
            fs1 = zfs.open(pool1, ssh=ssh)
            assert fs0.name == pool0
            assert fs1.name == pool1
        except (DatasetNotFoundError, AssertionError, Exception) as err:
            logger.error(err)
        else:
            yield fs0, fs1

        # Destroy temporary test pools
        try:
            run([zpool, 'destroy', pool0])
        except sp.CalledProcessError as err:
            logger.error(err)

        try:
            run([zpool, 'destroy', pool1], ssh=ssh)
        except sp.CalledProcessError as err:
            logger.error(err)

    # Delete tempfile on dest
    sftp.remove(sftp_filename)
    sftp.close()
    ssh.close()
Example #8
def fix_snapshots(filesystems,
                  format=None,
                  type=None,
                  type_map=None,
                  recurse=False):
    """Fix snapshots name

    Parameters:
    ----------
    filesystems : [strings]
        Filesystems to fix
    """

    logger = logging.getLogger(__name__)

    if format.startswith('@'):
        if not type_map and format in MAPS:
            type_map = MAPS[format]
        if format in FORMATS:
            format = FORMATS[format]
        else:
            logger.error('Unknown format {}.'.format(format))
            sys.exit(1)

    logger.debug('FORMAT: ' + str(format))
    logger.debug('MAP: ' + str(type_map))

    rp = re.compile(format)
    now = datetime.now()
    cur_century = now.year // 100 * 100

    # for all specified filesystems
    for fsname in filesystems:
        logger.info('Checking snapshots on {}...'.format(fsname))
        try:
            parent = zfs.open(fsname)
        except DatasetNotFoundError:
            logger.error('Filesystem {} does not exist'.format(fsname))
            continue

        if recurse:
            # get all child filesystems
            fstree = zfs.find(fsname, types=['filesystem', 'volume'])
        else:
            # only scan specified filesystem
            fstree = [parent]

        for filesystem in fstree:

            logger.info('Fixing {}...'.format(filesystem.name))
            snapshots = filesystem.snapshots()
            for snapshot in snapshots:
                snapname = snapshot.snapname()
                try:
                    r = rp.match(snapname)
                except Exception:
                    r = False
                if r:
                    # guess year
                    year = re_get_group_int(r, 'year', default=now.year)
                    if year < 100:
                        year += cur_century
                    # get type from snap, with optional map or default type if specified
                    snaptype = r.group('type')
                    if type_map:
                        if snaptype in type_map:
                            snaptype = type_map[snaptype]
                    if not snaptype and type:
                        snaptype = type
                    if not snaptype:
                        logger.error(
                            'Unknown snap type {} for snapshot {}'.format(
                                snaptype, snapname))
                        continue
                    new_snapname = 'pyznap_' + datetime(
                        year,
                        re_get_group_int(r, 'month', default=now.month),
                        re_get_group_int(r, 'day', default=now.day),
                        hour=re_get_group_int(r, 'hour', default=now.hour),
                        minute=re_get_group_int(
                            r, 'minute', default=now.minute),
                        second=re_get_group_int(
                            r, 'second', default=now.second)).strftime(
                                '%Y-%m-%d_%H:%M:%S') + '_' + snaptype
                    logger.debug('Renaming {} -> {}'.format(
                        snapname, new_snapname))
                    snapshot.rename(snapshot.fsname() + '@' + new_snapname)
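
A sketch of a format pattern for hypothetical legacy names such as 'auto-20240131-0415-d'. The named groups are the ones re_get_group_int() reads above (groups missing from the pattern are assumed to fall back to the defaults, i.e. the current time), and the single-letter type is remapped via type_map:

legacy_format = (r'auto-(?P<year>\d{4})(?P<month>\d{2})(?P<day>\d{2})'
                 r'-(?P<hour>\d{2})(?P<minute>\d{2})-(?P<type>[dwm])')
legacy_map = {'d': 'daily', 'w': 'weekly', 'm': 'monthly'}

fix_snapshots(['tank/data'], format=legacy_format, type_map=legacy_map,
              recurse=True)
# e.g. 'tank/data@auto-20240131-0415-d' would become
# 'tank/data@pyznap_2024-01-31_04:15:<SS>_daily' (seconds taken from 'now')
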
Example #9
File: send.py Project: 0mp/pyznap
def send_snap(source_fs, dest_name, ssh=None):
    """Checks for common snapshots between source and dest.
    If none are found, send the oldest snapshot, then update with the most recent one.
    If there are common snaps, update destination with the most recent one.

    Parameters:
    ----------
    source_fs : {ZFSFilesystem}
        Source zfs filesystem from where to send
    dest_name : {str}
        Name of the location to send to
    ssh : {paramiko.SSHClient}, optional
        Open ssh connection for remote backup (the default is None, meaning local backup)

    Returns
    -------
    int
        0 if success, 1 if not
    """

    logger = logging.getLogger(__name__)
    dest_name_log = '{:s}@{:s}:{:s}'.format(ssh.user, ssh.host,
                                            dest_name) if ssh else dest_name

    logger.debug('Sending {} to {:s}...'.format(source_fs, dest_name_log))

    # Check if ssh session still active
    if ssh and not ssh.get_transport().is_active():
        logger.error(
            'Error while sending to {:s}: ssh session not active...'.format(
                dest_name_log))
        return 1

    # Check if dest already has a 'zfs receive' ongoing
    if check_recv(dest_name, ssh=ssh):
        return 1

    # Get snapshots on source
    snapshots = source_fs.snapshots()[::-1]
    snapnames = [snap.name.split('@')[1] for snap in snapshots]
    try:
        snapshot = snapshots[0]  # Most recent snapshot
        base = snapshots[-1]  # Oldest snapshot
    except IndexError:
        logger.error('No snapshots on {}, cannot send...'.format(source_fs))
        return 1

    try:
        dest_fs = zfs.open(dest_name, ssh=ssh)
    except DatasetNotFoundError:
        dest_snapnames = []
        common = set()
    except CalledProcessError as err:
        logger.error('Error while opening dest {:s}: \'{:s}\'...'.format(
            dest_name_log, err.stderr.rstrip()))
        return 1
    else:
        dest_snapnames = [
            snap.name.split('@')[1] for snap in dest_fs.snapshots()
        ]
        # Find common snapshots between source & dest
        common = set(snapnames) & set(dest_snapnames)

    if not common:
        if dest_snapnames:
            logger.error(
                'No common snapshots on {:s}, but snapshots exist. Not sending...'
                .format(dest_name_log))
            return 1
        else:
            logger.info(
                'No common snapshots on {:s}, sending oldest snapshot {} (~{:s})...'
                .format(dest_name_log, base, base.stream_size()))
            if send_recv(base, dest_name, base=None, ssh=ssh):
                return 1
    else:
        # If there are common snapshots, get the most recent one
        base = next(
            filter(lambda x: x.name.split('@')[1] in common, snapshots), None)

    if base.name != snapshot.name:
        logger.info('Updating {:s} with recent snapshot {} (~{:s})...'.format(
            dest_name_log, snapshot, snapshot.stream_size(base)))
        if send_recv(snapshot, dest_name, base=base, ssh=ssh):
            return 1

    logger.info('{:s} is up to date...'.format(dest_name_log))
    return 0
Example #10
File: send.py Project: 0mp/pyznap
def send_config(config):
    """Tries to sync all entries in the config to their dest. Finds all children of the filesystem
    and calls send_snap on each of them.

    Parameters:
    ----------
    config : {list of dict}
        Full config list containing all strategies for different filesystems
    """

    logger = logging.getLogger(__name__)
    logger.info('Sending snapshots...')

    for conf in config:
        if not conf.get('dest', None):
            continue

        source_name = conf['name']
        if source_name.startswith('ssh'):
            logger.error('Cannot send from remote location ({:s})...'.format(
                source_name))
            continue

        try:
            # Children includes the base filesystem (named 'source_name')
            source_children = zfs.find(path=source_name,
                                       types=['filesystem', 'volume'])
        except DatasetNotFoundError as err:
            logger.error('Source {:s} does not exist...'.format(source_name))
            continue
        except ValueError as err:
            logger.error(err)
            continue
        except CalledProcessError as err:
            logger.error('Error while opening source {:s}: \'{:s}\'...'.format(
                source_name, err.stderr.rstrip()))
            continue

        # Send to every backup destination
        for backup_dest in conf['dest']:
            try:
                _type, dest_name, user, host, port = parse_name(backup_dest)
            except ValueError as err:
                logger.error('Could not parse {:s}: {}...'.format(
                    backup_dest, err))
                continue

            if _type == 'ssh':
                dest_key = conf['dest_keys'].pop(
                    0) if conf['dest_keys'] else None
                try:
                    ssh = open_ssh(user, host, port=port, key=dest_key)
                except (FileNotFoundError, SSHException):
                    continue
                dest_name_log = '{:s}@{:s}:{:s}'.format(user, host, dest_name)
            else:
                ssh = None
                dest_name_log = dest_name

            # Check if base destination filesystem exists, if not do not send
            try:
                zfs.open(dest_name, ssh=ssh)
            except DatasetNotFoundError:
                logger.error(
                    'Destination {:s} does not exist...'.format(dest_name_log))
                continue
            except ValueError as err:
                logger.error(err)
                continue
            except CalledProcessError as err:
                logger.error(
                    'Error while opening dest {:s}: \'{:s}\'...'.format(
                        dest_name_log, err.stderr.rstrip()))
                continue
            else:
                # Match children on source to children on dest
                dest_children_names = [
                    child.name.replace(source_name, dest_name)
                    for child in source_children
                ]
                # Send all children to corresponding children on dest
                for source, dest in zip(source_children, dest_children_names):
                    send_snap(source, dest, ssh=ssh)
            finally:
                if ssh:
                    ssh.close()
Example #11
def send_config(config):
    """Tries to sync all entries in the config to their dest. Finds all children of the filesystem
    and calls send_filesystem on each of them.

    Parameters:
    ----------
    config : {list of dict}
        Full config list containing all strategies for different filesystems
    """

    logger = logging.getLogger(__name__)
    logger.info('Sending snapshots...')

    for conf in config:
        if not conf.get('dest', None):
            continue

        dry_run = conf.get('dry_run', None)
        dry_msg = '*** DRY RUN ***' if dry_run else ''
        backup_source = conf['name']
        try:
            _type, source_name, user, host, port = parse_name(backup_source)
        except ValueError as err:
            logger.error('Could not parse {:s}: {}...'.format(
                backup_source, err))
            continue

        # if source is remote, open ssh connection
        if _type == 'ssh':
            key = conf['key'] if conf.get('key', None) else None
            compress = conf['compress'].pop(0) if conf.get('compress',
                                                           None) else 'lzop'
            try:
                ssh_source = SSH(user,
                                 host,
                                 port=port,
                                 key=key,
                                 compress=compress)
            except (FileNotFoundError, SSHException):
                continue
            source_name_log = '{:s}@{:s}:{:s}'.format(user, host, source_name)
        else:
            ssh_source = None
            source_name_log = source_name

        try:
            # Children includes the base filesystem (named 'source_name')
            source_children = zfs.find(path=source_name,
                                       types=['filesystem', 'volume'],
                                       ssh=ssh_source)
        except DatasetNotFoundError as err:
            logger.error(
                'Source {:s} does not exist...'.format(source_name_log))
            continue
        except ValueError as err:
            logger.error(err)
            continue
        except CalledProcessError as err:
            logger.error('Error while opening source {:s}: \'{:s}\'...'.format(
                source_name_log, err.stderr.rstrip()))
            continue

        # Send to every backup destination
        for backup_dest in conf['dest']:
            # get exclude rules
            exclude = conf['exclude'].pop(0) if conf.get('exclude',
                                                         None) else []
            # check if raw send was requested
            raw = conf['raw_send'].pop(0) if conf.get('raw_send',
                                                      None) else False
            # check if we need to retry
            retries = conf['retries'].pop(0) if conf.get('retries',
                                                         None) else 0
            retry_interval = conf['retry_interval'].pop(0) if conf.get(
                'retry_interval', None) else 10
            # check if resumable send was requested
            resume = conf['resume'].pop(0) if conf.get('resume',
                                                       None) else False
            # check if we should create dataset if it doesn't exist
            dest_auto_create = conf['dest_auto_create'].pop(0) if conf.get(
                'dest_auto_create', None) else False

            try:
                _type, dest_name, user, host, port = parse_name(backup_dest)
            except ValueError as err:
                logger.error('Could not parse {:s}: {}...'.format(
                    backup_dest, err))
                continue

            # if dest is remote, open ssh connection
            if _type == 'ssh':
                dest_key = conf['dest_keys'].pop(0) if conf.get(
                    'dest_keys', None) else None
                # if 'ssh_source' is set, then 'compress' is already set and we use same compression for both source and dest
                # if not then we take the next entry in config
                if not ssh_source:
                    compress = conf['compress'].pop(0) if conf.get(
                        'compress', None) else 'lzop'
                try:
                    ssh_dest = SSH(user,
                                   host,
                                   port=port,
                                   key=dest_key,
                                   compress=compress)
                except (FileNotFoundError, SSHException):
                    continue
                dest_name_log = '{:s}@{:s}:{:s}'.format(user, host, dest_name)
            else:
                ssh_dest = None
                dest_name_log = dest_name

            # check if dest exists
            try:
                zfs.open(dest_name, ssh=ssh_dest)
            except DatasetNotFoundError:
                if dest_auto_create:
                    logger.info(
                        'Destination {:s} does not exist, will create it... {}'
                        .format(dest_name_log, dry_msg))
                    if create_dataset(dest_name,
                                      dest_name_log,
                                      ssh=ssh_dest,
                                      dry_run=dry_run):
                        continue
                else:
                    logger.error(
                        'Destination {:s} does not exist, manually create it or use "dest-auto-create" option...'
                        .format(dest_name_log))
                    continue
            except ValueError as err:
                logger.error(err)
                continue
            except CalledProcessError as err:
                logger.error(
                    'Error while opening dest {:s}: \'{:s}\'...'.format(
                        dest_name_log, err.stderr.rstrip()))
                continue

            # Match children on source to children on dest
            dest_children_names = [
                child.name.replace(source_name, dest_name)
                for child in source_children
            ]
            # Send all children to corresponding children on dest
            for source_fs, dest_name in zip(source_children,
                                            dest_children_names):
                # exclude filesystems from rules
                if any(
                        fnmatch(source_fs.name, pattern)
                        for pattern in exclude):
                    logger.debug(
                        'Matched {} in exclude rules, not sending...'.format(
                            source_fs))
                    continue

                # Check for ZFS user property to bypass filesystem
                fs_props = source_fs.getprops()

                exclude_prop = 'pyznap:exclude'
                ignore_me = fs_props.get(exclude_prop,
                                         ('false', 'false'))[0].lower()
                logger.debug("Property {}={} for {}".format(
                    exclude_prop, ignore_me, source_fs))
                if ignore_me == 'true':
                    logger.info('Matched {}={} for {}, not sending...'.format(
                        exclude_prop, ignore_me, source_fs))
                    continue

                # Check for max size
                used_prop = 'used'
                fs_used_bytes = int(fs_props.get(used_prop,
                                                 ('0', '0'))[0])  # Bytes
                fs_used_fmt = bytes_fmt(fs_used_bytes)  # human-readable string
                logger.debug("Property {}={} ({}) for {}".format(
                    used_prop, fs_used_fmt, fs_used_bytes, source_fs))

                max_prop = 'pyznap:max_size'
                fs_max_fmt = fs_props.get(max_prop, ('0', '0'))[0]  # String
                fs_max_bytes = parse_size(fs_max_fmt)  # Bytes
                logger.debug("Property {}={} ({}) for {}".format(
                    max_prop, fs_max_fmt, fs_max_bytes, source_fs))
                if fs_max_bytes > 0 and fs_used_bytes > fs_max_bytes:
                    logger.info(
                        'Filesystem size {} exceeds {}={} for {}, not sending...'
                        .format(fs_used_fmt, max_prop, fs_max_fmt, source_fs))
                    continue

                # send not excluded filesystems
                for retry in range(1, retries + 2):
                    rc = send_filesystem(source_fs,
                                         dest_name,
                                         ssh_dest=ssh_dest,
                                         raw=raw,
                                         resume=resume,
                                         dry_run=dry_run)
                    if rc == 2 and retry <= retries:
                        logger.info(
                            'Retrying send in {:d}s (retry {:d} of {:d})...'.
                            format(retry_interval, retry, retries))
                        sleep(retry_interval)
                    else:
                        break

            if ssh_dest:
                ssh_dest.close()

        if ssh_source:
            ssh_source.close()
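
This variant honors two ZFS user properties. A sketch of setting them with the standard zfs CLI (user properties only need a namespaced name, so a plain 'zfs set' works); the dataset names and the 500G limit are placeholders, and parse_size() is assumed to accept suffixed sizes:

import subprocess as sp

# Skip this dataset entirely when sending
sp.check_call(['zfs', 'set', 'pyznap:exclude=true', 'tank/data/scratch'])
# Do not send datasets whose 'used' exceeds the configured limit
sp.check_call(['zfs', 'set', 'pyznap:max_size=500G', 'tank/data/media'])
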
Example #12
def send_config(config):
    """Tries to sync all entries in the config to their dest. Finds all children of the filesystem
    and calls send_filesystem on each of them.

    Parameters:
    ----------
    config : {list of dict}
        Full config list containing all strategies for different filesystems
    """

    logger = logging.getLogger(__name__)
    logger.info('Sending snapshots...')

    for conf in config:
        if not conf.get('dest', None):
            continue

        backup_source = conf['name']
        try:
            _type, source_name, user, host, port = parse_name(backup_source)
        except ValueError as err:
            logger.error('Could not parse {:s}: {}...'.format(
                backup_source, err))
            continue

        # if source is remote, open ssh connection
        if _type == 'ssh':
            key = conf['key'] if conf.get('key', None) else None
            compress = conf['compress'].pop(0) if conf.get('compress',
                                                           None) else 'lzop'
            try:
                ssh_source = SSH(user,
                                 host,
                                 port=port,
                                 key=key,
                                 compress=compress)
            except (FileNotFoundError, SSHException):
                continue
            source_name_log = '{:s}@{:s}:{:s}'.format(user, host, source_name)
        else:
            ssh_source = None
            source_name_log = source_name

        try:
            # Children includes the base filesystem (named 'source_name')
            source_children = zfs.find(path=source_name,
                                       types=['filesystem', 'volume'],
                                       ssh=ssh_source)
        except DatasetNotFoundError as err:
            logger.error(
                'Source {:s} does not exist...'.format(source_name_log))
            continue
        except ValueError as err:
            logger.error(err)
            continue
        except CalledProcessError as err:
            logger.error('Error while opening source {:s}: \'{:s}\'...'.format(
                source_name_log, err.stderr.rstrip()))
            continue

        # Send to every backup destination
        for backup_dest in conf['dest']:
            try:
                _type, dest_name, user, host, port = parse_name(backup_dest)
            except ValueError as err:
                logger.error('Could not parse {:s}: {}...'.format(
                    backup_dest, err))
                continue

            # if dest is remote, open ssh connection
            if _type == 'ssh':
                dest_key = conf['dest_keys'].pop(0) if conf.get(
                    'dest_keys', None) else None
                # if 'ssh_source' is set, then 'compress' is already set and we use same compression for both source and dest
                # if not then we take the next entry in config
                if not ssh_source:
                    compress = conf['compress'].pop(0) if conf.get(
                        'compress', None) else 'lzop'
                try:
                    ssh_dest = SSH(user,
                                   host,
                                   port=port,
                                   key=dest_key,
                                   compress=compress)
                except (FileNotFoundError, SSHException):
                    continue
                dest_name_log = '{:s}@{:s}:{:s}'.format(user, host, dest_name)
            else:
                ssh_dest = None
                dest_name_log = dest_name

            # get exclude rules
            exclude = conf['exclude'].pop(0) if conf.get('exclude',
                                                         None) else []

            # check if raw send was requested
            raw = conf['raw_send'].pop(0) if conf.get('raw_send',
                                                      None) else False

            # Check if base destination filesystem exists, if not do not send
            try:
                zfs.open(dest_name, ssh=ssh_dest)
            except DatasetNotFoundError:
                logger.error(
                    'Destination {:s} does not exist...'.format(dest_name_log))
                continue
            except ValueError as err:
                logger.error(err)
                continue
            except CalledProcessError as err:
                logger.error(
                    'Error while opening dest {:s}: \'{:s}\'...'.format(
                        dest_name_log, err.stderr.rstrip()))
                continue
            else:
                # Match children on source to children on dest
                dest_children_names = [
                    child.name.replace(source_name, dest_name)
                    for child in source_children
                ]
                # Send all children to corresponding children on dest
                for source_fs, dest_name in zip(source_children,
                                                dest_children_names):
                    # exclude filesystems from rules
                    if any(
                            fnmatch(source_fs.name, pattern)
                            for pattern in exclude):
                        logger.debug(
                            'Matched {} in exclude rules, not sending...'.
                            format(source_fs))
                        continue
                    # send not excluded filesystems
                    send_filesystem(source_fs,
                                    dest_name,
                                    ssh_dest=ssh_dest,
                                    raw=raw)
            finally:
                if ssh_dest:
                    ssh_dest.close()

        if ssh_source:
            ssh_source.close()