Example #1
0
def main() -> None:
    """Create a test ZFS dataset, run all discovered unit tests, then clean up.

    The dataset teardown runs in a ``finally`` block so the test dataset is
    destroyed even when test discovery or the test run itself raises —
    the original version leaked the dataset on any exception.
    """
    zfs = ZFS()
    zfs.createDataset()
    try:
        # Discover every test module below the current directory and run it.
        loader = unittest.TestLoader()
        tests = loader.discover('.')
        testRunner = unittest.runner.TextTestRunner()
        testRunner.run(tests)
    finally:
        # Always remove the dataset created above, even on failure.
        zfs.destroyDataset()
Example #2
0
    def run(settings):
        """
        Executes a single run where certain datasets might or might not be snapshotted

        For every local ZFS dataset that has an entry in ``settings``:

        1. Decide whether to act: either a ``.trigger`` file exists in the
           dataset's mountpoint (and is consumed), or the configured
           ``HH:MM`` time has passed and today's snapshot does not exist yet.
        2. If acting: run the optional ``preexec`` command, optionally take
           today's snapshot (named ``YYYYMMDD``), and optionally replicate.
        3. Replication is incremental from the last snapshot common to both
           sides; when no common snapshot exists, a full send of the newest
           snapshot is performed. ``target`` set means push to the remote,
           otherwise pull from ``source``. After each transfer the newly
           replicated snapshot is held on both sides and the hold on the
           previous one is released (presumably to pin the replication
           baseline against destruction — confirm against the ZFS helper).
        4. Run the optional ``postexec`` command, then prune snapshots via
           ``Cleaner.clean`` whenever a recent (today/yesterday) snapshot
           exists.

        Exceptions raised while handling one dataset are logged and the loop
        continues with the next dataset.

        Args:
            settings: mapping of dataset name -> configuration dict with keys
                'snapshot', 'replicate', 'time', 'mountpoint', 'preexec',
                'postexec', 'schema' (inferred from the accesses below —
                confirm against the caller/config loader).
        """

        # Snapshot names are date stamps (YYYYMMDD); today/yesterday are used
        # both for naming new snapshots and for membership tests below.
        now = datetime.now()
        yda = datetime.now() - timedelta(1)
        today = '{0:04d}{1:02d}{2:02d}'.format(now.year, now.month, now.day)
        yesterday = '{0:04d}{1:02d}{2:02d}'.format(yda.year, yda.month,
                                                   yda.day)

        # One upfront query for all local snapshots and datasets.
        snapshots = ZFS.get_snapshots()
        datasets = ZFS.get_datasets()
        for dataset in datasets:
            if dataset in settings:
                try:
                    dataset_settings = settings[dataset]
                    local_snapshots = snapshots.get(dataset, [])

                    take_snapshot = dataset_settings['snapshot'] is True
                    replicate = dataset_settings['replicate'] is not None

                    # Decide whether we need to handle this dataset
                    execute = False
                    if take_snapshot is True or replicate is True:
                        if dataset_settings['time'] == 'trigger':
                            # We wait until we find a trigger file in the filesystem
                            trigger_filename = '{0}/.trigger'.format(
                                dataset_settings['mountpoint'])
                            if os.path.exists(trigger_filename):
                                Manager.logger.info(
                                    'Trigger found on {0}'.format(dataset))
                                # Consume the trigger so the next run does not
                                # fire again for the same request.
                                os.remove(trigger_filename)
                                execute = True
                        else:
                            # 'time' is an 'HH:MM' string; act once per day,
                            # after that time, as long as today's snapshot is
                            # not already present.
                            trigger_time = dataset_settings['time'].split(':')
                            hour = int(trigger_time[0])
                            minutes = int(trigger_time[1])
                            if (now.hour > hour or
                                (now.hour == hour and now.minute >= minutes)
                                ) and today not in local_snapshots:
                                Manager.logger.info(
                                    'Time passed for {0}'.format(dataset))
                                execute = True

                    if execute is True:
                        # Pre exectution command
                        if dataset_settings['preexec'] is not None:
                            Helper.run_command(dataset_settings['preexec'],
                                               '/')

                        if take_snapshot is True:
                            # Take today's snapshotzfs
                            Manager.logger.info(
                                'Taking snapshot {0}@{1}'.format(
                                    dataset, today))
                            ZFS.snapshot(dataset, today)
                            # Keep the in-memory view consistent so the
                            # replication and cleanup code below sees it.
                            local_snapshots.append(today)
                            Manager.logger.info(
                                'Taking snapshot {0}@{1} complete'.format(
                                    dataset, today))

                        # Replicating, if required
                        if replicate is True:
                            Manager.logger.info(
                                'Replicating {0}'.format(dataset))
                            replicate_settings = dataset_settings['replicate']
                            # 'target' set => push local snapshots out;
                            # otherwise pull from the remote 'source'.
                            push = replicate_settings['target'] is not None
                            remote_dataset = replicate_settings[
                                'target'] if push else replicate_settings[
                                    'source']
                            remote_snapshots = ZFS.get_snapshots(
                                remote_dataset, replicate_settings['endpoint'])
                            last_common_snapshot = None
                            if remote_dataset in remote_snapshots:
                                if push is True:  # If pushing, we search for the last local snapshot that is remotely available
                                    for snapshot in local_snapshots:
                                        if snapshot in remote_snapshots[
                                                remote_dataset]:
                                            last_common_snapshot = snapshot
                                else:  # Else, we search for the last remote snapshot that is locally available
                                    for snapshot in remote_snapshots[
                                            remote_dataset]:
                                        if snapshot in local_snapshots:
                                            last_common_snapshot = snapshot
                            if last_common_snapshot is not None:  # There's a common snapshot
                                # Walk forward from the common snapshot,
                                # sending each successive increment.
                                previous_snapshot = None
                                if push is True:
                                    for snapshot in local_snapshots:
                                        if snapshot == last_common_snapshot:
                                            previous_snapshot = last_common_snapshot
                                            continue
                                        if previous_snapshot is not None:
                                            # There is a snapshot on this host that is not yet on the other side.
                                            size = ZFS.get_size(
                                                dataset, previous_snapshot,
                                                snapshot)
                                            Manager.logger.info(
                                                '  {0}@{1} > {0}@{2} ({3})'.
                                                format(dataset,
                                                       previous_snapshot,
                                                       snapshot, size))
                                            ZFS.replicate(
                                                dataset,
                                                previous_snapshot,
                                                snapshot,
                                                remote_dataset,
                                                replicate_settings.get(
                                                    'buffer_size',
                                                    BUFFER_SIZE),
                                                replicate_settings['endpoint'],
                                                direction='push',
                                                compression=replicate_settings[
                                                    'compression'])
                                            # Pin the new baseline on both
                                            # sides, release the old one.
                                            ZFS.hold(dataset, snapshot)
                                            ZFS.hold(
                                                remote_dataset, snapshot,
                                                replicate_settings['endpoint'])
                                            ZFS.release(
                                                dataset, previous_snapshot)
                                            ZFS.release(
                                                remote_dataset,
                                                previous_snapshot,
                                                replicate_settings['endpoint'])
                                            previous_snapshot = snapshot
                                else:
                                    for snapshot in remote_snapshots[
                                            remote_dataset]:
                                        if snapshot == last_common_snapshot:
                                            previous_snapshot = last_common_snapshot
                                            continue
                                        if previous_snapshot is not None:
                                            # There is a remote snapshot that is not yet on the local host.
                                            size = ZFS.get_size(
                                                remote_dataset,
                                                previous_snapshot, snapshot,
                                                replicate_settings['endpoint'])
                                            Manager.logger.info(
                                                '  {0}@{1} > {0}@{2} ({3})'.
                                                format(remote_dataset,
                                                       previous_snapshot,
                                                       snapshot, size))
                                            ZFS.replicate(
                                                remote_dataset,
                                                previous_snapshot,
                                                snapshot,
                                                dataset,
                                                replicate_settings.get(
                                                    'buffer_size',
                                                    BUFFER_SIZE),
                                                replicate_settings['endpoint'],
                                                direction='pull',
                                                compression=replicate_settings[
                                                    'compression'])
                                            # Pin the new baseline on both
                                            # sides, release the old one.
                                            ZFS.hold(dataset, snapshot)
                                            ZFS.hold(
                                                remote_dataset, snapshot,
                                                replicate_settings['endpoint'])
                                            ZFS.release(
                                                dataset, previous_snapshot)
                                            ZFS.release(
                                                remote_dataset,
                                                previous_snapshot,
                                                replicate_settings['endpoint'])
                                            previous_snapshot = snapshot
                            elif push is True and len(local_snapshots) > 0:
                                # No common snapshot
                                if remote_dataset not in remote_snapshots:
                                    # No remote snapshot, full replication
                                    snapshot = local_snapshots[-1]
                                    size = ZFS.get_size(
                                        dataset, None, snapshot)
                                    Manager.logger.info(
                                        '  {0}@         > {0}@{1} ({2})'.
                                        format(dataset, snapshot, size))
                                    ZFS.replicate(
                                        dataset,
                                        None,
                                        snapshot,
                                        remote_dataset,
                                        replicate_settings.get(
                                            'buffer_size', BUFFER_SIZE),
                                        replicate_settings['endpoint'],
                                        direction='push',
                                        compression=replicate_settings[
                                            'compression'])
                                    ZFS.hold(dataset, snapshot)
                                    ZFS.hold(remote_dataset, snapshot,
                                             replicate_settings['endpoint'])
                            elif push is False and remote_dataset in remote_snapshots and len(
                                    remote_snapshots[remote_dataset]) > 0:
                                # No common snapshot
                                if len(local_snapshots) == 0:
                                    # No local snapshot, full replication
                                    snapshot = remote_snapshots[
                                        remote_dataset][-1]
                                    size = ZFS.get_size(
                                        remote_dataset, None, snapshot,
                                        replicate_settings['endpoint'])
                                    Manager.logger.info(
                                        '  {0}@         > {0}@{1} ({2})'.
                                        format(remote_dataset, snapshot, size))
                                    ZFS.replicate(
                                        remote_dataset,
                                        None,
                                        snapshot,
                                        dataset,
                                        replicate_settings.get(
                                            'buffer_size', BUFFER_SIZE),
                                        replicate_settings['endpoint'],
                                        direction='pull',
                                        compression=replicate_settings[
                                            'compression'])
                                    ZFS.hold(dataset, snapshot)
                                    ZFS.hold(remote_dataset, snapshot,
                                             replicate_settings['endpoint'])
                            Manager.logger.info(
                                'Replicating {0} complete'.format(dataset))

                        # Post execution command
                        if dataset_settings['postexec'] is not None:
                            Helper.run_command(dataset_settings['postexec'],
                                               '/')

                    # Cleaning the snapshots (cleaning is mandatory)
                    if today in local_snapshots or yesterday in local_snapshots:
                        Cleaner.clean(dataset, local_snapshots,
                                      dataset_settings['schema'])

                except Exception as ex:
                    # Per-dataset error boundary: log and keep going with the
                    # remaining datasets rather than aborting the whole run.
                    Manager.logger.error('Exception: {0}'.format(str(ex)))
Example #3
0
    def run(settings):
        """
        Executes a single run where certain datasets might or might not be snapshotted

        Variant of the scheduler loop: for every local ZFS dataset configured
        in ``settings`` it decides whether to act (``.trigger`` file present,
        or the configured ``HH:MM`` time has passed and today's snapshot is
        missing), optionally takes today's ``YYYYMMDD`` snapshot, optionally
        replicates to/from a remote endpoint (incremental from the last
        common snapshot, or a full send when none exists; ``target`` set
        means push, otherwise pull from ``source``), runs the optional
        pre/post commands, and finally prunes snapshots via ``Cleaner.clean``
        — but only when today's snapshot exists locally.

        NOTE(review): unlike the sibling variant, this one passes no buffer
        size to ``ZFS.replicate`` and takes no holds on replicated
        snapshots — confirm that is intentional for this ZFS helper version.

        Exceptions raised while handling one dataset are logged and the loop
        continues with the next dataset.

        Args:
            settings: mapping of dataset name -> configuration dict with keys
                'snapshot', 'replicate', 'time', 'mountpoint', 'preexec',
                'postexec', 'schema' (inferred from the accesses below —
                confirm against the caller/config loader).
        """

        # Snapshot names are date stamps (YYYYMMDD).
        now = datetime.now()
        today = "{0:04d}{1:02d}{2:02d}".format(now.year, now.month, now.day)

        # One upfront query for all local snapshots and datasets.
        snapshots = ZFS.get_snapshots()
        datasets = ZFS.get_datasets()
        for dataset in datasets:
            if dataset in settings:
                try:
                    dataset_settings = settings[dataset]
                    local_snapshots = snapshots.get(dataset, [])

                    take_snapshot = dataset_settings["snapshot"] is True
                    replicate = dataset_settings["replicate"] is not None

                    # Decide whether we need to handle this dataset
                    execute = False
                    if take_snapshot is True or replicate is True:
                        if dataset_settings["time"] == "trigger":
                            # We wait until we find a trigger file in the filesystem
                            trigger_filename = "{0}/.trigger".format(dataset_settings["mountpoint"])
                            if os.path.exists(trigger_filename):
                                Manager.logger.info("Trigger found on {0}".format(dataset))
                                # Consume the trigger so the next run does not
                                # fire again for the same request.
                                os.remove(trigger_filename)
                                execute = True
                        else:
                            # 'time' is an 'HH:MM' string; act once per day,
                            # after that time, if today's snapshot is missing.
                            trigger_time = dataset_settings["time"].split(":")
                            hour = int(trigger_time[0])
                            minutes = int(trigger_time[1])
                            if (
                                now.hour > hour or (now.hour == hour and now.minute >= minutes)
                            ) and today not in local_snapshots:
                                Manager.logger.info("Time passed for {0}".format(dataset))
                                execute = True

                    if execute is True:
                        # Pre exectution command
                        if dataset_settings["preexec"] is not None:
                            Helper.run_command(dataset_settings["preexec"], "/")

                        if take_snapshot is True:
                            # Take today's snapshotzfs
                            Manager.logger.info("Taking snapshot {0}@{1}".format(dataset, today))
                            ZFS.snapshot(dataset, today)
                            # Keep the in-memory view consistent so the
                            # replication and cleanup code below sees it.
                            local_snapshots.append(today)
                            Manager.logger.info("Taking snapshot {0}@{1} complete".format(dataset, today))

                        # Replicating, if required
                        if replicate is True:
                            Manager.logger.info("Replicating {0}".format(dataset))
                            replicate_settings = dataset_settings["replicate"]
                            # 'target' set => push local snapshots out;
                            # otherwise pull from the remote 'source'.
                            push = replicate_settings["target"] is not None
                            remote_dataset = replicate_settings["target"] if push else replicate_settings["source"]
                            remote_snapshots = ZFS.get_snapshots(remote_dataset, replicate_settings["endpoint"])
                            last_common_snapshot = None
                            if remote_dataset in remote_snapshots:
                                if (
                                    push is True
                                ):  # If pushing, we search for the last local snapshot that is remotely available
                                    for snapshot in local_snapshots:
                                        if snapshot in remote_snapshots[remote_dataset]:
                                            last_common_snapshot = snapshot
                                else:  # Else, we search for the last remote snapshot that is locally available
                                    for snapshot in remote_snapshots[remote_dataset]:
                                        if snapshot in local_snapshots:
                                            last_common_snapshot = snapshot
                            if last_common_snapshot is not None:  # There's a common snapshot
                                # Walk forward from the common snapshot,
                                # sending each successive increment.
                                previous_snapshot = None
                                if push is True:
                                    for snapshot in local_snapshots:
                                        if snapshot == last_common_snapshot:
                                            previous_snapshot = last_common_snapshot
                                            continue
                                        if previous_snapshot is not None:
                                            # There is a snapshot on this host that is not yet on the other side.
                                            size = ZFS.get_size(dataset, previous_snapshot, snapshot)
                                            Manager.logger.info(
                                                "  {0}@{1} > {0}@{2} ({3})".format(
                                                    dataset, previous_snapshot, snapshot, size
                                                )
                                            )
                                            ZFS.replicate(
                                                dataset,
                                                previous_snapshot,
                                                snapshot,
                                                remote_dataset,
                                                replicate_settings["endpoint"],
                                                direction="push",
                                                compression=replicate_settings["compression"],
                                            )
                                            previous_snapshot = snapshot
                                else:
                                    for snapshot in remote_snapshots[remote_dataset]:
                                        if snapshot == last_common_snapshot:
                                            previous_snapshot = last_common_snapshot
                                            continue
                                        if previous_snapshot is not None:
                                            # There is a remote snapshot that is not yet on the local host.
                                            size = ZFS.get_size(
                                                remote_dataset,
                                                previous_snapshot,
                                                snapshot,
                                                replicate_settings["endpoint"],
                                            )
                                            Manager.logger.info(
                                                "  {0}@{1} > {0}@{2} ({3})".format(
                                                    remote_dataset, previous_snapshot, snapshot, size
                                                )
                                            )
                                            ZFS.replicate(
                                                remote_dataset,
                                                previous_snapshot,
                                                snapshot,
                                                dataset,
                                                replicate_settings["endpoint"],
                                                direction="pull",
                                                compression=replicate_settings["compression"],
                                            )
                                            previous_snapshot = snapshot
                            elif push is True and len(local_snapshots) > 0:
                                # No common snapshot
                                if remote_dataset not in remote_snapshots:
                                    # No remote snapshot, full replication
                                    snapshot = local_snapshots[-1]
                                    size = ZFS.get_size(dataset, None, snapshot)
                                    Manager.logger.info(
                                        "  {0}@         > {0}@{1} ({2})".format(dataset, snapshot, size)
                                    )
                                    ZFS.replicate(
                                        dataset,
                                        None,
                                        snapshot,
                                        remote_dataset,
                                        replicate_settings["endpoint"],
                                        direction="push",
                                        compression=replicate_settings["compression"],
                                    )
                            elif (
                                push is False
                                and remote_dataset in remote_snapshots
                                and len(remote_snapshots[remote_dataset]) > 0
                            ):
                                # No common snapshot
                                if len(local_snapshots) == 0:
                                    # No local snapshot, full replication
                                    snapshot = remote_snapshots[remote_dataset][-1]
                                    size = ZFS.get_size(remote_dataset, None, snapshot, replicate_settings["endpoint"])
                                    Manager.logger.info(
                                        "  {0}@         > {0}@{1} ({2})".format(remote_dataset, snapshot, size)
                                    )
                                    ZFS.replicate(
                                        remote_dataset,
                                        None,
                                        snapshot,
                                        dataset,
                                        replicate_settings["endpoint"],
                                        direction="pull",
                                        compression=replicate_settings["compression"],
                                    )
                            Manager.logger.info("Replicating {0} complete".format(dataset))

                        # Post execution command
                        if dataset_settings["postexec"] is not None:
                            Helper.run_command(dataset_settings["postexec"], "/")

                    # Cleaning the snapshots (cleaning is mandatory)
                    if today in local_snapshots:
                        Cleaner.clean(dataset, local_snapshots, dataset_settings["schema"])
                except Exception as ex:
                    # Per-dataset error boundary: log and keep going with the
                    # remaining datasets rather than aborting the whole run.
                    Manager.logger.error("Exception: {0}".format(str(ex)))
Example #4
0
    def run(settings):
        """
        Executes a single run where certain datasets might or might not be snapshotted
        """

        now = datetime.now()
        yda = datetime.now() - timedelta(1)
        today = '{0:04d}{1:02d}{2:02d}'.format(now.year, now.month, now.day)
        yesterday = '{0:04d}{1:02d}{2:02d}'.format(yda.year, yda.month, yda.day)

        snapshots = ZFS.get_snapshots()
        datasets = ZFS.get_datasets()
        for dataset in datasets:
            if dataset in settings:
                try:
                    dataset_settings = settings[dataset]
                    local_snapshots = snapshots.get(dataset, [])

                    take_snapshot = dataset_settings['snapshot'] is True
                    replicate = dataset_settings['replicate'] is not None

                    # Decide whether we need to handle this dataset
                    execute = False
                    if take_snapshot is True or replicate is True:
                        if dataset_settings['time'] == 'trigger':
                            # We wait until we find a trigger file in the filesystem
                            trigger_filename = '{0}/.trigger'.format(dataset_settings['mountpoint'])
                            if os.path.exists(trigger_filename):
                                Manager.logger.info('Trigger found on {0}'.format(dataset))
                                os.remove(trigger_filename)
                                execute = True
                        else:
                            trigger_time = dataset_settings['time'].split(':')
                            hour = int(trigger_time[0])
                            minutes = int(trigger_time[1])
                            if (now.hour > hour or (now.hour == hour and now.minute >= minutes)) and today not in local_snapshots:
                                Manager.logger.info('Time passed for {0}'.format(dataset))
                                execute = True

                    if execute is True:
                        # Pre exectution command
                        if dataset_settings['preexec'] is not None:
                            Helper.run_command(dataset_settings['preexec'], '/')

                        if take_snapshot is True:
                            # Take today's snapshotzfs
                            Manager.logger.info('Taking snapshot {0}@{1}'.format(dataset, today))
                            ZFS.snapshot(dataset, today)
                            local_snapshots.append(today)
                            Manager.logger.info('Taking snapshot {0}@{1} complete'.format(dataset, today))

                        # Replicating, if required
                        if replicate is True:
                            Manager.logger.info('Replicating {0}'.format(dataset))
                            replicate_settings = dataset_settings['replicate']
                            push = replicate_settings['target'] is not None
                            remote_dataset = replicate_settings['target'] if push else replicate_settings['source']
                            remote_snapshots = ZFS.get_snapshots(remote_dataset, replicate_settings['endpoint'])
                            last_common_snapshot = None
                            if remote_dataset in remote_snapshots:
                                if push is True:  # If pushing, we search for the last local snapshot that is remotely available
                                    for snapshot in local_snapshots:
                                        if snapshot in remote_snapshots[remote_dataset]:
                                            last_common_snapshot = snapshot
                                else:  # Else, we search for the last remote snapshot that is locally available
                                    for snapshot in remote_snapshots[remote_dataset]:
                                        if snapshot in local_snapshots:
                                            last_common_snapshot = snapshot
                            if last_common_snapshot is not None:  # There's a common snapshot
                                previous_snapshot = None
                                if push is True:
                                    for snapshot in local_snapshots:
                                        if snapshot == last_common_snapshot:
                                            previous_snapshot = last_common_snapshot
                                            continue
                                        if previous_snapshot is not None:
                                            # There is a snapshot on this host that is not yet on the other side.
                                            size = ZFS.get_size(dataset, previous_snapshot, snapshot)
                                            Manager.logger.info('  {0}@{1} > {0}@{2} ({3})'.format(dataset, previous_snapshot, snapshot, size))
                                            ZFS.replicate(dataset, previous_snapshot, snapshot, remote_dataset, replicate_settings.get('buffer_size', BUFFER_SIZE), replicate_settings['endpoint'], direction='push', compression=replicate_settings['compression'])
                                            ZFS.hold(dataset, snapshot)
                                            ZFS.hold(remote_dataset, snapshot, replicate_settings['endpoint'])
                                            ZFS.release(dataset, previous_snapshot)
                                            ZFS.release(remote_dataset, previous_snapshot, replicate_settings['endpoint'])
                                            previous_snapshot = snapshot
                                else:
                                    for snapshot in remote_snapshots[remote_dataset]:
                                        if snapshot == last_common_snapshot:
                                            previous_snapshot = last_common_snapshot
                                            continue
                                        if previous_snapshot is not None:
                                            # There is a remote snapshot that is not yet on the local host.
                                            size = ZFS.get_size(remote_dataset, previous_snapshot, snapshot, replicate_settings['endpoint'])
                                            Manager.logger.info('  {0}@{1} > {0}@{2} ({3})'.format(remote_dataset, previous_snapshot, snapshot, size))
                                            ZFS.replicate(remote_dataset, previous_snapshot, snapshot, dataset, replicate_settings.get('buffer_size', BUFFER_SIZE), replicate_settings['endpoint'], direction='pull', compression=replicate_settings['compression'])
                                            ZFS.hold(dataset, snapshot)
                                            ZFS.hold(remote_dataset, snapshot, replicate_settings['endpoint'])
                                            ZFS.release(dataset, previous_snapshot)
                                            ZFS.release(remote_dataset, previous_snapshot, replicate_settings['endpoint'])
                                            previous_snapshot = snapshot
                            elif push is True and len(local_snapshots) > 0:
                                # No common snapshot
                                if remote_dataset not in remote_snapshots:
                                    # No remote snapshot, full replication
                                    snapshot = local_snapshots[-1]
                                    size = ZFS.get_size(dataset, None, snapshot)
                                    Manager.logger.info('  {0}@         > {0}@{1} ({2})'.format(dataset, snapshot, size))
                                    ZFS.replicate(dataset, None, snapshot, remote_dataset, replicate_settings.get('buffer_size', BUFFER_SIZE), replicate_settings['endpoint'], direction='push', compression=replicate_settings['compression'])
                                    ZFS.hold(dataset, snapshot)
                                    ZFS.hold(remote_dataset, snapshot, replicate_settings['endpoint'])
                            elif push is False and remote_dataset in remote_snapshots and len(remote_snapshots[remote_dataset]) > 0:
                                # No common snapshot
                                if len(local_snapshots) == 0:
                                    # No local snapshot, full replication
                                    snapshot = remote_snapshots[remote_dataset][-1]
                                    size = ZFS.get_size(remote_dataset, None, snapshot, replicate_settings['endpoint'])
                                    Manager.logger.info('  {0}@         > {0}@{1} ({2})'.format(remote_dataset, snapshot, size))
                                    ZFS.replicate(remote_dataset, None, snapshot, dataset, replicate_settings.get('buffer_size', BUFFER_SIZE), replicate_settings['endpoint'], direction='pull', compression=replicate_settings['compression'])
                                    ZFS.hold(dataset, snapshot)
                                    ZFS.hold(remote_dataset, snapshot, replicate_settings['endpoint'])
                            Manager.logger.info('Replicating {0} complete'.format(dataset))

                        # Post execution command
                        if dataset_settings['postexec'] is not None:
                            Helper.run_command(dataset_settings['postexec'], '/')

                    # Cleaning the snapshots (cleaning is mandatory)
                    if today in local_snapshots or yesterday in local_snapshots:
                        Cleaner.clean(dataset, local_snapshots, dataset_settings['schema'])

                except Exception as ex:
                    Manager.logger.error('Exception: {0}'.format(str(ex)))
Пример #5
0
    def clean(dataset, snapshots, schema):
        """
        Destroy snapshots of `dataset` that fall outside the retention schema.

        Parameters:
            dataset: Name of the ZFS dataset whose snapshots are cleaned.
            snapshots: Snapshot names for this dataset; only names formatted
                as YYYYMMDD take part in cleaning, others are ignored.
            schema: Retention schema '<D>d<W>w<M>m<Y>y', e.g. '7d3w11m5y'
                keeps 7 daily, 3 weekly, 11 monthly and 5 yearly snapshots.
        """
        today = datetime.now()

        # Parse the retention schema into integer bucket counts.
        match = re.match(r'^(?P<days>[0-9]+)d(?P<weeks>[0-9]+)w(?P<months>[0-9]+)m(?P<years>[0-9]+)y$', schema)
        if not match:
            Cleaner.logger.info('Got invalid schema for dataset {0}: {1}'.format(dataset, schema))
            return
        settings = {key: int(value) for key, value in match.groupdict().items()}

        # Load date-named (YYYYMMDD) snapshots together with their age in days.
        snapshot_dict = []
        for snapshot in snapshots:
            if re.match(r'^(\d{4})(1[0-2]|0[1-9])(0[1-9]|[1-2]\d|3[0-1])$', snapshot) is not None:
                snapshot_time = datetime.strptime(snapshot, '%Y%m%d')
                snapshot_dict.append({'name': snapshot,
                                      'time': snapshot_time,
                                      'age': (today - snapshot_time).days})

        # Build age buckets: one per day, then widening to weeks (7 days),
        # months (28 days) and years (28 * 12 days). Each bucket key is the
        # maximum age (in days) a snapshot may have to fall into that bucket.
        buckets = {}
        counter = -1
        for _ in range(settings['days']):
            counter += 1
            buckets[counter] = []
        for _ in range(settings['weeks']):
            counter += 7
            buckets[counter] = []
        for _ in range(settings['months']):
            counter += 28
            buckets[counter] = []
        for _ in range(settings['years']):
            counter += (28 * 12)
            buckets[counter] = []

        will_delete = False

        # Assign each snapshot to the tightest bucket it fits in; snapshots
        # older than the largest bucket have reached end-of-life.
        end_of_life_snapshots = []
        for snapshot in snapshot_dict:
            possible_keys = [key for key in buckets if snapshot['age'] <= key]
            if possible_keys:
                buckets[min(possible_keys)].append(snapshot)
            else:
                will_delete = True
                end_of_life_snapshots.append(snapshot)

        # Within each bucket, keep the oldest snapshot and schedule the rest
        # for deletion.
        # NOTE(review): assumes snapshots arrive oldest-first (descending age);
        # if a later snapshot is older than the current candidate, the
        # displaced candidate is never scheduled for deletion — confirm the
        # ordering guarantee of the snapshot source.
        to_delete = {}
        for key in buckets:
            oldest = None
            if len(buckets[key]) == 1:
                oldest = buckets[key][0]
            else:
                for snapshot in buckets[key]:
                    if oldest is None:
                        oldest = snapshot
                    elif snapshot['age'] > oldest['age']:
                        oldest = snapshot
                    else:
                        will_delete = True
                        to_delete[key] = to_delete.get(key, []) + [snapshot]
            to_delete[key] = to_delete.get(key, [])

        if will_delete is True:
            Cleaner.logger.info('Cleaning {0}'.format(dataset))

        # BUGFIX: dict.keys() returns a view in Python 3 and has no .sort();
        # sorted() works on both Python 2 and 3.
        for key in sorted(to_delete):
            for snapshot in to_delete[key]:
                Cleaner.logger.info('  Destroying {0}@{1}'.format(dataset, snapshot['name']))
                ZFS.destroy(dataset, snapshot['name'])
        for snapshot in end_of_life_snapshots:
            Cleaner.logger.info('  Destroying {0}@{1}'.format(dataset, snapshot['name']))
            ZFS.destroy(dataset, snapshot['name'])

        if will_delete is True:
            Cleaner.logger.info('Cleaning {0} complete'.format(dataset))
Пример #6
0
    def clean(dataset, snapshots, schema):
        """
        Destroy snapshots of `dataset` that fall outside the retention schema.

        Held snapshots (per ZFS.is_held) are never destroyed; they are
        reported as skipped instead.

        Parameters:
            dataset: Name of the ZFS dataset whose snapshots are cleaned.
            snapshots: Snapshot names for this dataset; only names formatted
                as YYYYMMDD take part in cleaning, others are ignored.
            schema: Retention schema '<D>d<W>w<M>m<Y>y', e.g. '7d3w11m5y'
                keeps 7 daily, 3 weekly, 11 monthly and 5 yearly snapshots.
        """
        today = datetime.now()

        # Parse the retention schema into integer bucket counts.
        match = re.match(
            r'^(?P<days>[0-9]+)d(?P<weeks>[0-9]+)w(?P<months>[0-9]+)m(?P<years>[0-9]+)y$',
            schema)
        if not match:
            Cleaner.logger.info(
                'Got invalid schema for dataset {0}: {1}'.format(
                    dataset, schema))
            return
        settings = {key: int(value) for key, value in match.groupdict().items()}

        # Load date-named (YYYYMMDD) snapshots together with their age in
        # days; held snapshots are excluded from bucketing entirely.
        snapshot_dict = []
        held_snapshots = []
        for snapshot in snapshots:
            if re.match(r'^(\d{4})(1[0-2]|0[1-9])(0[1-9]|[1-2]\d|3[0-1])$',
                        snapshot) is not None:
                if ZFS.is_held(dataset, snapshot):
                    held_snapshots.append(snapshot)
                    continue
                snapshot_time = datetime.strptime(snapshot, '%Y%m%d')
                snapshot_dict.append({'name': snapshot,
                                      'time': snapshot_time,
                                      'age': (today - snapshot_time).days})

        # Build age buckets: one per day, then widening to weeks (7 days),
        # months (28 days) and years (28 * 12 days). Each bucket key is the
        # maximum age (in days) a snapshot may have to fall into that bucket.
        buckets = {}
        counter = -1
        for _ in range(settings['days']):
            counter += 1
            buckets[counter] = []
        for _ in range(settings['weeks']):
            counter += 7
            buckets[counter] = []
        for _ in range(settings['months']):
            counter += 28
            buckets[counter] = []
        for _ in range(settings['years']):
            counter += (28 * 12)
            buckets[counter] = []

        will_delete = False

        # Assign each snapshot to the tightest bucket it fits in; snapshots
        # older than the largest bucket have reached end-of-life.
        end_of_life_snapshots = []
        for snapshot in snapshot_dict:
            possible_keys = [key for key in buckets if snapshot['age'] <= key]
            if possible_keys:
                buckets[min(possible_keys)].append(snapshot)
            else:
                will_delete = True
                end_of_life_snapshots.append(snapshot)

        # Within each bucket, keep the oldest snapshot and schedule the rest
        # for deletion.
        # NOTE(review): assumes snapshots arrive oldest-first (descending age);
        # if a later snapshot is older than the current candidate, the
        # displaced candidate is never scheduled for deletion — confirm the
        # ordering guarantee of the snapshot source.
        to_delete = {}
        for key in buckets:
            oldest = None
            if len(buckets[key]) == 1:
                oldest = buckets[key][0]
            else:
                for snapshot in buckets[key]:
                    if oldest is None:
                        oldest = snapshot
                    elif snapshot['age'] > oldest['age']:
                        oldest = snapshot
                    else:
                        will_delete = True
                        to_delete[key] = to_delete.get(key, []) + [snapshot]
            to_delete[key] = to_delete.get(key, [])

        if will_delete is True:
            Cleaner.logger.info('Cleaning {0}'.format(dataset))
            for snapshot in held_snapshots:
                Cleaner.logger.info('  Skipping held {0}@{1}'.format(
                    dataset, snapshot))

        # BUGFIX: dict.keys() returns a view in Python 3 and has no .sort();
        # sorted() works on both Python 2 and 3.
        for key in sorted(to_delete):
            for snapshot in to_delete[key]:
                Cleaner.logger.info('  Destroying {0}@{1}'.format(
                    dataset, snapshot['name']))
                ZFS.destroy(dataset, snapshot['name'])
        for snapshot in end_of_life_snapshots:
            Cleaner.logger.info('  Destroying {0}@{1}'.format(
                dataset, snapshot['name']))
            ZFS.destroy(dataset, snapshot['name'])

        if will_delete is True:
            Cleaner.logger.info('Cleaning {0} complete'.format(dataset))
Пример #7
0
class Tests(unittest.TestCase):
    """Browser-driven end-to-end tests for the ZFS-Snap-Diff web UI."""

    # Shared test dataset handle; instantiated once when the class is defined.
    zfs = ZFS()

    def setUp(self) -> None:
        # A fresh headless browser session per test; sanity-check the title.
        self.page = Page(headless=True)
        self.assertIn("ZFS-Snap-Diff", self.page.title())

    def tearDown(self) -> None:
        self.page.close()

    def _open(self, view: str) -> None:
        # Switch to the given view and select the shared test dataset.
        self.page.selectView(view)
        self.page.selectDataset(self.zfs.dataset)

    def testActualFileContent(self) -> None:
        # A known file must be rendered with its current content.
        fs.createTestFile(self.zfs.mountpoint() + "/file.txt",
                          ["firstline", "secondline", "thirdline"])

        self._open("Browse filesystem")
        self.page.findByXPath("//td[contains(.,'file.txt')]").click()
        header = self.page.findById("file-actions-header").text
        self.assertIn("Current content of file.txt", header)
        content = self.page.findByCSS("#file-actions-body > pre > code").text
        self.assertIn("firstline\nsecondline\nthirdline", content)

    def testCreateSnapshotInBrowseFilesystem(self) -> None:
        self._open("Browse filesystem")
        self.page.createSnapshot("create-snapshot-in-browse-filesystem")
        self.assertIn("@create-snapshot-in-browse-filesystem' created",
                      self.page.alertText())

    def testCreateSnapshotInBrowseSnapshots(self) -> None:
        self._open("Browse snapshots")
        self.page.createSnapshot("create-snapshot-in-browse-snapshots")
        self.assertIn("@create-snapshot-in-browse-snapshots' created",
                      self.page.alertText())

    def testDestroySnapshot(self) -> None:
        self._open("Browse snapshots")

        # create snapshot
        self.page.createSnapshot("destroy-snapshot")
        self.page.closeAlert()

        # destroy snapshot
        self.page.destroySnapshot("destroy-snapshot")
        self.assertIn("Snapshot 'destroy-snapshot' destroyed",
                      self.page.alertText())
        self.page.closeAlert()

    def testRenameSnapshot(self) -> None:
        self._open("Browse snapshots")

        # create snapshot
        self.page.createSnapshot("rename-snapshot")
        self.page.closeAlert()

        # rename snapshot
        self.page.renameSnapshot("rename-snapshot", "snapshot-rename")
        self.assertIn(
            "Snapshot 'rename-snapshot' renamed to 'snapshot-rename'",
            self.page.alertText())
        self.page.closeAlert()

    def testCloneSnapshot(self) -> None:
        self._open("Browse snapshots")

        # create snapshot
        self.page.createSnapshot("clone-snapshot")
        self.page.closeAlert()

        # clone snapshot
        expected = ("Snapshot 'clone-snapshot' cloned to '" +
                    self.zfs.pool + "/cloned'")
        self.page.cloneSnapshot("clone-snapshot", "cloned")
        self.assertIn(expected, self.page.alertText())
        self.page.closeAlert()

    def testRollbackSnapshot(self) -> None:
        self._open("Browse snapshots")

        # create snapshot
        self.page.createSnapshot("rollback-snapshot")
        self.assertIn("@rollback-snapshot' created", self.page.alertText())
        self.page.closeAlert()

        # create a file after the snapshot was taken
        target = self.zfs.mountpoint() + "/rollback-test.txt"
        fs.createTestFile(target, ["dummy"])
        self.assertTrue(fs.exists(target))

        # rollback must remove the file again
        self.page.rollbackSnapshot("rollback-snapshot")
        self.assertIn("Snapshot 'rollback-snapshot' rolled back",
                      self.page.alertText())
        self.assertFalse(fs.exists(target))