    def setUp(self):
        """ Configure debug logging to stdout and generate 120 days of hourly snapshot names. """
        logger = logging.getLogger()
        logger.addHandler(logging.StreamHandler(sys.stdout))
        logger.setLevel(logging.DEBUG)

        # Generate series of snapshot names
        snapshot_names = list()
        now = datetime.now(timezone.utc)
        for i in range(0, 24 * 120):
            timestamp = now - timedelta(hours=i)
            sn = SnapshotName(timestamp=timestamp)
            snapshot_names.append(sn)

        self.snapshot_names = snapshot_names
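
The fixture above and the test_instantiation example further down assume a SnapshotName class with a timestamp attribute, a parse() method and a string form like 'sx-20150102-132010-utc'. A minimal sketch of such a class, purely an assumption to keep the snippets self-contained (the backup tool's own implementation may differ):

from datetime import datetime, timezone


class SnapshotName:
    """ Hypothetical sketch of the interface assumed by these examples; not the tool's actual class. """

    _FORMAT = 'sx-%Y%m%d-%H%M%S-utc'

    def __init__(self, timestamp=None):
        # Default to the current UTC time when no timestamp is given
        self.timestamp = timestamp if timestamp is not None else datetime.now(timezone.utc)

    @staticmethod
    def parse(name: str) -> 'SnapshotName':
        # Raises ValueError for names which do not match the expected pattern
        return SnapshotName(datetime.strptime(name, SnapshotName._FORMAT).replace(tzinfo=timezone.utc))

    def __str__(self) -> str:
        return self.timestamp.strftime(self._FORMAT)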
Example #2
    def retrieve_snapshots(self):
        """ Determine snapshot names. Snapshot names are sorted in reverse order (newest first).
        stored internally (self.snapshot_names) and also returned. """

        self._log_info('retrieving snapshots')

        output = self.exec_check_output('btrfs sub list -o "%s"' %
                                        self.container_subvolume_path)

        # output is delivered as a byte sequence, decode to unicode string and split lines
        lines = output.decode().splitlines()

        subvolumes = [Subvolume.parse(line) for line in lines]

        # verify snapshot subvolume path consistency
        if len(subvolumes) > 0:
            subvol_path = os.path.dirname(subvolumes[0].path)
            subvol_inconsistent_path = \
                next((s.path for s in subvolumes if os.path.dirname(s.path) != subvol_path), None)

            if subvol_inconsistent_path:
                raise Exception(
                    'inconsistent path detected at %s [%s != %s], indicating a nested'
                    ' folder/subvolume structure within a container subvolume.'
                    ' each backup job must have a dedicated source/destination container subvolume'
                    % (self.url.path, subvol_path, subvol_inconsistent_path))

        # sort and return
        snapshots = []
        for sv in subvolumes:
            try:
                snapshots.append(
                    Snapshot(SnapshotName.parse(os.path.basename(sv.path)),
                             sv))
            except Exception:
                # skip snapshot names which cannot be parsed
                pass

        self.__snapshots = sorted(snapshots,
                                  key=lambda s: s.name.timestamp,
                                  reverse=True)
        return self.__snapshots
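
Subvolume.parse() above consumes one line of 'btrfs subvolume list -o' output, which looks like 'ID 258 gen 9 top level 5 path .sxbackup/sx-20150102-132010-utc'. A possible sketch of such a parser, assuming a simple class with id, gen and path attributes (names are assumptions; the tool's actual Subvolume class may differ):

import re


class Subvolume:
    """ Hypothetical sketch of the subvolume record parsed above; not the tool's actual class. """

    _LINE_PATTERN = re.compile(r'^ID (\d+) gen (\d+) top level (\d+) path (.+)$')

    def __init__(self, subvol_id: int, gen: int, path: str):
        self.id = subvol_id
        self.gen = gen
        self.path = path

    @staticmethod
    def parse(line: str) -> 'Subvolume':
        # Example line: 'ID 258 gen 9 top level 5 path .sxbackup/sx-20150102-132010-utc'
        match = Subvolume._LINE_PATTERN.match(line.strip())
        if not match:
            raise ValueError('unexpected btrfs subvolume list line [%s]' % line)
        return Subvolume(int(match.group(1)), int(match.group(2)), match.group(4))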
Example #4
    def test_instantiation(self):
        print(SnapshotName.parse('sx-20150102-132010-utc'))
Example #5
    def run(self):
        """ Performs backup run """
        starting_time = time.monotonic()

        _logger.info(self.source)
        if self.destination:
            _logger.info(self.destination)

        # Prepare environments
        _logger.info('preparing environment')
        self.source.prepare_environment()
        if self.destination:
            self.destination.prepare_environment()

        # Retrieve snapshot names of both source and destination
        self.source.retrieve_snapshots()
        if self.destination:
            self.destination.retrieve_snapshots()

        new_snapshot_name = SnapshotName()
        if len(self.source.snapshots) > 0 \
                and new_snapshot_name.timestamp <= self.source.snapshots[0].name.timestamp:
            raise Error(('current snapshot name [%s] would be older than newest existing snapshot [%s] '
                         'which may indicate a system time problem')
                        % (new_snapshot_name, self.source.snapshots[0].name))

        temp_name = self.source.create_temp_name()

        # btrfs send command/subprocess
        source_parent_path = None
        if len(self.source.snapshots) > 0:
            # Indicates if an incremental snapshot can/should be performed
            incremental = False

            # Latest source and destination snapshot timestamp has to match for incremental transfer
            if self.destination is not None:
                if len(self.destination.snapshots) > 0:
                    if self.source.snapshots[0].name.timestamp != self.destination.snapshots[0].name.timestamp:
                        _logger.warning(
                            ('Latest timestamps of source [%s] and destination [%s] do not match. A full snapshot will '
                             'be transferred')
                            % (self.source.snapshots[0].name.timestamp, self.destination.snapshots[0].name.timestamp))
                    else:
                        incremental = True
                else:
                    _logger.warning('Destination has no snapshots, a full snapshot will be transferred')

            else:
                incremental = True

            # Set source parent path in case incremental transfer is applicable
            if incremental:
                source_parent_path = os.path.join(self.source.container_subvolume_path,
                                                  str(self.source.snapshots[0].name))

        # Create source snapshot
        temp_source_path = self.source.create_snapshot(temp_name)

        # Recovery handler, swallows all exceptions and logs them
        def recover(action, warn_msg: str):
            try:
                action()
            except Exception as e:
                _logger.error(str(e))
                _logger.warning(warn_msg)

        temp_dest_path = None
        final_dest_path = None
        # Transfer temporary snapshot
        if self.destination:
            temp_dest_path = self.destination.build_path(temp_name)
            final_dest_path = os.path.join(self.destination.url.path, str(new_snapshot_name))

            try:
                self.source.transfer_btrfs_snapshot(self.destination,
                                                    source_path=temp_source_path,
                                                    source_parent_path=source_parent_path,
                                                    compress=self.source.compress)
            except BaseException as e:
                recover(lambda: self.source.remove_btrfs_subvolume(temp_source_path),
                        'could not remove temporary source snapshot [%s]' % temp_source_path)
                raise e

        try:
            final_source_path = os.path.join(self.source.container_subvolume_path, str(new_snapshot_name))

            # Rename temporary source snapshot to final snapshot name
            self.source.move_file(temp_source_path, final_source_path)
        except BaseException as e:
            recover(lambda: self.source.remove_btrfs_subvolume(temp_source_path),
                    'could not remove temporary source snapshot [%s]' % temp_source_path)
            if self.destination:
                recover(lambda: self.destination.remove_btrfs_subvolume(temp_dest_path),
                        'could not remove temporary destination snapshot [%s]' % temp_dest_path)
            raise e

        if self.destination:
            try:
                # Rename temporary destination snapshot to final snapshot name
                self.destination.move_file(temp_dest_path, final_dest_path)
            except Exception as e:
                # Try to avoid inconsistent state by removing successfully created source snapshot
                recover(lambda: self.source.remove_btrfs_subvolume(final_source_path),
                        'could not remove source snapshot [%s] after failed finalization of destination snapshot'
                        % final_source_path)
                recover(lambda: self.destination.remove_btrfs_subvolume(temp_dest_path),
                        'could not remove temporary destination snapshot [%s]' % temp_dest_path)
                raise e

        # Update snapshot name lists
        self.source.snapshots.insert(0, Snapshot(new_snapshot_name, None))
        if self.destination:
            self.destination.snapshots.insert(0, Snapshot(new_snapshot_name, None))

        # Clean out excess backups/snapshots
        self.source.purge_snapshots()
        if self.destination:
            self.destination.purge_snapshots()

        _logger.info('backup %s created successfully in %s'
                     % (new_snapshot_name,
                        time.strftime("%H:%M:%S", time.gmtime(time.monotonic() - starting_time))))
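
transfer_btrfs_snapshot() itself is not shown in this example; conceptually it pipes btrfs send into btrfs receive and switches to an incremental stream when source_parent_path is set. A rough local-only sketch under that assumption (function name and signature are hypothetical; the real method additionally handles compression and remote/SSH endpoints):

import subprocess


def transfer_snapshot_sketch(source_path: str, destination_path: str, source_parent_path: str = None):
    """ Hypothetical local-only btrfs send/receive pipe; not the tool's actual transfer method. """
    send_cmd = ['btrfs', 'send']
    if source_parent_path:
        # A parent snapshot present on both sides turns the stream into an incremental one
        send_cmd += ['-p', source_parent_path]
    send_cmd.append(source_path)

    receive_cmd = ['btrfs', 'receive', destination_path]

    send = subprocess.Popen(send_cmd, stdout=subprocess.PIPE)
    receive = subprocess.Popen(receive_cmd, stdin=send.stdout)
    send.stdout.close()  # let send get SIGPIPE if receive exits early
    receive_rc = receive.wait()
    send_rc = send.wait()
    if send_rc != 0 or receive_rc != 0:
        raise Exception('btrfs send/receive failed [send=%d, receive=%d]' % (send_rc, receive_rc))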