def test_filter(self):
        k = RetentionExpression(
            '1d:4/d, 4d:daily, 1w:2/4d, 1m:weekly, 12m:1/y, 23m:none')
        #k = KeepExpression('10')

        start = time.perf_counter()
        (items_to_remove_by_condition,
         items_to_keep) = k.filter(self.snapshot_names, lambda x: x.timestamp)

        items_to_remove_amount = sum(
            len(items) for items in items_to_remove_by_condition.values())
        items_to_keep_amount = len(items_to_keep)

        # for i in items_to_keep:
        #     print(i)
        #
        # for c in items_to_remove_by_condition.keys():
        #     print(c)
        #     for i in items_to_remove_by_condition[c]:
        #         print(i)

        print('Items to remove: %d, to keep: %d' %
              (items_to_remove_amount, items_to_keep_amount))
        self.assertEqual(
            items_to_remove_amount + items_to_keep_amount,
            len(self.snapshot_names),
            'Sum of items to keep and remove must be total number of items')
        print('elapsed: %.3fs' % (time.perf_counter() - start))
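
The test above exercises the contract of RetentionExpression.filter: it returns a dict mapping each retention condition to the items removed under it, plus the list of items kept. A minimal standalone sketch of that invariant, assuming btrfs-sxbackup is importable and snapshot timestamps are datetime objects; Item is a hypothetical stand-in for SnapshotName:

from collections import namedtuple
from datetime import datetime, timedelta

from btrfs_sxbackup.retention import RetentionExpression

# Hypothetical stand-in for SnapshotName; only a timestamp is needed here
Item = namedtuple('Item', ['timestamp'])

# One fake snapshot per hour over the last 30 days, newest first
now = datetime.now()
items = [Item(now - timedelta(hours=h)) for h in range(24 * 30)]

expr = RetentionExpression('1d:4/d, 1w:daily, 2m:none')
(to_remove_by_condition, to_keep) = expr.filter(items, lambda i: i.timestamp)

# Invariant checked by the test above: every item is either kept or removed
removed = sum(len(v) for v in to_remove_by_condition.values())
assert removed + len(to_keep) == len(items)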
Example #2
    def read(self):
        cparser = ConfigParser()

        if os.path.exists(self.__CONFIG_FILENAME):
            with open(self.__CONFIG_FILENAME, 'r') as file:
                cparser.read_file(file)

            source_retention_str = cparser.get(self.__SECTION_NAME, self.__KEY_SOURCE_RETENTION, fallback=None)
            dest_retention_str = cparser.get(self.__SECTION_NAME, self.__KEY_DEST_RETENTION, fallback=None)
            self.__source_retention = RetentionExpression(source_retention_str) if source_retention_str else None
            self.__destination_retention = RetentionExpression(dest_retention_str) if dest_retention_str else None
            self.__log_ident = cparser.get(self.__SECTION_NAME, self.__KEY_LOG_IDENT, fallback=None)
            self.__email_recipient = cparser.get(self.__SECTION_NAME, self.__KEY_EMAIL_RECIPIENT, fallback=None)
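
read() only builds RetentionExpression objects for keys that are actually present; cparser.get(..., fallback=None) plus the conditional keeps a sparse config file from raising. A self-contained sketch of that pattern using only the stdlib; the section and key names here are hypothetical, not necessarily the ones btrfs-sxbackup uses:

import io
from configparser import ConfigParser

cparser = ConfigParser()
cparser.read_file(io.StringIO('[global]\nsource-retention = 3\n'))

# Present key -> its value; absent key -> None instead of NoOptionError
source_retention_str = cparser.get('global', 'source-retention', fallback=None)
dest_retention_str = cparser.get('global', 'destination-retention', fallback=None)

print(source_retention_str)  # '3'
print(dest_retention_str)    # None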
Example #3
    def test_filter(self):
        k = RetentionExpression('1d:4/d, 4d:daily, 1w:2/d, 1m:weekly, 3m:none')
        #k = KeepExpression('10')

        start = time.perf_counter()
        (items_to_remove_by_condition, items_to_keep) = k.filter(self.snapshot_names, lambda x: x.timestamp)

        items_to_remove_amount = sum(len(items) for items in items_to_remove_by_condition.values())
        items_to_keep_amount = len(items_to_keep)

        # for i in items_to_keep:
        #     print(i)
        #
        # for c in items_to_remove_by_condition.keys():
        #     print(c)
        #     for i in items_to_remove_by_condition[c]:
        #         print(i)

        print('Items to remove: %d, to keep: %d' % (items_to_remove_amount, items_to_keep_amount))
        self.assertEqual(items_to_remove_amount + items_to_keep_amount, len(self.snapshot_names),
                         'Sum of items to keep and remove must be total number of items')
        print('elapsed: %.3fs' % (time.perf_counter() - start))
Example #4
    def purge_snapshots(self, retention: RetentionExpression = None):
        """
        Purge snapshots
        :param retention: Optional override of location's retention
        :type retention: RetentionExpression
        """
        if retention is None:
            retention = self.__retention
        else:
            self._log_info('Retention expression override [%s]' % retention)

        """ Clean out excess backups/snapshots. The newest one (index 0) will always be kept. """
        if retention is not None and len(self.__snapshots) > 1:
            (to_remove_by_condition, to_retain) = retention.filter(self.__snapshots[1:],
                                                                   lambda sn: sn.name.timestamp)

            for condition, to_remove in to_remove_by_condition.items():
                self._log_info('removing %d snapshot%s due to retention [%s]: %s'
                               % (len(to_remove),
                                  's' if len(to_remove) > 1 else '',
                                  str(condition), ', '.join(str(x) for x in to_remove)))
                self.remove_snapshots([str(x) for x in to_remove])
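Note how the slice passed to retention.filter guarantees the documented behavior: the newest snapshot never reaches the filter, so no retention expression can remove it. A trivial sketch with hypothetical snapshot names:

# Snapshots are assumed sorted newest-first, as in purge_snapshots above
snapshots = ['snap-newest', 'snap-older', 'snap-oldest']

candidates = snapshots[1:]  # only these are eligible for removal
assert snapshots[0] not in candidates
print('eligible for removal:', candidates)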
Example #5
    @staticmethod
    def init(source_url: parse.SplitResult,
             dest_url: parse.SplitResult,
             source_retention: RetentionExpression = None,
             dest_retention: RetentionExpression = None,
             compress: bool = None) -> 'Job':
        """
        Initializes a new backup job
        :param source_url: Source URL (split result)
        :param dest_url: Destination URL (split result)
        :param source_retention: Source retention expression
        :param dest_retention: Destination retention expression
        :param compress: Compress flag
        :return: Backup job
        :rtype: Job
        """
        source = JobLocation(source_url, location_type=JobLocation.TYPE_SOURCE)
        dest = JobLocation(dest_url, location_type=JobLocation.TYPE_DESTINATION) if dest_url else None

        if source.has_configuration():
            raise Error('source is already initialized')

        if dest and dest.has_configuration():
            raise Error('destination is already initialized')

        # New uuid for both locations
        source.uuid = uuid.uuid4()
        if dest:
            dest.uuid = source.uuid

        # Set parameters
        if source_retention:
            source.retention = source_retention
        if not source.retention:
            source.retention = Configuration.instance().source_retention
        if not source.retention:
            source.retention = RetentionExpression(_DEFAULT_RETENTION_SOURCE)

        if dest:
            if dest_retention:
                dest.retention = dest_retention
            if not dest.retention:
                dest.retention = Configuration.instance().destination_retention
            if not dest.retention:
                dest.retention = RetentionExpression(_DEFAULT_RETENTION_DESTINATION)

        if compress:
            source.compress = compress
            if dest:
                dest.compress = compress
        if not source.compress:
            source.compress = False
        if dest and not dest.compress:
            dest.compress = False

        # Prepare environments
        _logger.info('preparing source and destination environment')
        source.prepare_environment()
        if dest:
            dest.prepare_environment()

        # Writing configurations
        source.write_configuration(dest)
        if dest:
            dest.write_configuration(source)

        _logger.info(source)
        if dest:
            _logger.info(dest)

        _logger.info('initialized successfully')

        return Job(source, dest)
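
For reference, a hedged usage sketch of Job.init as it is driven from the _CMD_INIT branch of main() (see Example #9 below): the URLs are pre-split with urllib.parse, and the paths and host here are hypothetical placeholders that would have to point at real btrfs subvolumes:

from urllib import parse

job = Job.init(
    source_url=parse.urlsplit('/mnt/data'),
    dest_url=parse.urlsplit('ssh://backup-host/backups/data'),
    source_retention=RetentionExpression('1d:4/d, 1w:daily'),
    dest_retention=RetentionExpression('1m:weekly, 6m:none'),
    compress=True)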
Example #6
    def read_configuration(self) -> 'JobLocation':
        """
        Read configuration file from container subvolume
        :return: Corresponding location
        """
        # Read configuration file
        out = self.exec_check_output('cat "%s"' % self.configuration_filename)
        file = out.decode().splitlines()

        corresponding_location = None

        parser = ConfigParser()
        parser.read_file(file)

        section = parser.sections()[0]

        # Section name implies location type
        if section == JobLocation.TYPE_SOURCE:
            location_type = JobLocation.TYPE_SOURCE
        elif section == JobLocation.TYPE_DESTINATION:
            location_type = JobLocation.TYPE_DESTINATION
        else:
            raise ValueError('invalid section name/location type [%s]' % section)

        # Parse config string values
        location_uuid = parser.get(section, self.__KEY_UUID, fallback=None)
        source = parser.get(section, self.__KEY_SOURCE, fallback=None)
        source_container = parser.get(section, self.__KEY_SOURCE_CONTAINER, fallback=None)
        destination = parser.get(section, self.__KEY_DESTINATION, fallback=None)
        # Keep has been renamed to retention.
        # Supporting the old name for backward compatibility.
        retention = parser.get(section, self.__KEY_RETENTION, fallback=None)
        if not retention:
            retention = parser.get(section, self.__KEY_KEEP, fallback=None)

        # Convert to instances where applicable
        location_uuid = UUID(location_uuid) if location_uuid else None
        source = parse.urlsplit(source) if source else None
        source_container = source_container or None
        destination = parse.urlsplit(destination) if destination else None
        retention = RetentionExpression(retention) if retention else None
        compress = bool(distutils.util.strtobool(
            parser.get(section, self.__KEY_COMPRESS, fallback='False')))

        if location_type == JobLocation.TYPE_SOURCE:
            # Amend url/container relpath from current path for source locations
            # if container relative path was not provided
            if not self.container_subvolume_relpath:
                source_container = os.path.basename(self.container_subvolume_path.rstrip(os.path.sep))
                source = parse.SplitResult(scheme=self.url.scheme,
                                           netloc=self.url.netloc,
                                           path=os.path.abspath(os.path.join(self.url.path, os.path.pardir)),
                                           query=self.url.query,
                                           fragment=None)

                self.url = source
                self.container_subvolume_relpath = source_container

            if destination:
                corresponding_location = JobLocation(destination,
                                                     location_type=JobLocation.TYPE_DESTINATION)

        elif location_type == JobLocation.TYPE_DESTINATION:
            if source:
                corresponding_location = JobLocation(source,
                                                     location_type=JobLocation.TYPE_SOURCE,
                                                     container_subvolume_relpath=source_container)

        self.location_type = location_type
        self.uuid = location_uuid
        self.retention = retention
        self.compress = compress

        return corresponding_location
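
The retention/keep lookup above is a general pattern for renamed configuration keys: read the new name first, then fall back to the legacy one. A stdlib-only sketch; the section and key strings mirror the idea, not necessarily the project's exact names:

import io
from configparser import ConfigParser

parser = ConfigParser()
parser.read_file(io.StringIO('[source]\nkeep = 5\n'))

# Prefer the new key name, fall back to the legacy one
retention = parser.get('source', 'retention', fallback=None)
if not retention:
    retention = parser.get('source', 'keep', fallback=None)

print(retention)  # '5', read from the legacy 'keep' key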
Example #7
import logging
import uuid
import io
import os
import distutils.util
from configparser import ConfigParser
from uuid import UUID
from urllib import parse

from btrfs_sxbackup.entities import Snapshot
from btrfs_sxbackup.entities import SnapshotName
from btrfs_sxbackup.retention import RetentionExpression
from btrfs_sxbackup import shell
from btrfs_sxbackup.entities import Subvolume

_logger = logging.getLogger(__name__)
_DEFAULT_RETENTION_SOURCE = RetentionExpression('3')
_DEFAULT_RETENTION_DESTINATION = RetentionExpression('2d: 1/d, 2w:3/w, 1m:1/w, 2m:none')
_DEFAULT_CONTAINER_RELPATH = '.sxbackup'


class Error(Exception):
    pass


class Configuration:
    """ btrfs-sxbackup global configuration file """

    __instance = None

    __CONFIG_FILENAME = '/etc/btrfs-sxbackup.conf'
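
The examples call Configuration.instance() throughout, but the snippet only shows the __instance slot. A minimal sketch of the lazy-singleton accessor this implies; an assumption, not the project's actual implementation:

class Configuration:
    """ Minimal lazy-singleton sketch; not the project's actual code """

    __instance = None

    @classmethod
    def instance(cls) -> 'Configuration':
        # Create the shared instance on first access, then reuse it
        if cls.__instance is None:
            cls.__instance = cls()
        return cls.__instance


assert Configuration.instance() is Configuration.instance()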
Example #8
    @staticmethod
    def init(source_url: parse.SplitResult,
             dest_url: parse.SplitResult,
             source_retention: RetentionExpression = None,
             dest_retention: RetentionExpression = None,
             compress: bool = None,
             container_subvolume_relpath: str = _DEFAULT_CONTAINER_RELPATH,
             identical_filesystem: bool = False) -> 'Job':
        """
        Initializes a new backup job
        :param source_url: Source url string
        :param dest_url: Destination url string
        :param source_retention: Source retention expression string
        :param dest_retention: Destination retention expression string
        :param compress: Compress flag
        :return: Backup job
        :rtype: Job
        """
        source = JobLocation(source_url,
                             location_type=JobLocation.TYPE_SOURCE,
                             container_subvolume_relpath=container_subvolume_relpath)
        dest = JobLocation(dest_url, location_type=JobLocation.TYPE_DESTINATION) if dest_url else None

        if source.has_configuration():
            raise Error('source is already initialized')

        if dest and dest.has_configuration():
            raise Error('destination is already initialized')

        # New uuid for both locations
        source.uuid = uuid.uuid4()
        if dest:
            dest.uuid = source.uuid

        # Set parameters
        if source_retention:
            source.retention = source_retention
        if not source.retention:
            source.retention = Configuration.instance().source_retention
        if not source.retention:
            source.retention = RetentionExpression(_DEFAULT_RETENTION_SOURCE)

        if dest:
            if dest_retention:
                dest.retention = dest_retention
            if not dest.retention:
                dest.retention = Configuration.instance().destination_retention
            if not dest.retention:
                dest.retention = RetentionExpression(_DEFAULT_RETENTION_DESTINATION)

        if compress:
            source.compress = compress
            if dest:
                dest.compress = compress
        if not source.compress:
            source.compress = False
        if dest and not dest.compress:
            dest.compress = False
        
        # Check whether source and destination are on identical hosts
        if dest and str(source.url.netloc) == str(dest.url.netloc):
            _logger.info('Identical hosts for source and destination')
            # Check whether the filesystem UUIDs are identical
            if source.filesystem == dest.filesystem:
                _logger.info('Identical filesystem for source and destination')
                source.identical_filesystem = True
                dest.identical_filesystem = True
            else:
                source.identical_filesystem = False
                dest.identical_filesystem = False
                _logger.info('Source and destination are on different filesystems')
            
        # Prepare environments
        _logger.info('preparing source and destination environment')
        source.prepare_environment()
        if dest:
            dest.prepare_environment()

        # Writing configurations
        source.write_configuration(dest)
        if dest:
            dest.write_configuration(source)

        _logger.info(source)
        if dest:
            _logger.info(dest)

        _logger.info('initialized successfully')

        return Job(source, dest)
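
The identical-filesystem check compares source.filesystem with dest.filesystem, but the snippet does not show where that value comes from. One plausible derivation, purely an assumption about the attribute's implementation, is parsing the UUID out of btrfs filesystem show:

import re
import subprocess


def filesystem_uuid(path: str) -> str:
    # Hypothetical sketch: `btrfs filesystem show <path>` prints a header
    # line containing "Label: ...  uuid: <uuid>"; extract the uuid from it
    out = subprocess.check_output(['btrfs', 'filesystem', 'show', path])
    match = re.search(r'uuid:\s*([0-9a-f-]+)', out.decode())
    if not match:
        raise ValueError('no filesystem uuid found for %s' % path)
    return match.group(1)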
Example #9
def main():
    # Parse arguments
    parser = ArgumentParser(prog=_APP_NAME)
    parser.add_argument('-q',
                        '--quiet',
                        dest='quiet',
                        action='store_true',
                        default=False,
                        help='do not log to stdout')
    parser.add_argument('--version',
                        action='version',
                        version='%s v%s' % (_APP_NAME, __version__))
    parser.add_argument(
        '-v',
        dest='verbosity',
        action='count',
        help='can be specified multiple times to increase verbosity')

    subparsers = parser.add_subparsers()
    subparsers.required = True
    subparsers.dest = 'command'

    # Reusable options
    compress_args = ['-c', '--compress']
    compress_kwargs = {
        'action': 'store_true',
        'help': 'enables compression during transmission. Requires lzop to be'
                ' installed on both source and destination',
        'default': None
    }

    source_retention_args = ['-sr', '--source-retention']
    source_retention_kwargs = {
        'type': str,
        'default': None,
        'help': 'expression defining which source snapshots to retain/cleanup.'
                ' can be a static number (of backups) or a more complex expression like'
                ' "1d:4/d, 1w:daily, 2m:none", literally translating to: "1 day from now keep'
                ' 4 backups a day, 1 week from now keep daily backups,'
                ' 2 months from now keep none"'
    }

    destination_retention_args = ['-dr', '--destination-retention']
    destination_retention_kwargs = {
        'type': str,
        'default': None,
        'help': 'expression defining which destination snapshots to retain/cleanup.'
                ' can be a static number (of backups) or a more complex'
                ' expression (see --source-retention argument)'
    }

    subvolumes_args = ['subvolumes']
    subvolumes_kwargs = {
        'type': str,
        'nargs': '+',
        'metavar': 'subvolume',
        'help':
        'backup job source or destination subvolume. local path or SSH url'
    }

    # Initialize command cmdline params
    p_init = subparsers.add_parser(_CMD_INIT, help='initialize backup job')
    p_init.add_argument(
        'source_subvolume',
        type=str,
        metavar='source-subvolume',
        help='source subvolume to backup. local path or ssh url')
    p_init.add_argument(
        'destination_subvolume',
        type=str,
        metavar='destination-subvolume',
        nargs='?',
        default=None,
        help='optional destination subvolume receiving backup snapshots.'
             ' local path or ssh url')
    p_init.add_argument(*source_retention_args, **source_retention_kwargs)
    p_init.add_argument(*destination_retention_args,
                        **destination_retention_kwargs)
    p_init.add_argument(*compress_args, **compress_kwargs)

    p_destroy = subparsers.add_parser(
        _CMD_DESTROY,
        help='destroy backup job by removing configuration files from source'
        ' and destination. backup snapshots will be kept on both sides'
        ' by default.')
    p_destroy.add_argument(*subvolumes_args, **subvolumes_kwargs)
    p_destroy.add_argument(
        '--purge',
        action='store_true',
        help='removes all backup snapshots from source and destination')

    # Update command cmdline params
    p_update = subparsers.add_parser(_CMD_UPDATE, help='update backup job')
    p_update.add_argument(*subvolumes_args, **subvolumes_kwargs)
    p_update.add_argument(*source_retention_args, **source_retention_kwargs)
    p_update.add_argument(*destination_retention_args,
                          **destination_retention_kwargs)
    p_update.add_argument(*compress_args, **compress_kwargs)
    p_update.add_argument('-nc',
                          '--no-compress',
                          action='store_true',
                          help='disable compression during transmission')

    # Run command cmdline params
    p_run = subparsers.add_parser(_CMD_RUN, help='run backup job')
    p_run.add_argument(*subvolumes_args, **subvolumes_kwargs)
    p_run.add_argument(
        '-m',
        '--mail',
        type=str,
        nargs='?',
        const='',
        help='enables email notifications. If an email address is given, it overrides the'
             ' default email-recipient setting in /etc/btrfs-sxbackup.conf')
    p_run.add_argument(
        '-li',
        '--log-ident',
        dest='log_ident',
        type=str,
        default=None,
        help='log ident used for syslog logging, defaults to script name')

    # Info command cmdline params
    p_info = subparsers.add_parser(_CMD_INFO, help='backup job info')
    p_info.add_argument(*subvolumes_args, **subvolumes_kwargs)

    # Purge command cmdline params
    p_purge = subparsers.add_parser(
        _CMD_PURGE, help="purge backups according to retention expressions")
    p_purge.add_argument(*subvolumes_args, **subvolumes_kwargs)
    purge_source_retention_kwargs = source_retention_kwargs.copy()
    purge_destination_retention_kwargs = destination_retention_kwargs.copy()
    purge_source_retention_kwargs['help'] = \
        'Optionally override %s' % purge_source_retention_kwargs['help']
    purge_destination_retention_kwargs['help'] = \
        'Optionally override %s' % purge_destination_retention_kwargs['help']
    p_purge.add_argument(*source_retention_args,
                         **purge_source_retention_kwargs)
    p_purge.add_argument(*destination_retention_args,
                         **purge_destination_retention_kwargs)

    # Transfer
    p_transfer = subparsers.add_parser(_CMD_TRANSFER, help='transfer snapshot')
    p_transfer.add_argument(
        'source_subvolume',
        type=str,
        metavar='source-subvolume',
        help='source subvolume to transfer. local path or ssh url')
    p_transfer.add_argument(
        'destination_subvolume',
        type=str,
        metavar='destination-subvolume',
        help='destination subvolume. local path or ssh url')
    p_transfer.add_argument(*compress_args, **compress_kwargs)
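
    # Example invocations the parser above accepts (an illustrative sketch:
    # it assumes the console script is installed as 'btrfs-sxbackup' and that
    # the _CMD_* constants match the sub-command names used in the help texts):
    #
    #   btrfs-sxbackup init -sr '1d:4/d, 1w:daily, 2m:none' -c \
    #       /mnt/data ssh://backup-host/backups/data
    #   btrfs-sxbackup run -m admin@example.com /mnt/data
    #   btrfs-sxbackup purge -dr '1m:weekly, 6m:none' /mnt/data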

    # Initialize logging
    args = parser.parse_args()

    # Read global configuration
    Configuration.instance().read()

    logger = logging.getLogger()

    if not args.quiet:
        log_std_handler = logging.StreamHandler(sys.stdout)
        log_std_handler.setFormatter(
            logging.Formatter('%(levelname)s %(message)s'))
        logger.addHandler(log_std_handler)

    log_memory_handler = None
    log_trace = False
    email_recipient = None

    def handle_exception(ex: Exception):
        """
        Exception handler
        :param ex:
        :return:
        """

        # Log exception message
        if len(str(ex)) > 0:
            logger.error('%s' % str(ex))

        if isinstance(ex, CalledProcessError):
            if ex.output:
                output = ex.output.decode().strip()
                if len(output) > 0:
                    logger.error('%s' % output)

        if log_trace:
            # Log stack trace
            logger.error(traceback.format_exc())

        # Email notification
        if email_recipient:
            try:
                # Format message and send
                msg = '\n'.join(log_memory_handler.formatter.format(record)
                                for record in log_memory_handler.buffer)
                mail.send(email_recipient, '%s FAILED' % _APP_NAME, msg)
            except Exception as ex:
                logger.error(str(ex))

    # Syslog handler
    if args.command == _CMD_RUN:
        log_syslog_handler = logging.handlers.SysLogHandler('/dev/log')
        log_syslog_handler.setFormatter(
            logging.Formatter(_APP_NAME +
                              '[%(process)d] %(levelname)s %(message)s'))
        logger.addHandler(log_syslog_handler)

        # Log ident support: the command line value takes precedence,
        # otherwise fall back to the global configuration
        log_ident = args.log_ident if args.log_ident else Configuration.instance().log_ident
        if log_ident:
            log_syslog_handler.ident = log_ident + ' '

        # Mail notification support
        if args.mail is not None:
            email_recipient = args.mail if args.mail else Configuration.instance().email_recipient

            # Memory handler will buffer output for sending via mail later if needed
            log_memory_handler = logging.handlers.MemoryHandler(capacity=-1)
            log_memory_handler.setFormatter(
                logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
            logger.addHandler(log_memory_handler)

    if args.verbosity and args.verbosity >= 1:
        logger.setLevel(logging.DEBUG)
        log_trace = True
    else:
        logger.setLevel(logging.INFO)
    logger.info('%s v%s' % (_APP_NAME, __version__))

    exitcode = 0

    try:
        if args.command == _CMD_RUN:
            for subvolume in args.subvolumes:
                try:
                    job = Job.load(urllib.parse.urlsplit(subvolume))
                    job.run()
                except Exception as e:
                    handle_exception(e)
                    exitcode = 1

        elif args.command == _CMD_INIT:
            source_retention = (RetentionExpression(args.source_retention)
                                if args.source_retention else None)
            destination_retention = (RetentionExpression(args.destination_retention)
                                     if args.destination_retention else None)
            job = Job.init(
                source_url=urllib.parse.urlsplit(args.source_subvolume),
                source_retention=source_retention,
                dest_url=(urllib.parse.urlsplit(args.destination_subvolume)
                          if args.destination_subvolume else None),
                dest_retention=destination_retention,
                compress=args.compress)

        elif args.command == _CMD_UPDATE:
            source_retention = (RetentionExpression(args.source_retention)
                                if args.source_retention else None)
            dest_retention = (RetentionExpression(args.destination_retention)
                              if args.destination_retention else None)
            # -c forces compression on, -nc forces it off, otherwise leave unchanged
            compress = True if args.compress else (False if args.no_compress else None)
            for subvolume in args.subvolumes:
                try:
                    job = Job.load(urllib.parse.urlsplit(subvolume))
                    job.update(source_retention=source_retention,
                               dest_retention=dest_retention,
                               compress=compress)
                except Exception as e:
                    handle_exception(e)
                    exitcode = 1

        elif args.command == _CMD_DESTROY:
            for subvolume in args.subvolumes:
                try:
                    job = Job.load(urllib.parse.urlsplit(subvolume))
                    job.destroy(purge=args.purge)
                except Exception as e:
                    handle_exception(e)
                    exitcode = 1

        elif args.command == _CMD_INFO:
            for subvolume in args.subvolumes:
                try:
                    job = Job.load(urllib.parse.urlsplit(subvolume),
                                   raise_errors=False)
                    job.print_info()
                except Exception as e:
                    handle_exception(e)
                    exitcode = 1

        elif args.command == _CMD_PURGE:
            source_retention = (RetentionExpression(args.source_retention)
                                if args.source_retention else None)
            dest_retention = (RetentionExpression(args.destination_retention)
                              if args.destination_retention else None)
            for subvolume in args.subvolumes:
                try:
                    job = Job.load(urllib.parse.urlsplit(subvolume))
                    job.purge(source_retention=source_retention,
                              dest_retention=dest_retention)
                except Exception as e:
                    handle_exception(e)
                    exitcode = 1

        elif args.command == _CMD_TRANSFER:
            source = Location(urllib.parse.urlsplit(args.source_subvolume))
            destination = Location(
                urllib.parse.urlsplit(args.destination_subvolume))
            source.transfer_btrfs_snapshot(destination, compress=args.compress)

    except SystemExit as e:
        if e.code != 0:
            raise

    except KeyboardInterrupt:
        exitcode = 1

    except Exception as e:
        handle_exception(e)
        exitcode = 1

    sys.exit(exitcode)