def main():  # pragma: no cover
    '''
    Entry point for the validator: parse command-line arguments, collect the
    configuration files under the given paths, validate each one against the
    schema, and exit with status 1 if no files are found or any file fails
    validation.
    '''
    args = parse_arguments(*sys.argv[1:])
    logging.basicConfig(level=logging.INFO, format='%(message)s')

    config_filenames = tuple(collect.collect_config_filenames(args.config_paths))
    if len(config_filenames) == 0:
        logger.critical('No files to validate found')
        sys.exit(1)

    found_issues = False
    for config_filename in config_filenames:
        try:
            validate.parse_configuration(config_filename, validate.schema_filename())
        except (ValueError, OSError, validate.Validation_error) as error:
            # Fix: use the module-level logger consistently. The original mixed
            # logger.critical(...) with logging.critical(...), which bypasses
            # any handlers/level configured on this module's logger.
            logger.critical('{}: Error parsing configuration file'.format(config_filename))
            logger.critical(error)
            found_issues = True

    if found_issues:
        sys.exit(1)
    else:
        logger.info(
            'All given configuration files are valid: {}'.format(', '.join(config_filenames))
        )
def test_parse_configuration_raises_for_missing_schema_file():
    '''A schema file that cannot be opened should surface as FileNotFoundError.'''
    mock_config_and_schema('')
    flexmock(sys.modules['builtins']).should_receive('open').with_args(
        'schema.yaml'
    ).and_raise(FileNotFoundError)

    with pytest.raises(FileNotFoundError):
        module.parse_configuration('config.yaml', 'schema.yaml')
def test_parse_configuration_raises_for_missing_schema_file():
    # Arrange: an empty (valid) config, but make opening the schema file fail.
    mock_config_and_schema('')
    builtins_mock = flexmock(sys.modules['builtins'])
    builtins_mock.should_receive('open').with_args('schema.yaml').and_raise(FileNotFoundError)

    # Act + assert: the missing schema error propagates to the caller.
    with pytest.raises(FileNotFoundError):
        module.parse_configuration('config.yaml', 'schema.yaml')
def test_parse_configuration_with_schema_lacking_examples_does_not_raise():
    '''A schema without "example" annotations should still validate cleanly.'''
    mock_config_and_schema(
        '''
        location:
            source_directories:
                - /home

            repositories:
                - hostname.borg
        ''',
        '''
        map:
            location:
                required: true
                map:
                    source_directories:
                        required: true
                        seq:
                            - type: scalar
                    repositories:
                        required: true
                        seq:
                            - type: scalar
        ''',
    )

    module.parse_configuration('config.yaml', 'schema.yaml')
def test_parse_configuration_with_schema_lacking_examples_does_not_raise():
    # Name the two YAML documents up front so the mock call stays short.
    config_yaml = '''
        location:
            source_directories:
                - /home

            repositories:
                - hostname.borg
    '''
    schema_yaml = '''
        map:
            location:
                required: true
                map:
                    source_directories:
                        required: true
                        seq:
                            - type: scalar
                    repositories:
                        required: true
                        seq:
                            - type: scalar
    '''
    mock_config_and_schema(config_yaml, schema_yaml)

    # No examples in the schema: parsing should simply succeed.
    module.parse_configuration('config.yaml', 'schema.yaml')
def test_parse_configuration_raises_for_validation_error():
    '''A config value of the wrong type should fail schema validation.'''
    mock_config_and_schema(
        '''
        location:
            source_directories: yes
            repositories:
                - hostname.borg
        '''
    )

    with pytest.raises(module.Validation_error):
        module.parse_configuration('config.yaml', 'schema.yaml')
def test_parse_configuration_raises_for_validation_error():
    # source_directories should be a sequence, not a scalar boolean.
    bad_config = '''
        location:
            source_directories: yes
            repositories:
                - hostname.borg
    '''
    mock_config_and_schema(bad_config)

    with pytest.raises(module.Validation_error):
        module.parse_configuration('config.yaml', 'schema.yaml')
def test_parse_configuration_merges_include():
    '''An "!include"d mapping merges in, with local keys taking precedence.'''
    mock_config_and_schema(
        '''
        location:
            source_directories:
                - /home
            repositories:
                - hostname.borg

        retention:
            keep_daily: 1
            <<: !include include.yaml
        '''
    )
    flexmock(sys.modules['builtins']).should_receive('open').with_args(
        'include.yaml'
    ).and_return(
        '''
        keep_daily: 7
        keep_hourly: 24
        '''
    )

    result = module.parse_configuration('config.yaml', 'schema.yaml')

    # keep_daily: 1 from the main config wins over the included keep_daily: 7.
    assert result == {
        'location': {'source_directories': ['/home'], 'repositories': ['hostname.borg']},
        'retention': {'keep_daily': 1, 'keep_hourly': 24},
    }
def test_parse_configuration_transforms_file_into_mapping():
    '''Parsing a valid config file yields plain nested dicts/lists.'''
    mock_config_and_schema(
        '''
        location:
            source_directories:
                - /home
                - /etc

            repositories:
                - hostname.borg

        retention:
            keep_daily: 7

        consistency:
            checks:
                - repository
                - archives
        '''
    )

    result = module.parse_configuration('config.yaml', 'schema.yaml')

    assert result == {
        'location': {'source_directories': ['/home', '/etc'], 'repositories': ['hostname.borg']},
        'retention': {'keep_daily': 7},
        'consistency': {'checks': ['repository', 'archives']},
    }
def load_configurations(config_filenames):
    '''
    Given a sequence of configuration filenames, load and validate each configuration file.
    Return the results as a tuple of: dict of configuration filename to corresponding parsed
    configuration, and sequence of logging.LogRecord instances containing any parse errors.
    '''
    # Maps config filename -> parsed config dict, preserving input order.
    configs = collections.OrderedDict()
    logs = []

    # Parse and load each configuration file, collecting (not raising) errors.
    for config_filename in config_filenames:
        try:
            configs[config_filename] = validate.parse_configuration(
                config_filename, validate.schema_filename()
            )
        except (ValueError, OSError, validate.Validation_error) as error:
            # Two records per failure: a filename header, then the error detail.
            header_record = logging.makeLogRecord(
                dict(
                    levelno=logging.CRITICAL,
                    levelname='CRITICAL',
                    msg='{}: Error parsing configuration file'.format(config_filename),
                )
            )
            detail_record = logging.makeLogRecord(
                dict(levelno=logging.CRITICAL, levelname='CRITICAL', msg=error)
            )
            logs.append(header_record)
            logs.append(detail_record)

    return (configs, logs)
def test_parse_configuration_transforms_file_into_mapping():
    # A config with all three retention frequencies plus consistency checks.
    mock_config_and_schema(
        '''
        location:
            source_directories:
                - /home
                - /etc

            repositories:
                - hostname.borg

        retention:
            keep_minutely: 60
            keep_hourly: 24
            keep_daily: 7

        consistency:
            checks:
                - repository
                - archives
        '''
    )

    result = module.parse_configuration('config.yaml', 'schema.yaml')

    expected = {
        'location': {'source_directories': ['/home', '/etc'], 'repositories': ['hostname.borg']},
        'retention': {'keep_daily': 7, 'keep_hourly': 24, 'keep_minutely': 60},
        'consistency': {'checks': ['repository', 'archives']},
    }
    assert result == expected
def run_configuration(config_filename, args):  # pragma: no cover
    '''
    Parse a single configuration file, and execute its defined pruning, backups, and/or
    consistency checks.
    '''
    logger.info('{}: Parsing configuration file'.format(config_filename))
    config = validate.parse_configuration(config_filename, validate.schema_filename())
    section_names = ('location', 'storage', 'retention', 'consistency', 'hooks')
    (location, storage, retention, consistency, hooks) = (
        config.get(section_name, {}) for section_name in section_names
    )

    try:
        local_path = location.get('local_path', 'borg')
        remote_path = location.get('remote_path')
        borg_create.initialize_environment(storage)

        # The before/after hooks only run when a backup is actually created.
        if args.create:
            hook.execute_hook(hooks.get('before_backup'), config_filename, 'pre-backup')

        _run_commands(args, consistency, local_path, location, remote_path, retention, storage)

        if args.create:
            hook.execute_hook(hooks.get('after_backup'), config_filename, 'post-backup')
    except (OSError, CalledProcessError):
        # Best-effort error hook, then let the original exception propagate.
        hook.execute_hook(hooks.get('on_error'), config_filename, 'on-error')
        raise
def test_parse_configuration_merges_include():
    # Main config pulls extra retention keys from an included file; local
    # keep_daily overrides the included one.
    config_text = '''
        location:
            source_directories:
                - /home
            repositories:
                - hostname.borg

        retention:
            keep_daily: 1
            <<: !include include.yaml
    '''
    include_text = '''
        keep_daily: 7
        keep_hourly: 24
    '''
    mock_config_and_schema(config_text)
    builtins_mock = flexmock(sys.modules['builtins'])
    builtins_mock.should_receive('open').with_args('include.yaml').and_return(include_text)

    result = module.parse_configuration('config.yaml', 'schema.yaml')

    assert result == {
        'location': {'source_directories': ['/home'], 'repositories': ['hostname.borg']},
        'retention': {'keep_daily': 1, 'keep_hourly': 24},
    }
def collect_configuration_run_summary_logs(config_filenames, args):
    '''
    Given a sequence of configuration filenames and parsed command-line arguments as an
    argparse.ArgumentParser instance, run each configuration file and yield a series of
    logging.LogRecord instances containing summary information about each run.
    '''
    # Maps config filename -> parsed config dict, preserving input order.
    configs = collections.OrderedDict()

    # Parse and load each configuration file, yielding parse errors as records.
    for config_filename in config_filenames:
        try:
            logger.info('{}: Parsing configuration file'.format(config_filename))
            configs[config_filename] = validate.parse_configuration(
                config_filename, validate.schema_filename()
            )
        except (ValueError, OSError, validate.Validation_error) as error:
            yield logging.makeLogRecord(
                dict(
                    levelno=logging.CRITICAL,
                    msg='{}: Error parsing configuration file'.format(config_filename),
                )
            )
            yield logging.makeLogRecord(dict(levelno=logging.CRITICAL, msg=error))

    # Run cross-file validation checks; a failure here aborts everything.
    if args.extract or (args.list and args.archive):
        try:
            validate.guard_configuration_contains_repository(args.repository, configs)
        except ValueError as error:
            yield logging.makeLogRecord(dict(levelno=logging.CRITICAL, msg=error))
            return

    # Execute the actions corresponding to each configuration file.
    for config_filename, config in configs.items():
        try:
            run_configuration(config_filename, config, args)
        except (ValueError, OSError, CalledProcessError) as error:
            yield logging.makeLogRecord(
                dict(
                    levelno=logging.CRITICAL,
                    msg='{}: Error running configuration file'.format(config_filename),
                )
            )
            yield logging.makeLogRecord(dict(levelno=logging.CRITICAL, msg=error))
        else:
            yield logging.makeLogRecord(
                dict(
                    levelno=logging.INFO,
                    msg='{}: Successfully ran configuration file'.format(config_filename),
                )
            )

    if not config_filenames:
        yield logging.makeLogRecord(
            dict(
                levelno=logging.CRITICAL,
                msg='{}: No configuration files found'.format(' '.join(args.config_paths)),
            )
        )
def main():  # pragma: no cover
    '''
    Command-line entry point: parse arguments, load every configuration file,
    and run the requested prune/create/check actions against each configured
    repository, exiting with status 1 on any error.
    '''
    try:
        args = parse_arguments(*sys.argv[1:])
        config_filenames = tuple(collect.collect_config_filenames(args.config_paths))
        convert.guard_configuration_upgraded(LEGACY_CONFIG_PATH, config_filenames)

        if len(config_filenames) == 0:
            raise ValueError(
                'Error: No configuration files found in: {}'.format(' '.join(args.config_paths))
            )

        for config_filename in config_filenames:
            config = validate.parse_configuration(config_filename, validate.schema_filename())
            (location, storage, retention, consistency) = (
                config.get(section_name, {})
                for section_name in ('location', 'storage', 'retention', 'consistency')
            )
            remote_path = location.get('remote_path')
            create.initialize(storage)

            for repository in location['repositories']:
                if args.prune:
                    prune.prune_archives(
                        args.verbosity, repository, retention, remote_path=remote_path
                    )
                if args.create:
                    create.create_archive(args.verbosity, repository, location, storage)
                if args.check:
                    check.check_archives(
                        args.verbosity, repository, consistency, remote_path=remote_path
                    )
    except (ValueError, OSError, CalledProcessError) as error:
        print(error, file=sys.stderr)
        sys.exit(1)
def test_parse_configuration_passes_through_quoted_punctuation():
    '''Quoted punctuation in YAML survives parsing unchanged.'''
    # Escape backslash and double-quote so the characters are legal inside a
    # double-quoted YAML scalar.
    escaped_punctuation = string.punctuation.replace('\\', r'\\').replace('"', r'\"')
    mock_config_and_schema(
        '''
        location:
            source_directories:
                - /home

            repositories:
                - "{}.borg"
        '''.format(escaped_punctuation)
    )

    result = module.parse_configuration('config.yaml', 'schema.yaml')

    assert result == {
        'location': {
            'source_directories': ['/home'],
            'repositories': ['{}.borg'.format(string.punctuation)],
        }
    }
def test_parse_configuration_applies_normalization():
    # exclude_if_present is given as a scalar; normalization should wrap it
    # into a single-element list.
    mock_config_and_schema(
        '''
        location:
            source_directories:
                - /home
            repositories:
                - hostname.borg
            exclude_if_present: .nobackup
        '''
    )

    result = module.parse_configuration('config.yaml', 'schema.yaml')

    expected = {
        'location': {
            'source_directories': ['/home'],
            'repositories': ['hostname.borg'],
            'exclude_if_present': ['.nobackup'],
        }
    }
    assert result == expected
def test_parse_configuration_passes_through_quoted_punctuation():
    # Build a repository name containing every punctuation character, escaped
    # for inclusion in a double-quoted YAML scalar.
    escaped_punctuation = string.punctuation.replace('\\', r'\\').replace('"', r'\"')
    config_text = '''
        location:
            source_directories:
                - /home

            repositories:
                - "{}.borg"
    '''.format(escaped_punctuation)
    mock_config_and_schema(config_text)

    result = module.parse_configuration('config.yaml', 'schema.yaml')

    # The parsed value contains the raw (unescaped) punctuation.
    assert result == {
        'location': {
            'source_directories': ['/home'],
            'repositories': ['{}.borg'.format(string.punctuation)],
        },
    }
def test_parse_configuration_applies_overrides():
    '''A "section.key=value" override replaces the value from the file.'''
    mock_config_and_schema(
        '''
        location:
            source_directories:
                - /home
            repositories:
                - hostname.borg
            local_path: borg1
        '''
    )

    result = module.parse_configuration(
        'config.yaml', 'schema.yaml', overrides=['location.local_path=borg2']
    )

    assert result == {
        'location': {
            'source_directories': ['/home'],
            'repositories': ['hostname.borg'],
            'local_path': 'borg2',
        }
    }
def run_configuration(config_filename, args):  # pragma: no cover
    '''
    Parse a single configuration file, and execute its defined pruning, backups, and/or
    consistency checks.
    '''
    logger.info('{}: Parsing configuration file'.format(config_filename))
    config = validate.parse_configuration(config_filename, validate.schema_filename())
    section_names = ('location', 'storage', 'retention', 'consistency', 'hooks')
    location, storage, retention, consistency, hooks = (
        config.get(name, {}) for name in section_names
    )

    try:
        remote_path = location.get('remote_path')
        create.initialize_environment(storage)
        hook.execute_hook(hooks.get('before_backup'), config_filename, 'pre-backup')

        for unexpanded_repository in location['repositories']:
            # Expand "~" so repositories may be given relative to $HOME.
            repository = os.path.expanduser(unexpanded_repository)

            if args.prune:
                logger.info('{}: Pruning archives'.format(repository))
                prune.prune_archives(
                    args.verbosity, repository, retention, remote_path=remote_path
                )
            if args.create:
                logger.info('{}: Creating archive'.format(repository))
                create.create_archive(args.verbosity, repository, location, storage)
            if args.check:
                logger.info('{}: Running consistency checks'.format(repository))
                check.check_archives(
                    args.verbosity, repository, consistency, remote_path=remote_path
                )

        hook.execute_hook(hooks.get('after_backup'), config_filename, 'post-backup')
    except (OSError, CalledProcessError):
        # Run the error hook, then re-raise the original failure.
        hook.execute_hook(hooks.get('on_error'), config_filename, 'on-error')
        raise
def collect_configuration_run_summary_logs(config_filenames, args):
    '''
    Given a sequence of configuration filenames and parsed command-line arguments as an
    argparse.ArgumentParser instance, run each configuration file and yield a series of
    logging.LogRecord instances containing summary information about each run.

    As a side effect of running through these configuration files, output their JSON results, if
    any, to stdout.
    '''
    # Maps config filename -> parsed config dict, preserving input order.
    configs = collections.OrderedDict()

    # Parse and load each configuration file, yielding parse errors as records.
    for config_filename in config_filenames:
        try:
            logger.info('{}: Parsing configuration file'.format(config_filename))
            configs[config_filename] = validate.parse_configuration(
                config_filename, validate.schema_filename()
            )
        except (ValueError, OSError, validate.Validation_error) as error:
            yield logging.makeLogRecord(
                dict(
                    levelno=logging.CRITICAL,
                    msg='{}: Error parsing configuration file'.format(config_filename),
                )
            )
            yield logging.makeLogRecord(dict(levelno=logging.CRITICAL, msg=error))

    # Run cross-file validation checks; a failure here aborts everything.
    if args.extract or (args.list and args.archive):
        try:
            validate.guard_configuration_contains_repository(args.repository, configs)
        except ValueError as error:
            yield logging.makeLogRecord(dict(levelno=logging.CRITICAL, msg=error))
            return

    # Execute the actions for each configuration file, accumulating JSON output.
    json_results = []
    for config_filename, config in configs.items():
        try:
            json_results.extend(list(run_configuration(config_filename, config, args)))
        except (ValueError, OSError, CalledProcessError) as error:
            yield logging.makeLogRecord(
                dict(
                    levelno=logging.CRITICAL,
                    msg='{}: Error running configuration file'.format(config_filename),
                )
            )
            yield logging.makeLogRecord(dict(levelno=logging.CRITICAL, msg=error))
        else:
            yield logging.makeLogRecord(
                dict(
                    levelno=logging.INFO,
                    msg='{}: Successfully ran configuration file'.format(config_filename),
                )
            )

    if json_results:
        sys.stdout.write(json.dumps(json_results))

    if not config_filenames:
        yield logging.makeLogRecord(
            dict(
                levelno=logging.CRITICAL,
                msg='{}: No configuration files found'.format(' '.join(args.config_paths)),
            )
        )
def test_parse_configuration_raises_for_syntax_error():
    '''Malformed YAML in the configuration file should raise ValueError.'''
    mock_config_and_schema('foo:\nbar')

    with pytest.raises(ValueError):
        module.parse_configuration('config.yaml', 'schema.yaml')
def test_parse_configuration_raises_for_missing_config_file():
    '''With nothing mocked, opening the config file fails and propagates.'''
    with pytest.raises(FileNotFoundError):
        module.parse_configuration('config.yaml', 'schema.yaml')
def test_parse_configuration_raises_for_missing_config_file():
    # No config/schema mocks are installed, so the real open() fails.
    with pytest.raises(FileNotFoundError):
        module.parse_configuration('config.yaml', 'schema.yaml')
def run_configuration(config_filename, args):  # pragma: no cover
    '''
    Parse a single configuration file, and execute its defined pruning, backups, and/or
    consistency checks.

    Takes a configuration filename and parsed command-line arguments. Raises on parse
    failure or if any borg action fails (after running the "on_error" hook).
    '''
    logger.info('{}: Parsing configuration file'.format(config_filename))
    config = validate.parse_configuration(config_filename, validate.schema_filename())
    # Missing sections default to empty dicts so .get() lookups below are safe.
    (location, storage, retention, consistency, hooks) = (
        config.get(section_name, {})
        for section_name in ('location', 'storage', 'retention', 'consistency', 'hooks')
    )

    try:
        local_path = location.get('local_path', 'borg')
        remote_path = location.get('remote_path')
        borg_create.initialize_environment(storage)
        hook.execute_hook(hooks.get('before_backup'), config_filename, 'pre-backup')

        # Run each requested action against every configured repository.
        for unexpanded_repository in location['repositories']:
            repository = os.path.expanduser(unexpanded_repository)
            # Suffix appended to log lines when --dry-run is active.
            dry_run_label = ' (dry run; not making any changes)' if args.dry_run else ''
            if args.prune:
                logger.info('{}: Pruning archives{}'.format(repository, dry_run_label))
                borg_prune.prune_archives(
                    args.verbosity,
                    args.dry_run,
                    repository,
                    storage,
                    retention,
                    local_path=local_path,
                    remote_path=remote_path,
                )
            if args.create:
                logger.info('{}: Creating archive{}'.format(repository, dry_run_label))
                borg_create.create_archive(
                    args.verbosity,
                    args.dry_run,
                    repository,
                    location,
                    storage,
                    local_path=local_path,
                    remote_path=remote_path,
                )
            if args.check:
                # NOTE: consistency checks take no dry_run flag.
                logger.info('{}: Running consistency checks'.format(repository))
                borg_check.check_archives(
                    args.verbosity,
                    repository,
                    storage,
                    consistency,
                    local_path=local_path,
                    remote_path=remote_path,
                )
            if args.list:
                logger.info('{}: Listing archives'.format(repository))
                borg_list.list_archives(
                    args.verbosity,
                    repository,
                    storage,
                    local_path=local_path,
                    remote_path=remote_path,
                )
            if args.info:
                logger.info('{}: Displaying summary info for archives'.format(repository))
                borg_info.display_archives_info(
                    args.verbosity,
                    repository,
                    storage,
                    local_path=local_path,
                    remote_path=remote_path,
                )

        hook.execute_hook(hooks.get('after_backup'), config_filename, 'post-backup')
    except (OSError, CalledProcessError):
        # Run the error hook, then re-raise the original failure to the caller.
        hook.execute_hook(hooks.get('on_error'), config_filename, 'on-error')
        raise
def test_parse_configuration_raises_for_syntax_error():
    # "foo:\nbar" is invalid YAML (mapping followed by a bare scalar).
    invalid_yaml = 'foo:\nbar'
    mock_config_and_schema(invalid_yaml)

    with pytest.raises(ValueError):
        module.parse_configuration('config.yaml', 'schema.yaml')