Example #1
def main(args: argparse.Namespace) -> None:
    staticconf.DictConfiguration({'yes': args.yes})
    destination, destination_str = _parse_destination(args.dest, args.name)
    before_timestamp = parse_time(args.before) if args.before else int(time.time())

    backup_store = get_backup_store(args.name)

    with backup_store.unlock(preserve_scratch=args.preserve_scratch_dir):
        files_to_restore: List[ManifestEntry]
        if args.sha:
            files_to_restore = backup_store.manifest.get_entries_by_sha(
                args.sha)
            if not files_to_restore:
                raise ValueError(
                    f'Sha {args.sha} does not match anything in the store')

        else:
            search_results = backup_store.manifest.search(
                like=args.like,
                before_timestamp=before_timestamp,
                history_limit=1,
            )
            # Restore the most recent version of all files that haven't been deleted
            files_to_restore = [h[0] for _, h in search_results if h[0].sha]

        if _confirm_restore(files_to_restore, destination, destination_str):
            _restore(files_to_restore, destination, backup_store)
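The `args` namespace consumed above comes from an argparse subcommand parser. The following sketch wires up such a parser; the flag names are assumptions inferred from the attributes main() reads, not the project's actual CLI definition.

import argparse

# Hypothetical wiring for the 'restore' subcommand; flag names are inferred
# from the attributes main() reads and may differ from the real CLI.
parser = argparse.ArgumentParser(prog='restore')
parser.add_argument('--name', required=True, help='backup set to restore from')
parser.add_argument('--dest', help='restore destination')
parser.add_argument('--before', help='restore the latest version before this time')
parser.add_argument('--sha', help='restore entries matching this sha')
parser.add_argument('--like', help='restrict restore to matching file names')
parser.add_argument('--yes', action='store_true', help='skip confirmation prompts')
parser.add_argument('--preserve-scratch-dir', action='store_true')

args = parser.parse_args(['--name', 'data1_backup', '--yes'])
# main(args) would then search the manifest and restore the matching files.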
Example #2
def test_base_sha_corrupted(capsys):
    sleep(1)  # make sure the backup timestamps differ
    with itest_setup(
        test_file_history,
        _TestFileData('bar', 'asdfasdfasdf'),
        _TestFileData('foo', 'asdfasdfasd'),
    ):
        BACKUP_ARGS.dry_run = False
        backup(BACKUP_ARGS)

    backup_store = get_backup_store('data1_backup')
    with backup_store.unlock(preserve_scratch=True):
        backup_store.manifest._cursor.execute(
            'update manifest set key_pair=? where abs_file_name like ?',
            (b'hjkl', '%bar'),
        )
        backup_store.manifest._commit()

    VERIFY_ARGS.fast = False
    verify(VERIFY_ARGS)
    out, _ = capsys.readouterr()
    assert 'ERROR' in out

    # Shouldn't create duplicate entries when we fix
    with backup_store.unlock(preserve_scratch=True):
        backup_store.manifest._cursor.execute('select * from manifest')
        rows = backup_store.manifest._cursor.fetchall()
        assert len(rows) == 3

    # After the fix, verify should be clean
    verify(VERIFY_ARGS)
    out, _ = capsys.readouterr()
    assert 'ERROR' not in out
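The tests in this listing assert on console output via pytest's built-in capsys fixture. A minimal, self-contained illustration of that capture pattern (the printed messages are made up for the sketch):

# Minimal pytest sketch of the capsys pattern used throughout these tests;
# the test body and messages here are illustrative, not from backuppy.
def test_capsys_pattern(capsys):
    print('ERROR: manifest entry is corrupt')
    out, _ = capsys.readouterr()  # capture and reset stdout/stderr
    assert 'ERROR' in out

    print('all entries seem good')
    out, _ = capsys.readouterr()  # only output since the last readouterr()
    assert 'ERROR' not in out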
Example #3
def main(args: argparse.Namespace) -> None:
    """ entry point for the 'backup' subcommand """
    if args.dry_run:
        logger.warning('Running in dry-run mode; no files will be backed up!')
    logger.info(f'Starting backup for {args.name}')
    backup_store = get_backup_store(args.name)

    with backup_store.unlock(dry_run=args.dry_run,
                             preserve_scratch=args.preserve_scratch_dir):
        marked_files: Set[str] = set()
        for base_path in staticconf.read_list('directories',
                                              namespace=args.name):
            abs_base_path = os.path.abspath(base_path)
            exclusions = compile_exclusions(
                staticconf.read_list('exclusions', [], namespace=args.name))
            marked_files |= _scan_directory(
                abs_base_path,
                backup_store,
                exclusions,
                args.dry_run,
            )

        for abs_file_name in backup_store.manifest.files() - marked_files:
            logger.info(f'{abs_file_name} has been deleted')
            if not args.dry_run:
                backup_store.manifest.delete(abs_file_name)
    logger.info(f'Backup for {args.name} finished')
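The directory loop above reads 'directories' and 'exclusions' from a staticconf namespace named after the backup set. A minimal sketch of the configuration shape it expects, with the keys taken from the read_list() calls and illustrative values:

import staticconf

# Illustrative config for a backup set named 'data1_backup'; the keys match
# the read_list() calls in main(), the values are made up for the sketch.
staticconf.DictConfiguration(
    {
        'directories': ['/home/user/data'],
        'exclusions': [r'.*\.tmp$'],
    },
    namespace='data1_backup',
)

assert staticconf.read_list('directories', namespace='data1_backup') == ['/home/user/data']
assert staticconf.read_list('exclusions', [], namespace='data1_backup') == [r'.*\.tmp$']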
Example #4
def test_duplicate_entries(both_bad, capsys):
    VERIFY_ARGS.fast = True
    backup_store = get_backup_store('data1_backup')
    with backup_store.unlock(preserve_scratch=True):
        # We can't test this with the "unique" index in place
        backup_store.manifest._cursor.execute('drop index mfst_unique_idx')
        backup_store.manifest._cursor.execute('select * from manifest')
        row = backup_store.manifest._cursor.fetchone()
        backup_store.manifest._cursor.execute(
            '''
            insert into manifest (abs_file_name, sha, uid, gid, mode, key_pair, commit_timestamp)
            values (?, ?, ?, ?, ?, ?, ?)
            ''',
            (
                row['abs_file_name'],
                row['sha'],
                row['uid'],
                row['gid'],
                row['mode'],
                row['key_pair'],
                row['commit_timestamp'] + 10,
            )
        )
        if both_bad:
            backup_store.manifest._cursor.execute('update manifest set key_pair=?', (b'asdf',))
        backup_store.manifest._commit()
    verify(VERIFY_ARGS)
    out, _ = capsys.readouterr()
    assert 'Found 2 duplicate entries' in out
    if both_bad:
        assert 'is corrupt' in out
        assert 'No valid entries' in out
    else:
        assert 'seems good' in out

    # Shouldn't create duplicate entries when we fix
    with backup_store.unlock(preserve_scratch=True):
        backup_store.manifest._cursor.execute('select * from manifest')
        rows = backup_store.manifest._cursor.fetchall()
        assert len(rows) == 1

    with backup_store.unlock(preserve_scratch=True):
        # Restore the "unique" index
        backup_store.manifest._cursor.execute(
            'create unique index mfst_unique_idx on manifest(abs_file_name, sha, uid, gid, mode)',
        )
        backup_store.manifest._commit()

    # After the fix, verify should be clean
    VERIFY_ARGS.fast = False
    verify(VERIFY_ARGS)
    out, _ = capsys.readouterr()
    assert 'ERROR' not in out
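The duplicate-entry scenario can be reproduced against a plain SQLite database. In the sketch below, the column names and the mfst_unique_idx definition are taken verbatim from the test above; the column types and row values are assumptions:

import sqlite3

# Standalone sketch of the duplicate-entry scenario; column names and the
# unique index come from the test above, column types are assumed.
conn = sqlite3.connect(':memory:')
conn.execute(
    '''
    create table manifest (
        abs_file_name text, sha text, uid integer, gid integer,
        mode integer, key_pair blob, commit_timestamp integer
    )
    '''
)
conn.execute(
    'create unique index mfst_unique_idx on manifest(abs_file_name, sha, uid, gid, mode)'
)
row = ('/data/foo', 'abc123', 1000, 1000, 0o644, b'key', 1234567890)
conn.execute('insert into manifest values (?, ?, ?, ?, ?, ?, ?)', row)

# With the index in place, an identical entry (even at a later timestamp)
# is rejected, which is why the test drops the index first.
try:
    conn.execute(
        'insert into manifest values (?, ?, ?, ?, ?, ?, ?)',
        row[:-1] + (row[-1] + 10,),
    )
except sqlite3.IntegrityError:
    print('duplicate rejected by mfst_unique_idx')

conn.execute('drop index mfst_unique_idx')
conn.execute(
    'insert into manifest values (?, ?, ?, ?, ?, ?, ?)',
    row[:-1] + (row[-1] + 10,),
)
dupes = conn.execute(
    '''
    select abs_file_name, count(*) from manifest
    group by abs_file_name, sha, uid, gid, mode having count(*) > 1
    '''
).fetchall()
print(dupes)  # [('/data/foo', 2)]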
Example #5
def main(args: argparse.Namespace) -> None:
    after_timestamp = parse_time(args.after) if args.after else 0
    before_timestamp = parse_time(args.before) if args.before else int(time.time())

    backup_store = get_backup_store(args.name)
    with backup_store.unlock():
        search_results = backup_store.manifest.search(
            after_timestamp=after_timestamp,
            before_timestamp=before_timestamp,
            file_limit=args.file_limit,
            history_limit=args.history_limit,
            like=args.like,
        )
    if not args.details:
        _print_summary(args.name, search_results, args.deleted, args.changed)
    else:
        _print_details(args.name, search_results, args.deleted, args.changed,
                       args.sha_length)
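parse_time is assumed here to turn a user-supplied time string into epoch seconds, which is what the default int(time.time()) upper bound implies. A hypothetical stand-in shows the resulting [after, before] search window:

import time

def parse_time_stub(timestr):
    # Hypothetical stand-in for backuppy's parse_time; assumes a fixed
    # '%Y-%m-%d' format and returns epoch seconds like the real helper.
    return int(time.mktime(time.strptime(timestr, '%Y-%m-%d')))

# With neither --after nor --before given, the window is [0, now].
after_timestamp = 0
before_timestamp = int(time.time())

# With explicit bounds, both endpoints come from parse_time.
after_timestamp = parse_time_stub('2020-01-01')
before_timestamp = parse_time_stub('2020-06-01')
assert after_timestamp < before_timestamp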
Example #6
def main(args: argparse.Namespace) -> None:
    staticconf.YamlConfiguration(args.config, flatten=False)
    backup_set_config = staticconf.read('backups')[args.name]
    staticconf.DictConfiguration(backup_set_config, namespace=args.name)
    backup_store = get_backup_store(args.name)

    if args.manifest:
        manifest = Manifest(args.filename)
        private_key_filename = backup_store.config.read('private_key_filename',
                                                        default='')
        lock_manifest(
            manifest,
            private_key_filename,
            backup_store._save,
            backup_store._load,
            backup_store.options,
        )
    else:
        with backup_store.unlock():
            backup_store.save_if_new(args.filename)
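This example loads the whole YAML file unflattened, then re-registers the selected backup set's block under its own namespace. The same two-step pattern is shown below with an inline dict standing in for the YAML file; the nested contents are illustrative:

import staticconf

# Inline dict standing in for the YAML file; the 'backups' top-level key is
# what main() reads, the nested contents are made up for the sketch.
staticconf.DictConfiguration(
    {'backups': {'data1_backup': {'directories': ['/home/user/data']}}},
    flatten=False,
)
backup_set_config = staticconf.read('backups')['data1_backup']
staticconf.DictConfiguration(backup_set_config, namespace='data1_backup')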
Example #7
def test_shas_with_bad_key_pairs(both_bad, capsys):
    with itest_setup(
        test_file_history,
        _TestFileData('bar', 'asdfasdfasdf'),
    ):
        BACKUP_ARGS.dry_run = False
        backup(BACKUP_ARGS)

    VERIFY_ARGS.fast = True
    backup_store = get_backup_store('data1_backup')
    with backup_store.unlock(preserve_scratch=True):
        backup_store.manifest._cursor.execute(
            'update manifest set key_pair=? where abs_file_name like ?',
            (b'asdf', '%bar'),
        )
        if both_bad:
            backup_store.manifest._cursor.execute(
                'update manifest set key_pair=? where abs_file_name like ?',
                (b'hjkl', '%foo'),
            )
        backup_store.manifest._commit()
    verify(VERIFY_ARGS)
    out, _ = capsys.readouterr()
    assert 'Found 2 entries for' in out
    if both_bad:
        assert 'No valid entries' in out
    else:
        assert 'seems good' in out

    # Shouldn't create duplicate entries when we fix
    with backup_store.unlock(preserve_scratch=True):
        backup_store.manifest._cursor.execute('select * from manifest')
        rows = backup_store.manifest._cursor.fetchall()
        assert len(rows) == 2

    # After the fix, verify should be clean
    VERIFY_ARGS.fast = False
    verify(VERIFY_ARGS)
    out, _ = capsys.readouterr()
    assert 'ERROR' not in out
Example #8
def test_verify_corrupted(capsys):
    VERIFY_ARGS.fast = False
    backup_store = get_backup_store('data1_backup')
    with backup_store.unlock(preserve_scratch=True):
        backup_store.manifest._cursor.execute(
            'update manifest set key_pair=?',
            (b'asdf',),
        )
        backup_store.manifest._commit()
    verify(VERIFY_ARGS)
    out, _ = capsys.readouterr()
    assert 'ERROR' in out

    # Shouldn't create duplicate entries when we fix
    with backup_store.unlock(preserve_scratch=True):
        backup_store.manifest._cursor.execute('select * from manifest')
        rows = backup_store.manifest._cursor.fetchall()
        assert len(rows) == 1

    # After the fix, verify should be clean
    verify(VERIFY_ARGS)
    out, _ = capsys.readouterr()
    assert 'ERROR' not in out
Example #9
def main(args: argparse.Namespace) -> None:
    staticconf.DictConfiguration({'yes': args.yes})

    backup_store = get_backup_store(args.name)

    with backup_store.unlock(preserve_scratch=args.preserve_scratch_dir):
        files_to_verify: List[ManifestEntry]
        if args.fast:
            _fast_verify(backup_store)
            return

        elif args.sha:
            files_to_verify = backup_store.manifest.get_entries_by_sha(
                args.sha)
            if not files_to_verify:
                raise ValueError(
                    f'Sha {args.sha} does not match anything in the store')

        else:
            search_results = backup_store.manifest.search(like=args.like)
            # Verify the most recent version of all files that haven't been deleted
            files_to_verify = [h[0] for _, h in search_results if h[0].sha]

        _verify(files_to_verify, backup_store, args.show_all)