def test_restore_full(s3_manager):
    """Test full restore on empty zfs dataset"""
    # Locally only the pool-level snapshot exists; pool/fs has none,
    # so the restore must pull the full snapshot from S3.
    local_snapshots = 'pool@p1\t0\t19K\t-\t19K\n'  # we have no pool/fs snapshots locally
    zfs_manager = FakeZFSManager(
        fs_name='pool/fs', expected=local_snapshots, snapshot_prefix='snap_')
    executor = FakeCommandExecutor()
    pair_manager = PairManager(s3_manager, zfs_manager, command_executor=executor)

    pair_manager.restore('pool/fs@snap_1_f')

    # A full snapshot is fetched, decompressed, and received in one pipeline.
    expected = "z3_get {}pool/fs@snap_1_f | pigz -d | zfs recv pool/fs@snap_1_f".format(
        FakeBucket.rand_prefix)
    assert executor._called_commands == [expected]
def test_restore_noop(s3_manager):
    """Test restore does nothing when snapshot already exist locally"""
    # The requested snapshot (snap_2) is already present in the local listing.
    local_snapshots = (
        'pool@p1\t0\t19K\t-\t19K\n'  # we have no pool/fs snapshots locally
        'pool/fs@snap_1_f\t10.0M\t10.0M\t-\t10.0M\n'
        'pool/fs@snap_2\t10.0M\t10.0M\t-\t10.0M\n'
    )
    zfs_manager = FakeZFSManager(
        fs_name='pool/fs', expected=local_snapshots, snapshot_prefix='snap_')
    executor = FakeCommandExecutor()
    pair_manager = PairManager(s3_manager, zfs_manager, command_executor=executor)

    pair_manager.restore('pool/fs@snap_2')

    # Nothing to do: no commands should have been executed.
    assert executor._called_commands == []
def test_restore_broken(s3_manager):
    """Tests restoring a broken snapshot raises integrity error"""
    local_snapshots = (
        'pool@p1\t0\t19K\t-\t19K\n'  # we have no pool/fs snapshots locally
        'pool/fs@snap_1_f\t10.0M\t10.0M\t-\t10.0M\n'
        'pool/fs@snap_2\t10.0M\t10.0M\t-\t10.0M\n'
    )
    zfs_manager = FakeZFSManager(
        fs_name='pool/fs', expected=local_snapshots, snapshot_prefix='snap_')
    executor = FakeCommandExecutor()
    pair_manager = PairManager(s3_manager, zfs_manager, command_executor=executor)

    # snap_4_mp is set up in the s3 fixture with a missing parent.
    with pytest.raises(IntegrityError) as excp_info:
        pair_manager.restore('pool/fs@snap_4_mp')

    assert excp_info.value.message == \
        "Broken snapshot detected pool/fs@snap_4_mp, reason: 'missing parent'"
def test_restore_incremental_empty_dataset(s3_manager):
    """Tests incremental restore on a zfs dataset with no snapshots"""
    local_snapshots = 'pool@p1\t0\t19K\t-\t19K\n'  # we have no pool/fs snapshots locally
    zfs_manager = FakeZFSManager(
        fs_name='pool/fs', expected=local_snapshots, snapshot_prefix='snap_')
    executor = FakeCommandExecutor()
    pair_manager = PairManager(s3_manager, zfs_manager, command_executor=executor)

    # ask for an incremental snapshot
    pair_manager.restore('pool/fs@snap_3')

    # all incremental snapshots until we hit a full snapshot are expected
    command_templates = [
        "z3_get {}pool/fs@snap_1_f | pigz -d | zfs recv pool/fs@snap_1_f",
        "z3_get {}pool/fs@snap_2 | zfs recv pool/fs@snap_2",
        "z3_get {}pool/fs@snap_3 | zfs recv pool/fs@snap_3",
    ]
    expected = [tpl.format(FakeBucket.rand_prefix) for tpl in command_templates]
    assert executor._called_commands == expected
def test_backup_incremental_missing_parent(s3_manager):
    """An incremental backup aborts when a snapshot's parent is broken."""
    local_snapshots = (
        'pool@p1\t0\t19K\t-\t19K\n'
        'pool/fs@snap_1_f\t10.0M\t10.0M\t-\t10.0M\n'
        'pool/fs@snap_4_mp\t10.0M\t10.0M\t-\t10.0M\n'
        'pool/fs@snap_5\t10.0M\t10.0M\t-\t10.0M\n'
    )
    zfs_manager = FakeZFSManager(
        fs_name='pool/fs', expected=local_snapshots, snapshot_prefix='snap_')
    executor = FakeCommandExecutor()
    pair_manager = PairManager(s3_manager, zfs_manager, command_executor=executor)

    # snap_5's parent (snap_4_mp) is broken, so the whole backup must fail.
    with pytest.raises(IntegrityError) as excp_info:
        pair_manager.backup_incremental()

    assert excp_info.value.message == \
        "Broken snapshot detected pool/fs@snap_5, reason: 'parent broken'"
    # The failure is detected before any command runs.
    assert executor._called_commands == []
def test_restore_force(s3_manager):
    """Tests incremental restore with forced rollback"""
    local_snapshots = (
        'pool@p1\t0\t19K\t-\t19K\n'  # we have no pool/fs snapshots locally
        'pool/fs@snap_1_f\t10.0M\t10.0M\t-\t10.0M\n'
        'pool/fs@snap_2\t10.0M\t10.0M\t-\t10.0M\n'
    )
    zfs_manager = FakeZFSManager(
        fs_name='pool/fs', expected=local_snapshots, snapshot_prefix='snap_')
    executor = FakeCommandExecutor()
    pair_manager = PairManager(s3_manager, zfs_manager, command_executor=executor)

    # ask for an incremental snapshot
    pair_manager.restore('pool/fs@snap_3', force=True)

    # all incremental snapshots until we hit a full snapshot are expected
    command_templates = [
        "z3_get {}pool/fs@snap_3 | zfs recv -F pool/fs@snap_3",
    ]
    expected = [tpl.format(FakeBucket.rand_prefix) for tpl in command_templates]
    assert executor._called_commands == expected
def test_backup_incremental_cycle(s3_manager):
    """An incremental backup aborts when the snapshot chain contains a cycle."""
    local_snapshots = (
        'pool@p1\t0\t19K\t-\t19K\n'
        'pool/fs@snap_1_f\t10.0M\t10.0M\t-\t10.0M\n'
        'pool/fs@snap_2\t10.0M\t10.0M\t-\t10.0M\n'
        'pool/fs@snap_3\t10.0M\t10.0M\t-\t10.0M\n'
        # the next 2 have bad metadata in the s3 fixture
        'pool/fs@snap_6_cycle\t10.0M\t10.0M\t-\t10.0M\n'
        'pool/fs@snap_7_cycle\t10.0M\t10.0M\t-\t10.0M\n'
        'pool/fs@snap_8\t10.0M\t10.0M\t-\t10.0M\n'
    )
    zfs_manager = FakeZFSManager(
        fs_name='pool/fs', expected=local_snapshots, snapshot_prefix='snap_')
    executor = FakeCommandExecutor()
    pair_manager = PairManager(s3_manager, zfs_manager, command_executor=executor)

    with pytest.raises(IntegrityError) as excp_info:
        pair_manager.backup_incremental()

    assert excp_info.value.message == \
        "Broken snapshot detected pool/fs@snap_7_cycle, reason: 'cycle detected'"
    # The cycle is detected before any command runs.
    assert executor._called_commands == []
def test_backup_full_compressed(s3_manager):
    """A full backup with a compressor pipes zfs send through pigz into pput."""
    local_snapshots = (
        'pool/fs@snap_1_f\t10.0M\t10.0M\t-\t10.0M\n'
        'pool/fs@snap_2\t10.0M\t10.0M\t-\t10.0M\n'
        'pool/fs@snap_3\t10.0M\t10.0M\t-\t10.0M\n'
        'pool/fs@snap_8\t10.0M\t10.0M\t-\t10.0M\n'
    )
    zfs_manager = FakeZFSManager(
        fs_name='pool/fs', expected=local_snapshots, snapshot_prefix='snap_')
    executor = FakeCommandExecutor()
    pair_manager = PairManager(
        s3_manager, zfs_manager, command_executor=executor, compressor='pigz1')

    pair_manager.backup_full()

    # First a dry-run size estimate, then the actual send/compress/upload pipe.
    command_templates = [
        "zfs send -nvP 'pool/fs@snap_8'",
        ("zfs send 'pool/fs@snap_8' | "
         "pigz -1 --blocksize 4096 | "
         "pput --quiet --estimated 1234 --meta size=1234 --meta is_full=true "
         "--meta compressor=pigz1 {}pool/fs@snap_8"),
    ]
    expected = [tpl.format(FakeBucket.rand_prefix) for tpl in command_templates]
    assert executor._called_commands == expected
def pair_manager(s3_manager):
    """Build a PairManager wired to fake ZFS and command-executor doubles.

    NOTE(review): this reads like a pytest fixture but no @pytest.fixture
    decorator is visible in this chunk — confirm upstream.
    """
    executor = FakeCommandExecutor()
    fs_manager = FakeZFSManager(fs_name='pool/fs', snapshot_prefix='snap_')
    return PairManager(s3_manager, fs_manager, command_executor=executor)