Example #1
def root_dataset(
    force_clean: bool,
    zfs: libzfs.ZFS,
    pool: libzfs.ZFSPool
) -> libzfs.ZFSDataset:

    dataset_name = f"{pool.name}/iocage-test"

    if force_clean:
        try:
            dataset = zfs.get_dataset(dataset_name)
            helper_functions.unmount_and_destroy_dataset_recursive(dataset)
        except libzfs.ZFSException:
            pass

    try:
        pool.create(dataset_name, {})
    except libzfs.ZFSException:
        if force_clean is True:
            raise

    dataset = zfs.get_dataset(dataset_name)
    if not dataset.mountpoint:
        dataset.mount()

    # hand the dataset to the test; the teardown below runs once it is consumed
    yield dataset

    if force_clean:
        helper_functions.unmount_and_destroy_dataset_recursive(dataset)
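The helper_functions.unmount_and_destroy_dataset_recursive helper used above belongs to the test suite's own helper_functions module and is not shown in these examples. A minimal sketch of what such a helper might look like, assuming the py-libzfs ZFSDataset API (children, mountpoint, umount, delete); the real helper may differ:

def unmount_and_destroy_dataset_recursive(dataset):
    # Destroy children first so leaf datasets go away before their parents.
    for child in list(dataset.children):
        unmount_and_destroy_dataset_recursive(child)
    # Unmount the dataset if it is currently mounted, then destroy it.
    if dataset.mountpoint is not None:
        dataset.umount()
    dataset.delete()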
Example #2
def shared_zfs_dataset(
    root_dataset: libzfs.ZFSDataset,
    zfs: libzfs.ZFS
) -> libzfs.ZFSDataset:
    name = f"{root_dataset.name}/shared-{random.randint(1, 32768)}"
    root_dataset.pool.create(name, {})
    dataset = zfs.get_dataset(name)
    dataset.properties["jailed"] = libzfs.ZFSUserProperty("on")
    yield dataset
    dataset.delete()
Example #3
    def import_pool(self, pool):
        self.pools.pop(pool.guid)
        self.updated.emit()
        try:
            logging.info(f'Importing pool {pool.name} ({pool.guid})')
            ZFS().import_pool(pool, pool.name, {})
        except ZFSException as e:
            logging.error(f'Import failed: {e}')
            self.import_error.emit(pool, str(e).capitalize())
            return
        self.import_success.emit(pool)
Example #4
    def zfs_volume(self, root_dataset: libzfs.ZFSDataset,
                   zfs: libzfs.ZFS) -> libzfs.ZFSDataset:

        r = random.randint(0, sys.maxsize)
        dataset_name = f"{root_dataset.name}/zvol{r}"

        root_dataset.pool.create(dataset_name,
                                 fsopts=dict(volsize="16M"),
                                 fstype=libzfs.DatasetType.VOLUME)

        dataset = zfs.get_dataset(dataset_name)
        yield dataset

        dataset.delete()
Example #5
def root_dataset(zfs: libzfs.ZFS, pool: libzfs.ZFSPool) -> libzfs.ZFSDataset:
    """Return the root dataset for tests."""
    dataset_name = f"{pool.name}/libioc-test"

    try:
        pool.create(dataset_name, {})
    except libzfs.ZFSException:
        pass

    dataset = zfs.get_dataset(dataset_name)

    if dataset.properties["mountpoint"].value == "none":
        mountpoint = libzfs.ZFSUserProperty("/.libioc-test")
        dataset.properties["mountpoint"] = mountpoint

    if not dataset.mountpoint:
        dataset.mount()

    return dataset
Example #6
    def test_can_be_created(self, host: 'iocage.Host.Host',
                            local_release: 'iocage.Release.ReleaseGenerator',
                            logger: 'iocage.Logger.Logger', zfs: libzfs.ZFS,
                            root_dataset: libzfs.ZFSDataset) -> None:
        """Test if jails can be created."""
        jail = iocage.Jail.Jail(dict(id="foobar"),
                                new=True,
                                host=host,
                                logger=logger,
                                zfs=zfs)
        jail.create(local_release)

        dataset = zfs.get_dataset(f"{root_dataset.name}/jails/{jail.name}")

        def cleanup() -> None:
            helper_functions.unmount_and_destroy_dataset_recursive(dataset)

        try:
            assert not jail.config["basejail"]
            assert not jail.config["basejail_type"]

            assert dataset.mountpoint is not None
            assert os.path.isfile(f"{dataset.mountpoint}/config.json")
            assert os.path.isdir(f"{dataset.mountpoint}/root")

            data = read_jail_config_json(f"{dataset.mountpoint}/config.json")

            try:
                assert data["basejail"] == "no"
            except KeyError:
                pass

            try:
                assert (data["basejail"] == "") or (data["basejail"] == "none")
            except KeyError:
                pass

        except BaseException as e:
            cleanup()
            raise e

        cleanup()
Example #7
    def test_can_be_created(self, host: 'libioc.Host.Host',
                            local_release: 'libioc.Release.ReleaseGenerator',
                            logger: 'libioc.Logger.Logger', zfs: libzfs.ZFS,
                            root_dataset: libzfs.ZFSDataset) -> None:
        """Test if NullFS basejails can be created."""
        jail = libioc.Jail.Jail({"basejail": True},
                                new=True,
                                host=host,
                                logger=logger,
                                zfs=zfs)
        jail.create(local_release)

        dataset = zfs.get_dataset(f"{root_dataset.name}/jails/{jail.name}")

        def cleanup() -> None:
            helper_functions.unmount_and_destroy_dataset_recursive(dataset)

        try:
            assert jail.config["basejail"]
            assert jail.config["basejail_type"] == "nullfs"

            assert dataset.mountpoint is not None
            assert os.path.isfile(f"{dataset.mountpoint}/config.json")
            assert os.path.isdir(f"{dataset.mountpoint}/root")

            data = read_jail_config_json(f"{dataset.mountpoint}/config.json")

            assert data["basejail"] == "yes"

            try:
                assert data["basejail_type"] == "nullfs"
            except KeyError:
                pass

        except Exception as e:
            cleanup()
            raise e

        cleanup()
Example #8
def root_dataset(
    zfs: libzfs.ZFS,
    pool: libzfs.ZFSPool
) -> libzfs.ZFSDataset:
    """Return the root dataset for tests."""
    dataset_name = f"{pool.name}/libioc-test"

    try:
        pool.create(dataset_name, {})
    except libzfs.ZFSException:
        pass

    dataset = zfs.get_dataset(dataset_name)

    if dataset.properties["mountpoint"].value == "none":
        mountpoint = libzfs.ZFSUserProperty("/.libioc-test")
        dataset.properties["mountpoint"] = mountpoint

    if not dataset.mountpoint:
        dataset.mount()

    return dataset
Example #9
    def test_can_be_created(self, new_jail: 'libioc.Jail.Jail',
                            local_release: 'libioc.Release.ReleaseGenerator',
                            root_dataset: libzfs.ZFSDataset,
                            zfs: libzfs.ZFS) -> None:
        """Test if NullFS basejails can be created."""
        new_jail.config["basejail"] = True
        new_jail.create(local_release)

        dataset = zfs.get_dataset(f"{root_dataset.name}/jails/{new_jail.name}")

        assert new_jail.is_basejail is True
        assert new_jail.config["basejail"] is True
        assert new_jail.config["basejail_type"] == "nullfs"

        assert dataset.mountpoint is not None
        assert os.path.isfile(f"{dataset.mountpoint}/config.json")
        assert os.path.isdir(f"{dataset.mountpoint}/root")

        data = read_jail_config_json(f"{dataset.mountpoint}/config.json")

        assert data["basejail"] == "yes"
        if "basejail_type" in data:
            assert data["basejail_type"] == "nullfs"
Example #10
    def _do_scan(self):
        imports = list(ZFS().find_import(search_paths=disk_paths))
        return sorted(imports, key=lambda pool: str(pool.name))
Example #11
    def _do_scan(self):
        return sorted(list(ZFS().pools), key=lambda pool: str(pool.name))
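Examples #10 and #11 differ only in what they scan: find_import lists pools that could be imported from the given device paths, while ZFS().pools lists pools that are already imported. A short sketch calling both directly, assuming py-libzfs is available and the process may query ZFS; the "/dev" search path stands in for the disk_paths value defined outside the snippet in Example #10:

from libzfs import ZFS

zfs = ZFS()
importable = list(zfs.find_import(search_paths=["/dev"]))  # pools not yet imported
imported = list(zfs.pools)                                  # pools already imported
for pool in sorted(importable + imported, key=lambda p: str(p.name)):
    print(pool.name, pool.guid)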
Example #12
from libzfs import (
    ZFS, ZFSDataset, ZFSSnapshot, ZFSException, Error as ZFSErrorCode
)
from tempfile import _RandomNameSequence

_zfs = ZFS()

# Re-export selected methods of the shared ZFS handle as module-level functions.
for name in ('get_object', 'get_dataset', 'get_dataset_by_path'):
    locals()[name] = getattr(_zfs, name)


def create_snapshot(dataset, name):
    fullname = '{}@{}'.format(dataset.name, name)
    dataset.snapshot(fullname)
    return get_object(fullname)


def create_clone(snapshot, name, **opts):
    if isinstance(snapshot, TemporarySnapshot):
        snapshot = snapshot.snap
    snapshot.clone(name, opts)
    return get_object(name)


class TemporarySnapshot:
    def __init__(self, dataset, name):
        self.snap = create_snapshot(dataset, name)

    def __del__(self):
        if hasattr(self, 'snap'):
            self.snap.delete(recursive_children=True)
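
A hypothetical usage of the helpers above, assuming a dataset named tank/data exists; the dataset, snapshot, and clone names are illustrative only:

ds = get_dataset('tank/data')                   # bound above from the shared ZFS handle
snap = TemporarySnapshot(ds, 'before-upgrade')  # snapshot is deleted again on garbage collection
clone = create_clone(snap, 'tank/data-clone')   # accepts a TemporarySnapshot or a raw snapshot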