def test_simple_target(self):
        """ test to make sure a simple target validates correctly.  No actual
        ZFS code is executed here.
        """
        # create a basic zpool object
        zpool = Zpool("rpool")
        zpool.action = "use_existing"

        # create one filesystem object
        fs = Filesystem("rpool/test1")
        fs.dataset_path = fs.name

        # create the DOC structure
        self.logical.insert_children(zpool)
        zpool.insert_children(fs)

        self.doc.volatile.insert_children(self.target)

        zpool_name, dataset, action, dataset_mp = dc.validate_target()
        self.assertTrue(zpool_name == zpool.name)
        self.assertTrue(dataset == "rpool/test1")
        self.assertTrue(action == "create")
        # the mountpoint will be None since the Filesystem.from_xml() method
        # is not called to determine the actual mountpoint
        self.assertFalse(dataset_mp)
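
    # For reference, the DOC layout this validation test builds (setUp,
    # which is assumed to wire self.logical beneath self.target, is not
    # shown in this listing):
    #
    #   doc.volatile
    #     Target (self.target)
    #       Logical (self.logical)
    #         Zpool "rpool" (action="use_existing")
    #           Filesystem "rpool/test1"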
    def get_dataset(self):
        '''Returns a Filesystem for test purposes. If the ZFS dataset already
        exists, the test is aborted to prevent accidental destruction of data.
        The created dataset is stored and destroyed (along with any
        descendants) after test execution.'''
        if not _PERMISSIONS:
            raise SkipTest("Insufficient permissions to run ZFS test")
        
        test_ids = (self.id()).split(".")

        # Use the name of the test case (the last field of the test id)
        # as part of the dataset name.
        ds_name = _ZFS_TEST_DS % test_ids[-1] + "_%s"
      
        tried = []
        # Look for a unique name for the ZFS dataset to be used by the
        # tests.  If a unique name cannot be found within 15 tries, notify
        # the user so they can clean up their test datasets.
        for x in xrange(15):
            dataset = Filesystem(ds_name % x)
            tried.append(dataset.name)
            if not dataset.exists:
                break
        else:
            raise SkipTest("Could not generate unique ZFS dataset to safely"
                           " test. Tried: %s" % tried)
        
        dataset.create(dry_run=False)
        self.__dataset = dataset
        return dataset
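
    # A minimal usage sketch (hypothetical test method, not from the
    # original source): get_dataset() hands back a freshly created,
    # uniquely named Filesystem that is destroyed after the test runs.
    def test_get_dataset_sketch(self):
        dataset = self.get_dataset()
        self.assertTrue(dataset.exists)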
def tearDown():
    '''Clean up the base dataset. Note: This is only run when running in 
    a 'nose' environment (which is the case for running via
    tools/test/slim_test). See also setUp()
    
    '''
    if _PERMISSIONS and not _LEAVE_ZFS:
        base_ds = Filesystem(_ZFS_TEST_DS_BASE)
        if base_ds.exists:
            base_ds.destroy(dry_run=False, recursive=True)
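
# A possible shape for the module-level setUp() referenced by the docstring
# above; a sketch under assumptions, since only _ZFS_TEST_DS_BASE, the
# _PERMISSIONS flag, and the Filesystem API appear in this listing:
def setUp():
    '''Create the base dataset used by the ZFS tests. See tearDown().'''
    if _PERMISSIONS:
        base_ds = Filesystem(_ZFS_TEST_DS_BASE)
        if not base_ds.exists:
            base_ds.create(dry_run=False)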
    def test_delete_filesystem(self):
        """ test to make sure the delete action correctly errors
        """
        # create a basic zpool object
        zpool = Zpool("rpool")
        zpool.action = "use_existing"

        # create one filesystem object with an action of delete
        fs = Filesystem("rpool/test1")
        fs.action = "delete"

        # create the DOC structure
        self.logical.insert_children(zpool)
        zpool.insert_children(fs)

        self.doc.volatile.insert_children(self.target)

        self.assertRaises(RuntimeError, dc.validate_target)
    def test_two_filesystems(self):
        """ test to make sure two Filesystem objects correctly errors
        """
        # create a basic zpool object
        zpool = Zpool("rpool")
        zpool.action = "use_existing"

        # create two filesystem objects
        fs1 = Filesystem("rpool/test1")
        fs2 = Filesystem("rpool/test2")

        # create the DOC structure
        self.logical.insert_children(zpool)
        zpool.insert_children([fs1, fs2])

        self.doc.volatile.insert_children(self.target)

        self.assertRaises(RuntimeError, dc.validate_target)
    def setUp(self):
        """ Initialises a runtime environment for execution of
            SystemBootMenu unit tests.
        """
        super(SystemBootMenuTestCase, self).setUp()

        desired_dict = {}
        # For the purposes of having a sane environment for the
        # test case, use the active BE. This allows pybootmgt
        # initialisation to locate all the bits it expects.
        for self.be_name, be_pool, be_root_ds, default in be_list():
            be_fs = Filesystem(be_root_ds)
            if be_fs.get('mounted') == 'yes' and \
               be_fs.get('mountpoint') == '/':
                desired_dict["rpoolname"] = be_pool
                desired_dict["rpoolmount"] = \
                    Filesystem(be_pool).get('mountpoint')
                desired_dict["bename"] = self.be_name
                desired_dict["bemount"] = '/'
                break
        if self.arch == "sparc":
            desired_dict["phys_xml"] = PHYS_SPARC_XML
        else:
            desired_dict["phys_xml"] = PHYS_X86_XML
        # Make sure the active BE was found before proceeding with the test
        try:
            desired_dict["bemount"]
        except KeyError:
            raise RuntimeError("Couldn't find active BE to use in unit test")
        boot_mods_dom = etree.fromstring(BOOT_MODS_XML)
        self.doc.import_from_manifest_xml(boot_mods_dom, volatile=True)
        target_desired_dom = etree.fromstring(DYNAMIC_DESIRED_XML \
                                              % desired_dict)
        self.doc.import_from_manifest_xml(target_desired_dom, volatile=True)

        # The logical BE.from_xml() method ignores the mountpoint property
        # in the XML, so manually override its None value.
        boot_env = self.doc.volatile.get_descendants(class_type=BE)[0]
        boot_env.mountpoint = '/'
        self.boot_menu = SystemBootMenu("Test SystemBootMenu Checkpoint")
        # Force the arch in SystemBootMenu to match self.arch in case we
        # are trying to override it.
        self.boot_menu.arch = self.arch
    def test_snapshot_new_zfs_snap(self):
        '''Test InstallEngine.snapshot where the given snapshot doesn't yet exist'''
        dataset = self.get_dataset()
        self.engine.dataset = dataset
        cp_data = self.engine.get_cp_data(self.name_list[0])
        snap = Filesystem(dataset.snapname(".step_" + cp_data.name))

        self.engine.snapshot(cp_data=cp_data)
        self.assertTrue(os.path.exists(cp_data.data_cache_path),
                        "Path doesn't exist: " + cp_data.data_cache_path)
        self.assertTrue(snap.exists)
    def test_snapshot_overwrite_zfs_snap(self):
        '''Ensure InstallEngine.snapshot overwrites an existing snapshot'''
        cp_data = self.engine.get_cp_data(self.name_list[0])
        dataset = self.get_dataset()
        self.engine.dataset = dataset
        snapname = self.engine.get_zfs_snapshot_name(cp_data.name)
        dataset.snapshot(snapname)

        snap = Filesystem(dataset.snapname(snapname))

        self.engine.snapshot(cp_data=cp_data)
        self.assertTrue(os.path.exists(cp_data.data_cache_path),
                        "Path doesn't exist: " + cp_data.data_cache_path)
        self.assertTrue(snap.exists)
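
    # Naming relationship the two snapshot tests above rely on (an
    # assumption inferred from usage, with illustrative values): for a
    # dataset "rpool/ds", dataset.snapname("foo") is expected to return
    # "rpool/ds@foo", which Filesystem(...).exists can then verify.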
    def test_create_pool(self):
        """ test to make sure the create action on a zpool correctly errors
        """
        # create a basic zpool object with an action of create
        zpool = Zpool("rpool")
        zpool.action = "create"

        # create one filesystem object
        fs = Filesystem("rpool/test1")

        # create the DOC structure
        self.logical.insert_children(zpool)
        zpool.insert_children(fs)

        self.doc.volatile.insert_children(self.target)

        self.assertRaises(RuntimeError, dc.validate_target)
    def test_two_zpools(self):
        """ test to make sure two Zpool objects correctly errors
        """
        # create two zpool objects
        zpool1 = Zpool("rpool")
        zpool1.action = "use_existing"
        zpool2 = Zpool("rpool-two")
        zpool2.action = "use_existing"

        # create one filesystem object
        fs1 = Filesystem("rpool/test1")

        # create the DOC structure
        self.logical.insert_children([zpool1, zpool2])
        zpool1.insert_children(fs1)

        self.doc.volatile.insert_children(self.target)

        self.assertRaises(RuntimeError, dc.validate_target)
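
    # Taken together, the error tests above pin down dc.validate_target():
    # a single Zpool with action "use_existing" (not "create") holding a
    # single Filesystem whose action is not "delete"; anything else raises
    # RuntimeError.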
    def select_zone_targets(self, from_manifest):
        '''Logic to select the targets for a zone.

           Given the alternate pool dataset, and the targets from the
           manifest, make the selections.

           If no suitable selection can be made, then the SelectionError
           exception will be raised.  This should only be the case if the
           selected alternate pool dataset does not exist.

           Returns a new set of Targets that can be inserted into the
           Data Object Cache for TargetInstantiationZone to use.

        '''

        # The selected alternate pool dataset must be set
        if self.selected_dataset is None:
            raise SelectionError("No dataset selected as alternate pool "
                                 "dataset.")

        # Verify selected dataset exists
        fs = Filesystem(self.selected_dataset)
        if not fs.exists:
            raise SelectionError("Dataset (%s) does not exist." % \
                                 self.selected_dataset)

        if from_manifest:
            self.logger.debug("from_manifest =\n%s\n" % \
                              (str(from_manifest[0])))
        else:
            self.logger.debug("from_manifest is empty\n")

        # Instantiate desired target, logical, and zpool objects.
        target = Target(Target.DESIRED)
        logical = Logical(DEFAULT_LOGICAL_NAME)
        logical.noswap = True
        logical.nodump = True
        zpool = logical.add_zpool(self.selected_dataset, is_root=True)

        for manifest_target in from_manifest:
            # Copy filesystem children into desired zpool
            for fs in manifest_target.get_descendants(class_type=Filesystem):
                zpool.insert_children(copy.deepcopy(fs))

            # Copy BE children into desired zpool
            for be in manifest_target.get_descendants(class_type=BE):
                zpool.insert_children(copy.deepcopy(be))

        # Check if we have a BE object under zpool.
        # If not, create one.
        be_list = zpool.get_children(class_type=BE)
        if not be_list:
            # Instantiate new BE object and insert it into zpool.
            be = BE()
            zpool.insert_children(be)
        else:
            # Zpool will have only one BE object.
            be = be_list[0]

        # Set BE's mountpoint to the mountpoint we need
        # to mount it at to do the installation.
        be.mountpoint = self.be_mountpoint

        # Insert desired logical object into the desired target object.
        target.insert_children(logical)

        # Insert desired target object into the DOC.
        self.doc.persistent.insert_children(target)
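
        # For reference, the desired tree this method leaves in
        # doc.persistent (names as used above):
        #
        #   Target (Target.DESIRED)
        #     Logical (DEFAULT_LOGICAL_NAME, noswap=True, nodump=True)
        #       Zpool (self.selected_dataset, is_root=True)
        #         Filesystem children copied from the manifest
        #         BE (mountpoint=self.be_mountpoint)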
def setup_build_dataset(zpool, fs, resume_checkpoint=None):
    """ Setup the build datasets for use by DC. This includes setting up:
    - top level build dataset
    - a child dataset named 'build_data'
    - a child dataset named 'media'
    - a child dataset named 'logs'
    - a snapshot of the empty build_data dataset - build_data@empty
    """
    eng = InstallEngine.get_instance()
    doc = eng.data_object_cache

    build_data = eng.dataset
    empty_snap = Filesystem(
        os.path.join(zpool.name, fs.name, "build_data@empty"))
    logs = Filesystem(os.path.join(zpool.name, fs.name, "logs"))
    media = Filesystem(os.path.join(zpool.name, fs.name, "media"))

    if fs.action == "create":
        # recursively destroy the Filesystem dataset before creating it
        fs.destroy(dry_run=False, recursive=True)

    fs.create()
    build_data.create()
    logs.create()
    media.create()

    if fs.action == "preserve":
        # check to see if base_dataset/build_data@empty exists.
        if resume_checkpoint is None and empty_snap.exists:
            # rollback the dataset only if DC is not resuming from a specific
            # checkpoint
            build_data.rollback("empty", recursive=True)

    if not empty_snap.exists:
        build_data.snapshot("empty")

    # Now that the base dataset is created, store the mountpoint ZFS calculated
    base_dataset_mp = fs.get("mountpoint")

    # check for the existence of a lock file, bail out if one exists.
    if os.path.exists(base_dataset_mp):
        if os.path.exists(os.path.join(base_dataset_mp, DC_LOCKFILE)):
            raise RuntimeError("distro_const: An instance of distro_const "
                               "is already running in %s" % base_dataset_mp)

    DC_LOGGER.info("Build datasets successfully setup")
    return (base_dataset_mp, build_data.get("mountpoint"),
            logs.get("mountpoint"), media.get("mountpoint"))
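
# A hedged invocation sketch for the variant above (the pool and dataset
# names are illustrative assumptions; no caller appears in this listing):
#
#     zpool = Zpool("rpool")
#     fs = Filesystem("dc")        # dataset name relative to the pool
#     fs.action = "create"
#     base_mp, bd_mp, logs_mp, media_mp = setup_build_dataset(zpool, fs)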
def setup_build_dataset(zpool_name,
                        base_dataset,
                        base_action,
                        base_dataset_mp,
                        resume_checkpoint=None,
                        execute=True):
    """ Setup the build datasets for use by DC. This includes setting up:
    - top level build dataset
    - a child dataset named 'build_data'
    - a child dataset named 'media'
    - a child dataset named 'logs'
    - a snapshot of the empty build_data dataset - build_data@empty
    """
    eng = InstallEngine.get_instance()
    doc = eng.data_object_cache

    # register an internal TI checkpoint
    eng.register_checkpoint(TI_DICT["name"], TI_DICT["mod_path"],
                            TI_DICT["class"])

    if not execute:
        return

    build_data = Filesystem(
        os.path.join(zpool_name, base_dataset, "build_data"))
    empty_snap = Filesystem(
        os.path.join(zpool_name, base_dataset, "build_data@empty"))

    # set the other mountpoints
    build_data_mp = os.path.join(base_dataset_mp, "build_data")
    logs_mp = os.path.join(base_dataset_mp, "logs")
    media_mp = os.path.join(base_dataset_mp, "media")

    # if resume_checkpoint is not None, ensure that the build datasets do
    # actually exist
    if resume_checkpoint is not None:
        if base_dataset_mp is None or \
           build_data_mp is None or \
           logs_mp is None or \
           media_mp is None or \
           not empty_snap.exists:
            raise RuntimeError("Build dataset not correctly setup.  "
                               "distro_const cannot be resumed.")

    # check for the existence of a lock file, bail out
    # if one exists.
    if base_dataset_mp is not None and os.path.exists(base_dataset_mp):
        if os.path.exists(os.path.join(base_dataset_mp, DC_LOCKFILE)):
            raise RuntimeError("distro_const: An instance of distro_const " \
                               "is already running in " + base_dataset_mp)

    # create DOC nodes
    build_data_node = Filesystem(os.path.join(base_dataset, "build_data"))
    build_data_node.mountpoint = build_data_mp
    logs_node = Filesystem(os.path.join(base_dataset, "logs"))
    logs_node.mountpoint = logs_mp
    media_node = Filesystem(os.path.join(base_dataset, "media"))
    media_node.mountpoint = media_mp

    if base_action == "preserve":
        # check to see if base_dataset/build_data@empty exists.
        if resume_checkpoint is None and empty_snap.exists:
            # rollback the dataset only if DC is not resuming from a specific
            # checkpoint
            build_data.rollback("empty", recursive=True)

    build_data_node.action = base_action
    logs_node.action = base_action
    media_node.action = base_action

    # insert all three nodes.
    zpool = doc.get_descendants(class_type=Zpool)[0]
    zpool.insert_children([build_data_node, logs_node, media_node])

    execute_checkpoint()

    # the from_xml() call of Filesystem tries to manually set the mountpoint of
    # the base dataset.  In doing that, it makes assumptions about how ZFS
    # determines the mountpoint of the dataset.  Now that ZFS has created the
    # dataset, query ZFS to set the mountpoint based on what ZFS set it to.
    base_dataset_object = Filesystem(os.path.join(zpool_name, base_dataset))
    base_dataset_mp = base_dataset_object.get("mountpoint")

    # (re)set the other mountpoints
    build_data_mp = os.path.join(base_dataset_mp, "build_data")
    logs_mp = os.path.join(base_dataset_mp, "logs")
    media_mp = os.path.join(base_dataset_mp, "media")

    # create the @empty snapshot if needed
    if not empty_snap.exists:
        build_data.snapshot("empty")

    DC_LOGGER.info("Build datasets successfully setup")
    return (base_dataset_mp, build_data_mp, logs_mp, media_mp)
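
# A hedged invocation sketch for this variant (argument values are
# illustrative assumptions):
#
#     base_mp, bd_mp, logs_mp, media_mp = setup_build_dataset(
#         "rpool", "dc", "create", "/rpool/dc", execute=True)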
    def discover_zpools(self, search_name=""):
        """ discover_zpools - method to walk zpool list output to create Zpool
        objects.  Returns a logical DOC object with all zpools populated.
        """
        # create a logical element
        logical = Logical("logical")

        # assume noswap and nodump are True until a swap or dump zvol is
        # found below
        logical.noswap = True
        logical.nodump = True

        # retrieve the list of zpools
        cmd = [ZPOOL, "list", "-H", "-o", "name"]
        p = run(cmd)

        # Get the list of zpools
        zpool_list = p.stdout.splitlines()

        # walk the list and populate the DOC
        for zpool_name in zpool_list:
            # if the user has specified a specific search name, only run
            # discovery on that particular pool name
            if search_name and zpool_name != search_name:
                continue

            self.logger.debug("Populating DOC for zpool:  %s", zpool_name)

            # create a new Zpool DOC object and insert it
            zpool = Zpool(zpool_name)
            zpool.action = "preserve"
            logical.insert_children(zpool)

            # check to see if the zpool is the boot pool
            cmd = [ZPOOL, "list", "-H", "-o", "bootfs", zpool_name]
            p = run(cmd)
            if p.stdout.rstrip() != "-":
                zpool.is_root = True

            # get the mountpoint of the zpool
            cmd = [ZFS, "get", "-H", "-o", "value", "mountpoint", zpool_name]
            p = run(cmd)
            zpool.mountpoint = p.stdout.strip()

            # set the vdev_mapping on each physical object in the DOC tree for
            # this zpool
            self.set_vdev_map(zpool)

            # for each zpool, get all of its datasets.  Switch to the C locale
            # so we don't have issues with LC_NUMERIC settings
            cmd = [
                ZFS, "list", "-r", "-H", "-o", "name,type,used,mountpoint",
                zpool_name
            ]
            p = run(cmd, env={"LC_ALL": "C"})
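
            # with -H, the output is tab-delimited with one dataset per
            # line, e.g. (values illustrative):
            #
            #   rpool           filesystem  20.1G  /rpool
            #   rpool/export    filesystem  1.20G  /export
            #   rpool/swap      volume      2.00G  -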

            # walk each dataset and create the appropriate DOC objects for
            # each.  Skip the first line of list output, as the top level
            # dataset (also the dataset with the same name as that of the
            # zpool) may have a different mountpoint than the zpool.
            for dataset in p.stdout.rstrip().split("\n")[1:]:
                try:
                    name, ds_type, ds_size, mountpoint = dataset.split(None, 3)
                except ValueError as err:
                    # trap on ValueError so any inconsistencies are captured
                    self.logger.debug("Unable to process dataset: %r" %
                                      dataset)
                    self.logger.debug(str(err))
                    continue

                # fix the name field to remove the name of the pool
                name = name.partition(zpool_name + "/")[2]

                if ds_type == "filesystem":
                    obj = Filesystem(name)
                    obj.mountpoint = mountpoint
                elif ds_type == "volume":
                    obj = Zvol(name)
                    obj.size = Size(ds_size)

                    # check for swap/dump.  If there's a match, set the zvol
                    # 'use' attribute and the noswap/nodump attribute of
                    # logical.  The zpool name needs to be re-attached to the
                    # zvol name to match what was already parsed
                    if os.path.join(zpool_name, name) in self.swap_list:
                        obj.use = "swap"
                        logical.noswap = False
                    if os.path.join(zpool_name, name) in self.dump_list:
                        obj.use = "dump"
                        logical.nodump = False
                else:
                    # skip any other dataset types so 'obj' is never
                    # referenced before assignment
                    continue

                obj.action = "preserve"
                zpool.insert_children(obj)

        return logical
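
# A hedged usage sketch (the class that holds discover_zpools is not shown
# in this listing; 'td' stands in for an instance of it):
#
#     logical = td.discover_zpools()          # discover every zpool
#     logical = td.discover_zpools("rpool")   # or just one pool by name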