def setup_build_dataset(zpool_name,
                        base_dataset,
                        base_action,
                        base_dataset_mp,
                        resume_checkpoint=None,
                        execute=True):
    """ Setup the build datasets for use by DC. This includes setting up:
    - top level build dataset
    - a child dataset named 'build_data'
    - a child dataset named 'media'
    - a child dataset named 'logs'
    - a snapshot of the empty build_data dataset - build_data@empty
    """
    eng = InstallEngine.get_instance()
    doc = eng.data_object_cache

    # register an internal TI checkpoint
    eng.register_checkpoint(TI_DICT["name"], TI_DICT["mod_path"],
                            TI_DICT["class"])

    if not execute:
        return

    build_data = Filesystem(
        os.path.join(zpool_name, base_dataset, "build_data"))
    empty_snap = Filesystem(
        os.path.join(zpool_name, base_dataset, "build_data@empty"))

    # set the other mountpoints
    build_data_mp = os.path.join(base_dataset_mp, "build_data")
    logs_mp = os.path.join(base_dataset_mp, "logs")
    media_mp = os.path.join(base_dataset_mp, "media")

    # if resume_checkpoint is not None, ensure that the build datasets do
    # actually exist
    if resume_checkpoint is not None:
        if base_dataset_mp is None or \
           build_data_mp is None or \
           logs_mp is None or \
           media_mp is None or \
           not empty_snap.exists:
            raise RuntimeError("Build dataset not correctly setup.  "
                               "distro_const cannot be resumed.")

    # check for the existence of a lock file, bail out
    # if one exists.
    if base_dataset_mp is not None and os.path.exists(base_dataset_mp):
        if os.path.exists(os.path.join(base_dataset_mp, DC_LOCKFILE)):
            raise RuntimeError("distro_const: An instance of distro_const " \
                               "is already running in " + base_dataset_mp)

    # create DOC nodes
    build_data_node = Filesystem(os.path.join(base_dataset, "build_data"))
    build_data_node.mountpoint = build_data_mp
    logs_node = Filesystem(os.path.join(base_dataset, "logs"))
    logs_node.mountpoint = logs_mp
    media_node = Filesystem(os.path.join(base_dataset, "media"))
    media_node.mountpoint = media_mp

    if base_action == "preserve":
        # check to see if base_dataset/build_data@empty exists.
        if resume_checkpoint is None and empty_snap.exists:
            # rollback the dataset only if DC is not resuming from a specific
            # checkpoint
            build_data.rollback("empty", recursive=True)

    build_data_node.action = base_action
    logs_node.action = base_action
    media_node.action = base_action

    # insert all three nodes.
    zpool = doc.get_descendants(class_type=Zpool)[0]
    zpool.insert_children([build_data_node, logs_node, media_node])

    execute_checkpoint()

    # the from_xml() call of Filesystem tries to manually set the mountpoint of
    # the base dataset.  In doing that, it makes assumptions about how ZFS
    # determines the mountpoint of the dataset.  Now that ZFS has created the
    # dataset, query ZFS to set the mountpoint based on what ZFS set it to.
    base_dataset_object = Filesystem(os.path.join(zpool_name, base_dataset))
    base_dataset_mp = base_dataset_object.get("mountpoint")

    # (re)set the other mountpoints
    build_data_mp = os.path.join(base_dataset_mp, "build_data")
    logs_mp = os.path.join(base_dataset_mp, "logs")
    media_mp = os.path.join(base_dataset_mp, "media")

    # create the @empty snapshot if needed
    if not empty_snap.exists:
        build_data.snapshot("empty")

    DC_LOGGER.info("Build datasets successfully set up")
    return (base_dataset_mp, build_data_mp, logs_mp, media_mp)
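

# A minimal, hypothetical driver for the function above (not part of the
# original module): the pool/dataset names are made up, and the call assumes
# the InstallEngine and checkpoint plumbing have been initialized the way
# distro_const normally does before invoking setup_build_dataset().
def _example_setup_build_dataset():
    """ Sketch of a typical call; returns the four mountpoints. """
    return setup_build_dataset(
        zpool_name="rpool",                    # hypothetical pool
        base_dataset="dc/sample_distro",       # hypothetical dataset
        base_action="create",
        base_dataset_mp="/rpool/dc/sample_distro",
        resume_checkpoint=None,
        execute=True)
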
    def discover_zpools(self, search_name=""):
        """ discover_zpools - method to walk zpool list output to create Zpool
        objects.  Returns a logical DOC object with all zpools populated.
        """
        # create a logical element
        logical = Logical("logical")

        # assume noswap and nodump are True until a swap or dump zvol is
        # found
        logical.noswap = True
        logical.nodump = True

        # retrieve the list of zpool names
        cmd = [ZPOOL, "list", "-H", "-o", "name"]
        p = run(cmd)

        # split the output into a list of zpool names
        zpool_list = p.stdout.splitlines()
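        # e.g. zpool_list might now look like ["rpool", "tank"] on a system
        # with two pools (illustrative)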

        # walk the list and populate the DOC
        for zpool_name in zpool_list:
            # if the user has specified a specific search name, only run
            # discovery on that particular pool name
            if search_name and zpool_name != search_name:
                continue

            self.logger.debug("Populating DOC for zpool:  %s", zpool_name)

            # create a new Zpool DOC object and insert it
            zpool = Zpool(zpool_name)
            zpool.action = "preserve"
            logical.insert_children(zpool)

            # check to see if the zpool is the boot pool
            cmd = [ZPOOL, "list", "-H", "-o", "bootfs", zpool_name]
            p = run(cmd)
            if p.stdout.rstrip() != "-":
                zpool.is_root = True

            # get the mountpoint of the zpool
            cmd = [ZFS, "get", "-H", "-o", "value", "mountpoint", zpool_name]
            p = run(cmd)
            zpool.mountpoint = p.stdout.strip()

            # set the vdev_mapping on each physical object in the DOC tree for
            # this zpool
            self.set_vdev_map(zpool)

            # for each zpool, get all of its datasets.  Switch to the C locale
            # so we don't have issues with LC_NUMERIC settings
            cmd = [
                ZFS, "list", "-r", "-H", "-o", "name,type,used,mountpoint",
                zpool_name
            ]
            p = run(cmd, env={"LC_ALL": "C"})

            # walk each dataset and create the appropriate DOC objects for
            # each.  Skip the first line of list output, as the top level
            # dataset (also the dataset with the same name as that of the
            # zpool) may have a different mountpoint than the zpool.
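            # illustrative output, tab-delimited (the first line is skipped):
            #   "rpool        filesystem  36.9G  /rpool"
            #   "rpool/dump   volume      1.00G  -"
            #   "rpool/export filesystem  63K    /export"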
            for dataset in p.stdout.rstrip().split("\n")[1:]:
                try:
                    name, ds_type, ds_size, mountpoint = dataset.split(None, 3)
                except ValueError as err:
                    # trap on ValueError so any inconsistencies are captured
                    self.logger.debug("Unable to process dataset: %r" %
                                      dataset)
                    self.logger.debug(str(err))
                    continue

                # fix the name field to remove the name of the pool
                name = name.partition(zpool_name + "/")[2]

                if ds_type == "filesystem":
                    obj = Filesystem(name)
                    obj.mountpoint = mountpoint
                elif ds_type == "volume":
                    obj = Zvol(name)
                    obj.size = Size(ds_size)

                    # check for swap/dump.  If there's a match, set the zvol
                    # 'use' attribute and the noswap/nodump attribute of
                    # logical.  The zpool name needs to be re-attached to the
                    # zvol name to match what was already parsed
                    if os.path.join(zpool_name, name) in self.swap_list:
                        obj.use = "swap"
                        logical.noswap = False
                    if os.path.join(zpool_name, name) in self.dump_list:
                        obj.use = "dump"
                        logical.nodump = False

                else:
                    # skip unexpected dataset types so 'obj' is never
                    # unbound or stale from a previous iteration
                    continue

                obj.action = "preserve"
                zpool.insert_children(obj)

        return logical
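

# An illustrative caller for the method above (not from the original class):
# 'td' stands in for an instance of the discovery class that defines
# discover_zpools() and carries the logger, swap_list, dump_list, and
# set_vdev_map members used in the method body.
def _example_discover_zpools(td):
    """ Sketch: return the names of all discovered zpools. """
    logical = td.discover_zpools()
    return [zpool.name for zpool in
            logical.get_descendants(class_type=Zpool)]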