コード例 #1
0
    def test_create_sparc_ramdisks(self):
        """ test case for sparc ramdisk creation

        Seeds the boot-archive build area with a 1MB file, runs
        create_ramdisks(), and verifies that the expected ramdisk tuning
        entries were written to etc/system and that a lofi device was set
        up.  Finishes by staging a sparc bootblk file plus a /usr symlink
        to it (used by later bootblock-install steps).
        """
        self.baa.kernel_arch = "sparc"
        # set the nbpi to 1024
        self.baa.nbpi = 1024

        # create a 1MB file in the self.baa.ba_build directory
        cmd = ["/usr/sbin/mkfile", "1m", os.path.join(self.baa.ba_build, "a")]
        run(cmd)

        self.baa.create_ramdisks()

        # both ramdisk tunables must have been appended to etc/system;
        # grep exits 0 only when the pattern is found
        for entry in ["set root_is_ramdisk=1", "set ramdisk_size="]:
            cmd = [
                "/usr/bin/grep", entry,
                os.path.join(self.baa.ba_build, "etc/system")
            ]
            self.assertEqual(run_silent(cmd).wait(), 0)

        self.assert_(self.baa.lofi is not None)

        # create /platform/sun4u/lib/fs/ufs/bootblk from /etc/system
        # (any file will do for the test; only its presence matters)
        bb = os.path.join(self.baa.pkg_img_path,
                          "platform/sun4u/lib/fs/ufs/bootblk")
        shutil.copy2("/etc/system", bb)
        # 0444 is a Python 2 octal literal (r--r--r--)
        os.chmod(bb, 0444)

        # create a symlink in /usr to the bootblock
        os.symlink(
            bb,
            os.path.join(self.baa.pkg_img_path,
                         "usr/platform/sun4u/lib/fs/ufs/bootblk"))
コード例 #2
0
    def create_usr_archive(self):
        """ class method to create the /usr file system archive

        Builds pkg_img_path/solaris.zlib from the usr/ tree with mkisofs
        (optionally honoring self.dist_iso_sort as a -sort file) and then
        compresses it in place with lofiadm using self.compression_type.

        Raises RuntimeError if the lofiadm compression step fails.
        """
        os.chdir(self.pkg_img_path)

        # Generate the /usr file system archive.
        self.logger.info("Generating /usr file system archive")

        cmd = [cli.MKISOFS, "-o", "solaris.zlib", "-quiet", "-N",
               "-l", "-R", "-U", "-allow-multidot", "-no-iso-translate",
               "-cache-inodes", "-d", "-D", "-V", "\"compress\"", "usr"]

        # Use the iso_sort file if one is specified
        if self.dist_iso_sort is not None and \
           os.path.exists(self.dist_iso_sort):
            # insert the flags directly after the name of the output file
            cmd.insert(3, "-sort")
            cmd.insert(4, self.dist_iso_sort)
        run(cmd)

        self.logger.info("Compressing /usr file system archive using: " +
                         self.compression_type)

        cmd = [cli.LOFIADM, "-C", self.compression_type,
               os.path.join(self.pkg_img_path, "solaris.zlib")]
        try:
            run(cmd)
        except CalledProcessError as err:
            # BUG FIX: the previous handler referenced 'p', which is never
            # bound when run() raises, so a NameError masked the intended
            # RuntimeError.  Use the exception's returncode instead.
            # NOTE(review): os.strerror() maps errno values, not exit
            # codes; kept for consistency with the rest of this module.
            raise RuntimeError("Compression of /usr file system failed: " +
                               os.strerror(err.returncode))
コード例 #3
0
    def test_create_sparc_ramdisks(self):
        """ test case for sparc ramdisk creation

        Verifies that create_ramdisks() writes the ramdisk tunables to
        etc/system and sets up a lofi device, then stages a sparc bootblk
        file and a /usr symlink pointing at it.
        """
        self.baa.kernel_arch = "sparc"
        # set the nbpi to 1024
        self.baa.nbpi = 1024

        # create a 1MB file in the self.baa.ba_build directory
        cmd = ["/usr/sbin/mkfile", "1m", os.path.join(self.baa.ba_build, "a")]
        run(cmd)

        self.baa.create_ramdisks()

        # grep exits 0 only when the tunable is present in etc/system
        for entry in ["set root_is_ramdisk=1", "set ramdisk_size="]:
            cmd = ["/usr/bin/grep", entry, os.path.join(self.baa.ba_build,
                                                        "etc/system")]
            self.assertEqual(run_silent(cmd).wait(), 0)

        self.assert_(self.baa.lofi is not None)

        # create /platform/sun4u/lib/fs/ufs/bootblk from /etc/system
        # (content is irrelevant to the test; only existence matters)
        bb = os.path.join(self.baa.pkg_img_path,
                          "platform/sun4u/lib/fs/ufs/bootblk")
        shutil.copy2("/etc/system", bb)
        # 0444 is a Python 2 octal literal (r--r--r--)
        os.chmod(bb, 0444)

        # create a symlink in /usr to the bootblock
        os.symlink(bb, os.path.join(self.baa.pkg_img_path,
                                    "usr/platform/sun4u/lib/fs/ufs/bootblk"))
コード例 #4
0
    def create_misc_archive(self):
        """ class method to create the /mnt/misc file system archive

        Moves opt/, etc/ and var/ out of the package image into a
        temporary miscdirs/ staging area, registers a CPIO Software node
        in the DOC so the installer can later transfer those contents
        from /mnt/misc, builds miscdirs into solarismisc.zlib with
        mkisofs, and compresses it with lofiadm.  Finally removes the
        staged miscdirs/ and the (no longer needed) usr/ tree.

        Raises RuntimeError if the lofiadm compression step fails.
        """
        os.chdir(self.pkg_img_path)

        self.logger.info("Generating /mnt/misc file system archive")

        # stage the directories that will live inside the archive
        os.mkdir("miscdirs")
        shutil.move("opt", "miscdirs")
        shutil.move("etc", "miscdirs")
        shutil.move("var", "miscdirs")

        # add Software node to install items from /mnt/misc

        src_path = Dir("/mnt/misc")
        src = Source()
        src.insert_children(src_path)

        dst_path = Dir(INSTALL_TARGET_VAR)
        dst = Destination()
        dst.insert_children(dst_path)

        # transfer everything under the source dir; record its size so
        # the installer can report progress
        tr_install_misc = CPIOSpec()
        tr_install_misc.action = CPIOSpec.INSTALL
        tr_install_misc.contents = ["."]
        tr_install_misc.size = str(dir_size(os.path.join(self.pkg_img_path,
                                                         "miscdirs")))

        misc_software_node = Software(TRANSFER_MISC, type="CPIO")
        misc_software_node.insert_children([src, dst, tr_install_misc])
        self.doc.persistent.insert_children(misc_software_node)

        cmd = [cli.MKISOFS, "-o", "solarismisc.zlib", "-N", "-l", "-R",
               "-U", "-allow-multidot", "-no-iso-translate", "-quiet",
               "-cache-inodes", "-d", "-D", "-V", "\"compress\"",
               "miscdirs"]
        run(cmd)

        self.logger.info("Compressing /mnt/misc file system archive " +
                         "using: " + self.compression_type)

        cmd = [cli.LOFIADM, "-C", self.compression_type,
               os.path.join(self.pkg_img_path, "solarismisc.zlib")]
        # check_result=Popen.ANY: inspect the exit status ourselves so we
        # can distinguish a bad algorithm name from other failures
        p = run(cmd, check_result=Popen.ANY)
        if p.returncode != 0:
            if "invalid algorithm name" in p.stderr:
                raise RuntimeError("Invalid compression algorithm " +
                    "specified for /mnt/misc archive: " +
                    self.compression_type)
            else:
                # NOTE(review): os.strerror() maps errno values, not
                # process exit codes — message text may be misleading
                raise RuntimeError("Compression of /mnt/misc file system " +
                                   "failed:  " + os.strerror(p.returncode))

        # clean up the staging area now that solarismisc.zlib is built.
        # NOTE(review): usr/ removal is deferred to here, presumably
        # because solaris.zlib (built earlier from usr/) had to be
        # created first — confirm against create_usr_archive's caller.
        shutil.rmtree(os.path.join(self.pkg_img_path, "miscdirs"),
                      ignore_errors=True)
        shutil.rmtree(os.path.join(self.pkg_img_path, "usr"),
                      ignore_errors=True)
コード例 #5
0
    def create_usr_archive(self):
        """ class method to create the /usr file system archive

        Builds pkg_img_path/solaris.zlib from the usr/ tree with mkisofs
        (optionally honoring self.dist_iso_sort as a -sort file) and then
        compresses it in place with lofiadm using self.compression_type.

        Raises RuntimeError if the lofiadm compression step fails.
        """
        os.chdir(self.pkg_img_path)

        # Generate the /usr file system archive.
        self.logger.info("Generating /usr file system archive")

        cmd = [
            cli.MKISOFS, "-o", "solaris.zlib", "-quiet", "-N", "-l", "-R",
            "-U", "-allow-multidot", "-no-iso-translate", "-cache-inodes",
            "-d", "-D", "-V", "\"compress\"", "usr"
        ]

        # Use the iso_sort file if one is specified
        if self.dist_iso_sort is not None and \
           os.path.exists(self.dist_iso_sort):
            # insert the flags directly after the name of the output file
            cmd.insert(3, "-sort")
            cmd.insert(4, self.dist_iso_sort)
        run(cmd)

        self.logger.info("Compressing /usr file system archive using: " +
                         self.compression_type)

        cmd = [
            cli.LOFIADM, "-C", self.compression_type,
            os.path.join(self.pkg_img_path, "solaris.zlib")
        ]
        try:
            run(cmd)
        except CalledProcessError as err:
            # BUG FIX: the previous handler referenced 'p', which is never
            # bound when run() raises, so a NameError masked the intended
            # RuntimeError.  Use the exception's returncode instead.
            # NOTE(review): os.strerror() maps errno values, not exit
            # codes; kept for consistency with the rest of this module.
            raise RuntimeError("Compression of /usr file system failed: " +
                               os.strerror(err.returncode))
コード例 #6
0
File: cleanuar.py  Project: losmith2017/training
def do_cmd(cmd, subprocess, zone):
    """Run the command in 'cmd' on the zone passed in 'zone'.

    For a kernel zone ('solaris-kz') the command is run via zlogin;
    otherwise it runs in the executing environment, since for our
    purposes we're working with altroots and non-global zone altroots
    are accessible in the executing context.

    cmd        - either a list of argv strings, or — when 'subprocess'
                 is "popen" — a (command, arguments) pair of strings
    subprocess - "popen" selects the shell=True Popen path; any command
                 using a "*" wildcard needs the shell, and run() has no
                 way to pass a shell flag.  (NOTE: this parameter shadows
                 the stdlib 'subprocess' module name; kept for interface
                 compatibility.)
    zone       - zone object with .brand and .name attributes

    Returns the Popen instance post-execution.
    """
    if zone.brand == 'solaris-kz':
        zlogin_cmd = [ZLOGIN, '-U', zone.name]
        if subprocess == "popen":
            # BUG FIX: cmd[0]/cmd[1] are strings in this mode (the Popen
            # branch below formats them with "%s %s"), so the previous
            # extend() calls appended them one *character* at a time.
            # append() passes each as a whole argument.
            zlogin_cmd.append(cmd[0])
            zlogin_cmd.append(cmd[1])
        else:
            zlogin_cmd.extend(cmd)
        with SetUIDasEUID():
            return run(zlogin_cmd)
    else:
        if subprocess == "popen":
            return Popen("%s %s" % (cmd[0], cmd[1]),
                         shell=True,
                         stdin=PIPE,
                         stdout=PIPE,
                         stderr=PIPE,
                         close_fds=True)
        else:
            return run(cmd)
コード例 #7
0
 def install_bootblock(self, lofi_device):
     """ install the sparc boot blocks onto the given lofi device

     lofi_device - lofi device backing the boot archive
     """
     # installboot writes the ufs bootblk from the package image onto
     # the device
     bootblk = os.path.join(self.pkg_img_path,
                            "usr/platform/sun4u/lib/fs/ufs/bootblk")
     run([cli.INSTALLBOOT, bootblk, lofi_device])
コード例 #8
0
 def install_bootblock(self, lofi_device):
     """ class method to install the boot blocks for a sparc distribution

     lofi_device - lofi device to write the bootblk onto
     """
     # run installboot with the ufs bootblk staged in the package image
     cmd = [cli.INSTALLBOOT,
            os.path.join(self.pkg_img_path,
                         "usr/platform/sun4u/lib/fs/ufs/bootblk"),
            lofi_device]
     run(cmd)
コード例 #9
0
    def discover_partition(self, partition, blocksize):
        """ discover_partition - method to discover a physical disk's
        partition layout

        partition - partition object as discovered by ldm
        blocksize - blocksize of the disk

        Returns a new Partition DOC object with action "preserve"
        describing the discovered partition.
        """
        # store the attributes locally so libdiskmgt doesn't have to keep
        # looking them up
        partition_attributes = partition.attributes

        # partition name is ctdp path.  Split the string on "p"
        root_path, _none, index = partition.name.partition("p")

        # create a DOC object for this partition.  Set validate_children to
        # False so the shadow code doesn't adjust the start sector or size for
        # any children discovered
        new_partition = Partition(index, validate_children=False)
        new_partition.action = "preserve"
        new_partition.part_type = partition_attributes.id

        # check the partition's ID to set the partition type correctly.
        # "Solaris/Linux swap" shares one partition ID, so probe further.
        if partition_attributes.id == \
            Partition.name_to_num("Solaris/Linux swap"):
            try:
                # try to call prtvtoc on slice 2.  If it succeeds, this is a
                # solaris1 partition.
                slice2 = root_path + "s2"
                cmd = [PRTVTOC, slice2]
                run(cmd, stdout=Popen.DEVNULL)
            except CalledProcessError:
                # the call to prtvtoc failed which means this partition is
                # Linux swap. To be sure, prtvtoc failure might also mean an
                # unlabeled Solaris1 partition but we aren't going to worry
                # about that for now. Displaying an unlabeled (and therefore
                # unused) Solaris1 partition should not have any unforeseen
                # consequences in terms of data loss.
                new_partition.is_linux_swap = True

        # record the size in sector units at the disk's blocksize
        new_partition.size = Size(str(partition_attributes.nsectors) + \
                             Size.sector_units, blocksize=blocksize)
        # long() is Python 2; relsect is the partition's starting sector
        new_partition.start_sector = long(partition_attributes.relsect)
        new_partition.size_in_sectors = partition_attributes.nsectors

        # If the partition is an EFI system partition check to see if it
        # is PCFS formatted. We only do this on EFI system partitions to save
        # time since mounting can be slow.
        if new_partition.is_efi_system:
            ctdp = partition.name.rsplit('/', 1)[-1]
            new_partition._is_pcfs_formatted = self.is_pcfs_formatted(ctdp)

        return new_partition
コード例 #10
0
    def execute(self, dry_run=False):
        """ Entry point invoked by the Checkpoint parent class.

        dry_run - when True, log the command that would be run but do
                  not execute it (not used in DC)
        """
        self.logger.info("=== Executing Custom Script Checkpoint ===")
        self.logger.info("Custom Script provided is: '%s'" % self.command)

        # expand any DOC/DC placeholder strings embedded in the command
        self.replace_strings()

        self.logger.info("Custom Script to run is: '%s'" % self.command)

        if dry_run:
            return
        # the custom script may rely on shell syntax, so run via the shell
        run(self.command, shell=True)
コード例 #11
0
    def execute(self, dry_run=False):
        """ Primary execution method used by the Checkpoint parent class.
        dry_run is not used in DC

        dry_run - when True, the command is logged but never executed
        """
        self.logger.info("=== Executing Custom Script Checkpoint ===")
        self.logger.info("Custom Script provided is: '%s'" % self.command)

        # replace DOC and DC strings in the command
        self.replace_strings()

        self.logger.info("Custom Script to run is: '%s'" % self.command)

        if not dry_run:
            # shell=True: the custom script may use shell syntax
            run(self.command, shell=True)
コード例 #12
0
    def test_create_x86_ramdisks(self):
        """ verify x86 ramdisk creation sets up a lofi device and that
        archive creation then succeeds
        """
        self.baa.kernel_arch = "x86"
        # use an nbpi of 1024 for the ramdisk
        self.baa.nbpi = 1024

        # seed the boot-archive build area with a 1MB file
        seed_file = os.path.join(self.baa.ba_build, "a")
        run(["/usr/sbin/mkfile", "1m", seed_file])

        self.baa.create_ramdisks()
        self.assert_(self.baa.lofi is not None)

        self.baa.create_archives()
コード例 #13
0
def unmount_libc_overlay(logger):
    '''Unmount the processor-optimized libc overlay, if present.

    If an optimized libc is mounted over the base libc at
    /lib/libc.so.1, unmount it so the unoptimized one can be copied
    into the install target by subsequent cpio transfer checkpoints.

    logger - logger passed through to run() for command output
    '''

    mounts = run([MOUNT], logger=logger)

    # mount's output lists "<mountpoint> on <special>"; an overlay is
    # present when /lib/libc.so.1 appears as a mountpoint
    if "/lib/libc.so.1 on" not in mounts.stdout:
        return
    logger.debug("Optimized libc is mounted, unmounting...")
    run([UMOUNT, "/lib/libc.so.1"], logger=logger)
コード例 #14
0
def unmount_libc_overlay(logger):
    '''If the processor optimized libc is mounted on top of
       the base libc on /lib/libc.so.1,
       unmount the optimized libc overlay so the one without processor
       optimization can be copied into the install target by subsequent
       cpio transfer checkpoints.

       logger - logger object passed through to run()
    '''

    p = run([MOUNT], logger=logger)

    # check the output of the mount command to see whether the optimized
    # libc is mounted, if so, unmount it.  mount(1M) lists mounts as
    # "<mountpoint> on <special>", hence the "... on" substring test.
    if "/lib/libc.so.1 on" in p.stdout:
        logger.debug("Optimized libc is mounted, unmounting...")
        run([UMOUNT, "/lib/libc.so.1"], logger=logger)
コード例 #15
0
    def test_create_x86_ramdisks(self):
        """ test case for x86 ramdisk creation

        Seeds the build area with a file, verifies create_ramdisks()
        sets up a lofi device, then exercises create_archives().
        """
        self.baa.kernel_arch = "x86"
        # set the nbpi to 1024
        self.baa.nbpi = 1024

        # create a 1MB file
        cmd = ["/usr/sbin/mkfile", "1m", os.path.join(self.baa.ba_build, "a")]
        run(cmd)

        self.baa.create_ramdisks()
        self.assert_(self.baa.lofi is not None)

        self.baa.create_archives()
コード例 #16
0
    def test_generate_gnome_caches(self):
        """ verify generate_gnome_caches() causes the 'refresh' method of
        an imported transient SMF service to run (the dummy method script
        writes /refresh when invoked with the 'refresh' argument)
        """
        # create a dummy script in /lib/dummy that records which method
        # ('start' or 'refresh') it was invoked with
        dummy = os.path.join(self.ppim.pkg_img_path, "lib/dummy")
        with open(dummy, "w+") as fh:
            fh.write("#!/usr/bin/bash -p\n")
            fh.write("PATH=/bin:/usr/bin\n")
            fh.write("METHOD=$1\n")
            fh.write("case $METHOD in\n")
            fh.write("    'start')\n")
            fh.write("    /usr/bin/echo 'start' > /start\n")
            fh.write("    ;;\n")
            fh.write("    'refresh')\n")
            fh.write("    /usr/bin/echo 'refresh' > /refresh\n")
            fh.write("    ;;\n")
            fh.write("esac\n")
        # 0755 is a Python 2 octal literal (rwxr-xr-x)
        os.chmod(dummy, 0755)

        # set SVCCFG_REPOSITORY to the pkg_img_path so svccfg operates on
        # the image's repository instead of the live system's
        os.environ.update({
            "SVCCFG_REPOSITORY":
            os.path.join(self.ppim.pkg_img_path, "etc/svc/repository.db")
        })

        # create a new transient manifest
        testlib.create_transient_manifest(
            "test-desktop-cache-test",
            os.path.join(self.ppim.pkg_img_path,
                         "lib/svc/manifest/system/trans.xml"), "/lib/dummy")

        # svccfg import the transient manifest
        cmd = [
            os.path.join(self.ppim.pkg_img_path, "usr/sbin/svccfg"), "import",
            os.path.join(self.ppim.pkg_img_path,
                         "lib/svc/manifest/system/trans.xml")
        ]
        run(cmd)

        # generate the caches
        self.ppim.generate_gnome_caches()

        # need to sleep for just a moment to let fork() finish cleaning up
        time.sleep(0.5)

        # verify that /refresh exists in the pkg_image dir
        self.assert_(
            os.path.isfile(os.path.join(self.ppim.pkg_img_path, "refresh")))

        # restore the environment so other tests see the live repository
        del os.environ["SVCCFG_REPOSITORY"]
コード例 #17
0
    def home_zfs_parent_exists(self):
        '''Check that the parent of the to-be-created home ZFS dataset
        exists.

        The home dataset is created as <root_pool>/export/home/<login>
        with its /export/home/<login> mountpoint inherited from the
        parent, so verify that a dataset with the to-be-inherited
        /export/home mountpoint exists.

        Returns True when it does, False otherwise (including when the
        zfs list command itself fails).
        '''

        # collect the mountpoints of every existing ZFS dataset
        cmd = [ZFS, "list", "-H", "-o", "mountpoint"]
        try:
            zfs_list = run(cmd, logger=LOGGER)
        except CalledProcessError:
            LOGGER.warn("Could not determine if parent of home ZFS dataset "
                        "exists.")
            return False

        mountpoints = zfs_list.stdout.splitlines()
        if "/export/home" in mountpoints:
            LOGGER.debug("Parent of home ZFS dataset exists, creation of "
                         "user account enabled.")
            return True

        LOGGER.debug("Parent of home ZFS dataset does not exist, creation "
                     "of user account disabled.")
        return False
コード例 #18
0
    def setup_croinfo(self):
        """ set up a DataObjectDict representing the output from
        /usr/sbin/croinfo

        Populates self.cro_dict with ctd -> (position, alias, receptacle)
        entries and, when non-empty, inserts it into the DOC's persistent
        tree under CRO_LABEL.
        """
        # -h: no header, -O cAR: colon-delimited ctd/alias/receptacle
        cmd = [CROINFO, "-h", "-O", "cAR"]
        p = run(cmd)

        # for systems that do not support CRO, nothing will be returned in
        # stdout so simply return.
        if not p.stdout:
            return

        # keep a positional counter since we can't use OrderedDicts until 2.7
        i = 1
        for line in p.stdout.splitlines():
            (ctd, alias, receptacle) = line.split(":")
            # skip any entries where the ctd is missing.
            if not ctd:
                continue

            # empty alias fields are stored as None
            self.cro_dict[ctd] = (i, alias or None, receptacle)
            i += 1

        if self.cro_dict:
            # Only insert if there is something in it
            self.doc.persistent.insert_children(
                DataObjectDict(CRO_LABEL, self.cro_dict, generate_xml=True))
コード例 #19
0
    def test_configure_smf_default_hostname(self):
        """ verify configure_smf() enables the system/identity:node
        service instance injected into the var/svc manifest stub
        """
        # insert a system/identity:node service into the var/svc manifest,
        # splicing the NODENAME XML just before the closing tags
        manifest = os.path.join(self.ppim.pkg_img_path,
                                "var/svc/manifest/system/var_stub.xml")
        with open(manifest, "r") as fh:
            data = fh.read().splitlines()

        for line in NODENAME.split("\n"):
            data.insert(-2, line)

        with open(manifest, "w+") as fh:
            fh.write("\n".join(data))

        self.ppim.configure_smf()

        # set SVCCFG_REPOSITORY to the pkg_img_path so svccfg queries the
        # image's repository rather than the live system's
        os.environ.update({
            "SVCCFG_REPOSITORY":
            os.path.join(self.ppim.pkg_img_path, "etc/svc/repository.db")
        })

        # verify the instance's general/enabled is set to true
        cmd = [
            os.path.join(self.ppim.pkg_img_path, "usr/sbin/svccfg"), "-s",
            "libstub:default", "listprop", "general/enabled"
        ]
        p = run(cmd)
        # the format of the output will look like:
        # 'general/enabled  boolean  true\n'
        #                            ^^^^
        self.assertEqual(p.stdout.split()[2], "true", p.stdout)

        # restore the environment for subsequent tests
        del os.environ["SVCCFG_REPOSITORY"]
コード例 #20
0
File: users.py  Project: SynetoStorageOS/caiman
    def home_zfs_parent_exists(self):
        '''Return True if parent of to-be-created home ZFS dataset exists.
        Otherwise, return False.

        Home ZFS dataset is created in form of <root_pool>/export/home/<login>
        with mountpoint /export/home/<login> inherited from parent dataset.

        The check verifies that ZFS dataset with to-be-inherited /export/home
        mountpoint exists.
        '''

        # obtain mountpoints for all existing ZFS datasets
        cmd = [ZFS, "list", "-H", "-o", "mountpoint"]
        try:
            mountpoint_list = run(cmd, logger=LOGGER)
        except CalledProcessError:
            # treat a zfs list failure as "parent does not exist"
            LOGGER.warn("Could not determine if parent of home ZFS dataset "
                        "exists.")
            return False

        # one mountpoint per output line (-H suppresses the header)
        if "/export/home" not in mountpoint_list.stdout.splitlines():
            LOGGER.debug("Parent of home ZFS dataset does not exist, creation "
                         "of user account disabled.")
            return False

        LOGGER.debug("Parent of home ZFS dataset exists, creation of user "
                     "account enabled.")
        return True
コード例 #21
0
    def run(self):
        ''' Override run method from parent class.

            Runs Firefox, appending self.uri to the command line when one
            was provided.  A launch failure is logged but not re-raised.
        '''

        cmd = [FIREFOX]
        if self.uri is not None:
            cmd.append(self.uri)

        try:
            run(cmd)
        except CalledProcessError as err:
            # "except E as err" replaces the Python-2-only
            # "except E, err" form (valid from Python 2.6 onward)
            logger = logging.getLogger(INSTALL_LOGGER_NAME)
            logger.error("ERROR: executing command [%s] failed: [%s]", cmd,
                         err)
コード例 #22
0
    def test_configure_smf_custom_hostname(self):
        """ verify configure_smf() enables the injected
        system/identity:node instance when the manifest carries a
        non-default hostname
        """
        hostname = "hostnametest"

        # insert a system/identity:node service into the var/svc manifest,
        # splicing the NODENAME XML just before the closing tags
        manifest = os.path.join(self.ppim.pkg_img_path,
                                "var/svc/manifest/system/var_stub.xml")
        with open(manifest, "r") as fh:
            data = fh.read().splitlines()

        for line in NODENAME.split("\n"):
            data.insert(-2, line)

        # substitute the custom hostname for the default "solaris"
        with open(manifest, "w+") as fh:
            fh.write("\n".join(data).replace("solaris", hostname))

        self.ppim.configure_smf()

        # set SVCCFG_REPOSITORY to the pkg_img_path so svccfg queries the
        # image's repository rather than the live system's
        os.environ.update({"SVCCFG_REPOSITORY":
            os.path.join(self.ppim.pkg_img_path, "etc/svc/repository.db")})

        # verify the instance's general/enabled is set to true
        cmd = [os.path.join(self.ppim.pkg_img_path, "usr/sbin/svccfg"), "-s",
               "libstub:default", "listprop", "general/enabled"]
        p = run(cmd)

        # the format of the output will look like:
        # 'general/enabled  boolean  true\n'
        #                            ^^^^
        self.assertEqual(p.stdout.split()[2], "true", p.stdout)

        # restore the environment for subsequent tests
        del os.environ["SVCCFG_REPOSITORY"]
コード例 #23
0
    def is_pcfs_formatted(self, ctdn):
        """ is_pcfs_formatted() Return a Boolean value of True if the GPT
        partition guid is already formatted with a pcfs filesystem.

        This test is useful in conjuction with the is_efi_system property to
        determine if an existing EFI system partition  can be reused to store
        the Solaris UEFI boot program. If False, a format using mkfs on the
        partition would be required.

        ctdn - ctd name of the partition's block device under /dev/dsk

        Raises RuntimeError when the block device does not exist.
        """

        dev_dsk = "/dev/dsk/%s" % ctdn
        if not os.path.exists(dev_dsk):
            raise RuntimeError("No such block device exists: %s" % dev_dsk)

        # Check to see if it's mounted first. If it is we can look at the
        # fstype it is mounted with.
        mntmatch = None
        try:
            mnttab_open()
            mntmatch = getmntany(mnt_special=dev_dsk)
        finally:
            mnttab_close()

        # Mounted: the recorded fstype is authoritative
        if mntmatch is not None:
            return mntmatch.get('mnt_fstype') == 'pcfs'

        # Not mounted.
        # Try to mount with pcfs. This is a much more robust and reliable
        # check that the ESP is usable than simply checking with
        # fstyp(1M)
        mount_point = tempfile.mkdtemp(dir="/system/volatile",
                                       prefix="esp_%s_" % (ctdn))
        try:
            run([PCFSMOUNT, dev_dsk, mount_point])
            run([UMOUNT, dev_dsk])
            return True
        except CalledProcessError:
            # mount (or unmount) failed: not usable as pcfs
            return False
        finally:
            # every path above returns from inside the try, so the
            # trailing "return False" the original carried here was
            # unreachable dead code and has been removed
            os.rmdir(mount_point)
コード例 #24
0
    def test_archive_size(self):
        """ verify the calculated boot-archive size for an x86 archive
        whose content occupies 100MB
        """
        self.baa.ba_build = self.tdir
        self.baa.kernel_arch = "x86"

        # populate the build area with a single 100MB file
        archive_file = os.path.join(self.tdir, "fake/directory",
                                    "archive_file")
        run(["/usr/sbin/mkfile", "100M", archive_file])

        size = self.baa.calculate_ba_size(self.tdir)

        # verify the calculated size is at least
        # 20MB bigger than the test directory size
        min_expected_size = 120 * 1024  # 120MB
        self.assert_(size >= min_expected_size,
                     "%d %d" % (size, min_expected_size))
コード例 #25
0
    def test_archive_size(self):
        """ test case for an x86 archive that has content taking up 100M

        Verifies calculate_ba_size() pads the archive size by at least
        20MB over the content size.
        """
        self.baa.ba_build = self.tdir
        self.baa.kernel_arch = "x86"

        # create an archive that is 100MB
        f = os.path.join(self.tdir, "fake/directory", "archive_file")
        cmd = ["/usr/sbin/mkfile", "100M", f]
        run(cmd)

        size = self.baa.calculate_ba_size(self.tdir)

        # verify the calculated size is at least
        # 20MB bigger than the test directory size
        min_expected_size = 120 * 1024  # 120MB
        self.assert_(size >= min_expected_size, "%d %d" %
                     (size, min_expected_size))
コード例 #26
0
    def test_generate_gnome_caches(self):
        """ verify generate_gnome_caches() causes the 'refresh' method of
        an imported transient SMF service to run (the dummy method script
        writes /refresh when invoked with the 'refresh' argument)
        """
        # create a dummy script in /lib/dummy that records which method
        # ('start' or 'refresh') it was invoked with
        dummy = os.path.join(self.ppim.pkg_img_path, "lib/dummy")
        with open(dummy, "w+") as fh:
            fh.write("#!/usr/bin/bash -p\n")
            fh.write("PATH=/bin:/usr/bin\n")
            fh.write("METHOD=$1\n")
            fh.write("case $METHOD in\n")
            fh.write("    'start')\n")
            fh.write("    /usr/bin/echo 'start' > /start\n")
            fh.write("    ;;\n")
            fh.write("    'refresh')\n")
            fh.write("    /usr/bin/echo 'refresh' > /refresh\n")
            fh.write("    ;;\n")
            fh.write("esac\n")
        # 0755 is a Python 2 octal literal (rwxr-xr-x)
        os.chmod(dummy, 0755)

        # set SVCCFG_REPOSITORY to the pkg_img_path so svccfg operates on
        # the image's repository instead of the live system's
        os.environ.update({"SVCCFG_REPOSITORY": os.path.join(
            self.ppim.pkg_img_path, "etc/svc/repository.db")})

        # create a new transient manifest
        testlib.create_transient_manifest("test-desktop-cache-test",
            os.path.join(self.ppim.pkg_img_path,
                         "lib/svc/manifest/system/trans.xml"), "/lib/dummy")

        # svccfg import the transient manifest
        cmd = [os.path.join(self.ppim.pkg_img_path, "usr/sbin/svccfg"),
               "import", os.path.join(self.ppim.pkg_img_path,
                                      "lib/svc/manifest/system/trans.xml")]
        run(cmd)

        # generate the caches
        self.ppim.generate_gnome_caches()

        # need to sleep for just a moment to let fork() finish cleaning up
        time.sleep(0.5)

        # verify that /refresh exists in the pkg_image dir
        self.assert_(os.path.isfile(os.path.join(self.ppim.pkg_img_path,
                                                 "refresh")))

        # restore the environment so other tests see the live repository
        del os.environ["SVCCFG_REPOSITORY"]
コード例 #27
0
File: iscsi.py  Project: alhazred/caiman
    def initiator_name(self):
        """ property to return the iSCSI initiator node name

        Lazily queried from iscsiadm and cached in self._initiator_name.
        """

        if self._initiator_name is None:
            proc = run([ISCSIADM, "list", "initiator-node"])
            # the line of interest looks like:
            #   Initiator node name: <name>
            for line in proc.stdout.splitlines():
                if line.startswith("Initiator node name:"):
                    self._initiator_name = line.split(": ")[1]
        return self._initiator_name
コード例 #28
0
def get_pkg_version(package, repo=None):
    """ method to retrieve the version of a given package
    package - package whose version to retrieve
    repo - which repo to query (required; SystemExit is raised when
           omitted)

    Returns the version string extracted from the 'pkg list' FMRI.
    """
    if not repo:
        raise SystemExit("get_pkg_version missing repo")
    # parenthesized single-argument print is identical under Python 2
    # and valid under Python 3 (the bare print statement is 2-only)
    print("\nRetrieving package version of %s from:\n%s" % (package, repo))
    cmd = [PKG, "list", "-Hafv", "-g", repo, package]
    proc = run(cmd)
    # FMRI lines look like pkg://...<package>@<version>:<timestamp>
    version_re = re.compile(r"^pkg.*?%s@(.*?):" % package)
    version = version_re.search(proc.stdout).group(1)
    print(version)
    return version
コード例 #29
0
File: __init__.py  Project: SynetoStorageOS/caiman
def is_root_pool(pool_name):
    """ Return True when pool_name is the boot pool, False otherwise.

    A pool is the boot pool when its 'bootfs' property has a value.
    Sample 'zpool get bootfs' output for a boot pool:
        NAME   PROPERTY  VALUE                SOURCE
        rpool  bootfs    rpool/ROOT/dataset   local
    For a non-boot pool the VALUE column is a hyphen (-).
    """
    proc = run(["/usr/sbin/zpool", "get", "bootfs", pool_name])
    for line in proc.stdout.splitlines():
        # skip the header; only inspect the line for this pool
        if not line.startswith(pool_name):
            continue
        # third whitespace-separated column is the bootfs VALUE
        if line.split()[2] != "-":
            return True
    return False
コード例 #30
0
def is_root_pool(pool_name):
    """ function to determine if a zpool is the boot pool

    pool_name - name of the zpool to test

    Returns True when the pool's 'bootfs' property has a value,
    False otherwise.
    """
    cmd = ["/usr/sbin/zpool", "get", "bootfs", pool_name]
    p = run(cmd)
    # if the pool is the boot pool, the output looks like
    # NAME   PROPERTY  VALUE                SOURCE
    # rpool  bootfs    rpool/ROOT/dataset   local
    # if the pool is NOT the boot pool, the VALUE will be a hyphen (-)
    for line in p.stdout.splitlines():
        if line.startswith(pool_name):
            # third whitespace-separated column is the VALUE
            if line.split()[2] != "-":
                return True
    return False
コード例 #31
0
File: support_info.py  Project: alhazred/caiman
def _current_net_exists():
    '''Return True/False that system is on a network.
    '''
    cmd = ['/usr/sbin/ipadm', 'show-addr', '-p', '-o',
           'addrobj,state,current']
    try:
        ipadm_out = run(cmd, logger=LOGGER())
    except CalledProcessError:
        LOGGER().error("current_net_exists(): Error calling ipadm")
        raise

    # look for at least one non-loopback address object that is both in
    # the "ok" state and up (":ok:U" in the parsable output)
    for entry in ipadm_out.stdout.splitlines():
        if entry.startswith("lo"):
            continue
        if ":ok:U" in entry:
            return True
    return False
コード例 #32
0
    def test_customize_config_files(self):
        """ verify customize_config_files() modifies /etc/shadow,
        /etc/system, /etc/default/dhcpagent and /etc/inet/hosts inside
        the package image, saving the originals under save/
        """
        # NOTE(review): the password literal appears to be a scrubbed
        # placeholder from the source this example was extracted from
        self.ppim.root_password = "******"
        self.ppim.is_plaintext = "false"

        self.ppim.customize_config_files()

        # /etc/shadow
        # verify that root password was set up
        # NOTE(review): only the first line of shadow is examined here —
        # this relies on root being the first entry; confirm
        with open(os.path.join(self.ppim.pkg_img_path, "etc/shadow")) as fh:
            line = fh.readline()
            if line.startswith("root"):
                self.assert_(self.ppim.root_password in line)

        # /etc/system
        # verify the 'saved' /etc/system file is 0 bytes
        self.assert_(
            os.path.getsize(
                os.path.join(self.ppim.pkg_img_path, "save/etc/system")) == 0)

        # verify the new entries in /etc/system
        entry_list = ["zfs:zfs_arc_max=0x4002000", "zfs:zfs_vdev_cache_size=0"]
        for entry in entry_list:
            # grep returns 0 only when the entry is present
            cmd = [
                "/usr/bin/grep", entry,
                os.path.join(self.ppim.pkg_img_path, "etc/system")
            ]
            self.assertEqual(run_silent(cmd).wait(), 0)

        # /etc/default/dhcpagent
        # verify the save directory entry exists
        self.assert_(
            os.path.isdir(
                os.path.join(self.ppim.pkg_img_path, "save/etc/default")))

        # verify op code 17 is in the modified dhcpagent file
        cmd = [
            "/usr/bin/egrep", "^PARAM_REQUEST_LIST",
            os.path.join(self.ppim.pkg_img_path, "etc/default/dhcpagent")
        ]
        p = run(cmd)
        self.assertTrue("17" in p.stdout)

        # verify that /etc/inet/hosts was saved
        self.assert_(
            os.path.isfile(
                os.path.join(self.ppim.pkg_img_path, "save/etc/inet/hosts")))
コード例 #33
0
    def get_pkg_version(self, pkg):
        """ class method to store the version of a package into the
        persistent DOC dictionary (self.dc_pers_dict)

        pkg - which package to query

        NOTE(review): an earlier docstring documented a 'path' argument
        that does not exist; the version is stored in dc_pers_dict.
        """
        self.logger.debug("extracting package version of %s" % pkg)
        # capture the portion of the FMRI version between ',' and ':'
        # (i.e. the build/branch component, excluding the timestamp)
        version_re = re.compile(r"FMRI:.*?%s@.*?\,(.*?):" % pkg)

        cmd = [cli.PKG, "-R", self.pkg_img_path, "info", pkg]
        p = run(cmd)
        version = version_re.search(p.stdout).group(1)

        # auto-install pkg version needs to live in the persistent
        # section of the DOC to ensure pause/resume works correctly.
        # Save it here for later update in DOC by the execute method.
        self.dc_pers_dict[pkg] = version
コード例 #34
0
ファイル: pre_pkg_img_mod.py プロジェクト: alhazred/caiman
    def get_pkg_version(self, pkg):
        """ class method to look up a package's version and record it in
        self.dc_pers_dict, keyed by package name

        pkg - which package to query

        NOTE(review): the former docstring mentioned a 'path' parameter
        which this method does not take.
        """
        self.logger.debug("extracting package version of %s" % pkg)
        # the group captures the build/branch part of the FMRI version,
        # i.e. the text between ',' and the ':' that starts the timestamp
        version_re = re.compile(r"FMRI:.*?%s@.*?\,(.*?):" % pkg)

        cmd = [cli.PKG, "-R", self.pkg_img_path, "info", pkg]
        p = run(cmd)
        version = version_re.search(p.stdout).group(1)

        # auto-install pkg version needs to live in the persistent
        # section of the DOC to ensure pause/resume works correctly.
        # Save it here for later update in DOC by the execute method.
        self.dc_pers_dict[pkg] = version
コード例 #35
0
    def get_license(self):
        """ class method to get license and save to a file

        The OTN license from the osnet-incorporation pkg is obtained and
        saved to a file for later use by the ai_publish_pkg checkpoint.

        """
        self.logger.debug("obtaining OTN license for AI image package")
        pkg_info = run([cli.PKG, "-R", self.pkg_img_path, "info",
                        "--license", "osnet-incorporation"])

        # make sure the temporary directory exists before writing to it
        if not os.path.exists(self.tmp_dir):
            os.makedirs(self.tmp_dir)

        # write the captured license text out for later consumption
        with open(os.path.join(self.tmp_dir, "lic_OTN"), 'w') as fh:
            fh.write(pkg_info.stdout)
コード例 #36
0
ファイル: pre_pkg_img_mod.py プロジェクト: alhazred/caiman
    def get_license(self):
        """ class method to get license and save to a file

        The OTN license from the osnet-incorporation pkg is obtained and
        saved to a file (self.tmp_dir/lic_OTN) for later use by the
        ai_publish_pkg checkpoint.

        """
        self.logger.debug("obtaining OTN license for AI image package")
        # query the package image (not the running system) for the license
        cmd = [
            cli.PKG, "-R", self.pkg_img_path, "info", "--license",
            "osnet-incorporation"
        ]
        pkg_info = run(cmd)

        # if the tmp_dir doesn't exist create it
        if not os.path.exists(self.tmp_dir):
            os.makedirs(self.tmp_dir)

        # persist the license text captured on stdout
        lic_path = os.path.join(self.tmp_dir, "lic_OTN")
        with open(lic_path, 'w') as otn_license:
            otn_license.write(pkg_info.stdout)
コード例 #37
0
    def setup_iscsi(self):
        """ set up the iSCSI initiator appropriately (if specified)
        such that any physical/logical iSCSI devices can be discovered.

        Raises RuntimeError when iscsiadm is missing, when the
        iscsi/initiator service cannot be brought online, or when a
        requested target LUN cannot be found on the target.
        """
        SVC = "svc:/network/iscsi/initiator:default"

        # pull out all of the iscsi entries from the DOC
        iscsi_list = self.doc.get_descendants(class_type=Iscsi)

        # if there's nothing to do, simply return
        if not iscsi_list:
            return

        # verify iscsiadm is available
        if not os.path.exists(ISCSIADM):
            raise RuntimeError("iSCSI discovery enabled but %s does " \
                "not exist" % ISCSIADM)

        # ensure the iscsi/initiator service is online
        state_cmd = [SVCS, "-H", "-o", "STATE", SVC]
        p = run(state_cmd, check_result=Popen.ANY)
        if p.returncode != 0:
            # iscsi/initiator is not installed
            raise RuntimeError("%s not found - is it installed?" % SVC)

        # if the service is offline, enable it
        state = p.stdout.strip()
        if state == "offline":
            cmd = [SVCADM, "enable", "-s", SVC]
            run(cmd)

            # verify the service is now online
            p = run(state_cmd, check_result=Popen.ANY)
            state = p.stdout.strip()

            if state != "online":
                raise RuntimeError("Unable to start %s" % SVC)

        elif state != "online":
            # the service is in some other kind of state, so raise an error
            raise RuntimeError("%s requires manual servicing" % SVC)

        for iscsi in iscsi_list:
            # check iscsi.source for dhcp.  If set, query dhcpinfo for the
            # iSCSI boot parameters and set the rest of the Iscsi object
            # attributes
            if iscsi.source == "dhcp":
                p = run([DHCPINFO, "Rootpath"])

                # RFC 4173 defines the format of iSCSI boot parameters in DHCP
                # Rootpath as follows:
                # Rootpath=iscsi:<IP>:<protocol>:<port>:<LUN>:<target>
                iscsi_str = p.stdout.partition('=')[2]
                params = iscsi_str.split(':')
                iscsi.target_ip = params[1]
                iscsi.target_port = params[3]
                iscsi.target_lun = params[4]
                iscsi.target_name = params[5]

            if iscsi.target_name is not None:
                # set up static discovery of targets
                discovery_str = iscsi.target_name + "," + iscsi.target_ip
                if iscsi.target_port is not None:
                    discovery_str += ":" + iscsi.target_port
                cmd = [ISCSIADM, "add", "static-config", discovery_str]
                run(cmd)

                cmd = [ISCSIADM, "modify", "discovery", "--static", "enable"]
                run(cmd)

                target_list_cmd = [
                    ISCSIADM, "list", "target", "-S", discovery_str
                ]
            else:
                # set up discovery of sendtargets targets
                discovery_str = iscsi.target_ip
                if iscsi.target_port is not None:
                    discovery_str += ":" + iscsi.target_port
                cmd = [ISCSIADM, "add", "discovery-address", discovery_str]
                run(cmd)

                cmd = [
                    ISCSIADM, "modify", "discovery", "--sendtargets", "enable"
                ]
                run(cmd)

                target_list_cmd = [ISCSIADM, "list", "target", "-S"]

            # run devfsadm and wait for the iscsi devices to configure
            run([DEVFSADM, "-i", "iscsi"])

            # list all the targets found.  Use a distinct name here: the
            # original code assigned the result to 'iscsi_list', clobbering
            # the list of Iscsi DOC objects being iterated by this loop.
            target_list = run(target_list_cmd)

            # the output will look like:
            #
            # Target: <iqn string>
            #        Alias: -
            #        TPGT: 1
            #        ISID: 4000002a0000
            #        Connections: 1
            #        LUN: 1
            #             Vendor:  SUN
            #             Product: COMSTAR
            #             OS Device Name: <ctd>
            #        LUN: 0
            #             Vendor:  SUN
            #             Product: COMSTAR
            #             OS Device Name: <ctd>
            #
            # The LUN number and ctd strings are the only values we're
            # interested in.
            iscsi_dict = dict()

            # prime lun_num to None so a stray "OS Device Name:" line seen
            # before the first "LUN:" line cannot raise UnboundLocalError
            lun_num = None

            # walk the output from iscsiadm list target to create a mapping
            # between ctd and LUN number.
            for line in target_list.stdout.splitlines():
                line = line.lstrip()
                if line.startswith("LUN:"):
                    lun_num = line.split(": ")[1]
                if line.startswith("OS Device Name:") and lun_num is not None:
                    iscsi_ctd = line.rpartition("/")[2]
                    iscsi_dict[iscsi_ctd] = lun_num

                    # reset the lun_num for the next lun
                    lun_num = None

            # try to map iscsi_lun back to iscsi.target_lun
            for iscsi_ctd, iscsi_lun in iscsi_dict.items():
                if iscsi.target_lun is not None:
                    if iscsi.target_lun == iscsi_lun:
                        iscsi.parent.ctd = iscsi_ctd.partition("s2")[0]
                        break
                else:
                    iscsi.parent.ctd = iscsi_ctd.partition("s2")[0]
                    break
            else:
                raise RuntimeError("target_lun: %s not found on target" %
                                   iscsi.target_lun)
コード例 #38
0
    def discover_zpools(self, search_name=""):
        """ discover_zpools - method to walk zpool list output to create Zpool
        objects.  Returns a logical DOC object with all zpools populated.

        search_name - if non-empty, restrict discovery to the pool with
        that name
        """
        # create a logical element
        logical = Logical("logical")

        # set noswap and nodump to True until a zvol is found otherwise
        logical.noswap = True
        logical.nodump = True

        # retrieve the list of zpools
        cmd = [ZPOOL, "list", "-H", "-o", "name"]
        p = run(cmd)

        # Get the list of zpools
        zpool_list = p.stdout.splitlines()

        # walk the list and populate the DOC
        for zpool_name in zpool_list:
            # if the user has specified a specific search name, only run
            # discovery on that particular pool name
            if search_name and zpool_name != search_name:
                continue

            self.logger.debug("Populating DOC for zpool:  %s", zpool_name)

            # create a new Zpool DOC object and insert it
            zpool = Zpool(zpool_name)
            zpool.action = "preserve"
            logical.insert_children(zpool)

            # check to see if the zpool is the boot pool
            cmd = [ZPOOL, "list", "-H", "-o", "bootfs", zpool_name]
            p = run(cmd)
            if p.stdout.rstrip() != "-":
                zpool.is_root = True

            # get the mountpoint of the zpool
            cmd = [ZFS, "get", "-H", "-o", "value", "mountpoint", zpool_name]
            p = run(cmd)
            zpool.mountpoint = p.stdout.strip()

            # set the vdev_mapping on each physical object in the DOC tree for
            # this zpool
            self.set_vdev_map(zpool)

            # for each zpool, get all of its datasets.  Switch to the C locale
            # so we don't have issues with LC_NUMERIC settings
            cmd = [
                ZFS, "list", "-r", "-H", "-o", "name,type,used,mountpoint",
                zpool_name
            ]
            p = run(cmd, env={"LC_ALL": "C"})

            # walk each dataset and create the appropriate DOC objects for
            # each.  Skip the first line of list output, as the top level
            # dataset (also the dataset with the same name as that of the
            # zpool) may have a different mountpoint than the zpool.
            for dataset in p.stdout.rstrip().split("\n")[1:]:
                try:
                    name, ds_type, ds_size, mountpoint = dataset.split(None, 3)
                except ValueError as err:
                    # trap on ValueError so any inconsistencies are captured
                    self.logger.debug("Unable to process dataset: %r" %
                                      dataset)
                    self.logger.debug(str(err))
                    continue

                # fix the name field to remove the name of the pool
                name = name.partition(zpool_name + "/")[2]

                if ds_type == "filesystem":
                    obj = Filesystem(name)
                    obj.mountpoint = mountpoint
                elif ds_type == "volume":
                    obj = Zvol(name)
                    obj.size = Size(ds_size)

                    # check for swap/dump.  If there's a match, set the zvol
                    # 'use' attribute and the noswap/nodump attribute of
                    # logical.  The zpool name needs to be re-attached to the
                    # zvol name to match what was already parsed
                    if os.path.join(zpool_name, name) in self.swap_list:
                        obj.use = "swap"
                        logical.noswap = False
                    if os.path.join(zpool_name, name) in self.dump_list:
                        obj.use = "dump"
                        logical.nodump = False
                else:
                    # an unexpected dataset type would previously have hit
                    # an UnboundLocalError on 'obj' below; skip it instead
                    self.logger.debug("skipping dataset of type '%s': %r" %
                                      (ds_type, dataset))
                    continue

                obj.action = "preserve"
                zpool.insert_children(obj)

        return logical
コード例 #39
0
    def discover_disk(self, drive):
        """ discover_disk - method to discover a physical disk's attributes,
        partitions and slices

        drive - which physical drive to parse

        Returns a populated Disk DOC object, or None when the drive is
        down, already discovered (same wwn), has inconsistent aliases, or
        exposes no usable media information.
        """
        # create a DOC object for this drive.  Set validate_children to False
        # so the shadow code doesn't adjust the start sector or size for any
        # children discovered
        new_disk = Disk("disk", validate_children=False)

        # extract drive attributes and media information
        drive_attributes = drive.attributes
        drive_media = drive.media

        # if a drive is offline or down return None
        if drive_attributes.status == "DOWN":
            # NOTE(review): new_disk.name is still the placeholder given to
            # Disk() above, so this message may not identify the drive
            self.logger.debug("disk '%s' is offline" % new_disk.name)
            return None

        # set attributes for the disk
        if type(drive.aliases) is list and len(drive.aliases) > 0:
            new_disk.wwn = getattr(drive.aliases[0].attributes, "wwn", None)

        existing_wwns = [d.wwn for d in \
                         self.root.get_descendants(class_type=Disk) \
                         if d.wwn is not None]

        if new_disk.wwn is not None and new_disk.wwn in existing_wwns:
            # this wwn has already been discovered, so skip further discovery
            return None

        for alias in drive.aliases:
            # check to see if the wwn changes between drive aliases
            if getattr(alias.attributes, "wwn", None) != new_disk.wwn:
                # NOTE(review): new_disk.ctd has not been assigned yet at
                # this point — confirm what this warning actually prints
                self.logger.warning("disk '%s' has different wwn for the "
                                    "aliases" % new_disk.ctd)
                return None
            if drive_media is not None:
                # classify each alias as readable (active) or not (passive)
                if self.verify_disk_read(alias.name,
                                         drive_media.attributes.blocksize):
                    new_disk.active_ctds.append(alias.name)
                else:
                    new_disk.passive_ctds.append(alias.name)

        # set the new_disk ctd string
        if new_disk.wwn is None:
            # use the only alias name
            new_disk.ctd = drive.aliases[0].name
        else:
            # use the first active ctd
            new_disk.ctd = new_disk.active_ctds[0]
        new_disk.devid = drive.name
        new_disk.iscdrom = drive.cdrom
        new_disk.opath = drive_attributes.opath

        # set the devpath
        if os.path.islink(drive_attributes.opath):
            link = os.readlink(drive_attributes.opath)

            # clean up the link to get rid of the leading '../../devices/' and
            # trailing minor name
            if link.startswith("../../devices") and ":" in link:
                link = link.partition("../../devices")[2].rpartition(":")[0]
                new_disk.devpath = link

        # check for SPARC eeprom settings which would interfere with finding
        # the boot disk
        if self.arch == "sparc":
            if not self.sparc_diag_mode:
                # check eeprom settings
                cmd = [EEPROM, "diag-switch?"]
                p = run(cmd)
                # eeprom output has the form 'diag-switch?=<value>'
                diag_switch_value = p.stdout.partition("=")[2]
                if diag_switch_value.strip().lower() == "true":
                    # set a variable so we don't check every single disk and
                    # log a message
                    self.sparc_diag_mode = True
                    self.logger.info("Unable to determine bootdisk with " + \
                                     "diag-switch? eeprom setting set to " + \
                                     "'true'.  Please set diag-switch? " + \
                                     "to false and reboot the system")

        # check for the bootdisk
        if not self.sparc_diag_mode:
            if self.is_bootdisk(new_disk.ctd):
                new_disk.disk_keyword = DiskKeyword()

        # set vendor information for the drive
        new_disk.disk_prop = DiskProp()
        if drive.controllers:
            new_disk.disk_prop.dev_type = drive.controllers[0].attributes.type
        new_disk.disk_prop.dev_vendor = drive_attributes.vendor_id

        # walk the media node to extract partitions and slices
        if drive_media is None:
            # since the drive has no media, we can't determine any attributes
            # about it (geometry, slices, partitions, etc.) so simply return
            # None
            return None
        else:
            # store the attributes locally so libdiskmgt doesn't have to keep
            # looking them up
            drive_media_attributes = drive_media.attributes

            # if a drive comes back without basic information, skip discovery
            # entirely
            if not DMMediaAttr.NACCESSIBLE in drive_media_attributes or \
               not DMMediaAttr.BLOCKSIZE in drive_media_attributes:
                self.logger.debug("skipping discovery of %s" % new_disk.ctd)
                return None

            # retrieve the drive's geometry
            new_disk = self.set_geometry(drive_media_attributes, new_disk)

            # keep a list of slices already visited so they're not discovered
            # again later
            visited_slices = []

            # if this system is x86 and the drive has slices but no fdisk
            # partitions, don't report any of the slices
            if self.arch == "i386" and drive_media.slices and not \
               drive_media.partitions:
                return new_disk

            for partition in drive_media.partitions:
                new_partition = self.discover_partition(
                    partition, drive_media_attributes.blocksize)

                # add the partition to the disk object
                new_disk.insert_children(new_partition)

                # check for slices associated with this partition.  If found,
                # add them as children
                for slc in partition.media.slices:
                    # discover the slice
                    new_slice = self.discover_slice(
                        slc, drive_media_attributes.blocksize)

                    # constrain when a slice is added to the DOC.  We only want
                    # to add a slice when:
                    # - the partition id is a Solaris partition (non EFI)
                    # - the fstyp is 'zfs' (for EFI labeled disks)
                    # - the fstyp is 'unknown_fstyp' AND it's slice 8 (for EFI
                    #   labeled disks)
                    if new_partition.is_solaris:
                        new_partition.insert_children(new_slice)
                    elif new_partition.part_type == 238:
                        # 238 (0xEE) is the EFI/protective partition type;
                        # call fstyp to try to figure out the slice type
                        cmd = [FSTYP, slc.name]
                        p = run(cmd, check_result=Popen.ANY)
                        if p.returncode == 0:
                            if p.stdout.strip() == "zfs":
                                # add the slice since it's used by zfs
                                new_partition.insert_children(new_slice)
                        else:
                            if p.stderr.startswith("unknown_fstyp") and \
                                new_slice.name == "8":
                                # add the slice since it's an EFI boot slice
                                new_partition.insert_children(new_slice)

                    # keep a record this slice so it's not discovered again
                    visited_slices.append(slc.name)

            for slc in drive_media.slices:
                # discover the slice if it's not already been found
                if slc.name not in visited_slices:
                    new_slice = self.discover_slice(
                        slc, drive_media_attributes.blocksize)
                    new_disk.insert_children(new_slice)

            return new_disk
コード例 #40
0
    def sparc_fiocompress(self, mountpoint):
        """ class method to fiocompress majority of the files
            in the boot archive.
            Note: this method only applies to SPARC

        mountpoint - root of the mounted boot archive to write the
            compressed copies into
        """
        # construct a list of exclusion files and directories
        exclude_dirs = ["usr/kernel"]
        exclude_files = copy.deepcopy(self.uncompressed_files)
        flist = os.path.join(self.ba_build, "boot/solaris/filelist.ramdisk")
        with open(flist, "r") as fh:
            lines = [line.strip() for line in fh.readlines()]
        for line in lines:
            if os.path.isdir(os.path.join(self.ba_build, line)):
                exclude_dirs.append(line)
            elif os.path.isfile(os.path.join(self.ba_build, line)):
                exclude_files.append(line)

        # get the cwd
        cwd = os.getcwd()

        os.chdir(self.ba_build)
        for root, dirs, files in os.walk("."):
            # normalize away the leading "./" (or lone ".").  The original
            # root.lstrip("./") strips a *character set*, which would also
            # mangle path components that begin with a dot.
            if root == ".":
                root = ""
            elif root.startswith("./"):
                root = root[2:]

            # check to see if root is in the exclude_dirs list
            if root in exclude_dirs:
                self.logger.debug("skipping " + root + " due to exclude list")
                continue

            # prune excluded subdirectories in place so os.walk skips them.
            # Iterate over a copy: calling dirs.remove() while iterating
            # dirs itself skips the entry after each removal.
            for d in dirs[:]:
                if os.path.join(root, d) in exclude_dirs:
                    self.logger.debug("skipping " + os.path.join(root, d) + \
                                      " due to exclude list")
                    dirs.remove(d)

            # walk each file and if it's in the skip_list, continue
            for f in files:
                if os.path.join(root, f) in exclude_files:
                    self.logger.debug("skipping " + os.path.join(root, f) + \
                                     " due to exclude list")
                    continue

                # we have a file that needs to be fiocompressed
                ba_path = os.path.join(self.ba_build, root, f)
                mp_path = os.path.join(mountpoint, root, f)

                # ensure that the file meets the following criteria:
                # - it is a regular file
                # - size > 0
                # - it is NOT a hardlink
                statinfo = os.lstat(ba_path)
                if stat.S_ISREG(statinfo.st_mode) and not \
                   statinfo.st_size == 0 and \
                   statinfo.st_nlink < 2:
                    # fiocompress the file
                    cmd = [cli.FIOCOMPRESS, "-mc", ba_path, mp_path]
                    run(cmd)

        # return to the original directory
        os.chdir(cwd)
コード例 #41
0
    def configure_smf(self):
        """ class method for the configuration of SMF manifests

        Seeds a fresh SMF repository in /tmp, imports all manifests from
        the package image, applies the configured profiles, sets (or
        retrieves) the image hostname, then installs the repository into
        the package image.
        """

        #
        # For purposes of System Configuration, network/physical
        # and network/install services have to depend on manifest-import
        # and milestone/config. That creates dependency cycle on install
        # media, since network/physical (depending on network/install)
        # takes care of bringing up PXE/wanboot NIC which is needed for
        # purposes of mounting root filesystem (in case of network boot).
        # And milestone/config and manifest-import depend on smf services
        # responsible for assembling root filesystem.
        #
        # As a workaround, deliver media specific smf manifests
        # for milestone/config, network/physical and network/install -
        # import milestone/config and manifest-import without specifying
        # network/physical and network/install as their dependents.
        # We can do that, since media come pre-configured.
        #
        # Save the original manifests - they replace media specific ones
        # on the target system in case of CPIO transfer method.
        #
        save_list = ["lib/svc/manifest/network/network-install.xml",
                     "lib/svc/manifest/network/network-physical.xml"]

        self.save_files_directories(save_list)

        self.logger.info("Preloading SMF repository")

        # create a unique file in /tmp for the construction of the SMF
        # repository.  mkstemp returns an open file descriptor along with
        # the path; close it right away since only the path is used
        # (the original code leaked this descriptor)
        repo_fd, repo_name = tempfile.mkstemp(dir="/tmp",
                                              prefix="install_repo_")
        os.close(repo_fd)

        # Set environment variables needed by svccfg.
        smf_env_vars = dict()
        smf_env_vars["SVCCFG_REPOSITORY"] = repo_name
        smf_env_vars["SVCCFG_CONFIGD_PATH"] = os.path.join(
            self.pkg_img_path, "lib/svc/bin/svc.configd")
        smf_env_vars["SVCCFG_DTD"] = os.path.join(
            self.pkg_img_path, "usr/share/lib/xml/dtd/service_bundle.dtd.1")
        smf_env_vars["SVCCFG_MANIFEST_PREFIX"] = self.pkg_img_path
        smf_env_vars["SVCCFG_CHECKHASH"] = "1"

        # add all of the manifests in /var and /lib
        for manifest_dir in ["lib", "var"]:
            import_dir = os.path.join(self.pkg_img_path,
                                      "%s/svc/manifest" % manifest_dir)
            cmd = [cli.SVCCFG, "import", import_dir]
            try:
                run(cmd, env=smf_env_vars)
            except CalledProcessError:
                raise RuntimeError("Error importing manifests from %s" % \
                                   import_dir)

        # Apply each profile from the manifest
        for svc_profile_path in self.svc_profiles:
            self.logger.info("Applying SMF profile: %s" % svc_profile_path)
            cmd = [cli.SVCCFG, "apply", svc_profile_path]
            run(cmd, env=smf_env_vars)

        # set the hostname of the distribution
        if self.hostname is not None:
            cmd = [cli.SVCCFG, "-s", "system/identity:node", "setprop",
                   "config/nodename", "=", "astring:", '"%s"' % self.hostname]
            run(cmd, env=smf_env_vars)
        else:
            # retrieve the default hostname; listprop output is
            # 'config/nodename astring <value>', so take the third field
            cmd = [cli.SVCCFG, "-s", "system/identity:node", "listprop",
                   "config/nodename"]
            p = run(cmd, env=smf_env_vars)
            self.hostname = p.stdout.strip().split()[2]

        # move the repo from /tmp to the proper place
        self.logger.debug("moving repo from /tmp into pkg_image directory")
        shutil.move(repo_name, os.path.join(self.pkg_img_path,
            "etc/svc/repository.db"))

        # Run a custom script, if provided to update the pkg_img_path
        self.modify_script()
コード例 #42
0
    # parse the command line (parser is constructed before this fragment)
    args = parser.parse_args()

    OSNET_REPO = args.osnetrepo
    AI_PKG_REPO = args.aipkgrepo
    print "Starting at " + time.asctime()
    print "Using OSNET_REPO %s" % OSNET_REPO
    print "Using AI_PKG_REPO %s" % AI_PKG_REPO

    # Create the repository (as needed) if it's a local path (no scheme)
    # or file:/// scheme.
    print "\nCreating repository at %s" % args.repo
    scheme = urlparse.urlsplit(args.repo).scheme
    if scheme in ("file", ""):
        # Try to create the repo.  check_result=Popen.ANY suppresses
        # raising on failure so an already-existing repo is tolerated.
        cmd = [PKGREPO, "create", args.repo]
        repo = run(cmd, check_result=Popen.ANY)

        if repo.returncode == 0:
            # New repo was created. Add the publisher and make it default
            cmd = [PKGREPO, "-s", args.repo, "add-publisher", args.publisher]
            run(cmd)
            cmd = [PKGREPO, "-s", args.repo, "set",
                   "publisher/prefix=%s" % args.publisher]
            run(cmd)

    # Get version of consolidation/osnet/osnet-incorporation
    incorp_version = get_pkg_version(OSNET, OSNET_REPO)

    # Get version of install-image/solaris-auto-install
    ai_pkg_version = get_pkg_version(AI_PKG_NAME, AI_PKG_REPO)
コード例 #43
0
    def sparc_fiocompress(self, mountpoint):
        """ class method to fiocompress majority of the files
            in the boot archive.
            Note: this method only applies to SPARC

        mountpoint - root of the mounted boot archive that receives the
            compressed copies
        """
        # construct a list of exclusion files and directories
        exclude_dirs = ["usr/kernel"]
        exclude_files = copy.deepcopy(self.uncompressed_files)
        flist = os.path.join(self.ba_build, "boot/solaris/filelist.ramdisk")
        with open(flist, "r") as fh:
            lines = [line.strip() for line in fh.readlines()]
        for line in lines:
            if os.path.isdir(os.path.join(self.ba_build, line)):
                exclude_dirs.append(line)
            elif os.path.isfile(os.path.join(self.ba_build, line)):
                exclude_files.append(line)

        # get the cwd
        cwd = os.getcwd()

        os.chdir(self.ba_build)
        for root, dirs, files in os.walk("."):
            # drop the "./" prefix (or lone ".").  lstrip("./") strips a
            # set of characters and would corrupt dot-prefixed names, so
            # normalize explicitly instead.
            if root == ".":
                root = ""
            elif root.startswith("./"):
                root = root[2:]

            # check to see if root is in the exclude_dirs list
            if root in exclude_dirs:
                self.logger.debug("skipping " + root + " due to exclude list")
                continue

            # prune excluded subdirectories so os.walk does not descend
            # into them.  Iterate a copy: removing from 'dirs' while
            # iterating it skips the element that follows each removal.
            for d in dirs[:]:
                if os.path.join(root, d) in exclude_dirs:
                    self.logger.debug("skipping " + os.path.join(root, d) + \
                                      " due to exclude list")
                    dirs.remove(d)

            # walk each file and if it's in the skip_list, continue
            for f in files:
                if os.path.join(root, f) in exclude_files:
                    self.logger.debug("skipping " + os.path.join(root, f) + \
                                     " due to exclude list")
                    continue

                # we have a file that needs to be fiocompressed
                ba_path = os.path.join(self.ba_build, root, f)
                mp_path = os.path.join(mountpoint, root, f)

                # ensure that the file meets the following criteria:
                # - it is a regular file
                # - size > 0
                # - it is NOT a hardlink
                statinfo = os.lstat(ba_path)
                if stat.S_ISREG(statinfo.st_mode) and not \
                   statinfo.st_size == 0 and \
                   statinfo.st_nlink < 2:
                    # fiocompress the file
                    cmd = [cli.FIOCOMPRESS, "-mc", ba_path, mp_path]
                    run(cmd)

        # return to the original directory
        os.chdir(cwd)
コード例 #44
0
    def configure_system(self):
        """ class method for the execution of various, isolated shell commands
        needed to configure the boot archive.

        Side effects (paths relative to self.ba_build unless noted):
          - runs devfsadm -r against the boot archive and removes the lock
            file it leaves behind
          - creates the "reconfigure" marker and etc/rtc_config
          - creates tmp (mode 01777), proc, mnt, mnt/misc, mnt/pkg and
            .cdrom directories
          - creates the bin -> usr/bin and opt -> mnt/misc/opt symlinks
            when not already present
          - writes .volsetid (0444, root:root) and the image-type marker
          - registers a CPIOSpec UNINSTALL action for "opt" on the
            TRANSFER_ROOT Software node in self.doc
          - copies etc/svc/repository.db from self.pkg_img_path

        NOTE(review): os.chdir() below changes the process-wide cwd and is
        not restored before returning -- confirm callers tolerate this
        before reusing the method elsewhere.
        """
        self.logger.info("preparing boot archive")

        # configure devices
        cmd = [cli.DEVFSADM, "-r", self.ba_build]
        run(cmd)

        # etc/dev/.devfsadm_dev.lock gets created every time
        # devfsadm is run. remove it since there's no point
        # in carrying it forward through to the image
        lockfile = os.path.join(self.ba_build, "etc/dev/.devfsadm_dev.lock")
        if os.path.exists(lockfile):
            self.logger.debug("removing devfsadm lock file")
            os.remove(lockfile)

        # Set a marker so that every boot is a reconfiguration boot
        cmd = [cli.TOUCH, os.path.join(self.ba_build, "reconfigure")]
        run(cmd)

        # Set up /etc/rtc_config
        cmd = [
            cli.CP,
            os.path.join(self.file_defaults, "rtc_config.default"),
            os.path.join(self.ba_build, "etc/rtc_config")
        ]
        run(cmd)

        # go to the ba_build
        self.logger.debug("creating symlinks and mountpoints")
        os.chdir(self.ba_build)

        # create ./tmp.  mkdir and chmod have to be done seperately
        # (os.mkdir's mode argument is filtered through the umask,
        # so the sticky 01777 mode must be applied explicitly)
        self.logger.debug("creating tmp dir and setting it to 01777")
        os.mkdir("tmp")
        os.chmod("tmp", 01777)

        # create ./proc
        self.logger.debug("creating proc directory")
        os.mkdir("proc")

        # create ./mnt
        self.logger.debug("creating mnt directory")
        os.mkdir("mnt")

        # create bin symlink to /usr/bin if needed
        self.logger.debug("checking for symlink of bin -> usr/bin")
        if not os.path.islink("bin"):
            os.symlink("usr/bin", "bin")

        # create mountpoints for misc and pkg zlibs
        self.logger.debug("creating mnt/misc and mnt/pkg mountpoints")
        os.mkdir("mnt/misc", 0755)
        os.mkdir("mnt/pkg", 0755)

        # create volume set id file, use system name + date for uniqueness
        with open(".volsetid", "w") as v:
            volsetid = os.uname()[1] + '-' + \
                       datetime.datetime.now().isoformat()
            self.logger.debug("setting .volsetid to %s" % volsetid)
            v.write(volsetid)

        # chmod it to 444 and set the ownership to root:root (0:0)
        os.chmod(".volsetid", 0444)
        os.chown(".volsetid", 0, 0)

        # create the file marking the image type (e.g. .autoinstall or
        # .livecd); only its existence matters, so it is left empty
        self.logger.debug("creating image_type file")
        with open(self.image_type, "w"):
            pass

        # create .cdrom directory
        self.logger.debug("creating .cdrom directory")
        os.mkdir(".cdrom", 0755)

        # create opt symlink to mnt/misc/opt if needed
        self.logger.debug("checking for symlink of opt -> mnt/misc/opt")
        if not os.path.islink("opt"):
            os.symlink("mnt/misc/opt", "opt")

        # schedule "opt" for removal from the installed system's transfer
        # list (it is a symlink into the misc archive here)
        tr_uninstall = CPIOSpec()
        tr_uninstall.action = CPIOSpec.UNINSTALL
        tr_uninstall.contents = ["opt"]

        root_tr_software_node = self.doc.persistent.get_descendants(
            name=TRANSFER_ROOT, class_type=Software, not_found_is_err=True)[0]
        root_tr_software_node.insert_children(tr_uninstall)

        # copy the SMF repository from pkg_image_path to ba_build
        pkg_img_path_repo = os.path.join(self.pkg_img_path,
                                         "etc/svc/repository.db")
        ba_build_repo = os.path.join(self.ba_build, "etc/svc/repository.db")
        shutil.copy2(pkg_img_path_repo, ba_build_repo)
コード例 #45
0
    def create_misc_archive(self):
        """ class method to create the /mnt/misc file system archive
        (solarismisc.zlib) from the opt, etc and var directories of the
        package image area.

        Side effects:
          - moves opt, etc and var under a temporary "miscdirs" directory
          - inserts a TRANSFER_MISC Software node into self.doc
          - leaves a compressed solarismisc.zlib in self.pkg_img_path and
            removes both miscdirs and usr from the package image area
          - changes the process-wide cwd to self.pkg_img_path

        Raises RuntimeError if lofiadm compression fails.
        """
        os.chdir(self.pkg_img_path)

        self.logger.info("Generating /mnt/misc file system archive")

        # stage the directories that will live under /mnt/misc at runtime
        os.mkdir("miscdirs")
        shutil.move("opt", "miscdirs")
        shutil.move("etc", "miscdirs")
        shutil.move("var", "miscdirs")

        # add Software node to install items from /mnt/misc

        src_path = Dir("/mnt/misc")
        src = Source()
        src.insert_children(src_path)

        dst_path = Dir(INSTALL_TARGET_VAR)
        dst = Destination()
        dst.insert_children(dst_path)

        tr_install_misc = CPIOSpec()
        tr_install_misc.action = CPIOSpec.INSTALL
        tr_install_misc.contents = ["."]
        tr_install_misc.size = str(
            dir_size(os.path.join(self.pkg_img_path, "miscdirs")))

        misc_software_node = Software(TRANSFER_MISC, type="CPIO")
        misc_software_node.insert_children([src, dst, tr_install_misc])
        self.doc.persistent.insert_children(misc_software_node)

        cmd = [
            cli.MKISOFS, "-o", "solarismisc.zlib", "-N", "-l", "-R", "-U",
            "-allow-multidot", "-no-iso-translate", "-quiet", "-cache-inodes",
            "-d", "-D", "-V", "\"compress\"", "miscdirs"
        ]
        run(cmd)

        self.logger.info("Compressing /mnt/misc file system archive " +
                         "using: " + self.compression_type)

        cmd = [
            cli.LOFIADM, "-C", self.compression_type,
            os.path.join(self.pkg_img_path, "solarismisc.zlib")
        ]
        p = run(cmd, check_result=Popen.ANY)
        if p.returncode != 0:
            if "invalid algorithm name" in p.stderr:
                raise RuntimeError("Invalid compression algorithm " +
                                   "specified for /mnt/misc archive: " +
                                   self.compression_type)
            else:
                # p.returncode is a process exit status, not an errno, so
                # passing it to os.strerror() would produce an unrelated
                # message; report the status and lofiadm's stderr instead
                raise RuntimeError("Compression of /mnt/misc file system " +
                                   "failed (exit status %d):  %s" %
                                   (p.returncode, p.stderr))

        # solarismisc.zlib now exists, so the miscdirs staging area can be
        # removed, along with /usr (presumably already captured in
        # solaris.zlib by create_usr_archive -- confirm ordering)
        shutil.rmtree(os.path.join(self.pkg_img_path, "miscdirs"),
                      ignore_errors=True)
        shutil.rmtree(os.path.join(self.pkg_img_path, "usr"),
                      ignore_errors=True)
コード例 #46
0
ファイル: pre_pkg_img_mod.py プロジェクト: alhazred/caiman
    def generate_gnome_caches(self):
        """ class method to generate the needed gnome caches

        Runs each *desktop-cache* SMF service's refresh method and then
        fc-cache inside a chroot of self.pkg_img_path.

        Raises RuntimeError if a chroot'd method or fc-cache fails.
        """
        # GNOME service start methods are executed in order to
        # pre-generate the gnome caches. Since these services are
        # not alternate root aware, the start methods need to be
        # executed in a chroot'd environment (chroot'd to the pkg_image
        # area).
        #
        # Also, the service start methods redirect their output to /dev/null.
        # Create a temporary file named 'dev/null' inside the pkg_image
        # area where these services can dump messages to. Once the caches
        # have been generated, the temporary 'dev/null' file needs to be
        # removed.

        def chroot_run(cmd):
            """ fork a child, chroot it to the pkg_image area and run cmd.
            The child must always leave via os._exit():  if an exception
            were allowed to propagate out of the child, it would unwind
            into the parent's code path and execute it a second time.
            """
            pid = os.fork()
            if pid == 0:
                # child:  never return, never raise
                try:
                    os.chroot(self.pkg_img_path)
                    self.logger.debug("executing:  %s" % " ".join(cmd))
                    run(cmd)
                except BaseException:
                    os._exit(1)
                os._exit(0)
            else:
                # parent:  wait for the child and verify it succeeded
                _none, status = os.wait()
                if status != 0:
                    raise RuntimeError("%s failed" % " ".join(cmd))

        #
        # Needed, otherwise it was observed that some binaries run within
        # 'chroot' environment fail to determine 'current working directory'.
        #
        os.chdir(self.pkg_img_path)

        self.logger.debug("creating temporary /dev/null in pkg_image")
        cmd = [cli.TOUCH, os.path.join(self.pkg_img_path, "dev/null")]
        run(cmd)

        # Set environment variables needed by svccfg.
        smf_env_vars = dict()
        smf_env_vars["SVCCFG_CONFIGD_PATH"] = os.path.join(
            self.pkg_img_path, "lib/svc/bin/svc.configd")
        smf_env_vars["SVCCFG_DTD"] = os.path.join(
            self.pkg_img_path, "usr/share/lib/xml/dtd/service_bundle.dtd.1")
        smf_env_vars["SVCCFG_MANIFEST_PREFIX"] = self.pkg_img_path
        smf_env_vars["SVCCFG_CHECKHASH"] = "1"
        smf_env_vars["SVCCFG_REPOSITORY"] = os.path.join(
            self.pkg_img_path, "etc/svc/repository.db")

        # generate a list of services to refresh
        cmd = [cli.SVCCFG, "list", "*desktop-cache*"]
        p = run(cmd, stderr_loglevel=logging.ERROR, env=smf_env_vars)
        service_list = p.stdout.splitlines()

        # if no services were found, log a message
        if not service_list:
            self.logger.error("WARNING:  no services named *desktop-cache* "
                              "were found")

        # since there is only a handful of methods to execute, there is
        # negligible overhead to spawning a process to execute the method.
        for service in service_list:
            # remove ":default" from the service name
            service = service.replace(":default", "")

            # get the name of the refresh/exec script
            cmd = [cli.SVCCFG, "-s", service, "listprop", "refresh/exec"]
            try:
                p = run(cmd, env=smf_env_vars)
            except CalledProcessError:
                self.logger.critical("service: " + service + " does " +
                                     "not have a start method")
                continue

            # the output looks like:
            # refresh/exec  astring  "/lib/svc/method/method-name %m"\n
            # the method is the 3rd whitespace-separated field, minus its
            # surrounding double-quotes
            method = p.stdout.split()[2]
            method = method.strip('"')

            # execute the refresh method inside the chroot
            chroot_run([cli.BASH, method, "refresh"])

        # We disabled gnome-netstatus-applet for the liveCD but we want it
        # to be active when the default user logs in after installation.
        # By giving the saved copy of panel-default-setup.entries a later
        # timestamp than the global gconf cache we'll end up enabling the
        # applet on first reboot when the desktop-cache/gconf-cache service
        # starts.
        cmd = [
            cli.TOUCH,
            os.path.join(self.pkg_img_path,
                         "etc/gconf/schemas/panel-default-setup.entries")
        ]
        run(cmd)

        # remove the temporary dev/null
        self.logger.debug("removing temporary /dev/null from pkg_image")
        os.unlink(os.path.join(self.pkg_img_path, "dev/null"))

        self.logger.info("Creating font cache")
        chroot_run([cli.FC_CACHE, "--force"])
コード例 #47
0
    def generate_gnome_caches(self):
        """ class method to generate the needed gnome caches

        Runs each *desktop-cache* SMF service's refresh method and then
        fc-cache inside a chroot of self.pkg_img_path.

        Raises RuntimeError if a chroot'd method or fc-cache fails.
        """
        # GNOME service start methods are executed in order to
        # pre-generate the gnome caches. Since these services are
        # not alternate root aware, the start methods need to be
        # executed in a chroot'd environment (chroot'd to the pkg_image
        # area).
        #
        # Also, the service start methods redirect their output to /dev/null.
        # Create a temporary file named 'dev/null' inside the pkg_image
        # area where these services can dump messages to. Once the caches
        # have been generated, the temporary 'dev/null' file needs to be
        # removed.

        def run_chrooted(cmd):
            """ fork a child, chroot it to the pkg_image area and run cmd.
            The child must always terminate via os._exit():  letting an
            exception escape the child would make it fall through into the
            parent's code path and execute it a second time.
            """
            pid = os.fork()
            if pid == 0:
                # child:  never return, never raise
                try:
                    os.chroot(self.pkg_img_path)
                    self.logger.debug("executing:  %s" % " ".join(cmd))
                    run(cmd)
                except BaseException:
                    os._exit(1)
                os._exit(0)
            else:
                # parent:  wait for the child and verify it succeeded
                _none, status = os.wait()
                if status != 0:
                    raise RuntimeError("%s failed" % " ".join(cmd))

        #
        # Needed, otherwise it was observed that some binaries run within
        # 'chroot' environment fail to determine 'current working directory'.
        #
        os.chdir(self.pkg_img_path)

        self.logger.debug("creating temporary /dev/null in pkg_image")
        cmd = [cli.TOUCH, os.path.join(self.pkg_img_path, "dev/null")]
        run(cmd)

        # Set environment variables needed by svccfg.
        smf_env_vars = dict()
        smf_env_vars["SVCCFG_CONFIGD_PATH"] = os.path.join(
            self.pkg_img_path, "lib/svc/bin/svc.configd")
        smf_env_vars["SVCCFG_DTD"] = os.path.join(
            self.pkg_img_path, "usr/share/lib/xml/dtd/service_bundle.dtd.1")
        smf_env_vars["SVCCFG_MANIFEST_PREFIX"] = self.pkg_img_path
        smf_env_vars["SVCCFG_CHECKHASH"] = "1"
        smf_env_vars["SVCCFG_REPOSITORY"] = os.path.join(
            self.pkg_img_path, "etc/svc/repository.db")

        # generate a list of services to refresh
        cmd = [cli.SVCCFG, "list", "*desktop-cache*"]
        p = run(cmd, stderr_loglevel=logging.ERROR, env=smf_env_vars)
        service_list = p.stdout.splitlines()

        # if no services were found, log a message
        if not service_list:
            self.logger.error("WARNING:  no services named *desktop-cache* "
                              "were found")

        # since there is only a handful of methods to execute, there is
        # negligible overhead to spawning a process to execute the method.
        for service in service_list:
            # remove ":default" from the service name
            service = service.replace(":default", "")

            # get the name of the refresh/exec script
            cmd = [cli.SVCCFG, "-s", service, "listprop", "refresh/exec"]
            try:
                p = run(cmd, env=smf_env_vars)
            except CalledProcessError:
                self.logger.critical("service: " + service + " does " +
                                     "not have a start method")
                continue

            # the output looks like:
            # refresh/exec  astring  "/lib/svc/method/method-name %m"\n
            # the method is the 3rd whitespace-separated field, minus its
            # surrounding double-quotes
            method = p.stdout.split()[2]
            method = method.strip('"')

            # execute the refresh method inside the chroot
            run_chrooted([cli.BASH, method, "refresh"])

        # We disabled gnome-netstatus-applet for the liveCD but we want it
        # to be active when the default user logs in after installation.
        # By giving the saved copy of panel-default-setup.entries a later
        # timestamp than the global gconf cache we'll end up enabling the
        # applet on first reboot when the desktop-cache/gconf-cache service
        # starts.
        cmd = [cli.TOUCH, os.path.join(self.pkg_img_path,
               "etc/gconf/schemas/panel-default-setup.entries")]
        run(cmd)

        # remove the temporary dev/null
        self.logger.debug("removing temporary /dev/null from pkg_image")
        os.unlink(os.path.join(self.pkg_img_path, "dev/null"))

        self.logger.info("Creating font cache")
        run_chrooted([cli.FC_CACHE, "--force"])
コード例 #48
0
    def configure_system(self):
        """ class method for the execution of various, isolated shell commands
        needed to configure the boot archive.

        Side effects (paths relative to self.ba_build unless noted):
          - runs devfsadm -r against the boot archive and removes the lock
            file it leaves behind
          - creates the "reconfigure" marker and etc/rtc_config
          - creates tmp (mode 01777), proc, mnt, mnt/misc, mnt/pkg and
            .cdrom directories
          - creates the bin -> usr/bin and opt -> mnt/misc/opt symlinks
            when not already present
          - writes .volsetid (0444, root:root) and the image-type marker
          - registers a CPIOSpec UNINSTALL action for "opt" on the
            TRANSFER_ROOT Software node in self.doc
          - copies etc/svc/repository.db from self.pkg_img_path

        NOTE(review): os.chdir() below changes the process-wide cwd and is
        not restored before returning -- confirm callers tolerate this
        before reusing the method elsewhere.
        """
        self.logger.info("preparing boot archive")

        # configure devices
        cmd = [cli.DEVFSADM, "-r", self.ba_build]
        run(cmd)

        # etc/dev/.devfsadm_dev.lock gets created every time
        # devfsadm is run. remove it since there's no point
        # in carrying it forward through to the image
        lockfile = os.path.join(self.ba_build, "etc/dev/.devfsadm_dev.lock")
        if os.path.exists(lockfile):
            self.logger.debug("removing devfsadm lock file")
            os.remove(lockfile)

        # Set a marker so that every boot is a reconfiguration boot
        cmd = [cli.TOUCH, os.path.join(self.ba_build, "reconfigure")]
        run(cmd)

        # Set up /etc/rtc_config
        cmd = [cli.CP, os.path.join(self.file_defaults, "rtc_config.default"),
               os.path.join(self.ba_build, "etc/rtc_config")]
        run(cmd)

        # go to the ba_build
        self.logger.debug("creating symlinks and mountpoints")
        os.chdir(self.ba_build)

        # create ./tmp.  mkdir and chmod have to be done seperately
        # (os.mkdir's mode argument is filtered through the umask,
        # so the sticky 01777 mode must be applied explicitly)
        self.logger.debug("creating tmp dir and setting it to 01777")
        os.mkdir("tmp")
        os.chmod("tmp", 01777)

        # create ./proc
        self.logger.debug("creating proc directory")
        os.mkdir("proc")

        # create ./mnt
        self.logger.debug("creating mnt directory")
        os.mkdir("mnt")

        # create bin symlink to /usr/bin if needed
        self.logger.debug("checking for symlink of bin -> usr/bin")
        if not os.path.islink("bin"):
            os.symlink("usr/bin", "bin")

        # create mountpoints for misc and pkg zlibs
        self.logger.debug("creating mnt/misc and mnt/pkg mountpoints")
        os.mkdir("mnt/misc", 0755)
        os.mkdir("mnt/pkg", 0755)

        # create volume set id file, use system name + date for uniqueness
        with open(".volsetid", "w") as v:
            volsetid = os.uname()[1] + '-' + \
                       datetime.datetime.now().isoformat()
            self.logger.debug("setting .volsetid to %s" % volsetid)
            v.write(volsetid)

        # chmod it to 444 and set the ownership to root:root (0:0)
        os.chmod(".volsetid", 0444)
        os.chown(".volsetid", 0, 0)

        # create the file marking the image type (e.g. .autoinstall or
        # .livecd); only its existence matters, so it is left empty
        self.logger.debug("creating image_type file")
        with open(self.image_type, "w"):
            pass

        # create .cdrom directory
        self.logger.debug("creating .cdrom directory")
        os.mkdir(".cdrom", 0755)

        # create opt symlink to mnt/misc/opt if needed
        self.logger.debug("checking for symlink of opt -> mnt/misc/opt")
        if not os.path.islink("opt"):
            os.symlink("mnt/misc/opt", "opt")

        # schedule "opt" for removal from the installed system's transfer
        # list (it is a symlink into the misc archive here)
        tr_uninstall = CPIOSpec()
        tr_uninstall.action = CPIOSpec.UNINSTALL
        tr_uninstall.contents = ["opt"]

        root_tr_software_node = self.doc.persistent.get_descendants(
            name=TRANSFER_ROOT, class_type=Software, not_found_is_err=True)[0]
        root_tr_software_node.insert_children(tr_uninstall)

        # copy the SMF repository from pkg_image_path to ba_build
        pkg_img_path_repo = os.path.join(self.pkg_img_path,
                                         "etc/svc/repository.db")
        ba_build_repo = os.path.join(self.ba_build,
                                     "etc/svc/repository.db")
        shutil.copy2(pkg_img_path_repo, ba_build_repo)