コード例 #1
0
    def _create(self):
        """Build and run the lvcreate command, then record the LV in state."""
        cmd = ['lvcreate', '--name', self.name]
        # A fixed size (-L) wins over an extent count (-l).
        if self.size:
            cmd += ['-L', self.size]
        elif self.extents:
            cmd += ['-l', self.extents]
        if self.options:
            cmd += self.options
        cmd.append(self.base)

        logger.debug("Creating lv command [%s]", cmd)
        exec_sudo(cmd)

        # Record the new LV in shared state so later phases
        # (mkfs/mount) can find its device node.
        self.state['blockdev'][self.name] = {
            'vgs': self.base,
            'size': self.size,
            'extents': self.extents,
            'opts': self.options,
            'device': '/dev/mapper/%s-%s' % (self.base, self.name)
        }
コード例 #2
0
    def _notify_os_of_partition_changes(self, device_path, partition_devices):
        """Notify the OS of partition table changes

        There is the need to call some programs to inform the operating
        system of partition table changes.
        These calls are highly distribution and version specific. Here
        a couple of different methods are used to get the best result.

        :param device_path: block device whose partition table changed
        :param partition_devices: partition device paths we expect to appear
        """
        try:
            exec_sudo(["partprobe", device_path])
            exec_sudo(["udevadm", "settle"])
        except CalledProcessError as e:
            # Best-effort: settling can fail harmlessly on some distros.
            logger.info("Ignoring settling failure: %s", e)

        if self._all_part_devices_exist(partition_devices):
            return
        # If running inside Docker, make our nodes manually, because udev
        # will not be working.
        if os.path.exists("/.dockerenv"):
            # kpartx cannot run in sync mode in docker.
            exec_sudo(["kpartx", "-av", device_path])
            exec_sudo(["dmsetup", "--noudevsync", "mknodes"])
            return

        exec_sudo(["kpartx", "-avs", device_path])
コード例 #3
0
    def create(self):
        """Run mkfs for this filesystem and record the result in state."""
        cmd = ['mkfs', '-t', self.type]
        if self.opts:
            cmd.extend(self.opts)
        cmd.extend(['-L', self.label])

        # Only some filesystem types let us pre-set the UUID at mkfs time.
        if self.type in ('ext2', 'ext3', 'ext4'):
            cmd.extend(['-U', self.uuid])
        elif self.type == 'xfs':
            cmd.extend(['-m', "uuid=%s" % self.uuid])
        else:
            logger.warning("UUID will not be written for fs type [%s]",
                           self.type)

        # Quiet flag for the types that support it.
        if self.type in ('ext2', 'ext3', 'ext4', 'xfs'):
            cmd.append('-q')

        if 'blockdev' not in self.state:
            self.state['blockdev'] = {}
        device = self.state['blockdev'][self.base]['device']
        cmd.append(device)

        logger.debug("Creating fs command [%s]", cmd)
        exec_sudo(cmd)

        if 'filesys' not in self.state:
            self.state['filesys'] = {}
        self.state['filesys'][self.name] = {
            'uuid': self.uuid,
            'label': self.label,
            'fstype': self.type,
            'opts': self.opts,
            'device': device,
        }
コード例 #4
0
ファイル: lvm.py プロジェクト: openstack/diskimage-builder
    def _create(self):
        """Create the volume group with vgcreate and record it in state."""
        # The PVs recorded their device paths in the state during their
        # own _create(); resolve our base elements to those paths.
        pvs_devs = [self.state['pvs'][pv]['device'] for pv in self.base]

        cmd = ['vgcreate', self.name]
        cmd.extend(pvs_devs)
        if self.options:
            cmd.extend(self.options)

        logger.debug("Creating vg command [%s]", cmd)
        exec_sudo(cmd)

        # save state
        if 'vgs' not in self.state:
            self.state['vgs'] = {}
        self.state['vgs'][self.name] = {
            'opts': self.options,
            'devices': self.base,
        }
コード例 #5
0
ファイル: lvm.py プロジェクト: openstack/diskimage-builder
    def _create(self):
        """Create this volume group with vgcreate and record it in state."""
        # The PV's have saved their actual device name into the state
        # during their _create().  Look at our base elements and thus
        # find the underlying device paths in the state.
        pvs_devs = []
        for pv in self.base:
            pvs_dev = self.state['pvs'][pv]['device']
            pvs_devs.append(pvs_dev)

        cmd = [
            "vgcreate",
        ]
        cmd.append(self.name)
        cmd.extend(pvs_devs)
        if self.options:
            cmd.extend(self.options)

        logger.debug("Creating vg command [%s]", cmd)
        exec_sudo(cmd)

        # save state
        if 'vgs' not in self.state:
            self.state['vgs'] = {}
        self.state['vgs'][self.name] = {
            'opts': self.options,
            'devices': self.base,
        }
コード例 #6
0
ファイル: lvm.py プロジェクト: openstack/diskimage-builder
    def _create(self):
        """Create the logical volume (plain, thin-pool, or thin)."""
        cmd = [
            "lvcreate",
        ]
        cmd.extend(['--name', self.name])
        if self.type:
            cmd.extend(['--type', self.type])
        if self.thin_pool:
            cmd.extend(['--thin-pool', self.thin_pool])
        if self.size:
            size = parse_abs_size_spec(self.size)
            # ensure size aligns with physical extents
            size = size - size % PHYSICAL_EXTENT_BYTES
            # thin LVs take a virtual size (-V); everything else a real
            # size (-L)
            size_arg = '-V' if self.type == 'thin' else '-L'
            cmd.extend([size_arg, '%dB' % size])
        elif self.extents:
            cmd.extend(['-l', self.extents])
        if self.options:
            cmd.extend(self.options)

        cmd.append(self.base)

        logger.debug("Creating lv command [%s]", cmd)
        exec_sudo(cmd)

        # save state
        device_name = '%s-%s' % (self.base, self.name)
        self.state['blockdev'][self.name] = {
            'vgs': self.base,
            'size': self.size,
            'extents': self.extents,
            'opts': self.options,
            'device': '/dev/mapper/%s' % device_name
        }
        # register a rollback so a failed build removes the device again
        self.add_rollback(remove_device, device_name)
コード例 #7
0
ファイル: lvm.py プロジェクト: openstack/diskimage-builder
    def umount(self):
        """Deactivate every LV, then every VG, and wait for udev."""
        for lv in self.lvs:
            lv._umount()
        for vg in self.vgs:
            vg._umount()
        # Make sure the device nodes are really gone before returning.
        exec_sudo(['udevadm', 'settle'])
コード例 #8
0
ファイル: lvm.py プロジェクト: openstack/diskimage-builder
    def umount(self):
        """Deactivate all logical volumes, then all volume groups.

        Ends with 'udevadm settle' so the device nodes are gone before
        later teardown steps run.
        """
        for lvs in self.lvs:
            lvs._umount()

        for vgs in self.vgs:
            vgs._umount()

        exec_sudo(['udevadm', 'settle'])
コード例 #9
0
ファイル: lvm.py プロジェクト: xinma/diskimage-builder
    def cleanup(self):
        """Run LV cleanups, then VG cleanups, then settle udev."""
        for lv in self.lvs:
            lv._cleanup()
        for vg in self.vgs:
            vg._cleanup()
        exec_sudo(['udevadm', 'settle'])
コード例 #10
0
 def cleanup(self):
     """Tear down the kpartx partition mappings for the parent device."""
     # remove the partition mappings made for the parent
     # block-device by create() above.  this is called from the
     # child PartitionNode umount/delete/cleanup.  Thus every
     # partition calls it, but we only want to do it once;
     # self.already_cleaned is our gate.
     if not self.already_cleaned:
         self.already_cleaned = True
         exec_sudo(
             ["kpartx", "-d", self.state['blockdev'][self.base]['device']])
コード例 #11
0
 def umount(self):
     """Drop kpartx mappings once the last partition has unmounted."""
     # Every child PartitionNode calls this on umount; count down and
     # only remove the device-mapper entries for the parent block
     # device when the final partition is gone.
     self.number_of_partitions -= 1
     if self.number_of_partitions == 0:
         device = self.state['blockdev'][self.base]['device']
         exec_sudo(["kpartx", "-d", device])
コード例 #12
0
 def umount(self):
     """Drop kpartx partition mappings when the last partition unmounts."""
     # Remove the partition mappings made for the parent
     # block-device by create() above.  This is called from the
     # child PartitionNode umount.  Thus every
     # partition calls it, but we only want to do it once when
     # we know this is the very last partition
     self.number_of_partitions -= 1
     if self.number_of_partitions == 0:
         exec_sudo(["kpartx", "-d",
                    self.state['blockdev'][self.base]['device']])
コード例 #13
0
    def _create_gpt(self):
        """Create partitions with GPT

        Builds one sgdisk command covering every configured partition
        and executes it, recording each partition's expected
        /dev/mapper device in the state.
        """
        cmd = ['sgdisk', self.image_path]

        # This padding gives us a little room for rounding so we don't
        # go over the end of the disk
        disk_free = self.disk_size - (2048 * 1024)
        pnum = 1

        for p in self.partitions:
            args = {}
            args['pnum'] = pnum
            args['name'] = '%s' % p.get_name()
            args['type'] = '%s' % p.get_type()

            # convert from a relative/string size to bytes
            size = parse_rel_size_spec(p.get_size(), disk_free)[1]

            # We keep track in bytes, but specify things to sgdisk in
            # megabytes so it can align on sensible boundaries. And
            # create partitions right after previous so no need to
            # calculate start/end - just size.
            assert size <= disk_free
            args['size'] = size // (1024 * 1024)

            new_cmd = (
                "-n",
                "{pnum}:0:+{size}M".format(**args),
                "-t",
                "{pnum}:{type}".format(**args),
                # Careful with this one, as {name} could have spaces
                "-c",
                "{pnum}:{name}".format(**args))
            cmd.extend(new_cmd)

            # Fill the state; we mount all partitions with kpartx
            # below once we're done.  So the device this partition
            # will be seen at becomes "/dev/mapper/loop0pX"
            assert self.device_path[:5] == "/dev/"
            device_name = "/dev/mapper/%sp%d" % (self.device_path[5:], pnum)
            self.state['blockdev'][p.get_name()] \
                = {'device': device_name}

            disk_free = disk_free - size
            # Log *this* partition's number before incrementing;
            # logging after the increment reported the next number.
            logger.debug("Partition %s added, %s remaining in disk", pnum,
                         disk_free)
            pnum = pnum + 1

        logger.debug("cmd: %s", ' '.join(cmd))
        exec_sudo(cmd)
コード例 #14
0
ファイル: lvm.py プロジェクト: openstack/diskimage-builder
 def cleanup(self):
     """Refresh the LVM metadata cache after device teardown."""
     # Information about the PV, VG and LV is typically
     # cached in lvmetad. Even after removing PV device and
     # partitions this data is not automatically updated
     # which leads to a couple of problems.
     # the 'pvscan --cache' scans the available disks
     # and updates the cache.
     # This is in cleanup because it must be called after the
     # umount of the containing block device is done, (which should
     # all be done in umount phase).
     try:
         exec_sudo(['pvscan', '--cache'])
     except BlockDeviceSetupException as e:
         # best-effort: log and continue; e presumably carries the
         # failed command's return code -- NOTE(review): confirm
         logger.info("pvscan call failed [%s]", e.returncode)
コード例 #15
0
ファイル: lvm.py プロジェクト: openstack/diskimage-builder
 def cleanup(self):
     """Refresh lvmetad's metadata cache once teardown is complete."""
     # lvmetad caches PV/VG/LV information.  After removing the PV
     # device and partitions that cache goes stale, which causes a
     # couple of problems; 'pvscan --cache' rescans the available
     # disks and refreshes it.  This runs in cleanup because it must
     # happen after the containing block device has been unmounted
     # (which all happens in the umount phase).
     try:
         exec_sudo(['pvscan', '--cache'])
     except BlockDeviceSetupException as e:
         # Best-effort; just note the failure and carry on.
         logger.info("pvscan call failed [%s]", e.returncode)
コード例 #16
0
    def create(self):
        """Write the MBR partition table and map the partitions.

        Called once per partition node during the graph walk, but the
        table itself is only created on the first call.
        """
        # This is a bit of a hack.  Each of the partitions is actually
        # in the graph, so for every partition we get a create() call
        # as the walk happens.  But we only need to create the
        # partition table once...
        if self.already_created:
            logger.info("Not creating the partitions a second time.")
            return
        self.already_created = True

        # the raw file on disk
        image_path = self.state['blockdev'][self.base]['image']
        # the /dev/loopX device of the parent
        device_path = self.state['blockdev'][self.base]['device']
        logger.info("Creating partition on [%s] [%s]", self.base, image_path)

        assert self.label == 'mbr'

        disk_size = self._size_of_block_dev(image_path)
        with MBR(image_path, disk_size, self.align) as part_impl:
            for part_cfg in self.partitions:
                part_name = part_cfg.get_name()
                part_bootflag = PartitionNode.flag_boot \
                                in part_cfg.get_flags()
                part_primary = PartitionNode.flag_primary \
                               in part_cfg.get_flags()
                part_size = part_cfg.get_size()
                part_free = part_impl.free()
                part_type = part_cfg.get_type()
                logger.debug("Not partitioned space [%d]", part_free)
                # resolve relative sizes against the remaining free space
                part_size = parse_rel_size_spec(part_size, part_free)[1]
                part_no \
                    = part_impl.add_partition(part_primary, part_bootflag,
                                              part_size, part_type)
                logger.debug("Create partition [%s] [%d]", part_name, part_no)

                # We're going to mount all partitions with kpartx
                # below once we're done.  So the device this partition
                # will be seen at becomes "/dev/mapper/loop0pX"
                assert device_path[:5] == "/dev/"
                partition_device_name = "/dev/mapper/%sp%d" % \
                                        (device_path[5:], part_no)
                self.state['blockdev'][part_name] \
                    = {'device': partition_device_name}

        # "safety sync" to make sure the partitions are written
        exec_sudo(["sync"])

        # now all the partitions are created, get device-mapper to
        # mount them
        if not os.path.exists("/.dockerenv"):
            exec_sudo(["kpartx", "-avs", device_path])
        else:
            # If running inside Docker, make our nodes manually,
            # because udev will not be working. kpartx cannot run in
            # sync mode in docker.
            exec_sudo(["kpartx", "-av", device_path])
            exec_sudo(["dmsetup", "--noudevsync", "mknodes"])

        return
コード例 #17
0
def loopdev_detach(loopdev):
    """Detach a loopback device, retrying while udev settles.

    :param loopdev: the /dev/loopX path to detach
    :return: None on success, 1 if every attempt failed
    """
    import time

    logger.info("loopdev detach")
    # loopback dev may be tied up a bit by udev events triggered
    # by partition events
    for try_cnt in range(10, 1, -1):
        try:
            exec_sudo(["losetup", "-d", loopdev])
            return
        except BlockDeviceSetupException as e:
            # Do not raise an error - maybe other cleanup methods
            # can at least do some more work.
            logger.error("loopdev detach failed (%s)", e.returncode)
            # Give udev a moment to release the device; retrying
            # immediately would just fail again and defeat the loop.
            time.sleep(1)

    logger.debug("Gave up trying to detach [%s]", loopdev)
    return 1
コード例 #18
0
def loopdev_detach(loopdev):
    """Best-effort detach of *loopdev*; returns 1 if it never succeeds."""
    logger.info("loopdev detach")
    # loopback dev may be tied up a bit by udev events triggered
    # by partition events
    for _ in range(10, 1, -1):
        try:
            exec_sudo(["losetup", "-d", loopdev])
            return
        except BlockDeviceSetupException as e:
            # Do not raise an error - maybe other cleanup methods
            # can at least do some more work.
            logger.error("loopdev detach failed (%s)", e.returncode)

    logger.debug("Gave up trying to detach [%s]", loopdev)
    return 1
コード例 #19
0
    def _create_gpt(self):
        """Create partitions with GPT

        Assembles a single sgdisk invocation for all configured
        partitions, records each partition's expected /dev/mapper
        device in the state, then runs the command.
        """
        cmd = ['sgdisk', self.image_path]

        # This padding gives us a little room for rounding so we don't
        # go over the end of the disk
        disk_free = self.disk_size - (2048 * 1024)
        pnum = 1

        for p in self.partitions:
            args = {}
            args['pnum'] = pnum
            args['name'] = '%s' % p.get_name()
            args['type'] = '%s' % p.get_type()

            # convert from a relative/string size to bytes
            size = parse_rel_size_spec(p.get_size(), disk_free)[1]

            # We keep track in bytes, but specify things to sgdisk in
            # megabytes so it can align on sensible boundaries. And
            # create partitions right after previous so no need to
            # calculate start/end - just size.
            assert size <= disk_free
            args['size'] = size // (1024 * 1024)

            new_cmd = ("-n", "{pnum}:0:+{size}M".format(**args),
                       "-t", "{pnum}:{type}".format(**args),
                       # Careful with this one, as {name} could have spaces
                       "-c", "{pnum}:{name}".format(**args))
            cmd.extend(new_cmd)

            # Fill the state; we mount all partitions with kpartx
            # below once we're done.  So the device this partition
            # will be seen at becomes "/dev/mapper/loop0pX"
            assert self.device_path[:5] == "/dev/"
            device_name = "/dev/mapper/%sp%d" % (self.device_path[5:], pnum)
            self.state['blockdev'][p.get_name()] \
                = {'device': device_name}

            disk_free = disk_free - size
            # Log *this* partition's number before the increment; the
            # original logged pnum after incrementing, reporting the
            # next partition instead of the one just added.
            logger.debug("Partition %s added, %s remaining in disk",
                         pnum, disk_free)
            pnum = pnum + 1

        logger.debug("cmd: %s", ' '.join(cmd))
        exec_sudo(cmd)
コード例 #20
0
def loopdev_attach(filename):
    """Attach *filename* to a free loop device and return its path."""
    logger.info("loopdev attach")
    logger.debug("Calling [sudo losetup --show -f %s]", filename)
    block_device = exec_sudo(["losetup", "--show", "-f", filename])
    # [:-1]: Cut off the trailing newline from losetup's output
    block_device = block_device[:-1]
    logger.info("New block device [%s]", block_device)
    return block_device
コード例 #21
0
def loopdev_attach(filename):
    """Bind a file to the next free /dev/loopX and return that path."""
    logger.info("loopdev attach")
    logger.debug("Calling [sudo losetup --show -f %s]", filename)
    output = exec_sudo(["losetup", "--show", "-f", filename])
    # losetup --show prints the device path followed by a newline;
    # strip the final character to get just the path.
    block_device = output[:-1]
    logger.info("New block device [%s]", block_device)
    return block_device
コード例 #22
0
ファイル: lvm.py プロジェクト: openstack/diskimage-builder
    def _create(self):
        """Run pvcreate on our parent device and record it in state."""
        # the underlying device path of our parent was previously
        # recorded into the state during blockdev creation; look it up.
        phys_dev = self.state['blockdev'][self.base]['device']

        cmd = ['pvcreate', phys_dev]
        if self.options:
            cmd.extend(self.options)
        logger.debug("Creating pv command [%s]", cmd)
        exec_sudo(cmd)

        # save state
        if 'pvs' not in self.state:
            self.state['pvs'] = {}
        self.state['pvs'][self.name] = {
            'opts': self.options,
            'device': phys_dev,
        }
コード例 #23
0
ファイル: lvm.py プロジェクト: openstack/diskimage-builder
    def _create(self):
        """Initialise our parent block device as an LVM physical volume."""
        # the underlying device path of our parent was previously
        # recorded into the state during blockdev creation; look it
        # up.
        phys_dev = self.state['blockdev'][self.base]['device']

        cmd = ["pvcreate"]
        cmd.append(phys_dev)
        if self.options:
            cmd.extend(self.options)
        logger.debug("Creating pv command [%s]", cmd)
        exec_sudo(cmd)

        # save state
        if 'pvs' not in self.state:
            self.state['pvs'] = {}
        self.state['pvs'][self.name] = {
            'opts': self.options,
            'device': phys_dev
        }
コード例 #24
0
ファイル: mount.py プロジェクト: openstack/diskimage-builder
    def create(self):
        """Mount this filesystem under the mount base and record it."""
        logger.debug("mount called [%s]", self.mount_point)
        # Strip a leading '/' so os.path.join treats the mount point
        # as relative to the mount base.
        if self.mount_point[0] != '/':
            rel_mp = self.mount_point
        else:
            rel_mp = self.mount_point[1:]
        mount_point = os.path.join(self.mount_base, rel_mp)
        if not os.path.exists(mount_point):
            # Need to sudo this because of permissions in the new
            # file system tree.
            exec_sudo(['mkdir', '-p', mount_point])
        logger.info("Mounting [%s] to [%s]", self.name, mount_point)
        exec_sudo(["mount", self.state['filesys'][self.base]['device'],
                   mount_point])

        if 'mount' not in self.state:
            self.state['mount'] = {}
        self.state['mount'][self.mount_point] = {
            'name': self.name,
            'base': self.base,
            'path': mount_point,
        }

        # Remember the order so umount can reverse it later.
        if 'mount_order' not in self.state:
            self.state['mount_order'] = []
        self.state['mount_order'].append(self.mount_point)
コード例 #25
0
ファイル: mount.py プロジェクト: Anapaya/diskimage-builder
    def create(self):
        """Mount this node's filesystem below the mount base.

        Creates the mount directory if needed, mounts the device that
        mkfs recorded in the state, and records mount path and order
        for later fstab generation and unmounting.
        """
        logger.debug("mount called [%s]", self.mount_point)
        # strip a leading '/' so os.path.join treats it as relative
        rel_mp = self.mount_point if self.mount_point[0] != '/' \
                 else self.mount_point[1:]
        mount_point = os.path.join(self.mount_base, rel_mp)
        if not os.path.exists(mount_point):
            # Need to sudo this because of permissions in the new
            # file system tree.
            exec_sudo(['mkdir', '-p', mount_point])
        logger.info("Mounting [%s] to [%s]", self.name, mount_point)
        exec_sudo(
            ["mount", self.state['filesys'][self.base]['device'], mount_point])

        if 'mount' not in self.state:
            self.state['mount'] = {}
        self.state['mount'][self.mount_point] \
            = {'name': self.name, 'base': self.base, 'path': mount_point}

        if 'mount_order' not in self.state:
            self.state['mount_order'] = []
        self.state['mount_order'].append(self.mount_point)
コード例 #26
0
    def cmd_writefstab(self):
        """Creates the fstab

        Writes one line per mounted filesystem that has an 'fstab'
        entry in the state, then installs the file into the image's
        /etc directory.

        :return: 0 on success
        """
        logger.info("Creating fstab")

        # State should have been created by prior calls; we only need
        # the dict
        state = BlockDeviceState(self.state_json_file_name)

        tmp_fstab = os.path.join(self.state_dir, "fstab")
        with open(tmp_fstab, "wt") as fstab_fd:
            # This gives the order in which this must be mounted
            for mp in state['mount_order']:
                logger.debug("Writing fstab entry for [%s]", mp)
                fs_base = state['mount'][mp]['base']
                fs_name = state['mount'][mp]['name']

                # If there is no fstab entry - do not write anything.
                # Checked before any further lookups, so skipped
                # entries never touch the filesys state.
                if 'fstab' not in state:
                    continue
                if fs_name not in state['fstab']:
                    continue

                fs_val = state['filesys'][fs_base]
                # Prefer the label for identification, fall back to UUID
                if 'label' in fs_val:
                    diskid = "LABEL=%s" % fs_val['label']
                else:
                    diskid = "UUID=%s" % fs_val['uuid']

                options = state['fstab'][fs_name]['options']
                dump_freq = state['fstab'][fs_name]['dump-freq']
                fsck_passno = state['fstab'][fs_name]['fsck-passno']

                fstab_fd.write("%s %s %s %s %s %s\n" %
                               (diskid, mp, fs_val['fstype'], options,
                                dump_freq, fsck_passno))

        target_etc_dir = os.path.join(self.params['build-dir'], 'built', 'etc')
        exec_sudo(['mkdir', '-p', target_etc_dir])
        exec_sudo(['cp', tmp_fstab, os.path.join(target_etc_dir, "fstab")])

        return 0
コード例 #27
0
    def cleanup(self):
        """Remove LVM volumes from the host while preserving metadata.

        Snapshots each PV device to a temp file, tears down LV/VG/PV
        in reverse dependency order, then restores the snapshots so
        the image keeps its LVM metadata.
        """
        # First do a copy of all physical devices to individual
        # temporary files.  This is because the physical device is
        # full of LVM metadata describing the volumes and we don't
        # have a better way to handle removing the devices/volumes
        # from the host system while persisting this metadata in the
        # underlying devices.
        tempfiles = collections.OrderedDict()  # to unwind in same order!
        for pvs in self.pvs:
            phys_dev = self.state['blockdev'][pvs.base]['device']
            # closed immediately: we only need a stable path for dd
            target_file = tempfile.NamedTemporaryFile(delete=False)
            target_file.close()
            exec_sudo(['dd', 'if=%s' % phys_dev, 'of=%s' % target_file.name])
            tempfiles[target_file.name] = phys_dev

        # once copied, start the removal in reverse order
        for lvs in self.lvs:
            lvs._cleanup()

        for vgs in self.vgs:
            vgs._cleanup()

        for pvs in self.pvs:
            pvs._cleanup()

        exec_sudo(['udevadm', 'settle'])

        # after the cleanup copy devices back
        for tmp_name, phys_dev in tempfiles.items():
            exec_sudo(['dd', 'if=%s' % tmp_name, 'of=%s' % phys_dev])
            os.unlink(tmp_name)
コード例 #28
0
    def cmd_writefstab(self):
        """Creates the fstab

        Writes one fstab line per mounted filesystem that has an
        'fstab' entry in the state, then installs the file into the
        image's /etc directory.

        :return: 0 on success
        """
        logger.info("Creating fstab")

        # State should have been created by prior calls; we only need
        # the dict
        state = BlockDeviceState(self.state_json_file_name)

        tmp_fstab = os.path.join(self.state_dir, "fstab")
        with open(tmp_fstab, "wt") as fstab_fd:
            # This gives the order in which this must be mounted
            for mp in state['mount_order']:
                logger.debug("Writing fstab entry for [%s]", mp)
                fs_base = state['mount'][mp]['base']
                fs_name = state['mount'][mp]['name']
                fs_val = state['filesys'][fs_base]
                # prefer the label for identification, fall back to UUID
                if 'label' in fs_val:
                    diskid = "LABEL=%s" % fs_val['label']
                else:
                    diskid = "UUID=%s" % fs_val['uuid']

                # If there is no fstab entry - do not write anything
                if 'fstab' not in state:
                    continue
                if fs_name not in state['fstab']:
                    continue

                options = state['fstab'][fs_name]['options']
                dump_freq = state['fstab'][fs_name]['dump-freq']
                fsck_passno = state['fstab'][fs_name]['fsck-passno']

                fstab_fd.write("%s %s %s %s %s %s\n"
                               % (diskid, mp, fs_val['fstype'],
                                  options, dump_freq, fsck_passno))

        target_etc_dir = os.path.join(self.params['build-dir'], 'built', 'etc')
        exec_sudo(['mkdir', '-p', target_etc_dir])
        exec_sudo(['cp', tmp_fstab, os.path.join(target_etc_dir, "fstab")])

        return 0
コード例 #29
0
ファイル: lvm.py プロジェクト: openstack/diskimage-builder
    def _create(self):
        """Create this logical volume with lvcreate and record it in state."""
        cmd = ["lvcreate", ]
        cmd.extend(['--name', self.name])
        # a fixed size (-L) takes precedence over an extent count (-l)
        if self.size:
            cmd.extend(['-L', self.size])
        elif self.extents:
            cmd.extend(['-l', self.extents])
        if self.options:
            cmd.extend(self.options)

        cmd.append(self.base)

        logger.debug("Creating lv command [%s]", cmd)
        exec_sudo(cmd)

        # save state
        self.state['blockdev'][self.name] = {
            'vgs': self.base,
            'size': self.size,
            'extents': self.extents,
            'opts': self.options,
            'device': '/dev/mapper/%s-%s' % (self.base, self.name)
        }
コード例 #30
0
ファイル: mkfs.py プロジェクト: openstack/diskimage-builder
    def create(self):
        """Create the filesystem with mkfs and record it in the state."""
        cmd = ["mkfs"]

        cmd.extend(['-t', self.type])
        if self.opts:
            cmd.extend(self.opts)

        # vfat/fat use -n for the volume label; everything else -L
        if self.type in ('vfat', 'fat'):
            cmd.extend(["-n", self.label])
        else:
            cmd.extend(["-L", self.label])

        # only some filesystem types let us pre-set the UUID
        if self.type in ('ext2', 'ext3', 'ext4'):
            cmd.extend(['-U', self.uuid])
        elif self.type == 'xfs':
            cmd.extend(['-m', "uuid=%s" % self.uuid])
        else:
            logger.warning("UUID will not be written for fs type [%s]",
                           self.type)

        if self.type in ('ext2', 'ext3', 'ext4', 'xfs'):
            cmd.append('-q')

        if 'blockdev' not in self.state:
            self.state['blockdev'] = {}
        device = self.state['blockdev'][self.base]['device']
        cmd.append(device)

        logger.debug("Creating fs command [%s]", cmd)
        exec_sudo(cmd)

        if 'filesys' not in self.state:
            self.state['filesys'] = {}
        self.state['filesys'][self.name] \
            = {'uuid': self.uuid, 'label': self.label,
               'fstype': self.type, 'opts': self.opts,
               'device': device}
コード例 #31
0
    def create(self):
        """Write the partition table (MBR or GPT) and map the partitions.

        Called once per partition node during the graph walk, but the
        table itself is only created on the first call.
        """
        # This is a bit of a hack.  Each of the partitions is actually
        # in the graph, so for every partition we get a create() call
        # as the walk happens.  But we only need to create the
        # partition table once...
        self.number_of_partitions += 1
        if self.number_of_partitions > 1:
            logger.info("Not creating the partitions a second time.")
            return

        # the raw file on disk
        self.image_path = self.state['blockdev'][self.base]['image']
        # the /dev/loopX device of the parent
        self.device_path = self.state['blockdev'][self.base]['device']
        # underlying size
        self.disk_size = self._size_of_block_dev(self.image_path)

        logger.info("Creating partition on [%s] [%s]",
                    self.base, self.image_path)

        assert self.label in ('mbr', 'gpt')

        if self.label == 'mbr':
            self._create_mbr()
        elif self.label == 'gpt':
            self._create_gpt()

        # "safety sync" to make sure the partitions are written
        exec_sudo(["sync"])

        # now all the partitions are created, get device-mapper to
        # mount them
        if not os.path.exists("/.dockerenv"):
            exec_sudo(["kpartx", "-avs", self.device_path])
        else:
            # If running inside Docker, make our nodes manually,
            # because udev will not be working. kpartx cannot run in
            # sync mode in docker.
            exec_sudo(["kpartx", "-av", self.device_path])
            exec_sudo(["dmsetup", "--noudevsync", "mknodes"])

        return
コード例 #32
0
    def create(self):
        """Create the MBR/GPT partition table once, then map partitions.

        Every partition node triggers a create() during the graph
        walk; the counter ensures only the first call does the work.
        """
        # This is a bit of a hack.  Each of the partitions is actually
        # in the graph, so for every partition we get a create() call
        # as the walk happens.  But we only need to create the
        # partition table once...
        self.number_of_partitions += 1
        if self.number_of_partitions > 1:
            logger.info("Not creating the partitions a second time.")
            return

        # the raw file on disk
        self.image_path = self.state['blockdev'][self.base]['image']
        # the /dev/loopX device of the parent
        self.device_path = self.state['blockdev'][self.base]['device']
        # underlying size
        self.disk_size = self._size_of_block_dev(self.image_path)

        logger.info("Creating partition on [%s] [%s]", self.base,
                    self.image_path)

        assert self.label in ('mbr', 'gpt')

        if self.label == 'mbr':
            self._create_mbr()
        elif self.label == 'gpt':
            self._create_gpt()

        # "safety sync" to make sure the partitions are written
        exec_sudo(["sync"])

        # now all the partitions are created, get device-mapper to
        # mount them
        if not os.path.exists("/.dockerenv"):
            exec_sudo(["kpartx", "-avs", self.device_path])
        else:
            # If running inside Docker, make our nodes manually,
            # because udev will not be working. kpartx cannot run in
            # sync mode in docker.
            exec_sudo(["kpartx", "-av", self.device_path])
            exec_sudo(["dmsetup", "--noudevsync", "mknodes"])

        return
コード例 #33
0
ファイル: mount.py プロジェクト: oddomatik/diskimage-builder
 def umount(self):
     """fstrim the mounted filesystem, then unmount it."""
     logger.info("Called for [%s]", self.name)
     # Before calling umount call 'fstrim' on the mounted file
     # system.  This discards unused blocks from the mounted
     # file system and therefore decreases the resulting image
     # size.
     # A race condition can occur when trying to fstrim immediately after
     # deleting a file resulting in that free space not being reclaimed.
     # Calling sync before fstrim is a workaround for this behaviour.
     # https://lists.gnu.org/archive/html/qemu-devel/2014-03/msg02978.html
     exec_sudo(["sync"])
     exec_sudo([
         "fstrim", "--verbose",
         self.state['mount'][self.mount_point]['path']
     ])
     exec_sudo(["umount", self.state['mount'][self.mount_point]['path']])
コード例 #34
0
ファイル: mount.py プロジェクト: openstack/diskimage-builder
 def umount(self):
     """fstrim suitable filesystems, then unmount this mount point."""
     logger.info("Called for [%s]", self.name)
     # Before calling umount, call 'fstrim' on suitable mounted
     # file systems.  This discards unused blocks from the mounted
     # file system and therefore decreases the resulting image
     # size.
     #
     # A race condition can occur when trying to fstrim immediately
     # after deleting a file resulting in that free space not being
     # reclaimed.  Calling sync before fstrim is a workaround for
     # this behaviour.
     # https://lists.gnu.org/archive/html/qemu-devel/2014-03/msg02978.html
     exec_sudo(["sync"])
     # vfat is skipped -- fstrim does not support it
     if self.state['filesys'][self.base]['fstype'] != 'vfat':
         exec_sudo(["fstrim", "--verbose",
                    self.state['mount'][self.mount_point]['path']])
     exec_sudo(["umount", self.state['mount'][self.mount_point]['path']])
コード例 #35
0
ファイル: lvm.py プロジェクト: openstack/diskimage-builder
 def _umount(self):
     """Deactivate this logical volume (lvchange -an)."""
     lv_path = '/dev/%s/%s' % (self.base, self.name)
     exec_sudo(['lvchange', '-an', lv_path])
コード例 #36
0
 def _cleanup(self):
     """Force-remove this physical volume's LVM label."""
     device = self.state['pvs'][self.name]['device']
     exec_sudo(['pvremove', '--force', device])
コード例 #37
0
ファイル: lvm.py プロジェクト: openstack/diskimage-builder
 def _umount(self):
     """Deactivate the LV so its device-mapper node can be released."""
     target = '/dev/%s/%s' % (self.base, self.name)
     exec_sudo(['lvchange', '-an', target])
コード例 #38
0
ファイル: lvm.py プロジェクト: openstack/diskimage-builder
 def _umount(self):
     """Deactivate this volume group (vgchange -an)."""
     cmd = ['vgchange', '-an', self.name]
     exec_sudo(cmd)
コード例 #39
0
ファイル: lvm.py プロジェクト: openstack/diskimage-builder
 def _umount(self):
     """Deactivate this volume group so its LV device nodes go away."""
     exec_sudo(['vgchange', '-an', self.name])
コード例 #40
0
ファイル: lvm.py プロジェクト: xinma/diskimage-builder
 def cleanup(self):
     """Refresh lvmetad's view of the PVs after teardown (best effort)."""
     try:
         exec_sudo(['pvscan', '--cache'])
     except subprocess.CalledProcessError as cpe:
         # Non-fatal; just record the outcome for debugging.
         logger.debug("pvscan call result [%s]", cpe)
コード例 #41
0
ファイル: lvm.py プロジェクト: xinma/diskimage-builder
 def _cleanup(self):
     """Deactivate this volume group during cleanup."""
     exec_sudo(['vgchange', '-an', self.name])
コード例 #42
0
ファイル: mount.py プロジェクト: yaochi/diskimage-builder
 def umount(self, state):
     """Unmount this mount point using the path recorded in *state*."""
     logger.info("Called for [%s]", self.name)
     mount_path = state['mount'][self.mount_point]['path']
     exec_sudo(["umount", mount_path])
コード例 #43
0
 def _cleanup(self):
     """Deactivate, then force-remove, this volume group."""
     for vg_cmd in (['vgchange', '-an', self.name],
                    ['vgremove', '--force', self.name]):
         exec_sudo(vg_cmd)
コード例 #44
0
 def _cleanup(self):
     """Deactivate and force-remove this logical volume."""
     lv_path = '/dev/%s/%s' % (self.base, self.name)
     exec_sudo(['lvchange', '-an', lv_path])
     exec_sudo(['lvremove', '--force', lv_path])