Example #1
    def extract_item(self, item, restore_attrs=True, dry_run=False):
        if dry_run:
            if b'chunks' in item:
                for _ in self.pipeline.fetch_many(
                    [c[0] for c in item[b'chunks']], is_preloaded=True):
                    pass
            return

        dest = self.cwd
        if item[b'path'].startswith('/') or item[b'path'].startswith('..'):
            raise Exception('Path should be relative and local')
        path = os.path.join(dest, item[b'path'])
        # Attempt to remove existing files, ignore errors on failure
        try:
            st = os.lstat(path)
            if stat.S_ISDIR(st.st_mode):
                os.rmdir(path)
            else:
                os.unlink(path)
        except OSError:
            pass
        mode = item[b'mode']
        if stat.S_ISDIR(mode):
            if not os.path.exists(path):
                os.makedirs(path)
            if restore_attrs:
                self.restore_attrs(path, item)
        elif stat.S_ISREG(mode):
            if not os.path.exists(os.path.dirname(path)):
                os.makedirs(os.path.dirname(path))
            # Hard link?
            if b'source' in item:
                source = os.path.join(dest, item[b'source'])
                if os.path.exists(path):
                    os.unlink(path)
                os.link(source, path)
            else:
                with open(path, 'wb') as fd:
                    ids = [c[0] for c in item[b'chunks']]
                    for data in self.pipeline.fetch_many(ids,
                                                         is_preloaded=True):
                        fd.write(data)
                    fd.flush()
                    self.restore_attrs(path, item, fd=fd.fileno())
        elif stat.S_ISFIFO(mode):
            if not os.path.exists(os.path.dirname(path)):
                os.makedirs(os.path.dirname(path))
            os.mkfifo(path)
            self.restore_attrs(path, item)
        elif stat.S_ISLNK(mode):
            if not os.path.exists(os.path.dirname(path)):
                os.makedirs(os.path.dirname(path))
            source = item[b'source']
            if os.path.exists(path):
                os.unlink(path)
            os.symlink(source, path)
            self.restore_attrs(path, item, symlink=True)
        elif stat.S_ISCHR(mode) or stat.S_ISBLK(mode):
            os.mknod(path, item[b'mode'], item[b'rdev'])
            self.restore_attrs(path, item)
        else:
            raise Exception('Unknown archive item type %r' % item[b'mode'])
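The method above dispatches on the `st_mode` bits of each archive item to decide how to recreate it on disk (directory, regular file, hard link, FIFO, symlink, device node). A minimal standalone sketch of that dispatch pattern, using an illustrative helper name that is not part of the original project:

import os
import stat

def describe_mode(mode):
    """Map raw st_mode bits to a file-type label (illustrative only)."""
    if stat.S_ISDIR(mode):
        return 'directory'
    if stat.S_ISREG(mode):
        return 'regular file'
    if stat.S_ISLNK(mode):
        return 'symlink'
    if stat.S_ISFIFO(mode):
        return 'fifo'
    if stat.S_ISCHR(mode) or stat.S_ISBLK(mode):
        return 'device node'
    return 'unknown'

print(describe_mode(os.lstat('/etc').st_mode))  # 'directory' on most Unix systems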
Example #2
def create(zpool, *vdevs, **kwargs):
    '''
    .. versionadded:: 2015.5.0
    .. versionchanged:: 2016.3.0

    Create a simple zpool, a mirrored zpool, a zpool with nested VDEVs, a hybrid zpool with cache, spare and log drives, or a zpool with RAIDZ-1, RAIDZ-2 or RAIDZ-3.

    zpool : string
        name of storage pool
    *vdevs : string
        one or more devices
    force : boolean
        forces use of vdevs, even if they appear in use or specify a conflicting replication level.
    mountpoint : string
        sets the mount point for the root dataset
    altroot : string
        equivalent to "-o cachefile=none,altroot=root"
    properties : dict
        additional pool properties
    filesystem_properties : dict
        additional filesystem properties

    CLI Example:

    .. code-block:: bash

        salt '*' zpool.create myzpool /path/to/vdev1 [...] [force=True|False]
        salt '*' zpool.create myzpool mirror /path/to/vdev1 /path/to/vdev2 [...] [force=True|False]
        salt '*' zpool.create myzpool raidz1 /path/to/vdev1 /path/to/vdev2 raidz2 /path/to/vdev3 /path/to/vdev4 /path/to/vdev5 [...] [force=True|False]
        salt '*' zpool.create myzpool mirror /path/to/vdev1 [...] mirror /path/to/vdev2 /path/to/vdev3 [...] [force=True|False]
        salt '*' zpool.create myhybridzpool mirror /tmp/file1 [...] log mirror /path/to/vdev1 [...] cache /path/to/vdev2 [...] spare /path/to/vdev3 [...] [force=True|False]

    .. note::

        Zpool properties can be specified at the time of creation of the pool by
        passing an additional argument called "properties" and specifying the properties
        with their respective values in the form of a python dictionary::

            properties="{'property1': 'value1', 'property2': 'value2'}"

        Filesystem properties can be specified at the time of creation of the pool by
        passing an additional argument called "filesystem_properties" and specifying the properties
        with their respective values in the form of a python dictionary::

            filesystem_properties="{'property1': 'value1', 'property2': 'value2'}"

        Example:

        .. code-block:: bash

            salt '*' zpool.create myzpool /path/to/vdev1 [...] properties="{'property1': 'value1', 'property2': 'value2'}"
    '''
    ret = {}
    dlist = []

    # Check if the zpool name is already in use
    if exists(zpool):
        ret[zpool] = 'storage pool already exists'
        return ret

    if not vdevs:
        ret[zpool] = 'no devices specified'
        return ret

    # make sure files are present on filesystem
    ret[zpool] = {}
    for vdev in vdevs:
        if vdev not in ['mirror', 'log', 'cache', 'raidz1', 'raidz2', 'raidz3', 'spare']:
            if not os.path.exists(vdev):
                ret[zpool][vdev] = 'not present on filesystem'
                continue
            mode = os.stat(vdev).st_mode
            if not stat.S_ISBLK(mode) and not stat.S_ISREG(mode) and not stat.S_ISCHR(mode):
                ret[zpool][vdev] = 'not a block device, a file vdev or character special device'
                continue
        dlist.append(vdev)

    if len(ret[zpool]) > 0:
        return ret

    devs = ' '.join(dlist)
    zpool_cmd = _check_zpool()
    force = kwargs.get('force', False)
    altroot = kwargs.get('altroot', None)
    mountpoint = kwargs.get('mountpoint', None)
    properties = kwargs.get('properties', None)
    filesystem_properties = kwargs.get('filesystem_properties', None)
    cmd = '{0} create'.format(zpool_cmd)

    # apply extra arguments from kwargs
    if force:  # force creation
        cmd = '{0} -f'.format(cmd)
    if properties:  # create "-o property=value" pairs
        optlist = []
        for prop in properties:
            if ' ' in properties[prop]:
                value = "'{0}'".format(properties[prop])
            else:
                value = properties[prop]
            optlist.append('-o {0}={1}'.format(prop, value))
        opts = ' '.join(optlist)
        cmd = '{0} {1}'.format(cmd, opts)
    if filesystem_properties:  # create "-O property=value" pairs
        optlist = []
        for prop in filesystem_properties:
            if ' ' in filesystem_properties[prop]:
                value = "'{0}'".format(filesystem_properties[prop])
            else:
                value = filesystem_properties[prop]
            optlist.append('-O {0}={1}'.format(prop, value))
        opts = ' '.join(optlist)
        cmd = '{0} {1}'.format(cmd, opts)
    if mountpoint:  # set mountpoint
        cmd = '{0} -m {1}'.format(cmd, mountpoint)
    if altroot:  # set altroot
        cmd = '{0} -R {1}'.format(cmd, altroot)
    cmd = '{0} {1} {2}'.format(cmd, zpool, devs)

    # Create storage pool
    res = __salt__['cmd.run_all'](cmd, python_shell=False)

    # Check and see if the pool is available
    if res['retcode'] != 0:
        ret[zpool] = res['stderr'] if 'stderr' in res else res['stdout']
    else:
        ret[zpool] = 'created'

    return ret
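The vdev validation loop above accepts a path only if it exists and `os.stat` reports a block device, a regular file, or a character device. A standalone sketch of that check under the same assumptions; the function name is illustrative and not part of Salt:

import os
import stat

def classify_vdev(path):
    """Mirror the vdev checks from zpool.create above (illustrative)."""
    if not os.path.exists(path):
        return 'not present on filesystem'
    mode = os.stat(path).st_mode
    if stat.S_ISBLK(mode) or stat.S_ISREG(mode) or stat.S_ISCHR(mode):
        return 'usable vdev'
    return 'not a block device, a file vdev or character special device'

print(classify_vdev('/dev/null'))  # a character device, so 'usable vdev'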
Example #3
def attach(zpool, device, new_device, force=False):
    '''
    .. versionchanged:: 2016.3.0

    Attach specified device to zpool

    zpool : string
        name of storage pool
    device : string
        device to attach to
    new_device : string
        device to attach
    force : boolean
        forces use of device

    CLI Example:

    .. code-block:: bash

        salt '*' zpool.attach myzpool /path/to/vdev1 /path/to/vdev2 [...]
    '''
    ret = {}
    dlist = []

    # check for pool
    if not exists(zpool):
        ret[zpool] = 'storage pool does not exist'
        return ret

    # check devices
    ret[zpool] = {}
    if not os.path.exists(device):
        ret[zpool][device] = 'not present on filesystem'
    else:
        mode = os.stat(device).st_mode
        if not stat.S_ISBLK(mode) and not stat.S_ISREG(mode):
            ret[zpool][device] = 'not a block device, a file vdev or character special device'
    if not os.path.exists(new_device):
        ret[zpool][new_device] = 'not present on filesystem'
    else:
        mode = os.stat(new_device).st_mode
        if not stat.S_ISBLK(mode) and not stat.S_ISREG(mode):
            ret[zpool][new_device] = 'not a block device, a file vdev or character special device'

    if len(ret[zpool]) > 0:
        return ret

    # try to attach the device; watch out for mismatched replication levels
    zpool_cmd = _check_zpool()
    cmd = '{zpool_cmd} attach {force}{zpool} {device} {new_device}'.format(
        zpool_cmd=zpool_cmd,
        force='-f ' if force else '',
        zpool=zpool,
        device=device,
        new_device=new_device
    )
    res = __salt__['cmd.run_all'](cmd, python_shell=False)
    if res['retcode'] != 0:
        ret[zpool] = res['stderr'] if 'stderr' in res else res['stdout']
    else:
        ret[zpool] = {}
        ret[zpool][new_device] = 'attached'

    return ret
Example #4
def _stat_is_device(stat_obj):
    """
    Helper function that will interpret ``os.stat`` output directly, so that other
    functions can call ``os.stat`` once and interpret that result several times
    """
    return stat.S_ISBLK(stat_obj)
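`stat.S_ISBLK` expects the mode bits, so this helper is presumably invoked with the `st_mode` attribute of an `os.stat` result rather than the full stat object. A hedged usage sketch (assumes `_stat_is_device` from the snippet above is in scope and that `/dev/sda` exists):

import os

st = os.stat('/dev/sda')             # hypothetical device path
print(_stat_is_device(st.st_mode))   # True for a block device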
Example #5
 def isblk(self):
     return stat.S_ISBLK(self.mode)
Example #6
def is_block_device(dev):
    """Check whether a device is block or not."""
    s = os.stat(dev)
    return stat.S_ISBLK(s.st_mode)
Example #7
 def method_blockdevp(self, space):
     return space.newbool(stat.S_ISBLK(self.get_stat(space).st_mode))
Example #8
def resize_devices(resizer, devices):
    # returns a list of tuples containing (entry-in-devices, action, message)
    devices = copy.copy(devices)
    info = []

    while devices:
        devent = devices.pop(0)
        try:
            blockdev = devent2dev(devent)
        except ValueError as e:
            info.append((
                devent,
                RESIZE.SKIPPED,
                "unable to convert to device: %s" % e,
            ))
            continue

        try:
            statret = os.stat(blockdev)
        except OSError as e:
            info.append((
                devent,
                RESIZE.SKIPPED,
                "stat of '%s' failed: %s" % (blockdev, e),
            ))
            continue

        if not stat.S_ISBLK(statret.st_mode) and not stat.S_ISCHR(
                statret.st_mode):
            info.append((
                devent,
                RESIZE.SKIPPED,
                "device '%s' not a block device" % blockdev,
            ))
            continue

        underlying_blockdev = get_mapped_device(blockdev)
        if underlying_blockdev:
            try:
                # We need to resize the underlying partition first
                partition = get_underlying_partition(blockdev)
                if is_encrypted(underlying_blockdev, partition):
                    if partition not in [x[0] for x in info]:
                        # We shouldn't attempt to resize this mapped partition
                        # until the underlying partition is resized, so re-add
                        # our device to the beginning of the list we're
                        # iterating over, then add our underlying partition
                        # so it can get processed first
                        devices.insert(0, devent)
                        devices.insert(0, partition)
                        continue
                    status, message = resize_encrypted(blockdev, partition)
                    info.append((
                        devent,
                        status,
                        message,
                    ))
                else:
                    info.append((
                        devent,
                        RESIZE.SKIPPED,
                        f"Resizing mapped device ({blockdev}) skipped "
                        "as it is not encrypted.",
                    ))
            except Exception as e:
                info.append((
                    devent,
                    RESIZE.FAILED,
                    f"Resizing encrypted device ({blockdev}) failed: {e}",
                ))
            # At this point, we WON'T resize a non-encrypted mapped device
            # though we should probably grow the ability to
            continue
        try:
            (disk, ptnum) = device_part_info(blockdev)
        except (TypeError, ValueError) as e:
            info.append((
                devent,
                RESIZE.SKIPPED,
                "device_part_info(%s) failed: %s" % (blockdev, e),
            ))
            continue

        try:
            (old, new) = resizer.resize(disk, ptnum, blockdev)
            if old == new:
                info.append((
                    devent,
                    RESIZE.NOCHANGE,
                    "no change necessary (%s, %s)" % (disk, ptnum),
                ))
            else:
                info.append((
                    devent,
                    RESIZE.CHANGED,
                    "changed (%s, %s) from %s to %s" % (disk, ptnum, old, new),
                ))

        except ResizeFailedException as e:
            info.append((
                devent,
                RESIZE.FAILED,
                "failed to resize: disk=%s, ptnum=%s: %s" % (disk, ptnum, e),
            ))

    return info
Example #9
    def create(self, params):
        name = params.get('name', '').strip()
        iso = params.get('cdrom')
        # check search permission
        if iso and iso.startswith('/') and os.path.exists(iso):
            st_mode = os.stat(iso).st_mode
            if stat.S_ISREG(st_mode) or stat.S_ISBLK(st_mode):
                user = UserTests().probe_user()
                run_setfacl_set_attr(iso, user=user)
                ret, excp = probe_file_permission_as_user(iso, user)
                if ret is False:
                    raise InvalidParameter('KCHISO0008E', {
                        'filename': iso,
                        'user': user,
                        'err': excp
                    })

        cpu_info = params.get('cpu_info')
        if cpu_info:
            topology = cpu_info.get('topology')
            # Check, even though currently only topology
            #   is supported.
            if topology:
                sockets = topology['sockets']
                cores = topology['cores']
                threads = topology['threads']
                if params.get('cpus') is None:
                    params['cpus'] = sockets * cores * threads
                # check_topology will raise the appropriate
                # exception if a topology is invalid.
                CPUInfoModel(conn=self.conn).\
                    check_topology(params['cpus'], topology)

        conn = self.conn.get()
        pool_uri = params.get(u'storagepool', '')
        if pool_uri:
            try:
                pool_name = pool_name_from_uri(pool_uri)
                pool = conn.storagePoolLookupByName(pool_name.encode("utf-8"))
            except Exception:
                raise InvalidParameter("KCHTMPL0004E", {
                    'pool': pool_uri,
                    'template': name
                })

            tmp_volumes = [
                disk['volume'] for disk in params.get('disks', [])
                if 'volume' in disk
            ]
            self.template_volume_validate(tmp_volumes, pool)

        for net_name in params.get(u'networks', []):
            try:
                conn.networkLookupByName(net_name.encode('utf-8'))
            except Exception:
                raise InvalidParameter("KCHTMPL0003E", {
                    'network': net_name,
                    'template': name
                })
        # Creates the template class with necessary information
        # Checkings will be done while creating this class, so any exception
        # will be raised here
        t = LibvirtVMTemplate(params, scan=True, conn=self.conn)
        name = params['name']
        try:
            with self.objstore as session:
                if name in session.get_list('template'):
                    raise InvalidOperation("KCHTMPL0001E", {'name': name})
                session.store('template', name, t.info)
        except InvalidOperation:
            raise
        except Exception, e:
            raise OperationFailed('KCHTMPL0020E', {'err': e.message})
Example #10
def offline(pool_name, *vdevs, **kwargs):
    '''
    .. versionadded:: 2015.5.0

    Ensure that the specified devices are offline

    .. warning::

        By default, the OFFLINE state is persistent. The device remains offline when
        the system is rebooted. To temporarily take a device offline, use ``temporary=True``.

    CLI Example:

    .. code-block:: bash

        salt '*' zpool.offline myzpool /path/to/vdev1 [...] [temporary=True|False]
    '''
    ret = {}
    dlist = []

    # Check if the pool_name exists
    if not exists(pool_name):
        ret['Error'] = 'Storage Pool `{0}` doesn\'t exist'.format(pool_name)
        ret['retcode'] = 1
        return ret

    if not vdevs:
        ret['Error'] = 'Missing vdev specification. Please specify vdevs.'
        ret['retcode'] = 2
        return ret

    # make sure files are present on filesystem
    for vdev in vdevs:
        if not os.path.exists(vdev):
            # Path doesn't exist so error and return
            ret[vdev] = '{0} not present on filesystem'.format(vdev)
            ret['retcode'] = 3
            return ret
        mode = os.stat(vdev).st_mode
        if not stat.S_ISBLK(mode) and not stat.S_ISREG(mode):
            # Not a block device or file vdev so error and return
            ret[vdev] = '{0} is not a block device or a file vdev'.format(vdev)
            ret['retcode'] = 4
            return ret
        dlist.append(vdev)

    devs = ' '.join(dlist)
    zpool = _check_zpool()
    temporary_opt = kwargs.get('temporary', False)
    if temporary_opt:
        cmd = '{0} offline -t {1} {2}'.format(zpool, pool_name, devs)
    else:
        cmd = '{0} offline {1} {2}'.format(zpool, pool_name, devs)

    # Take all specified devices offline
    res = __salt__['cmd.run'](cmd)
    if res:
        ret['Error'] = {}
        ret['Error']['Message'] = 'Failure taking specified devices offline.'
        ret['Error']['Reason'] = res
        ret['retcode'] = 5
    else:
        ret[pool_name] = 'Specified devices: {0} are offline.'.format(vdevs)
    return ret
Example #11
					# if the file exists already
					pass
				handledfiles.append(file)
				if (realfile[0] != '/'):
					realfile = os.path.normpath(os.path.join(os.path.dirname(file),realfile))
				handledfiles = copy_binaries_and_libs(chroot, [realfile], force_overwrite, be_verbose, check_libs, try_hardlink, retain_owner, handledfiles)
			elif (stat.S_ISDIR(sb.st_mode)):
				handledfiles = copy_dir_recursive(chroot,file,force_overwrite, be_verbose, check_libs, try_hardlink, retain_owner, handledfiles)
			elif (stat.S_ISREG(sb.st_mode)):
				if (try_hardlink):
					print 'Trying to link '+file+' to '+chrootrfile
				else:
					print 'Copying '+file+' to '+chrootrfile
				copy_with_permissions(file,chrootrfile,be_verbose, try_hardlink, retain_owner)
				handledfiles.append(file)
			elif (stat.S_ISCHR(sb.st_mode) or stat.S_ISBLK(sb.st_mode)):
				copy_device(chroot, file, be_verbose, retain_owner)
			else:
				sys.stderr.write('Failed to find how to copy '+file+' into a chroot jail, please report to the Jailkit developers\n')
#	in python 2.1 the return value is a tuple, not an object, st_mode is field 0
#	mode = stat.S_IMODE(sbuf.st_mode)
			mode = stat.S_IMODE(sb[stat.ST_MODE])
			if (check_libs and (string.find(file, 'lib') != -1 or string.find(file,'.so') != -1 or (mode & (stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)))):
				libs = lddlist_libraries(file)
				handledfiles = copy_binaries_and_libs(chroot, libs, force_overwrite, be_verbose, 0, try_hardlink, handledfiles)
	return handledfiles

def config_get_option_as_list(cfgparser, sectionname, optionname):
	"""retrieves a comma separated option from the configparser and splits it into a list, returning an empty list if it does not exist"""
	retval = []
	if (cfgparser.has_option(sectionname,optionname)):
Example #12
def create(pool_name, *vdevs, **kwargs):
    '''
    .. versionadded:: 2015.5.0

    Create a simple zpool, a mirrored zpool, a zpool with nested VDEVs, a hybrid zpool with cache, spare and log drives, or a zpool with RAIDZ-1, RAIDZ-2 or RAIDZ-3.

    CLI Example:

    .. code-block:: bash

        salt '*' zpool.create myzpool /path/to/vdev1 [...] [force=True|False]
        salt '*' zpool.create myzpool mirror /path/to/vdev1 /path/to/vdev2 [...] [force=True|False]
        salt '*' zpool.create myzpool raidz1 /path/to/vdev1 /path/to/vdev2 raidz2 /path/to/vdev3 /path/to/vdev4 /path/to/vdev5 [...] [force=True|False]
        salt '*' zpool.create myzpool mirror /path/to/vdev1 [...] mirror /path/to/vdev2 /path/to/vdev3 [...] [force=True|False]
        salt '*' zpool.create myhybridzpool mirror /tmp/file1 [...] log mirror /path/to/vdev1 [...] cache /path/to/vdev2 [...] spare /path/to/vdev3 [...] [force=True|False]

    .. note::

        Zpool properties can be specified at the time of creation of the pool by
        passing an additional argument called "properties" and specifying the properties
        with their respective values in the form of a python dictionary::

            properties="{'property1': 'value1', 'property2': 'value2'}"

        Example:

        .. code-block:: bash

            salt '*' zpool.create myzpool /path/to/vdev1 [...] properties="{'property1': 'value1', 'property2': 'value2'}"
    '''
    ret = {}
    dlist = []

    # Check if the pool_name is already being used
    if exists(pool_name):
        ret['Error'] = 'Storage Pool `{0}` already exists'.format(pool_name)
        ret['retcode'] = 1
        return ret

    if not vdevs:
        ret['Error'] = 'Missing vdev specification. Please specify vdevs.'
        ret['retcode'] = 2
        return ret

    # make sure files are present on filesystem
    for vdev in vdevs:
        if vdev not in [
                'mirror', 'log', 'cache', 'raidz1', 'raidz2', 'raidz3', 'spare'
        ]:
            if not os.path.exists(vdev):
                # Path doesn't exist so error and return
                ret[vdev] = '{0} not present on filesystem'.format(vdev)
                ret['retcode'] = 3
                return ret
            mode = os.stat(vdev).st_mode
            if not stat.S_ISBLK(mode) and not stat.S_ISREG(
                    mode) and not stat.S_ISCHR(mode):
                # Not a block device, file vdev, or character special device so error and return
                ret[vdev] = '{0} is not a block device, a file vdev, or character special device'.format(
                    vdev)
                ret['retcode'] = 4
                return ret
        dlist.append(vdev)

    devs = ' '.join(dlist)
    zpool = _check_zpool()
    force = kwargs.get('force', False)
    properties = kwargs.get('properties', None)
    cmd = '{0} create'.format(zpool)

    if force:
        cmd = '{0} -f'.format(cmd)

    # if zpool properties specified, then
    # create "-o property=value" pairs
    if properties:
        optlist = []
        for prop in properties:
            optlist.append('-o {0}={1}'.format(prop, properties[prop]))
        opts = ' '.join(optlist)
        cmd = '{0} {1}'.format(cmd, opts)
    cmd = '{0} {1} {2}'.format(cmd, pool_name, devs)

    # Create storage pool
    res = __salt__['cmd.run'](cmd, python_shell=False)

    # Check and see if the pool is available
    if exists(pool_name):
        ret[pool_name] = 'created'
        return ret
    else:
        ret['Error'] = {}
        ret['Error']['Message'] = 'Unable to create storage pool {0}'.format(
            pool_name)
        ret['Error']['Reason'] = res
        ret['retcode'] = 5

    return ret
Example #13
def maybe_get_writable_device_path(devpath, info, log):
    """Return updated devpath if the devpath is a writable block device.

    @param devpath: Requested path to the root device we want to resize.
    @param info: String representing information about the requested device.
    @param log: Logger to which logs will be added upon error.

    @returns devpath or updated devpath per kernel commandline if the device
        path is a writable block device, returns None otherwise.
    """
    container = util.is_container()

    # Ensure the path is a block device.
    if (devpath == "/dev/root" and not os.path.exists(devpath)
            and not container):
        devpath = util.rootdev_from_cmdline(util.get_cmdline())
        if devpath is None:
            log.warning("Unable to find device '/dev/root'")
            return None
        log.debug("Converted /dev/root to '%s' per kernel cmdline", devpath)

    if devpath == 'overlayroot':
        log.debug("Not attempting to resize devpath '%s': %s", devpath, info)
        return None

    # FreeBSD zpool can also just use gpt/<label>
    # with that in mind we can not do an os.stat on "gpt/whatever"
    # therefore return the devpath already here.
    if devpath.startswith('gpt/'):
        log.debug('We have a gpt label - just go ahead')
        return devpath
    # Alternatively, our device could simply be a name as returned by gpart,
    # such as da0p3
    if not devpath.startswith('/dev/') and not os.path.exists(devpath):
        fulldevpath = '/dev/' + devpath.lstrip('/')
        log.debug("'%s' doesn't appear to be a valid device path. Trying '%s'",
                  devpath, fulldevpath)
        devpath = fulldevpath

    try:
        statret = os.stat(devpath)
    except OSError as exc:
        if container and exc.errno == errno.ENOENT:
            log.debug(
                "Device '%s' did not exist in container. "
                "cannot resize: %s", devpath, info)
        elif exc.errno == errno.ENOENT:
            log.warning("Device '%s' did not exist. cannot resize: %s",
                        devpath, info)
        else:
            raise exc
        return None

    if not stat.S_ISBLK(statret.st_mode) and not stat.S_ISCHR(statret.st_mode):
        if container:
            log.debug("device '%s' not a block device in container."
                      " cannot resize: %s" % (devpath, info))
        else:
            log.warning("device '%s' not a block device. cannot resize: %s" %
                        (devpath, info))
        return None
    return devpath  # The writable block devpath
Example #14
def _stat_is_block(path):
    if not os.path.exists(path):
        return False
    return stat.S_ISBLK(os.stat(path)[stat.ST_MODE])
Example #15
def disk_exists(path):
    try:
        return stat.S_ISBLK(os.stat(path).st_mode)
    except:
        return False
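The bare `except:` above also swallows exceptions such as `KeyboardInterrupt`. A variant restricted to the errors `os.stat` can actually raise, offered as a sketch rather than the original code:

import os
import stat

def disk_exists_strict(path):
    """Like disk_exists above, but only catches os.stat failures (illustrative)."""
    try:
        return stat.S_ISBLK(os.stat(path).st_mode)
    except OSError:  # FileNotFoundError, PermissionError, etc. are subclasses
        return False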
Example #16
    def create(self, params):
        name = params.get('name', '').strip()

        conn = self.conn.get()
        for net_name in params.get(u'networks', []):
            try:
                conn.networkLookupByName(net_name.encode('utf-8'))
            except Exception:
                raise InvalidParameter("KCHTMPL0003E", {
                    'network': net_name,
                    'template': name
                })

        # get source_media
        source_media = params.pop("source_media")

        if source_media['type'] == 'netboot':
            params['netboot'] = True
            return self.save_template(params)
        else:
            # Get path of source media if it's based on disk type.
            path = source_media.get('path', None)

        if path is None:
            raise InvalidParameter("KCHTMPL0016E")

        # not local image: set as remote ISO
        path = path.encode('utf-8')
        if urlparse.urlparse(path).scheme in [
                "http", "https", "tftp", "ftp", "ftps"
        ]:
            params["cdrom"] = path

        # image does not exist: raise error
        elif not os.path.exists(path):
            raise InvalidParameter("KCHTMPL0002E", {'path': path})

        # create magic object to discover file type
        file_type = magic.open(magic.MAGIC_NONE)
        file_type.load()
        ftype = file_type.file(path)

        # cdrom
        if ISO_TYPE in ftype:
            params["cdrom"] = path

            # check search permission
            st_mode = os.stat(path).st_mode
            if stat.S_ISREG(st_mode) or stat.S_ISBLK(st_mode):
                user = UserTests().probe_user()
                run_setfacl_set_attr(path, user=user)
                ret, excp = probe_file_permission_as_user(path, user)
                if ret is False:
                    raise InvalidParameter('KCHISO0008E', {
                        'filename': path,
                        'user': user,
                        'err': excp
                    })
        # disk
        else:
            params["disks"] = params.get('disks', [])
            params["disks"].append({"base": path})

        return self.save_template(params)
Example #17
def hardlinkable(mode):
    """return True if we support hardlinked items of this type"""
    return stat.S_ISREG(mode) or stat.S_ISBLK(mode) or stat.S_ISCHR(
        mode) or stat.S_ISFIFO(mode)
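A short usage sketch: pass the `st_mode` of an `os.lstat` result so that symlinks themselves are classified rather than their targets (assumes the `hardlinkable` helper above is in scope):

import os

st = os.lstat('/etc/hosts')      # any existing regular file
print(hardlinkable(st.st_mode))  # True: regular files support hard links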
Example #18
def _get_partition(device, uuid):
    """Find the partition of a given device."""
    LOG.debug("Find the partition %(uuid)s on device %(dev)s",
              {'dev': device, 'uuid': uuid})

    try:
        # Try to tell the kernel to re-read the partition table
        try:
            utils.execute('partx', '-u', device, attempts=3,
                          delay_on_retry=True)
            utils.execute('udevadm', 'settle')
        except processutils.ProcessExecutionError:
            LOG.warning("Couldn't re-read the partition table "
                        "on device %s", device)

        lsblk = utils.execute('lsblk', '-PbioKNAME,UUID,PARTUUID,TYPE', device)
        report = lsblk[0]
        for line in report.split('\n'):
            part = {}
            # Split into KEY=VAL pairs
            vals = shlex.split(line)
            for key, val in (v.split('=', 1) for v in vals):
                part[key] = val.strip()
            # Ignore entries that are not partitions
            if part.get('TYPE') not in ['md', 'part']:
                # NOTE(TheJulia): This technically creates an edge failure
                # case where a filesystem on a whole block device sans
                # partitioning would behave differently.
                continue

            if part.get('UUID') == uuid:
                LOG.debug("Partition %(uuid)s found on device "
                          "%(dev)s", {'uuid': uuid, 'dev': device})
                return '/dev/' + part.get('KNAME')
            if part.get('PARTUUID') == uuid:
                LOG.debug("Partition %(uuid)s found on device "
                          "%(dev)s", {'uuid': uuid, 'dev': device})
                return '/dev/' + part.get('KNAME')
        else:
            # NOTE(TheJulia): We may want to consider moving towards using
            # findfs in the future, if we're comfortable with the execution
            # and interaction. There is value in either way though.
            try:
                findfs, stderr = utils.execute('findfs', 'UUID=%s' % uuid)
                return findfs.strip()
            except processutils.ProcessExecutionError as e:
                LOG.debug('First fallback detection attempt for locating '
                          'partition via UUID %(uuid)s failed. '
                          'Error: %(err)s',
                          {'uuid': uuid,
                           'err': e})
                try:
                    findfs, stderr = utils.execute(
                        'findfs', 'PARTUUID=%s' % uuid)
                    return findfs.strip()
                except processutils.ProcessExecutionError as e:
                    LOG.debug('Secondary fallback detection attempt for '
                              'locating partition via UUID %(uuid)s failed. '
                              'Error: %(err)s',
                              {'uuid': uuid,
                               'err': e})

            # Last fallback: In case we cannot find the partition by UUID
            # and the deploy device is an md device, we check if the md
            # device has a partition (which we assume to contain the root fs).
            if hardware.is_md_device(device):
                md_partition = device + 'p1'
                if (os.path.exists(md_partition) and
                    stat.S_ISBLK(os.stat(md_partition).st_mode)):
                    LOG.debug("Found md device with partition %s",
                              md_partition)
                    return md_partition
                else:
                    LOG.debug('Could not find partition %(part)s on md '
                              'device %(dev)s',
                              {'part': md_partition,
                               'dev': device})

            # Partition not found, time to escalate.
            error_msg = ("No partition with UUID %(uuid)s found on "
                         "device %(dev)s" % {'uuid': uuid, 'dev': device})
            LOG.error(error_msg)
            raise errors.DeviceNotFound(error_msg)
    except processutils.ProcessExecutionError as e:
        error_msg = ('Finding the partition with UUID %(uuid)s on '
                     'device %(dev)s failed with %(err)s' %
                     {'uuid': uuid, 'dev': device, 'err': e})
        LOG.error(error_msg)
        raise errors.CommandExecutionError(error_msg)
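The `lsblk -P` output parsed above consists of `KEY="value"` tokens on each line; `shlex.split` strips the quoting, and splitting each token on the first `=` yields the key/value pairs. A standalone sketch using a hard-coded sample line (illustrative data, not captured lsblk output):

import shlex

line = 'KNAME="sda1" UUID="abcd-1234" PARTUUID="0001-01" TYPE="part"'
part = {}
for key, val in (token.split('=', 1) for token in shlex.split(line)):
    part[key] = val.strip()
print(part['KNAME'], part['TYPE'])  # sda1 part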
Example #19
def is_block_device(filename):
    return stat.S_ISBLK(os.stat(filename).st_mode)
Example #20
def check_ecsbx_stores(config):
    try:
        printin(1, "Checking ECSBX stores section")
        if "ecsbx_stores" in config:
            ecsbx_stores = config["ecsbx_stores"]
            if not isinstance(ecsbx_stores, list):
                raise Exception(
                    "Value following key ecsbx_stores should be a list")

            names = []
            partitions = []
            mount_dirs = []

            for store in ecsbx_stores:
                if not isinstance(store, dict):
                    raise Exception(
                        "ECSBX store specification should be key value pairs")

                unrecognised_key = check_keys(
                    store, ["name", "partition", "mount_dir", "smart_enabled"])
                if unrecognised_key != None:
                    printin(2, "Unrecognised key",
                            '"' + unrecognised_key + '"')
                    shutdown_error()

                name = store["name"]
                partition = store["partition"]
                mount_dir = store["mount_dir"]
                smart_enabled = store["smart_enabled"]

                names.append(name)
                partitions.append(partition)
                mount_dirs.append(mount_dir)

                try:
                    mode = os.stat(partition).st_mode
                    if not stat.S_ISBLK(mode):
                        raise Exception('"' + partition + '"' +
                                        " is not a block device")
                except FileNotFoundError:
                    raise Exception("Partition " + partition + " not found")

                if os.path.isfile(mount_dir):
                    raise Exception(mount_dir + " is not a directory")
                elif os.path.isdir(mount_dir):
                    pass
                else:
                    raise Exception(mount_dir + " does not exist")

                if not isinstance(smart_enabled, bool):
                    raise Exception("smart_enabled should be a boolean")

            duplicate_names = misc_utils.collect_duplicates(names)
            if duplicate_names != []:
                raise Exception("Following names are used more than once : " +
                                ", ".join(duplicate_names))

            duplicate_partitions = misc_utils.collect_duplicates(partitions)
            if duplicate_partitions != []:
                raise Exception(
                    "Following partitions are used more than once : " +
                    ", ".join(duplicate_partitions))

            duplicate_mount_dirs = misc_utils.collect_duplicates(mount_dirs)
            if duplicate_mount_dirs != []:
                raise Exception(
                    "Following mount directories are used more than once : " +
                    ", ".join(duplicate_mount_dirs))

            printin(2, "Okay")
        else:
            printin(2, "Section not specified")
    except KeyError as e:
        printin(2, "Key", str(e), "misisng")
        shutdown_error()
    except Exception as e:
        printin(2, str(e))
        shutdown_error()
Example #21
 def is_block_device(self):
     return stat.S_ISBLK(self._stats_obj.st_mode) != 0
Example #22
    def copy_all(self):
        """Core copy process. This is the most important step of this
        stage. It clones live filesystem into a local partition in the
        selected hard disk."""

        self.db.progress('START', 0, 100, 'ubiquity/install/title')
        self.db.progress('INFO', 'ubiquity/install/copying')

        fs_size = os.path.join(self.casper_path, 'filesystem.size')
        if os.path.exists(fs_size):
            with open(fs_size) as total_size_fp:
                total_size = int(total_size_fp.readline())
        else:
            # Fallback in case an Ubuntu derivative forgets to put
            # /casper/filesystem.size on the CD, or to account for things
            # like CD->USB transformation tools that don't copy this file.
            # This is slower than just reading the size from a file, but
            # better than crashing.
            #
            # Obviously doing os.walk() twice is inefficient, but I'd rather
            # not suck the list into ubiquity's memory, and I'm guessing
            # that the kernel's dentry cache will avoid most of the slowness
            # anyway.
            total_size = 0
            for dirpath, dirnames, filenames in os.walk(self.source):
                for name in dirnames + filenames:
                    fqpath = os.path.join(dirpath, name)
                    total_size += os.lstat(fqpath).st_size

        # Progress bar handling:
        # We sample progress every half-second (assuming time.time() gives
        # us sufficiently good granularity) and use the average of progress
        # over the last minute or so to decide how much time remains. We
        # don't bother displaying any progress for the first ten seconds in
        # order to allow things to settle down, and we only update the "time
        # remaining" indicator at most every two seconds after that.

        copy_progress = 0
        copied_size = 0
        directory_times = []
        time_start = time.time()
        times = [(time_start, copied_size)]
        long_enough = False
        time_last_update = time_start
        debug = 'UBIQUITY_DEBUG' in os.environ
        if self.db.get('ubiquity/install/md5_check') == 'false':
            md5_check = False
        else:
            md5_check = True

        # Increase kernel flush times during bulk data copying to make it
        # more likely that small files are packed contiguously, which should
        # speed up initial boot times.
        dirty_writeback_centisecs = None
        dirty_expire_centisecs = None
        if os.path.exists('/proc/sys/vm/dirty_writeback_centisecs'):
            with open('/proc/sys/vm/dirty_writeback_centisecs') as dwc:
                dirty_writeback_centisecs = int(dwc.readline())
            with open('/proc/sys/vm/dirty_writeback_centisecs', 'w') as dwc:
                print('3000\n', file=dwc)
        if os.path.exists('/proc/sys/vm/dirty_expire_centisecs'):
            with open('/proc/sys/vm/dirty_expire_centisecs') as dec:
                dirty_expire_centisecs = int(dec.readline())
            with open('/proc/sys/vm/dirty_expire_centisecs', 'w') as dec:
                print('6000\n', file=dec)

        old_umask = os.umask(0)
        for dirpath, dirnames, filenames in os.walk(self.source):
            sp = dirpath[len(self.source) + 1:]
            for name in dirnames + filenames:
                relpath = os.path.join(sp, name)
                # /etc/fstab was legitimately created by partman, and
                # shouldn't be copied again.  Similarly, /etc/crypttab may
                # have been legitimately created by the user-setup plugin.
                if relpath in ("etc/fstab", "etc/crypttab"):
                    continue
                sourcepath = os.path.join(self.source, relpath)
                targetpath = os.path.join(self.target, relpath)
                st = os.lstat(sourcepath)

                # Is the path blacklisted?
                if (not stat.S_ISDIR(st.st_mode)
                        and '/%s' % relpath in self.blacklist):
                    if debug:
                        syslog.syslog('Not copying %s' % relpath)
                    continue

                # Remove the target if necessary and if we can.
                install_misc.remove_target(self.source, self.target, relpath,
                                           st)

                # Now actually copy source to target.
                mode = stat.S_IMODE(st.st_mode)
                if stat.S_ISLNK(st.st_mode):
                    linkto = os.readlink(sourcepath)
                    os.symlink(linkto, targetpath)
                elif stat.S_ISDIR(st.st_mode):
                    if not os.path.isdir(targetpath):
                        try:
                            os.mkdir(targetpath, mode)
                        except OSError as e:
                            # there is a small window where update-apt-cache
                            # can race with us since it creates
                            # "/target/var/cache/apt/...". Hence, ignore
                            # failure if the directory does now exist where
                            # brief moments before it didn't.
                            if e.errno != errno.EEXIST:
                                raise
                elif stat.S_ISCHR(st.st_mode):
                    os.mknod(targetpath, stat.S_IFCHR | mode, st.st_rdev)
                elif stat.S_ISBLK(st.st_mode):
                    os.mknod(targetpath, stat.S_IFBLK | mode, st.st_rdev)
                elif stat.S_ISFIFO(st.st_mode):
                    os.mknod(targetpath, stat.S_IFIFO | mode)
                elif stat.S_ISSOCK(st.st_mode):
                    os.mknod(targetpath, stat.S_IFSOCK | mode)
                elif stat.S_ISREG(st.st_mode):
                    install_misc.copy_file(self.db, sourcepath, targetpath,
                                           md5_check)

                # Copy metadata.
                copied_size += st.st_size
                os.lchown(targetpath, st.st_uid, st.st_gid)
                if not stat.S_ISLNK(st.st_mode):
                    os.chmod(targetpath, mode)
                if stat.S_ISDIR(st.st_mode):
                    directory_times.append(
                        (targetpath, st.st_atime, st.st_mtime))
                # os.utime() sets timestamp of target, not link
                elif not stat.S_ISLNK(st.st_mode):
                    try:
                        os.utime(targetpath, (st.st_atime, st.st_mtime))
                    except Exception:
                        # We can live with timestamps being wrong.
                        pass
                if (hasattr(os, "listxattr")
                        and hasattr(os, "supports_follow_symlinks")
                        and os.supports_follow_symlinks):
                    try:
                        attrnames = os.listxattr(sourcepath,
                                                 follow_symlinks=False)
                        for attrname in attrnames:
                            attrvalue = os.getxattr(sourcepath,
                                                    attrname,
                                                    follow_symlinks=False)
                            os.setxattr(targetpath,
                                        attrname,
                                        attrvalue,
                                        follow_symlinks=False)
                    except OSError as e:
                        if e.errno not in (errno.EPERM, errno.ENOTSUP,
                                           errno.ENODATA):
                            raise

                if int((copied_size * 90) / total_size) != copy_progress:
                    copy_progress = int((copied_size * 90) / total_size)
                    self.db.progress('SET', 10 + copy_progress)

                time_now = time.time()
                if (time_now - times[-1][0]) >= 0.5:
                    times.append((time_now, copied_size))
                    if not long_enough and time_now - times[0][0] >= 10:
                        long_enough = True
                    if long_enough and time_now - time_last_update >= 2:
                        time_last_update = time_now
                        while (time_now - times[0][0] > 60
                               and time_now - times[1][0] >= 60):
                            times.pop(0)
                        speed = ((times[-1][1] - times[0][1]) /
                                 (times[-1][0] - times[0][0]))
                        if speed != 0:
                            time_remaining = (int(
                                (total_size - copied_size) / speed))
                            if time_remaining < 60:
                                self.db.progress(
                                    'INFO', 'ubiquity/install/copying_minute')

        # Apply timestamps to all directories now that the items within them
        # have been copied.
        for dirtime in directory_times:
            (directory, atime, mtime) = dirtime
            try:
                os.utime(directory, (atime, mtime))
            except Exception:
                # I have no idea why I've been getting lots of bug reports
                # about this failing, but I really don't care. Ignore it.
                pass

        # Revert to previous kernel flush times.
        if dirty_writeback_centisecs is not None:
            with open('/proc/sys/vm/dirty_writeback_centisecs', 'w') as dwc:
                print(dirty_writeback_centisecs, file=dwc)
        if dirty_expire_centisecs is not None:
            with open('/proc/sys/vm/dirty_expire_centisecs', 'w') as dec:
                print(dirty_expire_centisecs, file=dec)

        # Try some possible locations for the kernel we used to boot. This
        # lets us save a couple of megabytes of CD space.
        bootdir = self.target_file('boot')
        kernel = self.find_cd_kernel()
        if kernel:
            prefix = os.path.basename(kernel).split('-', 1)[0]
            release = os.uname()[2]
            target_kernel = os.path.join(bootdir, '%s-%s' % (prefix, release))
            copies = []

            # ISO9660 images may have to use .efi rather than .efi.signed in
            # order to support being booted using isolinux, which must abide
            # by archaic 8.3 restrictions.
            for suffix in (".efi", ".efi.signed"):
                if os.path.exists(kernel + suffix):
                    signed_kernel = kernel + suffix
                    break
            else:
                signed_kernel = None

            if os.path.exists(kernel):
                copies.append((kernel, target_kernel))
            elif signed_kernel is not None:
                # No unsigned kernel.  We'll construct it using sbsigntool.
                copies.append((signed_kernel, target_kernel))

            if signed_kernel is not None:
                copies.append((signed_kernel, "%s.efi.signed" % target_kernel))

            for source, target in copies:
                osextras.unlink_force(target)
                install_misc.copy_file(self.db, source, target, md5_check)
                os.lchown(target, 0, 0)
                os.chmod(target, 0o644)
                st = os.lstat(source)
                try:
                    os.utime(target, (st.st_atime, st.st_mtime))
                except Exception:
                    # We can live with timestamps being wrong.
                    pass

            if not os.path.exists(kernel) and signed_kernel is not None:
                # Construct the unsigned kernel.
                subprocess.check_call(["sbattach", "--remove", target_kernel])

        os.umask(old_umask)

        self.db.progress('SET', 100)
        self.db.progress('STOP')
Ejemplo n.º 23
0
def files(b):
    logging.info('searching for configuration files')

    # Visit every file in `/etc` except those on the exclusion list above.
    for dirpath, dirnames, filenames in os.walk('/etc'):

        # Determine if this entire directory should be ignored by default.
        ignored = ignore.file(dirpath)

        # Collect up the full pathname to each file, `lstat` them all, and
        # note which ones will probably be ignored.
        files = []
        for filename in filenames:
            pathname = os.path.join(dirpath, filename)
            try:
                files.append((pathname, os.lstat(pathname),
                              ignore.file(pathname, ignored)))
            except OSError as e:
                logging.warning('{0} caused {1} - try running as root'.format(
                    pathname, errno.errorcode[e.errno]))

        # Track the ctime of each file in this directory.  Weed out false
        # positives by ignoring files with common ctimes.
        ctimes = defaultdict(lambda: 0)

        # Map the ctimes of each directory entry that isn't being ignored.
        for pathname, s, ignored in files:
            if not ignored:
                ctimes[s.st_ctime] += 1
        for dirname in dirnames:
            try:
                ctimes[os.lstat(os.path.join(dirpath, dirname)).st_ctime] += 1
            except OSError:
                pass

        for pathname, s, ignored in files:

            # Always ignore block special files, character special files,
            # pipes, and sockets.  They end up looking like deadlocks.
            if stat.S_ISBLK(s.st_mode) \
            or stat.S_ISCHR(s.st_mode) \
            or stat.S_ISFIFO(s.st_mode) \
            or stat.S_ISSOCK(s.st_mode):
                continue

            # Make sure this pathname will actually be able to be included
            # in the blueprint.  This is a bit of a cop-out since the file
            # could be important but at least it's not a crashing bug.
            try:
                pathname = unicode(pathname)
            except UnicodeDecodeError:
                logging.warning('{0} not UTF-8 - skipping it'.format(
                    repr(pathname)[1:-1]))
                continue

            # Ignore ignored files and files that share their ctime with other
            # files in the directory.  This is a very strong indication that
            # the file is original to the system and should be ignored.
            if ignored or 1 < ctimes[s.st_ctime] and ignore.file(
                    pathname, True):
                continue

            # Check for a Mustache template and an optional shell script
            # that templatize this file.
            try:
                template = open(
                    '{0}.blueprint-template.mustache'.format(pathname)).read()
            except IOError:
                template = None
            try:
                data = open(
                    '{0}.blueprint-template.sh'.format(pathname)).read()
            except IOError:
                data = None

            # The content is used even for symbolic links to determine whether
            # it has changed from the packaged version.
            try:
                content = open(pathname).read()
            except IOError:
                #logging.warning('{0} not readable'.format(pathname))
                continue

            # Ignore files that are unchanged from their packaged version.
            if _unchanged(pathname, content):
                continue

            # Resolve the rest of the file's metadata from the
            # `/etc/passwd` and `/etc/group` databases.
            try:
                pw = pwd.getpwuid(s.st_uid)
                owner = pw.pw_name
            except KeyError:
                owner = s.st_uid
            try:
                gr = grp.getgrgid(s.st_gid)
                group = gr.gr_name
            except KeyError:
                group = s.st_gid
            mode = '{0:o}'.format(s.st_mode)

            # A symbolic link's content is the link target.
            if stat.S_ISLNK(s.st_mode):
                content = os.readlink(pathname)

                # Ignore symbolic links providing backwards compatibility
                # between SystemV init and Upstart.
                if '/lib/init/upstart-job' == content:
                    continue

                # Ignore symbolic links into the Debian alternatives system.
                # These are almost certainly managed by packages.
                if content.startswith('/etc/alternatives/'):
                    continue

                b.add_file(pathname,
                           content=content,
                           encoding='plain',
                           group=group,
                           mode=mode,
                           owner=owner)

            # A regular file is stored as plain text only if it is valid
            # UTF-8, which is required for JSON serialization.
            else:
                kwargs = dict(group=group, mode=mode, owner=owner)
                try:
                    if template:
                        if data:
                            kwargs['data'] = data.decode('utf_8')
                        kwargs['template'] = template.decode('utf_8')
                    else:
                        kwargs['content'] = content.decode('utf_8')
                    kwargs['encoding'] = 'plain'
                except UnicodeDecodeError:
                    if template:
                        if data:
                            kwargs['data'] = base64.b64encode(data)
                        kwargs['template'] = base64.b64encode(template)
                    else:
                        kwargs['content'] = base64.b64encode(content)
                    kwargs['encoding'] = 'base64'
                b.add_file(pathname, **kwargs)

            # If this file is a service init script or config, create a
            # service resource.
            try:
                manager, service = util.parse_service(pathname)
                if not ignore.service(manager, service):
                    b.add_service(manager, service)
                    b.add_service_package(manager, service, 'apt',
                                          *_dpkg_query_S(pathname))
                    b.add_service_package(manager, service, 'yum',
                                          *_rpm_qf(pathname))
            except ValueError:
                pass
Example #24
 def _device_exists(dev):
     try:
         mode = os.stat(dev).st_mode
     except OSError:
         return False
     return stat.S_ISBLK(mode)
Example #25
def online(zpool, *vdevs, **kwargs):
    '''
    .. versionadded:: 2015.5.0
    .. versionchanged:: 2016.3.0

    Ensure that the specified devices are online

    zpool : string
        name of storage pool
    *vdevs : string
        one or more devices
    expand : boolean
        Expand the device to use all available space.

        .. note::
            If the device is part of a mirror or raidz then all devices must be
            expanded before the new space will become available to the pool.

    CLI Example:

    .. code-block:: bash

        salt '*' zpool.online myzpool /path/to/vdev1 [...]

    '''
    ret = {}
    dlist = []

    # Check if the pool_name exists
    if not exists(zpool):
        ret[zpool] = 'storage pool does not exist'
        return ret

    if not vdevs:
        ret[zpool] = 'no devices specified'
        return ret

    # get expand option
    expand = kwargs.get('expand', False)

    # make sure files are present on filesystem
    ret[zpool] = {}
    for vdev in vdevs:
        if not os.path.exists(vdev):
            ret[zpool][vdev] = 'not present on filesystem'
            continue
        mode = os.stat(vdev).st_mode
        if not stat.S_ISBLK(mode) and not stat.S_ISREG(mode):
            ret[zpool][vdev] = 'not a block device, a file vdev or character special device'
            continue
        dlist.append(vdev)

    if len(ret[zpool]) > 0:
        return ret

    devs = ' '.join(dlist)
    zpool_cmd = _check_zpool()
    cmd = '{zpool_cmd} online {expand}{zpool} {devs}'.format(
        zpool_cmd=zpool_cmd,
        expand='-e ' if expand else '',
        zpool=zpool,
        devs=devs
    )
    # Bring all specified devices online
    res = __salt__['cmd.run_all'](cmd, python_shell=False)
    if res['retcode'] != 0:
        ret[zpool] = res['stderr'] if 'stderr' in res else res['stdout']
    else:
        ret[zpool] = {}
        for device in dlist:
            if device not in ['mirror', 'log', 'cache', 'raidz1', 'raidz2', 'raidz3', 'spare']:
                ret[zpool][device] = 'onlined'
    return ret
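
The zpool snippets here all rely on an `exists()` helper that is not shown. As an assumption about how such a check could work (a sketch, not the module's actual implementation), it can be reduced to asking `zpool list` for the pool and testing the exit status, reusing the same `_check_zpool()` and `__salt__` plumbing the other functions use:

def exists(zpool):
    '''
    Hypothetical sketch: report whether a storage pool exists by running
    `zpool list <pool>` and checking the return code.
    '''
    zpool_cmd = _check_zpool()
    cmd = '{zpool_cmd} list {zpool}'.format(zpool_cmd=zpool_cmd, zpool=zpool)
    res = __salt__['cmd.run_all'](cmd, python_shell=False)
    return res['retcode'] == 0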
Ejemplo n.º 26
0
    def _create_via_common_rec(self, path, create_symlinks=True):
        if not self.mode:
            raise ApplyError('no metadata - cannot create path ' + path)

        # If the path already exists and is a dir, try rmdir.
        # If the path already exists and is anything else, try unlink.
        st = None
        try:
            st = xstat.lstat(path)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
        if st:
            if stat.S_ISDIR(st.st_mode):
                try:
                    os.rmdir(path)
                except OSError as e:
                    if e.errno in (errno.ENOTEMPTY, errno.EEXIST):
                        msg = 'refusing to overwrite non-empty dir ' + path
                        raise Exception(msg)
                    raise
            else:
                os.unlink(path)

        if stat.S_ISREG(self.mode):
            assert (self._recognized_file_type())
            fd = os.open(path, os.O_CREAT | os.O_WRONLY | os.O_EXCL, 0o600)
            os.close(fd)
        elif stat.S_ISDIR(self.mode):
            assert (self._recognized_file_type())
            os.mkdir(path, 0o700)
        elif stat.S_ISCHR(self.mode):
            assert (self._recognized_file_type())
            os.mknod(path, 0o600 | stat.S_IFCHR, self.rdev)
        elif stat.S_ISBLK(self.mode):
            assert (self._recognized_file_type())
            os.mknod(path, 0o600 | stat.S_IFBLK, self.rdev)
        elif stat.S_ISFIFO(self.mode):
            assert (self._recognized_file_type())
            os.mkfifo(path, 0o600 | stat.S_IFIFO)
        elif stat.S_ISSOCK(self.mode):
            try:
                os.mknod(path, 0o600 | stat.S_IFSOCK)
            except OSError as e:
                if e.errno in (errno.EINVAL, errno.EPERM):
                    s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
                    s.bind(path)
                else:
                    raise
        elif stat.S_ISLNK(self.mode):
            assert (self._recognized_file_type())
            if self.symlink_target and create_symlinks:
                # on MacOS, symlink() permissions depend on umask, and there's
                # no way to chown a symlink after creating it, so we have to
                # be careful here!
                oldumask = os.umask((self.mode & 0o777) ^ 0o777)
                try:
                    os.symlink(self.symlink_target, path)
                finally:
                    os.umask(oldumask)
        # FIXME: S_ISDOOR, S_IFMPB, S_IFCMP, S_IFNWK, ... see stat(2).
        else:
            assert (not self._recognized_file_type())
            add_error('not creating "%s" with unrecognized mode "0x%x"\n' %
                      (path, self.mode))
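
The umask dance around `os.symlink()` above is worth isolating: as the source comment notes, a symlink's permission bits are fixed at creation time (on MacOS), so the desired mode is enforced by temporarily setting the umask to its complement. A standalone sketch of that pattern, with a hypothetical helper name:

import os


def symlink_with_mode(target, path, mode=0o777):
    # Set the umask to the complement of the desired permission bits so the
    # link is created with exactly those bits, then restore the old umask.
    old_umask = os.umask((mode & 0o777) ^ 0o777)
    try:
        os.symlink(target, path)
    finally:
        os.umask(old_umask)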
Ejemplo n.º 27
0
def add(zpool, *vdevs, **kwargs):
    '''
    .. versionchanged:: 2016.3.0

    Add the specified vdevs to the given storage pool

    zpool : string
        name of storage pool
    *vdevs : string
        one or more devices
    force : boolean
        forces use of device

    CLI Example:

    .. code-block:: bash

        salt '*' zpool.add myzpool /path/to/vdev1 /path/to/vdev2 [...]
    '''
    ret = {}
    dlist = []

    # check for pool
    if not exists(zpool):
        ret[zpool] = 'storage pool does not exist'
        return ret

    if not vdevs:
        ret[zpool] = 'no devices specified'
        return ret

    force = kwargs.get('force', False)

    # make sure files are present on filesystem
    ret[zpool] = {}
    for vdev in vdevs:
        if vdev not in ['mirror', 'log', 'cache', 'raidz1', 'raidz2', 'raidz3', 'spare']:
            if not os.path.exists(vdev):
                ret[zpool][vdev] = 'not present on filesystem'
                continue
            mode = os.stat(vdev).st_mode
            if not stat.S_ISBLK(mode) and not stat.S_ISREG(mode):
                ret[zpool][vdev] = 'not a block device, a file vdev or character special device'
                continue
        dlist.append(vdev)

    if len(ret[zpool]) > 0:
        return ret

    devs = ' '.join(dlist)

    # try to add; watch out for mismatched replication levels
    zpool_cmd = _check_zpool()
    cmd = '{zpool_cmd} add {force}{zpool} {devs}'.format(
        zpool_cmd=zpool_cmd,
        force='-f ' if force else '',
        zpool=zpool,
        devs=devs
    )
    res = __salt__['cmd.run_all'](cmd, python_shell=False)
    if res['retcode'] != 0:
        ret[zpool] = res['stderr'] if 'stderr' in res else res['stdout']
    else:
        ret[zpool] = {}
        for device in dlist:
            if device not in ['mirror', 'log', 'cache', 'raidz1', 'raidz2', 'raidz3', 'spare']:
                ret[zpool][device] = 'added'

    return ret
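
The vdev checks in `add()` duplicate those in `online()` and `replace()`. As a design note, that validation could plausibly be factored into one helper; a hedged sketch (the name `_validate_vdevs` is not part of the original module):

import os
import stat

_VDEV_KEYWORDS = ('mirror', 'log', 'cache', 'raidz1', 'raidz2', 'raidz3', 'spare')


def _validate_vdevs(*vdevs):
    '''
    Hypothetical helper: split vdev arguments into usable entries and a dict
    of per-device error messages, mirroring the checks in the snippets above.
    '''
    errors = {}
    usable = []
    for vdev in vdevs:
        if vdev in _VDEV_KEYWORDS:
            # Replication keywords are passed through unchanged.
            usable.append(vdev)
            continue
        if not os.path.exists(vdev):
            errors[vdev] = 'not present on filesystem'
            continue
        mode = os.stat(vdev).st_mode
        if not stat.S_ISBLK(mode) and not stat.S_ISREG(mode):
            errors[vdev] = 'not a block device or file vdev'
            continue
        usable.append(vdev)
    return usable, errors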
Ejemplo n.º 28
0
def is_block_device(dev):
    if not os.path.exists(dev):
        fail_usage("Failed: device \"" + dev + "\" does not exist")
    if not stat.S_ISBLK(os.stat(dev).st_mode):
        fail_usage("Failed: device \"" + dev + "\" is not a block device")
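
`fail_usage()` is not defined in this snippet; presumably it reports the error and aborts. A minimal stand-in for trying the check locally (an assumption, not the real helper):

import sys


def fail_usage(message):
    # Hypothetical stand-in: print the message and abort, which is roughly
    # what the snippet above appears to expect from fail_usage().
    print(message, file=sys.stderr)
    sys.exit(1)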
Ejemplo n.º 29
0
def replace(zpool, old_device, new_device=None, force=False):
    '''
    .. versionchanged:: 2016.3.0

    Replaces old_device with new_device.

    .. note::
        This is equivalent to attaching new_device,
        waiting for it to resilver, and then detaching old_device.

        The size of new_device must be greater than or equal to the minimum
        size of all the devices in a mirror or raidz configuration.

    zpool : string
        name of storage pool
    old_device : string
        old device to replace
    new_device : string
        optional new device
    force : boolean
        Forces use of new_device, even if it appears to be in use.

    CLI Example:

    .. code-block:: bash

        salt '*' zpool.replace myzpool /path/to/vdev1 /path/to/vdev2
    '''
    ret = {}
    # Make sure pool is there
    if not exists(zpool):
        ret[zpool] = 'storage pool does not exist'
        return ret

    # check devices
    ret[zpool] = {}
    if not new_device:  # in-place replace: old_device must still be present and usable
        if not os.path.exists(old_device):
            ret[zpool][old_device] = 'not present on filesystem'
        else:
            mode = os.stat(old_device).st_mode
            if not stat.S_ISBLK(mode) and not stat.S_ISREG(mode):
                ret[zpool][old_device] = 'not a block device, a file vdev or character special device'

    if new_device:  # a new device was given; validate it (old_device may already have failed)
        if not os.path.exists(new_device):
            ret[zpool][new_device] = 'not present on filesystem'
        else:
            mode = os.stat(new_device).st_mode
            if not stat.S_ISBLK(mode) and not stat.S_ISREG(mode):
                ret[zpool][new_device] = 'not a block device, a file vdev or character special device'

    if len(ret[zpool]) > 0:
        return ret

    # Replace vdevs
    zpool_cmd = _check_zpool()
    cmd = '{zpool_cmd} replace {force}{zpool} {old_device}{new_device}'.format(
        zpool_cmd=zpool_cmd,
        zpool=zpool,
        force='-f ' if force else '',
        old_device=old_device,
        new_device=' {0}'.format(new_device) if new_device else ''
    )
    res = __salt__['cmd.run_all'](cmd, python_shell=False)
    if res['retcode'] != 0:
        ret[zpool] = res['stderr'] if 'stderr' in res else res['stdout']
    else:
        ret[zpool] = {}
        if new_device:
            ret[zpool][old_device] = 'replaced with {0}'.format(new_device)
        else:
            ret[zpool][old_device] = 'replaced'

    return ret
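
To make the string formatting above concrete, here is what the command looks like for an in-place replace (new_device left as None), with made-up values; `/sbin/zpool` and the device path are illustrative only:

cmd = '{zpool_cmd} replace {force}{zpool} {old_device}{new_device}'.format(
    zpool_cmd='/sbin/zpool',      # stands in for whatever _check_zpool() returns
    zpool='myzpool',
    force='-f ',
    old_device='/dev/dsk/c0t0d0',
    new_device='',                # empty when new_device is None
)
print(cmd)  # /sbin/zpool replace -f myzpool /dev/dsk/c0t0d0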
Ejemplo n.º 30
0
        def zram_init() -> None:
            info("Zram: trying to initialize free device")
            output = None
            success = False
            for n in range(3):
                if n > 0:
                    warn(f"Zram: device or resource was busy, retry #{n}")
                    time.sleep(1)
                # zramctl is an external program; it prints the path to the first free device.
                output = subprocess.run(
                    [
                        "zramctl",
                        "-f",
                        "-a",
                        config.get("zram_alg"),
                        "-s",
                        str(zram_size),
                    ],
                    text=True,
                    stdout=subprocess.PIPE,
                    stderr=subprocess.STDOUT,
                ).stdout.rstrip()
                if "failed to reset: Device or resource busy" in output:
                    continue
                else:
                    success = True
                    break
            # Try limit reached.
            if not success:
                warn("Zram: device or resource was busy too many times")
                return
            zram_dev = None
            if "zramctl: no free zram device found" in output:
                warn("Zram: zramctl can't find free device")
                info("Zram: using workaround hook for hot add")
                if not os.path.isfile("/sys/class/zram-control/hot_add"):
                    error(
                        "Zram: this kernel does not support hot adding zram devices, "
                        "please use a 4.2+ kernel or see 'modinfo zram' and create a "
                        "modprobe rule"
                    )
                new_zram = read("/sys/class/zram-control/hot_add").rstrip()
                zram_dev = f"/dev/zram{new_zram}"
                info(f"Zram: success: new device {zram_dev}")
            elif "/dev/zram" in output:
                mode = os.stat(output).st_mode
                if not stat.S_ISBLK(mode):
                    return
                zram_dev = output
            else:
                error(f"Zram: unexpected output from zramctl: {output}")

            mode = os.stat(zram_dev).st_mode
            if stat.S_ISBLK(mode):
                info(f"Zram: initialized: {zram_dev}")
                ret_code = subprocess.run(
                    ["mkswap", zram_dev],
                    stdout=subprocess.DEVNULL,
                    stderr=subprocess.DEVNULL,
                ).returncode
                if ret_code == 0:
                    unit_name = gen_swap_unit(
                        what=zram_dev,
                        options="discard",
                        priority=config.get("zram_prio"),
                        tag="zram",
                    )
                    subprocess.run(["systemctl", "daemon-reload"], check=True)
                    subprocess.run(["systemctl", "start", unit_name], check=True)
            else:
                warn("Zram: can't get free zram device")
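
The zram snippet relies on `read()`, `gen_swap_unit()`, `info()`, `warn()` and `error()` helpers defined elsewhere in that script. As an assumption about the simplest of them, `read()` presumably just returns a file's contents; a minimal sketch in the snippet's own annotated style:

def read(path: str) -> str:
    # Hypothetical stand-in for the helper used above: return the whole
    # contents of a (typically small sysfs) file as a string.
    with open(path) as handle:
        return handle.read()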