def check(self, pkg):
        """Validate shared-library packaging policy for a binary package.

        Collects the sonames of ELF shared objects shipped by *pkg* and
        emits rpmlint warnings/errors for policy violations: wrong package
        name for a single-lib package, missing numeric suffix, unversioned
        libs in a standard lib package, fixed ("=") dependencies, excessive
        .so.* requires, and non-versioned directories under system paths.

        NOTE(review): Python 2 era code — relies on the `string` module and
        on `map` evaluating eagerly (see the final statement).
        """
        global _policy_legacy_exceptions

        if pkg.isSource():
            return

        # Only check unsuffixed lib* packages
        if pkg.name.endswith('-devel') or pkg.name.endswith('-doc'):
            return

        files = pkg.files()

        # Search for shared libraries in this package
        libs = set()  # sonames provided by this package
        libs_needed = set()  # sonames the package's own ELF objects link to
        libs_to_dir = dict()  # soname -> directory it is installed in
        dirs = set()  # directories containing shared objects
        reqlibs = set()  # provided sonames that the package also requires
        # Requirement names with any "(...)" qualifier (e.g. "(64bit)") stripped.
        pkg_requires = set(
            map(lambda x: string.split(x[0], '(')[0], pkg.requires()))

        for f, pkgfile in files.items():
            if f.find('.so.') != -1 or f.endswith('.so'):
                filename = pkg.dirName() + '/' + f
                try:
                    # Only regular files that libmagic identifies as ELF.
                    if stat.S_ISREG(files[f].mode) and 'ELF' in pkgfile.magic:
                        bi = BinaryInfo(pkg, filename, f, False, True)
                        libs_needed = libs_needed.union(bi.needed)
                        if bi.soname != 0:
                            lib_dir = string.join(f.split('/')[:-1], '/')
                            libs.add(bi.soname)
                            libs_to_dir[bi.soname] = lib_dir
                            dirs.add(lib_dir)
                        if bi.soname in pkg_requires:
                            # But not if the library is used by the pkg itself
                            # This avoids program packages with their own
                            # private lib
                            # FIXME: we'd need to check if somebody else links
                            # to this lib
                            reqlibs.add(bi.soname)
                except Exception:
                    # Best effort: unreadable or malformed files are skipped.
                    pass
            pass

        std_dirs = dirs.intersection(
            ('/lib', '/lib64', '/usr/lib', '/usr/lib64', '/opt/kde3/lib',
             '/opt/kde3/lib64'))

        # If this is a program package (all libs it provides are
        # required by itself), bail out
        if not pkg.name.startswith("lib") and len(
                libs.difference(reqlibs)) == 0:
            return

        # "std lib package" == lib* name ending in a digit (e.g. libfoo2).
        std_lib_package = False
        if pkg.name.startswith("lib") and pkg.name[-1].isdigit():
            std_lib_package = True

        # ignore libs in a versioned non_std_dir
        if std_lib_package:
            for lib in libs.copy():
                lib_dir = libs_to_dir[lib]
                if lib_dir.startswith("/opt/kde3"):
                    continue
                for lib_part in lib_dir.split('/'):
                    if len(lib_part) == 0:
                        continue
                    # A digit-terminated path component (other than lib64)
                    # marks a versioned private directory.
                    if lib_part[-1].isdigit(
                    ) and not lib_part.endswith("lib64"):
                        libs.remove(lib)
                        break

        # Check for non-versioned libs in a std lib package
        if std_lib_package:
            for lib in libs.copy():
                if not lib[-1].isdigit():
                    printWarning(pkg, "shlib-unversioned-lib", lib)
                    libs.remove(lib)

        # If this package should be or should be splitted into shlib
        # package(s)
        if len(libs) > 0 and len(std_dirs) > 0:
            # If the package contains a single shlib, name after soname
            if len(libs) == 1:
                soname = libs.copy().pop()
                libname = libname_from_soname(soname)
                if libname.startswith('lib') and pkg.name != libname and \
                        pkg.name != libname + "-mini":
                    if libname in _policy_legacy_exceptions:
                        printWarning(pkg, 'shlib-legacy-policy-name-error',
                                     libname)
                    else:
                        printError(pkg, 'shlib-policy-name-error', libname)

            elif not pkg.name[-1:].isdigit():
                printError(pkg, 'shlib-policy-missing-suffix')

        # Remaining checks only apply to lib* packages (and not -lang ones).
        if (not pkg.name.startswith('lib')) or pkg.name.endswith('-lang'):
            return

        if not libs:
            if pkg.name in _policy_legacy_exceptions:
                printWarning(pkg, 'shlib-legacy-policy-missing-lib', pkg.name)
            else:
                printError(pkg, 'shlib-policy-missing-lib')

        # Verify no non-lib stuff is in the package
        dirs = set()
        for f in files:
            if os.path.isdir(pkg.dirName() + f):
                dirs.add(f)

        # Verify shared lib policy package doesn't have hard dependency on non-lib packages
        if std_lib_package:
            for dep in pkg.requires():
                if (dep[0].startswith('rpmlib(')
                        or dep[0].startswith('config(')):
                    continue
                # "=" set without ">" means an exact-version pin.
                if (dep[1] & (rpm.RPMSENSE_GREATER
                              | rpm.RPMSENSE_EQUAL)) == rpm.RPMSENSE_EQUAL:
                    printWarning(pkg, "shlib-fixed-dependency",
                                 Pkg.formatRequire(dep[0], dep[1], dep[2]))

        # Verify non-lib stuff does not add dependencies
        if libs:
            for dep in pkg_requires.difference(_essential_dependencies):
                if dep.find(
                        '.so.'
                ) != -1 and not dep in libs and not dep in libs_needed:
                    printError(pkg, 'shlib-policy-excessive-dependency', dep)

        # Check for non-versioned directories beyond sysdirs in package
        sysdirs = [
            '/lib', '/lib64', '/usr/lib', '/usr/lib64',
            '/usr/share/doc/packages', '/usr/share'
        ]
        cdirs = set()
        for sysdir in sysdirs:
            done = set()
            for dir in dirs:
                if dir.startswith(sysdir + '/'):
                    # First path component below the sysdir.
                    ssdir = string.split(dir[len(sysdir) + 1:], '/')[0]
                    if not ssdir[-1].isdigit():
                        cdirs.add(sysdir + '/' + ssdir)
                    done.add(dir)
            dirs = dirs.difference(done)
        # NOTE(review): under Python 3 `map` is lazy, so this would emit
        # nothing; acceptable only for the Python 2 runtime this targets.
        map(lambda dir: printError(pkg, 'shlib-policy-nonversioned-dir', dir),
            cdirs)
Example #2
0
def test_preauth_indicator(simple_pam_cert_auth):
    """Check if preauth indicator file is created"""
    indicator_path = config.PUBCONF_PATH + "/pam_preauth_available"
    assert stat.S_ISREG(os.stat(indicator_path).st_mode)
Example #3
0
            def process(path):
                """Fold an ls(1)-style metadata line for *path* into the
                running hash via the enclosing update_hash() callback:
                type char, rwx permission string, optional owner/group,
                device numbers or size, SHA-256 of regular-file contents,
                the path, and a symlink target when applicable."""
                s = os.lstat(path)

                # File-type character (d/c/b/s/l/p, '-' for regular files).
                if stat.S_ISDIR(s.st_mode):
                    update_hash('d')
                elif stat.S_ISCHR(s.st_mode):
                    update_hash('c')
                elif stat.S_ISBLK(s.st_mode):
                    update_hash('b')
                elif stat.S_ISSOCK(s.st_mode):
                    update_hash('s')
                elif stat.S_ISLNK(s.st_mode):
                    update_hash('l')
                elif stat.S_ISFIFO(s.st_mode):
                    update_hash('p')
                else:
                    update_hash('-')

                def add_perm(mask, on, off='-'):
                    # Emit `on` when `mask` is set in the mode, else `off`.
                    if mask & s.st_mode:
                        update_hash(on)
                    else:
                        update_hash(off)

                add_perm(stat.S_IRUSR, 'r')
                add_perm(stat.S_IWUSR, 'w')
                # setuid renders as 's' when also executable, 'S' otherwise.
                if stat.S_ISUID & s.st_mode:
                    add_perm(stat.S_IXUSR, 's', 'S')
                else:
                    add_perm(stat.S_IXUSR, 'x')

                add_perm(stat.S_IRGRP, 'r')
                add_perm(stat.S_IWGRP, 'w')
                # setgid follows the same s/S convention as setuid above.
                if stat.S_ISGID & s.st_mode:
                    add_perm(stat.S_IXGRP, 's', 'S')
                else:
                    add_perm(stat.S_IXGRP, 'x')

                add_perm(stat.S_IROTH, 'r')
                add_perm(stat.S_IWOTH, 'w')
                # Sticky bit renders as 't' (no lowercase/uppercase split here).
                if stat.S_ISVTX & s.st_mode:
                    update_hash('t')
                else:
                    add_perm(stat.S_IXOTH, 'x')

                # Owner/group names only when the closure flag requests them.
                if include_owners:
                    update_hash(" %10s" % pwd.getpwuid(s.st_uid).pw_name)
                    update_hash(" %10s" % grp.getgrgid(s.st_gid).gr_name)

                update_hash(" ")
                # Device column: major.minor for device nodes, blank otherwise.
                if stat.S_ISBLK(s.st_mode) or stat.S_ISCHR(s.st_mode):
                    update_hash("%9s" % ("%d.%d" % (os.major(s.st_rdev), os.minor(s.st_rdev))))
                else:
                    update_hash(" " * 9)

                update_hash(" ")
                # Size column: only meaningful for regular files.
                if stat.S_ISREG(s.st_mode):
                    update_hash("%10d" % s.st_size)
                else:
                    update_hash(" " * 10)

                update_hash(" ")
                fh = hashlib.sha256()
                if stat.S_ISREG(s.st_mode):
                    # Hash file contents
                    with open(path, 'rb') as d:
                        for chunk in iter(lambda: d.read(4096), b""):
                            fh.update(chunk)
                    update_hash(fh.hexdigest())
                else:
                    # Same-width blank keeps the column layout stable.
                    update_hash(" " * len(fh.hexdigest()))

                update_hash(" %s" % path)

                if stat.S_ISLNK(s.st_mode):
                    update_hash(" -> %s" % os.readlink(path))

                update_hash("\n")
Example #4
0
    def _create_via_common_rec(self, path, create_symlinks=True):
        """Create the filesystem object described by self.mode at *path*.

        Any pre-existing entry at *path* is removed first (rmdir for a
        directory, unlink otherwise); a non-empty directory is refused.
        New objects get restrictive interim permissions (0o600 / 0o700) —
        presumably the final mode/ownership are applied by a later step;
        confirm with the caller.

        Raises ApplyError when the record carries no mode metadata.
        """
        if not self.mode:
            raise ApplyError('no metadata - cannot create path ' + path)

        # If the path already exists and is a dir, try rmdir.
        # If the path already exists and is anything else, try unlink.
        st = None
        try:
            st = xstat.lstat(path)
        except OSError as e:
            # ENOENT just means nothing to remove; anything else is real.
            if e.errno != errno.ENOENT:
                raise
        if st:
            if stat.S_ISDIR(st.st_mode):
                try:
                    os.rmdir(path)
                except OSError as e:
                    if e.errno in (errno.ENOTEMPTY, errno.EEXIST):
                        msg = 'refusing to overwrite non-empty dir ' + path
                        raise Exception(msg)
                    raise
            else:
                os.unlink(path)

        if stat.S_ISREG(self.mode):
            assert (self._recognized_file_type())
            # O_EXCL guards against racing with another creator.
            fd = os.open(path, os.O_CREAT | os.O_WRONLY | os.O_EXCL, 0o600)
            os.close(fd)
        elif stat.S_ISDIR(self.mode):
            assert (self._recognized_file_type())
            os.mkdir(path, 0o700)
        elif stat.S_ISCHR(self.mode):
            assert (self._recognized_file_type())
            os.mknod(path, 0o600 | stat.S_IFCHR, self.rdev)
        elif stat.S_ISBLK(self.mode):
            assert (self._recognized_file_type())
            os.mknod(path, 0o600 | stat.S_IFBLK, self.rdev)
        elif stat.S_ISFIFO(self.mode):
            assert (self._recognized_file_type())
            os.mknod(path, 0o600 | stat.S_IFIFO)
        elif stat.S_ISSOCK(self.mode):
            try:
                os.mknod(path, 0o600 | stat.S_IFSOCK)
            except OSError as e:
                # Some platforms refuse mknod for sockets; fall back to bind().
                if e.errno in (errno.EINVAL, errno.EPERM):
                    s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
                    s.bind(path)
                else:
                    raise
        elif stat.S_ISLNK(self.mode):
            assert (self._recognized_file_type())
            if self.symlink_target and create_symlinks:
                # on MacOS, symlink() permissions depend on umask, and there's
                # no way to chown a symlink after creating it, so we have to
                # be careful here!
                oldumask = os.umask((self.mode & 0o777) ^ 0o777)
                try:
                    os.symlink(self.symlink_target, path)
                finally:
                    os.umask(oldumask)
        # FIXME: S_ISDOOR, S_IFMPB, S_IFCMP, S_IFNWK, ... see stat(2).
        else:
            assert (not self._recognized_file_type())
            add_error('not creating "%s" with unrecognized mode "0x%x"\n' %
                      (path, self.mode))
Example #5
0
            if getattr(e, "errno", None) == 2:
                return False
            raise
        return statinfo.S_ISDIR(stat.st_mode)

    @synchronize
    @convert_os_errors
    def isfile(self, path):
        """Return True if *path* names a regular file on the remote side.

        NOTE(review): Python 2 syntax (`except IOError, e`).  The local
        name `stat` shadows the usual stat module; `statinfo` is
        presumably the module providing S_ISREG — confirm the import.
        """
        npath = self._normpath(path)
        try:
            stat = self.client.stat(npath)
        except IOError, e:
            # errno 2 (ENOENT): a missing path simply means "not a file".
            if getattr(e, "errno", None) == 2:
                return False
            raise
        return statinfo.S_ISREG(stat.st_mode)

    @synchronize
    @convert_os_errors
    def listdir(self,
                path="./",
                wildcard=None,
                full=False,
                absolute=False,
                dirs_only=False,
                files_only=False):
        npath = self._normpath(path)
        try:
            attrs_map = None
            if dirs_only or files_only:
                attrs = self.client.listdir_attr(npath)
    def init_install(self, setup):
        # mount the media location.
        print " --> Installation started"
        try:
            if (not os.path.exists("/target")):
                if (setup.skip_mount):
                    self.error_message(message=_(
                        "ERROR: You must first manually mount your target filesystem(s) at /target to do a custom install!"
                    ))
                    return
                os.mkdir("/target")
            if (not os.path.exists("/source")):
                os.mkdir("/source")
            # find the squashfs..
            if (not os.path.exists(self.media)):
                print "Base filesystem does not exist! Critical error (exiting)."
                sys.exit(1)  # change to report

            try:
                os.system("umount --force /target/dev/shm")
                os.system("umount --force /target/dev/pts")
                os.system("umount --force /target/dev/")
                os.system("umount --force /target/sys/")
                os.system("umount --force /target/proc/")
            except:
                pass

            if (not setup.skip_mount):
                self.step_format_partitions(setup)
                self.step_mount_partitions(setup)
            else:
                self.step_mount_source(setup)

            # walk root filesystem
            SOURCE = "/source/"
            DEST = "/target/"
            directory_times = []
            our_total = 0
            our_current = -1
            os.chdir(SOURCE)
            # index the files
            print " --> Indexing files"
            self.update_progress(pulse=True,
                                 message=_("Indexing files to be copied.."))
            for top, dirs, files in os.walk(SOURCE, topdown=False):
                our_total += len(dirs) + len(files)
            our_total += 1  # safenessness
            print " --> Copying files"
            for top, dirs, files in os.walk(SOURCE):
                # Sanity check. Python is a bit schitzo
                dirpath = top
                if (dirpath.startswith(SOURCE)):
                    dirpath = dirpath[len(SOURCE):]
                for name in dirs + files:
                    # following is hacked/copied from Ubiquity
                    rpath = os.path.join(dirpath, name)
                    sourcepath = os.path.join(SOURCE, rpath)
                    targetpath = os.path.join(DEST, rpath)
                    st = os.lstat(sourcepath)
                    mode = stat.S_IMODE(st.st_mode)

                    # now show the world what we're doing
                    our_current += 1
                    self.update_progress(total=our_total,
                                         current=our_current,
                                         message=_("Copying %s" % rpath))

                    if os.path.exists(targetpath):
                        if not os.path.isdir(targetpath):
                            os.remove(targetpath)
                    if stat.S_ISLNK(st.st_mode):
                        if os.path.lexists(targetpath):
                            os.unlink(targetpath)
                        linkto = os.readlink(sourcepath)
                        os.symlink(linkto, targetpath)
                    elif stat.S_ISDIR(st.st_mode):
                        if not os.path.isdir(targetpath):
                            os.mkdir(targetpath, mode)
                    elif stat.S_ISCHR(st.st_mode):
                        os.mknod(targetpath, stat.S_IFCHR | mode, st.st_rdev)
                    elif stat.S_ISBLK(st.st_mode):
                        os.mknod(targetpath, stat.S_IFBLK | mode, st.st_rdev)
                    elif stat.S_ISFIFO(st.st_mode):
                        os.mknod(targetpath, stat.S_IFIFO | mode)
                    elif stat.S_ISSOCK(st.st_mode):
                        os.mknod(targetpath, stat.S_IFSOCK | mode)
                    elif stat.S_ISREG(st.st_mode):
                        # we don't do blacklisting yet..
                        try:
                            os.unlink(targetpath)
                        except:
                            pass
                        self.do_copy_file(sourcepath, targetpath)
                    os.lchown(targetpath, st.st_uid, st.st_gid)
                    if not stat.S_ISLNK(st.st_mode):
                        os.chmod(targetpath, mode)
                    if stat.S_ISDIR(st.st_mode):
                        directory_times.append(
                            (targetpath, st.st_atime, st.st_mtime))
                    # os.utime() sets timestamp of target, not link
                    elif not stat.S_ISLNK(st.st_mode):
                        os.utime(targetpath, (st.st_atime, st.st_mtime))
            # Apply timestamps to all directories now that the items within them
            # have been copied.
            print " --> Restoring meta-info"
            for dirtime in directory_times:
                (directory, atime, mtime) = dirtime
                try:
                    self.update_progress(
                        pulse=True,
                        message=_("Restoring meta-information on %s" %
                                  directory))
                    os.utime(directory, (atime, mtime))
                except OSError:
                    pass

            # Steps:
            our_total = 11
            our_current = 0
            # chroot
            print " --> Chrooting"
            self.update_progress(total=our_total,
                                 current=our_current,
                                 message=_("Entering new system.."))
            os.system("mount --bind /dev/ /target/dev/")
            os.system("mount --bind /dev/shm /target/dev/shm")
            os.system("mount --bind /dev/pts /target/dev/pts")
            os.system("mount --bind /sys/ /target/sys/")
            os.system("mount --bind /proc/ /target/proc/")
            os.system("mv /target/etc/resolv.conf /target/etc/resolv.conf.bk")
            os.system("cp -f /etc/resolv.conf /target/etc/resolv.conf")

            # remove live user
            print " --> Removing live user"
            live_user = self.live_user
            our_current += 1
            self.update_progress(
                total=our_total,
                current=our_current,
                message=_("Removing live configuration (user)"))
            self.do_run_in_chroot("deluser %s" % live_user)
            # can happen
            if (os.path.exists("/target/home/%s" % live_user)):
                self.do_run_in_chroot("rm -rf /home/%s" % live_user)

            # remove live-initramfs (or w/e)
            print " --> Removing live packages"
            our_current += 1
            self.update_progress(
                total=our_total,
                current=our_current,
                message=_("Removing live configuration (packages)"))
            self.do_run_in_chroot(
                "apt-get purge --yes --force-yes live-boot live-boot-initramfs-tools tanglu-live-installer live-config live-config-systemd gparted"
            )
            # this is only used by the live-installer
            # FIXME: If a Tanglu spin is *ever* shipping it by default, we need to update this line!
            self.do_run_in_chroot(
                "apt-get purge --yes --force-yes graphicsmagick")

            # When the purge is incomplete and leaves redundant symbolic links in the rc*.d directories.
            # The resulting startpar error prevents gsfxi to successfully install the Nvidia drivers.
            self.do_run_in_chroot("update-rc.d -f live-installer remove")

            # remove some live-config cruft which might still be around
            self.do_run_in_chroot("rm -rf /lib/live")
            # reset dconf overrides
            self.do_run_in_chroot(
                "rm -f /usr/share/glib-2.0/schemas/20_live-cd-config.gschema.override"
            )
            if not os.path.exists('/target/usr/bin/plasma-desktop'):
                self.do_run_in_chroot(
                    "glib-compile-schemas /usr/share/glib-2.0/schemas")

            # add new user
            print " --> Adding new user"
            our_current += 1
            self.update_progress(total=our_total,
                                 current=our_current,
                                 message=_("Adding user to system"))
            self.do_run_in_chroot(
                "useradd -s %s -c \'%s\' -G sudo,adm,dialout,audio,video,cdrom,floppy,dip,plugdev,lpadmin -m %s"
                % ("/bin/bash", setup.real_name, setup.username))
            os.system(
                "chroot /target/ /bin/bash -c \"shopt -s dotglob && cp -R /etc/skel/* /home/%s/\""
                % setup.username)
            self.do_run_in_chroot(
                "chown -R %s:%s /home/%s" %
                (setup.username, setup.username, setup.username))

            fp = open("/target/tmp/.passwd", "w")
            fp.write(setup.username + ":" + setup.password1 + "\n")
            fp.write("root:" + setup.password1 + "\n")
            fp.close()
            self.do_run_in_chroot("cat /tmp/.passwd | chpasswd")
            os.system("rm -f /target/tmp/.passwd")

            # Add user's face
            if os.path.exists('/tmp/live-installer-face.png'):
                os.system(
                    "cp /tmp/live-installer-face.png /target/home/%s/.face" %
                    setup.username)
                self.do_run_in_chroot(
                    "chown %s:%s /home/%s/.face" %
                    (setup.username, setup.username, setup.username))

            # write the /etc/fstab
            print " --> Writing fstab"
            our_current += 1
            self.update_progress(
                total=our_total,
                current=our_current,
                message=_("Writing filesystem mount information"))
            # make sure fstab has default /proc and /sys entries
            if (not os.path.exists("/target/etc/fstab")):
                os.system(
                    "echo \"#### Static Filesystem Table File\" > /target/etc/fstab"
                )
            fstab = open("/target/etc/fstab", "a")
            fstab.write("proc\t/proc\tproc\tdefaults\t0\t0\n")
            if (not setup.skip_mount):
                for partition in setup.partitions:
                    if (partition.mount_as is not None
                            and partition.mount_as != "None"):
                        partition_uuid = partition.partition.path  # If we can't find the UUID we use the path
                        try:
                            blkid = commands.getoutput('blkid').split('\n')
                            for blkid_line in blkid:
                                blkid_elements = blkid_line.split(':')
                                if blkid_elements[
                                        0] == partition.partition.path:
                                    blkid_mini_elements = blkid_line.split()
                                    for blkid_mini_element in blkid_mini_elements:
                                        if "UUID=" in blkid_mini_element:
                                            partition_uuid = blkid_mini_element.replace(
                                                '"', '').strip()
                                            break
                                    break
                        except Exception, detail:
                            print detail

                        fstab.write("# %s\n" % (partition.partition.path))

                        if (partition.mount_as == "/"):
                            fstab_fsck_option = "1"
                        else:
                            fstab_fsck_option = "0"

                        if ("ext" in partition.type):
                            fstab_mount_options = "rw,errors=remount-ro"
                        else:
                            fstab_mount_options = "defaults"

                        if (partition.type == "swap"):
                            fstab.write("%s\tswap\tswap\tsw\t0\t0\n" %
                                        partition_uuid)
                        else:
                            fstab.write("%s\t%s\t%s\t%s\t%s\t%s\n" %
                                        (partition_uuid, partition.mount_as,
                                         partition.type, fstab_mount_options,
                                         "0", fstab_fsck_option))
            fstab.close()
Example #7
0
def attach(zpool, device, new_device, force=False):
    '''
    .. versionchanged:: 2016.3.0

    Attach specified device to zpool

    zpool : string
        name of storage pool
    device : string
        device to attach too
    new_device : string
        device to attach
    force : boolean
        forces use of device

    Returns a dict keyed by pool name: an error string (or per-device
    error dict) on failure, or ``{new_device: 'attached'}`` on success.

    CLI Example:

    .. code-block:: bash

        salt '*' zpool.attach myzpool /path/to/vdev1 /path/to/vdev2 [...]
    '''
    ret = {}

    # check for pool
    if not exists(zpool):
        ret[zpool] = 'storage pool does not exist'
        return ret

    def _vdev_error(path):
        # Return an error message for an unusable vdev path, or None when
        # the path is a block device or a regular file (file vdev).
        if not os.path.exists(path):
            return 'not present on filesystem'
        mode = os.stat(path).st_mode
        if not stat.S_ISBLK(mode) and not stat.S_ISREG(mode):
            return 'not a block device, a file vdev or character special device'
        return None

    # check devices (same validation for both vdev paths; previously this
    # logic was duplicated verbatim for device and new_device)
    ret[zpool] = {}
    for vdev in (device, new_device):
        error = _vdev_error(vdev)
        if error is not None:
            ret[zpool][vdev] = error

    if len(ret[zpool]) > 0:
        return ret

    # try and add watch out for mismatched replication levels
    zpool_cmd = _check_zpool()
    cmd = '{zpool_cmd} attach {force}{zpool} {device} {new_device}'.format(
        zpool_cmd=zpool_cmd,
        force='-f ' if force else '',
        zpool=zpool,
        device=device,
        new_device=new_device)
    res = __salt__['cmd.run_all'](cmd, python_shell=False)
    if res['retcode'] != 0:
        # Prefer stderr for diagnostics; fall back to stdout.
        ret[zpool] = res['stderr'] if 'stderr' in res else res['stdout']
    else:
        ret[zpool] = {}
        ret[zpool][new_device] = 'attached'

    return ret
Example #8
0
 def canhandlerequest(self):
     """Report whether this handler applies: only regular files qualify."""
     if not self.statresult:
         # Preserve the original `and` semantics: a falsy stat result is
         # returned as-is rather than coerced to False.
         return self.statresult
     return stat.S_ISREG(self.statresult[stat.ST_MODE])
#!/usr/bin/python3.1
## For each entry in a directory tree, build its full path and inspect
## it with 'stat' to obtain its mode information; then use if/elif
## branches to report whether the entry is a regular file, a directory,
## or something else.

import os, stat
# Root of the tree to scan.
my_path = '/tmp'
# Walk the tree and classify every entry by its stat mode.
for dirpath, dirnames, filenames in os.walk(my_path):
    # BUG FIX: `dirnames or filenames` ignored the files whenever the
    # directory had any subdirectories; visit both lists instead.
    for f in dirnames + filenames:
        x = os.path.join(dirpath, f)
        x_stat = os.stat(x)[stat.ST_MODE]
        if stat.S_ISREG(x_stat):
            print(x, 'is a Regular File: ')
        elif stat.S_ISDIR(x_stat):
            print(x, 'is a Directory: ')
        # BUG FIX: `elif stat.S_IFIFO:` tested a truthy constant, so this
        # branch matched every remaining entry; test the mode properly.
        elif stat.S_ISFIFO(x_stat):
            print(x, 'is a FIFO File: ')
Example #10
0
def main(*args):
    """Parse command-line options, validate their combinations, and verify
    that PSL-formatted data is available on stdin.

    Returns a BSD sysexits-style status code (os.EX_OK, os.EX_USAGE,
    os.EX_NOINPUT, os.EX_OSFILE).

    NOTE(review): Python 2 era code (`except IOError, msg` near the end);
    relies on helpers defined elsewhere in this file (checkInstallation,
    Parameters, printUsage, which).
    """

    requiredVersion = (2, 6, 2)
    checkInstallation(requiredVersion)

    params = Parameters()

    #
    # Fixes bug with Python handling of SIGPIPE signal from UNIX head, etc.
    # http://coding.derkeiler.com/Archive/Python/comp.lang.python/2004-06/3823.html
    #

    signal.signal(signal.SIGPIPE, signal.SIG_DFL)

    #
    # Read in options
    #

    optstr = ""
    longopts = [
        "do-not-sort", "keep-header", "headered", "help", "max-mem=",
        "sort-tmpdir="
    ]
    try:
        (options, args) = getopt.getopt(sys.argv[1:], optstr, longopts)
    except getopt.GetoptError as error:
        sys.stderr.write("[%s] - Error: %s\n" % (sys.argv[0], str(error)))
        printUsage("stderr")
        return os.EX_USAGE
    for key, value in options:
        # NOTE(review): `key in ("--help")` is a substring test on a string,
        # not tuple membership; it happens to work because getopt returns
        # exact option names.
        if key in ("--help"):
            printUsage("stdout")
            return os.EX_OK
        elif key in ("--keep-header"):
            params.keepHeader = True
        elif key in ("--headered"):
            params.inputIsHeadered = True
        elif key in ("--do-not-sort"):
            params.sortOutput = False
        elif key in ("--sort-tmpdir"):
            params.sortTmpdir = str(value)
            params.sortTmpdirSet = True
        elif key in ("--max-mem"):
            params.maxMem = str(value)
            params.maxMemChanged = True

    # Enforce mutually-dependent option combinations.
    if params.keepHeader and not params.inputIsHeadered:
        sys.stderr.write(
            "[%s] - Error: Cannot specify --keep-header without --headered\n" %
            sys.argv[0])
        printUsage("stderr")
        return os.EX_USAGE

    if params.maxMemChanged and not params.sortOutput:
        sys.stderr.write(
            "[%s] - Error: Cannot specify both --do-not-sort and --max-mem parameters\n"
            % sys.argv[0])
        printUsage("stderr")
        return os.EX_USAGE

    if params.sortTmpdirSet and not params.maxMemChanged:
        sys.stderr.write(
            "[%s] - Error: Cannot specify --sort-tmpdir parameter without specifying --max-mem parameter\n"
            % sys.argv[0])
        printUsage("stderr")
        return os.EX_USAGE

    if params.sortTmpdirSet:
        # listdir doubles as an existence/permission probe of the tmpdir.
        try:
            os.listdir(params.sortTmpdir)
        except OSError as error:
            sys.stderr.write(
                "[%s] - Error: Temporary sort data directory specified with --sort-tmpdir is a file, is non-existent, or its permissions do not allow access\n"
                % sys.argv[0])
            printUsage("stderr")
            return os.EX_USAGE

    # Require stdin (fd 0) to be a pipe/FIFO or a redirected regular file.
    mode = os.fstat(0).st_mode
    inputIsNotAvailable = True
    if stat.S_ISFIFO(mode) or stat.S_ISREG(mode):
        inputIsNotAvailable = False
    if inputIsNotAvailable:
        sys.stderr.write(
            "[%s] - Error: Please redirect or pipe in PSL-formatted data\n" %
            sys.argv[0])
        printUsage("stderr")
        return os.EX_NOINPUT

    # The sort-bed external binary is required downstream.
    try:
        if which('sort-bed') is None:
            raise IOError(
                "The sort-bed binary could not be found in your user PATH -- please locate and install this binary"
            )
    except IOError, msg:
        sys.stderr.write("[%s] - %s\n" % (sys.argv[0], msg))
        return os.EX_OSFILE
Example #11
0
class dircmp:
    """A class that manages the comparison of 2 directories.

    dircmp(a,b,ignore=None,hide=None)
      A and B are directories.
      IGNORE is a list of names to ignore,
        defaults to ['RCS', 'CVS', 'tags'].
      HIDE is a list of names to hide,
        defaults to [os.curdir, os.pardir].

    High level usage:
      x = dircmp(dir1, dir2)
      x.report() -> prints a report on the differences between dir1 and dir2
       or
      x.report_partial_closure() -> prints report on differences between dir1
            and dir2, and reports on common immediate subdirectories.
      x.report_full_closure() -> like report_partial_closure,
            but fully recursive.

    Attributes:
     left_list, right_list: The files in dir1 and dir2,
        filtered by hide and ignore.
     common: a list of names in both dir1 and dir2.
     left_only, right_only: names only in dir1, dir2.
     common_dirs: subdirectories in both dir1 and dir2.
     common_files: files in both dir1 and dir2.
     common_funny: names in both dir1 and dir2 where the type differs between
        dir1 and dir2, or the name is not stat-able.
     same_files: list of identical files.
     diff_files: list of filenames which differ.
     funny_files: list of files which could not be compared.
     subdirs: a dictionary of dircmp objects, keyed by names in common_dirs.
     """
    # NOTE(review): Python 2 code -- phase1 relies on izip/imap/ifilter/
    # ifilterfalse and dict.has_key; presumably imported from itertools at
    # module top (not visible in this chunk) -- confirm before porting.
    def __init__(self, a, b, ignore=None, hide=None):  # Initialize
        """Record the two directory paths and the hide/ignore name lists."""
        self.left = a
        self.right = b
        if hide is None:
            self.hide = [os.curdir, os.pardir]  # Names never to be shown
        else:
            self.hide = hide
        if ignore is None:
            self.ignore = ['RCS', 'CVS', 'tags']  # Names ignored in comparison
        else:
            self.ignore = ignore

    def phase0(self):  # Compare everything except common subdirectories
        """List both directories, drop hidden/ignored names, and sort."""
        # _filter is defined elsewhere in this module -- presumably removes
        # the given names from the listing (TODO confirm against module top).
        self.left_list = _filter(os.listdir(self.left),
                                 self.hide + self.ignore)
        self.right_list = _filter(os.listdir(self.right),
                                  self.hide + self.ignore)
        self.left_list.sort()
        self.right_list.sort()

    def phase1(self):  # Compute common names
        """Split the listings into common / left-only / right-only names."""
        # Map case-normalized names back to their original spellings so the
        # results keep the on-disk capitalization even on case-insensitive
        # filesystems.
        a = dict(izip(imap(os.path.normcase, self.left_list), self.left_list))
        b = dict(izip(imap(os.path.normcase, self.right_list),
                      self.right_list))
        self.common = map(a.__getitem__, ifilter(b.has_key, a))
        self.left_only = map(a.__getitem__, ifilterfalse(b.has_key, a))
        self.right_only = map(b.__getitem__, ifilterfalse(a.has_key, b))

    def phase2(self):  # Distinguish files, directories, funnies
        """Classify each common name as dir, regular file, or 'funny'."""
        self.common_dirs = []
        self.common_files = []
        self.common_funny = []

        for x in self.common:
            a_path = os.path.join(self.left, x)
            b_path = os.path.join(self.right, x)

            ok = 1  # cleared when either side cannot be stat-ed
            try:
                a_stat = os.stat(a_path)
            except os.error, why:
                # print 'Can\'t stat', a_path, ':', why[1]
                ok = 0
            try:
                b_stat = os.stat(b_path)
            except os.error, why:
                # print 'Can\'t stat', b_path, ':', why[1]
                ok = 0

            if ok:
                # Compare only the file-type bits of st_mode; a name whose
                # type differs between the two sides is "funny".
                a_type = stat.S_IFMT(a_stat.st_mode)
                b_type = stat.S_IFMT(b_stat.st_mode)
                if a_type != b_type:
                    self.common_funny.append(x)
                elif stat.S_ISDIR(a_type):
                    self.common_dirs.append(x)
                elif stat.S_ISREG(a_type):
                    self.common_files.append(x)
                else:
                    self.common_funny.append(x)
            else:
                self.common_funny.append(x)
Example #12
0
                # NOTE(review): fragment of a larger handler -- `S`, `path`,
                # `long_listing` and `fullPath` are defined above this chunk;
                # `free` is not used in the visible portion.  Python 2 code
                # (print statements, 07777 octal literal).
                files = sorted(S.ListDir(path))
                _, size = S.GetFilesystemSize()
                _, free = S.GetFilesystemFree()

                listing = []
                if not long_listing:
                    # Short form: names only.
                    for filename in files:
                        listing.append(filename)
                else:
                    # Long form: a header row, then one formatted detail row
                    # per entry (type, permissions, size, uid, gid, name).
                    listing_format = "{0:^15s} {1:0>5o} {2:>15d} {3:>5d} {4:>5d} {5}"
                    listing_header = "{0:^15s} {1: ^5s} {2:^15s} {3:>5s} {4:>5s} {5}".format(
                        "TYPE", "PERM", "SIZE", "USER", "GROUP", "NAME")
                    listing.append(listing_header)
                    for filename in files:
                        _, filestat = S.Stat(fullPath(path, filename))
                        if stat.S_ISREG(filestat.mode):
                            filetype = "file"
                        elif stat.S_ISDIR(filestat.mode):
                            filetype = "directory"
                        else:
                            filetype = "other"
                        # 07777 masks out just the permission/suid/sticky bits
                        # for the octal PERM column.
                        details = listing_format.format(
                            filetype, filestat.mode & 07777, filestat.size,
                            filestat.uid, filestat.gid, filename)
                        listing.append(details)

                print "Contents of %s:" % path
                for entry in listing:
                    print "    " + entry
                if size is not None:
                    print "Size: %d KB" % size
Example #13
0
def main():
    """Check and repair the GPT of an image file or block device.

    Parses command-line arguments, reads the primary GPT (512-byte
    sectors only are supported), loads or rebuilds the backup GPT and,
    unless --dry-run is given or the user declines the interactive
    prompt, writes the fixed tables back.
    """
    log_parser = pld_nr_buildconf.get_logging_args_parser()
    parser = argparse.ArgumentParser(description="Fix broken GPT",
                                     parents=[log_parser])
    parser.add_argument("image", help="Image file or block device to fix")
    parser.add_argument("--write",
                        action="store_true",
                        help="Write changes without asking")
    parser.add_argument("--dry-run",
                        action="store_true",
                        help="Do not change anything")
    args = parser.parse_args()
    pld_nr_buildconf.setup_logging(args)

    # Open read-only for a dry run so nothing can be modified by accident.
    if args.dry_run:
        mode = "rb"
    else:
        mode = "r+b"

    with open(args.image, mode) as image_f:
        image_st = os.fstat(image_f.fileno())
        if stat.S_ISBLK(image_st.st_mode):
            # Block device: st_size is useless; query size and sector size
            # via ioctls instead.
            logger.debug("{} is a block device, reading its properties".format(
                args.image))
            uint64_buf = ctypes.c_uint64()
            if fcntl.ioctl(image_f.fileno(), BLKGETSIZE64, uint64_buf) < 0:
                raise IOError("ioctl BLKGETSIZE64 failed")
            image_size = uint64_buf.value
            logger.debug("  device size: {}".format(image_size))
            int_buf = ctypes.c_size_t()
            if fcntl.ioctl(image_f.fileno(), BLKSSZGET, int_buf) < 0:
                raise IOError("ioctl BLKSSZGET failed")
            logger.debug("  block size: {}".format(int_buf.value))
            if int_buf.value != 512:
                logger.warning(
                    "{} block size is {}, but this utility"
                    " currently works on 512-bytes blocks only.".format(
                        args.image, int_buf.value))
        elif stat.S_ISREG(image_st.st_mode):
            # Regular file: reuse the fstat result instead of stat-ing again.
            image_size = image_st.st_size
            logger.debug("image size: {}".format(image_size))
        else:
            logger.error("{} not a block device nor a regular file".format(
                args.image))
            sys.exit(1)
        if image_size & 0x1ff:
            logger.error("Image size not a multiply of 512!")
            sys.exit(1)
        try:
            primary_gpt = GPT(image_f, image_size=image_size, lba_size=512)
        except GPTError as err:
            logger.error(
                "Could not read GPT at the second 512-bytes sector:"
                " {}. Other sector sizes not supported yet.".format(err))
            sys.exit(1)

        # BUG FIX: pre-initialize backup_gpt -- if load_backup() raises
        # GPTError, the name would otherwise be unbound and the check
        # below would die with a NameError instead of repairing.
        backup_gpt = None
        try:
            backup_gpt = primary_gpt.load_backup()
        except GPTError as err:
            logger.warning(err)

        if (primary_gpt.something_wrong or not backup_gpt
                or backup_gpt.something_wrong):
            logger.info("Problems found, will fix that.")
        elif primary_gpt.part_array_size != 128:
            # BUG FIX: the "{}" placeholder was never filled in; log the
            # actual partition array size.
            logger.info("Strange partition array size ({}), will fix that."
                        .format(primary_gpt.part_array_size))
        else:
            logger.info("Everything seems OK. Nothing to do.")
            return

        primary_gpt.trim_partition_array()
        backup_gpt = primary_gpt.make_backup()

        logger.debug("New primary GPT:\n{}".format(primary_gpt))
        logger.debug("New backup GPT:\n{}".format(backup_gpt))

        if args.dry_run:
            logger.info("Skipping write.")
            return

        if not args.write:
            # Interactive confirmation unless --write was given.
            ans = input("Modify the image [y/N]?")
            if ans.lower() not in ("y", "yes"):
                return

        primary_gpt.write()
        backup_gpt.write()
Example #14
0
    def verify(self, img, **args):
        """Returns a tuple of lists of the form (errors, warnings,
                info).  The error list will be empty if the action has been
                correctly installed in the given image.

                In detail, this verifies that the file is present, and if
                the preserve attribute is not present, that the hashes
                and other attributes of the file match."""

        # "abandon" means the file is deliberately not delivered; nothing
        # to verify.
        if self.attrs.get("preserve") == "abandon":
            return [], [], []

        path = self.get_installed_path(img.get_root())

        # Common filesystem-object checks; lstat is falsy when the file
        # could not be stat-ed.
        lstat, errors, warnings, info, abort = \
            self.verify_fsobj_common(img, stat.S_IFREG)
        if lstat:
            if not stat.S_ISREG(lstat.st_mode):
                self.replace_required = True

        if abort:
            assert errors
            self.replace_required = True
            return errors, warnings, info

        # Easter egg -- deliberately informational only.
        if path.lower().endswith("/bobcat") and args["verbose"] == True:
            # Returned as a purely informational (untranslated)
            # message so that no client should interpret it as a
            # reason to fail verification.
            info.append("Warning: package may contain bobcat!  "
                        "(http://xkcd.com/325/)")

        preserve = self.attrs.get("preserve")

        # Timestamp check: only for non-preserved files carrying an explicit
        # timestamp attribute.
        if (preserve is None and "timestamp" in self.attrs and lstat.st_mtime
                != misc.timestamp_to_time(self.attrs["timestamp"])):
            errors.append(
                _("Timestamp: {found} should be "
                  "{expected}").format(found=misc.time_to_timestamp(
                      lstat.st_mtime),
                                       expected=self.attrs["timestamp"]))

        # avoid checking pkg.size if we have any content-hashes present;
        # different size files may have the same content-hash
        pkg_size = int(self.attrs.get("pkg.size", 0))
        if preserve is None and pkg_size > 0 and \
            not set(digest.DEFAULT_GELF_HASH_ATTRS).intersection(
            set(self.attrs.keys())) and \
            lstat.st_size != pkg_size:
            errors.append(
                _("Size: {found:d} bytes should be "
                  "{expected:d}").format(found=lstat.st_size,
                                         expected=pkg_size))

        # Preserved files (unless verbose mode) and missing files stop here.
        if (preserve is not None and args["verbose"] == False
                or lstat is None):
            return errors, warnings, info

        # The content checks below are expensive; only run with "forever".
        if args["forever"] != True:
            return errors, warnings, info

        #
        # Check file contents.
        #
        try:
            # This is a generic mechanism, but only used for libc on
            # x86, where the "best" version of libc is lofs-mounted
            # on the canonical path, foiling the standard verify
            # checks.
            is_mtpt = self.attrs.get("mountpoint", "").lower() == "true"
            elfhash = None
            elferror = None
            elf_hash_attr, elf_hash_val, \
                elf_hash_func = \
                digest.get_preferred_hash(self,
                    hash_type=pkg.digest.HASH_GELF)
            if elf_hash_attr and haveelf and not is_mtpt:
                #
                # It's possible for the elf module to
                # throw while computing the hash,
                # especially if the file is badly
                # corrupted or truncated.
                #
                try:
                    # On path, only calculate the
                    # content hash that matches
                    # the preferred one on the
                    # action
                    get_elfhash = \
                        elf_hash_attr == "elfhash"
                    get_sha256 = (not get_elfhash and elf_hash_func
                                  == digest.GELF_HASH_ALGS["gelf:sha256"])
                    get_sha512t_256 = (
                        not get_elfhash and elf_hash_func
                        == digest.GELF_HASH_ALGS["gelf:sha512t_256"])
                    elfhash = elf.get_hashes(
                        path,
                        elfhash=get_elfhash,
                        sha256=get_sha256,
                        sha512t_256=get_sha512t_256)[elf_hash_attr]

                    # Normalize both forms to a list so elfhash[0] works
                    # uniformly below.
                    if get_elfhash:
                        elfhash = [elfhash]
                    else:
                        elfhash = list(digest.ContentHash(elfhash).values())
                except RuntimeError as e:
                    errors.append("ELF content hash: {0}".format(e))

                if (elfhash is not None and elf_hash_val != elfhash[0]):
                    elferror = _("ELF content hash: "
                                 "{found} "
                                 "should be {expected}").format(
                                     found=elfhash[0], expected=elf_hash_val)

            # If we failed to compute the "gelf:" content hash, or
            # the content hash failed to verify, try the "file:"
            # hash. If the content hash fails to match but the file
            # hash matches, it indicates that the content hash
            # algorithm changed, since obviously the file hash is a
            # superset of the content hash.
            if (elfhash is None or elferror) and not is_mtpt:
                hash_attr, hash_val, hash_func = \
                    digest.get_preferred_hash(self)
                sha_hash, data = misc.get_data_digest(path,
                                                      hash_func=hash_func)
                if sha_hash != hash_val:
                    # Prefer the content hash error message.
                    if preserve is not None:
                        info.append(_("editable file has " "been changed"))
                    elif elferror:
                        errors.append(elferror)
                        self.replace_required = True
                    else:
                        errors.append(
                            _("Hash: "
                              "{found} should be "
                              "{expected}").format(found=sha_hash,
                                                   expected=hash_val))
                        self.replace_required = True

            # Check system attributes.
            # Since some attributes like 'archive' or 'av_modified'
            # are set automatically by the FS, it makes no sense to
            # check for 1:1 matches. So we only check that the
            # system attributes specified in the action are still
            # set on the file.
            sattr = self.attrs.get("sysattr", None)
            if sattr:
                if isinstance(sattr, list):
                    sattr = ",".join(sattr)
                sattrs = sattr.split(",")
                if len(sattrs) == 1 and \
                    sattrs[0] not in portable.get_sysattr_dict():
                    # not a verbose attr, try as a compact
                    # (sattrs becomes a plain string here; the loop below
                    # then walks its one-character attribute codes)
                    set_attrs = portable.fgetattr(path, compact=True)
                    sattrs = sattrs[0]
                else:
                    set_attrs = portable.fgetattr(path)

                for a in sattrs:
                    if a not in set_attrs:
                        errors.append(
                            _("System attribute '{0}' "
                              "not set").format(a))

        except EnvironmentError as e:
            if e.errno == errno.EACCES:
                errors.append(_("Skipping: Permission Denied"))
            else:
                errors.append(_("Unexpected Error: {0}").format(e))
        except Exception as e:
            errors.append(_("Unexpected Exception: {0}").format(e))

        return errors, warnings, info
Example #15
0
 def IsRegular(self):
     """Return True when the cached stat result describes a regular file."""
     mode = self._stat.st_mode
     return stat.S_ISREG(mode)
Example #16
0
    def _dir_model(self, path, content=True):
        """Build a model describing the directory at *path*.

        When *content* is true, the model also carries a JSON-format
        listing of the directory's regular files, links and
        subdirectories (hidden entries filtered per ``allow_hidden``).
        Raises a 404 for missing or refused hidden directories.
        """
        os_path = self._get_os_path(path)

        four_o_four = u'directory does not exist: %r' % path

        # Guard clauses: the directory must exist and must be servable.
        if not os.path.isdir(os_path):
            raise web.HTTPError(404, four_o_four)
        if is_hidden(os_path, self.root_dir) and not self.allow_hidden:
            self.log.info(
                "Refusing to serve hidden directory %r, via 404 Error",
                os_path)
            raise web.HTTPError(404, four_o_four)

        model = self._base_model(path)
        model['type'] = 'directory'
        model['size'] = None
        if content:
            contents = []
            model['content'] = contents
            os_dir = self._get_os_path(path)
            for name in os.listdir(os_dir):
                self.log.warning("Looking at %s", name)
                try:
                    child_path = os.path.join(os_dir, name)
                except UnicodeDecodeError as e:
                    self.log.warning("failed to decode filename '%s': %s",
                                     name, e)
                    continue

                try:
                    self.log.warning("os.lstat begin %s %s", name,
                                     datetime.now())
                    st = os.lstat(child_path)
                    self.log.warning("os.lstat end %s %s", name,
                                     datetime.now())
                except OSError as e:
                    if e.errno == errno.ENOENT:
                        # Broken symlink -- just skip it in the listing.
                        self.log.warning("%s doesn't exist", child_path)
                    else:
                        self.log.warning("Error stat-ing %s: %s",
                                         child_path, e)
                    continue

                self.log.warning("stat.IS_checks start %s %s", name,
                                 datetime.now())
                # Only links, regular files and directories are listable.
                listable = (stat.S_ISLNK(st.st_mode)
                            or stat.S_ISREG(st.st_mode)
                            or stat.S_ISDIR(st.st_mode))
                if not listable:
                    self.log.debug("%s not a regular file", child_path)
                    continue
                self.log.warning("stat.IS_checks end %s %s", name,
                                 datetime.now())

                try:
                    if self.should_list(name):
                        visible = self.allow_hidden or not is_file_hidden(
                            child_path, stat_res=st)
                        if visible:
                            contents.append(
                                self.get(path='%s/%s' % (path, name),
                                         content=False))
                except OSError as e:
                    # ELOOP: recursive symlink
                    if e.errno != errno.ELOOP:
                        self.log.warning(
                            "Unknown error checking if file %r is hidden",
                            child_path,
                            exc_info=True,
                        )
            model['format'] = 'json'

        return model
Example #17
0
__author__ = 'yiqing'

# Create a small sample file to inspect with os.stat below.
with open('spam.txt', 'w') as sample:
    sample.write('hello stat world \n')

import os

status = os.stat('spam.txt')
print(status, status.st_atime, status.st_size)

import stat
print(stat.ST_MODE, stat.ST_SIZE)

print(stat.S_ISDIR(status.st_mode), stat.S_ISREG(status.st_mode))

# The same queries via the os.path convenience helpers:
path = 'spam.txt'
print(os.path.isdir(path), os.path.isfile(path), os.path.getsize(path))
Example #18
0
    # NOTE(review): fragment of a scandir test -- `names`, `paths`, `dirs`,
    # `tmpdir`, the path variables and the FILE_NAME/FOLDER/SYMLINK_*
    # constants are defined above this chunk.
    dirs_no_symlink = set()
    files = set()
    files_no_symlink = set()
    symlinks = set()
    # Classify every entry under all four is_dir/is_file views, with and
    # without following symlinks (is_dir/is_file follow them by default).
    for dir_entry in os.scandir(tmpdir):
        names.add(dir_entry.name)
        paths.add(dir_entry.path)
        if dir_entry.is_dir():
            assert stat.S_ISDIR(dir_entry.stat().st_mode) is True
            dirs.add(dir_entry.name)
        if dir_entry.is_dir(follow_symlinks=False):
            assert stat.S_ISDIR(dir_entry.stat().st_mode) is True
            dirs_no_symlink.add(dir_entry.name)
        if dir_entry.is_file():
            files.add(dir_entry.name)
            assert stat.S_ISREG(dir_entry.stat().st_mode) is True
        if dir_entry.is_file(follow_symlinks=False):
            files_no_symlink.add(dir_entry.name)
            assert stat.S_ISREG(dir_entry.stat().st_mode) is True
        if dir_entry.is_symlink():
            symlinks.add(dir_entry.name)

    # The *_no_symlink sets must exclude the symlinked entries, while the
    # follow-symlinks sets include them.
    assert names == set(
        [FILE_NAME, FILE_NAME2, FOLDER, SYMLINK_FILE, SYMLINK_FOLDER])
    assert paths == set([fname, fname2, folder, symlink_file, symlink_folder])
    assert dirs == set([FOLDER, SYMLINK_FOLDER])
    assert dirs_no_symlink == set([FOLDER])
    assert files == set([FILE_NAME, FILE_NAME2, SYMLINK_FILE])
    assert files_no_symlink == set([FILE_NAME, FILE_NAME2])
    assert symlinks == set([SYMLINK_FILE, SYMLINK_FOLDER])
Example #19
0
#coding:utf-8
import os
import stat
import time
# 1. System calls: os.stat(), os.lstat(), os.fstat()
# print os.stat("a")  # get the file status
# print os.lstat("a")  # same, but does not follow symbolic links
# f=open("a.txt","w")
# print os.fstat(f.fileno())  # takes a file descriptor instead of a path
s=os.stat("a.txt")
print s.st_mode  # the file's mode (type and permission bits)
print stat.S_ISDIR(s.st_mode)  # is the path a directory?
print stat.S_ISREG(s.st_mode)  # is it a regular file?
print s.st_mode & stat.S_IRUSR  # owner-readable? S_IRUSR is a mask; any non-zero AND result means readable
print s.st_mode & stat.S_IXUSR  # owner-executable?
t=time.localtime(s.st_atime)  # NOTE: st_atime is the last *access* time, not the modification time
print t
print s.st_size  # file size in bytes
# 2. Convenience helpers in os.path
print os.path.isdir("a.txt")  # is it a directory?
print os.path.islink("a.txt")  # is it a symbolic link?
print os.path.isfile("a.txt")  # is it a regular file?
# os.path cannot inspect the file's access permissions
print time.localtime(os.path.getatime("a.txt"))  # last *access* time of the file
print os.path.getsize("a.txt")  # size of a regular file
Example #20
0
    def ReadTags(self, ListName):
        """Scan every directory registered for the list *ListName* and
        store ID3 tag data for each MP3 not yet in the database.

        Runs on a worker thread: GUI widget updates are bracketed with
        gtk.gdk.threads_enter()/threads_leave(), and self.ProgressEvent
        acts as a cancellation flag checked at every loop level.
        (Python 2 / PyGTK / mutagen code.)
        """
        ProcessFileLabel = self.builder.get_object('ProcessFileLabel')
        ProcessDirLabel = self.builder.get_object('ProcessDirLabel')
        ProgressBar = self.builder.get_object('ProgressBar')
        ProgressButton = self.builder.get_object('ProgressButton')
        gtk.gdk.threads_enter()
        ProgressBar.set_text('')
        ProgressButton.set_label('gtk-cancel')
        gtk.gdk.threads_leave()
        # NOTE(review): (ListName) is not a 1-tuple -- should probably be
        # (ListName,); confirm how dbFetchAll binds its parameters.
        DirList = self.dbc.dbFetchAll(
            '''
        select LD.dir_name as "Name", LND.dir_recurse_bool as "Recurse"
        from list_dirs LD, list_name_dir LND, list_names LN
        where LD.list_dir_id = LND.list_dir_id
        and LN.list_id = LND.list_id
        and LN.list_name = %s
        ''', (ListName))
        for Dir in DirList:
            if self.ProgressEvent.isSet():
                break
            for (basepath, children) in self._walktree\
                    (Dir["Name"], Dir["Recurse"]):
                gtk.gdk.threads_enter()
                ProcessDirLabel.set_text(u'Folder: %s' % (basepath))
                gtk.gdk.threads_leave()

                if self.ProgressEvent.isSet():
                    break
                for child in children:
                    if self.ProgressEvent.isSet():
                        break

                    DirID = self.PushDirectory(basepath)
                    # Skip files already recorded for this directory.
                    exist = self.dbc.dbFetchRow(
                        '''
                    select song_id from song
                    where directory_id = %s and file_name = %s
                    ''', (DirID, child))

                    if (exist):
                        continue

                    filename = os.path.join(basepath, child)
                    try:
                        st = os.lstat(filename)
                    except os.error:
                        continue

                    # Only regular files are candidates for tag reading.
                    if stat.S_ISREG(st.st_mode):
                        File = child
                        try:
                            gtk.gdk.threads_enter()
                            try:
                                ProcessFileLabel.set_text(u'File: %s' %
                                                          (child))
                            except UnicodeDecodeError, e:
                                pass
                            ProgressBar.pulse()
                            time.sleep(0.01)
                            gtk.gdk.threads_leave()
                            try:
                                mp3 = mutagen.mp3.MP3(filename)
                            except IOError:
                                continue
                            try:
                                id3 = mutagen.id3.ID3(filename)
                            except:
                                continue
                            VBR = self._is_vbr(filename)
                            # Each tag falls back to a default id ('1' /
                            # '126') when missing or of an unexpected type.
                            try:
                                AlbumArtist = self.PushAlbumArtist(id3["TPE2"])
                                if not isinstance(AlbumArtist, int):
                                    AlbumArtist = '1'
                            except (mutagen.id3.ID3NoHeaderError, KeyError), e:
                                AlbumArtist = '1'
                            try:
                                Artist = self.PushArtist(id3["TPE1"])
                                if not isinstance(Artist, int):
                                    Artist = '1'
                            except (mutagen.id3.ID3NoHeaderError, KeyError), e:
                                Artist = '1'
                            try:
                                Album = self.PushAlbum(id3["TALB"])
                                if not isinstance(Album, int):
                                    Album = 1
                            except (mutagen.id3.ID3NoHeaderError, KeyError), e:
                                Album = '1'
                            try:
                                Genre = self.PushGenre(id3["TCON"])
                                if not isinstance(Genre, int):
                                    Genre = '126'
                            except (mutagen.id3.ID3NoHeaderError, KeyError), e:
                                Genre = '126'
                            try:
                                Track = id3["TRCK"]
                                if not isinstance(Track, int):
                                    Track = '1'
                            except (mutagen.id3.ID3NoHeaderError, KeyError), e:
                                Track = '1'
Example #21
0
def replace(zpool, old_device, new_device=None, force=False):
    '''
    .. versionchanged:: 2016.3.0

    Replaces old_device with new_device.

    .. note::
        This is equivalent to attaching new_device,
        waiting for it to resilver, and then detaching old_device.

        The size of new_device must be greater than or equal to the minimum
        size of all the devices in a mirror or raidz configuration.

    zpool : string
        name of storage pool
    old_device : string
        old device to replace
    new_device : string
        optional new device
    force : boolean
        Forces use of new_device, even if its appears to be in use.

    CLI Example:

    .. code-block:: bash

        salt '*' zpool.replace myzpool /path/to/vdev1 /path/to/vdev2
    '''
    ret = {}

    # Bail out early when the pool itself is unknown.
    if not exists(zpool):
        ret[zpool] = 'storage pool does not exist'
        return ret

    ret[zpool] = {}

    def _vet_device(device):
        # Record an error for *device* unless it exists and is a block
        # device or a regular file (file vdev).
        if not os.path.exists(device):
            ret[zpool][device] = 'not present on filesystem'
            return
        dev_mode = os.stat(device).st_mode
        if not stat.S_ISBLK(dev_mode) and not stat.S_ISREG(dev_mode):
            ret[zpool][
                device] = 'not a block device, a file vdev or character special device'

    if new_device:
        # Replacing into a different slot: only the new device must be
        # checked (the old one is probably gone/faulted).
        _vet_device(new_device)
    else:
        # Same-slot replacement: the old device must still be reachable.
        _vet_device(old_device)

    if ret[zpool]:
        return ret

    # Hand the replacement off to the zpool binary.
    zpool_cmd = _check_zpool()
    force_flag = '-f ' if force else ''
    new_part = ' {0}'.format(new_device) if new_device else ''
    cmd = '{0} replace {1}{2} {3}{4}'.format(zpool_cmd, force_flag, zpool,
                                             old_device, new_part)
    res = __salt__['cmd.run_all'](cmd, python_shell=False)
    if res['retcode'] != 0:
        ret[zpool] = res['stderr' if 'stderr' in res else 'stdout']
    else:
        ret[zpool] = 'replaced {0} with {1}'.format(old_device, new_device)

    return ret
Example #22
0
    def GetBasePathSpec(self, source_path):
        """Determines the base path specification.

    Args:
      source_path: the source path.

    Returns:
      The base path specification (instance of dfvfs.PathSpec).

    Raises:
      RuntimeError: if the source path does not exists, or if the source path
                    is not a file or directory, or if the format of or within
                    the source file is not supported.
    """
        # Guard clauses: the source must exist and be a file or directory.
        if not os.path.exists(source_path):
            raise RuntimeError(u'No such source: {0:s}.'.format(source_path))

        source_mode = os.stat(source_path).st_mode
        if not stat.S_ISDIR(source_mode) and not stat.S_ISREG(source_mode):
            raise RuntimeError(
                u'Unsupported source: {0:s} not a file or directory.'.format(
                    source_path))

        path_spec = path_spec_factory.Factory.NewPathSpec(
            definitions.TYPE_INDICATOR_OS, location=source_path)

        # A directory needs no further analysis.
        if stat.S_ISDIR(source_mode):
            return path_spec

        # Regular file: probe for a storage media image container.
        type_indicators = analyzer.Analyzer.GetStorageMediaImageTypeIndicators(
            path_spec)

        if len(type_indicators) > 1:
            raise RuntimeError((
                u'Unsupported source: {0:s} found more than one storage media '
                u'image types.').format(source_path))

        if len(type_indicators) == 1:
            path_spec = path_spec_factory.Factory.NewPathSpec(
                type_indicators[0], parent=path_spec)

        if not type_indicators:
            # The RAW storage media image type cannot be detected based on
            # a signature so we try to detect it based on common file naming
            # schemas.
            file_system = resolver.Resolver.OpenFileSystem(path_spec)
            raw_path_spec = path_spec_factory.Factory.NewPathSpec(
                definitions.TYPE_INDICATOR_RAW, parent=path_spec)

            glob_results = raw.RawGlobPathSpec(file_system, raw_path_spec)
            if glob_results:
                path_spec = raw_path_spec

        # In case we did not find a storage media image type we keep looking
        # since not all RAW storage media image naming schemas are known and
        # its type can only detected by its content.

        path_spec = self._GetUpperLevelVolumeSystemPathSpec(path_spec)

        # In case we did not find a volume system type we keep looking
        # since we could be dealing with a store media image that contains
        # a single volume.

        type_indicators = analyzer.Analyzer.GetFileSystemTypeIndicators(
            path_spec)

        if len(type_indicators) > 1:
            raise RuntimeError((
                u'Unsupported source: {0:s} found more than one file system '
                u'types.').format(source_path))

        if not type_indicators:
            logging.warning(u'Unable to find a supported file system.')
            path_spec = path_spec_factory.Factory.NewPathSpec(
                definitions.TYPE_INDICATOR_OS, location=source_path)

        elif type_indicators[0] != definitions.TYPE_INDICATOR_TSK:
            raise RuntimeError((
                u'Unsupported source: {0:s} found unsupported file system '
                u'type: {1:s}.').format(source_path, type_indicators[0]))

        else:
            path_spec = path_spec_factory.Factory.NewPathSpec(
                definitions.TYPE_INDICATOR_TSK,
                location=u'/',
                parent=path_spec)

        return path_spec
Example #23
0
def is_piped(file_obj):
    """Return True when *file_obj* is attached to a pipe (FIFO) or a
    redirected regular file, as opposed to e.g. an interactive terminal."""
    st_mode = os.fstat(file_obj.fileno()).st_mode
    is_fifo = stat.S_ISFIFO(st_mode)
    is_regular = stat.S_ISREG(st_mode)
    return is_fifo or is_regular
Example #24
0
def zip_item_is_executable(info):
    # type: (ZipInfo) -> bool
    """Return True when the zip entry is a regular file with at least one
    execute permission bit (user, group or other) set.

    The Unix mode lives in the upper 16 bits of ``external_attr``; it is
    zero for archives created on platforms that do not record it.
    """
    unix_mode = info.external_attr >> 16
    if not unix_mode:
        # No mode recorded at all: cannot be executable.
        return False
    has_exec_bit = bool(unix_mode & 0o111)
    return stat.S_ISREG(unix_mode) and has_exec_bit
Example #25
0
def main():
    """Entry point of an Ansible ``find``-style module.

    Walks every path in ``paths`` (optionally recursively), filters each
    entry by name pattern, age, size, content and type, and reports the
    matches through ``module.exit_json``.
    """
    module = AnsibleModule(
        argument_spec=dict(
            paths=dict(required=True, aliases=['name', 'path'], type='list'),
            patterns=dict(default=['*'], type='list', aliases=['pattern']),
            contains=dict(default=None, type='str'),
            file_type=dict(default="file",
                           choices=['file', 'directory', 'link', 'any'],
                           type='str'),
            age=dict(default=None, type='str'),
            age_stamp=dict(default="mtime",
                           choices=['atime', 'mtime', 'ctime'],
                           type='str'),
            size=dict(default=None, type='str'),
            recurse=dict(default='no', type='bool'),
            hidden=dict(default="False", type='bool'),
            follow=dict(default="False", type='bool'),
            get_checksum=dict(default="False", type='bool'),
            use_regex=dict(default="False", type='bool'),
        ),
        supports_check_mode=True,
    )

    params = module.params

    filelist = []

    if params['age'] is None:
        age = None
    else:
        # Convert an age expression such as "2d" or "-3h" to seconds.
        # Raw string: "\d" in a plain literal is an invalid escape sequence.
        m = re.match(r"^(-?\d+)(s|m|h|d|w)?$", params['age'].lower())
        seconds_per_unit = {
            "s": 1,
            "m": 60,
            "h": 3600,
            "d": 86400,
            "w": 604800
        }
        if m:
            age = int(m.group(1)) * seconds_per_unit.get(m.group(2), 1)
        else:
            module.fail_json(age=params['age'], msg="failed to process age")

    if params['size'] is None:
        size = None
    else:
        # Convert a size expression such as "10m" or "-1g" to bytes.
        m = re.match(r"^(-?\d+)(b|k|m|g|t)?$", params['size'].lower())
        bytes_per_unit = {
            "b": 1,
            "k": 1024,
            "m": 1024**2,
            "g": 1024**3,
            "t": 1024**4
        }
        if m:
            size = int(m.group(1)) * bytes_per_unit.get(m.group(2), 1)
        else:
            module.fail_json(size=params['size'], msg="failed to process size")

    now = time.time()
    msg = ''
    looked = 0
    for npath in params['paths']:
        npath = os.path.expanduser(os.path.expandvars(npath))
        if os.path.isdir(npath):
            # os.walk() only grew the followlinks parameter in Python 2.6;
            # fall back to the plain call on older interpreters.
            for root, dirs, files in (sys.version_info < (2, 6, 0) and os.walk(npath)) or \
                                    os.walk(npath, followlinks=params['follow']):
                looked = looked + len(files) + len(dirs)
                for fsobj in (files + dirs):
                    fsname = os.path.normpath(os.path.join(root, fsobj))

                    # Skip hidden entries unless explicitly requested.
                    if os.path.basename(fsname).startswith(
                            '.') and not params['hidden']:
                        continue

                    try:
                        # lstat: classify the entry itself, never its target.
                        st = os.lstat(fsname)
                    except OSError:
                        # Narrowed from a bare except: only stat failures
                        # (permission denied, entry vanished, ...) are
                        # expected here.
                        msg += "%s was skipped as it does not seem to be a valid file or it cannot be accessed\n" % fsname
                        continue

                    r = {'path': fsname}
                    if params['file_type'] == 'any':
                        if pfilter(fsobj, params['patterns'],
                                   params['use_regex']) and agefilter(
                                       st, now, age, params['age_stamp']):
                            r.update(statinfo(st))
                            filelist.append(r)
                    elif stat.S_ISDIR(
                            st.st_mode) and params['file_type'] == 'directory':
                        if pfilter(fsobj, params['patterns'],
                                   params['use_regex']) and agefilter(
                                       st, now, age, params['age_stamp']):

                            r.update(statinfo(st))
                            filelist.append(r)

                    elif stat.S_ISREG(
                            st.st_mode) and params['file_type'] == 'file':
                        # Regular files additionally honour the size and
                        # content filters.
                        if pfilter(fsobj, params['patterns'], params['use_regex']) and \
                           agefilter(st, now, age, params['age_stamp']) and \
                           sizefilter(st, size) and \
                           contentfilter(fsname, params['contains']):

                            r.update(statinfo(st))
                            if params['get_checksum']:
                                r['checksum'] = module.sha1(fsname)
                            filelist.append(r)

                    elif stat.S_ISLNK(
                            st.st_mode) and params['file_type'] == 'link':
                        if pfilter(fsobj, params['patterns'],
                                   params['use_regex']) and agefilter(
                                       st, now, age, params['age_stamp']):
                            r.update(statinfo(st))
                            filelist.append(r)

                if not params['recurse']:
                    break
        else:
            msg += "%s was skipped as it does not seem to be a valid directory or it cannot be accessed\n" % npath

    matched = len(filelist)
    module.exit_json(files=filelist,
                     changed=False,
                     msg=msg,
                     matched=matched,
                     examined=looked)
Example #26
0
    def main(self):
        """Poll the playout storage for this channel's scheduled assets,
        update each asset's stored playout-status metadata, and trigger a
        send-to-playout action for assets missing from playout.
        """
        db = self.db
        storage = storages[self.playout_config["playout_storage"]]
        if not storage:
            # Storage unavailable: log only on the first transition so that
            # repeated polls do not spam the log.
            # NOTE(review): `storage` is falsy here yet `storage.id` is still
            # dereferenced — presumably the storages[] lookup returns a stub
            # object rather than None; confirm against the storages helper.
            if STORAGE_STATUS.get(storage.id, True):
                logging.error(f"{storage} is not available")
                STORAGE_STATUS[storage.id] = False
            return
        STORAGE_STATUS[storage.id] = True
        # NOTE(review): storage_path is unused in this body — verify whether
        # it is still needed.
        storage_path = storage.local_path

        for asset, scheduled in get_scheduled_assets(self.id_channel, db=db):
            # Previously recorded playout status for this channel's key.
            old_status = asset.get(self.status_key, DEFAULT_STATUS)

            # read playout file props
            try:
                fs = os.stat(asset.get_playout_full_path(self.id_channel))
                # Only a regular file counts as "exists".
                file_exists = stat.S_ISREG(fs[stat.ST_MODE])
            except FileNotFoundError:
                file_exists = False

            if file_exists:
                file_size = fs[stat.ST_SIZE]
                file_mtime = fs[stat.ST_MTIME]
            else:
                file_size = file_mtime = 0

            # A present but zero-byte file is treated as corrupted.
            if file_exists:
                if file_size:
                    file_status = ONLINE
                else:
                    file_status = CORRUPTED
            else:
                file_status = OFFLINE

            ostatus = old_status.get("status", OFFLINE)
            omtime = old_status.get("mtime", 0)
            osize = old_status.get("size", 0)
            duration = old_status.get("duration", 0)

            now = time.time()

            # if file changed, check using ffprobe
            if file_status == ONLINE:
                if omtime != file_mtime or osize != file_size:
                    file_status, duration = check_file_validity(
                        asset, self.id_channel)

                else:
                    if ostatus == CREATING:
                        # Unchanged for more than 10s: assume the writer
                        # finished; otherwise it is still being created.
                        if now - file_mtime > 10 and omtime == file_mtime:
                            file_status = ONLINE
                        else:
                            file_status = CREATING
                    elif ostatus == UNKNOWN:
                        # Still unknown after a 10s grace period: give up
                        # and mark the file corrupted.
                        if now - file_mtime > 10:
                            file_status = CORRUPTED

            # Persist the new status only when something changed, to avoid
            # needless asset saves on every poll.
            if ostatus != file_status or omtime != file_mtime or osize != file_size:
                logging.info(
                    f"Set {asset} playout status to {get_object_state_name(file_status)}"
                )
                asset[self.status_key] = {
                    "status": file_status,
                    "size": file_size,
                    "mtime": file_mtime,
                    "duration": duration
                }
                asset.save()

            # Asset is ONLINE in the main storage but absent from playout
            # (and scheduled): ask the playout service to (re)create it.
            if file_status not in [
                    ONLINE, CREATING, CORRUPTED
            ] and self.send_action and asset["status"] == ONLINE and scheduled:
                result = send_to(asset.id,
                                 self.send_action,
                                 restart_existing=True,
                                 restart_running=False,
                                 db=db)
                if result.response == 201:
                    logging.info(
                        f"Sending {asset} to playout {self.playout_config['title']} : {result.message}"
                    )
Example #27
0
    def _check_path(self, src, path_type, dest=None, force=False):
        """Validate a destination path in the archive before copying.

        Multiple plugins may collect the same paths concurrently, so an
        existing destination is not an error as long as the object already
        there has the same type as the one being added; a type conflict
        (e.g. a symlink where a regular file exists) raises ``ValueError``.
        Missing leading directories are created on demand.

        :param src: the source path to be copied to the archive
        :param path_type: the type of object to be copied (``P_*`` constant)
        :param dest: an optional destination path
        :param force: force file creation even if the path exists
        :returns: an absolute destination path if the path should be copied
                  now, or ``None`` otherwise
        """
        dest = dest or self.dest_path(src)
        dest_dir = dest if path_type == P_DIR else os.path.split(dest)[0]
        if not dest_dir:
            return dest

        # Containing directory must exist and actually be a directory.
        if os.path.exists(dest_dir):
            if not os.path.isdir(dest_dir):
                raise ValueError("path '%s' exists and is not a directory" %
                                 dest_dir)
        else:
            src_dir = src if path_type == P_DIR else os.path.split(src)[0]
            self._make_leading_paths(src_dir)

        def _is_device_or_ipc(mode):
            # Block/char devices, FIFOs and sockets all count as "special".
            checks = (stat.S_ISBLK, stat.S_ISCHR, stat.S_ISFIFO, stat.S_ISSOCK)
            return any(check(mode) for check in checks)

        if force:
            return dest

        if not os.path.exists(dest):
            return dest

        # Destination already present: its type must match path_type.
        # Use lstat: we care about the current object, not the referent.
        st = os.lstat(dest)
        expectations = {
            P_FILE: (stat.S_ISREG, "regular file"),
            P_LINK: (stat.S_ISLNK, "symbolic link"),
            P_NODE: (_is_device_or_ipc, "special file"),
            P_DIR: (stat.S_ISDIR, "directory"),
        }
        expectation = expectations.get(path_type)
        if expectation is not None:
            predicate, description = expectation
            if not predicate(st.st_mode):
                raise ValueError("path '%s' exists and is not a %s" %
                                 (dest, description))
        # Path has already been copied: skip.
        return None
def walk(
    *dirPaths,
    acceptDirPathPatterns=None,
    acceptFilePathPatterns=None,
    acceptLinkPathPatterns=None,
    ignorePathPatterns=None,
    ignoreDirPathPatterns=None,
    ignoreFilePathPatterns=None,
    ignoreLinkPathPatterns=None,
    emitDirs: bool = True,
    emitFiles: bool = True,
    emitLinks: bool = True,
    emitBaseDirs: bool = True,
    recursive: bool = True,
    sort: bool = True,
    emitErrorEntries: bool = True,
    clazz=None,
    ioAdapter: IWalkIOAdapter = None,
) -> typing.Iterator[Entry]:
    """Walk one or more directory trees, yielding ``Entry`` objects for
    directories, regular files and symbolic links.

    The generic ``ignorePathPatterns`` applies to all three entry kinds;
    the kind-specific accept/ignore pattern arguments refine that further.
    Directory read errors are yielded as error entries when
    ``emitErrorEntries`` is true, otherwise re-raised.

    :param dirPaths: directory paths (or lists/tuples of paths) to walk
    :param emitDirs: yield directory entries
    :param emitFiles: yield regular-file entries
    :param emitLinks: yield symbolic-link entries
    :param emitBaseDirs: yield an entry for each base directory itself
    :param recursive: descend into subdirectories
    :param sort: process directory contents in sorted order
    :param clazz: entry class to instantiate (defaults to ``Entry``)
    :param ioAdapter: I/O backend (defaults to the local-disk adapter)
    """

    if clazz is None:
        clazz = Entry
    else:
        assert clazz.__class__ == type

    ignoreDirPathMatcher = None
    ignoreFilePathMatcher = None
    ignoreLinkPathMatcher = None

    # The generic ignore patterns seed all three kind-specific matchers.
    if ignorePathPatterns:
        ignoreFilePathMatcher = PathPatternMatcherCollection()
        ignoreDirPathMatcher = PathPatternMatcherCollection()
        ignoreLinkPathMatcher = PathPatternMatcherCollection()

        if isinstance(ignorePathPatterns, str):
            ignorePathPatterns = [ignorePathPatterns]

        _temp = compileAllPatterns(ignorePathPatterns)
        ignoreFilePathMatcher.extend(_temp)
        ignoreDirPathMatcher.extend(_temp)
        ignoreLinkPathMatcher.extend(_temp)

    ignoreDirPathMatcher = _cmpileEndExtend(ignoreDirPathMatcher,
                                            ignoreDirPathPatterns)
    ignoreFilePathMatcher = _cmpileEndExtend(ignoreFilePathMatcher,
                                             ignoreFilePathPatterns)
    ignoreLinkPathMatcher = _cmpileEndExtend(ignoreLinkPathMatcher,
                                             ignoreLinkPathPatterns)

    acceptDirPathMatcher = None
    acceptFilePathMatcher = None
    acceptLinkPathMatcher = None

    acceptDirPathMatcher = _cmpileEndExtend(acceptDirPathMatcher,
                                            acceptDirPathPatterns)
    acceptFilePathMatcher = _cmpileEndExtend(acceptFilePathMatcher,
                                             acceptFilePathPatterns)
    acceptLinkPathMatcher = _cmpileEndExtend(acceptLinkPathMatcher,
                                             acceptLinkPathPatterns)

    # ----

    if ioAdapter is None:
        ioAdapter = ILocalDiskIOAdapter()
    _lstat = ioAdapter.lstatCallback()
    _listdir = ioAdapter.listdirCallback()

    # ----

    # Normalize the positional arguments: flatten lists/tuples and strip
    # trailing slashes (but keep a bare "/" intact).
    dirPaths2 = []
    for d in dirPaths:
        if isinstance(d, (list, tuple)):
            for d2 in d:
                assert isinstance(d2, str)
                # remove trailing slashes
                if d2.endswith("/") and (len(d2) > 1):
                    d2 = d2[:-1]
                dirPaths2.append(d2)
        else:
            # remove trailing slashes
            if d.endswith("/") and (len(d) > 1):
                d = d[:-1]
            dirPaths2.append(d)

    for dirPath in dirPaths2:
        dirPath = os.path.abspath(dirPath)
        s = dirPath
        if not s.endswith(os.path.sep):
            s += os.path.sep
        # relPath is computed by chopping this prefix off each full path.
        removePathPrefixLen = len(s)
        # Breadth-first queue of (dir, base, prefixLen, emitBaseDir) tuples.
        dirsToGo = [(dirPath, dirPath, removePathPrefixLen, emitBaseDirs)]

        while dirsToGo:
            nextDirPath, baseDirPath, removePathPrefixLen, bEmitBaseDir = dirsToGo[
                0]
            del dirsToGo[0]

            if bEmitBaseDir:
                statResult = _lstat(baseDirPath)
                assert baseDirPath == nextDirPath  # ??? is this the case ???
                yield Entry._createRootDir(clazz, baseDirPath, statResult)

            try:
                allEntries = _listdir(nextDirPath)
            except Exception as ee:
                # can't process this directory
                if emitErrorEntries:
                    fullPath = nextDirPath
                    relPath = fullPath[removePathPrefixLen:]
                    yield Entry._createReadDirError(clazz, baseDirPath,
                                                    relPath, ee)
                else:
                    raise
                continue

            if sort:
                allEntries = sorted(allEntries)

            for entry in allEntries:
                fullPath = os.path.join(nextDirPath, entry)
                relPath = fullPath[removePathPrefixLen:]
                try:
                    statResult = _lstat(fullPath)
                    if stat.S_ISDIR(statResult.st_mode):
                        if ignoreDirPathMatcher and ignoreDirPathMatcher.matchAR(
                                fullPath, relPath):
                            continue

                        if emitDirs:
                            if acceptDirPathMatcher is None or acceptDirPathMatcher.matchAR(
                                    fullPath, relPath):
                                yield Entry._createDir(clazz, baseDirPath,
                                                       relPath, statResult)

                        if recursive:
                            dirsToGo.append((fullPath, baseDirPath,
                                             removePathPrefixLen, False))

                    elif stat.S_ISLNK(statResult.st_mode):
                        if ignoreLinkPathMatcher and ignoreLinkPathMatcher.matchAR(
                                fullPath, relPath):
                            continue

                        # BUG FIX: this branch previously tested emitFiles,
                        # leaving the emitLinks parameter without effect.
                        if emitLinks:
                            if acceptLinkPathMatcher is None or acceptLinkPathMatcher.matchAR(
                                    fullPath, relPath):
                                yield Entry._createLink(
                                    clazz, baseDirPath, relPath, statResult)

                    elif stat.S_ISREG(statResult.st_mode):
                        if ignoreFilePathMatcher and ignoreFilePathMatcher.matchAR(
                                fullPath, relPath):
                            continue

                        if emitFiles:
                            if acceptFilePathMatcher is None or acceptFilePathMatcher.matchAR(
                                    fullPath, relPath):
                                yield Entry._createFile(
                                    clazz, baseDirPath, relPath, statResult)

                except FileNotFoundError:
                    # we end here if entry was a link but the link target
                    # does not exist (or the entry vanished between listdir
                    # and lstat)
                    pass
Example #29
0
 def __addElement(self, relativePath, doHash, skipAttributes):
     """Append an XML element describing the filesystem entry at
     *relativePath* (relative to ``self.__path``) to the manifest document.

     Symlinks, directories and regular files are recorded with their
     relevant properties (target, size, modification time, read-only and
     executable flags); any other file type is skipped.  Extended
     attributes are recorded as child ``xattr`` elements when the
     ``xattr`` module is available.  All failures are printed to stderr
     and swallowed so a single unreadable entry cannot abort the scan.

     NOTE(review): the *doHash* and *skipAttributes* parameters are not
     used in this body — confirm whether they are consumed elsewhere.
     """
     try:
         fullPath = os.path.join(self.__path, relativePath)
         # lstat: describe the entry itself, never follow symlinks.
         stats = os.lstat(fullPath)
         properties = {'path': relativePath}
         if stat.S_ISLNK(stats.st_mode):
             kind = "link"
             properties['target'] = os.readlink(fullPath)
         elif stat.S_ISDIR(stats.st_mode):
             kind = "directory"
         elif stat.S_ISREG(stats.st_mode):
             kind = "file"
             properties['size'] = str(stats.st_size)
         else:
             return None  # unknown file type, skip it
         properties['modified'] = formatDate(stats.st_mtime)
         # Permission summary: read-only when no write bit is set anywhere,
         # executable when any execute bit is set.
         mods = stat.S_IMODE(stats.st_mode)
         if mods & (stat.S_IWUSR | stat.S_IWGRP | stat.S_IWOTH) == 0:
             properties['readonly'] = "true"
         if mods & (stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH) != 0:
             properties['executable'] = "true"
         bXML.appendText(self.__contents.documentElement, "\n\t")
         element = bXML.appendElement(self.__contents.documentElement, kind,
                                      properties)
         if kXattrAvailable:
             try:
                 attrs = xattr.listxattr(fullPath)
                 for attr in attrs:
                     try:
                         value = xattr.getxattr(fullPath, attr, True)
                         bXML.appendText(element, "\n\t\t")
                         tag = bXML.appendElement(element, "xattr",
                                                  {'name': attr})
                         bXML.appendText(tag, escape(value))
                     except:  # can't read this attribute
                         # Deliberate best-effort: report and keep going.
                         exceptionType, exceptionValue, exceptionTraceback = sys.exc_info(
                         )
                         traceback.print_exception(exceptionType,
                                                   exceptionValue,
                                                   exceptionTraceback,
                                                   limit=5,
                                                   file=sys.stderr)
                         pass
             except:  # something went wrong
                 # Deliberate best-effort: report and keep going.
                 exceptionType, exceptionValue, exceptionTraceback = sys.exc_info(
                 )
                 traceback.print_exception(exceptionType,
                                           exceptionValue,
                                           exceptionTraceback,
                                           limit=5,
                                           file=sys.stderr)
                 pass
         if element.firstChild:
             bXML.appendText(element, "\n\t")
     except:  # skip files we can't look at
         # Deliberate best-effort: report and keep going.
         exceptionType, exceptionValue, exceptionTraceback = sys.exc_info()
         traceback.print_exception(exceptionType,
                                   exceptionValue,
                                   exceptionTraceback,
                                   limit=5,
                                   file=sys.stderr)
         pass
Example #30
0
 # it's not a directory
 id = None
 if hashvalid:
     id = ent.sha
     git_name = git.mangle_name(file, ent.mode, ent.gitmode)
     git_info = (ent.gitmode, git_name, id)
     shalists[-1].append(git_info)
     sort_key = git.shalist_item_sort_key((ent.mode, file, id))
     meta = msr.metadata_at(ent.meta_ofs)
     meta.hardlink_target = find_hardlink_target(hlink_db, ent)
     # Restore the times that were cleared to 0 in the metastore.
     (meta.atime, meta.mtime, meta.ctime) = (ent.atime, ent.mtime,
                                             ent.ctime)
     metalists[-1].append((sort_key, meta))
 else:
     if stat.S_ISREG(ent.mode):
         try:
             f = hashsplit.open_noatime(ent.name)
         except (IOError, OSError) as e:
             add_error(e)
             lastskip_name = ent.name
         else:
             try:
                 (mode, id) = hashsplit.split_to_blob_or_tree(
                     w.new_blob, w.new_tree, [f], keep_boundaries=False)
             except (IOError, OSError) as e:
                 add_error('%s: %s' % (ent.name, e))
                 lastskip_name = ent.name
     else:
         if stat.S_ISDIR(ent.mode):
             assert (0)  # handled above