Example 1
     'isSpecial': 0,
     'permissions': 0,
     'isHidden': 0
 }
 try:
     try:
         lstats = os.lstat(self._decodedPath)  # Don't follow symlinks
     except EnvironmentError as ex:
         try:
             # XXX bug 63027, try the original path
             lstats = os.lstat(self._path)
         except EnvironmentError as ex:
             #log.exception(ex)
             raise
     lmode = lstats[stat.ST_MODE]
     _stats['isSymlink'] = int(stat.S_ISLNK(lmode))
     if not _stats['isSymlink']:
         mode = lmode
         stats = lstats
     else:
         try:
             stats = os.stat(self._decodedPath)
         except EnvironmentError as ex:
             try:
                 # XXX bug 63027, try the original path
                 stats = os.stat(self._path)
             except EnvironmentError as ex:
                 #log.exception(ex)
                 raise
         mode = stats[stat.ST_MODE]
     _stats['mode'] = mode
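The pattern above (lstat() first, then a link-following stat() only when the entry turns out to be a symlink) can be wrapped in a small helper; a minimal sketch, with a hypothetical helper name not taken from the snippet:

import os
import stat

def stat_with_symlink_flag(path):
    """Return (is_symlink, stat_result), following the link only when needed."""
    lst = os.lstat(path)                 # never follows symlinks
    if not stat.S_ISLNK(lst.st_mode):
        return False, lst                # ordinary entry: lstat is enough
    return True, os.stat(path)           # symlink: stat() resolves the target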
Example 2
    def action(self, path):
        rootdir = self.rootdir
        pubpath = pkg.misc.relpath(path, rootdir)
        pstat = os.lstat(path)
        mode = oct(stat.S_IMODE(pstat.st_mode))
        timestamp = pkg.misc.time_to_timestamp(pstat.st_mtime)

        # Set default root and group.
        owner = "root"
        group = "bin"

        # Check whether we need to change the owner.
        if not self.use_default_owner:
            try:
                owner = pwd.getpwuid(pstat.st_uid).pw_name
            except KeyError as e:
                owner = None
            try:
                group = grp.getgrgid(pstat.st_gid).gr_name
            except KeyError as e:
                group = None

            if not owner and not group:
                raise pkg.bundle.InvalidOwnershipException(path,
                                                           uid=pstat.st_uid,
                                                           gid=pstat.st_gid)
            elif not owner:
                raise pkg.bundle.InvalidOwnershipException(path,
                                                           uid=pstat.st_uid)
            elif not group:
                raise pkg.bundle.InvalidOwnershipException(path,
                                                           gid=pstat.st_gid)

        if stat.S_ISREG(pstat.st_mode):
            inode = pstat.st_ino
            # Any inode in self.inodes will either have been visited
            # before or will have been pre-populated from the list
            # of known targets.  Create file actions for known
            # targets and unvisited inodes.
            if pubpath in self.targetpaths or \
                inode not in self.inodes:
                if pstat.st_nlink > 1:
                    self.inodes.setdefault(inode, path)
                return pkg.actions.file.FileAction(open(path, "rb"),
                                                   mode=mode,
                                                   owner=owner,
                                                   group=group,
                                                   path=pubpath,
                                                   timestamp=timestamp)
            else:
                # Find the relative path to the link target.
                target = pkg.misc.relpath(self.inodes[inode],
                                          os.path.dirname(path))
                return pkg.actions.hardlink.HardLinkAction(path=pubpath,
                                                           target=target)
        elif stat.S_ISLNK(pstat.st_mode):
            return pkg.actions.link.LinkAction(target=os.readlink(path),
                                               path=pubpath)
        elif stat.S_ISDIR(pstat.st_mode):
            return pkg.actions.directory.DirectoryAction(timestamp=timestamp,
                                                         mode=mode,
                                                         owner=owner,
                                                         group=group,
                                                         path=pubpath)
Example 3
    def copy(self, dest, skip_if_older=True):
        assert isinstance(dest, basestring)

        # The logic in this function is complicated by the fact that symlinks
        # aren't universally supported. So, where symlinks aren't supported, we
        # fall back to file copying. Keep in mind that symlink support is
        # per-filesystem, not per-OS.

        # Handle the simple case where symlinks are definitely not supported by
        # falling back to file copy.
        if not hasattr(os, 'symlink'):
            return File.copy(self, dest, skip_if_older=skip_if_older)

        # Always verify the symlink target path exists.
        if not os.path.exists(self.path):
            raise ErrorMessage('Symlink target path does not exist: %s' %
                               self.path)

        st = None

        try:
            st = os.lstat(dest)
        except OSError as ose:
            if ose.errno != errno.ENOENT:
                raise

        # If the dest is a symlink pointing to us, we have nothing to do.
        # If it's the wrong symlink, the filesystem must support symlinks,
        # so we replace with a proper symlink.
        if st and stat.S_ISLNK(st.st_mode):
            link = os.readlink(dest)
            if link == self.path:
                return False

            os.remove(dest)
            os.symlink(self.path, dest)
            return True

        # If the destination doesn't exist, we try to create a symlink. If that
        # fails, we fall back to copy code.
        if not st:
            try:
                os.symlink(self.path, dest)
                return True
            except OSError:
                return File.copy(self, dest, skip_if_older=skip_if_older)

        # Now the complicated part. If the destination exists, we could be
        # replacing a file with a symlink. Or, the filesystem may not support
        # symlinks. We want to minimize I/O overhead for performance reasons,
        # so we keep the existing destination file around as long as possible.
        # A lot of the system calls would be eliminated if we cached whether
        # symlinks are supported. However, even if we performed a single
        # up-front test of whether the root of the destination directory
        # supports symlinks, there's no guarantee that all operations for that
        # dest (or source) would be on the same filesystem and would support
        # symlinks.
        #
        # Our strategy is to attempt to create a new symlink with a random
        # name. If that fails, we fall back to copy mode. If that works, we
        # remove the old destination and move the newly-created symlink into
        # its place.

        temp_dest = os.path.join(os.path.dirname(dest), str(uuid.uuid4()))
        try:
            os.symlink(self.path, temp_dest)
        # TODO Figure out exactly how symlink creation fails and only trap
        # that.
        except EnvironmentError:
            return File.copy(self, dest, skip_if_older=skip_if_older)

        # If removing the original file fails, don't forget to clean up the
        # temporary symlink.
        try:
            os.remove(dest)
        except EnvironmentError:
            os.remove(temp_dest)
            raise

        os.rename(temp_dest, dest)
        return True
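The temp-link-then-rename strategy described in the comments above can be read in isolation; a minimal sketch under the assumption that dest, if present, is a regular file or symlink (the helper name is illustrative, not part of the original):

import os
import uuid

def replace_with_symlink(target, dest):
    """Point dest at target by creating a temporary symlink and renaming it.

    Raises OSError if symlinks are unsupported on this filesystem, so the
    caller can fall back to a plain file copy.
    """
    temp = os.path.join(os.path.dirname(dest), str(uuid.uuid4()))
    os.symlink(target, temp)      # fails here if symlinks are unsupported
    try:
        os.rename(temp, dest)     # on POSIX this atomically replaces dest
    except OSError:
        os.remove(temp)           # don't leave the temporary link behind
        raise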
Example 4
                with open(lname, 'wb') as destfile:
                    log('Saving %r\n' % lname)
                    write_to_file(srcfile, destfile)
        elif cmd == 'mget':
            for parm in words[1:]:
                (dir, base) = os.path.split(parm)

                res = vfs.resolve(repo, dir, parent=pwd)
                _, dir_item = res[-1]
                if not dir_item:
                    raise Exception('%r does not exist' % dir)
                for name, item in vfs.contents(repo, dir_item):
                    if name == '.':
                        continue
                    if fnmatch.fnmatch(name, base):
                        if stat.S_ISLNK(vfs.item_mode(item)):
                            deref = vfs.resolve(repo, name, parent=res)
                            deref_name, deref_item = deref[-1]
                            if not deref_item:
                                raise Exception(
                                    '%r does not exist' %
                                    '/'.join(name for name, item in deref))
                            item = deref_item
                        with vfs.fopen(repo, item) as srcfile:
                            with open(name, 'wb') as destfile:
                                log('Saving %r\n' % name)
                                write_to_file(srcfile, destfile)
        elif cmd == 'help' or cmd == '?':
            # FIXME: move to stdout
            log('Commands: ls cd pwd cat get mget help quit\n')
        elif cmd in ('quit', 'exit', 'bye'):
Example 5
     try:
         f = hashsplit.open_noatime(ent.name)
     except (IOError, OSError) as e:
         add_error(e)
         lastskip_name = ent.name
     else:
         try:
             (mode, id) = hashsplit.split_to_blob_or_tree(
                 w.new_blob, w.new_tree, [f], keep_boundaries=False)
         except (IOError, OSError) as e:
             add_error('%s: %s' % (ent.name, e))
             lastskip_name = ent.name
 else:
     if stat.S_ISDIR(ent.mode):
         assert (0)  # handled above
     elif stat.S_ISLNK(ent.mode):
         try:
             rl = os.readlink(ent.name)
         except (OSError, IOError) as e:
             add_error(e)
             lastskip_name = ent.name
         else:
             (mode, id) = (GIT_MODE_SYMLINK, w.new_blob(rl))
     else:
         add_error(Exception('skipping special file "%s"' % ent.name))
         lastskip_name = ent.name
 if id:
     ent.validate(mode, id)
     ent.repack()
     shalists[-1].append(
         (mode, git.mangle_name(file, ent.mode, ent.gitmode), id))
Example 6
parent_dir = "NULL"
leafs = []  #Leaf
links = []  #String

for (root, dirs, files) in os.walk(".", topdown=True, followlinks=True):
    if root == ".":
        hash = cwd
        parent = "NULL"
    else:
        hash = cwd + root[1:]
        parent = os.path.dirname(hash)

    # add current directory
    dirname = os.path.basename(hash)
    leafs.append(Leaf(hash, dirname, parent, os.lstat(hash), cwd))
    if stat.S_ISLNK(os.lstat(hash).st_mode):
        links.append(hash)

    for child_file in files:
        filepath = hash + "/" + child_file
        leafs.append(Leaf(filepath, child_file, hash, os.lstat(filepath), cwd))
        # if this file is a link, store them
        if stat.S_ISLNK(os.lstat(filepath).st_mode):
            links.append(filepath)

# insert nodes
for l in leafs:
    l.insert_to_db(mycursor)

for l in links:
    symlink_template = "insert into `symbolic_links` values (%s, %s);"
Example 7
            def process(path):
                s = os.lstat(path)

                if stat.S_ISDIR(s.st_mode):
                    update_hash('d')
                elif stat.S_ISCHR(s.st_mode):
                    update_hash('c')
                elif stat.S_ISBLK(s.st_mode):
                    update_hash('b')
                elif stat.S_ISSOCK(s.st_mode):
                    update_hash('s')
                elif stat.S_ISLNK(s.st_mode):
                    update_hash('l')
                elif stat.S_ISFIFO(s.st_mode):
                    update_hash('p')
                else:
                    update_hash('-')

                def add_perm(mask, on, off='-'):
                    if mask & s.st_mode:
                        update_hash(on)
                    else:
                        update_hash(off)

                add_perm(stat.S_IRUSR, 'r')
                add_perm(stat.S_IWUSR, 'w')
                if stat.S_ISUID & s.st_mode:
                    add_perm(stat.S_IXUSR, 's', 'S')
                else:
                    add_perm(stat.S_IXUSR, 'x')

                add_perm(stat.S_IRGRP, 'r')
                add_perm(stat.S_IWGRP, 'w')
                if stat.S_ISGID & s.st_mode:
                    add_perm(stat.S_IXGRP, 's', 'S')
                else:
                    add_perm(stat.S_IXGRP, 'x')

                add_perm(stat.S_IROTH, 'r')
                add_perm(stat.S_IWOTH, 'w')
                if stat.S_ISVTX & s.st_mode:
                    update_hash('t')
                else:
                    add_perm(stat.S_IXOTH, 'x')

                if include_owners:
                    try:
                        update_hash(" %10s" % pwd.getpwuid(s.st_uid).pw_name)
                        update_hash(" %10s" % grp.getgrgid(s.st_gid).gr_name)
                    except KeyError:
                        bb.warn("KeyError in %s" % path)
                        raise

                if include_timestamps:
                    update_hash(" %10d" % s.st_mtime)

                update_hash(" ")
                if stat.S_ISBLK(s.st_mode) or stat.S_ISCHR(s.st_mode):
                    update_hash("%9s" %
                                ("%d.%d" %
                                 (os.major(s.st_rdev), os.minor(s.st_rdev))))
                else:
                    update_hash(" " * 9)

                update_hash(" ")
                if stat.S_ISREG(s.st_mode):
                    update_hash("%10d" % s.st_size)
                else:
                    update_hash(" " * 10)

                update_hash(" ")
                fh = hashlib.sha256()
                if stat.S_ISREG(s.st_mode):
                    # Hash file contents
                    with open(path, 'rb') as d:
                        for chunk in iter(lambda: d.read(4096), b""):
                            fh.update(chunk)
                    update_hash(fh.hexdigest())
                else:
                    update_hash(" " * len(fh.hexdigest()))

                update_hash(" %s" % path)

                if stat.S_ISLNK(s.st_mode):
                    update_hash(" -> %s" % os.readlink(path))

                update_hash("\n")
Example 8
 def _add_posix1e_acl(self, path, st):
     if not read_acl:
         return
     if not stat.S_ISLNK(st.st_mode):
         isdir = 1 if stat.S_ISDIR(st.st_mode) else 0
         self.posix1e_acl = read_acl(path, isdir)
Example 9
    def copy(self,
             destination,
             skip_if_older=True,
             remove_unaccounted=True,
             remove_all_directory_symlinks=True,
             remove_empty_directories=True):
        '''
        Copy all registered files to the given destination path. The given
        destination can be an existing directory, or not exist at all. It
        can't be e.g. a file.
        The copy process acts a bit like rsync: files are not copied when they
        don't need to (see mozpack.files for details on file.copy).

        By default, files in the destination directory that aren't
        registered are removed and empty directories are deleted. In
        addition, all directory symlinks in the destination directory
        are deleted: this is a conservative approach to ensure that we
        never accidentally write files into a directory that is not the
        destination directory. In the worst case, we might have a
        directory symlink in the object directory to the source
        directory.

        To disable removing of unregistered files, pass
        remove_unaccounted=False. To disable removing empty
        directories, pass remove_empty_directories=False. In rare
        cases, you might want to maintain directory symlinks in the
        destination directory (at least those that are not required to
        be regular directories): pass
        remove_all_directory_symlinks=False. Exercise caution with
        this flag: you almost certainly do not want to preserve
        directory symlinks.

        Returns a FileCopyResult that details what changed.
        '''
        assert isinstance(destination, basestring)
        assert not os.path.exists(destination) or os.path.isdir(destination)

        result = FileCopyResult()
        have_symlinks = hasattr(os, 'symlink')
        destination = os.path.normpath(destination)

        # We create the destination directory specially. We can't do this as
        # part of the loop doing mkdir() below because that loop munges
        # symlinks and permissions and parent directories of the destination
        # directory may have their own weird schema. The contract is we only
        # manage children of destination, not its parents.
        try:
            os.makedirs(destination)
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise

        # Because we could be handling thousands of files, code in this
        # function is optimized to minimize system calls. We prefer CPU time
        # in Python over possibly I/O bound filesystem calls to stat() and
        # friends.

        required_dirs = set([destination])
        required_dirs |= set(
            os.path.normpath(os.path.join(destination, d))
            for d in self.required_directories())

        # Ensure destination directories are in place and proper.
        #
        # The "proper" bit is important. We need to ensure that directories
        # have appropriate permissions or we will be unable to discover
        # and write files. Furthermore, we need to verify directories aren't
        # symlinks.
        #
        # Symlinked directories (a symlink whose target is a directory) are
        # incompatible with us because our manifest talks in terms of files,
        # not directories. If we leave symlinked directories unchecked, we
        # would blindly follow symlinks and this might confuse file
        # installation. For example, if an existing directory is a symlink
        # to directory X and we attempt to install a symlink in this directory
        # to a file in directory X, we may create a recursive symlink!
        for d in sorted(required_dirs, key=len):
            try:
                os.mkdir(d)
            except OSError as error:
                if error.errno != errno.EEXIST:
                    raise

            # We allow the destination to be a symlink because the caller
            # is responsible for managing the destination and we assume
            # they know what they are doing.
            if have_symlinks and d != destination:
                st = os.lstat(d)
                if stat.S_ISLNK(st.st_mode):
                    # While we have remove_unaccounted, it doesn't apply
                    # to directory symlinks because if it did, our behavior
                    # could be very wrong.
                    os.remove(d)
                    os.mkdir(d)

            if not os.access(d, os.W_OK):
                umask = os.umask(0o077)
                os.umask(umask)
                os.chmod(d, 0o777 & ~umask)

        if isinstance(remove_unaccounted, FileRegistry):
            existing_files = set(
                os.path.normpath(os.path.join(destination, p))
                for p in remove_unaccounted.paths())
            existing_dirs = set(
                os.path.normpath(os.path.join(destination, p))
                for p in remove_unaccounted.required_directories())
            existing_dirs |= {os.path.normpath(destination)}
        else:
            # While we have remove_unaccounted, it doesn't apply to empty
            # directories because it wouldn't make sense: an empty directory
            # is empty, so removing it should have no effect.
            existing_dirs = set()
            existing_files = set()
            for root, dirs, files in os.walk(destination):
                # We need to perform the same symlink detection as above.
                # os.walk() doesn't follow symlinks into directories by
                # default, so we need to check dirs (we can't wait for root).
                if have_symlinks:
                    filtered = []
                    for d in dirs:
                        full = os.path.join(root, d)
                        st = os.lstat(full)
                        if stat.S_ISLNK(st.st_mode):
                            # This directory symlink is not a required
                            # directory: any such symlink would have been
                            # removed and a directory created above.
                            if remove_all_directory_symlinks:
                                os.remove(full)
                                result.removed_files.add(
                                    os.path.normpath(full))
                            else:
                                existing_files.add(os.path.normpath(full))
                        else:
                            filtered.append(d)

                    dirs[:] = filtered

                existing_dirs.add(os.path.normpath(root))

                for d in dirs:
                    existing_dirs.add(os.path.normpath(os.path.join(root, d)))

                for f in files:
                    existing_files.add(os.path.normpath(os.path.join(root, f)))

        # Now we reconcile the state of the world against what we want.
        dest_files = set()

        # Install files.
        #
        # Creating/appending new files on Windows/NTFS is slow. So we use a
        # thread pool to speed it up significantly. The performance of this
        # loop is so critical to common build operations on Linux that the
        # overhead of the thread pool is worth avoiding, so we have 2 code
        # paths. We also employ a low water mark to prevent thread pool
        # creation if number of files is too small to benefit.
        copy_results = []
        if sys.platform == 'win32' and len(self) > 100:
            with futures.ThreadPoolExecutor(4) as e:
                fs = []
                for p, f in self:
                    destfile = os.path.normpath(os.path.join(destination, p))
                    fs.append(
                        (destfile, e.submit(f.copy, destfile, skip_if_older)))

            copy_results = [(destfile, f.result()) for destfile, f in fs]
        else:
            for p, f in self:
                destfile = os.path.normpath(os.path.join(destination, p))
                copy_results.append((destfile, f.copy(destfile,
                                                      skip_if_older)))

        for destfile, copy_result in copy_results:
            dest_files.add(destfile)
            if copy_result:
                result.updated_files.add(destfile)
            else:
                result.existing_files.add(destfile)

        # Remove files no longer accounted for.
        if remove_unaccounted:
            for f in existing_files - dest_files:
                # Windows requires write access to remove files.
                if os.name == 'nt' and not os.access(f, os.W_OK):
                    # It doesn't matter what we set permissions to since we
                    # will remove this file shortly.
                    os.chmod(f, 0o600)

                os.remove(f)
                result.removed_files.add(f)

        if not remove_empty_directories:
            return result

        # Figure out which directories can be removed. This is complicated
        # by the fact we optionally remove existing files. This would be easy
        # if we walked the directory tree after installing files. But, we're
        # trying to minimize system calls.

        # Start with the ideal set.
        remove_dirs = existing_dirs - required_dirs

        # Then don't remove directories if we didn't remove unaccounted files
        # and one of those files exists.
        if not remove_unaccounted:
            parents = set()
            pathsep = os.path.sep
            for f in existing_files:
                path = f
                while True:
                    # All the paths are normalized and relative by this point,
                    # so os.path.dirname would only do extra work.
                    dirname = path.rpartition(pathsep)[0]
                    if dirname in parents:
                        break
                    parents.add(dirname)
                    path = dirname
            remove_dirs -= parents

        # Remove empty directories that aren't required.
        for d in sorted(remove_dirs, key=len, reverse=True):
            try:
                try:
                    os.rmdir(d)
                except OSError as e:
                    if e.errno in (errno.EPERM, errno.EACCES):
                        # Permissions may not allow deletion. So ensure write
                        # access is in place before attempting to rmdir again.
                        os.chmod(d, 0o700)
                        os.rmdir(d)
                    else:
                        raise
            except OSError as e:
                # If remove_unaccounted is a FileRegistry, then we have a
                # list of directories that may not be empty, so ignore rmdir
                # ENOTEMPTY errors for them.
                if (isinstance(remove_unaccounted, FileRegistry)
                        and e.errno == errno.ENOTEMPTY):
                    continue
                raise
            result.removed_directories.add(d)

        return result
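A typical call site registers files and then invokes copy() once; a minimal sketch, assuming a FileCopier/FileRegistry-style object with an add(path, file) method (only copy() and FileCopyResult appear in the snippet above, so the registration call and paths here are assumptions):

copier = FileCopier()                        # assumed FileRegistry subclass
copier.add('bin/app', File('/build/app'))    # assumed registration API
result = copier.copy('/opt/dist',
                     remove_unaccounted=True,
                     remove_empty_directories=True)
print('%d updated, %d removed' % (len(result.updated_files),
                                  len(result.removed_files)))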
Example 10
    def _apply_common_rec(self, path, restore_numeric_ids=False):
        if not self.mode:
            raise ApplyError('no metadata - cannot apply to ' + path_msg(path))

        # FIXME: S_ISDOOR, S_IFMPB, S_IFCMP, S_IFNWK, ... see stat(2).
        # EACCES errors at this stage are fatal for the current path.
        if lutime and stat.S_ISLNK(self.mode):
            try:
                lutime(path, (self.atime, self.mtime))
            except OSError as e:
                if e.errno == errno.EACCES:
                    raise ApplyError('lutime: %s' % e)
                else:
                    raise
        else:
            try:
                utime(path, (self.atime, self.mtime))
            except OSError as e:
                if e.errno == errno.EACCES:
                    raise ApplyError('utime: %s' % e)
                else:
                    raise

        uid = gid = -1  # By default, do nothing.
        if is_superuser():
            if self.uid is not None:
                uid = self.uid
            if self.gid is not None:
                gid = self.gid
            if not restore_numeric_ids:
                if self.uid != 0 and self.user:
                    entry = pwd_from_name(self.user)
                    if entry:
                        uid = entry.pw_uid
                if self.gid != 0 and self.group:
                    entry = grp_from_name(self.group)
                    if entry:
                        gid = entry.gr_gid
        else:  # not superuser - only consider changing the group/gid
            user_gids = os.getgroups()
            if self.gid in user_gids:
                gid = self.gid
            if not restore_numeric_ids and self.gid != 0:
                # The grp might not exist on the local system.
                grps = filter(None, [grp_from_gid(x) for x in user_gids])
                if self.group in [x.gr_name for x in grps]:
                    g = grp_from_name(self.group)
                    if g:
                        gid = g.gr_gid

        if uid != -1 or gid != -1:
            try:
                os.lchown(path, uid, gid)
            except OSError as e:
                if e.errno == errno.EPERM:
                    add_error('lchown: %s' % e)
                elif sys.platform.startswith('cygwin') \
                   and e.errno == errno.EINVAL:
                    add_error('lchown: unknown uid/gid (%d/%d) for %s' %
                              (uid, gid, path_msg(path)))
                else:
                    raise

        if _have_lchmod:
            try:
                os.lchmod(path, stat.S_IMODE(self.mode))
            except OSError as e:
                # ENOSYS ("Function not implemented") is non-fatal here;
                # anything else propagates.
                if e.errno != errno.ENOSYS:
                    raise
        elif not stat.S_ISLNK(self.mode):
            os.chmod(path, stat.S_IMODE(self.mode))
Example 11
 def _add_symlink_target(self, path, st):
     try:
         if stat.S_ISLNK(st.st_mode):
             self.symlink_target = os.readlink(path)
     except OSError as e:
         add_error('readlink: %s' % e)
Example 12
    def _create_via_common_rec(self, path, create_symlinks=True):
        if not self.mode:
            raise ApplyError('no metadata - cannot create path ' +
                             path_msg(path))

        # If the path already exists and is a dir, try rmdir.
        # If the path already exists and is anything else, try unlink.
        st = None
        try:
            st = xstat.lstat(path)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
        if st:
            if stat.S_ISDIR(st.st_mode):
                try:
                    os.rmdir(path)
                except OSError as e:
                    if e.errno in (errno.ENOTEMPTY, errno.EEXIST):
                        raise Exception(
                            'refusing to overwrite non-empty dir ' +
                            path_msg(path))
                    raise
            else:
                os.unlink(path)

        if stat.S_ISREG(self.mode):
            assert (self._recognized_file_type())
            fd = os.open(path, os.O_CREAT | os.O_WRONLY | os.O_EXCL, 0o600)
            os.close(fd)
        elif stat.S_ISDIR(self.mode):
            assert (self._recognized_file_type())
            os.mkdir(path, 0o700)
        elif stat.S_ISCHR(self.mode):
            assert (self._recognized_file_type())
            os.mknod(path, 0o600 | stat.S_IFCHR, self.rdev)
        elif stat.S_ISBLK(self.mode):
            assert (self._recognized_file_type())
            os.mknod(path, 0o600 | stat.S_IFBLK, self.rdev)
        elif stat.S_ISFIFO(self.mode):
            assert (self._recognized_file_type())
            os.mkfifo(path, 0o600 | stat.S_IFIFO)
        elif stat.S_ISSOCK(self.mode):
            try:
                os.mknod(path, 0o600 | stat.S_IFSOCK)
            except OSError as e:
                if e.errno in (errno.EINVAL, errno.EPERM):
                    s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
                    s.bind(path)
                else:
                    raise
        elif stat.S_ISLNK(self.mode):
            assert (self._recognized_file_type())
            if self.symlink_target and create_symlinks:
                # on MacOS, symlink() permissions depend on umask, and there's
                # no way to chown a symlink after creating it, so we have to
                # be careful here!
                oldumask = os.umask((self.mode & 0o777) ^ 0o777)
                try:
                    os.symlink(self.symlink_target, path)
                finally:
                    os.umask(oldumask)
        # FIXME: S_ISDOOR, S_IFMPB, S_IFCMP, S_IFNWK, ... see stat(2).
        else:
            assert (not self._recognized_file_type())
            add_error('not creating "%s" with unrecognized mode "0x%x"\n' %
                      (path_msg(path), self.mode))
Example 13
    def is_link(self):
        """Checks if this file is a symbolic link.

        :returns: `True`, if the file is a symlink file. False otherwise.
        """
        return stat.S_ISLNK(self.mode)
Example 14
    def get_sha1(self, path, stat_value=None):
        """Return the sha1 of a file.
        """
        if path.__class__ is str:
            abspath = pathjoin(self.root_utf8, path)
        else:
            abspath = pathjoin(self.root, path)
        self.stat_count += 1
        file_fp = self._fingerprint(abspath, stat_value)

        if not file_fp:
            # not a regular file or not existing
            if path in self._cache:
                self.removed_count += 1
                self.needs_write = True
                del self._cache[path]
            return None

        if path in self._cache:
            cache_sha1, cache_fp = self._cache[path]
        else:
            cache_sha1, cache_fp = None, None

        if cache_fp == file_fp:
            ## mutter("hashcache hit for %s %r -> %s", path, file_fp, cache_sha1)
            ## mutter("now = %s", time.time())
            self.hit_count += 1
            return cache_sha1

        self.miss_count += 1

        mode = file_fp[FP_MODE_COLUMN]
        if stat.S_ISREG(mode):
            digest = self._really_sha1_file(abspath)
        elif stat.S_ISLNK(mode):
            digest = sha.new(os.readlink(abspath)).hexdigest()
        else:
            raise BzrError("file %r: unknown file stat mode: %o" %
                           (abspath, mode))

        # window of 3 seconds to allow for 2s resolution on windows,
        # unsynchronized file servers, etc.
        cutoff = self._cutoff_time()
        if file_fp[FP_MTIME_COLUMN] >= cutoff \
                or file_fp[FP_CTIME_COLUMN] >= cutoff:
            # changed too recently; can't be cached.  we can
            # return the result and it could possibly be cached
            # next time.
            #
            # the point is that we only want to cache when we are sure that any
            # subsequent modifications of the file can be detected.  If a
            # modification neither changes the inode, the device, the size, nor
            # the mode, then we can only distinguish it by time; therefore we
            # need to let sufficient time elapse before we may cache this entry
            # again.  If we didn't do this, then, for example, a very quick 1
            # byte replacement in the file might go undetected.
            ## mutter('%r modified too recently; not caching', path)
            self.danger_count += 1
            if cache_fp:
                self.removed_count += 1
                self.needs_write = True
                del self._cache[path]
        else:
            ## mutter('%r added to cache: now=%f, mtime=%d, ctime=%d',
            ##        path, time.time(), file_fp[FP_MTIME_COLUMN],
            ##        file_fp[FP_CTIME_COLUMN])
            self.update_count += 1
            self.needs_write = True
            self._cache[path] = (digest, file_fp)
        return digest
Example 15
def movefile(src, dest, newmtime=None, sstat=None):
    """Moves a file from src to dest, preserving all permissions and
    attributes; mtime will be preserved even when moving across
    filesystems.  Returns true on success and false on failure. Move is
    atomic.
    """

    #print "movefile(" + src + "," + dest + "," + str(newmtime) + "," + str(sstat) + ")"
    try:
        if not sstat:
            sstat = os.lstat(src)
    except Exception as e:
        print("movefile: Stating source file failed...", e)
        return None

    destexists = 1
    try:
        dstat = os.lstat(dest)
    except:
        dstat = os.lstat(os.path.dirname(dest))
        destexists = 0

    if destexists:
        if stat.S_ISLNK(dstat[stat.ST_MODE]):
            try:
                os.unlink(dest)
                destexists = 0
            except Exception as e:
                pass

    if stat.S_ISLNK(sstat[stat.ST_MODE]):
        try:
            target = os.readlink(src)
            if destexists and not stat.S_ISDIR(dstat[stat.ST_MODE]):
                os.unlink(dest)
            os.symlink(target, dest)
            #os.lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
            os.unlink(src)
            return os.lstat(dest)
        except Exception as e:
            print("movefile: failed to properly create symlink:", dest, "->",
                  target, e)
            return None

    renamefailed = 1
    if sstat[stat.ST_DEV] == dstat[stat.ST_DEV]:
        try:
            os.rename(src, dest)
            renamefailed = 0
        except Exception as e:
            if e.errno != errno.EXDEV:
                # Some random error.
                print("movefile: Failed to move", src, "to", dest, e)
                return None
            # Invalid cross-device-link 'bind' mounted or actually Cross-Device

    if renamefailed:
        didcopy = 0
        if stat.S_ISREG(sstat[stat.ST_MODE]):
            try:  # For safety copy then move it over.
                shutil.copyfile(src, dest + "#new")
                os.rename(dest + "#new", dest)
                didcopy = 1
            except Exception as e:
                print('movefile: copy', src, '->', dest, 'failed.', e)
                return None
        else:
            #we don't yet handle special, so we need to fall back to /bin/mv
            a = getstatusoutput("/bin/mv -f " + "'" + src + "' '" + dest + "'")
            if a[0] != 0:
                print(
                    "movefile: Failed to move special file:" + src + "' to '" +
                    dest + "'", a)
                return None  # failure
        try:
            if didcopy:
                os.lchown(dest, sstat[stat.ST_UID], sstat[stat.ST_GID])
                os.chmod(dest, stat.S_IMODE(
                    sstat[stat.ST_MODE]))  # Sticky is reset on chown
                os.unlink(src)
        except Exception as e:
            print("movefile: Failed to chown/chmod/unlink", dest, e)
            return None

    if newmtime:
        os.utime(dest, (newmtime, newmtime))
    else:
        os.utime(dest, (sstat[stat.ST_ATIME], sstat[stat.ST_MTIME]))
        newmtime = sstat[stat.ST_MTIME]
    return newmtime
Example 16
 def islink(self, path):
     try:
         return stat.S_ISLNK(self.ftp.lstat(path).st_mode)
     except IOError:
         return False
Example 17
def copyfile(src, dest, newmtime=None, sstat=None):
    """
    Copies a file from src to dest, preserving all permissions and
    attributes; mtime will be preserved even when moving across
    filesystems.  Returns true on success and false on failure.
    """
    #print "copyfile(" + src + "," + dest + "," + str(newmtime) + "," + str(sstat) + ")"
    try:
        if not sstat:
            sstat = os.lstat(src)
    except Exception as e:
        logger.warn("copyfile: stat of %s failed (%s)" % (src, e))
        return False

    destexists = 1
    try:
        dstat = os.lstat(dest)
    except:
        dstat = os.lstat(os.path.dirname(dest))
        destexists = 0

    if destexists:
        if stat.S_ISLNK(dstat[stat.ST_MODE]):
            try:
                os.unlink(dest)
                destexists = 0
            except Exception as e:
                pass

    if stat.S_ISLNK(sstat[stat.ST_MODE]):
        try:
            target = os.readlink(src)
            if destexists and not stat.S_ISDIR(dstat[stat.ST_MODE]):
                os.unlink(dest)
            os.symlink(target, dest)
            #os.lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
            return os.lstat(dest)
        except Exception as e:
            logger.warn("copyfile: failed to create symlink %s to %s (%s)" %
                        (dest, target, e))
            return False

    if stat.S_ISREG(sstat[stat.ST_MODE]):
        try:
            srcchown = False
            if not os.access(src, os.R_OK):
                # Make sure we can read it
                srcchown = True
                os.chmod(src, sstat[stat.ST_MODE] | stat.S_IRUSR)

            # For safety copy then move it over.
            shutil.copyfile(src, dest + "#new")
            os.rename(dest + "#new", dest)
        except Exception as e:
            logger.warn("copyfile: copy %s to %s failed (%s)" % (src, dest, e))
            return False
        finally:
            if srcchown:
                os.chmod(src, sstat[stat.ST_MODE])
                os.utime(src, (sstat[stat.ST_ATIME], sstat[stat.ST_MTIME]))

    else:
        #we don't yet handle special, so we need to fall back to /bin/cp
        a = getstatusoutput("/bin/cp -f " + "'" + src + "' '" + dest + "'")
        if a[0] != 0:
            logger.warn("copyfile: failed to copy special file %s to %s (%s)" %
                        (src, dest, a))
            return False  # failure
    try:
        os.lchown(dest, sstat[stat.ST_UID], sstat[stat.ST_GID])
        os.chmod(dest,
                 stat.S_IMODE(sstat[stat.ST_MODE]))  # Sticky is reset on chown
    except Exception as e:
        logger.warn("copyfile: failed to chown/chmod %s (%s)" % (dest, e))
        return False

    if newmtime:
        os.utime(dest, (newmtime, newmtime))
    else:
        os.utime(dest, (sstat[stat.ST_ATIME], sstat[stat.ST_MTIME]))
        newmtime = sstat[stat.ST_MTIME]
    return newmtime
Example 18
 def link(self):
     st = self.path.lstat()
     return stat.S_ISLNK(st.mode)
Example 19
def statislink(st):
    '''check whether a stat result is a symlink'''
    return st and stat.S_ISLNK(st.st_mode)
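The helper is convenient when the caller may already hold a cached lstat result, or None for a missing path; a small usage sketch (the wrapper below is hypothetical, only statislink() comes from the snippet):

import os

def path_is_symlink(path):
    """lstat the path and reuse statislink(); a missing path is not a link."""
    try:
        st = os.lstat(path)
    except OSError:
        st = None
    return statislink(st)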
Example 20
def remove_target(source_root, target_root, relpath, st_source):
    """Remove a target file if necessary and if we can.

    On the whole, we can assume that partman-target has arranged to clear
    out the areas of the filesystem we're installing to.  However, in edge
    cases it's possible that there is still some detritus left over, and we
    want to steer a reasonable course between cavalierly destroying data and
    crashing.  So, we remove non-directories and empty directories that are
    in our way, but if a non-empty directory is in our way then we move it
    aside (adding .bak suffixes until we find something unused) instead.
    """
    targetpath = os.path.join(target_root, relpath)
    try:
        st_target = os.lstat(targetpath)
    except OSError:
        # The target does not exist.  Boring.
        return

    if stat.S_ISDIR(st_source.st_mode) and stat.S_ISDIR(st_target.st_mode):
        # One directory is as good as another, so we don't need to remove an
        # existing directory just in order to create another one.
        return

    if not stat.S_ISDIR(st_target.st_mode):
        # Installing over a non-directory is easy; just remove it.
        osextras.unlink_force(targetpath)
        return

    try:
        # Is it an empty directory?  That's easy too.
        os.rmdir(targetpath)
        return
    except OSError as e:
        if e.errno not in (errno.ENOTEMPTY, errno.EEXIST):
            raise

    # If we've got this far, then we must be trying to install a
    # non-directory over an existing non-empty directory.  The slightly
    # easier case is if it's a symlink, and if the prospective symlink
    # target hasn't been copied yet or is empty; in that case, we should try
    # to move the existing directory to the symlink target.
    if stat.S_ISLNK(st_source.st_mode):
        sourcepath = os.path.join(source_root, relpath)
        linkto = os.path.join(os.path.dirname(relpath),
                              os.readlink(sourcepath))
        if linkto.startswith('/'):
            linkto = linkto[1:]
        linktarget = os.path.join(target_root, linkto)
        try:
            os.rmdir(linktarget)
        except OSError:
            pass
        if not os.path.exists(linktarget):
            try:
                os.makedirs(os.path.dirname(linktarget))
            except OSError as e:
                if e.errno != errno.EEXIST:
                    raise
            shutil.move(targetpath, linktarget)
            return

    # We're installing a non-directory over an existing non-empty directory,
    # and we have no better strategy.  Move the existing directory to a
    # backup location.
    backuppath = targetpath + '.bak'
    while True:
        if not os.path.exists(backuppath):
            os.rename(targetpath, backuppath)
            break
        else:
            backuppath = backuppath + '.bak'
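A caller would typically clear each destination right before copying the corresponding source entry; a minimal driver sketch (the walk and relpath handling are assumptions, only remove_target() is defined above):

import os

def clear_targets(source_root, target_root):
    """Call remove_target() for every entry under source_root."""
    for dirpath, dirnames, filenames in os.walk(source_root):
        for name in dirnames + filenames:
            sourcepath = os.path.join(dirpath, name)
            relpath = os.path.relpath(sourcepath, source_root)
            remove_target(source_root, target_root, relpath,
                          os.lstat(sourcepath))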
Example 21
            def process_sysroot(path):
                #bb.plain("[ABICompatBasicHash]: %s: process(%s)" % (task, path,))
                s = os.lstat(path)

                path_is_shared_lib = False
                if stat.S_ISREG(s.st_mode):
                    # Determine whether path is a shared library (.so) object
                    # Restrict to .so filenames to save some time...
                    if ".so" in path:
                        #bb.plain("[ABICompatBasicHash]: %s,%s" % (is_elf(path)))
                        result = is_elf(path)
                        if result[1] & 1 and result[1] & 8:
                            bb.plain("[ABICompatBasicHash]: ABI Dumping: %s" %
                                     (path, ))
                            path_is_shared_lib = True

                # If path is an .so object, dump the ABI and store it in the dictionary with the soname as the key
                # Otherwise use the file checksum for the hash
                if path_is_shared_lib:
                    abi_dump_successful = False
                    try:
                        result = subprocess.run(
                            ["abidw", "--no-corpus-path", path],
                            check=True,
                            capture_output=True,
                            universal_newlines=True)
                        soname_search = re.search("soname='(.*)'",
                                                  result.stdout)
                        if soname_search:
                            soname = soname_search.group(1)
                            abi_dumps[soname] = result.stdout
                            abi_dump_successful = True
                    except subprocess.CalledProcessError as e:
                        bb.warn("[ABICompatBasicHash]: Could not abi dump %s" %
                                (path, ))

                    # Don't capture any details of this file in the hash if the abi dump is successful
                    # If unsuccessful at dumping the file, continue and use the checksum
                    if abi_dump_successful:
                        return

                if stat.S_ISDIR(s.st_mode):
                    update_hash('d')
                elif stat.S_ISCHR(s.st_mode):
                    update_hash('c')
                elif stat.S_ISBLK(s.st_mode):
                    update_hash('b')
                elif stat.S_ISSOCK(s.st_mode):
                    update_hash('s')
                elif stat.S_ISLNK(s.st_mode):
                    update_hash('l')
                elif stat.S_ISFIFO(s.st_mode):
                    update_hash('p')
                else:
                    update_hash('-')

                update_hash(" ")
                if stat.S_ISBLK(s.st_mode) or stat.S_ISCHR(s.st_mode):
                    update_hash("%9s" %
                                ("%d.%d" %
                                 (os.major(s.st_rdev), os.minor(s.st_rdev))))
                else:
                    update_hash(" " * 9)

                update_hash(" ")
                if stat.S_ISREG(s.st_mode):
                    update_hash("%10d" % s.st_size)
                else:
                    update_hash(" " * 10)

                update_hash(" ")
                fh = hashlib.sha256()
                if stat.S_ISREG(s.st_mode):
                    with open(path, 'rb') as d:
                        for chunk in iter(lambda: d.read(4096), b""):
                            fh.update(chunk)
                    update_hash(fh.hexdigest())
                else:
                    update_hash(" " * len(fh.hexdigest()))

                update_hash(" %s" % path)

                if stat.S_ISLNK(s.st_mode):
                    # Replace symlink destinations with the soname if it's an so file
                    symlink_dest = os.readlink(path)
                    if ".so" in path:
                        result = is_elf(os.path.realpath(path))
                        if result[1] & 1 and result[1] & 8:
                            result = subprocess.run(
                                ["abidw", "--no-corpus-path", path],
                                check=True,
                                capture_output=True,
                                universal_newlines=True)
                            soname_search = re.search("soname='(.*)'",
                                                      result.stdout)
                            if soname_search:
                                soname = soname_search.group(1)
                                bb.plain(
                                    "[ABICompatBasicHash]: Re-writing symlink: %s -> %s -> %s"
                                    % (path, os.readlink(path), soname))
                                symlink_dest = soname

                    update_hash(" -> %s" % symlink_dest)

                update_hash("\n")
Example 22
def ProcessTargetFiles(input_tf_zip, output_tf_zip, misc_info, apk_key_map,
                       key_passwords, platform_api_level,
                       codename_to_api_level_map, compressed_extension):
    # maxsize measures the maximum filename length, including the ones to be
    # skipped.
    maxsize = max([
        len(os.path.basename(i.filename)) for i in input_tf_zip.infolist()
        if GetApkFileInfo(i.filename, compressed_extension, [])[0]
    ])
    system_root_image = misc_info.get("system_root_image") == "true"

    for info in input_tf_zip.infolist():
        filename = info.filename
        if filename.startswith("IMAGES/"):
            continue

        data = input_tf_zip.read(filename)
        out_info = copy.copy(info)
        (is_apk, is_compressed, should_be_skipped) = GetApkFileInfo(
            filename, compressed_extension, OPTIONS.skip_apks_with_path_prefix)

        if is_apk and should_be_skipped:
            # Copy skipped APKs verbatim.
            print("NOT signing: %s\n"
                  "        (skipped due to matching prefix)" % (filename, ))
            common.ZipWriteStr(output_tf_zip, out_info, data)

        # Sign APKs.
        elif is_apk:
            name = os.path.basename(filename)
            if is_compressed:
                name = name[:-len(compressed_extension)]

            key = apk_key_map[name]
            if key not in common.SPECIAL_CERT_STRINGS:
                print("    signing: %-*s (%s)" % (maxsize, name, key))
                signed_data = SignApk(data, key, key_passwords[key],
                                      platform_api_level,
                                      codename_to_api_level_map, is_compressed)
                common.ZipWriteStr(output_tf_zip, out_info, signed_data)
            else:
                # an APK we're not supposed to sign.
                print("NOT signing: %s\n"
                      "        (skipped due to special cert string)" %
                      (name, ))
                common.ZipWriteStr(output_tf_zip, out_info, data)

        # System properties.
        elif filename in (
                "SYSTEM/build.prop",
                "VENDOR/build.prop",
                "SYSTEM/etc/prop.default",
                "BOOT/RAMDISK/prop.default",
                "BOOT/RAMDISK/default.prop",  # legacy
                "ROOT/default.prop",  # legacy
                "RECOVERY/RAMDISK/prop.default",
                "RECOVERY/RAMDISK/default.prop"):  # legacy
            print("Rewriting %s:" % (filename, ))
            if stat.S_ISLNK(info.external_attr >> 16):
                new_data = data
            else:
                new_data = RewriteProps(data)
            common.ZipWriteStr(output_tf_zip, out_info, new_data)

        # Replace the certs in *mac_permissions.xml (there could be multiple, such
        # as {system,vendor}/etc/selinux/{plat,nonplat}_mac_permissions.xml).
        elif filename.endswith("mac_permissions.xml"):
            print("Rewriting %s with new keys." % (filename, ))
            new_data = ReplaceCerts(data)
            common.ZipWriteStr(output_tf_zip, out_info, new_data)

        # Ask add_img_to_target_files to rebuild the recovery patch if needed.
        elif filename in ("SYSTEM/recovery-from-boot.p",
                          "SYSTEM/etc/recovery.img",
                          "SYSTEM/bin/install-recovery.sh"):
            OPTIONS.rebuild_recovery = True

        # Don't copy OTA keys if we're replacing them.
        elif (
                OPTIONS.replace_ota_keys and filename in
            ("BOOT/RAMDISK/res/keys",
             "BOOT/RAMDISK/system/etc/update_engine/update-payload-key.pub.pem",
             "RECOVERY/RAMDISK/res/keys", "SYSTEM/etc/security/otacerts.zip",
             "SYSTEM/etc/update_engine/update-payload-key.pub.pem")):
            pass

        # Skip META/misc_info.txt since we will write back the new values later.
        elif filename == "META/misc_info.txt":
            pass

        # Skip verity public key if we will replace it.
        elif (OPTIONS.replace_verity_public_key
              and filename in ("BOOT/RAMDISK/verity_key", "ROOT/verity_key")):
            pass

        # Skip verity keyid (for system_root_image use) if we will replace it.
        elif OPTIONS.replace_verity_keyid and filename == "BOOT/cmdline":
            pass

        # Skip the care_map as we will regenerate the system/vendor images.
        elif filename == "META/care_map.pb" or filename == "META/care_map.txt":
            pass

        # A non-APK file; copy it verbatim.
        else:
            common.ZipWriteStr(output_tf_zip, out_info, data)

    if OPTIONS.replace_ota_keys:
        ReplaceOtaKeys(input_tf_zip, output_tf_zip, misc_info)

    # Replace the keyid string in misc_info dict.
    if OPTIONS.replace_verity_private_key:
        ReplaceVerityPrivateKey(misc_info,
                                OPTIONS.replace_verity_private_key[1])

    if OPTIONS.replace_verity_public_key:
        dest = "ROOT/verity_key" if system_root_image else "BOOT/RAMDISK/verity_key"
        # We are replacing the one in boot image only, since the one under
        # recovery won't ever be needed.
        ReplaceVerityPublicKey(output_tf_zip, dest,
                               OPTIONS.replace_verity_public_key[1])

    # Replace the keyid string in BOOT/cmdline.
    if OPTIONS.replace_verity_keyid:
        ReplaceVerityKeyId(input_tf_zip, output_tf_zip,
                           OPTIONS.replace_verity_keyid[1])

    # Replace the AVB signing keys, if any.
    ReplaceAvbSigningKeys(misc_info)

    # Write back misc_info with the latest values.
    ReplaceMiscInfoTxt(input_tf_zip, output_tf_zip, misc_info)
Example 23
def islink(path):
	try:
		st = os.lstat(path)
	except (os.error, AttributeError):
		return 0
	return stat.S_ISLNK(st[stat.ST_MODE])
Example 24
def main():
    module = AnsibleModule(
        argument_spec=dict(
            paths=dict(type='list', required=True, aliases=['name', 'path']),
            patterns=dict(type='list', default=['*'], aliases=['pattern']),
            excludes=dict(type='list', aliases=['exclude']),
            contains=dict(type='str'),
            file_type=dict(type='str', default="file", choices=['any', 'directory', 'file', 'link']),
            age=dict(type='str'),
            age_stamp=dict(type='str', default="mtime", choices=['atime', 'mtime', 'ctime']),
            size=dict(type='str'),
            recurse=dict(type='bool', default='no'),
            hidden=dict(type='bool', default='no'),
            follow=dict(type='bool', default='no'),
            get_checksum=dict(type='bool', default='no'),
            use_regex=dict(type='bool', default='no'),
            depth=dict(type='int', default=None),
            # by RockieYang: Add one more configuration, in hierachy format
            # configs=dict(type="json")
        ),
        supports_check_mode=True,
    )

    params = module.params

    filelist = []

    # by RockieYang: print(params["configs"])
    # configs = params["configs"]
    # print(configs)

    if params['age'] is None:
        age = None
    else:
        # convert age to seconds:
        m = re.match(r"^(-?\d+)(s|m|h|d|w)?$", params['age'].lower())
        seconds_per_unit = {"s": 1, "m": 60, "h": 3600, "d": 86400, "w": 604800}
        if m:
            age = int(m.group(1)) * seconds_per_unit.get(m.group(2), 1)
        else:
            module.fail_json(age=params['age'], msg="failed to process age")

    if params['size'] is None:
        size = None
    else:
        # convert size to bytes:
        m = re.match(r"^(-?\d+)(b|k|m|g|t)?$", params['size'].lower())
        bytes_per_unit = {"b": 1, "k": 1024, "m": 1024**2, "g": 1024**3, "t": 1024**4}
        if m:
            size = int(m.group(1)) * bytes_per_unit.get(m.group(2), 1)
        else:
            module.fail_json(size=params['size'], msg="failed to process size")

    now = time.time()
    msg = ''
    looked = 0
    for npath in params['paths']:
        npath = os.path.expanduser(os.path.expandvars(npath))
        if os.path.isdir(npath):
            """ ignore followlinks for python version < 2.6 """
            for root, dirs, files in (sys.version_info < (2, 6, 0) and os.walk(npath)) or os.walk(npath, followlinks=params['follow']):
                if params['depth']:
                    depth = root.replace(npath.rstrip(os.path.sep), '').count(os.path.sep)
                    if files or dirs:
                        depth += 1
                    if depth > params['depth']:
                        del(dirs[:])
                        continue
                looked = looked + len(files) + len(dirs)
                for fsobj in (files + dirs):
                    fsname = os.path.normpath(os.path.join(root, fsobj))

                    if os.path.basename(fsname).startswith('.') and not params['hidden']:
                        continue

                    try:
                        st = os.lstat(fsname)
                        # f.write(st)
                        # f.write("\n")
                    except Exception:
                        msg += "%s was skipped as it does not seem to be a valid file or it cannot be accessed\n" % fsname
                        continue

                    r = {'path': fsname}
                    if params['file_type'] == 'any':
                        if pfilter(fsobj, params['patterns'], params['excludes'], params['use_regex']) and agefilter(st, now, age, params['age_stamp']):

                            r.update(statinfo(st))
                            # by RockieYang: Track link source
                            if stat.S_ISLNK(st.st_mode):
                                r['lnk_source'] = os.path.realpath(fsname)

                            if stat.S_ISREG(st.st_mode) and params['get_checksum']:
                                r['checksum'] = safe_sha1(module, st, fsname)

                            # filelist.append(r)
                            append_to_result(filelist, r)

                    elif stat.S_ISDIR(st.st_mode) and params['file_type'] == 'directory':
                        if pfilter(fsobj, params['patterns'], params['excludes'], params['use_regex']) and agefilter(st, now, age, params['age_stamp']):

                            r.update(statinfo(st))
                            # filelist.append(r)
                            append_to_result(filelist, r)

                    elif stat.S_ISREG(st.st_mode) and params['file_type'] == 'file':
                        if pfilter(fsobj, params['patterns'], params['excludes'], params['use_regex']) and \
                                agefilter(st, now, age, params['age_stamp']) and \
                                sizefilter(st, size) and contentfilter(fsname, params['contains']):

                            r.update(statinfo(st))
                            if stat.S_ISLNK(st.st_mode):
                                r['lnk_source'] = os.path.realpath(fsname)

                            if stat.S_ISREG(st.st_mode) and params['get_checksum']:
                                r['checksum'] = safe_sha1(module, st, fsname)

                            # filelist.append(r)
                            append_to_result(filelist, r)

                    elif stat.S_ISLNK(st.st_mode) and params['file_type'] == 'link':
                        if pfilter(fsobj, params['patterns'], params['excludes'], params['use_regex']) and agefilter(st, now, age, params['age_stamp']):

                            r.update(statinfo(st))


                            # filelist.append(r)
                            append_to_result(filelist, r)

                if not params['recurse']:
                    break
        else:
            msg += "%s was skipped as it does not seem to be a valid directory or it cannot be accessed\n" % npath

    matched = len(filelist)
    module.exit_json(files=filelist, changed=False, msg=msg, matched=matched, examined=looked)
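
The age/size handling above boils down to a small suffix parser. Here is a standalone sketch of it; the function name and the ValueError are my own choices (the module itself calls fail_json instead):

import re

def spec_to_seconds(spec):
    # Accepts values such as "2d", "-3600s" or "4w"; a bare number means seconds.
    m = re.match(r"^(-?\d+)(s|m|h|d|w)?$", spec.lower())
    if not m:
        raise ValueError("failed to process age: %r" % spec)
    seconds_per_unit = {"s": 1, "m": 60, "h": 3600, "d": 86400, "w": 604800}
    return int(m.group(1)) * seconds_per_unit.get(m.group(2), 1)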
Esempio n. 25
0
def unzip(zip_file,
          dest,
          excludes=None,
          options=None,
          template=None,
          runas=None,
          trim_output=False,
          password=None,
          extract_perms=True):
    '''
    Uses the ``zipfile`` Python module to unpack zip files

    .. versionchanged:: 2015.5.0
        This function was rewritten to use Python's native zip file support.
        The old functionality has been preserved in the new function
        :mod:`archive.cmd_unzip <salt.modules.archive.cmd_unzip>`. For versions
        2014.7.x and earlier, see the :mod:`archive.cmd_zip
        <salt.modules.archive.cmd_zip>` documentation.

    zip_file
        Path of zip file to be unpacked

    dest
        The destination directory into which the file should be unpacked

    excludes : None
        Comma-separated list of files not to unpack. Can also be passed in a
        Python list.

    options
        These options are only used when the ``unzip`` binary is used; this
        function ignores them.

        .. versionadded:: 2016.3.1

    template : None
        Can be set to 'jinja' or another supported template engine to render
        the command arguments before execution:

        .. code-block:: bash

            salt '*' archive.unzip template=jinja /tmp/zipfile.zip /tmp/{{grains.id}}/ excludes=file_1,file_2

    runas : None
        Unpack the zip file as the specified user. Defaults to the user under
        which the minion is running.

    trim_output : False
        The number of files to list in the output on success before the rest
        are trimmed. If set to ``True``, this defaults to 100.

    CLI Example:

    .. code-block:: bash

        salt '*' archive.unzip /tmp/zipfile.zip /home/strongbad/ excludes=file_1,file_2

    password
        Password to use with password protected zip files

        .. note::
            The password will be present in the events logged to the minion log
            file at the ``debug`` log level. If the minion is logging at
            ``debug`` (or more verbose), then be advised that the password will
            appear in the log.

        .. versionadded:: 2016.3.0

    extract_perms : True
        The Python zipfile_ module does not extract file/directory attributes
        by default. When this argument is set to ``True``, Salt will attempt to
        apply the file permission attributes to the extracted files/folders.

        On Windows, only the read-only flag will be extracted as set within the
        zip file, other attributes (i.e. user/group permissions) are ignored.

        Set this argument to ``False`` to disable this behavior.

        .. versionadded:: 2016.11.0

    .. _zipfile: https://docs.python.org/2/library/zipfile.html

    CLI Example:

    .. code-block:: bash

        salt '*' archive.unzip /tmp/zipfile.zip /home/strongbad/ password='******'
    '''
    if not excludes:
        excludes = []
    if runas:
        euid = os.geteuid()
        egid = os.getegid()
        uinfo = __salt__['user.info'](runas)
        if not uinfo:
            raise SaltInvocationError(
                "User '{0}' does not exist".format(runas)
            )

    zip_file, dest = _render_filenames(zip_file, dest, None, template)

    if runas and (euid != uinfo['uid'] or egid != uinfo['gid']):
        # Change the egid first, as changing it after the euid will fail
        # if the runas user is non-privileged.
        os.setegid(uinfo['gid'])
        os.seteuid(uinfo['uid'])

    try:
        exc = None
        # Define cleaned_files here so that an exception will not prevent this
        # variable from being defined and cause a NameError in the return
        # statement at the end of the function.
        cleaned_files = []
        with contextlib.closing(zipfile.ZipFile(zip_file, "r")) as zfile:
            files = zfile.namelist()

            if isinstance(excludes, string_types):
                excludes = [x.strip() for x in excludes.split(',')]
            elif isinstance(excludes, (float, integer_types)):
                excludes = [str(excludes)]

            cleaned_files.extend([x for x in files if x not in excludes])
            for target in cleaned_files:
                if target not in excludes:
                    if salt.utils.is_windows() is False:
                        info = zfile.getinfo(target)
                        # Check if zipped file is a symbolic link
                        if stat.S_ISLNK(info.external_attr >> 16):
                            source = zfile.read(target)
                            os.symlink(source, os.path.join(dest, target))
                            continue
                    zfile.extract(target, dest, password)
                    if extract_perms:
                        os.chmod(os.path.join(dest, target), zfile.getinfo(target).external_attr >> 16)
    except Exception as err:
        # Keep the exception in "exc": Python 3 clears the "as" target when
        # this block ends, which would break the check in "finally" below.
        exc = err
    finally:
        # Restore the euid/egid
        if runas:
            os.seteuid(euid)
            os.setegid(egid)
        if exc is not None:
            # Wait to raise the exception until euid/egid are restored to avoid
            # permission errors in writing to minion log.
            raise CommandExecutionError(
                'Exception encountered unpacking zipfile: {0}'.format(exc)
            )

    return _trim_files(cleaned_files, trim_output)
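
The interesting detail above is how symlinks are detected inside the archive: for archives built on Unix, the upper 16 bits of ZipInfo.external_attr carry the POSIX mode. A self-contained sketch of that technique, with all names my own and no attempt to reproduce Salt's runas/password handling:

import os
import stat
import zipfile

def unzip_with_symlinks(zip_path, dest):
    # extract() would write symlink entries out as regular files, so
    # recreate them by hand from the stored link target.
    with zipfile.ZipFile(zip_path) as zf:
        for info in zf.infolist():
            mode = info.external_attr >> 16
            target = os.path.join(dest, info.filename)
            if stat.S_ISLNK(mode):
                os.makedirs(os.path.dirname(target), exist_ok=True)
                link_target = zf.read(info.filename).decode("utf-8")
                os.symlink(link_target, target)
            elif info.is_dir():
                os.makedirs(target, exist_ok=True)
            else:
                zf.extract(info, dest)
                if mode:
                    # Reapply the permission bits that extract() drops.
                    os.chmod(target, stat.S_IMODE(mode))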
Esempio n. 26
0
def DoUnaryOp(op_id, s):
    # type: (Id_t, str) -> bool

    # Only use lstat if we're testing for a symlink.
    if op_id in (Id.BoolUnary_h, Id.BoolUnary_L):
        try:
            mode = posix.lstat(s).st_mode
        except OSError:
            # TODO: simple_test_builtin should report this as status=2.
            #e_die("lstat() error: %s", e, word=node.child)
            return False

        return stat.S_ISLNK(mode)

    try:
        st = posix.stat(s)
    except OSError as e:
        # TODO: simple_test_builtin should report this as status=2.
        # Problem: we really need errno, because test -f / is bad argument,
        # while test -f /nonexistent is a good argument but failed.  Gah.
        # ENOENT vs. ENAMETOOLONG.
        #e_die("stat() error: %s", e, word=node.child)
        return False
    mode = st.st_mode

    if op_id in (Id.BoolUnary_e, Id.BoolUnary_a):  # -a is alias for -e
        return True

    if op_id == Id.BoolUnary_f:
        return stat.S_ISREG(mode)

    if op_id == Id.BoolUnary_d:
        return stat.S_ISDIR(mode)

    if op_id == Id.BoolUnary_b:
        return stat.S_ISBLK(mode)

    if op_id == Id.BoolUnary_c:
        return stat.S_ISCHR(mode)

    if op_id == Id.BoolUnary_k:
        # need 'bool' for MyPy
        return bool(stat.S_IMODE(mode) & stat.S_ISVTX)

    if op_id == Id.BoolUnary_p:
        return stat.S_ISFIFO(mode)

    if op_id == Id.BoolUnary_S:
        return stat.S_ISSOCK(mode)

    if op_id == Id.BoolUnary_x:
        return posix.access(s, posix.X_OK_)

    if op_id == Id.BoolUnary_r:
        return posix.access(s, posix.R_OK_)

    if op_id == Id.BoolUnary_w:
        return posix.access(s, posix.W_OK_)

    if op_id == Id.BoolUnary_s:
        return st.st_size != 0

    if op_id == Id.BoolUnary_u:
        return bool(stat.S_IMODE(mode) & stat.S_ISUID)

    if op_id == Id.BoolUnary_g:
        return bool(stat.S_IMODE(mode) & stat.S_ISGID)

    if op_id == Id.BoolUnary_O:
        return st.st_uid == posix.geteuid()

    if op_id == Id.BoolUnary_G:
        return st.st_gid == posix.getegid()

    e_die("%s isn't implemented", ui.PrettyId(op_id))  # implicit location
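
For comparison, a compact sketch of the same dispatch using a plain dict of flag characters mapped to stat predicates; the flag letters follow test(1), while the function name and the choice to return False on stat errors are my own:

import os
import stat

# Mapping from test(1)-style flags to stat mode predicates.
_MODE_TESTS = {
    'f': stat.S_ISREG,
    'd': stat.S_ISDIR,
    'b': stat.S_ISBLK,
    'c': stat.S_ISCHR,
    'p': stat.S_ISFIFO,
    'S': stat.S_ISSOCK,
}

def unary_file_test(flag, path):
    # -h / -L must not follow the link, so they use lstat().
    if flag in ('h', 'L'):
        try:
            return stat.S_ISLNK(os.lstat(path).st_mode)
        except OSError:
            return False
    try:
        st = os.stat(path)
    except OSError:
        return False
    if flag in ('e', 'a'):  # -a is an alias for -e
        return True
    if flag == 's':
        return st.st_size != 0
    check = _MODE_TESTS.get(flag)
    return bool(check and check(st.st_mode))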
Esempio n. 27
0
    async def _dir_model(self, path, content=True):
        """Build a model for a directory

        if content is requested, will include a listing of the directory
        """
        os_path = self._get_os_path(path)

        four_o_four = u'directory does not exist: %r' % path

        if not os.path.isdir(os_path):
            raise web.HTTPError(404, four_o_four)
        elif is_hidden(os_path, self.root_dir) and not self.allow_hidden:
            self.log.info(
                "Refusing to serve hidden directory %r, via 404 Error",
                os_path)
            raise web.HTTPError(404, four_o_four)

        model = self._base_model(path)
        model['type'] = 'directory'
        model['size'] = None
        if content:
            model['content'] = contents = []
            os_dir = self._get_os_path(path)
            dir_contents = await run_sync_in_worker_thread(os.listdir, os_dir)
            for name in dir_contents:
                try:
                    os_path = os.path.join(os_dir, name)
                except UnicodeDecodeError as e:
                    self.log.warning("failed to decode filename '%s': %s",
                                     name, e)
                    continue

                try:
                    st = await run_sync_in_worker_thread(os.lstat, os_path)
                except OSError as e:
                    # skip over broken symlinks in listing
                    if e.errno == errno.ENOENT:
                        self.log.warning("%s doesn't exist", os_path)
                    elif e.errno != errno.EACCES:  # Don't provide clues about protected files
                        self.log.warning("Error stat-ing %s: %s", os_path, e)
                    continue

                if (not stat.S_ISLNK(st.st_mode)
                        and not stat.S_ISREG(st.st_mode)
                        and not stat.S_ISDIR(st.st_mode)):
                    self.log.debug("%s not a regular file", os_path)
                    continue

                try:
                    if self.should_list(name):
                        if self.allow_hidden or not is_file_hidden(
                                os_path, stat_res=st):
                            contents.append(await self.get(path='%s/%s' %
                                                           (path, name),
                                                           content=False))
                except OSError as e:
                    # ELOOP: recursive symlink, also don't show failure due to permissions
                    if e.errno not in [errno.ELOOP, errno.EACCES]:
                        self.log.warning(
                            "Unknown error checking if file %r is hidden",
                            os_path,
                            exc_info=True,
                        )

            model['format'] = 'json'

        return model
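
A synchronous sketch of the listing filter used above: skip entries that cannot be lstat()-ed (broken links, permission errors) and keep only links, regular files and directories. The helper name and the decision to re-raise unexpected errors are mine:

import errno
import os
import stat

def list_regular_entries(directory):
    # Keep links, files and directories; skip entries whose lstat() fails
    # instead of aborting the whole listing.
    entries = []
    for name in os.listdir(directory):
        path = os.path.join(directory, name)
        try:
            st = os.lstat(path)
        except OSError as e:
            if e.errno not in (errno.ENOENT, errno.EACCES):
                raise
            continue
        if stat.S_ISLNK(st.st_mode) or stat.S_ISREG(st.st_mode) or stat.S_ISDIR(st.st_mode):
            entries.append(name)
    return entries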
Esempio n. 28
0
def scanPath(path, rule_sets, filename_iocs, hashes, false_hashes):

    # Startup
    log("INFO", "Scanning %s ...  " % path)

    # Counter
    c = 0

    # Get application path
    appPath = getApplicationPath()

    # Linux excludes from mtab
    if isLinux:
        allExcludes = LINUX_PATH_SKIPS_START | Set(getExcludedMountpoints())

    for root, directories, files in scandir.walk(path,
                                                 onerror=walkError,
                                                 followlinks=False):

        if isLinux:
            # Skip paths that start with ..
            newDirectories = []
            for dir in directories:
                skipIt = False
                completePath = os.path.join(root, dir)
                for skip in allExcludes:
                    if completePath.startswith(skip):
                        log("INFO", "Skipping %s directory" % skip)
                        skipIt = True
                if not skipIt:
                    newDirectories.append(dir)
            directories[:] = newDirectories

        # Loop through files
        for filename in files:
            try:

                # Get the file and path
                filePath = os.path.join(root, filename)

                # Linux directory skip
                if isLinux:

                    # Skip paths that end with ..
                    for skip in LINUX_PATH_SKIPS_END:
                        if filePath.endswith(skip):
                            if LINUX_PATH_SKIPS_END[skip] == 0:
                                log("INFO", "Skipping %s element" % skip)
                                LINUX_PATH_SKIPS_END[skip] = 1

                    # File mode
                    mode = os.stat(filePath).st_mode
                    if stat.S_ISCHR(mode) or stat.S_ISBLK(
                            mode) or stat.S_ISFIFO(mode) or stat.S_ISLNK(
                                mode) or stat.S_ISSOCK(mode):
                        continue

                # Counter
                c += 1

                if not args.noindicator:
                    printProgress(c)

                # Skip program directory
                # print appPath.lower() +" - "+ filePath.lower()
                if appPath.lower() in filePath.lower():
                    log(
                        "DEBUG",
                        "Skipping file in program directory FILE: %s" %
                        filePath)
                    continue

                fileSize = os.stat(filePath).st_size
                # print file_size

                # File Name Checks -------------------------------------------------
                for regex in filename_iocs.keys():
                    match = re.search(r'%s' % regex, filePath)
                    if match:
                        description = filenameIOC_desc[regex]
                        score = filename_iocs[regex]
                        if score > 70:
                            log(
                                "ALERT",
                                "File Name IOC matched PATTERN: %s DESC: %s MATCH: %s"
                                % (regex, description, filePath))
                        elif score > 40:
                            log(
                                "WARNING",
                                "File Name Suspicious IOC matched PATTERN: %s DESC: %s MATCH: %s"
                                % (regex, description, filePath))

                # Access check (also used for magic header detection)
                firstBytes = ""
                try:
                    with open(filePath, 'rb') as f:
                        firstBytes = f.read(4)
                except Exception, e:
                    log("DEBUG",
                        "Cannot open file %s (access denied)" % filePath)

                # Evaluate Type
                fileType = ""
                if firstBytes.startswith('\x4d\x5a'):
                    fileType = "EXE"
                if firstBytes.startswith('\x4d\x44\x4d\x50'):
                    fileType = "MDMP"

                # Set fileData to an empty value
                fileData = ""

                # Evaluations -------------------------------------------------------
                # Evaluate size
                do_intense_check = True
                if fileSize > (args.s * 1024):
                    # Print files
                    if args.printAll:
                        log("INFO", "Checking %s" % filePath)
                    # Skip the expensive hash/YARA checks for oversized files
                    do_intense_check = False
                else:
                    if args.printAll:
                        log("INFO", "Scanning %s" % filePath)

                # Some file types will force intense check
                if fileType == "MDMP":
                    do_intense_check = True

                # Hash Check -------------------------------------------------------
                # Do the check
                md5 = "-"
                sha1 = "-"
                sha256 = "-"
                if do_intense_check:

                    fileData = readFileData(filePath)
                    md5, sha1, sha256 = generateHashes(fileData)

                    log(
                        "DEBUG", "MD5: %s SHA1: %s SHA256: %s FILE: %s" %
                        (md5, sha1, sha256, filePath))

                    # False Positive Hash
                    if md5 in false_hashes.keys() or sha1 in false_hashes.keys(
                    ) or sha256 in false_hashes.keys():
                        continue

                    # Malware Hash
                    matchType = None
                    matchDesc = None
                    matchHash = None
                    if md5 in hashes.keys():
                        matchType = "MD5"
                        matchDesc = hashes[md5]
                        matchHash = md5
                    elif sha1 in hashes.keys():
                        matchType = "SHA1"
                        matchDesc = hashes[sha1]
                        matchHash = sha1
                    elif sha256 in hashes.keys():
                        matchType = "SHA256"
                        matchDesc = hashes[sha256]
                        matchHash = sha256

                    if matchType:
                        log(
                            "ALERT",
                            "Malware Hash TYPE: %s HASH: %s FILE: %s DESC: %s"
                            % (matchType, matchHash, filePath, matchDesc))

                # Yara Check -------------------------------------------------------
                # Size and type check
                if do_intense_check:

                    # Read file data if hash check has been skipped
                    if not fileData:
                        fileData = readFileData(filePath)

                    # Memory Dump Scan
                    if fileType == "MDMP":
                        log("INFO", "Scanning memory dump file %s" % filePath)

                    # Scan with yara
                    try:
                        for rules in rule_sets:

                            # Yara Rule Match
                            matches = rules.match(data=fileData,
                                                  externals={
                                                      'filename':
                                                      filename.lower(),
                                                      'filepath':
                                                      filePath.lower()
                                                  })

                            # If matched
                            if matches:
                                for match in matches:

                                    score = 70
                                    description = "not set"

                                    # Built-in rules have meta fields (cannot be expected from custom rules)
                                    if hasattr(match, 'meta'):

                                        if 'description' in match.meta:
                                            description = match.meta[
                                                'description']

                                        # If a score is given
                                        if 'score' in match.meta:
                                            score = int(match.meta['score'])

                                    # Hash string
                                    hash_string = "MD5: %s SHA1: %s SHA256: %s" % (
                                        md5, sha1, sha256)

                                    # Matching strings
                                    matched_strings = ""
                                    if hasattr(match, 'strings'):
                                        # Get matching strings
                                        matched_strings = getStringMatches(
                                            match.strings)

                                    if score >= 70:
                                        log(
                                            "ALERT",
                                            "Yara Rule MATCH: %s FILE: %s %s MATCHES: %s"
                                            % (match.rule, filePath,
                                               hash_string, matched_strings))

                                    elif score >= 40:
                                        log(
                                            "WARNING",
                                            "Yara Rule MATCH: %s FILE: %s %s MATCHES: %s"
                                            % (match.rule, filePath,
                                               hash_string, matched_strings))

                    except Exception, e:
                        if args.debug:
                            traceback.print_exc()

            except Exception, e:
                if args.debug:
                    traceback.print_exc()
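
The Linux branch above avoids feeding device nodes, FIFOs, sockets and symlinks to the scanner. A minimal sketch of that gate, using lstat() so the check never follows a link (the example itself uses os.stat plus an explicit S_ISLNK test); the helper name is my own:

import os
import stat

def is_scannable(path):
    # Only plain files are worth hashing and matching against rules;
    # character/block devices, FIFOs, sockets and symlinks are skipped.
    try:
        mode = os.lstat(path).st_mode
    except OSError:
        return False
    return stat.S_ISREG(mode)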
Esempio n. 29
0
def main():
    module = AnsibleModule(
        argument_spec=dict(
            paths=dict(type='list', required=True, aliases=['name', 'path'], elements='str'),
            patterns=dict(type='list', default=['*'], aliases=['pattern'], elements='str'),
            excludes=dict(type='list', aliases=['exclude'], elements='str'),
            contains=dict(type='str'),
            file_type=dict(type='str', default="file", choices=['any', 'directory', 'file', 'link']),
            age=dict(type='str'),
            age_stamp=dict(type='str', default="mtime", choices=['atime', 'ctime', 'mtime']),
            size=dict(type='str'),
            recurse=dict(type='bool', default=False),
            hidden=dict(type='bool', default=False),
            follow=dict(type='bool', default=False),
            get_checksum=dict(type='bool', default=False),
            use_regex=dict(type='bool', default=False),
            depth=dict(type='int'),
        ),
        supports_check_mode=True,
    )

    params = module.params

    filelist = []

    if params['age'] is None:
        age = None
    else:
        # convert age to seconds:
        m = re.match(r"^(-?\d+)(s|m|h|d|w)?$", params['age'].lower())
        seconds_per_unit = {"s": 1, "m": 60, "h": 3600, "d": 86400, "w": 604800}
        if m:
            age = int(m.group(1)) * seconds_per_unit.get(m.group(2), 1)
        else:
            module.fail_json(age=params['age'], msg="failed to process age")

    if params['size'] is None:
        size = None
    else:
        # convert size to bytes:
        m = re.match(r"^(-?\d+)(b|k|m|g|t)?$", params['size'].lower())
        bytes_per_unit = {"b": 1, "k": 1024, "m": 1024**2, "g": 1024**3, "t": 1024**4}
        if m:
            size = int(m.group(1)) * bytes_per_unit.get(m.group(2), 1)
        else:
            module.fail_json(size=params['size'], msg="failed to process size")

    now = time.time()
    msg = ''
    looked = 0
    for npath in params['paths']:
        npath = os.path.expanduser(os.path.expandvars(npath))
        if os.path.isdir(npath):
            for root, dirs, files in os.walk(npath, followlinks=params['follow']):
                looked = looked + len(files) + len(dirs)
                for fsobj in (files + dirs):
                    fsname = os.path.normpath(os.path.join(root, fsobj))
                    if params['depth']:
                        wpath = npath.rstrip(os.path.sep) + os.path.sep
                        depth = int(fsname.count(os.path.sep)) - int(wpath.count(os.path.sep)) + 1
                        if depth > params['depth']:
                            continue
                    if os.path.basename(fsname).startswith('.') and not params['hidden']:
                        continue

                    try:
                        st = os.lstat(fsname)
                    except Exception:
                        msg += "%s was skipped as it does not seem to be a valid file or it cannot be accessed\n" % fsname
                        continue

                    r = {'path': fsname}
                    if params['file_type'] == 'any':
                        if pfilter(fsobj, params['patterns'], params['excludes'], params['use_regex']) and agefilter(st, now, age, params['age_stamp']):

                            r.update(statinfo(st))
                            if stat.S_ISREG(st.st_mode) and params['get_checksum']:
                                r['checksum'] = module.sha1(fsname)
                            filelist.append(r)

                    elif stat.S_ISDIR(st.st_mode) and params['file_type'] == 'directory':
                        if pfilter(fsobj, params['patterns'], params['excludes'], params['use_regex']) and agefilter(st, now, age, params['age_stamp']):

                            r.update(statinfo(st))
                            filelist.append(r)

                    elif stat.S_ISREG(st.st_mode) and params['file_type'] == 'file':
                        if pfilter(fsobj, params['patterns'], params['excludes'], params['use_regex']) and \
                           agefilter(st, now, age, params['age_stamp']) and \
                           sizefilter(st, size) and contentfilter(fsname, params['contains']):

                            r.update(statinfo(st))
                            if params['get_checksum']:
                                r['checksum'] = module.sha1(fsname)
                            filelist.append(r)

                    elif stat.S_ISLNK(st.st_mode) and params['file_type'] == 'link':
                        if pfilter(fsobj, params['patterns'], params['excludes'], params['use_regex']) and agefilter(st, now, age, params['age_stamp']):

                            r.update(statinfo(st))
                            filelist.append(r)

                if not params['recurse']:
                    break
        else:
            msg += "%s was skipped as it does not seem to be a valid directory or it cannot be accessed\n" % npath

    matched = len(filelist)
    module.exit_json(files=filelist, changed=False, msg=msg, matched=matched, examined=looked)
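
For the checksum field, this version relies on module.sha1(); a rough standalone equivalent that streams the file in chunks (the chunk size and function name are my own):

import hashlib

def file_sha1(path, chunk_size=64 * 1024):
    # Hash the file in chunks so large files never need to fit in memory.
    digest = hashlib.sha1()
    with open(path, "rb") as fh:
        for chunk in iter(lambda: fh.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()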
Esempio n. 30
0
    def install(self, setup):
        # mount the media location.
        print " --> Installation started"
        try:
            if(not os.path.exists("/target")):
                os.mkdir("/target")
            if(not os.path.exists("/source")):
                os.mkdir("/source")
            # find the squashfs..
            if(not os.path.exists(self.media)):
                print "Base filesystem does not exist! Critical error (exiting)."
                sys.exit(1) # change to report
       
            self.step_format_partitions(setup)
            self.step_mount_partitions(setup)                        
            
            # walk root filesystem
            SOURCE = "/source/"
            DEST = "/target/"
            directory_times = []
            our_total = 0
            our_current = -1
            os.chdir(SOURCE)
            # index the files
            print " --> Indexing files"
            for top,dirs,files in os.walk(SOURCE, topdown=False):
                our_total += len(dirs) + len(files)
                self.update_progress(pulse=True, message=_("Indexing files to be copied.."))
            our_total += 1 # safety margin
            print " --> Copying files"
            for top,dirs,files in os.walk(SOURCE):
                # Sanity check: strip the SOURCE prefix if os.walk() returned it
                dirpath = top
                if(dirpath.startswith(SOURCE)):
                    dirpath = dirpath[len(SOURCE):]
                for name in dirs + files:
                    # following is hacked/copied from Ubiquity
                    rpath = os.path.join(dirpath, name)
                    sourcepath = os.path.join(SOURCE, rpath)
                    targetpath = os.path.join(DEST, rpath)
                    st = os.lstat(sourcepath)
                    mode = stat.S_IMODE(st.st_mode)

                    # now show the world what we're doing                    
                    our_current += 1
                    self.update_progress(total=our_total, current=our_current, message=_("Copying %s" % rpath))

                    if os.path.exists(targetpath):
                        if not os.path.isdir(targetpath):
                            os.remove(targetpath)                        
                    if stat.S_ISLNK(st.st_mode):
                        if os.path.lexists(targetpath):
                            os.unlink(targetpath)
                        linkto = os.readlink(sourcepath)
                        os.symlink(linkto, targetpath)
                    elif stat.S_ISDIR(st.st_mode):
                        if not os.path.isdir(targetpath):
                            os.mkdir(targetpath, mode)
                    elif stat.S_ISCHR(st.st_mode):                        
                        os.mknod(targetpath, stat.S_IFCHR | mode, st.st_rdev)
                    elif stat.S_ISBLK(st.st_mode):
                        os.mknod(targetpath, stat.S_IFBLK | mode, st.st_rdev)
                    elif stat.S_ISFIFO(st.st_mode):
                        os.mknod(targetpath, stat.S_IFIFO | mode)
                    elif stat.S_ISSOCK(st.st_mode):
                        os.mknod(targetpath, stat.S_IFSOCK | mode)
                    elif stat.S_ISREG(st.st_mode):
                        # we don't do blacklisting yet..
                        try:
                            os.unlink(targetpath)
                        except:
                            pass
                        self.do_copy_file(sourcepath, targetpath)
                    os.lchown(targetpath, st.st_uid, st.st_gid)
                    if not stat.S_ISLNK(st.st_mode):
                        os.chmod(targetpath, mode)
                    if stat.S_ISDIR(st.st_mode):
                        directory_times.append((targetpath, st.st_atime, st.st_mtime))
                    # os.utime() sets timestamp of target, not link
                    elif not stat.S_ISLNK(st.st_mode):
                        os.utime(targetpath, (st.st_atime, st.st_mtime))
                # Apply timestamps to all directories now that the items within them
                # have been copied.
            print " --> Restoring meta-info"
            for dirtime in directory_times:
                (directory, atime, mtime) = dirtime
                try:
                    self.update_progress(pulse=True, message=_("Restoring meta-information on %s" % directory))
                    os.utime(directory, (atime, mtime))
                except OSError:
                    pass
                    
            # Steps:
            our_total = 10
            our_current = 0
            # chroot
            print " --> Chrooting"
            self.update_progress(total=our_total, current=our_current, message=_("Entering new system.."))            
            os.system("mount --bind /dev/ /target/dev/")
            os.system("mount --bind /dev/shm /target/dev/shm")
            os.system("mount --bind /dev/pts /target/dev/pts")
            os.system("mount --bind /sys/ /target/sys/")
            os.system("mount --bind /proc/ /target/proc/")
            os.system("cp -f /etc/resolv.conf /target/etc/resolv.conf")
                                          
            # remove live user
            print " --> Removing live user"
            live_user = self.live_user
            our_current += 1
            self.update_progress(total=our_total, current=our_current, message=_("Removing live configuration (user)"))
            self.do_run_in_chroot("deluser %s" % live_user)
            # can happen
            if(os.path.exists("/target/home/%s" % live_user)):
                self.do_run_in_chroot("rm -rf /home/%s" % live_user)
            
            # remove live-initramfs (or w/e)
            print " --> Removing live-initramfs"
            our_current += 1
            self.update_progress(total=our_total, current=our_current, message=_("Removing live configuration (packages)"))
            self.do_run_in_chroot("apt-get remove --purge --yes --force-yes live-boot live-boot-initramfs-tools live-initramfs live-installer live-config live-config-sysvinit")
            
            # add new user
            print " --> Adding new user"
            our_current += 1
            self.update_progress(total=our_total, current=our_current, message=_("Adding user to system"))           
            self.do_run_in_chroot("useradd -s %s -c \'%s\' -G sudo -m %s" % ("/bin/bash", setup.real_name, setup.username))
            newusers = open("/target/tmp/newusers.conf", "w")
            newusers.write("%s:%s\n" % (setup.username, setup.password1))
            newusers.write("root:%s\n" % setup.password1)
            newusers.close()
            self.do_run_in_chroot("cat /tmp/newusers.conf | chpasswd")
            self.do_run_in_chroot("rm -rf /tmp/newusers.conf")
            
            # write the /etc/fstab
            print " --> Writing fstab"
            our_current += 1
            self.update_progress(total=our_total, current=our_current, message=_("Writing filesystem mount information"))
            # make sure fstab has default /proc and /sys entries
            if(not os.path.exists("/target/etc/fstab")):
                os.system("echo \"#### Static Filesystem Table File\" > /target/etc/fstab")
            fstab = open("/target/etc/fstab", "a")
            fstab.write("proc\t/proc\tproc\tdefaults\t0\t0\n")
            for partition in setup.partitions:
                if (partition.mount_as is not None and partition.mount_as != "None"):
                    partition_uuid = partition.partition.path # If we can't find the UUID we use the path
                    try:                    
                        blkid = commands.getoutput('blkid').split('\n')
                        for blkid_line in blkid:
                            blkid_elements = blkid_line.split(':')
                            if blkid_elements[0] == partition.partition.path:
                                blkid_mini_elements = blkid_line.split()
                                for blkid_mini_element in blkid_mini_elements:
                                    if "UUID=" in blkid_mini_element:
                                        partition_uuid = blkid_mini_element.replace('"', '').strip()
                                        break
                                break
                    except Exception, detail:
                        print detail
                                        
                    fstab.write("# %s\n" % (partition.partition.path))                            
                    
                    if(partition.mount_as == "/"):
                        fstab_fsck_option = "1"
                    else:
                        fstab_fsck_option = "0" 
                                            
                    if("ext" in partition.type):
                        fstab_mount_options = "rw,errors=remount-ro"
                    else:
                        fstab_mount_options = "defaults"
                        
                    if(partition.type == "swap"):                    
                        fstab.write("%s\tswap\tswap\tsw\t0\t0\n" % partition_uuid)
                    else:                                                    
                        fstab.write("%s\t%s\t%s\t%s\t%s\t%s\n" % (partition_uuid, partition.mount_as, partition.type, fstab_mount_options, "0", fstab_fsck_option))
            fstab.close()
            
            # write host+hostname infos
            print " --> Writing hostname"
            our_current += 1
            self.update_progress(total=our_total, current=our_current, message=_("Setting hostname"))
            hostnamefh = open("/target/etc/hostname", "w")
            hostnamefh.write("%s\n" % setup.hostname)
            hostnamefh.close()
            hostsfh = open("/target/etc/hosts", "w")
            hostsfh.write("127.0.0.1\tlocalhost\n")
            hostsfh.write("127.0.1.1\t%s\n" % setup.hostname)
            hostsfh.write("# The following lines are desirable for IPv6 capable hosts\n")
            hostsfh.write("::1     localhost ip6-localhost ip6-loopback\n")
            hostsfh.write("fe00::0 ip6-localnet\n")
            hostsfh.write("ff00::0 ip6-mcastprefix\n")
            hostsfh.write("ff02::1 ip6-allnodes\n")
            hostsfh.write("ff02::2 ip6-allrouters\n")
            hostsfh.write("ff02::3 ip6-allhosts\n")
            hostsfh.close()

            # gdm overwrite (specific to Debian/live-initramfs)
            # print " --> Configuring GDM"
            # gdmconffh = open("/target/etc/gdm3/daemon.conf", "w")
            # gdmconffh.write("# GDM configuration storage\n")
            # gdmconffh.write("\n[daemon]\n")
            # gdmconffh.write("\n[security]\n")
            # gdmconffh.write("\n[xdmcp]\n")
            # gdmconffh.write("\n[greeter]\n")
            # gdmconffh.write("\n[chooser]\n")
            # gdmconffh.write("\n[debug]\n")
            # gdmconffh.close()

            # set the locale
            print " --> Setting the locale"
            our_current += 1
            self.update_progress(total=our_total, current=our_current, message=_("Setting locale"))
            os.system("echo \"%s.UTF-8 UTF-8\" >> /target/etc/locale.gen" % setup.language)
            self.do_run_in_chroot("locale-gen")
            os.system("echo \"\" > /target/etc/default/locale")
            self.do_run_in_chroot("update-locale LANG=\"%s.UTF-8\"" % setup.language)
            self.do_run_in_chroot("update-locale LANG=%s.UTF-8" % setup.language)

            # set the timezone
            ########## print " --> Setting the timezone"
            ########## os.system("echo \"%s\" > /target/etc/timezone" % setup.timezone_code)
            ########## os.system("cp /target/usr/share/zoneinfo/%s /target/etc/localtime" % setup.timezone)
            
            ######### # localize Firefox and Thunderbird
            ######### print " --> Localizing Firefox and Thunderbird"
            ######### self.update_progress(total=our_total, current=our_current, message=_("Localizing Firefox and Thunderbird"))
            ######### if setup.language != "en_US":                
                ######### os.system("apt-get update")
                ######### self.do_run_in_chroot("apt-get update")
                ######### locale = setup.language.replace("_", "-").lower()                
                              #########  
                ######### num_res = commands.getoutput("aptitude search firefox-l10n-%s | grep firefox-l10n-%s | wc -l" % (locale, locale))
                ######### if num_res != "0":                    
                    ######### self.do_run_in_chroot("apt-get install --yes --force-yes firefox-l10n-" + locale)
                ######### else:
                    ######### if "_" in setup.language:
                        ######### language_code = setup.language.split("_")[0]
                        ######### num_res = commands.getoutput("aptitude search firefox-l10n-%s | grep firefox-l10n-%s | wc -l" % (language_code, language_code))
                        ######### if num_res != "0":                            
                            ######### self.do_run_in_chroot("apt-get install --yes --force-yes firefox-l10n-" + language_code)
              #########  
                ######### num_res = commands.getoutput("aptitude search thunderbird-l10n-%s | grep thunderbird-l10n-%s | wc -l" % (locale, locale))
                ######### if num_res != "0":
                    ######### self.do_run_in_chroot("apt-get install --yes --force-yes thunderbird-l10n-" + locale)
                ######### else:
                    ######### if "_" in setup.language:
                        ######### language_code = setup.language.split("_")[0]
                        ######### num_res = commands.getoutput("aptitude search thunderbird-l10n-%s | grep thunderbird-l10n-%s | wc -l" % (language_code, language_code))
                        ######### if num_res != "0":
                            ######### self.do_run_in_chroot("apt-get install --yes --force-yes thunderbird-l10n-" + language_code)                                                                                        

            # set the keyboard options..
            ######### print " --> Setting the keyboard"
            ######### our_current += 1
            ######### self.update_progress(total=our_total, current=our_current, message=_("Setting keyboard options"))
            ######### consolefh = open("/target/etc/default/console-setup", "r")
            ######### newconsolefh = open("/target/etc/default/console-setup.new", "w")
            ######### for line in consolefh:
                ######### line = line.rstrip("\r\n")
                ######### if(line.startswith("XKBMODEL=")):
                    ######### newconsolefh.write("XKBMODEL=\"%s\"\n" % setup.keyboard_model)
                ######### elif(line.startswith("XKBLAYOUT=")):
                    ######### newconsolefh.write("XKBLAYOUT=\"%s\"\n" % setup.keyboard_layout)
                ######### elif(line.startswith("XKBVARIANT=") and setup.keyboard_variant is not None):
                    ######### newconsolefh.write("XKBVARIANT=\"%s\"\n" % setup.keyboard_variant)
                ######### else:
                    ######### newconsolefh.write("%s\n" % line)
            ######### consolefh.close()
            ######### newconsolefh.close()
            ######### self.do_run_in_chroot("rm /etc/default/console-setup")
            ######### self.do_run_in_chroot("mv /etc/default/console-setup.new /etc/default/console-setup")
            ######### 
            ######### consolefh = open("/target/etc/default/keyboard", "r")
            ######### newconsolefh = open("/target/etc/default/keyboard.new", "w")
            ######### for line in consolefh:
                ######### line = line.rstrip("\r\n")
                ######### if(line.startswith("XKBMODEL=")):
                    ######### newconsolefh.write("XKBMODEL=\"%s\"\n" % setup.keyboard_model)
                ######### elif(line.startswith("XKBLAYOUT=")):
                    ######### newconsolefh.write("XKBLAYOUT=\"%s\"\n" % setup.keyboard_layout)
                ######### elif(line.startswith("XKBVARIANT=") and setup.keyboard_variant is not None):
                    ######### newconsolefh.write("XKBVARIANT=\"%s\"\n" % setup.keyboard_variant)
                ######### else:
                    ######### newconsolefh.write("%s\n" % line)
            ######### consolefh.close()
            ######### newconsolefh.close()
            ######### self.do_run_in_chroot("rm /etc/default/keyboard")
            ######### self.do_run_in_chroot("mv /etc/default/keyboard.new /etc/default/keyboard")

            # write MBR (grub)
            print " --> Configuring Grub"
            our_current += 1
            if(setup.grub_device is not None):
                self.update_progress(pulse=True, total=our_total, current=our_current, message=_("Installing bootloader"))
                print " --> Running grub-install"
                self.do_run_in_chroot("grub-install --force %s" % setup.grub_device)
                self.do_configure_grub(our_total, our_current)
                ####### grub_retries = 0
                ####### while (not self.do_check_grub(our_total, our_current)):
                    ####### self.do_configure_grub(our_total, our_current)
                    ####### grub_retries = grub_retries + 1
                    ####### if grub_retries >= 5:
                        ####### self.error_message(critical=True, message=_("WARNING: The grub bootloader was not configured properly! You need to configure it manually."))
                        ####### break
                        
            # Finish Install
            print " --> Finish Install"
            our_current += 1
            self.update_progress(pulse=True, total=our_total, current=our_current, message=_("Finishing the installation"))
            os.system("chroot /target/ /bin/sh -c \"/opt/PARDUS/files/finish_install.sh\"")
            
            # now unmount it
            print " --> Unmounting partitions"
            try:
                os.system("umount --force /target/dev/shm")
                os.system("umount --force /target/dev/pts")
                os.system("umount --force /target/dev/")
                os.system("umount --force /target/sys/")
                os.system("umount --force /target/proc/")
                os.system("rm -rf /target/etc/resolv.conf")
                for partition in setup.partitions:
                    if(partition.mount_as is not None and partition.mount_as != "" and partition.mount_as != "/" and partition.mount_as != "swap"):
                        self.do_unmount("/target" + partition.mount_as)
                self.do_unmount("/target")
                self.do_unmount("/source")
            except Exception, detail:
                #best effort, no big deal if we can't umount something
                print detail
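
The copy loop in this installer is essentially "recreate each entry according to its stat type, then restore ownership, mode and timestamps". A condensed sketch of that per-entry logic, with shutil.copyfile standing in for do_copy_file and timestamps applied immediately rather than deferring directories until their contents are copied; all names here are mine:

import os
import shutil
import stat

def replicate_entry(sourcepath, targetpath):
    # Recreate one filesystem entry according to its type, then restore
    # ownership and permissions; symlinks keep their own owner but never
    # get chmod/utime applied, matching the loop above.
    st = os.lstat(sourcepath)
    mode = stat.S_IMODE(st.st_mode)
    if stat.S_ISLNK(st.st_mode):
        if os.path.lexists(targetpath):
            os.unlink(targetpath)
        os.symlink(os.readlink(sourcepath), targetpath)
    elif stat.S_ISDIR(st.st_mode):
        if not os.path.isdir(targetpath):
            os.mkdir(targetpath, mode)
    elif stat.S_ISCHR(st.st_mode):
        os.mknod(targetpath, stat.S_IFCHR | mode, st.st_rdev)
    elif stat.S_ISBLK(st.st_mode):
        os.mknod(targetpath, stat.S_IFBLK | mode, st.st_rdev)
    elif stat.S_ISFIFO(st.st_mode):
        os.mknod(targetpath, stat.S_IFIFO | mode)
    elif stat.S_ISSOCK(st.st_mode):
        os.mknod(targetpath, stat.S_IFSOCK | mode)
    elif stat.S_ISREG(st.st_mode):
        shutil.copyfile(sourcepath, targetpath)
    os.lchown(targetpath, st.st_uid, st.st_gid)
    if not stat.S_ISLNK(st.st_mode):
        os.chmod(targetpath, mode)
        os.utime(targetpath, (st.st_atime, st.st_mtime))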