Example #1
 def getattr(self, path, fileh=None):
     auth, realbase, restpath, validops, deniedops = self._getvars(path)
     if auth is None:
         stats = os.lstat(settings.MSERVE_DATA)
         return _stat_to_dict(stats,
                         {"st_uid":self.uid, "st_gid":self.gid})
     else:
         if len(path) == 0:
             stats = os.lstat(settings.MSERVE_DATA)
             return _stat_to_dict(stats,
                         {"st_uid":self.uid, "st_gid":self.gid})
         else:
             try:
                 mfile = realbase.get_file_for_paths(restpath)
                 if mfile:
                     stats = os.stat(mfile.file.path)
                     return _stat_to_dict(stats)
             except Exception as exception:
                 logging.debug(exception)
             try:
                 realbase.get_folder_for_paths(restpath)
                 # TODO - Check this is secure to return stat for ./
                 stats = os.lstat(settings.MSERVE_DATA)
                 return _stat_to_dict(stats,
                     {"st_uid":self.uid, "st_gid":self.gid, "st_size":5000})
             except Exception as exception:
                 logging.debug(exception)
     raise FuseOSError(ENOENT)
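The `_stat_to_dict` helper used above is not shown. A minimal sketch of one plausible implementation (hypothetical, not the project's actual helper): copy the stat fields FUSE clients expect into a plain dict, then apply any caller-supplied overrides.

def _stat_to_dict(stats, overrides=None):
    # Hypothetical sketch: expose the usual stat fields as a dict and
    # let the caller override selected entries (e.g. st_uid, st_size).
    result = dict((key, getattr(stats, key)) for key in
                  ('st_atime', 'st_ctime', 'st_gid', 'st_mode',
                   'st_mtime', 'st_nlink', 'st_size', 'st_uid'))
    if overrides:
        result.update(overrides)
    return result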
Example #2
 def _assert_dirs_equal_cmp(self, diff):
     self.assert_equal(diff.left_only, [])
     self.assert_equal(diff.right_only, [])
     self.assert_equal(diff.diff_files, [])
     self.assert_equal(diff.funny_files, [])
     for filename in diff.common:
         path1 = os.path.join(diff.left, filename)
         path2 = os.path.join(diff.right, filename)
         s1 = os.lstat(path1)
         s2 = os.lstat(path2)
         # Assume path2 is on FUSE if st_dev is different
         fuse = s1.st_dev != s2.st_dev
         attrs = ['st_mode', 'st_uid', 'st_gid', 'st_rdev']
         if not fuse or not os.path.isdir(path1):
             # dir nlink is always 1 on our fuse filesystem
             attrs.append('st_nlink')
         d1 = [filename] + [getattr(s1, a) for a in attrs]
         d2 = [filename] + [getattr(s2, a) for a in attrs]
         if not os.path.islink(path1) or utime_supports_fd:
             # Older versions of llfuse do not support ns precision properly
             if fuse and not have_fuse_mtime_ns:
                 d1.append(round(st_mtime_ns(s1), -4))
                 d2.append(round(st_mtime_ns(s2), -4))
             else:
                 d1.append(round(st_mtime_ns(s1), st_mtime_ns_round))
                 d2.append(round(st_mtime_ns(s2), st_mtime_ns_round))
         d1.append(get_all(path1, follow_symlinks=False))
         d2.append(get_all(path2, follow_symlinks=False))
         self.assert_equal(d1, d2)
     for sub_diff in diff.subdirs.values():
         self._assert_dirs_equal_cmp(sub_diff)
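`st_mtime_ns` and `st_mtime_ns_round` come from the surrounding test module. A plausible compatibility shim (an assumption, not necessarily the project's real helper) falls back to scaling the float timestamp when the nanosecond field is missing:

def st_mtime_ns(st):
    # Prefer the exact nanosecond field (Python >= 3.3); otherwise scale
    # the float seconds, accepting some precision loss.
    return getattr(st, 'st_mtime_ns', int(st.st_mtime * 1e9))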
Example #3
def glob(pathname):
    """Return a list of paths matching a pathname pattern.

    The pattern may contain simple shell-style wildcards a la fnmatch.

    """
    if not has_magic(pathname):
        return [pathname]
    dirname, basename = os.path.split(pathname)
    if not dirname:
        return glob1(os.curdir, basename)
    elif has_magic(dirname):
        list = glob(dirname)
    else:
        list = [dirname]
    if not has_magic(basename):
        result = []
        for dirname in list:
            if basename or os.path.isdir(dirname):
                name = os.path.join(dirname, basename)
                try:
                    os.lstat(name)
                except OSError, err:
                    # if the file does not exist, or if an element of the
                    # path to the file is not a directory, ignore the error
                    if err.errno != errno.ENOENT and err.errno != errno.ENOTDIR:
                        raise
                else:
                    result.append(name)
    else:
        result = []
        for dirname in list:
            sublist = glob1(dirname, basename)
            for name in sublist:
                result.append(os.path.join(dirname, name))
    return result
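For reference, `has_magic` and `glob1` are the classic helpers from the Python 2-era glob module; the sketch below follows their well-known shape (and assumes the module's usual `import os`):

import fnmatch
import re

magic_check = re.compile('[*?[]')

def has_magic(s):
    return magic_check.search(s) is not None

def glob1(dirname, pattern):
    # List the directory and keep the entries matching the shell pattern;
    # hidden files are skipped unless the pattern itself starts with a dot.
    if not dirname:
        dirname = os.curdir
    try:
        names = os.listdir(dirname)
    except os.error:
        return []
    if pattern[0] != '.':
        names = filter(lambda x: x[0] != '.', names)
    return fnmatch.filter(names, pattern)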
Example #4
def check_dir(inst, type, mode):
    if type == 'xlogs':
        limit_ut = int(inst['xlogs_mon_max_lag']) * 3600
    elif type == 'snaps':
        limit_ut = int(inst['snaps_mon_max_lag']) * 3600
    elif type == 'other':
        limit_ut = int(inst['other_mon_max_lag']) * 3600
    else:
        limit_ut = 86400

    if mode == 'tarantool':
        root = '/backup/%s/%s/' % (inst['rsync_modulepath'], type)
    elif mode == 'silver':
        name = inst['base_dir'].rsplit('/')[-1]
        root = '/backup/%s/%s/%s/' % (inst['rsync_modulepath'], name, type)

    if not os.path.isdir(root):
        logger.critical("Directory '%s' does not exist" % root)
        return
    if os.listdir(root):
        ### "Other" backups tend to have same mtime for a long period of time, so we will check their ctime
        if type == 'other':
            newest_file = max([os.path.join(root, f) for f in os.listdir(root) if not f.startswith('.')], key=os.path.getctime)
            last_ctime = os.lstat(newest_file).st_ctime
            if last_ctime < now - limit_ut:
                hours_ago = int((now - last_ctime) / 60 // 60)
                logger.critical("Last backup in '%s' was made more than %s hours ago" % (root, hours_ago))
        else:
            newest_file = max([os.path.join(root, f) for f in os.listdir(root) if not f.startswith('.')], key=os.path.getmtime)
            last_mtime = os.lstat(newest_file).st_mtime
            if last_mtime < now - limit_ut:
                hours_ago = int((now - last_mtime) / 60 // 60)
                logger.critical("Last backup in '%s' was made more than %s hours ago" % (root, hours_ago))
    else:
        logger.critical("Directory '%s' is empty" % root)
Example #5
    def test_lchflags_symlink(self):
        testfn_st = os.stat(test_support.TESTFN)

        self.assertTrue(hasattr(testfn_st, 'st_flags'))

        os.symlink(test_support.TESTFN, _DUMMY_SYMLINK)
        self.teardown_files.append(_DUMMY_SYMLINK)
        dummy_symlink_st = os.lstat(_DUMMY_SYMLINK)

        # ZFS returns EOPNOTSUPP when attempting to set flag UF_IMMUTABLE.
        try:
            posix.lchflags(_DUMMY_SYMLINK,
                           dummy_symlink_st.st_flags | stat.UF_IMMUTABLE)
        except OSError as err:
            if err.errno != errno.EOPNOTSUPP:
                raise
            msg = 'chflag UF_IMMUTABLE not supported by underlying fs'
            self.skipTest(msg)

        try:
            new_testfn_st = os.stat(test_support.TESTFN)
            new_dummy_symlink_st = os.lstat(_DUMMY_SYMLINK)

            self.assertEqual(testfn_st.st_flags, new_testfn_st.st_flags)
            self.assertEqual(dummy_symlink_st.st_flags | stat.UF_IMMUTABLE,
                             new_dummy_symlink_st.st_flags)
        finally:
            posix.lchflags(_DUMMY_SYMLINK, dummy_symlink_st.st_flags)
Example #6
def lexists(path: AnyStr) -> bool:
    """Test whether a path exists.  Returns True for broken symbolic links"""
    try:
        os.lstat(path)
    except os.error:
        return False
    return True
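A quick demonstration of why `lexists` is built on `os.lstat`: for a dangling symlink, `os.path.exists` follows the link and reports False, while `lexists` inspects the link itself (a sketch, assuming a Unix-like platform):

import os
import tempfile

d = tempfile.mkdtemp()
link = os.path.join(d, "dangling")
os.symlink(os.path.join(d, "no-such-target"), link)  # target is never created

print(os.path.exists(link))  # False: follows the link to the missing target
print(lexists(link))         # True: os.lstat() succeeds on the link itself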
Example #7
def tst_link(mnt_dir):
    name1 = pjoin(mnt_dir, name_generator())
    name2 = pjoin(mnt_dir, name_generator())
    shutil.copyfile(TEST_FILE, name1)
    assert filecmp.cmp(name1, TEST_FILE, False)

    fstat1 = os.lstat(name1)
    assert fstat1.st_nlink == 1

    os.link(name1, name2)

    fstat1 = os.lstat(name1)
    fstat2 = os.lstat(name2)
    for attr in ('st_mode', 'st_dev', 'st_uid', 'st_gid',
                 'st_size', 'st_atime', 'st_mtime', 'st_ctime'):
        assert getattr(fstat1, attr) == getattr(fstat2, attr)
    assert os.path.basename(name2) in os.listdir(mnt_dir)
    assert filecmp.cmp(name1, name2, False)

    os.unlink(name2)

    assert os.path.basename(name2) not in os.listdir(mnt_dir)
    with pytest.raises(FileNotFoundError):
        os.lstat(name2)

    os.unlink(name1)
Example #8
def lexists(path):
    """Test whether a path exists.  Returns True for broken symbolic links"""
    try:
        os.lstat(path)
    except OSError:
        return False
    return True
Example #9
def ismount(path):
    """Test whether a path is a mount point"""
    try:
        s1 = os.lstat(path)
    except OSError:
        # It doesn't exist -- so not a mount point. :-)
        return False
    else:
        # A symlink can never be a mount point
        if stat.S_ISLNK(s1.st_mode):
            return False

    if isinstance(path, bytes):
        parent = join(path, b'..')
    else:
        parent = join(path, '..')
    try:
        s2 = os.lstat(parent)
    except OSError:
        return False

    dev1 = s1.st_dev
    dev2 = s2.st_dev
    if dev1 != dev2:
        return True     # path/.. on a different device than path
    ino1 = s1.st_ino
    ino2 = s2.st_ino
    if ino1 == ino2:
        return True     # path/.. is the same i-node as path
    return False
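Usage sketch: the device comparison catches ordinary mounts, and the inode comparison catches the root directory, where path and path/.. are the same inode:

print(ismount('/'))           # True: '/' and '/..' share an inode
print(ismount('/proc'))       # True if procfs is mounted (different st_dev)
print(ismount('/etc/hosts'))  # False: lstat('/etc/hosts/..') fails with ENOTDIR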
Example #10
def ignore_tree (root_dir):
    for dir_path, sd, files in os.walk(root_dir):
        perm = oct(os.lstat(dir_path).st_mode & 0o777).rjust(4, '0')[1:]
        yield ('ign', 'd', perm, dir_path)
        for f in files:
            perm = oct(os.lstat(dir_path+'/'+f).st_mode & 0o777).rjust(4, '0')[1:]
            yield ('ign', 'f', perm, dir_path + '/' + f)
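Note that the `oct(...).rjust(4, '0')[1:]` idiom assumes Python 2, where `oct(0o755)` yields '0755'; on Python 3 it yields '0o755' and the slice keeps the stray 'o'. A version-independent sketch of the same permission string:

import os

def perm_string(path):
    # Format the permission bits as exactly three octal digits,
    # e.g. 0o755 -> '755', on Python 2 and 3 alike.
    return '%03o' % (os.lstat(path).st_mode & 0o777)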
Example #11
def gen_items (rootdir, verbose=False):
    global locked_dirs

    if os.path.isfile(rootdir):

        file_path = rootdir

        dir_path = os.path.dirname(rootdir)
        file_name = os.path.basename(rootdir)

        perm = oct(os.lstat(file_path).st_mode & 0o777).rjust(4, '0')[1:]

        action = get_file_action(perm, dir_path, file_name)

        if (action != 'ign' and action != perm) or verbose:
            yield (action, 'f', perm, dir_path + '/' + file_name)

        return

    for dir_path, sub_dirs, files in os.walk(rootdir):
        perm = oct(os.lstat(dir_path).st_mode & 0o777).rjust(4, '0')[1:]

        action = get_dir_action(perm, dir_path, sub_dirs, files)

        if action == 'ign' or action == 'non':
            if verbose:
                for i in ignore_tree(dir_path):
                    yield i
            del sub_dirs[:]
        else:
            if action != perm or (action == perm and verbose):
                yield (action, 'd', perm, dir_path)

            ign_sub_dir_list = get_ignore_sub_dirs_list(sub_dirs)

            if verbose:
                for i in ign_sub_dir_list:
                    for j in ignore_tree(dir_path + '/' + i):
                        yield j

            for i in ign_sub_dir_list:
                sub_dirs.remove(i)

            # may have problem to walk
            for i in sub_dirs:
                perm = oct(os.lstat(dir_path + '/' + i).st_mode & 0o777).rjust(4, '0')[1:]
                if re.match(r'^.*[0123].*$', perm):
                    locked_dirs.append(
                        '[{perm}] {itemname}'.format(
                        perm=perm,
                        itemname=dir_path+'/'+i) )

            for file_name in files:
                perm = oct(os.lstat(dir_path + '/' + file_name).st_mode & 0o777).rjust(4, '0')[1:]
                action = get_file_action(perm, dir_path, file_name)

                output = False

                if (action != 'ign' and action != perm) or verbose:
                    yield (action, 'f', perm, dir_path + '/' + file_name)
Example #12
 def convertGroups( self, dir, repFile, ignorefilelist):
     for f in os.listdir(dir):
         pathname = '%s/%s' % (dir, f)
         mode = os.lstat(pathname)[ST_MODE]
         if S_ISDIR(mode):
             # It's a directory therefore it must be a COVISE group
             for ff in os.listdir(pathname):
                 execName = '%s/%s' % (pathname, ff)
                 if not execName.endswith(".pdb") and not execName.endswith(".ilk") and not execName.endswith(".exp") and not execName.endswith(".lib") and not execName.endswith(".suo"):
                     skipfile = False
                     # check to see, if filename is to be ignored
                     if ignorefilelist is not None:
                         for fignore in ignorefilelist:
                             actualfile = '%s/%s' % (f, ff)
                             #repFile.write("convertGroups - comparing ignorefile=%s w/ actualfile=%s\n" % (fignore, actualfile))
                             if (fignore == actualfile):
                                 #repFile.write("convertGroups - will ignore file=%s\n" % fignore)
                                 skipfile = True
                     if not skipfile:
                         repFile.write(bytes("convertGroups - processing file=",'UTF-8'))
                         repFile.write(bytes(execName,'UTF-8'))
                         repFile.write(bytes("\n",'UTF-8'))
                         mmode = os.lstat(execName)[ST_MODE]
                         if S_ISREG(mmode) and (ff[0] != "."):
                             # It's a file, call the callback function
                             self.convertFromFile( execName, repFile)
                     else:
                         repFile.write("convertGroups - ignoring file=%s\n" % execName)
Example #13
def lexists(filename):
    "test whether a file with this name exists. does not follow symlinks"
    try:
        os.lstat(filename)
    except OSError:
        return False
    return True
Example #14
 def test__write_inventory(self):
     # The private interface _write_inventory is currently used by transform.
     tree = self.make_branch_and_tree('.')
     # if we write an inventory and then do a walkdirs we should get back
     # missing entries, and actual, and unknowns as appropriate.
     self.build_tree(['present', 'unknown'])
     inventory = Inventory(tree.get_root_id())
     inventory.add_path('missing', 'file', 'missing-id')
     inventory.add_path('present', 'file', 'present-id')
     # there is no point in being able to write an inventory to an unlocked
     # tree object - it's a low-level api, not a convenience api.
     tree.lock_write()
     tree._write_inventory(inventory)
     tree.unlock()
     tree.lock_read()
     try:
         present_stat = os.lstat('present')
         unknown_stat = os.lstat('unknown')
         expected_results = [
             (('', tree.get_root_id()),
              [('missing', 'missing', 'unknown', None, 'missing-id', 'file'),
               ('present', 'present', 'file', present_stat, 'present-id', 'file'),
               ('unknown', 'unknown', 'file', unknown_stat, None, None),
              ]
             )]
         self.assertEqual(expected_results, list(tree.walkdirs()))
     finally:
         tree.unlock()
Example #15
    def getSubdirSize(directory):
        # returns size in bytes
        try:
            mydev = os.lstat(directory)[stat.ST_DEV]
        except OSError as e:
            log.debug("failed to stat %s: %s", directory, e)
            return 0

        try:
            dirlist = os.listdir(directory)
        except OSError as e:
            log.debug("failed to listdir %s: %s", directory, e)
            return 0

        dsize = 0
        for f in dirlist:
            curpath = '%s/%s' % (directory, f)
            try:
                sinfo = os.lstat(curpath)
            except OSError as e:
                log.debug("failed to stat %s/%s: %s", directory, f, e)
                continue

            if stat.S_ISDIR(sinfo[stat.ST_MODE]):
                if os.path.ismount(curpath):
                    continue
                if mydev == sinfo[stat.ST_DEV]:
                    dsize += getSubdirSize(curpath)
            elif stat.S_ISREG(sinfo[stat.ST_MODE]):
                dsize += sinfo[stat.ST_SIZE]

        return dsize
Example #16
def VerEXIST():

    # xpath = argv[1]

    if argv[1][-1] == '/':
        xpath = argv[1][:-1]
    else:
        xpath = argv[1]

    if os.path.exists(xpath):
        if os.path.isfile(xpath):
            right = oct(stat.S_IMODE(os.lstat(argv[1]).st_mode))
            if right == '0':
                print("You do not have the necessary permissions.")
            elif right[1] in ('4', '5', '6', '7'):
                FileEXIST()
            else:
                print("You do not have the necessary permissions.")

        if os.path.isdir(xpath):
            right = oct(stat.S_IMODE(os.lstat(argv[1]).st_mode))
            if right == '0':
                print("You do not have the necessary permissions.")
            elif right[1] in ('4', '5', '6', '7'):
                DirectoryEXIST()
            else:
                print("You do not have the necessary permissions.")
    else:
        print(xpath + " --> does not exist.")
Example #17
def rm_pkgs(args):
    # TODO: This doesn't handle packages that have hard links to files within
    # themselves, like bin/python3.3 and bin/python3.3m in the Python package
    from os.path import join, isdir
    from os import lstat, walk, listdir
    from conda.install import rm_rf

    pkgs_dir = config.pkgs_dirs[0]
    print('Cache location: %s' % pkgs_dir)

    rmlist = []
    pkgs = [i for i in listdir(pkgs_dir) if isdir(join(pkgs_dir, i)) and
        # Only include actual packages
        isdir(join(pkgs_dir, i, 'info'))]
    for pkg in pkgs:
        breakit = False
        for root, dir, files in walk(join(pkgs_dir, pkg)):
            if breakit:
                break
            for fn in files:
                try:
                    stat = lstat(join(root, fn))
                except OSError as e:
                    print(e)
                    continue
                if stat.st_nlink > 1:
                    # print('%s is installed: %s' % (pkg, join(root, fn)))
                    breakit = True
                    break
        else:
            rmlist.append(pkg)

    if not rmlist:
        print("There are no unused packages to remove")
        sys.exit(0)

    print("Will remove the following packages:")
    print()
    totalsize = 0
    maxlen = len(max(rmlist, key=lambda x: len(str(x))))
    fmt = "%-40s %10s"
    for pkg in rmlist:
        pkgsize = 0
        for root, dir, files in walk(join(pkgs_dir, pkg)):
            for fn in files:
                # We don't have to worry about counting things twice:  by
                # definition these files all have a link count of 1!
                size = lstat(join(root, fn)).st_size
                totalsize += size
                pkgsize += size
        print(fmt % (pkg, human_bytes(pkgsize)))
    print('-' * (maxlen + 2 + 10))
    print(fmt % ('Total:', human_bytes(totalsize)))
    print()

    common.confirm_yn(args)

    for pkg in rmlist:
        print("removing %s" % pkg)
        rm_rf(join(pkgs_dir, pkg))
Example #18
 def getattr(self, path, fh=None):
     if os.path.basename(args.src) in path:
         st = os.lstat(args.src)
     else:
         st = os.lstat(path)
     return dict((key, getattr(st, key)) for key in ('st_atime', 'st_gid',
         'st_mode', 'st_mtime', 'st_size', 'st_uid'))
Example #19
def walk(path):
    os.lstat(path)
    for dir_path, dirs, filenames in os.walk(path):
        for filename in filenames:
            full_path = os.path.join(dir_path, filename)
            rel_path = full_path[len(path) + 1:]
            yield rel_path.replace(os.path.sep, '/'), str(full_path)
Example #20
    def test_write_renewal_config(self):
        # Mostly tested by the process of creating and updating lineages,
        # but we can test that this successfully creates files, removes
        # unneeded items, and preserves comments.
        temp = os.path.join(self.config.config_dir, "sample-file")
        temp2 = os.path.join(self.config.config_dir, "sample-file.new")
        with open(temp, "w") as f:
            f.write("[renewalparams]\nuseful = value # A useful value\n"
                    "useless = value # Not needed\n")
        os.chmod(temp, 0o640)
        target = {}
        for x in ALL_FOUR:
            target[x] = "somewhere"
        archive_dir = "the_archive"
        relevant_data = {"useful": "new_value"}

        from certbot import storage
        storage.write_renewal_config(temp, temp2, archive_dir, target, relevant_data)

        with open(temp2, "r") as f:
            content = f.read()
        # useful value was updated
        self.assertTrue("useful = new_value" in content)
        # associated comment was preserved
        self.assertTrue("A useful value" in content)
        # useless value was deleted
        self.assertTrue("useless" not in content)
        # check version was stored
        self.assertTrue("version = {0}".format(certbot.__version__) in content)
        # ensure permissions are copied
        self.assertEqual(stat.S_IMODE(os.lstat(temp).st_mode),
                         stat.S_IMODE(os.lstat(temp2).st_mode))
Example #21
    def test_lchflags_symlink(self):
        testfn_st = os.stat(support.TESTFN)

        self.assertTrue(hasattr(testfn_st, "st_flags"))

        os.symlink(support.TESTFN, _DUMMY_SYMLINK)
        self.teardown_files.append(_DUMMY_SYMLINK)
        dummy_symlink_st = os.lstat(_DUMMY_SYMLINK)

        def chflags_nofollow(path, flags):
            return posix.chflags(path, flags, follow_symlinks=False)

        for fn in (posix.lchflags, chflags_nofollow):
            # ZFS returns EOPNOTSUPP when attempting to set flag UF_IMMUTABLE.
            flags = dummy_symlink_st.st_flags | stat.UF_IMMUTABLE
            try:
                fn(_DUMMY_SYMLINK, flags)
            except OSError as err:
                if err.errno != errno.EOPNOTSUPP:
                    raise
                msg = "chflag UF_IMMUTABLE not supported by underlying fs"
                self.skipTest(msg)
            try:
                new_testfn_st = os.stat(support.TESTFN)
                new_dummy_symlink_st = os.lstat(_DUMMY_SYMLINK)

                self.assertEqual(testfn_st.st_flags, new_testfn_st.st_flags)
                self.assertEqual(dummy_symlink_st.st_flags | stat.UF_IMMUTABLE, new_dummy_symlink_st.st_flags)
            finally:
                fn(_DUMMY_SYMLINK, dummy_symlink_st.st_flags)
Example #22
def TestOwnerGroupMode(DestinationPath, SourcePath, fc):
    stat_info = os.lstat(DestinationPath)

    if SourcePath:
        stat_info_src = os.lstat(SourcePath)

    if fc.Owner:
        Specified_Owner_ID = pwd.getpwnam(fc.Owner)[2]
        if Specified_Owner_ID != pwd.getpwuid(stat_info.st_uid)[2]:
            return False
    elif SourcePath:
        # Owner wasn't specified, if SourcePath is specified then check that the Owners match
        if pwd.getpwuid(stat_info.st_uid)[2] != pwd.getpwuid(stat_info_src.st_uid)[2]:
            return False

    if fc.Group:
        Specified_Group_ID = grp.getgrnam(fc.Group)[2]
        if Specified_Group_ID != grp.getgrgid(stat_info.st_gid)[2]:
            return False
    elif SourcePath:
        # Group wasn't specified, if SourcePath is specified then check that the Groups match
        if grp.getgrgid(stat_info.st_gid)[2] != grp.getgrgid(stat_info_src.st_gid)[2]:
            return False
    
    # Mode is irrelevant to symlinks
    if not os.path.islink(DestinationPath):
        if fc.Mode:
            if str(oct(stat_info.st_mode))[-3:] != fc.Mode:
                return False
        elif SourcePath:
            # Mode wasn't specified, if SourcePath is specified then check that the Modes match
            if str(oct(stat_info.st_mode))[-3:] != str(oct(stat_info_src.st_mode))[-3:]:
                return False

    return True
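`fc` is the file-resource context supplied by the caller. A hypothetical stand-in (purely for illustration, not the real class) is enough to exercise the function:

import collections

FileContext = collections.namedtuple('FileContext', ['Owner', 'Group', 'Mode'])

fc = FileContext(Owner='root', Group='root', Mode='644')
print(TestOwnerGroupMode('/etc/hosts', None, fc))  # True only for root:root mode 644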
Example #23
def GetURLValues(url, inProps):

    ns_dict, error =  url.resourceValuesForKeys_error_( inProps+[NSURLIsVolumeKey, u'NSURLParentDirectoryURLKey', NSURLIsDirectoryKey, NSURLIsPackageKey] , None )
    
    if error is not None:
        raise MyError(error.code()  , error.localizedDescription())

    ns_dict = dict( ns_dict )  #shallow copy, no per-item conversion.
    # ns_dict = ns_dict.mutableCopy()

    p = url.path()
    file_id = os.lstat(p).st_ino

    # [anotherDict setObject: dict forKey: "sub-dictionary-key"];
    ns_dict[NSFileSystemFileNumber] = file_id 
    ns_dict[NSURLPathKey] = p 
    ns_dict['url'] = url

    if ns_dict[NSURLIsDirectoryKey]:
        ns_dict.update(  {  "NSURLTotalFileSizeKey":  0 })  # file size is zero for directories

    if ns_dict[NSURLIsVolumeKey]:
        ns_dict[NSFileSystemFolderNumber] = 1L
    else:
        folder_url  = ns_dict[NSURLParentDirectoryURLKey]
        fp          = folder_url.path()
        folder_id   = os.lstat(fp).st_ino
        ns_dict[NSFileSystemFolderNumber] = int(folder_id)

    return ns_dict
Example #24
def daten():
  """
  Anzeige der /daten Seite mit Auflistung der
  Download-Dateien
  """
  body_list = db.get_body()
  body_dict = {}
  for body in body_list:
    body_dict[str(body['_id'])] = body['name']
  data_list = []
  for file in os.listdir(app.config['data_dump_folder']):
    if file.endswith(".tar.bz2"):
      stat = os.lstat(app.config['data_dump_folder'] + os.sep + file)
      data_list.append({
        'id': file.split('.')[0],
        'name': body_dict[file.split('.')[0]],
        'size': "%d" % (stat.st_size / 1024.0 / 1024.0)
      })
  file_list = []
  for file in os.listdir(app.config['files_dump_folder']):
    if file.endswith(".tar.bz2"):
      stat = os.lstat(app.config['files_dump_folder'] + os.sep + file)
      file_list.append({
        'id': file.split('.')[0],
        'name': body_dict[file.split('.')[0]],
        'size': "%d" % (stat.st_size / 1024.0 / 1024.0 / 1024.0)
      })
  return render_template('daten.html', data_list=data_list, file_list=file_list)
Example #25
    def _get_all_modules(self):
        '''
        Scans the overlay modules dir for loadable modules

        @rtype dict of module_plugins
        '''
        module_dir = self._module_path
        importables = []
        names = os.listdir(module_dir)
        for entry in names:
            if entry.startswith('__'):
                continue
            try:
                os.lstat(os.path.join(module_dir, entry, '__init__.py'))
                importables.append(entry)
            except EnvironmentError:
                pass

        kids = {}
        for entry in importables:
            new_module = Module(entry, self._namepath, self.output)
            for module_name in new_module.kids:
                kid = new_module.kids[module_name]
                kid['parent'] = new_module
                kids[kid['name']] = kid
        return kids
Example #26
def lexists(path):
    try:
        os.lstat(path)
    except os.error:
        return False

    return True
Example #27
    def test_rename_away_tree_entry(self) -> None:
        """ Rename a tree entry away and back again """
        # We should be able to rename files that are in the Tree
        hello = os.path.join(self.mount, "hello")
        targetname = os.path.join(self.mount, "a-new-target")
        os.rename(hello, targetname)

        with self.assertRaises(OSError) as context:
            os.lstat(hello)
        self.assertEqual(
            errno.ENOENT, context.exception.errno, msg="no longer visible as old name"
        )

        self.assert_checkout_root_entries({".eden", "a-new-target", "adir", "slink"})

        with open(targetname, "r") as f:
            self.assertEqual("hola\n", f.read(), msg="materialized correct data")

            # Now, while we hold this file open, check that a rename
            # leaves the handle connected to the file contents when
            # we rename it back to its old name.
            os.rename(targetname, hello)

            self.assert_checkout_root_entries({".eden", "adir", "hello", "slink"})

            with open(hello, "r+") as write_f:
                write_f.seek(0, os.SEEK_END)
                write_f.write("woot")

            f.seek(0)
            self.assertEqual("hola\nwoot", f.read())
Example #28
def GetURLResourceValuesForKeys(url, inProps):
    """raises custom exception MyError when, eg, the file does not exist"""
    
    # add keys needed by this routine
    values, error =  url.resourceValuesForKeys_error_( inProps+[NSURLIsVolumeKey, u'NSURLParentDirectoryURLKey', NSURLIsDirectoryKey] , None )
    
    if error is not None:
        raise MyError(error.code()  , error.localizedDescription())

    # convert unicode key strings to string
    # convert objc types to python types (for mysql converter)
    
    item_dict =   dict( zip(   [str(z) for z in values.allKeys() ] , [df2fk(v) for v in values.allValues()] ) )

    # add fields that are filesystem related, but not directly gotten as keys in the URL values
    
    p = url.path()
    file_id = os.lstat(p).st_ino
    item_dict[NSFileSystemFileNumber] = file_id 
    item_dict[NSURLPathKey] = p 

    if item_dict[NSURLIsDirectoryKey]:
        item_dict.update(  {  "NSURLTotalFileSizeKey":  0 })  # file size is zero for directories

    if item_dict[NSURLIsVolumeKey]:
        item_dict[NSFileSystemFolderNumber] = 1L
    else:
        folder_url  = values[NSURLParentDirectoryURLKey]
        fp          = folder_url.path()
        folder_id   = os.lstat(fp).st_ino
        item_dict[NSFileSystemFolderNumber] = int(folder_id)
    
    return item_dict
Example #29
    def load(self):
        """
        Load data from most recent file. Update the cache if needed.
        If the file has not changed, use the cache instead
        (typically named something like filename.ftstpk) in a pickle format.
        If file is newer, uses inherited class's loadFromLog method.
        Then, save in pickle cache.
        """
        if not self.has_changed():
            # cache is newer, just load the cache
            return self.loadCache()

        while 1: #could need more than one loop if the log file is changing
            fstat = os.lstat(self.logname)
            start_logtime = fstat[stat.ST_MTIME]
            del fstat
            
            self.loadFromLog()
            try:
                self.saveCache()
            except IOError:
                return # silently ignore, this was a load in the end
            # the log may have changed -> check
            fstat = os.lstat(self.logname)
            logtime = fstat[stat.ST_MTIME]
            del fstat
            if logtime <= start_logtime:
                return # OK, not changed, can exit
        
        return # should never reach this point
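`has_changed`, `loadCache`, `saveCache` and `loadFromLog` are left to the class; a plausible sketch of `has_changed` (an assumption mirroring the docstring, with a hypothetical `self.cachename` attribute) compares the modification times of the log and its pickle cache:

def has_changed(self):
    # The log counts as changed when no cache exists yet or when the
    # log was modified after the cache was written.
    try:
        cache_mtime = os.lstat(self.cachename)[stat.ST_MTIME]
    except OSError:
        return True
    return os.lstat(self.logname)[stat.ST_MTIME] > cache_mtime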
Example #30
 def getattr(self, path):
     for backup in self.backups:
         fpath = backup + path
         if os.path.exists(fpath):
             f_stat = os.lstat(fpath)
             mode = f_stat.st_mode
             if S_ISDIR(mode):
                 return f_stat
             else:
                 # it is a file, show as dir
                 f_stat = list(f_stat)
                 f_stat[0] = mode & 0b111111111111 | S_IFDIR
                 f_stat = stat_result(f_stat)
                 assert S_ISDIR(f_stat.st_mode)
                 parent = os.lstat(os.path.dirname(fpath))
                 # print 'done getattr - fake dir'
                 # print 'fstat:  ', f_stat
                 # print 'parent: ', parent
                 return parent  # XXX workaround
                 return f_stat
     # either the path does not exist or it actually is a file
     real_path = self._get_real_path(path)
     # print 'done getattr ', real_path
     if real_path:
         return os.lstat(real_path)
Example #31
    def _check_path(self, src, path_type, dest=None, force=False):
        """Check a new destination path in the archive.

            Since it is possible for multiple plugins to collect the same
            paths, and since plugins can now run concurrently, it is possible
            for two threads to race in archive methods: historically the
            archive class only needed to test for the actual presence of a
            path, since it was impossible for another `Archive` client to
            enter the class while another method invocation was being
            dispatched.

            Deal with this by implementing a locking scheme for operations
            that modify the path structure of the archive, and by testing
            explicitly for conflicts with any existing content at the
            specified destination path.

            It is not an error to attempt to create a path that already
            exists in the archive so long as the type of the object to be
            added matches the type of object already found at the path.

            It is an error to attempt to re-create an existing path with
            a different path type (for example, creating a symbolic link
            at a path already occupied by a regular file).

            :param src: the source path to be copied to the archive
            :param path_type: the type of object to be copied
            :param dest: an optional destination path
            :param force: force file creation even if the path exists
            :returns: An absolute destination path if the path should be
                      copied now or `None` otherwise
        """
        dest = dest or self.dest_path(src)
        if path_type == P_DIR:
            dest_dir = dest
        else:
            dest_dir = os.path.split(dest)[0]
        if not dest_dir:
            return dest

        # Check containing directory presence and path type
        if os.path.exists(dest_dir) and not os.path.isdir(dest_dir):
            raise ValueError("path '%s' exists and is not a directory" %
                             dest_dir)
        elif not os.path.exists(dest_dir):
            src_dir = src if path_type == P_DIR else os.path.split(src)[0]
            self._make_leading_paths(src_dir)

        def is_special(mode):
            return any([
                stat.S_ISBLK(mode),
                stat.S_ISCHR(mode),
                stat.S_ISFIFO(mode),
                stat.S_ISSOCK(mode)
            ])

        if force:
            return dest

        # Check destination path presence and type
        if os.path.exists(dest):
            # Use lstat: we care about the current object, not the referent.
            st = os.lstat(dest)
            ve_msg = "path '%s' exists and is not a %s"
            if path_type == P_FILE and not stat.S_ISREG(st.st_mode):
                raise ValueError(ve_msg % (dest, "regular file"))
            if path_type == P_LINK and not stat.S_ISLNK(st.st_mode):
                raise ValueError(ve_msg % (dest, "symbolic link"))
            if path_type == P_NODE and not is_special(st.st_mode):
                raise ValueError(ve_msg % (dest, "special file"))
            if path_type == P_DIR and not stat.S_ISDIR(st.st_mode):
                raise ValueError(ve_msg % (dest, "directory"))
            # Path has already been copied: skip
            return None
        return dest
Example #32
def makepkginfo(installeritem, options):
    '''Return a pkginfo dictionary for item'''

    if isinstance(options, dict):
        options = AttributeDict(options)

    pkginfo = {}
    installs = []
    if installeritem and os.path.exists(installeritem):
        # Check if the item is a mount point for a disk image
        if dmgutils.pathIsVolumeMountPoint(installeritem):
            # Get the disk image path for the mount point
            # and use that instead of the original item
            installeritem = dmgutils.diskImageForMountPoint(installeritem)

        # get size of installer item
        itemsize = 0
        itemhash = "N/A"
        if os.path.isfile(installeritem):
            itemsize = int(os.path.getsize(installeritem))
            itemhash = munkihash.getsha256hash(installeritem)

        if pkgutils.hasValidDiskImageExt(installeritem):
            if dmgutils.DMGisWritable(
                    installeritem) and options.print_warnings:
                print >> sys.stderr, (
                    "WARNING: %s is a writable disk image. "
                    "Checksum verification is not supported." % installeritem)
                print >> sys.stderr, (
                    "WARNING: Consider converting %s to a read-only disk"
                    "image." % installeritem)
                itemhash = "N/A"
            pkginfo = get_catalog_info_from_dmg(installeritem, options)
            if (pkginfo
                    and pkginfo.get('installer_type') == "AdobeCS5Installer"):
                raise PkgInfoGenerationError(
                    "This disk image appears to contain an Adobe CS5/CS6 "
                    "product install.\n"
                    "Please use Adobe Application Manager, Enterprise "
                    "Edition (AAMEE) to create an installation package "
                    "for this product.")
            if not pkginfo:
                raise PkgInfoGenerationError(
                    "Could not find a supported installer item in %s!" %
                    installeritem)

        elif pkgutils.hasValidPackageExt(installeritem):
            pkginfo = get_catalog_info_from_path(installeritem, options)
            if not pkginfo:
                raise PkgInfoGenerationError(
                    "%s doesn't appear to be a valid installer item!" %
                    installeritem)
            if os.path.isdir(installeritem) and options.print_warnings:
                print >> sys.stderr, (
                    "WARNING: %s is a bundle-style package!\n"
                    "To use it with Munki, you should encapsulate it "
                    "in a disk image.\n") % installeritem
                # need to walk the dir and add it all up
                for (path, dummy_dirs, files) in os.walk(installeritem):
                    for name in files:
                        filename = os.path.join(path, name)
                        # use os.lstat so we don't follow symlinks
                        itemsize += int(os.lstat(filename).st_size)
                # convert to kbytes
                itemsize = int(itemsize / 1024)

        elif pkgutils.hasValidConfigProfileExt(installeritem):
            try:
                pkginfo = get_catalog_info_for_profile(installeritem)
            except ProfileMetadataGenerationError, err:
                print >> sys.stderr, err
                raise PkgInfoGenerationError(
                    "%s doesn't appear to be a supported configuration "
                    "profile!" % installeritem)
        else:
            raise PkgInfoGenerationError("%s is not a valid installer item!" %
                                         installeritem)

        pkginfo['installer_item_size'] = int(itemsize / 1024)
        if itemhash != "N/A":
            pkginfo['installer_item_hash'] = itemhash

        # try to generate the correct item location
        temppath = installeritem
        location = ""
        while len(temppath) > 4:
            if temppath.endswith('/pkgs'):
                location = installeritem[len(temppath) + 1:]
                break
            else:
                temppath = os.path.dirname(temppath)

        if not location:
            #just the filename
            location = os.path.split(installeritem)[1]
        pkginfo['installer_item_location'] = location

        # ADOBE STUFF - though maybe generalizable in the future?
        if (pkginfo.get('installer_type') == "AdobeCCPInstaller"
                and not options.uninstalleritem) and options.print_warnings:
            print >> sys.stderr, (
                "WARNING: This item appears to be an Adobe Creative "
                "Cloud product install.\n"
                "No uninstaller package was specified so product "
                "removal will not be possible.")
            pkginfo['uninstallable'] = False
            if 'uninstall_method' in pkginfo:
                del pkginfo['uninstall_method']

        if options.uninstalleritem:
            uninstallerpath = options.uninstalleritem
            if os.path.exists(uninstallerpath):
                # try to generate the correct item location
                temppath = uninstallerpath
                location = ""
                while len(temppath) > 4:
                    if temppath.endswith('/pkgs'):
                        location = uninstallerpath[len(temppath) + 1:]
                        break
                    else:
                        temppath = os.path.dirname(temppath)

                if not location:
                    #just the filename
                    location = os.path.split(uninstallerpath)[1]
                pkginfo['uninstaller_item_location'] = location
                itemsize = int(os.path.getsize(uninstallerpath))
                itemhash = munkihash.getsha256hash(uninstallerpath)
                pkginfo['uninstaller_item_size'] = int(itemsize / 1024)
                pkginfo['uninstaller_item_hash'] = itemhash
            else:
                raise PkgInfoGenerationError("No uninstaller item at %s" %
                                             uninstallerpath)

        # if we have receipts, assume we can uninstall using them
        if pkginfo.get('receipts', None):
            pkginfo['uninstallable'] = True
            pkginfo['uninstall_method'] = "removepackages"
        consider_gcc("", "-%(version)s" % vars())
        consider_gcc("%(gnu_host)s-" % vars(), "-%(version)s" % vars())
for gnu_host in os.listdir(gcccross_dir):
    consider_gcc("%(gnu_host)s-" % vars(), "")
    for version in os.listdir(gcccross_dir + "/" + gnu_host):
        consider_gcc("", "-%(version)s" % vars())
        consider_gcc("%(gnu_host)s-" % vars(), "-%(version)s" % vars())

consider_clang("")
for ent in os.listdir("/usr/lib"):
    if ent.startswith("llvm-"):
        version = ent.split("-")[1]
        consider_clang("-%(version)s" % vars())

for name in os.listdir(distcc_dir):
    mode = os.lstat(distcc_dir + "/" + name).st_mode
    if stat.S_ISLNK(mode):
        if os.access(distcc_dir + "/" + name, os.X_OK):
            old_symlinks.add(name)
        else:
            os.unlink(distcc_dir + "/" + name)

for link in old_symlinks:
    if link not in new_symlinks:
        os.unlink(distcc_dir + "/" + link)

for link in new_symlinks:
    if link not in old_symlinks:
        if os.access("/usr/bin/distcc", os.X_OK):
            os.symlink("../../bin/distcc", distcc_dir + "/" + link)
        else:
Example #34
    def TestStats(self):
        """Collect inodes and blocks usage."""
        # Find the loopback device that was mounted to ROOT_A.
        loop_device = None
        root_path = os.path.abspath(os.readlink(image_test_lib.ROOT_A))
        for mtab in osutils.IterateMountPoints():
            if mtab.destination == root_path:
                loop_device = mtab.source
                break
        self.assertTrue(loop_device, 'Cannot find loopback device for ROOT_A.')

        # Gather file system stats with tune2fs.
        cmd = ['tune2fs', '-l', loop_device]
        # tune2fs produces output like this:
        #
        # tune2fs 1.42 (29-Nov-2011)
        # Filesystem volume name:   ROOT-A
        # Last mounted on:          <not available>
        # Filesystem UUID:          <none>
        # Filesystem magic number:  0xEF53
        # Filesystem revision #:    1 (dynamic)
        # ...
        #
        # So we need to ignore the first line.
        ret = cros_build_lib.SudoRunCommand(cmd,
                                            capture_output=True,
                                            extra_env={'LC_ALL': 'C'})
        fs_stat = dict(
            line.split(':', 1) for line in ret.output.splitlines()
            if ':' in line)
        free_inodes = int(fs_stat['Free inodes'])
        free_blocks = int(fs_stat['Free blocks'])
        inode_count = int(fs_stat['Inode count'])
        block_count = int(fs_stat['Block count'])
        block_size = int(fs_stat['Block size'])

        sum_file_size = 0
        for root, _, filenames in os.walk(image_test_lib.ROOT_A):
            for file_name in filenames:
                full_name = os.path.join(root, file_name)
                file_stat = os.lstat(full_name)
                sum_file_size += file_stat.st_size

        metadata_size = (block_count -
                         free_blocks) * block_size - sum_file_size

        self.OutputPerfValue('free_inodes_over_inode_count',
                             free_inodes * 100.0 / inode_count,
                             'percent',
                             graph='free_over_used_ratio')
        self.OutputPerfValue('free_blocks_over_block_count',
                             free_blocks * 100.0 / block_count,
                             'percent',
                             graph='free_over_used_ratio')
        self.OutputPerfValue('apparent_size',
                             sum_file_size,
                             'bytes',
                             higher_is_better=False,
                             graph='filesystem_stats')
        self.OutputPerfValue('metadata_size',
                             metadata_size,
                             'bytes',
                             higher_is_better=False,
                             graph='filesystem_stats')
Example #35
def is_socket(path):
    # type: (str) -> bool
    return stat.S_ISSOCK(os.lstat(path).st_mode)
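Usage sketch, assuming a Unix platform with AF_UNIX support; binding a Unix-domain socket is the easiest way to create a socket file:

import os
import socket
import tempfile

path = os.path.join(tempfile.mkdtemp(), "demo.sock")
server = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
server.bind(path)                        # creates a socket file at `path`
print(is_socket(path))                   # True
print(is_socket(tempfile.gettempdir()))  # False: a directory
server.close()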
Example #36
    def expand(self):

        import os
        import stat
        from BlockDevice import BlockDevice
        from CharacterDevice import CharacterDevice
        from File import File
        from Link import Link
        from NamedPipe import NamedPipe
        from Socket import Socket

        import journal
        debug = journal.debug("pyre.filesystem")

        files = []
        subdirectories = []

        root = self.path
        children = os.listdir(root)
        debug.log("directory '%s' has %d files" % (self.name, len(children)))

        count = 0
        for name in children:
            count += 1

            if name in self._children:
                continue

            pathname = os.path.join(root, name)
            # PORTABILITY: lstat is unix only
            mode = os.lstat(pathname)[stat.ST_MODE]

            if stat.S_ISDIR(mode):
                node = Directory(name, self)
                subdirectories.append(node)
            elif stat.S_ISREG(mode):
                node = File(name, self)
                files.append(node)
            elif stat.S_ISLNK(mode):
                node = Link(name, self)
            elif stat.S_ISSOCK(mode):
                node = Socket(name, self)
            elif stat.S_ISFIFO(mode):
                node = NamedPipe(name, self)
            elif stat.S_ISCHR(mode):
                node = CharacterDevice(name, self)
            elif stat.S_ISBLK(mode):
                node = BlockDevice(name, self)
            else:
                Firewall.hit("unknown file type: mode=%x" % mode)
                continue

            self._children[node.name] = node

            if not count % 1000:
                debug.log("processed %d files" % count)

        debug.log("total files processed: %d" % count)

        self._files = files
        self._subdirectories = subdirectories

        return subdirectories
Example #37
    def __getCurrentLocation(self, homePath):
        statTuple = os.lstat(homePath)
        if not stat.S_ISLNK(statTuple[stat.ST_MODE]):
            raise SystemExit("PDAQ_HOME '%s' is not a symlink" % homePath)

        return os.readlink(homePath)
Example #38
 def lstat(self, path=None):
     return os.lstat(self.join(path))
Example #39
def get_perm(path):
    return stat.S_IMODE(os.lstat(path)[stat.ST_MODE])
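Usage sketch: `stat.S_IMODE` keeps all permission bits, including setuid, setgid and the sticky bit:

print(oct(get_perm('/etc/passwd')))  # typically 0o644 on Linux
print(oct(get_perm('/tmp')))         # typically 0o1777: S_IMODE keeps the sticky bit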
Example #40
def lstat(path):
    if isinstance(path, str):
        return os.lstat(path)
    else:
        return os.lstat(path.as_bytes())
Example #41
    def test_index_mutation(self, rw_repo):
        index = rw_repo.index
        num_entries = len(index.entries)
        cur_head = rw_repo.head

        uname = "Some Developer"
        umail = "*****@*****.**"
        rw_repo.config_writer().set_value("user", "name", uname)
        rw_repo.config_writer().set_value("user", "email", umail)

        # remove all of the files, provide a wild mix of paths, BaseIndexEntries,
        # IndexEntries
        def mixed_iterator():
            count = 0
            for entry in index.entries.itervalues():
                type_id = count % 4
                if type_id == 0:    # path
                    yield entry.path
                elif type_id == 1:  # blob
                    yield Blob(rw_repo, entry.binsha, entry.mode, entry.path)
                elif type_id == 2:  # BaseIndexEntry
                    yield BaseIndexEntry(entry[:4])
                elif type_id == 3:  # IndexEntry
                    yield entry
                else:
                    raise AssertionError("Invalid Type")
                count += 1
            # END for each entry
        # END mixed iterator
        deleted_files = index.remove(mixed_iterator(), working_tree=False)
        assert deleted_files
        assert self._count_existing(rw_repo, deleted_files) == len(deleted_files)
        assert len(index.entries) == 0

        # reset the index to undo our changes
        index.reset()
        assert len(index.entries) == num_entries

        # remove with working copy
        deleted_files = index.remove(mixed_iterator(), working_tree=True)
        assert deleted_files
        assert self._count_existing(rw_repo, deleted_files) == 0

        # reset everything
        index.reset(working_tree=True)
        assert self._count_existing(rw_repo, deleted_files) == len(deleted_files)

        # invalid type
        self.failUnlessRaises(TypeError, index.remove, [1])

        # absolute path
        deleted_files = index.remove([os.path.join(rw_repo.working_tree_dir, "lib")], r=True)
        assert len(deleted_files) > 1
        self.failUnlessRaises(ValueError, index.remove, ["/doesnt/exists"])

        # TEST COMMITTING
        # commit changed index
        cur_commit = cur_head.commit
        commit_message = "commit default head"

        new_commit = index.commit(commit_message, head=False)
        assert cur_commit != new_commit
        assert new_commit.author.name == uname
        assert new_commit.author.email == umail
        assert new_commit.committer.name == uname
        assert new_commit.committer.email == umail
        assert new_commit.message == commit_message
        assert new_commit.parents[0] == cur_commit
        assert len(new_commit.parents) == 1
        assert cur_head.commit == cur_commit

        # same index, no parents
        commit_message = "index without parents"
        commit_no_parents = index.commit(commit_message, parent_commits=list(), head=True)
        assert commit_no_parents.message == commit_message
        assert len(commit_no_parents.parents) == 0
        assert cur_head.commit == commit_no_parents

        # same index, multiple parents
        commit_message = "Index with multiple parents\n    commit with another line"
        commit_multi_parent = index.commit(commit_message, parent_commits=(commit_no_parents, new_commit))
        assert commit_multi_parent.message == commit_message
        assert len(commit_multi_parent.parents) == 2
        assert commit_multi_parent.parents[0] == commit_no_parents
        assert commit_multi_parent.parents[1] == new_commit
        assert cur_head.commit == commit_multi_parent

        # re-add all files in lib
        # get the lib folder back on disk, but get an index without it
        index.reset(new_commit.parents[0], working_tree=True).reset(new_commit, working_tree=False)
        lib_file_path = os.path.join("lib", "git", "__init__.py")
        assert (lib_file_path, 0) not in index.entries
        assert os.path.isfile(os.path.join(rw_repo.working_tree_dir, lib_file_path))

        # directory
        entries = index.add(['lib'], fprogress=self._fprogress_add)
        self._assert_entries(entries)
        self._assert_fprogress(entries)
        assert len(entries) > 1

        # glob
        entries = index.reset(new_commit).add([os.path.join('lib', 'git', '*.py')], fprogress=self._fprogress_add)
        self._assert_entries(entries)
        self._assert_fprogress(entries)
        assert len(entries) == 14

        # same file
        entries = index.reset(new_commit).add(
            [os.path.abspath(os.path.join('lib', 'git', 'head.py'))] * 2, fprogress=self._fprogress_add)
        self._assert_entries(entries)
        assert entries[0].mode & 0644 == 0644
        # would fail, test is too primitive to handle this case
        # self._assert_fprogress(entries)
        self._reset_progress()
        assert len(entries) == 2

        # missing path
        self.failUnlessRaises(OSError, index.reset(new_commit).add, ['doesnt/exist/must/raise'])

        # blob from older revision overrides current index revision
        old_blob = new_commit.parents[0].tree.blobs[0]
        entries = index.reset(new_commit).add([old_blob], fprogress=self._fprogress_add)
        self._assert_entries(entries)
        self._assert_fprogress(entries)
        assert index.entries[(old_blob.path, 0)].hexsha == old_blob.hexsha and len(entries) == 1

        # mode 0 not allowed
        null_hex_sha = Diff.NULL_HEX_SHA
        null_bin_sha = "\0" * 20
        self.failUnlessRaises(ValueError, index.reset(
            new_commit).add, [BaseIndexEntry((0, null_bin_sha, 0, "doesntmatter"))])

        # add new file
        new_file_relapath = "my_new_file"
        new_file_path = self._make_file(new_file_relapath, "hello world", rw_repo)
        entries = index.reset(new_commit).add(
            [BaseIndexEntry((010644, null_bin_sha, 0, new_file_relapath))], fprogress=self._fprogress_add)
        self._assert_entries(entries)
        self._assert_fprogress(entries)
        assert len(entries) == 1 and entries[0].hexsha != null_hex_sha

        # add symlink
        if sys.platform != "win32":
            basename = "my_real_symlink"
            target = "/etc/that"
            link_file = os.path.join(rw_repo.working_tree_dir, basename)
            os.symlink(target, link_file)
            entries = index.reset(new_commit).add([link_file], fprogress=self._fprogress_add)
            self._assert_entries(entries)
            self._assert_fprogress(entries)
            assert len(entries) == 1 and S_ISLNK(entries[0].mode)
            assert S_ISLNK(index.entries[index.entry_key("my_real_symlink", 0)].mode)

            # we expect only the target to be written
            assert index.repo.odb.stream(entries[0].binsha).read() == target
        # END real symlink test

        # add fake symlink and assure it checks out as a symlink
        fake_symlink_relapath = "my_fake_symlink"
        link_target = "/etc/that"
        fake_symlink_path = self._make_file(fake_symlink_relapath, link_target, rw_repo)
        fake_entry = BaseIndexEntry((0120000, null_bin_sha, 0, fake_symlink_relapath))
        entries = index.reset(new_commit).add([fake_entry], fprogress=self._fprogress_add)
        self._assert_entries(entries)
        self._assert_fprogress(entries)
        assert entries[0].hexsha != null_hex_sha
        assert len(entries) == 1 and S_ISLNK(entries[0].mode)

        # assure this also works with an alternate method
        full_index_entry = IndexEntry.from_base(BaseIndexEntry((0120000, entries[0].binsha, 0, entries[0].path)))
        entry_key = index.entry_key(full_index_entry)
        index.reset(new_commit)

        assert entry_key not in index.entries
        index.entries[entry_key] = full_index_entry
        index.write()
        index.update()  # force reread of entries
        new_entry = index.entries[entry_key]
        assert S_ISLNK(new_entry.mode)

        # a tree created from this should contain the symlink
        tree = index.write_tree()
        assert fake_symlink_relapath in tree
        index.write()                       # flush our changes for the checkout

        # checkout the fakelink, should be a link then
        assert not S_ISLNK(os.stat(fake_symlink_path)[ST_MODE])
        os.remove(fake_symlink_path)
        index.checkout(fake_symlink_path)

        # on windows we will never get symlinks
        if os.name == 'nt':
            # symlinks should contain the link as text (which is what a
            # symlink actually is)
            assert open(fake_symlink_path, 'rb').read() == link_target
        else:
            assert S_ISLNK(os.lstat(fake_symlink_path)[ST_MODE])

        # TEST RENAMING
        def assert_mv_rval(rval):
            for source, dest in rval:
                assert not os.path.exists(source) and os.path.exists(dest)
            # END for each renamed item
        # END move assertion utility

        self.failUnlessRaises(ValueError, index.move, ['just_one_path'])
        # file onto existing file
        files = ['AUTHORS', 'LICENSE']
        self.failUnlessRaises(GitCommandError, index.move, files)

        # again, with force
        assert_mv_rval(index.move(files, f=True))

        # files into directory - dry run
        paths = ['LICENSE', 'VERSION', 'doc']
        rval = index.move(paths, dry_run=True)
        assert len(rval) == 2
        assert os.path.exists(paths[0])

        # again, no dry run
        rval = index.move(paths)
        assert_mv_rval(rval)

        # dir into dir
        rval = index.move(['doc', 'test'])
        assert_mv_rval(rval)

        # TEST PATH REWRITING
        ######################
        count = [0]

        def rewriter(entry):
            rval = str(count[0])
            count[0] += 1
            return rval
        # END rewriter

        def make_paths():
            # two existing ones, one new one
            yield 'CHANGES'
            yield 'ez_setup.py'
            yield index.entries[index.entry_key('README', 0)]
            yield index.entries[index.entry_key('.gitignore', 0)]

            for fid in range(3):
                fname = 'newfile%i' % fid
                open(fname, 'wb').write("abcd")
                yield Blob(rw_repo, Blob.NULL_BIN_SHA, 0100644, fname)
            # END for each new file
        # END path producer
        paths = list(make_paths())
        self._assert_entries(index.add(paths, path_rewriter=rewriter))

        for filenum in range(len(paths)):
            assert index.entry_key(str(filenum), 0) in index.entries

        # TEST RESET ON PATHS
        ######################
        arela = "aa"
        brela = "bb"
        afile = self._make_file(arela, "adata", rw_repo)
        bfile = self._make_file(brela, "bdata", rw_repo)
        akey = index.entry_key(arela, 0)
        bkey = index.entry_key(brela, 0)
        keys = (akey, bkey)
        absfiles = (afile, bfile)
        files = (arela, brela)

        for fkey in keys:
            assert fkey not in index.entries

        index.add(files, write=True)
        nc = index.commit("2 files committed", head=False)

        for fkey in keys:
            assert fkey in index.entries

        # just the index
        index.reset(paths=(arela, afile))
        assert akey not in index.entries
        assert bkey in index.entries

        # now with working tree - files on disk as well as entries must be recreated
        rw_repo.head.commit = nc
        for absfile in absfiles:
            os.remove(absfile)

        index.reset(working_tree=True, paths=files)

        for fkey in keys:
            assert fkey in index.entries
        for absfile in absfiles:
            assert os.path.isfile(absfile)
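
The test above leans on the difference between stat and lstat around symlinks. A minimal standalone sketch of that distinction (POSIX-only, since it creates a real symlink; all paths are temporary):

import os
import stat
import tempfile

tmpdir = tempfile.mkdtemp()
target = os.path.join(tmpdir, 'target')
link = os.path.join(tmpdir, 'link')
open(target, 'w').close()
os.symlink(target, link)

# lstat reports on the link itself; stat follows it to the target
assert stat.S_ISLNK(os.lstat(link).st_mode)
assert not stat.S_ISLNK(os.stat(link).st_mode)
assert stat.S_ISREG(os.stat(link).st_mode)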
Exemple #42
0
        def clean_leftovers():
            stat_cache = {}

            files_by_name = {}
            new_paths = []
            for f in self.files.list:
                files_by_name.setdefault(os.path.basename(f.path),
                                         []).append(f)
                new_paths.append(f.path)

            if not self.old_files:
                return

            for old_file in self.old_files.list:

                old_file_path = os.path.join(ctx.config.dest_dir(),
                                             old_file.path)

                if old_file.path in new_paths:
                    continue

                if old_file_path not in new_paths:
                    if os.path.islink(old_file_path):
                        os.unlink(old_file_path)
                        continue

                try:
                    old_file_stat = os.lstat(old_file_path)
                except OSError:
                    continue

                old_filename = os.path.basename(old_file.path)

                # If one of the parent directories is a symlink, it is possible
                # that the new and old file paths refer to the same file.
                # In this case, we must not remove the old file.
                #
                # e.g. /lib/libdl.so and /lib64/libdl.so when /lib64 is
                # a symlink to /lib.
                for new_file in files_by_name.get(old_filename, []):

                    new_file_stat = stat_cache.get(new_file.path)

                    if new_file_stat is None:
                        path = os.path.join(ctx.config.dest_dir(),
                                            new_file.path)
                        try:
                            new_file_stat = os.lstat(path)
                        except OSError:
                            continue

                        stat_cache[new_file.path] = new_file_stat

                    if os.path.samestat(new_file_stat, old_file_stat):
                        break

                else:

                    remove_permanent = not ctx.config.get_option(
                        "preserve_permanent")

                    Remove.remove_file(old_file,
                                       self.pkginfo.name,
                                       remove_permanent,
                                       store_old_paths=self.store_old_paths)
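
The loop above uses os.path.samestat() to decide whether an old path and a new path are really one file reached through a symlinked directory. A reduced sketch of the same check, using a hard link to produce two names for one inode (temporary paths, POSIX semantics assumed):

import os
import tempfile

tmpdir = tempfile.mkdtemp()
a = os.path.join(tmpdir, 'a')
b = os.path.join(tmpdir, 'b')
open(a, 'w').close()
os.link(a, b)  # second directory entry for the same inode

# samestat compares (st_dev, st_ino) of the two lstat results
assert os.path.samestat(os.lstat(a), os.lstat(b))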
Exemple #43
0
def delete(path, shred=False, ignore_missing=False, allow_shred=True):
    """Delete path that is either file, directory, link or FIFO.

       If shred is enabled as a function parameter or the BleachBit global
       parameter, the path will be shredded unless allow_shred = False.
    """
    from bleachbit.Options import options
    is_special = False
    path = extended_path(path)
    if not os.path.lexists(path):
        if ignore_missing:
            return
        raise OSError(2, 'No such file or directory', path)
    if 'posix' == os.name:
        # only lstat on POSIX: with certain (relatively rare) files on
        # Windows, os.lstat() may fail with Access Denied
        mode = os.lstat(path)[stat.ST_MODE]
        is_special = stat.S_ISFIFO(mode) or stat.S_ISLNK(mode)
    if is_special:
        os.remove(path)
    elif os.path.isdir(path):
        delpath = path
        if allow_shred and (shred or options.get('shred')):
            delpath = wipe_name(path)
        try:
            os.rmdir(delpath)
        except OSError as e:
            # [Errno 39] Directory not empty
            # https://bugs.launchpad.net/bleachbit/+bug/1012930
            if errno.ENOTEMPTY == e.errno:
                logger.info("directory is not empty: %s", path)
            else:
                raise
        except WindowsError as e:
            # WindowsError: [Error 145] The directory is not empty:
            # 'C:\\Documents and Settings\\username\\Local Settings\\Temp\\NAILogs'
            # Error 145 may happen if the files are scheduled for deletion
            # during reboot.
            if 145 == e.winerror:
                logger.info("directory is not empty: %s", path)
            else:
                raise
    elif os.path.isfile(path):
        # wipe contents
        if allow_shred and (shred or options.get('shred')):
            try:
                wipe_contents(path)
            except pywinerror as e:
                # 2 = The system cannot find the file specified.
                # This can happen with a broken symlink
                # https://github.com/bleachbit/bleachbit/issues/195
                if 2 != e.winerror:
                    raise
                # If a broken symlink, try os.remove() below.
            except IOError as e:
                # permission denied (13) happens shredding MSIE 8 on Windows 7
                logger.debug("IOError #%s shredding '%s'",
                             e.errno,
                             path,
                             exc_info=True)
            # wipe name
            os.remove(wipe_name(path))
        else:
            # unlink
            os.remove(path)
    else:
        logger.info("special file type cannot be deleted: %s", path)
Exemple #44
0
    def wallet_permissions(self):
        return oct(os.lstat(self.wallet_path).st_mode)[-3:]
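
The slice above depends on the textual form of the full st_mode; going through stat.S_IMODE first makes the permission bits explicit. A small equivalent sketch (any existing path will do; the wallet path is specific to the snippet above):

import os
import stat

def permission_digits(path):
    # last three octal digits of the permission bits, e.g. '600'
    return oct(stat.S_IMODE(os.lstat(path).st_mode))[-3:]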
Exemple #45
0
    def subtest_repair_file_permissions(self):
        try:
            # for this test we need to make sure the warning is NOT suppressed, but set it back to current setting at the end
            original_suppress_warning_value = os.environ.get(
                'OCI_CLI_SUPPRESS_FILE_PERMISSIONS_WARNING')
            os.environ['OCI_CLI_SUPPRESS_FILE_PERMISSIONS_WARNING'] = 'False'

            # capture stdout / stderr so we can validate warnings
            if oci_cli.cli_util.is_windows():
                with util.capture() as out:
                    # create a temporary file and set some unnecessary permissions
                    tmp = tempfile.NamedTemporaryFile()
                    subprocess.check_output(
                        'icacls "{path}" /grant Everyone:F'.format(
                            path=tmp.name),
                        stderr=subprocess.STDOUT)

                    # warning should be emitted because permissions are too loose
                    oci_cli.cli_util.FilePermissionChecker.warn_on_invalid_file_permissions(
                        tmp.name)
                    assert 'WARNING' in out[1].getvalue()

                    # reset captured stderr
                    out[1] = StringIO()

                    result = self.invoke([
                        'setup', 'repair-file-permissions', '--file', tmp.name
                    ])

                    assert result.exit_code == 0

                    # no warning should be emitted because we repaired the permissions
                    oci_cli.cli_util.FilePermissionChecker.warn_on_invalid_file_permissions(
                        tmp.name)
                    assert 'WARNING' not in out[1].getvalue()
            else:
                with util.capture() as out:
                    # create a temporary file and set some unnecessary permissions
                    tmp = tempfile.NamedTemporaryFile()
                    os.chmod(tmp.name, 509)  # octal 775

                    # warning should be emitted because permissions are too loose
                    oci_cli.cli_util.FilePermissionChecker.warn_on_invalid_file_permissions(
                        tmp.name)
                    assert 'WARNING' in out[1].getvalue()

                    # reset captured stderr
                    out[1] = StringIO()

                    result = self.invoke([
                        'setup', 'repair-file-permissions', '--file', tmp.name
                    ])

                    assert result.exit_code == 0
                    assert oct(stat.S_IMODE(os.lstat(
                        tmp.name).st_mode)) == oct(384)  # 600

                    # no warning should be emitted because we repaired the permissions
                    oci_cli.cli_util.FilePermissionChecker.warn_on_invalid_file_permissions(
                        tmp.name)
                    assert 'WARNING' not in out[1].getvalue()

                with util.capture() as out:
                    # validate that 400 file permissions are accepted as well
                    os.chmod(tmp.name, 256)  # octal 400
                    oci_cli.cli_util.FilePermissionChecker.warn_on_invalid_file_permissions(
                        tmp.name)
                    assert 'WARNING' not in out[1].getvalue()
        finally:
            if original_suppress_warning_value is None:
                del os.environ['OCI_CLI_SUPPRESS_FILE_PERMISSIONS_WARNING']
            else:
                os.environ[
                    'OCI_CLI_SUPPRESS_FILE_PERMISSIONS_WARNING'] = original_suppress_warning_value
Exemple #46
0
def ego_owner(filename):
    """Return whether current user owns the file"""
    return os.lstat(filename).st_uid == os.getuid()
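
A possible variant: for access-control decisions the effective UID is usually what matters, so one might compare against os.geteuid() instead (real and effective UIDs differ only in setuid programs):

import os

def effective_owner(filename):
    """Return whether the effective user owns the file (the link itself)."""
    return os.lstat(filename).st_uid == os.geteuid()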
Exemple #47
0
import os
import shutil
import stat
import sys
import time

if os.system('../fusecompress -o dedup,detach test') != 0:
    os.rmdir('test')
    sys.exit(2)

text1 = 'foo'
text2 = 'bar'

open('test/1', 'w').write(text1)
os.chmod('test/1', 0647)
open('test/2', 'w').write(text1)
os.chmod('test/2', 0600)

os.system('fusermount -u test')
time.sleep(1)

assert (os.lstat('test/1').st_nlink == 2)

os.system('../fusecompress -o dedup,detach test')
time.sleep(.1)

stat1 = os.lstat('test/1')
stat2 = os.lstat('test/2')
assert (stat1.st_nlink == 1)
assert (stat2.st_nlink == 1)
assert (stat.S_IMODE(stat1.st_mode) == 0647)
assert (stat.S_IMODE(stat2.st_mode) == 0600)

os.system('fusermount -u test')
time.sleep(1)

shutil.rmtree('test')
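
The st_nlink assertions above are what the dedup test hinges on. A standalone sketch of how the link count behaves (temporary paths):

import os
import tempfile

tmpdir = tempfile.mkdtemp()
one = os.path.join(tmpdir, 'one')
two = os.path.join(tmpdir, 'two')
open(one, 'w').close()

assert os.lstat(one).st_nlink == 1
os.link(one, two)  # same inode, two directory entries
assert os.lstat(one).st_nlink == 2
os.unlink(two)
assert os.lstat(one).st_nlink == 1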
Exemple #48
0
def getsize(name):
    try:
        # lstat: size of the entry itself (a symlink's own size, not its target's)
        return os.lstat(name).st_size
    except OSError:
        return 0
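
One reason to prefer os.lstat here over os.path.getsize: for a symlink, lstat reports the size of the link entry itself (on Linux, the length of the target string), while getsize follows the link. A short demonstration (POSIX-only, temporary paths):

import os
import tempfile

tmpdir = tempfile.mkdtemp()
target = os.path.join(tmpdir, 't')
with open(target, 'w') as f:
    f.write('x' * 100)
link = os.path.join(tmpdir, 'l')
os.symlink(target, link)

assert os.path.getsize(link) == 100           # follows the link
assert os.lstat(link).st_size == len(target)  # the link's own size (Linux)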
Exemple #49
0
    def check(self, test):
        """
        Check this rule against the results of the given test.
        """
        success = 1

        [testname, args] = self.rule.split("=")
        if testname[0] == "!":
            self.false = 1
            testname = testname[1:]
        [kind, case] = testname.split("_")
        if "|" in args:
            [key, value] = args.split("|", 1)
        else:
            [key, value] = [args, None]

        if kind == "PACMAN":
            if case == "RETCODE":
                if test.retcode != int(key):
                    success = 0
            elif case == "OUTPUT":
                logfile = os.path.join(test.root, util.LOGFILE)
                if not os.access(logfile, os.F_OK):
                    tap.diag("LOGFILE not found, cannot validate 'OUTPUT' rule")
                    success = 0
                elif not util.grep(logfile, key):
                    success = 0
            else:
                tap.diag("PACMAN rule '%s' not found" % case)
                success = -1
        elif kind == "PKG":
            localdb = test.db["local"]
            newpkg = localdb.db_read(key)
            if not newpkg:
                success = 0
            else:
                if case == "EXIST":
                    success = 1
                elif case == "VERSION":
                    if value != newpkg.version:
                        success = 0
                elif case == "DESC":
                    if value != newpkg.desc:
                        success = 0
                elif case == "GROUPS":
                    if not value in newpkg.groups:
                        success = 0
                elif case == "PROVIDES":
                    if not value in newpkg.provides:
                        success = 0
                elif case == "DEPENDS":
                    if not value in newpkg.depends:
                        success = 0
                elif case == "OPTDEPENDS":
                    success = 0
                    for optdep in newpkg.optdepends:
                        if value == optdep.split(':', 1)[0]:
                            success = 1
                            break
                elif case == "REASON":
                    if newpkg.reason != int(value):
                        success = 0
                elif case == "FILES":
                    if not value in newpkg.files:
                        success = 0
                elif case == "BACKUP":
                    success = 0
                    for f in newpkg.backup:
                        if f.startswith(value + "\t"):
                            success = 1
                            break
                else:
                    tap.diag("PKG rule '%s' not found" % case)
                    success = -1
        elif kind == "FILE":
            filename = os.path.join(test.root, key)
            if case == "EXIST":
                if not os.path.isfile(filename):
                    success = 0
            elif case == "EMPTY":
                if not (os.path.isfile(filename)
                        and os.path.getsize(filename) == 0):
                    success = 0
            elif case == "CONTENTS":
                try:
                    with open(filename, 'r') as f:
                        success = f.read() == value
                except:
                    success = 0
            elif case == "MODIFIED":
                for f in test.files:
                    if f.name == key:
                        if not f.ismodified():
                            success = 0
                        break
            elif case == "MODE":
                if not os.path.isfile(filename):
                    success = 0
                else:
                    mode = os.lstat(filename)[stat.ST_MODE]
                    if int(value, 8) != stat.S_IMODE(mode):
                        success = 0
            elif case == "TYPE":
                if value == "dir":
                    if not os.path.isdir(filename):
                        success = 0
                elif value == "file":
                    if not os.path.isfile(filename):
                        success = 0
                elif value == "link":
                    if not os.path.islink(filename):
                        success = 0
            elif case == "PACNEW":
                if not os.path.isfile("%s.pacnew" % filename):
                    success = 0
            elif case == "PACSAVE":
                if not os.path.isfile("%s.pacsave" % filename):
                    success = 0
            else:
                tap.diag("FILE rule '%s' not found" % case)
                success = -1
        elif kind == "DIR":
            filename = os.path.join(test.root, key)
            if case == "EXIST":
                if not os.path.isdir(filename):
                    success = 0
            else:
                tap.diag("DIR rule '%s' not found" % case)
                success = -1
        elif kind == "LINK":
            filename = os.path.join(test.root, key)
            if case == "EXIST":
                if not os.path.islink(filename):
                    success = 0
            else:
                tap.diag("LINK rule '%s' not found" % case)
                success = -1
        elif kind == "CACHE":
            cachedir = os.path.join(test.root, util.PM_CACHEDIR)
            if case == "EXISTS":
                pkg = test.findpkg(key, value, allow_local=True)
                if not pkg or not os.path.isfile(
                        os.path.join(cachedir, pkg.filename())):
                    success = 0
        else:
            tap.diag("Rule kind '%s' not found" % kind)
            success = -1

        if self.false and success != -1:
            success = not success
        self.result = success
        return success
Exemple #50
0
    def runall(self):
        print "server listening..."
        self.s.listen(5)
        self.conn, self.addr = self.s.accept()
        self.data = self.conn.recv(1024)
        self.data=self.data.split()
        # print len(self.data)
        if len(self.data)==0:
            return
        elif self.data[0]=='download':
            if len(self.data)!=3:
                print "INVALID COMMAND"
            elif self.data[1]=='UDP':
                if not os.path.exists(self.data[2]):
                    print "File not exists"
                    return
                self.conn.send(str(self.data[2])+' '+str(os.stat(self.data[2]).st_size)+' '+str(time.ctime(os.stat(self.data[2]).st_mtime))+' '+str(self.md5(self.data[2]))+' '+str(oct(stat.S_IMODE(os.lstat(self.data[2]).st_mode)))+' ')
                serverSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
                serverSocket.bind((socket.gethostname(),40000))
                message, clientAddress = serverSocket.recvfrom(1024)
                modifiedMessage = message.upper()
                self.filename = self.data[2]
                f = open(self.filename, 'rb')
                l = f.read(1024)
                while l:
                    serverSocket.sendto(l, clientAddress)
                    l = f.read(1024)
                # send the final empty datagram as an end-of-transfer marker
                serverSocket.sendto(l, clientAddress)
                f.close()
                serverSocket.close()
                self.conn.close()
            elif self.data[1]=='TCP':
                if not os.path.exists(self.data[2]):
                    print "File not exists"
                    return
                self.filename=self.data[2]
                self.conn.send(str(self.data[2])+' '+str(os.stat(self.data[2]).st_size)+' '+str(time.ctime(os.stat(self.data[2]).st_mtime))+' '+str(self.md5(self.data[2]))+' '+str(oct(stat.S_IMODE(os.lstat(self.data[2]).st_mode)))+' ')
                f = open(self.filename, 'rb')
                l = f.read(1024)
                while l:
                    self.conn.send(l)
                    l = f.read(1024)
                f.close()
                self.conn.close()
        elif self.data[0]=='index':
            if len(self.data)<2:
                print "INVALID COMMAND"
                self.conn.send("INVALID COMMAND")
            elif self.data[1]=='longlist':
                for file in os.listdir(os.getcwd()):
                    a=os.stat(file)
                    filename=str(file)
                    size=str(a.st_size)
                    if os.path.isfile(file):
                        type="file"
                    else:
                        type="Directory"
                    mtime=str(time.ctime(a.st_mtime))
                    self.conn.send(filename+" "+size+" "+type+" "+mtime+"\n")
                self.conn.close()
            elif self.data[1]=='shortlist' and len(self.data)==10:
                for file in os.listdir(os.getcwd()):
                    a=os.stat(file)
                    mint=time.mktime(time.strptime(' '.join(self.data[2:6]), '%d %m %Y %H:%M:%S'))
                    maxt=time.mktime(time.strptime(' '.join(self.data[6:10]), '%d %m %Y %H:%M:%S'))
                    if mint<os.path.getmtime(file)<maxt:
                        filename=str(file)
                        size=str(a.st_size)
                        if os.path.isfile(file):
                            type="file"
                        else:
                            type="Directory"
                        mtime=str(time.ctime(a.st_mtime))
                        self.conn.send(filename+" "+size+" "+type+" "+mtime+"\n")
                self.conn.close()
            elif self.data[1]=='regex' and len(self.data)==3:
                reprog=re.compile(' '.join(self.data[2:]).strip())
                for file in os.listdir(os.getcwd()):
                    if reprog.match(file):
                        a=os.stat(file)
                        filename=str(file)
                        size=str(a.st_size)
                        if os.path.isfile(file):
                            type="file"
                        else:
                            type="Directory"
                        mtime=str(time.ctime(a.st_mtime))
                        self.conn.send(filename+" "+size+" "+type+" "+mtime+"\n")
                self.conn.close()
            else:
                print "INVALID COMMAND"
                self.conn.send("INVALID COMMAND")
        elif self.data[0]=='hash':
            if len(self.data)<2:
                print "INVALID COMMAND"
                self.conn.send("INVALID COMMAND")
            elif self.data[1]=='checkall':
                for file in os.listdir(os.getcwd()):
                    self.conn.send(str(file)+' '+str(self.md5(file))+' '+str(time.ctime(os.stat(file).st_mtime))+' ')
                self.conn.close()
            elif self.data[1]=='verify' and len(self.data)==3:
                self.conn.send(str(self.data[2])+' '+str(self.md5(self.data[2]))+' '+str(time.ctime(os.stat(self.data[2]).st_mtime))+' ')
                self.conn.close()
            else:
                print "INVALID COMMAND"
                self.conn.send("INVALID COMMAND")
        elif self.data[0]=='getfileslist':
            for file in os.listdir(os.getcwd()):
                self.conn.send(str(file)+' '+str(self.md5(file))+' ')
            self.conn.close()
        elif self.data[0]=='exit':
            global exit
            exit=True
        else:
             print "command not found"
Exemple #51
0
def _make_writable(fname):
    """Make a file writable."""
    os.chmod(fname, stat.S_IMODE(os.lstat(fname)[stat.ST_MODE]) | stat.S_IWUSR)  # add owner write bit
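
A typical place this pattern shows up is deleting read-only trees on Windows; a hypothetical wiring as an onerror hook for shutil.rmtree (sketch only, names are illustrative):

import os
import stat

def _on_rm_error(func, path, exc_info):
    # add the owner write bit, then retry the operation that failed
    os.chmod(path, stat.S_IMODE(os.lstat(path)[stat.ST_MODE]) | stat.S_IWUSR)
    func(path)

# shutil.rmtree(some_dir, onerror=_on_rm_error)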
Exemple #52
0
    def copy(self, dest, skip_if_older=True):
        assert isinstance(dest, basestring)

        # The logic in this function is complicated by the fact that symlinks
        # aren't universally supported. So, where symlinks aren't supported, we
        # fall back to file copying. Keep in mind that symlink support is
        # per-filesystem, not per-OS.

        # Handle the simple case where symlinks are definitely not supported by
        # falling back to file copy.
        if not hasattr(os, 'symlink'):
            return File.copy(self, dest, skip_if_older=skip_if_older)

        # Always verify the symlink target path exists.
        if not os.path.exists(self.path):
            raise ErrorMessage('Symlink target path does not exist: %s' %
                               self.path)

        st = None

        try:
            st = os.lstat(dest)
        except OSError as ose:
            if ose.errno != errno.ENOENT:
                raise

        # If the dest is a symlink pointing to us, we have nothing to do.
        # If it's the wrong symlink, the filesystem must support symlinks,
        # so we replace with a proper symlink.
        if st and stat.S_ISLNK(st.st_mode):
            link = os.readlink(dest)
            if link == self.path:
                return False

            os.remove(dest)
            os.symlink(self.path, dest)
            return True

        # If the destination doesn't exist, we try to create a symlink. If that
        # fails, we fall back to copy code.
        if not st:
            try:
                os.symlink(self.path, dest)
                return True
            except OSError:
                return File.copy(self, dest, skip_if_older=skip_if_older)

        # Now the complicated part. If the destination exists, we could be
        # replacing a file with a symlink. Or, the filesystem may not support
        # symlinks. We want to minimize I/O overhead for performance reasons,
        # so we keep the existing destination file around as long as possible.
        # A lot of the system calls would be eliminated if we cached whether
        # symlinks are supported. However, even if we performed a single
        # up-front test of whether the root of the destination directory
        # supports symlinks, there's no guarantee that all operations for that
        # dest (or source) would be on the same filesystem and would support
        # symlinks.
        #
        # Our strategy is to attempt to create a new symlink with a random
        # name. If that fails, we fall back to copy mode. If that works, we
        # remove the old destination and move the newly-created symlink into
        # its place.

        temp_dest = os.path.join(os.path.dirname(dest), str(uuid.uuid4()))
        try:
            os.symlink(self.path, temp_dest)
        # TODO Figure out exactly how symlink creation fails and only trap
        # that.
        except EnvironmentError:
            return File.copy(self, dest, skip_if_older=skip_if_older)

        # If removing the original file fails, don't forget to clean up the
        # temporary symlink.
        try:
            os.remove(dest)
        except EnvironmentError:
            os.remove(temp_dest)
            raise

        os.rename(temp_dest, dest)
        return True
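
The strategy described in the comments, reduced to its core: create the symlink under a random temporary name, then rename it over the destination, which POSIX guarantees to be atomic. A minimal sketch under that assumption:

import os
import uuid

def replace_symlink(target, dest):
    temp = os.path.join(os.path.dirname(dest), str(uuid.uuid4()))
    os.symlink(target, temp)
    try:
        os.rename(temp, dest)  # atomically replaces an existing dest on POSIX
    except OSError:
        os.remove(temp)
        raise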
Exemple #53
0
    def action(self, path):
        rootdir = self.rootdir
        pubpath = pkg.misc.relpath(path, rootdir)
        pstat = os.lstat(path)
        mode = oct(stat.S_IMODE(pstat.st_mode))
        timestamp = pkg.misc.time_to_timestamp(pstat.st_mtime)

        # Set default root and group.
        owner = "root"
        group = "bin"

        # Check whether need to change owner.
        if not self.use_default_owner:
            try:
                owner = pwd.getpwuid(pstat.st_uid).pw_name
            except KeyError as e:
                owner = None
            try:
                group = grp.getgrgid(pstat.st_gid).gr_name
            except KeyError as e:
                group = None

            if not owner and not group:
                raise pkg.bundle.InvalidOwnershipException(path,
                                                           uid=pstat.st_uid,
                                                           gid=pstat.st_gid)
            elif not owner:
                raise pkg.bundle.InvalidOwnershipException(path,
                                                           uid=pstat.st_uid)
            elif not group:
                raise pkg.bundle.InvalidOwnershipException(path,
                                                           gid=pstat.st_gid)

        if stat.S_ISREG(pstat.st_mode):
            inode = pstat.st_ino
            # Any inode in self.inodes will either have been visited
            # before or will have been pre-populated from the list
            # of known targets.  Create file actions for known
            # targets and unvisited inodes.
            if pubpath in self.targetpaths or \
                inode not in self.inodes:
                if pstat.st_nlink > 1:
                    self.inodes.setdefault(inode, path)
                return pkg.actions.file.FileAction(open(path, "rb"),
                                                   mode=mode,
                                                   owner=owner,
                                                   group=group,
                                                   path=pubpath,
                                                   timestamp=timestamp)
            else:
                # Find the relative path to the link target.
                target = pkg.misc.relpath(self.inodes[inode],
                                          os.path.dirname(path))
                return pkg.actions.hardlink.HardLinkAction(path=pubpath,
                                                           target=target)
        elif stat.S_ISLNK(pstat.st_mode):
            return pkg.actions.link.LinkAction(target=os.readlink(path),
                                               path=pubpath)
        elif stat.S_ISDIR(pstat.st_mode):
            return pkg.actions.directory.DirectoryAction(timestamp=timestamp,
                                                         mode=mode,
                                                         owner=owner,
                                                         group=group,
                                                         path=pubpath)
Exemple #54
0
    def _dir_model(self, path, content=True):
        """Build a model for a directory

        if content is requested, will include a listing of the directory
        """
        os_path = self._get_os_path(path)

        four_o_four = u'directory does not exist: %r' % path

        if not os.path.isdir(os_path):
            raise web.HTTPError(404, four_o_four)
        elif is_hidden(os_path, self.root_dir) and not self.allow_hidden:
            self.log.info(
                "Refusing to serve hidden directory %r, via 404 Error",
                os_path)
            raise web.HTTPError(404, four_o_four)

        model = self._base_model(path)
        model['type'] = 'directory'
        model['size'] = None
        if content:
            model['content'] = contents = []
            os_dir = self._get_os_path(path)
            for name in os.listdir(os_dir):
                try:
                    os_path = os.path.join(os_dir, name)
                except UnicodeDecodeError as e:
                    self.log.warning("failed to decode filename '%s': %s",
                                     name, e)
                    continue

                try:
                    st = os.lstat(os_path)
                except OSError as e:
                    # skip over broken symlinks in listing
                    if e.errno == errno.ENOENT:
                        self.log.warning("%s doesn't exist", os_path)
                    elif e.errno != errno.EACCES:  # Don't provide clues about protected files
                        self.log.warning("Error stat-ing %s: %s", os_path, e)
                    continue

                if (not stat.S_ISLNK(st.st_mode)
                        and not stat.S_ISREG(st.st_mode)
                        and not stat.S_ISDIR(st.st_mode)):
                    self.log.debug("%s not a regular file", os_path)
                    continue

                try:
                    if self.should_list(name):
                        if self.allow_hidden or not is_file_hidden(
                                os_path, stat_res=st):
                            contents.append(
                                self.get(path='%s/%s' % (path, name),
                                         content=False))
                except OSError as e:
                    # ELOOP: recursive symlink, also don't show failure due to permissions
                    if e.errno not in [errno.ELOOP, errno.EACCES]:
                        self.log.warning(
                            "Unknown error checking if file %r is hidden",
                            os_path,
                            exc_info=True,
                        )

            model['format'] = 'json'

        return model
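
The listing loop above survives broken symlinks by lstat-ing each entry and skipping the expected errnos. A minimal helper of the same shape:

import errno
import os

def try_lstat(path):
    """os.lstat(path), or None for vanished files and permission errors."""
    try:
        return os.lstat(path)
    except OSError as e:
        if e.errno in (errno.ENOENT, errno.EACCES):
            return None
        raise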
Exemple #55
0
def get_dir_meta(worker_name, path, cliargs, reindex_dict):
    """This is the get directory meta data function.
    It gets directory metadata and returns dir meta dict.
    It checks if meta data is in Redis and compares times
    mtime and ctime on disk compared to Redis and if same
    returns sametimes string.
    """

    try:
        lstat_path = os.lstat(path)
        mtime_unix = lstat_path.st_mtime
        mtime_utc = datetime.utcfromtimestamp(mtime_unix) \
            .strftime('%Y-%m-%dT%H:%M:%S')
        atime_unix = lstat_path.st_atime
        atime_utc = datetime.utcfromtimestamp(atime_unix) \
            .strftime('%Y-%m-%dT%H:%M:%S')
        ctime_unix = lstat_path.st_ctime
        ctime_utc = datetime.utcfromtimestamp(ctime_unix) \
            .strftime('%Y-%m-%dT%H:%M:%S')
        if cliargs['index2']:
            # check if directory times cached in Redis
            redis_dirtime = redis_conn.get(
                base64.encodestring(path.encode('utf-8', errors='ignore')))
            if redis_dirtime:
                cached_times = float(redis_dirtime.decode('utf-8'))
                # check if cached times are the same as on disk
                current_times = float(mtime_unix + ctime_unix)
                if cached_times == current_times:
                    return "sametimes"
        # get time now in utc
        indextime_utc = datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S.%f")
        # get user id of owner
        uid = lstat_path.st_uid
        # try to get owner user name
        # first check cache
        if uid in uids:
            owner = owners[uid]
        # not in cache
        else:
            try:
                owner = pwd.getpwuid(uid).pw_name.split('\\')
                # remove domain before owner
                if len(owner) == 2:
                    owner = owner[1]
                else:
                    owner = owner[0]
            # if we can't find the owner's user name, use the uid number
            except KeyError:
                owner = uid
            # store it in cache
            if not uid in uids:
                uids.append(uid)
                owners[uid] = owner
        # get group id
        gid = lstat_path.st_gid
        # try to get group name
        # first check cache
        if gid in gids:
            group = groups[gid]
        # not in cache
        else:
            try:
                group = grp.getgrgid(gid).gr_name.split('\\')
                # remove domain before group
                if len(group) == 2:
                    group = group[1]
                else:
                    group = group[0]
            # if we can't find the group name, use the gid number
            except KeyError:
                group = gid
            # store in cache
            if not gid in gids:
                gids.append(gid)
                groups[gid] = group

        inode = lstat_path.st_ino
        hardlinks = lstat_path.st_nlink

        filename = os.path.basename(path)
        parentdir = os.path.abspath(os.path.join(path, os.pardir))
        fullpath = os.path.abspath(os.path.join(parentdir, filename))

        dirmeta_dict = {
            "filename": filename,
            "path_parent": parentdir,
            "filesize": 0,
            "items": 1,  # 1 for itself
            "items_files": 0,
            "items_subdirs": 0,
            "last_modified": mtime_utc,
            "last_access": atime_utc,
            "last_change": ctime_utc,
            "hardlinks": hardlinks,
            "inode": inode,
            "owner": owner,
            "group": group,
            "tag": "",
            "tag_custom": "",
            "crawl_time": 0,
            "change_percent_filesize": "",
            "change_percent_items": "",
            "change_percent_items_files": "",
            "change_percent_items_subdirs": "",
            "worker_name": worker_name,
            "indexing_date": indextime_utc,
            "_type": "directory"
        }

        # check plugins for adding extra meta data to dirmeta_dict
        for plugin in diskover.plugins:
            try:
                # check if plugin is for directory doc
                mappings = {'mappings': {'directory': {'properties': {}}}}
                plugin.add_mappings(mappings)
                dirmeta_dict.update(plugin.add_meta(fullpath))
            except KeyError:
                pass

        # add any autotags to dirmeta_dict
        if cliargs['autotag'] and len(diskover.config['autotag_dirs']) > 0:
            auto_tag(dirmeta_dict, 'directory', mtime_unix, atime_unix,
                     ctime_unix)

        # search for and copy over any existing tags from reindex_dict
        for sublist in reindex_dict['directory']:
            if sublist[0] == fullpath:
                dirmeta_dict['tag'] = sublist[1]
                dirmeta_dict['tag_custom'] = sublist[2]
                break

    except (IOError, OSError):
        return None

    # cache directory times in Redis, encode path (key) using base64
    if diskover.config['redis_cachedirtimes'] == 'True' or diskover.config[
            'redis_cachedirtimes'] == 'true':
        redis_conn.set(base64.encodestring(
            path.encode('utf-8', errors='ignore')),
                       mtime_unix + ctime_unix,
                       ex=diskover.config['redis_dirtimesttl'])

    return dirmeta_dict
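
A dict-backed sketch of the same change-detection idea, with Redis swapped for an in-memory cache (the cache value is the float sum of mtime and ctime, as above):

import os

_dirtimes = {}

def dir_unchanged(path):
    st = os.lstat(path)
    current = float(st.st_mtime + st.st_ctime)
    unchanged = _dirtimes.get(path) == current
    _dirtimes[path] = current
    return unchanged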
Exemple #56
0
def VerifyFileOwner(filename):
    stat_info = os.lstat(filename)
    return os.getuid() == stat_info.st_uid
Exemple #57
0
def main():
    module = AssibleModule(
        argument_spec=dict(
            path=dict(type='path', required=True, aliases=['dest', 'name']),
            follow=dict(type='bool', default=False),
            get_md5=dict(type='bool', default=False),
            get_checksum=dict(type='bool', default=True),
            get_mime=dict(type='bool',
                          default=True,
                          aliases=['mime', 'mime_type', 'mime-type']),
            get_attributes=dict(type='bool',
                                default=True,
                                aliases=['attr', 'attributes']),
            checksum_algorithm=dict(type='str',
                                    default='sha1',
                                    choices=[
                                        'md5', 'sha1', 'sha224', 'sha256',
                                        'sha384', 'sha512'
                                    ],
                                    aliases=['checksum', 'checksum_algo']),
        ),
        supports_check_mode=True,
    )

    path = module.params.get('path')
    b_path = to_bytes(path, errors='surrogate_or_strict')
    follow = module.params.get('follow')
    get_mime = module.params.get('get_mime')
    get_attr = module.params.get('get_attributes')
    get_checksum = module.params.get('get_checksum')
    checksum_algorithm = module.params.get('checksum_algorithm')

    # NOTE: undocumented option since 2.9 to be removed at a later date if possible (3.0+)
    # no real reason for keeping other than fear we may break older content.
    get_md5 = module.params.get('get_md5')

    # main stat data
    try:
        if follow:
            st = os.stat(b_path)
        else:
            st = os.lstat(b_path)
    except OSError as e:
        if e.errno == errno.ENOENT:
            output = {'exists': False}
            module.exit_json(changed=False, stat=output)

        module.fail_json(msg=e.strerror)

    # process base results
    output = format_output(module, path, st)

    # resolved permissions
    for perm in [('readable', os.R_OK), ('writeable', os.W_OK),
                 ('executable', os.X_OK)]:
        output[perm[0]] = os.access(b_path, perm[1])

    # symlink info
    if output.get('islnk'):
        output['lnk_source'] = os.path.realpath(b_path)
        output['lnk_target'] = os.readlink(b_path)

    try:  # user data
        pw = pwd.getpwuid(st.st_uid)
        output['pw_name'] = pw.pw_name
    except (TypeError, KeyError):
        pass

    try:  # group data
        grp_info = grp.getgrgid(st.st_gid)
        output['gr_name'] = grp_info.gr_name
    except (KeyError, ValueError, OverflowError):
        pass

    # checksums
    if output.get('isreg') and output.get('readable'):

        # NOTE: see above about get_md5
        if get_md5:
            # Will fail on FIPS-140 compliant systems
            try:
                output['md5'] = module.md5(b_path)
            except ValueError:
                output['md5'] = None

        if get_checksum:
            output['checksum'] = module.digest_from_file(
                b_path, checksum_algorithm)

    # try to get mime data if requested
    if get_mime:
        output['mimetype'] = output['charset'] = 'unknown'
        mimecmd = module.get_bin_path('file')
        if mimecmd:
            mimecmd = [mimecmd, '--mime-type', '--mime-encoding', b_path]
            try:
                rc, out, err = module.run_command(mimecmd)
                if rc == 0:
                    mimetype, charset = out.rsplit(':', 1)[1].split(';')
                    output['mimetype'] = mimetype.strip()
                    output['charset'] = charset.split('=')[1].strip()
            except Exception:
                pass

    # try to get attr data
    if get_attr:
        output['version'] = None
        output['attributes'] = []
        output['attr_flags'] = ''
        out = module.get_file_attributes(b_path)
        for x in ('version', 'attributes', 'attr_flags'):
            if x in out:
                output[x] = out[x]

    module.exit_json(changed=False, stat=output)
Exemple #58
0
def get_file_meta(worker_name, path, cliargs, reindex_dict):
    """This is the get file meta data function.
    It scrapes file meta and ignores files smaller
    than minsize Bytes, newer than mtime
    and in excluded_files. Returns file meta dict.
    """

    try:
        filename = os.path.basename(path)

        # check if file is in excluded_files list
        extension = os.path.splitext(filename)[1][1:].strip().lower()
        if file_excluded(filename, extension, path, cliargs['verbose']):
            return None

        # use lstat to get meta and not follow sym links
        stat = os.lstat(path)
        # get file size (bytes)
        size = stat.st_size

        # Skip files smaller than minsize cli flag
        if size < cliargs['minsize']:
            return None

        # check file modified time
        mtime_unix = stat.st_mtime
        mtime_utc = \
            datetime.utcfromtimestamp(mtime_unix).strftime('%Y-%m-%dT%H:%M:%S')
        # Convert time in days (mtime cli arg) to seconds
        time_sec = cliargs['mtime'] * 86400
        file_mtime_sec = time.time() - mtime_unix
        # Only process files modified at least x days ago
        if file_mtime_sec < time_sec:
            return None

        # get access time
        atime_unix = stat.st_atime
        atime_utc = \
            datetime.utcfromtimestamp(atime_unix).strftime('%Y-%m-%dT%H:%M:%S')
        # get change time
        ctime_unix = stat.st_ctime
        ctime_utc = \
            datetime.utcfromtimestamp(ctime_unix).strftime('%Y-%m-%dT%H:%M:%S')
        # get user id of owner
        uid = stat.st_uid
        # try to get owner user name
        # first check cache
        if uid in uids:
            owner = owners[uid]
        # not in cache
        else:
            try:
                owner = pwd.getpwuid(uid).pw_name.split('\\')
                # remove domain before owner
                if len(owner) == 2:
                    owner = owner[1]
                else:
                    owner = owner[0]
            # if we can't find the owner's user name, use the uid number
            except KeyError:
                owner = uid
            # store it in cache
            if not uid in uids:
                uids.append(uid)
                owners[uid] = owner
        # get group id
        gid = stat.st_gid
        # try to get group name
        # first check cache
        if gid in gids:
            group = groups[gid]
        # not in cache
        else:
            try:
                group = grp.getgrgid(gid).gr_name.split('\\')
                # remove domain before group
                if len(group) == 2:
                    group = group[1]
                else:
                    group = group[0]
            # if we can't find the group name, use the gid number
            except KeyError:
                group = gid
            # store in cache
            if not gid in gids:
                gids.append(gid)
                groups[gid] = group
        # get inode number
        inode = stat.st_ino
        # get number of hardlinks
        hardlinks = stat.st_nlink
        # create md5 hash of file using metadata filesize and mtime
        filestring = str(size) + str(mtime_unix)
        filehash = hashlib.md5(filestring.encode('utf-8')).hexdigest()
        # get time
        indextime_utc = datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S.%f")
        # get absolute path of parent directory
        parentdir = os.path.abspath(os.path.join(path, os.pardir))

        # create file metadata dictionary
        filemeta_dict = {
            "filename": filename,
            "extension": extension,
            "path_parent": parentdir,
            "filesize": size,
            "owner": owner,
            "group": group,
            "last_modified": mtime_utc,
            "last_access": atime_utc,
            "last_change": ctime_utc,
            "hardlinks": hardlinks,
            "inode": inode,
            "filehash": filehash,
            "tag": "",
            "tag_custom": "",
            "dupe_md5": "",
            "worker_name": worker_name,
            "indexing_date": indextime_utc,
            "_type": "file"
        }

        # check plugins for adding extra meta data to filemeta_dict
        for plugin in diskover.plugins:
            try:
                # check if plugin is for file doc
                mappings = {'mappings': {'file': {'properties': {}}}}
                plugin.add_mappings(mappings)
                filemeta_dict.update(plugin.add_meta(path))
            except KeyError:
                pass

        # add any autotags to filemeta_dict
        if cliargs['autotag'] and len(diskover.config['autotag_files']) > 0:
            auto_tag(filemeta_dict, 'file', mtime_unix, atime_unix, ctime_unix)

        # search for and copy over any existing tags from reindex_dict
        for sublist in reindex_dict['file']:
            if sublist[0] == path:
                filemeta_dict['tag'] = sublist[1]
                filemeta_dict['tag_custom'] = sublist[2]
                break

    except (IOError, OSError):
        return None

    return filemeta_dict
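
The filehash above is a cheap fingerprint: it hashes the size and mtime instead of the file contents, so a file can be flagged as changed without ever being read. The same trick in isolation:

import hashlib
import os

def meta_fingerprint(path):
    st = os.lstat(path)
    key = str(st.st_size) + str(st.st_mtime)
    return hashlib.md5(key.encode('utf-8')).hexdigest()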
Exemple #59
0
    def doFile(self, path):
        if hasattr(self.recipe, '_getCapsulePathsForFile'):
            if self.recipe._getCapsulePathsForFile(path):
                return

        m = self.recipe.magic[path]
        if not m:
            return
        # FIXME: should be:
        #if (m.name == "ELF" or m.name == "ar") and \
        #   m.contents['hasDebug']):
        # but this has to wait until ewt writes debug detection
        # for archives as well as elf files
        if (m.name == "ELF" and m.contents['hasDebug']) or \
           (m.name == "ar"):
            oldmode = None
            fullpath = self.dm.destdir+path
            mode = os.lstat(fullpath)[stat.ST_MODE]
            if mode & 0600 != 0600:
                # need to be able to read and write the file to strip it
                oldmode = mode
                os.chmod(fullpath, mode|0600)
            if self.debuginfo and m.name == 'ELF' and not path.endswith('.o'):

                dir = os.path.dirname(path)
                b = os.path.basename(path)
                if not b.endswith('.debug'):
                    b += '.debug'

                debuglibdir = '%(destdir)s%(debuglibdir)s' % self.dm + dir
                debuglibpath = util.joinPaths(debuglibdir, b)
                if os.path.exists(debuglibpath):
                    return

                self._openDb()
                if (_findProgPath(self.macros.debugedit, self.db,
                                  self.recipe, error=False) and
                    _findProgPath(self.macros.strip, self.db,
                                  self.recipe, error=False)):

                    # null-separated AND terminated list, so we need to throw
                    # away the last (empty) item before updating self.debugfiles
                    self.debugfiles |= set(util.popen(
                        '%(debugedit)s -b %(topbuilddir)s -d %(debugsrcdir)s'
                        ' -l /dev/stdout '%self.dm
                        +fullpath).read().split('\x00')[:-1])
                    util.mkdirChain(debuglibdir)
                    util.execute('%s -f %s %s' %(
                        self.dm.strip, debuglibpath, fullpath))

            else:
                self._openDb()
                if m.name == 'ar' or path.endswith('.o'):
                    # just in case strip is eu-strip, which segfaults
                    # whenever it touches an ar archive, and seems to
                    # break some .o files
                    if _findProgPath(self.macros.strip_archive, self.db, self.recipe, error=False):
                        util.execute('%(strip_archive)s ' %self.dm +fullpath)
                else:
                    if _findProgPath(self.macros.strip, self.db, self.recipe, error=False):
                        util.execute('%(strip)s ' %self.dm +fullpath)

            del self.recipe.magic[path]
            if oldmode is not None:
                os.chmod(fullpath, oldmode)
Exemple #60
0
    def lstat(self):
        return os.lstat(self)
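
A one-line method like this only makes sense on a str subclass that carries a filesystem path; a minimal sketch of that pattern (the class name is hypothetical):

import os

class PathStr(str):
    """A string that knows it is a path."""

    def lstat(self):
        return os.lstat(self)

# PathStr('/tmp').lstat().st_uid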