Example #1
  def testTarFileWithMultipleFiles(self):
    outfd = StringIO.StringIO()

    infd1 = StringIO.StringIO("this is a test string")
    st1 = os.stat_result((0644, 0, 0, 0, 0, 0, len(infd1.getvalue()), 0, 0, 0))

    infd2 = StringIO.StringIO("this is another test string")
    st2 = os.stat_result((0644, 0, 0, 0, 0, 0, len(infd2.getvalue()), 0, 0, 0))

    # Write the tar into a file-like object.
    with utils.StreamingTarWriter(outfd, mode="w:gz") as writer:
      writer.WriteFromFD(infd1, "test1.txt", st=st1)
      writer.WriteFromFD(infd2, "subdir/test2.txt", st=st2)

    test_tar = tarfile.open(
        fileobj=StringIO.StringIO(outfd.getvalue()), mode="r")
    tinfos = sorted(test_tar.getmembers(), key=lambda tinfo: tinfo.name)

    self.assertEqual(len(tinfos), 2)
    self.assertEqual(tinfos[0].name, "subdir/test2.txt")
    self.assertEqual(tinfos[1].name, "test1.txt")

    fd = test_tar.extractfile(tinfos[0])
    self.assertEqual(fd.read(1024), infd2.getvalue())

    fd = test_tar.extractfile(tinfos[1])
    self.assertEqual(fd.read(1024), infd1.getvalue())
Example #2
  def testTarFileWithSymlink(self):
    outfd = StringIO.StringIO()

    infd1 = StringIO.StringIO("this is a test string")
    st1 = os.stat_result((0644, 0, 0, 0, 0, 0, len(infd1.getvalue()), 0, 0, 0))

    infd2 = StringIO.StringIO("this is another test string")
    st2 = os.stat_result((0644, 0, 0, 0, 0, 0, len(infd2.getvalue()), 0, 0, 0))

    # Write the tar into a file-like object.
    with utils.StreamingTarWriter(outfd, mode="w:gz") as writer:
      writer.WriteFromFD(infd1, "test1.txt", st=st1)
      writer.WriteFromFD(infd2, "subdir/test2.txt", st=st2)

      writer.WriteSymlink("test1.txt", "test1.txt.link")
      writer.WriteSymlink("subdir/test2.txt", "test2.txt.link")

    with utils.TempDirectory() as temp_dir:
      tar_path = os.path.join(temp_dir, "archive.tar.gz")
      with open(tar_path, "w") as fd:
        fd.write(outfd.getvalue())

      # Extract the files with the command-line tar tool so that the
      # symlink entries are verified outside of Python's own tarfile module.
      subprocess.check_call(["tar", "-xzf", tar_path, "-C", temp_dir])

      link_path = os.path.join(temp_dir, "test1.txt.link")
      self.assertTrue(os.path.islink(link_path))
      self.assertEqual(os.readlink(link_path), "test1.txt")

      link_path = os.path.join(temp_dir, "test2.txt.link")
      self.assertTrue(os.path.islink(link_path))
      self.assertEqual(os.readlink(link_path), "subdir/test2.txt")
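Note: the two tests above fabricate an os.stat_result by hand because StreamingTarWriter only needs st_mode and st_size. The ten constructor fields are positional and indexable, in this order: st_mode, st_ino, st_dev, st_nlink, st_uid, st_gid, st_size, st_atime, st_mtime, st_ctime. A minimal standalone sketch of that field mapping (not taken from the test suite above):

    import os

    # Only mode and size carry real values here; the rest are zeroed.
    st = os.stat_result((0o644, 0, 0, 0, 0, 0, 21, 0, 0, 0))
    assert st.st_mode == 0o644
    assert st.st_size == 21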
Example #3
  def testTarFileWithSymlink(self):
    outfd = StringIO.StringIO()

    infd1 = StringIO.StringIO("this is a test string")
    st1 = os.stat_result((0644, 0, 0, 0, 0, 0, len(infd1.getvalue()), 0, 0, 0))

    infd2 = StringIO.StringIO("this is another test string")
    st2 = os.stat_result((0644, 0, 0, 0, 0, 0, len(infd2.getvalue()), 0, 0, 0))

    # Write the tar into a file-like object.
    with utils.StreamingTarWriter(outfd, mode="w:gz") as writer:
      writer.WriteFromFD(infd1, "test1.txt", st=st1)
      writer.WriteFromFD(infd2, "subdir/test2.txt", st=st2)

      writer.WriteSymlink("test1.txt", "test1.txt.link")
      writer.WriteSymlink("subdir/test2.txt", "test2.txt.link")

    with tarfile.open(
        fileobj=StringIO.StringIO(outfd.getvalue()), mode="r") as test_fd:
      test_fd.extractall(self.temp_dir)

      link_path = os.path.join(self.temp_dir, "test1.txt.link")
      self.assertTrue(os.path.islink(link_path))
      self.assertEqual(os.readlink(link_path), "test1.txt")

      link_path = os.path.join(self.temp_dir, "test2.txt.link")
      self.assertTrue(os.path.islink(link_path))
      self.assertEqual(os.readlink(link_path), "subdir/test2.txt")
Example #4
    def test_stat_attributes(self):
        if not hasattr(os, "stat"):
            return

        import stat

        result = os.stat(self.fname)

        # Make sure direct access works
        self.assertEquals(result[stat.ST_SIZE], 3)
        self.assertEquals(result.st_size, 3)

        import sys

        # Make sure all the attributes are there
        members = dir(result)
        for name in dir(stat):
            if name[:3] == "ST_":
                attr = name.lower()
                self.assertEquals(getattr(result, attr), result[getattr(stat, name)])
                self.assert_(attr in members)

        try:
            result[200]
            self.fail("No exception thrown")
        except IndexError:
            pass

        # Make sure that assignment fails
        try:
            result.st_mode = 1
            self.fail("No exception thrown")
        except TypeError:
            pass

        try:
            result.st_rdev = 1
            self.fail("No exception thrown")
        except (AttributeError, TypeError):
            pass

        try:
            result.parrot = 1
            self.fail("No exception thrown")
        except AttributeError:
            pass

        # Use the stat_result constructor with a too-short tuple.
        try:
            result2 = os.stat_result((10,))
            self.fail("No exception thrown")
        except TypeError:
            pass

        # Use the constructor with a too-long tuple.
        try:
            result2 = os.stat_result((0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14))
        except TypeError:
            pass
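The too-short and too-long tuples above probe the constructor's bounds. os.stat_result is a struct sequence, and its class attributes expose those bounds directly; whether a 15-tuple raises TypeError depends on the platform's total field count, which is why the last check tolerates either outcome. A quick way to inspect the bounds (sketch):

    import os

    # At least n_sequence_fields items are required by the constructor;
    # at most n_fields are accepted.
    print(os.stat_result.n_sequence_fields)  # 10 indexable fields
    print(os.stat_result.n_fields)           # total fields, platform-dependent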
Example #5
    def test_stat_attributes(self):
        import stat
        result = os.stat(self.fname)

        # Make sure direct access works
        self.assertEqual(result[stat.ST_SIZE], 3)
        self.assertEqual(result.st_size, 3)

        # Make sure all the attributes are there
        members = dir(result)
        for name in dir(stat):
            if name[:3] == 'ST_':
                attr = name.lower()
                if name.endswith("TIME"):
                    def trunc(x): return int(x)
                else:
                    def trunc(x): return x
                self.assertEqual(trunc(getattr(result, attr)),
                                 result[getattr(stat, name)])
                self.assertIn(attr, members)

        try:
            result[200]
            self.fail("No exception raised")
        except IndexError:
            pass

        # Make sure that assignment fails
        try:
            result.st_mode = 1
            self.fail("No exception raised")
        except (AttributeError, TypeError):
            pass

        try:
            result.st_rdev = 1
            self.fail("No exception raised")
        except (AttributeError, TypeError):
            pass

        try:
            result.parrot = 1
            self.fail("No exception raised")
        except AttributeError:
            pass

        # Use the stat_result constructor with a too-short tuple.
        try:
            result2 = os.stat_result((10,))
            self.fail("No exception raised")
        except TypeError:
            pass

        # Use the constructor with a too-long tuple.
        try:
            result2 = os.stat_result((0,1,2,3,4,5,6,7,8,9,10,11,12,13,14))
        except TypeError:
            pass
Example #6
def make_stat_result(tup):
    """Turn a tuple into an os.stat_result object."""
    positional = tup[:N_INDEXABLE_FIELDS]
    kwds = {}
    for i, name in enumerate(STAT_FIELD_NAMES[N_INDEXABLE_FIELDS:]):
        kwds[name] = tup[N_INDEXABLE_FIELDS + i]
    return os.stat_result(positional, kwds)
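This helper relies on os.stat_result accepting a dict as a second argument for fields beyond the ten indexable ones (STAT_FIELD_NAMES and N_INDEXABLE_FIELDS are internals of the surrounding project). A minimal sketch of the two-argument form:

    import os

    # Keyword-only fields such as st_rdev go in the dict, not the tuple.
    st = os.stat_result((0o644, 1, 2, 1, 0, 0, 0, 0, 0, 0), {"st_rdev": 5})
    assert st.st_rdev == 5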
Example #7
 def testStatResultFromStatEntry(self):
   stat_obj = os.stat_result([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
   fs_stat = filesystem.Stat("/foo", stat_obj)
   pathspec = rdf_paths.PathSpec(path="/foo", pathtype="OS")
   stat_entry = client_utils.StatEntryFromStat(
       fs_stat, pathspec, ext_attrs=False)
   self.assertEqual(stat_obj, client_utils.StatResultFromStatEntry(stat_entry))
Example #8
    def test_fd_closed_when_diskfile_open_raises_exception_race(self):
        # do_open() succeeds but read_metadata() fails(GlusterFS)
        _m_do_open = Mock(return_value=999)
        _m_do_fstat = Mock(return_value=
                           os.stat_result((33261, 2753735, 2053, 1, 1000,
                                           1000, 6873, 1431415969,
                                           1376895818, 1433139196)))
        _m_rmd = Mock(side_effect=IOError(errno.ENOENT,
                                          os.strerror(errno.ENOENT)))
        _m_do_close = Mock()
        _m_log = Mock()

        with nested(
                patch("gluster.swift.obj.diskfile.do_open", _m_do_open),
                patch("gluster.swift.obj.diskfile.do_fstat", _m_do_fstat),
                patch("gluster.swift.obj.diskfile.read_metadata", _m_rmd),
                patch("gluster.swift.obj.diskfile.do_close", _m_do_close),
                patch("gluster.swift.obj.diskfile.logging.warn", _m_log)):
            gdf = self._get_diskfile("vol0", "p57", "ufo47", "bar", "z")
            try:
                with gdf.open():
                    pass
            except DiskFileNotExist:
                pass
            else:
                self.fail("Expecting DiskFileNotExist")
            _m_do_fstat.assert_called_once_with(999)
            _m_rmd.assert_called_once_with(999)
            _m_do_close.assert_called_once_with(999)
            self.assertFalse(gdf._fd)
            # Make sure ENOENT failure is logged
            self.assertTrue("failed with ENOENT" in _m_log.call_args[0][0])
Example #9
 def stat(self):
     try:
         st_ino = self._st_ino
     except AttributeError:
         global INO_COUNTER
         INO_COUNTER += 1
         st_ino = self._st_ino = INO_COUNTER
     st_dev = 1
     st_nlink = 1
     st_size = self.getsize()
     st_mode = self.kind
     st_mode |= stat.S_IWUSR | stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH
     if stat.S_ISDIR(self.kind):
         st_mode |= stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
     if self.read_only:
         st_uid = 0  # read-only files are virtually owned by root
         st_gid = 0
     else:
         st_uid = UID  # read-write files are owned by this virtual user
         st_gid = GID
     st_atime = ATIME
     st_mtime = MTIME
     st_ctime = CTIME
     return os.stat_result(
         (st_mode, st_ino, st_dev, st_nlink, st_uid, st_gid, st_size, st_atime, st_mtime, st_ctime)
     )
Example #10
 def stat(self):
     if self._nostat:
         return None
     if self._stat is None:
         if not self.info.has_stat():
             self._nostat = True
             return None
         def dts(ts):
             return ts.tv_sec + ts.tv_nsec / 1000000000
         def dtsns(ts):
             return ts.tv_sec * 1000000000 + ts.tv_nsec
         # Note: This works in CPython 3.4 on platforms that have all of
         # the relevant fields (which includes BSD/OS X and Linux).
         statp = self.ftsent.fts_statp
         self._stat = os.stat_result((statp.st_mode, statp.st_ino, statp.st_dev,
                                      statp.st_nlink, statp.st_uid, statp.st_gid,
                                      statp.st_size,
                                      dts(statp.st_atimespec),
                                      dts(statp.st_mtimespec),
                                      dts(statp.st_ctimespec),
                                      int(dts(statp.st_atimespec)),
                                      int(dts(statp.st_mtimespec)),
                                      int(dts(statp.st_ctimespec)),
                                      dtsns(statp.st_atimespec),
                                      dtsns(statp.st_mtimespec),
                                      dtsns(statp.st_ctimespec),
                                      statp.st_blksize, statp.st_blocks,
                                      0, 0, 0, # rdev, flags, gen
                                      dts(statp.st_birthtimespec)))
     return self._stat
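The dts/dtsns pair mirrors how CPython itself exposes each timestamp twice: as float seconds (st_mtime) and as exact integer nanoseconds (st_mtime_ns). A quick check against a real stat call (sketch):

    import os

    st = os.stat(".")
    # The _ns field is exact; the float may lose sub-microsecond precision.
    assert abs(st.st_mtime - st.st_mtime_ns / 1e9) < 1.0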
Example #11
  def DownloadCollectionFiles(self, collection, output_writer, prefix):
    """Download all files from the collection and deduplicate along the way."""

    hashes = set()
    for fd_urn_batch in utils.Grouper(self.ResultsToUrns(collection),
                                      self.BATCH_SIZE):
      self.HeartBeat()

      for fd in aff4.FACTORY.MultiOpen(fd_urn_batch, token=self.token):
        self.state.total_files += 1

        # Any file-like object with data in AFF4 should inherit AFF4Stream.
        if isinstance(fd, aff4.AFF4Stream):
          archive_path = os.path.join(prefix, *fd.urn.Split())

          sha256_hash = fd.Get(fd.Schema.HASH, rdf_crypto.Hash()).sha256
          if not sha256_hash:
            continue
          self.state.archived_files += 1

          content_path = os.path.join(prefix, "hashes", str(sha256_hash))
          if sha256_hash not in hashes:
            # Make sure size of the original file is passed. It's required
            # when output_writer is StreamingTarWriter.
            st = os.stat_result((0644, 0, 0, 0, 0, 0, fd.size, 0, 0, 0))
            output_writer.WriteFromFD(fd, content_path, st=st)
            hashes.add(sha256_hash)
            self.Log("Written contents: " + content_path)

          up_prefix = "../" * len(fd.urn.Split())
          output_writer.WriteSymlink(up_prefix + content_path, archive_path)
          self.Log("Written symlink %s -> %s", archive_path,
                   up_prefix + content_path)
Example #12
  def _WriteDescription(self):
    """Writes description into a MANIFEST file in the archive."""

    manifest = {
        "description": self.description,
        "processed_files": self.total_files,
        "archived_files": self.archived_files,
        "ignored_files": len(self.ignored_files),
        "failed_files": len(self.failed_files)
    }
    if self.ignored_files:
      manifest["ignored_files_list"] = self.ignored_files
    if self.failed_files:
      manifest["failed_files_list"] = self.failed_files

    manifest_fd = cStringIO.StringIO()
    if self.total_files != self.archived_files:
      manifest_fd.write(self.FILES_SKIPPED_WARNING)
    manifest_fd.write(yaml.safe_dump(manifest))

    manifest_fd.seek(0)
    st = os.stat_result((0644, 0, 0, 0, 0, 0, len(manifest_fd.getvalue()), 0, 0,
                         0))

    for chunk in self.archive_generator.WriteFromFD(
        manifest_fd, os.path.join(self.prefix, "MANIFEST"), st=st):
      yield chunk
Example #13
 def original_file_stat(self, encodedFile):
     stat = encodedFile.stat()
     size = FileDecoder.decoded_size(encodedFile)
     return os.stat_result((stat.st_mode, stat.st_ino, stat.st_dev,
                            stat.st_nlink, stat.st_uid, stat.st_gid,
                            size,
                            stat.st_atime, stat.st_mtime, stat.st_ctime))
Example #14
def save_yaml(fname: str, data: JSON_TYPE) -> None:
    """Save a YAML file."""
    yaml = YAML(typ='rt')
    yaml.indent(sequence=4, offset=2)
    tmp_fname = fname + "__TEMP__"
    try:
        try:
            file_stat = os.stat(fname)
        except OSError:
            file_stat = stat_result(
                (0o644, -1, -1, -1, -1, -1, -1, -1, -1, -1))
        with open(os.open(tmp_fname, O_WRONLY | O_CREAT | O_TRUNC,
                          file_stat.st_mode), 'w', encoding='utf-8') \
                as temp_file:
            yaml.dump(data, temp_file)
        os.replace(tmp_fname, fname)
        if hasattr(os, 'chown') and file_stat.st_ctime > -1:
            try:
                os.chown(fname, file_stat.st_uid, file_stat.st_gid)
            except OSError:
                pass
    except YAMLError as exc:
        _LOGGER.error(str(exc))
        raise HomeAssistantError(exc)
    except OSError as exc:
        _LOGGER.exception('Saving YAML file %s failed: %s', fname, exc)
        raise WriteError(exc)
    finally:
        if os.path.exists(tmp_fname):
            try:
                os.remove(tmp_fname)
            except OSError as exc:
                # If we are cleaning up then something else went wrong, so
                # we should suppress likely follow-on errors in the cleanup
                _LOGGER.error("YAML replacement cleanup failed: %s", exc)
Example #15
 def _stat(self, path):
     data = self._traverse(path)
     if type(data) == dict:
         mode = stat.S_IFDIR
     else:
         mode = stat.S_IFREG
     return os.stat_result([mode, 0, 0, 0, 0, 0, 0, 0, 0, 0])
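Because only st_mode is filled in, the fabricated result still works with the stat module's type predicates. Sketch:

    import os
    import stat

    st = os.stat_result([stat.S_IFDIR, 0, 0, 0, 0, 0, 0, 0, 0, 0])
    assert stat.S_ISDIR(st.st_mode)
    assert not stat.S_ISREG(st.st_mode)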
Example #16
  def _GenerateDescription(self):
    """Generates description into a MANIFEST file in the archive."""

    manifest = {
        "description": self.description,
        "processed_files": self.total_files,
        "archived_files": self.archived_files,
        "ignored_files": len(self.ignored_files),
        "failed_files": len(self.failed_files)
    }
    if self.ignored_files:
      manifest["ignored_files_list"] = self.ignored_files
    if self.failed_files:
      manifest["failed_files_list"] = self.failed_files

    # TODO(hanuszczak): Manifest is a YAML file which is supposed to be
    # unicode-encoded format. However due to PyYAML incompetence we are given
    # `bytes` object as a result of dumping. It should be investigated and
    # changed to `StringIO` if possible.
    manifest_fd = io.BytesIO()
    if self.total_files != self.archived_files:
      manifest_fd.write(self.FILES_SKIPPED_WARNING)
    manifest_fd.write(yaml.safe_dump(manifest))

    manifest_fd.seek(0)
    st = os.stat_result((0o644, 0, 0, 0, 0, 0, len(manifest_fd.getvalue()), 0,
                         0, 0))

    for chunk in self.archive_generator.WriteFromFD(
        manifest_fd, os.path.join(self.prefix, "MANIFEST"), st=st):
      yield chunk
Example #17
    def _fetch_local_list_info(loc_list):
        for relative_file in loc_list:
            if relative_file == '-': continue

            full_name = loc_list[relative_file]['full_name']
            try:
                sr = os.stat_result(os.stat(full_name))
            except OSError, e:
                if e.errno == errno.ENOENT:
                    # file was removed async to us getting the list
                    continue
                else:
                    raise
            loc_list[relative_file].update({
                'size' : sr.st_size,
                'mtime' : sr.st_mtime,
                'dev'   : sr.st_dev,
                'inode' : sr.st_ino,
                'uid' : sr.st_uid,
                'gid' : sr.st_gid,
                'sr': sr # save it all, may need it in preserve_attrs_list
                ## TODO: Possibly more to save here...
            })
            if 'md5' in cfg.sync_checks:
                md5 = cache.md5(sr.st_dev, sr.st_ino, sr.st_mtime, sr.st_size)
                if md5 is None:
                        try:
                            md5 = loc_list.get_md5(relative_file) # this does the file I/O
                        except IOError:
                            continue
                        cache.add(sr.st_dev, sr.st_ino, sr.st_mtime, sr.st_size, md5)
                loc_list.record_hardlink(relative_file, sr.st_dev, sr.st_ino, md5, sr.st_size)
Example #18
    def stat(self, path):

        st_mode = FULL_CONTROL_MODE_FLAG
        _, bucket_name, key_name = self.parse_fspath(path)

        # Hack together a stat result.

        st_size = 0
        st_mtime = datetime.datetime(*time.strptime("1970-01-01", "%Y-%m-%d")[0:6])

        try:
            if not key_name: # Bucket
                # Return a part-bogus stat with the data we do have.
                st_mode = st_mode | DIR_MODE_FLAG
    
            else: # Key
                bucket = operations.connection.get_bucket(bucket_name)
                if (key_name[-1] == cloud_sep): # Virtual directory for hierarchical key.
                    st_mode = st_mode | DIR_MODE_FLAG
                else:
                    obj = bucket.get_key(key_name)
                    # Workaround os.sep crap.
                    if obj is None:
                        obj = bucket.get_key(key_name.replace(cloud_sep, os.sep))
                    if obj is None:
                         ftpserver.logerror("Cannot find object for path %s , key %s in bucket %s " % (path, key_name, bucket_name))
                         raise OSError(2, 'No such file or directory')
                    st_size = obj.size
                   
            return os.stat_result([st_mode, 0, 0, 0, 0, 0, st_size, 0, 0, 0])  #FIXME more stats (mtime)

        
        except Exception, e:
            ftpserver.logerror("Failed stat(%s) %s %s: %s " % (path, bucket_name, key_name, e))
            raise OSError(2, 'No such file or directory')
Example #19
    def find_data_to_stat(data):
        """Convert Win32 FIND_DATA struct to stat_result."""
        # First convert Win32 dwFileAttributes to st_mode
        attributes = data.dwFileAttributes
        st_mode = 0
        if attributes & FILE_ATTRIBUTE_DIRECTORY:
            st_mode |= S_IFDIR | 0o111
        else:
            st_mode |= S_IFREG
        if attributes & FILE_ATTRIBUTE_READONLY:
            st_mode |= 0o444
        else:
            st_mode |= 0o666
        if attributes & FILE_ATTRIBUTE_REPARSE_POINT:
            st_mode |= S_IFLNK

        st_size = data.nFileSizeHigh << 32 | data.nFileSizeLow
        st_atime = filetime_to_time(data.ftLastAccessTime)
        st_mtime = filetime_to_time(data.ftLastWriteTime)
        st_ctime = filetime_to_time(data.ftCreationTime)

        # Some fields set to zero per CPython's posixmodule.c: st_ino, st_dev,
        # st_nlink, st_uid, st_gid
        return os.stat_result((st_mode, 0, 0, 0, 0, 0, st_size, st_atime,
                               st_mtime, st_ctime))
Example #20
 def stat(self, path):
     model = self.get_model(path)
     if model:
         size = model.file.file.size
         return os.stat_result((0664, 0, 0, 0, 0, 0, size, 0, 0, 0))
     else:
         raise OSError(errno.ENOENT, "No such file or directory.")
Example #21
 def _stat(self, path):
     rc, out, _ = self.remote._session.run(
         "stat -c '%F,%f,%i,%d,%h,%u,%g,%s,%X,%Y,%Z' " + shquote(path), retcode=None)
     if rc != 0:
         return None
     statres = out.strip().split(",")
     mode = statres.pop(0).lower()  # %F: the human-readable file type
     # %f prints st_mode in hex; the remaining fields are decimal integers.
     fields = [int(statres[0], 16)] + [int(v) for v in statres[1:]]
     return mode, os.stat_result(fields)
Example #22
 def _type_to_stat(d_type):
     if d_type == DT_DIR:
         st_mode = stat.S_IFDIR | 0111
     else:
         st_mode = stat.S_IFREG
     st = os.stat_result((st_mode, None, None, None, None, None,
                          None, None, None, None))
     return st
Example #23
 def stat(self, node):
     r = list(os.stat('/'))
     if self.isfile(node):
         r[0] = 33188
     r[6] = self.getsize(node)
     r[7] = self.getmtime(node)
     r[8] = self.getmtime(node)
     r[9] = self.getmtime(node)
     return os.stat_result(r)
Example #24
def make_stat_result(tup):
    """Turn a tuple into an os.stat_result object."""
    positional = tuple(
        lltype.cast_primitive(TYPE, value) for value, (name, TYPE) in zip(tup, STAT_FIELDS)[:N_INDEXABLE_FIELDS]
    )
    kwds = {}
    for value, (name, TYPE) in zip(tup, STAT_FIELDS)[N_INDEXABLE_FIELDS:]:
        kwds[name] = lltype.cast_primitive(TYPE, value)
    return os.stat_result(positional, kwds)
Example #25
 def test_getRotatingFileHandler(self):
     """_getRotatingFileHandler() should create a file with 0600
     permissions (os.ST_WRITE | os.ST_APPEND).
     """
     filename = str(self.id()) + '.log'
     logHandler = util._getRotatingFileHandler(filename)
     self.assertTrue(os.path.isfile(filename))
     self.assertEqual(os.stat_result(os.stat(filename)).st_mode, 33152)
     self.assertIsInstance(logHandler(),
                           util.logging.handlers.RotatingFileHandler)
Example #26
 def _GenerateClientInfo(self, client_fd):
   summary = yaml.safe_dump(
       client_fd.GetSummary().ToPrimitiveDict(serialize_leaf_fields=True))
   client_info_path = os.path.join(self.prefix,
                                   client_fd.urn.Basename(),
                                   "client_info.yaml")
   st = os.stat_result((0o644, 0, 0, 0, 0, 0, len(summary), 0, 0, 0))
   yield self.archive_generator.WriteFileHeader(client_info_path, st=st)
   yield self.archive_generator.WriteFileChunk(summary)
   yield self.archive_generator.WriteFileFooter()
Example #27
 def stat(self, path):
     _, bucket, name = self.parse_fspath(path)
     if not name:
         raise OSError(40, 'unsupported')
     try:
         bucket = operations.connection.get_bucket(bucket)
         obj = bucket.get_key(name)
         return os.stat_result((666, 0L, 0L, 0, 0, 0, obj.size, 0, 0, 0))
     except:
         raise OSError(2, 'No such file or directory')
Example #28
    def stat(self, path):
        stat = os.lstat(path)

        # Adjust st_*time so the timestamps are UTC
        _stat = list(stat)
        _stat[7] = datetime.utcfromtimestamp(_stat[7]).timestamp()
        _stat[8] = datetime.utcfromtimestamp(_stat[8]).timestamp()
        _stat[9] = datetime.utcfromtimestamp(_stat[9]).timestamp()
        
        return os.stat_result(_stat)
Example #29
def MyStat(path):
  stat_obj = MyStat.old_target(path)
  if path.endswith("auth.log"):
    res = list(stat_obj)
    # Sets atime, ctime, and mtime to some time in 2022.
    res[-1] = 1672466423
    res[-2] = 1672466423
    res[-3] = 1672466423
    return os.stat_result(res)
  return stat_obj
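This list-round-trip idiom (list(st), mutate, rebuild) recurs in several of these examples. Note that list(st) yields only the ten indexable fields, so keyword-only fields such as st_blocks or st_rdev are dropped by the round trip. A reusable helper sketch (hypothetical, not from the quoted project):

    import os

    def with_mtime(st: os.stat_result, mtime: int) -> os.stat_result:
        fields = list(st)  # the ten indexable fields only
        fields[8] = mtime  # index 8 is st_mtime
        return os.stat_result(fields)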
Example #30
 def iterdir_stat(path=".", pattern="*", fields=None):
     """See iterdir_stat.__doc__ below for docstring."""
     names = os.listdir(path)
     if pattern != "*":
         names = fnmatch.filter(names, pattern)
     for name in names:
         if fields is not None:
             st = os.stat(os.path.join(path, name))
         else:
             st = os.stat_result((None,) * 10)
         yield (name, st)
Example #31
    def patch_osstat(self,
                     path,
                     st_mode=16877,
                     st_ino=742635,
                     st_dev=234881026,
                     st_nlink=51,
                     st_uid=501,
                     st_gid=20,
                     st_size=1734,
                     st_atime=1257942648,
                     st_mtime=1257873561,
                     st_ctime=1257873561):
        """ Patches os.stat for `path`.
    
        Patching os.stat can be tricky, because it can mess much more than what you're trying to test.
        Also, it can be cumbersome to do it. This method lets you do it easily. Just specify a path
        for which you want to patch os.stat, and specify the values through **kwargs. The defaults
        here are just some stats (that make sense) to fill up.
    
        Example call: monkeyplus.patch_osstat('foo/bar', st_mtime=42)
        """
        if not hasattr(self, '_patched_osstat'):
            # first osstat mock, actually install the mock
            self._patched_osstat = {}  # path: os.stat_result
            old_osstat = os.stat

            def fake_osstat(path):
                try:
                    return self._patched_osstat[path]
                except KeyError:
                    return old_osstat(path)

            self.setattr(os, 'stat', fake_osstat)
        st_seq = [
            st_mode, st_ino, st_dev, st_nlink, st_uid, st_gid, st_size,
            st_atime, st_mtime, st_ctime
        ]
        self._patched_osstat[path] = os.stat_result(st_seq)
Example #32
def save_yaml(fname: str, data: JSON_TYPE) -> None:
    """Save a YAML file."""
    yaml = YAML(typ="rt")
    yaml.indent(sequence=4, offset=2)
    tmp_fname = f"{fname}__TEMP__"
    try:
        try:
            file_stat = os.stat(fname)
        except OSError:
            file_stat = stat_result(
                (0o644, -1, -1, -1, -1, -1, -1, -1, -1, -1))
        with open(
                os.open(tmp_fname, O_WRONLY | O_CREAT | O_TRUNC,
                        file_stat.st_mode),
                "w",
                encoding="utf-8",
        ) as temp_file:
            yaml.dump(data, temp_file)
        os.replace(tmp_fname, fname)
        if hasattr(os, "chown") and file_stat.st_ctime > -1:
            try:
                os.chown(fname, file_stat.st_uid, file_stat.st_gid)
            except OSError:
                pass
    except YAMLError as exc:
        _LOGGER.error(str(exc))
        raise HomeAssistantError(exc)
    except OSError as exc:
        _LOGGER.exception("Saving YAML file %s failed: %s", fname, exc)
        raise WriteError(exc)
    finally:
        if os.path.exists(tmp_fname):
            try:
                os.remove(tmp_fname)
            except OSError as exc:
                # If we are cleaning up then something else went wrong, so
                # we should suppress likely follow-on errors in the cleanup
                _LOGGER.error("YAML replacement cleanup failed: %s", exc)
Example #33
def mostRecentFile(dir, file_pattern, after_time=None):
    """Search 'dir' for all files matching glob 'file_pattern',
    and return the mostRecent one(s). If 'after_time' is given,
    it should be a number of seconds since the epoch UTC; no files
    will be returned if none is on or after this time.

    Returns a list of the full paths to file(s), or an empty list.
    More than one file may be returned, in the case that they have
    the same modification time.

    """
    if not os.path.isdir(dir):
        return [ ]
    search_path = os.path.join(dir, file_pattern)
    # make a sortable list of filenames and modification times
    timed_files = [(os.stat_result(os.stat(fname)).st_mtime, fname) 
                   for fname in glob.glob(search_path)]
    # if the list is empty, stop
    if not timed_files:
        return [ ]
    # reverse sort so most-recent is first
    timed_files.sort(reverse=True)
    most_recent_time = timed_files[0][0]
    # return nothing if the most recent time is not
    # after the cutoff
    if after_time is not None and most_recent_time < int(after_time):
        return [ ]
    # start with most recent, then append all 'ties'
    result = [ timed_files[0][1] ]
    i = 1
    try:
        while timed_files[i][0] == most_recent_time:
            result.append(timed_files[i][1])
            i += 1
    except IndexError:
        pass # ran off end of list. all ties (!)
    # return all 'most recent' files
    return result
Example #34
File: vfs.py Project: hfakar/grr
  def _StreamFds(self, archive_generator, prefix, fds, token=None):
    prev_fd = None
    for fd, chunk, exception in aff4.AFF4Stream.MultiStream(fds):
      if exception:
        logging.exception(exception)
        continue

      if prev_fd != fd:
        if prev_fd:
          yield archive_generator.WriteFileFooter()
        prev_fd = fd

        components = fd.urn.Split()
        # Skipping first component: client id.
        content_path = os.path.join(prefix, *components[1:])
        # TODO(user): Export meaningful file metadata.
        st = os.stat_result((0o644, 0, 0, 0, 0, 0, fd.size, 0, 0, 0))
        yield archive_generator.WriteFileHeader(content_path, st=st)

      yield archive_generator.WriteFileChunk(chunk)

    if prev_fd:
      yield archive_generator.WriteFileFooter()
Example #35
def test_stat_ftruncate():
    from pypy.translator.sandbox.sandlib import RESULTTYPE_STATRESULT
    from pypy.rlib.rarithmetic import r_longlong
    r0x12380000007 = r_longlong(0x12380000007)

    def entry_point(argv):
        st = os.stat("somewhere")
        os.ftruncate(st.st_mode, st.st_size)  # nonsense, just to see outside
        return 0

    exe = compile(entry_point)
    g, f = os.popen2(exe, "t", 0)
    st = os.stat_result((55, 0, 0, 0, 0, 0, 0x12380000007, 0, 0, 0))
    expect(f,
           g,
           "ll_os.ll_os_stat", ("somewhere", ),
           st,
           resulttype=RESULTTYPE_STATRESULT)
    expect(f, g, "ll_os.ll_os_ftruncate", (55, 0x12380000007), None)
    g.close()
    tail = f.read()
    f.close()
    assert tail == ""
Example #36
def test_stat_ftruncate():
    from pypy.rpython.module.ll_os_stat import s_StatResult
    from pypy.rlib.rarithmetic import r_longlong
    r0x12380000007 = r_longlong(0x12380000007)

    def entry_point(argv):
        st = os.stat("somewhere")
        os.ftruncate(st.st_mode, st.st_size)  # nonsense, just to see outside
        return 0

    exe = compile(entry_point)
    g, f = os.popen2(exe, "t", 0)
    st = os.stat_result((55, 0, 0, 0, 0, 0, 0x12380000007, 0, 0, 0))
    expect(f,
           g,
           "ll_os.ll_os_stat", ("somewhere", ),
           st,
           resulttype=s_StatResult)
    expect(f, g, "ll_os.ll_os_ftruncate", (55, 0x12380000007), None)
    g.close()
    tail = f.read()
    f.close()
    assert tail == ""
Example #37
    def _fake_init(self, path: str) -> os.stat_result:
        """Prime the cache with a fake __init__.py file.

        This makes code that looks for path believe an empty file by
        that name exists.  Should only be called after
        init_under_package_root() returns True.
        """
        dirname, basename = os.path.split(path)
        assert basename == '__init__.py', path
        assert not os.path.exists(path), path  # Not cached!
        dirname = os.path.normpath(dirname)
        st = self.stat(dirname)  # May raise OSError
        # Get stat result as a list so we can modify it.
        seq: List[float] = list(st)
        seq[stat.ST_MODE] = stat.S_IFREG | 0o444
        seq[stat.ST_INO] = 1
        seq[stat.ST_NLINK] = 1
        seq[stat.ST_SIZE] = 0
        st = os.stat_result(seq)
        self.stat_cache[path] = st
        # Make listdir() and read() also pretend this file exists.
        self.fake_package_cache.add(dirname)
        return st
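The seq[stat.ST_MODE] assignments above work because the stat module's ST_* constants are simply the indexes of the ten positional fields. Sketch:

    import os
    import stat

    st = os.stat(".")
    assert st[stat.ST_MODE] == st.st_mode
    assert stat.ST_SIZE == 6  # st_size is the seventh positional field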
Example #38
def test_stat_ftruncate():
    from rpython.translator.sandbox.sandlib import RESULTTYPE_STATRESULT
    from rpython.rlib.rarithmetic import r_longlong
    r0x12380000007 = r_longlong(0x12380000007)

    if not hasattr(os, 'ftruncate'):
        py.test.skip("posix only")

    def entry_point(argv):
        st = os.stat("somewhere")
        os.ftruncate(st.st_mode, st.st_size)  # nonsense, just to see outside
        return 0

    exe = compile(entry_point)
    g, f = run_in_subprocess(exe)
    st = os.stat_result((55, 0, 0, 0, 0, 0, 0x12380000007, 0, 0, 0))
    expect(f, g, "ll_os.ll_os_stat", ("somewhere",), st,
           resulttype = RESULTTYPE_STATRESULT)
    expect(f, g, "ll_os.ll_os_ftruncate", (55, 0x12380000007), None)
    g.close()
    tail = f.read()
    f.close()
    assert tail == ""
Example #39
    def stat(self,
             path: str,
             *,
             dir_fd: t.Optional = None,
             follow_symlinks: bool = True) -> t.Optional[os.stat_result]:
        # Linux stat accepts the following format sequences for the minimum required
        # set of values needed for os.stat_result:
        #   st_mode = %f,   st_ino = %i,    st_dev = %d,   st_nlink = %h
        #   st_uid = %u,    st_gid = %g,    st_size = %s,  st_atime = %X
        #   st_mtime = %Y,  st_ctime = %Z
        _format = r"%f %i %d %h %u %g %s %X %Y %Z"
        _output = self.command(
            ["stat", "-L" if follow_symlinks else None, "-c", _format, path])

        if not _output.ok:
            return None

        _vals = _output.stdout.split()
        assert len(_vals) == 10, "the formatted 'stat' output should be 10 elements"
        _vals[0] = int(_vals[0], base=16)  # raw access mode is in hex
        _stat_result = tuple(int(v) for v in _vals)

        return os.stat_result(_stat_result)
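Note the special handling of the first value: stat's %f format prints st_mode in hexadecimal, unlike the remaining decimal fields. For example, a directory with 0o755 permissions prints as 41ed:

    assert int("41ed", 16) == 0o040755  # S_IFDIR | 0o755 == 16877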
Example #40
    def get_file_list(self, job_id):
        file_list = []

        if self.conn is None:
            return file_list

        try:
            with self.conn:
                c = self.conn.execute(
                    '''SELECT * FROM files WHERE job_id = ?''', (job_id, ))
                for row in c:
                    file = json.loads(row['file'])
                    file['id'] = row['id']
                    file['status'] = row['status']
                    file['message'] = row['message']
                    file['lstat'] = os.stat_result(file['lstat'])

                    file_list.append(file)

                c.close()
        except sqlite3.OperationalError:
            pass

        return file_list
Example #41
    def getattr(self, fake_path):
        if fake_path in ('/tmp', '/new'):
            stat_result = super(PyMailTagsFileSystem, self).getattr('/cur')
        else:
            stat_result = super(PyMailTagsFileSystem, self).getattr(fake_path)

        st_nlink = stat_result.st_nlink
        if fake_path == '/':
            st_nlink = 5
        elif fake_path in ('/tmp', '/new'):
            st_nlink = 2

        return os.stat_result((
            stat_result.st_mode,
            stat_result.st_ino,
            stat_result.st_dev,
            st_nlink,
            stat_result.st_uid,
            stat_result.st_gid,
            stat_result.st_size,
            stat_result.st_atime,
            stat_result.st_mtime,
            stat_result.st_ctime,
        ))
Example #42
    def _GenerateDescription(self, processed_files: Dict[str, str],
                             missing_files: Iterable[str]) -> Iterable[bytes]:
        """Generates a MANIFEST file in the archive."""

        manifest = {
            "processed_files": processed_files,
            "missing_files": missing_files,
            "client_id": self.flow.client_id,
            "flow_id": self.flow.flow_id,
        }

        manifest_fd = io.BytesIO()
        manifest_fd.write(yaml.Dump(manifest).encode("utf-8"))

        manifest_fd.seek(0)
        st = os.stat_result(
            (0o644, 0, 0, 0, 0, 0, len(manifest_fd.getvalue()), 0, 0, 0))

        for chunk in self.archive_generator.WriteFromFD(manifest_fd,
                                                        os.path.join(
                                                            self.prefix,
                                                            "MANIFEST"),
                                                        st=st):
            yield chunk
Example #43
    def _WriteFileChunk(self, chunk: file_store.StreamedFileChunk,
                        archive_paths_by_id: Dict[rdf_objects.PathID, str]):
        """Yields binary chunks, respecting archive file headers and footers.

    Args:
      chunk: the StreamedFileChunk to be written
      archive_paths_by_id:
    """
        if chunk.chunk_index == 0:
            # Make sure size of the original file is passed. It's required
            # when output_writer is StreamingTarWriter.
            st = os.stat_result(
                (0o644, 0, 0, 0, 0, 0, chunk.total_size, 0, 0, 0))
            archive_path = (archive_paths_by_id
                            or {}).get(chunk.client_path.path_id)
            target_path = os.path.join(self.prefix, archive_path)

            yield self.archive_generator.WriteFileHeader(target_path, st=st)

        yield self.archive_generator.WriteFileChunk(chunk.data)

        if chunk.chunk_index == chunk.total_chunks - 1:
            self.num_archived_files += 1
            yield self.archive_generator.WriteFileFooter()
Example #44
    def stat(self):
        meta = self.client._get_metadata(self)
        if meta is None:
            raise NoStatError(
                f"No stats available for {self}; it may be a directory or not exist."
            )

        try:
            mtime = meta["updated"].timestamp()
        except KeyError:
            mtime = 0

        return os.stat_result((
            None,  # mode
            None,  # ino
            self.cloud_prefix,  # dev,
            None,  # nlink,
            None,  # uid,
            None,  # gid,
            meta.get("size", 0),  # size,
            None,  # atime,
            mtime,  # mtime,
            None,  # ctime,
        ))
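Passing None for the unknown fields works because, in CPython, os.stat_result is a struct sequence that stores whatever objects it is given positionally; nothing is validated until a consumer reads a field. A sketch (the "gs://" placeholder stands in for the example's cloud_prefix):

    import os

    st = os.stat_result((None, None, "gs://", None, None, None, 123, None, 1.5e9, None))
    assert st.st_size == 123
    assert st.st_dev == "gs://"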
Example #45
    def getattr(self, path, fh=None):
        """
        stat/getattr syscall
        """
        if path == '/':
            return self._stat_dict(os.stat(LOCAL_ROOT))
        path = path[1:]
        if path in os.listdir(LOCAL_ROOT):
            stat = os.stat(join(LOCAL_ROOT, path))
            stat_dict = self._stat_dict(stat)
            stat_dict['st_uid'] = os.getuid()
            stat_dict['st_gid'] = os.getgid()
            return stat_dict

        host_ip = protocol.lookup(self.bootstrap_ip, path)

        try:
            pkt = protocol.construct_packet(protocol.Verbs.STAT_REQ,
                                            protocol.Status.OK,
                                            {'filename': path})
            header, payload = protocol.sock_send_recv(host_ip, pkt)
            logging.debug(f'getattr received {header}')
            if header.verb != protocol.Verbs.STAT_RES.name:
                raise FuseOSError(errno.EIO)
            if header.status == protocol.Status.ENOENT.name:
                raise FuseOSError(errno.ENOENT)
            stat = os.stat_result(payload['stat'])
            stat_dict = self._stat_dict(stat)
            stat_dict['st_uid'] = os.getuid()
            stat_dict['st_gid'] = os.getgid()
            return stat_dict
        except FuseOSError:
            raise
        except (ProtocolError, Exception) as ex:
            logging.exception(ex)
            raise FuseOSError(errno.EIO) from ex
Example #46
    def _GenerateDescription(self):
        """Generates description into a MANIFEST file in the archive."""

        manifest = {
            "description": self.description,
            "processed_files": len(self.processed_files),
            "archived_files": len(self.archived_files),
            "ignored_files": len(self.ignored_files),
            "failed_files": len(self.failed_files)
        }
        if self.ignored_files:
            manifest["ignored_files_list"] = [
                _ClientPathToString(cp, prefix="aff4:")
                for cp in self.ignored_files
            ]
        if self.failed_files:
            manifest["failed_files_list"] = [
                _ClientPathToString(cp, prefix="aff4:")
                for cp in self.failed_files
            ]

        manifest_fd = io.BytesIO()
        if self.total_files != len(self.archived_files):
            manifest_fd.write(self.FILES_SKIPPED_WARNING)
        manifest_fd.write(yaml.Dump(manifest).encode("utf-8"))

        manifest_fd.seek(0)
        st = os.stat_result(
            (0o644, 0, 0, 0, 0, 0, len(manifest_fd.getvalue()), 0, 0, 0))

        for chunk in self.archive_generator.WriteFromFD(manifest_fd,
                                                        os.path.join(
                                                            self.prefix,
                                                            "MANIFEST"),
                                                        st=st):
            yield chunk
Example #47
 def test_clamp_mtime_permission(self, utime_mock, stat_mock):
     """Test permission error when clamping mtime of output files/directories."""
     stat_mock.return_value = os.stat_result(
         (33261, 16535979, 64769, 1, 1000, 1000, 17081, 1581451059, 1581451059, 1581451059)
     )
     utime_mock.side_effect = PermissionError(1, "Operation not permitted")
     config = Config(
         env={"SOURCE_DATE_EPOCH": 1581433737}, mmdebstrap={"target": "/output/test.tar"}
     )
     mmdebstrap = Mmdebstrap(config)
     with self.assertLogs("bdebstrap", level="ERROR") as context_manager:
         mmdebstrap.clamp_mtime("/output")
         self.assertEqual(utime_mock.call_count, 3)
         self.assertEqual(
             [
                 "ERROR:bdebstrap:Failed to change modification time of '/output/manifest': "
                 "[Errno 1] Operation not permitted",
                 "ERROR:bdebstrap:Failed to change modification time of '/output/test.tar': "
                 "[Errno 1] Operation not permitted",
                 "ERROR:bdebstrap:Failed to change modification time of '/output': "
                 "[Errno 1] Operation not permitted",
             ],
             context_manager.output,
         )
Example #48
def unserialize(js):
    """Unserialize a JSON object into a cache dict."""
    return dict(((smart_str(key), os.stat_result(value)) for key, value in json.loads(js).iteritems()))
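The matching serialize step is not shown here. Since iterating an os.stat_result yields its ten indexable integer fields, a plausible counterpart looks like this (hypothetical sketch; keyword-only fields would not survive the round trip):

    import json
    import os

    def serialize(cache):
        # tuple(st) flattens the ten indexable fields into a JSON-friendly list.
        return json.dumps({key: tuple(st) for key, st in cache.items()})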
Example #49
    def _get_filelist_local(loc_list, local_uri, cache):
        info(u"Compiling list of local files...")

        if deunicodise(local_uri.basename()) == "-":
            loc_list["-"] = {
                'full_name_unicode' : '-',
                'full_name' : '-',
                'size' : -1,
                'mtime' : -1,
            }
            return loc_list, True
        if local_uri.isdir():
            local_base = deunicodise(local_uri.basename())
            local_path = deunicodise(local_uri.path())
            if cfg.follow_symlinks:
                filelist = _fswalk_follow_symlinks(local_path)
            else:
                filelist = _fswalk_no_symlinks(local_path)
            single_file = False
        else:
            local_base = ""
            local_path = deunicodise(local_uri.dirname())
            filelist = [( local_path, [], [deunicodise(local_uri.basename())] )]
            single_file = True
        for root, dirs, files in filelist:
            rel_root = root.replace(local_path, local_base, 1)
            for f in files:
                full_name = os.path.join(root, f)
                if not os.path.isfile(full_name):
                    continue
                if os.path.islink(full_name):
                    if not cfg.follow_symlinks:
                        continue
                relative_file = unicodise(os.path.join(rel_root, f))
                if os.path.sep != "/":
                    # Convert non-unix dir separators to '/'
                    relative_file = "/".join(relative_file.split(os.path.sep))
                if cfg.urlencoding_mode == "normal":
                    relative_file = replace_nonprintables(relative_file)
                if relative_file.startswith('./'):
                    relative_file = relative_file[2:]
                sr = os.stat_result(os.lstat(full_name))
                loc_list[relative_file] = {
                    'full_name_unicode' : unicodise(full_name),
                    'full_name' : full_name,
                    'size' : sr.st_size,
                    'mtime' : sr.st_mtime,
                    'dev'   : sr.st_dev,
                    'inode' : sr.st_ino,
                    'uid' : sr.st_uid,
                    'gid' : sr.st_gid,
                    'sr': sr # save it all, may need it in preserve_attrs_list
                    ## TODO: Possibly more to save here...
                }
                if 'md5' in cfg.sync_checks:
                    md5 = cache.md5(sr.st_dev, sr.st_ino, sr.st_mtime, sr.st_size)
                    if md5 is None:
                            try:
                                md5 = loc_list.get_md5(relative_file) # this does the file I/O
                            except IOError:
                                continue
                            cache.add(sr.st_dev, sr.st_ino, sr.st_mtime, sr.st_size, md5)
                    loc_list.record_hardlink(relative_file, sr.st_dev, sr.st_ino, md5)
        return loc_list, single_file
Example #50
 def stat_side_effect(path):
     stat_list = list(os.stat(path))
     # patch the modified timestamp
     stat_list[8] = file_timestamp
     return os.stat_result(stat_list)
Example #51
        return "".join((capitalizeFirstLetter(p) for p in name.split("_")))
    else:
        return name


def selectRights(st: os.stat_result, uid: int, gid: int) -> int:
    """Computes rwx single triple effective rights based on files permissions, current user and its group"""
    rights = st.st_mode & 7  # nobody
    if gid == st.st_gid:
        rights |= (st.st_mode >> 3) & 7  # group
    if uid == st.st_uid:
        rights |= (st.st_mode >> 6) & 7  # owner
    return rights


assert selectRights(os.stat_result((0o421, 0, 0, 1, 101, 101, 0, 0, 0, 0)),
                    101, 101) == 7
assert selectRights(os.stat_result((0o400, 0, 0, 1, 101, 101, 0, 0, 0, 0)),
                    101, 101) == 4
assert selectRights(os.stat_result((0o020, 0, 0, 1, 101, 101, 0, 0, 0, 0)),
                    101, 101) == 2
assert selectRights(os.stat_result((0o001, 0, 0, 1, 101, 101, 0, 0, 0, 0)),
                    101, 101) == 1

assert selectRights(os.stat_result((0o421, 0, 0, 1, 1, 101, 0, 0, 0, 0)), 101,
                    101) == 3
assert selectRights(os.stat_result((0o400, 0, 0, 1, 1, 101, 0, 0, 0, 0)), 101,
                    101) == 0
assert selectRights(os.stat_result((0o020, 0, 0, 1, 1, 101, 0, 0, 0, 0)), 101,
                    101) == 2
assert selectRights(os.stat_result((0o001, 0, 0, 1, 1, 101, 0, 0, 0, 0)), 101,
                    101) == 1
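Worked case for the second block of asserts: with mode 0o421, a caller whose gid matches but whose uid does not gets the "nobody" bits (0o421 & 7 == 1) plus the group bits ((0o421 >> 3) & 7 == 2), i.e. 3, which is exactly what the first assert of that block checks.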
Example #52
        sys.exit(1)

try:
    ## Remove 'MANIFEST' file to force
    ## distutils to recreate it.
    ## Only in "sdist" stage. Otherwise
    ## it makes life difficult to packagers.
    if sys.argv[1] == "sdist":
        os.unlink("MANIFEST")
except:
    pass

## Re-create the manpage
## (Beware! Perl script on the loose!!)
if sys.argv[1] == "sdist":
    if os.stat_result(os.stat("s3cmd.1")).st_mtime < os.stat_result(
            os.stat("s3cmd")).st_mtime:
        sys.stderr.write("Re-create man page first!\n")
        sys.stderr.write(
            "Run: ./s3cmd --help | ./format-manpage.pl > s3cmd.1\n")
        sys.exit(1)

## Don't install manpages and docs when $S3CMD_PACKAGING is set
## This was a requirement of Debian package maintainer.
if not os.getenv("S3CMD_PACKAGING"):
    man_path = os.getenv("S3CMD_INSTPATH_MAN") or "share/man"
    doc_path = os.getenv("S3CMD_INSTPATH_DOC") or "share/doc/packages"
    data_files = [
        (doc_path + "/s3cmd", ["README.md", "INSTALL", "NEWS"]),
        (man_path + "/man1", ["s3cmd.1"]),
    ]
Example #53
    def fstat(self, name):
        st = os.stat_result(
            (3, 2, 1, 4, 5, 6, 1024, 900.000000800, 3000, 5000))

        return st
Example #54
    def Generate(self, items, token=None):
        """Generates archive from a given collection.

    Iterates the collection and generates an archive by yielding contents
    of every referenced AFF4Stream.

    Args:
      items: Iterable with items that point to aff4 paths.
      token: User's ACLToken.

    Yields:
      Binary chunks comprising the generated archive.
    """
        clients = set()
        for fd_urn_batch in collection.Batch(self._ItemsToUrns(items),
                                             self.BATCH_SIZE):

            fds_to_write = {}
            for fd in aff4.FACTORY.MultiOpen(fd_urn_batch, token=token):
                self.total_files += 1

                # Derive a ClientPath from AFF4 URN to make new and old
                # archive_generator predicate input consistent.
                # TODO(user): This code is clearly hacky and intended to be removed.
                urn_components = fd.urn.Split()
                if urn_components[1:3] != ["fs", "os"]:
                    raise AssertionError(
                        "URN components are expected to start with "
                        "client, 'fs', 'os'. Got %r" % (urn_components, ))

                client_path = db.ClientPath.OS(client_id=urn_components[0],
                                               components=urn_components[3:])

                if not self.predicate(client_path):
                    self.ignored_files.append(utils.SmartUnicode(fd.urn))
                    continue

                # Any file-like object with data in AFF4 should inherit AFF4Stream.
                if isinstance(fd, aff4.AFF4Stream):
                    urn_components = fd.urn.Split()
                    clients.add(rdf_client.ClientURN(urn_components[0]))

                    content_path = os.path.join(self.prefix, *urn_components)
                    self.archived_files += 1

                    # Make sure size of the original file is passed. It's required
                    # when output_writer is StreamingTarWriter.
                    st = os.stat_result(
                        (0o644, 0, 0, 0, 0, 0, fd.size, 0, 0, 0))
                    fds_to_write[fd] = (content_path, st)

            if fds_to_write:
                prev_fd = None
                for fd, chunk, exception in aff4.AFF4Stream.MultiStream(
                        fds_to_write):
                    if exception:
                        logging.exception(exception)

                        self.archived_files -= 1
                        self.failed_files.append(utils.SmartUnicode(fd.urn))
                        continue

                    if prev_fd != fd:
                        if prev_fd:
                            yield self.archive_generator.WriteFileFooter()
                        prev_fd = fd

                        content_path, st = fds_to_write[fd]
                        yield self.archive_generator.WriteFileHeader(
                            content_path, st=st)

                    yield self.archive_generator.WriteFileChunk(chunk)

                if self.archive_generator.is_file_write_in_progress:
                    yield self.archive_generator.WriteFileFooter()

        if clients:
            for client_urn_batch in collection.Batch(clients, self.BATCH_SIZE):
                for fd in aff4.FACTORY.MultiOpen(
                        client_urn_batch,
                        aff4_type=aff4_grr.VFSGRRClient,
                        token=token):
                    for chunk in self._GenerateClientInfo(fd):
                        yield chunk

        for chunk in self._GenerateDescription():
            yield chunk

        yield self.archive_generator.Close()
Example #55
 def _stat(self, real, path, *args, **kwargs):
     info = list(real(self._real_path(path)))
     if path in self._ownership:
          info[4:6] = self._ownership[path]  # assuming a (st_uid, st_gid) pair
     return os.stat_result(info)
Example #56
 def mystat(filename):
     stat_info = os.lstat.old_target(filename)
     stat_list = list(stat_info)
     # Adjust the UID.
     stat_list[4] += 1
     return os.stat_result(stat_list)
Example #57
os.makedirs(r'e:\a\b\c\d')
# Recursively remove the empty directories at each level
os.removedirs(r'e:\a\b\c\d')
# Create a single directory level; cannot create recursively
os.mkdir(r'e:/test_python')
# Remove a single directory level; the directory must be empty
os.rmdir(r'e:\test_python')
# List the contents of the given directory as a list
print(os.listdir('.'))
# Remove the given file
# os.remove(r'e:\test.txt')
# Rename test.txt to test1.txt
# os.rename(r'e:\test.txt', r'e:\test1.txt')
# Show the file's stat information
print(os.stat(r'e:\test.dcm'))
"""
os.stat_result(st_mode=33206, st_ino=3659174697270772,
st_dev=505418071, st_nlink=1, st_uid=0, st_gid=0,
st_size=19468, st_atime=1515546652, st_mtime=1515546652,
st_ctime=1515546652)
"""
# Path separator of the current operating system
print(os.sep)
# Line separator of the current operating system
print(os.linesep)
# Separator between entries in environment variables such as PATH
print(os.pathsep)
# Show the system's environment variables
print(os.environ)
# Show the name of the current operating system
print(os.name)
Example #58
                assert isinstance(v, six.string_types), (type(v), v)
            out.append(v)
    return out


def list_flatten_strict(lst):
    return list_flatten(lst, strict=1)


def rstrip(s, suffix):
    if s.endswith(suffix):
        s = s[:-len(suffix)]
    return s


_os_stat_result_null = os.stat_result(
    [0 for n in range(os.stat_result.n_sequence_fields)])


def os_stat_safe(fname):
    if os.path.isfile(fname):
        return os.stat(fname)
    else:
        return _os_stat_result_null


class Depend(Path):
    '''
    Abstract Dependency
    '''
    pass
Example #59
    def check_stat_attributes(self, fname):
        if not hasattr(os, "stat"):
            return

        import stat
        result = os.stat(fname)

        # Make sure direct access works
        self.assertEqual(result[stat.ST_SIZE], 3)
        self.assertEqual(result.st_size, 3)

        # Make sure all the attributes are there
        members = dir(result)
        for name in dir(stat):
            if name[:3] == 'ST_':
                attr = name.lower()
                if name.endswith("TIME"):

                    def trunc(x):
                        return int(x)
                else:

                    def trunc(x):
                        return x

                self.assertEqual(trunc(getattr(result, attr)),
                                 result[getattr(stat, name)])
                self.assertIn(attr, members)

        try:
            result[200]
            self.fail("No exception thrown")
        except IndexError:
            pass

        # Make sure that assignment fails
        try:
            result.st_mode = 1
            self.fail("No exception thrown")
        except AttributeError:
            pass

        try:
            result.st_rdev = 1
            self.fail("No exception thrown")
        except (AttributeError, TypeError):
            pass

        try:
            result.parrot = 1
            self.fail("No exception thrown")
        except AttributeError:
            pass

        # Use the stat_result constructor with a too-short tuple.
        try:
            result2 = os.stat_result((10, ))
            self.fail("No exception thrown")
        except TypeError:
            pass

        # Use the constructor with a too-long tuple.
        try:
            result2 = os.stat_result(
                (0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14))
        except TypeError:
            pass
Example #60
                         ino=12345,
                         dev=12345,
                         nlink=1,
                         uid=1000,
                         gid=1000,
                         size=10,
                         atime=123,
                         mtime=123,
                         ctime=123):
        """
        Build an os.stat_result() instance with many default values.

        :param mode, ino, dev, nlink, uid, gid, size, atime, mtime, ctime:
                See help(os.stat_result).
        """
        return os.stat_result(
            (mode, ino, dev, nlink, uid, gid, size, atime, mtime, ctime))

    def test_get_new_files_invalid_path(self):
        """
        Test directory_source.get_new_files() on an invalid path.
        """
        path = '/some/invalid/path'
        os.listdir.expect_call(path).and_raises(OSError('Error'))

        s = source.directory_source(self.db_mock, path)
        self.assertRaises(OSError, s.get_new_files)
        self.god.check_playback()

    def test_get_new_files_stat_fails(self):
        """
        Test directory_source.get_new_files() when stat fails.