def test_cleanup(self):
   # Inject an item without a state.json. It will be deleted on cleanup.
   h_a = self.to_hash('a')[0]
   isolateserver.file_write(os.path.join(self.tempdir, h_a), 'a')
   cache = self.get_cache()
   self.assertEqual([], sorted(cache._lru._items.iteritems()))
   self.assertEqual(
       sorted([h_a, u'state.json']), sorted(os.listdir(self.tempdir)))
   cache.cleanup()
   self.assertEqual([u'state.json'], os.listdir(self.tempdir))
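For reference, a minimal standalone sketch of the behaviour this test asserts, assuming (hypothetically) that cleanup() drops every file in the cache directory that is neither state.json nor a tracked digest; the helper name and signature below are illustrative, not the real isolateserver API:

import os

def cleanup_untracked(cache_dir, known_digests):
  # Remove every file that is neither the state file nor a tracked digest.
  # Mirrors what test_cleanup expects: the injected, untracked item h_a is
  # gone after cleanup(), while state.json survives.
  for filename in os.listdir(cache_dir):
    if filename == u'state.json' or filename in known_digests:
      continue
    os.remove(os.path.join(cache_dir, filename))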
 def write(self, digest, content):
   path = self._path(digest)
   try:
     size = isolateserver.file_write(path, content)
   except:
     # There are two possible places where an exception can occur:
     #   1) Inside the |content| generator, in case of network or unzipping errors.
     #   2) Inside file_write itself, in case of disk IO errors.
     # In either case, delete the incomplete file and propagate the exception
     # to the caller; it will be logged there.
     try_remove(path)
     raise
   with self._lock:
     self._add(digest, size)
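The tests further down call write() with a (digest, content) pair, e.g. cache.write(*self.to_hash('c')). A rough stand-in for that helper, assuming the digest is the hex SHA-1 of the content (the 40-character names such as 'a'*40 in the trimming test suggest this, but it is an assumption):

import hashlib

def to_hash(content):
  # Hypothetical equivalent of self.to_hash() in the tests: returns the
  # cache key (hex SHA-1 of the content) together with the content itself.
  # Usage sketch: digest, data = to_hash('c'); cache.write(digest, data)
  return hashlib.sha1(content.encode('utf-8')).hexdigest(), content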
 def write(self, digest, content):
   path = self._path(digest)
   # A stale broken file may remain from a previous run. It is also possible
   # that the file had its write access bit removed, which would cause the
   # file_write() call to fail to open it in write mode. Take no chances here.
   try_remove(path)
   try:
     size = isolateserver.file_write(path, content)
   except:
     # There are two possible places where an exception can occur:
     #   1) Inside the |content| generator, in case of network or unzipping errors.
     #   2) Inside file_write itself, in case of disk IO errors.
     # In either case, delete the incomplete file and propagate the exception
     # to the caller; it will be logged there.
     try_remove(path)
     raise
   # Make the file read-only in the cache. This has a few side effects since
   # the file node itself is modified, so every directory entry pointing to
   # this file becomes read-only. That is fine here because it is a new file.
   set_read_only(path, True)
   with self._lock:
     self._add(digest, size)
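The helpers used above are not part of this snippet. Minimal sketches under the assumption that try_remove() swallows a missing-file error and set_read_only() only toggles the write permission bits; the real implementations may differ:

import os
import stat

def try_remove(path):
  # Best-effort delete: ignore the error if the file does not exist.
  try:
    os.remove(path)
  except OSError:
    pass

def set_read_only(path, read_only):
  # Flip the write bits on the file node itself; as the comment above notes,
  # every directory entry pointing at the same node is affected.
  mode = os.stat(path).st_mode
  if read_only:
    mode &= ~(stat.S_IWUSR | stat.S_IWGRP | stat.S_IWOTH)
  else:
    mode |= stat.S_IWUSR
  os.chmod(path, mode)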
  def test_policies_active_trimming(self):
    # Inject two items without a state.json. They will be processed just fine.
    h_a = self.to_hash('a')[0]
    isolateserver.file_write(os.path.join(self.tempdir, h_a), 'a')
    h_b = self.to_hash('b')[0]
    isolateserver.file_write(os.path.join(self.tempdir, h_b), 'b')
    # Insert corrupted files; they will be deleted.
    # TODO(maruel): Verify cache.
    #isolateserver.file_write(os.path.join(self.tempdir, 'a'*40), 'z')
    isolateserver.file_write(os.path.join(self.tempdir, 'z'), 'z')

    # Cache limits (size and number of items) are not enforced while adding
    # items, but the free disk limit is.
    self._free_disk = 1004
    cache = self.get_cache()
    expected = sorted(((h_a, 1), (h_b, 1)))
    self.assertEqual(expected, sorted(cache._lru._items.iteritems()))
    self.assertEqual(set(), cache._protected)
    self.assertEqual(1004, cache._free_disk)

    h_c = cache.write(*self.to_hash('c'))
    # h_a and h_b may be randomly ordered. Assert that h_c is last.
    expected = sorted(((h_a, 1), (h_b, 1), (h_c, 1)))
    self.assertEqual(expected, sorted(cache._lru._items.iteritems()))
    # [0] is the one that gets evicted. Take the second one.
    kept = cache._lru._items.items()[1]
    self.assertEqual((h_c, 1), cache._lru._items.items()[-1])
    self.assertEqual({h_c}, cache._protected)
    self.assertEqual(1003, cache._free_disk)

    self._free_disk = 1003
    # Force a trim.
    with cache:
      pass
    expected = collections.OrderedDict([(kept[0], kept[1]), (h_c, 1)])
    self.assertEqual(expected, cache._lru._items)
    self.assertEqual({h_c}, cache._protected)
    self.assertEqual(1003, cache._free_disk)
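A simplified sketch of the eviction the final assertions expect, assuming trimming walks the LRU from oldest to newest and skips protected digests until the free-disk constraint is satisfied again; the function name, signature, and min_free_disk parameter are illustrative only:

import collections

def trim(items, protected, free_disk, min_free_disk):
  # `items` maps digest -> size in LRU order (oldest first), like
  # cache._lru._items above; protected digests are never evicted.
  trimmed = collections.OrderedDict(items)
  for digest, size in list(trimmed.items()):
    if free_disk >= min_free_disk:
      break
    if digest in protected:
      continue
    free_disk += size
    del trimmed[digest]
  return trimmed

In the test, writing one more one-byte item drops free disk to 1003, and forcing a trim via the `with cache:` block evicts exactly one of the two unprotected one-byte items while keeping the protected h_c, which matches this oldest-first walk.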