Example #1
  def test_main_naked_leaking(self):
    workdir = tempfile.mkdtemp()
    try:
      cmd = [
        '--no-log',
        '--cache', self.tempdir,
        '--root-dir', workdir,
        '--leak-temp-dir',
        '--named-cache-root', os.path.join(self.tempdir, 'c'),
        '/bin/echo',
        'hello',
        'world',
      ]
      ret = run_isolated.main(cmd)
      self.assertEqual(0, ret)
    finally:
      fs.rmtree(unicode(workdir))
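The test above exercises run_isolated.main() with '--leak-temp-dir', which is presumably why it has to delete workdir itself in the finally block. For reference, a minimal standalone sketch of the same invocation outside the test class; it assumes run_isolated.py from the LUCI/Swarming client is importable and uses throwaway directories in place of self.tempdir:

import os
import shutil
import tempfile

import run_isolated  # assumption: importable, as in the test above

cache_dir = tempfile.mkdtemp()
workdir = tempfile.mkdtemp()
try:
  ret = run_isolated.main([
    '--no-log',
    '--cache', cache_dir,
    '--root-dir', workdir,
    '--leak-temp-dir',  # presumably leaves workdir behind; cleaned up below
    '--named-cache-root', os.path.join(cache_dir, 'c'),
    '/bin/echo', 'hello', 'world',
  ])
  assert ret == 0
finally:
  shutil.rmtree(workdir, ignore_errors=True)
  shutil.rmtree(cache_dir, ignore_errors=True)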
Example #2
    def install(self, path, name):
        """Moves the directory for the specified named cache to |path|.

        path must be absolute, unicode and must not exist.

        Raises NamedCacheError if the cache cannot be installed.
        """
        logging.info('Installing named cache %r to %r', name, path)
        with self._lock:
            try:
                if os.path.isdir(path):
                    raise NamedCacheError(
                        'installation directory %r already exists' % path)

                link_name = os.path.join(self.cache_dir, name)
                if fs.exists(link_name):
                    fs.rmtree(link_name)

                if name in self._lru:
                    rel_cache, _size = self._lru.get(name)
                    abs_cache = os.path.join(self.cache_dir, rel_cache)
                    if os.path.isdir(abs_cache):
                        logging.info('Moving %r to %r', abs_cache, path)
                        file_path.ensure_tree(os.path.dirname(path))
                        fs.rename(abs_cache, path)
                        self._remove(name)
                        return

                    logging.warning(
                        'directory for named cache %r does not exist at %s',
                        name, rel_cache)
                    self._remove(name)

                # The named cache does not exist; create an empty directory.
                # When uninstalling, we will move it back to the cache and
                # create an entry.
                file_path.ensure_tree(path)
            except (IOError, OSError) as ex:
                raise NamedCacheError(
                    'cannot install cache named %r at %r: %s' %
                    (name, path, ex))
            finally:
                self._save()
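install() moves a previously stored cache directory into the task's working area, or creates an empty directory if the cache has no entry yet; the comment notes that uninstalling later moves it back. A self-contained sketch of that move-in/move-out idea, using plain os.rename instead of the fs/file_path wrappers (the directory names and the explicit "uninstall" step here are illustrative assumptions, not the class's API):

import os
import shutil
import tempfile

cache_root = tempfile.mkdtemp()            # stands in for self.cache_dir
stored = os.path.join(cache_root, 'a')     # where the named cache is kept
os.mkdir(stored)
open(os.path.join(stored, 'data.txt'), 'w').close()

run_dir = tempfile.mkdtemp()
target = os.path.join(run_dir, 'git_cache')  # must not exist yet

os.rename(stored, target)   # "install": move the cache into the task dir
# ... the task reads and writes files under `target` ...
os.rename(target, stored)   # "uninstall": move it back once the task is done

shutil.rmtree(run_dir)
shutil.rmtree(cache_root)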
Example #3
def rmtree(root):
  """Wrapper around shutil.rmtree() to retry automatically on Windows.

  On Windows, forcibly kills processes that are found to interfere with the
  deletion.

  Returns:
    True on normal execution, False if berserk techniques (like killing
    processes) had to be used.
  """
  logging.info('rmtree(%s)', root)
  assert sys.getdefaultencoding() == 'utf-8', sys.getdefaultencoding()
  # Do not assert that root is already unicode here yet; that would break too
  # much code.
  root = unicode(root)
  try:
    make_tree_deleteable(root)
  except OSError as e:
    logging.warning('Swallowing make_tree_deleteable() error: %s', e)

  # First try the soft way: tries 3 times to delete and sleep a bit in between.
  # Retries help if test subprocesses outlive main process and try to actively
  # use or write to the directory while it is being deleted.
  max_tries = 3
  for i in xrange(max_tries):
    # errors is a list of tuple(function, path, excinfo).
    errors = []
    fs.rmtree(root, onerror=lambda *args: errors.append(args))
    if not errors:
      return True
    if not i and sys.platform == 'win32':
      for _, path, _ in errors:
        try:
          change_acl_for_delete(path)
        except Exception as e:
          sys.stderr.write('- %s (failed to update ACL: %s)\n' % (path, e))

    if i == max_tries - 1:
      sys.stderr.write(
          'Failed to delete %s. The following files remain:\n' % root)
      for _, path, _ in errors:
        sys.stderr.write('- %s\n' % path)
    else:
      delay = (i+1)*2
      sys.stderr.write(
          'Failed to delete %s (%d files remaining).\n'
          '  Maybe the test has a subprocess outliving it.\n'
          '  Sleeping %d seconds.\n' %
          (root, len(errors), delay))
      time.sleep(delay)

  # If soft retries fail on Linux, there's nothing better we can do.
  if sys.platform != 'win32':
    raise errors[0][2][0], errors[0][2][1], errors[0][2][2]

  # The soft way was not good enough. Try the hard way. Enumerates both:
  # - all child processes from this process.
  # - processes where the main executable is inside 'root'. The reason is that
  #   the ancestry may be broken, so stray grandchild processes could be
  #   undetected by the first technique.
  # This technique is not fool-proof but mostly gets there.
  def get_processes():
    processes = enum_processes_win()
    tree_processes = filter_processes_tree_win(processes)
    dir_processes = filter_processes_dir_win(processes, root)
    # Convert to dict to remove duplicates.
    processes = dict((p.ProcessId, p) for p in tree_processes)
    processes.update((p.ProcessId, p) for p in dir_processes)
    processes.pop(os.getpid())
    return processes

  for i in xrange(3):
    sys.stderr.write('Enumerating processes:\n')
    processes = get_processes()
    if not processes:
      break
    for _, proc in sorted(processes.iteritems()):
      sys.stderr.write(
          '- pid %d; Handles: %d; Exe: %s; Cmd: %s\n' % (
            proc.ProcessId,
            proc.HandleCount,
            proc.ExecutablePath,
            proc.CommandLine))
    sys.stderr.write('Terminating %d processes.\n' % len(processes))
    for pid in sorted(processes):
      try:
        # Killing is asynchronous.
        os.kill(pid, 9)
        sys.stderr.write('- %d killed\n' % pid)
      except OSError:
        sys.stderr.write('- failed to kill %s\n' % pid)
    if i < 2:
      time.sleep((i+1)*2)
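  # Note: this `else` belongs to the `for` loop above; it runs only when the
  # loop finished without hitting `break`, i.e. get_processes() never came
  # back empty.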
  else:
    processes = get_processes()
    if processes:
      sys.stderr.write('Failed to terminate processes.\n')
      raise errors[0][2][0], errors[0][2][1], errors[0][2][2]

  # Now that annoying processes in root are evicted, try again.
  errors = []
  fs.rmtree(root, onerror=lambda *args: errors.append(args))
  if errors:
    # There's no hope.
    sys.stderr.write(
        'Failed to delete %s. The following files remain:\n' % root)
    for _, path, _ in errors:
      sys.stderr.write('- %s\n' % path)
    raise errors[0][2][0], errors[0][2][1], errors[0][2][2]
  return False
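The first half of the function is a plain retry loop around rmtree() with an onerror collector. A self-contained sketch of just that "soft" part, using shutil.rmtree from the standard library instead of the fs wrapper (an assumption for illustration; the real code above also fixes ACLs and kills processes on Windows):

import shutil
import sys
import time

def rmtree_with_retries(root, max_tries=3):
  """Retries deletion a few times; re-raises the first error on failure."""
  for i in range(max_tries):
    errors = []  # list of (function, path, excinfo) tuples from onerror
    shutil.rmtree(root, onerror=lambda *args: errors.append(args))
    if not errors:
      return True
    if i == max_tries - 1:
      _exc_type, exc_value, exc_tb = errors[0][2]
      raise exc_value.with_traceback(exc_tb)
    delay = (i + 1) * 2
    sys.stderr.write('Failed to delete %s (%d errors); sleeping %d seconds.\n'
                     % (root, len(errors), delay))
    time.sleep(delay)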
Example #4
def rmtree(root):
    """Wrapper around shutil.rmtree() to retry automatically on Windows.

    On Windows, forcibly kills processes that are found to interfere with the
    deletion.

    Returns:
      True on normal execution, False if berserk techniques (like killing
      processes) had to be used.
    """
    logging.info('rmtree(%s)', root)
    assert isinstance(root,
                      six.text_type) or sys.getdefaultencoding() == 'utf-8', (
                          repr(root), sys.getdefaultencoding())
    root = six.text_type(root)
    try:
        make_tree_deleteable(root)
    except OSError as e:
        logging.warning('Swallowing make_tree_deleteable() error: %s', e)

    # First try the soft way: tries 3 times to delete and sleep a bit in between.
    # Retries help if test subprocesses outlive main process and try to actively
    # use or write to the directory while it is being deleted.
    max_tries = 3
    for i in range(max_tries):
        # pylint: disable=cell-var-from-loop
        # errors is a list of tuple(function, path, excinfo).
        errors = []
        fs.rmtree(root, onerror=lambda *args: errors.append(args))
        if not errors or not fs.exists(root):
            if i:
                sys.stderr.write('Succeeded.\n')
            return True
        if not i and sys.platform == 'win32':
            for path in sorted(set(path for _, path, _ in errors)):
                try:
                    change_acl_for_delete(path)
                except Exception as e:
                    sys.stderr.write('- %s (failed to update ACL: %s)\n' %
                                     (path, e))

        if i != max_tries - 1:
            delay = (i + 1) * 2
            sys.stderr.write(
                'Failed to delete %s (%d files remaining).\n'
                '  Maybe the test has a subprocess outliving it.\n'
                '  Sleeping %d seconds.\n' % (root, len(errors), delay))
            time.sleep(delay)

    sys.stderr.write('Failed to delete %s. The following files remain:\n' %
                     root)
    # The same path may be listed multiple times.
    for path in sorted(set(path for _, path, _ in errors)):
        sys.stderr.write('- %s\n' % path)

    # If soft retries fail on Linux, there's nothing better we can do.
    if sys.platform != 'win32':
        six.reraise(errors[0][2][0], errors[0][2][1], errors[0][2][2])

    # The soft way was not good enough. Try the hard way.
    for i in range(max_tries):
        if not kill_children_processes(root):
            break
        if i != max_tries - 1:
            time.sleep((i + 1) * 2)
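    # This `else` clause belongs to the `for` loop above; it runs only if the
    # loop completed without `break`, i.e. child processes were still being
    # found on every attempt.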
    else:
        processes = _get_children_processes_win(root)
        if processes:
            sys.stderr.write('Failed to terminate processes.\n')
            six.reraise(errors[0][2][0], errors[0][2][1], errors[0][2][2])

    # Now that annoying processes in root are evicted, try again.
    errors = []
    fs.rmtree(root, onerror=lambda *args: errors.append(args))
    if errors and fs.exists(root):
        # There's no hope: we already tried to remove the directory 4 times.
        # Give up and raise an exception.
        sys.stderr.write('Failed to delete %s. The following files remain:\n' %
                         root)
        # The same path may be listed multiple times.
        for path in sorted(set(path for _, path, _ in errors)):
            sys.stderr.write('- %s\n' % path)
        six.reraise(errors[0][2][0], errors[0][2][1], errors[0][2][2])
    return False
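This version looks like the Python 2/3-compatible rewrite of Example #3: unicode/xrange/iteritems are gone and the Python 2 three-expression raise is replaced by six.reraise(). As a reference point, roughly what six.reraise(tp, value, tb) does on Python 3, shown with the standard library only:

import sys

def reraise_py3(exc_info):
  # Rough Python 3 equivalent of six.reraise(*exc_info), and of the Python 2
  # form `raise exc_type, exc_value, exc_tb` used in Example #3.
  _exc_type, exc_value, exc_tb = exc_info
  raise exc_value.with_traceback(exc_tb)

try:
  raise OSError('simulated failure')
except OSError:
  saved = sys.exc_info()

# Later, outside the except block, the original error can be re-raised with
# its original traceback:
# reraise_py3(saved)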