Code Example #1
File: ptliar.py  Project: pantouyu/ptliar2
    def _update_status(self):
        """
        update uploaded/downloaded/status etc.
        """
        # update 'uploaded'
        if self.last_commit_time:
            delta = time() - self.last_commit_time
            self.uploaded += int(ts.get_up_speed(self) * delta)

        if self.up:
            return

        # update 'downloaded' and 'left'
        delta = time() - self.last_commit_time
        down_size = int(ts.get_down_speed(self) * delta)
        down_size = min(self.left, down_size)
        self.left -= down_size
        self.downloaded += down_size

        left_file = join(DIR[DOWN], "%s.left" % self.filename)
        if self.left <= 0:
            # completed
            log.info("completed [%20s]" % self.name)
            ts.return_tickets(self, DOWN)
            self.up_down = UP

            # move torrents from down_torrents to up_torrents
            src = join(DIR[DOWN], self.filename)
            dst = join(DIR[UP], self.filename)
            remove(left_file)
            move(src, dst)

        else:
            # not yet, record progress
            write_int(left_file, self.left)
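The bookkeeping above boils down to "elapsed time times a configured speed, clamped to what is left". The stripped-down, standard-library-only sketch below restates just that step; the function name, arguments and numbers are illustrative stand-ins for the excerpt's ts.get_down_speed() helper and object state, not part of the project.

from time import time

def fake_progress(left, downloaded, last_commit_time, down_speed_bps):
    # Same arithmetic as the 'downloaded'/'left' update in _update_status:
    # report speed * elapsed time, but never more than what is left.
    delta = time() - last_commit_time
    down_size = min(left, int(down_speed_bps * delta))
    return left - down_size, downloaded + down_size

# e.g. 10 MiB left, a reported 512 KiB/s, last committed 4 seconds ago:
left, downloaded = fake_progress(10 * 2**20, 0, time() - 4, 512 * 2**10)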
Code Example #2
def remove(filepath):
  """Removes a file, even if it is read-only."""
  # TODO(maruel): Not do it unless necessary since it slows this function
  # down.
  if sys.platform == 'win32':
    # Deleting a read-only file will fail, so clear the file's read-only bit.
    set_read_only(filepath, False)
  else:
    # Deleting a read-only file will fail if the directory is read-only.
    set_read_only(os.path.dirname(filepath), False)
  fs.remove(filepath)
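For readers without the project's fs and set_read_only wrappers, here is a minimal, standard-library-only sketch of the same approach; _clear_read_only is a hypothetical stand-in for the project's helper, not its actual implementation.

import os
import stat
import sys

def _clear_read_only(path):
    # Hypothetical stand-in for set_read_only(path, False): add the owner
    # write bit so that a following unlink is not rejected.
    os.chmod(path, os.stat(path).st_mode | stat.S_IWUSR)

def force_remove(filepath):
    """Removes a file even if it, or its directory on POSIX, is read-only."""
    if sys.platform == 'win32':
        # On Windows the file's own read-only attribute blocks deletion.
        _clear_read_only(filepath)
    else:
        # On POSIX, deletion needs write permission on the containing directory.
        _clear_read_only(os.path.dirname(filepath))
    os.remove(filepath)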
Code Example #3
    def install(self, dst, name):
        """Creates the directory |dst| and moves a previous named cache |name| if it
    was in the local named caches cache.

    dst must be absolute, unicode and must not exist.

    Returns the reused named cache size in bytes, or 0 if none was present.

    Raises NamedCacheError if the cache cannot be installed.
    """
        logging.info('NamedCache.install(%r, %r)', dst, name)
        with self._lock:
            try:
                if fs.isdir(dst):
                    raise NamedCacheError(
                        'installation directory %r already exists' % dst)

                # Remove the named symlink if it exists.
                link_name = self._get_named_path(name)
                if fs.exists(link_name):
                    # Remove the symlink itself, not its destination.
                    fs.remove(link_name)

                if name in self._lru:
                    rel_cache, size = self._lru.get(name)
                    abs_cache = os.path.join(self.cache_dir, rel_cache)
                    if fs.isdir(abs_cache):
                        logging.info('- reusing %r; size was %d', rel_cache,
                                     size)
                        file_path.ensure_tree(os.path.dirname(dst))
                        self._sudo_chown(abs_cache)
                        fs.rename(abs_cache, dst)
                        self._remove(name)
                        return size

                    logging.warning('- expected directory %r, does not exist',
                                    rel_cache)
                    self._remove(name)

                # The named cache does not exist; create an empty directory. When
                # uninstalling, we will move it back to the cache and create an
                # entry.
                logging.info('- creating new directory')
                file_path.ensure_tree(dst)
                return 0
            except (IOError, OSError) as ex:
                # Raise using the original traceback.
                exc = NamedCacheError(
                    'cannot install cache named %r at %r: %s' %
                    (name, dst, ex))
                six.reraise(type(exc), exc, sys.exc_info()[2])
            finally:
                self._save()
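Stripped of locking, LRU state and error wrapping, the core move-or-create decision reduces to the following standard-library sketch; the function and variable names are illustrative, not taken from the project, and the real code reads the reused size from its LRU state rather than walking the tree.

import errno
import os

def install_dir(cached_dir, dst):
    # Reuse a previously cached directory by moving it into place, or create
    # an empty one; returns the reused size in bytes, mirroring install().
    if os.path.isdir(dst):
        raise OSError(errno.EEXIST, 'installation directory %r already exists' % dst)
    if os.path.isdir(cached_dir):
        size = sum(
            os.path.getsize(os.path.join(root, f))
            for root, _, files in os.walk(cached_dir) for f in files)
        parent = os.path.dirname(dst)
        if parent and not os.path.isdir(parent):
            os.makedirs(parent)
        os.rename(cached_dir, dst)  # same move-into-place step as fs.rename() above
        return size
    os.makedirs(dst)
    return 0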
Code Example #4
File: isolate.py  Project: zj15243885020/arangodb
def recreate_tree(outdir, indir, infiles, action, as_hash):
  """Creates a new tree with only the input files in it.

  Arguments:
    outdir:    Output directory to create the files in.
    indir:     Root directory the infiles are based in.
    infiles:   dict of files to map from |indir| to |outdir|.
    action:    One of the accepted actions of file_path.link_file().
    as_hash:   Output filename is the hash instead of relfile.
  """
  logging.info(
      'recreate_tree(outdir=%s, indir=%s, files=%d, action=%s, as_hash=%s)' %
      (outdir, indir, len(infiles), action, as_hash))

  assert os.path.isabs(outdir) and outdir == os.path.normpath(outdir), outdir
  if not os.path.isdir(outdir):
    logging.info('Creating %s' % outdir)
    fs.makedirs(outdir)

  for relfile, metadata in infiles.iteritems():
    infile = os.path.join(indir, relfile)
    if as_hash:
      # Do the hashtable specific checks.
      if 'l' in metadata:
        # Skip links when storing a hashtable.
        continue
      outfile = os.path.join(outdir, metadata['h'])
      if os.path.isfile(outfile):
        # Just do a quick check that the file size matches; no need to stat()
        # the input file again, grab the value from the dict.
        if 's' not in metadata:
          raise isolated_format.MappingError(
              'Misconfigured item %s: %s' % (relfile, metadata))
        if metadata['s'] == fs.stat(outfile).st_size:
          continue
        else:
          logging.warn('Overwriting %s' % metadata['h'])
          fs.remove(outfile)
    else:
      outfile = os.path.join(outdir, relfile)
      outsubdir = os.path.dirname(outfile)
      if not os.path.isdir(outsubdir):
        fs.makedirs(outsubdir)

    if 'l' in metadata:
      pointed = metadata['l']
      logging.debug('Symlink: %s -> %s' % (outfile, pointed))
      # symlink doesn't exist on Windows.
      fs.symlink(pointed, outfile)  # pylint: disable=E1101
    else:
      file_path.link_file(outfile, infile, action)
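The checks above imply that each per-file metadata dict carries an 'h' entry (content hash, used as the output name when as_hash is true), an 's' entry (size, used for the quick st_size comparison) and, for symlinks, an 'l' entry (link target). An illustrative infiles value with made-up paths and values:

# Illustrative only; paths, hash and size are made up.
infiles = {
    'src/main.py': {
        'h': '3f786850e387550fdab836ed7e6dc881de23001b',  # outfile name when as_hash=True
        's': 1024,                                        # compared against fs.stat(outfile).st_size
    },
    'src/main_link.py': {
        'l': 'main.py',                                   # symlink target; skipped when as_hash=True
    },
}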
Code Example #5
File: file_path.py  Project: mellowdistrict/luci-py
def try_remove(filepath):
  """Removes a file without crashing even if it doesn't exist."""
  try:
    # TODO(maruel): Not do it unless necessary since it slows this function
    # down.
    if sys.platform == 'win32':
      # Deleting a read-only file will fail, so clear the file's read-only bit.
      set_read_only(filepath, False)
    else:
      # Deleting a read-only file will fail if the directory is read-only.
      set_read_only(os.path.dirname(filepath), False)
    fs.remove(filepath)
  except OSError:
    pass
Code Example #6
 def test_delete_rd_rf(self):
   # Confirms that a RO file in a RO directory can't be deleted.
   dir_foo = os.path.join(self.tempdir, 'foo')
   file_bar = os.path.join(dir_foo, 'bar')
   fs.mkdir(dir_foo, 0777)
   write_content(file_bar, 'bar')
   file_path.set_read_only(dir_foo, True)
   file_path.set_read_only(file_bar, True)
   self.assertMaskedFileMode(dir_foo, 040555)
   self.assertMaskedFileMode(file_bar, 0100444)
   with self.assertRaises(OSError):
     # It fails for a different reason depending on the OS. See the test cases
     # above.
     fs.remove(file_bar)
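The failure the comment alludes to can be reproduced with the standard library alone. The sketch below shows the POSIX case and assumes a non-root user, since root bypasses the directory permission check; on Windows the refusal comes from the file's read-only attribute instead.

import os
import tempfile

# POSIX: unlinking needs write permission on the directory, not on the file.
tmp = tempfile.mkdtemp()
path = os.path.join(tmp, 'bar')
with open(path, 'w') as f:
    f.write('bar')
os.chmod(path, 0o444)   # read-only file
os.chmod(tmp, 0o555)    # read-only directory
try:
    os.remove(path)     # expected to raise OSError (EACCES) for a non-root user
except OSError as e:
    print('removal refused: %s' % e)
finally:
    os.chmod(tmp, 0o755)   # restore write access so cleanup succeeds
    os.chmod(path, 0o644)  # also clear the file's read-only bit (matters on Windows)
    os.remove(path)
    os.rmdir(tmp)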
Code Example #7
 def test_delete_wd_rf(self):
   # Confirms that a RO file in a RW directory can be deleted on non-Windows.
   dir_foo = os.path.join(self.tempdir, 'foo')
   file_bar = os.path.join(dir_foo, 'bar')
   fs.mkdir(dir_foo, 0777)
   write_content(file_bar, 'bar')
   file_path.set_read_only(dir_foo, False)
   file_path.set_read_only(file_bar, True)
   self.assertFileMode(dir_foo, 040777)
   self.assertMaskedFileMode(file_bar, 0100444)
   if sys.platform == 'win32':
     # On Windows, a read-only file can't be deleted.
     with self.assertRaises(OSError):
       fs.remove(file_bar)
   else:
     fs.remove(file_bar)
Code Example #8
File: local_caching_test.py  Project: ChMarina/v8_vm
    def test_cleanup_incorrect_link(self):
        cache = self.get_cache(_get_policies())
        self._add_one_item(cache, 1)
        self._add_one_item(cache, 2)
        fs.remove(os.path.join(self.cache_dir, cache.NAMED_DIR, u'1'))
        fs.remove(os.path.join(self.cache_dir, cache.NAMED_DIR, u'2'))
        fs.symlink('invalid_dest',
                   os.path.join(self.cache_dir, cache.NAMED_DIR, u'1'))
        os.mkdir(os.path.join(self.cache_dir, cache.NAMED_DIR, u'2'))

        cache = self.get_cache(_get_policies())
        self.assertEqual(
            ['1', '2'],
            sorted(fs.listdir(os.path.join(cache.cache_dir, cache.NAMED_DIR))))
        self.assertEqual(True, cache.cleanup())
        self.assertEqual([],
                         fs.listdir(
                             os.path.join(cache.cache_dir, cache.NAMED_DIR)))
Code Example #9
 def test_delete_rd_wf(self):
   # Confirms that a RW file in a RO directory can be deleted on Windows only.
   dir_foo = os.path.join(self.tempdir, 'foo')
   file_bar = os.path.join(dir_foo, 'bar')
   fs.mkdir(dir_foo, 0777)
   write_content(file_bar, 'bar')
   file_path.set_read_only(dir_foo, True)
   file_path.set_read_only(file_bar, False)
   self.assertMaskedFileMode(dir_foo, 040555)
   self.assertFileMode(file_bar, 0100666)
   if sys.platform == 'win32':
     # A read-only directory has a convoluted meaning on Windows: it means that
     # the directory is "personalized". This is used as a signal by Windows
     # Explorer to tell it to look into the directory for desktop.ini.
     # See http://support.microsoft.com/kb/326549 for more details.
     # As such, it is important not to try to set the read-only bit on
     # directories on Windows since it has no effect other than triggering
     # Windows Explorer to look for desktop.ini, which is unnecessary.
     fs.remove(file_bar)
   else:
     with self.assertRaises(OSError):
       fs.remove(file_bar)
Code Example #10
File: ptliar.py  Project: pantouyu/ptliar2
    "DEFAULT_MAX_TORRENT_SPEED": DEFAULT_MAX_TORRENT_SPEED,
    "DEFAULT_CLIENT": DEFAULT_CLIENT,
    "LUCKY_NUMBER": LUCKY_NUMBER,
}

# Q: what the f**k is SSSS?
# A: shanghai southwest some school

# project homepage: ptliar.com
# email: [email protected]

log = getLogger("ptliar")
formatter = Formatter(FMT, DATEFMT)
# delete large log file
if size(LOG_FILE) > MEGA:
    remove(LOG_FILE)
# log file handler
fh = FileHandler(LOG_FILE)
fh.setLevel(DEBUG)
fh.setFormatter(formatter)
log.addHandler(fh)


class PTLiarSettings:
    """
    global settings
    """
    def __init__(self):
        # default
        self.use_ipv6 = False  # send ipv6 addr to tracker?
        self.use_zero_rate = False  # enable zero-rate?
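The size() helper used for the log-rotation check above is not part of this excerpt. A plausible stdlib implementation consistent with how it is called here (it should not blow up on the first run, before LOG_FILE exists) could look like this; the helper body and the value of MEGA are assumptions, not taken from the project.

import os

MEGA = 1024 * 1024  # threshold used above, assumed to be one mebibyte

def size(path):
    # Assumed behaviour: byte size of the file, or 0 if it does not exist yet.
    try:
        return os.path.getsize(path)
    except OSError:
        return 0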
Code Example #11
File: cipd.py  Project: rmoorman/luci-py
    def ensure(self,
               site_root,
               packages,
               cache_dir=None,
               tmp_dir=None,
               timeout=None):
        """Ensures that packages installed in |site_root| equals |packages| set.

    Blocking call.

    Args:
      site_root (str): where to install packages.
      packages: dict of subdir -> list of (package_template, version) tuples.
      cache_dir (str): if set, cache dir for the cipd binary's own cache.
        Typically contains packages and tags.
      tmp_dir (str): if not None, dir for temp files.
      timeout (int): if not None, timeout in seconds for this function to run.

    Returns:
      Pinned packages in the form of {subdir: [(package_name, package_id)]},
      which correspond 1:1 with the input packages argument.

    Raises:
      Error if the packages could not be installed or the call timed out.
    """
        timeoutfn = tools.sliding_timeout(timeout)
        logging.info('Installing packages %r into %s', packages, site_root)

        ensure_file_handle, ensure_file_path = tempfile.mkstemp(
            dir=tmp_dir, prefix=u'cipd-ensure-file-', suffix='.txt')
        json_out_file_handle, json_file_path = tempfile.mkstemp(
            dir=tmp_dir, prefix=u'cipd-ensure-result-', suffix='.json')
        os.close(json_out_file_handle)

        try:
            try:
                for subdir, pkgs in sorted(packages.iteritems()):
                    if '\n' in subdir:
                        raise Error(
                            'Could not install packages; subdir %r contains newline'
                            % subdir)
                    os.write(ensure_file_handle, '@Subdir %s\n' % (subdir, ))
                    for pkg, version in pkgs:
                        os.write(ensure_file_handle,
                                 '%s %s\n' % (pkg, version))
            finally:
                os.close(ensure_file_handle)

            cmd = [
                self.binary_path,
                'ensure',
                '-root',
                site_root,
                '-ensure-file',
                ensure_file_path,
                '-verbose',  # this is safe because cipd-ensure does not print a lot
                '-json-output',
                json_file_path,
            ]
            if cache_dir:
                cmd += ['-cache-dir', cache_dir]
            if self.service_url:
                cmd += ['-service-url', self.service_url]

            logging.debug('Running %r', cmd)
            process = subprocess42.Popen(cmd,
                                         stdout=subprocess42.PIPE,
                                         stderr=subprocess42.PIPE)
            output = []
            for pipe_name, line in process.yield_any_line(timeout=0.1):
                to = timeoutfn()
                if to is not None and to <= 0:
                    raise Error(
                        'Could not install packages; took more than %d seconds'
                        % timeout)
                if not pipe_name:
                    # stdout or stderr was closed, but yield_any_line still may have
                    # something to yield.
                    continue
                output.append(line)
                if pipe_name == 'stderr':
                    logging.debug('cipd client: %s', line)
                else:
                    logging.info('cipd client: %s', line)

            exit_code = process.wait(timeout=timeoutfn())
            if exit_code != 0:
                raise Error(
                    'Could not install packages; exit code %d\noutput:%s' %
                    (exit_code, '\n'.join(output)))
            with open(json_file_path) as jfile:
                result_json = json.load(jfile)
            return {
                subdir: [(x['package'], x['instance_id']) for x in pins]
                for subdir, pins in result_json['result'].iteritems()
            }
        finally:
            fs.remove(ensure_file_path)
            fs.remove(json_file_path)
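For orientation, here is what the generated ensure file and the return value look like for a small, made-up packages argument; the package names, versions and instance ids below are purely illustrative and not real CIPD packages.

packages = {
    '': [('example/tools/alpha/${platform}', 'git_revision:deadbeef')],
    'bin': [('example/tools/beta/${platform}', 'version:1.2.3')],
}
# The loop above would write an ensure file along these lines:
#
#   @Subdir
#   example/tools/alpha/${platform} git_revision:deadbeef
#   @Subdir bin
#   example/tools/beta/${platform} version:1.2.3
#
# and, per the docstring, the method would return something shaped like:
#
#   {'': [('example/tools/alpha/linux-amd64', 'abc123...')],
#    'bin': [('example/tools/beta/linux-amd64', 'def456...')]}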
Code Example #12
File: cipd.py  Project: yoshipaulbrophy/proto-quic
    def ensure(self,
               site_root,
               packages,
               cache_dir=None,
               tmp_dir=None,
               timeout=None):
        """Ensures that packages installed in |site_root| equals |packages| set.

    Blocking call.

    Args:
      site_root (str): where to install packages.
      packages: list of (package_template, version) tuples.
      cache_dir (str): if set, cache dir for the cipd binary's own cache.
        Typically contains packages and tags.
      tmp_dir (str): if not None, dir for temp files.
      timeout (int): if not None, timeout in seconds for this function to run.

    Returns:
      Pinned packages in the form of [(package_name, package_id)], which
      correspond 1:1 with the input packages argument.

    Raises:
      Error if the packages could not be installed or the call timed out.
    """
        timeoutfn = tools.sliding_timeout(timeout)
        logging.info('Installing packages %r into %s', packages, site_root)

        list_file_handle, list_file_path = tempfile.mkstemp(
            dir=tmp_dir, prefix=u'cipd-ensure-list-', suffix='.txt')
        json_out_file_handle, json_file_path = tempfile.mkstemp(
            dir=tmp_dir, prefix=u'cipd-ensure-result-', suffix='.json')
        os.close(json_out_file_handle)

        try:
            try:
                for pkg, version in packages:
                    pkg = render_package_name_template(pkg)
                    os.write(list_file_handle, '%s %s\n' % (pkg, version))
            finally:
                os.close(list_file_handle)

            cmd = [
                self.binary_path,
                'ensure',
                '-root',
                site_root,
                '-list',
                list_file_path,
                '-verbose',  # this is safe because cipd-ensure does not print a lot
                '-json-output',
                json_file_path,
            ]
            if cache_dir:
                cmd += ['-cache-dir', cache_dir]
            if self.service_url:
                cmd += ['-service-url', self.service_url]

            logging.debug('Running %r', cmd)
            process = subprocess42.Popen(cmd,
                                         stdout=subprocess42.PIPE,
                                         stderr=subprocess42.PIPE)
            output = []
            for pipe_name, line in process.yield_any_line(timeout=0.1):
                to = timeoutfn()
                if to is not None and to <= 0:
                    raise Error(
                        'Could not install packages; took more than %d seconds'
                        % timeout)
                if not pipe_name:
                    # stdout or stderr was closed, but yield_any_line still may have
                    # something to yield.
                    continue
                output.append(line)
                if pipe_name == 'stderr':
                    logging.debug('cipd client: %s', line)
                else:
                    logging.info('cipd client: %s', line)

            exit_code = process.wait(timeout=timeoutfn())
            if exit_code != 0:
                raise Error(
                    'Could not install packages; exit code %d\noutput:%s' %
                    (exit_code, '\n'.join(output)))
            with open(json_file_path) as jfile:
                result_json = json.load(jfile)
            # TEMPORARY(iannucci): this code handles both the cipd <=1.4 and the
            # cipd >=1.5 ensure result formats. Cipd 1.5 added support for subdirs, and
            # as part of the transition, the result of the ensure command needed to
            # change. To ease the transition, we always return data as-if we're using
            # the new format. Once cipd 1.5+ is deployed everywhere, this type switch
            # can be removed.
            if isinstance(result_json['result'], dict):
                # cipd 1.5
                return {
                    subdir: [(x['package'], x['instance_id']) for x in pins]
                    for subdir, pins in result_json['result'].iteritems()
                }
            else:
                # cipd 1.4
                return {
                    "": [(x['package'], x['instance_id'])
                         for x in result_json['result']],
                }
        finally:
            fs.remove(list_file_path)
            fs.remove(json_file_path)
Code Example #13
File: local_caching.py  Project: ChMarina/v8_vm
    def cleanup(self):
        """Removes unknown directories.

    Does not recalculate the cache size since it's surprisingly slow on some
    OSes.
    """
        success = True
        with self._lock:
            try:
                actual = set(fs.listdir(self.cache_dir))
                actual.discard(self.NAMED_DIR)
                actual.discard(self.STATE_FILE)
                expected = {v[0]: k for k, v in self._lru.iteritems()}
                # First, handle the actual cache content.
                # Remove missing entries.
                for missing in (set(expected) - actual):
                    self._lru.pop(expected[missing])
                # Remove unexpected items.
                for unexpected in (actual - set(expected)):
                    try:
                        p = os.path.join(self.cache_dir, unexpected)
                        if fs.isdir(p) and not fs.islink(p):
                            file_path.rmtree(p)
                        else:
                            fs.remove(p)
                    except (IOError, OSError) as e:
                        logging.error('Failed to remove %s: %s', unexpected, e)
                        success = False

                # Second, fix named cache links.
                named = os.path.join(self.cache_dir, self.NAMED_DIR)
                if os.path.isdir(named):
                    actual = set(fs.listdir(named))
                    expected = set(self._lru)
                    # Confirm entries. Do not add missing ones for now.
                    for name in expected.intersection(actual):
                        p = os.path.join(self.cache_dir, self.NAMED_DIR, name)
                        expected_link = os.path.join(self.cache_dir,
                                                     self._lru[name][0])
                        if fs.islink(p):
                            if sys.platform == 'win32':
                                # TODO(maruel): Implement readlink() on Windows in fs.py, then
                                # remove this condition.
                                # https://crbug.com/853721
                                continue
                            link = fs.readlink(p)
                            if expected_link == link:
                                continue
                            logging.warning(
                                'Unexpected symlink for cache %s: %s, expected %s',
                                name, link, expected_link)
                        else:
                            logging.warning(
                                'Unexpected non symlink for cache %s', name)
                        if fs.isdir(p) and not fs.islink(p):
                            file_path.rmtree(p)
                        else:
                            fs.remove(p)
                    # Remove unexpected items.
                    for unexpected in (actual - expected):
                        try:
                            p = os.path.join(self.cache_dir, self.NAMED_DIR,
                                             unexpected)
                            if fs.isdir(p):
                                file_path.rmtree(p)
                            else:
                                fs.remove(p)
                        except (IOError, OSError) as e:
                            logging.error('Failed to remove %s: %s',
                                          unexpected, e)
                            success = False
            finally:
                self._save()
        return success
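The consistency checks above assume a particular on-disk layout and LRU shape: self._lru maps each cache name to a pair of (directory relative to cache_dir, size), and NAMED_DIR holds one symlink per name pointing at that directory. The snapshot below is an illustration of that assumed layout, with made-up names and sizes rather than project data.

# Illustrative snapshot of the state cleanup() reconciles; names and sizes are made up.
# cache_dir/
#   <STATE_FILE>                   <- the LRU state file, skipped via actual.discard()
#   a3/                            <- contents of the 'git' named cache
#   b7/                            <- contents of the 'builder' named cache
#   <NAMED_DIR>/
#     git     -> cache_dir/a3      (checked against os.path.join(cache_dir, _lru['git'][0]))
#     builder -> cache_dir/b7
_lru_snapshot = {
    'git': ('a3', 1536000),        # (path relative to cache_dir, size in bytes)
    'builder': ('b7', 819200),
}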