Example #1
def hashes_filter(next_part_power, suff_path, hashes):
    hashes = list(hashes)
    for hsh in hashes:
        fname = os.path.join(suff_path, hsh, 'fake-file-name')
        if replace_partition_in_path(fname, next_part_power) == fname:
            hashes.remove(hsh)
    return hashes
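
The first argument above is pre-bound before the filter is handed to audit_location_generator, so the generator only has to call it with (suff_path, hashes). A minimal sketch of that binding, assuming the hashes_filter defined above is in scope and using a hypothetical next_part_power (in Example #2 it comes from policy.object_ring.next_part_power):

from functools import partial

next_part_power = 11  # hypothetical; normally read from the object ring
relink_hashes_filter = partial(hashes_filter, next_part_power)
# The generator then invokes relink_hashes_filter(suff_path, hashes)
# for each suffix directory it visits.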
Example #2
def relink(swift_dir='/etc/swift',
           devices='/srv/node',
           skip_mount_check=False,
           logger=logging.getLogger(),
           device=None):
    mount_check = not skip_mount_check
    run = False
    relinked = errors = 0
    for policy in POLICIES:
        policy.object_ring = None  # Ensure it will be reloaded
        policy.load_ring(swift_dir)
        part_power = policy.object_ring.part_power
        next_part_power = policy.object_ring.next_part_power
        if not next_part_power or next_part_power == part_power:
            continue
        logger.info('Relinking files for policy %s under %s', policy.name,
                    devices)
        run = True
        datadir = diskfile.get_data_dir(policy)

        locks = [None]
        states = {}
        relink_devices_filter = partial(devices_filter, device)
        relink_hook_pre_device = partial(hook_pre_device, locks, states,
                                         datadir)
        relink_hook_post_device = partial(hook_post_device, locks)
        relink_partition_filter = partial(partitions_filter, states,
                                          STEP_RELINK, part_power,
                                          next_part_power)
        relink_hook_post_partition = partial(hook_post_partition, states,
                                             STEP_RELINK)
        relink_hashes_filter = partial(hashes_filter, next_part_power)

        locations = audit_location_generator(
            devices,
            datadir,
            mount_check=mount_check,
            devices_filter=relink_devices_filter,
            hook_pre_device=relink_hook_pre_device,
            hook_post_device=relink_hook_post_device,
            partitions_filter=relink_partition_filter,
            hook_post_partition=relink_hook_post_partition,
            hashes_filter=relink_hashes_filter)
        for fname, _, _ in locations:
            newfname = replace_partition_in_path(fname, next_part_power)
            try:
                diskfile.relink_paths(fname, newfname, check_existing=True)
                relinked += 1
            except OSError as exc:
                errors += 1
                logger.warning("Relinking %s to %s failed: %s", fname,
                               newfname, exc)

    if not run:
        logger.warning("No policy found to increase the partition power.")
        return 2
    logger.info('Relinked %d diskfiles (%d errors)', relinked, errors)
    if errors > 0:
        return 1
    return 0
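
A small usage sketch for this variant's exit codes, modelled on the functional test in Example #10 (0 = success, 1 = at least one relink error, 2 = no policy with a pending partition power increase); the device path is hypothetical and the relink() above is assumed to be in scope:

import sys

rc = relink(devices='/srv/node/sdb1', skip_mount_check=True)
if rc == 2:
    print('no storage policy is waiting for a partition power increase')
sys.exit(rc)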
Example #3
    def hashes_filter(self, suff_path, hashes):
        hashes = list(hashes)
        for hsh in hashes:
            fname = os.path.join(suff_path, hsh)
            if fname == replace_partition_in_path(self.conf['devices'], fname,
                                                  self.next_part_power):
                hashes.remove(hsh)
        return hashes
Example #4
    def process_policy(self, policy):
        self.logger.info(
            'Processing files for policy %s under %s (cleanup=%s)',
            policy.name, self.root, self.do_cleanup)
        self.part_power = policy.object_ring.part_power
        self.next_part_power = policy.object_ring.next_part_power
        self.diskfile_mgr = self.diskfile_router[policy]
        self.datadir = diskfile.get_data_dir(policy)
        self.states = {
            "part_power": self.part_power,
            "next_part_power": self.next_part_power,
            "state": {},
        }
        audit_stats = {}

        locations = audit_location_generator(
            self.conf['devices'],
            self.datadir,
            mount_check=self.conf['mount_check'],
            devices_filter=self.devices_filter,
            hook_pre_device=self.hook_pre_device,
            hook_post_device=self.hook_post_device,
            partitions_filter=self.partitions_filter,
            hook_pre_partition=self.hook_pre_partition,
            hook_post_partition=self.hook_post_partition,
            hashes_filter=self.hashes_filter,
            logger=self.logger,
            error_counter=audit_stats,
            yield_hash_dirs=True)
        if self.conf['files_per_second'] > 0:
            locations = RateLimitedIterator(locations,
                                            self.conf['files_per_second'])
        for hash_path, device, partition in locations:
            # note, in cleanup step next_part_power == part_power
            new_hash_path = replace_partition_in_path(self.conf['devices'],
                                                      hash_path,
                                                      self.next_part_power)
            if new_hash_path == hash_path:
                continue
            self.process_location(hash_path, new_hash_path)

        # Any unmounted devices don't trigger the pre_device hook,
        # so we deal with them here.
        for dev in audit_stats.get('unmounted', []):
            self.place_policy_stat(dev, policy, 'unmounted', 1)

        # Similarly, unlistable partitions don't trigger the post_device hook,
        # so we also need to deal with them here.
        for datadir in audit_stats.get('unlistable_partitions', []):
            device_path, _ = os.path.split(datadir)
            device = os.path.basename(device_path)
            self.place_policy_stat(device, policy, 'unlistable_partitions', 1)
Example #5
    def check_existing(self, new_file):
        existing_link = None
        link_created = False
        start = self.next_part_power - 1
        stop = max(start - self.conf['link_check_limit'], -1)
        for check_part_power in range(start, stop, -1):
            # Try to create the link from each of several previous part power
            # locations. If an attempt succeeds then either a link was made or
            # an existing link with the same inode as the next part power
            # location was found: either is acceptable. The part power location
            # that previously failed with EEXIST is included in the further
            # attempts here for simplicity.
            target_file = replace_partition_in_path(
                self.conf['devices'], new_file, check_part_power)
            try:
                link_created = diskfile.relink_paths(target_file, new_file,
                                                     ignore_missing=False)
                existing_link = target_file
                break
            except OSError:
                pass
        return existing_link, link_created
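
The probe window is easiest to read with concrete numbers. A worked example using hypothetical values (next_part_power = 11, link_check_limit = 2):

start = 11 - 1                  # 10
stop = max(start - 2, -1)       # 8
probed = list(range(start, stop, -1))
# probed == [10, 9]: the two previous part power locations that get retried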
Example #6
    def process_policy(self, policy):
        self.logger.info(
            'Processing files for policy %s under %s (cleanup=%s)',
            policy.name, self.root, self.do_cleanup)
        self.part_power = policy.object_ring.part_power
        self.next_part_power = policy.object_ring.next_part_power
        self.diskfile_mgr = self.diskfile_router[policy]
        self.datadir = diskfile.get_data_dir(policy)
        self.states = {
            "part_power": self.part_power,
            "next_part_power": self.next_part_power,
            "state": {},
        }

        locations = audit_location_generator(
            self.conf['devices'],
            self.datadir,
            mount_check=self.conf['mount_check'],
            devices_filter=self.devices_filter,
            hook_pre_device=self.hook_pre_device,
            hook_post_device=self.hook_post_device,
            partitions_filter=self.partitions_filter,
            hook_post_partition=self.hook_post_partition,
            hashes_filter=self.hashes_filter,
            logger=self.logger,
            error_counter=self.stats,
            yield_hash_dirs=True
        )
        if self.conf['files_per_second'] > 0:
            locations = RateLimitedIterator(
                locations, self.conf['files_per_second'])
        for hash_path, device, partition in locations:
            # note, in cleanup step next_part_power == part_power
            new_hash_path = replace_partition_in_path(
                self.conf['devices'], hash_path, self.next_part_power)
            if new_hash_path == hash_path:
                continue
            self.process_location(hash_path, new_hash_path)
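
RateLimitedIterator here throttles how quickly hash directories are consumed from audit_location_generator. The idea can be shown with a simplified stand-in; this is only a sketch of the concept, not Swift's implementation:

import time

def rate_limited(iterable, items_per_second):
    # Simplified throttle: never yield more than items_per_second items
    # per second, sleeping away whatever is left of each interval.
    interval = 1.0 / items_per_second
    for item in iterable:
        started = time.time()
        yield item
        elapsed = time.time() - started
        if elapsed < interval:
            time.sleep(interval - elapsed)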
Example #7
def relink(swift_dir='/etc/swift',
           devices='/srv/node',
           skip_mount_check=False,
           logger=logging.getLogger()):
    mount_check = not skip_mount_check
    run = False
    relinked = errors = 0
    for policy in POLICIES:
        policy.object_ring = None  # Ensure it will be reloaded
        policy.load_ring(swift_dir)
        part_power = policy.object_ring.part_power
        next_part_power = policy.object_ring.next_part_power
        if not next_part_power or next_part_power == part_power:
            continue
        logger.info('Relinking files for policy %s under %s',
                    policy.name, devices)
        run = True
        locations = audit_location_generator(
            devices,
            diskfile.get_data_dir(policy),
            mount_check=mount_check)
        for fname, _, _ in locations:
            newfname = replace_partition_in_path(fname, next_part_power)
            try:
                diskfile.relink_paths(fname, newfname, check_existing=True)
                relinked += 1
            except OSError as exc:
                errors += 1
                logger.warning("Relinking %s to %s failed: %s",
                               fname, newfname, exc)

    if not run:
        logger.warning("No policy found to increase the partition power.")
        return 2
    logger.info('Relinked %d diskfiles (%d errors)', relinked, errors)
    if errors > 0:
        return 1
    return 0
Example #8
def cleanup(conf, logger, device):
    diskfile_router = diskfile.DiskFileRouter(conf, logger)
    errors = cleaned_up = 0
    error_counter = {}
    found_policy = False
    for policy in POLICIES:
        diskfile_mgr = diskfile_router[policy]
        policy.object_ring = None  # Ensure it will be reloaded
        policy.load_ring(conf['swift_dir'])
        part_power = policy.object_ring.part_power
        next_part_power = policy.object_ring.next_part_power
        if not next_part_power or next_part_power != part_power:
            continue
        logger.info('Cleaning up files for policy %s under %s', policy.name,
                    conf['devices'])
        found_policy = True
        datadir = diskfile.get_data_dir(policy)

        locks = [None]
        states = {
            "part_power": part_power,
            "next_part_power": next_part_power,
            "state": {},
        }
        cleanup_devices_filter = partial(devices_filter, device)
        cleanup_hook_pre_device = partial(hook_pre_device, locks, states,
                                          datadir)
        cleanup_hook_post_device = partial(hook_post_device, locks)
        cleanup_partition_filter = partial(partitions_filter, states,
                                           part_power, next_part_power)
        cleanup_hook_post_partition = partial(hook_post_partition, states,
                                              STEP_CLEANUP, policy,
                                              diskfile_mgr)
        cleanup_hashes_filter = partial(hashes_filter, next_part_power)

        locations = audit_location_generator(
            conf['devices'],
            datadir,
            mount_check=conf['mount_check'],
            devices_filter=cleanup_devices_filter,
            hook_pre_device=cleanup_hook_pre_device,
            hook_post_device=cleanup_hook_post_device,
            partitions_filter=cleanup_partition_filter,
            hook_post_partition=cleanup_hook_post_partition,
            hashes_filter=cleanup_hashes_filter,
            logger=logger,
            error_counter=error_counter)
        if conf['files_per_second'] > 0:
            locations = RateLimitedIterator(locations,
                                            conf['files_per_second'])
        for fname, device, partition in locations:
            expected_fname = replace_partition_in_path(fname, part_power)
            if fname == expected_fname:
                continue
            # Make sure there is a valid object file in the expected new
            # location. Note that this could be newer than the original one
            # (which happens if there is another PUT after partition power
            # has been increased, but cleanup did not yet run)
            loc = diskfile.AuditLocation(os.path.dirname(expected_fname),
                                         device, partition, policy)
            df = diskfile_mgr.get_diskfile_from_audit_location(loc)
            try:
                with df.open():
                    pass
            except DiskFileQuarantined as exc:
                logger.warning(
                    'ERROR Object %(obj)s failed audit and was'
                    ' quarantined: %(err)r', {
                        'obj': loc,
                        'err': exc
                    })
                errors += 1
                continue
            except DiskFileDeleted:
                pass
            except DiskFileNotExist as exc:
                err = False
                if policy.policy_type == 'erasure_coding':
                    # Might be a non-durable fragment - check that there is
                    # a fragment in the new path. Will be fixed by the
                    # reconstructor then
                    if not os.path.isfile(expected_fname):
                        err = True
                else:
                    err = True
                if err:
                    logger.warning('Error cleaning up %s: %r', fname, exc)
                    errors += 1
                    continue
            try:
                os.remove(fname)
                cleaned_up += 1
                logger.debug("Removed %s", fname)
                suffix_dir = os.path.dirname(os.path.dirname(fname))
                diskfile.invalidate_hash(suffix_dir)
            except OSError as exc:
                logger.warning('Error cleaning up %s: %r', fname, exc)
                errors += 1
    return determine_exit_code(
        logger=logger,
        found_policy=found_policy,
        processed=cleaned_up,
        action='cleaned up',
        action_errors=errors,
        error_counter=error_counter,
    )
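
The suffix_dir passed to invalidate_hash is simply two directory levels above the removed file. A worked path example with hypothetical device, partition, and hash values:

import os

# <devices>/<device>/objects/<partition>/<suffix>/<hash>/<timestamp>.data
fname = ('/srv/node/sdb1/objects/1023/3f5/'
         'ffa3d7e88f7d1e2c0a9bdcff0387a3f5/1618998243.12345.data')
hash_dir = os.path.dirname(fname)       # drops the .data file name
suffix_dir = os.path.dirname(hash_dir)  # /srv/node/sdb1/objects/1023/3f5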
Example #9
def relink(conf, logger, device):
    diskfile_router = diskfile.DiskFileRouter(conf, logger)
    found_policy = False
    relinked = errors = 0
    error_counter = {}
    for policy in POLICIES:
        diskfile_mgr = diskfile_router[policy]
        policy.object_ring = None  # Ensure it will be reloaded
        policy.load_ring(conf['swift_dir'])
        part_power = policy.object_ring.part_power
        next_part_power = policy.object_ring.next_part_power
        if not next_part_power or next_part_power == part_power:
            continue
        logger.info('Relinking files for policy %s under %s', policy.name,
                    conf['devices'])
        found_policy = True
        datadir = diskfile.get_data_dir(policy)

        locks = [None]
        states = {
            "part_power": part_power,
            "next_part_power": next_part_power,
            "state": {},
        }
        relink_devices_filter = partial(devices_filter, device)
        relink_hook_pre_device = partial(hook_pre_device, locks, states,
                                         datadir)
        relink_hook_post_device = partial(hook_post_device, locks)
        relink_partition_filter = partial(partitions_filter, states,
                                          part_power, next_part_power)
        relink_hook_post_partition = partial(hook_post_partition, states,
                                             STEP_RELINK, policy, diskfile_mgr)
        relink_hashes_filter = partial(hashes_filter, next_part_power)

        locations = audit_location_generator(
            conf['devices'],
            datadir,
            mount_check=conf['mount_check'],
            devices_filter=relink_devices_filter,
            hook_pre_device=relink_hook_pre_device,
            hook_post_device=relink_hook_post_device,
            partitions_filter=relink_partition_filter,
            hook_post_partition=relink_hook_post_partition,
            hashes_filter=relink_hashes_filter,
            logger=logger,
            error_counter=error_counter)
        if conf['files_per_second'] > 0:
            locations = RateLimitedIterator(locations,
                                            conf['files_per_second'])
        for fname, _, _ in locations:
            newfname = replace_partition_in_path(fname, next_part_power)
            try:
                diskfile.relink_paths(fname, newfname, check_existing=True)
                relinked += 1
                suffix_dir = os.path.dirname(os.path.dirname(newfname))
                diskfile.invalidate_hash(suffix_dir)
            except OSError as exc:
                errors += 1
                logger.warning("Relinking %s to %s failed: %s", fname,
                               newfname, exc)
    return determine_exit_code(
        logger=logger,
        found_policy=found_policy,
        processed=relinked,
        action='relinked',
        action_errors=errors,
        error_counter=error_counter,
    )
Example #10
    def _test_main(self, cancel=False):
        container = 'container-%s' % uuid4()
        obj = 'object-%s' % uuid4()
        obj2 = 'object-%s' % uuid4()

        # Create container
        headers = {'X-Storage-Policy': self.policy.name}
        client.put_container(self.url, self.token, container, headers=headers)

        # Create a new object
        client.put_object(self.url, self.token, container, obj, self.data)
        client.head_object(self.url, self.token, container, obj)

        # Prepare partition power increase
        builder = RingBuilder.load(self.builder_file)
        builder.prepare_increase_partition_power()
        builder.save(self.builder_file)
        ring_data = builder.get_ring()
        ring_data.save(self.ring_file)

        # Ensure the proxy uses the changed ring
        Manager(['proxy']).restart()

        # Ensure object is still accessible
        client.head_object(self.url, self.token, container, obj)

        # Relink existing objects
        for device in self.devices:
            self.assertEqual(0, relink(skip_mount_check=True, devices=device))

        # Create second object after relinking and ensure it is accessible
        client.put_object(self.url, self.token, container, obj2, self.data)
        client.head_object(self.url, self.token, container, obj2)

        # Remember the original object locations
        org_locations = self._find_objs_ondisk(container, obj)
        org_locations += self._find_objs_ondisk(container, obj2)

        # Remember the new object locations
        new_locations = []
        for loc in org_locations:
            new_locations.append(
                replace_partition_in_path(str(loc),
                                          self.object_ring.part_power + 1))

        # Overwrite existing object - to ensure that older timestamp files
        # will be cleaned up properly later
        client.put_object(self.url, self.token, container, obj, self.data)

        # Ensure objects are still accessible
        client.head_object(self.url, self.token, container, obj)
        client.head_object(self.url, self.token, container, obj2)

        # Increase partition power
        builder = RingBuilder.load(self.builder_file)
        if not cancel:
            builder.increase_partition_power()
        else:
            builder.cancel_increase_partition_power()
        builder.save(self.builder_file)
        ring_data = builder.get_ring()
        ring_data.save(self.ring_file)

        # Ensure the proxy uses the changed ring
        Manager(['proxy']).restart()

        # Ensure objects are still accessible
        client.head_object(self.url, self.token, container, obj)
        client.head_object(self.url, self.token, container, obj2)

        # Overwrite existing object - to ensure that older timestamp files
        # will be cleaned up properly later
        client.put_object(self.url, self.token, container, obj, self.data)

        # Cleanup old objects in the wrong location
        for device in self.devices:
            self.assertEqual(0, cleanup(skip_mount_check=True, devices=device))

        # Ensure objects are still accessible
        client.head_object(self.url, self.token, container, obj)
        client.head_object(self.url, self.token, container, obj2)

        # Ensure data in old or relinked object locations is removed
        if not cancel:
            for fn in org_locations:
                self.assertFalse(os.path.exists(fn))
        else:
            for fn in new_locations:
                self.assertFalse(os.path.exists(fn))
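
The new_locations computed in the test differ from org_locations only in the partition path component: raising the partition power by one maps an object in partition P to partition 2 * P or 2 * P + 1, while device, suffix, and hash dir stay the same. An illustrative (hypothetical) before/after pair:

old_loc = ('/srv/node/sdb1/objects/1023/3f5/'
           'ffa3d7e88f7d1e2c0a9bdcff0387a3f5/1618998243.12345.data')
# With one more partition power bit the partition becomes 2046 or 2047,
# depending on the hash; nothing else in the path changes.
new_loc = ('/srv/node/sdb1/objects/2047/3f5/'
           'ffa3d7e88f7d1e2c0a9bdcff0387a3f5/1618998243.12345.data')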
Example #11
    def process_location(self, hash_path, new_hash_path):
        # Compare the contents of each hash dir with contents of same hash
        # dir in its new partition to verify that the new location has the
        # most up to date set of files. The new location may have newer
        # files if it has been updated since relinked.
        self.stats['hash_dirs'] += 1

        # Get on disk data for new and old locations, cleaning up any
        # reclaimable or obsolete files in each. The new location is
        # cleaned up *before* the old location to prevent false negatives
        # where the old still has a file that has been cleaned up in the
        # new; cleaning up the new location first ensures that the old will
        # always be 'cleaner' than the new.
        new_df_data = self.diskfile_mgr.cleanup_ondisk_files(new_hash_path)
        old_df_data = self.diskfile_mgr.cleanup_ondisk_files(hash_path)
        # Now determine what the most up-to-date set of on-disk files would
        # be, given the content of the old and new locations...
        new_files = set(new_df_data['files'])
        old_files = set(old_df_data['files'])
        union_files = new_files.union(old_files)
        union_data = self.diskfile_mgr.get_ondisk_files(
            union_files, '', verify=False)
        obsolete_files = set(info['filename']
                             for info in union_data.get('obsolete', []))
        # drop 'obsolete' files but retain 'unexpected' files which might
        # be misplaced diskfiles from another policy
        required_files = union_files.difference(obsolete_files)
        required_links = required_files.intersection(old_files)

        missing_links = 0
        created_links = 0
        unwanted_files = []
        for filename in required_links:
            # Before removing old files, be sure that the corresponding
            # required new files exist by calling relink_paths again. There
            # are several possible outcomes:
            #  - The common case is that the new file exists, in which case
            #    relink_paths checks that the new file has the same inode
            #    as the old file. An exception is raised if the inode of
            #    the new file is not the same as the old file.
            #  - The new file may not exist because the relinker failed to
            #    create the link to the old file and has erroneously moved
            #    on to cleanup. In this case the relink_paths will create
            #    the link now or raise an exception if that fails.
            #  - The new file may not exist because some other process,
            #    such as an object server handling a request, has cleaned
            #    it up since we called cleanup_ondisk_files(new_hash_path).
            #    In this case a new link will be created to the old file.
            #    This is unnecessary but simpler than repeating the
            #    evaluation of what links are now required and safer than
            #    assuming that a non-existent file that *was* required is
            #    no longer required. The new file will eventually be
            #    cleaned up again.
            self.stats['files'] += 1
            old_file = os.path.join(hash_path, filename)
            new_file = os.path.join(new_hash_path, filename)
            try:
                if diskfile.relink_paths(old_file, new_file):
                    self.logger.debug(
                        "Relinking%s created link: %s to %s",
                        ' (cleanup)' if self.do_cleanup else '',
                        old_file, new_file)
                    created_links += 1
                    self.stats['linked'] += 1
            except OSError as exc:
                existing_link = None
                link_created = False
                if exc.errno == errno.EEXIST and filename.endswith('.ts'):
                    # special case for duplicate tombstones in older partition
                    # power locations
                    # (https://bugs.launchpad.net/swift/+bug/1921718)
                    existing_link, link_created = self.check_existing(new_file)
                if existing_link:
                    self.logger.debug(
                        "Relinking%s: link not needed: %s to %s due to "
                        "existing %s", ' (cleanup)' if self.do_cleanup else '',
                        old_file, new_file, existing_link)
                    if link_created:
                        # uncommon case: the retry succeeded in creating a link
                        created_links += 1
                        self.stats['linked'] += 1
                    wanted_file = replace_partition_in_path(
                        self.conf['devices'], old_file, self.part_power)
                    if old_file not in (existing_link, wanted_file):
                        # A link exists to a different file and this file
                        # is not the current target for client requests. If
                        # this file is visited again it is possible that
                        # the existing_link will have been cleaned up and
                        # the check will fail, so clean it up now.
                        self.logger.info(
                            "Relinking%s: cleaning up unwanted file: %s",
                            ' (cleanup)' if self.do_cleanup else '', old_file)
                        unwanted_files.append(filename)
                else:
                    self.logger.warning(
                        "Error relinking%s: failed to relink %s to %s: %s",
                        ' (cleanup)' if self.do_cleanup else '',
                        old_file, new_file, exc)
                    self.stats['errors'] += 1
                    missing_links += 1
        if created_links:
            self.linked_into_partitions.add(get_partition_from_path(
                self.conf['devices'], new_hash_path))
            diskfile.invalidate_hash(os.path.dirname(new_hash_path))

        if self.do_cleanup and not missing_links:
            # use the sorted list to help unit testing
            unwanted_files = old_df_data['files']

        # the new partition hash dir has the most up to date set of on
        # disk files so it is safe to delete the old location...
        rehash = False
        for filename in unwanted_files:
            old_file = os.path.join(hash_path, filename)
            try:
                os.remove(old_file)
            except OSError as exc:
                self.logger.warning('Error cleaning up %s: %r', old_file, exc)
                self.stats['errors'] += 1
            else:
                rehash = True
                self.stats['removed'] += 1
                self.logger.debug("Removed %s", old_file)

        if rehash:
            # Even though we're invalidating the suffix, don't update
            # self.linked_into_partitions -- we only care about them for
            # relinking into the new part-power space
            try:
                diskfile.invalidate_hash(os.path.dirname(hash_path))
            except Exception as exc:
                # note: not counted as an error
                self.logger.warning(
                    'Error invalidating suffix for %s: %r',
                    hash_path, exc)
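
The set algebra near the top of process_location that derives required_links is compact; a tiny worked example with hypothetical file names (a current .data present in both locations, plus an older tombstone left behind in the old location):

new_files = {'1618998243.12345.data'}
old_files = {'1618998243.12345.data', '1618990000.00000.ts'}
obsolete_files = {'1618990000.00000.ts'}   # suppose the older tombstone is classified as obsolete
union_files = new_files | old_files
required_files = union_files - obsolete_files   # keep only non-obsolete files
required_links = required_files & old_files     # {'1618998243.12345.data'}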
Example #12
def cleanup(swift_dir='/etc/swift',
            devices='/srv/node',
            skip_mount_check=False,
            logger=logging.getLogger()):
    mount_check = not skip_mount_check
    conf = {'devices': devices, 'mount_check': mount_check}
    diskfile_router = diskfile.DiskFileRouter(conf, get_logger(conf))
    errors = cleaned_up = 0
    run = False
    for policy in POLICIES:
        policy.object_ring = None  # Ensure it will be reloaded
        policy.load_ring(swift_dir)
        part_power = policy.object_ring.part_power
        next_part_power = policy.object_ring.next_part_power
        if not next_part_power or next_part_power != part_power:
            continue
        logger.info('Cleaning up files for policy %s under %s',
                    policy.name, devices)
        run = True
        locations = audit_location_generator(
            devices,
            diskfile.get_data_dir(policy),
            mount_check=mount_check)
        for fname, device, partition in locations:
            expected_fname = replace_partition_in_path(fname, part_power)
            if fname == expected_fname:
                continue
            # Make sure there is a valid object file in the expected new
            # location. Note that this could be newer than the original one
            # (which happens if there is another PUT after partition power
            # has been increased, but cleanup did not yet run)
            loc = diskfile.AuditLocation(
                os.path.dirname(expected_fname), device, partition, policy)
            diskfile_mgr = diskfile_router[policy]
            df = diskfile_mgr.get_diskfile_from_audit_location(loc)
            try:
                with df.open():
                    pass
            except DiskFileQuarantined as exc:
                logger.warning('ERROR Object %(obj)s failed audit and was'
                               ' quarantined: %(err)r',
                               {'obj': loc, 'err': exc})
                errors += 1
                continue
            except DiskFileDeleted:
                pass
            except DiskFileNotExist as exc:
                err = False
                if policy.policy_type == 'erasure_coding':
                    # Might be a non-durable fragment - check that there is
                    # a fragment in the new path. Will be fixed by the
                    # reconstructor then
                    if not os.path.isfile(expected_fname):
                        err = True
                else:
                    err = True
                if err:
                    logger.warning(
                        'Error cleaning up %s: %r', fname, exc)
                    errors += 1
                    continue
            try:
                os.remove(fname)
                cleaned_up += 1
                logger.debug("Removed %s", fname)
            except OSError as exc:
                logger.warning('Error cleaning up %s: %r', fname, exc)
                errors += 1

    if not run:
        logger.warning("No policy found to increase the partition power.")
        return 2
    logger.info('Cleaned up %d diskfiles (%d errors)', cleaned_up, errors)
    if errors > 0:
        return 1
    return 0