Example #1
    def setUp(self):
        self.testdir = os.path.join(mkdtemp(), "tmp_test_object_auditor")
        self.devices = os.path.join(self.testdir, "node")
        self.rcache = os.path.join(self.testdir, "object.recon")
        self.logger = FakeLogger()
        rmtree(self.testdir, ignore_errors=1)
        mkdirs(os.path.join(self.devices, "sda"))
        os.mkdir(os.path.join(self.devices, "sdb"))

        # policy 0
        self.objects = os.path.join(self.devices, "sda", get_data_dir(POLICIES[0]))
        self.objects_2 = os.path.join(self.devices, "sdb", get_data_dir(POLICIES[0]))
        os.mkdir(self.objects)
        # policy 1
        self.objects_p1 = os.path.join(self.devices, "sda", get_data_dir(POLICIES[1]))
        self.objects_2_p1 = os.path.join(self.devices, "sdb", get_data_dir(POLICIES[1]))
        os.mkdir(self.objects_p1)

        self.parts = self.parts_p1 = {}
        for part in ["0", "1", "2", "3"]:
            self.parts[part] = os.path.join(self.objects, part)
            self.parts_p1[part] = os.path.join(self.objects_p1, part)
            os.mkdir(os.path.join(self.objects, part))
            os.mkdir(os.path.join(self.objects_p1, part))

        self.conf = dict(devices=self.devices, mount_check="false", object_size_stats="10,100,1024,10240")
        self.df_mgr = DiskFileManager(self.conf, self.logger)

        # diskfiles for policy 0, 1
        self.disk_file = self.df_mgr.get_diskfile("sda", "0", "a", "c", "o", policy=POLICIES[0])
        self.disk_file_p1 = self.df_mgr.get_diskfile("sda", "0", "a", "c", "o", policy=POLICIES[1])
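Every example in this listing calls get_data_dir from swift.obj.diskfile, which maps a storage policy (or a bare policy index in older releases) to the on-disk data directory name. Below is a minimal sketch of that mapping, not the actual Swift implementation, which defers to get_policy_string and validates the index against POLICIES:

    # Sketch only: policy 0 keeps the legacy "objects" directory, every
    # other policy gets a "-<index>" suffix.
    def get_data_dir_sketch(policy_or_index):
        index = int(policy_or_index)  # StoragePolicy objects support int()
        return 'objects' if index == 0 else 'objects-%d' % index

    assert get_data_dir_sketch(0) == 'objects'
    assert get_data_dir_sketch(2) == 'objects-2'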
Example #2
 def setUp(self):
     utils.HASH_PATH_SUFFIX = 'endcap'
     utils.HASH_PATH_PREFIX = ''
     # Setup a test ring (stolen from common/test_ring.py)
     self.testdir = tempfile.mkdtemp()
     self.devices = os.path.join(self.testdir, 'node')
     rmtree(self.testdir, ignore_errors=1)
     os.mkdir(self.testdir)
     os.mkdir(self.devices)
     os.mkdir(os.path.join(self.devices, 'sda'))
     self.objects = os.path.join(self.devices, 'sda',
                                 diskfile.get_data_dir(0))
     self.objects_1 = os.path.join(self.devices, 'sda',
                                   diskfile.get_data_dir(1))
     os.mkdir(self.objects)
     os.mkdir(self.objects_1)
     self.parts = {}
     self.parts_1 = {}
     for part in ['0', '1', '2', '3']:
         self.parts[part] = os.path.join(self.objects, part)
         os.mkdir(os.path.join(self.objects, part))
         self.parts_1[part] = os.path.join(self.objects_1, part)
         os.mkdir(os.path.join(self.objects_1, part))
     _create_test_rings(self.testdir)
     self.conf = dict(
         swift_dir=self.testdir, devices=self.devices, mount_check='false',
         timeout='300', stats_interval='1')
     self.replicator = object_replicator.ObjectReplicator(self.conf)
     self.replicator.logger = FakeLogger()
     self.df_mgr = diskfile.DiskFileManager(self.conf,
                                            self.replicator.logger)
Example #3
    def setUp(self):
        self.testdir = os.path.join(mkdtemp(), 'tmp_test_object_auditor')
        self.devices = os.path.join(self.testdir, 'node')
        self.rcache = os.path.join(self.testdir, 'object.recon')
        self.logger = FakeLogger()
        rmtree(self.testdir, ignore_errors=1)
        mkdirs(os.path.join(self.devices, 'sda'))
        os.mkdir(os.path.join(self.devices, 'sdb'))

        # policy 0
        self.objects = os.path.join(self.devices, 'sda', get_data_dir(0))
        self.objects_2 = os.path.join(self.devices, 'sdb', get_data_dir(0))
        os.mkdir(self.objects)
        # policy 1
        self.objects_p1 = os.path.join(self.devices, 'sda', get_data_dir(1))
        self.objects_2_p1 = os.path.join(self.devices, 'sdb', get_data_dir(1))
        os.mkdir(self.objects_p1)

        self.parts = self.parts_p1 = {}
        for part in ['0', '1', '2', '3']:
            self.parts[part] = os.path.join(self.objects, part)
            self.parts_p1[part] = os.path.join(self.objects_p1, part)
            os.mkdir(os.path.join(self.objects, part))
            os.mkdir(os.path.join(self.objects_p1, part))

        self.conf = dict(
            devices=self.devices,
            mount_check='false',
            object_size_stats='10,100,1024,10240')
        self.df_mgr = DiskFileManager(self.conf, self.logger)

        # diskfiles for policy 0, 1
        self.disk_file = self.df_mgr.get_diskfile('sda', '0', 'a', 'c', 'o', 0)
        self.disk_file_p1 = self.df_mgr.get_diskfile('sda', '0', 'a', 'c',
                                                     'o', 1)
Example #4
 def storage_dir(self, server, node, part=None, policy=None):
     policy = policy or self.policy
     device_path = self.device_dir(server, node)
     path_parts = [device_path, get_data_dir(policy)]
     if part is not None:
         path_parts.append(str(part))
     return os.path.join(*path_parts)
Example #5
 def rsync(self, node, job, suffixes):
     """
     Uses rsync to implement the sync method. This was the first
     sync method in Swift.
     """
     if not os.path.exists(job["path"]):
         return False
     args = [
         "rsync",
         "--recursive",
         "--whole-file",
         "--human-readable",
         "--xattrs",
         "--itemize-changes",
         "--ignore-existing",
         "--timeout=%s" % self.rsync_io_timeout,
         "--contimeout=%s" % self.rsync_io_timeout,
         "--bwlimit=%s" % self.rsync_bwlimit,
     ]
     node_ip = rsync_ip(node["replication_ip"])
     if self.vm_test_mode:
         rsync_module = "%s::object%s" % (node_ip, node["replication_port"])
     else:
         rsync_module = "%s::object" % node_ip
     had_any = False
     for suffix in suffixes:
         spath = join(job["path"], suffix)
         if os.path.exists(spath):
             args.append(spath)
             had_any = True
     if not had_any:
         return False
     data_dir = get_data_dir(job["policy_idx"])
     args.append(join(rsync_module, node["device"], data_dir, job["partition"]))
     return self._rsync(args) == 0
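For orientation, the destination appended above is an rsync module path of the form <ip>::object/<device>/<datadir>/<partition> (with the replication port appended to the module name in vm_test_mode). A quick illustration with hypothetical values; the bare join in the snippet is os.path.join:

    import os
    # Hypothetical values: node IP 10.0.0.2, device sda, policy index 1,
    # partition 1234.
    print(os.path.join('10.0.0.2::object', 'sda', 'objects-1', '1234'))
    # -> 10.0.0.2::object/sda/objects-1/1234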
Example #6
    def setup_bad_zero_byte(self, with_ts=False):
        self.auditor = auditor.ObjectAuditor(self.conf)
        self.auditor.log_time = 0
        ts_file_path = ''
        if with_ts:
            name_hash = hash_path('a', 'c', 'o')
            dir_path = os.path.join(
                self.devices, 'sda',
                storage_directory(get_data_dir(0), '0', name_hash))
            ts_file_path = os.path.join(dir_path, '99999.ts')
            if not os.path.exists(dir_path):
                mkdirs(dir_path)
            fp = open(ts_file_path, 'w')
            write_metadata(fp, {'X-Timestamp': '99999', 'name': '/a/c/o'})
            fp.close()

        etag = md5()
        with self.disk_file.create() as writer:
            etag = etag.hexdigest()
            metadata = {
                'ETag': etag,
                'X-Timestamp': str(normalize_timestamp(time.time())),
                'Content-Length': 10,
            }
            writer.put(metadata)
            etag = md5()
            etag = etag.hexdigest()
            metadata['ETag'] = etag
            write_metadata(writer._fd, metadata)
        return ts_file_path
Example #7
def relink(swift_dir='/etc/swift',
           devices='/srv/node',
           skip_mount_check=False,
           logger=logging.getLogger(),
           device=None):
    mount_check = not skip_mount_check
    run = False
    relinked = errors = 0
    for policy in POLICIES:
        policy.object_ring = None  # Ensure it will be reloaded
        policy.load_ring(swift_dir)
        part_power = policy.object_ring.part_power
        next_part_power = policy.object_ring.next_part_power
        if not next_part_power or next_part_power == part_power:
            continue
        logging.info('Relinking files for policy %s under %s', policy.name,
                     devices)
        run = True
        datadir = diskfile.get_data_dir(policy)

        locks = [None]
        states = {}
        relink_devices_filter = partial(devices_filter, device)
        relink_hook_pre_device = partial(hook_pre_device, locks, states,
                                         datadir)
        relink_hook_post_device = partial(hook_post_device, locks)
        relink_partition_filter = partial(partitions_filter, states,
                                          STEP_RELINK, part_power,
                                          next_part_power)
        relink_hook_post_partition = partial(hook_post_partition, states,
                                             STEP_RELINK)
        relink_hashes_filter = partial(hashes_filter, next_part_power)

        locations = audit_location_generator(
            devices,
            datadir,
            mount_check=mount_check,
            devices_filter=relink_devices_filter,
            hook_pre_device=relink_hook_pre_device,
            hook_post_device=relink_hook_post_device,
            partitions_filter=relink_partition_filter,
            hook_post_partition=relink_hook_post_partition,
            hashes_filter=relink_hashes_filter)
        for fname, _, _ in locations:
            newfname = replace_partition_in_path(fname, next_part_power)
            try:
                diskfile.relink_paths(fname, newfname, check_existing=True)
                relinked += 1
            except OSError as exc:
                errors += 1
                logger.warning("Relinking %s to %s failed: %s", fname,
                               newfname, exc)

    if not run:
        logger.warning("No policy found to increase the partition power.")
        return 2
    logging.info('Relinked %d diskfiles (%d errors)', relinked, errors)
    if errors > 0:
        return 1
    return 0
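A note on the relink step above: replace_partition_in_path changes only the partition component of the path. Because a partition is the object hash truncated to part_power bits, raising the power by one maps old partition P to 2*P or 2*P + 1, depending on the next hash bit; the suffix and hash directories are unchanged. Illustrative paths with made-up values:

    old = '/srv/node/sda/objects/1023/27e/d41d8cd98f00b204e9800998ecf8427e/1525354555.65758.data'
    new = '/srv/node/sda/objects/2047/27e/d41d8cd98f00b204e9800998ecf8427e/1525354555.65758.data'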
Example #8
File: common.py Project: Ahiknsr/swift
 def storage_dir(self, server, node, part=None, policy=None):
     policy = policy or self.policy
     device_path = self.device_dir(server, node)
     path_parts = [device_path, get_data_dir(policy)]
     if part is not None:
         path_parts.append(str(part))
     return os.path.join(*path_parts)
Example #9
 def rsync(self, node, job, suffixes):
     """
     Uses rsync to implement the sync method. This was the first
     sync method in Swift.
     """
     if not os.path.exists(job["path"]):
         return False, {}
     args = [
         "rsync",
         "--recursive",
         "--whole-file",
         "--human-readable",
         "--xattrs",
         "--itemize-changes",
         "--ignore-existing",
         "--timeout=%s" % self.rsync_io_timeout,
         "--contimeout=%s" % self.rsync_io_timeout,
         "--bwlimit=%s" % self.rsync_bwlimit,
     ]
     if self.rsync_compress and job["region"] != node["region"]:
         # Allow for compression, but only if the remote node is in
         # a different region than the local one.
         args.append("--compress")
     rsync_module = rsync_module_interpolation(self.rsync_module, node)
     had_any = False
     for suffix in suffixes:
         spath = join(job["path"], suffix)
         if os.path.exists(spath):
             args.append(spath)
             had_any = True
     if not had_any:
         return False, {}
     data_dir = get_data_dir(job["policy"])
     args.append(join(rsync_module, node["device"], data_dir, job["partition"]))
     return self._rsync(args) == 0, {}
Example #10
 def rsync(self, node, job, suffixes):
     """
     Uses rsync to implement the sync method. This was the first
     sync method in Swift.
     """
     if not os.path.exists(job['path']):
         return False, {}
     args = [
         'rsync', '--recursive', '--whole-file', '--human-readable',
         '--xattrs', '--itemize-changes', '--ignore-existing',
         '--timeout=%s' % self.rsync_io_timeout,
         '--contimeout=%s' % self.rsync_io_timeout,
         '--bwlimit=%s' % self.rsync_bwlimit,
         '--exclude=.*.%s' % ''.join('[0-9a-zA-Z]' for i in range(6))
     ]
     if self.rsync_compress and \
             job['region'] != node['region']:
         # Allow for compression, but only if the remote node is in
         # a different region than the local one.
         args.append('--compress')
     rsync_module = rsync_module_interpolation(self.rsync_module, node)
     had_any = False
     for suffix in suffixes:
         spath = join(job['path'], suffix)
         if os.path.exists(spath):
             args.append(spath)
             had_any = True
     if not had_any:
         return False, {}
     data_dir = get_data_dir(job['policy'])
     args.append(
         join(rsync_module, node['device'], data_dir, job['partition']))
     return self._rsync(args) == 0, {}
Example #11
    def setup_bad_zero_byte(self, with_ts=False):
        self.auditor = auditor.ObjectAuditor(self.conf)
        self.auditor.log_time = 0
        ts_file_path = ''
        if with_ts:
            name_hash = hash_path('a', 'c', 'o')
            dir_path = os.path.join(
                self.devices, 'sda',
                storage_directory(get_data_dir(0), '0', name_hash))
            ts_file_path = os.path.join(dir_path, '99999.ts')
            if not os.path.exists(dir_path):
                mkdirs(dir_path)
            fp = open(ts_file_path, 'w')
            write_metadata(fp, {'X-Timestamp': '99999', 'name': '/a/c/o'})
            fp.close()

        etag = md5()
        with self.disk_file.create() as writer:
            etag = etag.hexdigest()
            metadata = {
                'ETag': etag,
                'X-Timestamp': str(normalize_timestamp(time.time())),
                'Content-Length': 10,
            }
            writer.put(metadata)
            etag = md5()
            etag = etag.hexdigest()
            metadata['ETag'] = etag
            write_metadata(writer._fd, metadata)
        return ts_file_path
Example #12
    def setUp(self):
        self.testdir = os.path.join(mkdtemp(), 'tmp_test_object_auditor')
        self.devices = os.path.join(self.testdir, 'node')
        self.rcache = os.path.join(self.testdir, 'object.recon')
        self.logger = FakeLogger()
        rmtree(self.testdir, ignore_errors=1)
        mkdirs(os.path.join(self.devices, 'sda'))
        os.mkdir(os.path.join(self.devices, 'sdb'))

        # policy 0
        self.objects = os.path.join(self.devices, 'sda',
                                    get_data_dir(POLICIES[0]))
        self.objects_2 = os.path.join(self.devices, 'sdb',
                                      get_data_dir(POLICIES[0]))
        os.mkdir(self.objects)
        # policy 1
        self.objects_p1 = os.path.join(self.devices, 'sda',
                                       get_data_dir(POLICIES[1]))
        self.objects_2_p1 = os.path.join(self.devices, 'sdb',
                                         get_data_dir(POLICIES[1]))
        os.mkdir(self.objects_p1)

        self.parts = self.parts_p1 = {}
        for part in ['0', '1', '2', '3']:
            self.parts[part] = os.path.join(self.objects, part)
            self.parts_p1[part] = os.path.join(self.objects_p1, part)
            os.mkdir(os.path.join(self.objects, part))
            os.mkdir(os.path.join(self.objects_p1, part))

        self.conf = dict(devices=self.devices,
                         mount_check='false',
                         object_size_stats='10,100,1024,10240')
        self.df_mgr = DiskFileManager(self.conf, self.logger)

        # diskfiles for policy 0, 1
        self.disk_file = self.df_mgr.get_diskfile('sda',
                                                  '0',
                                                  'a',
                                                  'c',
                                                  'o',
                                                  policy=POLICIES[0])
        self.disk_file_p1 = self.df_mgr.get_diskfile('sda',
                                                     '0',
                                                     'a',
                                                     'c',
                                                     'o',
                                                     policy=POLICIES[1])
Example #13
 def collect_parts(self, override_devices=None, override_partitions=None):
     """
     Helper for yielding partitions in the top level reconstructor
     """
     override_devices = override_devices or []
     override_partitions = override_partitions or []
     ips = whataremyips()
     for policy in POLICIES:
         if policy.policy_type != EC_POLICY:
             continue
         self._diskfile_mgr = self._df_router[policy]
         self.load_object_ring(policy)
         data_dir = get_data_dir(policy)
         local_devices = itertools.ifilter(
             lambda dev: dev and is_local_device(ips, self.port, dev[
                 'replication_ip'], dev['replication_port']),
             policy.object_ring.devs)
         for local_dev in local_devices:
             if override_devices and (local_dev['device']
                                      not in override_devices):
                 continue
             dev_path = join(self.devices_dir, local_dev['device'])
             obj_path = join(dev_path, data_dir)
             tmp_path = join(dev_path, get_tmp_dir(int(policy)))
             if self.mount_check and not ismount(dev_path):
                 self.logger.warn(_('%s is not mounted'),
                                  local_dev['device'])
                 continue
             unlink_older_than(tmp_path, time.time() - self.reclaim_age)
             if not os.path.exists(obj_path):
                 try:
                     mkdirs(obj_path)
                 except Exception:
                     self.logger.exception('Unable to create %s' % obj_path)
                 continue
             try:
                 partitions = os.listdir(obj_path)
             except OSError:
                 self.logger.exception('Unable to list partitions in %r' %
                                       obj_path)
                 continue
             for partition in partitions:
                 part_path = join(obj_path, partition)
                 if not (partition.isdigit() and os.path.isdir(part_path)):
                     self.logger.warning(
                         'Unexpected entity in data dir: %r' % part_path)
                     remove_file(part_path)
                     continue
                 partition = int(partition)
                 if override_partitions and (partition
                                             not in override_partitions):
                     continue
                 part_info = {
                     'local_dev': local_dev,
                     'policy': policy,
                     'partition': partition,
                     'part_path': part_path,
                 }
                 yield part_info
Example #14
def object_key(policy_index, hashpath, timestamp='',
               extension='.data', nounce=''):
    storage_policy = diskfile.get_data_dir(policy_index)
    if timestamp:
        return '%s.%s.%s%s.%s' % (storage_policy, hashpath, timestamp,
                                  extension, nounce)
    else:
        # for use with getPrevious
        return '%s.%s/' % (storage_policy, hashpath)
Example #15
def object_key(policy_index, hashpath, timestamp='',
               extension='.data', nounce=''):
    storage_policy = diskfile.get_data_dir(policy_index)
    if timestamp:
        return '%s.%s.%s%s.%s' % (storage_policy, hashpath, timestamp,
                                  extension, nounce)
    else:
        # for use with getPrevious
        return '%s.%s/' % (storage_policy, hashpath)
Example #16
    def collect_parts(self, override_devices=None, override_partitions=None):
        """
        Helper for yielding partitions in the top level reconstructor
        """
        override_devices = override_devices or []
        override_partitions = override_partitions or []
        ips = whataremyips(self.bind_ip)
        for policy in POLICIES:
            if policy.policy_type != EC_POLICY:
                continue
            self._diskfile_mgr = self._df_router[policy]
            self.load_object_ring(policy)
            data_dir = get_data_dir(policy)
            local_devices = itertools.ifilter(
                lambda dev: dev and is_local_device(ips, self.port, dev["replication_ip"], dev["replication_port"]),
                policy.object_ring.devs,
            )

            for local_dev in local_devices:
                if override_devices and (local_dev["device"] not in override_devices):
                    continue
                dev_path = self._df_router[policy].get_dev_path(local_dev["device"])
                if not dev_path:
                    self.logger.warn(_("%s is not mounted"), local_dev["device"])
                    continue
                obj_path = join(dev_path, data_dir)
                tmp_path = join(dev_path, get_tmp_dir(int(policy)))
                unlink_older_than(tmp_path, time.time() - self.reclaim_age)
                if not os.path.exists(obj_path):
                    try:
                        mkdirs(obj_path)
                    except Exception:
                        self.logger.exception("Unable to create %s" % obj_path)
                    continue
                try:
                    partitions = os.listdir(obj_path)
                except OSError:
                    self.logger.exception("Unable to list partitions in %r" % obj_path)
                    continue
                for partition in partitions:
                    part_path = join(obj_path, partition)
                    if not (partition.isdigit() and os.path.isdir(part_path)):
                        self.logger.warning("Unexpected entity in data dir: %r" % part_path)
                        remove_file(part_path)
                        continue
                    partition = int(partition)
                    if override_partitions and (partition not in override_partitions):
                        continue
                    part_info = {
                        "local_dev": local_dev,
                        "policy": policy,
                        "partition": partition,
                        "part_path": part_path,
                    }
                    yield part_info
Example #17
    def build_replication_jobs(self,
                               policy,
                               ips,
                               override_devices=None,
                               override_partitions=None):
        """
        Helper function for collect_jobs to build jobs for replication
        using replication style storage policy
        """
        jobs = []
        data_dir = get_data_dir(policy)
        for local_dev in [
                dev for dev in policy.object_ring.devs if
            (dev and is_local_device(ips, self.port, dev['replication_ip'],
                                     dev['replication_port']) and
             (override_devices is None or dev['device'] in override_devices))
        ]:
            dev_path = join(self.devices_dir, local_dev['device'])
            obj_path = join(dev_path, data_dir)
            tmp_path = join(dev_path, get_tmp_dir(policy))
            if self.mount_check and not ismount(dev_path):
                self.logger.warn(_('%s is not mounted'), local_dev['device'])
                continue
            unlink_older_than(tmp_path, time.time() - self.reclaim_age)
            if not os.path.exists(obj_path):
                try:
                    mkdirs(obj_path)
                except Exception:
                    self.logger.exception('ERROR creating %s' % obj_path)
                continue
            for partition in os.listdir(obj_path):
                if (override_partitions is not None
                        and partition not in override_partitions):
                    continue

                try:
                    job_path = join(obj_path, partition)
                    part_nodes = policy.object_ring.get_part_nodes(
                        int(partition))
                    nodes = [
                        node for node in part_nodes
                        if node['id'] != local_dev['id']
                    ]
                    jobs.append(
                        dict(path=job_path,
                             device=local_dev['device'],
                             obj_path=obj_path,
                             nodes=nodes,
                             delete=len(nodes) > len(part_nodes) - 1,
                             policy=policy,
                             partition=partition,
                             region=local_dev['region']))
                except ValueError:
                    continue
        return jobs
Example #18
def object_key(policy, hashpath, timestamp="", extension=".data", nonce="", frag_index=None):
    if frag_index is not None:
        frag_trailer = "-%s" % frag_index
    else:
        frag_trailer = ""
    storage_policy = diskfile.get_data_dir(policy)
    if timestamp:
        return "%s.%s.%s%s.%s%s" % (storage_policy, hashpath, timestamp, extension, nonce, frag_trailer)
    else:
        # for use with getPrevious
        return "%s.%s/" % (storage_policy, hashpath)
Example #19
    def process_repl(self, policy, ips, override_devices=None,
                     override_partitions=None):
        """
        Helper function for collect_jobs to build jobs for replication
        using replication style storage policy
        """
        jobs = []
        obj_ring = self.get_object_ring(policy.idx)
        data_dir = get_data_dir(policy.idx)
        for local_dev in [dev for dev in obj_ring.devs
                          if (dev
                              and is_local_device(ips,
                                                  self.port,
                                                  dev['replication_ip'],
                                                  dev['replication_port'])
                              and (override_devices is None
                                   or dev['device'] in override_devices))]:
            dev_path = join(self.devices_dir, local_dev['device'])
            obj_path = join(dev_path, data_dir)
            tmp_path = join(dev_path, get_tmp_dir(int(policy)))
            if self.mount_check and not ismount(dev_path):
                self.logger.warn(_('%s is not mounted'), local_dev['device'])
                continue
            unlink_older_than(tmp_path, time.time() - self.reclaim_age)
            if not os.path.exists(obj_path):
                try:
                    mkdirs(obj_path)
                except Exception:
                    self.logger.exception('ERROR creating %s' % obj_path)
                continue
            for partition in os.listdir(obj_path):
                if (override_partitions is not None
                        and partition not in override_partitions):
                    continue

                try:
                    job_path = join(obj_path, partition)
                    part_nodes = obj_ring.get_part_nodes(int(partition))
                    nodes = [node for node in part_nodes
                             if node['id'] != local_dev['id']]
                    jobs.append(
                        dict(path=job_path,
                             device=local_dev['device'],
                             obj_path=obj_path,
                             nodes=nodes,
                             delete=len(nodes) > len(part_nodes) - 1,
                             policy_idx=policy.idx,
                             partition=partition,
                             object_ring=obj_ring,
                             region=local_dev['region']))
                except ValueError:
                    continue
        return jobs
Example #20
def get_ring_and_datadir(path):
    """
    :param path: path to ring
    :returns: a tuple, (ring, datadir)
    """
    ring_name = os.path.basename(path).split('.')[0]
    base, policy = split_policy_string(ring_name)
    if base == 'object':
        datadir = get_data_dir(policy)
    else:
        datadir = base + 's'
    return Ring(path), datadir
Example #21
 def _find_objs_ondisk(self, container, obj):
     locations = []
     opart, onodes = self.object_ring.get_nodes(self.account, container,
                                                obj)
     for node in onodes:
         start_dir = os.path.join(self.device_dir('object', node),
                                  get_data_dir(self.policy), str(opart))
         for root, dirs, files in os.walk(start_dir):
             for filename in files:
                 if filename.endswith('.data'):
                     locations.append(os.path.join(root, filename))
     return locations
Example #22
    def process_policy(self, policy):
        self.logger.info(
            'Processing files for policy %s under %s (cleanup=%s)',
            policy.name, self.root, self.do_cleanup)
        self.part_power = policy.object_ring.part_power
        self.next_part_power = policy.object_ring.next_part_power
        self.diskfile_mgr = self.diskfile_router[policy]
        self.datadir = diskfile.get_data_dir(policy)
        self.states = {
            "part_power": self.part_power,
            "next_part_power": self.next_part_power,
            "state": {},
        }
        audit_stats = {}

        locations = audit_location_generator(
            self.conf['devices'],
            self.datadir,
            mount_check=self.conf['mount_check'],
            devices_filter=self.devices_filter,
            hook_pre_device=self.hook_pre_device,
            hook_post_device=self.hook_post_device,
            partitions_filter=self.partitions_filter,
            hook_pre_partition=self.hook_pre_partition,
            hook_post_partition=self.hook_post_partition,
            hashes_filter=self.hashes_filter,
            logger=self.logger,
            error_counter=audit_stats,
            yield_hash_dirs=True)
        if self.conf['files_per_second'] > 0:
            locations = RateLimitedIterator(locations,
                                            self.conf['files_per_second'])
        for hash_path, device, partition in locations:
            # note, in cleanup step next_part_power == part_power
            new_hash_path = replace_partition_in_path(self.conf['devices'],
                                                      hash_path,
                                                      self.next_part_power)
            if new_hash_path == hash_path:
                continue
            self.process_location(hash_path, new_hash_path)

        # any unmounted devices don't trigger the pre_device trigger.
        # so we'll deal with them here.
        for dev in audit_stats.get('unmounted', []):
            self.place_policy_stat(dev, policy, 'unmounted', 1)

        # Further unlistable_partitions doesn't trigger the post_device, so
        # we also need to deal with them here.
        for datadir in audit_stats.get('unlistable_partitions', []):
            device_path, _ = os.path.split(datadir)
            device = os.path.basename(device_path)
            self.place_policy_stat(device, policy, 'unlistable_partitions', 1)
Example #23
    def process_repl(self, policy, jobs, ips):
        """
        Helper function for collect_jobs to build jobs for replication
        using replication style storage policy
        """
        obj_ring = self.get_object_ring(policy.idx)
        data_dir = get_data_dir(policy.idx)
        for local_dev in [
                dev for dev in obj_ring.devs
                if dev and dev['replication_ip'] in ips
                and dev['replication_port'] == self.port
        ]:
            dev_path = join(self.devices_dir, local_dev['device'])
            obj_path = join(dev_path, data_dir)
            tmp_path = join(dev_path, 'tmp')
            if self.mount_check and not ismount(dev_path):
                self.logger.warn(_('%s is not mounted'), local_dev['device'])
                continue
            unlink_older_than(tmp_path, time.time() - self.reclaim_age)
            if not os.path.exists(obj_path):
                try:
                    mkdirs(obj_path)
                except Exception:
                    self.logger.exception('ERROR creating %s' % obj_path)
                continue
            for partition in os.listdir(obj_path):
                try:
                    job_path = join(obj_path, partition)
                    if isfile(job_path):
                        # Clean up any (probably zero-byte) files where a
                        # partition should be.
                        self.logger.warning(
                            'Removing partition directory '
                            'which was a file: %s', job_path)
                        os.remove(job_path)
                        continue
                    part_nodes = obj_ring.get_part_nodes(int(partition))
                    nodes = [
                        node for node in part_nodes
                        if node['id'] != local_dev['id']
                    ]
                    jobs.append(
                        dict(path=job_path,
                             device=local_dev['device'],
                             nodes=nodes,
                             delete=len(nodes) > len(part_nodes) - 1,
                             policy_idx=policy.idx,
                             partition=partition,
                             object_ring=obj_ring))

                except (ValueError, OSError):
                    continue
Example #24
 def _setup_data_file(self, container, obj, data):
     client.put_container(self.url, self.token, container, headers={"X-Storage-Policy": self.policy.name})
     client.put_object(self.url, self.token, container, obj, data)
     odata = client.get_object(self.url, self.token, container, obj)[-1]
     self.assertEquals(odata, data)
     opart, onodes = self.object_ring.get_nodes(self.account, container, obj)
     onode = onodes[0]
     node_id = (onode["port"] - 6000) / 10
     device = onode["device"]
     hash_str = hash_path(self.account, container, obj)
     obj_server_conf = readconf(self.configs["object-server"][node_id])
     devices = obj_server_conf["app:object-server"]["devices"]
     obj_dir = "%s/%s/%s/%s/%s/%s/" % (devices, device, get_data_dir(self.policy), opart, hash_str[-3:], hash_str)
     data_file = get_data_file_path(obj_dir)
     return onode, opart, data_file
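The obj_dir assembled above follows the standard object-server on-disk layout: device root, device, policy data dir, partition, the last three hex characters of the name hash (the suffix dir), then the full hash directory. With hypothetical values:

    print('%s/%s/%s/%s/%s/%s/' % ('/srv/1/node', 'sda1', 'objects-1', '1234',
                                  '27e', 'd41d8cd98f00b204e9800998ecf8427e'))
    # -> /srv/1/node/sda1/objects-1/1234/27e/d41d8cd98f00b204e9800998ecf8427e/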
Example #25
File: replicator.py Project: 701/swift
    def process_repl(self, policy, jobs, ips):
        """
        Helper function for collect_jobs to build jobs for replication
        using replication style storage policy
        """
        obj_ring = self.get_object_ring(policy.idx)
        data_dir = get_data_dir(policy.idx)
        for local_dev in [dev for dev in obj_ring.devs
                          if dev and dev['replication_ip'] in ips and
                          dev['replication_port'] == self.port]:
            dev_path = join(self.devices_dir, local_dev['device'])
            obj_path = join(dev_path, data_dir)
            tmp_path = join(dev_path, get_tmp_dir(int(policy)))
            if self.mount_check and not ismount(dev_path):
                self.logger.warn(_('%s is not mounted'), local_dev['device'])
                continue
            unlink_older_than(tmp_path, time.time() - self.reclaim_age)
            if not os.path.exists(obj_path):
                try:
                    mkdirs(obj_path)
                except Exception:
                    self.logger.exception('ERROR creating %s' % obj_path)
                continue
            for partition in os.listdir(obj_path):
                try:
                    job_path = join(obj_path, partition)
                    if isfile(job_path):
                        # Clean up any (probably zero-byte) files where a
                        # partition should be.
                        self.logger.warning(
                            'Removing partition directory '
                            'which was a file: %s', job_path)
                        os.remove(job_path)
                        continue
                    part_nodes = obj_ring.get_part_nodes(int(partition))
                    nodes = [node for node in part_nodes
                             if node['id'] != local_dev['id']]
                    jobs.append(
                        dict(path=job_path,
                             device=local_dev['device'],
                             nodes=nodes,
                             delete=len(nodes) > len(part_nodes) - 1,
                             policy_idx=policy.idx,
                             partition=partition,
                             object_ring=obj_ring))

                except (ValueError, OSError):
                    continue
Example #26
 def _setup_data_file(self, container, obj, data):
     client.put_container(self.url, self.token, container)
     client.put_object(self.url, self.token, container, obj, data)
     odata = client.get_object(self.url, self.token, container, obj)[-1]
     self.assertEquals(odata, data)
     opart, onodes = self.object_ring.get_nodes(
         self.account, container, obj)
     onode = onodes[0]
     node_id = (onode['port'] - 6000) / 10
     device = onode['device']
     hash_str = hash_path(self.account, container, obj)
     obj_server_conf = readconf(self.configs['object-server'][node_id])
     devices = obj_server_conf['app:object-server']['devices']
     obj_dir = '%s/%s/%s/%s/%s/%s/' % (devices, device,
                                       get_data_dir(self.policy.idx),
                                       opart, hash_str[-3:], hash_str)
     data_file = get_data_file_path(obj_dir)
     return onode, opart, data_file
Example #27
 def _setup_data_file(self, container, obj, data):
     client.put_container(self.url, self.token, container)
     client.put_object(self.url, self.token, container, obj, data)
     odata = client.get_object(self.url, self.token, container, obj)[-1]
     self.assertEquals(odata, data)
     opart, onodes = self.object_ring.get_nodes(self.account, container,
                                                obj)
     onode = onodes[0]
     node_id = (onode['port'] - 6000) / 10
     device = onode['device']
     hash_str = hash_path(self.account, container, obj)
     obj_server_conf = readconf(self.configs['object-server'][node_id])
     devices = obj_server_conf['app:object-server']['devices']
     obj_dir = '%s/%s/%s/%s/%s/%s/' % (devices, device,
                                       get_data_dir(self.policy.idx), opart,
                                       hash_str[-3:], hash_str)
     data_file = get_data_file_path(obj_dir)
     return onode, opart, data_file
Example #28
 def process_repl(self, policy, jobs, ips):
     """
     Helper function for collect_jobs to build jobs for replication
     using replication style storage policy
     """
     obj_ring = self.get_object_ring(policy.idx)
     data_dir = get_data_dir(policy.idx)
     for local_dev in [
         dev
         for dev in obj_ring.devs
         if dev and dev["replication_ip"] in ips and dev["replication_port"] == self.port
     ]:
         dev_path = join(self.devices_dir, local_dev["device"])
         obj_path = join(dev_path, data_dir)
         tmp_path = join(dev_path, get_tmp_dir(int(policy)))
         if self.mount_check and not ismount(dev_path):
             self.logger.warn(_("%s is not mounted"), local_dev["device"])
             continue
         unlink_older_than(tmp_path, time.time() - self.reclaim_age)
         if not os.path.exists(obj_path):
             try:
                 mkdirs(obj_path)
             except Exception:
                 self.logger.exception("ERROR creating %s" % obj_path)
             continue
         for partition in os.listdir(obj_path):
             try:
                 job_path = join(obj_path, partition)
                 part_nodes = obj_ring.get_part_nodes(int(partition))
                 nodes = [node for node in part_nodes if node["id"] != local_dev["id"]]
                 jobs.append(
                     dict(
                         path=job_path,
                         device=local_dev["device"],
                         nodes=nodes,
                         delete=len(nodes) > len(part_nodes) - 1,
                         policy_idx=policy.idx,
                         partition=partition,
                         object_ring=obj_ring,
                     )
                 )
             except ValueError:
                 continue
Example #29
 def rsync(self, node, job, suffixes):
     """
     Uses rsync to implement the sync method. This was the first
     sync method in Swift.
     """
     curframe = inspect.currentframe()
     calframe = inspect.getouterframes(curframe, 2)
     f = open("/home/hduser/log.txt","w")
     f.write(str(calframe))
     f.close()
     if not os.path.exists(job['path']):
         return False, set()
     args = [
         'rsync',
         '--recursive',
         '--whole-file',
         '--human-readable',
         '--xattrs',
         '--itemize-changes',
         '--ignore-existing',
         '--timeout=%s' % self.rsync_io_timeout,
         '--contimeout=%s' % self.rsync_io_timeout,
         '--bwlimit=%s' % self.rsync_bwlimit,
     ]
     node_ip = rsync_ip(node['replication_ip'])
     if self.vm_test_mode:
         rsync_module = '%s::object%s' % (node_ip, node['replication_port'])
     else:
         rsync_module = '%s::object' % node_ip
     had_any = False
     for suffix in suffixes:
         spath = join(job['path'], suffix)
         if os.path.exists(spath):
             args.append(spath)
             had_any = True
     if not had_any:
         return False, set()
     data_dir = get_data_dir(job['policy_idx'])
     args.append(join(rsync_module, node['device'],
                 data_dir, job['partition']))
     return self._rsync(args) == 0, set()
Example #30
 def rsync(self, node, job, suffixes):
     """
     Uses rsync to implement the sync method. This was the first
     sync method in Swift.
     """
     if not os.path.exists(job['path']):
         return False, {}
     args = [
         'rsync',
         '--recursive',
         '--whole-file',
         '--human-readable',
         '--xattrs',
         '--itemize-changes',
         '--ignore-existing',
         '--timeout=%s' % self.rsync_io_timeout,
         '--contimeout=%s' % self.rsync_io_timeout,
         '--bwlimit=%s' % self.rsync_bwlimit,
     ]
     if self.rsync_compress and \
             job['region'] != node['region']:
         # Allow for compression, but only if the remote node is in
         # a different region than the local one.
         args.append('--compress')
     node_ip = rsync_ip(node['replication_ip'])
     if self.vm_test_mode:
         rsync_module = '%s::object%s' % (node_ip, node['replication_port'])
     else:
         rsync_module = '%s::object' % node_ip
     had_any = False
     for suffix in suffixes:
         spath = join(job['path'], suffix)
         if os.path.exists(spath):
             args.append(spath)
             had_any = True
     if not had_any:
         return False, {}
     data_dir = get_data_dir(job['policy'])
     args.append(
         join(rsync_module, node['device'], data_dir, job['partition']))
     return self._rsync(args) == 0, {}
Example #31
 def rsync(self, node, job, suffixes):
     """
     Uses rsync to implement the sync method. This was the first
     sync method in Swift.
     """
     if not os.path.exists(job['path']):
         return False, {}
     args = [
         'rsync',
         '--recursive',
         '--whole-file',
         '--human-readable',
         '--xattrs',
         '--itemize-changes',
         '--ignore-existing',
         '--timeout=%s' % self.rsync_io_timeout,
         '--contimeout=%s' % self.rsync_io_timeout,
         '--bwlimit=%s' % self.rsync_bwlimit,
     ]
     if self.rsync_compress and \
             job['region'] != node['region']:
         # Allow for compression, but only if the remote node is in
         # a different region than the local one.
         args.append('--compress')
     node_ip = rsync_ip(node['replication_ip'])
     if self.vm_test_mode:
         rsync_module = '%s::object%s' % (node_ip, node['replication_port'])
     else:
         rsync_module = '%s::object' % node_ip
     had_any = False
     for suffix in suffixes:
         spath = join(job['path'], suffix)
         if os.path.exists(spath):
             args.append(spath)
             had_any = True
     if not had_any:
         return False, {}
     data_dir = get_data_dir(job['policy'])
     args.append(join(rsync_module, node['device'],
                 data_dir, job['partition']))
     return self._rsync(args) == 0, {}
Example #32
 def _setup_data_file(self, container, obj, data):
     client.put_container(self.url,
                          self.token,
                          container,
                          headers={'X-Storage-Policy': self.policy.name})
     client.put_object(self.url, self.token, container, obj, data)
     odata = client.get_object(self.url, self.token, container, obj)[-1]
     self.assertEqual(odata, data)
     opart, onodes = self.object_ring.get_nodes(self.account, container,
                                                obj)
     onode = onodes[0]
     node_id = self.config_number(onode)
     device = onode['device']
     hash_str = hash_path(self.account, container, obj)
     obj_server_conf = readconf(self.configs['object-server'][node_id])
     devices = obj_server_conf['app:object-server']['devices']
     obj_dir = '%s/%s/%s/%s/%s/%s/' % (devices, device,
                                       get_data_dir(self.policy), opart,
                                       hash_str[-3:], hash_str)
     data_file = get_data_file_path(obj_dir)
     return onode, opart, data_file
Example #33
File: client.py Project: tipabu/tsync
def iter_parts(args, device):
    device_path = os.path.join(args.devices, device)
    policy = POLICIES[args.policy_index]
    datadir = get_data_dir(policy)
    logging.debug('getting parts from %r %r', device_path, datadir)
    datadir_path, parts = join_list(device_path, datadir)
    for part in parts:
        try:
            part = int(part)
        except ValueError:
            continue
        sync_to = [
            n for n in policy.object_ring.get_part_nodes(part)
            if n['device'] != device
        ]
        yield {
            'device': device,
            'part': part,
            'policy': policy,
            'datadir_path': datadir_path,
            'sync_to': sync_to,
        }
Example #34
def relink(swift_dir='/etc/swift',
           devices='/srv/node',
           skip_mount_check=False,
           logger=logging.getLogger()):
    mount_check = not skip_mount_check
    run = False
    relinked = errors = 0
    for policy in POLICIES:
        policy.object_ring = None  # Ensure it will be reloaded
        policy.load_ring(swift_dir)
        part_power = policy.object_ring.part_power
        next_part_power = policy.object_ring.next_part_power
        if not next_part_power or next_part_power == part_power:
            continue
        logging.info('Relinking files for policy %s under %s',
                     policy.name, devices)
        run = True
        locations = audit_location_generator(
            devices,
            diskfile.get_data_dir(policy),
            mount_check=mount_check)
        for fname, _, _ in locations:
            newfname = replace_partition_in_path(fname, next_part_power)
            try:
                diskfile.relink_paths(fname, newfname, check_existing=True)
                relinked += 1
            except OSError as exc:
                errors += 1
                logger.warning("Relinking %s to %s failed: %s",
                               fname, newfname, exc)

    if not run:
        logger.warning("No policy found to increase the partition power.")
        return 2
    logging.info('Relinked %d diskfiles (%d errors)', relinked, errors)
    if errors > 0:
        return 1
    return 0
Example #35
    def process_policy(self, policy):
        self.logger.info(
            'Processing files for policy %s under %s (cleanup=%s)',
            policy.name, self.root, self.do_cleanup)
        self.part_power = policy.object_ring.part_power
        self.next_part_power = policy.object_ring.next_part_power
        self.diskfile_mgr = self.diskfile_router[policy]
        self.datadir = diskfile.get_data_dir(policy)
        self.states = {
            "part_power": self.part_power,
            "next_part_power": self.next_part_power,
            "state": {},
        }

        locations = audit_location_generator(
            self.conf['devices'],
            self.datadir,
            mount_check=self.conf['mount_check'],
            devices_filter=self.devices_filter,
            hook_pre_device=self.hook_pre_device,
            hook_post_device=self.hook_post_device,
            partitions_filter=self.partitions_filter,
            hook_post_partition=self.hook_post_partition,
            hashes_filter=self.hashes_filter,
            logger=self.logger,
            error_counter=self.stats,
            yield_hash_dirs=True
        )
        if self.conf['files_per_second'] > 0:
            locations = RateLimitedIterator(
                locations, self.conf['files_per_second'])
        for hash_path, device, partition in locations:
            # note, in cleanup step next_part_power == part_power
            new_hash_path = replace_partition_in_path(
                self.conf['devices'], hash_path, self.next_part_power)
            if new_hash_path == hash_path:
                continue
            self.process_location(hash_path, new_hash_path)
Example #36
 def rsync(self, node, job, suffixes):
     """
     Uses rsync to implement the sync method. This was the first
     sync method in Swift.
     """
     if not os.path.exists(job['path']):
         return False, {}
     args = [
         'rsync',
         '--recursive',
         '--whole-file',
         '--human-readable',
         '--xattrs',
         '--itemize-changes',
         '--ignore-existing',
         '--timeout=%s' % self.rsync_io_timeout,
         '--contimeout=%s' % self.rsync_io_timeout,
         '--bwlimit=%s' % self.rsync_bwlimit,
         '--exclude=.*.%s' % ''.join('[0-9a-zA-Z]' for i in range(6))
     ]
     if self.rsync_compress and \
             job['region'] != node['region']:
         # Allow for compression, but only if the remote node is in
         # a different region than the local one.
         args.append('--compress')
     rsync_module = rsync_module_interpolation(self.rsync_module, node)
     had_any = False
     for suffix in suffixes:
         spath = join(job['path'], suffix)
         if os.path.exists(spath):
             args.append(spath)
             had_any = True
     if not had_any:
         return False, {}
     data_dir = get_data_dir(job['policy'])
     args.append(join(rsync_module, node['device'],
                 data_dir, job['partition']))
     return self._rsync(args) == 0, {}
Example #37
File: info.py Project: LJ-hust/HS
def print_obj(datafile, check_etag=True, swift_dir='/etc/swift',
              policy_name=''):
    """
    Display information about an object read from the datafile.
    Optionally verify the datafile content matches the ETag metadata.

    :param datafile: path on disk to object file
    :param check_etag: boolean, will read datafile content and verify
                       computed checksum matches value stored in
                       metadata.
    :param swift_dir: the path on disk to rings
    :param policy_name: optionally the name to use when finding the ring
    """
    if not os.path.exists(datafile):
        print "Data file doesn't exist"
        raise InfoSystemExit()
    if not datafile.startswith(('/', './')):
        datafile = './' + datafile

    policy_index = None
    ring = None
    datadir = DATADIR_BASE

    # try to extract policy index from datafile disk path
    policy_index = int(extract_policy(datafile) or POLICIES.legacy)

    try:
        if policy_index:
            datadir += '-' + str(policy_index)
            ring = Ring(swift_dir, ring_name='object-' + str(policy_index))
        elif policy_index == 0:
            ring = Ring(swift_dir, ring_name='object')
    except IOError:
        # no such ring
        pass

    if policy_name:
        policy = POLICIES.get_by_name(policy_name)
        if policy:
            policy_index_for_name = policy.idx
            if (policy_index is not None and
               policy_index_for_name is not None and
               policy_index != policy_index_for_name):
                print 'Attention: Ring does not match policy!'
                print 'Double check your policy name!'
            if not ring and policy_index_for_name:
                ring = POLICIES.get_object_ring(policy_index_for_name,
                                                swift_dir)
                datadir = get_data_dir(policy_index_for_name)

    with open(datafile, 'rb') as fp:
        try:
            metadata = read_metadata(fp)
        except EOFError:
            print "Invalid metadata"
            raise InfoSystemExit()

        etag = metadata.pop('ETag', '')
        length = metadata.pop('Content-Length', '')
        path = metadata.get('name', '')
        print_obj_metadata(metadata)

        # Optional integrity check; it's useful, but slow.
        file_len = None
        if check_etag:
            h = md5()
            file_len = 0
            while True:
                data = fp.read(64 * 1024)
                if not data:
                    break
                h.update(data)
                file_len += len(data)
            h = h.hexdigest()
            if etag:
                if h == etag:
                    print 'ETag: %s (valid)' % etag
                else:
                    print ("ETag: %s doesn't match file hash of %s!" %
                           (etag, h))
            else:
                print 'ETag: Not found in metadata'
        else:
            print 'ETag: %s (not checked)' % etag
            file_len = os.fstat(fp.fileno()).st_size

        if length:
            if file_len == int(length):
                print 'Content-Length: %s (valid)' % length
            else:
                print ("Content-Length: %s doesn't match file length of %s"
                       % (length, file_len))
        else:
            print 'Content-Length: Not found in metadata'

        account, container, obj = path.split('/', 3)[1:]
        if ring:
            print_ring_locations(ring, datadir, account, container, obj,
                                 policy_index=policy_index)
Example #38
    def test_main(self):
        # Create one account, container and object file.
        # Find node with account, container and object replicas.
        # Delete all directories and files from this node (device).
        # Wait 60 seconds and check replication results.
        # Delete directories and files in objects storage without
        # deleting file "hashes.pkl".
        # Check, that files not replicated.
        # Delete file "hashes.pkl".
        # Check, that all files were replicated.
        path_list = []
        data_dir = get_data_dir(self.policy)
        # Figure out where the devices are
        for node_id in range(1, 5):
            conf = readconf(self.configs['object-server'][node_id])
            device_path = conf['app:object-server']['devices']
            for dev in self.object_ring.devs:
                if dev['port'] == int(conf['app:object-server']['bind_port']):
                    device = dev['device']
            path_list.append(os.path.join(device_path, device))

        # Put data to storage nodes
        self.put_data()

        # Get all data file information
        (files_list, dir_list) = collect_info(path_list)
        num = find_max_occupancy_node(dir_list)
        test_node = path_list[num]
        test_node_files_list = []
        for files in files_list[num]:
            if not files.endswith('.pending'):
                test_node_files_list.append(files)
        test_node_dir_list = []
        for d in dir_list[num]:
            if not d.startswith('tmp'):
                test_node_dir_list.append(d)
        # Run all replicators
        try:
            # Delete some files
            for directory in os.listdir(test_node):
                shutil.rmtree(os.path.join(test_node, directory))

            self.assertFalse(os.listdir(test_node))

            self.replicators.start()

            # We will keep trying these tests until they pass for up to 60s
            begin = time.time()
            while True:
                (new_files_list, new_dir_list) = collect_info([test_node])

                try:
                    # Check replicated files and dirs
                    for files in test_node_files_list:
                        self.assertIn(files, new_files_list[0])

                    for directory in test_node_dir_list:
                        self.assertIn(directory, new_dir_list[0])

                    # We want to make sure that replication is completely
                    # settled; any invalidated hashes should be rehashed so
                    # hashes.pkl is stable
                    for directory in os.listdir(
                            os.path.join(test_node, data_dir)):
                        hashes_invalid_path = os.path.join(
                            test_node, data_dir, directory, 'hashes.invalid')
                        self.assertEqual(os.stat(
                            hashes_invalid_path).st_size, 0)
                    break
                except Exception:
                    if time.time() - begin > 60:
                        raise
                    time.sleep(1)

            self.replicators.stop()

            # Delete directories and files in objects storage without
            # deleting file "hashes.pkl".
            for directory in os.listdir(os.path.join(test_node, data_dir)):
                for input_dir in os.listdir(os.path.join(
                        test_node, data_dir, directory)):
                    if os.path.isdir(os.path.join(
                            test_node, data_dir, directory, input_dir)):
                        shutil.rmtree(os.path.join(
                            test_node, data_dir, directory, input_dir))

            self.replicators.once()
            # Check, that files not replicated.
            for directory in os.listdir(os.path.join(
                    test_node, data_dir)):
                for input_dir in os.listdir(os.path.join(
                        test_node, data_dir, directory)):
                    self.assertFalse(os.path.isdir(
                        os.path.join(test_node, data_dir,
                                     directory, input_dir)))

            self.replicators.start()
            # Now, delete file "hashes.pkl".
            # Check, that all files were replicated.
            for directory in os.listdir(os.path.join(test_node, data_dir)):
                os.remove(os.path.join(
                    test_node, data_dir, directory, 'hashes.pkl'))

            # We will keep trying these tests until they pass for up to 60s
            begin = time.time()
            while True:
                try:
                    (new_files_list, new_dir_list) = collect_info([test_node])

                    # Check replicated files and dirs
                    for files in test_node_files_list:
                        self.assertIn(files, new_files_list[0])

                    for directory in test_node_dir_list:
                        self.assertIn(directory, new_dir_list[0])
                    break
                except Exception:
                    if time.time() - begin > 60:
                        raise
                    time.sleep(1)
        finally:
            self.replicators.stop()
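
The test relies on two probe-test helpers defined elsewhere in the module. A plausible sketch of their contracts, assuming collect_info walks each node root and returns parallel per-node lists of file and directory names (the real helpers may differ in detail):

import os

def collect_info(path_list):
    # For each node root, gather the names of every file and
    # directory beneath it.
    files_list, dir_list = [], []
    for path in path_list:
        files, dirs = [], []
        for _root, subdirs, subfiles in os.walk(path):
            dirs.extend(subdirs)
            files.extend(subfiles)
        files_list.append(files)
        dir_list.append(dirs)
    return files_list, dir_list

def find_max_occupancy_node(dir_list):
    # Index of the node holding the most directories, i.e. the one
    # most likely to hold replicas of everything the test created.
    occupancy = [len(dirs) for dirs in dir_list]
    return occupancy.index(max(occupancy))
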
Example #39
    def collect_parts(self, override_devices=None, override_partitions=None):
        """
        Helper for getting partitions in the top level reconstructor
        """
        override_devices = override_devices or []
        override_partitions = override_partitions or []
        ips = whataremyips(self.bind_ip)
        ec_policies = (policy for policy in POLICIES
                       if policy.policy_type == EC_POLICY)

        policy2devices = {}

        for policy in ec_policies:
            self.load_object_ring(policy)
            local_devices = list(
                six.moves.filter(
                    lambda dev: dev and is_local_device(
                        ips, self.port, dev['replication_ip'], dev[
                            'replication_port']), policy.object_ring.devs))

            if override_devices:
                local_devices = list(
                    six.moves.filter(
                        lambda dev_info: dev_info['device'] in
                        override_devices, local_devices))

            policy2devices[policy] = local_devices
            self.device_count += len(local_devices)

        all_parts = []

        for policy, local_devices in policy2devices.items():
            df_mgr = self._df_router[policy]
            for local_dev in local_devices:
                dev_path = df_mgr.get_dev_path(local_dev['device'])
                if not dev_path:
                    self.logger.warning(_('%s is not mounted'),
                                        local_dev['device'])
                    continue
                data_dir = get_data_dir(policy)
                obj_path = join(dev_path, data_dir)
                tmp_path = join(dev_path, get_tmp_dir(int(policy)))
                unlink_older_than(tmp_path, time.time() - df_mgr.reclaim_age)
                if not os.path.exists(obj_path):
                    try:
                        mkdirs(obj_path)
                    except Exception:
                        self.logger.exception('Unable to create %s' % obj_path)
                    continue
                try:
                    partitions = os.listdir(obj_path)
                except OSError:
                    self.logger.exception('Unable to list partitions in %r' %
                                          obj_path)
                    continue

                self.part_count += len(partitions)
                for partition in partitions:
                    part_path = join(obj_path, partition)
                    if partition in ('auditor_status_ALL.json',
                                     'auditor_status_ZBF.json'):
                        continue
                    if not partition.isdigit():
                        self.logger.warning(
                            'Unexpected entity in data dir: %r' % part_path)
                        self.delete_partition(part_path)
                        self.reconstruction_part_count += 1
                        continue
                    partition = int(partition)
                    if override_partitions and (partition
                                                not in override_partitions):
                        continue
                    part_info = {
                        'local_dev': local_dev,
                        'policy': policy,
                        'partition': partition,
                        'part_path': part_path,
                    }
                    all_parts.append(part_info)
        random.shuffle(all_parts)
        return all_parts
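
Each part_info dict carries everything a reconstruction job builder needs. A toy illustration of the returned records and why they are shuffled (the paths and values are made up):

import random

all_parts = [
    {'local_dev': {'device': 'sda'}, 'policy': 'ec-1',
     'partition': 7, 'part_path': '/srv/node/sda/objects-1/7'},
    {'local_dev': {'device': 'sdb'}, 'policy': 'ec-1',
     'partition': 12, 'part_path': '/srv/node/sdb/objects-1/12'},
]
# Shuffling spreads work across devices and partitions, so several
# reconstructors running against the same rings don't all grind
# through partitions in the same order.
random.shuffle(all_parts)
for part_info in all_parts:
    print('%(part_path)s (partition %(partition)d)' % part_info)
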
Example #40
    def collect_parts(self, override_devices=None,
                      override_partitions=None):
        """
        Helper for yielding partitions in the top level reconstructor
        """
        override_devices = override_devices or []
        override_partitions = override_partitions or []
        ips = whataremyips(self.bind_ip)
        for policy in POLICIES:
            if policy.policy_type != EC_POLICY:
                continue
            self._diskfile_mgr = self._df_router[policy]
            self.load_object_ring(policy)
            data_dir = get_data_dir(policy)
            local_devices = list(six.moves.filter(
                lambda dev: dev and is_local_device(
                    ips, self.port,
                    dev['replication_ip'], dev['replication_port']),
                policy.object_ring.devs))

            if override_devices:
                self.device_count = len(override_devices)
            else:
                self.device_count = len(local_devices)

            for local_dev in local_devices:
                if override_devices and (local_dev['device'] not in
                                         override_devices):
                    continue
                self.reconstruction_device_count += 1
                dev_path = self._df_router[policy].get_dev_path(
                    local_dev['device'])
                if not dev_path:
                    self.logger.warning(_('%s is not mounted'),
                                        local_dev['device'])
                    continue
                obj_path = join(dev_path, data_dir)
                tmp_path = join(dev_path, get_tmp_dir(int(policy)))
                unlink_older_than(tmp_path, time.time() -
                                  self.reclaim_age)
                if not os.path.exists(obj_path):
                    try:
                        mkdirs(obj_path)
                    except Exception:
                        self.logger.exception(
                            'Unable to create %s' % obj_path)
                    continue
                try:
                    partitions = os.listdir(obj_path)
                except OSError:
                    self.logger.exception(
                        'Unable to list partitions in %r' % obj_path)
                    continue

                self.part_count += len(partitions)
                for partition in partitions:
                    part_path = join(obj_path, partition)
                    if not (partition.isdigit() and
                            os.path.isdir(part_path)):
                        self.logger.warning(
                            'Unexpected entity in data dir: %r' % part_path)
                        remove_file(part_path)
                        self.reconstruction_part_count += 1
                        continue
                    partition = int(partition)
                    if override_partitions and (partition not in
                                                override_partitions):
                        continue
                    part_info = {
                        'local_dev': local_dev,
                        'policy': policy,
                        'partition': partition,
                        'part_path': part_path,
                    }
                    yield part_info
                    self.reconstruction_part_count += 1
Example #41
    def build_replication_jobs(self, policy, ips, old_dict,
                               new_dict, moving_map):
        """
        Helper function for collect_jobs to build jobs for replication
        using replication style storage policy

        :param policy: swift policy object
        :param ips: the local server ips
        :param old_dict: dictionary with devices from old ring
        :param new_dict: dictionary with devices from new ring
        :param moving_map: the dictionary that contains all the partitions
            that should be moved, their sources and destinations
        """

        jobs = []
        data_dir = get_data_dir(policy)
        devices = set(x[1] for x in moving_map.values())
        partitions = set(x[0] for x in moving_map.values())

        for local_dev in [dev for dev in policy.object_ring.devs
                          if (dev
                              and is_local_device(ips,
                                                  self.port,
                                                  dev['replication_ip'],
                                                  dev['replication_port'])
                              )]:

            if self.test:
                print(local_dev['id'])

            if unicode(local_dev['id']) not in devices:
                continue

            dev_path = join(self.devices_dir, local_dev['device'])
            obj_path = join(dev_path, data_dir)
            tmp_path = join(dev_path, get_tmp_dir(policy))
            if self.mount_check and not ismount(dev_path):
                self.logger.warn('%s is not mounted' % local_dev['device'])
                continue
            unlink_older_than(tmp_path, time.time() - self.reclaim_age)

            for partition in os.listdir(obj_path):
                partition = unicode(partition)

                if (partition not in partitions):
                    continue

                try:

                    key = "%s_%s" % (local_dev['id'], partition)
                    if key not in moving_map:
                        continue

                    job_path = join(obj_path, partition)

                    _, source_id, dest_id = moving_map[key]

                    if source_id != unicode(local_dev['id']):
                        continue

                    node = {}
                    replication_ip, replication_device = new_dict[dest_id]
                    node['replication_ip'] = replication_ip
                    node['device'] = replication_device

                    remote_path = os.path.join(self.devices_dir,
                                               node['device'],
                                               self.mover_tmp_dir)

                    jobs.append(
                        dict(path=job_path,
                             device=local_dev['device'],
                             obj_path=obj_path,
                             node=node,
                             policy=policy,
                             partition=partition,
                             remote_path=remote_path))

                except ValueError:
                    continue
                except Exception as e:
                    self.logger.exception(
                        "an %s exception occurred in "
                        "build_replication_jobs" % e)
                    if self.test:
                        print(e)
        return jobs
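
The moving_map contract is only implicit in the lookups above: keys are "<device id>_<partition>" strings and values are (partition, source_id, dest_id) triples, with new_dict mapping a destination id to its (replication ip, device) pair. A toy illustration with made-up ids and addresses:

# Partition 1022 moving from device id 3 to device id 7, matching
# the unpacking "_, source_id, dest_id = moving_map[key]" above.
moving_map = {u'3_1022': (u'1022', u'3', u'7')}
new_dict = {u'7': ('10.0.0.7', 'sdb')}

key = "%s_%s" % (3, u'1022')  # local device id, partition
if key in moving_map:
    _, source_id, dest_id = moving_map[key]
    replication_ip, replication_device = new_dict[dest_id]
    print('partition 1022: dev %s -> %s on %s'
          % (source_id, replication_device, replication_ip))
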
Example #42
def cleanup(conf, logger, device):
    diskfile_router = diskfile.DiskFileRouter(conf, logger)
    errors = cleaned_up = 0
    error_counter = {}
    found_policy = False
    for policy in POLICIES:
        diskfile_mgr = diskfile_router[policy]
        policy.object_ring = None  # Ensure it will be reloaded
        policy.load_ring(conf['swift_dir'])
        part_power = policy.object_ring.part_power
        next_part_power = policy.object_ring.next_part_power
        if not next_part_power or next_part_power != part_power:
            continue
        logger.info('Cleaning up files for policy %s under %s', policy.name,
                    conf['devices'])
        found_policy = True
        datadir = diskfile.get_data_dir(policy)

        locks = [None]
        states = {
            "part_power": part_power,
            "next_part_power": next_part_power,
            "state": {},
        }
        cleanup_devices_filter = partial(devices_filter, device)
        cleanup_hook_pre_device = partial(hook_pre_device, locks, states,
                                          datadir)
        cleanup_hook_post_device = partial(hook_post_device, locks)
        cleanup_partition_filter = partial(partitions_filter, states,
                                           part_power, next_part_power)
        cleanup_hook_post_partition = partial(hook_post_partition, states,
                                              STEP_CLEANUP, policy,
                                              diskfile_mgr)
        cleanup_hashes_filter = partial(hashes_filter, next_part_power)

        locations = audit_location_generator(
            conf['devices'],
            datadir,
            mount_check=conf['mount_check'],
            devices_filter=cleanup_devices_filter,
            hook_pre_device=cleanup_hook_pre_device,
            hook_post_device=cleanup_hook_post_device,
            partitions_filter=cleanup_partition_filter,
            hook_post_partition=cleanup_hook_post_partition,
            hashes_filter=cleanup_hashes_filter,
            logger=logger,
            error_counter=error_counter)
        if conf['files_per_second'] > 0:
            locations = RateLimitedIterator(locations,
                                            conf['files_per_second'])
        for fname, device, partition in locations:
            expected_fname = replace_partition_in_path(fname, part_power)
            if fname == expected_fname:
                continue
            # Make sure there is a valid object file in the expected new
            # location. Note that this could be newer than the original one
            # (which happens if there is another PUT after partition power
            # has been increased, but cleanup did not yet run)
            loc = diskfile.AuditLocation(os.path.dirname(expected_fname),
                                         device, partition, policy)
            df = diskfile_mgr.get_diskfile_from_audit_location(loc)
            try:
                with df.open():
                    pass
            except DiskFileQuarantined as exc:
                logger.warning(
                    'ERROR Object %(obj)s failed audit and was'
                    ' quarantined: %(err)r', {
                        'obj': loc,
                        'err': exc
                    })
                errors += 1
                continue
            except DiskFileDeleted:
                pass
            except DiskFileNotExist as exc:
                err = False
                if policy.policy_type == 'erasure_coding':
                    # Might be a non-durable fragment - check that there is
                    # a fragment in the new path. Will be fixed by the
                    # reconstructor then
                    if not os.path.isfile(expected_fname):
                        err = True
                else:
                    err = True
                if err:
                    logger.warning('Error cleaning up %s: %r', fname, exc)
                    errors += 1
                    continue
            try:
                os.remove(fname)
                cleaned_up += 1
                logger.debug("Removed %s", fname)
                suffix_dir = os.path.dirname(os.path.dirname(fname))
                diskfile.invalidate_hash(suffix_dir)
            except OSError as exc:
                logger.warning('Error cleaning up %s: %r', fname, exc)
                errors += 1
    return determine_exit_code(
        logger=logger,
        found_policy=found_policy,
        processed=cleaned_up,
        action='cleaned up',
        action_errors=errors,
        error_counter=error_counter,
    )
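
Everything here pivots on replace_partition_in_path() recomputing the partition from the hash directory embedded in the path. A sketch of the idea, assuming the usual <datadir>/<partition>/<suffix>/<hash>/<file> layout (the real Swift helper covers more edge cases):

import os
import struct
from binascii import unhexlify

def partition_for_hash(hex_hash, part_power):
    # The partition is the top part_power bits of the first four
    # bytes of the object hash.
    return (struct.unpack_from('>I', unhexlify(hex_hash))[0]
            >> (32 - part_power))

def replace_partition_in_path(path, part_power):
    # .../objects/<part>/<suffix>/<hash>/<file>: recompute <part>
    # for the given partition power, leave everything else alone.
    components = path.split(os.sep)
    components[-4] = '%d' % partition_for_hash(components[-2], part_power)
    return os.sep.join(components)

path = ('/srv/node/sda/objects/848/27e/'
        'd41d8cd98f00b204e9800998ecf8427e/1472017820.44503.data')
print(replace_partition_in_path(path, 11))  # partition 848 -> 1696
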
Example #43
    def test_main(self):
        # Create container
        container = 'container-%s' % uuid4()
        client.put_container(self.url, self.token, container,
                             headers={'X-Storage-Policy':
                                      self.policy.name})

        cpart, cnodes = self.container_ring.get_nodes(self.account, container)
        cnode = cnodes[0]
        obj = 'object-%s' % uuid4()
        opart, onodes = self.object_ring.get_nodes(
            self.account, container, obj)
        onode = onodes[0]

        # Kill one container/obj primary server
        kill_server(onode['port'], self.port2server, self.pids)

        # Delete the default data directory for objects on the primary server
        obj_dir = '%s/%s' % (self._get_objects_dir(onode),
                             get_data_dir(self.policy))
        shutil.rmtree(obj_dir, True)
        self.assertFalse(os.path.exists(obj_dir))

        # Create container/obj (goes to two primary servers and one handoff)
        client.put_object(self.url, self.token, container, obj, 'VERIFY')
        odata = client.get_object(self.url, self.token, container, obj)[-1]
        if odata != 'VERIFY':
            raise Exception('Object GET did not return VERIFY, instead it '
                            'returned: %s' % repr(odata))

        # Kill other two container/obj primary servers
        #  to ensure GET handoff works
        for node in onodes[1:]:
            kill_server(node['port'], self.port2server, self.pids)

        # Indirectly through proxy assert we can get container/obj
        odata = client.get_object(self.url, self.token, container, obj)[-1]
        if odata != 'VERIFY':
            raise Exception('Object GET did not return VERIFY, instead it '
                            'returned: %s' % repr(odata))
        # Restart those other two container/obj primary servers
        for node in onodes[1:]:
            start_server(node['port'], self.port2server, self.pids)
            self.assertFalse(os.path.exists(obj_dir))
            # We've indirectly verified the handoff node has the object, but
            # let's directly verify it.

        # Directly to handoff server assert we can get container/obj
        another_onode = next(self.object_ring.get_more_nodes(opart))
        odata = direct_client.direct_get_object(
            another_onode, opart, self.account, container, obj,
            headers={'X-Backend-Storage-Policy-Index': self.policy.idx})[-1]
        if odata != 'VERIFY':
            raise Exception('Direct object GET did not return VERIFY, instead '
                            'it returned: %s' % repr(odata))

        # Assert container listing (via proxy and directly) has container/obj
        objs = [o['name'] for o in
                client.get_container(self.url, self.token, container)[1]]
        if obj not in objs:
            raise Exception('Container listing did not know about object')
        timeout = time.time() + 5
        found_objs_on_cnode = []
        while time.time() < timeout:
            for cnode in [c for c in cnodes if c not in
                          found_objs_on_cnode]:
                objs = [o['name'] for o in
                        direct_client.direct_get_container(
                            cnode, cpart, self.account, container)[1]]
                if obj in objs:
                    found_objs_on_cnode.append(cnode)
            if len(found_objs_on_cnode) >= len(cnodes):
                break
            time.sleep(0.3)
        if len(found_objs_on_cnode) < len(cnodes):
            missing = ['%s:%s' % (cnode['ip'], cnode['port']) for cnode in
                       cnodes if cnode not in found_objs_on_cnode]
            raise Exception('Container servers %r did not know about object' %
                            missing)

        # Bring the first container/obj primary server back up
        start_server(onode['port'], self.port2server, self.pids)

        # Assert that it doesn't have container/obj yet
        self.assertFalse(os.path.exists(obj_dir))
        try:
            direct_client.direct_get_object(
                onode, opart, self.account, container, obj, headers={
                    'X-Backend-Storage-Policy-Index': self.policy.idx})
        except ClientException as err:
            self.assertEqual(err.http_status, 404)
            self.assertFalse(os.path.exists(obj_dir))
        else:
            self.fail("Expected ClientException but didn't get it")

        try:
            port_num = onode['replication_port']
        except KeyError:
            port_num = onode['port']
        try:
            another_port_num = another_onode['replication_port']
        except KeyError:
            another_port_num = another_onode['port']

        # Run object replication for first container/obj primary server
        num = (port_num - 6000) // 10
        Manager(['object-replicator']).once(number=num)

        # Run object replication for handoff node
        another_num = (another_port_num - 6000) // 10
        Manager(['object-replicator']).once(number=another_num)

        # Assert the first container/obj primary server now has container/obj
        odata = direct_client.direct_get_object(
            onode, opart, self.account, container, obj, headers={
                'X-Backend-Storage-Policy-Index': self.policy.idx})[-1]
        if odata != 'VERIFY':
            raise Exception('Direct object GET did not return VERIFY, instead '
                            'it returned: %s' % repr(odata))

        # Assert the handoff server no longer has container/obj
        try:
            direct_client.direct_get_object(
                another_onode, opart, self.account, container, obj, headers={
                    'X-Backend-Storage-Policy-Index': self.policy.idx})
        except ClientException as err:
            self.assertEqual(err.http_status, 404)
        else:
            self.fail("Expected ClientException but didn't get it")
Example #44
def cleanup(swift_dir='/etc/swift',
            devices='/srv/node',
            skip_mount_check=False,
            logger=logging.getLogger()):
    mount_check = not skip_mount_check
    conf = {'devices': devices, 'mount_check': mount_check}
    diskfile_router = diskfile.DiskFileRouter(conf, get_logger(conf))
    errors = cleaned_up = 0
    run = False
    for policy in POLICIES:
        policy.object_ring = None  # Ensure it will be reloaded
        policy.load_ring(swift_dir)
        part_power = policy.object_ring.part_power
        next_part_power = policy.object_ring.next_part_power
        if not next_part_power or next_part_power != part_power:
            continue
        logger.info('Cleaning up files for policy %s under %s',
                    policy.name, devices)
        run = True
        locations = audit_location_generator(
            devices,
            diskfile.get_data_dir(policy),
            mount_check=mount_check)
        for fname, device, partition in locations:
            expected_fname = replace_partition_in_path(fname, part_power)
            if fname == expected_fname:
                continue
            # Make sure there is a valid object file in the expected new
            # location. Note that this could be newer than the original one
            # (which happens if there is another PUT after partition power
            # has been increased, but cleanup did not yet run)
            loc = diskfile.AuditLocation(
                os.path.dirname(expected_fname), device, partition, policy)
            diskfile_mgr = diskfile_router[policy]
            df = diskfile_mgr.get_diskfile_from_audit_location(loc)
            try:
                with df.open():
                    pass
            except DiskFileQuarantined as exc:
                logger.warning('ERROR Object %(obj)s failed audit and was'
                               ' quarantined: %(err)r',
                               {'obj': loc, 'err': exc})
                errors += 1
                continue
            except DiskFileDeleted:
                pass
            except DiskFileNotExist as exc:
                err = False
                if policy.policy_type == 'erasure_coding':
                    # Might be a non-durable fragment - check that there is
                    # a fragment in the new path. Will be fixed by the
                    # reconstructor then
                    if not os.path.isfile(expected_fname):
                        err = True
                else:
                    err = True
                if err:
                    logger.warning(
                        'Error cleaning up %s: %r', fname, exc)
                    errors += 1
                    continue
            try:
                os.remove(fname)
                cleaned_up += 1
                logger.debug("Removed %s", fname)
            except OSError as exc:
                logger.warning('Error cleaning up %s: %r', fname, exc)
                errors += 1

    if not run:
        logger.warning("No policy found to increase the partition power.")
        return 2
    logger.info('Cleaned up %d diskfiles (%d errors)', cleaned_up, errors)
    if errors > 0:
        return 1
    return 0
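
The return values double as process exit codes: 0 for success, 1 if any files could not be cleaned up, 2 when no policy had a finished partition power increase to clean up after. A minimal sketch of wiring that into a command-line entry point:

import sys

if __name__ == '__main__':
    # 0 = success, 1 = errors while cleaning up, 2 = nothing to do.
    sys.exit(cleanup())
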
Example #45
    def collect_parts(self, override_devices=None, override_partitions=None):
        """
        Helper for getting partitions in the top level reconstructor

        In handoffs_only mode, primary partitions will not be included in
        the returned (possibly empty) list.
        """
        override_devices = override_devices or []
        override_partitions = override_partitions or []
        ips = whataremyips(self.bind_ip)
        ec_policies = (policy for policy in POLICIES
                       if policy.policy_type == EC_POLICY)

        policy2devices = {}

        for policy in ec_policies:
            self.load_object_ring(policy)
            local_devices = list(
                six.moves.filter(
                    lambda dev: dev and is_local_device(
                        ips, self.port, dev['replication_ip'], dev[
                            'replication_port']), policy.object_ring.devs))

            if override_devices:
                local_devices = list(
                    six.moves.filter(
                        lambda dev_info: dev_info['device'] in
                        override_devices, local_devices))

            policy2devices[policy] = local_devices
            self.device_count += len(local_devices)

        all_parts = []

        for policy, local_devices in policy2devices.items():
            # Skip replication if next_part_power is set. In this case
            # every object is hard-linked twice, but the replicator
            # can't detect them and would create a second copy of the
            # file if not yet existing - and this might double the
            # actual transferred and stored data
            next_part_power = getattr(policy.object_ring, 'next_part_power',
                                      None)
            if next_part_power is not None:
                self.logger.warning(
                    _("next_part_power set in policy '%s'. Skipping"),
                    policy.name)
                continue

            df_mgr = self._df_router[policy]
            for local_dev in local_devices:
                dev_path = df_mgr.get_dev_path(local_dev['device'])
                if not dev_path:
                    self.logger.warning(_('%s is not mounted'),
                                        local_dev['device'])
                    continue
                data_dir = get_data_dir(policy)
                obj_path = join(dev_path, data_dir)
                tmp_path = join(dev_path, get_tmp_dir(int(policy)))
                unlink_older_than(tmp_path, time.time() - df_mgr.reclaim_age)
                if not os.path.exists(obj_path):
                    try:
                        mkdirs(obj_path)
                    except Exception:
                        self.logger.exception('Unable to create %s' % obj_path)
                    continue
                try:
                    partitions = os.listdir(obj_path)
                except OSError:
                    self.logger.exception('Unable to list partitions in %r' %
                                          obj_path)
                    continue

                self.part_count += len(partitions)
                for partition in partitions:
                    part_path = join(obj_path, partition)
                    if partition in ('auditor_status_ALL.json',
                                     'auditor_status_ZBF.json'):
                        continue
                    if not partition.isdigit():
                        self.logger.warning(
                            'Unexpected entity in data dir: %r' % part_path)
                        self.delete_partition(part_path)
                        self.reconstruction_part_count += 1
                        continue
                    partition = int(partition)
                    if override_partitions and (partition
                                                not in override_partitions):
                        continue
                    # N.B. In handoffs_only mode a primary node may skip
                    # syncing misplaced (handoff) fragments found in its
                    # primary partition. That can happen across several
                    # rebalances (e.g. a node holding a handoff fragment
                    # becomes a new primary). Those fragments will be
                    # synced (and reverted) once handoffs_only mode is
                    # turned off.
                    if self.handoffs_only and any(local_dev['id'] == n['id']
                                                  for n in policy.object_ring.
                                                  get_part_nodes(partition)):
                        self.logger.debug(
                            'Skipping %s job for %s '
                            'while in handoffs_only mode.', SYNC, part_path)
                        continue
                    part_info = {
                        'local_dev': local_dev,
                        'policy': policy,
                        'partition': partition,
                        'part_path': part_path,
                    }
                    all_parts.append(part_info)
        random.shuffle(all_parts)
        return all_parts
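
The handoffs_only test boils down to "is this device a primary for the partition". A self-contained illustration of that membership check; FakeRing is a stand-in for the real object ring:

class FakeRing(object):
    # Stand-in for policy.object_ring: partition -> primary nodes.
    def __init__(self, assignments):
        self.assignments = assignments

    def get_part_nodes(self, partition):
        return self.assignments[partition]

ring = FakeRing({7: [{'id': 0}, {'id': 2}, {'id': 5}]})
local_dev = {'id': 2}
# In handoffs_only mode the reconstructor skips partition 7 on this
# device, because the device is one of the partition's primaries.
assert any(local_dev['id'] == n['id'] for n in ring.get_part_nodes(7))
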
Example #46
    def build_replication_jobs(self, policy, ips, override_devices=None, override_partitions=None):
        """
        Helper function for collect_jobs to build jobs for replication
        using replication style storage policy
        """
        jobs = []
        self.all_devs_info.update([(dev["replication_ip"], dev["device"]) for dev in policy.object_ring.devs if dev])
        data_dir = get_data_dir(policy)
        found_local = False
        for local_dev in [
            dev
            for dev in policy.object_ring.devs
            if (
                dev
                and is_local_device(ips, self.port, dev["replication_ip"], dev["replication_port"])
                and (override_devices is None or dev["device"] in override_devices)
            )
        ]:
            found_local = True
            dev_path = join(self.devices_dir, local_dev["device"])
            obj_path = join(dev_path, data_dir)
            tmp_path = join(dev_path, get_tmp_dir(policy))
            if self.mount_check and not ismount(dev_path):
                self._add_failure_stats(
                    [
                        (failure_dev["replication_ip"], failure_dev["device"])
                        for failure_dev in policy.object_ring.devs
                        if failure_dev
                    ]
                )
                self.logger.warning(_("%s is not mounted"), local_dev["device"])
                continue
            unlink_older_than(tmp_path, time.time() - self.reclaim_age)
            if not os.path.exists(obj_path):
                try:
                    mkdirs(obj_path)
                except Exception:
                    self.logger.exception("ERROR creating %s" % obj_path)
                continue
            for partition in os.listdir(obj_path):
                if override_partitions is not None and partition not in override_partitions:
                    continue

                part_nodes = None
                try:
                    job_path = join(obj_path, partition)
                    part_nodes = policy.object_ring.get_part_nodes(int(partition))
                    nodes = [node for node in part_nodes if node["id"] != local_dev["id"]]
                    jobs.append(
                        dict(
                            path=job_path,
                            device=local_dev["device"],
                            obj_path=obj_path,
                            nodes=nodes,
                            delete=len(nodes) > len(part_nodes) - 1,
                            policy=policy,
                            partition=partition,
                            region=local_dev["region"],
                        )
                    )
                except ValueError:
                    if part_nodes:
                        self._add_failure_stats(
                            [(failure_dev["replication_ip"], failure_dev["device"]) for failure_dev in nodes]
                        )
                    else:
                        self._add_failure_stats(
                            [
                                (failure_dev["replication_ip"], failure_dev["device"])
                                for failure_dev in policy.object_ring.devs
                                if failure_dev
                            ]
                        )
                    continue
        if not found_local:
            self.logger.error(
                "Can't find itself %s with port %s in ring " "file, not replicating", ", ".join(ips), self.port
            )
        return jobs
Example #47
    def process_repl(self, policy, ips, override_devices=None,
                     override_partitions=None):
        """
        Helper function for collect_jobs to build jobs for replication
        using replication style storage policy
        """
        jobs = []
        obj_ring = self.get_object_ring(policy.idx)
        data_dir = get_data_dir(policy.idx)
        for local_dev in [dev for dev in obj_ring.devs
                          if (dev
                              and is_local_device(ips,
                                                  self.port,
                                                  dev['replication_ip'],
                                                  dev['replication_port'])
                              and (override_devices is None
                                   or dev['device'] in override_devices))]:
            dev_path = join(self.devices_dir, local_dev['device'])
            obj_path = join(dev_path, data_dir)
            tmp_path = join(dev_path, get_tmp_dir(int(policy)))
            if self.mount_check and not ismount(dev_path):
                self.logger.warn(_('%s is not mounted'), local_dev['device'])
                continue
            unlink_older_than(tmp_path, time.time() - self.reclaim_age)
            if not os.path.exists(obj_path):
                try:
                    mkdirs(obj_path)
                except Exception:
                    self.logger.exception('ERROR creating %s' % obj_path)
                continue
            for partition in os.listdir(obj_path):
                if (override_partitions is not None
                        and partition not in override_partitions):
                    continue

                try:
                    job_path = join(obj_path, partition)
                    part_nodes = obj_ring.get_part_nodes(int(partition))

######################################  CHANGED_CODE  ########################################################
                    # Read the list of spun-down devices so they can be
                    # excluded as replication targets.
                    with open("/home/hduser/swift/swift/proxy/controllers/"
                              "spindowndevices") as f:
                        downlist = f.read().split("\n")

                    nodes = [node for node in part_nodes
                             if node['id'] != local_dev['id']
                             and node['device'] not in downlist]
                    print("===Replication nodes===", nodes)

######################################  CHANGED_CODE  ########################################################

                    jobs.append(
                        dict(path=job_path,
                             device=local_dev['device'],
                             obj_path=obj_path,
                             nodes=nodes,
                             delete=len(nodes) > len(part_nodes) - 1,
                             policy_idx=policy.idx,
                             partition=partition,
                             object_ring=obj_ring,
                             region=local_dev['region']))
                except ValueError:
                    continue
        return jobs
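
The CHANGED_CODE block filters replication targets against a file listing spun-down devices. A slightly hardened sketch of the same idea; the helper names are assumptions, and the file path is whatever the deployment writes:

def read_spun_down_devices(path):
    # One device name per line; skip blanks so a trailing newline
    # doesn't produce an empty "device" entry.
    with open(path) as f:
        return set(line.strip() for line in f if line.strip())

def eligible_nodes(part_nodes, local_dev, downlist):
    # Exclude ourselves and any node whose device is spun down.
    return [node for node in part_nodes
            if node['id'] != local_dev['id']
            and node['device'] not in downlist]
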
Example #48
    def build_replication_jobs(self, policy, ips, override_devices=None,
                               override_partitions=None):
        """
        Helper function for collect_jobs to build jobs for replication
        using replication style storage policy
        """
        jobs = []
        df_mgr = self._df_router[policy]
        self.all_devs_info.update(
            [(dev['replication_ip'], dev['device'])
             for dev in policy.object_ring.devs if dev])
        data_dir = get_data_dir(policy)
        found_local = False
        for local_dev in [dev for dev in policy.object_ring.devs
                          if (dev
                              and is_local_device(ips,
                                                  self.port,
                                                  dev['replication_ip'],
                                                  dev['replication_port'])
                              and (override_devices is None
                                   or dev['device'] in override_devices))]:
            found_local = True
            dev_path = check_drive(self.devices_dir, local_dev['device'],
                                   self.mount_check)
            if not dev_path:
                self._add_failure_stats(
                    [(failure_dev['replication_ip'],
                      failure_dev['device'])
                     for failure_dev in policy.object_ring.devs
                     if failure_dev])
                self.logger.warning(
                    _('%s is not mounted'), local_dev['device'])
                continue
            obj_path = join(dev_path, data_dir)
            tmp_path = join(dev_path, get_tmp_dir(policy))
            unlink_older_than(tmp_path, time.time() -
                              df_mgr.reclaim_age)
            if not os.path.exists(obj_path):
                try:
                    mkdirs(obj_path)
                except Exception:
                    self.logger.exception('ERROR creating %s' % obj_path)
                continue
            for partition in os.listdir(obj_path):
                if (override_partitions is not None
                        and partition not in override_partitions):
                    continue

                if (partition.startswith('auditor_status_') and
                        partition.endswith('.json')):
                    # ignore auditor status files
                    continue

                part_nodes = None
                try:
                    job_path = join(obj_path, partition)
                    part_nodes = policy.object_ring.get_part_nodes(
                        int(partition))
                    nodes = [node for node in part_nodes
                             if node['id'] != local_dev['id']]
                    jobs.append(
                        dict(path=job_path,
                             device=local_dev['device'],
                             obj_path=obj_path,
                             nodes=nodes,
                             delete=len(nodes) > len(part_nodes) - 1,
                             policy=policy,
                             partition=partition,
                             region=local_dev['region']))
                except ValueError:
                    if part_nodes:
                        self._add_failure_stats(
                            [(failure_dev['replication_ip'],
                              failure_dev['device'])
                             for failure_dev in nodes])
                    else:
                        self._add_failure_stats(
                            [(failure_dev['replication_ip'],
                              failure_dev['device'])
                             for failure_dev in policy.object_ring.devs
                             if failure_dev])
                    continue
        if not found_local:
            self.logger.error("Can't find itself in policy with index %d with"
                              " ips %s and with port %s in ring file, not"
                              " replicating",
                              int(policy), ", ".join(ips), self.port)
        return jobs
Example #49
    def test_main(self):
        # Create one account, container and object file.
        # Find the node holding the account, container and object replicas.
        # Delete all directories and files from this node (device).
        # Wait up to 60 seconds and check the replication results.
        # Delete directories and files in the object storage without
        # deleting the "hashes.pkl" files.
        # Check that those files are not replicated.
        # Delete the "hashes.pkl" files.
        # Check that all files were replicated.
        path_list = []
        data_dir = get_data_dir(POLICIES.default.idx)
        # Figure out where the devices are
        for node_id in range(1, 5):
            conf = readconf(self.configs['object-server'][node_id])
            device_path = conf['app:object-server']['devices']
            for dev in self.object_ring.devs:
                if dev['port'] == int(conf['app:object-server']['bind_port']):
                    device = dev['device']
            path_list.append(os.path.join(device_path, device))

        # Put data to storage nodes
        container = 'container-%s' % uuid4()
        client.put_container(self.url, self.token, container)

        obj = 'object-%s' % uuid4()
        client.put_object(self.url, self.token, container, obj, 'VERIFY')

        # Get all data file information
        (files_list, dir_list) = collect_info(path_list)
        num = find_max_occupancy_node(dir_list)
        test_node = path_list[num]
        test_node_files_list = []
        for files in files_list[num]:
            if not files.endswith('.pending'):
                test_node_files_list.append(files)
        test_node_dir_list = []
        for d in dir_list[num]:
            if not d.startswith('tmp'):
                test_node_dir_list.append(d)
        # Run all replicators
        try:
            Manager(['object-replicator', 'container-replicator',
                     'account-replicator']).start()

            # Delete some files
            for directory in os.listdir(test_node):
                shutil.rmtree(os.path.join(test_node, directory))

            self.assertFalse(os.listdir(test_node))

            # We will keep trying these tests until they pass for up to 60s
            begin = time.time()
            while True:
                (new_files_list, new_dir_list) = collect_info([test_node])

                try:
                    # Check replicated files and dirs
                    for files in test_node_files_list:
                        self.assertIn(files, new_files_list[0])

                    for directory in test_node_dir_list:
                        self.assertIn(directory, new_dir_list[0])
                    break
                except Exception:
                    if time.time() - begin > 60:
                        raise
                    time.sleep(1)

            # Delete directories and files in the object storage
            # without removing the "hashes.pkl" files.
            for directory in os.listdir(os.path.join(test_node, data_dir)):
                for input_dir in os.listdir(os.path.join(
                        test_node, data_dir, directory)):
                    if os.path.isdir(os.path.join(
                            test_node, data_dir, directory, input_dir)):
                        shutil.rmtree(os.path.join(
                            test_node, data_dir, directory, input_dir))

            # We will keep trying these tests until they pass for up to 60s
            begin = time.time()
            while True:
                try:
                    for directory in os.listdir(os.path.join(
                            test_node, data_dir)):
                        for input_dir in os.listdir(os.path.join(
                                test_node, data_dir, directory)):
                            self.assertFalse(os.path.isdir(
                                os.path.join(test_node, data_dir,
                                             directory, input_dir)))
                    break
                except Exception:
                    if time.time() - begin > 60:
                        raise
                    time.sleep(1)

            for directory in os.listdir(os.path.join(test_node, data_dir)):
                os.remove(os.path.join(
                    test_node, data_dir, directory, 'hashes.pkl'))

            # We will keep trying these tests until they pass for up to 60s
            begin = time.time()
            while True:
                try:
                    (new_files_list, new_dir_list) = collect_info([test_node])

                    # Check replicated files and dirs
                    for files in test_node_files_list:
                        self.assertIn(files, new_files_list[0])

                    for directory in test_node_dir_list:
                        self.assertIn(directory, new_dir_list[0])
                    break
                except Exception:
                    if time.time() - begin > 60:
                        raise
                    time.sleep(1)
        finally:
            Manager(['object-replicator', 'container-replicator',
                     'account-replicator']).stop()
Example #50
    def build_replication_jobs(self,
                               policy,
                               ips,
                               override_devices=None,
                               override_partitions=None):
        """
        Helper function for collect_jobs to build jobs for replication
        using replication style storage policy
        """
        jobs = []
        self.all_devs_info.update([(dev['replication_ip'], dev['device'])
                                   for dev in policy.object_ring.devs if dev])
        data_dir = get_data_dir(policy)
        found_local = False
        for local_dev in [
                dev for dev in policy.object_ring.devs if
            (dev and is_local_device(ips, self.port, dev['replication_ip'],
                                     dev['replication_port']) and
             (override_devices is None or dev['device'] in override_devices))
        ]:
            found_local = True
            dev_path = join(self.devices_dir, local_dev['device'])
            obj_path = join(dev_path, data_dir)
            tmp_path = join(dev_path, get_tmp_dir(policy))
            if self.mount_check and not ismount(dev_path):
                self._add_failure_stats([
                    (failure_dev['replication_ip'], failure_dev['device'])
                    for failure_dev in policy.object_ring.devs if failure_dev
                ])
                self.logger.warning(_('%s is not mounted'),
                                    local_dev['device'])
                continue
            unlink_older_than(tmp_path, time.time() - self.reclaim_age)
            if not os.path.exists(obj_path):
                try:
                    mkdirs(obj_path)
                except Exception:
                    self.logger.exception('ERROR creating %s' % obj_path)
                continue
            for partition in os.listdir(obj_path):
                if (override_partitions is not None
                        and partition not in override_partitions):
                    continue

                if (partition.startswith('auditor_status_')
                        and partition.endswith('.json')):
                    # ignore auditor status files
                    continue

                part_nodes = None
                try:
                    job_path = join(obj_path, partition)
                    part_nodes = policy.object_ring.get_part_nodes(
                        int(partition))
                    nodes = [
                        node for node in part_nodes
                        if node['id'] != local_dev['id']
                    ]
                    jobs.append(
                        dict(path=job_path,
                             device=local_dev['device'],
                             obj_path=obj_path,
                             nodes=nodes,
                             delete=len(nodes) > len(part_nodes) - 1,
                             policy=policy,
                             partition=partition,
                             region=local_dev['region']))
                except ValueError:
                    if part_nodes:
                        self._add_failure_stats([
                            (failure_dev['replication_ip'],
                             failure_dev['device']) for failure_dev in nodes
                        ])
                    else:
                        self._add_failure_stats([
                            (failure_dev['replication_ip'],
                             failure_dev['device'])
                            for failure_dev in policy.object_ring.devs
                            if failure_dev
                        ])
                    continue
        if not found_local:
            self.logger.error(
                "Can't find itself in policy with index %d with"
                " ips %s and with port %s in ring file, not"
                " replicating", int(policy), ", ".join(ips), self.port)
        return jobs
Example #51
def relink(conf, logger, device):
    diskfile_router = diskfile.DiskFileRouter(conf, logger)
    found_policy = False
    relinked = errors = 0
    error_counter = {}
    for policy in POLICIES:
        diskfile_mgr = diskfile_router[policy]
        policy.object_ring = None  # Ensure it will be reloaded
        policy.load_ring(conf['swift_dir'])
        part_power = policy.object_ring.part_power
        next_part_power = policy.object_ring.next_part_power
        if not next_part_power or next_part_power == part_power:
            continue
        logger.info('Relinking files for policy %s under %s', policy.name,
                    conf['devices'])
        found_policy = True
        datadir = diskfile.get_data_dir(policy)

        locks = [None]
        states = {
            "part_power": part_power,
            "next_part_power": next_part_power,
            "state": {},
        }
        relink_devices_filter = partial(devices_filter, device)
        relink_hook_pre_device = partial(hook_pre_device, locks, states,
                                         datadir)
        relink_hook_post_device = partial(hook_post_device, locks)
        relink_partition_filter = partial(partitions_filter, states,
                                          part_power, next_part_power)
        relink_hook_post_partition = partial(hook_post_partition, states,
                                             STEP_RELINK, policy, diskfile_mgr)
        relink_hashes_filter = partial(hashes_filter, next_part_power)

        locations = audit_location_generator(
            conf['devices'],
            datadir,
            mount_check=conf['mount_check'],
            devices_filter=relink_devices_filter,
            hook_pre_device=relink_hook_pre_device,
            hook_post_device=relink_hook_post_device,
            partitions_filter=relink_partition_filter,
            hook_post_partition=relink_hook_post_partition,
            hashes_filter=relink_hashes_filter,
            logger=logger,
            error_counter=error_counter)
        if conf['files_per_second'] > 0:
            locations = RateLimitedIterator(locations,
                                            conf['files_per_second'])
        for fname, _, _ in locations:
            newfname = replace_partition_in_path(fname, next_part_power)
            try:
                diskfile.relink_paths(fname, newfname, check_existing=True)
                relinked += 1
                suffix_dir = os.path.dirname(os.path.dirname(newfname))
                diskfile.invalidate_hash(suffix_dir)
            except OSError as exc:
                errors += 1
                logger.warning("Relinking %s to %s failed: %s", fname,
                               newfname, exc)
    return determine_exit_code(
        logger=logger,
        found_policy=found_policy,
        processed=relinked,
        action='relinked',
        action_errors=errors,
        error_counter=error_counter,
    )
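
relink_paths() amounts to hard-linking the object file into its future-partition directory, so both partition trees share one inode and no data is copied. A sketch of the core operation under that assumption (the real Swift helper also handles races and validates existing links):

import errno
import os

def relink_paths(target_path, new_target_path, check_existing=False):
    # Create the new suffix/hash directories, then hard-link the
    # data file into place.
    new_dir = os.path.dirname(new_target_path)
    if not os.path.exists(new_dir):
        os.makedirs(new_dir)
    try:
        os.link(target_path, new_target_path)
    except OSError as err:
        # A previous relink run may already have created the link.
        if not (check_existing and err.errno == errno.EEXIST):
            raise
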
Example #52
    def test_main(self):
        # Create container
        # Kill one container/obj primary server
        # Delete the default data directory for objects on the primary server
        # Create container/obj (goes to two primary servers and one handoff)
        # Kill other two container/obj primary servers
        # Indirectly through proxy assert we can get container/obj
        # Restart those other two container/obj primary servers
        # Directly to handoff server assert we can get container/obj
        # Assert container listing (via proxy and directly) has container/obj
        # Bring the first container/obj primary server back up
        # Assert that it doesn't have container/obj yet
        # Run object replication for first container/obj primary server
        # Run object replication for handoff node
        # Assert the first container/obj primary server now has container/obj
        # Assert the handoff server no longer has container/obj

        container = 'container-%s' % uuid4()
        client.put_container(self.url, self.token, container)

        cpart, cnodes = self.container_ring.get_nodes(self.account, container)
        cnode = cnodes[0]
        obj = 'object-%s' % uuid4()
        opart, onodes = self.object_ring.get_nodes(self.account, container,
                                                   obj)
        onode = onodes[0]
        kill_server(onode['port'], self.port2server, self.pids)
        obj_dir = '%s/%s' % (self._get_objects_dir(onode),
                             get_data_dir(self.policy.idx))
        shutil.rmtree(obj_dir, True)
        self.assertFalse(os.path.exists(obj_dir))
        client.put_object(self.url, self.token, container, obj, 'VERIFY')
        odata = client.get_object(self.url, self.token, container, obj)[-1]
        if odata != 'VERIFY':
            raise Exception('Object GET did not return VERIFY, instead it '
                            'returned: %s' % repr(odata))
        # Kill all primaries to ensure GET handoff works
        for node in onodes[1:]:
            kill_server(node['port'], self.port2server, self.pids)
        odata = client.get_object(self.url, self.token, container, obj)[-1]
        if odata != 'VERIFY':
            raise Exception('Object GET did not return VERIFY, instead it '
                            'returned: %s' % repr(odata))
        for node in onodes[1:]:
            start_server(node['port'], self.port2server, self.pids)
            self.assertFalse(os.path.exists(obj_dir))
            # We've indirectly verified the handoff node has the object, but
            # let's directly verify it.
        another_onode = next(self.object_ring.get_more_nodes(opart))
        odata = direct_client.direct_get_object(
            another_onode,
            opart,
            self.account,
            container,
            obj,
            headers={'X-Backend-Storage-Policy-Index': self.policy.idx})[-1]
        if odata != 'VERIFY':
            raise Exception('Direct object GET did not return VERIFY, instead '
                            'it returned: %s' % repr(odata))
        objs = [
            o['name']
            for o in client.get_container(self.url, self.token, container)[1]
        ]
        if obj not in objs:
            raise Exception('Container listing did not know about object')
        timeout = time.time() + 5
        found_objs_on_cnode = []
        while time.time() < timeout:
            for cnode in [
                    c for c in cnodes if c not in found_objs_on_cnode
            ]:
                objs = [
                    o['name'] for o in direct_client.direct_get_container(
                        cnode, cpart, self.account, container)[1]
                ]
                if obj in objs:
                    found_objs_on_cnode.append(cnode)
            if len(found_objs_on_cnode) >= len(cnodes):
                break
            time.sleep(0.3)
        if len(found_objs_on_cnode) < len(cnodes):
            missing = [
                '%s:%s' % (cnode['ip'], cnode['port']) for cnode in cnodes
                if cnode not in found_objs_on_cnode
            ]
            raise Exception('Container servers %r did not know about object' %
                            missing)
        start_server(onode['port'], self.port2server, self.pids)
        self.assertFalse(os.path.exists(obj_dir))
        try:
            direct_client.direct_get_object(
                onode,
                opart,
                self.account,
                container,
                obj,
                headers={'X-Backend-Storage-Policy-Index': self.policy.idx})
        except ClientException as err:
            self.assertEqual(err.http_status, 404)
        else:
            self.fail("Expected ClientException but didn't get it")
        self.assertFalse(os.path.exists(obj_dir))

        try:
            port_num = onode['replication_port']
        except KeyError:
            port_num = onode['port']
        try:
            another_port_num = another_onode['replication_port']
        except KeyError:
            another_port_num = another_onode['port']

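        # SAIO-style port convention (assumed): server N listens on port
        # 6000 + 10 * N, so the daemon number falls out of the port.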
        num = (port_num - 6000) // 10
        Manager(['object-replicator']).once(number=num)

        another_num = (another_port_num - 6000) // 10
        Manager(['object-replicator']).once(number=another_num)

        odata = direct_client.direct_get_object(
            onode,
            opart,
            self.account,
            container,
            obj,
            headers={'X-Backend-Storage-Policy-Index': self.policy.idx})[-1]
        if odata != 'VERIFY':
            raise Exception('Direct object GET did not return VERIFY, instead '
                            'it returned: %s' % repr(odata))
        try:
            direct_client.direct_get_object(
                another_onode,
                opart,
                self.account,
                container,
                obj,
                headers={'X-Backend-Storage-Policy-Index': self.policy.idx})
        except ClientException as err:
            self.assertEqual(err.http_status, 404)
        else:
            self.fail("Expected ClientException but didn't get it")
Example #53
    def test_main(self):
        # Create container
        container = 'container-%s' % uuid4()
        client.put_container(self.url, self.token, container,
                             headers={'X-Storage-Policy':
                                      self.policy.name})

        cpart, cnodes = self.container_ring.get_nodes(self.account, container)
        cnode = cnodes[0]
        obj = 'object-%s' % uuid4()
        opart, onodes = self.object_ring.get_nodes(
            self.account, container, obj)
        onode = onodes[0]

        # Kill one container/obj primary server
        kill_server((onode['ip'], onode['port']), self.ipport2server)

        # Delete the default data directory for objects on the primary server
        obj_dir = '%s/%s' % (self._get_objects_dir(onode),
                             get_data_dir(self.policy))
        shutil.rmtree(obj_dir, True)
        self.assertFalse(os.path.exists(obj_dir))

        # Create container/obj (goes to two primary servers and one handoff)
        client.put_object(self.url, self.token, container, obj, 'VERIFY')
        odata = client.get_object(self.url, self.token, container, obj)[-1]
        if odata != 'VERIFY':
            raise Exception('Object GET did not return VERIFY, instead it '
                            'returned: %s' % repr(odata))

        # Stash the on disk data from a primary for future comparison with the
        # handoff - this may not equal 'VERIFY' if for example the proxy has
        # crypto enabled
        direct_get_data = direct_client.direct_get_object(
            onodes[1], opart, self.account, container, obj, headers={
                'X-Backend-Storage-Policy-Index': self.policy.idx})[-1]

        # Kill other two container/obj primary servers
        #  to ensure GET handoff works
        for node in onodes[1:]:
            kill_server((node['ip'], node['port']), self.ipport2server)

        # Indirectly through proxy assert we can get container/obj
        odata = client.get_object(self.url, self.token, container, obj)[-1]
        if odata != 'VERIFY':
            raise Exception('Object GET did not return VERIFY, instead it '
                            'returned: %s' % repr(odata))
        # Restart those other two container/obj primary servers
        for node in onodes[1:]:
            start_server((node['ip'], node['port']), self.ipport2server)
            self.assertFalse(os.path.exists(obj_dir))
            # We've indirectly verified the handoff node has the object, but
            # let's directly verify it.

        # Directly to handoff server assert we can get container/obj
        another_onode = next(self.object_ring.get_more_nodes(opart))
        odata = direct_client.direct_get_object(
            another_onode, opart, self.account, container, obj,
            headers={'X-Backend-Storage-Policy-Index': self.policy.idx})[-1]
        self.assertEqual(direct_get_data, odata)

        # Assert container listing (via proxy and directly) has container/obj
        objs = [o['name'] for o in
                client.get_container(self.url, self.token, container)[1]]
        if obj not in objs:
            raise Exception('Container listing did not know about object')
        timeout = time.time() + 5
        found_objs_on_cnode = []
        while time.time() < timeout:
            for cnode in [c for c in cnodes if c not in
                          found_objs_on_cnode]:
                objs = [o['name'] for o in
                        direct_client.direct_get_container(
                            cnode, cpart, self.account, container)[1]]
                if obj in objs:
                    found_objs_on_cnode.append(cnode)
            if len(found_objs_on_cnode) >= len(cnodes):
                break
            time.sleep(0.3)
        if len(found_objs_on_cnode) < len(cnodes):
            missing = ['%s:%s' % (cnode['ip'], cnode['port']) for cnode in
                       cnodes if cnode not in found_objs_on_cnode]
            raise Exception('Container servers %r did not know about object' %
                            missing)

        # Bring the first container/obj primary server back up
        start_server((onode['ip'], onode['port']), self.ipport2server)

        # Assert that it doesn't have container/obj yet
        self.assertFalse(os.path.exists(obj_dir))
        try:
            direct_client.direct_get_object(
                onode, opart, self.account, container, obj, headers={
                    'X-Backend-Storage-Policy-Index': self.policy.idx})
        except ClientException as err:
            self.assertEqual(err.http_status, 404)
            self.assertFalse(os.path.exists(obj_dir))
        else:
            self.fail("Expected ClientException but didn't get it")

        # Run object replication for first container/obj primary server
        _, num = get_server_number(
            (onode['ip'], onode.get('replication_port', onode['port'])),
            self.ipport2server)
        Manager(['object-replicator']).once(number=num)

        # Run object replication for handoff node
        _, another_num = get_server_number(
            (another_onode['ip'],
             another_onode.get('replication_port', another_onode['port'])),
            self.ipport2server)
        Manager(['object-replicator']).once(number=another_num)

        # Assert the first container/obj primary server now has container/obj
        odata = direct_client.direct_get_object(
            onode, opart, self.account, container, obj, headers={
                'X-Backend-Storage-Policy-Index': self.policy.idx})[-1]
        self.assertEqual(direct_get_data, odata)

        # Assert the handoff server no longer has container/obj
        try:
            direct_client.direct_get_object(
                another_onode, opart, self.account, container, obj, headers={
                    'X-Backend-Storage-Policy-Index': self.policy.idx})
        except ClientException as err:
            self.assertEqual(err.http_status, 404)
        else:
            self.fail("Expected ClientException but didn't get it")
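The poll-until-timeout loop against the container servers above is a reusable pattern; a minimal, generic sketch (wait_for is not part of the test helpers, just an illustration):

    import time

    def wait_for(predicate, timeout=5, interval=0.3):
        # Retry the predicate until it holds or the deadline passes,
        # mirroring the container-listing loop in the tests above.
        deadline = time.time() + timeout
        while time.time() < deadline:
            if predicate():
                return True
            time.sleep(interval)
        return predicate()

    # Usage sketch (obj_listed_on is a hypothetical check):
    # wait_for(lambda: all(obj_listed_on(cnode) for cnode in cnodes))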