Example #1
    def test_defaults(self):
        self.assertTrue(len(POLICIES) > 0)

        # test class functions
        default_policy = POLICIES.default
        self.assert_(default_policy.is_default)
        zero_policy = POLICIES.get_by_index(0)
        self.assert_(zero_policy.idx == 0)
        zero_policy_by_name = POLICIES.get_by_name(zero_policy.name)
        self.assert_(zero_policy_by_name.idx == 0)
Example #2
    def test_defaults(self):
        self.assertTrue(len(POLICIES) > 0)

        # test class functions
        default_policy = POLICIES.default
        self.assertTrue(default_policy.is_default)
        zero_policy = POLICIES.get_by_index(0)
        self.assertTrue(zero_policy.idx == 0)
        zero_policy_by_name = POLICIES.get_by_name(zero_policy.name)
        self.assertTrue(zero_policy_by_name.idx == 0)
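
Both variants of test_defaults exercise the same POLICIES collection API. As a minimal standalone sketch (assuming swift.common.storage_policy is importable and swift.conf defines at least one policy), the same lookups can be made outside a test case:

from swift.common.storage_policy import POLICIES

def describe_default_policy():
    # The collection exposes the default policy directly and supports
    # lookups by index and by name, as the tests above rely on.
    default = POLICIES.default
    by_index = POLICIES.get_by_index(default.idx)
    by_name = POLICIES.get_by_name(default.name)
    assert by_index.idx == by_name.idx == default.idx
    return default.idx, default.name
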
Example #3
    def DELETE(self, req):
        """HTTP DELETE request handler."""
        account_partition, accounts, container_count = \
            self.account_info(self.account_name, req)
        if not accounts:
            return HTTPNotFound(request=req)

        policy_index = self._convert_policy_to_index(req)
        if policy_index is None:
            policy_index = int(POLICIES.default)
        cloud_ring = CloudRing(self.container_name, POLICIES.get_by_index(policy_index))

        container_partition, containers = self.app.container_ring.get_nodes(
            self.account_name, self.container_name)
        headers = self._backend_requests(req, len(containers),
                                         account_partition, accounts)
        clear_info_cache(self.app, req.environ,
                         self.account_name, self.container_name)
        resp = self.make_requests(
            req, self.app.container_ring, container_partition, 'DELETE',
            req.swift_entity_path, headers)
        # Indicates no server had the container
        if resp.status_int == HTTP_ACCEPTED:
            return HTTPNotFound(request=req)
        return_flag, _info = cloud_ring.delete_containers()
        if not return_flag:
            msg = 'Failed:' + str(_info)
            raise DELETECloudContainerException(msg)
        return resp
Example #4
    def get_and_validate_policy_index(self, req):
        """
        Validate that the index supplied maps to a policy.

        :returns: policy index from request, or None if not present
        :raises HTTPBadRequest: if the supplied index is bogus
        """

        policy_index = req.headers.get('X-Backend-Storage-Policy-Index', None)
        if policy_index is None:
            return None

        try:
            policy_index = int(policy_index)
        except ValueError:
            raise HTTPBadRequest(request=req,
                                 content_type="text/plain",
                                 body=("Invalid X-Storage-Policy-Index %r" %
                                       policy_index))

        policy = POLICIES.get_by_index(policy_index)
        if policy is None:
            raise HTTPBadRequest(request=req,
                                 content_type="text/plain",
                                 body=("Invalid X-Storage-Policy-Index %r" %
                                       policy_index))
        return int(policy)
Example #5
def parse_get_node_args(options, args):
    """
    Parse the get_nodes commandline args

    :returns: a tuple, (ring_path, args)
    """
    ring_path = None

    if options.policy_name:
        if POLICIES.get_by_name(options.policy_name) is None:
            raise InfoSystemExit('No policy named %r' % options.policy_name)
    elif args and args[0].endswith('ring.gz'):
        if os.path.exists(args[0]):
            ring_path = args.pop(0)
        else:
            raise InfoSystemExit('Ring file does not exist')

    if len(args) == 1:
        args = args[0].strip('/').split('/', 2)

    if not ring_path and not options.policy_name:
        raise InfoSystemExit('Need to specify policy_name or <ring.gz>')

    if not (args or options.partition):
        raise InfoSystemExit('No target specified')

    if len(args) > 3:
        raise InfoSystemExit('Invalid arguments')

    return ring_path, args
Example #6
def parse_get_node_args(options, args):
    """
    Parse the get_nodes commandline args

    :returns: a tuple, (ring_path, args)
    """
    ring_path = None

    if options.policy_name:
        if POLICIES.get_by_name(options.policy_name) is None:
            raise InfoSystemExit('No policy named %r' % options.policy_name)
    elif args and args[0].endswith('.ring.gz'):
        if os.path.exists(args[0]):
            ring_path = args.pop(0)
        else:
            raise InfoSystemExit('Ring file does not exist')

    if len(args) == 1:
        args = args[0].strip('/').split('/', 2)

    if not ring_path and not options.policy_name:
        raise InfoSystemExit('Need to specify policy_name or <ring.gz>')

    if not (args or options.partition):
        raise InfoSystemExit('No target specified')

    if len(args) > 3:
        raise InfoSystemExit('Invalid arguments')

    return ring_path, args
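
A hedged usage sketch for the parser above, assuming an optparse/argparse-style options object with only the policy_name and partition attributes the function actually reads; the policy name and path are hypothetical:

import argparse

def fake_options(policy_name=None, partition=None):
    # Minimal stand-in for the swift-get-nodes option object.
    return argparse.Namespace(policy_name=policy_name, partition=partition)

# 'gold' must name a policy in swift.conf, otherwise InfoSystemExit is raised.
ring_path, args = parse_get_node_args(
    fake_options(policy_name='gold'), ['AUTH_test/pictures/cat.jpg'])
# ring_path is None here; args becomes ['AUTH_test', 'pictures', 'cat.jpg']
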
Example #7
def get_name_and_placement(request,
                           minsegs=1,
                           maxsegs=None,
                           rest_with_last=False):
    """
    Utility function to split and validate the request path and storage
    policy.  The storage policy index is extracted from the headers of
    the request and converted to a StoragePolicy instance.  The
    remaining args are passed through to
    :meth:`split_and_validate_path`.

    :returns: a list, result of :meth:`split_and_validate_path` with
              the BaseStoragePolicy instance appended on the end
    :raises HTTPServiceUnavailable: if the path is invalid or no policy exists
             with the extracted policy_index.
    """
    policy_index = request.headers.get('X-Backend-Storage-Policy-Index')
    policy = POLICIES.get_by_index(policy_index)
    if not policy:
        raise HTTPServiceUnavailable(body=_("No policy with index %s") %
                                     policy_index,
                                     request=request,
                                     content_type='text/plain')
    results = split_and_validate_path(request,
                                      minsegs=minsegs,
                                      maxsegs=maxsegs,
                                      rest_with_last=rest_with_last)
    results.append(policy)
    return results
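
get_name_and_placement passes the raw X-Backend-Storage-Policy-Index header value straight to POLICIES.get_by_index(), which accepts the string form as these examples show. A minimal sketch of just that resolution step (the headers dict is hypothetical):

from swift.common.storage_policy import POLICIES

def resolve_backend_policy(headers):
    # Mirrors the first half of get_name_and_placement(): look up the policy
    # for the backend index header; an unknown index comes back as None.
    policy_index = headers.get('X-Backend-Storage-Policy-Index')
    return POLICIES.get_by_index(policy_index)
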
Example #8
    def get_and_validate_policy_index(self, req):
        """
        Validate that the index supplied maps to a policy.

        :returns: policy index from request, or None if not present
        :raises: HTTPBadRequest if the supplied index is bogus
        """

        policy_index = req.headers.get('X-Backend-Storage-Policy-Index', None)
        if policy_index is None:
            return None

        try:
            policy_index = int(policy_index)
        except ValueError:
            raise HTTPBadRequest(
                request=req, content_type="text/plain",
                body=("Invalid X-Storage-Policy-Index %r" % policy_index))

        policy = POLICIES.get_by_index(policy_index)
        if policy is None:
            raise HTTPBadRequest(
                request=req, content_type="text/plain",
                body=("Invalid X-Storage-Policy-Index %r" % policy_index))
        return int(policy)
Example #9
def get_name_and_placement(request, minsegs=1, maxsegs=None,
                           rest_with_last=False):
    """
    Utility function to split and validate the request path and storage
    policy.  The storage policy index is extracted from the headers of
    the request and converted to a StoragePolicy instance.  The
    remaining args are passed through to
    :meth:`split_and_validate_path`.

    :returns: a list, result of :meth:`split_and_validate_path` with
              the BaseStoragePolicy instance appended on the end
    :raises: HTTPServiceUnavailable if the path is invalid or no policy exists
             with the extracted policy_index.
    """
    policy_index = request.headers.get('X-Backend-Storage-Policy-Index')
    policy = POLICIES.get_by_index(policy_index)
    if not policy:
        raise HTTPServiceUnavailable(
            body=_("No policy with index %s") % policy_index,
            request=request, content_type='text/plain')
    results = split_and_validate_path(request, minsegs=minsegs,
                                      maxsegs=maxsegs,
                                      rest_with_last=rest_with_last)
    results.append(policy)
    return results
Example #10
    def get_object_ring(self, policy_idx):
        """
        Get the ring object to use to handle a request based on its policy.

        :param policy_idx: policy index as defined in swift.conf
        :returns: appropriate ring object
        """
        return POLICIES.get_object_ring(policy_idx, self.swift_dir)
Example #11
    def get_object_ring(self, policy_idx):
        """
        Get the ring object to use based on its policy.

        :param policy_idx: policy index as defined in swift.conf
        :returns: appropriate ring object
        """
        return POLICIES.get_object_ring(policy_idx, self.swift_dir)
Example #12
    def get_object_ring(self, policy_idx):
        """
        Get the ring identified by the policy index

        :param policy_idx: Storage policy index
        :returns: A ring matching the storage policy
        """
        return POLICIES.get_object_ring(policy_idx, self.swift_dir)
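
All three get_object_ring variants above just delegate to POLICIES.get_object_ring(). A small sketch of what callers typically do with the returned ring, reusing the ring.get_nodes() call seen in later examples (account, container and object names are placeholders):

from swift.common.storage_policy import POLICIES

def primary_devices(policy_idx, account, container, obj, swift_dir='/etc/swift'):
    # Resolve the object ring for the policy, then ask it which devices
    # hold the object's primary replicas.
    ring = POLICIES.get_object_ring(policy_idx, swift_dir)
    part, nodes = ring.get_nodes(account, container, obj)
    return part, [node['device'] for node in nodes]
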
Example #13
 def test_swift_info(self):
     # the deprecated 'three' should not exist in expect
     expect = [{'default': True, 'name': 'zero'},
               {'name': 'two'},
               {'name': 'one'}]
     swift_info = POLICIES.get_policy_info()
     self.assertEquals(sorted(expect, key=lambda k: k['name']),
                       sorted(swift_info, key=lambda k: k['name']))
Example #14
 def test_swift_info(self):
     # the deprecated 'three' should not exist in expect
     expect = [{'aliases': 'zero', 'default': True, 'name': 'zero', },
               {'aliases': 'two', 'name': 'two'},
               {'aliases': 'one', 'name': 'one'},
               {'aliases': 'ten', 'name': 'ten'}]
     swift_info = POLICIES.get_policy_info()
     self.assertEqual(sorted(expect, key=lambda k: k['name']),
                      sorted(swift_info, key=lambda k: k['name']))
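
get_policy_info() is what both tests compare against: one dict per non-deprecated policy, which the proxy typically publishes under /info. A tiny sketch (assuming a configured swift.conf):

from swift.common.storage_policy import POLICIES

def policy_info_names():
    # The tests above rely on deprecated policies being excluded here.
    return sorted(entry['name'] for entry in POLICIES.get_policy_info())
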
Example #15
    def get_controller(self, req):
        """
        Get the controller to handle a request.

        :param req: the request
        :returns: tuple of (controller class, path dictionary)

        :raises ValueError: (thrown by split_path) if given invalid path
        """
        if req.path == '/info':
            d = dict(version=None,
                     expose_info=self.expose_info,
                     disallowed_sections=self.disallowed_sections,
                     admin_key=self.admin_key)
            return InfoController, d

        # Split the request path, e.g. http://127.0.0.1:8080/auth/v1.0/account/container
        version, account, container, obj = split_path(req.path, 1, 4, True)
        d = dict(version=version,
                 account_name=account,
                 container_name=container,
                 object_name=obj)
        # If account is empty or the API version is invalid, raise an exception
        if account and not valid_api_version(version):
            raise APIVersionError('Invalid path')
        # If account, container and object are all present this is an object request, so return the object controller
        if obj and container and account:
            # info={"status": ..., "sync_key": null, "write_acl": null, "object_count": 1,
            # "storage_policy": 0, "versions": null, "bytes": ..., "meta": {}, "sharding_state": ...,
            # "cors": {"allow_origin": null, "expose_headers": null, "max_age": null},
            #  "sysmeta": {}, "read_acl": null}
            info = get_container_info(req.environ, self)
            policy_index = req.headers.get('X-Backend-Storage-Policy-Index',
                                           info['storage_policy'])
            policy = POLICIES.get_by_index(policy_index)
            if not policy:
                # This indicates that a new policy has been created,
                # with rings, deployed, released (i.e. deprecated =
                # False), used by a client to create a container via
                # another proxy that was restarted after the policy
                # was released, and is now cached - all before this
                # worker was HUPed to stop accepting new
                # connections.  There should never be an "unknown"
                # index - but when there is - it's probably operator
                # error and hopefully temporary.
                raise HTTPServiceUnavailable('Unknown Storage Policy')
            # obj_controller_router[policy] is equivalent to calling __getitem__(policy) on obj_controller_router
            # Depending on the policy this returns either an ECObjectController or a ReplicatedObjectController
            return self.obj_controller_router[policy], d
        # If account and container are present this is a container request, so return the container controller
        elif container and account:
            return ContainerController, d
        # If only account is present this is an account request, so return the account controller
        elif account and not container and not obj:
            return AccountController, d
        return None, d
Example #16
    def PUT(self, req):
        """HTTP PUT request handler."""
        error_response = \
            self.clean_acls(req) or check_metadata(req, 'container')
        if error_response:
            return error_response
        policy_index = self._convert_policy_to_index(req)
        if policy_index is None:
            policy_index = int(POLICIES.default)
        if not req.environ.get('swift_owner'):
            for key in self.app.swift_owner_headers:
                req.headers.pop(key, None)
        if len(self.container_name) > constraints.MAX_CONTAINER_NAME_LENGTH:
            resp = HTTPBadRequest(request=req)
            resp.body = 'Container name length of %d longer than %d' % \
                        (len(self.container_name),
                         constraints.MAX_CONTAINER_NAME_LENGTH)
            return resp
        account_partition, accounts, container_count = \
            self.account_info(self.account_name, req)
        if not accounts and self.app.account_autocreate:
            self.autocreate_account(req, self.account_name)
            account_partition, accounts, container_count = \
                self.account_info(self.account_name, req)
        if not accounts:
            return HTTPNotFound(request=req)
        if self.app.max_containers_per_account > 0 and \
                container_count >= self.app.max_containers_per_account and \
                self.account_name not in self.app.max_containers_whitelist:
            container_info = \
                self.container_info(self.account_name, self.container_name,
                                    req)
            if not is_success(container_info.get('status')):
                resp = HTTPForbidden(request=req)
                resp.body = 'Reached container limit of %s' % \
                    self.app.max_containers_per_account
                return resp
        container_partition, containers = self.app.container_ring.get_nodes(
            self.account_name, self.container_name)
        headers = self._backend_requests(req, len(containers),
                                         account_partition, accounts,
                                         policy_index)
        clear_info_cache(self.app, req.environ,
                         self.account_name, self.container_name)
        resp = self.make_requests(
            req, self.app.container_ring,
            container_partition, 'PUT', req.swift_entity_path, headers)

        cloud_ring = CloudRing(self.container_name, POLICIES.get_by_index(policy_index))
        return_flag, _info = cloud_ring.create_containers()
        if not return_flag:
            msg = 'Failed:' + str(_info)
            raise PUTCloudContainerException(msg)

        return resp
Example #17
    def test_obj_put_legacy_updates(self):
        ts = (normalize_timestamp(t) for t in itertools.count(int(time())))
        policy = POLICIES.get_by_index(0)
        # setup updater
        conf = {
            'devices': self.devices_dir,
            'mount_check': 'false',
            'swift_dir': self.testdir,
        }
        async_dir = os.path.join(self.sda1, get_async_dir(policy))
        os.mkdir(async_dir)

        account, container, obj = 'a', 'c', 'o'
        # write an async
        for op in ('PUT', 'DELETE'):
            self.logger._clear()
            daemon = object_updater.ObjectUpdater(conf, logger=self.logger)
            dfmanager = DiskFileManager(conf, daemon.logger)
            # don't include storage-policy-index in headers_out pickle
            headers_out = HeaderKeyDict({
                'x-size': 0,
                'x-content-type': 'text/plain',
                'x-etag': 'd41d8cd98f00b204e9800998ecf8427e',
                'x-timestamp': next(ts),
            })
            data = {
                'op': op,
                'account': account,
                'container': container,
                'obj': obj,
                'headers': headers_out
            }
            dfmanager.pickle_async_update(self.sda1, account, container, obj,
                                          data, next(ts), policy)

            request_log = []

            def capture(*args, **kwargs):
                request_log.append((args, kwargs))

            # run once
            fake_status_codes = [200, 200, 200]
            with mocked_http_conn(*fake_status_codes, give_connect=capture):
                daemon.run_once()
            self.assertEqual(len(fake_status_codes), len(request_log))
            for request_args, request_kwargs in request_log:
                ip, part, method, path, headers, qs, ssl = request_args
                self.assertEqual(method, op)
                self.assertEqual(headers['X-Backend-Storage-Policy-Index'],
                                 str(int(policy)))
            self.assertEqual(daemon.logger.get_increment_counts(), {
                'successes': 1,
                'unlinks': 1,
                'async_pendings': 1
            })
Example #18
    def get_controller(self, req):
        """
        Get the controller to handle a request.

        :param req: the request
        :returns: tuple of (controller class, path dictionary)

        :raises: ValueError (thrown by split_path) if given invalid path
        """
        print 'req.path', req.path
        if req.path == '/info':
            d = dict(version=None,
                     expose_info=self.expose_info,
                     disallowed_sections=self.disallowed_sections,
                     admin_key=self.admin_key)
            print 'd', d
            return InfoController, d

        version, account, container, obj = split_path(req.path, 1, 4, True)
        d = dict(version=version,
                 account_name=account,
                 container_name=container,
                 object_name=obj)
        print 'd', d
        #print 'valid_api_version(version)', valid_api_version(version)
        if account and not valid_api_version(version):
            raise APIVersionError('Invalid path')
        if obj and container and account:
            info = get_container_info(req.environ, self)
            print 'info of obj,Acc,Con', info
            policy_index = req.headers.get('X-Backend-Storage-Policy-Index',
                                           info['storage_policy'])
            print 'policy_index', policy_index
            policy = POLICIES.get_by_index(policy_index)
            print 'policy', policy
            if not policy:
                # This indicates that a new policy has been created,
                # with rings, deployed, released (i.e. deprecated =
                # False), used by a client to create a container via
                # another proxy that was restarted after the policy
                # was released, and is now cached - all before this
                # worker was HUPed to stop accepting new
                # connections.  There should never be an "unknown"
                # index - but when there is - it's probably operator
                # error and hopefully temporary.
                raise HTTPServiceUnavailable('Unknown Storage Policy')
            return self.obj_controller_router[policy], d
        elif container and account:
            print 'container & account, returning containercontroller', container, account
            return ContainerController, d
        elif account and not container and not obj:
            print 'account, returning accountcontroller', account
            return AccountController, d
        return None, d
Example #19
    def test_sync(self):
        all_objects = []
        # upload some containers
        for policy in ENABLED_POLICIES:
            container = 'container-%s-%s' % (policy.name, uuid.uuid4())
            client.put_container(self.url, self.token, container,
                                 headers={'X-Storage-Policy': policy.name})
            obj = 'object-%s' % uuid.uuid4()
            body = 'test-body'
            client.put_object(self.url, self.token, container, obj, body)
            all_objects.append((policy, container, obj))

        Manager(['container-updater']).once()

        headers = client.head_account(self.url, self.token)

        self.assertEqual(int(headers['x-account-container-count']),
                         len(ENABLED_POLICIES))
        self.assertEqual(int(headers['x-account-object-count']),
                         len(ENABLED_POLICIES))
        self.assertEqual(int(headers['x-account-bytes-used']),
                         len(ENABLED_POLICIES) * len(body))

        part, nodes = self.account_ring.get_nodes(self.account)
        for node in nodes:
            direct_delete_account(node, part, self.account)

        Manager(['account-reaper']).once()

        get_to_final_state()

        for policy, container, obj in all_objects:
            cpart, cnodes = self.container_ring.get_nodes(
                self.account, container)
            for cnode in cnodes:
                try:
                    direct_head_container(cnode, cpart, self.account,
                                          container)
                except ClientException as err:
                    self.assertEquals(err.http_status, 404)
                else:
                    self.fail('Found un-reaped /%s/%s on %r' %
                              (self.account, container, cnode))
            object_ring = POLICIES.get_object_ring(policy.idx, '/etc/swift/')
            part, nodes = object_ring.get_nodes(self.account, container, obj)
            for node in nodes:
                try:
                    direct_get_object(node, part, self.account,
                                      container, obj)
                except ClientException as err:
                    self.assertEquals(err.http_status, 404)
                else:
                    self.fail('Found un-reaped /%s/%s/%s on %r in %s!' %
                              (self.account, container, obj, node, policy))
Example #20
    def _store_object(self, req, data_source, nodes, partition,
                      outgoing_headers):
        """
        Store a replicated object.

        This method is responsible for establishing connections
        with the storage nodes and sending the object to each of them.
        After the data has been sent, the "best" response is returned,
        based on the statuses from all connections.
        """
        policy_idx = req.headers.get('X-Backend-Storage-Policy-Index')
        policy = POLICIES.get_by_index(policy_idx)
        if not nodes:
            return HTTPNotFound()

        # RFC2616:8.2.3 disallows 100-continue without a body
        if (req.content_length > 0) or req.is_chunked:
            expect = True
        else:
            expect = False
        conns = self._get_put_connections(req, nodes, partition,
                                          outgoing_headers, policy, expect)

        try:
            # check that a minimum number of connections were established and
            # meet all the correct conditions set in the request
            self._check_failure_put_connections(conns, req, nodes)

            # transfer data
            self._transfer_data(req, data_source, conns, nodes)

            # get responses
            statuses, reasons, bodies, etags = self._get_put_responses(
                req, conns, nodes)
        except HTTPException as resp:
            return resp
        finally:
            for conn in conns:
                conn.close()

        if len(etags) > 1:
            self.app.logger.error(
                _('Object servers returned %s mismatched etags'), len(etags))
            return HTTPServerError(request=req)
        etag = etags.pop() if len(etags) else None
        resp = self.best_response(req,
                                  statuses,
                                  reasons,
                                  bodies,
                                  _('Object PUT'),
                                  etag=etag)
        resp.last_modified = math.ceil(
            float(Timestamp(req.headers['X-Timestamp'])))
        return resp
Example #21
    def test_sync(self):
        all_objects = []
        # upload some containers
        for policy in ENABLED_POLICIES:
            container = 'container-%s-%s' % (policy.name, uuid.uuid4())
            client.put_container(self.url, self.token, container,
                                 headers={'X-Storage-Policy': policy.name})
            obj = 'object-%s' % uuid.uuid4()
            body = 'test-body'
            client.put_object(self.url, self.token, container, obj, body)
            all_objects.append((policy, container, obj))

        Manager(['container-updater']).once()

        headers = client.head_account(self.url, self.token)

        self.assertEqual(int(headers['x-account-container-count']),
                         len(ENABLED_POLICIES))
        self.assertEqual(int(headers['x-account-object-count']),
                         len(ENABLED_POLICIES))
        self.assertEqual(int(headers['x-account-bytes-used']),
                         len(ENABLED_POLICIES) * len(body))

        part, nodes = self.account_ring.get_nodes(self.account)
        for node in nodes:
            direct_delete_account(node, part, self.account)

        Manager(['account-reaper']).once()

        get_to_final_state()

        for policy, container, obj in all_objects:
            cpart, cnodes = self.container_ring.get_nodes(
                self.account, container)
            for cnode in cnodes:
                try:
                    direct_head_container(cnode, cpart, self.account,
                                          container)
                except ClientException as err:
                    self.assertEquals(err.http_status, 404)
                else:
                    self.fail('Found un-reaped /%s/%s on %r' %
                              (self.account, container, cnode))
            object_ring = POLICIES.get_object_ring(policy.idx, '/etc/swift/')
            part, nodes = object_ring.get_nodes(self.account, container, obj)
            for node in nodes:
                try:
                    direct_get_object(node, part, self.account,
                                      container, obj)
                except ClientException as err:
                    self.assertEquals(err.http_status, 404)
                else:
                    self.fail('Found un-reaped /%s/%s/%s on %r in %s!' %
                              (self.account, container, obj, node, policy))
Example #22
    def get_controller(self, req):
        """
        Get the controller to handle a request.

        :param req: the request
        :returns: tuple of (controller class, path dictionary)

        :raises: ValueError (thrown by split_path) if given invalid path
        """
        if req.path == '/info':
            d = dict(version=None,
                     expose_info=self.expose_info,
                     disallowed_sections=self.disallowed_sections,
                     admin_key=self.admin_key)
            return InfoController, d

        # Split the path
        version, account, container, obj = split_path(req.path, 1, 4, True)
        # Build the path dict (version, account, container, object) that will be returned
        d = dict(version=version,
                 account_name=account,
                 container_name=container,
                 object_name=obj)
        if account and not valid_api_version(version):
            raise APIVersionError('Invalid path')
        # Object request
        if obj and container and account:
            # Fetch the container info
            info = get_container_info(req.environ, self)
            policy_index = req.headers.get('X-Backend-Storage-Policy-Index',
                                           info['storage_policy'])
            # Look up the storage policy object by index
            policy = POLICIES.get_by_index(policy_index)
            if not policy:
                # This indicates that a new policy has been created,
                # with rings, deployed, released (i.e. deprecated =
                # False), used by a client to create a container via
                # another proxy that was restarted after the policy
                # was released, and is now cached - all before this
                # worker was HUPed to stop accepting new
                # connections.  There should never be an "unknown"
                # index - but when there is - it's probably operator
                # error and hopefully temporary.
                raise HTTPServiceUnavailable('Unknown Storage Policy')
            # Return the object controller together with the path dict
            return self.obj_controller_router[policy], d
        # Container request: return the container controller and the path dict
        elif container and account:
            return ContainerController, d
        # Account request: return the account controller and the path dict
        elif account and not container and not obj:
            return AccountController, d
        return None, d
Example #23
 def put_container(self, policy_index=None):
     """
     put container with next storage policy
     """
     policy = self.policies.next()
     if policy_index is not None:
         policy = POLICIES.get_by_index(int(policy_index))
         if not policy:
             raise ValueError('Unknown policy with index %s' % policy)
     headers = {'X-Storage-Policy': policy.name}
     client.put_container(self.url, self.token, self.container_name,
                          headers=headers)
Example #24
 def put_container(self, policy_index=None):
     """
     put container with next storage policy
     """
     policy = self.policies.next()
     if policy_index is not None:
         policy = POLICIES.get_by_index(int(policy_index))
         if not policy:
             raise ValueError('Unknown policy with index %s' % policy)
     headers = {'X-Storage-Policy': policy.name}
     client.put_container(self.url, self.token, self.container_name,
                          headers=headers)
Example #25
 def check_config(self):
     """
     Check the configuration for possible errors
     """
     for policy_idx, options in self._override_options.items():
         policy = (None if policy_idx is None
                   else POLICIES.get_by_index(policy_idx))
         if options.read_affinity and options.sorting_method != 'affinity':
             self.logger.warning(
                 _("sorting_method is set to '%(method)s', not 'affinity'; "
                   "%(label)s read_affinity setting will have no effect."),
                 {'label': _label_for_policy(policy),
                  'method': options.sorting_method})
Example #26
 def check_config(self):
     """
     Check the configuration for possible errors
     """
     for policy_idx, options in self._override_options.items():
         policy = (None if policy_idx is None
                   else POLICIES.get_by_index(policy_idx))
         if options.read_affinity and options.sorting_method != 'affinity':
             self.logger.warning(
                 _("sorting_method is set to '%(method)s', not 'affinity'; "
                   "%(label)s read_affinity setting will have no effect."),
                 {'label': _label_for_policy(policy),
                  'method': options.sorting_method})
Example #27
    def _store_object(self, req, data_source, nodes, partition,
                      outgoing_headers):
        """
        Store a replicated object.

        This method is responsible for establishing connections
        with the storage nodes and sending the object to each of them.
        After the data has been sent, the "best" response is returned,
        based on the statuses from all connections.
        """
        policy_idx = req.headers.get('X-Backend-Storage-Policy-Index')
        policy = POLICIES.get_by_index(policy_idx)
        if not nodes:
            return HTTPNotFound()

        # RFC2616:8.2.3 disallows 100-continue without a body
        if (req.content_length > 0) or req.is_chunked:
            expect = True
        else:
            expect = False
        conns = self._get_put_connections(req, nodes, partition,
                                          outgoing_headers, policy, expect)

        try:
            # check that a minimum number of connections were established and
            # meet all the correct conditions set in the request
            self._check_failure_put_connections(conns, req, nodes)

            # transfer data
            self._transfer_data(req, data_source, conns, nodes)

            # get responses
            statuses, reasons, bodies, etags = self._get_put_responses(
                req, conns, nodes)
        except HTTPException as resp:
            return resp
        finally:
            for conn in conns:
                conn.close()

        if len(etags) > 1:
            self.app.logger.error(
                _('Object servers returned %s mismatched etags'), len(etags))
            return HTTPServerError(request=req)
        etag = etags.pop() if len(etags) else None
        resp = self.best_response(req, statuses, reasons, bodies,
                                  _('Object PUT'), etag=etag)
        resp.last_modified = math.ceil(
            float(Timestamp(req.headers['X-Timestamp'])))
        return resp
Example #28
    def test_obj_put_legacy_updates(self):
        ts = (normalize_timestamp(t) for t in
              itertools.count(int(time())))
        policy = POLICIES.get_by_index(0)
        # setup updater
        conf = {
            'devices': self.devices_dir,
            'mount_check': 'false',
            'swift_dir': self.testdir,
        }
        async_dir = os.path.join(self.sda1, get_async_dir(policy))
        os.mkdir(async_dir)

        account, container, obj = 'a', 'c', 'o'
        # write an async
        for op in ('PUT', 'DELETE'):
            self.logger._clear()
            daemon = object_updater.ObjectUpdater(conf, logger=self.logger)
            dfmanager = DiskFileManager(conf, daemon.logger)
            # don't include storage-policy-index in headers_out pickle
            headers_out = swob.HeaderKeyDict({
                'x-size': 0,
                'x-content-type': 'text/plain',
                'x-etag': 'd41d8cd98f00b204e9800998ecf8427e',
                'x-timestamp': next(ts),
            })
            data = {'op': op, 'account': account, 'container': container,
                    'obj': obj, 'headers': headers_out}
            dfmanager.pickle_async_update(self.sda1, account, container, obj,
                                          data, next(ts), policy)

            request_log = []

            def capture(*args, **kwargs):
                request_log.append((args, kwargs))

            # run once
            fake_status_codes = [200, 200, 200]
            with mocked_http_conn(*fake_status_codes, give_connect=capture):
                daemon.run_once()
            self.assertEqual(len(fake_status_codes), len(request_log))
            for request_args, request_kwargs in request_log:
                ip, part, method, path, headers, qs, ssl = request_args
                self.assertEqual(method, op)
                self.assertEqual(headers['X-Backend-Storage-Policy-Index'],
                                 str(int(policy)))
            self.assertEqual(daemon.logger.get_increment_counts(),
                             {'successes': 1, 'unlinks': 1,
                              'async_pendings': 1})
Example #29
 def statsd_metric_name_policy(self, req, status_int, method, policy_index):
     if policy_index is None:
         return None
     stat_type = self.get_metric_name_type(req)
     if stat_type == 'object':
         stat_method = method if method in self.valid_methods \
             else 'BAD_METHOD'
         # The policy may not exist
         policy = POLICIES.get_by_index(policy_index)
         if policy:
             return '.'.join((stat_type, 'policy', str(policy_index),
                              stat_method, str(status_int)))
         else:
             return None
     else:
         return None
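
For a known policy the helper above emits dot-joined metric names of the form object.policy.<index>.<METHOD>.<status>. The formatting step on its own, with no statsd client involved:

def object_policy_metric(policy_index, method, status_int):
    # e.g. object_policy_metric(0, 'PUT', 201) -> 'object.policy.0.PUT.201'
    return '.'.join(('object', 'policy', str(policy_index),
                     method, str(status_int)))
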
Example #30
    def _convert_policy_to_index(self, req):
        """
        Helper method to convert a policy name (from a request from a client)
        to a policy index (for a request to a backend).

        :param req: incoming request
        """
        policy_name = req.headers.get(POLICY)
        if policy_name:
            policy = POLICIES.get_by_name(policy_name)
            if policy:
                return policy.idx
            else:
                raise HTTPBadRequest(request=req,
                                     content_type="text/plain",
                                     body=("Invalid X-Storage-Policy '%s'"
                                           % policy_name))
Example #31
    def _convert_policy_to_index(self, req):
        """
        Helper method to convert a policy name (from a request from a client)
        to a policy index (for a request to a backend).

        :param req: incoming request
        """
        policy_name = req.headers.get("X-Storage-Policy")
        if not policy_name:
            return
        policy = POLICIES.get_by_name(policy_name)
        if not policy:
            raise HTTPBadRequest(
                request=req, content_type="text/plain", body=("Invalid %s '%s'" % ("X-Storage-Policy", policy_name))
            )
        if policy.is_deprecated:
            body = "Storage Policy %r is deprecated" % (policy.name)
            raise HTTPBadRequest(request=req, body=body)
        return int(policy)
Example #32
 def _get_object_info(self, account, container, obj, number):
     obj_conf = self.configs['object-server']
     config_path = obj_conf[number]
     options = utils.readconf(config_path, 'app:object-server')
     swift_dir = options.get('swift_dir', '/etc/swift')
     ring = POLICIES.get_object_ring(int(self.policy), swift_dir)
     part, nodes = ring.get_nodes(account, container, obj)
     for node in nodes:
         # assumes one to one mapping
         if node['port'] == int(options.get('bind_port')):
             device = node['device']
             break
     else:
         return None
     mgr = DiskFileManager(options, get_logger(options))
     disk_file = mgr.get_diskfile(device, part, account, container, obj,
                                  self.policy)
     info = disk_file.read_metadata()
     return info
    def __call__(self, env, start_response):
        request = Request(env)

        if request.method == 'PUT':
            try:
                version, account, container, obj = \
                    request.split_path(1, 4, True)
            except ValueError:
                return self.app(env, start_response)

            # check container creation request
            if account and container and not obj:
                policy_name = request.headers.get('X-Storage-Policy', '')
                default_policy = POLICIES.default.name
                if (policy_name in self.policies) or \
                   (policy_name == '' and default_policy in self.policies):

                    container = unquote(container)
                    if len(container) > constraints. \
                            SOF_MAX_CONTAINER_NAME_LENGTH:
                        resp = HTTPBadRequest(request=request)
                        resp.body = \
                            'Container name length of %d longer than %d' % \
                            (len(container),
                                constraints.SOF_MAX_CONTAINER_NAME_LENGTH)
                        return resp(env, start_response)
            elif account and container and obj:
                # check object creation request
                obj = unquote(obj)

                container_info = get_container_info(
                    env, self.app)
                policy = POLICIES.get_by_index(
                    container_info['storage_policy'])

                if policy.name in self.policies:
                    error_response = sof_check_object_creation(request, obj)
                    if error_response:
                        self.logger.warn("returning error: %s", error_response)
                        return error_response(env, start_response)

        return self.app(env, start_response)
Example #35
    def _convert_policy_to_index(self, req):
        """
        Helper method to convert a policy name (from a request from a client)
        to a policy index (for a request to a backend).

        :param req: incoming request
        """
        policy_name = req.headers.get('X-Storage-Policy')
        if not policy_name:
            return
        policy = POLICIES.get_by_name(policy_name)
        if not policy:
            raise HTTPBadRequest(request=req,
                                 content_type="text/plain",
                                 body=("Invalid %s '%s'"
                                       % ('X-Storage-Policy', policy_name)))
        if policy.is_deprecated:
            body = 'Storage Policy %r is deprecated' % (policy.name)
            raise HTTPBadRequest(request=req, body=body)
        return int(policy)
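
The name-to-index conversion above is the client-facing half of the mapping; the index-to-policy lookups in the earlier examples are the backend half. A hedged sketch of the round trip (the policy name must exist in swift.conf and not be deprecated):

from swift.common.storage_policy import POLICIES

def policy_name_to_index(policy_name):
    # X-Storage-Policy carries a name on client requests; backend requests
    # carry X-Backend-Storage-Policy-Index, which is just int(policy).
    policy = POLICIES.get_by_name(policy_name)
    if policy is None or policy.is_deprecated:
        raise ValueError('unusable policy %r' % policy_name)
    index = int(policy)
    assert POLICIES.get_by_index(index).name == policy.name
    return index
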
Example #36
    def __call__(self, env, start_response):
        request = Request(env)

        if request.method == 'PUT':
            try:
                version, account, container, obj = \
                    request.split_path(1, 4, True)
            except ValueError:
                return self.app(env, start_response)

            # check container creation request
            if account and container and not obj:
                policy_name = request.headers.get('X-Storage-Policy', '')
                default_policy = POLICIES.default.name
                if (policy_name in self.policies) or \
                   (policy_name == '' and default_policy in self.policies):

                    container = unquote(container)
                    if len(container) > constraints. \
                            SOF_MAX_CONTAINER_NAME_LENGTH:
                        resp = HTTPBadRequest(request=request)
                        resp.body = \
                            'Container name length of %d longer than %d' % \
                            (len(container),
                                constraints.SOF_MAX_CONTAINER_NAME_LENGTH)
                        return resp(env, start_response)
            elif account and container and obj:
                # check object creation request
                obj = unquote(obj)

                container_info = get_container_info(env, self.app)
                policy = POLICIES.get_by_index(
                    container_info['storage_policy'])

                if policy.name in self.policies:
                    error_response = sof_check_object_creation(request, obj)
                    if error_response:
                        self.logger.warn("returning error: %s", error_response)
                        return error_response(env, start_response)

        return self.app(env, start_response)
Example #37
    def test_obj_put_legacy_updates(self):
        ts = (normalize_timestamp(t) for t in itertools.count(int(time())))
        policy = POLICIES.get_by_index(0)
        # setup updater
        conf = {"devices": self.devices_dir, "mount_check": "false", "swift_dir": self.testdir}
        async_dir = os.path.join(self.sda1, get_async_dir(policy.idx))
        os.mkdir(async_dir)

        account, container, obj = "a", "c", "o"
        # write an async
        for op in ("PUT", "DELETE"):
            self.logger._clear()
            daemon = object_updater.ObjectUpdater(conf, logger=self.logger)
            dfmanager = DiskFileManager(conf, daemon.logger)
            # don't include storage-policy-index in headers_out pickle
            headers_out = swob.HeaderKeyDict(
                {
                    "x-size": 0,
                    "x-content-type": "text/plain",
                    "x-etag": "d41d8cd98f00b204e9800998ecf8427e",
                    "x-timestamp": ts.next(),
                }
            )
            data = {"op": op, "account": account, "container": container, "obj": obj, "headers": headers_out}
            dfmanager.pickle_async_update(self.sda1, account, container, obj, data, ts.next(), policy.idx)

            request_log = []

            def capture(*args, **kwargs):
                request_log.append((args, kwargs))

            # run once
            fake_status_codes = [200, 200, 200]
            with mocked_http_conn(*fake_status_codes, give_connect=capture):
                daemon.run_once()
            self.assertEqual(len(fake_status_codes), len(request_log))
            for request_args, request_kwargs in request_log:
                ip, part, method, path, headers, qs, ssl = request_args
                self.assertEqual(method, op)
                self.assertEqual(headers["X-Backend-Storage-Policy-Index"], str(policy.idx))
            self.assertEqual(daemon.logger.get_increment_counts(), {"successes": 1, "unlinks": 1, "async_pendings": 1})
Example #38
def main():
    options, commands = parser.parse_args()
    if not commands:
        parser.print_help()
        return 'ERROR: must specify at least one command'
    for cmd_args in commands:
        cmd = cmd_args.split(':', 1)[0]
        if cmd not in BrainSplitter.__commands__:
            parser.print_help()
            return 'ERROR: unknown command %s' % cmd
    url, token = get_auth('http://127.0.0.1:8080/auth/v1.0', 'test:tester',
                          'testing')
    if options.server_type == 'object' and not options.policy_name:
        options.policy_name = POLICIES.default.name
    if options.policy_name:
        options.server_type = 'object'
        policy = POLICIES.get_by_name(options.policy_name)
        if not policy:
            return 'ERROR: unknown policy %r' % options.policy_name
    else:
        policy = None
    brain = BrainSplitter(url,
                          token,
                          options.container,
                          options.object,
                          options.server_type,
                          policy=policy)
    for cmd_args in commands:
        parts = cmd_args.split(':', 1)
        command = parts[0]
        if len(parts) > 1:
            args = utils.list_from_csv(parts[1])
        else:
            args = ()
        try:
            brain.run(command, *args)
        except ClientException as e:
            print '**WARNING**: %s raised %s' % (command, e)
    print 'STATUS'.join(['*' * 25] * 2)
    brain.servers.status()
    sys.exit()
Example #39
def get_response_headers(broker):
    info = broker.get_info()
    resp_headers = {
        'X-Account-Container-Count': info['container_count'],
        'X-Account-Object-Count': info['object_count'],
        'X-Account-Bytes-Used': info['bytes_used'],
        'X-Timestamp': Timestamp(info['created_at']).normal,
        'X-PUT-Timestamp': Timestamp(info['put_timestamp']).normal}
    policy_stats = broker.get_policy_stats()
    for policy_idx, stats in policy_stats.items():
        policy = POLICIES.get_by_index(policy_idx)
        if not policy:
            continue
        header_prefix = 'X-Account-Storage-Policy-%s-%%s' % policy.name
        for key, value in stats.items():
            header_name = header_prefix % key.replace('_', '-')
            resp_headers[header_name] = value
    resp_headers.update((key, value)
                        for key, (value, timestamp) in
                        broker.metadata.items() if value != '')
    return resp_headers
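
The per-policy loop above turns each stats key into a header of the form X-Account-Storage-Policy-<policy name>-<stat>. That naming step in isolation (the policy name and stats values are made up):

def policy_stat_headers(policy_name, stats):
    # Mirrors the header naming used by get_response_headers() above.
    prefix = 'X-Account-Storage-Policy-%s-%%s' % policy_name
    return dict((prefix % key.replace('_', '-'), value)
                for key, value in stats.items())

# policy_stat_headers('gold', {'object_count': 3, 'bytes_used': 12}) ->
# {'X-Account-Storage-Policy-gold-object-count': 3,
#  'X-Account-Storage-Policy-gold-bytes-used': 12}
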
Example #40
def get_response_headers(broker):
    info = broker.get_info()
    resp_headers = {
        'X-Account-Container-Count': info['container_count'],
        'X-Account-Object-Count': info['object_count'],
        'X-Account-Bytes-Used': info['bytes_used'],
        'X-Timestamp': Timestamp(info['created_at']).normal,
        'X-PUT-Timestamp': Timestamp(info['put_timestamp']).normal}
    policy_stats = broker.get_policy_stats()
    for policy_idx, stats in policy_stats.items():
        policy = POLICIES.get_by_index(policy_idx)
        if not policy:
            continue
        header_prefix = 'X-Account-Storage-Policy-%s-%%s' % policy.name
        for key, value in stats.items():
            header_name = header_prefix % key.replace('_', '-')
            resp_headers[header_name] = value
    resp_headers.update((key, value)
                        for key, (value, timestamp) in
                        broker.metadata.items() if value != '')
    return resp_headers
Example #41
def main():
    options, commands = parser.parse_args()
    if not commands:
        parser.print_help()
        return 'ERROR: must specify at least one command'
    for cmd_args in commands:
        cmd = cmd_args.split(':', 1)[0]
        if cmd not in BrainSplitter.__commands__:
            parser.print_help()
            return 'ERROR: unknown command %s' % cmd
    url, token = get_auth('http://127.0.0.1:8080/auth/v1.0',
                          'test:tester', 'testing')
    if options.server_type == 'object' and not options.policy_name:
        options.policy_name = POLICIES.default.name
    if options.policy_name:
        options.server_type = 'object'
        policy = POLICIES.get_by_name(options.policy_name)
        if not policy:
            return 'ERROR: unknown policy %r' % options.policy_name
    else:
        policy = None
    brain = BrainSplitter(url, token, options.container, options.object,
                          options.server_type, policy=policy)
    for cmd_args in commands:
        parts = cmd_args.split(':', 1)
        command = parts[0]
        if len(parts) > 1:
            args = utils.list_from_csv(parts[1])
        else:
            args = ()
        try:
            brain.run(command, *args)
        except ClientException as e:
            print '**WARNING**: %s raised %s' % (command, e)
    print 'STATUS'.join(['*' * 25] * 2)
    brain.servers.status()
    sys.exit()
Example #42
def policy(policy_name_or_index):
    value = POLICIES.get_by_name_or_index(policy_name_or_index)
    if value is None:
        raise ValueError
    return value
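
Because it raises ValueError for unknown values, the policy() helper above drops straight into an argparse type= converter; a hedged usage sketch (the option name is illustrative):

import argparse

arg_parser = argparse.ArgumentParser()
# Accepts either a storage policy name or a numeric index; argparse turns
# the ValueError from policy() into a usage error for unknown values.
arg_parser.add_argument('--policy', type=policy, default=None)
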
Example #43
def print_item_locations(ring, ring_name=None, account=None, container=None,
                         obj=None, **kwargs):
    """
    Display placement information for an item based on ring lookup.

    If a ring is provided it always takes precedence, but warnings will be
    emitted if it doesn't match other optional arguments like the policy_name
    or ring_name.

    If no ring is provided the ring_name and/or policy_name will be used to
    lookup the ring.

    :param ring: a ring instance
    :param ring_name: server type, or storage policy ring name if object ring
    :param account: account name
    :param container: container name
    :param obj: object name
    :param partition: part number for non path lookups
    :param policy_name: name of storage policy to use to lookup the ring
    :param all_nodes: include all handoff nodes. If false, only the N primary
                      nodes and first N handoffs will be printed.
    """

    policy_name = kwargs.get('policy_name', None)
    part = kwargs.get('partition', None)
    all_nodes = kwargs.get('all', False)
    swift_dir = kwargs.get('swift_dir', '/etc/swift')

    if ring and policy_name:
        policy = POLICIES.get_by_name(policy_name)
        if policy:
            if ring_name != policy.ring_name:
                print 'Attention! mismatch between ring and policy detected!'
        else:
            print 'Attention! Policy %s is not valid' % policy_name

    policy_index = None
    if ring is None and (obj or part):
        if not policy_name:
            print 'Need a ring or policy'
            raise InfoSystemExit()
        policy = POLICIES.get_by_name(policy_name)
        if not policy:
            print 'No policy named %r' % policy_name
            raise InfoSystemExit()
        policy_index = int(policy)
        ring = POLICIES.get_object_ring(policy_index, swift_dir)
        ring_name = (POLICIES.get_by_name(policy_name)).ring_name

    if account is None and (container is not None or obj is not None):
        print 'No account specified'
        raise InfoSystemExit()

    if container is None and obj is not None:
        print 'No container specified'
        raise InfoSystemExit()

    if account is None and part is None:
        print 'No target specified'
        raise InfoSystemExit()

    loc = '<type>'
    if part and ring_name:
        if '-' in ring_name and ring_name.startswith('object'):
            loc = 'objects-' + ring_name.split('-', 1)[1]
        else:
            loc = ring_name + 's'
    if account and container and obj:
        loc = 'objects'
        if '-' in ring_name and ring_name.startswith('object'):
            policy_index = int(ring_name.rsplit('-', 1)[1])
            loc = 'objects-%d' % policy_index
    if account and container and not obj:
        loc = 'containers'
        if not any([ring, ring_name]):
            ring = Ring(swift_dir, ring_name='container')
        else:
            if ring_name != 'container':
                print 'Attention! mismatch between ring and item detected!'
    if account and not container and not obj:
        loc = 'accounts'
        if not any([ring, ring_name]):
            ring = Ring(swift_dir, ring_name='account')
        else:
            if ring_name != 'account':
                print 'Attention! mismatch between ring and item detected!'

    print '\nAccount  \t%s' % account
    print 'Container\t%s' % container
    print 'Object   \t%s\n\n' % obj
    print_ring_locations(ring, loc, account, container, obj, part, all_nodes,
                         policy_index=policy_index)
Example #44
 def test_reconcile_delete(self):
     # generic split brain
     self.brain.stop_primary_half()
     self.brain.put_container()
     self.brain.put_object()
     self.brain.start_primary_half()
     self.brain.stop_handoff_half()
     self.brain.put_container()
     self.brain.delete_object()
     self.brain.start_handoff_half()
     # make sure we have some manner of split brain
     container_part, container_nodes = self.container_ring.get_nodes(
         self.account, self.container_name)
     head_responses = []
     for node in container_nodes:
         metadata = direct_client.direct_head_container(
             node, container_part, self.account, self.container_name)
         head_responses.append((node, metadata))
     found_policy_indexes = \
         set(metadata['X-Backend-Storage-Policy-Index'] for
             node, metadata in head_responses)
     self.assertTrue(
         len(found_policy_indexes) > 1,
         'primary nodes did not disagree about policy index %r' %
         head_responses)
     # find our object
     orig_policy_index = ts_policy_index = None
     for policy_index in found_policy_indexes:
         object_ring = POLICIES.get_object_ring(policy_index, '/etc/swift')
         part, nodes = object_ring.get_nodes(
             self.account, self.container_name, self.object_name)
         for node in nodes:
             try:
                 direct_client.direct_head_object(
                     node, part, self.account, self.container_name,
                     self.object_name,
                     headers={'X-Backend-Storage-Policy-Index':
                              policy_index})
             except direct_client.ClientException as err:
                 if 'x-backend-timestamp' in err.http_headers:
                     ts_policy_index = policy_index
                     break
             else:
                 orig_policy_index = policy_index
                 break
     if orig_policy_index is None:
         self.fail('Unable to find /%s/%s/%s in %r' % (
             self.account, self.container_name, self.object_name,
             found_policy_indexes))
     if ts_policy_index is None:
         self.fail('Unable to find tombstone /%s/%s/%s in %r' % (
             self.account, self.container_name, self.object_name,
             found_policy_indexes))
     self.get_to_final_state()
     Manager(['container-reconciler']).once()
     # validate containers
     head_responses = []
     for node in container_nodes:
         metadata = direct_client.direct_head_container(
             node, container_part, self.account, self.container_name)
         head_responses.append((node, metadata))
     new_found_policy_indexes = \
         set(metadata['X-Backend-Storage-Policy-Index'] for node,
             metadata in head_responses)
     self.assertTrue(len(new_found_policy_indexes) == 1,
                     'primary nodes disagree about policy index %r' %
                     dict((node['port'],
                          metadata['X-Backend-Storage-Policy-Index'])
                          for node, metadata in head_responses))
     expected_policy_index = new_found_policy_indexes.pop()
     self.assertEqual(orig_policy_index, expected_policy_index)
     # validate object fully deleted
     for policy_index in found_policy_indexes:
         object_ring = POLICIES.get_object_ring(policy_index, '/etc/swift')
         part, nodes = object_ring.get_nodes(
             self.account, self.container_name, self.object_name)
         for node in nodes:
             try:
                 direct_client.direct_head_object(
                     node, part, self.account, self.container_name,
                     self.object_name,
                     headers={'X-Backend-Storage-Policy-Index':
                              policy_index})
             except direct_client.ClientException as err:
                 if err.http_status == HTTP_NOT_FOUND:
                     continue
             else:
                 self.fail('Found /%s/%s/%s in %s on %s' % (
                     self.account, self.container_name, self.object_name,
                     orig_policy_index, node))
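
The tombstone hunt above works because a deleted object still answers a backend HEAD with a 404 that carries an x-backend-timestamp header, while a node that never had the object returns a 404 without it. A condensed sketch of that probe, assuming a reachable object server; node, part, and the names are placeholders supplied by a ring lookup:

from swift.common import direct_client

def replica_state(node, part, account, container, obj, policy_index):
    """Classify one replica as 'present', 'tombstone', or 'missing'."""
    headers = {'X-Backend-Storage-Policy-Index': policy_index}
    try:
        direct_client.direct_head_object(
            node, part, account, container, obj, headers=headers)
    except direct_client.ClientException as err:
        # A 404 that still carries x-backend-timestamp means the server
        # holds a tombstone rather than knowing nothing about the object.
        if 'x-backend-timestamp' in err.http_headers:
            return 'tombstone'
        return 'missing'
    return 'present'
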
Example #45
0
    def __init__(self, conf, memcache=None, logger=None, account_ring=None,
                 container_ring=None):
        if conf is None:
            conf = {}
        if logger is None:
            self.logger = get_logger(conf, log_route='proxy-server')
        else:
            self.logger = logger

        self._error_limiting = {}

        swift_dir = conf.get('swift_dir', '/etc/swift')
        self.swift_dir = swift_dir
        self.node_timeout = int(conf.get('node_timeout', 10))
        self.recoverable_node_timeout = int(
            conf.get('recoverable_node_timeout', self.node_timeout))
        self.conn_timeout = float(conf.get('conn_timeout', 0.5))
        self.client_timeout = int(conf.get('client_timeout', 60))
        self.put_queue_depth = int(conf.get('put_queue_depth', 10))
        self.object_chunk_size = int(conf.get('object_chunk_size', 65536))
        self.client_chunk_size = int(conf.get('client_chunk_size', 65536))
        self.trans_id_suffix = conf.get('trans_id_suffix', '')
        self.post_quorum_timeout = float(conf.get('post_quorum_timeout', 0.5))
        self.error_suppression_interval = \
            int(conf.get('error_suppression_interval', 60))
        self.error_suppression_limit = \
            int(conf.get('error_suppression_limit', 10))
        self.recheck_container_existence = \
            int(conf.get('recheck_container_existence', 60))
        self.recheck_account_existence = \
            int(conf.get('recheck_account_existence', 60))
        self.allow_account_management = \
            config_true_value(conf.get('allow_account_management', 'no'))
        self.object_post_as_copy = \
            config_true_value(conf.get('object_post_as_copy', 'true'))
        self.container_ring = container_ring or Ring(swift_dir,
                                                     ring_name='container')
        self.account_ring = account_ring or Ring(swift_dir,
                                                 ring_name='account')
        # ensure rings are loaded for all configured storage policies
        for policy in POLICIES:
            policy.load_ring(swift_dir)
        self.obj_controller_router = ObjectControllerRouter()
        self.memcache = memcache
        mimetypes.init(mimetypes.knownfiles +
                       [os.path.join(swift_dir, 'mime.types')])
        self.account_autocreate = \
            config_true_value(conf.get('account_autocreate', 'no'))
        self.auto_create_account_prefix = (
            conf.get('auto_create_account_prefix') or '.')
        self.expiring_objects_account = self.auto_create_account_prefix + \
            (conf.get('expiring_objects_account_name') or 'expiring_objects')
        self.expiring_objects_container_divisor = \
            int(conf.get('expiring_objects_container_divisor') or 86400)
        self.max_containers_per_account = \
            int(conf.get('max_containers_per_account') or 0)
        self.max_containers_whitelist = [
            a.strip()
            for a in conf.get('max_containers_whitelist', '').split(',')
            if a.strip()]
        self.deny_host_headers = [
            host.strip() for host in
            conf.get('deny_host_headers', '').split(',') if host.strip()]
        self.log_handoffs = config_true_value(conf.get('log_handoffs', 'true'))
        self.cors_allow_origin = [
            a.strip()
            for a in conf.get('cors_allow_origin', '').split(',')
            if a.strip()]
        self.strict_cors_mode = config_true_value(
            conf.get('strict_cors_mode', 't'))
        self.node_timings = {}
        self.timing_expiry = int(conf.get('timing_expiry', 300))
        self.sorting_method = conf.get('sorting_method', 'shuffle').lower()
        self.max_large_object_get_time = float(
            conf.get('max_large_object_get_time', '86400'))
        value = conf.get('request_node_count', '2 * replicas').lower().split()
        if len(value) == 1:
            rnc_value = int(value[0])
            self.request_node_count = lambda replicas: rnc_value
        elif len(value) == 3 and value[1] == '*' and value[2] == 'replicas':
            rnc_value = int(value[0])
            self.request_node_count = lambda replicas: rnc_value * replicas
        else:
            raise ValueError(
                'Invalid request_node_count value: %r' % ''.join(value))
        try:
            self._read_affinity = read_affinity = conf.get('read_affinity', '')
            self.read_affinity_sort_key = affinity_key_function(read_affinity)
        except ValueError as err:
            # make the message a little more useful
            raise ValueError("Invalid read_affinity value: %r (%s)" %
                             (read_affinity, err.message))
        try:
            write_affinity = conf.get('write_affinity', '')
            self.write_affinity_is_local_fn \
                = affinity_locality_predicate(write_affinity)
        except ValueError as err:
            # make the message a little more useful
            raise ValueError("Invalid write_affinity value: %r (%s)" %
                             (write_affinity, err.message))
        value = conf.get('write_affinity_node_count',
                         '2 * replicas').lower().split()
        if len(value) == 1:
            wanc_value = int(value[0])
            self.write_affinity_node_count = lambda replicas: wanc_value
        elif len(value) == 3 and value[1] == '*' and value[2] == 'replicas':
            wanc_value = int(value[0])
            self.write_affinity_node_count = \
                lambda replicas: wanc_value * replicas
        else:
            raise ValueError(
                'Invalid write_affinity_node_count value: %r' % ''.join(value))
        # swift_owner_headers are stripped by the account and container
        # controllers; we should extend header stripping to object controller
        # when a privileged object header is implemented.
        swift_owner_headers = conf.get(
            'swift_owner_headers',
            'x-container-read, x-container-write, '
            'x-container-sync-key, x-container-sync-to, '
            'x-account-meta-temp-url-key, x-account-meta-temp-url-key-2, '
            'x-container-meta-temp-url-key, x-container-meta-temp-url-key-2, '
            'x-account-access-control')
        self.swift_owner_headers = [
            name.strip().title()
            for name in swift_owner_headers.split(',') if name.strip()]
        # Initialization was successful, so now apply the client chunk size
        # parameter as the default read / write buffer size for the network
        # sockets.
        #
        # NOTE WELL: This is a class setting, so until we can set this on a
        # per-connection basis, this affects reading and writing on ALL
        # sockets, those between the proxy servers and external clients, and
        # those between the proxy servers and the other internal servers.
        #
        # ** Because it affects the client as well, currently, we use the
        # client chunk size as the governor and not the object chunk size.
        socket._fileobject.default_bufsize = self.client_chunk_size
        self.expose_info = config_true_value(
            conf.get('expose_info', 'yes'))
        self.disallowed_sections = list_from_csv(
            conf.get('disallowed_sections', 'swift.valid_api_versions'))
        self.admin_key = conf.get('admin_key', None)
        register_swift_info(
            version=swift_version,
            strict_cors_mode=self.strict_cors_mode,
            policies=POLICIES.get_policy_info(),
            allow_account_management=self.allow_account_management,
            account_autocreate=self.account_autocreate,
            **constraints.EFFECTIVE_CONSTRAINTS)
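
The request_node_count handling above turns a config string such as '2 * replicas' into a callable of the replica count. A standalone sketch of just that parsing, mirroring the branches in the constructor; parse_node_count is a name introduced here for illustration:

def parse_node_count(value):
    """Parse '7' or '2 * replicas' into a function of the replica count."""
    parts = value.lower().split()
    if len(parts) == 1:
        n = int(parts[0])
        return lambda replicas: n
    if len(parts) == 3 and parts[1] == '*' and parts[2] == 'replicas':
        n = int(parts[0])
        return lambda replicas: n * replicas
    raise ValueError('Invalid request_node_count value: %r' % (value,))

request_node_count = parse_node_count('2 * replicas')
print(request_node_count(3))   # -> 6 nodes to try on a 3-replica ring
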
Example #46
0
    def map_objects_to_targets(self):
        """ Map objects to their local storage server data replicas
        and create a request for the generic backend interface """
        self.logger.debug('Mapping objects to files')
        self.logger.debug('request_in(first 1024 bytes): %s',
                          str(self.request_in)[0:1023])

        request_in_dict = json.loads(self.request_in)
        # TODO consider modifying incoming request in place
        self.request_out = {}
        self.request_out['request'] = request_in_dict['request']
        objects_and_files = []
        oc = ObjectController(self.conf, self.logger)
        self.logger.debug('oc.node_timeout: %s', oc.node_timeout)
        for obj_and_dev in request_in_dict['objects']:
            obj_and_file = {}
            obj_and_file['object'] = obj_and_dev['object']
            self.logger.debug('obj: %s', obj_and_dev)
            try:
                (account, container,
                 obj) = split_path(obj_and_dev['object'].encode('utf-8'), 3, 3,
                                   True)
            except ValueError:
                self.logger.debug('split_path exception')
                raise
            device = obj_and_dev['device']
            # TODO, can can storage_policy_index be determined from storage
            # node to not have to pass from proxy?
            # container_info = get_container_info(
            #     {'PATH_INFO': '/v1/%s/%s' % (account, container)},
            #     self.app, swift_source='LE')
            # storage_policy_index = container_info['storage_policy']
            # obj_ring = self.get_object_ring(storage_policy_index)
            swift_dir = request_in_dict['swift_dir']
            storage_policy_index = request_in_dict['storage_policy_index']
            obj_ring = POLICIES.get_object_ring(storage_policy_index,
                                                swift_dir)
            # need partition, same comment as for storage_policy_index
            partition, nodes = obj_ring.get_nodes(account, container, obj)
            self.logger.debug('Storage nodes: %s' % str(nodes))
            self.logger.debug('partition: %s', partition)
            # scor (aux)
            # key = hash_path(account, container, obj, raw_digest=True)
            key = hash_path(account, container, obj)
            self.logger.debug('hash_path or key: %s', key)

            # Create/use Object Controller to map objects to files
            policy = POLICIES.get_by_index(storage_policy_index)
            self.logger.debug('policy: %s index: %s', policy, str(int(policy)))
            try:
                oc.disk_file = oc.get_diskfile(device,
                                               partition,
                                               account,
                                               container,
                                               obj,
                                               policy=policy)
            except DiskFileDeviceUnavailable:  # scor
                self.logger.error(
                    "Unavailable device: %s, for object: %s,"
                    "storage policy: %s", device, obj_and_dev['object'],
                    policy)
            data_dir = oc.disk_file._datadir
            self.logger.debug('data_dir: %s', data_dir)
            # Swift-on-File detection
            sof_detected = False
            # Get the device path from the object server config file
            devpath = self.conf.get('devices', None)
            # The Swift-on-File device directory is a symlink
            # in the devpath directory constructed like shown below
            sofpath = devpath + '/' + obj_and_dev['device']
            if data_dir.find(sofpath) == 0 and os.path.islink(sofpath):
                # data_dir starts with sofpath and sofpath is a symlink -> SoF
                sof_detected = True
                self.logger.debug('SOF detected, sofpath: %s, realpath: %s',
                                  sofpath, os.path.realpath(sofpath))
                # Follow the symlink and append a/c/o to get the data file path
                oc._data_file = os.path.realpath(sofpath) + \
                    obj_and_file['object']
            elif not self.gbi_provide_dirpaths_instead_of_filepaths:
                files = os.listdir(oc.disk_file._datadir)
                file_info = {}
                # DiskFile method got renamed between Liberty and Mitaka
                try:
                    file_info = oc.disk_file._get_ondisk_file(files)
                except AttributeError:
                    file_info = oc.disk_file._get_ondisk_files(files)
                oc._data_file = file_info.get('data_file')
                self.logger.debug('data_file: %s', oc._data_file)
            # Add file path to the request
            self.logger.debug('obj_and_dev: %s', obj_and_dev)
            if (not self.gbi_provide_dirpaths_instead_of_filepaths) or \
               sof_detected:
                obj_and_file['file'] = oc._data_file
            else:
                obj_and_file['file'] = data_dir
            self.logger.debug('obj_and_file: %s', obj_and_file)
            objects_and_files.append(obj_and_file)

        self.logger.debug('objects_and_files(first 1024 bytes): %s',
                          str(objects_and_files)[0:1023])
        self.request_out['objects'] = objects_and_files

        self.logger.debug('request_in(first 1024 bytes): %s',
                          str(self.request_in)[0:1023])
        self.logger.debug('request_out(first 1024 bytes): %s',
                          str(self.request_out)[0:1023])
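
The core of map_objects_to_targets is: policy index -> object ring -> (partition, nodes), plus the hash_path value that names the on-disk data directory. A minimal sketch of that lookup, assuming rings are present under /etc/swift; the object path and policy index are placeholders:

from swift.common.storage_policy import POLICIES
from swift.common.utils import hash_path, split_path

# Placeholder object path in /account/container/object form.
account, container, obj = split_path('/AUTH_test/photos/cat.jpg', 3, 3, True)

storage_policy_index = 0            # placeholder policy index
obj_ring = POLICIES.get_object_ring(storage_policy_index, '/etc/swift')
partition, nodes = obj_ring.get_nodes(account, container, obj)
key = hash_path(account, container, obj)   # hash used to build the datadir

print('partition=%s key=%s' % (partition, key))
for node in nodes:
    print('%(ip)s:%(port)s/%(device)s' % node)
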
Example #47
0
    def __init__(self,
                 conf,
                 memcache=None,
                 logger=None,
                 account_ring=None,
                 container_ring=None):
        if conf is None:
            conf = {}
        if logger is None:
            self.logger = get_logger(conf, log_route='proxy-server')
        else:
            self.logger = logger

        swift_dir = conf.get('swift_dir', '/etc/swift')
        self.swift_dir = swift_dir
        self.node_timeout = float(conf.get('node_timeout', 10))
        self.recoverable_node_timeout = float(
            conf.get('recoverable_node_timeout', self.node_timeout))
        self.conn_timeout = float(conf.get('conn_timeout', 0.5))
        self.client_timeout = int(conf.get('client_timeout', 60))
        self.put_queue_depth = int(conf.get('put_queue_depth', 10))
        self.object_chunk_size = int(conf.get('object_chunk_size', 65536))
        self.client_chunk_size = int(conf.get('client_chunk_size', 65536))
        self.trans_id_suffix = conf.get('trans_id_suffix', '')
        self.allow_account_management = \
            config_true_value(conf.get('allow_account_management', 'no'))
        self.memcache = memcache
        mimetypes.init(mimetypes.knownfiles +
                       [os.path.join(swift_dir, 'mime.types')])
        self.account_autocreate = \
            config_true_value(conf.get('account_autocreate', 'no'))
        self.deny_host_headers = [
            host.strip()
            for host in conf.get('deny_host_headers', '').split(',')
            if host.strip()
        ]
        self.strict_cors_mode = config_true_value(
            conf.get('strict_cors_mode', 't'))
        value = conf.get('request_node_count', '2 * replicas').lower().split()
        if len(value) == 1:
            rnc_value = int(value[0])
            self.request_node_count = lambda replicas: rnc_value
        elif len(value) == 3 and value[1] == '*' and value[2] == 'replicas':
            rnc_value = int(value[0])
            self.request_node_count = lambda replicas: rnc_value * replicas
        else:
            raise ValueError('Invalid request_node_count value: %r' %
                             ''.join(value))
        swift_owner_headers = conf.get(
            'swift_owner_headers', 'x-container-read, x-container-write, '
            'x-container-sync-key, x-container-sync-to, '
            'x-account-meta-temp-url-key, x-account-meta-temp-url-key-2, '
            'x-container-meta-temp-url-key, x-container-meta-temp-url-key-2, '
            'x-account-access-control')
        self.swift_owner_headers = [
            name.strip().title() for name in swift_owner_headers.split(',')
            if name.strip()
        ]
        socket._fileobject.default_bufsize = self.client_chunk_size
        self.expose_info = config_true_value(conf.get('expose_info', 'yes'))
        self.disallowed_sections = list_from_csv(
            conf.get('disallowed_sections', 'swift.valid_api_versions'))
        self.admin_key = conf.get('admin_key', None)
        register_swift_info(
            version=swift_version,
            strict_cors_mode=self.strict_cors_mode,
            policies=POLICIES.get_policy_info(),
            allow_account_management=self.allow_account_management,
            account_autocreate=self.account_autocreate,
            **constraints.EFFECTIVE_CONSTRAINTS)
        self.swift_baseurl = conf.get('swift_baseurl')
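
The swift_owner_headers handling that recurs in these constructors is plain CSV normalization: split on commas, strip whitespace, and Title-Case each header name. A tiny standalone sketch; parse_owner_headers is a name introduced here for illustration:

def parse_owner_headers(value):
    """Normalize a comma-separated header list into Title-Cased names."""
    return [name.strip().title()
            for name in value.split(',') if name.strip()]

print(parse_owner_headers('x-container-read, x-container-write, '
                          'x-account-access-control'))
# -> ['X-Container-Read', 'X-Container-Write', 'X-Account-Access-Control']
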
    def test_reconcile_manifest(self):
        info_url = "%s://%s/info" % (urlparse(self.url).scheme,
                                     urlparse(self.url).netloc)
        proxy_conn = client.http_connection(info_url)
        cluster_info = client.get_capabilities(proxy_conn)
        if 'slo' not in cluster_info:
            raise SkipTest("SLO not enabled in proxy; "
                           "can't test manifest reconciliation")

        # this test is not only testing a split brain scenario on
        # multiple policies with mis-placed objects - it even writes out
        # a static large object directly to the storage nodes while the
        # objects are unavailably mis-placed from *behind* the proxy and
        # doesn't know how to do that for EC_POLICY (clayg: why did you
        # guys let me write a test that does this!?) - so we force
        # wrong_policy (where the manifest gets written) to be one of
        # your configured REPL_POLICY policies (we know you have one
        # because this is a ReplProbeTest)
        wrong_policy = random.choice(POLICIES_BY_TYPE[REPL_POLICY])
        policy = random.choice([p for p in ENABLED_POLICIES
                                if p is not wrong_policy])
        manifest_data = []

        def write_part(i):
            body = 'VERIFY%0.2d' % i + '\x00' * 1048576
            part_name = 'manifest_part_%0.2d' % i
            manifest_entry = {
                "path": "/%s/%s" % (self.container_name, part_name),
                "etag": md5(body).hexdigest(),
                "size_bytes": len(body),
            }
            client.put_object(self.url, self.token, self.container_name,
                              part_name, contents=body)
            manifest_data.append(manifest_entry)

        # get an old container stashed
        self.brain.stop_primary_half()
        self.brain.put_container(int(policy))
        self.brain.start_primary_half()
        # write some parts
        for i in range(10):
            write_part(i)

        self.brain.stop_handoff_half()
        self.brain.put_container(int(wrong_policy))
        # write some more parts
        for i in range(10, 20):
            write_part(i)

        # write manifest
        with self.assertRaises(ClientException) as catcher:
            client.put_object(self.url, self.token, self.container_name,
                              self.object_name,
                              contents=utils.json.dumps(manifest_data),
                              query_string='multipart-manifest=put')

        # so as it works out, you can't really upload a multi-part
        # manifest for objects that are currently misplaced - you have to
        # wait until they're all available - which is about the same as
        # some other failure that causes data to be unavailable to the
        # proxy at the time of upload
        self.assertEqual(catcher.exception.http_status, 400)

        # but what the heck, we'll sneak one in just to see what happens...
        direct_manifest_name = self.object_name + '-direct-test'
        object_ring = POLICIES.get_object_ring(wrong_policy.idx, '/etc/swift')
        part, nodes = object_ring.get_nodes(
            self.account, self.container_name, direct_manifest_name)
        container_part = self.container_ring.get_part(self.account,
                                                      self.container_name)

        def translate_direct(data):
            return {
                'hash': data['etag'],
                'bytes': data['size_bytes'],
                'name': data['path'],
            }
        direct_manifest_data = map(translate_direct, manifest_data)
        headers = {
            'x-container-host': ','.join('%s:%s' % (n['ip'], n['port']) for n
                                         in self.container_ring.devs),
            'x-container-device': ','.join(n['device'] for n in
                                           self.container_ring.devs),
            'x-container-partition': container_part,
            'X-Backend-Storage-Policy-Index': wrong_policy.idx,
            'X-Static-Large-Object': 'True',
        }
        for node in nodes:
            direct_client.direct_put_object(
                node, part, self.account, self.container_name,
                direct_manifest_name,
                contents=utils.json.dumps(direct_manifest_data),
                headers=headers)
            break  # one should do it...

        self.brain.start_handoff_half()
        self.get_to_final_state()
        Manager(['container-reconciler']).once()
        # clear proxy cache
        client.post_container(self.url, self.token, self.container_name, {})

        # let's see how that direct upload worked out...
        metadata, body = client.get_object(
            self.url, self.token, self.container_name, direct_manifest_name,
            query_string='multipart-manifest=get')
        self.assertEqual(metadata['x-static-large-object'].lower(), 'true')
        for i, entry in enumerate(utils.json.loads(body)):
            for key in ('hash', 'bytes', 'name'):
                self.assertEqual(entry[key], direct_manifest_data[i][key])
        metadata, body = client.get_object(
            self.url, self.token, self.container_name, direct_manifest_name)
        self.assertEqual(metadata['x-static-large-object'].lower(), 'true')
        self.assertEqual(int(metadata['content-length']),
                         sum(part['size_bytes'] for part in manifest_data))
        self.assertEqual(body, ''.join('VERIFY%0.2d' % i + '\x00' * 1048576
                                       for i in range(20)))

        # and regular upload should work now too
        client.put_object(self.url, self.token, self.container_name,
                          self.object_name,
                          contents=utils.json.dumps(manifest_data),
                          query_string='multipart-manifest=put')
        metadata = client.head_object(self.url, self.token,
                                      self.container_name,
                                      self.object_name)
        self.assertEqual(int(metadata['content-length']),
                         sum(part['size_bytes'] for part in manifest_data))
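
The translate_direct helper above highlights that a client-side SLO manifest entry (path/etag/size_bytes) and a backend container-listing entry (name/hash/bytes) carry the same data under different keys. A small standalone sketch of that mapping with placeholder values:

def translate_direct(entry):
    """Map a client SLO manifest entry to the backend listing schema."""
    return {
        'name': entry['path'],
        'hash': entry['etag'],
        'bytes': entry['size_bytes'],
    }

manifest_entry = {
    'path': '/my-container/manifest_part_00',      # placeholder segment path
    'etag': 'd41d8cd98f00b204e9800998ecf8427e',     # placeholder MD5
    'size_bytes': 1048582,
}
print(translate_direct(manifest_entry))
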
Example #49
0
    def test_merge_storage_policy_index(self):
        # generic split brain
        self.brain.stop_primary_half()
        self.brain.put_container()
        self.brain.start_primary_half()
        self.brain.stop_handoff_half()
        self.brain.put_container()
        self.brain.put_object()
        self.brain.start_handoff_half()
        # make sure we have some manner of split brain
        container_part, container_nodes = self.container_ring.get_nodes(
            self.account, self.container_name)
        head_responses = []
        for node in container_nodes:
            metadata = direct_client.direct_head_container(
                node, container_part, self.account, self.container_name)
            head_responses.append((node, metadata))
        found_policy_indexes = \
            set(metadata['X-Backend-Storage-Policy-Index'] for
                node, metadata in head_responses)
        self.assert_(
            len(found_policy_indexes) > 1,
            'primary nodes did not disagree about policy index %r' %
            head_responses)
        # find our object
        orig_policy_index = None
        for policy_index in found_policy_indexes:
            object_ring = POLICIES.get_object_ring(policy_index, '/etc/swift')
            part, nodes = object_ring.get_nodes(self.account,
                                                self.container_name,
                                                self.object_name)
            for node in nodes:
                try:
                    direct_client.direct_head_object(
                        node,
                        part,
                        self.account,
                        self.container_name,
                        self.object_name,
                        headers={
                            'X-Backend-Storage-Policy-Index': policy_index
                        })
                except direct_client.ClientException as err:
                    continue
                orig_policy_index = policy_index
                break
            if orig_policy_index is not None:
                break
        else:
            self.fail('Unable to find /%s/%s/%s in %r' %
                      (self.account, self.container_name, self.object_name,
                       found_policy_indexes))
        get_to_final_state()
        Manager(['container-reconciler']).once()
        # validate containers
        head_responses = []
        for node in container_nodes:
            metadata = direct_client.direct_head_container(
                node, container_part, self.account, self.container_name)
            head_responses.append((node, metadata))
        found_policy_indexes = \
            set(metadata['X-Backend-Storage-Policy-Index'] for
                node, metadata in head_responses)
        self.assert_(
            len(found_policy_indexes) == 1,
            'primary nodes disagree about policy index %r' % head_responses)

        expected_policy_index = found_policy_indexes.pop()
        self.assertNotEqual(orig_policy_index, expected_policy_index)
        # validate object placement
        orig_policy_ring = POLICIES.get_object_ring(orig_policy_index,
                                                    '/etc/swift')
        for node in orig_policy_ring.devs:
            try:
                direct_client.direct_head_object(
                    node,
                    part,
                    self.account,
                    self.container_name,
                    self.object_name,
                    headers={
                        'X-Backend-Storage-Policy-Index': orig_policy_index
                    })
            except direct_client.ClientException as err:
                if err.http_status == HTTP_NOT_FOUND:
                    continue
                raise
            else:
                self.fail('Found /%s/%s/%s in %s' %
                          (self.account, self.container_name, self.object_name,
                           orig_policy_index))
        # use proxy to access object (bad container info might be cached...)
        timeout = time.time() + TIMEOUT
        while time.time() < timeout:
            try:
                metadata = client.head_object(self.url, self.token,
                                              self.container_name,
                                              self.object_name)
            except ClientException as err:
                if err.http_status != HTTP_NOT_FOUND:
                    raise
                time.sleep(1)
            else:
                break
        else:
            self.fail('could not HEAD /%s/%s/%s/ from policy %s '
                      'after %s seconds.' %
                      (self.account, self.container_name, self.object_name,
                       expected_policy_index, TIMEOUT))
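
The final proxy check uses Python's while/else so that "HEAD eventually succeeded" and "timed out without ever succeeding" take different branches. A generic sketch of that polling pattern; wait_for and probe are names introduced here, and probe stands for any call that raises until the cluster converges:

import time

def wait_for(probe, timeout=30, interval=1):
    """Retry probe() until it succeeds or the timeout expires."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            result = probe()
        except Exception:
            time.sleep(interval)
        else:
            break
    else:
        # The loop ran out of time without hitting break.
        raise RuntimeError('gave up after %s seconds' % timeout)
    return result
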
Example #50
0
    def __init__(self,
                 conf,
                 memcache=None,
                 logger=None,
                 account_ring=None,
                 container_ring=None):
        if conf is None:
            conf = {}
        if logger is None:
            self.logger = get_logger(conf, log_route='proxy-server')
        else:
            self.logger = logger

        self._error_limiting = {}

        swift_dir = conf.get('swift_dir', '/etc/swift')
        self.swift_dir = swift_dir
        self.node_timeout = float(conf.get('node_timeout', 10))
        self.recoverable_node_timeout = float(
            conf.get('recoverable_node_timeout', self.node_timeout))
        self.conn_timeout = float(conf.get('conn_timeout', 0.5))
        self.client_timeout = int(conf.get('client_timeout', 60))
        self.put_queue_depth = int(conf.get('put_queue_depth', 10))
        self.object_chunk_size = int(conf.get('object_chunk_size', 65536))
        self.client_chunk_size = int(conf.get('client_chunk_size', 65536))
        self.trans_id_suffix = conf.get('trans_id_suffix', '')
        self.post_quorum_timeout = float(conf.get('post_quorum_timeout', 0.5))
        self.error_suppression_interval = \
            int(conf.get('error_suppression_interval', 60))
        self.error_suppression_limit = \
            int(conf.get('error_suppression_limit', 10))
        self.recheck_container_existence = \
            int(conf.get('recheck_container_existence', 60))
        self.recheck_account_existence = \
            int(conf.get('recheck_account_existence', 60))
        self.allow_account_management = \
            config_true_value(conf.get('allow_account_management', 'no'))
        self.object_post_as_copy = \
            config_true_value(conf.get('object_post_as_copy', 'true'))
        self.container_ring = container_ring or Ring(swift_dir,
                                                     ring_name='container')
        self.account_ring = account_ring or Ring(swift_dir,
                                                 ring_name='account')
        # ensure rings are loaded for all configured storage policies
        for policy in POLICIES:
            policy.load_ring(swift_dir)
        self.obj_controller_router = ObjectControllerRouter()
        self.memcache = memcache
        mimetypes.init(mimetypes.knownfiles +
                       [os.path.join(swift_dir, 'mime.types')])
        self.account_autocreate = \
            config_true_value(conf.get('account_autocreate', 'no'))
        self.auto_create_account_prefix = (
            conf.get('auto_create_account_prefix') or '.')
        self.expiring_objects_account = self.auto_create_account_prefix + \
            (conf.get('expiring_objects_account_name') or 'expiring_objects')
        self.expiring_objects_container_divisor = \
            int(conf.get('expiring_objects_container_divisor') or 86400)
        self.max_containers_per_account = \
            int(conf.get('max_containers_per_account') or 0)
        self.max_containers_whitelist = [
            a.strip()
            for a in conf.get('max_containers_whitelist', '').split(',')
            if a.strip()
        ]
        self.deny_host_headers = [
            host.strip()
            for host in conf.get('deny_host_headers', '').split(',')
            if host.strip()
        ]
        self.log_handoffs = config_true_value(conf.get('log_handoffs', 'true'))
        self.cors_allow_origin = [
            a.strip() for a in conf.get('cors_allow_origin', '').split(',')
            if a.strip()
        ]
        self.strict_cors_mode = config_true_value(
            conf.get('strict_cors_mode', 't'))
        self.node_timings = {}
        self.timing_expiry = int(conf.get('timing_expiry', 300))
        self.sorting_method = conf.get('sorting_method', 'shuffle').lower()
        self.concurrent_gets = \
            config_true_value(conf.get('concurrent_gets'))
        self.concurrency_timeout = float(
            conf.get('concurrency_timeout', self.conn_timeout))
        value = conf.get('request_node_count', '2 * replicas').lower().split()
        if len(value) == 1:
            rnc_value = int(value[0])
            self.request_node_count = lambda replicas: rnc_value
        elif len(value) == 3 and value[1] == '*' and value[2] == 'replicas':
            rnc_value = int(value[0])
            self.request_node_count = lambda replicas: rnc_value * replicas
        else:
            raise ValueError('Invalid request_node_count value: %r' %
                             ''.join(value))
        try:
            self._read_affinity = read_affinity = conf.get('read_affinity', '')
            self.read_affinity_sort_key = affinity_key_function(read_affinity)
        except ValueError as err:
            # make the message a little more useful
            raise ValueError("Invalid read_affinity value: %r (%s)" %
                             (read_affinity, err.message))
        try:
            write_affinity = conf.get('write_affinity', '')
            self.write_affinity_is_local_fn \
                = affinity_locality_predicate(write_affinity)
        except ValueError as err:
            # make the message a little more useful
            raise ValueError("Invalid write_affinity value: %r (%s)" %
                             (write_affinity, err.message))
        value = conf.get('write_affinity_node_count',
                         '2 * replicas').lower().split()
        if len(value) == 1:
            wanc_value = int(value[0])
            self.write_affinity_node_count = lambda replicas: wanc_value
        elif len(value) == 3 and value[1] == '*' and value[2] == 'replicas':
            wanc_value = int(value[0])
            self.write_affinity_node_count = \
                lambda replicas: wanc_value * replicas
        else:
            raise ValueError('Invalid write_affinity_node_count value: %r' %
                             ''.join(value))
        # swift_owner_headers are stripped by the account and container
        # controllers; we should extend header stripping to object controller
        # when a privileged object header is implemented.
        swift_owner_headers = conf.get(
            'swift_owner_headers', 'x-container-read, x-container-write, '
            'x-container-sync-key, x-container-sync-to, '
            'x-account-meta-temp-url-key, x-account-meta-temp-url-key-2, '
            'x-container-meta-temp-url-key, x-container-meta-temp-url-key-2, '
            'x-account-access-control')
        self.swift_owner_headers = [
            name.strip().title() for name in swift_owner_headers.split(',')
            if name.strip()
        ]
        # Initialization was successful, so now apply the client chunk size
        # parameter as the default read / write buffer size for the network
        # sockets.
        #
        # NOTE WELL: This is a class setting, so until we can set this on a
        # per-connection basis, this affects reading and writing on ALL
        # sockets, those between the proxy servers and external clients, and
        # those between the proxy servers and the other internal servers.
        #
        # ** Because it affects the client as well, currently, we use the
        # client chunk size as the governor and not the object chunk size.
        socket._fileobject.default_bufsize = self.client_chunk_size
        self.expose_info = config_true_value(conf.get('expose_info', 'yes'))
        self.disallowed_sections = list_from_csv(
            conf.get('disallowed_sections', 'swift.valid_api_versions'))
        self.admin_key = conf.get('admin_key', None)
        register_swift_info(
            version=swift_version,
            strict_cors_mode=self.strict_cors_mode,
            policies=POLICIES.get_policy_info(),
            allow_account_management=self.allow_account_management,
            account_autocreate=self.account_autocreate,
            **constraints.EFFECTIVE_CONSTRAINTS)
Example #51
0
    def __init__(self,
                 conf,
                 logger=None,
                 account_ring=None,
                 container_ring=None):
        if conf is None:
            conf = {}
        if logger is None:
            self.logger = get_logger(conf, log_route='proxy-server')
        else:
            self.logger = logger
        self._error_limiting = {}

        swift_dir = conf.get('swift_dir', '/etc/swift')
        self.swift_dir = swift_dir
        self.node_timeout = float(conf.get('node_timeout', 10))
        self.recoverable_node_timeout = float(
            conf.get('recoverable_node_timeout', self.node_timeout))
        self.conn_timeout = float(conf.get('conn_timeout', 0.5))
        self.client_timeout = float(conf.get('client_timeout', 60))
        self.object_chunk_size = int(conf.get('object_chunk_size', 65536))
        self.client_chunk_size = int(conf.get('client_chunk_size', 65536))
        self.trans_id_suffix = conf.get('trans_id_suffix', '')
        self.post_quorum_timeout = float(conf.get('post_quorum_timeout', 0.5))
        self.error_suppression_interval = \
            int(conf.get('error_suppression_interval', 60))
        self.error_suppression_limit = \
            int(conf.get('error_suppression_limit', 10))
        self.recheck_container_existence = \
            int(conf.get('recheck_container_existence',
                         DEFAULT_RECHECK_CONTAINER_EXISTENCE))
        self.recheck_updating_shard_ranges = \
            int(conf.get('recheck_updating_shard_ranges',
                         DEFAULT_RECHECK_UPDATING_SHARD_RANGES))
        self.recheck_account_existence = \
            int(conf.get('recheck_account_existence',
                         DEFAULT_RECHECK_ACCOUNT_EXISTENCE))
        self.allow_account_management = \
            config_true_value(conf.get('allow_account_management', 'no'))
        self.container_ring = container_ring or Ring(swift_dir,
                                                     ring_name='container')
        self.account_ring = account_ring or Ring(swift_dir,
                                                 ring_name='account')
        # ensure rings are loaded for all configured storage policies
        for policy in POLICIES:
            policy.load_ring(swift_dir)
        self.obj_controller_router = ObjectControllerRouter()
        mimetypes.init(mimetypes.knownfiles +
                       [os.path.join(swift_dir, 'mime.types')])
        self.account_autocreate = \
            config_true_value(conf.get('account_autocreate', 'no'))
        if conf.get('auto_create_account_prefix'):
            self.logger.warning('Option auto_create_account_prefix is '
                                'deprecated. Configure '
                                'auto_create_account_prefix under the '
                                'swift-constraints section of '
                                'swift.conf. This option will '
                                'be ignored in a future release.')
            self.auto_create_account_prefix = \
                conf['auto_create_account_prefix']
        else:
            self.auto_create_account_prefix = \
                constraints.AUTO_CREATE_ACCOUNT_PREFIX
        self.expiring_objects_account = self.auto_create_account_prefix + \
            (conf.get('expiring_objects_account_name') or 'expiring_objects')
        self.expiring_objects_container_divisor = \
            int(conf.get('expiring_objects_container_divisor') or 86400)
        self.max_containers_per_account = \
            int(conf.get('max_containers_per_account') or 0)
        self.max_containers_whitelist = [
            a.strip()
            for a in conf.get('max_containers_whitelist', '').split(',')
            if a.strip()
        ]
        self.deny_host_headers = [
            host.strip()
            for host in conf.get('deny_host_headers', '').split(',')
            if host.strip()
        ]
        self.log_handoffs = config_true_value(conf.get('log_handoffs', 'true'))
        self.cors_allow_origin = [
            a.strip() for a in conf.get('cors_allow_origin', '').split(',')
            if a.strip()
        ]
        self.cors_expose_headers = [
            a.strip() for a in conf.get('cors_expose_headers', '').split(',')
            if a.strip()
        ]
        self.strict_cors_mode = config_true_value(
            conf.get('strict_cors_mode', 't'))
        self.node_timings = {}
        self.timing_expiry = int(conf.get('timing_expiry', 300))
        value = conf.get('request_node_count', '2 * replicas').lower().split()
        if len(value) == 1:
            rnc_value = int(value[0])
            self.request_node_count = lambda replicas: rnc_value
        elif len(value) == 3 and value[1] == '*' and value[2] == 'replicas':
            rnc_value = int(value[0])
            self.request_node_count = lambda replicas: rnc_value * replicas
        else:
            raise ValueError('Invalid request_node_count value: %r' %
                             ''.join(value))
        # swift_owner_headers are stripped by the account and container
        # controllers; we should extend header stripping to object controller
        # when a privileged object header is implemented.
        swift_owner_headers = conf.get(
            'swift_owner_headers', 'x-container-read, x-container-write, '
            'x-container-sync-key, x-container-sync-to, '
            'x-account-meta-temp-url-key, x-account-meta-temp-url-key-2, '
            'x-container-meta-temp-url-key, x-container-meta-temp-url-key-2, '
            'x-account-access-control')
        self.swift_owner_headers = [
            name.strip().title() for name in swift_owner_headers.split(',')
            if name.strip()
        ]

        # When upgrading from liberasurecode<=1.5.0, you may want to continue
        # writing legacy CRCs until all nodes are upgraded and capable of
        # reading fragments with zlib CRCs.
        # See https://bugs.launchpad.net/liberasurecode/+bug/1886088 for more
        # information.
        if 'write_legacy_ec_crc' in conf:
            os.environ['LIBERASURECODE_WRITE_LEGACY_CRC'] = \
                '1' if config_true_value(conf['write_legacy_ec_crc']) else '0'
        # else, assume operators know what they're doing and leave env alone

        # Initialization was successful, so now apply the client chunk size
        # parameter as the default read / write buffer size for the network
        # sockets.
        #
        # NOTE WELL: This is a class setting, so until we can set this on a
        # per-connection basis, this affects reading and writing on ALL
        # sockets, those between the proxy servers and external clients, and
        # those between the proxy servers and the other internal servers.
        #
        # ** Because it affects the client as well, currently, we use the
        # client chunk size as the governor and not the object chunk size.
        if sys.version_info < (3, ):
            socket._fileobject.default_bufsize = self.client_chunk_size
        # TODO: find a way to enable similar functionality in py3

        self.expose_info = config_true_value(conf.get('expose_info', 'yes'))
        self.disallowed_sections = list_from_csv(
            conf.get(
                'disallowed_sections', ', '.join([
                    'swift.auto_create_account_prefix',
                    'swift.valid_api_versions',
                ])))
        self.admin_key = conf.get('admin_key', None)
        self._override_options = self._load_per_policy_config(conf)
        self.sorts_by_timing = any(pc.sorting_method == 'timing'
                                   for pc in self._override_options.values())

        register_swift_info(
            version=swift_version,
            strict_cors_mode=self.strict_cors_mode,
            policies=POLICIES.get_policy_info(),
            allow_account_management=self.allow_account_management,
            account_autocreate=self.account_autocreate,
            **constraints.EFFECTIVE_CONSTRAINTS)
        self.watchdog = Watchdog()
        self.watchdog.spawn()
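
Several of the settings above (strict_cors_mode defaulting to 't', write_legacy_ec_crc, account_autocreate) go through config_true_value, which accepts a handful of truthy strings and treats everything else as False. A quick sketch; the truthy set noted in the comment is taken from Swift's TRUE_VALUES and should be checked against your Swift version:

from swift.common.utils import config_true_value

for value in ('yes', 'true', 't', '1', 'on', 'y', 'no', '0', ''):
    print('%r -> %s' % (value, config_true_value(value)))
# 'yes', 'true', 't', '1', 'on' and 'y' are truthy; everything else is False.
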
Example #52
0
def print_obj(datafile, check_etag=True, swift_dir='/etc/swift',
              policy_name=''):
    """
    Display information about an object read from the datafile.
    Optionally verify the datafile content matches the ETag metadata.

    :param datafile: path on disk to object file
    :param check_etag: boolean, will read datafile content and verify
                       computed checksum matches value stored in
                       metadata.
    :param swift_dir: the path on disk to rings
    :param policy_name: optionally the name to use when finding the ring
    """
    if not os.path.exists(datafile):
        print "Data file doesn't exist"
        raise InfoSystemExit()
    if not datafile.startswith(('/', './')):
        datafile = './' + datafile

    policy_index = None
    ring = None
    datadir = DATADIR_BASE

    # try to extract policy index from datafile disk path
    policy_index = int(extract_policy(datafile) or POLICIES.legacy)

    try:
        if policy_index:
            datadir += '-' + str(policy_index)
            ring = Ring(swift_dir, ring_name='object-' + str(policy_index))
        elif policy_index == 0:
            ring = Ring(swift_dir, ring_name='object')
    except IOError:
        # no such ring
        pass

    if policy_name:
        policy = POLICIES.get_by_name(policy_name)
        if policy:
            policy_index_for_name = policy.idx
            if (policy_index is not None and
               policy_index_for_name is not None and
               policy_index != policy_index_for_name):
                print 'Attention: Ring does not match policy!'
                print 'Double check your policy name!'
            if not ring and policy_index_for_name:
                ring = POLICIES.get_object_ring(policy_index_for_name,
                                                swift_dir)
                datadir = get_data_dir(policy_index_for_name)

    with open(datafile, 'rb') as fp:
        try:
            metadata = read_metadata(fp)
        except EOFError:
            print "Invalid metadata"
            raise InfoSystemExit()

        etag = metadata.pop('ETag', '')
        length = metadata.pop('Content-Length', '')
        path = metadata.get('name', '')
        print_obj_metadata(metadata)

        # Optional integrity check; it's useful, but slow.
        file_len = None
        if check_etag:
            h = md5()
            file_len = 0
            while True:
                data = fp.read(64 * 1024)
                if not data:
                    break
                h.update(data)
                file_len += len(data)
            h = h.hexdigest()
            if etag:
                if h == etag:
                    print 'ETag: %s (valid)' % etag
                else:
                    print ("ETag: %s doesn't match file hash of %s!" %
                           (etag, h))
            else:
                print 'ETag: Not found in metadata'
        else:
            print 'ETag: %s (not checked)' % etag
            file_len = os.fstat(fp.fileno()).st_size

        if length:
            if file_len == int(length):
                print 'Content-Length: %s (valid)' % length
            else:
                print ("Content-Length: %s doesn't match file length of %s"
                       % (length, file_len))
        else:
            print 'Content-Length: Not found in metadata'

        account, container, obj = path.split('/', 3)[1:]
        if ring:
            print_ring_locations(ring, datadir, account, container, obj,
                                 policy_index=policy_index)
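
The optional integrity check in print_obj streams the datafile through MD5 in 64 KiB chunks and compares the digest with the stored ETag. A standalone sketch of that check (pure Python; the datafile path in the usage comment is a placeholder):

from hashlib import md5

def compute_etag(path, chunk_size=64 * 1024):
    """Stream a file through MD5, as Swift does for whole-object ETags."""
    h = md5()
    file_len = 0
    with open(path, 'rb') as fp:
        while True:
            data = fp.read(chunk_size)
            if not data:
                break
            h.update(data)
            file_len += len(data)
    return h.hexdigest(), file_len

# etag, length = compute_etag('/srv/node/sdb1/objects/1015/.../1234.data')
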
Example #53
0
    def reap_container(self, account, account_partition, account_nodes,
                       container):
        """
        Deletes the data and the container itself for the given container. This
        will call :func:`reap_object` up to sqrt(self.concurrency) times
        concurrently for the objects in the container.

        If there is any exception while deleting a single object, the process
        will continue for any other objects in the container and the failed
        objects will be tried again the next time this function is called with
        the same parameters.

        If there is any exception while listing the objects for deletion, the
        process will stop (but will obviously be tried again the next time this
        function is called with the same parameters). This is a possibility
        since the listing comes from querying just the primary remote container
        server.

        Once all objects have been attempted to be deleted, the container
        itself will be attempted to be deleted by sending a delete request to
        all container nodes. The format of the delete request is such that each
        container server will update a corresponding account server, removing
        the container from the account's listing.

        This function returns nothing and should raise no exception but only
        update various self.stats_* values for what occurs.

        :param account: The name of the account for the container.
        :param account_partition: The partition for the account on the account
                                  ring.
        :param account_nodes: The primary node dicts for the account.
        :param container: The name of the container to delete.

        * See also: :func:`swift.common.ring.Ring.get_nodes` for a description
          of the account node dicts.
        """
        account_nodes = list(account_nodes)
        part, nodes = self.get_container_ring().get_nodes(account, container)
        node = nodes[-1]
        pool = GreenPool(size=self.object_concurrency)
        marker = ''
        while True:
            objects = None
            try:
                headers, objects = direct_get_container(
                    node, part, account, container,
                    marker=marker,
                    conn_timeout=self.conn_timeout,
                    response_timeout=self.node_timeout)
                self.stats_return_codes[2] = \
                    self.stats_return_codes.get(2, 0) + 1
                self.logger.increment('return_codes.2')
            except ClientException as err:
                if self.logger.getEffectiveLevel() <= DEBUG:
                    self.logger.exception(
                        _('Exception with %(ip)s:%(port)s/%(device)s'), node)
                self.stats_return_codes[err.http_status / 100] = \
                    self.stats_return_codes.get(err.http_status / 100, 0) + 1
                self.logger.increment(
                    'return_codes.%d' % (err.http_status / 100,))
            if not objects:
                break
            try:
                policy_index = headers.get('X-Backend-Storage-Policy-Index', 0)
                policy = POLICIES.get_by_index(policy_index)
                if not policy:
                    self.logger.error('ERROR: invalid storage policy index: %r'
                                      % policy_index)
                for obj in objects:
                    if isinstance(obj['name'], unicode):
                        obj['name'] = obj['name'].encode('utf8')
                    pool.spawn(self.reap_object, account, container, part,
                               nodes, obj['name'], policy_index)
                pool.waitall()
            except (Exception, Timeout):
                self.logger.exception(_('Exception with objects for container '
                                        '%(container)s for account %(account)s'
                                        ),
                                      {'container': container,
                                       'account': account})
            marker = objects[-1]['name']
            if marker == '':
                break
        successes = 0
        failures = 0
        timestamp = Timestamp(time())
        for node in nodes:
            anode = account_nodes.pop()
            try:
                direct_delete_container(
                    node, part, account, container,
                    conn_timeout=self.conn_timeout,
                    response_timeout=self.node_timeout,
                    headers={'X-Account-Host': '%(ip)s:%(port)s' % anode,
                             'X-Account-Partition': str(account_partition),
                             'X-Account-Device': anode['device'],
                             'X-Account-Override-Deleted': 'yes',
                             'X-Timestamp': timestamp.internal})
                successes += 1
                self.stats_return_codes[2] = \
                    self.stats_return_codes.get(2, 0) + 1
                self.logger.increment('return_codes.2')
            except ClientException as err:
                if self.logger.getEffectiveLevel() <= DEBUG:
                    self.logger.exception(
                        _('Exception with %(ip)s:%(port)s/%(device)s'), node)
                failures += 1
                self.logger.increment('containers_failures')
                self.stats_return_codes[err.http_status / 100] = \
                    self.stats_return_codes.get(err.http_status / 100, 0) + 1
                self.logger.increment(
                    'return_codes.%d' % (err.http_status / 100,))
        if successes > failures:
            self.stats_containers_deleted += 1
            self.logger.increment('containers_deleted')
        elif not successes:
            self.stats_containers_remaining += 1
            self.logger.increment('containers_remaining')
        else:
            self.stats_containers_possibly_remaining += 1
            self.logger.increment('containers_possibly_remaining')
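
The tally at the end of the snippet above reduces to a three-way classification: more successful container DELETEs than failures counts the container as deleted, zero successes counts it as still remaining, and anything in between as possibly remaining. A minimal, illustrative restatement of that rule (not part of the reaper itself):

def classify_container_result(successes, failures):
    """Map DELETE success/failure counts onto the reaper's three outcome
    buckets; this mirrors the if/elif/else tally in the snippet above."""
    if successes > failures:
        return 'containers_deleted'  # more nodes accepted the DELETE than failed
    elif not successes:
        return 'containers_remaining'  # no node accepted it at all
    return 'containers_possibly_remaining'  # mixed results; retried on a later pass

# For example: 2 successes / 1 failure  -> 'containers_deleted'
#              0 successes / 3 failures -> 'containers_remaining'
#              1 success   / 2 failures -> 'containers_possibly_remaining'
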
Example #54
0
    def reap_container(self, account, account_partition, account_nodes,
                       container):
        """
        Deletes the data and the container itself for the given container. This
        will call :func:`reap_object` up to sqrt(self.concurrency) times
        concurrently for the objects in the container.

        If there is any exception while deleting a single object, the process
        will continue for any other objects in the container and the failed
        objects will be tried again the next time this function is called with
        the same parameters.

        If there is any exception while listing the objects for deletion, the
        process will stop (but will be tried again the next time this function
        is called with the same parameters). This is a real possibility since
        the listing comes from querying just a single primary container
        server.

        Once deletion has been attempted for every object, a delete request is
        sent to all container nodes for the container itself. The delete
        request carries headers that direct each container server to update a
        corresponding account server, removing the container from the
        account's listing.

        This function returns nothing and should raise no exception; it only
        updates the various self.stats_* values to record what occurred.

        :param account: The name of the account for the container.
        :param account_partition: The partition for the account on the account
                                  ring.
        :param account_nodes: The primary node dicts for the account.
        :param container: The name of the container to delete.

        * See also: :func:`swift.common.ring.Ring.get_nodes` for a description
          of the account node dicts.
        """
        account_nodes = list(account_nodes)
        part, nodes = self.get_container_ring().get_nodes(account, container)
        node = nodes[-1]
        pool = GreenPool(size=self.object_concurrency)
        marker = ''
        while True:
            objects = None
            try:
                headers, objects = direct_get_container(
                    node,
                    part,
                    account,
                    container,
                    marker=marker,
                    conn_timeout=self.conn_timeout,
                    response_timeout=self.node_timeout)
                self.stats_return_codes[2] = \
                    self.stats_return_codes.get(2, 0) + 1
                self.logger.increment('return_codes.2')
            except ClientException as err:
                if self.logger.getEffectiveLevel() <= DEBUG:
                    self.logger.exception(
                        _('Exception with %(ip)s:%(port)s/%(device)s'), node)
                self.stats_return_codes[err.http_status // 100] = \
                    self.stats_return_codes.get(err.http_status // 100, 0) + 1
                self.logger.increment('return_codes.%d' %
                                      (err.http_status // 100, ))
            except (Timeout, socket.error) as err:
                self.logger.error(
                    _('Timeout Exception with %(ip)s:%(port)s/%(device)s'),
                    node)
            if not objects:
                break
            try:
                policy_index = headers.get('X-Backend-Storage-Policy-Index', 0)
                policy = POLICIES.get_by_index(policy_index)
                if not policy:
                    self.logger.error(
                        'ERROR: invalid storage policy index: %r' %
                        policy_index)
                for obj in objects:
                    if isinstance(obj['name'], six.text_type):
                        obj['name'] = obj['name'].encode('utf8')
                    pool.spawn(self.reap_object, account, container, part,
                               nodes, obj['name'], policy_index)
                pool.waitall()
            except (Exception, Timeout):
                self.logger.exception(
                    _('Exception with objects for container '
                      '%(container)s for account %(account)s'), {
                          'container': container,
                          'account': account
                      })
            marker = objects[-1]['name']
            if marker == '':
                break
        successes = 0
        failures = 0
        timestamp = Timestamp.now()
        for node in nodes:
            anode = account_nodes.pop()
            try:
                direct_delete_container(node,
                                        part,
                                        account,
                                        container,
                                        conn_timeout=self.conn_timeout,
                                        response_timeout=self.node_timeout,
                                        headers={
                                            'X-Account-Host':
                                            '%(ip)s:%(port)s' % anode,
                                            'X-Account-Partition':
                                            str(account_partition),
                                            'X-Account-Device':
                                            anode['device'],
                                            'X-Account-Override-Deleted':
                                            'yes',
                                            'X-Timestamp':
                                            timestamp.internal
                                        })
                successes += 1
                self.stats_return_codes[2] = \
                    self.stats_return_codes.get(2, 0) + 1
                self.logger.increment('return_codes.2')
            except ClientException as err:
                if self.logger.getEffectiveLevel() <= DEBUG:
                    self.logger.exception(
                        _('Exception with %(ip)s:%(port)s/%(device)s'), node)
                failures += 1
                self.logger.increment('containers_failures')
                self.stats_return_codes[err.http_status // 100] = \
                    self.stats_return_codes.get(err.http_status // 100, 0) + 1
                self.logger.increment('return_codes.%d' %
                                      (err.http_status // 100, ))
            except (Timeout, socket.error) as err:
                self.logger.error(
                    _('Timeout Exception with %(ip)s:%(port)s/%(device)s'),
                    node)
                failures += 1
                self.logger.increment('containers_failures')
        if successes > failures:
            self.stats_containers_deleted += 1
            self.logger.increment('containers_deleted')
        elif not successes:
            self.stats_containers_remaining += 1
            self.logger.increment('containers_remaining')
        else:
            self.stats_containers_possibly_remaining += 1
            self.logger.increment('containers_possibly_remaining')
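
Both versions of reap_container above build the same set of X-Account-* override headers, which is what lets each container server update the right account server when it handles the DELETE. The header construction can be pulled out into a small helper like the following; this is an illustrative sketch, not part of Swift, and it assumes anode is an account node dict with 'ip', 'port' and 'device' keys and that timestamp_internal is an already-formatted internal timestamp string (e.g. Timestamp.now().internal):

def account_override_headers(anode, account_partition, timestamp_internal):
    """Build the headers the reaper attaches to a container DELETE so the
    container server knows which account server's listing to update."""
    return {
        'X-Account-Host': '%(ip)s:%(port)s' % anode,
        'X-Account-Partition': str(account_partition),
        'X-Account-Device': anode['device'],
        'X-Account-Override-Deleted': 'yes',
        'X-Timestamp': timestamp_internal,
    }

# Usage in the loops above would look roughly like:
#   headers = account_override_headers(account_nodes.pop(),
#                                      account_partition,
#                                      Timestamp.now().internal)
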
    def test_merge_storage_policy_index(self):
        # generic split brain
        self.brain.stop_primary_half()
        self.brain.put_container()
        self.brain.start_primary_half()
        self.brain.stop_handoff_half()
        self.brain.put_container()
        self.brain.put_object(headers={'x-object-meta-test': 'custom-meta'},
                              contents='VERIFY')
        self.brain.start_handoff_half()
        # make sure we have some manner of split brain
        container_part, container_nodes = self.container_ring.get_nodes(
            self.account, self.container_name)
        head_responses = []
        for node in container_nodes:
            metadata = direct_client.direct_head_container(
                node, container_part, self.account, self.container_name)
            head_responses.append((node, metadata))
        found_policy_indexes = \
            set(metadata['X-Backend-Storage-Policy-Index'] for
                node, metadata in head_responses)
        self.assertTrue(
            len(found_policy_indexes) > 1,
            'primary nodes did not disagree about policy index %r' %
            head_responses)
        # find our object
        orig_policy_index = None
        for policy_index in found_policy_indexes:
            object_ring = POLICIES.get_object_ring(policy_index, '/etc/swift')
            part, nodes = object_ring.get_nodes(
                self.account, self.container_name, self.object_name)
            for node in nodes:
                try:
                    direct_client.direct_head_object(
                        node, part, self.account, self.container_name,
                        self.object_name,
                        headers={'X-Backend-Storage-Policy-Index':
                                 policy_index})
                except direct_client.ClientException as err:
                    continue
                orig_policy_index = policy_index
                break
            if orig_policy_index is not None:
                break
        else:
            self.fail('Unable to find /%s/%s/%s in %r' % (
                self.account, self.container_name, self.object_name,
                found_policy_indexes))
        self.get_to_final_state()
        Manager(['container-reconciler']).once()
        # validate containers
        head_responses = []
        for node in container_nodes:
            metadata = direct_client.direct_head_container(
                node, container_part, self.account, self.container_name)
            head_responses.append((node, metadata))
        found_policy_indexes = \
            set(metadata['X-Backend-Storage-Policy-Index'] for
                node, metadata in head_responses)
        self.assertTrue(len(found_policy_indexes) == 1,
                        'primary nodes disagree about policy index %r' %
                        head_responses)

        expected_policy_index = found_policy_indexes.pop()
        self.assertNotEqual(orig_policy_index, expected_policy_index)
        # validate object placement
        orig_policy_ring = POLICIES.get_object_ring(orig_policy_index,
                                                    '/etc/swift')
        for node in orig_policy_ring.devs:
            try:
                direct_client.direct_head_object(
                    node, part, self.account, self.container_name,
                    self.object_name, headers={
                        'X-Backend-Storage-Policy-Index': orig_policy_index})
            except direct_client.ClientException as err:
                if err.http_status == HTTP_NOT_FOUND:
                    continue
                raise
            else:
                self.fail('Found /%s/%s/%s in %s' % (
                    self.account, self.container_name, self.object_name,
                    orig_policy_index))
        # verify that the object data read by external client is correct
        headers, data = self._get_object_patiently(expected_policy_index)
        self.assertEqual('VERIFY', data)
        self.assertEqual('custom-meta', headers['x-object-meta-test'])
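
The split-brain detection in the test above boils down to HEADing every primary container node and collecting the distinct X-Backend-Storage-Policy-Index values they report: more than one distinct value means the primaries disagree, exactly one means the reconciler has converged them. A distilled, self-contained version of that check, where head_container is a hypothetical callable standing in for direct_client.direct_head_container with the partition and names already bound:

def find_policy_indexes(container_nodes, head_container):
    """Return the set of storage policy indexes reported by the given
    container nodes; head_container(node) is assumed to return that
    node's container metadata as a dict of headers."""
    return set(head_container(node)['X-Backend-Storage-Policy-Index']
               for node in container_nodes)

# In the test above, len(result) > 1 asserts the split brain exists before
# the reconciler runs, and len(result) == 1 asserts convergence afterwards.
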