def get_name_and_placement(request, minsegs=1, maxsegs=None,
                           rest_with_last=False):
    """
    Utility function to split and validate the request path and storage
    policy.  The storage policy index is extracted from the headers of the
    request and converted to a StoragePolicy instance.  The remaining args
    are passed through to :meth:`split_and_validate_path`.

    :returns: a list, result of :meth:`split_and_validate_path` with the
              BaseStoragePolicy instance appended on the end
    :raises HTTPServiceUnavailable: if the path is invalid or no policy
                                    exists with the extracted policy_index.
    """
    policy_index = request.headers.get('X-Backend-Storage-Policy-Index')
    policy = POLICIES.get_by_index(policy_index)
    if not policy:
        raise HTTPServiceUnavailable(
            body=_("No policy with index %s") % policy_index,
            request=request, content_type='text/plain')
    results = split_and_validate_path(request, minsegs=minsegs,
                                      maxsegs=maxsegs,
                                      rest_with_last=rest_with_last)
    results.append(policy)
    return results

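# A minimal usage sketch (an assumption, not part of the snippet above):
# with python-swift importable, a backend-style object request resolves to
# its path segments plus the policy. Without a swift.conf, POLICIES still
# contains a lone default policy at index 0, so this runs standalone.
from swift.common.swob import Request
from swift.common.request_helpers import get_name_and_placement

req = Request.blank('/sda1/0/a/c/o',
                    headers={'X-Backend-Storage-Policy-Index': '0'})
device, partition, account, container, obj, policy = \
    get_name_and_placement(req, 5, 5, True)
print(device, partition, account, container, obj, policy.name)
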
def get_and_validate_policy_index(self, req):
    """
    Validate that the index supplied maps to a policy.

    :returns: policy index from request, or None if not present
    :raises HTTPBadRequest: if the supplied index is bogus
    """
    policy_index = req.headers.get('X-Backend-Storage-Policy-Index', None)
    if policy_index is None:
        return None
    try:
        policy_index = int(policy_index)
    except ValueError:
        raise HTTPBadRequest(
            request=req, content_type="text/plain",
            body=("Invalid X-Backend-Storage-Policy-Index %r"
                  % policy_index))
    policy = POLICIES.get_by_index(policy_index)
    if policy is None:
        raise HTTPBadRequest(
            request=req, content_type="text/plain",
            body=("Invalid X-Backend-Storage-Policy-Index %r"
                  % policy_index))
    return int(policy)

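# Hypothetical demonstration of the lookup behind the validation above
# (assumes python-swift is importable; with no swift.conf a lone default
# policy 0 exists): get_by_index() accepts header-style values, so None
# falls back to policy 0 and an unknown index yields None, i.e. a 400
# in the handler above.
from swift.common.storage_policy import POLICIES

print(POLICIES.get_by_index(None).idx)   # 0 -- missing header means policy 0
print(POLICIES.get_by_index('0').name)   # string indexes are accepted
print(POLICIES.get_by_index(9999))       # None -- no such policy
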
def DELETE(self, req):
    """HTTP DELETE request handler."""
    account_partition, accounts, container_count = \
        self.account_info(self.account_name, req)
    if not accounts:
        return HTTPNotFound(request=req)
    policy_index = self._convert_policy_to_index(req)
    if policy_index is None:
        policy_index = int(POLICIES.default)
    cloud_ring = CloudRing(self.container_name,
                           POLICIES.get_by_index(policy_index))
    container_partition, containers = self.app.container_ring.get_nodes(
        self.account_name, self.container_name)
    headers = self._backend_requests(req, len(containers),
                                     account_partition, accounts)
    clear_info_cache(self.app, req.environ,
                     self.account_name, self.container_name)
    resp = self.make_requests(
        req, self.app.container_ring, container_partition, 'DELETE',
        req.swift_entity_path, headers)
    # Indicates no server had the container
    if resp.status_int == HTTP_ACCEPTED:
        return HTTPNotFound(request=req)
    return_flag, _info = cloud_ring.delete_containers()
    if not return_flag:
        msg = 'Failed:' + str(_info)
        raise DELETECloudContainerException(msg)
    return resp

def get_controller(self, req):
    """
    Get the controller to handle a request.

    :param req: the request
    :returns: tuple of (controller class, path dictionary)
    :raises ValueError: (thrown by split_path) if given invalid path
    """
    if req.path == '/info':
        d = dict(version=None,
                 expose_info=self.expose_info,
                 disallowed_sections=self.disallowed_sections,
                 admin_key=self.admin_key)
        return InfoController, d

    # Split the request path,
    # e.g. http://127.0.0.1:8080/auth/v1.0/account/container
    version, account, container, obj = split_path(req.path, 1, 4, True)
    d = dict(version=version,
             account_name=account,
             container_name=container,
             object_name=obj)
    # Raise if an account is given but the API version is invalid
    if account and not valid_api_version(version):
        raise APIVersionError('Invalid path')
    # If account, container and object are all present, this is an object
    # operation, so return the object controller
    if obj and container and account:
        # info = {"status": ..., "sync_key": null, "write_acl": null,
        #         "object_count": 1, "storage_policy": 0, "versions": null,
        #         "bytes": ..., "meta": {}, "sharding_state": ...,
        #         "cors": {"allow_origin": null, "expose_headers": null,
        #                  "max_age": null},
        #         "sysmeta": {}, "read_acl": null}
        info = get_container_info(req.environ, self)
        policy_index = req.headers.get('X-Backend-Storage-Policy-Index',
                                       info['storage_policy'])
        policy = POLICIES.get_by_index(policy_index)
        if not policy:
            # This indicates that a new policy has been created,
            # with rings, deployed, released (i.e. deprecated =
            # False), used by a client to create a container via
            # another proxy that was restarted after the policy
            # was released, and is now cached - all before this
            # worker was HUPed to stop accepting new
            # connections.  There should never be an "unknown"
            # index - but when there is - it's probably operator
            # error and hopefully temporary.
            raise HTTPServiceUnavailable('Unknown Storage Policy')
        # obj_controller_router[policy] is equivalent to calling
        # __getitem__(policy) on the router; the policy selects between
        # an ECObjectController and a ReplicatedObjectController
        return self.obj_controller_router[policy], d
    # If account and container are present, this is a container
    # operation, so return the container controller
    elif container and account:
        return ContainerController, d
    # If only the account is present, this is an account operation,
    # so return the account controller
    elif account and not container and not obj:
        return AccountController, d
    return None, d

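# Hypothetical demo of the path splitting used above (split_path lives in
# swift.common.utils and is importable without a configured cluster):
from swift.common.utils import split_path

print(split_path('/v1/AUTH_test/pics/cat.jpg', 1, 4, True))
# -> ['v1', 'AUTH_test', 'pics', 'cat.jpg']
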
def test_defaults(self):
    self.assertTrue(len(POLICIES) > 0)

    # test class functions
    default_policy = POLICIES.default
    self.assertTrue(default_policy.is_default)
    zero_policy = POLICIES.get_by_index(0)
    self.assertTrue(zero_policy.idx == 0)
    zero_policy_by_name = POLICIES.get_by_name(zero_policy.name)
    self.assertTrue(zero_policy_by_name.idx == 0)

def test_defaults(self):
    self.assertTrue(len(POLICIES) > 0)

    # test class functions
    default_policy = POLICIES.default
    self.assert_(default_policy.is_default)
    zero_policy = POLICIES.get_by_index(0)
    self.assert_(zero_policy.idx == 0)
    zero_policy_by_name = POLICIES.get_by_name(zero_policy.name)
    self.assert_(zero_policy_by_name.idx == 0)

def PUT(self, req):
    """HTTP PUT request handler."""
    error_response = \
        self.clean_acls(req) or check_metadata(req, 'container')
    if error_response:
        return error_response
    policy_index = self._convert_policy_to_index(req)
    if policy_index is None:
        policy_index = int(POLICIES.default)
    if not req.environ.get('swift_owner'):
        for key in self.app.swift_owner_headers:
            req.headers.pop(key, None)
    if len(self.container_name) > constraints.MAX_CONTAINER_NAME_LENGTH:
        resp = HTTPBadRequest(request=req)
        resp.body = 'Container name length of %d longer than %d' % \
            (len(self.container_name),
             constraints.MAX_CONTAINER_NAME_LENGTH)
        return resp
    account_partition, accounts, container_count = \
        self.account_info(self.account_name, req)
    if not accounts and self.app.account_autocreate:
        self.autocreate_account(req, self.account_name)
        account_partition, accounts, container_count = \
            self.account_info(self.account_name, req)
    if not accounts:
        return HTTPNotFound(request=req)
    if self.app.max_containers_per_account > 0 and \
            container_count >= self.app.max_containers_per_account and \
            self.account_name not in self.app.max_containers_whitelist:
        container_info = \
            self.container_info(self.account_name, self.container_name,
                                req)
        if not is_success(container_info.get('status')):
            resp = HTTPForbidden(request=req)
            resp.body = 'Reached container limit of %s' % \
                self.app.max_containers_per_account
            return resp
    container_partition, containers = self.app.container_ring.get_nodes(
        self.account_name, self.container_name)
    headers = self._backend_requests(req, len(containers),
                                     account_partition, accounts,
                                     policy_index)
    clear_info_cache(self.app, req.environ,
                     self.account_name, self.container_name)
    resp = self.make_requests(
        req, self.app.container_ring,
        container_partition, 'PUT', req.swift_entity_path, headers)
    cloud_ring = CloudRing(self.container_name,
                           POLICIES.get_by_index(policy_index))
    return_flag, _info = cloud_ring.create_containers()
    if not return_flag:
        msg = 'Failed:' + str(_info)
        raise PUTCloudContainerException(msg)
    return resp

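# Hypothetical demo of the default-policy fallback used in the PUT/DELETE
# handlers above (assumes python-swift is importable): int() on a policy
# object yields its index, which is what gets stored in backend headers.
from swift.common.storage_policy import POLICIES

print(int(POLICIES.default))   # e.g. 0
print(POLICIES.default.name)   # e.g. 'Policy-0'
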
def test_obj_put_legacy_updates(self):
    ts = (normalize_timestamp(t) for t in itertools.count(int(time())))
    policy = POLICIES.get_by_index(0)
    # setup updater
    conf = {
        'devices': self.devices_dir,
        'mount_check': 'false',
        'swift_dir': self.testdir,
    }
    async_dir = os.path.join(self.sda1, get_async_dir(policy))
    os.mkdir(async_dir)

    account, container, obj = 'a', 'c', 'o'
    # write an async
    for op in ('PUT', 'DELETE'):
        self.logger._clear()
        daemon = object_updater.ObjectUpdater(conf, logger=self.logger)
        dfmanager = DiskFileManager(conf, daemon.logger)
        # don't include storage-policy-index in headers_out pickle
        headers_out = HeaderKeyDict({
            'x-size': 0,
            'x-content-type': 'text/plain',
            'x-etag': 'd41d8cd98f00b204e9800998ecf8427e',
            'x-timestamp': next(ts),
        })
        data = {'op': op, 'account': account, 'container': container,
                'obj': obj, 'headers': headers_out}
        dfmanager.pickle_async_update(self.sda1, account, container, obj,
                                      data, next(ts), policy)

        request_log = []

        def capture(*args, **kwargs):
            request_log.append((args, kwargs))

        # run once
        fake_status_codes = [200, 200, 200]
        with mocked_http_conn(*fake_status_codes, give_connect=capture):
            daemon.run_once()
        self.assertEqual(len(fake_status_codes), len(request_log))
        for request_args, request_kwargs in request_log:
            ip, part, method, path, headers, qs, ssl = request_args
            self.assertEqual(method, op)
            self.assertEqual(headers['X-Backend-Storage-Policy-Index'],
                             str(int(policy)))
        self.assertEqual(daemon.logger.get_increment_counts(),
                         {'successes': 1, 'unlinks': 1,
                          'async_pendings': 1})

def _store_object(self, req, data_source, nodes, partition,
                  outgoing_headers):
    """
    Store a replicated object.

    This method is responsible for establishing connections with
    storage nodes and sending the object to each one of those nodes.
    After sending the data, the "best" response will be returned,
    based on the statuses from all connections.
    """
    policy_idx = req.headers.get('X-Backend-Storage-Policy-Index')
    policy = POLICIES.get_by_index(policy_idx)
    if not nodes:
        return HTTPNotFound()

    # RFC2616:8.2.3 disallows 100-continue without a body
    if (req.content_length > 0) or req.is_chunked:
        expect = True
    else:
        expect = False
    conns = self._get_put_connections(req, nodes, partition,
                                      outgoing_headers, policy, expect)

    try:
        # check that a minimum number of connections were established and
        # meet all the correct conditions set in the request
        self._check_failure_put_connections(conns, req, nodes)

        # transfer data
        self._transfer_data(req, data_source, conns, nodes)

        # get responses
        statuses, reasons, bodies, etags = self._get_put_responses(
            req, conns, nodes)
    except HTTPException as resp:
        return resp
    finally:
        for conn in conns:
            conn.close()

    if len(etags) > 1:
        self.app.logger.error(
            _('Object servers returned %s mismatched etags'), len(etags))
        return HTTPServerError(request=req)
    etag = etags.pop() if len(etags) else None
    resp = self.best_response(req, statuses, reasons, bodies,
                              _('Object PUT'), etag=etag)
    resp.last_modified = math.ceil(
        float(Timestamp(req.headers['X-Timestamp'])))
    return resp

def get_controller(self, req):
    """
    Get the controller to handle a request.

    :param req: the request
    :returns: tuple of (controller class, path dictionary)
    :raises: ValueError (thrown by split_path) if given invalid path
    """
    print('req.path', req.path)
    if req.path == '/info':
        d = dict(version=None,
                 expose_info=self.expose_info,
                 disallowed_sections=self.disallowed_sections,
                 admin_key=self.admin_key)
        print('d', d)
        return InfoController, d

    version, account, container, obj = split_path(req.path, 1, 4, True)
    d = dict(version=version,
             account_name=account,
             container_name=container,
             object_name=obj)
    print('d', d)
    # print('valid_api_version(version)', valid_api_version(version))
    if account and not valid_api_version(version):
        raise APIVersionError('Invalid path')
    if obj and container and account:
        info = get_container_info(req.environ, self)
        print('info of obj,Acc,Con', info)
        policy_index = req.headers.get('X-Backend-Storage-Policy-Index',
                                       info['storage_policy'])
        print('policy_index', policy_index)
        policy = POLICIES.get_by_index(policy_index)
        print('policy', policy)
        if not policy:
            # This indicates that a new policy has been created,
            # with rings, deployed, released (i.e. deprecated =
            # False), used by a client to create a container via
            # another proxy that was restarted after the policy
            # was released, and is now cached - all before this
            # worker was HUPed to stop accepting new
            # connections.  There should never be an "unknown"
            # index - but when there is - it's probably operator
            # error and hopefully temporary.
            raise HTTPServiceUnavailable('Unknown Storage Policy')
        return self.obj_controller_router[policy], d
    elif container and account:
        print('container & account, returning containercontroller',
              container, account)
        return ContainerController, d
    elif account and not container and not obj:
        print('account, returning accountcontroller', account)
        return AccountController, d
    return None, d

def get_controller(self, req):
    """
    Get the controller to handle a request.

    :param req: the request
    :returns: tuple of (controller class, path dictionary)
    :raises: ValueError (thrown by split_path) if given invalid path
    """
    if req.path == '/info':
        d = dict(version=None,
                 expose_info=self.expose_info,
                 disallowed_sections=self.disallowed_sections,
                 admin_key=self.admin_key)
        return InfoController, d

    # Split the path info
    version, account, container, obj = split_path(req.path, 1, 4, True)
    # Build the path dictionary containing version, account, container
    # and object for the return value
    d = dict(version=version,
             account_name=account,
             container_name=container,
             object_name=obj)
    if account and not valid_api_version(version):
        raise APIVersionError('Invalid path')
    # Object operation
    if obj and container and account:
        # Fetch the container info
        info = get_container_info(req.environ, self)
        policy_index = req.headers.get('X-Backend-Storage-Policy-Index',
                                       info['storage_policy'])
        # Look up the storage policy object by index
        policy = POLICIES.get_by_index(policy_index)
        if not policy:
            # This indicates that a new policy has been created,
            # with rings, deployed, released (i.e. deprecated =
            # False), used by a client to create a container via
            # another proxy that was restarted after the policy
            # was released, and is now cached - all before this
            # worker was HUPed to stop accepting new
            # connections.  There should never be an "unknown"
            # index - but when there is - it's probably operator
            # error and hopefully temporary.
            raise HTTPServiceUnavailable('Unknown Storage Policy')
        # Return the object controller along with the path dictionary
        return self.obj_controller_router[policy], d
    # Container operation: return the container controller and the
    # path dictionary
    elif container and account:
        return ContainerController, d
    # Account operation: return the account controller and the
    # path dictionary
    elif account and not container and not obj:
        return AccountController, d
    return None, d

def put_container(self, policy_index=None):
    """
    put container with next storage policy
    """
    policy = next(self.policies)
    if policy_index is not None:
        policy = POLICIES.get_by_index(int(policy_index))
        if not policy:
            raise ValueError('Unknown policy with index %s' % policy_index)
    headers = {'X-Storage-Policy': policy.name}
    client.put_container(self.url, self.token, self.container_name,
                         headers=headers)

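# A minimal sketch (assumes a reachable Swift cluster, python-swiftclient,
# and a policy at index 0; STORAGE_URL and TOKEN are placeholders for
# values your auth system would supply): pin a new container to a policy
# by name, as the helper above does.
from swiftclient import client
from swift.common.storage_policy import POLICIES

STORAGE_URL = 'http://127.0.0.1:8080/v1/AUTH_test'  # placeholder
TOKEN = 'AUTH_tk...'                                # placeholder

policy = POLICIES.get_by_index(0)
client.put_container(STORAGE_URL, TOKEN, 'policy-pinned-container',
                     headers={'X-Storage-Policy': policy.name})
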
def check_config(self):
    """
    Check the configuration for possible errors
    """
    for policy_idx, options in self._override_options.items():
        policy = (None if policy_idx is None
                  else POLICIES.get_by_index(policy_idx))
        if options.read_affinity and options.sorting_method != 'affinity':
            self.logger.warning(
                _("sorting_method is set to '%(method)s', not 'affinity'; "
                  "%(label)s read_affinity setting will have no effect."),
                {'label': _label_for_policy(policy),
                 'method': options.sorting_method})

def test_obj_put_legacy_updates(self):
    ts = (normalize_timestamp(t) for t in itertools.count(int(time())))
    policy = POLICIES.get_by_index(0)
    # setup updater
    conf = {
        'devices': self.devices_dir,
        'mount_check': 'false',
        'swift_dir': self.testdir,
    }
    async_dir = os.path.join(self.sda1, get_async_dir(policy))
    os.mkdir(async_dir)

    account, container, obj = 'a', 'c', 'o'
    # write an async
    for op in ('PUT', 'DELETE'):
        self.logger._clear()
        daemon = object_updater.ObjectUpdater(conf, logger=self.logger)
        dfmanager = DiskFileManager(conf, daemon.logger)
        # don't include storage-policy-index in headers_out pickle
        headers_out = swob.HeaderKeyDict({
            'x-size': 0,
            'x-content-type': 'text/plain',
            'x-etag': 'd41d8cd98f00b204e9800998ecf8427e',
            'x-timestamp': next(ts),
        })
        data = {'op': op, 'account': account, 'container': container,
                'obj': obj, 'headers': headers_out}
        dfmanager.pickle_async_update(self.sda1, account, container, obj,
                                      data, next(ts), policy)

        request_log = []

        def capture(*args, **kwargs):
            request_log.append((args, kwargs))

        # run once
        fake_status_codes = [200, 200, 200]
        with mocked_http_conn(*fake_status_codes, give_connect=capture):
            daemon.run_once()
        self.assertEqual(len(fake_status_codes), len(request_log))
        for request_args, request_kwargs in request_log:
            ip, part, method, path, headers, qs, ssl = request_args
            self.assertEqual(method, op)
            self.assertEqual(headers['X-Backend-Storage-Policy-Index'],
                             str(int(policy)))
        self.assertEqual(daemon.logger.get_increment_counts(),
                         {'successes': 1, 'unlinks': 1,
                          'async_pendings': 1})

def statsd_metric_name_policy(self, req, status_int, method, policy_index):
    if policy_index is None:
        return None
    stat_type = self.get_metric_name_type(req)
    if stat_type == 'object':
        stat_method = method if method in self.valid_methods \
            else 'BAD_METHOD'
        # The policy may not exist
        policy = POLICIES.get_by_index(policy_index)
        if policy:
            return '.'.join((stat_type, 'policy', str(policy_index),
                             stat_method, str(status_int)))
        else:
            return None
    else:
        return None

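# Hypothetical illustration of the metric name produced above for a GET on
# storage policy 0 that returned 200 (plain string assembly, no Swift
# installation needed):
print('.'.join(('object', 'policy', '0', 'GET', '200')))
# -> object.policy.0.GET.200
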
def __call__(self, env, start_response):
    request = Request(env)
    if request.method == 'PUT':
        try:
            version, account, container, obj = \
                request.split_path(1, 4, True)
        except ValueError:
            return self.app(env, start_response)

        # check container creation request
        if account and container and not obj:
            policy_name = request.headers.get('X-Storage-Policy', '')
            default_policy = POLICIES.default.name
            if (policy_name in self.policies) or \
                    (policy_name == '' and
                     default_policy in self.policies):
                container = unquote(container)
                if len(container) > \
                        constraints.SOF_MAX_CONTAINER_NAME_LENGTH:
                    resp = HTTPBadRequest(request=request)
                    resp.body = \
                        'Container name length of %d longer than %d' % \
                        (len(container),
                         constraints.SOF_MAX_CONTAINER_NAME_LENGTH)
                    return resp(env, start_response)
        elif account and container and obj:
            # check object creation request
            obj = unquote(obj)
            container_info = get_container_info(env, self.app)
            policy = POLICIES.get_by_index(
                container_info['storage_policy'])
            if policy.name in self.policies:
                error_response = sof_check_object_creation(request, obj)
                if error_response:
                    self.logger.warn("returning error: %s",
                                     error_response)
                    return error_response(env, start_response)
    return self.app(env, start_response)

def test_obj_put_legacy_updates(self):
    ts = (normalize_timestamp(t) for t in itertools.count(int(time())))
    policy = POLICIES.get_by_index(0)
    # setup updater
    conf = {"devices": self.devices_dir,
            "mount_check": "false",
            "swift_dir": self.testdir}
    async_dir = os.path.join(self.sda1, get_async_dir(policy.idx))
    os.mkdir(async_dir)

    account, container, obj = "a", "c", "o"
    # write an async
    for op in ("PUT", "DELETE"):
        self.logger._clear()
        daemon = object_updater.ObjectUpdater(conf, logger=self.logger)
        dfmanager = DiskFileManager(conf, daemon.logger)
        # don't include storage-policy-index in headers_out pickle
        headers_out = swob.HeaderKeyDict({
            "x-size": 0,
            "x-content-type": "text/plain",
            "x-etag": "d41d8cd98f00b204e9800998ecf8427e",
            "x-timestamp": ts.next(),
        })
        data = {"op": op, "account": account, "container": container,
                "obj": obj, "headers": headers_out}
        dfmanager.pickle_async_update(self.sda1, account, container, obj,
                                      data, ts.next(), policy.idx)

        request_log = []

        def capture(*args, **kwargs):
            request_log.append((args, kwargs))

        # run once
        fake_status_codes = [200, 200, 200]
        with mocked_http_conn(*fake_status_codes, give_connect=capture):
            daemon.run_once()
        self.assertEqual(len(fake_status_codes), len(request_log))
        for request_args, request_kwargs in request_log:
            ip, part, method, path, headers, qs, ssl = request_args
            self.assertEqual(method, op)
            self.assertEqual(headers["X-Backend-Storage-Policy-Index"],
                             str(policy.idx))
        self.assertEqual(daemon.logger.get_increment_counts(),
                         {"successes": 1, "unlinks": 1,
                          "async_pendings": 1})

def get_response_headers(broker):
    info = broker.get_info()
    resp_headers = {
        'X-Account-Container-Count': info['container_count'],
        'X-Account-Object-Count': info['object_count'],
        'X-Account-Bytes-Used': info['bytes_used'],
        'X-Timestamp': Timestamp(info['created_at']).normal,
        'X-PUT-Timestamp': Timestamp(info['put_timestamp']).normal}
    policy_stats = broker.get_policy_stats()
    for policy_idx, stats in policy_stats.items():
        policy = POLICIES.get_by_index(policy_idx)
        if not policy:
            continue
        header_prefix = 'X-Account-Storage-Policy-%s-%%s' % policy.name
        for key, value in stats.items():
            header_name = header_prefix % key.replace('_', '-')
            resp_headers[header_name] = value
    resp_headers.update((key, value)
                        for key, (value, timestamp) in
                        broker.metadata.items() if value != '')
    return resp_headers

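# Hypothetical example of the per-policy header names built above for a
# policy named 'gold' (the doubled %% defers the second substitution until
# the stat key is known):
header_prefix = 'X-Account-Storage-Policy-%s-%%s' % 'gold'
print(header_prefix % 'object_count'.replace('_', '-'))
# -> X-Account-Storage-Policy-gold-object-count
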
def reap_container(self, account, account_partition, account_nodes,
                   container):
    """
    Deletes the data and the container itself for the given container.
    This will call :func:`reap_object` up to sqrt(self.concurrency)
    times concurrently for the objects in the container.

    If there is any exception while deleting a single object, the
    process will continue for any other objects in the container and
    the failed objects will be tried again the next time this function
    is called with the same parameters.

    If there is any exception while listing the objects for deletion,
    the process will stop (but will obviously be tried again the next
    time this function is called with the same parameters). This is a
    possibility since the listing comes from querying just the primary
    remote container server.

    Once all objects have been attempted to be deleted, the container
    itself will be attempted to be deleted by sending a delete request
    to all container nodes. The format of the delete request is such
    that each container server will update a corresponding account
    server, removing the container from the account's listing.

    This function returns nothing and should raise no exception but
    only update various self.stats_* values for what occurs.

    :param account: The name of the account for the container.
    :param account_partition: The partition for the account on the
                              account ring.
    :param account_nodes: The primary node dicts for the account.
    :param container: The name of the container to delete.

    * See also: :func:`swift.common.ring.Ring.get_nodes` for a
      description of the account node dicts.
    """
    account_nodes = list(account_nodes)
    part, nodes = self.get_container_ring().get_nodes(account, container)
    node = nodes[-1]
    pool = GreenPool(size=self.object_concurrency)
    marker = ''
    while True:
        objects = None
        try:
            headers, objects = direct_get_container(
                node, part, account, container,
                marker=marker,
                conn_timeout=self.conn_timeout,
                response_timeout=self.node_timeout)
            self.stats_return_codes[2] = \
                self.stats_return_codes.get(2, 0) + 1
            self.logger.increment('return_codes.2')
        except ClientException as err:
            if self.logger.getEffectiveLevel() <= DEBUG:
                self.logger.exception(
                    _('Exception with %(ip)s:%(port)s/%(device)s'), node)
            self.stats_return_codes[err.http_status // 100] = \
                self.stats_return_codes.get(err.http_status // 100, 0) + 1
            self.logger.increment(
                'return_codes.%d' % (err.http_status // 100,))
        except (Timeout, socket.error) as err:
            self.logger.error(
                _('Timeout Exception with %(ip)s:%(port)s/%(device)s'),
                node)
        if not objects:
            break
        try:
            policy_index = headers.get('X-Backend-Storage-Policy-Index', 0)
            policy = POLICIES.get_by_index(policy_index)
            if not policy:
                self.logger.error(
                    'ERROR: invalid storage policy index: %r'
                    % policy_index)
            for obj in objects:
                if isinstance(obj['name'], six.text_type):
                    obj['name'] = obj['name'].encode('utf8')
                pool.spawn(self.reap_object, account, container, part,
                           nodes, obj['name'], policy_index)
            pool.waitall()
        except (Exception, Timeout):
            self.logger.exception(
                _('Exception with objects for container '
                  '%(container)s for account %(account)s'),
                {'container': container, 'account': account})
        marker = objects[-1]['name']
        if marker == '':
            break
    successes = 0
    failures = 0
    timestamp = Timestamp.now()
    for node in nodes:
        anode = account_nodes.pop()
        try:
            direct_delete_container(
                node, part, account, container,
                conn_timeout=self.conn_timeout,
                response_timeout=self.node_timeout,
                headers={'X-Account-Host': '%(ip)s:%(port)s' % anode,
                         'X-Account-Partition': str(account_partition),
                         'X-Account-Device': anode['device'],
                         'X-Account-Override-Deleted': 'yes',
                         'X-Timestamp': timestamp.internal})
            successes += 1
            self.stats_return_codes[2] = \
                self.stats_return_codes.get(2, 0) + 1
            self.logger.increment('return_codes.2')
        except ClientException as err:
            if self.logger.getEffectiveLevel() <= DEBUG:
                self.logger.exception(
                    _('Exception with %(ip)s:%(port)s/%(device)s'), node)
            failures += 1
            self.logger.increment('containers_failures')
            self.stats_return_codes[err.http_status // 100] = \
                self.stats_return_codes.get(err.http_status // 100, 0) + 1
            self.logger.increment(
                'return_codes.%d' % (err.http_status // 100,))
        except (Timeout, socket.error) as err:
            self.logger.error(
                _('Timeout Exception with %(ip)s:%(port)s/%(device)s'),
                node)
            failures += 1
            self.logger.increment('containers_failures')
    if successes > failures:
        self.stats_containers_deleted += 1
        self.logger.increment('containers_deleted')
    elif not successes:
        self.stats_containers_remaining += 1
        self.logger.increment('containers_remaining')
    else:
        self.stats_containers_possibly_remaining += 1
        self.logger.increment('containers_possibly_remaining')

def can_reconcile_policy(self, policy_index):
    pol = POLICIES.get_by_index(policy_index)
    if pol:
        pol.load_ring(self.swift_dir,
                      reload_time=self.ring_check_interval)
        return pol.object_ring.next_part_power is None
    return False

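# A minimal sketch (assumes a recent python-swift and ring files under
# /etc/swift): a policy is safe to reconcile only while its ring has no
# pending part-power increase.
from swift.common.storage_policy import POLICIES

pol = POLICIES.get_by_index(0)
pol.load_ring('/etc/swift')
print(pol.object_ring.next_part_power is None)  # True -> safe to reconcile
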
def reap_container(self, account, account_partition, account_nodes,
                   container):
    """
    Deletes the data and the container itself for the given container.
    This will call :func:`reap_object` up to sqrt(self.concurrency)
    times concurrently for the objects in the container.

    If there is any exception while deleting a single object, the
    process will continue for any other objects in the container and
    the failed objects will be tried again the next time this function
    is called with the same parameters.

    If there is any exception while listing the objects for deletion,
    the process will stop (but will obviously be tried again the next
    time this function is called with the same parameters). This is a
    possibility since the listing comes from querying just the primary
    remote container server.

    Once all objects have been attempted to be deleted, the container
    itself will be attempted to be deleted by sending a delete request
    to all container nodes. The format of the delete request is such
    that each container server will update a corresponding account
    server, removing the container from the account's listing.

    This function returns nothing and should raise no exception but
    only update various self.stats_* values for what occurs.

    :param account: The name of the account for the container.
    :param account_partition: The partition for the account on the
                              account ring.
    :param account_nodes: The primary node dicts for the account.
    :param container: The name of the container to delete.

    * See also: :func:`swift.common.ring.Ring.get_nodes` for a
      description of the account node dicts.
    """
    account_nodes = list(account_nodes)
    part, nodes = self.get_container_ring().get_nodes(account, container)
    node = nodes[-1]
    pool = GreenPool(size=self.object_concurrency)
    marker = ''
    while True:
        objects = None
        try:
            headers, objects = direct_get_container(
                node, part, account, container,
                marker=marker,
                conn_timeout=self.conn_timeout,
                response_timeout=self.node_timeout)
            self.stats_return_codes[2] = \
                self.stats_return_codes.get(2, 0) + 1
            self.logger.increment('return_codes.2')
        except ClientException as err:
            if self.logger.getEffectiveLevel() <= DEBUG:
                self.logger.exception(
                    _('Exception with %(ip)s:%(port)s/%(device)s'), node)
            self.stats_return_codes[err.http_status / 100] = \
                self.stats_return_codes.get(err.http_status / 100, 0) + 1
            self.logger.increment(
                'return_codes.%d' % (err.http_status / 100,))
        if not objects:
            break
        try:
            policy_index = headers.get('X-Backend-Storage-Policy-Index', 0)
            policy = POLICIES.get_by_index(policy_index)
            if not policy:
                self.logger.error(
                    'ERROR: invalid storage policy index: %r'
                    % policy_index)
            for obj in objects:
                if isinstance(obj['name'], unicode):
                    obj['name'] = obj['name'].encode('utf8')
                pool.spawn(self.reap_object, account, container, part,
                           nodes, obj['name'], policy_index)
            pool.waitall()
        except (Exception, Timeout):
            self.logger.exception(
                _('Exception with objects for container '
                  '%(container)s for account %(account)s'),
                {'container': container, 'account': account})
        marker = objects[-1]['name']
        if marker == '':
            break
    successes = 0
    failures = 0
    timestamp = Timestamp(time())
    for node in nodes:
        anode = account_nodes.pop()
        try:
            direct_delete_container(
                node, part, account, container,
                conn_timeout=self.conn_timeout,
                response_timeout=self.node_timeout,
                headers={'X-Account-Host': '%(ip)s:%(port)s' % anode,
                         'X-Account-Partition': str(account_partition),
                         'X-Account-Device': anode['device'],
                         'X-Account-Override-Deleted': 'yes',
                         'X-Timestamp': timestamp.internal})
            successes += 1
            self.stats_return_codes[2] = \
                self.stats_return_codes.get(2, 0) + 1
            self.logger.increment('return_codes.2')
        except ClientException as err:
            if self.logger.getEffectiveLevel() <= DEBUG:
                self.logger.exception(
                    _('Exception with %(ip)s:%(port)s/%(device)s'), node)
            failures += 1
            self.logger.increment('containers_failures')
            self.stats_return_codes[err.http_status / 100] = \
                self.stats_return_codes.get(err.http_status / 100, 0) + 1
            self.logger.increment(
                'return_codes.%d' % (err.http_status / 100,))
    if successes > failures:
        self.stats_containers_deleted += 1
        self.logger.increment('containers_deleted')
    elif not successes:
        self.stats_containers_remaining += 1
        self.logger.increment('containers_remaining')
    else:
        self.stats_containers_possibly_remaining += 1
        self.logger.increment('containers_possibly_remaining')

def get_controller(self, req):
    """
    Get the controller to handle a request.

    :param req: the request
    :returns: tuple of (controller class, path dictionary)
    :raises: ValueError (thrown by split_path) if given invalid path
    """
    # A request of the form /info{?swiftinfo_sig,swiftinfo_expires}
    # asks for cluster info, so it is routed to the InfoController
    if req.path == '/info':
        d = dict(version=None,
                 expose_info=self.expose_info,
                 disallowed_sections=self.disallowed_sections,
                 admin_key=self.admin_key)
        return InfoController, d

    # Extract the path components
    version, account, container, obj = split_path(req.path, 1, 4, True)
    d = dict(version=version,
             account_name=account,
             container_name=container,
             object_name=obj)
    # The account exists but the version is invalid; currently only
    # v1 or v1.0 are accepted
    if account and not valid_api_version(version):
        raise APIVersionError('Invalid path')
    # If account, container and object are all present in the path,
    # return the object controller
    if obj and container and account:
        info = get_container_info(req.environ, self)
        # Get the container's storage policy; the available policies
        # are defined in the swift.conf configuration file
        policy_index = req.headers.get('X-Backend-Storage-Policy-Index',
                                       info['storage_policy'])
        policy = POLICIES.get_by_index(policy_index)
        if not policy:
            # This indicates that a new policy has been created,
            # with rings, deployed, released (i.e. deprecated =
            # False), used by a client to create a container via
            # another proxy that was restarted after the policy
            # was released, and is now cached - all before this
            # worker was HUPed to stop accepting new
            # connections.  There should never be an "unknown"
            # index - but when there is - it's probably operator
            # error and hopefully temporary.
            raise HTTPServiceUnavailable('Unknown Storage Policy')
        return self.obj_controller_router[policy], d
    # If the path contains only account and container, return the
    # container controller
    elif container and account:
        return ContainerController, d
    # If the path contains only the account, return the account
    # controller
    elif account and not container and not obj:
        return AccountController, d
    # Nothing matched; return None
    return None, d

def map_objects_to_targets(self):
    """
    Map objects to their local storage server data replicas and create
    a request for the generic backend interface
    """
    self.logger.debug('Mapping objects to files')
    self.logger.debug('request_in(first 1024 bytes): %s',
                      str(self.request_in)[0:1023])
    request_in_dict = json.loads(self.request_in)
    # TODO consider modifying incoming request in place
    self.request_out = {}
    self.request_out['request'] = request_in_dict['request']
    objects_and_files = []
    oc = ObjectController(self.conf, self.logger)
    self.logger.debug('oc.node_timeout: %s', oc.node_timeout)
    for obj_and_dev in request_in_dict['objects']:
        obj_and_file = {}
        obj_and_file['object'] = obj_and_dev['object']
        self.logger.debug('obj: %s', obj_and_dev)
        try:
            (account, container, obj) = split_path(
                obj_and_dev['object'].encode('utf-8'), 3, 3, True)
        except ValueError:
            self.logger.debug('split_path exception')
            raise
        device = obj_and_dev['device']
        # TODO: can storage_policy_index be determined from the storage
        # node so it does not have to be passed from the proxy?
        # container_info = get_container_info(
        #     {'PATH_INFO': '/v1/%s/%s' % (account, container)},
        #     self.app, swift_source='LE')
        # storage_policy_index = container_info['storage_policy']
        # obj_ring = self.get_object_ring(storage_policy_index)
        swift_dir = request_in_dict['swift_dir']
        storage_policy_index = request_in_dict['storage_policy_index']
        obj_ring = POLICIES.get_object_ring(storage_policy_index,
                                            swift_dir)
        # need partition, same comment as for storage_policy_index
        partition, nodes = obj_ring.get_nodes(account, container, obj)
        self.logger.debug('Storage nodes: %s' % str(nodes))
        self.logger.debug('partition: %s', partition)
        # scor (aux)
        # key = hash_path(account, container, obj, raw_digest=True)
        key = hash_path(account, container, obj)
        self.logger.debug('hash_path or key: %s', key)

        # Create/use Object Controller to map objects to files
        policy = POLICIES.get_by_index(storage_policy_index)
        self.logger.debug('policy: %s index: %s', policy,
                          str(int(policy)))
        try:
            oc.disk_file = oc.get_diskfile(device, partition, account,
                                           container, obj, policy=policy)
        except DiskFileDeviceUnavailable:  # scor
            self.logger.error(
                "Unavailable device: %s, for object: %s,"
                "storage policy: %s", device, obj_and_dev['object'],
                policy)
        data_dir = oc.disk_file._datadir
        self.logger.debug('data_dir: %s', data_dir)
        # Swift-on-File detection
        sof_detected = False
        # Get the device path from the object server config file
        devpath = self.conf.get('devices', None)
        # The Swift-on-File device directory is a symlink
        # in the devpath directory constructed like shown below
        sofpath = devpath + '/' + obj_and_dev['device']
        if data_dir.find(sofpath) == 0 and os.path.islink(sofpath):
            # data_dir starts with sofpath and sofpath is a symlink -> SoF
            sof_detected = True
            self.logger.debug('SOF detected, sofpath: %s, realpath: %s',
                              sofpath, os.path.realpath(sofpath))
            # Follow the symlink and append a/c/o to get the data file path
            oc._data_file = os.path.realpath(sofpath) + \
                obj_and_file['object']
        elif not self.gbi_provide_dirpaths_instead_of_filepaths:
            files = os.listdir(oc.disk_file._datadir)
            file_info = {}
            # DiskFile method got renamed between Liberty and Mitaka
            try:
                file_info = oc.disk_file._get_ondisk_file(files)
            except AttributeError:
                file_info = oc.disk_file._get_ondisk_files(files)
            oc._data_file = file_info.get('data_file')
            self.logger.debug('data_file: %s', oc._data_file)
        # Add file path to the request
        self.logger.debug('obj_and_dev: %s', obj_and_dev)
        if (not self.gbi_provide_dirpaths_instead_of_filepaths) or \
                sof_detected:
            obj_and_file['file'] = oc._data_file
        else:
            obj_and_file['file'] = data_dir
        self.logger.debug('obj_and_file: %s', obj_and_file)
        objects_and_files.append(obj_and_file)

    self.logger.debug('objects_and_files(first 1024 bytes): %s',
                      str(objects_and_files)[0:1023])
    self.request_out['objects'] = objects_and_files
    self.logger.debug('request_in(first 1024 bytes): %s',
                      str(self.request_in)[0:1023])
    self.logger.debug('request_out(first 1024 bytes): %s',
                      str(self.request_out)[0:1023])

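# A minimal sketch of the ring lookup used above (assumes /etc/swift holds
# swift.conf plus the object ring files for policy 0; account, container
# and object names are placeholders): resolve the ring for a policy and
# find the primary nodes for an object.
from swift.common.storage_policy import POLICIES

ring = POLICIES.get_object_ring(0, '/etc/swift')
partition, nodes = ring.get_nodes('AUTH_test', 'pics', 'cat.jpg')
print(partition, [n['device'] for n in nodes])
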