def _config_obj_expiration(self, req):
    delete_at_container = None
    delete_at_part = None
    delete_at_nodes = None

    if 'x-delete-after' in req.headers:
        try:
            x_delete_after = int(req.headers['x-delete-after'])
        except ValueError:
            raise ValueError('Non-integer X-Delete-After')

        req.headers['x-delete-at'] = normalize_delete_at_timestamp(
            time.time() + x_delete_after)

    if 'x-delete-at' in req.headers:
        try:
            x_delete_at = int(normalize_delete_at_timestamp(
                int(req.headers['x-delete-at'])))
        except ValueError:
            raise ValueError('Non-integer X-Delete-At')

        if x_delete_at < time.time():
            raise ValueError('X-Delete-At in past')

        req.environ.setdefault('swift.log_info', []).append(
            'x-delete-at:%s' % x_delete_at)

        delete_at_container = normalize_delete_at_timestamp(
            x_delete_at / self.app.expiring_objects_container_divisor *
            self.app.expiring_objects_container_divisor)

        delete_at_part, delete_at_nodes = \
            self.app.container_ring.get_nodes(
                self.app.expiring_objects_account, delete_at_container)

    return req, delete_at_container, delete_at_part, delete_at_nodes
def check_delete_headers(request):
    """
    Check that 'x-delete-after' and 'x-delete-at' headers have valid values.
    Values should be positive integers and correspond to a time greater
    than the request timestamp.

    If the 'x-delete-after' header is found then its value is used to
    compute an 'x-delete-at' value which takes precedence over any
    existing 'x-delete-at' header.

    :param request: the swob request object
    :raises: HTTPBadRequest in case of invalid values
    :returns: the swob request object
    """
    now = float(valid_timestamp(request))
    if 'x-delete-after' in request.headers:
        try:
            x_delete_after = int(request.headers['x-delete-after'])
        except ValueError:
            raise HTTPBadRequest(request=request,
                                 content_type='text/plain',
                                 body=b'Non-integer X-Delete-After')
        actual_del_time = utils.normalize_delete_at_timestamp(
            now + x_delete_after)
        if int(actual_del_time) <= now:
            raise HTTPBadRequest(request=request,
                                 content_type='text/plain',
                                 body=b'X-Delete-After in past')
        request.headers['x-delete-at'] = actual_del_time
        del request.headers['x-delete-after']

    if 'x-delete-at' in request.headers:
        try:
            x_delete_at = int(utils.normalize_delete_at_timestamp(
                int(request.headers['x-delete-at'])))
        except ValueError:
            raise HTTPBadRequest(request=request,
                                 content_type='text/plain',
                                 body=b'Non-integer X-Delete-At')

        if x_delete_at <= now and not utils.config_true_value(
                request.headers.get('x-backend-replication', 'f')):
            raise HTTPBadRequest(request=request,
                                 content_type='text/plain',
                                 body=b'X-Delete-At in past')
    return request
def build_task_obj(timestamp, target_account, target_container, target_obj):
    """
    :return: a task object name in format of
             "<timestamp>-<target_account>/<target_container>/<target_obj>"
    """
    timestamp = Timestamp(timestamp)
    return '%s-%s/%s/%s' % (
        normalize_delete_at_timestamp(timestamp),
        target_account, target_container, target_obj)
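A minimal usage sketch, assuming the usual swift.common.utils helpers (Timestamp, normalize_delete_at_timestamp); the parse_task_obj_sketch helper below is only an illustrative inverse, not the expirer's own parser:

# Illustrative only: build and re-split a task object name.
from swift.common.utils import Timestamp, normalize_delete_at_timestamp

task_obj = build_task_obj(1518608152, 'AUTH_test', 'photos', 'cat.jpg')
# -> '1518608152-AUTH_test/photos/cat.jpg'

def parse_task_obj_sketch(task_obj):
    # Hypothetical counterpart to build_task_obj: recover
    # (timestamp, account, container, object) from a task object name.
    timestamp, target = task_obj.split('-', 1)
    account, container, obj = target.split('/', 2)
    return int(timestamp), account, container, obj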
def check_delete_headers(request):
    """
    Check that 'x-delete-after' and 'x-delete-at' headers have valid values.
    Values should be positive integers and correspond to a time greater
    than or equal to the request timestamp.

    :param request: the swob request object
    :raises: HTTPBadRequest in case of invalid values
    :returns: the swob request object
    """
    now = float(valid_timestamp(request))
    if 'x-delete-after' in request.headers:
        try:
            x_delete_after = int(request.headers['x-delete-after'])
        except ValueError:
            raise HTTPBadRequest(request=request,
                                 content_type='text/plain',
                                 body='Non-integer X-Delete-After')
        actual_del_time = utils.normalize_delete_at_timestamp(
            now + x_delete_after)
        if int(actual_del_time) < now:
            raise HTTPBadRequest(request=request,
                                 content_type='text/plain',
                                 body='X-Delete-After in past')
        request.headers['x-delete-at'] = actual_del_time
        del request.headers['x-delete-after']

    if 'x-delete-at' in request.headers:
        try:
            x_delete_at = int(
                utils.normalize_delete_at_timestamp(
                    int(request.headers['x-delete-at'])))
        except ValueError:
            raise HTTPBadRequest(request=request,
                                 content_type='text/plain',
                                 body='Non-integer X-Delete-At')

        if x_delete_at < now and not utils.config_true_value(
                request.headers.get('x-backend-replication', 'f')):
            raise HTTPBadRequest(request=request,
                                 content_type='text/plain',
                                 body='X-Delete-At in past')
    return request
def check_delete_headers(request):
    """
    Check that 'x-delete-after' and 'x-delete-at' headers have valid values.
    Values should be positive integers and correspond to a time greater
    than the request timestamp.

    If the 'x-delete-after' header is found then its value is used to
    compute an 'x-delete-at' value which takes precedence over any
    existing 'x-delete-at' header.

    :param request: the swob request object
    :raises: HTTPBadRequest in case of invalid values
    :returns: the swob request object
    """
    now = float(valid_timestamp(request))
    if 'x-delete-after' in request.headers:
        try:
            x_delete_after = int(request.headers['x-delete-after'])
        except ValueError:
            raise HTTPBadRequest(request=request,
                                 content_type='text/plain',
                                 body='Non-integer X-Delete-After')
        actual_del_time = utils.normalize_delete_at_timestamp(
            now + x_delete_after)
        if int(actual_del_time) <= now:
            raise HTTPBadRequest(request=request,
                                 content_type='text/plain',
                                 body='X-Delete-After in past')
        request.headers['x-delete-at'] = actual_del_time
        del request.headers['x-delete-after']

    if 'x-delete-at' in request.headers:
        try:
            x_delete_at = int(utils.normalize_delete_at_timestamp(
                int(request.headers['x-delete-at'])))
        except ValueError:
            raise HTTPBadRequest(request=request,
                                 content_type='text/plain',
                                 body='Non-integer X-Delete-At')

        if x_delete_at <= now and not utils.config_true_value(
                request.headers.get('x-backend-replication', 'f')):
            raise HTTPBadRequest(request=request,
                                 content_type='text/plain',
                                 body='X-Delete-At in past')
    return request
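A short usage sketch of the header rewrite performed above, assuming the usual swift.common.swob and swift.common.utils imports; the path and timestamps are only illustrative:

from swift.common.swob import Request
from swift.common.utils import Timestamp

req = Request.blank(
    '/v1/AUTH_test/c/o',
    headers={'X-Timestamp': Timestamp(1518608152.0).internal,
             'X-Delete-After': '3600'})
req = check_delete_headers(req)
# 'X-Delete-After' is consumed and replaced by an absolute, zero-padded
# 'X-Delete-At' value relative to the request timestamp:
assert 'X-Delete-After' not in req.headers
assert req.headers['X-Delete-At'] == '1518611752'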
def check_delete_headers(request):
    """
    Validate that 'x-delete' headers have correct values. Values should be
    positive integers and correspond to a time in the future.

    :param request: the swob request object
    :raises: HTTPBadRequest in case of invalid values
    :returns: the swob request object
    """
    if 'x-delete-after' in request.headers:
        try:
            x_delete_after = int(request.headers['x-delete-after'])
        except ValueError:
            raise HTTPBadRequest(request=request,
                                 content_type='text/plain',
                                 body='Non-integer X-Delete-After')
        actual_del_time = time.time() + x_delete_after
        if actual_del_time < time.time():
            raise HTTPBadRequest(request=request,
                                 content_type='text/plain',
                                 body='X-Delete-After in past')
        request.headers['x-delete-at'] = utils.normalize_delete_at_timestamp(
            actual_del_time)

    if 'x-delete-at' in request.headers:
        try:
            x_delete_at = int(
                utils.normalize_delete_at_timestamp(
                    int(request.headers['x-delete-at'])))
        except ValueError:
            raise HTTPBadRequest(request=request,
                                 content_type='text/plain',
                                 body='Non-integer X-Delete-At')
        if x_delete_at < time.time():
            raise HTTPBadRequest(request=request,
                                 content_type='text/plain',
                                 body='X-Delete-At in past')
    return request
def check_delete_headers(request):
    """
    Validate that 'x-delete' headers have correct values. Values should be
    positive integers and correspond to a time in the future.

    :param request: the swob request object
    :raises: HTTPBadRequest in case of invalid values
    :returns: the swob request object
    """
    if 'x-delete-after' in request.headers:
        try:
            x_delete_after = int(request.headers['x-delete-after'])
        except ValueError:
            raise HTTPBadRequest(request=request,
                                 content_type='text/plain',
                                 body='Non-integer X-Delete-After')
        actual_del_time = time.time() + x_delete_after
        if actual_del_time < time.time():
            raise HTTPBadRequest(request=request,
                                 content_type='text/plain',
                                 body='X-Delete-After in past')
        request.headers['x-delete-at'] = utils.normalize_delete_at_timestamp(
            actual_del_time)

    if 'x-delete-at' in request.headers:
        try:
            x_delete_at = int(utils.normalize_delete_at_timestamp(
                int(request.headers['x-delete-at'])))
        except ValueError:
            raise HTTPBadRequest(request=request,
                                 content_type='text/plain',
                                 body='Non-integer X-Delete-At')
        if x_delete_at < time.time() and not utils.config_true_value(
                request.headers.get('x-backend-replication', 'f')):
            raise HTTPBadRequest(request=request,
                                 content_type='text/plain',
                                 body='X-Delete-At in past')
    return request
def _config_obj_expiration(self, req):
    delete_at_container = None
    delete_at_part = None
    delete_at_nodes = None

    if 'x-delete-after' in req.headers:
        try:
            x_delete_after = int(req.headers['x-delete-after'])
        except ValueError:
            raise HTTPBadRequest(request=req,
                                 content_type='text/plain',
                                 body='Non-integer X-Delete-After')

        req.headers['x-delete-at'] = normalize_delete_at_timestamp(
            time.time() + x_delete_after)

    if 'x-delete-at' in req.headers:
        try:
            x_delete_at = int(normalize_delete_at_timestamp(
                int(req.headers['x-delete-at'])))
        except ValueError:
            raise HTTPBadRequest(request=req,
                                 content_type='text/plain',
                                 body='Non-integer X-Delete-At')

        if x_delete_at < time.time():
            raise HTTPBadRequest(request=req,
                                 content_type='text/plain',
                                 body='X-Delete-At in past')

        req.environ.setdefault('swift.log_info', []).append(
            'x-delete-at:%s' % x_delete_at)

        delete_at_container = get_expirer_container(
            x_delete_at, self.app.expiring_objects_container_divisor,
            self.account_name, self.container_name, self.object_name)

        delete_at_part, delete_at_nodes = \
            self.app.container_ring.get_nodes(
                self.app.expiring_objects_account, delete_at_container)

    return req, delete_at_container, delete_at_part, delete_at_nodes
def calc_when_actions_do(rule, from_time):
    actions_timestamp = dict()

    for key in ('Expiration', 'Transition'):
        if key not in rule:
            continue
        action = rule[key]
        time = None
        if 'Date' in action:
            time = calendar.timegm(dateutil.parser.parse(action['Date'])
                                   .timetuple())
        elif 'Days' in action:
            time = calc_nextDay(from_time) + int(action['Days']) * DAY_SECONDS
            time = normalize_delete_at_timestamp(time)
        actions_timestamp[key] = time

    return actions_timestamp
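A small usage sketch for the rule-evaluation helper above, assuming DAY_SECONDS = 86400 and Python 2 semantics as in the surrounding code; the rule shape is inferred from the code and the concrete numbers are only illustrative:

# Expire 30 days after the next UTC midnight following from_time.
rule = {'Expiration': {'Days': '30'}}
from_time = 1714000000              # some point during 2024-04-24 UTC
# calc_nextDay(from_time) -> 1714003200 (the following UTC midnight)
# 1714003200 + 30 * 86400 -> 1716595200, zero-padded to ten digits
assert calc_when_actions_do(rule, from_time) == {'Expiration': '1716595200'}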
def complete_restore(self, actual_obj, job):
    tmppath = tempfile.NamedTemporaryFile(bufsize=0, delete=False,
                                          dir=self.glacier_tmpdir).name
    try:
        job.download_to_file(filename=tmppath)

        prefix = 'X-Object-Meta'
        a, c, o = actual_obj.split('/', 2)
        metadata = self.swift.get_object_metadata(a, c, o,
                                                  metadata_prefix=prefix)
        metadata = {'X-Object-Meta' + key: value for key, value in
                    metadata.iteritems()}
        days = int(metadata['X-Object-Meta-s3-restore-expire-days'])
        exp_time = normalize_delete_at_timestamp(calc_nextDay(time()) +
                                                 (days - 1) * 86400)

        # send restored object to proxy server
        path = '/v1/%s' % actual_obj
        metadata['X-Object-Meta-S3-Restored'] = True
        exp_date = strftime("%a, %d %b %Y %H:%M:%S GMT",
                            gmtime(float(exp_time)))

        metadata['X-Object-Meta-s3-restore'] = 'ongoing-request="false", ' \
                                               'expiry-date="%s"' % exp_date
        metadata['Content-Length'] = os.path.getsize(tmppath)
        del metadata['X-Object-Meta-s3-restore-expire-days']

        obj_body = open(tmppath, 'r')
        self.swift.make_request('PUT', path, metadata, (2,),
                                body_file=obj_body)

        # Add to .s3_expiring_restored_objects
        self.update_action_hidden(self.expiring_restored_account,
                                  exp_time, actual_obj)
        obj_body.close()
        self.logger.increment('done')
    except UnexpectedResponse as e:
        if e.resp.status_int == 404:
            self.logger.error('Restoring object not found - %s' %
                              actual_obj)
    except Exception as e:
        self.logger.increment('errors')
        self.logger.debug(e)
    finally:
        os.remove(tmppath)
def _config_obj_expiration(self, req):
    delete_at_container = None
    delete_at_part = None
    delete_at_nodes = None

    req = constraints.check_delete_headers(req)

    if 'x-delete-at' in req.headers:
        x_delete_at = int(normalize_delete_at_timestamp(
            int(req.headers['x-delete-at'])))

        req.environ.setdefault('swift.log_info', []).append(
            'x-delete-at:%s' % x_delete_at)

        delete_at_container = get_expirer_container(
            x_delete_at, self.app.expiring_objects_container_divisor,
            self.account_name, self.container_name, self.object_name)

        delete_at_part, delete_at_nodes = \
            self.app.container_ring.get_nodes(
                self.app.expiring_objects_account, delete_at_container)

    return req, delete_at_container, delete_at_part, delete_at_nodes
def delete_at_update(self, op, delete_at, account, container, obj,
                     request, objdevice, policy_index):
    """
    Update the expiring objects container when objects are updated.

    :param op: operation performed (ex: 'PUT', or 'DELETE')
    :param delete_at: scheduled delete in UNIX seconds, int
    :param account: account name for the object
    :param container: container name for the object
    :param obj: object name
    :param request: the original request driving the update
    :param objdevice: device name that the object is in
    :param policy_index: the policy index to be used for tmp dir
    """
    if config_true_value(
            request.headers.get('x-backend-replication', 'f')):
        return
    delete_at = normalize_delete_at_timestamp(delete_at)
    updates = [(None, None)]

    partition = None
    hosts = contdevices = [None]
    headers_in = request.headers
    headers_out = HeaderKeyDict({
        # system accounts are always Policy-0
        'X-Backend-Storage-Policy-Index': 0,
        'x-timestamp': request.timestamp.internal,
        'x-trans-id': headers_in.get('x-trans-id', '-'),
        'referer': request.as_referer()})
    if op != 'DELETE':
        delete_at_container = headers_in.get('X-Delete-At-Container', None)
        if not delete_at_container:
            self.logger.warning(
                'X-Delete-At-Container header must be specified for '
                'expiring objects background %s to work properly. Making '
                'best guess as to the container name for now.' % op)
            # TODO(gholt): In a future release, change the above warning to
            # a raised exception and remove the guess code below.
            delete_at_container = get_expirer_container(
                delete_at, self.expiring_objects_container_divisor,
                account, container, obj)
        partition = headers_in.get('X-Delete-At-Partition', None)
        hosts = headers_in.get('X-Delete-At-Host', '')
        contdevices = headers_in.get('X-Delete-At-Device', '')
        updates = [upd for upd in
                   zip((h.strip() for h in hosts.split(',')),
                       (c.strip() for c in contdevices.split(',')))
                   if all(upd) and partition]
        if not updates:
            updates = [(None, None)]
        headers_out['x-size'] = '0'
        headers_out['x-content-type'] = 'text/plain'
        headers_out['x-etag'] = 'd41d8cd98f00b204e9800998ecf8427e'
    else:
        # DELETEs of old expiration data have no way of knowing what the
        # old X-Delete-At-Container was at the time of the initial setting
        # of the data, so a best guess is made here.
        # Worst case is a DELETE is issued now for something that doesn't
        # exist there and the original data is left where it is, where
        # it will be ignored when the expirer eventually tries to issue the
        # object DELETE later since the X-Delete-At value won't match up.
        delete_at_container = get_expirer_container(
            delete_at, self.expiring_objects_container_divisor,
            account, container, obj)
    delete_at_container = normalize_delete_at_timestamp(delete_at_container)

    for host, contdevice in updates:
        self.async_update(
            op, self.expiring_objects_account, delete_at_container,
            '%s-%s/%s/%s' % (delete_at, account, container, obj),
            host, partition, contdevice, headers_out, objdevice,
            policy_index)
def calc_nextDay(timestamp):
    current = normalize_delete_at_timestamp((int(timestamp) / DAY_SECONDS) *
                                            DAY_SECONDS)
    return int(current) + DAY_SECONDS
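For reference, a quick check of the rounding, assuming DAY_SECONDS = 86400 and the Python 2 integer division used above (which floors the timestamp to the current UTC midnight):

# 1714000000 falls during 2024-04-24 UTC; flooring gives that day's
# midnight (1713916800) and calc_nextDay returns the following midnight.
assert calc_nextDay(1714000000) == 1713916800 + 86400  # 1714003200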
def PUT(self, req):
    """HTTP PUT request handler."""
    if req.if_none_match is not None and '*' not in req.if_none_match:
        # Sending an etag with if-none-match isn't currently supported
        return HTTPBadRequest(request=req, content_type='text/plain',
                              body='If-None-Match only supports *')
    container_info = self.container_info(
        self.account_name, self.container_name, req)
    policy_index = req.headers.get('X-Backend-Storage-Policy-Index',
                                   container_info['storage_policy'])
    obj_ring = self.app.get_object_ring(policy_index)

    # pass the policy index to storage nodes via req header
    req.headers['X-Backend-Storage-Policy-Index'] = policy_index
    container_partition = container_info['partition']
    containers = container_info['nodes']
    req.acl = container_info['write_acl']
    req.environ['swift_sync_key'] = container_info['sync_key']
    object_versions = container_info['versions']
    if 'swift.authorize' in req.environ:
        aresp = req.environ['swift.authorize'](req)
        if aresp:
            return aresp
    if not containers:
        return HTTPNotFound(request=req)

    try:
        ml = req.message_length()
    except ValueError as e:
        return HTTPBadRequest(request=req, content_type='text/plain',
                              body=str(e))
    except AttributeError as e:
        return HTTPNotImplemented(request=req, content_type='text/plain',
                                  body=str(e))
    if ml is not None and ml > constraints.MAX_FILE_SIZE:
        return HTTPRequestEntityTooLarge(request=req)

    if 'x-delete-after' in req.headers:
        try:
            x_delete_after = int(req.headers['x-delete-after'])
        except ValueError:
            return HTTPBadRequest(request=req,
                                  content_type='text/plain',
                                  body='Non-integer X-Delete-After')
        req.headers['x-delete-at'] = normalize_delete_at_timestamp(
            time.time() + x_delete_after)

    partition, nodes = obj_ring.get_nodes(
        self.account_name, self.container_name, self.object_name)

    # do a HEAD request for container sync and checking object versions
    if 'x-timestamp' in req.headers or \
            (object_versions and not
             req.environ.get('swift_versioned_copy')):
        # make sure proxy-server uses the right policy index
        _headers = {'X-Backend-Storage-Policy-Index': policy_index,
                    'X-Newest': 'True'}
        hreq = Request.blank(req.path_info, headers=_headers,
                             environ={'REQUEST_METHOD': 'HEAD'})
        hresp = self.GETorHEAD_base(
            hreq, _('Object'), obj_ring, partition,
            hreq.swift_entity_path)

    # Used by container sync feature
    if 'x-timestamp' in req.headers:
        try:
            req_timestamp = Timestamp(req.headers['X-Timestamp'])
            if hresp.environ and 'swift_x_timestamp' in hresp.environ and \
                    hresp.environ['swift_x_timestamp'] >= req_timestamp:
                return HTTPAccepted(request=req)
        except ValueError:
            return HTTPBadRequest(
                request=req, content_type='text/plain',
                body='X-Timestamp should be a UNIX timestamp float value; '
                     'was %r' % req.headers['x-timestamp'])
        req.headers['X-Timestamp'] = req_timestamp.internal
    else:
        req.headers['X-Timestamp'] = Timestamp(time.time()).internal

    # Sometimes the 'content-type' header exists, but is set to None.
    content_type_manually_set = True
    detect_content_type = \
        config_true_value(req.headers.get('x-detect-content-type'))
    if detect_content_type or not req.headers.get('content-type'):
        guessed_type, _junk = mimetypes.guess_type(req.path_info)
        req.headers['Content-Type'] = guessed_type or \
            'application/octet-stream'
        if detect_content_type:
            req.headers.pop('x-detect-content-type')
    else:
        content_type_manually_set = False

    error_response = check_object_creation(req, self.object_name) or \
        check_content_type(req)
    if error_response:
        return error_response

    if object_versions and not req.environ.get('swift_versioned_copy'):
        if hresp.status_int != HTTP_NOT_FOUND:
            # This is a version manifest and needs to be handled
            # differently. First copy the existing data to a new object,
            # then write the data from this request to the version
            # manifest object.
            lcontainer = object_versions.split('/')[0]
            prefix_len = '%03x' % len(self.object_name)
            lprefix = prefix_len + self.object_name + '/'
            ts_source = hresp.environ.get('swift_x_timestamp')
            if ts_source is None:
                ts_source = time.mktime(time.strptime(
                    hresp.headers['last-modified'],
                    '%a, %d %b %Y %H:%M:%S GMT'))
            new_ts = Timestamp(ts_source).internal
            vers_obj_name = lprefix + new_ts
            copy_headers = {
                'Destination': '%s/%s' % (lcontainer, vers_obj_name)}
            copy_environ = {'REQUEST_METHOD': 'COPY',
                            'swift_versioned_copy': True
                            }
            copy_req = Request.blank(req.path_info, headers=copy_headers,
                                     environ=copy_environ)
            copy_resp = self.COPY(copy_req)
            if is_client_error(copy_resp.status_int):
                # missing container or bad permissions
                return HTTPPreconditionFailed(request=req)
            elif not is_success(copy_resp.status_int):
                # could not copy the data, bail
                return HTTPServiceUnavailable(request=req)

    reader = req.environ['wsgi.input'].read
    data_source = iter(lambda: reader(self.app.client_chunk_size), '')
    source_header = req.headers.get('X-Copy-From')
    source_resp = None
    if source_header:
        if req.environ.get('swift.orig_req_method', req.method) != 'POST':
            req.environ.setdefault('swift.log_info', []).append(
                'x-copy-from:%s' % source_header)
        src_container_name, src_obj_name = check_copy_from_header(req)
        ver, acct, _rest = req.split_path(2, 3, True)
        if isinstance(acct, unicode):
            acct = acct.encode('utf-8')
        source_header = '/%s/%s/%s/%s' % (ver, acct,
                                          src_container_name, src_obj_name)
        source_req = req.copy_get()

        # make sure the source request uses it's container_info
        source_req.headers.pop('X-Backend-Storage-Policy-Index', None)
        source_req.path_info = source_header
        source_req.headers['X-Newest'] = 'true'
        orig_obj_name = self.object_name
        orig_container_name = self.container_name
        self.object_name = src_obj_name
        self.container_name = src_container_name
        sink_req = Request.blank(req.path_info,
                                 environ=req.environ, headers=req.headers)
        source_resp = self.GET(source_req)

        # This gives middlewares a way to change the source; for example,
        # this lets you COPY a SLO manifest and have the new object be the
        # concatenation of the segments (like what a GET request gives
        # the client), not a copy of the manifest file.
        hook = req.environ.get(
            'swift.copy_hook',
            (lambda source_req, source_resp, sink_req: source_resp))
        source_resp = hook(source_req, source_resp, sink_req)

        if source_resp.status_int >= HTTP_MULTIPLE_CHOICES:
            return source_resp
        self.object_name = orig_obj_name
        self.container_name = orig_container_name
        data_source = iter(source_resp.app_iter)
        sink_req.content_length = source_resp.content_length
        if sink_req.content_length is None:
            # This indicates a transfer-encoding: chunked source object,
            # which currently only happens because there are more than
            # CONTAINER_LISTING_LIMIT segments in a segmented object. In
            # this case, we're going to refuse to do the server-side copy.
            return HTTPRequestEntityTooLarge(request=req)
        if sink_req.content_length > constraints.MAX_FILE_SIZE:
            return HTTPRequestEntityTooLarge(request=req)
        sink_req.etag = source_resp.etag

        # we no longer need the X-Copy-From header
        del sink_req.headers['X-Copy-From']
        if not content_type_manually_set:
            sink_req.headers['Content-Type'] = \
                source_resp.headers['Content-Type']
        if not config_true_value(
                sink_req.headers.get('x-fresh-metadata', 'false')):
            copy_headers_into(source_resp, sink_req)
            copy_headers_into(req, sink_req)

        # copy over x-static-large-object for POSTs and manifest copies
        if 'X-Static-Large-Object' in source_resp.headers and \
                req.params.get('multipart-manifest') == 'get':
            sink_req.headers['X-Static-Large-Object'] = \
                source_resp.headers['X-Static-Large-Object']

        req = sink_req

    if 'x-delete-at' in req.headers:
        try:
            x_delete_at = normalize_delete_at_timestamp(
                int(req.headers['x-delete-at']))
            if int(x_delete_at) < time.time():
                return HTTPBadRequest(
                    body='X-Delete-At in past', request=req,
                    content_type='text/plain')
        except ValueError:
            return HTTPBadRequest(request=req, content_type='text/plain',
                                  body='Non-integer X-Delete-At')
        req.environ.setdefault('swift.log_info', []).append(
            'x-delete-at:%s' % x_delete_at)
        delete_at_container = normalize_delete_at_timestamp(
            int(x_delete_at) /
            self.app.expiring_objects_container_divisor *
            self.app.expiring_objects_container_divisor)
        delete_at_part, delete_at_nodes = \
            self.app.container_ring.get_nodes(
                self.app.expiring_objects_account, delete_at_container)
    else:
        delete_at_container = delete_at_part = delete_at_nodes = None

    node_iter = GreenthreadSafeIterator(
        self.iter_nodes_local_first(obj_ring, partition))
    pile = GreenPile(len(nodes))
    te = req.headers.get('transfer-encoding', '')
    chunked = ('chunked' in te)

    outgoing_headers = self._backend_requests(
        req, len(nodes), container_partition, containers,
        delete_at_container, delete_at_part, delete_at_nodes)

    for nheaders in outgoing_headers:
        # RFC2616:8.2.3 disallows 100-continue without a body
        if (req.content_length > 0) or chunked:
            nheaders['Expect'] = '100-continue'
        pile.spawn(self._connect_put_node, node_iter, partition,
                   req.swift_entity_path, nheaders,
                   self.app.logger.thread_locals)

    conns = [conn for conn in pile if conn]
    min_conns = quorum_size(len(nodes))

    if req.if_none_match is not None and '*' in req.if_none_match:
        statuses = [conn.resp.status for conn in conns if conn.resp]
        if HTTP_PRECONDITION_FAILED in statuses:
            # If we find any copy of the file, it shouldn't be uploaded
            self.app.logger.debug(
                _('Object PUT returning 412, %(statuses)r'),
                {'statuses': statuses})
            return HTTPPreconditionFailed(request=req)

    if len(conns) < min_conns:
        self.app.logger.error(
            _('Object PUT returning 503, %(conns)s/%(nodes)s '
              'required connections'),
            {'conns': len(conns), 'nodes': min_conns})
        return HTTPServiceUnavailable(request=req)
    bytes_transferred = 0
    try:
        with ContextPool(len(nodes)) as pool:
            for conn in conns:
                conn.failed = False
                conn.queue = Queue(self.app.put_queue_depth)
                pool.spawn(self._send_file, conn, req.path)
            while True:
                with ChunkReadTimeout(self.app.client_timeout):
                    try:
                        chunk = next(data_source)
                    except StopIteration:
                        if chunked:
                            for conn in conns:
                                conn.queue.put('0\r\n\r\n')
                        break
                bytes_transferred += len(chunk)
                if bytes_transferred > constraints.MAX_FILE_SIZE:
                    return HTTPRequestEntityTooLarge(request=req)
                for conn in list(conns):
                    if not conn.failed:
                        conn.queue.put(
                            '%x\r\n%s\r\n' % (len(chunk), chunk)
                            if chunked else chunk)
                    else:
                        conns.remove(conn)
                if len(conns) < min_conns:
                    self.app.logger.error(_(
                        'Object PUT exceptions during'
                        ' send, %(conns)s/%(nodes)s required connections'),
                        {'conns': len(conns), 'nodes': min_conns})
                    return HTTPServiceUnavailable(request=req)
            for conn in conns:
                if conn.queue.unfinished_tasks:
                    conn.queue.join()
        conns = [conn for conn in conns if not conn.failed]
    except ChunkReadTimeout as err:
        self.app.logger.warn(
            _('ERROR Client read timeout (%ss)'), err.seconds)
        self.app.logger.increment('client_timeouts')
        return HTTPRequestTimeout(request=req)
    except (Exception, Timeout):
        self.app.logger.exception(
            _('ERROR Exception causing client disconnect'))
        return HTTPClientDisconnect(request=req)
    if req.content_length and bytes_transferred < req.content_length:
        req.client_disconnect = True
        self.app.logger.warn(
            _('Client disconnected without sending enough data'))
        self.app.logger.increment('client_disconnects')
        return HTTPClientDisconnect(request=req)

    statuses, reasons, bodies, etags = self._get_put_responses(req, conns,
                                                               nodes)

    if len(etags) > 1:
        self.app.logger.error(
            _('Object servers returned %s mismatched etags'), len(etags))
        return HTTPServerError(request=req)
    etag = etags.pop() if len(etags) else None
    resp = self.best_response(req, statuses, reasons, bodies,
                              _('Object PUT'), etag=etag)
    if source_header:
        resp.headers['X-Copied-From'] = quote(
            source_header.split('/', 3)[3])
        if 'last-modified' in source_resp.headers:
            resp.headers['X-Copied-From-Last-Modified'] = \
                source_resp.headers['last-modified']
        copy_headers_into(req, resp)
    resp.last_modified = math.ceil(
        float(Timestamp(req.headers['X-Timestamp'])))
    return resp
def POST(self, req):
    """HTTP POST request handler."""
    if 'x-delete-after' in req.headers:
        try:
            x_delete_after = int(req.headers['x-delete-after'])
        except ValueError:
            return HTTPBadRequest(request=req,
                                  content_type='text/plain',
                                  body='Non-integer X-Delete-After')
        req.headers['x-delete-at'] = normalize_delete_at_timestamp(
            time.time() + x_delete_after)
    if self.app.object_post_as_copy:
        req.method = 'PUT'
        req.path_info = '/v1/%s/%s/%s' % (
            self.account_name, self.container_name, self.object_name)
        req.headers['Content-Length'] = 0
        req.headers['X-Copy-From'] = quote('/%s/%s' % (self.container_name,
                                                       self.object_name))
        req.headers['X-Fresh-Metadata'] = 'true'
        req.environ['swift_versioned_copy'] = True
        if req.environ.get('QUERY_STRING'):
            req.environ['QUERY_STRING'] += '&multipart-manifest=get'
        else:
            req.environ['QUERY_STRING'] = 'multipart-manifest=get'
        resp = self.PUT(req)
        # Older editions returned 202 Accepted on object POSTs, so we'll
        # convert any 201 Created responses to that for compatibility with
        # picky clients.
        if resp.status_int != HTTP_CREATED:
            return resp
        return HTTPAccepted(request=req)
    else:
        error_response = check_metadata(req, 'object')
        if error_response:
            return error_response
        container_info = self.container_info(
            self.account_name, self.container_name, req)
        container_partition = container_info['partition']
        containers = container_info['nodes']
        req.acl = container_info['write_acl']
        if 'swift.authorize' in req.environ:
            aresp = req.environ['swift.authorize'](req)
            if aresp:
                return aresp
        if not containers:
            return HTTPNotFound(request=req)
        if 'x-delete-at' in req.headers:
            try:
                x_delete_at = normalize_delete_at_timestamp(
                    int(req.headers['x-delete-at']))
                if int(x_delete_at) < time.time():
                    return HTTPBadRequest(
                        body='X-Delete-At in past', request=req,
                        content_type='text/plain')
            except ValueError:
                return HTTPBadRequest(request=req,
                                      content_type='text/plain',
                                      body='Non-integer X-Delete-At')
            req.environ.setdefault('swift.log_info', []).append(
                'x-delete-at:%s' % x_delete_at)
            delete_at_container = normalize_delete_at_timestamp(
                int(x_delete_at) /
                self.app.expiring_objects_container_divisor *
                self.app.expiring_objects_container_divisor)
            delete_at_part, delete_at_nodes = \
                self.app.container_ring.get_nodes(
                    self.app.expiring_objects_account, delete_at_container)
        else:
            delete_at_container = delete_at_part = delete_at_nodes = None

        # pass the policy index to storage nodes via req header
        policy_index = req.headers.get('X-Backend-Storage-Policy-Index',
                                       container_info['storage_policy'])
        obj_ring = self.app.get_object_ring(policy_index)
        req.headers['X-Backend-Storage-Policy-Index'] = policy_index
        partition, nodes = obj_ring.get_nodes(
            self.account_name, self.container_name, self.object_name)
        req.headers['X-Timestamp'] = Timestamp(time.time()).internal

        headers = self._backend_requests(
            req, len(nodes), container_partition, containers,
            delete_at_container, delete_at_part, delete_at_nodes)
        resp = self.make_requests(req, obj_ring, partition,
                                  'POST', req.swift_entity_path, headers)
        return resp
def delete_at_update(self, op, delete_at, account, container, obj,
                     request, objdevice):
    """
    Update the expiring objects container when objects are updated.

    :param op: operation performed (ex: 'PUT', or 'DELETE')
    :param delete_at: scheduled delete in UNIX seconds, int
    :param account: account name for the object
    :param container: container name for the object
    :param obj: object name
    :param request: the original request driving the update
    :param objdevice: device name that the object is in
    """
    if config_true_value(
            request.headers.get("x-backend-replication", "f")):
        return
    delete_at = normalize_delete_at_timestamp(delete_at)
    updates = [(None, None)]

    partition = None
    hosts = contdevices = [None]
    headers_in = request.headers
    headers_out = HeaderKeyDict({
        "x-timestamp": headers_in["x-timestamp"],
        "x-trans-id": headers_in.get("x-trans-id", "-"),
        "referer": request.as_referer(),
    })
    if op != "DELETE":
        delete_at_container = headers_in.get("X-Delete-At-Container", None)
        if not delete_at_container:
            self.logger.warning(
                "X-Delete-At-Container header must be specified for "
                "expiring objects background %s to work properly. Making "
                "best guess as to the container name for now." % op)
            # TODO(gholt): In a future release, change the above warning to
            # a raised exception and remove the guess code below.
            delete_at_container = (
                int(delete_at) / self.expiring_objects_container_divisor *
                self.expiring_objects_container_divisor)
        partition = headers_in.get("X-Delete-At-Partition", None)
        hosts = headers_in.get("X-Delete-At-Host", "")
        contdevices = headers_in.get("X-Delete-At-Device", "")
        updates = [
            upd for upd in zip((h.strip() for h in hosts.split(",")),
                               (c.strip() for c in contdevices.split(",")))
            if all(upd) and partition
        ]
        if not updates:
            updates = [(None, None)]
        headers_out["x-size"] = "0"
        headers_out["x-content-type"] = "text/plain"
        headers_out["x-etag"] = "d41d8cd98f00b204e9800998ecf8427e"
    else:
        # DELETEs of old expiration data have no way of knowing what the
        # old X-Delete-At-Container was at the time of the initial setting
        # of the data, so a best guess is made here.
        # Worst case is a DELETE is issued now for something that doesn't
        # exist there and the original data is left where it is, where
        # it will be ignored when the expirer eventually tries to issue the
        # object DELETE later since the X-Delete-At value won't match up.
        delete_at_container = str(
            int(delete_at) / self.expiring_objects_container_divisor *
            self.expiring_objects_container_divisor)
    delete_at_container = normalize_delete_at_timestamp(delete_at_container)

    for host, contdevice in updates:
        self.async_update(
            op, self.expiring_objects_account, delete_at_container,
            "%s-%s/%s/%s" % (delete_at, account, container, obj),
            host, partition, contdevice, headers_out, objdevice)
def delete_at_update(self, op, delete_at, account, container, obj,
                     request, objdevice, policy):
    """
    Update the expiring objects container when objects are updated.

    :param op: operation performed (ex: 'PUT', or 'DELETE')
    :param delete_at: scheduled delete in UNIX seconds, int
    :param account: account name for the object
    :param container: container name for the object
    :param obj: object name
    :param request: the original request driving the update
    :param objdevice: device name that the object is in
    :param policy: the BaseStoragePolicy instance (used for tmp dir)
    """
    if config_true_value(
            request.headers.get('x-backend-replication', 'f')):
        return
    delete_at = normalize_delete_at_timestamp(delete_at)
    updates = [(None, None)]

    partition = None
    hosts = contdevices = [None]
    headers_in = request.headers
    headers_out = HeaderKeyDict({
        # system accounts are always Policy-0
        'X-Backend-Storage-Policy-Index': 0,
        'x-timestamp': request.timestamp.internal,
        'x-trans-id': headers_in.get('x-trans-id', '-'),
        'referer': request.as_referer()})
    if op != 'DELETE':
        delete_at_container = headers_in.get('X-Delete-At-Container', None)
        if not delete_at_container:
            self.logger.warning(
                'X-Delete-At-Container header must be specified for '
                'expiring objects background %s to work properly. Making '
                'best guess as to the container name for now.' % op)
            # TODO(gholt): In a future release, change the above warning to
            # a raised exception and remove the guess code below.
            delete_at_container = get_expirer_container(
                delete_at, self.expiring_objects_container_divisor,
                account, container, obj)
        partition = headers_in.get('X-Delete-At-Partition', None)
        hosts = headers_in.get('X-Delete-At-Host', '')
        contdevices = headers_in.get('X-Delete-At-Device', '')
        updates = [upd for upd in
                   zip((h.strip() for h in hosts.split(',')),
                       (c.strip() for c in contdevices.split(',')))
                   if all(upd) and partition]
        if not updates:
            updates = [(None, None)]
        headers_out['x-size'] = '0'
        headers_out['x-content-type'] = 'text/plain'
        headers_out['x-etag'] = 'd41d8cd98f00b204e9800998ecf8427e'
    else:
        # DELETEs of old expiration data have no way of knowing what the
        # old X-Delete-At-Container was at the time of the initial setting
        # of the data, so a best guess is made here.
        # Worst case is a DELETE is issued now for something that doesn't
        # exist there and the original data is left where it is, where
        # it will be ignored when the expirer eventually tries to issue the
        # object DELETE later since the X-Delete-At value won't match up.
        delete_at_container = get_expirer_container(
            delete_at, self.expiring_objects_container_divisor,
            account, container, obj)
    delete_at_container = normalize_delete_at_timestamp(delete_at_container)

    for host, contdevice in updates:
        self.async_update(
            op, self.expiring_objects_account, delete_at_container,
            '%s-%s/%s/%s' % (delete_at, account, container, obj),
            host, partition, contdevice, headers_out, objdevice,
            policy)
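The container name guess above comes from get_expirer_container, which is not shown in this collection. The sketch below reconstructs its behaviour from memory, under the assumption that it rounds the delete-at time down to the configured divisor and subtracts a small per-object shard so expirer work spreads across roughly 100 containers; the name get_expirer_container_sketch and the exact details are illustrative, not the library's definitive code:

from swift.common.utils import hash_path, normalize_delete_at_timestamp

def get_expirer_container_sketch(x_delete_at, expirer_divisor,
                                 acct, cont, obj):
    # Shard by a stable per-object value so all objects expiring in the
    # same window do not land in a single expiring-objects container.
    shard_int = int(hash_path(acct, cont, obj), 16) % 100
    return normalize_delete_at_timestamp(
        int(x_delete_at) // expirer_divisor * expirer_divisor - shard_int)

# e.g. with the default divisor of 86400, an X-Delete-At of 1518611752
# maps to a container named at or just below '1518566400'.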
def PUT(self, req):
    """HTTP PUT request handler."""
    if req.if_none_match is not None and '*' not in req.if_none_match:
        # Sending an etag with if-none-match isn't currently supported
        return HTTPBadRequest(request=req, content_type='text/plain',
                              body='If-None-Match only supports *')
    container_info = self.container_info(
        self.account_name, self.container_name, req)
    container_partition = container_info['partition']
    containers = container_info['nodes']
    req.acl = container_info['write_acl']
    req.environ['swift_sync_key'] = container_info['sync_key']
    object_versions = container_info['versions']
    if 'swift.authorize' in req.environ:
        aresp = req.environ['swift.authorize'](req)
        if aresp:
            return aresp
    if not containers:
        return HTTPNotFound(request=req)
    try:
        ml = req.message_length()
    except ValueError as e:
        return HTTPBadRequest(request=req, content_type='text/plain',
                              body=str(e))
    except AttributeError as e:
        return HTTPNotImplemented(request=req, content_type='text/plain',
                                  body=str(e))
    if ml is not None and ml > MAX_FILE_SIZE:
        return HTTPRequestEntityTooLarge(request=req)
    if 'x-delete-after' in req.headers:
        try:
            x_delete_after = int(req.headers['x-delete-after'])
        except ValueError:
            return HTTPBadRequest(request=req,
                                  content_type='text/plain',
                                  body='Non-integer X-Delete-After')
        req.headers['x-delete-at'] = normalize_delete_at_timestamp(
            time.time() + x_delete_after)
    partition, nodes = self.app.object_ring.get_nodes(
        self.account_name, self.container_name, self.object_name)

    #### CHANGED CODE ####
    d = dict()
    logging.info("===Original nodes===%s", str(nodes))
    temp_nodes = []
    flag = 0
    f = open("/home/swift/spindowndevices", "r")
    sdlist = f.read().strip().split("\n")
    logging.info("===Spun down devices===:%s", str(sdlist))
    f.close()

    sddict = dict()
    for i in sdlist[1:]:
        if(i.split(":")[0] in sddict):
            sddict[i.split(":")[0]].append(i.split(":")[1])
        else:
            sddict[i.split(":")[0]] = []
            sddict[i.split(":")[0]].append(i.split(":")[1])

    upnodes = []
    for item in nodes:
        if(item['ip'] not in sddict):
            upnodes.append(item)
        else:
            if(item['device'] not in sddict[item['ip']]):
                upnodes.append(item)
    logging.info("===SDICT==%s", str(sddict))
    logging.info("====UPNODES===%s", str(upnodes))
    downnodes = [item for item in nodes
                 if item['ip'] in sddict and
                 item['device'] in sddict[item['ip']]]
    temp_nodes = upnodes
    if(len(downnodes) > 0):
        d = ast.literal_eval(open("/home/swift/nodes.txt", "r").read())
        # d_temp=pickle.load("/home/hduser/swift/proxy/controllers/nodes.p","rb")
        # print("===Current dict===:",d)
        for item in downnodes:
            if(partition in d and item not in d[partition]):
                d[partition].append(item)
                # print("===Modified dict===:",d)
            else:
                d[partition] = [item]
                # print("===Modified dict===:",d)
        fo = open("/home/swift/nodes.txt", "w")
        fo.write(str(d) + "\n")
        fo.close()

    # Code to spin up a device if none are running already.
    # if(len(upnodes) == 0):
    #     dev = nodes[0]['device']
    #     f = open("/home/swift/spindowndevices","r")
    #     s = f.read()
    #     os.system("mount /dev/"+str(dev))

    logging.info('===In controller PUT===:')
    logging.info("===Partition===%s", str(partition))
    nodes = temp_nodes
    logging.info("===Final Nodes===:%s", str(nodes))
    check_ssd()
    #### CHANGED CODE ####

    # do a HEAD request for container sync and checking object versions
    if 'x-timestamp' in req.headers or \
            (object_versions and not
             req.environ.get('swift_versioned_copy')):
        hreq = Request.blank(req.path_info, headers={'X-Newest': 'True'},
                             environ={'REQUEST_METHOD': 'HEAD'})
        hresp = self.GETorHEAD_base(
            hreq, _('Object'), self.app.object_ring, partition,
            hreq.swift_entity_path)
    # Used by container sync feature
    if 'x-timestamp' in req.headers:
        try:
            req.headers['X-Timestamp'] = \
                normalize_timestamp(req.headers['x-timestamp'])
            if hresp.environ and 'swift_x_timestamp' in hresp.environ and \
                    float(hresp.environ['swift_x_timestamp']) >= \
                    float(req.headers['x-timestamp']):
                return HTTPAccepted(request=req)
        except ValueError:
            return HTTPBadRequest(
                request=req, content_type='text/plain',
                body='X-Timestamp should be a UNIX timestamp float value; '
                     'was %r' % req.headers['x-timestamp'])
    else:
        req.headers['X-Timestamp'] = normalize_timestamp(time.time())

    # Sometimes the 'content-type' header exists, but is set to None.
    content_type_manually_set = True
    detect_content_type = \
        config_true_value(req.headers.get('x-detect-content-type'))
    if detect_content_type or not req.headers.get('content-type'):
        guessed_type, _junk = mimetypes.guess_type(req.path_info)
        req.headers['Content-Type'] = guessed_type or \
            'application/octet-stream'
        if detect_content_type:
            req.headers.pop('x-detect-content-type')
    else:
        content_type_manually_set = False

    error_response = check_object_creation(req, self.object_name) or \
        check_content_type(req)
    if error_response:
        return error_response
    if object_versions and not req.environ.get('swift_versioned_copy'):
        if hresp.status_int != HTTP_NOT_FOUND:
            # This is a version manifest and needs to be handled
            # differently. First copy the existing data to a new object,
            # then write the data from this request to the version
            # manifest object.
            lcontainer = object_versions.split('/')[0]
            prefix_len = '%03x' % len(self.object_name)
            lprefix = prefix_len + self.object_name + '/'
            ts_source = hresp.environ.get('swift_x_timestamp')
            if ts_source is None:
                ts_source = time.mktime(time.strptime(
                    hresp.headers['last-modified'],
                    '%a, %d %b %Y %H:%M:%S GMT'))
            new_ts = normalize_timestamp(ts_source)
            vers_obj_name = lprefix + new_ts
            copy_headers = {
                'Destination': '%s/%s' % (lcontainer, vers_obj_name)}
            copy_environ = {'REQUEST_METHOD': 'COPY',
                            'swift_versioned_copy': True
                            }
            copy_req = Request.blank(req.path_info, headers=copy_headers,
                                     environ=copy_environ)
            copy_resp = self.COPY(copy_req)
            if is_client_error(copy_resp.status_int):
                # missing container or bad permissions
                return HTTPPreconditionFailed(request=req)
            elif not is_success(copy_resp.status_int):
                # could not copy the data, bail
                return HTTPServiceUnavailable(request=req)

    reader = req.environ['wsgi.input'].read
    data_source = iter(lambda: reader(self.app.client_chunk_size), '')
    source_header = req.headers.get('X-Copy-From')
    source_resp = None
    if source_header:
        if req.environ.get('swift.orig_req_method', req.method) != 'POST':
            req.environ.setdefault('swift.log_info', []).append(
                'x-copy-from:%s' % source_header)
        src_container_name, src_obj_name = check_copy_from_header(req)
        ver, acct, _rest = req.split_path(2, 3, True)
        if isinstance(acct, unicode):
            acct = acct.encode('utf-8')
        source_header = '/%s/%s/%s/%s' % (ver, acct,
                                          src_container_name, src_obj_name)
        source_req = req.copy_get()
        source_req.path_info = source_header
        source_req.headers['X-Newest'] = 'true'
        orig_obj_name = self.object_name
        orig_container_name = self.container_name
        self.object_name = src_obj_name
        self.container_name = src_container_name
        sink_req = Request.blank(req.path_info,
                                 environ=req.environ, headers=req.headers)
        source_resp = self.GET(source_req)

        # This gives middlewares a way to change the source; for example,
        # this lets you COPY a SLO manifest and have the new object be the
        # concatenation of the segments (like what a GET request gives
        # the client), not a copy of the manifest file.
        hook = req.environ.get(
            'swift.copy_hook',
            (lambda source_req, source_resp, sink_req: source_resp))
        source_resp = hook(source_req, source_resp, sink_req)

        if source_resp.status_int >= HTTP_MULTIPLE_CHOICES:
            return source_resp
        self.object_name = orig_obj_name
        self.container_name = orig_container_name
        data_source = iter(source_resp.app_iter)
        sink_req.content_length = source_resp.content_length
        if sink_req.content_length is None:
            # This indicates a transfer-encoding: chunked source object,
            # which currently only happens because there are more than
            # CONTAINER_LISTING_LIMIT segments in a segmented object. In
            # this case, we're going to refuse to do the server-side copy.
            return HTTPRequestEntityTooLarge(request=req)
        if sink_req.content_length > MAX_FILE_SIZE:
            return HTTPRequestEntityTooLarge(request=req)
        sink_req.etag = source_resp.etag

        # we no longer need the X-Copy-From header
        del sink_req.headers['X-Copy-From']
        if not content_type_manually_set:
            sink_req.headers['Content-Type'] = \
                source_resp.headers['Content-Type']
        if not config_true_value(
                sink_req.headers.get('x-fresh-metadata', 'false')):
            copy_headers_into(source_resp, sink_req)
            copy_headers_into(req, sink_req)

        # copy over x-static-large-object for POSTs and manifest copies
        if 'X-Static-Large-Object' in source_resp.headers and \
                req.params.get('multipart-manifest') == 'get':
            sink_req.headers['X-Static-Large-Object'] = \
                source_resp.headers['X-Static-Large-Object']

        req = sink_req

    if 'x-delete-at' in req.headers:
        try:
            x_delete_at = normalize_delete_at_timestamp(
                int(req.headers['x-delete-at']))
            if int(x_delete_at) < time.time():
                return HTTPBadRequest(
                    body='X-Delete-At in past', request=req,
                    content_type='text/plain')
        except ValueError:
            return HTTPBadRequest(request=req, content_type='text/plain',
                                  body='Non-integer X-Delete-At')
        req.environ.setdefault('swift.log_info', []).append(
            'x-delete-at:%s' % x_delete_at)
        delete_at_container = normalize_delete_at_timestamp(
            int(x_delete_at) /
            self.app.expiring_objects_container_divisor *
            self.app.expiring_objects_container_divisor)
        delete_at_part, delete_at_nodes = \
            self.app.container_ring.get_nodes(
                self.app.expiring_objects_account, delete_at_container)
    else:
        delete_at_container = delete_at_part = delete_at_nodes = None

    node_iter = GreenthreadSafeIterator(
        self.iter_nodes_local_first(self.app.object_ring, partition))
    pile = GreenPile(len(nodes))
    te = req.headers.get('transfer-encoding', '')
    chunked = ('chunked' in te)

    outgoing_headers = self._backend_requests(
        req, len(nodes), container_partition, containers,
        delete_at_container, delete_at_part, delete_at_nodes)

    for nheaders in outgoing_headers:
        # RFC2616:8.2.3 disallows 100-continue without a body
        if (req.content_length > 0) or chunked:
            nheaders['Expect'] = '100-continue'
        ########################### CHANGED_CODE ###########################
        # Replaced node_iter by nodes in the following line to make sure
        # that a new list with different order isnt used.
        # Change from node_iter to nodes to make sure it writes to the same
        # device.
        # Without this, it gets a new list of nodes from the ring in a
        # different order and connects to the first one.
        pile.spawn(self._connect_put_node, nodes, partition,
                   req.swift_entity_path, nheaders,
                   self.app.logger.thread_locals)
        ########################### CHANGED_CODE ###########################

    conns = [conn for conn in pile if conn]
    min_conns = quorum_size(len(nodes))

    if req.if_none_match is not None and '*' in req.if_none_match:
        statuses = [conn.resp.status for conn in conns if conn.resp]
        if HTTP_PRECONDITION_FAILED in statuses:
            # If we find any copy of the file, it shouldn't be uploaded
            self.app.logger.debug(
                _('Object PUT returning 412, %(statuses)r'),
                {'statuses': statuses})
            return HTTPPreconditionFailed(request=req)

    if len(conns) < min_conns:
        self.app.logger.error(
            _('Object PUT returning 503, %(conns)s/%(nodes)s '
              'required connections'),
            {'conns': len(conns), 'nodes': min_conns})
        return HTTPServiceUnavailable(request=req)
    bytes_transferred = 0

    #### CHANGED CODE ####
    key = hash_path(self.account_name, self.container_name,
                    self.object_name)
    os.system("mkdir -p /SSD/" + str(partition) + "/" + str(key[-3:]) +
              "/" + str(key))
    f = open("/SSD/" + str(partition) + "/" + str(key[-3:]) + "/" +
             str(key) + "/" + str(self.object_name), "w")
    ####

    try:
        with ContextPool(len(nodes)) as pool:
            for conn in conns:
                conn.failed = False
                conn.queue = Queue(self.app.put_queue_depth)
                pool.spawn(self._send_file, conn, req.path)
            while True:
                with ChunkReadTimeout(self.app.client_timeout):
                    try:
                        chunk = next(data_source)
                        #### CHANGED CODE ####
                        f.write(chunk)
                        f.close()
                        #######
                    except StopIteration:
                        if chunked:
                            for conn in conns:
                                conn.queue.put('0\r\n\r\n')
                        break
                bytes_transferred += len(chunk)
                if bytes_transferred > MAX_FILE_SIZE:
                    return HTTPRequestEntityTooLarge(request=req)
                for conn in list(conns):
                    if not conn.failed:
                        conn.queue.put(
                            '%x\r\n%s\r\n' % (len(chunk), chunk)
                            if chunked else chunk)
                    else:
                        conns.remove(conn)
                if len(conns) < min_conns:
                    self.app.logger.error(_(
                        'Object PUT exceptions during'
                        ' send, %(conns)s/%(nodes)s required connections'),
                        {'conns': len(conns), 'nodes': min_conns})
                    return HTTPServiceUnavailable(request=req)
            for conn in conns:
                if conn.queue.unfinished_tasks:
                    conn.queue.join()
        conns = [conn for conn in conns if not conn.failed]
    except ChunkReadTimeout as err:
        self.app.logger.warn(
            _('ERROR Client read timeout (%ss)'), err.seconds)
        self.app.logger.increment('client_timeouts')
        return HTTPRequestTimeout(request=req)
    except (Exception, Timeout):
        self.app.logger.exception(
            _('ERROR Exception causing client disconnect'))
        return HTTPClientDisconnect(request=req)
    if req.content_length and bytes_transferred < req.content_length:
        req.client_disconnect = True
        self.app.logger.warn(
            _('Client disconnected without sending enough data'))
        self.app.logger.increment('client_disconnects')
        return HTTPClientDisconnect(request=req)

    statuses, reasons, bodies, etags = self._get_put_responses(req, conns,
                                                               nodes)

    if len(etags) > 1:
        self.app.logger.error(
            _('Object servers returned %s mismatched etags'), len(etags))
        return HTTPServerError(request=req)
    etag = etags.pop() if len(etags) else None
    resp = self.best_response(req, statuses, reasons, bodies,
                              _('Object PUT'), etag=etag)
    if source_header:
        resp.headers['X-Copied-From'] = quote(
            source_header.split('/', 3)[3])
        if 'last-modified' in source_resp.headers:
            resp.headers['X-Copied-From-Last-Modified'] = \
                source_resp.headers['last-modified']
        copy_headers_into(req, resp)
    resp.last_modified = math.ceil(float(req.headers['X-Timestamp']))
    return resp