Example #1
 def PUT(self, req):
     """Handle HTTP PUT request."""
     drive, part, account, container = split_and_validate_path(req, 3, 4)
     if self.mount_check and not check_mount(self.root, drive):
         return HTTPInsufficientStorage(drive=drive, request=req)
     if container:   # put account container
         pending_timeout = None
         if 'x-trans-id' in req.headers:
             pending_timeout = 3
         broker = self._get_account_broker(drive, part, account,
                                           pending_timeout=pending_timeout)
         if account.startswith(self.auto_create_account_prefix) and \
                 not os.path.exists(broker.db_file):
             try:
                 broker.initialize(normalize_timestamp(
                     req.headers.get('x-timestamp') or time.time()))
             except DatabaseAlreadyExists:
                 pass
         if req.headers.get('x-account-override-deleted', 'no').lower() != \
                 'yes' and broker.is_deleted():
             return HTTPNotFound(request=req)
         broker.put_container(container, req.headers['x-put-timestamp'],
                              req.headers['x-delete-timestamp'],
                              req.headers['x-object-count'],
                              req.headers['x-bytes-used'])
         if req.headers['x-delete-timestamp'] > \
                 req.headers['x-put-timestamp']:
             return HTTPNoContent(request=req)
         else:
             return HTTPCreated(request=req)
     else:   # put account
         broker = self._get_account_broker(drive, part, account)
         timestamp = normalize_timestamp(req.headers['x-timestamp'])
         if not os.path.exists(broker.db_file):
             try:
                 broker.initialize(timestamp)
                 created = True
             except DatabaseAlreadyExists:
                 created = False
         elif broker.is_status_deleted():
             return self._deleted_response(broker, req, HTTPForbidden,
                                           body='Recently deleted')
         else:
             created = broker.is_deleted()
             broker.update_put_timestamp(timestamp)
             if broker.is_deleted():
                 return HTTPConflict(request=req)
         metadata = {}
         metadata.update((key, (value, timestamp))
                         for key, value in req.headers.iteritems()
                         if key.lower().startswith('x-account-meta-'))
         if metadata:
             broker.update_metadata(metadata)
         if created:
             return HTTPCreated(request=req)
         else:
             return HTTPAccepted(request=req)
Example #2
 def PUT(self, req):
     """Handle HTTP PUT request."""
     drive, part, account, container, obj = split_and_validate_path(
         req, 4, 5, True)
     if 'x-timestamp' not in req.headers or \
             not check_float(req.headers['x-timestamp']):
         return HTTPBadRequest(body='Missing timestamp', request=req,
                               content_type='text/plain')
     if 'x-container-sync-to' in req.headers:
         err, sync_to, realm, realm_key = validate_sync_to(
             req.headers['x-container-sync-to'], self.allowed_sync_hosts,
             self.realms_conf)
         if err:
             return HTTPBadRequest(err)
     if self.mount_check and not check_mount(self.root, drive):
         return HTTPInsufficientStorage(drive=drive, request=req)
     timestamp = normalize_timestamp(req.headers['x-timestamp'])
     broker = self._get_container_broker(drive, part, account, container)
     if obj:     # put container object
         if account.startswith(self.auto_create_account_prefix) and \
                 not os.path.exists(broker.db_file):
             try:
                 broker.initialize(timestamp)
             except DatabaseAlreadyExists:
                 pass
         if not os.path.exists(broker.db_file):
             return HTTPNotFound()
         broker.put_object(obj, timestamp, int(req.headers['x-size']),
                           req.headers['x-content-type'],
                           req.headers['x-etag'])
         return HTTPCreated(request=req)
     else:   # put container
         created = self._update_or_create(req, broker, timestamp)
         metadata = {}
         metadata.update(
             (key, (value, timestamp))
             for key, value in req.headers.iteritems()
             if key.lower() in self.save_headers or
             is_sys_or_user_meta('container', key))
         if metadata:
             if 'X-Container-Sync-To' in metadata:
                 if 'X-Container-Sync-To' not in broker.metadata or \
                         metadata['X-Container-Sync-To'][0] != \
                         broker.metadata['X-Container-Sync-To'][0]:
                     broker.set_x_container_sync_points(-1, -1)
             broker.update_metadata(metadata)
         resp = self.account_update(req, account, container, broker)
         if resp:
             return resp
         if created:
             return HTTPCreated(request=req)
         else:
             return HTTPAccepted(request=req)
Example #3
 def __call__(self, env, start_response):
     # note: no read from wsgi.input
     req = Request(env)
     env['swift.callback.update_footers'](req.headers)
     call_headers.append(req.headers)
     resp = HTTPCreated(req=req, headers={'Etag': 'response etag'})
     return resp(env, start_response)
Example #4
    def remove_old_chunks(self, env, chunks_diff, container):
        error = False
        self.app.logger.info('StackSync API: remove old chunks: container: %s',
                             str(container))
        for chunk_name in chunks_diff:

            env_aux = env.copy()
            new_path = "/v1/" + env[
                'stacksync_user_account'] + "/" + container + "/" + str(
                    chunk_name)
            del env_aux['HTTP_STACKSYNC_API']
            seg_req = make_pre_authed_request(env_aux,
                                              method='DELETE',
                                              path=new_path,
                                              agent=str(container))

            seg_resp = seg_req.get_response(self.app)

            if not is_valid_status(seg_resp.status_int):
                self.app.logger.error(
                    'StackSync API: remove_old_chunks: error deleting old chunk %s',
                    str(chunk_name))
                error = True
                break

        if error:
            self.app.logger.error(
                'StackSync API: remove_old_chunks: status: %s description: Error deleting old chunks from storage backend',
                seg_resp.status)
            response = create_error_response(
                500, "Error deleting old chunks from storage backend")
        else:
            response = HTTPCreated()

        return response
Example #5
 def __call__(self, env, start_response):
     if env['REQUEST_METHOD'] == 'GET':
         if self.status == 200:
             start_response(Response().status,
                            [('Content-Type', 'text/xml')])
             json_pattern = [
                 '"name":%s', '"last_modified":%s', '"hash":%s',
                 '"bytes":%s'
             ]
             json_pattern = '{' + ','.join(json_pattern) + '}'
             json_out = []
             for b in self.objects:
                 name = simplejson.dumps(b[0])
                 time = simplejson.dumps(b[1])
                 json_out.append(json_pattern % (name, time, b[2], b[3]))
             account_list = '[' + ','.join(json_out) + ']'
             return account_list
         elif self.status == 401:
             start_response(HTTPUnauthorized().status, [])
         elif self.status == 403:
             start_response(HTTPForbidden().status, [])
         elif self.status == 404:
             start_response(HTTPNotFound().status, [])
         else:
             start_response(HTTPBadRequest().status, [])
     elif env['REQUEST_METHOD'] == 'PUT':
         if self.status == 201:
             start_response(HTTPCreated().status, [])
         elif self.status == 401:
             start_response(HTTPUnauthorized().status, [])
         elif self.status == 403:
             start_response(HTTPForbidden().status, [])
         elif self.status == 202:
             start_response(HTTPAccepted().status, [])
         else:
             start_response(HTTPBadRequest().status, [])
     elif env['REQUEST_METHOD'] == 'POST':
         if self.status == 204:
             start_response(HTTPNoContent().status, [])
         else:
             start_response(HTTPBadRequest().status, [])
     elif env['REQUEST_METHOD'] == 'DELETE':
         if self.status == 204:
             start_response(HTTPNoContent().status, [])
         elif self.status == 401:
             start_response(HTTPUnauthorized().status, [])
         elif self.status == 403:
             start_response(HTTPForbidden().status, [])
         elif self.status == 404:
             start_response(HTTPNotFound().status, [])
         elif self.status == 409:
             start_response(HTTPConflict().status, [])
         else:
             start_response(HTTPBadRequest().status, [])
     return []
Example #6
 def get_container_create_resp(self, req, headers):
     properties, system = self.properties_from_headers(headers)
     # TODO container update metadata
     oio_headers = {'X-oio-req-id': self.trans_id}
     created = self.app.storage.container_create(
         self.account_name, self.container_name,
         properties=properties, system=system,
         headers=oio_headers)
     if created:
         return HTTPCreated(request=req)
     else:
         return HTTPNoContent(request=req)
Example #7
 def __call__(self, env, start_response):
     req = Request(env)
     if env['REQUEST_METHOD'] == 'GET' or env['REQUEST_METHOD'] == 'HEAD':
         if self.status == 200:
             if 'HTTP_RANGE' in env:
                 resp = Response(request=req,
                                 body=self.object_body,
                                 conditional_response=True)
                 return resp(env, start_response)
             start_response(
                 Response(request=req).status,
                 self.response_headers.items())
             if env['REQUEST_METHOD'] == 'GET':
                 return self.object_body
         elif self.status == 401:
             start_response(HTTPUnauthorized(request=req).status, [])
         elif self.status == 403:
             start_response(HTTPForbidden(request=req).status, [])
         elif self.status == 404:
             start_response(HTTPNotFound(request=req).status, [])
         else:
             start_response(HTTPBadRequest(request=req).status, [])
     elif env['REQUEST_METHOD'] == 'PUT':
         if self.status == 201:
             start_response(
                 HTTPCreated(request=req).status,
                 [('etag', self.response_headers['etag'])])
         elif self.status == 401:
             start_response(HTTPUnauthorized(request=req).status, [])
         elif self.status == 403:
             start_response(HTTPForbidden(request=req).status, [])
         elif self.status == 404:
             start_response(HTTPNotFound(request=req).status, [])
         elif self.status == 413:
             start_response(
                 HTTPRequestEntityTooLarge(request=req).status, [])
         else:
             start_response(HTTPBadRequest(request=req).status, [])
     elif env['REQUEST_METHOD'] == 'DELETE':
         if self.status == 204:
             start_response(HTTPNoContent(request=req).status, [])
         elif self.status == 401:
             start_response(HTTPUnauthorized(request=req).status, [])
         elif self.status == 403:
             start_response(HTTPForbidden(request=req).status, [])
         elif self.status == 404:
             start_response(HTTPNotFound(request=req).status, [])
         else:
             start_response(HTTPBadRequest(request=req).status, [])
     return []
Example #8
    def get_account_put_resp(self, req, headers):
        created = self.app.storage.account_create(self.account_name)
        metadata = {}
        metadata.update((key, value)
                        for key, value in req.headers.items()
                        if is_sys_or_user_meta('account', key))

        if metadata:
            self.app.storage.account_update(self.account_name, metadata)

        if created:
            resp = HTTPCreated(request=req)
        else:
            resp = HTTPAccepted(request=req)
        return resp
Example #9
    def handle_put_access_key(self, req, access_key):
        """Create auth details of access key.

        Required headers:
         - `x-s3auth-secret-key`: secret key to store
         - `x-s3auth-account`: account to store
         - `x-s3auth-admin-key`: admin key
        """
        secret_key = req.headers.get('x-s3auth-secret-key')
        account = req.headers.get('x-s3auth-account')
        if not (secret_key and account):
            return HTTPBadRequest(
                body='x-s3auth-secret-key and x-s3auth-account '
                'headers required',
                request=req)
        self._set_details(req, access_key, secret_key, account)
        return HTTPCreated(request=req)
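
The handler above stores S3 credentials taken from request headers and answers 201 on success. A minimal client-side sketch of such a PUT, assuming the middleware is reachable at a hypothetical URL and using `requests` purely for illustration (only the header names and the 201 status come from the example):

    import requests  # any HTTP client would do; used here for brevity

    # Hypothetical endpoint and access key; only the header names are from the handler.
    resp = requests.put(
        'http://127.0.0.1:8080/auth/v2/access-keys/AKIAEXAMPLE',
        headers={
            'x-s3auth-secret-key': 'secret',    # required by the handler
            'x-s3auth-account': 'AUTH_test',    # required by the handler
            'x-s3auth-admin-key': 'admin-key',  # admin credential named in the docstring
        })
    assert resp.status_code == 201  # handle_put_access_key returns HTTPCreated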
Example #10
    def get_account_put_resp(self, req, headers):
        oio_headers = {'X-oio-req-id': self.trans_id}
        created = self.app.storage.account_create(
            self.account_name, headers=oio_headers)
        metadata = {}
        metadata.update((key, value)
                        for key, value in req.headers.items()
                        if is_sys_or_user_meta('account', key))

        if metadata:
            self.app.storage.account_set_properties(
                self.account_name, metadata, headers=oio_headers)

        if created:
            resp = HTTPCreated(request=req)
        else:
            resp = HTTPAccepted(request=req)
        return resp
Example #11
    def upload_file_chunks(self, env, chunked_file, container):
        error = False
        self.app.logger.info(
            'StackSync API: upload_file_chunks: container: %s', str(container))
        upload_chunks = []
        for i in range(len(chunked_file.chunks)):
            chunk_name = chunked_file.name_list[i]
            chunk_content = chunked_file.chunks[i]

            env_aux = env.copy()
            new_path = "/v1/" + env[
                'stacksync_user_account'] + "/" + container + "/" + chunk_name
            del env_aux['HTTP_STACKSYNC_API']
            seg_req = make_pre_authed_request(env_aux,
                                              method='PUT',
                                              path=new_path,
                                              body=chunk_content,
                                              agent=str(container))

            seg_resp = seg_req.get_response(self.app)

            if not is_valid_status(seg_resp.status_int):
                self.app.logger.error(
                    'StackSync API: upload_file_chunks: error uploading chunk %s',
                    chunk_name)
                error = True
                break
            upload_chunks.append(chunk_name)

        if error:
            self.app.logger.error(
                'StackSync API: upload_file_chunks: status: %s description: Error uploading chunks to storage backend',
                seg_resp.status)
            response = create_error_response(
                500, "Error uploading chunks to storage backend")
            self.remove_chunks(env, upload_chunks, container)

        else:
            response = HTTPCreated()

        return response
Example #12
 def get_container_create_resp(self, req, headers):
     properties, system = self.properties_from_headers(headers)
     # Save the name of the S3 bucket in a container property.
     # This will be used when aggregating container statistics
     # to make bucket statistics.
     bname = self.container_name
     if bname.endswith(MULTIUPLOAD_SUFFIX):
         bname = bname[:-len(MULTIUPLOAD_SUFFIX)]
     system[BUCKET_NAME_PROP] = bname
     # TODO container update metadata
     oio_headers = {REQID_HEADER: self.trans_id}
     oio_cache = req.environ.get('oio.cache')
     perfdata = req.environ.get('oio.perfdata')
     created = self.app.storage.container_create(
         self.account_name, self.container_name,
         properties=properties, system=system,
         headers=oio_headers, cache=oio_cache, perfdata=perfdata)
     if created:
         return HTTPCreated(request=req)
     else:
         return HTTPNoContent(request=req)
Example #13
 def PUT(self, req):
     """Handle HTTP PUT request."""
     drive, part, account, container = split_and_validate_path(req, 3, 4)
     try:
         check_drive(self.root, drive, self.mount_check)
     except ValueError:
         return HTTPInsufficientStorage(drive=drive, request=req)
     if not self.check_free_space(drive):
         return HTTPInsufficientStorage(drive=drive, request=req)
     if container:  # put account container
         if 'x-timestamp' not in req.headers:
             timestamp = Timestamp.now()
         else:
             timestamp = valid_timestamp(req)
         pending_timeout = None
         container_policy_index = \
             req.headers.get('X-Backend-Storage-Policy-Index', 0)
         if 'x-trans-id' in req.headers:
             pending_timeout = 3
         broker = self._get_account_broker(drive,
                                           part,
                                           account,
                                           pending_timeout=pending_timeout)
         if account.startswith(self.auto_create_account_prefix) and \
                 not os.path.exists(broker.db_file):
             try:
                 broker.initialize(timestamp.internal)
             except DatabaseAlreadyExists:
                 pass
         if req.headers.get('x-account-override-deleted', 'no').lower() != \
                 'yes' and broker.is_deleted():
             return HTTPNotFound(request=req)
         broker.put_container(container, req.headers['x-put-timestamp'],
                              req.headers['x-delete-timestamp'],
                              req.headers['x-object-count'],
                              req.headers['x-bytes-used'],
                              container_policy_index)
         if req.headers['x-delete-timestamp'] > \
                 req.headers['x-put-timestamp']:
             return HTTPNoContent(request=req)
         else:
             return HTTPCreated(request=req)
     else:  # put account
         timestamp = valid_timestamp(req)
         broker = self._get_account_broker(drive, part, account)
         if not os.path.exists(broker.db_file):
             try:
                 broker.initialize(timestamp.internal)
                 created = True
             except DatabaseAlreadyExists:
                 created = False
         elif broker.is_status_deleted():
             return self._deleted_response(broker,
                                           req,
                                           HTTPForbidden,
                                           body='Recently deleted')
         else:
             created = broker.is_deleted()
             broker.update_put_timestamp(timestamp.internal)
             if broker.is_deleted():
                 return HTTPConflict(request=req)
         self._update_metadata(req, broker, timestamp)
         if created:
             return HTTPCreated(request=req)
         else:
             return HTTPAccepted(request=req)
Example #14
         broker.initialize(normalize_timestamp(
             req.headers.get('x-timestamp') or time.time()))
     if req.headers.get('x-account-override-deleted', 'no').lower() != \
             'yes' and broker.is_deleted():
         self.logger.timing_since('PUT.timing', start_time)
         return HTTPNotFound(request=req)
     broker.put_container(container, req.headers['x-put-timestamp'],
                          req.headers['x-delete-timestamp'],
                          req.headers['x-object-count'],
                          req.headers['x-bytes-used'])
     self.logger.timing_since('PUT.timing', start_time)
     if req.headers['x-delete-timestamp'] > \
             req.headers['x-put-timestamp']:
         return HTTPNoContent(request=req)
     else:
         return HTTPCreated(request=req)
 else:   # put account
     timestamp = normalize_timestamp(req.headers['x-timestamp'])
     if not os.path.exists(broker.db_file):
         broker.initialize(timestamp)
         created = True
     elif broker.is_status_deleted():
         self.logger.timing_since('PUT.timing', start_time)
         return HTTPForbidden(request=req, body='Recently deleted')
     else:
         created = broker.is_deleted()
         broker.update_put_timestamp(timestamp)
         if broker.is_deleted():
             self.logger.increment('PUT.errors')
             return HTTPConflict(request=req)
     metadata = {}
Example #15
    def _link_object(self, req):
        _, container, obj = req.headers['Oio-Copy-From'].split('/', 2)

        from_account = req.headers.get('X-Copy-From-Account',
                                       self.account_name)
        self.app.logger.info("LINK (%s,%s,%s) TO (%s,%s,%s)", from_account,
                             self.container_name, self.object_name,
                             self.account_name, container, obj)
        storage = self.app.storage

        if req.headers.get('Range'):
            raise Exception("Fast Copy with Range is unsupported")

            ranges = ranges_from_http_header(req.headers.get('Range'))
            if len(ranges) != 1:
                raise HTTPInternalServerError(
                    request=req, body="multiple ranges unsupported")
            ranges = ranges[0]
        else:
            ranges = None

        headers = self._prepare_headers(req)
        metadata = self.load_object_metadata(headers)
        oio_headers = {'X-oio-req-id': self.trans_id}
        # FIXME(FVE): use object_show, cache in req.environ
        props = storage.object_get_properties(from_account, container, obj)
        if props['properties'].get(SLO, None):
            raise Exception("Fast Copy with SLO is unsupported")

            if ranges is None:
                raise HTTPInternalServerError(request=req,
                                              body="LINK a MPU requires range")
            self.app.logger.debug("LINK, original object is a SLO")

            # retrieve manifest
            _, data = storage.object_fetch(from_account, container, obj)
            manifest = json.loads("".join(data))
            offset = 0
            # identify segment to copy
            for entry in manifest:
                if (ranges[0] == offset
                        and ranges[1] + 1 == offset + entry['bytes']):
                    _, container, obj = entry['name'].split('/', 2)
                    checksum = entry['hash']
                    self.app.logger.info("LINK SLO (%s,%s,%s) TO (%s,%s,%s)",
                                         from_account, self.container_name,
                                         self.object_name, self.account_name,
                                         container, obj)
                    break
                offset += entry['bytes']
            else:
                raise HTTPInternalServerError(request=req,
                                              body="no segment matching range")
        else:
            checksum = props['hash']
            if ranges:
                raise HTTPInternalServerError(
                    request=req, body="no range supported with single object")

        try:
            # TODO check return code (values ?)
            storage.object_fastcopy(from_account,
                                    container,
                                    obj,
                                    self.account_name,
                                    self.container_name,
                                    self.object_name,
                                    headers=oio_headers,
                                    properties=metadata,
                                    properties_directive='REPLACE')
        # TODO(FVE): this exception catching block has to be refactored
        # TODO check which ones are OK and which make no sense
        except exceptions.Conflict:
            raise HTTPConflict(request=req)
        except exceptions.PreconditionFailed:
            raise HTTPPreconditionFailed(request=req)
        except exceptions.SourceReadError:
            req.client_disconnect = True
            self.app.logger.warning(
                _('Client disconnected without sending last chunk'))
            self.app.logger.increment('client_disconnects')
            raise HTTPClientDisconnect(request=req)
        except exceptions.EtagMismatch:
            return HTTPUnprocessableEntity(request=req)
        except (exceptions.ServiceBusy, exceptions.OioTimeout):
            raise
        except (exceptions.NoSuchContainer, exceptions.NotFound):
            raise HTTPNotFound(request=req)
        except exceptions.ClientException as err:
            # 481 = CODE_POLICY_NOT_SATISFIABLE
            if err.status == 481:
                raise exceptions.ServiceBusy()
            self.app.logger.exception(
                _('ERROR Exception transferring data %s'), {'path': req.path})
            raise HTTPInternalServerError(request=req)
        except Exception:
            self.app.logger.exception(
                _('ERROR Exception transferring data %s'), {'path': req.path})
            raise HTTPInternalServerError(request=req)

        resp = HTTPCreated(request=req, etag=checksum)
        return resp
Example #16
                    create_obj_req = Request.blank(destination, new_env)
                    resp = create_obj_req.get_response(self.app)
                    if resp.status_int // 100 == 2:
                        success_count += 1
                    else:
                        if resp.status_int == HTTP_UNAUTHORIZED:
                            return HTTPUnauthorized(request=req)
                        failed_files.append([
                            quote(destination[:MAX_PATH_LENGTH]), resp.status
                        ])

            resp_body = get_response_body(
                out_content_type, {'Number Files Created': success_count},
                failed_files)
            if success_count and not failed_files:
                return HTTPCreated(resp_body, content_type=out_content_type)
            if failed_files:
                return HTTPBadGateway(resp_body, content_type=out_content_type)
            return HTTPBadRequest('Invalid Tar File: No Valid Files')

        except tarfile.TarError, tar_error:
            return HTTPBadRequest('Invalid Tar File: %s' % tar_error)

    @wsgify
    def __call__(self, req):
        extract_type = req.params.get('extract-archive')
        if extract_type is not None and req.method == 'PUT':
            archive_type = {
                'tar': '',
                'tar.gz': 'gz',
                'tar.bz2': 'bz2'
Example #17
    def _store_object(self, req, data_source, headers):
        kwargs = {}
        content_type = req.headers.get('content-type', 'octet/stream')
        policy = None
        container_info = self.container_info(self.account_name,
                                             self.container_name, req)
        if 'X-Oio-Storage-Policy' in req.headers:
            policy = req.headers.get('X-Oio-Storage-Policy')
            if not self.app.POLICIES.get_by_name(policy):
                raise HTTPBadRequest(
                    "invalid policy '%s', must be in %s" %
                    (policy, self.app.POLICIES.by_name.keys()))
        else:
            try:
                policy_index = int(
                    req.headers.get('X-Backend-Storage-Policy-Index',
                                    container_info['storage_policy']))
            except TypeError:
                policy_index = 0
            if policy_index != 0:
                policy = self.app.POLICIES.get_by_index(policy_index).name
            else:
                content_length = int(req.headers.get('content-length', 0))
                policy = self._get_auto_policy_from_size(content_length)

        ct_props = {'properties': {}, 'system': {}}
        metadata = self.load_object_metadata(headers)
        oio_headers = {REQID_HEADER: self.trans_id}
        oio_cache = req.environ.get('oio.cache')
        perfdata = req.environ.get('swift.perfdata')
        # only send headers if needed
        if SUPPORT_VERSIONING and headers.get(FORCEVERSIONING_HEADER):
            oio_headers[FORCEVERSIONING_HEADER] = \
                headers.get(FORCEVERSIONING_HEADER)
        if req.environ.get('oio.force-version'):
            # In the case of an MPU, this contains the version of the UploadId
            # so that the version-id of the MPU can be included in the S3 response
            kwargs['version'] = req.environ.get('oio.force-version')

        # In case a shard is being created, save the name of the S3 bucket
        # in a container property. This will be used when aggregating
        # container statistics to make bucket statistics.
        if BUCKET_NAME_HEADER in headers:
            bname = headers[BUCKET_NAME_HEADER]
            # FIXME(FVE): the segments container is not part of another bucket!
            # We should not have to strip this here.
            if bname and bname.endswith(MULTIUPLOAD_SUFFIX):
                bname = bname[:-len(MULTIUPLOAD_SUFFIX)]
            ct_props['system'][BUCKET_NAME_PROP] = bname
        try:
            _chunks, _size, checksum, _meta = self._object_create(
                self.account_name,
                self.container_name,
                obj_name=self.object_name,
                file_or_path=data_source,
                mime_type=content_type,
                policy=policy,
                headers=oio_headers,
                etag=req.headers.get('etag', '').strip('"'),
                properties=metadata,
                container_properties=ct_props,
                cache=oio_cache,
                perfdata=perfdata,
                **kwargs)
            # TODO(FVE): when oio-sds supports it, do that in a callback
            # passed to object_create (or whatever upload method supports it)
            footer_md = self.load_object_metadata(self._get_footers(req))
            if footer_md:
                self.app.storage.object_set_properties(self.account_name,
                                                       self.container_name,
                                                       self.object_name,
                                                       version=_meta.get(
                                                           'version', None),
                                                       properties=footer_md,
                                                       headers=oio_headers,
                                                       cache=oio_cache,
                                                       perfdata=perfdata)
        except exceptions.Conflict:
            raise HTTPConflict(request=req)
        except exceptions.PreconditionFailed:
            raise HTTPPreconditionFailed(request=req)
        except SourceReadTimeout as err:
            self.app.logger.warning(_('ERROR Client read timeout (%s)'), err)
            self.app.logger.increment('client_timeouts')
            raise HTTPRequestTimeout(request=req)
        except exceptions.SourceReadError:
            req.client_disconnect = True
            self.app.logger.warning(
                _('Client disconnected without sending last chunk'))
            self.app.logger.increment('client_disconnects')
            raise HTTPClientDisconnect(request=req)
        except exceptions.EtagMismatch:
            return HTTPUnprocessableEntity(request=req)
        except (exceptions.ServiceBusy, exceptions.OioTimeout,
                exceptions.DeadlineReached):
            raise
        except exceptions.NoSuchContainer:
            raise HTTPNotFound(request=req)
        except exceptions.ClientException as err:
            # 481 = CODE_POLICY_NOT_SATISFIABLE
            if err.status == 481:
                raise exceptions.ServiceBusy()
            self.app.logger.exception(
                _('ERROR Exception transferring data %s'), {'path': req.path})
            raise HTTPInternalServerError(request=req)
        except Exception:
            self.app.logger.exception(
                _('ERROR Exception transferring data %s'), {'path': req.path})
            raise HTTPInternalServerError(request=req)

        last_modified = int(_meta.get('mtime', math.ceil(time.time())))

        # FIXME(FVE): if \x10 character in object name, decode version
        # number and set it in the response headers, instead of the oio
        # version number.
        version_id = _meta.get('version', 'null')
        resp = HTTPCreated(request=req,
                           etag=checksum,
                           last_modified=last_modified,
                           headers={'x-object-sysmeta-version-id': version_id})
        return resp
Example #18
    def PUT(self, req):
        """HTTP PUT request handler."""
        if req.if_none_match is not None and '*' not in req.if_none_match:
            # Sending an etag with if-none-match isn't currently supported
            return HTTPBadRequest(request=req, content_type='text/plain',
                                  body='If-None-Match only supports *')
        if 'swift.authorize' in req.environ:
            aresp = req.environ['swift.authorize'](req)
            if aresp:
                return aresp

        # Sometimes the 'content-type' header exists, but is set to None.
        content_type_manually_set = True
        detect_content_type = \
            config_true_value(req.headers.get('x-detect-content-type'))
        if detect_content_type or not req.headers.get('content-type'):
            guessed_type, _junk = mimetypes.guess_type(req.path_info)
            req.headers['Content-Type'] = guessed_type or \
                                          'application/octet-stream'
            if detect_content_type:
                req.headers.pop('x-detect-content-type')
            else:
                content_type_manually_set = False

        error_response = check_object_creation(req, self.object_name) or \
                         check_content_type(req)
        if error_response:
            return error_response

        req.headers['X-Timestamp'] = Timestamp(time.time()).internal

        stream = req.environ['wsgi.input']
        source_header = req.headers.get('X-Copy-From')
        source_resp = None
        if source_header:
            if req.environ.get('swift.orig_req_method', req.method) != 'POST':
                req.environ.setdefault('swift.log_info', []).append(
                    'x-copy-from:%s' % source_header)
            ver, acct, _rest = req.split_path(2, 3, True)
            src_account_name = req.headers.get('X-Copy-From-Account', None)
            if src_account_name:
                src_account_name = check_account_format(req, src_account_name)
            else:
                src_account_name = acct
            src_container_name, src_obj_name = check_copy_from_header(req)
            source_header = '/%s/%s/%s/%s' % (ver, src_account_name,
                                              src_container_name, src_obj_name)
            source_req = req.copy_get()

            # make sure the source request uses its container_info
            source_req.headers.pop('X-Backend-Storage-Policy-Index', None)
            source_req.path_info = source_header
            source_req.headers['X-Newest'] = 'true'
            orig_obj_name = self.object_name
            orig_container_name = self.container_name
            orig_account_name = self.account_name
            self.object_name = src_obj_name
            self.container_name = src_container_name
            self.account_name = src_account_name
            sink_req = Request.blank(req.path_info,
                                     environ=req.environ, headers=req.headers)
            source_resp = self.GET(source_req)

            # This gives middlewares a way to change the source; for example,
            # this lets you COPY a SLO manifest and have the new object be the
            # concatenation of the segments (like what a GET request gives
            # the client), not a copy of the manifest file.
            hook = req.environ.get(
                'swift.copy_hook',
                (lambda source_req, source_resp, sink_req: source_resp))
            source_resp = hook(source_req, source_resp, sink_req)

            if source_resp.status_int >= HTTP_MULTIPLE_CHOICES:
                return source_resp
            self.object_name = orig_obj_name
            self.container_name = orig_container_name
            self.account_name = orig_account_name
            stream = IterO(source_resp.app_iter)
            sink_req.content_length = source_resp.content_length
            if sink_req.content_length is None:
                # This indicates a transfer-encoding: chunked source object,
                # which currently only happens because there are more than
                # CONTAINER_LISTING_LIMIT segments in a segmented object. In
                # this case, we're going to refuse to do the server-side copy.
                return HTTPRequestEntityTooLarge(request=req)
            if sink_req.content_length > constraints.MAX_FILE_SIZE:
                return HTTPRequestEntityTooLarge(request=req)
            sink_req.etag = source_resp.etag

            # we no longer need the X-Copy-From header
            del sink_req.headers['X-Copy-From']
            if 'X-Copy-From-Account' in sink_req.headers:
                del sink_req.headers['X-Copy-From-Account']
            if not content_type_manually_set:
                sink_req.headers['Content-Type'] = \
                    source_resp.headers['Content-Type']
            if config_true_value(
                    sink_req.headers.get('x-fresh-metadata', 'false')):
                # post-as-copy: ignore new sysmeta, copy existing sysmeta
                condition = lambda k: is_sys_meta('object', k)
                remove_items(sink_req.headers, condition)
                copy_header_subset(source_resp, sink_req, condition)
            else:
                # copy/update existing sysmeta and user meta
                copy_headers_into(source_resp, sink_req)
                copy_headers_into(req, sink_req)

            # copy over x-static-large-object for POSTs and manifest copies
            if 'X-Static-Large-Object' in source_resp.headers and \
                            req.params.get('multipart-manifest') == 'get':
                sink_req.headers['X-Static-Large-Object'] = \
                    source_resp.headers['X-Static-Large-Object']

            req = sink_req

        content_length = req.content_length
        content_type = req.headers.get('content-type', 'octet/stream')
        storage = self.app.storage

        if content_length is None:
            content_length = 0
        try:
            chunks, size, checksum = storage.object_create(
                self.account_name, self.container_name,
                obj_name=self.object_name,
                file_or_path=stream,
                content_length=content_length,
                content_type=content_type)
        except exceptions.NoSuchContainer:
            return HTTPNotFound(request=req)
        except exceptions.ClientReadTimeout:
            return HTTPRequestTimeout(request=req)
        resp = HTTPCreated(request=req, etag=checksum)
        if source_header:
            acct, path = source_header.split('/', 3)[2:4]
            resp.headers['X-Copied-From-Account'] = quote(acct)
            resp.headers['X-Copied-From'] = quote(path)
            if 'last-modified' in source_resp.headers:
                resp.headers['X-Copied-From-Last-Modified'] = \
                    source_resp.headers['last-modified']
            copy_headers_into(req, resp)
        resp.last_modified = math.ceil(
            float(Timestamp(req.headers['X-Timestamp'])))
        return resp
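
The X-Copy-From branch above performs a server-side copy: the proxy issues an internal GET on the source object and re-uploads the body under the destination path, echoing X-Copied-From headers in the 201 response. A hedged client-side sketch of triggering that path (the URL and token are placeholders; only the header names and semantics come from the code above):

    import requests  # illustration only

    # Copy /v1/AUTH_test/src-cont/src-obj onto /v1/AUTH_test/dst-cont/dst-obj.
    resp = requests.put(
        'http://127.0.0.1:8080/v1/AUTH_test/dst-cont/dst-obj',
        headers={
            'X-Auth-Token': 'placeholder-token',
            'X-Copy-From': '/src-cont/src-obj',   # "/<container>/<object>" source path
            'Content-Length': '0',                # the body comes from the source object
            # 'X-Copy-From-Account': 'AUTH_other',  # optional cross-account source
        })
    print(resp.status_code, resp.headers.get('X-Copied-From'))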
Example #19
    def PUT(self, request):
        """Handle HTTP PUT requests for the Swift Object Server."""
        device, partition, account, container, obj, policy_idx = \
            get_name_and_placement(request, 5, 5, True)
        req_timestamp = valid_timestamp(request)
        error_response = check_object_creation(request, obj)
        if error_response:
            return error_response
        new_delete_at = int(request.headers.get('X-Delete-At') or 0)
        if new_delete_at and new_delete_at < time.time():
            return HTTPBadRequest(body='X-Delete-At in past',
                                  request=request,
                                  content_type='text/plain')
        try:
            fsize = request.message_length()
        except ValueError as e:
            return HTTPBadRequest(body=str(e),
                                  request=request,
                                  content_type='text/plain')
        try:
            disk_file = self.get_diskfile(device,
                                          partition,
                                          account,
                                          container,
                                          obj,
                                          policy_idx=policy_idx)
        except DiskFileDeviceUnavailable:
            return HTTPInsufficientStorage(drive=device, request=request)
        try:
            orig_metadata = disk_file.read_metadata()
        except DiskFileXattrNotSupported:
            return HTTPInsufficientStorage(drive=device, request=request)
        except (DiskFileNotExist, DiskFileQuarantined):
            orig_metadata = {}

        # Checks for If-None-Match
        if request.if_none_match is not None and orig_metadata:
            if '*' in request.if_none_match:
                # File exists already so return 412
                return HTTPPreconditionFailed(request=request)
            if orig_metadata.get('ETag') in request.if_none_match:
                # The current ETag matches, so return 412
                return HTTPPreconditionFailed(request=request)

        orig_timestamp = Timestamp(orig_metadata.get('X-Timestamp', 0))
        if orig_timestamp >= req_timestamp:
            return HTTPConflict(
                request=request,
                headers={'X-Backend-Timestamp': orig_timestamp.internal})
        orig_delete_at = int(orig_metadata.get('X-Delete-At') or 0)
        upload_expiration = time.time() + self.max_upload_time
        etag = md5()
        elapsed_time = 0
        try:
            with disk_file.create(size=fsize) as writer:
                upload_size = 0

                def timeout_reader():
                    with ChunkReadTimeout(self.client_timeout):
                        return request.environ['wsgi.input'].read(
                            self.network_chunk_size)

                try:
                    for chunk in iter(lambda: timeout_reader(), ''):
                        start_time = time.time()
                        if start_time > upload_expiration:
                            self.logger.increment('PUT.timeouts')
                            return HTTPRequestTimeout(request=request)
                        etag.update(chunk)
                        upload_size = writer.write(chunk)
                        elapsed_time += time.time() - start_time
                except ChunkReadTimeout:
                    return HTTPRequestTimeout(request=request)
                if upload_size:
                    self.logger.transfer_rate('PUT.' + device + '.timing',
                                              elapsed_time, upload_size)
                if fsize is not None and fsize != upload_size:
                    return HTTPClientDisconnect(request=request)
                etag = etag.hexdigest()
                if 'etag' in request.headers and \
                        request.headers['etag'].lower() != etag:
                    return HTTPUnprocessableEntity(request=request)
                metadata = {
                    'X-Timestamp': request.timestamp.internal,
                    'Content-Type': request.headers['content-type'],
                    'ETag': etag,
                    'Content-Length': str(upload_size),
                }
                metadata.update(val for val in request.headers.iteritems()
                                if is_sys_or_user_meta('object', val[0]))
                headers_to_copy = (request.headers.get(
                    'X-Backend-Replication-Headers', '').split() +
                                   list(self.allowed_headers))
                for header_key in headers_to_copy:
                    if header_key in request.headers:
                        header_caps = header_key.title()
                        metadata[header_caps] = request.headers[header_key]
                writer.put(metadata)
        except (DiskFileXattrNotSupported, DiskFileNoSpace):
            return HTTPInsufficientStorage(drive=device, request=request)
        if orig_delete_at != new_delete_at:
            if new_delete_at:
                self.delete_at_update('PUT', new_delete_at, account, container,
                                      obj, request, device, policy_idx)
            if orig_delete_at:
                self.delete_at_update('DELETE', orig_delete_at, account,
                                      container, obj, request, device,
                                      policy_idx)
        self.container_update(
            'PUT', account, container, obj, request,
            HeaderKeyDict({
                'x-size': metadata['Content-Length'],
                'x-content-type': metadata['Content-Type'],
                'x-timestamp': metadata['X-Timestamp'],
                'x-etag': metadata['ETag']
            }), device, policy_idx)
        return HTTPCreated(request=request, etag=etag)
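
The heart of the PUT above is streaming the request body in chunks while updating an MD5 digest, then rejecting the upload when the client-supplied ETag does not match. A stripped-down sketch of that pattern outside the diskfile machinery (the helper name and defaults are illustrative, not part of the object server):

    from hashlib import md5
    from io import BytesIO

    def read_and_checksum(body_file, expected_etag=None, chunk_size=65536):
        """Read a body in chunks, hashing as it streams; return (data, etag)."""
        digest = md5()
        buf = BytesIO()
        for chunk in iter(lambda: body_file.read(chunk_size), b''):
            digest.update(chunk)
            buf.write(chunk)
        etag = digest.hexdigest()
        if expected_etag is not None and expected_etag.lower() != etag:
            raise ValueError('etag mismatch')  # the object server answers 422 here
        return buf.getvalue(), etag

    # Example: a matching client-supplied ETag passes the check.
    payload = b'hello world'
    data, etag = read_and_checksum(BytesIO(payload), md5(payload).hexdigest())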
Example #20
 def PUT(self, req):
     """Handle HTTP PUT request."""
     drive, part, account, container, obj = split_and_validate_path(
         req, 4, 5, True)
     req_timestamp = valid_timestamp(req)
     if 'x-container-sync-to' in req.headers:
         err, sync_to, realm, realm_key = validate_sync_to(
             req.headers['x-container-sync-to'], self.allowed_sync_hosts,
             self.realms_conf)
         if err:
             return HTTPBadRequest(err)
     if self.mount_check and not check_mount(self.root, drive):
         return HTTPInsufficientStorage(drive=drive, request=req)
     requested_policy_index = self.get_and_validate_policy_index(req)
     broker = self._get_container_broker(drive, part, account, container)
     if obj:  # put container object
         # obj put expects the policy_index header, default is for
         # legacy support during upgrade.
         obj_policy_index = requested_policy_index or 0
         if account.startswith(self.auto_create_account_prefix) and \
                 not os.path.exists(broker.db_file):
             try:
                 broker.initialize(req_timestamp.internal, obj_policy_index)
             except DatabaseAlreadyExists:
                 pass
         if not os.path.exists(broker.db_file):
             return HTTPNotFound()
         broker.put_object(obj, req_timestamp.internal,
                           int(req.headers['x-size']),
                           req.headers['x-content-type'],
                           req.headers['x-etag'], 0, obj_policy_index,
                           req.headers.get('x-content-type-timestamp'),
                           req.headers.get('x-meta-timestamp'))
         return HTTPCreated(request=req)
     else:  # put container
         if requested_policy_index is None:
             # use the default index sent by the proxy if available
             new_container_policy = req.headers.get(
                 'X-Backend-Storage-Policy-Default', int(POLICIES.default))
         else:
             new_container_policy = requested_policy_index
         created = self._update_or_create(req, broker,
                                          req_timestamp.internal,
                                          new_container_policy,
                                          requested_policy_index)
         metadata = {}
         metadata.update((key, (value, req_timestamp.internal))
                         for key, value in req.headers.items()
                         if key.lower() in self.save_headers
                         or is_sys_or_user_meta('container', key))
         if 'X-Container-Sync-To' in metadata:
             if 'X-Container-Sync-To' not in broker.metadata or \
                     metadata['X-Container-Sync-To'][0] != \
                     broker.metadata['X-Container-Sync-To'][0]:
                 broker.set_x_container_sync_points(-1, -1)
         broker.update_metadata(metadata, validate_metadata=True)
         if metadata:
             self._update_sync_store(broker, 'PUT')
         resp = self.account_update(req, account, container, broker)
         if resp:
             return resp
         if created:
             return HTTPCreated(request=req,
                                headers={
                                    'x-backend-storage-policy-index':
                                    broker.storage_policy_index
                                })
         else:
             return HTTPAccepted(request=req,
                                 headers={
                                     'x-backend-storage-policy-index':
                                     broker.storage_policy_index
                                 })
Example #21
    def handle_extract_iter(self, req, compress_type,
                            out_content_type='text/plain'):
        """
        A generator that can be assigned to a swob Response's app_iter which,
        when iterated over, will extract and PUT the objects pulled from the
        request body. Will occasionally yield whitespace while the request is being
        processed. When the request is completed, it will yield a response body
        that can be parsed to determine success. See above documentation for
        details.

        :param req: a swob Request
        :param compress_type: specifies the compression type of the tar.
            Accepts '', 'gz', or 'bz2'
        """
        resp_dict = {'Response Status': HTTPCreated().status,
                     'Response Body': '', 'Number Files Created': 0}
        failed_files = []
        last_yield = time()
        separator = ''
        containers_accessed = set()
        try:
            if not out_content_type:
                raise HTTPNotAcceptable(request=req)
            if out_content_type.endswith('/xml'):
                yield '<?xml version="1.0" encoding="UTF-8"?>\n'

            if req.content_length is None and \
                    req.headers.get('transfer-encoding',
                                    '').lower() != 'chunked':
                raise HTTPLengthRequired(request=req)
            try:
                vrs, account, extract_base = req.split_path(2, 3, True)
            except ValueError:
                raise HTTPNotFound(request=req)
            extract_base = extract_base or ''
            extract_base = extract_base.rstrip('/')
            tar = tarfile.open(mode='r|' + compress_type,
                               fileobj=req.body_file)
            failed_response_type = HTTPBadRequest
            req.environ['eventlet.minimum_write_chunk_size'] = 0
            containers_created = 0
            while True:
                if last_yield + self.yield_frequency < time():
                    separator = '\r\n\r\n'
                    last_yield = time()
                    yield ' '
                tar_info = tar.next()
                if tar_info is None or \
                        len(failed_files) >= self.max_failed_extractions:
                    break
                if tar_info.isfile():
                    obj_path = tar_info.name
                    if obj_path.startswith('./'):
                        obj_path = obj_path[2:]
                    obj_path = obj_path.lstrip('/')
                    if extract_base:
                        obj_path = extract_base + '/' + obj_path
                    if '/' not in obj_path:
                        continue  # ignore base level file

                    destination = '/'.join(
                        ['', vrs, account, obj_path])
                    container = obj_path.split('/', 1)[0]
                    if not check_utf8(destination):
                        failed_files.append(
                            [quote(obj_path[:MAX_PATH_LENGTH]),
                             HTTPPreconditionFailed().status])
                        continue
                    if tar_info.size > MAX_FILE_SIZE:
                        failed_files.append([
                            quote(obj_path[:MAX_PATH_LENGTH]),
                            HTTPRequestEntityTooLarge().status])
                        continue
                    container_failure = None
                    if container not in containers_accessed:
                        cont_path = '/'.join(['', vrs, account, container])
                        try:
                            if self.create_container(req, cont_path):
                                containers_created += 1
                                if containers_created > self.max_containers:
                                    raise HTTPBadRequest(
                                        'More than %d containers to create '
                                        'from tar.' % self.max_containers)
                        except CreateContainerError as err:
                            # the object PUT to this container still may
                            # succeed if acls are set
                            container_failure = [
                                quote(cont_path[:MAX_PATH_LENGTH]),
                                err.status]
                            if err.status_int == HTTP_UNAUTHORIZED:
                                raise HTTPUnauthorized(request=req)
                        except ValueError:
                            failed_files.append([
                                quote(obj_path[:MAX_PATH_LENGTH]),
                                HTTPBadRequest().status])
                            continue

                    tar_file = tar.extractfile(tar_info)
                    new_env = req.environ.copy()
                    new_env['REQUEST_METHOD'] = 'PUT'
                    new_env['wsgi.input'] = tar_file
                    new_env['PATH_INFO'] = destination
                    new_env['CONTENT_LENGTH'] = tar_info.size
                    new_env['swift.source'] = 'EA'
                    new_env['HTTP_USER_AGENT'] = \
                        '%s BulkExpand' % req.environ.get('HTTP_USER_AGENT')
                    create_obj_req = Request.blank(destination, new_env)
                    resp = create_obj_req.get_response(self.app)
                    containers_accessed.add(container)
                    if resp.is_success:
                        resp_dict['Number Files Created'] += 1
                    else:
                        if container_failure:
                            failed_files.append(container_failure)
                        if resp.status_int == HTTP_UNAUTHORIZED:
                            failed_files.append([
                                quote(obj_path[:MAX_PATH_LENGTH]),
                                HTTPUnauthorized().status])
                            raise HTTPUnauthorized(request=req)
                        if resp.status_int // 100 == 5:
                            failed_response_type = HTTPBadGateway
                        failed_files.append([
                            quote(obj_path[:MAX_PATH_LENGTH]), resp.status])

            if failed_files:
                resp_dict['Response Status'] = failed_response_type().status
            elif not resp_dict['Number Files Created']:
                resp_dict['Response Status'] = HTTPBadRequest().status
                resp_dict['Response Body'] = 'Invalid Tar File: No Valid Files'

        except HTTPException as err:
            resp_dict['Response Status'] = err.status
            resp_dict['Response Body'] = err.body
        except (tarfile.TarError, zlib.error) as tar_error:
            resp_dict['Response Status'] = HTTPBadRequest().status
            resp_dict['Response Body'] = 'Invalid Tar File: %s' % tar_error
        except Exception:
            self.logger.exception('Error in extract archive.')
            resp_dict['Response Status'] = HTTPServerError().status

        yield separator + get_response_body(
            out_content_type, resp_dict, failed_files)
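
The loop above relies on a simple keep-alive pattern: yield whitespace while the archive is still being processed, then yield the final, parseable body. A minimal standalone sketch of that pattern (plain Python, no Swift imports; keepalive_iter and do_work are illustrative names, not Swift APIs):

import json
from time import time

def keepalive_iter(work_items, do_work, yield_frequency=10):
    """Yield b' ' at most once per yield_frequency seconds, then a JSON body."""
    last_yield = time()
    results = []
    for item in work_items:
        if last_yield + yield_frequency < time():
            last_yield = time()
            yield b' '  # keeps the client connection from timing out
        results.append(do_work(item))
    yield json.dumps({'Number Files Created': len(results)}).encode('utf-8')

# Usage: assign the generator to a streaming response body, roughly as the
# middleware does with a swob Response's app_iter:
#     resp.app_iter = keepalive_iter(items, process_one)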
Ejemplo n.º 22
0
            self.container_update(
                "PUT",
                account,
                container,
                obj,
                request.headers,
                {
                    "x-size": file.metadata["Content-Length"],
                    "x-content-type": file.metadata["Content-Type"],
                    "x-timestamp": file.metadata["X-Timestamp"],
                    "x-etag": file.metadata["ETag"],
                    "x-trans-id": request.headers.get("x-trans-id", "-"),
                },
                device,
            )
        resp = HTTPCreated(request=request, etag=etag)
        return resp

    @public
    @timing_stats
    def GET(self, request):
        """Handle HTTP GET requests for the Swift Object Server."""
        try:
            device, partition, account, container, obj = split_path(unquote(request.path), 5, 5, True)
            validate_device_partition(device, partition)
        except ValueError as err:
            return HTTPBadRequest(body=str(err), request=request, content_type="text/plain")
        if self.mount_check and not check_mount(self.devices, device):
            return HTTPInsufficientStorage(drive=device, request=request)
        file = DiskFile(
            self.devices,
Ejemplo n.º 23
0
    def handle_extract_iter(self,
                            req,
                            compress_type,
                            out_content_type='text/plain'):
        """
        A generator that can be assigned to a swob Response's app_iter which,
        when iterated over, will extract and PUT the objects pulled from the
        request body. Will occasionally yield whitespace while request is being
        processed. When the request is completed will yield a response body
        that can be parsed to determine success. See above documentation for
        details.

        :params req: a swob Request
        :params compress_type: specifying the compression type of the tar.
            Accepts '', 'gz', or 'bz2'
        """
        resp_dict = {
            'Response Status': HTTPCreated().status,
            'Response Body': '',
            'Number Files Created': 0
        }
        failed_files = []
        last_yield = time()
        separator = ''
        containers_accessed = set()
        try:
            if not out_content_type:
                raise HTTPNotAcceptable(request=req)
            if out_content_type.endswith('/xml'):
                yield '<?xml version="1.0" encoding="UTF-8"?>\n'

            if req.content_length is None and \
                    req.headers.get('transfer-encoding',
                                    '').lower() != 'chunked':
                raise HTTPLengthRequired(request=req)
            try:
                vrs, account, extract_base = req.split_path(2, 3, True)
            except ValueError:
                raise HTTPNotFound(request=req)
            extract_base = extract_base or ''
            extract_base = extract_base.rstrip('/')
            tar = tarfile.open(mode='r|' + compress_type,
                               fileobj=req.body_file)
            failed_response_type = HTTPBadRequest
            req.environ['eventlet.minimum_write_chunk_size'] = 0
            containers_created = 0
            while True:
                if last_yield + self.yield_frequency < time():
                    separator = '\r\n\r\n'
                    last_yield = time()
                    yield ' '
                tar_info = tar.next()
                if tar_info is None or \
                        len(failed_files) >= self.max_failed_extractions:
                    break
                if tar_info.isfile():
                    obj_path = tar_info.name
                    if obj_path.startswith('./'):
                        obj_path = obj_path[2:]
                    obj_path = obj_path.lstrip('/')
                    if extract_base:
                        obj_path = extract_base + '/' + obj_path
                    if '/' not in obj_path:
                        continue  # ignore base level file

                    destination = '/'.join(['', vrs, account, obj_path])
                    container = obj_path.split('/', 1)[0]
                    if not check_utf8(destination):
                        failed_files.append([
                            quote(obj_path[:MAX_PATH_LENGTH]),
                            HTTPPreconditionFailed().status
                        ])
                        continue
                    if tar_info.size > MAX_FILE_SIZE:
                        failed_files.append([
                            quote(obj_path[:MAX_PATH_LENGTH]),
                            HTTPRequestEntityTooLarge().status
                        ])
                        continue
                    container_failure = None
                    if container not in containers_accessed:
                        cont_path = '/'.join(['', vrs, account, container])
                        try:
                            if self.create_container(req, cont_path):
                                containers_created += 1
                                if containers_created > self.max_containers:
                                    raise HTTPBadRequest(
                                        'More than %d containers to create '
                                        'from tar.' % self.max_containers)
                        except CreateContainerError as err:
                            # the object PUT to this container may still
                            # succeed if ACLs are set
                            container_failure = [
                                quote(cont_path[:MAX_PATH_LENGTH]), err.status
                            ]
                            if err.status_int == HTTP_UNAUTHORIZED:
                                raise HTTPUnauthorized(request=req)
                        except ValueError:
                            failed_files.append([
                                quote(obj_path[:MAX_PATH_LENGTH]),
                                HTTPBadRequest().status
                            ])
                            continue
Ejemplo n.º 24
0
    def _store_object(self, req, data_source, headers):
        content_type = req.headers.get('content-type', 'octet/stream')
        storage = self.app.storage
        policy = None
        container_info = self.container_info(self.account_name,
                                             self.container_name, req)
        if 'X-Oio-Storage-Policy' in req.headers:
            policy = req.headers.get('X-Oio-Storage-Policy')
            if not self.app.POLICIES.get_by_name(policy):
                raise HTTPBadRequest(
                    "invalid policy '%s', must be in %s" %
                    (policy, self.app.POLICIES.by_name.keys()))
        else:
            try:
                policy_index = int(
                    req.headers.get('X-Backend-Storage-Policy-Index',
                                    container_info['storage_policy']))
            except TypeError:
                policy_index = 0
            if policy_index != 0:
                policy = self.app.POLICIES.get_by_index(policy_index).name
            else:
                content_length = int(req.headers.get('content-length', 0))
                policy = self._get_auto_policy_from_size(content_length)

        metadata = self.load_object_metadata(headers)
        oio_headers = {'X-oio-req-id': self.trans_id}
        try:
            # FIXME(FVE): use 'properties' instead of 'metadata'
            # as soon as we require oio>=4.2.0
            _chunks, _size, checksum = storage.object_create(
                self.account_name,
                self.container_name,
                obj_name=self.object_name,
                file_or_path=data_source,
                mime_type=content_type,
                policy=policy,
                headers=oio_headers,
                etag=req.headers.get('etag', '').strip('"'),
                metadata=metadata)
            # TODO(FVE): when oio-sds supports it, do that in a callback
            # passed to object_create (or whatever upload method supports it)
            footer_md = self.load_object_metadata(self._get_footers(req))
            if footer_md:
                storage.object_set_properties(self.account_name,
                                              self.container_name,
                                              self.object_name,
                                              properties=footer_md)
        except exceptions.Conflict:
            raise HTTPConflict(request=req)
        except exceptions.PreconditionFailed:
            raise HTTPPreconditionFailed(request=req)
        except (SourceReadTimeout, GreenSourceReadTimeout) as err:
            self.app.logger.warning(_('ERROR Client read timeout (%s)'), err)
            self.app.logger.increment('client_timeouts')
            raise HTTPRequestTimeout(request=req)
        except exceptions.SourceReadError:
            req.client_disconnect = True
            self.app.logger.warning(
                _('Client disconnected without sending last chunk'))
            self.app.logger.increment('client_disconnects')
            raise HTTPClientDisconnect(request=req)
        except exceptions.EtagMismatch:
            return HTTPUnprocessableEntity(request=req)
        except (exceptions.ServiceBusy, exceptions.OioTimeout):
            raise
        # TODO(FVE): exceptions.NotFound is never raised from oio
        # Remove when dependency is oio>=4.2.0
        except (exceptions.NoSuchContainer, exceptions.NotFound):
            raise HTTPNotFound(request=req)
        except exceptions.ClientException as err:
            # 481 = CODE_POLICY_NOT_SATISFIABLE
            if err.status == 481:
                raise exceptions.ServiceBusy()
            self.app.logger.exception(
                _('ERROR Exception transferring data %s'), {'path': req.path})
            raise HTTPInternalServerError(request=req)
        except Exception:
            self.app.logger.exception(
                _('ERROR Exception transferring data %s'), {'path': req.path})
            raise HTTPInternalServerError(request=req)

        # TODO(FVE): use the timestamp of the object.
        # Unfortunately the oio-sds API does not return the actual ctime.
        resp = HTTPCreated(request=req,
                           etag=checksum,
                           last_modified=int(time.time()))
        return resp
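
The policy selection in _store_object follows a three-step fallback: an explicit X-Oio-Storage-Policy header wins, otherwise the backend storage-policy index is used, otherwise the policy is derived from the object size. A dependency-free sketch of that decision order, assuming a toy policy registry (POLICIES and auto_policy_from_size here are illustrative stand-ins, not the oioswift API):

POLICIES = {'SINGLE': 0, 'THREECOPIES': 1}  # hypothetical name -> index map
POLICIES_BY_INDEX = {v: k for k, v in POLICIES.items()}

def auto_policy_from_size(content_length, threshold=1024 * 1024):
    # hypothetical size-based default, mirroring _get_auto_policy_from_size
    return 'SINGLE' if content_length < threshold else 'THREECOPIES'

def choose_policy(headers, container_policy_index=0):
    explicit = headers.get('X-Oio-Storage-Policy')
    if explicit:
        if explicit not in POLICIES:
            raise ValueError("invalid policy %r" % explicit)
        return explicit
    try:
        policy_index = int(headers.get('X-Backend-Storage-Policy-Index',
                                       container_policy_index))
    except TypeError:
        policy_index = 0
    if policy_index != 0:
        return POLICIES_BY_INDEX[policy_index]
    return auto_policy_from_size(int(headers.get('content-length', 0)))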
Ejemplo n.º 25
0
                    request, device)
            if old_delete_at:
                self.delete_at_update(
                    'DELETE', old_delete_at, account, container, obj,
                    request, device)
        if not orig_timestamp or \
                orig_timestamp < request.headers['x-timestamp']:
            self.container_update(
                'PUT', account, container, obj, request,
                HeaderKeyDict({
                    'x-size': disk_file.metadata['Content-Length'],
                    'x-content-type': disk_file.metadata['Content-Type'],
                    'x-timestamp': disk_file.metadata['X-Timestamp'],
                    'x-etag': disk_file.metadata['ETag']}),
                device)
        resp = HTTPCreated(request=request, etag=etag)
        return resp

    @public
    @timing_stats()
    def GET(self, request):
        """Handle HTTP GET requests for the Swift Object Server."""
        try:
            device, partition, account, container, obj = \
                split_path(unquote(request.path), 5, 5, True)
            validate_device_partition(device, partition)
        except ValueError as err:
            return HTTPBadRequest(body=str(err), request=request,
                                  content_type='text/plain')
        if self.mount_check and not check_mount(self.devices, device):
            return HTTPInsufficientStorage(drive=device, request=request)
Ejemplo n.º 26
0
                    self.delete_at_update('DELETE', old_delete_at, account,
                                          container, obj, request.headers,
                                          device)
            file.put(fd, metadata)
        file.unlinkold(metadata['X-Timestamp'])
        if not orig_timestamp or \
                orig_timestamp < request.headers['x-timestamp']:
            self.container_update(
                'PUT', account, container, obj, request.headers, {
                    'x-size': file.metadata['Content-Length'],
                    'x-content-type': file.metadata['Content-Type'],
                    'x-timestamp': file.metadata['X-Timestamp'],
                    'x-etag': file.metadata['ETag'],
                    'x-trans-id': request.headers.get('x-trans-id', '-')
                }, device)
        resp = HTTPCreated(request=request, etag=etag)
        return resp

    @public
    @timing_stats()
    def GET(self, request):
        """Handle HTTP GET requests for the Swift Object Server."""
        try:
            device, partition, account, container, obj = \
                split_path(unquote(request.path), 5, 5, True)
            validate_device_partition(device, partition)
        except ValueError as err:
            return HTTPBadRequest(body=str(err),
                                  request=request,
                                  content_type='text/plain')
        if self.mount_check and not check_mount(self.devices, device):
Ejemplo n.º 27
0
    def PUT(self, request):
        """Handle HTTP PUT requests for the Swift Object Server."""
        device, partition, account, container, obj = \
            split_and_validate_path(request, 5, 5, True)

        if 'x-timestamp' not in request.headers or \
                not check_float(request.headers['x-timestamp']):
            return HTTPBadRequest(body='Missing timestamp',
                                  request=request,
                                  content_type='text/plain')
        error_response = check_object_creation(request, obj)
        if error_response:
            return error_response
        new_delete_at = int(request.headers.get('X-Delete-At') or 0)
        if new_delete_at and new_delete_at < time.time():
            return HTTPBadRequest(body='X-Delete-At in past',
                                  request=request,
                                  content_type='text/plain')
        try:
            fsize = request.message_length()
        except ValueError as e:
            return HTTPBadRequest(body=str(e),
                                  request=request,
                                  content_type='text/plain')
        try:
            disk_file = self._diskfile(device, partition, account, container,
                                       obj)
        except DiskFileDeviceUnavailable:
            return HTTPInsufficientStorage(drive=device, request=request)
        old_delete_at = int(disk_file.metadata.get('X-Delete-At') or 0)
        orig_timestamp = disk_file.metadata.get('X-Timestamp')
        if orig_timestamp and orig_timestamp >= request.headers['x-timestamp']:
            return HTTPConflict(request=request)
        upload_expiration = time.time() + self.max_upload_time
        etag = md5()
        elapsed_time = 0
        try:
            with disk_file.create(size=fsize) as writer:
                reader = request.environ['wsgi.input'].read
                for chunk in iter(lambda: reader(self.network_chunk_size), ''):
                    start_time = time.time()
                    if start_time > upload_expiration:
                        self.logger.increment('PUT.timeouts')
                        return HTTPRequestTimeout(request=request)
                    etag.update(chunk)
                    writer.write(chunk)
                    sleep()
                    elapsed_time += time.time() - start_time
                upload_size = writer.upload_size
                if upload_size:
                    self.logger.transfer_rate('PUT.' + device + '.timing',
                                              elapsed_time, upload_size)
                if fsize is not None and fsize != upload_size:
                    return HTTPClientDisconnect(request=request)
                etag = etag.hexdigest()
                if 'etag' in request.headers and \
                        request.headers['etag'].lower() != etag:
                    return HTTPUnprocessableEntity(request=request)
                metadata = {
                    'X-Timestamp': request.headers['x-timestamp'],
                    'Content-Type': request.headers['content-type'],
                    'ETag': etag,
                    'Content-Length': str(upload_size),
                }
                metadata.update(val for val in request.headers.iteritems()
                                if val[0].lower().startswith('x-object-meta-')
                                and len(val[0]) > 14)
                for header_key in self.allowed_headers:
                    if header_key in request.headers:
                        header_caps = header_key.title()
                        metadata[header_caps] = request.headers[header_key]
                writer.put(metadata)
        except DiskFileNoSpace:
            return HTTPInsufficientStorage(drive=device, request=request)
        if old_delete_at != new_delete_at:
            if new_delete_at:
                self.delete_at_update('PUT', new_delete_at, account, container,
                                      obj, request, device)
            if old_delete_at:
                self.delete_at_update('DELETE', old_delete_at, account,
                                      container, obj, request, device)
        if not orig_timestamp or \
                orig_timestamp < request.headers['x-timestamp']:
            self.container_update(
                'PUT', account, container, obj, request,
                HeaderKeyDict({
                    'x-size':
                    disk_file.metadata['Content-Length'],
                    'x-content-type':
                    disk_file.metadata['Content-Type'],
                    'x-timestamp':
                    disk_file.metadata['X-Timestamp'],
                    'x-etag':
                    disk_file.metadata['ETag']
                }), device)
        resp = HTTPCreated(request=request, etag=etag)
        return resp
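
The PUT above computes the ETag incrementally while streaming the body to disk: read fixed-size chunks, update an MD5 digest, write, and finally compare the hex digest with the client-supplied ETag (a mismatch becomes 422). A standalone sketch of that pattern; stream_put is an illustrative name, not a Swift API:

from hashlib import md5
from io import BytesIO

def stream_put(wsgi_input, writer, chunk_size=65536, client_etag=None):
    etag = md5()
    upload_size = 0
    for chunk in iter(lambda: wsgi_input.read(chunk_size), b''):
        etag.update(chunk)
        writer.write(chunk)
        upload_size += len(chunk)
    digest = etag.hexdigest()
    if client_etag and client_etag.lower() != digest:
        raise ValueError('ETag mismatch')  # the object server answers 422
    return upload_size, digest

# tiny self-check of the pattern
size, digest = stream_put(BytesIO(b'hello'), BytesIO())
assert size == 5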
Ejemplo n.º 28
0
                        request.headers, device)
            file.put(fd, metadata)
        file.unlinkold(metadata['X-Timestamp'])
        if not orig_timestamp or \
                orig_timestamp < request.headers['x-timestamp']:
            #FIXME(vvechkanov): maybe Original-Content-Length
            #                   should be used here
            self.container_update(
                'PUT', account, container, obj, request.headers,
                {'x-size': file.metadata['Content-Length'],
                 'x-content-type': file.metadata['Content-Type'],
                 'x-timestamp': file.metadata['X-Timestamp'],
                 'x-etag': file.metadata['Original-Etag'],
                 'x-trans-id': request.headers.get('x-trans-id', '-')},
                device)
        resp = HTTPCreated(request=request, etag=etag_orig)
        self.logger.timing_since('PUT.timing', start_time)
        return resp

    @public
    @timing_stats()
    def GET(self, request):
        """Handle HTTP GET requests for the Swift Object Server."""
        try:
            device, partition, account, container, obj = \
                split_path(unquote(request.path), 5, 5, True)
            validate_device_partition(device, partition)
        except ValueError as err:
            return HTTPBadRequest(body=str(err), request=request,
                                  content_type='text/plain')
        if self.mount_check and not check_mount(self.devices, device):
Ejemplo n.º 29
0
    def PUT(self, req):
        """Handle HTTP PUT request."""
        drive, part, account, container, obj = get_obj_name_and_placement(req)
        req_timestamp = valid_timestamp(req)
        if 'x-container-sync-to' in req.headers:
            err, sync_to, realm, realm_key = validate_sync_to(
                req.headers['x-container-sync-to'], self.allowed_sync_hosts,
                self.realms_conf)
            if err:
                return HTTPBadRequest(err)
        try:
            check_drive(self.root, drive, self.mount_check)
        except ValueError:
            return HTTPInsufficientStorage(drive=drive, request=req)
        if not self.check_free_space(drive):
            return HTTPInsufficientStorage(drive=drive, request=req)
        requested_policy_index = self.get_and_validate_policy_index(req)
        broker = self._get_container_broker(drive, part, account, container)
        if obj:  # put container object
            # obj put expects the policy_index header, default is for
            # legacy support during upgrade.
            obj_policy_index = requested_policy_index or 0
            self._maybe_autocreate(broker, req_timestamp, account,
                                   obj_policy_index)
            # redirect if a shard exists for this object name
            response = self._redirect_to_shard(req, broker, obj)
            if response:
                return response

            broker.put_object(
                obj, req_timestamp.internal, int(req.headers['x-size']),
                wsgi_to_str(req.headers['x-content-type']),
                wsgi_to_str(req.headers['x-etag']), 0, obj_policy_index,
                wsgi_to_str(req.headers.get('x-content-type-timestamp')),
                wsgi_to_str(req.headers.get('x-meta-timestamp')))
            return HTTPCreated(request=req)

        record_type = req.headers.get('x-backend-record-type', '').lower()
        if record_type == RECORD_TYPE_SHARD:
            try:
                # validate incoming data...
                shard_ranges = [
                    ShardRange.from_dict(sr) for sr in json.loads(req.body)
                ]
            except (ValueError, KeyError, TypeError) as err:
                return HTTPBadRequest('Invalid body: %r' % err)
            created = self._maybe_autocreate(broker, req_timestamp, account,
                                             requested_policy_index)
            self._update_metadata(req, broker, req_timestamp, 'PUT')
            if shard_ranges:
                # TODO: consider writing the shard ranges into the pending
                # file, but if so ensure an all-or-none semantic for the write
                broker.merge_shard_ranges(shard_ranges)
        else:  # put container
            if requested_policy_index is None:
                # use the default index sent by the proxy if available
                new_container_policy = req.headers.get(
                    'X-Backend-Storage-Policy-Default', int(POLICIES.default))
            else:
                new_container_policy = requested_policy_index
            created = self._update_or_create(req, broker,
                                             req_timestamp.internal,
                                             new_container_policy,
                                             requested_policy_index)
            self._update_metadata(req, broker, req_timestamp, 'PUT')
            resp = self.account_update(req, account, container, broker)
            if resp:
                return resp
        if created:
            return HTTPCreated(request=req,
                               headers={
                                   'x-backend-storage-policy-index':
                                   broker.storage_policy_index
                               })
        else:
            return HTTPAccepted(request=req,
                                headers={
                                    'x-backend-storage-policy-index':
                                    broker.storage_policy_index
                                })
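
For shard-range PUTs the handler parses the request body as JSON and rejects anything malformed with 400 before merging. A hedged sketch of just that parse-and-reject step; the real code builds swift ShardRange objects via ShardRange.from_dict, while REQUIRED_KEYS below is only an illustrative stand-in:

import json

REQUIRED_KEYS = ('name', 'timestamp', 'lower', 'upper')  # illustrative only

def parse_shard_ranges(body):
    try:
        return [{k: sr[k] for k in REQUIRED_KEYS} for sr in json.loads(body)]
    except (ValueError, KeyError, TypeError) as err:
        # the container server answers HTTPBadRequest('Invalid body: %r' % err)
        raise ValueError('Invalid body: %r' % err)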
Ejemplo n.º 30
0
 def PUT(self, req):
     """Handle HTTP PUT request."""
     drive, part, account, container = split_and_validate_path(req, 3, 4)
     try:
         check_drive(self.root, drive, self.mount_check)
     except ValueError:
         return HTTPInsufficientStorage(drive=drive, request=req)
     if not self.check_free_space(drive):
         return HTTPInsufficientStorage(drive=drive, request=req)
     if container:  # put account container
         if 'x-timestamp' not in req.headers:
             timestamp = Timestamp.now()
         else:
             timestamp = valid_timestamp(req)
         pending_timeout = None
         container_policy_index = \
             req.headers.get('X-Backend-Storage-Policy-Index', 0)
         if 'x-trans-id' in req.headers:
             pending_timeout = 3
         broker = self._get_account_broker(drive,
                                           part,
                                           account,
                                           pending_timeout=pending_timeout)
         if account.startswith(self.auto_create_account_prefix) and \
                 not os.path.exists(broker.db_file):
             try:
                 broker.initialize(timestamp.internal)
             except DatabaseAlreadyExists:
                 pass
         if req.headers.get('x-account-override-deleted', 'no').lower() != \
                 'yes' and broker.is_deleted():
             return HTTPNotFound(request=req)
         broker.put_container(container, req.headers['x-put-timestamp'],
                              req.headers['x-delete-timestamp'],
                              req.headers['x-object-count'],
                              req.headers['x-bytes-used'],
                              container_policy_index)
         if req.headers['x-delete-timestamp'] > \
                 req.headers['x-put-timestamp']:
             return HTTPNoContent(request=req)
         else:
             return HTTPCreated(request=req)
     else:  # put account
         timestamp = valid_timestamp(req)
         broker = self._get_account_broker(drive, part, account)
         if not os.path.exists(broker.db_file):
             try:
                 broker.initialize(timestamp.internal)
                 created = True
             except DatabaseAlreadyExists:
                 created = False
         elif broker.is_status_deleted():
             return self._deleted_response(broker,
                                           req,
                                           HTTPForbidden,
                                           body='Recently deleted')
         else:
             created = broker.is_deleted()
             broker.update_put_timestamp(timestamp.internal)
             if broker.is_deleted():
                 return HTTPConflict(request=req)
         metadata = {}
         if six.PY2:
             metadata.update((key, (value, timestamp.internal))
                             for key, value in req.headers.items()
                             if is_sys_or_user_meta('account', key))
         else:
             for key, value in req.headers.items():
                 if is_sys_or_user_meta('account', key):
                      # Cast to native strings, so that the JSON encoding
                      # inside update_metadata accepts the data.
                     try:
                         value = value.encode('latin-1').decode('utf-8')
                     except UnicodeDecodeError:
                         raise HTTPBadRequest(
                             'Metadata must be valid UTF-8')
                     metadata[key] = (value, timestamp.internal)
         if metadata:
             broker.update_metadata(metadata, validate_metadata=True)
         if created:
             return HTTPCreated(request=req)
         else:
             return HTTPAccepted(request=req)
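
On Python 3 the account metadata loop re-encodes each WSGI native string as latin-1 and decodes it as UTF-8, rejecting values that are not valid UTF-8. The round-trip in isolation (plain Python; native_to_utf8 is an illustrative name):

def native_to_utf8(value):
    try:
        return value.encode('latin-1').decode('utf-8')
    except UnicodeDecodeError:
        # the account server raises HTTPBadRequest here
        raise ValueError('Metadata must be valid UTF-8')

# native_to_utf8('caf\xc3\xa9') == 'café'; native_to_utf8('\xff') raises.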
Ejemplo n.º 31
0
    def _link_object(self, req):
        _, container, obj = req.headers['Oio-Copy-From'].split('/', 2)

        from_account = req.headers.get('X-Copy-From-Account',
                                       self.account_name)
        self.app.logger.info(
            "Creating link from %s/%s/%s to %s/%s/%s",
            # Existing
            from_account,
            container,
            obj,
            # New
            self.account_name,
            self.container_name,
            self.object_name)
        storage = self.app.storage

        if req.headers.get('Range'):
            raise Exception("Fast Copy with Range is unsupported")

            ranges = ranges_from_http_header(req.headers.get('Range'))
            if len(ranges) != 1:
                raise HTTPInternalServerError(
                    request=req, body="mutiple ranges unsupported")
            ranges = ranges[0]
        else:
            ranges = None

        headers = self._prepare_headers(req)
        metadata = self.load_object_metadata(headers)
        oio_headers = {REQID_HEADER: self.trans_id}
        oio_cache = req.environ.get('oio.cache')
        perfdata = req.environ.get('swift.perfdata')
        # FIXME(FVE): use object_show, cache in req.environ
        version = obj_version_from_env(req.environ)
        props = storage.object_get_properties(from_account,
                                              container,
                                              obj,
                                              headers=oio_headers,
                                              version=version,
                                              cache=oio_cache,
                                              perfdata=perfdata)
        if props['properties'].get(SLO, None):
            raise Exception("Fast Copy with SLO is unsupported")
        else:
            if ranges:
                raise HTTPInternalServerError(
                    request=req, body="no range supported with single object")

        try:
            # TODO check return code (values ?)
            link_meta = storage.object_link(from_account,
                                            container,
                                            obj,
                                            self.account_name,
                                            self.container_name,
                                            self.object_name,
                                            headers=oio_headers,
                                            properties=metadata,
                                            properties_directive='REPLACE',
                                            target_version=version,
                                            cache=oio_cache,
                                            perfdata=perfdata)
        # TODO(FVE): this exception catching block has to be refactored
        # TODO check which ones are OK and which ones make no sense
        except exceptions.Conflict:
            raise HTTPConflict(request=req)
        except exceptions.PreconditionFailed:
            raise HTTPPreconditionFailed(request=req)
        except exceptions.SourceReadError:
            req.client_disconnect = True
            self.app.logger.warning(
                _('Client disconnected without sending last chunk'))
            self.app.logger.increment('client_disconnects')
            raise HTTPClientDisconnect(request=req)
        except exceptions.EtagMismatch:
            return HTTPUnprocessableEntity(request=req)
        except (exceptions.ServiceBusy, exceptions.OioTimeout,
                exceptions.DeadlineReached):
            raise
        except (exceptions.NoSuchContainer, exceptions.NotFound):
            raise HTTPNotFound(request=req)
        except exceptions.ClientException as err:
            # 481 = CODE_POLICY_NOT_SATISFIABLE
            if err.status == 481:
                raise exceptions.ServiceBusy()
            self.app.logger.exception(
                _('ERROR Exception transferring data %s'), {'path': req.path})
            raise HTTPInternalServerError(request=req)
        except Exception:
            self.app.logger.exception(
                _('ERROR Exception transferring data %s'), {'path': req.path})
            raise HTTPInternalServerError(request=req)

        resp = HTTPCreated(request=req, etag=link_meta['hash'])
        return resp
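
_link_object (like _store_object above) converts backend exceptions into HTTP responses: Conflict becomes 409, PreconditionFailed 412, EtagMismatch 422, NoSuchContainer/NotFound 404, and anything unexpected 500. A simplified sketch of that mapping with hypothetical placeholder exception classes (the real ones live in oio's exceptions module):

class Conflict(Exception): pass
class PreconditionFailed(Exception): pass
class EtagMismatch(Exception): pass
class NotFound(Exception): pass

ERROR_STATUS = {
    Conflict: 409,
    PreconditionFailed: 412,
    EtagMismatch: 422,
    NotFound: 404,
}

def status_for(exc, default=500):
    # look up the HTTP status for a caught backend exception
    return ERROR_STATUS.get(type(exc), default)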
Ejemplo n.º 32
0
    def PUT(self, request):
        """Handle HTTP PUT requests for the Swift Object Server."""
        device, partition, account, container, obj, policy = \
            get_name_and_placement(request, 5, 5, True)
        req_timestamp = valid_timestamp(request)
        error_response = check_object_creation(request, obj)
        if error_response:
            return error_response
        new_delete_at = int(request.headers.get('X-Delete-At') or 0)
        if new_delete_at and new_delete_at < time.time():
            return HTTPBadRequest(body='X-Delete-At in past', request=request,
                                  content_type='text/plain')
        try:
            fsize = request.message_length()
        except ValueError as e:
            return HTTPBadRequest(body=str(e), request=request,
                                  content_type='text/plain')

        # In case of multipart-MIME put, the proxy sends a chunked request,
        # but may let us know the real content length so we can verify that
        # we have enough disk space to hold the object.
        if fsize is None:
            fsize = request.headers.get('X-Backend-Obj-Content-Length')
            if fsize is not None:
                try:
                    fsize = int(fsize)
                except ValueError as e:
                    return HTTPBadRequest(body=str(e), request=request,
                                          content_type='text/plain')
        # SSYNC will include a Frag-Index header for subrequests to primary
        # nodes; handoff nodes should return 409 to subrequests that would
        # overwrite an existing data fragment until they have offloaded it
        frag_index = request.headers.get('X-Backend-Ssync-Frag-Index')
        try:
            disk_file = self.get_diskfile(
                device, partition, account, container, obj,
                policy=policy, frag_index=frag_index)
        except DiskFileDeviceUnavailable:
            return HTTPInsufficientStorage(drive=device, request=request)
        try:
            orig_metadata = disk_file.read_metadata()
        except DiskFileXattrNotSupported:
            return HTTPInsufficientStorage(drive=device, request=request)
        except (DiskFileNotExist, DiskFileQuarantined):
            orig_metadata = {}

        # Checks for If-None-Match
        if request.if_none_match is not None and orig_metadata:
            if '*' in request.if_none_match:
                # File exists already so return 412
                return HTTPPreconditionFailed(request=request)
            if orig_metadata.get('ETag') in request.if_none_match:
                # The current ETag matches, so return 412
                return HTTPPreconditionFailed(request=request)

        orig_timestamp = Timestamp(orig_metadata.get('X-Timestamp', 0))
        if orig_timestamp >= req_timestamp:
            return HTTPConflict(
                request=request,
                headers={'X-Backend-Timestamp': orig_timestamp.internal})
        orig_delete_at = int(orig_metadata.get('X-Delete-At') or 0)
        upload_expiration = time.time() + self.max_upload_time
        etag = md5()
        elapsed_time = 0
        try:
            with disk_file.create(size=fsize) as writer:
                upload_size = 0

                # If the proxy wants to send us object metadata after the
                # object body, it sets some headers. We have to tell the
                # proxy, in the 100 Continue response, that we're able to
                # parse a multipart MIME document and extract the object and
                # metadata from it. If we don't, then the proxy won't
                # actually send the footer metadata.
                have_metadata_footer = False
                use_multiphase_commit = False
                mime_documents_iter = iter([])
                obj_input = request.environ['wsgi.input']

                hundred_continue_headers = []
                if config_true_value(
                        request.headers.get(
                            'X-Backend-Obj-Multiphase-Commit')):
                    use_multiphase_commit = True
                    hundred_continue_headers.append(
                        ('X-Obj-Multiphase-Commit', 'yes'))

                if config_true_value(
                        request.headers.get('X-Backend-Obj-Metadata-Footer')):
                    have_metadata_footer = True
                    hundred_continue_headers.append(
                        ('X-Obj-Metadata-Footer', 'yes'))

                if have_metadata_footer or use_multiphase_commit:
                    obj_input.set_hundred_continue_response_headers(
                        hundred_continue_headers)
                    mime_boundary = request.headers.get(
                        'X-Backend-Obj-Multipart-Mime-Boundary')
                    if not mime_boundary:
                        return HTTPBadRequest("no MIME boundary")

                    try:
                        with ChunkReadTimeout(self.client_timeout):
                            mime_documents_iter = iter_mime_headers_and_bodies(
                                request.environ['wsgi.input'],
                                mime_boundary, self.network_chunk_size)
                            _junk_hdrs, obj_input = next(mime_documents_iter)
                    except ChunkReadTimeout:
                        return HTTPRequestTimeout(request=request)

                timeout_reader = self._make_timeout_reader(obj_input)
                try:
                    for chunk in iter(timeout_reader, ''):
                        start_time = time.time()
                        if start_time > upload_expiration:
                            self.logger.increment('PUT.timeouts')
                            return HTTPRequestTimeout(request=request)
                        etag.update(chunk)
                        upload_size = writer.write(chunk)
                        elapsed_time += time.time() - start_time
                except ChunkReadTimeout:
                    return HTTPRequestTimeout(request=request)
                if upload_size:
                    self.logger.transfer_rate(
                        'PUT.' + device + '.timing', elapsed_time,
                        upload_size)
                if fsize is not None and fsize != upload_size:
                    return HTTPClientDisconnect(request=request)

                footer_meta = {}
                if have_metadata_footer:
                    footer_meta = self._read_metadata_footer(
                        mime_documents_iter)

                request_etag = (footer_meta.get('etag') or
                                request.headers.get('etag', '')).lower()
                etag = etag.hexdigest()
                if request_etag and request_etag != etag:
                    return HTTPUnprocessableEntity(request=request)
                metadata = {
                    'X-Timestamp': request.timestamp.internal,
                    'Content-Type': request.headers['content-type'],
                    'ETag': etag,
                    'Content-Length': str(upload_size),
                }
                metadata.update(val for val in request.headers.items()
                                if is_sys_or_user_meta('object', val[0]))
                metadata.update(val for val in footer_meta.items()
                                if is_sys_or_user_meta('object', val[0]))
                headers_to_copy = (
                    request.headers.get(
                        'X-Backend-Replication-Headers', '').split() +
                    list(self.allowed_headers))
                for header_key in headers_to_copy:
                    if header_key in request.headers:
                        header_caps = header_key.title()
                        metadata[header_caps] = request.headers[header_key]
                writer.put(metadata)

                # if the PUT requires a two-phase commit (a data and a commit
                # phase) send the proxy server another 100-continue response
                # to indicate that we are finished writing object data
                if use_multiphase_commit:
                    request.environ['wsgi.input'].\
                        send_hundred_continue_response()
                    if not self._read_put_commit_message(mime_documents_iter):
                        return HTTPServerError(request=request)
                    # got 2nd phase confirmation, write a timestamp.durable
                    # state file to indicate a successful PUT

                writer.commit(request.timestamp)

                # Drain any remaining MIME docs from the socket. There
                # shouldn't be any, but we must read the whole request body.
                try:
                    while True:
                        with ChunkReadTimeout(self.client_timeout):
                            _junk_hdrs, _junk_body = next(mime_documents_iter)
                        drain(_junk_body, self.network_chunk_size,
                              self.client_timeout)
                except ChunkReadTimeout:
                    raise HTTPClientDisconnect()
                except StopIteration:
                    pass

        except (DiskFileXattrNotSupported, DiskFileNoSpace):
            return HTTPInsufficientStorage(drive=device, request=request)
        if orig_delete_at != new_delete_at:
            if new_delete_at:
                self.delete_at_update(
                    'PUT', new_delete_at, account, container, obj, request,
                    device, policy)
            if orig_delete_at:
                self.delete_at_update(
                    'DELETE', orig_delete_at, account, container, obj,
                    request, device, policy)
        update_headers = HeaderKeyDict({
            'x-size': metadata['Content-Length'],
            'x-content-type': metadata['Content-Type'],
            'x-timestamp': metadata['X-Timestamp'],
            'x-etag': metadata['ETag']})
        # apply any container update header overrides sent with request
        self._check_container_override(update_headers, request.headers)
        self._check_container_override(update_headers, footer_meta)
        self.container_update(
            'PUT', account, container, obj, request,
            update_headers,
            device, policy)
        return HTTPCreated(request=request, etag=etag)
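
Note the ETag precedence rule in the PUT above: an ETag received in the MIME metadata footer takes priority over the request header, and a mismatch with the computed digest yields 422. The rule in isolation (standalone sketch; check_etag is an illustrative name):

def check_etag(footer_meta, request_headers, computed_etag):
    request_etag = (footer_meta.get('etag') or
                    request_headers.get('etag', '')).lower()
    return not request_etag or request_etag == computed_etag

# check_etag({}, {'etag': 'ABC'}, 'abc') -> True
# check_etag({'etag': 'def'}, {'etag': 'abc'}, 'abc') -> False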
Ejemplo n.º 33
0
                    self.delete_at_update(
                        'DELETE', old_delete_at, account, container, obj,
                        request.headers, device)
            file.put(fd, tmppath, metadata)
        file.unlinkold(metadata['X-Timestamp'])
        if not orig_timestamp or \
                orig_timestamp < request.headers['x-timestamp']:
            self.container_update(
                'PUT', account, container, obj, request.headers,
                {'x-size': file.metadata['Content-Length'],
                 'x-content-type': file.metadata['Content-Type'],
                 'x-timestamp': file.metadata['X-Timestamp'],
                 'x-etag': file.metadata['ETag'],
                 'x-trans-id': request.headers.get('x-trans-id', '-')},
                device)
        resp = HTTPCreated(request=request, etag=etag)
        self.logger.timing_since('PUT.timing', start_time)
        return resp

    @public
    def GET(self, request):
        """Handle HTTP GET requests for the Swift Object Server."""
        start_time = time.time()
        try:
            device, partition, account, container, obj = \
                split_path(unquote(request.path), 5, 5, True)
            validate_device_partition(device, partition)
        except ValueError as err:
            self.logger.increment('GET.errors')
            return HTTPBadRequest(body=str(err), request=request,
                                  content_type='text/plain')
Ejemplo n.º 34
0
                "PUT",
                account,
                container,
                obj,
                request,
                HeaderKeyDict(
                    {
                        "x-size": disk_file.metadata["Content-Length"],
                        "x-content-type": disk_file.metadata["Content-Type"],
                        "x-timestamp": disk_file.metadata["X-Timestamp"],
                        "x-etag": disk_file.metadata["ETag"],
                    }
                ),
                device,
            )
        resp = HTTPCreated(request=request, etag=etag)
        return resp

    @public
    @timing_stats()
    def GET(self, request):
        """Handle HTTP GET requests for the Swift Object Server."""
        try:
            device, partition, account, container, obj = split_path(unquote(request.path), 5, 5, True)
            validate_device_partition(device, partition)
        except ValueError as err:
            return HTTPBadRequest(body=str(err), request=request, content_type="text/plain")
        if self.mount_check and not check_mount(self.devices, device):
            return HTTPInsufficientStorage(drive=device, request=request)
        disk_file = DiskFile(
            self.devices,
Ejemplo n.º 35
0
    def handle_extract_iter(self,
                            req,
                            compress_type,
                            out_content_type='text/plain'):
        """
        A generator that can be assigned to a swob Response's app_iter which,
        when iterated over, will extract and PUT the objects pulled from the
        request body. Will occasionally yield whitespace while request is being
        processed. When the request is completed will yield a response body
        that can be parsed to determine success. See above documentation for
        details.

        :params req: a swob Request
        :params compress_type: specifying the compression type of the tar.
            Accepts '', 'gz', or 'bz2'
        """
        resp_dict = {
            'Response Status': HTTPCreated().status,
            'Response Body': '',
            'Number Files Created': 0
        }
        failed_files = []
        last_yield = time()
        if out_content_type and out_content_type.endswith('/xml'):
            to_yield = b'<?xml version="1.0" encoding="UTF-8"?>\n'
        else:
            to_yield = b' '
        separator = b''
        containers_accessed = set()
        req.environ['eventlet.minimum_write_chunk_size'] = 0
        try:
            if not out_content_type:
                raise HTTPNotAcceptable(request=req)

            if req.content_length is None and \
                    req.headers.get('transfer-encoding',
                                    '').lower() != 'chunked':
                raise HTTPLengthRequired(request=req)
            try:
                vrs, account, extract_base = req.split_path(2, 3, True)
            except ValueError:
                raise HTTPNotFound(request=req)
            extract_base = extract_base or ''
            extract_base = extract_base.rstrip('/')
            tar = tarfile.open(mode='r|' + compress_type,
                               fileobj=req.body_file)
            failed_response_type = HTTPBadRequest
            containers_created = 0
            while True:
                if last_yield + self.yield_frequency < time():
                    last_yield = time()
                    yield to_yield
                    to_yield, separator = b' ', b'\r\n\r\n'
                tar_info = tar.next()
                if tar_info is None or \
                        len(failed_files) >= self.max_failed_extractions:
                    break
                if tar_info.isfile():
                    obj_path = tar_info.name
                    if not six.PY2:
                        obj_path = obj_path.encode('utf-8', 'surrogateescape')
                    obj_path = bytes_to_wsgi(obj_path)
                    if obj_path.startswith('./'):
                        obj_path = obj_path[2:]
                    obj_path = obj_path.lstrip('/')
                    if extract_base:
                        obj_path = extract_base + '/' + obj_path
                    if '/' not in obj_path:
                        continue  # ignore base level file

                    destination = '/'.join(['', vrs, account, obj_path])
                    container = obj_path.split('/', 1)[0]
                    if not constraints.check_utf8(wsgi_to_str(destination)):
                        failed_files.append([
                            wsgi_quote(obj_path[:self.max_path_length]),
                            HTTPPreconditionFailed().status
                        ])
                        continue
                    if tar_info.size > constraints.MAX_FILE_SIZE:
                        failed_files.append([
                            wsgi_quote(obj_path[:self.max_path_length]),
                            HTTPRequestEntityTooLarge().status
                        ])
                        continue
                    container_failure = None
                    if container not in containers_accessed:
                        cont_path = '/'.join(['', vrs, account, container])
                        try:
                            if self.create_container(req, cont_path):
                                containers_created += 1
                                if containers_created > self.max_containers:
                                    raise HTTPBadRequest(
                                        'More than %d containers to create '
                                        'from tar.' % self.max_containers)
                        except CreateContainerError as err:
                            # the object PUT to this container may still
                            # succeed if ACLs are set
                            container_failure = [
                                wsgi_quote(cont_path[:self.max_path_length]),
                                err.status
                            ]
                            if err.status_int == HTTP_UNAUTHORIZED:
                                raise HTTPUnauthorized(request=req)
                        except ValueError:
                            failed_files.append([
                                wsgi_quote(obj_path[:self.max_path_length]),
                                HTTPBadRequest().status
                            ])
                            continue

                    tar_file = tar.extractfile(tar_info)
                    create_headers = {
                        'Content-Length': tar_info.size,
                        'X-Auth-Token': req.headers.get('X-Auth-Token'),
                    }

                    # Copy some whitelisted headers to the subrequest
                    for k, v in req.headers.items():
                        if ((k.lower() in ('x-delete-at', 'x-delete-after'))
                                or is_user_meta('object', k)):
                            create_headers[k] = v

                    create_obj_req = make_subrequest(
                        req.environ,
                        method='PUT',
                        path=wsgi_quote(destination),
                        headers=create_headers,
                        agent='%(orig)s BulkExpand',
                        swift_source='EA')
                    create_obj_req.environ['wsgi.input'] = tar_file

                    for pax_key, pax_value in tar_info.pax_headers.items():
                        header_name = pax_key_to_swift_header(pax_key)
                        if header_name:
                            # Both pax_key and pax_value are unicode
                            # strings; the key is already UTF-8 encoded, but
                            # we still have to encode the value.
                            create_obj_req.headers[header_name] = \
                                pax_value.encode("utf-8")

                    resp = create_obj_req.get_response(self.app)
                    containers_accessed.add(container)
                    if resp.is_success:
                        resp_dict['Number Files Created'] += 1
                    else:
                        if container_failure:
                            failed_files.append(container_failure)
                        if resp.status_int == HTTP_UNAUTHORIZED:
                            failed_files.append([
                                wsgi_quote(obj_path[:self.max_path_length]),
                                HTTPUnauthorized().status
                            ])
                            raise HTTPUnauthorized(request=req)
                        if resp.status_int // 100 == 5:
                            failed_response_type = HTTPBadGateway
                        failed_files.append([
                            wsgi_quote(obj_path[:self.max_path_length]),
                            resp.status
                        ])

            if failed_files:
                resp_dict['Response Status'] = failed_response_type().status
            elif not resp_dict['Number Files Created']:
                resp_dict['Response Status'] = HTTPBadRequest().status
                resp_dict['Response Body'] = 'Invalid Tar File: No Valid Files'

        except HTTPException as err:
            resp_dict['Response Status'] = err.status
            resp_dict['Response Body'] = err.body.decode('utf-8')
        except (tarfile.TarError, zlib.error) as tar_error:
            resp_dict['Response Status'] = HTTPBadRequest().status
            resp_dict['Response Body'] = 'Invalid Tar File: %s' % tar_error
        except Exception:
            self.logger.exception('Error in extract archive.')
            resp_dict['Response Status'] = HTTPServerError().status

        yield separator + get_response_body(out_content_type, resp_dict,
                                            failed_files, 'extract')
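
Before issuing each object PUT subrequest, the newer handle_extract_iter copies only a whitelist of headers from the bulk request: X-Delete-At, X-Delete-After, and user metadata. A standalone sketch of that filter; is_user_meta here is a simplified stand-in for swift's request helper of the same name:

def is_user_meta(server_type, key):
    # simplified: real swift also requires something after the prefix
    return key.lower().startswith('x-%s-meta-' % server_type)

def whitelist_headers(req_headers):
    copied = {}
    for k, v in req_headers.items():
        if k.lower() in ('x-delete-at', 'x-delete-after') or \
                is_user_meta('object', k):
            copied[k] = v
    return copied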
Ejemplo n.º 36
0
                'Content-Type': listing_content_type,
                'Content-Length': 0
            },
            agent='SwiftOrigin',
            swift_source='SOS').get_response(self.app)

        if cdn_list_resp.status_int // 100 != 2:
            raise OriginDbFailure('Could not PUT/POST to cdn listing in '
                                  'origin db: %s %s' %
                                  (cdn_list_path, cdn_list_resp.status_int))
        # PUTs and POSTs have the headers as HEAD
        cdn_url_headers = self.get_cdn_urls(hsh, 'HEAD')
        if req.method == 'POST':
            resp = HTTPAccepted(request=req, headers=cdn_url_headers)
        else:
            resp = HTTPCreated(request=req, headers=cdn_url_headers)
        resp.extra_log_data = listing_content_type
        return resp

    def handle_request(self, env, req):
        """
        This handles requests from a user to activate cdn access for their
        containers, list them, etc.
        """
        if 'swift.authorize' in req.environ:
            aresp = req.environ['swift.authorize'](req)
            if aresp:
                return aresp
        try:
            if req.method in ('PUT', 'POST'):
                return self.origin_db_puts_posts(env, req)