Example #1
 def GET(self, req):
     """Handle HTTP GET request."""
     drive, part, account, container, obj = split_and_validate_path(
         req, 4, 5, True)
     path = get_param(req, 'path')
     prefix = get_param(req, 'prefix')
     delimiter = get_param(req, 'delimiter')
     if delimiter and (len(delimiter) > 1 or ord(delimiter) > 254):
         # delimiters can be made more flexible later
         return HTTPPreconditionFailed(body='Bad delimiter')
     marker = get_param(req, 'marker', '')
     end_marker = get_param(req, 'end_marker')
     limit = CONTAINER_LISTING_LIMIT
     given_limit = get_param(req, 'limit')
     if given_limit and given_limit.isdigit():
         limit = int(given_limit)
         if limit > CONTAINER_LISTING_LIMIT:
             return HTTPPreconditionFailed(
                 request=req,
                 body='Maximum limit is %d' % CONTAINER_LISTING_LIMIT)
     out_content_type = get_listing_content_type(req)
     if self.mount_check and not check_mount(self.root, drive):
         return HTTPInsufficientStorage(drive=drive, request=req)
     broker = self._get_container_broker(drive, part, account, container,
                                         pending_timeout=0.1,
                                         stale_reads_ok=True)
     if broker.is_deleted():
         return HTTPNotFound(request=req)
     info = broker.get_info()
     resp_headers = {
         'X-Container-Object-Count': info['object_count'],
         'X-Container-Bytes-Used': info['bytes_used'],
         'X-Timestamp': info['created_at'],
         'X-PUT-Timestamp': info['put_timestamp'],
     }
     for key, (value, timestamp) in broker.metadata.iteritems():
         if value and (key.lower() in self.save_headers or
                       key.lower().startswith('x-container-meta-')):
             resp_headers[key] = value
     ret = Response(request=req, headers=resp_headers,
                    content_type=out_content_type, charset='utf-8')
     container_list = broker.list_objects_iter(limit, marker, end_marker,
                                               prefix, delimiter, path)
     if out_content_type == 'application/json':
         ret.body = json.dumps([self.update_data_record(record)
                                for record in container_list])
     elif out_content_type.endswith('/xml'):
         doc = Element('container', name=container.decode('utf-8'))
         for obj in container_list:
             record = self.update_data_record(obj)
             if 'subdir' in record:
                 name = record['subdir'].decode('utf-8')
                 sub = SubElement(doc, 'subdir', name=name)
                 SubElement(sub, 'name').text = name
             else:
                 obj_element = SubElement(doc, 'object')
                 for field in ["name", "hash", "bytes", "content_type",
                               "last_modified"]:
                     SubElement(obj_element, field).text = str(
                         record.pop(field)).decode('utf-8')
                 for field in sorted(record):
                     SubElement(obj_element, field).text = str(
                         record[field]).decode('utf-8')
         ret.body = tostring(doc, encoding='UTF-8').replace(
             "<?xml version='1.0' encoding='UTF-8'?>",
             '<?xml version="1.0" encoding="UTF-8"?>', 1)
     else:
         if not container_list:
             return HTTPNoContent(request=req, headers=resp_headers)
         ret.body = '\n'.join(rec[0] for rec in container_list) + '\n'
     return ret
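
A minimal sketch of the kind of backend request this handler serves, for orientation only: the host, port, device name, partition, and account/container values are assumptions, while the query parameters and the Accept header correspond to the get_param and get_listing_content_type calls above.

import requests  # third-party HTTP client, used purely for illustration

# Path layout is /<device>/<partition>/<account>/<container>, as enforced by
# split_and_validate_path(req, 4, 5, True) above; all concrete values are made up.
resp = requests.get(
    'http://127.0.0.1:6201/sdb1/1234/AUTH_test/photos',
    params={'prefix': '2024/', 'delimiter': '/', 'limit': '100'},
    headers={'Accept': 'application/json'})
print(resp.status_code)                              # 200, or 404 if the DB is deleted
print(resp.headers.get('X-Container-Object-Count'))  # from resp_headers above
print(resp.json())                                   # the application/json branch above
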
Example #2
class ObjectController(object):
    """Implements the WSGI application for the Swift Object Server."""
    def __init__(self, conf):
        """
        Creates a new WSGI application for the Swift Object Server. An
        example configuration is given at
        <source-dir>/etc/object-server.conf-sample or
        /etc/swift/object-server.conf-sample.
        """
        self.logger = get_logger(conf, log_route='object-server')
        self.devices = conf.get('devices', '/srv/node/')
        self.mount_check = conf.get('mount_check', 'true').lower() in \
            TRUE_VALUES
        self.node_timeout = int(conf.get('node_timeout', 3))
        self.conn_timeout = float(conf.get('conn_timeout', 0.5))
        self.disk_chunk_size = int(conf.get('disk_chunk_size', 65536))
        self.network_chunk_size = int(conf.get('network_chunk_size', 65536))
        self.keep_cache_size = int(conf.get('keep_cache_size', 5242880))
        self.keep_cache_private = \
            conf.get('keep_cache_private', 'false').lower() in TRUE_VALUES
        self.log_requests = \
            conf.get('log_requests', 'true').lower() in TRUE_VALUES
        self.max_upload_time = int(conf.get('max_upload_time', 86400))
        self.slow = int(conf.get('slow', 0))
        self.bytes_per_sync = int(conf.get('mb_per_sync', 512)) * 1024 * 1024
        default_allowed_headers = '''
            content-disposition,
            content-encoding,
            x-delete-at,
            x-object-manifest,
        '''
        self.allowed_headers = set(
            i.strip().lower() for i in conf.get(
                'allowed_headers', default_allowed_headers).split(',')
            if i.strip() and i.strip().lower() not in DISALLOWED_HEADERS)
        self.expiring_objects_account = \
            (conf.get('auto_create_account_prefix') or '.') + \
            'expiring_objects'
        self.expiring_objects_container_divisor = \
            int(conf.get('expiring_objects_container_divisor') or 86400)

    def async_update(self, op, account, container, obj, host, partition,
                     contdevice, headers_out, objdevice):
        """
        Sends or saves an async update.

        :param op: operation performed (ex: 'PUT', or 'DELETE')
        :param account: account name for the object
        :param container: container name for the object
        :param obj: object name
        :param host: host that the container is on
        :param partition: partition that the container is on
        :param contdevice: device name that the container is on
        :param headers_out: dictionary of headers to send in the container
                            request
        :param objdevice: device name that the object is in
        """
        full_path = '/%s/%s/%s' % (account, container, obj)
        if all([host, partition, contdevice]):
            try:
                with ConnectionTimeout(self.conn_timeout):
                    ip, port = host.rsplit(':', 1)
                    conn = http_connect(ip, port, contdevice, partition, op,
                                        full_path, headers_out)
                with Timeout(self.node_timeout):
                    response = conn.getresponse()
                    response.read()
                    if is_success(response.status):
                        return
                    else:
                        self.logger.error(
                            _('ERROR Container update failed '
                              '(saving for async update later): %(status)d '
                              'response from %(ip)s:%(port)s/%(dev)s'), {
                                  'status': response.status,
                                  'ip': ip,
                                  'port': port,
                                  'dev': contdevice
                              })
            except (Exception, Timeout):
                self.logger.exception(
                    _('ERROR container update failed with '
                      '%(ip)s:%(port)s/%(dev)s (saving for async update later)'
                      ), {
                          'ip': ip,
                          'port': port,
                          'dev': contdevice
                      })
        async_dir = os.path.join(self.devices, objdevice, ASYNCDIR)
        ohash = hash_path(account, container, obj)
        self.logger.increment('async_pendings')
        write_pickle(
            {
                'op': op,
                'account': account,
                'container': container,
                'obj': obj,
                'headers': headers_out
            },
            os.path.join(
                async_dir, ohash[-3:],
                ohash + '-' + normalize_timestamp(headers_out['x-timestamp'])),
            os.path.join(self.devices, objdevice, 'tmp'))

    def container_update(self, op, account, container, obj, headers_in,
                         headers_out, objdevice):
        """
        Update the container when objects are updated.

        :param op: operation performed (ex: 'PUT', or 'DELETE')
        :param account: account name for the object
        :param container: container name for the object
        :param obj: object name
        :param headers_in: dictionary of headers from the original request
        :param headers_out: dictionary of headers to send in the container
                            request
        :param objdevice: device name that the object is in
        """
        host = headers_in.get('X-Container-Host', None)
        partition = headers_in.get('X-Container-Partition', None)
        contdevice = headers_in.get('X-Container-Device', None)
        if not all([host, partition, contdevice]):
            return
        self.async_update(op, account, container, obj, host, partition,
                          contdevice, headers_out, objdevice)

    def delete_at_update(self, op, delete_at, account, container, obj,
                         headers_in, objdevice):
        """
        Update the expiring objects container when objects are updated.

        :param op: operation performed (ex: 'PUT', or 'DELETE')
        :param account: account name for the object
        :param container: container name for the object
        :param obj: object name
        :param headers_in: dictionary of headers from the original request
        :param objdevice: device name that the object is in
        """
        # Quick cap that will work from now until Sat Nov 20 17:46:39 2286
        # At that time, Swift will be so popular and pervasive I will have
        # created income for thousands of future programmers.
        delete_at = max(min(delete_at, 9999999999), 0)
        host = partition = contdevice = None
        headers_out = {
            'x-timestamp': headers_in['x-timestamp'],
            'x-trans-id': headers_in.get('x-trans-id', '-')
        }
        if op != 'DELETE':
            host = headers_in.get('X-Delete-At-Host', None)
            partition = headers_in.get('X-Delete-At-Partition', None)
            contdevice = headers_in.get('X-Delete-At-Device', None)
            headers_out['x-size'] = '0'
            headers_out['x-content-type'] = 'text/plain'
            headers_out['x-etag'] = 'd41d8cd98f00b204e9800998ecf8427e'
        self.async_update(
            op, self.expiring_objects_account,
            str(delete_at / self.expiring_objects_container_divisor *
                self.expiring_objects_container_divisor),
            '%s-%s/%s/%s' % (delete_at, account, container, obj), host,
            partition, contdevice, headers_out, objdevice)

    @public
    def POST(self, request):
        """Handle HTTP POST requests for the Swift Object Server."""
        start_time = time.time()
        try:
            device, partition, account, container, obj = \
                split_path(unquote(request.path), 5, 5, True)
            validate_device_partition(device, partition)
        except ValueError, err:
            self.logger.increment('POST.errors')
            return HTTPBadRequest(body=str(err),
                                  request=request,
                                  content_type='text/plain')
        if 'x-timestamp' not in request.headers or \
                    not check_float(request.headers['x-timestamp']):
            self.logger.increment('POST.errors')
            return HTTPBadRequest(body='Missing timestamp',
                                  request=request,
                                  content_type='text/plain')
        new_delete_at = int(request.headers.get('X-Delete-At') or 0)
        if new_delete_at and new_delete_at < time.time():
            self.logger.increment('POST.errors')
            return HTTPBadRequest(body='X-Delete-At in past',
                                  request=request,
                                  content_type='text/plain')
        if self.mount_check and not check_mount(self.devices, device):
            self.logger.increment('POST.errors')
            return HTTPInsufficientStorage(drive=device, request=request)
        file = DiskFile(self.devices,
                        device,
                        partition,
                        account,
                        container,
                        obj,
                        self.logger,
                        disk_chunk_size=self.disk_chunk_size)

        if 'X-Delete-At' in file.metadata and \
                int(file.metadata['X-Delete-At']) <= time.time():
            self.logger.timing_since('POST.timing', start_time)
            return HTTPNotFound(request=request)
        if file.is_deleted():
            response_class = HTTPNotFound
        else:
            response_class = HTTPAccepted
        try:
            file_size = file.get_data_file_size()
        except (DiskFileError, DiskFileNotExist):
            file.quarantine()
            return HTTPNotFound(request=request)
        metadata = {'X-Timestamp': request.headers['x-timestamp']}
        metadata.update(val for val in request.headers.iteritems()
                        if val[0].lower().startswith('x-object-meta-'))
        for header_key in self.allowed_headers:
            if header_key in request.headers:
                header_caps = header_key.title()
                metadata[header_caps] = request.headers[header_key]
        old_delete_at = int(file.metadata.get('X-Delete-At') or 0)
        if old_delete_at != new_delete_at:
            if new_delete_at:
                self.delete_at_update('PUT', new_delete_at, account, container,
                                      obj, request.headers, device)
            if old_delete_at:
                self.delete_at_update('DELETE', old_delete_at, account,
                                      container, obj, request.headers, device)
        with file.mkstemp() as (fd, tmppath):
            file.put(fd, tmppath, metadata, extension='.meta')
        self.logger.timing_since('POST.timing', start_time)
        return response_class(request=request)
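
The allowed_headers set built in __init__ above is just the comma-separated config value, stripped, lower-cased, and filtered against DISALLOWED_HEADERS. A standalone sketch of that parsing; the config value and the DISALLOWED_HEADERS contents are assumptions for illustration.

conf = {'allowed_headers': 'Content-Disposition, X-Delete-At, Content-Length'}
# Assumed contents; the real constant is defined in the object server module.
DISALLOWED_HEADERS = set(['content-length', 'content-type', 'deleted',
                          'etag', 'x-timestamp'])

allowed_headers = set(
    i.strip().lower() for i in conf.get('allowed_headers', '').split(',')
    if i.strip() and i.strip().lower() not in DISALLOWED_HEADERS)
print(sorted(allowed_headers))  # ['content-disposition', 'x-delete-at']
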
Example #3
 def PUT(self, req):
     """Handle HTTP PUT request."""
     drive, part, account, container, obj = split_and_validate_path(
         req, 4, 5, True)
     if 'x-timestamp' not in req.headers or \
             not check_float(req.headers['x-timestamp']):
         return HTTPBadRequest(body='Missing timestamp', request=req,
                               content_type='text/plain')
     if 'x-container-sync-to' in req.headers:
         err = validate_sync_to(req.headers['x-container-sync-to'],
                                self.allowed_sync_hosts)
         if err:
             return HTTPBadRequest(err)
     if self.mount_check and not check_mount(self.root, drive):
         return HTTPInsufficientStorage(drive=drive, request=req)
     timestamp = normalize_timestamp(req.headers['x-timestamp'])
     broker = self._get_container_broker(drive, part, account, container)
     if obj:     # put container object
         if account.startswith(self.auto_create_account_prefix) and \
                 not os.path.exists(broker.db_file):
             try:
                 broker.initialize(timestamp)
             except DatabaseAlreadyExists:
                 pass
         if not os.path.exists(broker.db_file):
             return HTTPNotFound()
         broker.put_object(obj, timestamp, int(req.headers['x-size']),
                           req.headers['x-content-type'],
                           req.headers['x-etag'])
         return HTTPCreated(request=req)
     else:   # put container
         if not os.path.exists(broker.db_file):
             try:
                 broker.initialize(timestamp)
                 created = True
             except DatabaseAlreadyExists:
                 created = False
         else:
             created = broker.is_deleted()
             broker.update_put_timestamp(timestamp)
             if broker.is_deleted():
                 return HTTPConflict(request=req)
         metadata = {}
         metadata.update(
             (key, (value, timestamp))
             for key, value in req.headers.iteritems()
             if key.lower() in self.save_headers or
             key.lower().startswith('x-container-meta-'))
         if metadata:
             if 'X-Container-Sync-To' in metadata:
                 if 'X-Container-Sync-To' not in broker.metadata or \
                         metadata['X-Container-Sync-To'][0] != \
                         broker.metadata['X-Container-Sync-To'][0]:
                     broker.set_x_container_sync_points(-1, -1)
             broker.update_metadata(metadata)
         resp = self.account_update(req, account, container, broker)
         if resp:
             return resp
         if created:
             return HTTPCreated(request=req)
         else:
             return HTTPAccepted(request=req)
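
Container metadata is persisted as header-name to (value, timestamp) pairs, which is also why the GET handler in Example #1 unpacks (value, timestamp) when copying broker.metadata into response headers. A standalone sketch of the dict built in the "put container" branch above; the header values and the save_headers contents are made up for illustration.

req_headers = {
    'X-Container-Meta-Owner': 'alice',
    'X-Container-Read': '.r:*',
    'Content-Length': '0',            # ignored: neither save_headers nor user meta
}
save_headers = set(['x-container-read', 'x-container-write',
                    'x-container-sync-key', 'x-container-sync-to'])
timestamp = '0000001400000000.00000'  # normalized request timestamp

metadata = dict(
    (key, (value, timestamp))
    for key, value in req_headers.items()
    if key.lower() in save_headers
    or key.lower().startswith('x-container-meta-'))
print(sorted(metadata.items()))
# [('X-Container-Meta-Owner', ('alice', '0000001400000000.00000')),
#  ('X-Container-Read', ('.r:*', '0000001400000000.00000'))]
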
Example #4
    def PUT(self, request):
        """Handle HTTP PUT requests for the Swift Object Server."""
        device, partition, account, container, obj = \
            split_and_validate_path(request, 5, 5, True)

        if 'x-timestamp' not in request.headers or \
                not check_float(request.headers['x-timestamp']):
            return HTTPBadRequest(body='Missing timestamp',
                                  request=request,
                                  content_type='text/plain')
        error_response = check_object_creation(request, obj)
        if error_response:
            return error_response
        new_delete_at = int(request.headers.get('X-Delete-At') or 0)
        if new_delete_at and new_delete_at < time.time():
            return HTTPBadRequest(body='X-Delete-At in past',
                                  request=request,
                                  content_type='text/plain')
        try:
            fsize = request.message_length()
        except ValueError as e:
            return HTTPBadRequest(body=str(e),
                                  request=request,
                                  content_type='text/plain')
        try:
            disk_file = self.get_diskfile(device, partition, account,
                                          container, obj)
        except DiskFileDeviceUnavailable:
            return HTTPInsufficientStorage(drive=device, request=request)
        try:
            orig_metadata = disk_file.read_metadata()
        except (DiskFileNotExist, DiskFileQuarantined):
            orig_metadata = {}
        orig_timestamp = orig_metadata.get('X-Timestamp')
        if orig_timestamp and orig_timestamp >= request.headers['x-timestamp']:
            return HTTPConflict(request=request)
        orig_delete_at = int(orig_metadata.get('X-Delete-At') or 0)
        upload_expiration = time.time() + self.max_upload_time
        etag = md5()
        elapsed_time = 0
        try:
            with disk_file.create(size=fsize) as writer:
                upload_size = 0
                reader = request.environ['wsgi.input'].read
                for chunk in iter(lambda: reader(self.network_chunk_size), ''):
                    start_time = time.time()
                    if start_time > upload_expiration:
                        self.logger.increment('PUT.timeouts')
                        return HTTPRequestTimeout(request=request)
                    etag.update(chunk)
                    upload_size = writer.write(chunk)
                    elapsed_time += time.time() - start_time
                if upload_size:
                    self.logger.transfer_rate('PUT.' + device + '.timing',
                                              elapsed_time, upload_size)
                if fsize is not None and fsize != upload_size:
                    return HTTPClientDisconnect(request=request)
                etag = etag.hexdigest()
                if 'etag' in request.headers and \
                        request.headers['etag'].lower() != etag:
                    return HTTPUnprocessableEntity(request=request)
                metadata = {
                    'X-Timestamp': request.headers['x-timestamp'],
                    'Content-Type': request.headers['content-type'],
                    'ETag': etag,
                    'Content-Length': str(upload_size),
                }
                metadata.update(val for val in request.headers.iteritems()
                                if is_user_meta('object', val[0]))
                for header_key in (
                        request.headers.get('X-Backend-Replication-Headers')
                        or self.allowed_headers):
                    if header_key in request.headers:
                        header_caps = header_key.title()
                        metadata[header_caps] = request.headers[header_key]
                writer.put(metadata)
        except DiskFileNoSpace:
            return HTTPInsufficientStorage(drive=device, request=request)
        if orig_delete_at != new_delete_at:
            if new_delete_at:
                self.delete_at_update('PUT', new_delete_at, account, container,
                                      obj, request, device)
            if orig_delete_at:
                self.delete_at_update('DELETE', orig_delete_at, account,
                                      container, obj, request, device)
        self.container_update(
            'PUT', account, container, obj, request,
            HeaderKeyDict({
                'x-size': metadata['Content-Length'],
                'x-content-type': metadata['Content-Type'],
                'x-timestamp': metadata['X-Timestamp'],
                'x-etag': metadata['ETag']
            }), device)
        return HTTPCreated(request=request, etag=etag)
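
The upload loop above reads the WSGI input in network_chunk_size pieces until an empty read signals the end of the body, feeding each chunk to the MD5 that becomes the ETag. A standalone sketch with an in-memory stream standing in for wsgi.input; unlike the writer above, whose write() returns the cumulative size, this sketch counts bytes by hand.

from hashlib import md5
from io import BytesIO

network_chunk_size = 65536
wsgi_input = BytesIO(b'example object body')  # stand-in for request.environ['wsgi.input']

etag = md5()
upload_size = 0
reader = wsgi_input.read
for chunk in iter(lambda: reader(network_chunk_size), b''):
    etag.update(chunk)
    upload_size += len(chunk)
print('%d %s' % (upload_size, etag.hexdigest()))
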
Example #5
 @public
 def PUT(self, request):
     """Handle HTTP PUT requests for the Swift Object Server."""
     start_time = time.time()
     try:
         device, partition, account, container, obj = \
             split_path(unquote(request.path), 5, 5, True)
         validate_device_partition(device, partition)
     except ValueError, err:
         self.logger.increment('PUT.errors')
         return HTTPBadRequest(body=str(err),
                               request=request,
                               content_type='text/plain')
     if self.mount_check and not check_mount(self.devices, device):
         self.logger.increment('PUT.errors')
         return HTTPInsufficientStorage(drive=device, request=request)
     if 'x-timestamp' not in request.headers or \
                 not check_float(request.headers['x-timestamp']):
         self.logger.increment('PUT.errors')
         return HTTPBadRequest(body='Missing timestamp',
                               request=request,
                               content_type='text/plain')
     error_response = check_object_creation(request, obj)
     if error_response:
         self.logger.increment('PUT.errors')
         return error_response
     new_delete_at = int(request.headers.get('X-Delete-At') or 0)
     if new_delete_at and new_delete_at < time.time():
         self.logger.increment('PUT.errors')
         return HTTPBadRequest(body='X-Delete-At in past',
                                request=request,
                                content_type='text/plain')
Example #6
 def PUT(self, req):
     """Handle HTTP PUT request."""
     drive, part, account, container, obj = split_and_validate_path(
         req, 4, 5, True)
     req_timestamp = valid_timestamp(req)
     if 'x-container-sync-to' in req.headers:
         err, sync_to, realm, realm_key = validate_sync_to(
             req.headers['x-container-sync-to'], self.allowed_sync_hosts,
             self.realms_conf)
         if err:
             return HTTPBadRequest(err)
     if self.mount_check and not check_mount(self.root, drive):
         return HTTPInsufficientStorage(drive=drive, request=req)
     requested_policy_index = self.get_and_validate_policy_index(req)
     broker = self._get_container_broker(drive, part, account, container)
     if obj:  # put container object
         # obj put expects the policy_index header, default is for
         # legacy support during upgrade.
         obj_policy_index = requested_policy_index or 0
         if account.startswith(self.auto_create_account_prefix) and \
                 not os.path.exists(broker.db_file):
             try:
                 broker.initialize(req_timestamp.internal, obj_policy_index)
             except DatabaseAlreadyExists:
                 pass
         if not os.path.exists(broker.db_file):
             return HTTPNotFound()
         broker.put_object(obj, req_timestamp.internal,
                           int(req.headers['x-size']),
                           req.headers['x-content-type'],
                           req.headers['x-etag'], 0, obj_policy_index)
         return HTTPCreated(request=req)
     else:  # put container
         if requested_policy_index is None:
             # use the default index sent by the proxy if available
             new_container_policy = req.headers.get(
                 'X-Backend-Storage-Policy-Default', int(POLICIES.default))
         else:
             new_container_policy = requested_policy_index
         created = self._update_or_create(req, broker,
                                          req_timestamp.internal,
                                          new_container_policy,
                                          requested_policy_index)
         metadata = {}
         metadata.update((key, (value, req_timestamp.internal))
                         for key, value in req.headers.iteritems()
                         if key.lower() in self.save_headers
                         or is_sys_or_user_meta('container', key))
         if 'X-Container-Sync-To' in metadata:
             if 'X-Container-Sync-To' not in broker.metadata or \
                     metadata['X-Container-Sync-To'][0] != \
                     broker.metadata['X-Container-Sync-To'][0]:
                 broker.set_x_container_sync_points(-1, -1)
         broker.update_metadata(metadata, validate_metadata=True)
         resp = self.account_update(req, account, container, broker)
         if resp:
             return resp
         if created:
             return HTTPCreated(request=req)
         else:
             return HTTPAccepted(request=req)
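
A hedged sketch of the backend request served by the "put container object" branch above. The host, port, device, and partition are assumptions; the headers are the ones the handler reads, with X-Backend-Storage-Policy-Index being the policy header that also appears in the account examples below.

import time
import requests

headers = {
    'X-Timestamp': '%.5f' % time.time(),
    'X-Size': '1024',
    'X-Content-Type': 'image/jpeg',
    'X-Etag': 'd41d8cd98f00b204e9800998ecf8427e',   # made-up object ETag
    'X-Backend-Storage-Policy-Index': '0',
}
resp = requests.put(
    'http://127.0.0.1:6201/sdb1/1234/AUTH_test/photos/2024/cat.jpg',
    headers=headers)
print(resp.status_code)  # 201 on success, 404 if the container DB does not exist
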
Example #7
    def PUT(self, req):
        """Handle HTTP PUT request."""
        drive, part, account, container = split_and_validate_path(req, 3, 4)
        if self.mount_check and not check_mount(self.root, drive):
            return HTTPInsufficientStorage(drive=drive, request=req)
        if container:  # put account container
            if 'x-timestamp' not in req.headers:
                timestamp = Timestamp(time.time())
            else:
                timestamp = valid_timestamp(req)
            pending_timeout = None
            container_policy_index = \
                req.headers.get('X-Backend-Storage-Policy-Index', 0)
            if 'x-trans-id' in req.headers:
                pending_timeout = 3
            broker = self._get_account_broker(drive,
                                              part,
                                              account,
                                              pending_timeout=pending_timeout)
            if account.startswith(self.auto_create_account_prefix) and \
                    not os.path.exists(broker.db_file):
                try:
                    broker.initialize(timestamp.internal)
                except DatabaseAlreadyExists:
                    pass
            if req.headers.get('x-account-override-deleted', 'no').lower() != \
                    'yes' and broker.is_deleted():
                return HTTPNotFound(request=req)
            broker.put_container(container, req.headers['x-put-timestamp'],
                                 req.headers['x-delete-timestamp'],
                                 req.headers['x-object-count'],
                                 req.headers['x-bytes-used'],
                                 container_policy_index)
            if req.headers['x-delete-timestamp'] > \
                    req.headers['x-put-timestamp']:
                return HTTPNoContent(request=req)
            else:
                return HTTPCreated(request=req)
        else:  # put account
            timestamp = valid_timestamp(req)
            broker = self._get_account_broker(drive, part, account)
            if not os.path.exists(broker.db_file):
                try:
                    broker.initialize(timestamp.internal)
                    created = True
                except DatabaseAlreadyExists:
                    created = False
            elif broker.is_status_deleted():
                return self._deleted_response(broker,
                                              req,
                                              HTTPForbidden,
                                              body='Recently deleted')
            else:
                created = broker.is_deleted()
                broker.update_put_timestamp(timestamp.internal)
                if broker.is_deleted():
                    return HTTPConflict(request=req)
            metadata = {}
            metadata.update((key, (value, timestamp.internal))
                            for key, value in req.headers.iteritems()
                            if is_sys_or_user_meta('account', key))
            if metadata:
                broker.update_metadata(metadata, validate_metadata=True)

            if created:
                metaDict = {}
                metaList = []
                f = open('/home/ubuntu/accountstuff', 'w')
                metaDict = broker.get_info()
                metaDict.update(
                    (key, value)
                    for key, (value, timestamp) in broker.metadata.iteritems()
                    if value != '' and is_sys_or_user_meta('account', key))
                if metaDict != {}:
                    metaList.append(format_metadata(metaDict))
                    for item in metaList:
                        f.write("%s\n" % item)
                    AccountSender = Sender(self.conf)
                    AccountSender.sendData(metaList, 'account_data', self.ip,
                                           self.port)
            #####################################
                return HTTPCreated(request=req)
            else:
                metaDict = {}
                metaList = []
                f = open('/home/ubuntu/accountstuff', 'w')
                metaDict = broker.get_info()
                metaDict.update(
                    (key, value)
                    for key, (value, timestamp) in broker.metadata.iteritems()
                    if value != '' and is_sys_or_user_meta('account', key))
                if metaDict != {}:
                    metaList.append(format_metadata(metaDict))
                    for item in metaList:
                        f.write("%s\n" % item)
                    AccountSender = Sender(self.conf)
                    AccountSender.sendData(metaList, 'account_data', self.ip,
                                           self.port)
                return HTTPAccepted(request=req)
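
Both branches above end with the same metadata-export block. A hedged sketch of how that block could be pulled into one helper on the controller, keeping the example's Sender, format_metadata, output path, and self.ip/self.port exactly as given:

def _export_account_metadata(self, broker):
    """Mirror account info plus non-empty metadata to disk and to Sender."""
    metaDict = broker.get_info()
    metaDict.update(
        (key, value)
        for key, (value, timestamp) in broker.metadata.iteritems()
        if value != '' and is_sys_or_user_meta('account', key))
    if metaDict == {}:
        return
    metaList = [format_metadata(metaDict)]
    with open('/home/ubuntu/accountstuff', 'w') as f:
        for item in metaList:
            f.write("%s\n" % item)
    AccountSender = Sender(self.conf)
    AccountSender.sendData(metaList, 'account_data', self.ip, self.port)

Each branch would then reduce to a single self._export_account_metadata(broker) call before returning 201 or 202.
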
Example #8
 def DELETE(self, request):
     """Handle HTTP DELETE requests for the Swift Object Server."""
     device, partition, account, container, obj, policy = \
         get_name_and_placement(request, 5, 5, True)
     req_timestamp = valid_timestamp(request)
     try:
         disk_file = self.get_diskfile(
             device, partition, account, container, obj,
             policy=policy)
     except DiskFileDeviceUnavailable:
         return HTTPInsufficientStorage(drive=device, request=request)
     try:
         orig_metadata = disk_file.read_metadata()
     except DiskFileXattrNotSupported:
         return HTTPInsufficientStorage(drive=device, request=request)
     except DiskFileExpired as e:
         orig_timestamp = e.timestamp
         orig_metadata = e.metadata
         response_class = HTTPNotFound
     except DiskFileDeleted as e:
         orig_timestamp = e.timestamp
         orig_metadata = {}
         response_class = HTTPNotFound
     except (DiskFileNotExist, DiskFileQuarantined):
         orig_timestamp = 0
         orig_metadata = {}
         response_class = HTTPNotFound
     else:
         orig_timestamp = disk_file.data_timestamp
         if orig_timestamp < req_timestamp:
             response_class = HTTPNoContent
         else:
             response_class = HTTPConflict
     response_timestamp = max(orig_timestamp, req_timestamp)
     orig_delete_at = int(orig_metadata.get('X-Delete-At') or 0)
     try:
         req_if_delete_at_val = request.headers['x-if-delete-at']
         req_if_delete_at = int(req_if_delete_at_val)
     except KeyError:
         pass
     except ValueError:
         return HTTPBadRequest(
             request=request,
             body='Bad X-If-Delete-At header value')
     else:
         # request includes x-if-delete-at; we must not place a tombstone
         # if we can not verify the x-if-delete-at time
         if not orig_timestamp:
             # no object found at all
             return HTTPNotFound()
         if orig_delete_at != req_if_delete_at:
             return HTTPPreconditionFailed(
                 request=request,
                 body='X-If-Delete-At and X-Delete-At do not match')
         else:
             # differentiate success from no object at all
             response_class = HTTPNoContent
     if orig_delete_at:
         self.delete_at_update('DELETE', orig_delete_at, account,
                               container, obj, request, device,
                               policy)
     if orig_timestamp < req_timestamp:
         disk_file.delete(req_timestamp)
         self.container_update(
             'DELETE', account, container, obj, request,
             HeaderKeyDict({'x-timestamp': req_timestamp.internal}),
             device, policy)
     return response_class(
         request=request,
         headers={'X-Backend-Timestamp': response_timestamp.internal})
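
A sketch of a conditional backend DELETE as handled above; the host, port, device, and partition are assumptions. X-If-Delete-At makes the tombstone conditional on the stored X-Delete-At value.

import time
import requests

resp = requests.delete(
    'http://127.0.0.1:6200/sdb1/1234/AUTH_test/photos/2024/cat.jpg',
    headers={'X-Timestamp': '%.5f' % time.time(),
             'X-If-Delete-At': '1400003600'})
# 204: tombstone written; 404: nothing was there; 409: request timestamp not
# newer than the existing data; 412: X-If-Delete-At does not match X-Delete-At.
print(resp.status_code)
print(resp.headers.get('X-Backend-Timestamp'))
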
Example #9
 def GET(self, request):
     """Handle HTTP GET requests for the Swift Object Server."""
     device, partition, account, container, obj = \
         split_and_validate_path(request, 5, 5, True)
     keep_cache = self.keep_cache_private or (
         'X-Auth-Token' not in request.headers
         and 'X-Storage-Token' not in request.headers)
     try:
         disk_file = self.get_diskfile(device, partition, account,
                                       container, obj)
     except DiskFileDeviceUnavailable:
         return HTTPInsufficientStorage(drive=device, request=request)
     try:
         with disk_file.open():
             metadata = disk_file.get_metadata()
             obj_size = int(metadata['Content-Length'])
             if request.headers.get('if-match') not in (None, '*') and \
                     metadata['ETag'] not in request.if_match:
                 return HTTPPreconditionFailed(request=request)
             if request.headers.get('if-none-match') is not None:
                 if metadata['ETag'] in request.if_none_match:
                     resp = HTTPNotModified(request=request)
                     resp.etag = metadata['ETag']
                     return resp
             file_x_ts = metadata['X-Timestamp']
             file_x_ts_flt = float(file_x_ts)
             try:
                 if_unmodified_since = request.if_unmodified_since
             except (OverflowError, ValueError):
                 # catches timestamps before the epoch
                 return HTTPPreconditionFailed(request=request)
             file_x_ts_utc = datetime.fromtimestamp(file_x_ts_flt, UTC)
             if if_unmodified_since and file_x_ts_utc > if_unmodified_since:
                 return HTTPPreconditionFailed(request=request)
             try:
                 if_modified_since = request.if_modified_since
             except (OverflowError, ValueError):
                 # catches timestamps before the epoch
                 return HTTPPreconditionFailed(request=request)
             if if_modified_since and file_x_ts_utc < if_modified_since:
                 return HTTPNotModified(request=request)
             keep_cache = (self.keep_cache_private or
                           ('X-Auth-Token' not in request.headers
                            and 'X-Storage-Token' not in request.headers))
             response = Response(app_iter=disk_file.reader(
                 iter_hook=sleep, keep_cache=keep_cache),
                                 request=request,
                                 conditional_response=True)
             response.headers['Content-Type'] = metadata.get(
                 'Content-Type', 'application/octet-stream')
             for key, value in metadata.iteritems():
                 if key.lower().startswith('x-object-meta-') or \
                         key.lower() in self.allowed_headers:
                     response.headers[key] = value
             response.etag = metadata['ETag']
             response.last_modified = file_x_ts_flt
             response.content_length = obj_size
             try:
                 response.content_encoding = metadata['Content-Encoding']
             except KeyError:
                 pass
             response.headers['X-Timestamp'] = file_x_ts
             resp = request.get_response(response)
     except DiskFileNotExist:
         if request.headers.get('if-match') == '*':
             resp = HTTPPreconditionFailed(request=request)
         else:
             resp = HTTPNotFound(request=request)
     except DiskFileQuarantined:
         resp = HTTPNotFound(request=request)
     return resp
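
A sketch of a conditional backend GET against the handler above; the URL and ETag are placeholders. A matching If-None-Match short-circuits to 304 before any object data is streamed.

import requests

resp = requests.get(
    'http://127.0.0.1:6200/sdb1/1234/AUTH_test/photos/2024/cat.jpg',
    headers={'If-None-Match': 'd41d8cd98f00b204e9800998ecf8427e'})
print(resp.status_code)  # 304 if the stored ETag matches, otherwise 200 plus the body
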
Example #10
 def PUT(self, req):
     """Handle HTTP PUT request."""
     drive, part, account, container = get_container_name_and_placement(req)
     try:
         check_drive(self.root, drive, self.mount_check)
     except ValueError:
         return HTTPInsufficientStorage(drive=drive, request=req)
     if not self.check_free_space(drive):
         return HTTPInsufficientStorage(drive=drive, request=req)
     if container:   # put account container
         if 'x-timestamp' not in req.headers:
             timestamp = Timestamp.now()
         else:
             timestamp = valid_timestamp(req)
         pending_timeout = None
         container_policy_index = \
             req.headers.get('X-Backend-Storage-Policy-Index', 0)
         if 'x-trans-id' in req.headers:
             pending_timeout = 3
         broker = self._get_account_broker(drive, part, account,
                                           pending_timeout=pending_timeout)
         if account.startswith(self.auto_create_account_prefix) and \
                 not os.path.exists(broker.db_file):
             try:
                 broker.initialize(timestamp.internal)
             except DatabaseAlreadyExists:
                 pass
         if req.headers.get('x-account-override-deleted', 'no').lower() != \
                 'yes' and broker.is_deleted():
             return HTTPNotFound(request=req)
         broker.put_container(container, req.headers['x-put-timestamp'],
                              req.headers['x-delete-timestamp'],
                              req.headers['x-object-count'],
                              req.headers['x-bytes-used'],
                              container_policy_index)
         if req.headers['x-delete-timestamp'] > \
                 req.headers['x-put-timestamp']:
             return HTTPNoContent(request=req)
         else:
             return HTTPCreated(request=req)
     else:   # put account
         timestamp = valid_timestamp(req)
         broker = self._get_account_broker(drive, part, account)
         if not os.path.exists(broker.db_file):
             try:
                 broker.initialize(timestamp.internal)
                 created = True
             except DatabaseAlreadyExists:
                 created = False
         elif broker.is_status_deleted():
             return self._deleted_response(broker, req, HTTPForbidden,
                                           body='Recently deleted')
         else:
             created = broker.is_deleted()
             broker.update_put_timestamp(timestamp.internal)
             if broker.is_deleted():
                 return HTTPConflict(request=req)
         self._update_metadata(req, broker, timestamp)
         if created:
             return HTTPCreated(request=req)
         else:
             return HTTPAccepted(request=req)
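
A hedged sketch of the backend account request served by the "put account container" branch above; the host, port, device, and partition are assumptions, and the headers are the ones the handler reads.

import time
import requests

resp = requests.put(
    'http://127.0.0.1:6202/sdb1/1234/AUTH_test/photos',
    headers={'X-Put-Timestamp': '%.5f' % time.time(),
             'X-Delete-Timestamp': '0',
             'X-Object-Count': '0',
             'X-Bytes-Used': '0',
             'X-Backend-Storage-Policy-Index': '0',
             'X-Trans-Id': 'tx-example-put'})
# 201 normally; 204 when the delete timestamp sorts after the put timestamp.
print(resp.status_code)
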
Example #11
    def PUT(self, request):
        """Handle HTTP PUT requests for the Swift Object Server."""
        device, partition, account, container, obj, policy = \
            get_name_and_placement(request, 5, 5, True)
        req_timestamp = valid_timestamp(request)
        error_response = check_object_creation(request, obj)
        if error_response:
            return error_response
        new_delete_at = int(request.headers.get('X-Delete-At') or 0)
        if new_delete_at and new_delete_at < time.time():
            return HTTPBadRequest(body='X-Delete-At in past', request=request,
                                  content_type='text/plain')
        try:
            fsize = request.message_length()
        except ValueError as e:
            return HTTPBadRequest(body=str(e), request=request,
                                  content_type='text/plain')

        # In case of multipart-MIME put, the proxy sends a chunked request,
        # but may let us know the real content length so we can verify that
        # we have enough disk space to hold the object.
        if fsize is None:
            fsize = request.headers.get('X-Backend-Obj-Content-Length')
            if fsize is not None:
                try:
                    fsize = int(fsize)
                except ValueError as e:
                    return HTTPBadRequest(body=str(e), request=request,
                                          content_type='text/plain')
        # SSYNC will include Frag-Index header for subrequests to primary
        # nodes; handoff nodes should 409 subrequests to over-write an
        # existing data fragment until they offloaded the existing fragment
        frag_index = request.headers.get('X-Backend-Ssync-Frag-Index')
        try:
            disk_file = self.get_diskfile(
                device, partition, account, container, obj,
                policy=policy, frag_index=frag_index)
        except DiskFileDeviceUnavailable:
            return HTTPInsufficientStorage(drive=device, request=request)
        try:
            orig_metadata = disk_file.read_metadata()
            orig_timestamp = disk_file.data_timestamp
        except DiskFileXattrNotSupported:
            return HTTPInsufficientStorage(drive=device, request=request)
        except (DiskFileNotExist, DiskFileQuarantined):
            orig_metadata = {}
            orig_timestamp = 0

        # Checks for If-None-Match
        if request.if_none_match is not None and orig_metadata:
            if '*' in request.if_none_match:
                # File exists already so return 412
                return HTTPPreconditionFailed(request=request)
            if orig_metadata.get('ETag') in request.if_none_match:
                # The current ETag matches, so return 412
                return HTTPPreconditionFailed(request=request)

        if orig_timestamp >= req_timestamp:
            return HTTPConflict(
                request=request,
                headers={'X-Backend-Timestamp': orig_timestamp.internal})
        orig_delete_at = int(orig_metadata.get('X-Delete-At') or 0)
        upload_expiration = time.time() + self.max_upload_time
        etag = md5()
        elapsed_time = 0
        try:
            with disk_file.create(size=fsize) as writer:
                upload_size = 0

                # If the proxy wants to send us object metadata after the
                # object body, it sets some headers. We have to tell the
                # proxy, in the 100 Continue response, that we're able to
                # parse a multipart MIME document and extract the object and
                # metadata from it. If we don't, then the proxy won't
                # actually send the footer metadata.
                have_metadata_footer = False
                use_multiphase_commit = False
                mime_documents_iter = iter([])
                obj_input = request.environ['wsgi.input']

                hundred_continue_headers = []
                if config_true_value(
                        request.headers.get(
                            'X-Backend-Obj-Multiphase-Commit')):
                    use_multiphase_commit = True
                    hundred_continue_headers.append(
                        ('X-Obj-Multiphase-Commit', 'yes'))

                if config_true_value(
                        request.headers.get('X-Backend-Obj-Metadata-Footer')):
                    have_metadata_footer = True
                    hundred_continue_headers.append(
                        ('X-Obj-Metadata-Footer', 'yes'))

                if have_metadata_footer or use_multiphase_commit:
                    obj_input.set_hundred_continue_response_headers(
                        hundred_continue_headers)
                    mime_boundary = request.headers.get(
                        'X-Backend-Obj-Multipart-Mime-Boundary')
                    if not mime_boundary:
                        return HTTPBadRequest("no MIME boundary")

                    try:
                        with ChunkReadTimeout(self.client_timeout):
                            mime_documents_iter = iter_mime_headers_and_bodies(
                                request.environ['wsgi.input'],
                                mime_boundary, self.network_chunk_size)
                            _junk_hdrs, obj_input = next(mime_documents_iter)
                    except ChunkReadError:
                        return HTTPClientDisconnect(request=request)
                    except ChunkReadTimeout:
                        return HTTPRequestTimeout(request=request)

                timeout_reader = self._make_timeout_reader(obj_input)
                try:
                    for chunk in iter(timeout_reader, ''):
                        start_time = time.time()
                        if start_time > upload_expiration:
                            self.logger.increment('PUT.timeouts')
                            return HTTPRequestTimeout(request=request)
                        etag.update(chunk)
                        upload_size = writer.write(chunk)
                        elapsed_time += time.time() - start_time
                except ChunkReadError:
                    return HTTPClientDisconnect(request=request)
                except ChunkReadTimeout:
                    return HTTPRequestTimeout(request=request)
                if upload_size:
                    self.logger.transfer_rate(
                        'PUT.' + device + '.timing', elapsed_time,
                        upload_size)
                if fsize is not None and fsize != upload_size:
                    return HTTPClientDisconnect(request=request)

                footer_meta = {}
                if have_metadata_footer:
                    footer_meta = self._read_metadata_footer(
                        mime_documents_iter)

                request_etag = (footer_meta.get('etag') or
                                request.headers.get('etag', '')).lower()
                etag = etag.hexdigest()
                if request_etag and request_etag != etag:
                    return HTTPUnprocessableEntity(request=request)
                metadata = {
                    'X-Timestamp': request.timestamp.internal,
                    'Content-Type': request.headers['content-type'],
                    'ETag': etag,
                    'Content-Length': str(upload_size),
                }
                metadata.update(val for val in request.headers.items()
                                if is_sys_or_user_meta('object', val[0]))
                metadata.update(val for val in footer_meta.items()
                                if is_sys_or_user_meta('object', val[0]))
                headers_to_copy = (
                    request.headers.get(
                        'X-Backend-Replication-Headers', '').split() +
                    list(self.allowed_headers))
                for header_key in headers_to_copy:
                    if header_key in request.headers:
                        header_caps = header_key.title()
                        metadata[header_caps] = request.headers[header_key]
                writer.put(metadata)

                # if the PUT requires a two-phase commit (a data and a commit
                # phase) send the proxy server another 100-continue response
                # to indicate that we are finished writing object data
                if use_multiphase_commit:
                    request.environ['wsgi.input'].\
                        send_hundred_continue_response()
                    if not self._read_put_commit_message(mime_documents_iter):
                        return HTTPServerError(request=request)
                    # got 2nd phase confirmation, write a timestamp.durable
                    # state file to indicate a successful PUT

                writer.commit(request.timestamp)

                # Drain any remaining MIME docs from the socket. There
                # shouldn't be any, but we must read the whole request body.
                try:
                    while True:
                        with ChunkReadTimeout(self.client_timeout):
                            _junk_hdrs, _junk_body = next(mime_documents_iter)
                        drain(_junk_body, self.network_chunk_size,
                              self.client_timeout)
                except ChunkReadError:
                    raise HTTPClientDisconnect()
                except ChunkReadTimeout:
                    raise HTTPRequestTimeout()
                except StopIteration:
                    pass

        except (DiskFileXattrNotSupported, DiskFileNoSpace):
            return HTTPInsufficientStorage(drive=device, request=request)
        if orig_delete_at != new_delete_at:
            if new_delete_at:
                self.delete_at_update(
                    'PUT', new_delete_at, account, container, obj, request,
                    device, policy)
            if orig_delete_at:
                self.delete_at_update(
                    'DELETE', orig_delete_at, account, container, obj,
                    request, device, policy)
        update_headers = HeaderKeyDict({
            'x-size': metadata['Content-Length'],
            'x-content-type': metadata['Content-Type'],
            'x-timestamp': metadata['X-Timestamp'],
            'x-etag': metadata['ETag']})
        # apply any container update header overrides sent with request
        self._check_container_override(update_headers, request.headers)
        self._check_container_override(update_headers, footer_meta)
        self.container_update(
            'PUT', account, container, obj, request,
            update_headers,
            device, policy)
        return HTTPCreated(request=request, etag=etag)
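
A standalone sketch of the headers_to_copy construction above: a replication request can name additional headers to preserve via X-Backend-Replication-Headers, on top of the configured allowed_headers (all values here are made up).

request_headers = {
    'X-Backend-Replication-Headers': 'content-length x-timestamp x-object-meta-color',
}
allowed_headers = set(['content-disposition', 'content-encoding',
                       'x-delete-at', 'x-object-manifest',
                       'x-static-large-object'])

headers_to_copy = (
    request_headers.get('X-Backend-Replication-Headers', '').split() +
    list(allowed_headers))
print(sorted(headers_to_copy))
# the three replication-named headers plus the five configured allowed headers
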
Example #12
class ObjectController(object):
    """Implements the WSGI application for the Swift Object Server."""
    def __init__(self, conf):
        """
        Creates a new WSGI application for the Swift Object Server. An
        example configuration is given at
        <source-dir>/etc/object-server.conf-sample or
        /etc/swift/object-server.conf-sample.
        """
        self.logger = get_logger(conf, log_route='object-server')
        self.devices = conf.get('devices', '/srv/node/')
        self.mount_check = config_true_value(conf.get('mount_check', 'true'))
        self.node_timeout = int(conf.get('node_timeout', 3))
        self.conn_timeout = float(conf.get('conn_timeout', 0.5))
        self.disk_chunk_size = int(conf.get('disk_chunk_size', 65536))
        self.network_chunk_size = int(conf.get('network_chunk_size', 65536))
        self.keep_cache_size = int(conf.get('keep_cache_size', 5242880))
        self.keep_cache_private = \
            config_true_value(conf.get('keep_cache_private', 'false'))
        self.log_requests = config_true_value(conf.get('log_requests', 'true'))
        self.max_upload_time = int(conf.get('max_upload_time', 86400))
        self.slow = int(conf.get('slow', 0))
        self.bytes_per_sync = int(conf.get('mb_per_sync', 512)) * 1024 * 1024
        replication_server = conf.get('replication_server', None)
        if replication_server is None:
            allowed_methods = [
                'DELETE', 'PUT', 'HEAD', 'GET', 'REPLICATE', 'POST'
            ]
        else:
            replication_server = config_true_value(replication_server)
            if replication_server:
                allowed_methods = ['REPLICATE']
            else:
                allowed_methods = ['DELETE', 'PUT', 'HEAD', 'GET', 'POST']
        self.replication_server = replication_server
        self.allowed_methods = allowed_methods
        default_allowed_headers = '''
            content-disposition,
            content-encoding,
            x-delete-at,
            x-object-manifest,
            x-static-large-object,
        '''
        self.allowed_headers = set(
            i.strip().lower() for i in conf.get(
                'allowed_headers', default_allowed_headers).split(',')
            if i.strip() and i.strip().lower() not in DISALLOWED_HEADERS)
        self.expiring_objects_account = \
            (conf.get('auto_create_account_prefix') or '.') + \
            'expiring_objects'
        self.expiring_objects_container_divisor = \
            int(conf.get('expiring_objects_container_divisor') or 86400)

    def async_update(self, op, account, container, obj, host, partition,
                     contdevice, headers_out, objdevice):
        """
        Sends or saves an async update.

        :param op: operation performed (ex: 'PUT', or 'DELETE')
        :param account: account name for the object
        :param container: container name for the object
        :param obj: object name
        :param host: host that the container is on
        :param partition: partition that the container is on
        :param contdevice: device name that the container is on
        :param headers_out: dictionary of headers to send in the container
                            request
        :param objdevice: device name that the object is in
        """
        headers_out['user-agent'] = 'obj-server %s' % os.getpid()
        full_path = '/%s/%s/%s' % (account, container, obj)
        if all([host, partition, contdevice]):
            try:
                with ConnectionTimeout(self.conn_timeout):
                    ip, port = host.rsplit(':', 1)
                    conn = http_connect(ip, port, contdevice, partition, op,
                                        full_path, headers_out)
                with Timeout(self.node_timeout):
                    response = conn.getresponse()
                    response.read()
                    if is_success(response.status):
                        return
                    else:
                        self.logger.error(
                            _('ERROR Container update failed '
                              '(saving for async update later): %(status)d '
                              'response from %(ip)s:%(port)s/%(dev)s'), {
                                  'status': response.status,
                                  'ip': ip,
                                  'port': port,
                                  'dev': contdevice
                              })
            except (Exception, Timeout):
                self.logger.exception(
                    _('ERROR container update failed with '
                      '%(ip)s:%(port)s/%(dev)s (saving for async update later)'
                      ), {
                          'ip': ip,
                          'port': port,
                          'dev': contdevice
                      })
        async_dir = os.path.join(self.devices, objdevice, ASYNCDIR)
        ohash = hash_path(account, container, obj)
        self.logger.increment('async_pendings')
        write_pickle(
            {
                'op': op,
                'account': account,
                'container': container,
                'obj': obj,
                'headers': headers_out
            },
            os.path.join(
                async_dir, ohash[-3:],
                ohash + '-' + normalize_timestamp(headers_out['x-timestamp'])),
            os.path.join(self.devices, objdevice, 'tmp'))

    def container_update(self, op, account, container, obj, request,
                         headers_out, objdevice):
        """
        Update the container when objects are updated.

        :param op: operation performed (ex: 'PUT', or 'DELETE')
        :param account: account name for the object
        :param container: container name for the object
        :param obj: object name
        :param request: the original request object driving the update
        :param headers_out: dictionary of headers to send in the container
                            request(s)
        :param objdevice: device name that the object is in
        """
        headers_in = request.headers
        conthosts = [
            h.strip()
            for h in headers_in.get('X-Container-Host', '').split(',')
        ]
        contdevices = [
            d.strip()
            for d in headers_in.get('X-Container-Device', '').split(',')
        ]
        contpartition = headers_in.get('X-Container-Partition', '')

        if len(conthosts) != len(contdevices):
            # This shouldn't happen unless there's a bug in the proxy,
            # but if there is, we want to know about it.
            self.logger.error(
                _('ERROR Container update failed: different '
                  'numbers of hosts and devices in request: '
                  '"%s" vs "%s"' % (headers_in.get('X-Container-Host', ''),
                                    headers_in.get('X-Container-Device', ''))))
            return

        if contpartition:
            updates = zip(conthosts, contdevices)
        else:
            updates = []

        headers_out['x-trans-id'] = headers_in.get('x-trans-id', '-')
        headers_out['referer'] = request.as_referer()
        for conthost, contdevice in updates:
            self.async_update(op, account, container, obj, conthost,
                              contpartition, contdevice, headers_out,
                              objdevice)

    def delete_at_update(self, op, delete_at, account, container, obj, request,
                         objdevice):
        """
        Update the expiring objects container when objects are updated.

        :param op: operation performed (ex: 'PUT', or 'DELETE')
        :param account: account name for the object
        :param container: container name for the object
        :param obj: object name
        :param request: the original request driving the update
        :param objdevice: device name that the object is in
        """
        # Quick cap that will work from now until Sat Nov 20 17:46:39 2286
        # At that time, Swift will be so popular and pervasive I will have
        # created income for thousands of future programmers.
        delete_at = max(min(delete_at, 9999999999), 0)
        updates = [(None, None)]

        partition = None
        hosts = contdevices = [None]
        headers_in = request.headers
        headers_out = HeaderKeyDict({
            'x-timestamp': headers_in['x-timestamp'],
            'x-trans-id': headers_in.get('x-trans-id', '-'),
            'referer': request.as_referer()})
        if op != 'DELETE':
            partition = headers_in.get('X-Delete-At-Partition', None)
            hosts = headers_in.get('X-Delete-At-Host', '')
            contdevices = headers_in.get('X-Delete-At-Device', '')
            updates = [
                upd for upd in zip((h.strip() for h in hosts.split(',')), (
                    c.strip() for c in contdevices.split(',')))
                if all(upd) and partition
            ]
            if not updates:
                updates = [(None, None)]
            headers_out['x-size'] = '0'
            headers_out['x-content-type'] = 'text/plain'
            headers_out['x-etag'] = 'd41d8cd98f00b204e9800998ecf8427e'

        for host, contdevice in updates:
            self.async_update(
                op, self.expiring_objects_account,
                str(delete_at / self.expiring_objects_container_divisor *
                    self.expiring_objects_container_divisor),
                '%s-%s/%s/%s' % (delete_at, account, container, obj), host,
                partition, contdevice, headers_out, objdevice)

    @public
    @timing_stats()
    def POST(self, request):
        """Handle HTTP POST requests for the Swift Object Server."""
        try:
            device, partition, account, container, obj = \
                split_path(unquote(request.path), 5, 5, True)
            validate_device_partition(device, partition)
        except ValueError, err:
            return HTTPBadRequest(body=str(err),
                                  request=request,
                                  content_type='text/plain')
        if 'x-timestamp' not in request.headers or \
                not check_float(request.headers['x-timestamp']):
            return HTTPBadRequest(body='Missing timestamp',
                                  request=request,
                                  content_type='text/plain')
        new_delete_at = int(request.headers.get('X-Delete-At') or 0)
        if new_delete_at and new_delete_at < time.time():
            return HTTPBadRequest(body='X-Delete-At in past',
                                  request=request,
                                  content_type='text/plain')
        if self.mount_check and not check_mount(self.devices, device):
            return HTTPInsufficientStorage(drive=device, request=request)
        disk_file = DiskFile(self.devices,
                             device,
                             partition,
                             account,
                             container,
                             obj,
                             self.logger,
                             disk_chunk_size=self.disk_chunk_size,
                             bytes_per_sync=self.bytes_per_sync)
        if disk_file.is_deleted() or disk_file.is_expired():
            return HTTPNotFound(request=request)
        try:
            disk_file.get_data_file_size()
        except (DiskFileError, DiskFileNotExist):
            disk_file.quarantine()
            return HTTPNotFound(request=request)
        metadata = {'X-Timestamp': request.headers['x-timestamp']}
        metadata.update(val for val in request.headers.iteritems()
                        if val[0].startswith('X-Object-Meta-'))
        for header_key in self.allowed_headers:
            if header_key in request.headers:
                header_caps = header_key.title()
                metadata[header_caps] = request.headers[header_key]
        old_delete_at = int(disk_file.metadata.get('X-Delete-At') or 0)
        if old_delete_at != new_delete_at:
            if new_delete_at:
                self.delete_at_update('PUT', new_delete_at, account, container,
                                      obj, request, device)
            if old_delete_at:
                self.delete_at_update('DELETE', old_delete_at, account,
                                      container, obj, request, device)
        disk_file.put_metadata(metadata)
        return HTTPAccepted(request=request)
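
A minimal, standalone sketch of the bucketing arithmetic used by delete_at_update above; expiring_entry is a made-up helper name, and 86400 mirrors the default expiring_objects_container_divisor:

def expiring_entry(delete_at, account, container, obj, divisor=86400):
    # Container name: delete_at rounded down to a multiple of the divisor,
    # so every object expiring in the same window shares one container.
    exp_container = str(int(delete_at) // divisor * divisor)
    # Object name: the expiry time plus the full path of the original object.
    exp_obj = '%s-%s/%s/%s' % (delete_at, account, container, obj)
    return exp_container, exp_obj

# With the default one-day divisor, an object expiring at 1400000123
# lands in the '1399939200' container of the expiring-objects account.
print(expiring_entry(1400000123, 'AUTH_test', 'photos', 'cat.jpg'))
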
Example #13
 def GET(self, req):
     """Handle HTTP GET request."""
     drive, part, account, container, obj = split_and_validate_path(
         req, 4, 5, True)
     path = get_param(req, 'path')
     prefix = get_param(req, 'prefix')
     delimiter = get_param(req, 'delimiter')
     if delimiter and (len(delimiter) > 1 or ord(delimiter) > 254):
         # delimiters can be made more flexible later
         return HTTPPreconditionFailed(body='Bad delimiter')
     marker = get_param(req, 'marker', '')
     end_marker = get_param(req, 'end_marker')
     limit = constraints.CONTAINER_LISTING_LIMIT
     given_limit = get_param(req, 'limit')
     reverse = config_true_value(get_param(req, 'reverse'))
     if given_limit and given_limit.isdigit():
         limit = int(given_limit)
         if limit > constraints.CONTAINER_LISTING_LIMIT:
             return HTTPPreconditionFailed(
                 request=req,
                 body='Maximum limit is %d' %
                 constraints.CONTAINER_LISTING_LIMIT)
     out_content_type = listing_formats.get_listing_content_type(req)
     if not check_drive(self.root, drive, self.mount_check):
         return HTTPInsufficientStorage(drive=drive, request=req)
     broker = self._get_container_broker(drive,
                                         part,
                                         account,
                                         container,
                                         pending_timeout=0.1,
                                         stale_reads_ok=True)
     info, is_deleted = broker.get_info_is_deleted()
     record_type = req.headers.get('x-backend-record-type', '').lower()
     if record_type == 'auto' and info.get('db_state') in (SHARDING,
                                                           SHARDED):
         record_type = 'shard'
     if record_type == 'shard':
         override_deleted = info and config_true_value(
             req.headers.get('x-backend-override-deleted', False))
         resp_headers = gen_resp_headers(info,
                                         is_deleted=is_deleted
                                         and not override_deleted)
         if is_deleted and not override_deleted:
             return HTTPNotFound(request=req, headers=resp_headers)
         resp_headers['X-Backend-Record-Type'] = 'shard'
         includes = get_param(req, 'includes')
         states = get_param(req, 'states')
         fill_gaps = False
         if states:
             states = list_from_csv(states)
             fill_gaps = any(('listing' in states, 'updating' in states))
             try:
                 states = broker.resolve_shard_range_states(states)
             except ValueError:
                 return HTTPBadRequest(request=req, body='Bad state')
         include_deleted = config_true_value(
             req.headers.get('x-backend-include-deleted', False))
         container_list = broker.get_shard_ranges(
             marker,
             end_marker,
             includes,
             reverse,
             states=states,
             include_deleted=include_deleted,
             fill_gaps=fill_gaps)
     else:
         resp_headers = gen_resp_headers(info, is_deleted=is_deleted)
         if is_deleted:
             return HTTPNotFound(request=req, headers=resp_headers)
         resp_headers['X-Backend-Record-Type'] = 'object'
         # Use the retired db while container is in process of sharding,
         # otherwise use current db
         src_broker = broker.get_brokers()[0]
         container_list = src_broker.list_objects_iter(
             limit,
             marker,
             end_marker,
             prefix,
             delimiter,
             path,
             storage_policy_index=info['storage_policy_index'],
             reverse=reverse)
     return self.create_listing(req, out_content_type, info, resp_headers,
                                broker.metadata, container_list, container)
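
For reference, the record-type selection at the top of this GET handler can be expressed as a small pure function; resolve_record_type is our own name, and the db states are shown here as plain strings rather than the SHARDING/SHARDED constants used in the listing:

def resolve_record_type(header_value, db_state,
                        sharding_states=('sharding', 'sharded')):
    # 'shard' and 'object' can be requested explicitly; 'auto' defers to the
    # container db state, returning shard ranges while sharding is under way.
    record_type = (header_value or '').lower()
    if record_type == 'auto' and db_state in sharding_states:
        return 'shard'
    return record_type if record_type == 'shard' else 'object'

assert resolve_record_type('auto', 'sharded') == 'shard'
assert resolve_record_type('auto', 'unsharded') == 'object'
assert resolve_record_type('shard', 'collapsed') == 'shard'
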
Example #14
    def PUT(self, request):
        """Handle HTTP PUT requests for the Swift Object Server."""
        device, partition, account, container, obj, policy_idx = \
            get_name_and_placement(request, 5, 5, True)
        req_timestamp = valid_timestamp(request)
        error_response = check_object_creation(request, obj)
        if error_response:
            return error_response
        new_delete_at = int(request.headers.get('X-Delete-At') or 0)
        if new_delete_at and new_delete_at < time.time():
            return HTTPBadRequest(body='X-Delete-At in past', request=request,
                                  content_type='text/plain')
        try:
            fsize = request.message_length()
        except ValueError as e:
            return HTTPBadRequest(body=str(e), request=request,
                                  content_type='text/plain')
        try:
            disk_file = self.get_diskfile(
                device, partition, account, container, obj,
                policy_idx=policy_idx)
        except DiskFileDeviceUnavailable:
            return HTTPInsufficientStorage(drive=device, request=request)
        try:
            orig_metadata = disk_file.read_metadata()
        except DiskFileXattrNotSupported:
            return HTTPInsufficientStorage(drive=device, request=request)
        except (DiskFileNotExist, DiskFileQuarantined):
            orig_metadata = {}

        # Checks for If-None-Match
        if request.if_none_match is not None and orig_metadata:
            if '*' in request.if_none_match:
                # File exists already so return 412
                return HTTPPreconditionFailed(request=request)
            if orig_metadata.get('ETag') in request.if_none_match:
                # The current ETag matches, so return 412
                return HTTPPreconditionFailed(request=request)

        orig_timestamp = Timestamp(orig_metadata.get('X-Timestamp', 0))
        if orig_timestamp >= req_timestamp:
            return HTTPConflict(
                request=request,
                headers={'X-Backend-Timestamp': orig_timestamp.internal})
        orig_delete_at = int(orig_metadata.get('X-Delete-At') or 0)
        upload_expiration = time.time() + self.max_upload_time
        etag = md5()
        elapsed_time = 0
        try:
            with disk_file.create(size=fsize) as writer:
                upload_size = 0

                def timeout_reader():
                    with ChunkReadTimeout(self.client_timeout):
                        return request.environ['wsgi.input'].read(
                            self.network_chunk_size)

                try:
                    for chunk in iter(lambda: timeout_reader(), ''):
                        start_time = time.time()
                        if start_time > upload_expiration:
                            self.logger.increment('PUT.timeouts')
                            return HTTPRequestTimeout(request=request)
                        etag.update(chunk)
                        upload_size = writer.write(chunk)
                        elapsed_time += time.time() - start_time
                except ChunkReadTimeout:
                    return HTTPRequestTimeout(request=request)
                if upload_size:
                    self.logger.transfer_rate(
                        'PUT.' + device + '.timing', elapsed_time,
                        upload_size)
                if fsize is not None and fsize != upload_size:
                    return HTTPClientDisconnect(request=request)
                etag = etag.hexdigest()
                if 'etag' in request.headers and \
                        request.headers['etag'].lower() != etag:
                    return HTTPUnprocessableEntity(request=request)
                metadata = {
                    'X-Timestamp': request.timestamp.internal,
                    'Content-Type': request.headers['content-type'],
                    'ETag': etag,
                    'Content-Length': str(upload_size),
                }
                metadata.update(val for val in request.headers.iteritems()
                                if is_sys_or_user_meta('object', val[0]))
                headers_to_copy = (
                    request.headers.get(
                        'X-Backend-Replication-Headers', '').split() +
                    list(self.allowed_headers))
                for header_key in headers_to_copy:
                    if header_key in request.headers:
                        header_caps = header_key.title()
                        metadata[header_caps] = request.headers[header_key]
                writer.put(metadata)
        except (DiskFileXattrNotSupported, DiskFileNoSpace):
            return HTTPInsufficientStorage(drive=device, request=request)
        if orig_delete_at != new_delete_at:
            if new_delete_at:
                self.delete_at_update(
                    'PUT', new_delete_at, account, container, obj, request,
                    device, policy_idx)
            if orig_delete_at:
                self.delete_at_update(
                    'DELETE', orig_delete_at, account, container, obj,
                    request, device, policy_idx)
        self.container_update(
            'PUT', account, container, obj, request,
            HeaderKeyDict({
                'x-size': metadata['Content-Length'],
                'x-content-type': metadata['Content-Type'],
                'x-timestamp': metadata['X-Timestamp'],
                'x-etag': metadata['ETag']}),
            device, policy_idx)
        return HTTPCreated(request=request, etag=etag)
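
The core of the PUT handler above is the chunked read loop that accumulates an MD5 and a byte count for later comparison against the client's ETag and Content-Length. A simplified, self-contained version (timeouts and the diskfile writer omitted):

from hashlib import md5
from io import BytesIO

def read_body(wsgi_input, chunk_size=65536):
    # Read the request body chunk by chunk, hashing and counting as we go.
    etag = md5()
    upload_size = 0
    for chunk in iter(lambda: wsgi_input.read(chunk_size), b''):
        etag.update(chunk)
        upload_size += len(chunk)
    return etag.hexdigest(), upload_size

digest, size = read_body(BytesIO(b'hello world'))
assert size == 11
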
Example #15
    def PUT(self, req):
        """Handle HTTP PUT request."""
        drive, part, account, container, obj = split_and_validate_path(
            req, 4, 5, True)
        req_timestamp = valid_timestamp(req)
        if 'x-container-sync-to' in req.headers:
            err, sync_to, realm, realm_key = validate_sync_to(
                req.headers['x-container-sync-to'], self.allowed_sync_hosts,
                self.realms_conf)
            if err:
                return HTTPBadRequest(err)
        try:
            check_drive(self.root, drive, self.mount_check)
        except ValueError:
            return HTTPInsufficientStorage(drive=drive, request=req)
        if not self.check_free_space(drive):
            return HTTPInsufficientStorage(drive=drive, request=req)
        requested_policy_index = self.get_and_validate_policy_index(req)
        broker = self._get_container_broker(drive, part, account, container)
        if obj:  # put container object
            # obj put expects the policy_index header, default is for
            # legacy support during upgrade.
            obj_policy_index = requested_policy_index or 0
            self._maybe_autocreate(broker, req_timestamp, account,
                                   obj_policy_index)
            # redirect if a shard exists for this object name
            response = self._redirect_to_shard(req, broker, obj)
            if response:
                return response

            broker.put_object(
                obj, req_timestamp.internal, int(req.headers['x-size']),
                wsgi_to_str(req.headers['x-content-type']),
                wsgi_to_str(req.headers['x-etag']), 0, obj_policy_index,
                wsgi_to_str(req.headers.get('x-content-type-timestamp')),
                wsgi_to_str(req.headers.get('x-meta-timestamp')))
            return HTTPCreated(request=req)

        record_type = req.headers.get('x-backend-record-type', '').lower()
        if record_type == RECORD_TYPE_SHARD:
            try:
                # validate incoming data...
                shard_ranges = [
                    ShardRange.from_dict(sr) for sr in json.loads(req.body)
                ]
            except (ValueError, KeyError, TypeError) as err:
                return HTTPBadRequest('Invalid body: %r' % err)
            created = self._maybe_autocreate(broker, req_timestamp, account,
                                             requested_policy_index)
            self._update_metadata(req, broker, req_timestamp, 'PUT')
            if shard_ranges:
                # TODO: consider writing the shard ranges into the pending
                # file, but if so ensure an all-or-none semantic for the write
                broker.merge_shard_ranges(shard_ranges)
        else:  # put container
            if requested_policy_index is None:
                # use the default index sent by the proxy if available
                new_container_policy = req.headers.get(
                    'X-Backend-Storage-Policy-Default', int(POLICIES.default))
            else:
                new_container_policy = requested_policy_index
            created = self._update_or_create(req, broker,
                                             req_timestamp.internal,
                                             new_container_policy,
                                             requested_policy_index)
            self._update_metadata(req, broker, req_timestamp, 'PUT')
            resp = self.account_update(req, account, container, broker)
            if resp:
                return resp
        if created:
            return HTTPCreated(request=req,
                               headers={
                                   'x-backend-storage-policy-index':
                                   broker.storage_policy_index
                               })
        else:
            return HTTPAccepted(request=req,
                                headers={
                                    'x-backend-storage-policy-index':
                                    broker.storage_policy_index
                                })
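
The policy selection at the end of this PUT handler (an explicitly requested index wins, otherwise the proxy's advertised default) can be sketched as a small helper; choose_policy_index and the fallback value of 0 are our assumptions standing in for int(POLICIES.default):

def choose_policy_index(requested_policy_index, headers, cluster_default=0):
    # No explicit X-Backend-Storage-Policy-Index: fall back to the default
    # the proxy sent, and failing that to the cluster-wide default.
    if requested_policy_index is None:
        return int(headers.get('X-Backend-Storage-Policy-Default',
                               cluster_default))
    return requested_policy_index

assert choose_policy_index(None, {'X-Backend-Storage-Policy-Default': '2'}) == 2
assert choose_policy_index(1, {'X-Backend-Storage-Policy-Default': '2'}) == 1
assert choose_policy_index(None, {}) == 0
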
Example #16
 def PUT(self, req):
     """Handle HTTP PUT request."""
     try:
         drive, part, account, container, obj = req.split_path(4, 5, True)
         validate_device_partition(drive, part)
     except ValueError, err:
         return HTTPBadRequest(body=str(err), content_type='text/plain',
                               request=req)
     if 'x-timestamp' not in req.headers or \
             not check_float(req.headers['x-timestamp']):
         return HTTPBadRequest(body='Missing timestamp', request=req,
                               content_type='text/plain')
     if 'x-container-sync-to' in req.headers:
         err = validate_sync_to(req.headers['x-container-sync-to'],
                                self.allowed_sync_hosts)
         if err:
             return HTTPBadRequest(err)
     if self.mount_check and not check_mount(self.root, drive):
         return HTTPInsufficientStorage(drive=drive, request=req)
     timestamp = normalize_timestamp(req.headers['x-timestamp'])
     broker = self._get_container_broker(drive, part, account, container)
     if obj:     # put container object
         if account.startswith(self.auto_create_account_prefix) and \
                 not os.path.exists(broker.db_file):
             broker.initialize(timestamp)
         if not os.path.exists(broker.db_file):
             return HTTPNotFound()
         broker.put_object(obj, timestamp, int(req.headers['x-size']),
                           req.headers['x-content-type'],
                           req.headers['x-etag'])
         return HTTPCreated(request=req)
     else:   # put container
         if not os.path.exists(broker.db_file):
             broker.initialize(timestamp)
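
The implicit-creation rule in this snippet (the same check appears in the container DELETE handler later in this page) reduces to a one-line predicate; should_autocreate is an illustrative name, not a Swift function:

def should_autocreate(account, db_exists, auto_create_account_prefix='.'):
    # Only system accounts (names starting with the configured prefix,
    # '.' by default) get their container DB created on the fly.
    return account.startswith(auto_create_account_prefix) and not db_exists

assert should_autocreate('.expiring_objects', db_exists=False)
assert not should_autocreate('AUTH_test', db_exists=False)
assert not should_autocreate('.expiring_objects', db_exists=True)
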
Example #17
    def GET(self, req):
        """
        Handle HTTP GET request.

        The body of the response to a successful GET request contains a listing
        of either objects or shard ranges. The exact content of the listing is
        determined by a combination of request headers and query string
        parameters, as follows:

        * The type of the listing is determined by the
          ``X-Backend-Record-Type`` header. If this header has value ``shard``
          then the response body will be a list of shard ranges; if this header
          has value ``auto``, and the container state is ``sharding`` or
          ``sharded``, then the listing will be a list of shard ranges;
          otherwise the response body will be a list of objects.

        * Both shard range and object listings may be constrained to a name
          range by the ``marker`` and ``end_marker`` query string parameters.
          Object listings will only contain objects whose names are greater
          than any ``marker`` value and less than any ``end_marker`` value.
          Shard range listings will only contain shard ranges whose namespace
          is greater than or includes any ``marker`` value and is less than or
          includes any ``end_marker`` value.

        * Shard range listings may also be constrained by an ``includes`` query
          string parameter. If this parameter is present the listing will only
          contain shard ranges whose namespace includes the value of the
          parameter; any ``marker`` or ``end_marker`` parameters are ignored

        * The length of an object listing may be constrained by the ``limit``
          parameter. Object listings may also be constrained by ``prefix``,
          ``delimiter`` and ``path`` query string parameters.

        * Shard range listings will include deleted shard ranges if and only if
          the ``X-Backend-Include-Deleted`` header value is one of
          :attr:`swift.common.utils.TRUE_VALUES`. Object listings never
          include deleted objects.

        * Shard range listings may be constrained to include only shard ranges
          whose state is specified by a query string ``states`` parameter. If
          present, the ``states`` parameter should be a comma separated list of
          either the string or integer representation of
          :data:`~swift.common.utils.ShardRange.STATES`.

          Two alias values may be used in a ``states`` parameter value:
          ``listing`` will cause the listing to include all shard ranges in a
          state suitable for contributing to an object listing; ``updating``
          will cause the listing to include all shard ranges in a state
          suitable to accept an object update.

          If either of these aliases is used then the shard range listing will
          if necessary be extended with a synthesised 'filler' range in order
          to satisfy the requested name range when insufficient actual shard
          ranges are found. Any 'filler' shard range will cover the otherwise
          uncovered tail of the requested name range and will point back to the
          same container.

        * Listings are not normally returned from a deleted container. However,
          the ``X-Backend-Override-Deleted`` header may be used with a value in
          :attr:`swift.common.utils.TRUE_VALUES` to force a shard range
          listing to be returned from a deleted container whose DB file still
          exists.

        :param req: an instance of :class:`swift.common.swob.Request`
        :returns: an instance of :class:`swift.common.swob.Response`
        """
        drive, part, account, container, obj = split_and_validate_path(
            req, 4, 5, True)
        path = get_param(req, 'path')
        prefix = get_param(req, 'prefix')
        delimiter = get_param(req, 'delimiter')
        if delimiter and (len(delimiter) > 1 or ord(delimiter) > 254):
            # delimiters can be made more flexible later
            return HTTPPreconditionFailed(body='Bad delimiter')
        marker = get_param(req, 'marker', '')
        end_marker = get_param(req, 'end_marker')
        limit = constraints.CONTAINER_LISTING_LIMIT
        given_limit = get_param(req, 'limit')
        reverse = config_true_value(get_param(req, 'reverse'))
        if given_limit and given_limit.isdigit():
            limit = int(given_limit)
            if limit > constraints.CONTAINER_LISTING_LIMIT:
                return HTTPPreconditionFailed(
                    request=req,
                    body='Maximum limit is %d' %
                    constraints.CONTAINER_LISTING_LIMIT)
        out_content_type = listing_formats.get_listing_content_type(req)
        try:
            check_drive(self.root, drive, self.mount_check)
        except ValueError:
            return HTTPInsufficientStorage(drive=drive, request=req)
        broker = self._get_container_broker(drive,
                                            part,
                                            account,
                                            container,
                                            pending_timeout=0.1,
                                            stale_reads_ok=True)
        info, is_deleted = broker.get_info_is_deleted()
        record_type = req.headers.get('x-backend-record-type', '').lower()
        if record_type == 'auto' and info.get('db_state') in (SHARDING,
                                                              SHARDED):
            record_type = 'shard'
        if record_type == 'shard':
            override_deleted = info and config_true_value(
                req.headers.get('x-backend-override-deleted', False))
            resp_headers = gen_resp_headers(info,
                                            is_deleted=is_deleted
                                            and not override_deleted)
            if is_deleted and not override_deleted:
                return HTTPNotFound(request=req, headers=resp_headers)
            resp_headers['X-Backend-Record-Type'] = 'shard'
            includes = get_param(req, 'includes')
            states = get_param(req, 'states')
            fill_gaps = False
            if states:
                states = list_from_csv(states)
                fill_gaps = any(('listing' in states, 'updating' in states))
                try:
                    states = broker.resolve_shard_range_states(states)
                except ValueError:
                    return HTTPBadRequest(request=req, body='Bad state')
            include_deleted = config_true_value(
                req.headers.get('x-backend-include-deleted', False))
            container_list = broker.get_shard_ranges(
                marker,
                end_marker,
                includes,
                reverse,
                states=states,
                include_deleted=include_deleted,
                fill_gaps=fill_gaps)
        else:
            resp_headers = gen_resp_headers(info, is_deleted=is_deleted)
            if is_deleted:
                return HTTPNotFound(request=req, headers=resp_headers)
            resp_headers['X-Backend-Record-Type'] = 'object'
            # Use the retired db while container is in process of sharding,
            # otherwise use current db
            src_broker = broker.get_brokers()[0]
            container_list = src_broker.list_objects_iter(
                limit,
                marker,
                end_marker,
                prefix,
                delimiter,
                path,
                storage_policy_index=info['storage_policy_index'],
                reverse=reverse)
        return self.create_listing(req, out_content_type, info, resp_headers,
                                   broker.metadata, container_list, container)
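
The docstring above covers several interacting query parameters and backend headers. A small, standard-library-only illustration of how such a listing request might be composed; the drive ('sda1'), partition ('0'), account and container names are invented for the example and nothing here touches Swift itself:

try:
    from urllib import urlencode            # Python 2
except ImportError:
    from urllib.parse import urlencode      # Python 3

params = {
    'marker': 'photos/2019',      # only names/namespaces greater than this
    'end_marker': 'photos/2020',  # ... and less than this
    'states': 'listing',          # alias: shard ranges usable for a listing
}
headers = {
    'X-Backend-Record-Type': 'auto',       # let the container db state decide
    'X-Backend-Include-Deleted': 'false',  # include deleted shard ranges?
}
path = '/sda1/0/AUTH_test/photos?' + urlencode(sorted(params.items()))
print(path)
print(headers)
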
Example #18
class ContainerController(object):
    """WSGI Controller for the container server."""

    # Ensure these are all lowercase
    save_headers = ['x-container-read', 'x-container-write',
                    'x-container-sync-key', 'x-container-sync-to']

    def __init__(self, conf):
        self.logger = get_logger(conf, log_route='container-server')
        self.root = conf.get('devices', '/srv/node/')
        self.mount_check = config_true_value(conf.get('mount_check', 'true'))
        self.node_timeout = int(conf.get('node_timeout', 3))
        self.conn_timeout = float(conf.get('conn_timeout', 0.5))
        self.allowed_sync_hosts = [
            h.strip()
            for h in conf.get('allowed_sync_hosts', '127.0.0.1').split(',')
            if h.strip()]
        self.replicator_rpc = ReplicatorRpc(
            self.root, DATADIR, ContainerBroker, self.mount_check,
            logger=self.logger)
        self.auto_create_account_prefix = \
            conf.get('auto_create_account_prefix') or '.'
        if config_true_value(conf.get('allow_versions', 'f')):
            self.save_headers.append('x-versions-location')
        swift.common.db.DB_PREALLOCATION = \
            config_true_value(conf.get('db_preallocation', 'f'))

    def _get_container_broker(self, drive, part, account, container):
        """
        Get a DB broker for the container.

        :param drive: drive that holds the container
        :param part: partition the container is in
        :param account: account name
        :param container: container name
        :returns: ContainerBroker object
        """
        hsh = hash_path(account, container)
        db_dir = storage_directory(DATADIR, part, hsh)
        db_path = os.path.join(self.root, drive, db_dir, hsh + '.db')
        return ContainerBroker(db_path, account=account, container=container,
                               logger=self.logger)

    def account_update(self, req, account, container, broker):
        """
        Update the account server(s) with latest container info.

        :param req: swob.Request object
        :param account: account name
        :param container: container name
        :param broker: container DB broker object
        :returns: if all the account requests return a 404 error code,
                  HTTPNotFound response object,
                  if the account cannot be updated due to a malformed header,
                  an HTTPBadRequest response object,
                  otherwise None.
        """
        account_hosts = [h.strip() for h in
                         req.headers.get('X-Account-Host', '').split(',')]
        account_devices = [d.strip() for d in
                           req.headers.get('X-Account-Device', '').split(',')]
        account_partition = req.headers.get('X-Account-Partition', '')

        if len(account_hosts) != len(account_devices):
            # This shouldn't happen unless there's a bug in the proxy,
            # but if there is, we want to know about it.
            self.logger.error(_('ERROR Account update failed: different '
                                'numbers of hosts and devices in request: '
                                '"%s" vs "%s"' %
                                (req.headers.get('X-Account-Host', ''),
                                 req.headers.get('X-Account-Device', ''))))
            return HTTPBadRequest(req=req)

        if account_partition:
            updates = zip(account_hosts, account_devices)
        else:
            updates = []

        account_404s = 0

        for account_host, account_device in updates:
            account_ip, account_port = account_host.rsplit(':', 1)
            new_path = '/' + '/'.join([account, container])
            info = broker.get_info()
            account_headers = {
                'x-put-timestamp': info['put_timestamp'],
                'x-delete-timestamp': info['delete_timestamp'],
                'x-object-count': info['object_count'],
                'x-bytes-used': info['bytes_used'],
                'x-trans-id': req.headers.get('x-trans-id', '-')}
            if req.headers.get('x-account-override-deleted', 'no').lower() == \
                    'yes':
                account_headers['x-account-override-deleted'] = 'yes'
            try:
                with ConnectionTimeout(self.conn_timeout):
                    conn = http_connect(
                        account_ip, account_port, account_device,
                        account_partition, 'PUT', new_path, account_headers)
                with Timeout(self.node_timeout):
                    account_response = conn.getresponse()
                    account_response.read()
                    if account_response.status == HTTP_NOT_FOUND:
                        account_404s += 1
                    elif not is_success(account_response.status):
                        self.logger.error(_(
                            'ERROR Account update failed '
                            'with %(ip)s:%(port)s/%(device)s (will retry '
                            'later): Response %(status)s %(reason)s'),
                            {'ip': account_ip, 'port': account_port,
                             'device': account_device,
                             'status': account_response.status,
                             'reason': account_response.reason})
            except (Exception, Timeout):
                self.logger.exception(_(
                    'ERROR account update failed with '
                    '%(ip)s:%(port)s/%(device)s (will retry later)'),
                    {'ip': account_ip, 'port': account_port,
                     'device': account_device})
        if updates and account_404s == len(updates):
            return HTTPNotFound(req=req)
        else:
            return None

    @public
    @timing_stats()
    def DELETE(self, req):
        """Handle HTTP DELETE request."""
        try:
            drive, part, account, container, obj = req.split_path(4, 5, True)
            validate_device_partition(drive, part)
        except ValueError, err:
            return HTTPBadRequest(body=str(err), content_type='text/plain',
                                  request=req)
        if 'x-timestamp' not in req.headers or \
                not check_float(req.headers['x-timestamp']):
            return HTTPBadRequest(body='Missing timestamp', request=req,
                                  content_type='text/plain')
        if self.mount_check and not check_mount(self.root, drive):
            return HTTPInsufficientStorage(drive=drive, request=req)
        broker = self._get_container_broker(drive, part, account, container)
        if account.startswith(self.auto_create_account_prefix) and obj and \
                not os.path.exists(broker.db_file):
            broker.initialize(normalize_timestamp(
                req.headers.get('x-timestamp') or time.time()))
        if not os.path.exists(broker.db_file):
            return HTTPNotFound()
        if obj:     # delete object
            broker.delete_object(obj, req.headers.get('x-timestamp'))
            return HTTPNoContent(request=req)
        else:
            # delete container
            if not broker.empty():
                return HTTPConflict(request=req)
            existed = float(broker.get_info()['put_timestamp']) and \
                not broker.is_deleted()
            broker.delete_db(req.headers['X-Timestamp'])
            if not broker.is_deleted():
                return HTTPConflict(request=req)
            resp = self.account_update(req, account, container, broker)
            if resp:
                return resp
            if existed:
                return HTTPNoContent(request=req)
            return HTTPNotFound()
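
The fan-out parsing in account_update above can be isolated into a small helper for clarity; the host and device values below are invented:

def account_updates(headers):
    # The proxy sends parallel comma-separated host and device lists plus a
    # single partition; pair them up, rejecting mismatched lengths.
    hosts = [h.strip() for h in headers.get('X-Account-Host', '').split(',')]
    devices = [d.strip() for d in headers.get('X-Account-Device', '').split(',')]
    partition = headers.get('X-Account-Partition', '')
    if len(hosts) != len(devices):
        raise ValueError('different numbers of hosts and devices')
    return list(zip(hosts, devices)) if partition else []

print(account_updates({'X-Account-Host': '10.0.0.1:6002,10.0.0.2:6002',
                       'X-Account-Device': 'sda1,sdb1',
                       'X-Account-Partition': '17'}))
# -> [('10.0.0.1:6002', 'sda1'), ('10.0.0.2:6002', 'sdb1')]
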
Example #19
 def PUT(self, req):
     """Handle HTTP PUT request."""
     drive, part, account, container = split_and_validate_path(req, 3, 4)
     if self.mount_check and not check_mount(self.root, drive):
         return HTTPInsufficientStorage(drive=drive, request=req)
     if container:  # put account container
         pending_timeout = None
         if 'x-trans-id' in req.headers:
             pending_timeout = 3
         broker = self._get_account_broker(drive,
                                           part,
                                           account,
                                           pending_timeout=pending_timeout)
         if account.startswith(self.auto_create_account_prefix) and \
                 not os.path.exists(broker.db_file):
             try:
                 broker.initialize(
                     normalize_timestamp(
                         req.headers.get('x-timestamp') or time.time()))
             except DatabaseAlreadyExists:
                 pass
         if req.headers.get('x-account-override-deleted', 'no').lower() != \
                 'yes' and broker.is_deleted():
             return HTTPNotFound(request=req)
         broker.put_container(container, req.headers['x-put-timestamp'],
                              req.headers['x-delete-timestamp'],
                              req.headers['x-object-count'],
                              req.headers['x-bytes-used'])
         if req.headers['x-delete-timestamp'] > \
                 req.headers['x-put-timestamp']:
             return HTTPNoContent(request=req)
         else:
             return HTTPCreated(request=req)
     else:  # put account
         broker = self._get_account_broker(drive, part, account)
         timestamp = normalize_timestamp(req.headers['x-timestamp'])
         if not os.path.exists(broker.db_file):
             try:
                 broker.initialize(timestamp)
                 created = True
             except DatabaseAlreadyExists:
                 pass
         elif broker.is_status_deleted():
             return self._deleted_response(broker,
                                           req,
                                           HTTPForbidden,
                                           body='Recently deleted')
         else:
             created = broker.is_deleted()
             broker.update_put_timestamp(timestamp)
             if broker.is_deleted():
                 return HTTPConflict(request=req)
         metadata = {}
         metadata.update((key, (value, timestamp))
                         for key, value in req.headers.iteritems()
                         if key.lower().startswith('x-account-meta-'))
         if metadata:
             broker.update_metadata(metadata)
         if created:
             return HTTPCreated(request=req)
         else:
             return HTTPAccepted(request=req)
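
The handler above compares x-delete-timestamp and x-put-timestamp as plain strings. That works because Swift's normalize_timestamp emits a fixed-width, zero-padded value, so lexicographic ordering agrees with numeric ordering. A minimal stand-in for illustration:

def normalize_timestamp(timestamp):
    # Fixed-width epoch seconds with five decimal places, zero-padded.
    return '%016.05f' % float(timestamp)

put_ts = normalize_timestamp(1400000000.5)
delete_ts = normalize_timestamp(1400000001.25)
assert put_ts == '1400000000.50000'
assert delete_ts > put_ts            # string compare, same result as numeric
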
Example #20
    def GET(self, request):
        """Handle HTTP GET requests for the Swift Object Server."""
        device, partition, account, container, obj, policy = \
            get_name_and_placement(request, 5, 5, True)
        keep_cache = self.keep_cache_private or (
            'X-Auth-Token' not in request.headers and
            'X-Storage-Token' not in request.headers)
        try:
            disk_file = self.get_diskfile(
                device, partition, account, container, obj,
                policy=policy)
        except DiskFileDeviceUnavailable:
            return HTTPInsufficientStorage(drive=device, request=request)
        try:
            with disk_file.open() as writer:
                metadata = disk_file.get_metadata()
                obj_size = int(metadata['Content-Length'])
                file_x_ts = Timestamp(metadata['X-Timestamp'])
                keep_cache = (self.keep_cache_private or
                              ('X-Auth-Token' not in request.headers and
                               'X-Storage-Token' not in request.headers))
                conditional_etag = None
                if 'X-Backend-Etag-Is-At' in request.headers:
                    conditional_etag = metadata.get(
                        request.headers['X-Backend-Etag-Is-At'])
                response = Response(
                    app_iter=disk_file.reader(keep_cache=keep_cache),
                    request=request, conditional_response=True,
                    conditional_etag=conditional_etag)

                # Simple hot/warm/cold tiering for objects stored on hdd,
                # keyed off the age of the object (now minus X-Timestamp).
                # Each transition persists the updated Hot-Flag via
                # write_metadata; promotion to 'hot' also flags the response
                # with a REPHTS header.
                obj_age = time.time() - float(metadata['X-Timestamp'])
                if metadata['Disk-Info'] == 'hdd' \
                        and metadata['Hot-Flag'] == 'cold' \
                        and obj_age <= 60*600000000000000000000000:
                    metadata['Hot-Flag'] = 'warm'
                    writer.write_metadata(metadata)
                elif metadata['Disk-Info'] == 'hdd' \
                        and metadata['Hot-Flag'] == 'warm' \
                        and obj_age <= 60*60000000000000000000000000:
                    metadata['Hot-Flag'] = 'hot'
                    response.headers['REPHTS'] = True
                    writer.write_metadata(metadata)
                elif metadata['Disk-Info'] == 'hdd' \
                        and metadata['Hot-Flag'] == 'warm' \
                        and obj_age >= 60*60*2000000000000000000000000:
                    metadata['Hot-Flag'] = 'cold'
                    writer.write_metadata(metadata)

                response.headers['Content-Type'] = metadata.get(
                    'Content-Type', 'application/octet-stream')
                for key, value in metadata.iteritems():
                    if is_sys_or_user_meta('object', key) or \
                            key.lower() in self.allowed_headers:
                        response.headers[key] = value
                response.etag = metadata['ETag']
                response.last_modified = math.ceil(float(file_x_ts))
                response.content_length = obj_size
                try:
                    response.content_encoding = metadata[
                        'Content-Encoding']
                except KeyError:
                    pass
                response.headers['X-Timestamp'] = file_x_ts.normal
                response.headers['X-Backend-Timestamp'] = file_x_ts.internal
                response.headers['Hot-Flag'] = metadata['Hot-Flag']
                response.headers['Disk-Info'] = metadata['Disk-Info']
                resp = request.get_response(response)
        except DiskFileXattrNotSupported:
            return HTTPInsufficientStorage(drive=device, request=request)
        except (DiskFileNotExist, DiskFileQuarantined) as e:
            headers = {}
            if hasattr(e, 'timestamp'):
                headers['X-Backend-Timestamp'] = e.timestamp.internal
            resp = HTTPNotFound(request=request, headers=headers,
                                conditional_response=True)
        return resp
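
The hdd hot/warm/cold logic in this GET handler is easier to follow when factored into a pure function. The sketch below uses placeholder age thresholds (one hour and one day) rather than the values in the listing, and next_hot_flag is our own name:

import time

def next_hot_flag(disk_info, hot_flag, x_timestamp,
                  warm_age=3600, cold_age=86400, now=None):
    # Only objects on hdd ever change tier.
    if disk_info != 'hdd':
        return hot_flag
    age = (now if now is not None else time.time()) - float(x_timestamp)
    if hot_flag == 'cold' and age <= warm_age:
        return 'warm'                 # recently written cold object warms up
    if hot_flag == 'warm' and age <= warm_age:
        return 'hot'                  # warm object read again soon becomes hot
    if hot_flag == 'warm' and age >= cold_age:
        return 'cold'                 # warm object gone stale cools down
    return hot_flag

assert next_hot_flag('hdd', 'cold', x_timestamp=100, now=200) == 'warm'
assert next_hot_flag('hdd', 'warm', x_timestamp=100, now=100 + 86400) == 'cold'
assert next_hot_flag('ssd', 'cold', x_timestamp=100, now=200) == 'cold'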