Example #1
    def test_check_object_creation_copy(self):
        headers = {
            'Content-Length': '0',
            'X-Copy-From': 'c/o2',
            'Content-Type': 'text/plain'
        }
        self.assertEquals(
            constraints.check_object_creation(
                Request.blank('/', headers=headers), 'object_name'), None)

        headers = {
            'Content-Length': '1',
            'X-Copy-From': 'c/o2',
            'Content-Type': 'text/plain'
        }
        self.assertEquals(
            constraints.check_object_creation(
                Request.blank('/', headers=headers), 'object_name').status_int,
            HTTP_BAD_REQUEST)

        headers = {
            'Transfer-Encoding': 'chunked',
            'X-Copy-From': 'c/o2',
            'Content-Type': 'text/plain'
        }
        self.assertEquals(
            constraints.check_object_creation(
                Request.blank('/', headers=headers), 'object_name'), None)

        # a content-length header is always required
        headers = {'X-Copy-From': 'c/o2', 'Content-Type': 'text/plain'}
        self.assertEquals(
            constraints.check_object_creation(
                Request.blank('/', headers=headers), 'object_name').status_int,
            HTTP_LENGTH_REQUIRED)
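
For reference, check_object_creation() returns None when a request passes the object-creation constraints and an error response (whose status_int the tests inspect) when it does not. Below is a minimal standalone sketch of that contract, assuming swift.common.constraints and swift.common.swob are importable; the request path and object name are placeholders, and X-Timestamp is included because newer Swift releases validate it during these checks (the later examples that set it do so for the same reason).

    import time

    from swift.common import constraints
    from swift.common.swob import Request

    # A well-formed zero-byte PUT: the checker returns None.
    ok_req = Request.blank('/v1/AUTH_test/c/o',
                           headers={'Content-Length': '0',
                                    'Content-Type': 'text/plain',
                                    'X-Timestamp': str(time.time())})
    print(constraints.check_object_creation(ok_req, 'o'))  # None

    # No Content-Length and no chunked Transfer-Encoding: an error response
    # is returned instead of None, carrying the HTTP status to send back.
    bad_req = Request.blank('/v1/AUTH_test/c/o',
                            headers={'Content-Type': 'text/plain',
                                     'X-Timestamp': str(time.time())})
    resp = constraints.check_object_creation(bad_req, 'o')
    print(resp.status)  # 411 Length Required
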
Example #2
 def test_check_object_creation_content_length(self):
     headers = {
         'Content-Length': str(constraints.MAX_FILE_SIZE),
         'Content-Type': 'text/plain'
     }
     self.assertEquals(
         constraints.check_object_creation(
             Request.blank('/', headers=headers), 'object_name'), None)
     headers = {
         'Content-Length': str(constraints.MAX_FILE_SIZE + 1),
         'Content-Type': 'text/plain'
     }
     self.assertEquals(
         constraints.check_object_creation(
             Request.blank('/', headers=headers), 'object_name').status_int,
         HTTP_REQUEST_ENTITY_TOO_LARGE)
     headers = {
         'Transfer-Encoding': 'chunked',
         'Content-Type': 'text/plain'
     }
     self.assertEquals(
         constraints.check_object_creation(
             Request.blank('/', headers=headers), 'object_name'), None)
     headers = {'Content-Type': 'text/plain'}
     self.assertEquals(
         constraints.check_object_creation(
             Request.blank('/', headers=headers), 'object_name').status_int,
         HTTP_LENGTH_REQUIRED)
Example #3
    def test_check_object_creation_copy(self):
        headers = {'Content-Length': '0',
                   'X-Copy-From': 'c/o2',
                   'Content-Type': 'text/plain'}
        self.assertEquals(constraints.check_object_creation(Request.blank(
            '/', headers=headers), 'object_name'), None)

        headers = {'Content-Length': '1',
                   'X-Copy-From': 'c/o2',
                   'Content-Type': 'text/plain'}
        self.assertEquals(constraints.check_object_creation(Request.blank(
            '/', headers=headers), 'object_name').status_int,
            HTTP_BAD_REQUEST)

        headers = {'Transfer-Encoding': 'chunked',
                   'X-Copy-From': 'c/o2',
                   'Content-Type': 'text/plain'}
        self.assertEquals(constraints.check_object_creation(Request.blank(
            '/', headers=headers), 'object_name'), None)

        # a content-length header is always required
        headers = {'X-Copy-From': 'c/o2',
                   'Content-Type': 'text/plain'}
        self.assertEquals(constraints.check_object_creation(Request.blank(
            '/', headers=headers), 'object_name').status_int,
            HTTP_LENGTH_REQUIRED)
Example #4
 def test_check_object_creation_content_length(self):
     headers = {
         'Content-Length': str(constraints.MAX_FILE_SIZE),
         'Content-Type': 'text/plain'
     }
     self.assertEquals(
         constraints.check_object_creation(
             Request.blank('/', headers=headers), 'object_name'), None)
     headers = {
         'Content-Length': str(constraints.MAX_FILE_SIZE + 1),
         'Content-Type': 'text/plain'
     }
     self.assert_(
         isinstance(
             constraints.check_object_creation(
                 Request.blank('/', headers=headers), 'object_name'),
             HTTPRequestEntityTooLarge))
     headers = {
         'Transfer-Encoding': 'chunked',
         'Content-Type': 'text/plain'
     }
     self.assertEquals(
         constraints.check_object_creation(
             Request.blank('/', headers=headers), 'object_name'), None)
     headers = {'Content-Type': 'text/plain'}
     self.assert_(
         isinstance(
             constraints.check_object_creation(
                 Request.blank('/', headers=headers), 'object_name'),
             HTTPLengthRequired))
Example #5
 def test_check_object_creation_content_type(self):
     headers = {"Transfer-Encoding": "chunked", "Content-Type": "text/plain"}
     self.assertEquals(constraints.check_object_creation(Request.blank("/", headers=headers), "object_name"), None)
     headers = {"Transfer-Encoding": "chunked"}
     self.assertEquals(
         constraints.check_object_creation(Request.blank("/", headers=headers), "object_name").status_int,
         HTTP_BAD_REQUEST,
     )
Example #6
 def test_check_object_creation_name_length(self):
     headers = {"Transfer-Encoding": "chunked", "Content-Type": "text/plain"}
     name = "o" * constraints.MAX_OBJECT_NAME_LENGTH
     self.assertEquals(constraints.check_object_creation(Request.blank("/", headers=headers), name), None)
     name = "o" * (constraints.MAX_OBJECT_NAME_LENGTH + 1)
     self.assertEquals(
         constraints.check_object_creation(Request.blank("/", headers=headers), name).status_int, HTTP_BAD_REQUEST
     )
Example #7
 def test_check_object_creation_content_type(self):
     headers = {'Transfer-Encoding': 'chunked',
                'Content-Type': 'text/plain'}
     self.assertEqual(constraints.check_object_creation(Request.blank(
         '/', headers=headers), 'object_name'), None)
     headers = {'Transfer-Encoding': 'chunked'}
     self.assertEqual(constraints.check_object_creation(
         Request.blank('/', headers=headers), 'object_name').status_int,
         HTTP_BAD_REQUEST)
Example #8
 def test_check_object_creation_content_type(self):
     headers = {'Transfer-Encoding': 'chunked',
                'Content-Type': 'text/plain'}
     self.assertEquals(constraints.check_object_creation(Request.blank('/',
         headers=headers), 'object_name'), None)
     headers = {'Transfer-Encoding': 'chunked'}
     self.assert_(isinstance(constraints.check_object_creation(
         Request.blank('/', headers=headers), 'object_name'),
         HTTPBadRequest))
Example #9
 def test_check_object_creation_content_type(self):
     headers = {'Transfer-Encoding': 'chunked',
                'Content-Type': 'text/plain'}
     self.assertEquals(constraints.check_object_creation(Request.blank(
         '/', headers=headers), 'object_name'), None)
     headers = {'Transfer-Encoding': 'chunked'}
     self.assertEquals(constraints.check_object_creation(
         Request.blank('/', headers=headers), 'object_name').status_int,
         HTTP_BAD_REQUEST)
Example #10
 def test_check_object_creation_name_length(self):
     headers = {'Transfer-Encoding': 'chunked',
                'Content-Type': 'text/plain'}
     name = 'o' * constraints.MAX_OBJECT_NAME_LENGTH
     self.assertEquals(constraints.check_object_creation(Request.blank(
         '/', headers=headers), name), None)
     name = 'o' * (constraints.MAX_OBJECT_NAME_LENGTH + 1)
     self.assertEquals(constraints.check_object_creation(
         Request.blank('/', headers=headers), name).status_int,
         HTTP_BAD_REQUEST)
Example #11
 def test_check_object_creation_name_length(self):
     headers = {'Transfer-Encoding': 'chunked',
                'Content-Type': 'text/plain'}
     name = 'o' * constraints.MAX_OBJECT_NAME_LENGTH
     self.assertEqual(constraints.check_object_creation(Request.blank(
         '/', headers=headers), name), None)
     name = 'o' * (constraints.MAX_OBJECT_NAME_LENGTH + 1)
     self.assertEqual(constraints.check_object_creation(
         Request.blank('/', headers=headers), name).status_int,
         HTTP_BAD_REQUEST)
Example #12
    def test_check_object_creation_content_type(self):
        headers = {'Transfer-Encoding': 'chunked',
                   'Content-Type': 'text/plain'}
        self.assertIsNone(constraints.check_object_creation(Request.blank(
            '/', headers=headers), 'object_name'))

        headers = {'Transfer-Encoding': 'chunked'}
        resp = constraints.check_object_creation(
            Request.blank('/', headers=headers), 'object_name')
        self.assertEqual(resp.status_int, HTTP_BAD_REQUEST)
        self.assertIn('No content type', resp.body)
Example #13
    def test_check_object_creation_bad_delete_headers(self):
        headers = {"Transfer-Encoding": "chunked", "Content-Type": "text/plain", "X-Delete-After": "abc"}
        resp = constraints.check_object_creation(Request.blank("/", headers=headers), "object_name")
        self.assertEquals(resp.status_int, HTTP_BAD_REQUEST)
        self.assertTrue("Non-integer X-Delete-After" in resp.body)

        t = str(int(time.time() - 60))
        headers = {"Transfer-Encoding": "chunked", "Content-Type": "text/plain", "X-Delete-At": t}
        resp = constraints.check_object_creation(Request.blank("/", headers=headers), "object_name")
        self.assertEquals(resp.status_int, HTTP_BAD_REQUEST)
        self.assertTrue("X-Delete-At in past" in resp.body)
Example #14
    def test_check_object_creation_content_type(self):
        headers = {'Transfer-Encoding': 'chunked',
                   'Content-Type': 'text/plain',
                   'X-Timestamp': str(time.time())}
        self.assertIsNone(constraints.check_object_creation(Request.blank(
            '/', headers=headers), 'object_name'))

        headers = {'Transfer-Encoding': 'chunked',
                   'X-Timestamp': str(time.time())}
        resp = constraints.check_object_creation(
            Request.blank('/', headers=headers), 'object_name')
        self.assertEqual(resp.status_int, HTTP_BAD_REQUEST)
        self.assertIn(b'No content type', resp.body)
Example #15
    def test_check_object_creation_name_length(self):
        headers = {'Transfer-Encoding': 'chunked',
                   'Content-Type': 'text/plain'}
        name = 'o' * constraints.MAX_OBJECT_NAME_LENGTH
        self.assertIsNone(constraints.check_object_creation(Request.blank(
            '/', headers=headers), name))

        name = 'o' * (MAX_OBJECT_NAME_LENGTH + 1)
        resp = constraints.check_object_creation(
            Request.blank('/', headers=headers), name)
        self.assertEqual(resp.status_int, HTTP_BAD_REQUEST)
        self.assertIn('Object name length of %d longer than %d' %
                      (MAX_OBJECT_NAME_LENGTH + 1, MAX_OBJECT_NAME_LENGTH),
                      resp.body)
Example #16
    def test_check_object_creation_name_length(self):
        headers = {'Transfer-Encoding': 'chunked',
                   'Content-Type': 'text/plain',
                   'X-Timestamp': str(time.time())}
        name = 'o' * constraints.MAX_OBJECT_NAME_LENGTH
        self.assertIsNone(constraints.check_object_creation(Request.blank(
            '/', headers=headers), name))

        name = 'o' * (MAX_OBJECT_NAME_LENGTH + 1)
        resp = constraints.check_object_creation(
            Request.blank('/', headers=headers), name)
        self.assertEqual(resp.status_int, HTTP_BAD_REQUEST)
        self.assertIn(b'Object name length of %d longer than %d' %
                      (MAX_OBJECT_NAME_LENGTH + 1, MAX_OBJECT_NAME_LENGTH),
                      resp.body)
Example #17
    def PUT(self, req):
        """HTTP PUT request handler."""
        container_info = self.container_info(self.account_name,
                                             self.container_name, req)

        req.acl = container_info['write_acl']
        req.environ['swift_sync_key'] = container_info['sync_key']

        # is request authorized
        if 'swift.authorize' in req.environ:
            aresp = req.environ['swift.authorize'](req)
            if aresp:
                return aresp

        self._update_content_type(req)

        # check constraints on object name and request headers
        error_response = check_object_creation(req, self.object_name) or \
            check_content_type(req)
        if error_response:
            return error_response

        if req.headers.get('Oio-Copy-From'):
            return self._link_object(req)

        self._update_x_timestamp(req)

        data_source = req.environ['wsgi.input']
        if req.content_length:
            data_source = ExpectedSizeReader(data_source, req.content_length)

        headers = self._prepare_headers(req)
        with closing_if_possible(data_source):
            resp = self._store_object(req, data_source, headers)
        return resp
Example #18
 def test_check_object_creation_bad_content_type(self):
     headers = {'Transfer-Encoding': 'chunked',
                'Content-Type': '\xff\xff'}
     resp = constraints.check_object_creation(
         Request.blank('/', headers=headers), 'object_name')
     self.assert_(isinstance(resp, HTTPBadRequest))
     self.assert_('Content-Type' in resp.body)
Example #19
 def test_check_object_creation_bad_content_type(self):
     headers = {'Transfer-Encoding': 'chunked',
                'Content-Type': '\xff\xff'}
     resp = constraints.check_object_creation(
         Request.blank('/', headers=headers), 'object_name')
     self.assertEqual(resp.status_int, HTTP_BAD_REQUEST)
     self.assertTrue('Content-Type' in resp.body)
Example #20
 def test_check_object_creation_bad_content_type(self):
     headers = {'Transfer-Encoding': 'chunked',
                'Content-Type': '\xff\xff'}
     resp = constraints.check_object_creation(
         Request.blank('/', headers=headers), 'object_name')
     self.assertEquals(resp.status_int, HTTP_BAD_REQUEST)
     self.assert_('Content-Type' in resp.body)
Example #21
 def test_check_object_creation_bad_content_type(self):
     headers = {'Transfer-Encoding': 'chunked',
                'Content-Type': '\xff\xff'}
     resp = constraints.check_object_creation(
         Request.blank('/', headers=headers), 'object_name')
     self.assert_(isinstance(resp, HTTPBadRequest))
     self.assert_('Content-Type' in resp.body)
Example #22
File: obj.py Project: fvennetier/oio-swift
    def PUT(self, req):
        """HTTP PUT request handler."""
        container_info = self.container_info(
            self.account_name, self.container_name, req)

        req.acl = container_info['write_acl']
        req.environ['swift_sync_key'] = container_info['sync_key']

        # is request authorized
        if 'swift.authorize' in req.environ:
            aresp = req.environ['swift.authorize'](req)
            if aresp:
                return aresp

        old_slo_manifest = None
        # If versioning is disabled, we must check if the object exists.
        # If it's a SLO, we will have to delete the parts if the current
        # operation is a success.
        if (self.app.delete_slo_parts and
                not container_info['sysmeta'].get('versions-location', None)):
            try:
                dest_info = get_object_info(req.environ, self.app)
                if 'slo-size' in dest_info['sysmeta']:
                    manifest_env = req.environ.copy()
                    manifest_env['QUERY_STRING'] = 'multipart-manifest=get'
                    manifest_req = make_subrequest(manifest_env, 'GET')
                    manifest_resp = manifest_req.get_response(self.app)
                    old_slo_manifest = json.loads(manifest_resp.body)
            except Exception as exc:
                self.app.logger.warn(('Failed to check existence of %s. If '
                                      'overwriting a SLO, old parts may '
                                      'remain. Error was: %s') %
                                     (req.path, exc))

        self._update_content_type(req)

        self._update_x_timestamp(req)

        # check constraints on object name and request headers
        error_response = check_object_creation(req, self.object_name) or \
            check_content_type(req)
        if error_response:
            return error_response

        if req.headers.get('Oio-Copy-From'):
            return self._link_object(req)

        data_source = req.environ['wsgi.input']
        if req.content_length:
            data_source = ExpectedSizeReader(data_source, req.content_length)

        headers = self._prepare_headers(req)
        with closing_if_possible(data_source):
            resp = self._store_object(req, data_source, headers)
        if old_slo_manifest and resp.is_success:
            self.app.logger.debug(
                'Previous object %s was a SLO, deleting parts',
                req.path)
            self._delete_slo_parts(req, old_slo_manifest)
        return resp
Example #23
File: obj.py Project: logorn/oio-swift
    def PUT(self, req):
        """HTTP PUT request handler."""
        if req.if_none_match is not None and '*' not in req.if_none_match:
            # Sending an etag with if-none-match isn't currently supported
            return HTTPBadRequest(request=req,
                                  content_type='text/plain',
                                  body='If-None-Match only supports *')
        container_info = self.container_info(self.account_name,
                                             self.container_name, req)

        req.acl = container_info['write_acl']
        req.environ['swift_sync_key'] = container_info['sync_key']

        # is request authorized
        if 'swift.authorize' in req.environ:
            aresp = req.environ['swift.authorize'](req)
            if aresp:
                return aresp

        self._update_content_type(req)

        # check constraints on object name and request headers
        error_response = check_object_creation(req, self.object_name) or \
            check_content_type(req)
        if error_response:
            return error_response

        self._update_x_timestamp(req)

        data_source = req.environ['wsgi.input']

        headers = self._prepare_headers(req)
        resp = self._store_object(req, data_source, headers)
        return resp
Example #24
    def test_check_object_creation_bad_delete_headers(self):
        headers = {'Transfer-Encoding': 'chunked',
                   'Content-Type': 'text/plain',
                   'X-Delete-After': 'abc'}
        resp = constraints.check_object_creation(
            Request.blank('/', headers=headers), 'object_name')
        self.assertEquals(resp.status_int, HTTP_BAD_REQUEST)
        self.assert_('Non-integer X-Delete-After' in resp.body)

        t = str(int(time.time() - 60))
        headers = {'Transfer-Encoding': 'chunked',
                   'Content-Type': 'text/plain',
                   'X-Delete-At': t}
        resp = constraints.check_object_creation(
            Request.blank('/', headers=headers), 'object_name')
        self.assertEquals(resp.status_int, HTTP_BAD_REQUEST)
        self.assert_('X-Delete-At in past' in resp.body)
Example #25
    def test_check_object_creation_bad_delete_headers(self):
        headers = {'Transfer-Encoding': 'chunked',
                   'Content-Type': 'text/plain',
                   'X-Delete-After': 'abc'}
        resp = constraints.check_object_creation(
            Request.blank('/', headers=headers), 'object_name')
        self.assertEqual(resp.status_int, HTTP_BAD_REQUEST)
        self.assertTrue('Non-integer X-Delete-After' in resp.body)

        t = str(int(time.time() - 60))
        headers = {'Transfer-Encoding': 'chunked',
                   'Content-Type': 'text/plain',
                   'X-Delete-At': t}
        resp = constraints.check_object_creation(
            Request.blank('/', headers=headers), 'object_name')
        self.assertEqual(resp.status_int, HTTP_BAD_REQUEST)
        self.assertTrue('X-Delete-At in past' in resp.body)
Example #26
 def test_check_object_creation_content_length(self):
     headers = {'Content-Length': str(constraints.MAX_FILE_SIZE),
                'Content-Type': 'text/plain'}
     self.assertEquals(constraints.check_object_creation(Request.blank(
         '/', headers=headers), 'object_name'), None)
     headers = {'Content-Length': str(constraints.MAX_FILE_SIZE + 1),
                'Content-Type': 'text/plain'}
     self.assertEquals(constraints.check_object_creation(
         Request.blank('/', headers=headers), 'object_name').status_int,
         HTTP_REQUEST_ENTITY_TOO_LARGE)
     headers = {'Transfer-Encoding': 'chunked',
                'Content-Type': 'text/plain'}
     self.assertEquals(constraints.check_object_creation(Request.blank(
         '/', headers=headers), 'object_name'), None)
     headers = {'Content-Type': 'text/plain'}
     self.assertEquals(constraints.check_object_creation(
         Request.blank('/', headers=headers), 'object_name').status_int,
         HTTP_LENGTH_REQUIRED)
Example #27
 def test_check_object_creation_content_length(self):
     headers = {'Content-Length': str(constraints.MAX_FILE_SIZE),
                'Content-Type': 'text/plain'}
     self.assertEquals(constraints.check_object_creation(Request.blank('/',
         headers=headers), 'object_name'), None)
     headers = {'Content-Length': str(constraints.MAX_FILE_SIZE + 1),
                'Content-Type': 'text/plain'}
     self.assert_(isinstance(constraints.check_object_creation(
         Request.blank('/', headers=headers), 'object_name'),
         HTTPRequestEntityTooLarge))
     headers = {'Transfer-Encoding': 'chunked',
                'Content-Type': 'text/plain'}
     self.assertEquals(constraints.check_object_creation(Request.blank('/',
         headers=headers), 'object_name'), None)
     headers = {'Content-Type': 'text/plain'}
     self.assert_(isinstance(constraints.check_object_creation(
         Request.blank('/', headers=headers), 'object_name'),
         HTTPLengthRequired))
Example #28
    def test_check_object_creation_copy(self):
        headers = {"Content-Length": "0", "X-Copy-From": "c/o2", "Content-Type": "text/plain"}
        self.assertEquals(constraints.check_object_creation(Request.blank("/", headers=headers), "object_name"), None)

        headers = {"Content-Length": "1", "X-Copy-From": "c/o2", "Content-Type": "text/plain"}
        self.assertEquals(
            constraints.check_object_creation(Request.blank("/", headers=headers), "object_name").status_int,
            HTTP_BAD_REQUEST,
        )

        headers = {"Transfer-Encoding": "chunked", "X-Copy-From": "c/o2", "Content-Type": "text/plain"}
        self.assertEquals(constraints.check_object_creation(Request.blank("/", headers=headers), "object_name"), None)

        # a content-length header is always required
        headers = {"X-Copy-From": "c/o2", "Content-Type": "text/plain"}
        self.assertEquals(
            constraints.check_object_creation(Request.blank("/", headers=headers), "object_name").status_int,
            HTTP_LENGTH_REQUIRED,
        )
Example #29
 def test_check_object_manifest_header(self):
     resp = constraints.check_object_creation(Request.blank('/',
         headers={'X-Object-Manifest': 'container/prefix', 'Content-Length':
         '0', 'Content-Type': 'text/plain'}), 'manifest')
     self.assert_(not resp)
     resp = constraints.check_object_creation(Request.blank('/',
         headers={'X-Object-Manifest': 'container', 'Content-Length': '0',
         'Content-Type': 'text/plain'}), 'manifest')
     self.assertEquals(resp.status_int, HTTP_BAD_REQUEST)
     resp = constraints.check_object_creation(Request.blank('/',
         headers={'X-Object-Manifest': '/container/prefix',
         'Content-Length': '0', 'Content-Type': 'text/plain'}), 'manifest')
     self.assertEquals(resp.status_int, HTTP_BAD_REQUEST)
     resp = constraints.check_object_creation(Request.blank('/',
         headers={'X-Object-Manifest': 'container/prefix?query=param',
         'Content-Length': '0', 'Content-Type': 'text/plain'}), 'manifest')
     self.assertEquals(resp.status_int, HTTP_BAD_REQUEST)
     resp = constraints.check_object_creation(Request.blank('/',
         headers={'X-Object-Manifest': 'container/prefix&query=param',
         'Content-Length': '0', 'Content-Type': 'text/plain'}), 'manifest')
     self.assertEquals(resp.status_int, HTTP_BAD_REQUEST)
     resp = constraints.check_object_creation(Request.blank('/',
         headers={'X-Object-Manifest': 'http://host/container/prefix',
         'Content-Length': '0', 'Content-Type': 'text/plain'}), 'manifest')
     self.assertEquals(resp.status_int, HTTP_BAD_REQUEST)
Example #30
 def test_check_object_manifest_header(self):
     resp = constraints.check_object_creation(Request.blank('/',
         headers={'X-Object-Manifest': 'container/prefix', 'Content-Length':
         '0', 'Content-Type': 'text/plain'}), 'manifest')
     self.assert_(not resp)
     resp = constraints.check_object_creation(Request.blank('/',
         headers={'X-Object-Manifest': 'container', 'Content-Length': '0',
         'Content-Type': 'text/plain'}), 'manifest')
     self.assertEquals(resp.status_int, HTTP_BAD_REQUEST)
     resp = constraints.check_object_creation(Request.blank('/',
         headers={'X-Object-Manifest': '/container/prefix',
         'Content-Length': '0', 'Content-Type': 'text/plain'}), 'manifest')
     self.assertEquals(resp.status_int, HTTP_BAD_REQUEST)
     resp = constraints.check_object_creation(Request.blank('/',
         headers={'X-Object-Manifest': 'container/prefix?query=param',
         'Content-Length': '0', 'Content-Type': 'text/plain'}), 'manifest')
     self.assertEquals(resp.status_int, HTTP_BAD_REQUEST)
     resp = constraints.check_object_creation(Request.blank('/',
         headers={'X-Object-Manifest': 'container/prefix&query=param',
         'Content-Length': '0', 'Content-Type': 'text/plain'}), 'manifest')
     self.assertEquals(resp.status_int, HTTP_BAD_REQUEST)
     resp = constraints.check_object_creation(Request.blank('/',
         headers={'X-Object-Manifest': 'http://host/container/prefix',
         'Content-Length': '0', 'Content-Type': 'text/plain'}), 'manifest')
     self.assertEquals(resp.status_int, HTTP_BAD_REQUEST)
Example #31
    def test_check_object_creation_content_length(self):
        headers = {
            'Content-Length': str(constraints.MAX_FILE_SIZE),
            'Content-Type': 'text/plain'
        }
        self.assertIsNone(
            constraints.check_object_creation(
                Request.blank('/', headers=headers), 'object_name'))

        headers = {
            'Content-Length': str(constraints.MAX_FILE_SIZE + 1),
            'Content-Type': 'text/plain'
        }
        resp = constraints.check_object_creation(
            Request.blank('/', headers=headers), 'object_name')
        self.assertEqual(resp.status_int, HTTP_REQUEST_ENTITY_TOO_LARGE)

        headers = {
            'Transfer-Encoding': 'chunked',
            'Content-Type': 'text/plain'
        }
        self.assertIsNone(
            constraints.check_object_creation(
                Request.blank('/', headers=headers), 'object_name'))

        headers = {'Transfer-Encoding': 'gzip', 'Content-Type': 'text/plain'}
        resp = constraints.check_object_creation(
            Request.blank('/', headers=headers), 'object_name')
        self.assertEqual(resp.status_int, HTTP_BAD_REQUEST)
        self.assertIn('Invalid Transfer-Encoding header value', resp.body)

        headers = {'Content-Type': 'text/plain'}
        resp = constraints.check_object_creation(
            Request.blank('/', headers=headers), 'object_name')
        self.assertEqual(resp.status_int, HTTP_LENGTH_REQUIRED)

        headers = {'Content-Length': 'abc', 'Content-Type': 'text/plain'}
        resp = constraints.check_object_creation(
            Request.blank('/', headers=headers), 'object_name')
        self.assertEqual(resp.status_int, HTTP_BAD_REQUEST)
        self.assertIn('Invalid Content-Length header value', resp.body)

        headers = {
            'Transfer-Encoding': 'gzip,chunked',
            'Content-Type': 'text/plain'
        }
        resp = constraints.check_object_creation(
            Request.blank('/', headers=headers), 'object_name')
        self.assertEqual(resp.status_int, HTTP_NOT_IMPLEMENTED)
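
As a companion to the example above, a small standalone driver (an illustrative sketch, not part of the Swift test suite) can feed the same header combinations through check_object_creation and print each outcome; the header sets and object name are taken from the test, everything else is an assumption. On newer releases that also validate X-Timestamp during these checks, the accepted cases would additionally need an X-Timestamp header, as the next example shows.

    from swift.common import constraints
    from swift.common.swob import Request

    cases = [
        {'Content-Length': str(constraints.MAX_FILE_SIZE),
         'Content-Type': 'text/plain'},                                      # accepted -> None
        {'Content-Length': str(constraints.MAX_FILE_SIZE + 1),
         'Content-Type': 'text/plain'},                                      # 413
        {'Transfer-Encoding': 'chunked', 'Content-Type': 'text/plain'},      # accepted -> None
        {'Transfer-Encoding': 'gzip', 'Content-Type': 'text/plain'},         # 400
        {'Content-Type': 'text/plain'},                                      # 411
        {'Content-Length': 'abc', 'Content-Type': 'text/plain'},             # 400
        {'Transfer-Encoding': 'gzip,chunked', 'Content-Type': 'text/plain'}, # 501
    ]
    for headers in cases:
        resp = constraints.check_object_creation(
            Request.blank('/', headers=headers), 'object_name')
        print(headers, 'accepted' if resp is None else resp.status)
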
Example #32
    def test_check_object_creation_content_length(self):
        headers = {'Content-Length': str(constraints.MAX_FILE_SIZE),
                   'Content-Type': 'text/plain',
                   'X-Timestamp': str(time.time())}
        self.assertIsNone(constraints.check_object_creation(Request.blank(
            '/', headers=headers), 'object_name'))

        headers = {'Content-Length': str(constraints.MAX_FILE_SIZE + 1),
                   'Content-Type': 'text/plain',
                   'X-Timestamp': str(time.time())}
        resp = constraints.check_object_creation(
            Request.blank('/', headers=headers), 'object_name')
        self.assertEqual(resp.status_int, HTTP_REQUEST_ENTITY_TOO_LARGE)

        headers = {'Transfer-Encoding': 'chunked',
                   'Content-Type': 'text/plain',
                   'X-Timestamp': str(time.time())}
        self.assertIsNone(constraints.check_object_creation(Request.blank(
            '/', headers=headers), 'object_name'))

        headers = {'Transfer-Encoding': 'gzip',
                   'Content-Type': 'text/plain',
                   'X-Timestamp': str(time.time())}
        resp = constraints.check_object_creation(Request.blank(
            '/', headers=headers), 'object_name')
        self.assertEqual(resp.status_int, HTTP_BAD_REQUEST)
        self.assertIn(b'Invalid Transfer-Encoding header value', resp.body)

        headers = {'Content-Type': 'text/plain',
                   'X-Timestamp': str(time.time())}
        resp = constraints.check_object_creation(
            Request.blank('/', headers=headers), 'object_name')
        self.assertEqual(resp.status_int, HTTP_LENGTH_REQUIRED)

        headers = {'Content-Length': 'abc',
                   'Content-Type': 'text/plain',
                   'X-Timestamp': str(time.time())}
        resp = constraints.check_object_creation(Request.blank(
            '/', headers=headers), 'object_name')
        self.assertEqual(resp.status_int, HTTP_BAD_REQUEST)
        self.assertIn(b'Invalid Content-Length header value', resp.body)

        headers = {'Transfer-Encoding': 'gzip,chunked',
                   'Content-Type': 'text/plain',
                   'X-Timestamp': str(time.time())}
        resp = constraints.check_object_creation(Request.blank(
            '/', headers=headers), 'object_name')
        self.assertEqual(resp.status_int, HTTP_NOT_IMPLEMENTED)
Example #33
    def test_check_object_creation_content_length(self):
        headers = {"Content-Length": str(constraints.MAX_FILE_SIZE), "Content-Type": "text/plain"}
        self.assertEquals(constraints.check_object_creation(Request.blank("/", headers=headers), "object_name"), None)

        headers = {"Content-Length": str(constraints.MAX_FILE_SIZE + 1), "Content-Type": "text/plain"}
        self.assertEquals(
            constraints.check_object_creation(Request.blank("/", headers=headers), "object_name").status_int,
            HTTP_REQUEST_ENTITY_TOO_LARGE,
        )

        headers = {"Transfer-Encoding": "chunked", "Content-Type": "text/plain"}
        self.assertEquals(constraints.check_object_creation(Request.blank("/", headers=headers), "object_name"), None)

        headers = {"Transfer-Encoding": "gzip", "Content-Type": "text/plain"}
        self.assertEquals(
            constraints.check_object_creation(Request.blank("/", headers=headers), "object_name").status_int,
            HTTP_BAD_REQUEST,
        )

        headers = {"Content-Type": "text/plain"}
        self.assertEquals(
            constraints.check_object_creation(Request.blank("/", headers=headers), "object_name").status_int,
            HTTP_LENGTH_REQUIRED,
        )

        headers = {"Content-Length": "abc", "Content-Type": "text/plain"}
        self.assertEquals(
            constraints.check_object_creation(Request.blank("/", headers=headers), "object_name").status_int,
            HTTP_BAD_REQUEST,
        )

        headers = {"Transfer-Encoding": "gzip,chunked", "Content-Type": "text/plain"}
        self.assertEquals(
            constraints.check_object_creation(Request.blank("/", headers=headers), "object_name").status_int,
            HTTP_NOT_IMPLEMENTED,
        )
Example #34
File: server.py Project: mawentao007/swift
    def PUT(self, request):
        """Handle HTTP PUT requests for the Swift Object Server."""
        device, partition, account, container, obj = \
            split_and_validate_path(request, 5, 5, True)

        if 'x-timestamp' not in request.headers or \
                not check_float(request.headers['x-timestamp']):
            return HTTPBadRequest(body='Missing timestamp', request=request,
                                  content_type='text/plain')
        error_response = check_object_creation(request, obj)
        if error_response:
            return error_response
        new_delete_at = int(request.headers.get('X-Delete-At') or 0)
        if new_delete_at and new_delete_at < time.time():
            return HTTPBadRequest(body='X-Delete-At in past', request=request,
                                  content_type='text/plain')
        try:
            fsize = request.message_length()
        except ValueError as e:
            return HTTPBadRequest(body=str(e), request=request,
                                  content_type='text/plain')
        try:
            disk_file = self._diskfile(device, partition, account, container,
                                       obj)
        except DiskFileDeviceUnavailable:
            return HTTPInsufficientStorage(drive=device, request=request)
        with disk_file.open():
            orig_metadata = disk_file.get_metadata()
        old_delete_at = int(orig_metadata.get('X-Delete-At') or 0)
        orig_timestamp = orig_metadata.get('X-Timestamp')
        if orig_timestamp and orig_timestamp >= request.headers['x-timestamp']:
            return HTTPConflict(request=request)
        upload_expiration = time.time() + self.max_upload_time
        etag = md5()
        elapsed_time = 0
        try:
            with disk_file.create(size=fsize) as writer:
                reader = request.environ['wsgi.input'].read
                for chunk in iter(lambda: reader(self.network_chunk_size), ''):
                    start_time = time.time()
                    if start_time > upload_expiration:
                        self.logger.increment('PUT.timeouts')
                        return HTTPRequestTimeout(request=request)
                    etag.update(chunk)
                    writer.write(chunk)
                    sleep()
                    elapsed_time += time.time() - start_time
                upload_size = writer.upload_size
                if upload_size:
                    self.logger.transfer_rate(
                        'PUT.' + device + '.timing', elapsed_time,
                        upload_size)
                if fsize is not None and fsize != upload_size:
                    return HTTPClientDisconnect(request=request)
                etag = etag.hexdigest()
                if 'etag' in request.headers and \
                        request.headers['etag'].lower() != etag:
                    return HTTPUnprocessableEntity(request=request)
                metadata = {
                    'X-Timestamp': request.headers['x-timestamp'],
                    'Content-Type': request.headers['content-type'],
                    'ETag': etag,
                    'Content-Length': str(upload_size),
                }
                metadata.update(val for val in request.headers.iteritems()
                                if val[0].lower().startswith('x-object-meta-')
                                and len(val[0]) > 14)
                for header_key in self.allowed_headers:
                    if header_key in request.headers:
                        header_caps = header_key.title()
                        metadata[header_caps] = request.headers[header_key]
                writer.put(metadata)
        except DiskFileNoSpace:
            return HTTPInsufficientStorage(drive=device, request=request)
        if old_delete_at != new_delete_at:
            if new_delete_at:
                self.delete_at_update(
                    'PUT', new_delete_at, account, container, obj,
                    request, device)
            if old_delete_at:
                self.delete_at_update(
                    'DELETE', old_delete_at, account, container, obj,
                    request, device)
        if not orig_timestamp or \
                orig_timestamp < request.headers['x-timestamp']:
            self.container_update(
                'PUT', account, container, obj, request,
                HeaderKeyDict({
                    'x-size': metadata['Content-Length'],
                    'x-content-type': metadata['Content-Type'],
                    'x-timestamp': metadata['X-Timestamp'],
                    'x-etag': metadata['ETag']}),
                device)
        resp = HTTPCreated(request=request, etag=etag)
        return resp
Example #35
    def PUT(self, req):
        """HTTP PUT request handler."""
        if req.if_none_match is not None and '*' not in req.if_none_match:
            # Sending an etag with if-none-match isn't currently supported
            return HTTPBadRequest(request=req, content_type='text/plain',
                                  body='If-None-Match only supports *')
        if 'swift.authorize' in req.environ:
            aresp = req.environ['swift.authorize'](req)
            if aresp:
                return aresp

        # Sometimes the 'content-type' header exists, but is set to None.
        content_type_manually_set = True
        detect_content_type = \
            config_true_value(req.headers.get('x-detect-content-type'))
        if detect_content_type or not req.headers.get('content-type'):
            guessed_type, _junk = mimetypes.guess_type(req.path_info)
            req.headers['Content-Type'] = guessed_type or \
                                          'application/octet-stream'
            if detect_content_type:
                req.headers.pop('x-detect-content-type')
            else:
                content_type_manually_set = False

        error_response = check_object_creation(req, self.object_name) or \
                         check_content_type(req)
        if error_response:
            return error_response

        req.headers['X-Timestamp'] = Timestamp(time.time()).internal

        stream = req.environ['wsgi.input']
        source_header = req.headers.get('X-Copy-From')
        source_resp = None
        if source_header:
            if req.environ.get('swift.orig_req_method', req.method) != 'POST':
                req.environ.setdefault('swift.log_info', []).append(
                    'x-copy-from:%s' % source_header)
            ver, acct, _rest = req.split_path(2, 3, True)
            src_account_name = req.headers.get('X-Copy-From-Account', None)
            if src_account_name:
                src_account_name = check_account_format(req, src_account_name)
            else:
                src_account_name = acct
            src_container_name, src_obj_name = check_copy_from_header(req)
            source_header = '/%s/%s/%s/%s' % (ver, src_account_name,
                                              src_container_name, src_obj_name)
            source_req = req.copy_get()

            # make sure the source request uses it's container_info
            source_req.headers.pop('X-Backend-Storage-Policy-Index', None)
            source_req.path_info = source_header
            source_req.headers['X-Newest'] = 'true'
            orig_obj_name = self.object_name
            orig_container_name = self.container_name
            orig_account_name = self.account_name
            self.object_name = src_obj_name
            self.container_name = src_container_name
            self.account_name = src_account_name
            sink_req = Request.blank(req.path_info,
                                     environ=req.environ, headers=req.headers)
            source_resp = self.GET(source_req)

            # This gives middlewares a way to change the source; for example,
            # this lets you COPY a SLO manifest and have the new object be the
            # concatenation of the segments (like what a GET request gives
            # the client), not a copy of the manifest file.
            hook = req.environ.get(
                'swift.copy_hook',
                (lambda source_req, source_resp, sink_req: source_resp))
            source_resp = hook(source_req, source_resp, sink_req)

            if source_resp.status_int >= HTTP_MULTIPLE_CHOICES:
                return source_resp
            self.object_name = orig_obj_name
            self.container_name = orig_container_name
            self.account_name = orig_account_name
            stream = IterO(source_resp.app_iter)
            sink_req.content_length = source_resp.content_length
            if sink_req.content_length is None:
                # This indicates a transfer-encoding: chunked source object,
                # which currently only happens because there are more than
                # CONTAINER_LISTING_LIMIT segments in a segmented object. In
                # this case, we're going to refuse to do the server-side copy.
                return HTTPRequestEntityTooLarge(request=req)
            if sink_req.content_length > constraints.MAX_FILE_SIZE:
                return HTTPRequestEntityTooLarge(request=req)
            sink_req.etag = source_resp.etag

            # we no longer need the X-Copy-From header
            del sink_req.headers['X-Copy-From']
            if 'X-Copy-From-Account' in sink_req.headers:
                del sink_req.headers['X-Copy-From-Account']
            if not content_type_manually_set:
                sink_req.headers['Content-Type'] = \
                    source_resp.headers['Content-Type']
            if config_true_value(
                    sink_req.headers.get('x-fresh-metadata', 'false')):
                # post-as-copy: ignore new sysmeta, copy existing sysmeta
                condition = lambda k: is_sys_meta('object', k)
                remove_items(sink_req.headers, condition)
                copy_header_subset(source_resp, sink_req, condition)
            else:
                # copy/update existing sysmeta and user meta
                copy_headers_into(source_resp, sink_req)
                copy_headers_into(req, sink_req)

            # copy over x-static-large-object for POSTs and manifest copies
            if 'X-Static-Large-Object' in source_resp.headers and \
                            req.params.get('multipart-manifest') == 'get':
                sink_req.headers['X-Static-Large-Object'] = \
                    source_resp.headers['X-Static-Large-Object']

            req = sink_req

        content_length = req.content_length
        content_type = req.headers.get('content-type', 'octet/stream')
        storage = self.app.storage

        if content_length is None:
            content_length = 0
        try:
            chunks, size, checksum = storage.object_create(
                self.account_name, self.container_name,
                obj_name=self.object_name,
                file_or_path=stream,
                content_length=content_length,
                content_type=content_type)
        except exceptions.NoSuchContainer:
            return HTTPNotFound(request=req)
        except exceptions.ClientReadTimeout:
            return HTTPRequestTimeout(request=req)
        resp = HTTPCreated(request=req, etag=checksum)
        if source_header:
            acct, path = source_header.split('/', 3)[2:4]
            resp.headers['X-Copied-From-Account'] = quote(acct)
            resp.headers['X-Copied-From'] = quote(path)
            if 'last-modified' in source_resp.headers:
                resp.headers['X-Copied-From-Last-Modified'] = \
                    source_resp.headers['last-modified']
            copy_headers_into(req, resp)
        resp.last_modified = math.ceil(
            float(Timestamp(req.headers['X-Timestamp'])))
        return resp
Example #36
File: obj.py Project: absolutarin/swift
    def PUT(self, req):
        """HTTP PUT request handler."""
        if req.if_none_match is not None and '*' not in req.if_none_match:
            # Sending an etag with if-none-match isn't currently supported
            return HTTPBadRequest(request=req, content_type='text/plain',
                                  body='If-None-Match only supports *')
        container_info = self.container_info(
            self.account_name, self.container_name, req)
        policy_index = req.headers.get('X-Backend-Storage-Policy-Index',
                                       container_info['storage_policy'])
        obj_ring = self.app.get_object_ring(policy_index)

        # pass the policy index to storage nodes via req header
        req.headers['X-Backend-Storage-Policy-Index'] = policy_index
        container_partition = container_info['partition']
        containers = container_info['nodes']
        req.acl = container_info['write_acl']
        req.environ['swift_sync_key'] = container_info['sync_key']
        object_versions = container_info['versions']
        if 'swift.authorize' in req.environ:
            aresp = req.environ['swift.authorize'](req)
            if aresp:
                return aresp

        if not containers:
            return HTTPNotFound(request=req)

        # Sometimes the 'content-type' header exists, but is set to None.
        content_type_manually_set = True
        detect_content_type = \
            config_true_value(req.headers.get('x-detect-content-type'))
        if detect_content_type or not req.headers.get('content-type'):
            guessed_type, _junk = mimetypes.guess_type(req.path_info)
            req.headers['Content-Type'] = guessed_type or \
                'application/octet-stream'
            if detect_content_type:
                req.headers.pop('x-detect-content-type')
            else:
                content_type_manually_set = False

        error_response = check_object_creation(req, self.object_name) or \
            check_content_type(req)
        if error_response:
            return error_response

        partition, nodes = obj_ring.get_nodes(
            self.account_name, self.container_name, self.object_name)

        # do a HEAD request for container sync and checking object versions
        if 'x-timestamp' in req.headers or \
                (object_versions and not
                 req.environ.get('swift_versioned_copy')):
            # make sure proxy-server uses the right policy index
            _headers = {'X-Backend-Storage-Policy-Index': policy_index,
                        'X-Newest': 'True'}
            hreq = Request.blank(req.path_info, headers=_headers,
                                 environ={'REQUEST_METHOD': 'HEAD'})
            hresp = self.GETorHEAD_base(
                hreq, _('Object'), obj_ring, partition,
                hreq.swift_entity_path)

        # Used by container sync feature
        if 'x-timestamp' in req.headers:
            try:
                req_timestamp = Timestamp(req.headers['X-Timestamp'])
                if hresp.environ and 'swift_x_timestamp' in hresp.environ and \
                        hresp.environ['swift_x_timestamp'] >= req_timestamp:
                    return HTTPAccepted(request=req)
            except ValueError:
                return HTTPBadRequest(
                    request=req, content_type='text/plain',
                    body='X-Timestamp should be a UNIX timestamp float value; '
                         'was %r' % req.headers['x-timestamp'])
            req.headers['X-Timestamp'] = req_timestamp.internal
        else:
            req.headers['X-Timestamp'] = Timestamp(time.time()).internal

        if object_versions and not req.environ.get('swift_versioned_copy'):
            if hresp.status_int != HTTP_NOT_FOUND:
                # This is a version manifest and needs to be handled
                # differently. First copy the existing data to a new object,
                # then write the data from this request to the version manifest
                # object.
                lcontainer = object_versions.split('/')[0]
                prefix_len = '%03x' % len(self.object_name)
                lprefix = prefix_len + self.object_name + '/'
                ts_source = hresp.environ.get('swift_x_timestamp')
                if ts_source is None:
                    ts_source = time.mktime(time.strptime(
                                            hresp.headers['last-modified'],
                                            '%a, %d %b %Y %H:%M:%S GMT'))
                new_ts = Timestamp(ts_source).internal
                vers_obj_name = lprefix + new_ts
                copy_headers = {
                    'Destination': '%s/%s' % (lcontainer, vers_obj_name)}
                copy_environ = {'REQUEST_METHOD': 'COPY',
                                'swift_versioned_copy': True
                                }
                copy_req = Request.blank(req.path_info, headers=copy_headers,
                                         environ=copy_environ)
                copy_resp = self.COPY(copy_req)
                if is_client_error(copy_resp.status_int):
                    # missing container or bad permissions
                    return HTTPPreconditionFailed(request=req)
                elif not is_success(copy_resp.status_int):
                    # could not copy the data, bail
                    return HTTPServiceUnavailable(request=req)

        reader = req.environ['wsgi.input'].read
        data_source = iter(lambda: reader(self.app.client_chunk_size), '')
        source_header = req.headers.get('X-Copy-From')
        source_resp = None
        if source_header:
            if req.environ.get('swift.orig_req_method', req.method) != 'POST':
                req.environ.setdefault('swift.log_info', []).append(
                    'x-copy-from:%s' % source_header)
            ver, acct, _rest = req.split_path(2, 3, True)
            src_account_name = req.headers.get('X-Copy-From-Account', None)
            if src_account_name:
                src_account_name = check_account_format(req, src_account_name)
            else:
                src_account_name = acct
            src_container_name, src_obj_name = check_copy_from_header(req)
            source_header = '/%s/%s/%s/%s' % (ver, src_account_name,
                            src_container_name, src_obj_name)
            source_req = req.copy_get()

            # make sure the source request uses it's container_info
            source_req.headers.pop('X-Backend-Storage-Policy-Index', None)
            source_req.path_info = source_header
            source_req.headers['X-Newest'] = 'true'
            orig_obj_name = self.object_name
            orig_container_name = self.container_name
            orig_account_name = self.account_name
            self.object_name = src_obj_name
            self.container_name = src_container_name
            self.account_name = src_account_name
            sink_req = Request.blank(req.path_info,
                                     environ=req.environ, headers=req.headers)
            source_resp = self.GET(source_req)

            # This gives middlewares a way to change the source; for example,
            # this lets you COPY a SLO manifest and have the new object be the
            # concatenation of the segments (like what a GET request gives
            # the client), not a copy of the manifest file.
            hook = req.environ.get(
                'swift.copy_hook',
                (lambda source_req, source_resp, sink_req: source_resp))
            source_resp = hook(source_req, source_resp, sink_req)

            if source_resp.status_int >= HTTP_MULTIPLE_CHOICES:
                return source_resp
            self.object_name = orig_obj_name
            self.container_name = orig_container_name
            self.account_name = orig_account_name
            data_source = iter(source_resp.app_iter)
            sink_req.content_length = source_resp.content_length
            if sink_req.content_length is None:
                # This indicates a transfer-encoding: chunked source object,
                # which currently only happens because there are more than
                # CONTAINER_LISTING_LIMIT segments in a segmented object. In
                # this case, we're going to refuse to do the server-side copy.
                return HTTPRequestEntityTooLarge(request=req)
            if sink_req.content_length > constraints.MAX_FILE_SIZE:
                return HTTPRequestEntityTooLarge(request=req)
            sink_req.etag = source_resp.etag

            # we no longer need the X-Copy-From header
            del sink_req.headers['X-Copy-From']
            if 'X-Copy-From-Account' in sink_req.headers:
                del sink_req.headers['X-Copy-From-Account']
            if not content_type_manually_set:
                sink_req.headers['Content-Type'] = \
                    source_resp.headers['Content-Type']
            if config_true_value(
                    sink_req.headers.get('x-fresh-metadata', 'false')):
                # post-as-copy: ignore new sysmeta, copy existing sysmeta
                condition = lambda k: is_sys_meta('object', k)
                remove_items(sink_req.headers, condition)
                copy_header_subset(source_resp, sink_req, condition)
            else:
                # copy/update existing sysmeta and user meta
                copy_headers_into(source_resp, sink_req)
                copy_headers_into(req, sink_req)

            # copy over x-static-large-object for POSTs and manifest copies
            if 'X-Static-Large-Object' in source_resp.headers and \
                    req.params.get('multipart-manifest') == 'get':
                sink_req.headers['X-Static-Large-Object'] = \
                    source_resp.headers['X-Static-Large-Object']

            req = sink_req

        req, delete_at_container, delete_at_part, \
            delete_at_nodes = self._config_obj_expiration(req)

        node_iter = GreenthreadSafeIterator(
            self.iter_nodes_local_first(obj_ring, partition))
        pile = GreenPile(len(nodes))
        te = req.headers.get('transfer-encoding', '')
        chunked = ('chunked' in te)

        outgoing_headers = self._backend_requests(
            req, len(nodes), container_partition, containers,
            delete_at_container, delete_at_part, delete_at_nodes)

        for nheaders in outgoing_headers:
            # RFC2616:8.2.3 disallows 100-continue without a body
            if (req.content_length > 0) or chunked:
                nheaders['Expect'] = '100-continue'
            pile.spawn(self._connect_put_node, node_iter, partition,
                       req.swift_entity_path, nheaders,
                       self.app.logger.thread_locals)

        conns = [conn for conn in pile if conn]
        min_conns = quorum_size(len(nodes))

        if req.if_none_match is not None and '*' in req.if_none_match:
            statuses = [conn.resp.status for conn in conns if conn.resp]
            if HTTP_PRECONDITION_FAILED in statuses:
                # If we find any copy of the file, it shouldn't be uploaded
                self.app.logger.debug(
                    _('Object PUT returning 412, %(statuses)r'),
                    {'statuses': statuses})
                return HTTPPreconditionFailed(request=req)

        if len(conns) < min_conns:
            self.app.logger.error(
                _('Object PUT returning 503, %(conns)s/%(nodes)s '
                  'required connections'),
                {'conns': len(conns), 'nodes': min_conns})
            return HTTPServiceUnavailable(request=req)
        bytes_transferred = 0
        try:
            with ContextPool(len(nodes)) as pool:
                for conn in conns:
                    conn.failed = False
                    conn.queue = Queue(self.app.put_queue_depth)
                    pool.spawn(self._send_file, conn, req.path)
                while True:
                    with ChunkReadTimeout(self.app.client_timeout):
                        try:
                            chunk = next(data_source)
                        except StopIteration:
                            if chunked:
                                for conn in conns:
                                    conn.queue.put('0\r\n\r\n')
                            break
                    bytes_transferred += len(chunk)
                    if bytes_transferred > constraints.MAX_FILE_SIZE:
                        return HTTPRequestEntityTooLarge(request=req)
                    for conn in list(conns):
                        if not conn.failed:
                            conn.queue.put(
                                '%x\r\n%s\r\n' % (len(chunk), chunk)
                                if chunked else chunk)
                        else:
                            conns.remove(conn)
                    if len(conns) < min_conns:
                        self.app.logger.error(_(
                            'Object PUT exceptions during'
                            ' send, %(conns)s/%(nodes)s required connections'),
                            {'conns': len(conns), 'nodes': min_conns})
                        return HTTPServiceUnavailable(request=req)
                for conn in conns:
                    if conn.queue.unfinished_tasks:
                        conn.queue.join()
            conns = [conn for conn in conns if not conn.failed]
        except ChunkReadTimeout as err:
            self.app.logger.warn(
                _('ERROR Client read timeout (%ss)'), err.seconds)
            self.app.logger.increment('client_timeouts')
            return HTTPRequestTimeout(request=req)
        except (Exception, Timeout):
            self.app.logger.exception(
                _('ERROR Exception causing client disconnect'))
            return HTTPClientDisconnect(request=req)
        if req.content_length and bytes_transferred < req.content_length:
            req.client_disconnect = True
            self.app.logger.warn(
                _('Client disconnected without sending enough data'))
            self.app.logger.increment('client_disconnects')
            return HTTPClientDisconnect(request=req)

        statuses, reasons, bodies, etags = self._get_put_responses(req, conns,
                                                                   nodes)

        if len(etags) > 1:
            self.app.logger.error(
                _('Object servers returned %s mismatched etags'), len(etags))
            return HTTPServerError(request=req)
        etag = etags.pop() if len(etags) else None
        resp = self.best_response(req, statuses, reasons, bodies,
                                  _('Object PUT'), etag=etag)
        if source_header:
            acct, path = source_header.split('/', 3)[2:4]
            resp.headers['X-Copied-From-Account'] = quote(acct)
            resp.headers['X-Copied-From'] = quote(path)
            if 'last-modified' in source_resp.headers:
                resp.headers['X-Copied-From-Last-Modified'] = \
                    source_resp.headers['last-modified']
            copy_headers_into(req, resp)
        resp.last_modified = math.ceil(
            float(Timestamp(req.headers['X-Timestamp'])))
        return resp
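The per-chunk framing used above ('%x\r\n%s\r\n' % (len(chunk), chunk), terminated with '0\r\n\r\n') is plain HTTP/1.1 chunked transfer encoding. A minimal, standalone sketch of that framing; the helper names are illustrative, not part of Swift:

def frame_chunk(chunk):
    # One chunk: hex-encoded length, CRLF, payload, CRLF.
    return '%x\r\n%s\r\n' % (len(chunk), chunk)

def frame_body(chunks):
    # Frame every chunk, then append the zero-length terminator that
    # tells the object server the body is complete.
    return ''.join(frame_chunk(c) for c in chunks) + '0\r\n\r\n'

# 'Wiki' becomes '4\r\nWiki\r\n'; the stream always ends with '0\r\n\r\n'.
assert frame_body(['Wiki', 'pedia']) == '4\r\nWiki\r\n5\r\npedia\r\n0\r\n\r\n'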
Example #37
File: server.py  Project: zuiwufenghua/zft
    def PUT(self, request):
        """Handle HTTP PUT requests for the Swift Object Server."""
        try:
            device, partition, account, container, obj = \
                split_path(unquote(request.path), 5, 5, True)
        except ValueError as err:
            return HTTPBadRequest(body=str(err), request=request,
                                  content_type='text/plain')
        if self.mount_check and not check_mount(self.devices, device):
            return Response(status='507 %s is not mounted' % device)
        if 'x-timestamp' not in request.headers or \
                    not check_float(request.headers['x-timestamp']):
            return HTTPBadRequest(body='Missing timestamp', request=request,
                        content_type='text/plain')
        error_response = check_object_creation(request, obj)
        if error_response:
            return error_response
        file = DiskFile(self.devices, device, partition, account, container,
                        obj, disk_chunk_size=self.disk_chunk_size)
        upload_expiration = time.time() + self.max_upload_time
        etag = md5()
        upload_size = 0
        last_sync = 0
        with file.mkstemp() as (fd, tmppath):
            if 'content-length' in request.headers:
                fallocate(fd, int(request.headers['content-length']))
            for chunk in iter(lambda: request.body_file.read(
                    self.network_chunk_size), ''):
                upload_size += len(chunk)
                if time.time() > upload_expiration:
Example #38
File: obj.py  Project: saebyuk/swift
    def PUT(self, req):
        """HTTP PUT request handler."""
        container_info = self.container_info(
            self.account_name, self.container_name,
            account_autocreate=self.app.account_autocreate)
        container_partition = container_info['partition']
        containers = container_info['nodes']
        req.acl = container_info['write_acl']
        req.environ['swift_sync_key'] = container_info['sync_key']
        object_versions = container_info['versions']
        if 'swift.authorize' in req.environ:
            aresp = req.environ['swift.authorize'](req)
            if aresp:
                return aresp
        if not containers:
            return HTTPNotFound(request=req)
        if 'x-delete-after' in req.headers:
            try:
                x_delete_after = int(req.headers['x-delete-after'])
            except ValueError:
                return HTTPBadRequest(request=req,
                                      content_type='text/plain',
                                      body='Non-integer X-Delete-After')
            req.headers['x-delete-at'] = '%d' % (time.time() + x_delete_after)
        if 'x-delete-at' in req.headers:
            try:
                x_delete_at = int(req.headers['x-delete-at'])
                if x_delete_at < time.time():
                    return HTTPBadRequest(
                        body='X-Delete-At in past', request=req,
                        content_type='text/plain')
            except ValueError:
                return HTTPBadRequest(request=req, content_type='text/plain',
                                      body='Non-integer X-Delete-At')
            delete_at_container = str(
                x_delete_at /
                self.app.expiring_objects_container_divisor *
                self.app.expiring_objects_container_divisor)
            delete_at_part, delete_at_nodes = \
                self.app.container_ring.get_nodes(
                    self.app.expiring_objects_account, delete_at_container)
        else:
            delete_at_part = delete_at_nodes = None
        partition, nodes = self.app.object_ring.get_nodes(
            self.account_name, self.container_name, self.object_name)
        # do a HEAD request for container sync and checking object versions
        if 'x-timestamp' in req.headers or \
                (object_versions and not
                 req.environ.get('swift_versioned_copy')):
            hreq = Request.blank(req.path_info, headers={'X-Newest': 'True'},
                                 environ={'REQUEST_METHOD': 'HEAD'})
            hresp = self.GETorHEAD_base(hreq, _('Object'), partition, nodes,
                                        hreq.path_info, len(nodes))
        # Used by container sync feature
        if 'x-timestamp' in req.headers:
            try:
                req.headers['X-Timestamp'] = \
                    normalize_timestamp(float(req.headers['x-timestamp']))
                if hresp.environ and 'swift_x_timestamp' in hresp.environ and \
                    float(hresp.environ['swift_x_timestamp']) >= \
                        float(req.headers['x-timestamp']):
                    return HTTPAccepted(request=req)
            except ValueError:
                return HTTPBadRequest(
                    request=req, content_type='text/plain',
                    body='X-Timestamp should be a UNIX timestamp float value; '
                         'was %r' % req.headers['x-timestamp'])
        else:
            req.headers['X-Timestamp'] = normalize_timestamp(time.time())
        # Sometimes the 'content-type' header exists, but is set to None.
        content_type_manually_set = True
        if not req.headers.get('content-type'):
            guessed_type, _junk = mimetypes.guess_type(req.path_info)
            req.headers['Content-Type'] = guessed_type or \
                'application/octet-stream'
            content_type_manually_set = False
        error_response = check_object_creation(req, self.object_name) or \
            check_content_type(req)
        if error_response:
            return error_response
        if object_versions and not req.environ.get('swift_versioned_copy'):
            is_manifest = 'x-object-manifest' in req.headers or \
                          'x-object-manifest' in hresp.headers
            if hresp.status_int != HTTP_NOT_FOUND and not is_manifest:
                # This is a version manifest and needs to be handled
                # differently. First copy the existing data to a new object,
                # then write the data from this request to the version manifest
                # object.
                lcontainer = object_versions.split('/')[0]
                prefix_len = '%03x' % len(self.object_name)
                lprefix = prefix_len + self.object_name + '/'
                ts_source = hresp.environ.get('swift_x_timestamp')
                if ts_source is None:
                    ts_source = time.mktime(time.strptime(
                                            hresp.headers['last-modified'],
                                            '%a, %d %b %Y %H:%M:%S GMT'))
                new_ts = normalize_timestamp(ts_source)
                vers_obj_name = lprefix + new_ts
                copy_headers = {
                    'Destination': '%s/%s' % (lcontainer, vers_obj_name)}
                copy_environ = {'REQUEST_METHOD': 'COPY',
                                'swift_versioned_copy': True
                                }
                copy_req = Request.blank(req.path_info, headers=copy_headers,
                                         environ=copy_environ)
                copy_resp = self.COPY(copy_req)
                if is_client_error(copy_resp.status_int):
                    # missing container or bad permissions
                    return HTTPPreconditionFailed(request=req)
                elif not is_success(copy_resp.status_int):
                    # could not copy the data, bail
                    return HTTPServiceUnavailable(request=req)

        reader = req.environ['wsgi.input'].read
        data_source = iter(lambda: reader(self.app.client_chunk_size), '')
        source_header = req.headers.get('X-Copy-From')
        source_resp = None
        if source_header:
            source_header = unquote(source_header)
            acct = req.path_info.split('/', 2)[1]
            if isinstance(acct, unicode):
                acct = acct.encode('utf-8')
            if not source_header.startswith('/'):
                source_header = '/' + source_header
            source_header = '/' + acct + source_header
            try:
                src_container_name, src_obj_name = \
                    source_header.split('/', 3)[2:]
            except ValueError:
                return HTTPPreconditionFailed(
                    request=req,
                    body='X-Copy-From header must be of the form '
                         '<container name>/<object name>')
            source_req = req.copy_get()
            source_req.path_info = source_header
            source_req.headers['X-Newest'] = 'true'
            orig_obj_name = self.object_name
            orig_container_name = self.container_name
            self.object_name = src_obj_name
            self.container_name = src_container_name
            source_resp = self.GET(source_req)
            if source_resp.status_int >= HTTP_MULTIPLE_CHOICES:
                return source_resp
            self.object_name = orig_obj_name
            self.container_name = orig_container_name
            new_req = Request.blank(req.path_info,
                                    environ=req.environ, headers=req.headers)
            data_source = source_resp.app_iter
            new_req.content_length = source_resp.content_length
            if new_req.content_length is None:
                # This indicates a transfer-encoding: chunked source object,
                # which currently only happens because there are more than
                # CONTAINER_LISTING_LIMIT segments in a segmented object. In
                # this case, we're going to refuse to do the server-side copy.
                return HTTPRequestEntityTooLarge(request=req)
            if new_req.content_length > MAX_FILE_SIZE:
                return HTTPRequestEntityTooLarge(request=req)
            new_req.etag = source_resp.etag
            # we no longer need the X-Copy-From header
            del new_req.headers['X-Copy-From']
            if not content_type_manually_set:
                new_req.headers['Content-Type'] = \
                    source_resp.headers['Content-Type']
            if not config_true_value(
                    new_req.headers.get('x-fresh-metadata', 'false')):
                copy_headers_into(source_resp, new_req)
                copy_headers_into(req, new_req)
            # copy over x-static-large-object for POSTs and manifest copies
            if 'X-Static-Large-Object' in source_resp.headers and \
                    req.params.get('multipart-manifest') == 'get':
                new_req.headers['X-Static-Large-Object'] = \
                    source_resp.headers['X-Static-Large-Object']

            req = new_req
        node_iter = self.iter_nodes(partition, nodes, self.app.object_ring)
        pile = GreenPile(len(nodes))
        chunked = req.headers.get('transfer-encoding')

        outgoing_headers = self._backend_requests(
            req, len(nodes), container_partition, containers,
            delete_at_part, delete_at_nodes)

        for nheaders in outgoing_headers:
            # RFC2616:8.2.3 disallows 100-continue without a body
            if (req.content_length > 0) or chunked:
                nheaders['Expect'] = '100-continue'
            pile.spawn(self._connect_put_node, node_iter, partition,
                       req.path_info, nheaders, self.app.logger.thread_locals)

        conns = [conn for conn in pile if conn]
        if len(conns) <= len(nodes) / 2:
            self.app.logger.error(
                _('Object PUT returning 503, %(conns)s/%(nodes)s '
                  'required connections'),
                {'conns': len(conns), 'nodes': len(nodes) // 2 + 1})
            return HTTPServiceUnavailable(request=req)
        bytes_transferred = 0
        try:
            with ContextPool(len(nodes)) as pool:
                for conn in conns:
                    conn.failed = False
                    conn.queue = Queue(self.app.put_queue_depth)
                    pool.spawn(self._send_file, conn, req.path)
                while True:
                    with ChunkReadTimeout(self.app.client_timeout):
                        try:
                            chunk = next(data_source)
                        except StopIteration:
                            if chunked:
                                for conn in conns:
                                    conn.queue.put('0\r\n\r\n')
                            break
                    bytes_transferred += len(chunk)
                    if bytes_transferred > MAX_FILE_SIZE:
                        return HTTPRequestEntityTooLarge(request=req)
                    for conn in list(conns):
                        if not conn.failed:
                            conn.queue.put(
                                '%x\r\n%s\r\n' % (len(chunk), chunk)
                                if chunked else chunk)
                        else:
                            conns.remove(conn)
                    if len(conns) <= len(nodes) / 2:
                        self.app.logger.error(_(
                            'Object PUT exceptions during'
                            ' send, %(conns)s/%(nodes)s required connections'),
                            {'conns': len(conns), 'nodes': len(nodes) / 2 + 1})
                        return HTTPServiceUnavailable(request=req)
                for conn in conns:
                    if conn.queue.unfinished_tasks:
                        conn.queue.join()
            conns = [conn for conn in conns if not conn.failed]
        except ChunkReadTimeout as err:
            self.app.logger.warn(
                _('ERROR Client read timeout (%ss)'), err.seconds)
            self.app.logger.increment('client_timeouts')
            return HTTPRequestTimeout(request=req)
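The availability check in this older example (len(conns) <= len(nodes) / 2) is the same majority rule that the newer examples spell as len(conns) < quorum_size(len(nodes)). A small sketch of that equivalence; quorum_size here is a local illustration of a simple-majority quorum, not an import from Swift:

def quorum_size(n):
    # Simple majority: 2 of 3 replicas, 3 of 5, and so on (illustrative).
    return n // 2 + 1

# "fewer than a quorum" and "no more than half" are the same condition.
for n in range(1, 8):
    for conns in range(n + 1):
        assert (conns < quorum_size(n)) == (conns <= n // 2)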
Example #39
    def PUT(self, req):
        """HTTP PUT request handler."""
        if req.if_none_match is not None and "*" not in req.if_none_match:
            # Sending an etag with if-none-match isn't currently supported
            return HTTPBadRequest(request=req, content_type="text/plain", body="If-None-Match only supports *")
        container_info = self.container_info(self.account_name, self.container_name, req)
        policy_index = req.headers.get("X-Backend-Storage-Policy-Index", container_info["storage_policy"])
        obj_ring = self.app.get_object_ring(policy_index)
        # pass the policy index to storage nodes via req header
        req.headers["X-Backend-Storage-Policy-Index"] = policy_index
        container_partition = container_info["partition"]
        containers = container_info["nodes"]
        req.acl = container_info["write_acl"]
        req.environ["swift_sync_key"] = container_info["sync_key"]
        object_versions = container_info["versions"]
        if "swift.authorize" in req.environ:
            aresp = req.environ["swift.authorize"](req)
            if aresp:
                return aresp
        if not containers:
            return HTTPNotFound(request=req)
        try:
            ml = req.message_length()
        except ValueError as e:
            return HTTPBadRequest(request=req, content_type="text/plain", body=str(e))
        except AttributeError as e:
            return HTTPNotImplemented(request=req, content_type="text/plain", body=str(e))
        if ml is not None and ml > constraints.MAX_FILE_SIZE:
            return HTTPRequestEntityTooLarge(request=req)
        if "x-delete-after" in req.headers:
            try:
                x_delete_after = int(req.headers["x-delete-after"])
            except ValueError:
                return HTTPBadRequest(request=req, content_type="text/plain", body="Non-integer X-Delete-After")
            req.headers["x-delete-at"] = normalize_delete_at_timestamp(time.time() + x_delete_after)
        partition, nodes = obj_ring.get_nodes(self.account_name, self.container_name, self.object_name)
        # do a HEAD request for container sync and checking object versions
        if "x-timestamp" in req.headers or (object_versions and not req.environ.get("swift_versioned_copy")):
            # make sure proxy-server uses the right policy index
            _headers = {
                "X-Backend-Storage-Policy-Index": req.headers["X-Backend-Storage-Policy-Index"],
                "X-Newest": "True",
            }
            hreq = Request.blank(req.path_info, headers=_headers, environ={"REQUEST_METHOD": "HEAD"})
            hresp = self.GETorHEAD_base(hreq, _("Object"), obj_ring, partition, hreq.swift_entity_path)
        # Used by container sync feature
        if "x-timestamp" in req.headers:
            try:
                req_timestamp = Timestamp(req.headers["X-Timestamp"])
                if (
                    hresp.environ
                    and "swift_x_timestamp" in hresp.environ
                    and hresp.environ["swift_x_timestamp"] >= req_timestamp
                ):
                    return HTTPAccepted(request=req)
            except ValueError:
                return HTTPBadRequest(
                    request=req,
                    content_type="text/plain",
                    body="X-Timestamp should be a UNIX timestamp float value; " "was %r" % req.headers["x-timestamp"],
                )
            req.headers["X-Timestamp"] = req_timestamp.internal
        else:
            req.headers["X-Timestamp"] = Timestamp(time.time()).internal
        # Sometimes the 'content-type' header exists, but is set to None.
        content_type_manually_set = True
        detect_content_type = config_true_value(req.headers.get("x-detect-content-type"))
        if detect_content_type or not req.headers.get("content-type"):
            guessed_type, _junk = mimetypes.guess_type(req.path_info)
            req.headers["Content-Type"] = guessed_type or "application/octet-stream"
            if detect_content_type:
                req.headers.pop("x-detect-content-type")
            else:
                content_type_manually_set = False

        error_response = check_object_creation(req, self.object_name) or check_content_type(req)
        if error_response:
            return error_response
        if object_versions and not req.environ.get("swift_versioned_copy"):
            if hresp.status_int != HTTP_NOT_FOUND:
                # This is a version manifest and needs to be handled
                # differently. First copy the existing data to a new object,
                # then write the data from this request to the version manifest
                # object.
                lcontainer = object_versions.split("/")[0]
                prefix_len = "%03x" % len(self.object_name)
                lprefix = prefix_len + self.object_name + "/"
                ts_source = hresp.environ.get("swift_x_timestamp")
                if ts_source is None:
                    ts_source = time.mktime(time.strptime(hresp.headers["last-modified"], "%a, %d %b %Y %H:%M:%S GMT"))
                new_ts = Timestamp(ts_source).internal
                vers_obj_name = lprefix + new_ts
                copy_headers = {"Destination": "%s/%s" % (lcontainer, vers_obj_name)}
                copy_environ = {"REQUEST_METHOD": "COPY", "swift_versioned_copy": True}
                copy_req = Request.blank(req.path_info, headers=copy_headers, environ=copy_environ)
                copy_resp = self.COPY(copy_req)
                if is_client_error(copy_resp.status_int):
                    # missing container or bad permissions
                    return HTTPPreconditionFailed(request=req)
                elif not is_success(copy_resp.status_int):
                    # could not copy the data, bail
                    return HTTPServiceUnavailable(request=req)

        reader = req.environ["wsgi.input"].read
        data_source = iter(lambda: reader(self.app.client_chunk_size), "")
        source_header = req.headers.get("X-Copy-From")
        source_resp = None
        if source_header:
            if req.environ.get("swift.orig_req_method", req.method) != "POST":
                req.environ.setdefault("swift.log_info", []).append("x-copy-from:%s" % source_header)
            src_container_name, src_obj_name = check_copy_from_header(req)
            ver, acct, _rest = req.split_path(2, 3, True)
            if isinstance(acct, unicode):
                acct = acct.encode("utf-8")
            source_header = "/%s/%s/%s/%s" % (ver, acct, src_container_name, src_obj_name)
            source_req = req.copy_get()
            # make sure the source request uses its container_info
            source_req.headers.pop("X-Backend-Storage-Policy-Index", None)
            source_req.path_info = source_header
            source_req.headers["X-Newest"] = "true"
            orig_obj_name = self.object_name
            orig_container_name = self.container_name
            self.object_name = src_obj_name
            self.container_name = src_container_name
            sink_req = Request.blank(req.path_info, environ=req.environ, headers=req.headers)
            source_resp = self.GET(source_req)
            # This gives middlewares a way to change the source; for example,
            # this lets you COPY a SLO manifest and have the new object be the
            # concatenation of the segments (like what a GET request gives
            # the client), not a copy of the manifest file.
            hook = req.environ.get("swift.copy_hook", (lambda source_req, source_resp, sink_req: source_resp))
            source_resp = hook(source_req, source_resp, sink_req)

            if source_resp.status_int >= HTTP_MULTIPLE_CHOICES:
                return source_resp
            self.object_name = orig_obj_name
            self.container_name = orig_container_name
            data_source = iter(source_resp.app_iter)
            sink_req.content_length = source_resp.content_length
            if sink_req.content_length is None:
                # This indicates a transfer-encoding: chunked source object,
                # which currently only happens because there are more than
                # CONTAINER_LISTING_LIMIT segments in a segmented object. In
                # this case, we're going to refuse to do the server-side copy.
                return HTTPRequestEntityTooLarge(request=req)
            if sink_req.content_length > constraints.MAX_FILE_SIZE:
                return HTTPRequestEntityTooLarge(request=req)
            sink_req.etag = source_resp.etag
            # we no longer need the X-Copy-From header
            del sink_req.headers["X-Copy-From"]
            if not content_type_manually_set:
                sink_req.headers["Content-Type"] = source_resp.headers["Content-Type"]
            if not config_true_value(sink_req.headers.get("x-fresh-metadata", "false")):
                copy_headers_into(source_resp, sink_req)
                copy_headers_into(req, sink_req)
            # copy over x-static-large-object for POSTs and manifest copies
            if "X-Static-Large-Object" in source_resp.headers and req.params.get("multipart-manifest") == "get":
                sink_req.headers["X-Static-Large-Object"] = source_resp.headers["X-Static-Large-Object"]

            req = sink_req

        if "x-delete-at" in req.headers:
            try:
                x_delete_at = normalize_delete_at_timestamp(int(req.headers["x-delete-at"]))
                if int(x_delete_at) < time.time():
                    return HTTPBadRequest(body="X-Delete-At in past", request=req, content_type="text/plain")
            except ValueError:
                return HTTPBadRequest(request=req, content_type="text/plain", body="Non-integer X-Delete-At")
            req.environ.setdefault("swift.log_info", []).append("x-delete-at:%s" % x_delete_at)
            delete_at_container = normalize_delete_at_timestamp(
                int(x_delete_at)
                / self.app.expiring_objects_container_divisor
                * self.app.expiring_objects_container_divisor
            )
            delete_at_part, delete_at_nodes = self.app.container_ring.get_nodes(
                self.app.expiring_objects_account, delete_at_container
            )
        else:
            delete_at_container = delete_at_part = delete_at_nodes = None

        node_iter = GreenthreadSafeIterator(self.iter_nodes_local_first(obj_ring, partition))
        pile = GreenPile(len(nodes))
        te = req.headers.get("transfer-encoding", "")
        chunked = "chunked" in te

        outgoing_headers = self._backend_requests(
            req, len(nodes), container_partition, containers, delete_at_container, delete_at_part, delete_at_nodes
        )

        for nheaders in outgoing_headers:
            # RFC2616:8.2.3 disallows 100-continue without a body
            if (req.content_length > 0) or chunked:
                nheaders["Expect"] = "100-continue"
            pile.spawn(
                self._connect_put_node,
                node_iter,
                partition,
                req.swift_entity_path,
                nheaders,
                self.app.logger.thread_locals,
            )

        conns = [conn for conn in pile if conn]
        min_conns = quorum_size(len(nodes))

        if req.if_none_match is not None and "*" in req.if_none_match:
            statuses = [conn.resp.status for conn in conns if conn.resp]
            if HTTP_PRECONDITION_FAILED in statuses:
                # If we find any copy of the file, it shouldn't be uploaded
                self.app.logger.debug(_("Object PUT returning 412, %(statuses)r"), {"statuses": statuses})
                return HTTPPreconditionFailed(request=req)

        if len(conns) < min_conns:
            self.app.logger.error(
                _("Object PUT returning 503, %(conns)s/%(nodes)s " "required connections"),
                {"conns": len(conns), "nodes": min_conns},
            )
            return HTTPServiceUnavailable(request=req)
        bytes_transferred = 0
        try:
            with ContextPool(len(nodes)) as pool:
                for conn in conns:
                    conn.failed = False
                    conn.queue = Queue(self.app.put_queue_depth)
                    pool.spawn(self._send_file, conn, req.path)
                while True:
                    with ChunkReadTimeout(self.app.client_timeout):
                        try:
                            chunk = next(data_source)
                        except StopIteration:
                            if chunked:
                                for conn in conns:
                                    conn.queue.put("0\r\n\r\n")
                            break
                    bytes_transferred += len(chunk)
                    if bytes_transferred > constraints.MAX_FILE_SIZE:
                        return HTTPRequestEntityTooLarge(request=req)
                    for conn in list(conns):
                        if not conn.failed:
                            conn.queue.put("%x\r\n%s\r\n" % (len(chunk), chunk) if chunked else chunk)
                        else:
                            conns.remove(conn)
                    if len(conns) < min_conns:
                        self.app.logger.error(
                            _("Object PUT exceptions during" " send, %(conns)s/%(nodes)s required connections"),
                            {"conns": len(conns), "nodes": min_conns},
                        )
                        return HTTPServiceUnavailable(request=req)
                for conn in conns:
                    if conn.queue.unfinished_tasks:
                        conn.queue.join()
            conns = [conn for conn in conns if not conn.failed]
        except ChunkReadTimeout as err:
            self.app.logger.warn(_("ERROR Client read timeout (%ss)"), err.seconds)
            self.app.logger.increment("client_timeouts")
            return HTTPRequestTimeout(request=req)
        except (Exception, Timeout):
            self.app.logger.exception(_("ERROR Exception causing client disconnect"))
            return HTTPClientDisconnect(request=req)
        if req.content_length and bytes_transferred < req.content_length:
            req.client_disconnect = True
            self.app.logger.warn(_("Client disconnected without sending enough data"))
            self.app.logger.increment("client_disconnects")
            return HTTPClientDisconnect(request=req)

        statuses, reasons, bodies, etags = self._get_put_responses(req, conns, nodes)

        if len(etags) > 1:
            self.app.logger.error(_("Object servers returned %s mismatched etags"), len(etags))
            return HTTPServerError(request=req)
        etag = etags.pop() if len(etags) else None
        resp = self.best_response(req, statuses, reasons, bodies, _("Object PUT"), etag=etag)
        if source_header:
            resp.headers["X-Copied-From"] = quote(source_header.split("/", 3)[3])
            if "last-modified" in source_resp.headers:
                resp.headers["X-Copied-From-Last-Modified"] = source_resp.headers["last-modified"]
            copy_headers_into(req, resp)
        resp.last_modified = math.ceil(float(Timestamp(req.headers["X-Timestamp"])))
        return resp
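The delete-at container computed above is just the expiry timestamp rounded down to a multiple of expiring_objects_container_divisor, so expirations are grouped into coarse buckets. A rough sketch of that rounding; the divisor value is only an example, and normalize_delete_at_timestamp is approximated here as zero-padded epoch seconds:

def bucket_delete_at(x_delete_at, divisor=86400):
    # Round the expiry down to the start of its bucket (here one day)
    # and format it as a zero-padded string of epoch seconds, the way
    # the hidden expiring-objects containers are named.
    bucket = int(x_delete_at) // divisor * divisor
    return '%010d' % bucket

assert bucket_delete_at(1400000123) == '1399939200'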
Example #40
    def PUT(self, request):
        """Handle HTTP PUT requests for the Swift Object Server."""
        device, partition, account, container, obj = \
            split_and_validate_path(request, 5, 5, True)

        if 'x-timestamp' not in request.headers or \
                not check_float(request.headers['x-timestamp']):
            return HTTPBadRequest(body='Missing timestamp',
                                  request=request,
                                  content_type='text/plain')
        error_response = check_object_creation(request, obj)
        if error_response:
            return error_response
        new_delete_at = int(request.headers.get('X-Delete-At') or 0)
        if new_delete_at and new_delete_at < time.time():
            return HTTPBadRequest(body='X-Delete-At in past',
                                  request=request,
                                  content_type='text/plain')
        try:
            fsize = request.message_length()
        except ValueError as e:
            return HTTPBadRequest(body=str(e),
                                  request=request,
                                  content_type='text/plain')
        try:
            disk_file = self._diskfile(device, partition, account, container,
                                       obj)
        except DiskFileDeviceUnavailable:
            return HTTPInsufficientStorage(drive=device, request=request)
        old_delete_at = int(disk_file.metadata.get('X-Delete-At') or 0)
        orig_timestamp = disk_file.metadata.get('X-Timestamp')
        if orig_timestamp and orig_timestamp >= request.headers['x-timestamp']:
            return HTTPConflict(request=request)
        upload_expiration = time.time() + self.max_upload_time
        etag = md5()
        elapsed_time = 0
        try:
            with disk_file.create(size=fsize) as writer:
                reader = request.environ['wsgi.input'].read
                for chunk in iter(lambda: reader(self.network_chunk_size), ''):
                    start_time = time.time()
                    if start_time > upload_expiration:
                        self.logger.increment('PUT.timeouts')
                        return HTTPRequestTimeout(request=request)
                    etag.update(chunk)
                    writer.write(chunk)
                    sleep()
                    elapsed_time += time.time() - start_time
                upload_size = writer.upload_size
                if upload_size:
                    self.logger.transfer_rate('PUT.' + device + '.timing',
                                              elapsed_time, upload_size)
                if fsize is not None and fsize != upload_size:
                    return HTTPClientDisconnect(request=request)
                etag = etag.hexdigest()
                if 'etag' in request.headers and \
                        request.headers['etag'].lower() != etag:
                    return HTTPUnprocessableEntity(request=request)
                metadata = {
                    'X-Timestamp': request.headers['x-timestamp'],
                    'Content-Type': request.headers['content-type'],
                    'ETag': etag,
                    'Content-Length': str(upload_size),
                }
                metadata.update(val for val in request.headers.iteritems()
                                if val[0].lower().startswith('x-object-meta-')
                                and len(val[0]) > 14)
                for header_key in self.allowed_headers:
                    if header_key in request.headers:
                        header_caps = header_key.title()
                        metadata[header_caps] = request.headers[header_key]
                writer.put(metadata)
        except DiskFileNoSpace:
            return HTTPInsufficientStorage(drive=device, request=request)
        if old_delete_at != new_delete_at:
            if new_delete_at:
                self.delete_at_update('PUT', new_delete_at, account, container,
                                      obj, request, device)
            if old_delete_at:
                self.delete_at_update('DELETE', old_delete_at, account,
                                      container, obj, request, device)
        if not orig_timestamp or \
                orig_timestamp < request.headers['x-timestamp']:
            self.container_update(
                'PUT', account, container, obj, request,
                HeaderKeyDict({
                    'x-size':
                    disk_file.metadata['Content-Length'],
                    'x-content-type':
                    disk_file.metadata['Content-Type'],
                    'x-timestamp':
                    disk_file.metadata['X-Timestamp'],
                    'x-etag':
                    disk_file.metadata['ETag']
                }), device)
        resp = HTTPCreated(request=request, etag=etag)
        return resp
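The handler above hashes the body incrementally while it streams in, and only afterwards compares the digest with the client-supplied Etag header. A condensed, standalone sketch of that pattern (function and argument names are illustrative):

from hashlib import md5

def streamed_etag_matches(chunks, client_etag=None):
    # Hash the body chunk by chunk, then compare case-insensitively
    # against the optional Etag header supplied by the client.
    hasher = md5()
    for chunk in chunks:
        hasher.update(chunk)
    return client_etag is None or client_etag.lower() == hasher.hexdigest()

assert streamed_etag_matches([b'abc', b'def'],
                             md5(b'abcdef').hexdigest().upper())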
Example #41
    def PUT(self, request):
        """Handle HTTP PUT requests for the Swift Object Server."""
        device, partition, account, container, obj, policy_idx = get_name_and_placement(request, 5, 5, True)
        req_timestamp = valid_timestamp(request)
        error_response = check_object_creation(request, obj)
        if error_response:
            return error_response
        new_delete_at = int(request.headers.get("X-Delete-At") or 0)
        if new_delete_at and new_delete_at < time.time():
            return HTTPBadRequest(body="X-Delete-At in past", request=request, content_type="text/plain")
        try:
            fsize = request.message_length()
        except ValueError as e:
            return HTTPBadRequest(body=str(e), request=request, content_type="text/plain")
        try:
            disk_file = self.get_diskfile(device, partition, account, container, obj, policy_idx=policy_idx)
        except DiskFileDeviceUnavailable:
            return HTTPInsufficientStorage(drive=device, request=request)
        try:
            orig_metadata = disk_file.read_metadata()
        except DiskFileXattrNotSupported:
            return HTTPInsufficientStorage(drive=device, request=request)
        except (DiskFileNotExist, DiskFileQuarantined):
            orig_metadata = {}

        # Checks for If-None-Match
        if request.if_none_match is not None and orig_metadata:
            if "*" in request.if_none_match:
                # File exists already so return 412
                return HTTPPreconditionFailed(request=request)
            if orig_metadata.get("ETag") in request.if_none_match:
                # The current ETag matches, so return 412
                return HTTPPreconditionFailed(request=request)

        orig_timestamp = Timestamp(orig_metadata.get("X-Timestamp", 0))
        if orig_timestamp >= req_timestamp:
            return HTTPConflict(request=request, headers={"X-Backend-Timestamp": orig_timestamp.internal})
        orig_delete_at = int(orig_metadata.get("X-Delete-At") or 0)
        upload_expiration = time.time() + self.max_upload_time
        etag = md5()
        elapsed_time = 0
        try:
            with disk_file.create(size=fsize) as writer:
                upload_size = 0

                def timeout_reader():
                    with ChunkReadTimeout(self.client_timeout):
                        return request.environ["wsgi.input"].read(self.network_chunk_size)

                try:
                    for chunk in iter(lambda: timeout_reader(), ""):
                        start_time = time.time()
                        if start_time > upload_expiration:
                            self.logger.increment("PUT.timeouts")
                            return HTTPRequestTimeout(request=request)
                        etag.update(chunk)
                        upload_size = writer.write(chunk)
                        elapsed_time += time.time() - start_time
                except ChunkReadTimeout:
                    return HTTPRequestTimeout(request=request)
                if upload_size:
                    self.logger.transfer_rate("PUT." + device + ".timing", elapsed_time, upload_size)
                if fsize is not None and fsize != upload_size:
                    return HTTPClientDisconnect(request=request)
                etag = etag.hexdigest()
                if "etag" in request.headers and request.headers["etag"].lower() != etag:
                    return HTTPUnprocessableEntity(request=request)
                metadata = {
                    "X-Timestamp": request.timestamp.internal,
                    "Content-Type": request.headers["content-type"],
                    "ETag": etag,
                    "Content-Length": str(upload_size),
                }
                metadata.update(val for val in request.headers.iteritems() if is_sys_or_user_meta("object", val[0]))
                headers_to_copy = request.headers.get("X-Backend-Replication-Headers", "").split() + list(
                    self.allowed_headers
                )
                for header_key in headers_to_copy:
                    if header_key in request.headers:
                        header_caps = header_key.title()
                        metadata[header_caps] = request.headers[header_key]
                writer.put(metadata)
        except (DiskFileXattrNotSupported, DiskFileNoSpace):
            return HTTPInsufficientStorage(drive=device, request=request)
        if orig_delete_at != new_delete_at:
            if new_delete_at:
                self.delete_at_update("PUT", new_delete_at, account, container, obj, request, device, policy_idx)
            if orig_delete_at:
                self.delete_at_update("DELETE", orig_delete_at, account, container, obj, request, device, policy_idx)
        self.container_update(
            "PUT",
            account,
            container,
            obj,
            request,
            HeaderKeyDict(
                {
                    "x-size": metadata["Content-Length"],
                    "x-content-type": metadata["Content-Type"],
                    "x-timestamp": metadata["X-Timestamp"],
                    "x-etag": metadata["ETag"],
                }
            ),
            device,
            policy_idx,
        )
        return HTTPCreated(request=request, etag=etag)
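The If-None-Match checks above return 412 either when '*' is supplied and any copy of the object exists, or when the stored ETag is among the supplied tags. A compact sketch of that decision, assuming if_none_match has already been parsed into a set of tag strings:

def put_precondition_failed(if_none_match, orig_metadata):
    # Mirrors the object-server checks: only meaningful when the
    # object already exists (orig_metadata is non-empty).
    if if_none_match is None or not orig_metadata:
        return False
    if '*' in if_none_match:
        return True  # any existing copy blocks the PUT
    return orig_metadata.get('ETag') in if_none_match

assert put_precondition_failed({'*'}, {'ETag': 'abc'})
assert put_precondition_failed({'abc'}, {'ETag': 'abc'})
assert not put_precondition_failed({'abc'}, {})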
Example #42
File: server.py  Project: HoO-Group/swift
    def PUT(self, request):
        """Handle HTTP PUT requests for the Swift Object Server."""
        device, partition, account, container, obj = \
            split_and_validate_path(request, 5, 5, True)

        if 'x-timestamp' not in request.headers or \
                not check_float(request.headers['x-timestamp']):
            return HTTPBadRequest(body='Missing timestamp', request=request,
                                  content_type='text/plain')
        error_response = check_object_creation(request, obj)
        if error_response:
            return error_response
        new_delete_at = int(request.headers.get('X-Delete-At') or 0)
        if new_delete_at and new_delete_at < time.time():
            return HTTPBadRequest(body='X-Delete-At in past', request=request,
                                  content_type='text/plain')
        try:
            fsize = request.message_length()
        except ValueError as e:
            return HTTPBadRequest(body=str(e), request=request,
                                  content_type='text/plain')
        try:
            disk_file = self.get_diskfile(
                device, partition, account, container, obj)
        except DiskFileDeviceUnavailable:
            return HTTPInsufficientStorage(drive=device, request=request)
        try:
            orig_metadata = disk_file.read_metadata()
        except (DiskFileNotExist, DiskFileQuarantined):
            orig_metadata = {}

        # Checks for If-None-Match
        if request.if_none_match is not None and orig_metadata:
            if '*' in request.if_none_match:
                # File exists already so return 412
                return HTTPPreconditionFailed(request=request)
            if orig_metadata.get('ETag') in request.if_none_match:
                # The current ETag matches, so return 412
                return HTTPPreconditionFailed(request=request)

        orig_timestamp = orig_metadata.get('X-Timestamp')
        if orig_timestamp and orig_timestamp >= request.headers['x-timestamp']:
            return HTTPConflict(request=request)
        orig_delete_at = int(orig_metadata.get('X-Delete-At') or 0)
        upload_expiration = time.time() + self.max_upload_time
        etag = md5()
        elapsed_time = 0
        try:
            with disk_file.create(size=fsize) as writer:
                upload_size = 0

                def timeout_reader():
                    with ChunkReadTimeout(self.client_timeout):
                        return request.environ['wsgi.input'].read(
                            self.network_chunk_size)

                try:
                    for chunk in iter(lambda: timeout_reader(), ''):
                        start_time = time.time()
                        if start_time > upload_expiration:
                            self.logger.increment('PUT.timeouts')
                            return HTTPRequestTimeout(request=request)
                        etag.update(chunk)
                        upload_size = writer.write(chunk)
                        elapsed_time += time.time() - start_time
                except ChunkReadTimeout:
                    return HTTPRequestTimeout(request=request)
                if upload_size:
                    self.logger.transfer_rate(
                        'PUT.' + device + '.timing', elapsed_time,
                        upload_size)
                if fsize is not None and fsize != upload_size:
                    return HTTPClientDisconnect(request=request)
                etag = etag.hexdigest()
                if 'etag' in request.headers and \
                        request.headers['etag'].lower() != etag:
                    return HTTPUnprocessableEntity(request=request)
                metadata = {
                    'X-Timestamp': request.headers['x-timestamp'],
                    'Content-Type': request.headers['content-type'],
                    'ETag': etag,
                    'Content-Length': str(upload_size),
                }
                metadata.update(val for val in request.headers.iteritems()
                                if is_user_meta('object', val[0]))
                for header_key in (
                        request.headers.get('X-Backend-Replication-Headers') or
                        self.allowed_headers):
                    if header_key in request.headers:
                        header_caps = header_key.title()
                        metadata[header_caps] = request.headers[header_key]
                writer.put(metadata)
        except DiskFileNoSpace:
            return HTTPInsufficientStorage(drive=device, request=request)
        if orig_delete_at != new_delete_at:
            if new_delete_at:
                self.delete_at_update(
                    'PUT', new_delete_at, account, container, obj,
                    request, device)
            if orig_delete_at:
                self.delete_at_update(
                    'DELETE', orig_delete_at, account, container, obj,
                    request, device)
        self.container_update(
            'PUT', account, container, obj, request,
            HeaderKeyDict({
                'x-size': metadata['Content-Length'],
                'x-content-type': metadata['Content-Type'],
                'x-timestamp': metadata['X-Timestamp'],
                'x-etag': metadata['ETag']}),
            device)
        return HTTPCreated(request=request, etag=etag)
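Example #40 keeps user metadata by checking the 'x-object-meta-' prefix directly, while this example delegates the same test to is_user_meta('object', key). A small sketch of the prefix rule those calls are assumed to implement (an illustrative approximation, not the Swift source):

def is_user_meta(server_type, key):
    # User metadata headers look like 'X-<Type>-Meta-<name>' with a
    # non-empty <name>; compare case-insensitively.
    prefix = 'x-%s-meta-' % server_type.lower()
    key = key.lower()
    return key.startswith(prefix) and len(key) > len(prefix)

assert is_user_meta('object', 'X-Object-Meta-Color')
assert not is_user_meta('object', 'X-Object-Meta-')
assert not is_user_meta('object', 'Content-Type')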
Example #43
    def PUT(self, req):
        """HTTP PUT request handler."""
        if req.if_none_match is not None and '*' not in req.if_none_match:
            # Sending an etag with if-none-match isn't currently supported
            return HTTPBadRequest(request=req, content_type='text/plain',
                                  body='If-None-Match only supports *')
        container_info = self.container_info(
            self.account_name, self.container_name, req)
        policy_index = req.headers.get('X-Backend-Storage-Policy-Index',
                                       container_info['storage_policy'])
        obj_ring = self.app.get_object_ring(policy_index)
        # pass the policy index to storage nodes via req header
        req.headers['X-Backend-Storage-Policy-Index'] = policy_index
        container_partition = container_info['partition']
        containers = container_info['nodes']
        req.acl = container_info['write_acl']
        req.environ['swift_sync_key'] = container_info['sync_key']
        object_versions = container_info['versions']
        if 'swift.authorize' in req.environ:
            aresp = req.environ['swift.authorize'](req)
            if aresp:
                return aresp
        if not containers:
            return HTTPNotFound(request=req)
        try:
            ml = req.message_length()
        except ValueError as e:
            return HTTPBadRequest(request=req, content_type='text/plain',
                                  body=str(e))
        except AttributeError as e:
            return HTTPNotImplemented(request=req, content_type='text/plain',
                                      body=str(e))
        if ml is not None and ml > constraints.MAX_FILE_SIZE:
            return HTTPRequestEntityTooLarge(request=req)
        if 'x-delete-after' in req.headers:
            try:
                x_delete_after = int(req.headers['x-delete-after'])
            except ValueError:
                return HTTPBadRequest(request=req,
                                      content_type='text/plain',
                                      body='Non-integer X-Delete-After')
            req.headers['x-delete-at'] = normalize_delete_at_timestamp(
                time.time() + x_delete_after)
        partition, nodes = obj_ring.get_nodes(
            self.account_name, self.container_name, self.object_name)
        # do a HEAD request for container sync and checking object versions
        if 'x-timestamp' in req.headers or \
                (object_versions and not
                 req.environ.get('swift_versioned_copy')):
            # make sure proxy-server uses the right policy index
            _headers = {'X-Backend-Storage-Policy-Index': policy_index,
                        'X-Newest': 'True'}
            hreq = Request.blank(req.path_info, headers=_headers,
                                 environ={'REQUEST_METHOD': 'HEAD'})
            hresp = self.GETorHEAD_base(
                hreq, _('Object'), obj_ring, partition,
                hreq.swift_entity_path)
        # Used by container sync feature
        if 'x-timestamp' in req.headers:
            try:
                req_timestamp = Timestamp(req.headers['X-Timestamp'])
                if hresp.environ and 'swift_x_timestamp' in hresp.environ and \
                        hresp.environ['swift_x_timestamp'] >= req_timestamp:
                    return HTTPAccepted(request=req)
            except ValueError:
                return HTTPBadRequest(
                    request=req, content_type='text/plain',
                    body='X-Timestamp should be a UNIX timestamp float value; '
                         'was %r' % req.headers['x-timestamp'])
            req.headers['X-Timestamp'] = req_timestamp.internal
        else:
            req.headers['X-Timestamp'] = Timestamp(time.time()).internal
        # Sometimes the 'content-type' header exists, but is set to None.
        content_type_manually_set = True
        detect_content_type = \
            config_true_value(req.headers.get('x-detect-content-type'))
        if detect_content_type or not req.headers.get('content-type'):
            guessed_type, _junk = mimetypes.guess_type(req.path_info)
            req.headers['Content-Type'] = guessed_type or \
                'application/octet-stream'
            if detect_content_type:
                req.headers.pop('x-detect-content-type')
            else:
                content_type_manually_set = False

        error_response = check_object_creation(req, self.object_name) or \
            check_content_type(req)
        if error_response:
            return error_response
        if object_versions and not req.environ.get('swift_versioned_copy'):
            if hresp.status_int != HTTP_NOT_FOUND:
                # This is a version manifest and needs to be handled
                # differently. First copy the existing data to a new object,
                # then write the data from this request to the version manifest
                # object.
                lcontainer = object_versions.split('/')[0]
                prefix_len = '%03x' % len(self.object_name)
                lprefix = prefix_len + self.object_name + '/'
                ts_source = hresp.environ.get('swift_x_timestamp')
                if ts_source is None:
                    ts_source = time.mktime(time.strptime(
                                            hresp.headers['last-modified'],
                                            '%a, %d %b %Y %H:%M:%S GMT'))
                new_ts = Timestamp(ts_source).internal
                vers_obj_name = lprefix + new_ts
                copy_headers = {
                    'Destination': '%s/%s' % (lcontainer, vers_obj_name)}
                copy_environ = {'REQUEST_METHOD': 'COPY',
                                'swift_versioned_copy': True
                                }
                copy_req = Request.blank(req.path_info, headers=copy_headers,
                                         environ=copy_environ)
                copy_resp = self.COPY(copy_req)
                if is_client_error(copy_resp.status_int):
                    # missing container or bad permissions
                    return HTTPPreconditionFailed(request=req)
                elif not is_success(copy_resp.status_int):
                    # could not copy the data, bail
                    return HTTPServiceUnavailable(request=req)

        reader = req.environ['wsgi.input'].read
        data_source = iter(lambda: reader(self.app.client_chunk_size), '')
        source_header = req.headers.get('X-Copy-From')
        source_resp = None
        if source_header:
            if req.environ.get('swift.orig_req_method', req.method) != 'POST':
                req.environ.setdefault('swift.log_info', []).append(
                    'x-copy-from:%s' % source_header)
            src_container_name, src_obj_name = check_copy_from_header(req)
            ver, acct, _rest = req.split_path(2, 3, True)
            if isinstance(acct, unicode):
                acct = acct.encode('utf-8')
            source_header = '/%s/%s/%s/%s' % (ver, acct,
                                              src_container_name, src_obj_name)
            source_req = req.copy_get()
            # make sure the source request uses its container_info
            source_req.headers.pop('X-Backend-Storage-Policy-Index', None)
            source_req.path_info = source_header
            source_req.headers['X-Newest'] = 'true'
            orig_obj_name = self.object_name
            orig_container_name = self.container_name
            self.object_name = src_obj_name
            self.container_name = src_container_name
            sink_req = Request.blank(req.path_info,
                                     environ=req.environ, headers=req.headers)
            source_resp = self.GET(source_req)
            # This gives middlewares a way to change the source; for example,
            # this lets you COPY a SLO manifest and have the new object be the
            # concatenation of the segments (like what a GET request gives
            # the client), not a copy of the manifest file.
            hook = req.environ.get(
                'swift.copy_hook',
                (lambda source_req, source_resp, sink_req: source_resp))
            source_resp = hook(source_req, source_resp, sink_req)

            if source_resp.status_int >= HTTP_MULTIPLE_CHOICES:
                return source_resp
            self.object_name = orig_obj_name
            self.container_name = orig_container_name
            data_source = iter(source_resp.app_iter)
            sink_req.content_length = source_resp.content_length
            if sink_req.content_length is None:
                # This indicates a transfer-encoding: chunked source object,
                # which currently only happens because there are more than
                # CONTAINER_LISTING_LIMIT segments in a segmented object. In
                # this case, we're going to refuse to do the server-side copy.
                return HTTPRequestEntityTooLarge(request=req)
            if sink_req.content_length > constraints.MAX_FILE_SIZE:
                return HTTPRequestEntityTooLarge(request=req)
            sink_req.etag = source_resp.etag
            # we no longer need the X-Copy-From header
            del sink_req.headers['X-Copy-From']
            if not content_type_manually_set:
                sink_req.headers['Content-Type'] = \
                    source_resp.headers['Content-Type']
            if not config_true_value(
                    sink_req.headers.get('x-fresh-metadata', 'false')):
                copy_headers_into(source_resp, sink_req)
                copy_headers_into(req, sink_req)
            # copy over x-static-large-object for POSTs and manifest copies
            if 'X-Static-Large-Object' in source_resp.headers and \
                    req.params.get('multipart-manifest') == 'get':
                sink_req.headers['X-Static-Large-Object'] = \
                    source_resp.headers['X-Static-Large-Object']

            req = sink_req

        if 'x-delete-at' in req.headers:
            try:
                x_delete_at = normalize_delete_at_timestamp(
                    int(req.headers['x-delete-at']))
                if int(x_delete_at) < time.time():
                    return HTTPBadRequest(
                        body='X-Delete-At in past', request=req,
                        content_type='text/plain')
            except ValueError:
                return HTTPBadRequest(request=req, content_type='text/plain',
                                      body='Non-integer X-Delete-At')
            req.environ.setdefault('swift.log_info', []).append(
                'x-delete-at:%s' % x_delete_at)
            delete_at_container = normalize_delete_at_timestamp(
                int(x_delete_at) /
                self.app.expiring_objects_container_divisor *
                self.app.expiring_objects_container_divisor)
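            # Worked example (numbers assumed): with a divisor of 86400, an
            # x_delete_at of 1440619048 maps to container
            # 1440619048 // 86400 * 86400 = 1440547200, i.e. one expiring-
            # objects container per divisor-sized time window.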
            delete_at_part, delete_at_nodes = \
                self.app.container_ring.get_nodes(
                    self.app.expiring_objects_account, delete_at_container)
        else:
            delete_at_container = delete_at_part = delete_at_nodes = None

        node_iter = GreenthreadSafeIterator(
            self.iter_nodes_local_first(obj_ring, partition))
        pile = GreenPile(len(nodes))
        te = req.headers.get('transfer-encoding', '')
        chunked = ('chunked' in te)
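        # When the client sent Transfer-Encoding: chunked, each body chunk is
        # re-framed for the backends below as "<hex length>\r\n<data>\r\n" and
        # the stream is terminated with "0\r\n\r\n" (standard HTTP/1.1 chunked
        # framing).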

        outgoing_headers = self._backend_requests(
            req, len(nodes), container_partition, containers,
            delete_at_container, delete_at_part, delete_at_nodes)

        for nheaders in outgoing_headers:
            # RFC2616:8.2.3 disallows 100-continue without a body
            if (req.content_length > 0) or chunked:
                nheaders['Expect'] = '100-continue'
            pile.spawn(self._connect_put_node, node_iter, partition,
                       req.swift_entity_path, nheaders,
                       self.app.logger.thread_locals)

        conns = [conn for conn in pile if conn]
        min_conns = quorum_size(len(nodes))
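        # quorum_size() is assumed to return a simple majority here, e.g. 2
        # out of 3 replicas, so the PUT can proceed even if one node is down.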

        if req.if_none_match is not None and '*' in req.if_none_match:
            statuses = [conn.resp.status for conn in conns if conn.resp]
            if HTTP_PRECONDITION_FAILED in statuses:
                # If we find any copy of the file, it shouldn't be uploaded
                self.app.logger.debug(
                    _('Object PUT returning 412, %(statuses)r'),
                    {'statuses': statuses})
                return HTTPPreconditionFailed(request=req)

        if len(conns) < min_conns:
            self.app.logger.error(
                _('Object PUT returning 503, %(conns)s/%(nodes)s '
                  'required connections'),
                {'conns': len(conns), 'nodes': min_conns})
            return HTTPServiceUnavailable(request=req)
        bytes_transferred = 0
        try:
            with ContextPool(len(nodes)) as pool:
                for conn in conns:
                    conn.failed = False
                    conn.queue = Queue(self.app.put_queue_depth)
                    pool.spawn(self._send_file, conn, req.path)
                while True:
                    with ChunkReadTimeout(self.app.client_timeout):
                        try:
                            chunk = next(data_source)
                        except StopIteration:
                            if chunked:
                                for conn in conns:
                                    conn.queue.put('0\r\n\r\n')
                            break
                    bytes_transferred += len(chunk)
                    if bytes_transferred > constraints.MAX_FILE_SIZE:
                        return HTTPRequestEntityTooLarge(request=req)
                    for conn in list(conns):
                        if not conn.failed:
                            conn.queue.put(
                                '%x\r\n%s\r\n' % (len(chunk), chunk)
                                if chunked else chunk)
                        else:
                            conns.remove(conn)
                    if len(conns) < min_conns:
                        self.app.logger.error(_(
                            'Object PUT exceptions during'
                            ' send, %(conns)s/%(nodes)s required connections'),
                            {'conns': len(conns), 'nodes': min_conns})
                        return HTTPServiceUnavailable(request=req)
                for conn in conns:
                    if conn.queue.unfinished_tasks:
                        conn.queue.join()
            conns = [conn for conn in conns if not conn.failed]
        except ChunkReadTimeout as err:
            self.app.logger.warn(
                _('ERROR Client read timeout (%ss)'), err.seconds)
            self.app.logger.increment('client_timeouts')
            return HTTPRequestTimeout(request=req)
        except (Exception, Timeout):
            self.app.logger.exception(
                _('ERROR Exception causing client disconnect'))
            return HTTPClientDisconnect(request=req)
        if req.content_length and bytes_transferred < req.content_length:
            req.client_disconnect = True
            self.app.logger.warn(
                _('Client disconnected without sending enough data'))
            self.app.logger.increment('client_disconnects')
            return HTTPClientDisconnect(request=req)

        statuses, reasons, bodies, etags = self._get_put_responses(req, conns,
                                                                   nodes)

        if len(etags) > 1:
            self.app.logger.error(
                _('Object servers returned %s mismatched etags'), len(etags))
            return HTTPServerError(request=req)
        etag = etags.pop() if len(etags) else None
        resp = self.best_response(req, statuses, reasons, bodies,
                                  _('Object PUT'), etag=etag)
        if source_header:
            resp.headers['X-Copied-From'] = quote(
                source_header.split('/', 3)[3])
            if 'last-modified' in source_resp.headers:
                resp.headers['X-Copied-From-Last-Modified'] = \
                    source_resp.headers['last-modified']
            copy_headers_into(req, resp)
        resp.last_modified = math.ceil(
            float(Timestamp(req.headers['X-Timestamp'])))
        return resp
예제 #44
0
파일: obj.py 프로젝트: sun7shines/Cloudfs
    def PUT(self, req):
        
        account_partition, accounts = self.account_info(
            self.account_name, autocreate=False)
        account = accounts[0]
        (container_partition, containers, object_versions) = \
            self.container_info(self.account_name, self.container_name,
                                account_autocreate=self.app.account_autocreate)
        
        if not containers:
            return jresponse('-1', 'not found', req, 404)
        
        
        delete_at_part = delete_at_nodes = None
        
        partition, nodes = self.app.object_ring.get_nodes(
            self.account_name, self.container_name, self.object_name)
        req.headers['X-Timestamp'] = normalize_timestamp(time.time())
        
        error_response = check_object_creation(req, self.object_name)
        if error_response:
            return error_response
        
        overwrite = req.GET.get('overwrite')
        
        if overwrite == 'true' and object_versions:
            
            hreq = Request.blank(req.path_info,
                                 environ={'REQUEST_METHOD': 'HEAD'})
            hresp = self.GETorHEAD_base(hreq, _('Object'), partition, nodes,
                                        hreq.path_info, len(nodes))
            
            is_manifest = 'x-static-large-object' in req.headers or \
                          'x-static-large-object' in hresp.headers
                          
            if hresp.status_int != HTTP_NOT_FOUND and not is_manifest:
                
                lcontainer = object_versions.split('/')[0]
                lprefix = self.object_name + '/'
                
                new_ts = normalize_timestamp(float(time.time()))
                vers_obj_name = lprefix + new_ts
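                # Note: unlike the upstream handler above, this fork omits the
                # '%03x' length prefix and keys archived versions only by
                # '<object_name>/<timestamp>'.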
                
                move_headers = {
                    'Destination': '/%s/%s' % (lcontainer, vers_obj_name)}
                move_req = Request.blank(req.path_info, headers=move_headers)
                move_resp = self.MOVE_VERSION(move_req)
                if is_client_error(move_resp.status_int):
                    # missing container or bad permissions
                    return jresponse('-1', 'bad permissions', req, 412)
                elif not is_success(move_resp.status_int):
                    # could not copy the data, bail
                    return jresponse('-1', 'ServiceUnavailable', req, 503)
                
        reader = req.environ['wsgi.input'].read
        data_source = iter(lambda: reader(self.app.client_chunk_size), '')
        
            
        node_iter = self.iter_nodes(partition, nodes, self.app.object_ring)
        pile = GreenPile(len(nodes))
        for container in containers:
            nheaders = dict(req.headers.iteritems())
            nheaders['Connection'] = 'close'
            nheaders['X-Container-Host'] = '%(ip)s:%(port)s' % container
            nheaders['X-Container-Partition'] = container_partition
            nheaders['X-Container-Device'] = container['device']
            
            nheaders['X-Account-Host'] = '%(ip)s:%(port)s' % account
            nheaders['X-Account-Partition'] = account_partition
            nheaders['X-Account-Device'] = self.account_name
                        
            nheaders['Expect'] = '100-continue'
            if delete_at_nodes:
                node = delete_at_nodes.pop(0)
                nheaders['X-Delete-At-Host'] = '%(ip)s:%(port)s' % node
                nheaders['X-Delete-At-Partition'] = delete_at_part
                nheaders['X-Delete-At-Device'] = node['device']
                
            if overwrite:
                nheaders['x-overwrite'] = overwrite
                
            pile.spawn(self._connect_put_node, self.account_name, node_iter,
                       partition, req.path_info, nheaders,
                       self.app.logger.thread_locals, req.query_string)
        conns = [conn for conn in pile if conn]
        if len(conns) <= len(nodes) / 2:
            self.app.logger.error(
                _('Object PUT returning 503, %(conns)s/%(nodes)s '
                  'required connections'),
                {'conns': len(conns), 'nodes': len(nodes) // 2 + 1})
            return jresponse('-1', 'ServiceUnavailable', req, 503)
        
        bytes_transferred = 0
        start_time = time.time()

        try:
            with ContextPool(len(nodes)) as pool:
                for conn in conns:
                    conn.failed = False
                    conn.queue = Queue(self.app.put_queue_depth)
                    pool.spawn(self._send_file, conn, req.path)
                while True:
                    with ChunkReadTimeout(self.app.client_timeout):
                        try:
                            chunk = next(data_source)
                        except StopIteration:
                            break
                    bytes_transferred += len(chunk)

                    # Crude client-side throttle: once the average transfer
                    # rate exceeds roughly 1 MB/s, sleep until it drops back
                    # under the limit.
                    dural_time = float(time.time()) - float(start_time)
                    if dural_time > 0:
                        speed = (float(bytes_transferred) /
                                 float(dural_time) / (1000 * 1000))
                        while speed > 1:
                            sleep(0.1)
                            dural_time = float(time.time()) - float(start_time)
                            speed = (float(bytes_transferred) /
                                     float(dural_time) / (1000 * 1000))

                    if bytes_transferred > MAX_FILE_SIZE:
                        return jresponse('-1', 'RequestEntityTooLarge', req, 413)
                    for conn in list(conns):
                        if not conn.failed:
                            conn.queue.put(chunk)
                        else:
                            conns.remove(conn)
                    if len(conns) <= len(nodes) / 2:
                        self.app.logger.error(
                            _('Object PUT exceptions during send, '
                              '%(conns)s/%(nodes)s required connections'),
                            {'conns': len(conns), 'nodes': len(nodes) / 2 + 1})
                        return jresponse('-1', 'ServiceUnavailable', req, 503)
                for conn in conns:
                    if conn.queue.unfinished_tasks:
                        conn.queue.join()
            conns = [conn for conn in conns if not conn.failed]
        except ChunkReadTimeout as err:
            self.app.logger.warn(
                _('ERROR Client read timeout (%ss)'), err.seconds)
            return jresponse('-1', 'RequestTimeout', req, 408)
예제 #45
0
                    pickle.dump(dict_info, open("/usr/bin/device.p", "wb"))
                    self.logger.info(_("Dumped %s" % device))
                    break
        except ValueError as err:
            self.logger.increment('PUT.errors')
            return HTTPBadRequest(body=str(err), request=request,
                                  content_type='text/plain')
        if self.mount_check and not check_mount(self.devices, device):
            self.logger.increment('PUT.errors')
            return HTTPInsufficientStorage(drive=device, request=request)
        if 'x-timestamp' not in request.headers or \
                not check_float(request.headers['x-timestamp']):
            self.logger.increment('PUT.errors')
            return HTTPBadRequest(body='Missing timestamp', request=request,
                                  content_type='text/plain')
        error_response = check_object_creation(request, obj)
        if error_response:
            self.logger.increment('PUT.errors')
            return error_response
        new_delete_at = int(request.headers.get('X-Delete-At') or 0)
        if new_delete_at and new_delete_at < time.time():
            self.logger.increment('PUT.errors')
            return HTTPBadRequest(body='X-Delete-At in past', request=request,
                                  content_type='text/plain')
        file = DiskFile(self.devices, device, partition, account, container,
                        obj, self.logger, disk_chunk_size=self.disk_chunk_size)
        orig_timestamp = file.metadata.get('X-Timestamp')
        upload_expiration = time.time() + self.max_upload_time
        etag = md5()
        upload_size = 0
        last_sync = 0
예제 #46
0
파일: server.py 프로젝트: wenhuizhang/swift
    def PUT(self, request):
        """Handle HTTP PUT requests for the Swift Object Server."""
        device, partition, account, container, obj, policy = \
            get_name_and_placement(request, 5, 5, True)
        req_timestamp = valid_timestamp(request)
        error_response = check_object_creation(request, obj)
        if error_response:
            return error_response
        new_delete_at = int(request.headers.get('X-Delete-At') or 0)
        if new_delete_at and new_delete_at < time.time():
            return HTTPBadRequest(body='X-Delete-At in past', request=request,
                                  content_type='text/plain')
        try:
            fsize = request.message_length()
        except ValueError as e:
            return HTTPBadRequest(body=str(e), request=request,
                                  content_type='text/plain')

        # In case of multipart-MIME put, the proxy sends a chunked request,
        # but may let us know the real content length so we can verify that
        # we have enough disk space to hold the object.
        if fsize is None:
            fsize = request.headers.get('X-Backend-Obj-Content-Length')
            if fsize is not None:
                try:
                    fsize = int(fsize)
                except ValueError as e:
                    return HTTPBadRequest(body=str(e), request=request,
                                          content_type='text/plain')
        # SSYNC will include a Frag-Index header for subrequests to primary
        # nodes; handoff nodes should respond 409 to subrequests that would
        # overwrite an existing data fragment until they have offloaded it
        frag_index = request.headers.get('X-Backend-Ssync-Frag-Index')
        try:
            disk_file = self.get_diskfile(
                device, partition, account, container, obj,
                policy=policy, frag_index=frag_index)
        except DiskFileDeviceUnavailable:
            return HTTPInsufficientStorage(drive=device, request=request)
        try:
            orig_metadata = disk_file.read_metadata()
        except DiskFileXattrNotSupported:
            return HTTPInsufficientStorage(drive=device, request=request)
        except (DiskFileNotExist, DiskFileQuarantined):
            orig_metadata = {}

        # Checks for If-None-Match
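        # e.g. a client PUT carrying "If-None-Match: *" while any copy of the
        # object already exists on this node is rejected with 412 below.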
        if request.if_none_match is not None and orig_metadata:
            if '*' in request.if_none_match:
                # File exists already so return 412
                return HTTPPreconditionFailed(request=request)
            if orig_metadata.get('ETag') in request.if_none_match:
                # The current ETag matches, so return 412
                return HTTPPreconditionFailed(request=request)

        orig_timestamp = Timestamp(orig_metadata.get('X-Timestamp', 0))
        if orig_timestamp >= req_timestamp:
            return HTTPConflict(
                request=request,
                headers={'X-Backend-Timestamp': orig_timestamp.internal})
        orig_delete_at = int(orig_metadata.get('X-Delete-At') or 0)
        upload_expiration = time.time() + self.max_upload_time
        etag = md5()
        elapsed_time = 0
        try:
            with disk_file.create(size=fsize) as writer:
                upload_size = 0

                # If the proxy wants to send us object metadata after the
                # object body, it sets some headers. We have to tell the
                # proxy, in the 100 Continue response, that we're able to
                # parse a multipart MIME document and extract the object and
                # metadata from it. If we don't, then the proxy won't
                # actually send the footer metadata.
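                # In that mode the request body arrives as a multipart MIME
                # document: the first part carries the object bytes and a
                # later part a JSON dict of footer metadata (for example an
                # ETag computed by the proxy while streaming). This is a
                # sketch of the idea, not the exact wire format.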
                have_metadata_footer = False
                use_multiphase_commit = False
                mime_documents_iter = iter([])
                obj_input = request.environ['wsgi.input']

                hundred_continue_headers = []
                if config_true_value(
                        request.headers.get(
                            'X-Backend-Obj-Multiphase-Commit')):
                    use_multiphase_commit = True
                    hundred_continue_headers.append(
                        ('X-Obj-Multiphase-Commit', 'yes'))

                if config_true_value(
                        request.headers.get('X-Backend-Obj-Metadata-Footer')):
                    have_metadata_footer = True
                    hundred_continue_headers.append(
                        ('X-Obj-Metadata-Footer', 'yes'))

                if have_metadata_footer or use_multiphase_commit:
                    obj_input.set_hundred_continue_response_headers(
                        hundred_continue_headers)
                    mime_boundary = request.headers.get(
                        'X-Backend-Obj-Multipart-Mime-Boundary')
                    if not mime_boundary:
                        return HTTPBadRequest("no MIME boundary")

                    try:
                        with ChunkReadTimeout(self.client_timeout):
                            mime_documents_iter = iter_mime_headers_and_bodies(
                                request.environ['wsgi.input'],
                                mime_boundary, self.network_chunk_size)
                            _junk_hdrs, obj_input = next(mime_documents_iter)
                    except ChunkReadTimeout:
                        return HTTPRequestTimeout(request=request)

                timeout_reader = self._make_timeout_reader(obj_input)
                try:
                    for chunk in iter(timeout_reader, ''):
                        start_time = time.time()
                        if start_time > upload_expiration:
                            self.logger.increment('PUT.timeouts')
                            return HTTPRequestTimeout(request=request)
                        etag.update(chunk)
                        upload_size = writer.write(chunk)
                        elapsed_time += time.time() - start_time
                except ChunkReadTimeout:
                    return HTTPRequestTimeout(request=request)
                if upload_size:
                    self.logger.transfer_rate(
                        'PUT.' + device + '.timing', elapsed_time,
                        upload_size)
                if fsize is not None and fsize != upload_size:
                    return HTTPClientDisconnect(request=request)

                footer_meta = {}
                if have_metadata_footer:
                    footer_meta = self._read_metadata_footer(
                        mime_documents_iter)

                request_etag = (footer_meta.get('etag') or
                                request.headers.get('etag', '')).lower()
                etag = etag.hexdigest()
                if request_etag and request_etag != etag:
                    return HTTPUnprocessableEntity(request=request)
                metadata = {
                    'X-Timestamp': request.timestamp.internal,
                    'Content-Type': request.headers['content-type'],
                    'ETag': etag,
                    'Content-Length': str(upload_size),
                }
                metadata.update(val for val in request.headers.items()
                                if is_sys_or_user_meta('object', val[0]))
                metadata.update(val for val in footer_meta.items()
                                if is_sys_or_user_meta('object', val[0]))
                headers_to_copy = (
                    request.headers.get(
                        'X-Backend-Replication-Headers', '').split() +
                    list(self.allowed_headers))
                for header_key in headers_to_copy:
                    if header_key in request.headers:
                        header_caps = header_key.title()
                        metadata[header_caps] = request.headers[header_key]
                writer.put(metadata)

                # if the PUT requires a two-phase commit (a data and a commit
                # phase) send the proxy server another 100-continue response
                # to indicate that we are finished writing object data
                if use_multiphase_commit:
                    request.environ['wsgi.input'].\
                        send_hundred_continue_response()
                    if not self._read_put_commit_message(mime_documents_iter):
                        return HTTPServerError(request=request)
                    # got 2nd phase confirmation, write a timestamp.durable
                    # state file to indicate a successful PUT

                writer.commit(request.timestamp)

                # Drain any remaining MIME docs from the socket. There
                # shouldn't be any, but we must read the whole request body.
                try:
                    while True:
                        with ChunkReadTimeout(self.client_timeout):
                            _junk_hdrs, _junk_body = next(mime_documents_iter)
                        drain(_junk_body, self.network_chunk_size,
                              self.client_timeout)
                except ChunkReadTimeout:
                    raise HTTPClientDisconnect()
                except StopIteration:
                    pass

        except (DiskFileXattrNotSupported, DiskFileNoSpace):
            return HTTPInsufficientStorage(drive=device, request=request)
        if orig_delete_at != new_delete_at:
            if new_delete_at:
                self.delete_at_update(
                    'PUT', new_delete_at, account, container, obj, request,
                    device, policy)
            if orig_delete_at:
                self.delete_at_update(
                    'DELETE', orig_delete_at, account, container, obj,
                    request, device, policy)
        update_headers = HeaderKeyDict({
            'x-size': metadata['Content-Length'],
            'x-content-type': metadata['Content-Type'],
            'x-timestamp': metadata['X-Timestamp'],
            'x-etag': metadata['ETag']})
        # apply any container update header overrides sent with request
        self._check_container_override(update_headers, request.headers)
        self._check_container_override(update_headers, footer_meta)
        self.container_update(
            'PUT', account, container, obj, request,
            update_headers,
            device, policy)
        return HTTPCreated(request=request, etag=etag)
예제 #47
0
파일: server.py 프로젝트: gayana06/Thesis
    def PUT(self, request):
        """Handle HTTP PUT requests for the Swift Object Server."""
        a = time.time()
        #with open("/home/ubuntu/WSTORAGE.txt", "a") as tran_file:
        #    tran_file.write("A = "+str(request.path)+"="+str(datetime.now())+"***")
        #with open("/home/ubuntu/spawn.txt", "a") as tran_file:
        #    tran_file.write("At PUT  Datetime ="+str(datetime.now())+" \n")
        device, partition, account, container, obj, policy_idx = \
            get_name_and_placement(request, 5, 5, True)
        req_timestamp = valid_timestamp(request)
        error_response = check_object_creation(request, obj)
        #with open("/home/ubuntu/WSTORAGE.txt", "a") as tran_file:
        #    tran_file.write("B = "+str(request.path)+"="+str(datetime.now())+"***")
        if error_response:
            return error_response
        new_delete_at = int(request.headers.get('X-Delete-At') or 0)
        if new_delete_at and new_delete_at < time.time():
            return HTTPBadRequest(body='X-Delete-At in past', request=request,
                                  content_type='text/plain')
        try:
            fsize = request.message_length()
        except ValueError as e:
            return HTTPBadRequest(body=str(e), request=request,
                                  content_type='text/plain')
        #with open("/home/ubuntu/WSTORAGE.txt", "a") as tran_file:
        #    tran_file.write("C = "+str(request.path)+"="+str(datetime.now())+"***")
        try:
            disk_file = self.get_diskfile(
                device, partition, account, container, obj,
                policy_idx=policy_idx)
        except DiskFileDeviceUnavailable:
            return HTTPInsufficientStorage(drive=device, request=request)
        #with open("/home/ubuntu/WSTORAGE.txt", "a") as tran_file:
        #    tran_file.write("D = "+str(request.path)+"="+str(datetime.now())+"***")
        try:
            orig_metadata = disk_file.read_metadata()
        except DiskFileXattrNotSupported:
            return HTTPInsufficientStorage(drive=device, request=request)
        except (DiskFileNotExist, DiskFileQuarantined):
            orig_metadata = {}
        #with open("/home/ubuntu/WSTORAGE.txt", "a") as tran_file:
        #    tran_file.write("E = "+str(request.path)+"="+str(datetime.now())+"***")

        # Checks for If-None-Match
        if request.if_none_match is not None and orig_metadata:
            if '*' in request.if_none_match:
                # File exists already so return 412
                return HTTPPreconditionFailed(request=request)
            if orig_metadata.get('ETag') in request.if_none_match:
                # The current ETag matches, so return 412
                return HTTPPreconditionFailed(request=request)

        orig_timestamp = Timestamp(orig_metadata.get('X-Timestamp', 0))
        if orig_timestamp >= req_timestamp:
            return HTTPConflict(
                request=request,
                headers={'X-Backend-Timestamp': orig_timestamp.internal})
        orig_delete_at = int(orig_metadata.get('X-Delete-At') or 0)
        upload_expiration = time.time() + self.max_upload_time
        etag = md5()
        elapsed_time = 0
        #with open("/home/ubuntu/WSTORAGE.txt", "a") as tran_file:
        #    tran_file.write("F = "+str(request.path)+"="+str(datetime.now())+"***")
        try:
            with disk_file.create(size=fsize) as writer:
                upload_size = 0

                def timeout_reader():
                    with ChunkReadTimeout(self.client_timeout):
                        return request.environ['wsgi.input'].read(
                            self.network_chunk_size)

                try:
                    for chunk in iter(lambda: timeout_reader(), ''):
                        start_time = time.time()
                        if start_time > upload_expiration:
                            self.logger.increment('PUT.timeouts')
                            return HTTPRequestTimeout(request=request)
                        etag.update(chunk)
                        upload_size = writer.write(chunk)
                        elapsed_time += time.time() - start_time
                except ChunkReadTimeout:
                    return HTTPRequestTimeout(request=request)
                #with open("/home/ubuntu/WSTORAGE.txt", "a") as tran_file:
                #   tran_file.write("G = "+str(request.path)+"="+str(datetime.now())+"***")
                if upload_size:
                    self.logger.transfer_rate(
                        'PUT.' + device + '.timing', elapsed_time,
                        upload_size)
                if fsize is not None and fsize != upload_size:
                    return HTTPClientDisconnect(request=request)
                etag = etag.hexdigest()
                if 'etag' in request.headers and \
                        request.headers['etag'].lower() != etag:
                    return HTTPUnprocessableEntity(request=request)
                metadata = {
                    'X-Timestamp': request.timestamp.internal,
                    'Content-Type': request.headers['content-type'],
                    'ETag': etag,
                    'Content-Length': str(upload_size),
                }
                metadata.update(val for val in request.headers.iteritems()
                                if is_sys_or_user_meta('object', val[0]))
                headers_to_copy = (
                    request.headers.get(
                        'X-Backend-Replication-Headers', '').split() +
                    list(self.allowed_headers))
                for header_key in headers_to_copy:
                    if header_key in request.headers:
                        header_caps = header_key.title()
                        metadata[header_caps] = request.headers[header_key]
                #with open("/home/ubuntu/WSTORAGE.txt", "a") as tran_file:
                #    tran_file.write("H = "+str(request.path)+"="+str(datetime.now())+"***")
                writer.put(metadata)
                #with open("/home/ubuntu/WSTORAGE.txt", "a") as tran_file:
                #    tran_file.write("I = "+str(request.path)+"="+str(datetime.now())+"***")
        except (DiskFileXattrNotSupported, DiskFileNoSpace):
            return HTTPInsufficientStorage(drive=device, request=request)
        #with open("/home/ubuntu/WSTORAGE.txt", "a") as tran_file:
        #    tran_file.write("J = "+str(request.path)+"="+str(datetime.now())+"***")
        if orig_delete_at != new_delete_at:
            if new_delete_at:
                self.delete_at_update(
                    'PUT', new_delete_at, account, container, obj, request,
                    device, policy_idx)
            if orig_delete_at:
                self.delete_at_update(
                    'DELETE', orig_delete_at, account, container, obj,
                    request, device, policy_idx)
        #with open("/home/ubuntu/WSTORAGE.txt", "a") as tran_file:
        #    tran_file.write("K = "+str(request.path)+"="+str(datetime.now())+"***")
        self.container_update(
            'PUT', account, container, obj, request,
            HeaderKeyDict({
                'x-size': metadata['Content-Length'],
                'x-content-type': metadata['Content-Type'],
                'x-timestamp': metadata['X-Timestamp'],
                'x-etag': metadata['ETag']}),
            device, policy_idx)
        #with open("/home/ubuntu/WSTORAGE.txt", "a") as tran_file:
        #    tran_file.write("L = "+str(request.path)+"="+str(datetime.now())+"***")
	#with open("/home/ubuntu/obj_create.txt", "a") as tran_file:
        #    tran_file.write("Started = "+str(datetime.now())+"\n")
	#a=2
	#num=22227727
	#for a in range(a, num):
    	#    if a % num == 0:
        #	print('not prime')
        #	break
	#else: # loop not exited via break
   	#    print('prime')
	#time.sleep(0.1)
        #with open("/home/ubuntu/obj_create.txt", "a") as tran_file:
        #    tran_file.write("Stopped = "+str(datetime.now())+"\n")
        b = time.time() - a
        #with open("/home/ubuntu/WSTORAGE.txt", "a") as tran_file:
        #    tran_file.write("Total PUT duration of obj = "+str(request.path)+"="+str(b)+"="+str(datetime.now())+"\n")
        return HTTPCreated(request=request, etag=etag)
예제 #48
0
파일: server.py 프로젝트: eckeman/swift
    def PUT(self, request):
        """Handle HTTP PUT requests for the Swift Object Server."""
        device, partition, account, container, obj, policy_idx = \
            get_name_and_placement(request, 5, 5, True)
        req_timestamp = valid_timestamp(request)
        error_response = check_object_creation(request, obj)
        if error_response:
            return error_response
        new_delete_at = int(request.headers.get('X-Delete-At') or 0)
        if new_delete_at and new_delete_at < time.time():
            return HTTPBadRequest(body='X-Delete-At in past',
                                  request=request,
                                  content_type='text/plain')
        try:
            fsize = request.message_length()
        except ValueError as e:
            return HTTPBadRequest(body=str(e),
                                  request=request,
                                  content_type='text/plain')
        try:
            disk_file = self.get_diskfile(device,
                                          partition,
                                          account,
                                          container,
                                          obj,
                                          policy_idx=policy_idx)
        except DiskFileDeviceUnavailable:
            return HTTPInsufficientStorage(drive=device, request=request)
        try:
            orig_metadata = disk_file.read_metadata()
        except DiskFileXattrNotSupported:
            return HTTPInsufficientStorage(drive=device, request=request)
        except (DiskFileNotExist, DiskFileQuarantined):
            orig_metadata = {}

        # Checks for If-None-Match
        if request.if_none_match is not None and orig_metadata:
            if '*' in request.if_none_match:
                # File exists already so return 412
                return HTTPPreconditionFailed(request=request)
            if orig_metadata.get('ETag') in request.if_none_match:
                # The current ETag matches, so return 412
                return HTTPPreconditionFailed(request=request)

        orig_timestamp = Timestamp(orig_metadata.get('X-Timestamp', 0))
        if orig_timestamp >= req_timestamp:
            return HTTPConflict(
                request=request,
                headers={'X-Backend-Timestamp': orig_timestamp.internal})
        orig_delete_at = int(orig_metadata.get('X-Delete-At') or 0)
        upload_expiration = time.time() + self.max_upload_time
        etag = md5()
        elapsed_time = 0
        try:
            with disk_file.create(size=fsize) as writer:
                upload_size = 0

                def timeout_reader():
                    with ChunkReadTimeout(self.client_timeout):
                        return request.environ['wsgi.input'].read(
                            self.network_chunk_size)

                try:
                    for chunk in iter(lambda: timeout_reader(), ''):
                        start_time = time.time()
                        if start_time > upload_expiration:
                            self.logger.increment('PUT.timeouts')
                            return HTTPRequestTimeout(request=request)
                        etag.update(chunk)
                        upload_size = writer.write(chunk)
                        elapsed_time += time.time() - start_time
                except ChunkReadTimeout:
                    return HTTPRequestTimeout(request=request)
                if upload_size:
                    self.logger.transfer_rate('PUT.' + device + '.timing',
                                              elapsed_time, upload_size)
                if fsize is not None and fsize != upload_size:
                    return HTTPClientDisconnect(request=request)
                etag = etag.hexdigest()
                if 'etag' in request.headers and \
                        request.headers['etag'].lower() != etag:
                    return HTTPUnprocessableEntity(request=request)
                metadata = {
                    'X-Timestamp': request.timestamp.internal,
                    'Content-Type': request.headers['content-type'],
                    'ETag': etag,
                    'Content-Length': str(upload_size),
                }
                metadata.update(val for val in request.headers.iteritems()
                                if is_sys_or_user_meta('object', val[0]))
                headers_to_copy = (request.headers.get(
                    'X-Backend-Replication-Headers', '').split() +
                                   list(self.allowed_headers))
                for header_key in headers_to_copy:
                    if header_key in request.headers:
                        header_caps = header_key.title()
                        metadata[header_caps] = request.headers[header_key]
                writer.put(metadata)
        except (DiskFileXattrNotSupported, DiskFileNoSpace):
            return HTTPInsufficientStorage(drive=device, request=request)
        if orig_delete_at != new_delete_at:
            if new_delete_at:
                self.delete_at_update('PUT', new_delete_at, account, container,
                                      obj, request, device, policy_idx)
            if orig_delete_at:
                self.delete_at_update('DELETE', orig_delete_at, account,
                                      container, obj, request, device,
                                      policy_idx)
        self.container_update(
            'PUT', account, container, obj, request,
            HeaderKeyDict({
                'x-size': metadata['Content-Length'],
                'x-content-type': metadata['Content-Type'],
                'x-timestamp': metadata['X-Timestamp'],
                'x-etag': metadata['ETag']
            }), device, policy_idx)
        return HTTPCreated(request=request, etag=etag)
예제 #49
0
    def PUT(self, req):
        """HTTP PUT request handler."""
        if req.if_none_match is not None and '*' not in req.if_none_match:
            # Sending an etag with if-none-match isn't currently supported
            return HTTPBadRequest(request=req,
                                  content_type='text/plain',
                                  body='If-None-Match only supports *')
        container_info = self.container_info(self.account_name,
                                             self.container_name, req)
        policy_index = req.headers.get('X-Backend-Storage-Policy-Index',
                                       container_info['storage_policy'])
        obj_ring = self.app.get_object_ring(policy_index)
        container_nodes = container_info['nodes']
        container_partition = container_info['partition']
        partition, nodes = obj_ring.get_nodes(self.account_name,
                                              self.container_name,
                                              self.object_name)

        # pass the policy index to storage nodes via req header
        req.headers['X-Backend-Storage-Policy-Index'] = policy_index
        req.acl = container_info['write_acl']
        req.environ['swift_sync_key'] = container_info['sync_key']

        # is request authorized
        if 'swift.authorize' in req.environ:
            aresp = req.environ['swift.authorize'](req)
            if aresp:
                return aresp

        if not container_info['nodes']:
            return HTTPNotFound(request=req)

        # update content type in case it is missing
        self._update_content_type(req)

        # check constraints on object name and request headers
        error_response = check_object_creation(req, self.object_name) or \
            check_content_type(req)
        if error_response:
            return error_response

        self._update_x_timestamp(req)

        # check if versioning is enabled and handle copying previous version
        self._handle_object_versions(req)

        # check if request is a COPY of an existing object
        source_header = req.headers.get('X-Copy-From')
        if source_header:
            error_response, req, data_source, update_response = \
                self._handle_copy_request(req)
            if error_response:
                return error_response
        else:
            reader = req.environ['wsgi.input'].read
            data_source = iter(lambda: reader(self.app.client_chunk_size), '')
            update_response = lambda req, resp: resp
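            # (Assumption: for the copy path above, _handle_copy_request
            # returns an update_response that decorates the final response,
            # e.g. with X-Copied-From; plain PUTs pass the response through
            # unchanged.)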

        # check if object is set to be automatically deleted (i.e. expired)
        req, delete_at_container, delete_at_part, \
            delete_at_nodes = self._config_obj_expiration(req)

        # add special headers to be handled by storage nodes
        outgoing_headers = self._backend_requests(
            req, len(nodes), container_partition, container_nodes,
            delete_at_container, delete_at_part, delete_at_nodes)

        # send object to storage nodes
        resp = self._store_object(req, data_source, nodes, partition,
                                  outgoing_headers)
        return update_response(req, resp)
예제 #50
0
    def PUT(self, request):
        """Handle HTTP PUT requests for the Swift Object Server."""
        device, partition, account, container, obj, policy = \
            get_name_and_placement(request, 5, 5, True)
        req_timestamp = valid_timestamp(request)
        # Check the request headers and the object metadata
        error_response = check_object_creation(request, obj)
        if error_response:
            return error_response
        new_delete_at = int(request.headers.get('X-Delete-At') or 0)
        if new_delete_at and new_delete_at < time.time():
            return HTTPBadRequest(body='X-Delete-At in past', request=request,
                                  content_type='text/plain')
        try:
            # Get the length of the uploaded file from the request
            fsize = request.message_length()
        except ValueError as e:
            return HTTPBadRequest(body=str(e), request=request,
                                  content_type='text/plain')

        # In case of multipart-MIME put, the proxy sends a chunked request,
        # but may let us know the real content length so we can verify that
        # we have enough disk space to hold the object.
        if fsize is None:
            fsize = request.headers.get('X-Backend-Obj-Content-Length')
            if fsize is not None:
                try:
                    fsize = int(fsize)
                except ValueError as e:
                    return HTTPBadRequest(body=str(e), request=request,
                                          content_type='text/plain')
        # SSYNC will include a Frag-Index header for subrequests to primary
        # nodes; handoff nodes should respond 409 to subrequests that would
        # overwrite an existing data fragment until they have offloaded it
        frag_index = request.headers.get('X-Backend-Ssync-Frag-Index')
        try:
            # Create the on-disk file management object instance
            disk_file = self.get_diskfile(
                device, partition, account, container, obj,
                policy=policy, frag_index=frag_index)
        except DiskFileDeviceUnavailable:
            return HTTPInsufficientStorage(drive=device, request=request)
        try:
            # Read the existing metadata
            orig_metadata = disk_file.read_metadata()
        except DiskFileXattrNotSupported:
            return HTTPInsufficientStorage(drive=device, request=request)
        # If the file does not exist, the original metadata is left empty
        except (DiskFileNotExist, DiskFileQuarantined):
            orig_metadata = {}

        # Checks for If-None-Match
        if request.if_none_match is not None and orig_metadata:
            if '*' in request.if_none_match:
                # File exists already so return 412
                return HTTPPreconditionFailed(request=request)
            if orig_metadata.get('ETag') in request.if_none_match:
                # The current ETag matches, so return 412
                return HTTPPreconditionFailed(request=request)

        orig_timestamp = Timestamp(orig_metadata.get('X-Timestamp', 0))
        # Handle out-of-order delivery: an earlier upload may arrive later
        if orig_timestamp >= req_timestamp:
            return HTTPConflict(
                request=request,
                headers={'X-Backend-Timestamp': orig_timestamp.internal})
        orig_delete_at = int(orig_metadata.get('X-Delete-At') or 0)
        # Compute the upload timeout deadline
        upload_expiration = time.time() + self.max_upload_time
        # Initialize the MD5 hash
        etag = md5()
        elapsed_time = 0
        try:
            # Create a temporary file, truncate it to the expected size,
            # and return a writer object
            with disk_file.create(size=fsize) as writer:
                upload_size = 0

                # If the proxy wants to send us object metadata after the
                # object body, it sets some headers. We have to tell the
                # proxy, in the 100 Continue response, that we're able to
                # parse a multipart MIME document and extract the object and
                # metadata from it. If we don't, then the proxy won't
                # actually send the footer metadata.
                have_metadata_footer = False
                use_multiphase_commit = False
                mime_documents_iter = iter([])
                obj_input = request.environ['wsgi.input']

                hundred_continue_headers = []
                if config_true_value(
                        request.headers.get(
                            'X-Backend-Obj-Multiphase-Commit')):
                    use_multiphase_commit = True
                    hundred_continue_headers.append(
                        ('X-Obj-Multiphase-Commit', 'yes'))

                if config_true_value(
                        request.headers.get('X-Backend-Obj-Metadata-Footer')):
                    have_metadata_footer = True
                    hundred_continue_headers.append(
                        ('X-Obj-Metadata-Footer', 'yes'))

                if have_metadata_footer or use_multiphase_commit:
                    obj_input.set_hundred_continue_response_headers(
                        hundred_continue_headers)
                    mime_boundary = request.headers.get(
                        'X-Backend-Obj-Multipart-Mime-Boundary')
                    if not mime_boundary:
                        return HTTPBadRequest("no MIME boundary")

                    try:
                        with ChunkReadTimeout(self.client_timeout):
                            mime_documents_iter = iter_mime_headers_and_bodies(
                                request.environ['wsgi.input'],
                                mime_boundary, self.network_chunk_size)
                            _junk_hdrs, obj_input = next(mime_documents_iter)
                    except ChunkReadTimeout:
                        return HTTPRequestTimeout(request=request)

                # Create the reader object for network data
                timeout_reader = self._make_timeout_reader(obj_input)
                try:
                    # Read network data in a loop, network_chunk_size
                    # bytes at a time
                    for chunk in iter(timeout_reader, ''):
                        start_time = time.time()
                        if start_time > upload_expiration:
                            self.logger.increment('PUT.timeouts')
                            return HTTPRequestTimeout(request=request)
                        # Update the running MD5
                        etag.update(chunk)
                        # Write to the temporary file and track the bytes
                        # written
                        upload_size = writer.write(chunk)
                        elapsed_time += time.time() - start_time
                except ChunkReadTimeout:
                    return HTTPRequestTimeout(request=request)
                if upload_size:
                    self.logger.transfer_rate(
                        'PUT.' + device + '.timing', elapsed_time,
                        upload_size)
                # If the bytes read differ from the declared upload size,
                # report a client disconnect (499)
                if fsize is not None and fsize != upload_size:
                    return HTTPClientDisconnect(request=request)

                footer_meta = {}
                if have_metadata_footer:
                    footer_meta = self._read_metadata_footer(
                        mime_documents_iter)

                # Get the file MD5 value recorded in the request
                request_etag = (footer_meta.get('etag') or
                                request.headers.get('etag', '')).lower()
                etag = etag.hexdigest()
                # If the MD5 of the written data differs from the one in
                # the request, return 422
                if request_etag and request_etag != etag:
                    return HTTPUnprocessableEntity(request=request)

                metadata = {
                    'X-Timestamp': request.timestamp.internal,
                    'Content-Type': request.headers['content-type'],
                    'ETag': etag,
                    'Content-Length': str(upload_size),
                }
                # Merge the object's custom metadata into the dict
                metadata.update(val for val in request.headers.items()
                                if is_sys_or_user_meta('object', val[0]))
                metadata.update(val for val in footer_meta.items()
                                if is_sys_or_user_meta('object', val[0]))
                headers_to_copy = (
                    request.headers.get(
                        'X-Backend-Replication-Headers', '').split() +
                    list(self.allowed_headers))
                for header_key in headers_to_copy:
                    if header_key in request.headers:
                        header_caps = header_key.title()
                        metadata[header_caps] = request.headers[header_key]

                # Write the metadata to the file's extended attributes and
                # rename the file into its destination directory
                writer.put(metadata)

                # if the PUT requires a two-phase commit (a data and a commit
                # phase) send the proxy server another 100-continue response
                # to indicate that we are finished writing object data
                if use_multiphase_commit:
                    request.environ['wsgi.input'].\
                        send_hundred_continue_response()
                    if not self._read_put_commit_message(mime_documents_iter):
                        return HTTPServerError(request=request)
                    # got 2nd phase confirmation, write a timestamp.durable
                    # state file to indicate a successful PUT

                # Second-phase commit, used for EC
                writer.commit(request.timestamp)

                # Drain any remaining MIME docs from the socket. There
                # shouldn't be any, but we must read the whole request body.
                try:
                    while True:
                        with ChunkReadTimeout(self.client_timeout):
                            _junk_hdrs, _junk_body = next(mime_documents_iter)
                        drain(_junk_body, self.network_chunk_size,
                              self.client_timeout)
                except ChunkReadTimeout:
                    raise HTTPClientDisconnect()
                except StopIteration:
                    pass

        except (DiskFileXattrNotSupported, DiskFileNoSpace):
            return HTTPInsufficientStorage(drive=device, request=request)
        if orig_delete_at != new_delete_at:
            if new_delete_at:
                self.delete_at_update(
                    'PUT', new_delete_at, account, container, obj, request,
                    device, policy)
            if orig_delete_at:
                self.delete_at_update(
                    'DELETE', orig_delete_at, account, container, obj,
                    request, device, policy)
        update_headers = HeaderKeyDict({
            'x-size': metadata['Content-Length'],
            'x-content-type': metadata['Content-Type'],
            'x-timestamp': metadata['X-Timestamp'],
            'x-etag': metadata['ETag']})
        # apply any container update header overrides sent with request
        self._check_container_override(update_headers, request.headers)
        self._check_container_override(update_headers, footer_meta)
        # Update the container metadata
        self.container_update(
            'PUT', account, container, obj, request,
            update_headers,
            device, policy)
        return HTTPCreated(request=request, etag=etag)
예제 #51
0
파일: obj.py 프로젝트: AymericDu/swift
    def PUT(self, req):
        """HTTP PUT request handler."""
        container_info = self.container_info(self.account_name,
                                             self.container_name, req)

        req.acl = container_info['write_acl']
        req.environ['swift_sync_key'] = container_info['sync_key']

        # is request authorized
        if 'swift.authorize' in req.environ:
            aresp = req.environ['swift.authorize'](req)
            if aresp:
                return aresp

        self.enforce_versioning(req)

        old_slo_manifest = None
        old_slo_manifest_etag = None
        # If versioning is disabled, we must check if the object exists.
        # If it's a NEW SLO (we must check it is not the same manifest),
        # we will have to delete the parts if the current
        # operation is a success.
        if (self.app.delete_slo_parts and not config_true_value(
                container_info.get('sysmeta', {}).get('versions-enabled',
                                                      False))):
            try:
                dest_info = get_object_info(req.environ, self.app)
                if 'slo-size' in dest_info['sysmeta']:
                    manifest_env = req.environ.copy()
                    manifest_env['QUERY_STRING'] = 'multipart-manifest=get'
                    manifest_req = make_subrequest(manifest_env, 'GET')
                    manifest_resp = manifest_req.get_response(self.app)
                    old_slo_manifest = json.loads(manifest_resp.body)
                    old_slo_manifest_etag = dest_info.get('etag')
            except Exception as exc:
                self.app.logger.warn(
                    ('Failed to check existence of %s. If '
                     'overwriting a SLO, old parts may '
                     'remain. Error was: %s') % (req.path, exc))

        self._update_content_type(req)

        req.ensure_x_timestamp()

        # check constraints on object name and request headers
        error_response = check_object_creation(req, self.object_name) or \
            check_content_type(req)
        if error_response:
            return error_response

        if req.headers.get('Oio-Copy-From'):
            return self._link_object(req)

        data_source = req.environ['wsgi.input']
        if req.content_length:
            data_source = ExpectedSizeReader(data_source, req.content_length)

        headers = self._prepare_headers(req)

        with closing_if_possible(data_source):
            resp = self._store_object(req, data_source, headers)
        if (resp.is_success and old_slo_manifest
                and resp.etag != old_slo_manifest_etag):
            self.app.logger.debug(
                'Previous object %s was a different SLO, deleting parts',
                req.path)
            self._delete_slo_parts(req, old_slo_manifest)
        return resp
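Before streaming the new data, this handler probes the destination to see whether it is overwriting an existing SLO, remembering the old manifest and its etag so the old segments can be deleted once the overwrite succeeds. A hedged sketch of that probe as a standalone helper is shown below; the import paths are assumptions based on stock Swift (swift.common.wsgi.make_subrequest and swift.proxy.controllers.base.get_object_info), and the example's project may organize them differently:

import json

from swift.common.wsgi import make_subrequest
from swift.proxy.controllers.base import get_object_info


def fetch_old_slo_manifest(env, app):
    # Return (manifest, etag) when the destination object is an SLO,
    # otherwise (None, None). SLO objects advertise 'slo-size' in their
    # sysmeta, and 'multipart-manifest=get' returns the raw manifest
    # JSON instead of the concatenated segments.
    info = get_object_info(env, app)
    if 'slo-size' not in info.get('sysmeta', {}):
        return None, None
    manifest_env = env.copy()
    manifest_env['QUERY_STRING'] = 'multipart-manifest=get'
    resp = make_subrequest(manifest_env, 'GET').get_response(app)
    return json.loads(resp.body), info.get('etag')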
Example #52
0
File: obj.py Project: anishnarang/gswift
    def PUT(self, req):
        """HTTP PUT request handler."""
        if req.if_none_match is not None and '*' not in req.if_none_match:
            # Sending an etag with if-none-match isn't currently supported
            return HTTPBadRequest(request=req,
                                  content_type='text/plain',
                                  body='If-None-Match only supports *')
        container_info = self.container_info(self.account_name,
                                             self.container_name, req)
        policy_index = req.headers.get('X-Backend-Storage-Policy-Index',
                                       container_info['storage_policy'])
        obj_ring = self.app.get_object_ring(policy_index)

        # pass the policy index to storage nodes via req header
        req.headers['X-Backend-Storage-Policy-Index'] = policy_index
        container_partition = container_info['partition']
        containers = container_info['nodes']
        req.acl = container_info['write_acl']
        req.environ['swift_sync_key'] = container_info['sync_key']
        object_versions = container_info['versions']
        if 'swift.authorize' in req.environ:
            aresp = req.environ['swift.authorize'](req)
            if aresp:
                return aresp

        if not containers:
            return HTTPNotFound(request=req)

        # Sometimes the 'content-type' header exists, but is set to None.
        content_type_manually_set = True
        detect_content_type = \
            config_true_value(req.headers.get('x-detect-content-type'))
        if detect_content_type or not req.headers.get('content-type'):
            guessed_type, _junk = mimetypes.guess_type(req.path_info)
            req.headers['Content-Type'] = guessed_type or \
                'application/octet-stream'
            if detect_content_type:
                req.headers.pop('x-detect-content-type')
            else:
                content_type_manually_set = False

        error_response = check_object_creation(req, self.object_name) or \
            check_content_type(req)
        if error_response:
            return error_response

        partition, nodes = obj_ring.get_nodes(self.account_name,
                                              self.container_name,
                                              self.object_name)
        ####################################  CHANGED_CODE  ############################################################
        # Change the nodes list to contain only one dictionary item instead of the original 3 returned by the ring.
        d = dict()
        # d[partition] = nodes[1:]
        # f.write(str(d)+"\n")
        # f.close()
        print("===Original Nodes===")
        print(nodes)
        temp_nodes = []
        flag = 0
        f = open("/home/hduser/swift/swift/proxy/controllers/spindowndevices",
                 "r")
        sdlist = f.read().split("\n")
        print("===Spun down devices===:", sdlist)
        f.close()

        upnodes = [item for item in nodes if item['device'] not in sdlist]
        downnodes = [item for item in nodes if item['device'] in sdlist]
        temp_nodes = upnodes
        if (len(downnodes) > 0):
            d = ast.literal_eval(
                open("/home/hduser/swift/swift/proxy/controllers/nodes.txt",
                     "r").read())
            # d_temp=pickle.load("/home/hduser/swift/proxy/controllers/nodes.p","rb")
            # print("===Current dict===:",d)
            for item in downnodes:
                if (partition in d):
                    d[partition].append(item)
                    # print("===Modified dict===:",d)
                else:
                    d[partition] = [item]
                    # print("===Modified dict===:",d)
        # pickle.dump(d,open("/home/hduser/nodes.p","wb"))
        # print("Before writing:",d)
        fo = open("/home/hduser/swift/swift/proxy/controllers/nodes.txt", "w")
        fo.write(str(d) + "\n")
        fo.close()
        # pickle.dump(d,open("/home/hduser/swift/swift/proxy/controllers/nodes.p","wb"))
        ## Old method, IGNORE
        # for item in nodes:
        #     device = item['device']
        #     if(device not in sdlist):
        #     # if(os.path.ismount("path"))
        #         temp_nodes.append(item)
        #         flag = 1
        #         break
        #     else:
        #         pickle.dump(d,open("/home/hduser/nodes.p","wb"))
        #         # d = pickle.load(open("/home/hduser/nodes.p","rb"))
        #         import ast
        #         d = ast.literal_eval(open("/home/hduser/nodes.txt","r").read())
        #         print("===Current dict===:",d)
        #         if(partition in d):
        #             print("In IF")
        #             d[partition].append(item)
        #             print("===Modified dict===:",d)
        #         else:
        #             print("In ELSE")
        #             d[partition] = [item]
        #             print("===Modified dict===:",d)
        #         pickle.dump(d,open("/home/hduser/nodes.p","wb"))
        #         fo = open("/home/hduser/nodes.txt","w")
        #         fo.write(str(d)+"\n")

        # Code to spin up a device if none are running already.
        if (len(upnodes) == 0):
            dev = nodes[0]['device']
            print("===ALL NODES DOWN===")
            print("===Mounting device===", dev)
            os.system("mount /dev/" + str(dev))

        print('===In controller PUT===:')
        print("===Partition===", partition)
        nodes = temp_nodes
        print("===Nodes===:", nodes)

        check_ssd()
        ############################################  CHANGED_CODE  ########################################################

        # do a HEAD request for checking object versions
        if object_versions and not req.environ.get('swift_versioned_copy'):
            # make sure proxy-server uses the right policy index
            _headers = {
                'X-Backend-Storage-Policy-Index': policy_index,
                'X-Newest': 'True'
            }
            hreq = Request.blank(req.path_info,
                                 headers=_headers,
                                 environ={'REQUEST_METHOD': 'HEAD'})
            hresp = self.GETorHEAD_base(hreq, _('Object'), obj_ring, partition,
                                        hreq.swift_entity_path)

        # Used by container sync feature
        if 'x-timestamp' in req.headers:
            try:
                req_timestamp = Timestamp(req.headers['X-Timestamp'])
            except ValueError:
                return HTTPBadRequest(
                    request=req,
                    content_type='text/plain',
                    body='X-Timestamp should be a UNIX timestamp float value; '
                    'was %r' % req.headers['x-timestamp'])
            req.headers['X-Timestamp'] = req_timestamp.internal
        else:
            req.headers['X-Timestamp'] = Timestamp(time.time()).internal

        if object_versions and not req.environ.get('swift_versioned_copy'):
            is_manifest = 'X-Object-Manifest' in req.headers or \
                          'X-Object-Manifest' in hresp.headers
            if hresp.status_int != HTTP_NOT_FOUND and not is_manifest:
                # This is a version manifest and needs to be handled
                # differently. First copy the existing data to a new object,
                # then write the data from this request to the version manifest
                # object.
                lcontainer = object_versions.split('/')[0]
                prefix_len = '%03x' % len(self.object_name)
                lprefix = prefix_len + self.object_name + '/'
                ts_source = hresp.environ.get('swift_x_timestamp')
                if ts_source is None:
                    ts_source = time.mktime(
                        time.strptime(hresp.headers['last-modified'],
                                      '%a, %d %b %Y %H:%M:%S GMT'))
                new_ts = Timestamp(ts_source).internal
                vers_obj_name = lprefix + new_ts
                copy_headers = {
                    'Destination': '%s/%s' % (lcontainer, vers_obj_name)
                }
                copy_environ = {
                    'REQUEST_METHOD': 'COPY',
                    'swift_versioned_copy': True
                }
                copy_req = Request.blank(req.path_info,
                                         headers=copy_headers,
                                         environ=copy_environ)
                copy_resp = self.COPY(copy_req)
                if is_client_error(copy_resp.status_int):
                    # missing container or bad permissions
                    return HTTPPreconditionFailed(request=req)
                elif not is_success(copy_resp.status_int):
                    # could not copy the data, bail
                    return HTTPServiceUnavailable(request=req)

        reader = req.environ['wsgi.input'].read
        data_source = iter(lambda: reader(self.app.client_chunk_size), '')
        source_header = req.headers.get('X-Copy-From')
        source_resp = None
        if source_header:
            if req.environ.get('swift.orig_req_method', req.method) != 'POST':
                req.environ.setdefault('swift.log_info', []).append(
                    'x-copy-from:%s' % source_header)
            ver, acct, _rest = req.split_path(2, 3, True)
            src_account_name = req.headers.get('X-Copy-From-Account', None)
            if src_account_name:
                src_account_name = check_account_format(req, src_account_name)
            else:
                src_account_name = acct
            src_container_name, src_obj_name = check_copy_from_header(req)
            source_header = '/%s/%s/%s/%s' % (ver, src_account_name,
                                              src_container_name, src_obj_name)
            source_req = req.copy_get()

            # make sure the source request uses its container_info
            source_req.headers.pop('X-Backend-Storage-Policy-Index', None)
            source_req.path_info = source_header
            source_req.headers['X-Newest'] = 'true'
            orig_obj_name = self.object_name
            orig_container_name = self.container_name
            orig_account_name = self.account_name
            self.object_name = src_obj_name
            self.container_name = src_container_name
            self.account_name = src_account_name
            sink_req = Request.blank(req.path_info,
                                     environ=req.environ,
                                     headers=req.headers)
            source_resp = self.GET(source_req)

            # This gives middlewares a way to change the source; for example,
            # this lets you COPY a SLO manifest and have the new object be the
            # concatenation of the segments (like what a GET request gives
            # the client), not a copy of the manifest file.
            hook = req.environ.get(
                'swift.copy_hook',
                (lambda source_req, source_resp, sink_req: source_resp))
            source_resp = hook(source_req, source_resp, sink_req)

            if source_resp.status_int >= HTTP_MULTIPLE_CHOICES:
                return source_resp
            self.object_name = orig_obj_name
            self.container_name = orig_container_name
            self.account_name = orig_account_name
            data_source = iter(source_resp.app_iter)
            sink_req.content_length = source_resp.content_length
            if sink_req.content_length is None:
                # This indicates a transfer-encoding: chunked source object,
                # which currently only happens because there are more than
                # CONTAINER_LISTING_LIMIT segments in a segmented object. In
                # this case, we're going to refuse to do the server-side copy.
                return HTTPRequestEntityTooLarge(request=req)
            if sink_req.content_length > constraints.MAX_FILE_SIZE:
                return HTTPRequestEntityTooLarge(request=req)
            sink_req.etag = source_resp.etag

            # we no longer need the X-Copy-From header
            del sink_req.headers['X-Copy-From']
            if 'X-Copy-From-Account' in sink_req.headers:
                del sink_req.headers['X-Copy-From-Account']
            if not content_type_manually_set:
                sink_req.headers['Content-Type'] = \
                    source_resp.headers['Content-Type']
            if config_true_value(
                    sink_req.headers.get('x-fresh-metadata', 'false')):
                # post-as-copy: ignore new sysmeta, copy existing sysmeta
                condition = lambda k: is_sys_meta('object', k)
                remove_items(sink_req.headers, condition)
                copy_header_subset(source_resp, sink_req, condition)
            else:
                # copy/update existing sysmeta and user meta
                copy_headers_into(source_resp, sink_req)
                copy_headers_into(req, sink_req)

            # copy over x-static-large-object for POSTs and manifest copies
            if 'X-Static-Large-Object' in source_resp.headers and \
                    req.params.get('multipart-manifest') == 'get':
                sink_req.headers['X-Static-Large-Object'] = \
                    source_resp.headers['X-Static-Large-Object']

            req = sink_req

        req, delete_at_container, delete_at_part, \
            delete_at_nodes = self._config_obj_expiration(req)

        node_iter = GreenthreadSafeIterator(
            self.iter_nodes_local_first(obj_ring, partition))
        pile = GreenPile(len(nodes))
        te = req.headers.get('transfer-encoding', '')
        chunked = ('chunked' in te)

        outgoing_headers = self._backend_requests(
            req, len(nodes), container_partition, containers,
            delete_at_container, delete_at_part, delete_at_nodes)

        for nheaders in outgoing_headers:
            # RFC2616:8.2.3 disallows 100-continue without a body
            if (req.content_length > 0) or chunked:
                nheaders['Expect'] = '100-continue'

#################################  CHANGED_CODE  ###################################################################
# Replaced node_iter with nodes in the following line to make sure that a new list with a different order isn't used.
# Change from node_iter to nodes to make sure it writes to the same device.
# Without this, it gets a new list of nodes from the ring in a different order and connects to the first one.

            pile.spawn(self._connect_put_node, nodes, partition,
                       req.swift_entity_path, nheaders,
                       self.app.logger.thread_locals)

#################################  CHANGED_CODE ###################################################################

        conns = [conn for conn in pile if conn]
        min_conns = quorum_size(len(nodes))

        if req.if_none_match is not None and '*' in req.if_none_match:
            statuses = [conn.resp.status for conn in conns if conn.resp]
            if HTTP_PRECONDITION_FAILED in statuses:
                # If we find any copy of the file, it shouldn't be uploaded
                self.app.logger.debug(
                    _('Object PUT returning 412, %(statuses)r'),
                    {'statuses': statuses})
                return HTTPPreconditionFailed(request=req)

        if any(conn for conn in conns
               if conn.resp and conn.resp.status == HTTP_CONFLICT):
            timestamps = [
                HeaderKeyDict(
                    conn.resp.getheaders()).get('X-Backend-Timestamp')
                for conn in conns if conn.resp
            ]
            self.app.logger.debug(
                _('Object PUT returning 202 for 409: '
                  '%(req_timestamp)s <= %(timestamps)r'), {
                      'req_timestamp': req.timestamp.internal,
                      'timestamps': ', '.join(timestamps)
                  })
            return HTTPAccepted(request=req)

        if len(conns) < min_conns:
            self.app.logger.error(
                _('Object PUT returning 503, %(conns)s/%(nodes)s '
                  'required connections'), {
                      'conns': len(conns),
                      'nodes': min_conns
                  })
            return HTTPServiceUnavailable(request=req)
        bytes_transferred = 0
        try:
            with ContextPool(len(nodes)) as pool:
                for conn in conns:
                    conn.failed = False
                    conn.queue = Queue(self.app.put_queue_depth)
                    pool.spawn(self._send_file, conn, req.path)
                while True:
                    with ChunkReadTimeout(self.app.client_timeout):
                        try:
                            chunk = next(data_source)
                        except StopIteration:
                            if chunked:
                                for conn in conns:
                                    conn.queue.put('0\r\n\r\n')
                            break
                    bytes_transferred += len(chunk)
                    if bytes_transferred > constraints.MAX_FILE_SIZE:
                        return HTTPRequestEntityTooLarge(request=req)
                    for conn in list(conns):
                        if not conn.failed:
                            conn.queue.put('%x\r\n%s\r\n' %
                                           (len(chunk),
                                            chunk) if chunked else chunk)
                        else:
                            conns.remove(conn)
                    if len(conns) < min_conns:
                        self.app.logger.error(
                            _('Object PUT exceptions during'
                              ' send, %(conns)s/%(nodes)s required connections'
                              ), {
                                  'conns': len(conns),
                                  'nodes': min_conns
                              })
                        return HTTPServiceUnavailable(request=req)
                for conn in conns:
                    if conn.queue.unfinished_tasks:
                        conn.queue.join()
            conns = [conn for conn in conns if not conn.failed]
        except ChunkReadTimeout as err:
            self.app.logger.warn(_('ERROR Client read timeout (%ss)'),
                                 err.seconds)
            self.app.logger.increment('client_timeouts')
            return HTTPRequestTimeout(request=req)
        except (Exception, Timeout):
            self.app.logger.exception(
                _('ERROR Exception causing client disconnect'))
            return HTTPClientDisconnect(request=req)
        if req.content_length and bytes_transferred < req.content_length:
            req.client_disconnect = True
            self.app.logger.warn(
                _('Client disconnected without sending enough data'))
            self.app.logger.increment('client_disconnects')
            return HTTPClientDisconnect(request=req)

        statuses, reasons, bodies, etags = self._get_put_responses(
            req, conns, nodes)

        if len(etags) > 1:
            self.app.logger.error(
                _('Object servers returned %s mismatched etags'), len(etags))
            return HTTPServerError(request=req)
        etag = etags.pop() if len(etags) else None
        resp = self.best_response(req,
                                  statuses,
                                  reasons,
                                  bodies,
                                  _('Object PUT'),
                                  etag=etag)
        if source_header:
            acct, path = source_header.split('/', 3)[2:4]
            resp.headers['X-Copied-From-Account'] = quote(acct)
            resp.headers['X-Copied-From'] = quote(path)
            if 'last-modified' in source_resp.headers:
                resp.headers['X-Copied-From-Last-Modified'] = \
                    source_resp.headers['last-modified']
            copy_headers_into(req, resp)
        resp.last_modified = math.ceil(
            float(Timestamp(req.headers['X-Timestamp'])))
        return resp
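The CHANGED_CODE blocks in this handler steer writes away from spun-down disks: the candidate nodes returned by the ring are split on a spindowndevices file, the skipped nodes are appended to a deferred-write map keyed by partition, and if every device is down one is mounted on the spot. A hedged sketch of just the split-and-record step follows; the helper name is invented, while the file layout (a plain dict written with str() and read back with ast.literal_eval(), paths under /home/hduser/swift/swift/proxy/controllers/) is taken from the example:

import ast


def split_nodes_by_spindown(nodes, partition, spindown_path, deferred_path):
    # Split ring nodes into spun-up and spun-down devices. Devices listed
    # one per line in spindown_path are skipped for this write; the
    # partition -> skipped-nodes mapping is appended to deferred_path so
    # the data can be pushed to those devices once they spin back up.
    with open(spindown_path) as f:
        spun_down = set(f.read().split('\n'))
    upnodes = [n for n in nodes if n['device'] not in spun_down]
    downnodes = [n for n in nodes if n['device'] in spun_down]
    if downnodes:
        with open(deferred_path) as f:
            deferred = ast.literal_eval(f.read())
        deferred.setdefault(partition, []).extend(downnodes)
        with open(deferred_path, 'w') as f:
            f.write(str(deferred) + '\n')
    return upnodes, downnodes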
Example #53
0
    def PUT(self, request):
        """Handle HTTP PUT requests for the Swift Object Server."""
        device, partition, account, container, obj, policy = \
            get_name_and_placement(request, 5, 5, True)
        req_timestamp = valid_timestamp(request)
        error_response = check_object_creation(request, obj)
        if error_response:
            return error_response
        new_delete_at = int(request.headers.get('X-Delete-At') or 0)
        if new_delete_at and new_delete_at < time.time():
            return HTTPBadRequest(body='X-Delete-At in past', request=request,
                                  content_type='text/plain')
        try:
            fsize = request.message_length()
        except ValueError as e:
            return HTTPBadRequest(body=str(e), request=request,
                                  content_type='text/plain')

        # In case of multipart-MIME put, the proxy sends a chunked request,
        # but may let us know the real content length so we can verify that
        # we have enough disk space to hold the object.
        if fsize is None:
            fsize = request.headers.get('X-Backend-Obj-Content-Length')
            if fsize is not None:
                try:
                    fsize = int(fsize)
                except ValueError as e:
                    return HTTPBadRequest(body=str(e), request=request,
                                          content_type='text/plain')

        orig_metadata = {}
        orig_timestamp = Timestamp(0)
        orig_delete_at = 0
        upload_expiration = time.time() + self.max_upload_time
        etag = md5()
        elapsed_time = 0

        cloud_container_info = request.headers.get('X-Storage-Container')
        cloud_container = cloud_container_info.strip().split('/')

        try:
            upload_size = 0

            # If the proxy wants to send us object metadata after the
            # object body, it sets some headers. We have to tell the
            # proxy, in the 100 Continue response, that we're able to
            # parse a multipart MIME document and extract the object and
            # metadata from it. If we don't, then the proxy won't
            # actually send the footer metadata.
            have_metadata_footer = False
            use_multiphase_commit = False
            mime_documents_iter = iter([])
            obj_input = request.environ['wsgi.input']

            hundred_continue_headers = []
            if config_true_value(
                    request.headers.get(
                        'X-Backend-Obj-Multiphase-Commit')):
                use_multiphase_commit = True
                hundred_continue_headers.append(
                    ('X-Obj-Multiphase-Commit', 'yes'))

            if config_true_value(
                    request.headers.get('X-Backend-Obj-Metadata-Footer')):
                have_metadata_footer = True
                hundred_continue_headers.append(
                    ('X-Obj-Metadata-Footer', 'yes'))

            if have_metadata_footer or use_multiphase_commit:
                obj_input.set_hundred_continue_response_headers(
                    hundred_continue_headers)
                mime_boundary = request.headers.get(
                    'X-Backend-Obj-Multipart-Mime-Boundary')
                if not mime_boundary:
                    return HTTPBadRequest("no MIME boundary")

                try:
                    with ChunkReadTimeout(self.client_timeout):
                        mime_documents_iter = iter_mime_headers_and_bodies(
                            request.environ['wsgi.input'],
                            mime_boundary, self.network_chunk_size)
                        _junk_hdrs, obj_input = next(mime_documents_iter)
                except ChunkReadTimeout:
                    return HTTPRequestTimeout(request=request)

            timeout_reader = self._make_timeout_reader(obj_input)
            try:
                start_time = time.time()
                if start_time > upload_expiration:
                    self.logger.increment('PUT.timeouts')
                    return HTTPRequestTimeout(request=request)
                if have_metadata_footer:
                    put_res = put_file(cloud_container[0], cloud_container[1],
                                       obj, iter(timeout_reader, ''))
                else:
                    put_res = put_file(cloud_container[0], cloud_container[1],
                                       obj, iter(obj_input))
                upload_size += put_res.size
                etag = put_res.hash
                elapsed_time += time.time() - start_time
            except ChunkReadTimeout:
                return HTTPRequestTimeout(request=request)
            if upload_size:
                self.logger.transfer_rate(
                    'PUT.' + device + '.timing', elapsed_time,
                    upload_size)
            if fsize is not None and fsize != upload_size:
                return HTTPClientDisconnect(request=request)

            footer_meta = {}
            if have_metadata_footer:
                footer_meta = self._read_metadata_footer(
                    mime_documents_iter)

            request_etag = (footer_meta.get('etag') or
                            request.headers.get('etag', '')).lower()
            if request_etag and request_etag != etag:
                return HTTPUnprocessableEntity(request=request)
            metadata = {
                'X-Timestamp': request.timestamp.internal,
                'Content-Type': request.headers['content-type'],
                'ETag': etag,
                'Content-Length': str(upload_size),
            }
            metadata.update(val for val in request.headers.iteritems()
                            if is_sys_or_user_meta('object', val[0]))
            metadata.update(val for val in footer_meta.iteritems()
                            if is_sys_or_user_meta('object', val[0]))
            headers_to_copy = (
                request.headers.get(
                    'X-Backend-Replication-Headers', '').split() +
                list(self.allowed_headers))
            for header_key in headers_to_copy:
                if header_key in request.headers:
                    header_caps = header_key.title()
                    metadata[header_caps] = request.headers[header_key]
            #writer.put(metadata)

            # if the PUT requires a two-phase commit (a data and a commit
            # phase) send the proxy server another 100-continue response
            # to indicate that we are finished writing object data
            if use_multiphase_commit:
                request.environ['wsgi.input'].\
                    send_hundred_continue_response()
                if not self._read_put_commit_message(mime_documents_iter):
                    return HTTPServerError(request=request)
                # got 2nd phase confirmation, write a timestamp.durable
                # state file to indicate a successful PUT

            #writer.commit(request.timestamp)

            # Drain any remaining MIME docs from the socket. There
            # shouldn't be any, but we must read the whole request body.
            try:
                while True:
                    with ChunkReadTimeout(self.client_timeout):
                        _junk_hdrs, _junk_body = next(mime_documents_iter)
                    drain(_junk_body, self.network_chunk_size,
                          self.client_timeout)
            except ChunkReadTimeout:
                raise HTTPClientDisconnect()
            except StopIteration:
                pass

        except (DiskFileXattrNotSupported, DiskFileNoSpace):
            return HTTPInsufficientStorage(drive=device, request=request)

        if request_etag and request_etag != etag:
            return HTTPUnprocessableEntity(request=request)

        update_headers = HeaderKeyDict({
            'x-size': metadata['Content-Length'],
            'x-content-type': metadata['Content-Type'],
            'x-timestamp': metadata['X-Timestamp'],
            'x-etag': metadata['ETag']})
        # apply any container update header overrides sent with request
        self._check_container_override(update_headers, request.headers)
        self._check_container_override(update_headers, footer_meta)
        self.container_update(
            'PUT', account, container, obj, request,
            update_headers,
            device, policy)
        return HTTPCreated(request=request, etag=etag)
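This backend handler accepts the client's ETag either as a request header or, when the proxy announces X-Backend-Obj-Metadata-Footer, inside the trailing MIME footer, and rejects the upload with 422 Unprocessable Entity if it does not match the checksum reported by the backing store. A minimal sketch of that comparison, split into a hypothetical helper whose name and shape are mine while the logic mirrors the example:

def etag_mismatch(request_headers, footer_meta, computed_etag):
    # The footer value takes precedence over the Etag request header,
    # the comparison is case-insensitive, and an absent client ETag
    # never fails the request.
    claimed = (footer_meta.get('etag') or
               request_headers.get('etag', '')).lower()
    return bool(claimed) and claimed != computed_etag.lower()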
Example #54
0
    def PUT(self, req):
        """HTTP PUT request handler."""
        container_info = self.container_info(
            self.account_name, self.container_name)
        container_partition = container_info['partition']
        containers = container_info['nodes']
        req.acl = container_info['write_acl']
        req.environ['swift_sync_key'] = container_info['sync_key']
        object_versions = container_info['versions']
        if 'swift.authorize' in req.environ:
            aresp = req.environ['swift.authorize'](req)
            if aresp:
                return aresp
        if not containers:
            return HTTPNotFound(request=req)
        if 'x-delete-after' in req.headers:
            try:
                x_delete_after = int(req.headers['x-delete-after'])
            except ValueError:
                return HTTPBadRequest(request=req,
                                      content_type='text/plain',
                                      body='Non-integer X-Delete-After')
            req.headers['x-delete-at'] = '%d' % (time.time() + x_delete_after)
        partition, nodes = self.app.object_ring.get_nodes(
            self.account_name, self.container_name, self.object_name)
        # do a HEAD request for container sync and checking object versions
        if 'x-timestamp' in req.headers or \
                (object_versions and not
                 req.environ.get('swift_versioned_copy')):
            hreq = Request.blank(req.path_info, headers={'X-Newest': 'True'},
                                 environ={'REQUEST_METHOD': 'HEAD'})
            hresp = self.GETorHEAD_base(
                hreq, _('Object'), self.app.object_ring, partition,
                hreq.path_info)
        # Used by container sync feature
        if 'x-timestamp' in req.headers:
            try:
                req.headers['X-Timestamp'] = \
                    normalize_timestamp(float(req.headers['x-timestamp']))
                if hresp.environ and 'swift_x_timestamp' in hresp.environ and \
                    float(hresp.environ['swift_x_timestamp']) >= \
                        float(req.headers['x-timestamp']):
                    return HTTPAccepted(request=req)
            except ValueError:
                return HTTPBadRequest(
                    request=req, content_type='text/plain',
                    body='X-Timestamp should be a UNIX timestamp float value; '
                         'was %r' % req.headers['x-timestamp'])
        else:
            req.headers['X-Timestamp'] = normalize_timestamp(time.time())
        # Sometimes the 'content-type' header exists, but is set to None.
        content_type_manually_set = True
        if not req.headers.get('content-type'):
            guessed_type, _junk = mimetypes.guess_type(req.path_info)
            req.headers['Content-Type'] = guessed_type or \
                'application/octet-stream'
            content_type_manually_set = False
        error_response = check_object_creation(req, self.object_name) or \
            check_content_type(req)
        if error_response:
            return error_response
        if object_versions and not req.environ.get('swift_versioned_copy'):
            is_manifest = 'x-object-manifest' in req.headers or \
                          'x-object-manifest' in hresp.headers
            if hresp.status_int != HTTP_NOT_FOUND and not is_manifest:
                # This is a version manifest and needs to be handled
                # differently. First copy the existing data to a new object,
                # then write the data from this request to the version manifest
                # object.
                lcontainer = object_versions.split('/')[0]
                prefix_len = '%03x' % len(self.object_name)
                lprefix = prefix_len + self.object_name + '/'
                ts_source = hresp.environ.get('swift_x_timestamp')
                if ts_source is None:
                    ts_source = time.mktime(time.strptime(
                                            hresp.headers['last-modified'],
                                            '%a, %d %b %Y %H:%M:%S GMT'))
                new_ts = normalize_timestamp(ts_source)
                vers_obj_name = lprefix + new_ts
                copy_headers = {
                    'Destination': '%s/%s' % (lcontainer, vers_obj_name)}
                copy_environ = {'REQUEST_METHOD': 'COPY',
                                'swift_versioned_copy': True
                                }
                copy_req = Request.blank(req.path_info, headers=copy_headers,
                                         environ=copy_environ)
                copy_resp = self.COPY(copy_req)
                if is_client_error(copy_resp.status_int):
                    # missing container or bad permissions
                    return HTTPPreconditionFailed(request=req)
                elif not is_success(copy_resp.status_int):
                    # could not copy the data, bail
                    return HTTPServiceUnavailable(request=req)

        reader = req.environ['wsgi.input'].read
        data_source = iter(lambda: reader(self.app.client_chunk_size), '')
        source_header = req.headers.get('X-Copy-From')
        source_resp = None
        if source_header:
            source_header = unquote(source_header)
            acct = req.path_info.split('/', 2)[1]
            if isinstance(acct, unicode):
                acct = acct.encode('utf-8')
            if not source_header.startswith('/'):
                source_header = '/' + source_header
            source_header = '/' + acct + source_header
            try:
                src_container_name, src_obj_name = \
                    source_header.split('/', 3)[2:]
            except ValueError:
                return HTTPPreconditionFailed(
                    request=req,
                    body='X-Copy-From header must be of the form '
                         '<container name>/<object name>')
            source_req = req.copy_get()
            source_req.path_info = source_header
            source_req.headers['X-Newest'] = 'true'
            orig_obj_name = self.object_name
            orig_container_name = self.container_name
            self.object_name = src_obj_name
            self.container_name = src_container_name
            source_resp = self.GET(source_req)
            if source_resp.status_int >= HTTP_MULTIPLE_CHOICES:
                return source_resp
            self.object_name = orig_obj_name
            self.container_name = orig_container_name
            new_req = Request.blank(req.path_info,
                                    environ=req.environ, headers=req.headers)
            data_source = source_resp.app_iter
            new_req.content_length = source_resp.content_length
            if new_req.content_length is None:
                # This indicates a transfer-encoding: chunked source object,
                # which currently only happens because there are more than
                # CONTAINER_LISTING_LIMIT segments in a segmented object. In
                # this case, we're going to refuse to do the server-side copy.
                return HTTPRequestEntityTooLarge(request=req)
            if new_req.content_length > MAX_FILE_SIZE:
                return HTTPRequestEntityTooLarge(request=req)
            new_req.etag = source_resp.etag
            # we no longer need the X-Copy-From header
            del new_req.headers['X-Copy-From']
            if not content_type_manually_set:
                new_req.headers['Content-Type'] = \
                    source_resp.headers['Content-Type']
            if not config_true_value(
                    new_req.headers.get('x-fresh-metadata', 'false')):
                copy_headers_into(source_resp, new_req)
                copy_headers_into(req, new_req)
            # copy over x-static-large-object for POSTs and manifest copies
            if 'X-Static-Large-Object' in source_resp.headers and \
                    req.params.get('multipart-manifest') == 'get':
                new_req.headers['X-Static-Large-Object'] = \
                    source_resp.headers['X-Static-Large-Object']

            req = new_req

        if 'x-delete-at' in req.headers:
            try:
                x_delete_at = int(req.headers['x-delete-at'])
                if x_delete_at < time.time():
                    return HTTPBadRequest(
                        body='X-Delete-At in past', request=req,
                        content_type='text/plain')
            except ValueError:
                return HTTPBadRequest(request=req, content_type='text/plain',
                                      body='Non-integer X-Delete-At')
            delete_at_container = str(
                x_delete_at /
                self.app.expiring_objects_container_divisor *
                self.app.expiring_objects_container_divisor)
            delete_at_part, delete_at_nodes = \
                self.app.container_ring.get_nodes(
                    self.app.expiring_objects_account, delete_at_container)
        else:
            delete_at_part = delete_at_nodes = None

        node_iter = GreenthreadSafeIterator(
            self.iter_nodes(self.app.object_ring, partition))
        pile = GreenPile(len(nodes))
        chunked = req.headers.get('transfer-encoding')

        outgoing_headers = self._backend_requests(
            req, len(nodes), container_partition, containers,
            delete_at_part, delete_at_nodes)

        for nheaders in outgoing_headers:
            # RFC2616:8.2.3 disallows 100-continue without a body
            if (req.content_length > 0) or chunked:
                nheaders['Expect'] = '100-continue'
            pile.spawn(self._connect_put_node, node_iter, partition,
                       req.path_info, nheaders, self.app.logger.thread_locals)

        conns = [conn for conn in pile if conn]
        if len(conns) <= len(nodes) / 2:
            self.app.logger.error(
                _('Object PUT returning 503, %(conns)s/%(nodes)s '
                  'required connections'),
                {'conns': len(conns), 'nodes': len(nodes) // 2 + 1})
            return HTTPServiceUnavailable(request=req)
        bytes_transferred = 0
        try:
            with ContextPool(len(nodes)) as pool:
                for conn in conns:
                    conn.failed = False
                    conn.queue = Queue(self.app.put_queue_depth)
                    pool.spawn(self._send_file, conn, req.path)
                while True:
                    with ChunkReadTimeout(self.app.client_timeout):
                        try:
                            chunk = next(data_source)
                        except StopIteration:
                            if chunked:
                                [conn.queue.put('0\r\n\r\n') for conn in conns]
                            break
                    bytes_transferred += len(chunk)
                    if bytes_transferred > MAX_FILE_SIZE:
                        return HTTPRequestEntityTooLarge(request=req)
                    for conn in list(conns):
                        if not conn.failed:
                            conn.queue.put(
                                '%x\r\n%s\r\n' % (len(chunk), chunk)
                                if chunked else chunk)
                        else:
                            conns.remove(conn)
                    if len(conns) <= len(nodes) / 2:
                        self.app.logger.error(_(
                            'Object PUT exceptions during'
                            ' send, %(conns)s/%(nodes)s required connections'),
                            {'conns': len(conns), 'nodes': len(nodes) / 2 + 1})
                        return HTTPServiceUnavailable(request=req)
                for conn in conns:
                    if conn.queue.unfinished_tasks:
                        conn.queue.join()
            conns = [conn for conn in conns if not conn.failed]
        except ChunkReadTimeout, err:
            self.app.logger.warn(
                _('ERROR Client read timeout (%ss)'), err.seconds)
            self.app.logger.increment('client_timeouts')
            return HTTPRequestTimeout(request=req)