Code Example #1
    def testS3AssetstoreAdapter(self):
        # Delete the default assetstore
        self.model('assetstore').remove(self.assetstore)
        s3Regex = r'^https://s3.amazonaws.com(:443)?/bucketname/foo/bar'

        params = {
            'name': 'S3 Assetstore',
            'type': AssetstoreType.S3,
            'bucket': '',
            'accessKeyId': 'someKey',
            'secret': 'someSecret',
            'prefix': '/foo/bar/'
        }

        # Validation should fail with empty bucket name
        resp = self.request(path='/assetstore', method='POST', user=self.admin,
                            params=params)
        self.assertStatus(resp, 400)
        self.assertEqual(resp.json, {
            'type': 'validation',
            'field': 'bucket',
            'message': 'Bucket must not be empty.'
        })

        params['bucket'] = 'bucketname'
        # Validation should fail with a missing bucket
        resp = self.request(path='/assetstore', method='POST', user=self.admin,
                            params=params)
        self.assertStatus(resp, 400)
        self.assertEqual(resp.json, {
            'type': 'validation',
            'field': 'bucket',
            'message': 'Unable to write into bucket "bucketname".'
        })

        # Validation should fail with a bogus service name
        params['service'] = 'ftp://nowhere'
        resp = self.request(path='/assetstore', method='POST', user=self.admin,
                            params=params)
        self.assertStatus(resp, 400)
        del params['service']

        # Create a bucket (mocked using moto), so that we can create an
        # assetstore in it
        botoParams = makeBotoConnectParams(params['accessKeyId'],
                                           params['secret'])
        bucket = mock_s3.createBucket(botoParams, 'bucketname')

        # Create an assetstore
        resp = self.request(path='/assetstore', method='POST', user=self.admin,
                            params=params)
        self.assertStatusOk(resp)
        assetstore = self.model('assetstore').load(resp.json['_id'])

        # Set the assetstore to current.  This is really to test the edit
        # assetstore code.
        params['current'] = True
        resp = self.request(path='/assetstore/{}'.format(assetstore['_id']),
                            method='PUT', user=self.admin, params=params)
        self.assertStatusOk(resp)

        # Test init for a single-chunk upload
        folders = self.model('folder').childFolders(self.admin, 'user')
        parentFolder = six.next(folders)
        params = {
            'parentType': 'folder',
            'parentId': parentFolder['_id'],
            'name': 'My File.txt',
            'size': 1024,
            'mimeType': 'text/plain'
        }
        resp = self.request(path='/file', method='POST', user=self.admin,
                            params=params)
        self.assertStatusOk(resp)
        self.assertEqual(resp.json['received'], 0)
        self.assertEqual(resp.json['size'], 1024)
        self.assertEqual(resp.json['behavior'], 's3')

        singleChunkUpload = resp.json
        s3Info = singleChunkUpload['s3']
        self.assertEqual(s3Info['chunked'], False)
        self.assertEqual(type(s3Info['chunkLength']), int)
        self.assertEqual(s3Info['request']['method'], 'PUT')
        six.assertRegex(self, s3Info['request']['url'], s3Regex)
        self.assertEqual(s3Info['request']['headers']['x-amz-acl'], 'private')

        # Test resume of a single-chunk upload
        resp = self.request(path='/file/offset', method='GET', user=self.admin,
                            params={'uploadId': resp.json['_id']})
        self.assertStatusOk(resp)
        self.assertEqual(resp.json['method'], 'PUT')
        self.assertTrue('headers' in resp.json)
        six.assertRegex(self, resp.json['url'], s3Regex)

        # Test finalize for a single-chunk upload
        resp = self.request(path='/file/completion', method='POST',
                            user=self.admin,
                            params={'uploadId': singleChunkUpload['_id']})
        self.assertStatusOk(resp)
        self.assertFalse(resp.json['s3Verified'])
        self.assertEqual(resp.json['size'], 1024)
        self.assertEqual(resp.json['assetstoreId'], str(assetstore['_id']))
        self.assertTrue('s3Key' in resp.json)
        self.assertTrue(resp.json['relpath'].startswith('/bucketname/foo/bar/'))

        # Test init for a multi-chunk upload
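        # (a 5 GiB size is over S3's single-request upload limit, so the
        # adapter must switch to a chunked multipart upload)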
        params['size'] = 1024 * 1024 * 1024 * 5
        resp = self.request(path='/file', method='POST', user=self.admin,
                            params=params)
        self.assertStatusOk(resp)

        multiChunkUpload = resp.json
        s3Info = multiChunkUpload['s3']
        self.assertEqual(s3Info['chunked'], True)
        self.assertEqual(type(s3Info['chunkLength']), int)
        self.assertEqual(s3Info['request']['method'], 'POST')
        six.assertRegex(self, s3Info['request']['url'], s3Regex)

        # Test uploading a chunk
        resp = self.request(path='/file/chunk', method='POST',
                            user=self.admin, params={
                                'uploadId': multiChunkUpload['_id'],
                                'offset': 0,
                                'chunk': json.dumps({
                                    'partNumber': 1,
                                    's3UploadId': 'abcd'
                                })
                            })
        self.assertStatusOk(resp)
        six.assertRegex(self, resp.json['s3']['request']['url'], s3Regex)
        self.assertEqual(resp.json['s3']['request']['method'], 'PUT')

        # We should not be able to call file/offset with multi-chunk upload
        resp = self.request(path='/file/offset', method='GET', user=self.admin,
                            params={'uploadId': multiChunkUpload['_id']})
        self.assertStatus(resp, 400)
        self.assertEqual(resp.json, {
            'type': 'validation',
            'message': 'You should not call requestOffset on a chunked '
                       'direct-to-S3 upload.'
        })

        # Test finalize for a multi-chunk upload
        resp = self.request(path='/file/completion', method='POST',
                            user=self.admin,
                            params={'uploadId': multiChunkUpload['_id']})
        largeFile = resp.json
        self.assertStatusOk(resp)
        six.assertRegex(self, resp.json['s3FinalizeRequest']['url'], s3Regex)
        self.assertEqual(resp.json['s3FinalizeRequest']['method'], 'POST')

        # Test init for an empty file (should be no-op)
        params['size'] = 0
        resp = self.request(path='/file', method='POST', user=self.admin,
                            params=params)
        emptyFile = resp.json
        self.assertStatusOk(resp)
        self.assertFalse('behavior' in resp.json)
        self.assertFalse('s3' in resp.json)

        # Test download for an empty file
        resp = self.request(path='/file/{}/download'.format(emptyFile['_id']),
                            user=self.admin, method='GET', isJson=False)
        self.assertStatusOk(resp)
        self.assertEqual(self.getBody(resp), '')
        self.assertEqual(resp.headers['Content-Length'], 0)
        self.assertEqual(resp.headers['Content-Disposition'],
                         'attachment; filename="My File.txt"')

        # Test download of a non-empty file
        resp = self.request(path='/file/{}/download'.format(largeFile['_id']),
                            user=self.admin, method='GET', isJson=False)
        self.assertStatus(resp, 303)
        six.assertRegex(self, resp.headers['Location'], s3Regex)

        # Test download as part of a streaming zip
        @httmock.all_requests
        def s3_pipe_mock(url, request):
            if (url.netloc.startswith('s3.amazonaws.com') and
                    url.scheme == 'https'):
                return 'dummy file contents'
            else:
                raise Exception('Unexpected url {}'.format(url))

        with httmock.HTTMock(s3_pipe_mock):
            resp = self.request(
                '/folder/{}/download'.format(parentFolder['_id']),
                method='GET', user=self.admin, isJson=False)
            self.assertStatusOk(resp)
            zip = zipfile.ZipFile(io.BytesIO(self.getBody(resp, text=False)),
                                  'r')
            self.assertTrue(zip.testzip() is None)

            extracted = zip.read('Public/My File.txt')
            self.assertEqual(extracted, b'dummy file contents')

        # Attempt to import item directly into user; should fail
        resp = self.request(
            '/assetstore/%s/import' % assetstore['_id'], method='POST', params={
                'importPath': '/foo/bar',
                'destinationType': 'user',
                'destinationId': self.admin['_id']
            }, user=self.admin)
        self.assertStatus(resp, 400)
        self.assertEqual(resp.json['message'],
                         'Keys cannot be imported directly underneath a user.')

        # Import existing data from S3
        resp = self.request('/folder', method='POST', params={
            'parentType': 'folder',
            'parentId': parentFolder['_id'],
            'name': 'import destination'
        }, user=self.admin)
        self.assertStatusOk(resp)
        importFolder = resp.json

        resp = self.request(
            '/assetstore/%s/import' % assetstore['_id'], method='POST', params={
                'importPath': '',
                'destinationType': 'folder',
                'destinationId': importFolder['_id'],
            }, user=self.admin)
        self.assertStatusOk(resp)

        # Data should now appear in the tree
        resp = self.request('/folder', user=self.admin, params={
            'parentId': importFolder['_id'],
            'parentType': 'folder'
        })
        self.assertStatusOk(resp)
        children = resp.json
        self.assertEqual(len(children), 1)
        self.assertEqual(children[0]['name'], 'foo')

        resp = self.request('/folder', user=self.admin, params={
            'parentId': children[0]['_id'],
            'parentType': 'folder'
        })
        self.assertStatusOk(resp)
        children = resp.json
        self.assertEqual(len(children), 1)
        self.assertEqual(children[0]['name'], 'bar')

        resp = self.request('/item', user=self.admin, params={
            'folderId': children[0]['_id']
        })
        self.assertStatusOk(resp)
        self.assertEqual(len(resp.json), 1)
        item = resp.json[0]
        self.assertEqual(item['name'], 'test')
        self.assertEqual(item['size'], 0)

        resp = self.request('/item/%s/files' % str(item['_id']),
                            user=self.admin)
        self.assertStatusOk(resp)
        self.assertEqual(len(resp.json), 1)
        file = resp.json[0]
        self.assertTrue(file['imported'])
        self.assertFalse('relpath' in file)
        self.assertEqual(file['size'], 0)
        self.assertEqual(file['assetstoreId'], str(assetstore['_id']))

        # Deleting an imported file should not delete it from S3
        self.assertTrue(bucket.get_key('/foo/bar/test') is not None)

        with mock.patch('girder.events.daemon.trigger') as daemon:
            resp = self.request('/item/%s' % str(item['_id']), method='DELETE',
                                user=self.admin)
            self.assertStatusOk(resp)
            self.assertEqual(len(daemon.mock_calls), 0)

        # Create the file key in the moto s3 store so that we can test that it
        # gets deleted.
        file = self.model('file').load(largeFile['_id'], user=self.admin)
        bucket.initiate_multipart_upload(file['s3Key'])
        key = bucket.new_key(file['s3Key'])
        key.set_contents_from_string("test")

        # Test delete for a non-empty file
        resp = self.request(path='/file/{}'.format(largeFile['_id']),
                            user=self.admin, method='DELETE')
        self.assertStatusOk(resp)

        # The file should be gone now
        resp = self.request(path='/file/{}/download'.format(largeFile['_id']),
                            user=self.admin, method='GET', isJson=False)
        self.assertStatus(resp, 400)
        # The actual delete may still be in the event queue, so we want to
        # check the S3 bucket directly.
        startTime = time.time()
        while True:
            if bucket.get_key(file['s3Key']) is None:
                break
            if time.time() - startTime > 15:
                break  # give up and fail
            time.sleep(0.1)
        self.assertIsNone(bucket.get_key(file['s3Key']))
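These tests lean on Girder's mock_s3 helper, which is backed by the moto library. For orientation, here is a minimal sketch of what that layer does in isolation, using moto's own decorator (distinct from Girder's mock_s3 module) and boto3 directly; the bucket name and region are illustrative:

import boto3
from moto import mock_s3  # moto's decorator (moto < 5.0; newer versions use mock_aws)


@mock_s3
def bucket_roundtrip():
    # Every S3 call inside this function hits moto's in-memory store.
    client = boto3.client('s3', region_name='us-east-1')
    client.create_bucket(Bucket='bucketname')
    client.put_object(Bucket='bucketname', Key='foo/bar/test', Body=b'')
    obj = client.get_object(Bucket='bucketname', Key='foo/bar/test')
    assert obj['ContentLength'] == 0


bucket_roundtrip()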
Code Example #2
File: file_test.py Project: jbeezley/girder
    def testS3Assetstore(self):
        botoParams = makeBotoConnectParams('access', 'secret')
        mock_s3.createBucket(botoParams, 'b')

        self.model('assetstore').remove(self.model('assetstore').getCurrent())
        assetstore = self.model('assetstore').createS3Assetstore(
            name='test', bucket='b', accessKeyId='access', secret='secret',
            prefix='test')
        self.assetstore = assetstore

        # Initialize the upload
        resp = self.request(
            path='/file', method='POST', user=self.user, params={
                'parentType': 'folder',
                'parentId': self.privateFolder['_id'],
                'name': 'hello.txt',
                'size': len(chunk1) + len(chunk2),
                'mimeType': 'text/plain'
            })
        self.assertStatusOk(resp)

        self.assertFalse(resp.json['s3']['chunked'])
        uploadId = resp.json['_id']
        fields = [('offset', 0), ('uploadId', uploadId)]
        files = [('chunk', 'hello.txt', chunk1)]

        # Send the first chunk, we should get a 400
        resp = self.multipartRequest(
            path='/file/chunk', user=self.user, fields=fields, files=files)
        self.assertStatus(resp, 400)
        self.assertEqual(
            resp.json['message'],
            'Uploads of this length must be sent in a single chunk.')

        # Attempting to send second chunk with incorrect offset should fail
        fields = [('offset', 100), ('uploadId', uploadId)]
        files = [('chunk', 'hello.txt', chunk2)]
        resp = self.multipartRequest(
            path='/file/chunk', user=self.user, fields=fields, files=files)
        self.assertStatus(resp, 400)
        self.assertEqual(
            resp.json['message'],
            'Server has received 0 bytes, but client sent offset 100.')

        # Request offset from server (simulate a resume event)
        resp = self.request(path='/file/offset', method='GET', user=self.user,
                            params={'uploadId': uploadId})
        self.assertStatusOk(resp)

        # Trying to send too many bytes should fail
        currentOffset = resp.json['offset']
        fields = [('offset', resp.json['offset']), ('uploadId', uploadId)]
        files = [('chunk', 'hello.txt', "extra_"+chunk2+"_bytes")]
        resp = self.multipartRequest(
            path='/file/chunk', user=self.user, fields=fields, files=files)
        self.assertStatus(resp, 400)
        self.assertEqual(resp.json, {
            'type': 'validation',
            'message': 'Received too many bytes.'
        })

        # The offset should not have changed
        resp = self.request(path='/file/offset', method='GET', user=self.user,
                            params={'uploadId': uploadId})
        self.assertStatusOk(resp)
        self.assertEqual(resp.json['offset'], currentOffset)

        # Send all in one chunk
        files = [('chunk', 'hello.txt', chunk1 + chunk2)]
        fields = [('offset', 0), ('uploadId', uploadId)]
        resp = self.multipartRequest(
            path='/file/chunk', user=self.user, fields=fields, files=files)
        self.assertStatusOk(resp)

        file = self.model('file').load(resp.json['_id'], force=True)

        self.assertHasKeys(file, ['itemId'])
        self.assertEqual(file['assetstoreId'], self.assetstore['_id'])
        self.assertEqual(file['name'], 'hello.txt')
        self.assertEqual(file['size'], len(chunk1 + chunk2))

        # Make sure metadata is updated in S3 when file info changes
        # (moto API doesn't cover this at all, so we manually mock.)
        with mock.patch('boto.s3.key.Key.set_remote_metadata') as m:
            resp = self.request(
                '/file/%s' % str(file['_id']), method='PUT', params={
                    'mimeType': 'application/csv',
                    'name': 'new name'
                }, user=self.user)
            self.assertEqual(len(m.mock_calls), 1)
            self.assertEqual(m.mock_calls[0][2], {
                'metadata_plus': {
                    'Content-Type': 'application/csv',
                    'Content-Disposition': b'attachment; filename="new name"'
                },
                'metadata_minus': [],
                'preserve_acl': True
            })

        # Enable testing of multi-chunk proxied upload
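        # (shrinking the adapter's chunk length lets these tiny test uploads
        # exercise the chunked code path)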
        S3AssetstoreAdapter.CHUNK_LEN = 5

        resp = self.request(
            path='/file', method='POST', user=self.user, params={
                'parentType': 'folder',
                'parentId': self.privateFolder['_id'],
                'name': 'hello.txt',
                'size': len(chunk1) + len(chunk2),
                'mimeType': 'text/plain'
            })
        self.assertStatusOk(resp)
        self.assertTrue(resp.json['s3']['chunked'])

        uploadId = resp.json['_id']
        fields = [('offset', 0), ('uploadId', uploadId)]
        files = [('chunk', 'hello.txt', chunk1)]

        # Send the first chunk, should now work
        resp = self.multipartRequest(
            path='/file/chunk', user=self.user, fields=fields, files=files)
        self.assertStatusOk(resp)

        resp = self.request(path='/file/offset', user=self.user, params={
            'uploadId': uploadId
        })
        self.assertStatusOk(resp)
        self.assertEqual(resp.json['offset'], len(chunk1))

        # Hack: make moto accept our too-small chunks
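        # (real S3 rejects multipart parts smaller than 5 MiB, except the
        # final part, and moto mirrors that check by default)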
        moto.s3.models.UPLOAD_PART_MIN_SIZE = 5

        # Send the second chunk
        files = [('chunk', 'hello.txt', chunk2)]
        fields = [('offset', resp.json['offset']), ('uploadId', uploadId)]
        resp = self.multipartRequest(
            path='/file/chunk', user=self.user, fields=fields, files=files)
        self.assertStatusOk(resp)

        file = resp.json

        self.assertHasKeys(file, ['itemId'])
        self.assertEqual(file['assetstoreId'], str(self.assetstore['_id']))
        self.assertEqual(file['name'], 'hello.txt')
        self.assertEqual(file['size'], len(chunk1 + chunk2))

        # Test copying a file (we don't assert on the content in this case
        # because the S3 download would fail)
        self._testCopyFile(file, assertContent=False)
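The m.mock_calls[0][2] indexing above is standard unittest.mock behavior: every entry of mock_calls unpacks as a (name, args, kwargs) triple, so index 2 is the keyword-argument dict of that call. In isolation:

from unittest import mock

m = mock.Mock()
m.method(1, key='value')

name, args, kwargs = m.mock_calls[0]
assert name == 'method'
assert args == (1,)
assert kwargs == {'key': 'value'} == m.mock_calls[0][2]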
Code Example #3
File: assetstore_test.py Project: simhaonline/girder
    def testS3AssetstoreAdapter(self):
        # Delete the default assetstore
        Assetstore().remove(self.assetstore)
        s3Regex = (r'^(https://s3.amazonaws.com(:443)?/bucketname/foo/bar|'
                   'https://bucketname.s3.amazonaws.com(:443)?/foo/bar)')

        params = {
            'name': 'S3 Assetstore',
            'type': AssetstoreType.S3,
            'bucket': '',
            'accessKeyId': 'someKey',
            'secret': 'someSecret',
            'prefix': '/foo/bar/'
        }

        # Validation should fail with empty bucket name
        resp = self.request(path='/assetstore',
                            method='POST',
                            user=self.admin,
                            params=params)
        self.assertStatus(resp, 400)
        self.assertEqual(
            resp.json, {
                'type': 'validation',
                'field': 'bucket',
                'message': 'Bucket must not be empty.'
            })

        params['bucket'] = 'bucketname'
        # Validation should fail with a missing bucket
        resp = self.request(path='/assetstore',
                            method='POST',
                            user=self.admin,
                            params=params)
        self.assertStatus(resp, 400)
        self.assertEqual(
            resp.json, {
                'type': 'validation',
                'field': 'bucket',
                'message': 'Unable to write into bucket "bucketname".'
            })

        # Validation should fail with a bogus service name
        params['service'] = 'ftp://nowhere'
        resp = self.request(path='/assetstore',
                            method='POST',
                            user=self.admin,
                            params=params)
        self.assertStatus(resp, 400)
        del params['service']

        # Create a bucket (mocked using moto), so that we can create an assetstore in it
        botoParams = makeBotoConnectParams(params['accessKeyId'],
                                           params['secret'])
        client = mock_s3.createBucket(botoParams, 'bucketname')

        # Create an assetstore
        resp = self.request(path='/assetstore',
                            method='POST',
                            user=self.admin,
                            params=params)
        self.assertStatusOk(resp)
        assetstore = Assetstore().load(resp.json['_id'])

        # Set the assetstore to current.  This is really to test the edit assetstore code.
        params['current'] = True
        resp = self.request(path='/assetstore/%s' % assetstore['_id'],
                            method='PUT',
                            user=self.admin,
                            params=params)
        self.assertStatusOk(resp)

        # Test init for a single-chunk upload
        folders = Folder().childFolders(self.admin, 'user')
        parentFolder = six.next(folders)
        params = {
            'parentType': 'folder',
            'parentId': parentFolder['_id'],
            'name': 'My File.txt',
            'size': 1024,
            'mimeType': 'text/plain'
        }
        resp = self.request(path='/file',
                            method='POST',
                            user=self.admin,
                            params=params)
        self.assertStatusOk(resp)
        self.assertEqual(resp.json['received'], 0)
        self.assertEqual(resp.json['size'], 1024)
        self.assertEqual(resp.json['behavior'], 's3')

        singleChunkUpload = resp.json
        s3Info = singleChunkUpload['s3']
        self.assertEqual(s3Info['chunked'], False)
        self.assertIsInstance(s3Info['chunkLength'], int)
        self.assertEqual(s3Info['request']['method'], 'PUT')
        six.assertRegex(self, s3Info['request']['url'], s3Regex)
        self.assertEqual(s3Info['request']['headers']['x-amz-acl'], 'private')

        # Test resume of a single-chunk upload
        resp = self.request(path='/file/offset',
                            method='GET',
                            user=self.admin,
                            params={'uploadId': resp.json['_id']})
        self.assertStatusOk(resp)
        self.assertEqual(resp.json['method'], 'PUT')
        self.assertTrue('headers' in resp.json)
        six.assertRegex(self, resp.json['url'], s3Regex)

        # Test finalize for a single-chunk upload
        resp = self.request(path='/file/completion',
                            method='POST',
                            user=self.admin,
                            params={'uploadId': singleChunkUpload['_id']})
        self.assertStatusOk(resp)
        self.assertEqual(resp.json['size'], 1024)
        self.assertEqual(resp.json['assetstoreId'], str(assetstore['_id']))
        self.assertFalse('s3Key' in resp.json)
        self.assertFalse('relpath' in resp.json)

        file = File().load(resp.json['_id'], force=True)
        self.assertTrue('s3Key' in file)
        six.assertRegex(self, file['relpath'], '^/bucketname/foo/bar/')

        # Test init for a multi-chunk upload
        params['size'] = 1024 * 1024 * 1024 * 5
        resp = self.request(path='/file',
                            method='POST',
                            user=self.admin,
                            params=params)
        self.assertStatusOk(resp)

        multiChunkUpload = resp.json
        s3Info = multiChunkUpload['s3']
        self.assertEqual(s3Info['chunked'], True)
        self.assertIsInstance(s3Info['chunkLength'], int)
        self.assertEqual(s3Info['request']['method'], 'POST')
        six.assertRegex(self, s3Info['request']['url'], s3Regex)

        # Test uploading a chunk
        resp = self.request(path='/file/chunk',
                            method='POST',
                            user=self.admin,
                            params={
                                'uploadId': multiChunkUpload['_id'],
                                'offset': 0,
                                'chunk': json.dumps({
                                    'partNumber': 1,
                                    's3UploadId': 'abcd'
                                })
                            })
        self.assertStatusOk(resp)
        six.assertRegex(self, resp.json['s3']['request']['url'], s3Regex)
        self.assertEqual(resp.json['s3']['request']['method'], 'PUT')

        # We should not be able to call file/offset with multi-chunk upload
        resp = self.request(path='/file/offset',
                            method='GET',
                            user=self.admin,
                            params={'uploadId': multiChunkUpload['_id']})
        self.assertStatus(resp, 400)
        self.assertEqual(
            resp.json, {
                'type': 'validation',
                'message': 'You should not call requestOffset on a chunked '
                           'direct-to-S3 upload.'
            })

        # Test finalize for a multi-chunk upload
        resp = self.request(path='/file/completion',
                            method='POST',
                            user=self.admin,
                            params={'uploadId': multiChunkUpload['_id']})
        largeFile = resp.json
        self.assertStatusOk(resp)
        six.assertRegex(self, resp.json['s3FinalizeRequest']['url'], s3Regex)
        self.assertEqual(resp.json['s3FinalizeRequest']['method'], 'POST')

        # Test init for an empty file (should be no-op)
        params['size'] = 0
        resp = self.request(path='/file',
                            method='POST',
                            user=self.admin,
                            params=params)
        emptyFile = resp.json
        self.assertStatusOk(resp)
        self.assertFalse('behavior' in resp.json)
        self.assertFalse('s3' in resp.json)

        # Test download for an empty file
        resp = self.request(path='/file/%s/download' % emptyFile['_id'],
                            user=self.admin,
                            method='GET',
                            isJson=False)
        self.assertStatusOk(resp)
        self.assertEqual(self.getBody(resp), '')
        self.assertEqual(resp.headers['Content-Length'], 0)
        self.assertEqual(resp.headers['Content-Disposition'],
                         'attachment; filename="My File.txt"')

        # Test download of a non-empty file
        resp = self.request(path='/file/%s/download' % largeFile['_id'],
                            user=self.admin,
                            method='GET',
                            isJson=False)
        self.assertStatus(resp, 303)
        six.assertRegex(self, resp.headers['Location'], s3Regex)

        # Test download of a non-empty file, with Content-Disposition=inline.
        # Expect the special S3 header response-content-disposition.
        params = {'contentDisposition': 'inline'}
        inlineRegex = r'response-content-disposition=inline%3B%20filename%3D%22My%20File.txt%22'
        resp = self.request(path='/file/%s/download' % largeFile['_id'],
                            user=self.admin,
                            method='GET',
                            isJson=False,
                            params=params)
        self.assertStatus(resp, 303)
        six.assertRegex(self, resp.headers['Location'], s3Regex)
        six.assertRegex(self, resp.headers['Location'], inlineRegex)

        # Test download as part of a streaming zip
        @httmock.all_requests
        def s3_pipe_mock(url, request):
            if 's3.amazonaws.com' in url.netloc and url.scheme == 'https':
                return 'dummy file contents'
            else:
                raise Exception('Unexpected url %s' % url)

        with httmock.HTTMock(s3_pipe_mock):
            resp = self.request('/folder/%s/download' % parentFolder['_id'],
                                method='GET',
                                user=self.admin,
                                isJson=False)
            self.assertStatusOk(resp)
            zip = zipfile.ZipFile(io.BytesIO(self.getBody(resp, text=False)),
                                  'r')
            self.assertTrue(zip.testzip() is None)

            extracted = zip.read('Public/My File.txt')
            self.assertEqual(extracted, b'dummy file contents')

        # Create a "test" key for importing
        client.put_object(Bucket='bucketname', Key='foo/bar/test', Body=b'')

        # Attempt to import item directly into user; should fail
        resp = self.request('/assetstore/%s/import' % assetstore['_id'],
                            method='POST',
                            params={
                                'importPath': '/foo/bar',
                                'destinationType': 'user',
                                'destinationId': self.admin['_id']
                            },
                            user=self.admin)
        self.assertStatus(resp, 400)
        self.assertEqual(
            resp.json['message'],
            'Keys cannot be imported directly underneath a user.')

        # Import existing data from S3
        resp = self.request('/folder',
                            method='POST',
                            params={
                                'parentType': 'folder',
                                'parentId': parentFolder['_id'],
                                'name': 'import destination'
                            },
                            user=self.admin)
        self.assertStatusOk(resp)
        importFolder = resp.json

        resp = self.request('/assetstore/%s/import' % assetstore['_id'],
                            method='POST',
                            params={
                                'importPath': '',
                                'destinationType': 'folder',
                                'destinationId': importFolder['_id'],
                            },
                            user=self.admin)
        self.assertStatusOk(resp)

        # Data should now appear in the tree
        resp = self.request('/folder',
                            user=self.admin,
                            params={
                                'parentId': importFolder['_id'],
                                'parentType': 'folder'
                            })
        self.assertStatusOk(resp)
        children = resp.json
        self.assertEqual(len(children), 1)
        self.assertEqual(children[0]['name'], 'foo')

        resp = self.request('/folder',
                            user=self.admin,
                            params={
                                'parentId': children[0]['_id'],
                                'parentType': 'folder'
                            })
        self.assertStatusOk(resp)
        children = resp.json
        self.assertEqual(len(children), 1)
        self.assertEqual(children[0]['name'], 'bar')

        resp = self.request('/item',
                            user=self.admin,
                            params={'folderId': children[0]['_id']})
        self.assertStatusOk(resp)
        self.assertEqual(len(resp.json), 1)
        item = resp.json[0]
        self.assertEqual(item['name'], 'test')
        self.assertEqual(item['size'], 0)

        resp = self.request('/item/%s/files' % item['_id'], user=self.admin)
        self.assertStatusOk(resp)
        self.assertEqual(len(resp.json), 1)
        self.assertFalse('imported' in resp.json[0])
        self.assertFalse('relpath' in resp.json[0])
        file = File().load(resp.json[0]['_id'], force=True)
        self.assertTrue(file['imported'])
        self.assertFalse('relpath' in file)
        self.assertEqual(file['size'], 0)
        self.assertEqual(file['assetstoreId'], assetstore['_id'])
        self.assertTrue(
            client.get_object(Bucket='bucketname', Key='foo/bar/test')
            is not None)

        # Deleting an imported file should not delete it from S3
        with mock.patch('girder.events.daemon.trigger') as daemon:
            resp = self.request('/item/%s' % str(item['_id']),
                                method='DELETE',
                                user=self.admin)
            self.assertStatusOk(resp)
            self.assertEqual(len(daemon.mock_calls), 0)

        # Create the file key in the moto s3 store so that we can test that it gets deleted.
        file = File().load(largeFile['_id'], user=self.admin)
        client.create_multipart_upload(Bucket='bucketname', Key=file['s3Key'])
        client.put_object(Bucket='bucketname', Key=file['s3Key'], Body=b'test')

        # Test delete for a non-empty file
        resp = self.request(path='/file/%s' % largeFile['_id'],
                            user=self.admin,
                            method='DELETE')
        self.assertStatusOk(resp)

        # The file should be gone now
        resp = self.request(path='/file/%s/download' % largeFile['_id'],
                            user=self.admin,
                            isJson=False)
        self.assertStatus(resp, 400)
        # The actual delete may still be in the event queue, so we want to
        # check the S3 bucket directly.
        startTime = time.time()
        while True:
            try:
                client.get_object(Bucket='bucketname', Key=file['s3Key'])
            except botocore.exceptions.ClientError:
                break
            if time.time() - startTime > 15:
                break  # give up and fail
            time.sleep(0.1)
        with self.assertRaises(botocore.exceptions.ClientError):
            client.get_object(Bucket='bucketname', Key=file['s3Key'])

        resp = self.request(path='/folder/%s' % parentFolder['_id'],
                            method='DELETE',
                            user=self.admin)
        self.assertStatusOk(resp)
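The delete-polling loop above could be factored into a small reusable helper; a sketch (the wait_for_deleted name and signature are ours, not Girder's):

import time

import botocore.exceptions


def wait_for_deleted(client, bucket, key, timeout=15, interval=0.1):
    # Poll S3 until the key 404s or the timeout elapses; True means gone.
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            client.get_object(Bucket=bucket, Key=key)
        except botocore.exceptions.ClientError:
            return True
        time.sleep(interval)
    return False  # still present; the caller should fail the test

With that in place, self.assertTrue(wait_for_deleted(client, 'bucketname', file['s3Key'])) would replace the inline loop and the trailing assertRaises check.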
Code Example #4
File: mock_s3.py Project: richstoner/girder
 def __init__(self, port=_startPort):
     threading.Thread.__init__(self)
     self.port = port
     self.daemon = True
     self.service = 'http://127.0.0.1:%d' % port
     self.botoConnect = makeBotoConnectParams('abc', '123', self.service)
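The thread's service URL exists so that test code can point its S3 connection at the local mock instead of AWS. With boto3, the same redirection is done through endpoint_url; a sketch (the port below is illustrative, standing in for self.service):

import boto3

client = boto3.client(
    's3',
    aws_access_key_id='abc',
    aws_secret_access_key='123',
    endpoint_url='http://127.0.0.1:31800',  # illustrative; self.service in the snippet
)
client.list_buckets()  # answered by the local mock server, never by AWS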
Code Example #5
File: file_test.py Project: polyorca/girder
    def testS3Assetstore(self):
        botoParams = makeBotoConnectParams('access', 'secret')
        bucket = mock_s3.createBucket(botoParams, 'b')

        self.model('assetstore').remove(self.model('assetstore').getCurrent())
        assetstore = self.model('assetstore').createS3Assetstore(
            name='test', bucket='b', accessKeyId='access', secret='secret',
            prefix='test')
        self.assetstore = assetstore

        # Initialize the upload
        resp = self.request(
            path='/file', method='POST', user=self.user, params={
                'parentType': 'folder',
                'parentId': self.privateFolder['_id'],
                'name': 'hello.txt',
                'size': len(chunk1) + len(chunk2),
                'mimeType': 'text/plain'
            })
        self.assertStatusOk(resp)

        self.assertFalse(resp.json['s3']['chunked'])
        uploadId = resp.json['_id']
        fields = [('offset', 0), ('uploadId', uploadId)]
        files = [('chunk', 'hello.txt', chunk1)]

        # Send the first chunk, we should get a 400
        resp = self.multipartRequest(
            path='/file/chunk', user=self.user, fields=fields, files=files)
        self.assertStatus(resp, 400)
        self.assertEqual(
            resp.json['message'], 'Uploads of this length must be sent in a single chunk.')

        # Attempting to send second chunk with incorrect offset should fail
        fields = [('offset', 100), ('uploadId', uploadId)]
        files = [('chunk', 'hello.txt', chunk2)]
        resp = self.multipartRequest(
            path='/file/chunk', user=self.user, fields=fields, files=files)
        self.assertStatus(resp, 400)
        self.assertEqual(
            resp.json['message'], 'Server has received 0 bytes, but client sent offset 100.')

        # Request offset from server (simulate a resume event)
        resp = self.request(
            path='/file/offset', method='GET', user=self.user, params={'uploadId': uploadId})
        self.assertStatusOk(resp)

        @httmock.all_requests
        def mockChunkUpload(url, request):
            """
            We used to be able to use moto to mock the sending of chunks to
            S3; however, we no longer use the boto API to do so internally,
            so we must mock this out at the level of the requests library.
            """
            if url.netloc != 's3.amazonaws.com':
                raise Exception('Unexpected request to host ' + url.netloc)

            body = request.body.read(65536)  # sufficient for now, we have short bodies

            # Actually set the key in moto
            self.assertEqual(url.path[:3], '/b/')
            key = boto.s3.key.Key(bucket=bucket, name=url.path[3:])
            key.set_contents_from_string(body)

            return {
                'status_code': 200
            }

        # Trying to send too many bytes should fail
        currentOffset = resp.json['offset']
        fields = [('offset', resp.json['offset']), ('uploadId', uploadId)]
        files = [('chunk', 'hello.txt', "extra_" + chunk2 + "_bytes")]
        with httmock.HTTMock(mockChunkUpload):
            resp = self.multipartRequest(
                path='/file/chunk', user=self.user, fields=fields, files=files)
        self.assertStatus(resp, 400)
        self.assertEqual(resp.json, {
            'type': 'validation',
            'message': 'Received too many bytes.'
        })

        # The offset should not have changed
        resp = self.request(
            path='/file/offset', method='GET', user=self.user, params={'uploadId': uploadId})
        self.assertStatusOk(resp)
        self.assertEqual(resp.json['offset'], currentOffset)

        # Send all in one chunk
        files = [('chunk', 'hello.txt', chunk1 + chunk2)]
        fields = [('offset', 0), ('uploadId', uploadId)]
        with httmock.HTTMock(mockChunkUpload):
            resp = self.multipartRequest(
                path='/file/chunk', user=self.user, fields=fields, files=files)
        self.assertStatusOk(resp)

        file = self.model('file').load(resp.json['_id'], force=True)

        self.assertHasKeys(file, ['itemId'])
        self.assertEqual(file['assetstoreId'], self.assetstore['_id'])
        self.assertEqual(file['name'], 'hello.txt')
        self.assertEqual(file['size'], len(chunk1 + chunk2))

        # Make sure metadata is updated in S3 when file info changes
        # (moto API doesn't cover this at all, so we manually mock.)
        with mock.patch('boto.s3.key.Key.set_remote_metadata') as m:
            self.request(
                '/file/%s' % str(file['_id']), method='PUT', params={
                    'mimeType': 'application/csv',
                    'name': 'new name'
                }, user=self.user)
            self.assertEqual(len(m.mock_calls), 1)
            self.assertEqual(m.mock_calls[0][2], {
                'metadata_plus': {
                    'Content-Type': 'application/csv',
                    'Content-Disposition': b'attachment; filename="new name"'
                },
                'metadata_minus': [],
                'preserve_acl': True
            })

        # Enable testing of multi-chunk proxied upload
        S3AssetstoreAdapter.CHUNK_LEN = 5

        resp = self.request(
            path='/file', method='POST', user=self.user, params={
                'parentType': 'folder',
                'parentId': self.privateFolder['_id'],
                'name': 'hello.txt',
                'size': len(chunk1) + len(chunk2),
                'mimeType': 'text/plain'
            })
        self.assertStatusOk(resp)
        self.assertTrue(resp.json['s3']['chunked'])

        uploadId = resp.json['_id']
        fields = [('offset', 0), ('uploadId', uploadId)]
        files = [('chunk', 'hello.txt', chunk1)]

        # Send the first chunk, should now work
        with httmock.HTTMock(mockChunkUpload):
            resp = self.multipartRequest(
                path='/file/chunk', user=self.user, fields=fields, files=files)
        self.assertStatusOk(resp)

        resp = self.request(path='/file/offset', user=self.user, params={
            'uploadId': uploadId
        })
        self.assertStatusOk(resp)
        self.assertEqual(resp.json['offset'], len(chunk1))

        # Hack: make moto accept our too-small chunks
        moto.s3.models.UPLOAD_PART_MIN_SIZE = 5

        # Send the second chunk
        with httmock.HTTMock(mockChunkUpload):
            resp = self.request(
                path='/file/chunk', method='POST', user=self.user, body=chunk2, params={
                    'offset': resp.json['offset'],
                    'uploadId': uploadId
                }, type='text/plain')
        self.assertStatusOk(resp)

        file = resp.json

        self.assertHasKeys(file, ['itemId'])
        self.assertEqual(file['assetstoreId'], str(self.assetstore['_id']))
        self.assertEqual(file['name'], 'hello.txt')
        self.assertEqual(file['size'], len(chunk1 + chunk2))

        # Test copying a file (we don't assert on the content in this case
        # because the S3 download would fail)
        self._testCopyFile(file, assertContent=False)
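The mockChunkUpload pattern above, reduced to its essentials: a function decorated with @httmock.all_requests receives (url, request) and returns a response (here a dict with status_code and content), and httmock.HTTMock installs it for every call made through the requests library inside the with block:

import httmock
import requests


@httmock.all_requests
def echo_mock(url, request):
    # url is a urlsplit()-style tuple; request is the outgoing request object
    return {'status_code': 200, 'content': b'ok'}


with httmock.HTTMock(echo_mock):
    assert requests.get('https://s3.amazonaws.com/b/key').content == b'ok'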
Code Example #6
    def testS3Assetstore(self):
        botoParams = makeBotoConnectParams('access', 'secret')
        mock_s3.createBucket(botoParams, 'bname')

        Assetstore().remove(Assetstore().getCurrent())
        assetstore = Assetstore().createS3Assetstore(
            name='test', bucket='bname', accessKeyId='access', secret='secret',
            prefix='test', serverSideEncryption=True)
        self.assetstore = assetstore

        # Initialize the upload
        resp = self.request(
            path='/file', method='POST', user=self.user, params={
                'parentType': 'folder',
                'parentId': self.privateFolder['_id'],
                'name': 'hello.txt',
                'size': len(chunk1) + len(chunk2),
                'mimeType': 'text/plain'
            })
        self.assertStatusOk(resp)

        self.assertFalse(resp.json['s3']['chunked'])
        uploadId = resp.json['_id']

        # Send the first chunk, we should get a 400
        resp = self.request(
            path='/file/chunk', method='POST', user=self.user, body=chunk1, params={
                'uploadId': uploadId
            }, type='application/octet-stream')
        self.assertStatus(resp, 400)
        self.assertEqual(
            resp.json['message'], 'Uploads of this length must be sent in a single chunk.')

        # Attempting to send second chunk with incorrect offset should fail
        resp = self.request(
            path='/file/chunk', method='POST', user=self.user, body=chunk2, params={
                'offset': 100,
                'uploadId': uploadId
            }, type='application/octet-stream')
        self.assertStatus(resp, 400)
        self.assertEqual(
            resp.json['message'], 'Server has received 0 bytes, but client sent offset 100.')

        # Request offset from server (simulate a resume event)
        resp = self.request(
            path='/file/offset', method='GET', user=self.user, params={'uploadId': uploadId})
        self.assertStatusOk(resp)

        initRequests = []

        @httmock.all_requests
        def mockChunkUpload(url, request):
            """
            We used to be able to use moto to mock the sending of chunks to
            S3; however, we no longer use the boto API to do so internally,
            so we must mock this out at the level of the requests library.
            """
            if url.netloc != 'bname.s3.amazonaws.com':
                raise Exception('Unexpected request to host ' + url.netloc)

            body = request.body.read(65536)  # sufficient for now, we have short bodies

            if 'x-amz-meta-uploader-ip' in url.query:
                # this is an init request, not a chunk upload
                initRequests.append(request)

            # Actually set the key in moto
            self.assertTrue(url.path.startswith('/test/'))
            client = boto3.client('s3')
            client.put_object(Bucket='bname', Key=url.path[1:], Body=body)

            return {
                'status_code': 200
            }

        # Trying to send too many bytes should fail
        currentOffset = resp.json['offset']
        with httmock.HTTMock(mockChunkUpload):
            resp = self.request(
                path='/file/chunk', method='POST', body='extra_' + chunk2 + '_bytes', params={
                    'offset': currentOffset,
                    'uploadId': uploadId
                }, user=self.user, type='application/octet-stream')
        self.assertStatus(resp, 400)
        self.assertEqual(resp.json, {
            'type': 'validation',
            'message': 'Received too many bytes.'
        })
        self.assertEqual(len(initRequests), 1)
        self.assertEqual(initRequests[-1].headers['x-amz-server-side-encryption'], 'AES256')

        # The offset should not have changed
        resp = self.request(
            path='/file/offset', method='GET', user=self.user, params={'uploadId': uploadId})
        self.assertStatusOk(resp)
        self.assertEqual(resp.json['offset'], currentOffset)

        # Send all in one chunk
        with httmock.HTTMock(mockChunkUpload):
            resp = self.request(
                path='/file/chunk', method='POST', body=chunk1 + chunk2, user=self.user, params={
                    'uploadId': uploadId
                }, type='application/octet-stream')
        self.assertStatusOk(resp)
        self.assertEqual(len(initRequests), 2)
        self.assertEqual(initRequests[-1].headers['x-amz-server-side-encryption'], 'AES256')

        file = File().load(resp.json['_id'], force=True)

        self.assertHasKeys(file, ['itemId'])
        self.assertEqual(file['assetstoreId'], self.assetstore['_id'])
        self.assertEqual(file['name'], 'hello.txt')
        self.assertEqual(file['size'], len(chunk1 + chunk2))

        resp = self.request('/file/%s' % file['_id'], method='PUT', params={
            'mimeType': 'application/csv',
            'name': 'new name'
        }, user=self.user)
        self.assertStatusOk(resp)

        # Make sure our metadata got updated in S3
        obj = boto3.client('s3').get_object(Bucket='bname', Key=file['s3Key'])
        self.assertEqual(obj['ContentDisposition'], 'attachment; filename="new name"')
        self.assertEqual(obj['ContentType'], 'application/csv')

        # Test with SSE disabled
        self.assetstore['serverSideEncryption'] = False
        self.assetstore = Assetstore().save(self.assetstore)
        initRequests = []

        resp = self.request(
            path='/file', method='POST', user=self.user, params={
                'parentType': 'folder',
                'parentId': self.privateFolder['_id'],
                'name': 'hello.txt',
                'size': len(chunk1) + len(chunk2),
                'mimeType': 'text/plain'
            })
        self.assertStatusOk(resp)
        uploadId = resp.json['_id']

        with httmock.HTTMock(mockChunkUpload):
            resp = self.request(
                path='/file/chunk', method='POST', body=chunk1 + chunk2, user=self.user, params={
                    'uploadId': uploadId
                }, type='application/octet-stream')
        self.assertStatusOk(resp)
        self.assertEqual(len(initRequests), 1)
        self.assertNotIn('x-amz-server-side-encryption', initRequests[0].headers)

        # Enable testing of multi-chunk proxied upload
        S3AssetstoreAdapter.CHUNK_LEN = 5

        resp = self.request(
            path='/file', method='POST', user=self.user, params={
                'parentType': 'folder',
                'parentId': self.privateFolder['_id'],
                'name': 'hello.txt',
                'size': len(chunk1) + len(chunk2),
                'mimeType': 'text/plain'
            })
        self.assertStatusOk(resp)
        self.assertTrue(resp.json['s3']['chunked'])

        uploadId = resp.json['_id']

        # Send the first chunk, should now work
        with httmock.HTTMock(mockChunkUpload):
            resp = self.request(
                path='/file/chunk', method='POST', body=chunk1, user=self.user, params={
                    'uploadId': uploadId
                }, type='application/octet-stream')
        self.assertStatusOk(resp)

        resp = self.request(path='/file/offset', user=self.user, params={
            'uploadId': uploadId
        })
        self.assertStatusOk(resp)
        self.assertEqual(resp.json['offset'], len(chunk1))

        # Hack: make moto accept our too-small chunks
        moto.s3.models.UPLOAD_PART_MIN_SIZE = 5

        # Send the second chunk
        with httmock.HTTMock(mockChunkUpload):
            resp = self.request(
                path='/file/chunk', method='POST', user=self.user, body=chunk2, params={
                    'offset': resp.json['offset'],
                    'uploadId': uploadId
                }, type='text/plain')
        self.assertStatusOk(resp)

        file = resp.json

        self.assertEqual(file['_modelType'], 'file')
        self.assertHasKeys(file, ['itemId'])
        self.assertEqual(file['assetstoreId'], str(self.assetstore['_id']))
        self.assertEqual(file['name'], 'hello.txt')
        self.assertEqual(file['size'], len(chunk1 + chunk2))

        # Test copying a file (we don't assert on the content in this case
        # because the S3 download would fail)
        self._testCopyFile(file, assertContent=False)

        # The file we get back from the rest call doesn't have the s3Key value,
        # so reload the file from the database
        file = File().load(file['_id'], force=True)

        # Mock Serve range requests
        @httmock.urlmatch(netloc=r'^bname.s3.amazonaws.com')
        def s3_range_mock(url, request):
            data = chunk1 + chunk2
            if request.headers.get('range', '').startswith('bytes='):
                start, end = request.headers['range'].split('bytes=')[1].split('-')
                data = data[int(start):int(end) + 1]
            return data

        with httmock.HTTMock(s3_range_mock):
            self._testFileContext(file, chunk1 + chunk2)
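The slicing in s3_range_mock reflects HTTP Range semantics: a header of the form bytes=START-END is inclusive on both ends, hence the + 1 when converting to a Python slice:

header = 'bytes=5-9'
start, end = header.split('bytes=')[1].split('-')
# bytes 5 through 9 inclusive -> Python slice [5:10]
assert b'hello world!'[int(start):int(end) + 1] == b' worl'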
Code Example #7
File: file_test.py Project: richstoner/girder
    def testS3Assetstore(self):
        botoParams = makeBotoConnectParams('access', 'secret')
        mock_s3.createBucket(botoParams, 'b')

        self.model('assetstore').remove(self.model('assetstore').getCurrent())
        assetstore = self.model('assetstore').createS3Assetstore(
            name='test', bucket='b', accessKeyId='access', secret='secret',
            prefix='test')
        self.assetstore = assetstore

        # Initialize the upload
        resp = self.request(
            path='/file', method='POST', user=self.user, params={
                'parentType': 'folder',
                'parentId': self.privateFolder['_id'],
                'name': 'hello.txt',
                'size': len(chunk1) + len(chunk2),
                'mimeType': 'text/plain'
            })
        self.assertStatusOk(resp)

        self.assertFalse(resp.json['s3']['chunked'])
        uploadId = resp.json['_id']
        fields = [('offset', 0), ('uploadId', uploadId)]
        files = [('chunk', 'hello.txt', chunk1)]

        # Send the first chunk, we should get a 400
        resp = self.multipartRequest(
            path='/file/chunk', user=self.user, fields=fields, files=files)
        self.assertStatus(resp, 400)
        self.assertEqual(
            resp.json['message'],
            'Uploads of this length must be sent in a single chunk.')

        # Attempting to send second chunk with incorrect offset should fail
        fields = [('offset', 100), ('uploadId', uploadId)]
        files = [('chunk', 'hello.txt', chunk2)]
        resp = self.multipartRequest(
            path='/file/chunk', user=self.user, fields=fields, files=files)
        self.assertStatus(resp, 400)
        self.assertEqual(
            resp.json['message'],
            'Server has received 0 bytes, but client sent offset 100.')

        # Request offset from server (simulate a resume event)
        resp = self.request(path='/file/offset', method='GET', user=self.user,
                            params={'uploadId': uploadId})
        self.assertStatusOk(resp)

        # Trying to send too many bytes should fail
        currentOffset = resp.json['offset']
        fields = [('offset', resp.json['offset']), ('uploadId', uploadId)]
        files = [('chunk', 'hello.txt', "extra_" + chunk2 + "_bytes")]
        resp = self.multipartRequest(
            path='/file/chunk', user=self.user, fields=fields, files=files)
        self.assertStatus(resp, 400)
        self.assertEqual(resp.json, {
            'type': 'validation',
            'message': 'Received too many bytes.'
        })

        # The offset should not have changed
        resp = self.request(path='/file/offset', method='GET', user=self.user,
                            params={'uploadId': uploadId})
        self.assertStatusOk(resp)
        self.assertEqual(resp.json['offset'], currentOffset)

        # Send all in one chunk
        files = [('chunk', 'hello.txt', chunk1 + chunk2)]
        fields = [('offset', 0), ('uploadId', uploadId)]
        resp = self.multipartRequest(
            path='/file/chunk', user=self.user, fields=fields, files=files)
        self.assertStatusOk(resp)

        file = resp.json

        self.assertHasKeys(file, ['itemId'])
        self.assertEqual(file['assetstoreId'], str(self.assetstore['_id']))
        self.assertEqual(file['name'], 'hello.txt')
        self.assertEqual(file['size'], len(chunk1 + chunk2))

        # Enable testing of multi-chunk proxied upload
        S3AssetstoreAdapter.CHUNK_LEN = 5

        resp = self.request(
            path='/file', method='POST', user=self.user, params={
                'parentType': 'folder',
                'parentId': self.privateFolder['_id'],
                'name': 'hello.txt',
                'size': len(chunk1) + len(chunk2),
                'mimeType': 'text/plain'
            })
        self.assertStatusOk(resp)
        self.assertTrue(resp.json['s3']['chunked'])

        uploadId = resp.json['_id']
        fields = [('offset', 0), ('uploadId', uploadId)]
        files = [('chunk', 'hello.txt', chunk1)]

        # Send the first chunk, should now work
        resp = self.multipartRequest(
            path='/file/chunk', user=self.user, fields=fields, files=files)
        self.assertStatusOk(resp)

        resp = self.request(path='/file/offset', user=self.user, params={
            'uploadId': uploadId
        })
        self.assertStatusOk(resp)
        self.assertEqual(resp.json['offset'], len(chunk1))

        # Hack: make moto accept our too-small chunks
        moto.s3.models.UPLOAD_PART_MIN_SIZE = 5

        # Send the second chunk
        files = [('chunk', 'hello.txt', chunk2)]
        fields = [('offset', resp.json['offset']), ('uploadId', uploadId)]
        resp = self.multipartRequest(
            path='/file/chunk', user=self.user, fields=fields, files=files)
        self.assertStatusOk(resp)

        file = resp.json

        self.assertHasKeys(file, ['itemId'])
        self.assertEqual(file['assetstoreId'], str(self.assetstore['_id']))
        self.assertEqual(file['name'], 'hello.txt')
        self.assertEqual(file['size'], len(chunk1 + chunk2))
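
Note: the chunk1 and chunk2 payloads used throughout these examples are module-level fixtures defined outside the excerpts. A minimal sketch of plausible definitions follows; the exact values are an assumption, and any two short strings work, since each upload declares its size as len(chunk1) + len(chunk2):

    # Hypothetical module-level fixtures; the real values live outside
    # these excerpts. Any two short strings satisfy the size math above.
    chunk1, chunk2 = ('hello ', 'world')
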
Code example #9
File: assetstore_test.py Project: cryos/girder
    def testS3AssetstoreAdapter(self):
        # Delete the default assetstore
        self.model('assetstore').remove(self.assetstore)

        params = {
            'name': 'S3 Assetstore',
            'type': AssetstoreType.S3,
            'bucket': '',
            'accessKeyId': 'someKey',
            'secret': 'someSecret',
            'prefix': '/foo/bar/'
        }

        # Validation should fail with empty bucket name
        resp = self.request(path='/assetstore', method='POST', user=self.admin,
                            params=params)
        self.assertStatus(resp, 400)
        self.assertEqual(resp.json, {
            'type': 'validation',
            'field': 'bucket',
            'message': 'Bucket must not be empty.'
        })

        params['bucket'] = 'bucketname'
        # Validation should fail with a missing bucket
        resp = self.request(path='/assetstore', method='POST', user=self.admin,
                            params=params)
        self.assertStatus(resp, 400)
        self.assertEqual(resp.json, {
            'type': 'validation',
            'field': 'bucket',
            'message': 'Unable to write into bucket "bucketname".'
        })

        # Validation should fail with a bogus service name
        params['service'] = 'ftp://nowhere'
        resp = self.request(path='/assetstore', method='POST', user=self.admin,
                            params=params)
        self.assertStatus(resp, 400)
        del params['service']

        # Create a bucket (mocked using moto), so that we can create an
        # assetstore in it
        botoParams = makeBotoConnectParams(params['accessKeyId'],
                                           params['secret'])
        bucket = mock_s3.createBucket(botoParams, 'bucketname')

        # Create an assetstore
        resp = self.request(path='/assetstore', method='POST', user=self.admin,
                            params=params)
        self.assertStatusOk(resp)
        assetstore = self.model('assetstore').load(resp.json['_id'])

        # Set the assetstore to current.  This is really to test the edit
        # assetstore code.
        params['current'] = True
        resp = self.request(path='/assetstore/{}'.format(assetstore['_id']),
                            method='PUT', user=self.admin, params=params)
        self.assertStatusOk(resp)

        # Test init for a single-chunk upload
        folders = self.model('folder').childFolders(self.admin, 'user')
        parentFolder = next(folders)
        params = {
            'parentType': 'folder',
            'parentId': parentFolder['_id'],
            'name': 'My File.txt',
            'size': 1024,
            'mimeType': 'text/plain'
        }
        resp = self.request(path='/file', method='POST', user=self.admin,
                            params=params)
        self.assertStatusOk(resp)
        self.assertEqual(resp.json['received'], 0)
        self.assertEqual(resp.json['size'], 1024)
        self.assertEqual(resp.json['behavior'], 's3')

        singleChunkUpload = resp.json
        s3Info = singleChunkUpload['s3']
        self.assertEqual(s3Info['chunked'], False)
        self.assertEqual(type(s3Info['chunkLength']), int)
        self.assertEqual(s3Info['request']['method'], 'PUT')
        self.assertTrue(s3Info['request']['url'].startswith(
                        'https://s3.amazonaws.com/bucketname/foo/bar'))
        self.assertEqual(s3Info['request']['headers']['x-amz-acl'], 'private')

        # Test resume of a single-chunk upload
        resp = self.request(path='/file/offset', method='GET', user=self.admin,
                            params={'uploadId': resp.json['_id']})
        self.assertStatusOk(resp)
        self.assertEqual(resp.json['method'], 'PUT')
        self.assertTrue('headers' in resp.json)
        self.assertTrue(resp.json['url'].startswith(
            'https://s3.amazonaws.com/bucketname/foo/bar/'))

        # Test finalize for a single-chunk upload
        resp = self.request(path='/file/completion', method='POST',
                            user=self.admin,
                            params={'uploadId': singleChunkUpload['_id']})
        self.assertStatusOk(resp)
        self.assertFalse(resp.json['s3Verified'])
        self.assertEqual(resp.json['size'], 1024)
        self.assertEqual(resp.json['assetstoreId'], str(assetstore['_id']))
        self.assertTrue('s3Key' in resp.json)
        self.assertTrue(resp.json['relpath'].startswith(
            '/bucketname/foo/bar/'))

        # Test init for a multi-chunk upload
        params['size'] = 1024 * 1024 * 1024 * 5
        resp = self.request(path='/file', method='POST', user=self.admin,
                            params=params)
        self.assertStatusOk(resp)

        multiChunkUpload = resp.json
        s3Info = multiChunkUpload['s3']
        self.assertEqual(s3Info['chunked'], True)
        self.assertEqual(type(s3Info['chunkLength']), int)
        self.assertEqual(s3Info['request']['method'], 'POST')
        self.assertTrue(s3Info['request']['url'].startswith(
                        'https://s3.amazonaws.com/bucketname/foo/bar'))

        # Test uploading a chunk
        resp = self.request(path='/file/chunk', method='POST',
                            user=self.admin, params={
                                'uploadId': multiChunkUpload['_id'],
                                'offset': 0,
                                'chunk': json.dumps({
                                    'partNumber': 1,
                                    's3UploadId': 'abcd'
                                })
                            })
        self.assertStatusOk(resp)
        self.assertTrue(resp.json['s3']['request']['url'].startswith(
                        'https://s3.amazonaws.com/bucketname/foo/bar'))
        self.assertEqual(resp.json['s3']['request']['method'], 'PUT')

        # We should not be able to call file/offset with multi-chunk upload
        resp = self.request(path='/file/offset', method='GET', user=self.admin,
                            params={'uploadId': multiChunkUpload['_id']})
        self.assertStatus(resp, 400)
        self.assertEqual(resp.json, {
            'type': 'validation',
            'message': 'Do not call requestOffset on a chunked S3 upload.'
        })

        # Test finalize for a multi-chunk upload
        resp = self.request(path='/file/completion', method='POST',
                            user=self.admin,
                            params={'uploadId': multiChunkUpload['_id']})
        largeFile = resp.json
        self.assertStatusOk(resp)
        self.assertTrue(resp.json['s3FinalizeRequest']['url'].startswith(
                        'https://s3.amazonaws.com/bucketname/foo/bar'))
        self.assertEqual(resp.json['s3FinalizeRequest']['method'], 'POST')

        # Test init for an empty file (should be no-op)
        params['size'] = 0
        resp = self.request(path='/file', method='POST', user=self.admin,
                            params=params)
        emptyFile = resp.json
        self.assertStatusOk(resp)
        self.assertFalse('behavior' in resp.json)
        self.assertFalse('s3' in resp.json)

        # Test download for an empty file
        resp = self.request(path='/file/{}/download'.format(emptyFile['_id']),
                            user=self.admin, method='GET', isJson=False)
        self.assertStatusOk(resp)
        self.assertEqual(resp.collapse_body(), '')
        self.assertEqual(resp.headers['Content-Length'], '0')
        self.assertEqual(resp.headers['Content-Disposition'],
                         'attachment; filename="My File.txt"')

        # Test download of a non-empty file
        resp = self.request(path='/file/{}/download'.format(largeFile['_id']),
                            user=self.admin, method='GET', isJson=False)
        self.assertStatus(resp, 303)
        self.assertTrue(resp.headers['Location'].startswith(
            'https://s3.amazonaws.com/bucketname/foo/bar/'))

        # Test download as part of a streaming zip
        @httmock.all_requests
        def s3_pipe_mock(url, request):
            if url.netloc == 's3.amazonaws.com' and url.scheme == 'https':
                return 'dummy file contents'
            else:
                raise Exception('Unexpected url {}'.format(url))

        with httmock.HTTMock(s3_pipe_mock):
            resp = self.request(
                '/folder/{}/download'.format(parentFolder['_id']),
                method='GET', user=self.admin, isJson=False)
            self.assertStatusOk(resp)
            zip = zipfile.ZipFile(io.BytesIO(resp.collapse_body()), 'r')
            self.assertTrue(zip.testzip() is None)

            extracted = zip.read('Public/My File.txt')
            self.assertEqual(extracted, 'dummy file contents')

        # Create the file key in the moto s3 store so that we can test that it
        # gets deleted.
        file = self.model('file').load(largeFile['_id'])
        bucket.initiate_multipart_upload(file['s3Key'])
        key = bucket.new_key(file['s3Key'])
        key.set_contents_from_string("test")

        # Test delete for a non-empty file
        resp = self.request(path='/file/{}'.format(largeFile['_id']),
                            user=self.admin, method='DELETE')
        self.assertStatusOk(resp)

        # The file should be gone now
        resp = self.request(path='/file/{}/download'.format(largeFile['_id']),
                            user=self.admin, method='GET', isJson=False)
        self.assertStatus(resp, 400)
        # The actual delete may still be in the event queue, so we want to
        # check the S3 bucket directly.
        startTime = time.time()
        while True:
            if bucket.get_key(file['s3Key']) is None:
                break
            if time.time() - startTime > 15:
                break  # give up and fail
            time.sleep(0.1)
        self.assertIsNone(bucket.get_key(file['s3Key']))
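
Note: the final loop above busy-waits because the actual S3 delete may still be on the event queue. A hypothetical helper (not part of Girder's test harness) that factors the same polling pattern into a reusable function:

    import time

    def waitFor(predicate, timeout=15, interval=0.1):
        # Poll predicate until it returns True or timeout seconds elapse;
        # return the final result so the caller can assert on it.
        start = time.time()
        while time.time() - start < timeout:
            if predicate():
                return True
            time.sleep(interval)
        return predicate()

    # Usage, replacing the open-coded loop:
    #     self.assertTrue(waitFor(
    #         lambda: bucket.get_key(file['s3Key']) is None))
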
Code example #10
File: file_test.py Project: kotfic/girder
    def testS3Assetstore(self):
        botoParams = makeBotoConnectParams("access", "secret")
        mock_s3.createBucket(botoParams, "b")

        self.model("assetstore").remove(self.model("assetstore").getCurrent())
        assetstore = self.model("assetstore").createS3Assetstore(
            name="test", bucket="b", accessKeyId="access", secret="secret", prefix="test"
        )
        self.assetstore = assetstore

        # Initialize the upload
        resp = self.request(
            path="/file",
            method="POST",
            user=self.user,
            params={
                "parentType": "folder",
                "parentId": self.privateFolder["_id"],
                "name": "hello.txt",
                "size": len(chunk1) + len(chunk2),
                "mimeType": "text/plain",
            },
        )
        self.assertStatusOk(resp)

        self.assertFalse(resp.json["s3"]["chunked"])
        uploadId = resp.json["_id"]
        fields = [("offset", 0), ("uploadId", uploadId)]
        files = [("chunk", "hello.txt", chunk1)]

        # Send the first chunk, we should get a 400
        resp = self.multipartRequest(path="/file/chunk", user=self.user, fields=fields, files=files)
        self.assertStatus(resp, 400)
        self.assertEqual(resp.json["message"], "Uploads of this length must be sent in a single chunk.")

        # Attempting to send second chunk with incorrect offset should fail
        fields = [("offset", 100), ("uploadId", uploadId)]
        files = [("chunk", "hello.txt", chunk2)]
        resp = self.multipartRequest(path="/file/chunk", user=self.user, fields=fields, files=files)
        self.assertStatus(resp, 400)
        self.assertEqual(resp.json["message"], "Server has received 0 bytes, but client sent offset 100.")

        # Request offset from server (simulate a resume event)
        resp = self.request(path="/file/offset", method="GET", user=self.user, params={"uploadId": uploadId})
        self.assertStatusOk(resp)

        # Trying to send too many bytes should fail
        currentOffset = resp.json["offset"]
        fields = [("offset", resp.json["offset"]), ("uploadId", uploadId)]
        files = [("chunk", "hello.txt", "extra_" + chunk2 + "_bytes")]
        resp = self.multipartRequest(path="/file/chunk", user=self.user, fields=fields, files=files)
        self.assertStatus(resp, 400)
        self.assertEqual(resp.json, {"type": "validation", "message": "Received too many bytes."})

        # The offset should not have changed
        resp = self.request(path="/file/offset", method="GET", user=self.user, params={"uploadId": uploadId})
        self.assertStatusOk(resp)
        self.assertEqual(resp.json["offset"], currentOffset)

        # Send all in one chunk
        files = [("chunk", "hello.txt", chunk1 + chunk2)]
        fields = [("offset", 0), ("uploadId", uploadId)]
        resp = self.multipartRequest(path="/file/chunk", user=self.user, fields=fields, files=files)
        self.assertStatusOk(resp)

        file = self.model("file").load(resp.json["_id"], force=True)

        self.assertHasKeys(file, ["itemId"])
        self.assertEqual(file["assetstoreId"], self.assetstore["_id"])
        self.assertEqual(file["name"], "hello.txt")
        self.assertEqual(file["size"], len(chunk1 + chunk2))

        # Make sure metadata is updated in S3 when file info changes
        # (moto API doesn't cover this at all, so we manually mock.)
        with mock.patch("boto.s3.key.Key.set_remote_metadata") as m:
            resp = self.request(
                "/file/%s" % str(file["_id"]),
                method="PUT",
                params={"mimeType": "application/csv", "name": "new name"},
                user=self.user,
            )
            self.assertEqual(len(m.mock_calls), 1)
            self.assertEqual(
                m.mock_calls[0][2],
                {
                    "metadata_plus": {
                        "Content-Type": "application/csv",
                        "Content-Disposition": b'attachment; filename="new name"',
                    },
                    "metadata_minus": [],
                    "preserve_acl": True,
                },
            )

        # Enable testing of multi-chunk proxied upload
        S3AssetstoreAdapter.CHUNK_LEN = 5

        resp = self.request(
            path="/file",
            method="POST",
            user=self.user,
            params={
                "parentType": "folder",
                "parentId": self.privateFolder["_id"],
                "name": "hello.txt",
                "size": len(chunk1) + len(chunk2),
                "mimeType": "text/plain",
            },
        )
        self.assertStatusOk(resp)
        self.assertTrue(resp.json["s3"]["chunked"])

        uploadId = resp.json["_id"]
        fields = [("offset", 0), ("uploadId", uploadId)]
        files = [("chunk", "hello.txt", chunk1)]

        # Send the first chunk, should now work
        resp = self.multipartRequest(path="/file/chunk", user=self.user, fields=fields, files=files)
        self.assertStatusOk(resp)

        resp = self.request(path="/file/offset", user=self.user, params={"uploadId": uploadId})
        self.assertStatusOk(resp)
        self.assertEqual(resp.json["offset"], len(chunk1))

        # Hack: make moto accept our too-small chunks
        moto.s3.models.UPLOAD_PART_MIN_SIZE = 5

        # Send the second chunk
        files = [("chunk", "hello.txt", chunk2)]
        fields = [("offset", resp.json["offset"]), ("uploadId", uploadId)]
        resp = self.multipartRequest(path="/file/chunk", user=self.user, fields=fields, files=files)
        self.assertStatusOk(resp)

        file = resp.json

        self.assertHasKeys(file, ["itemId"])
        self.assertEqual(file["assetstoreId"], str(self.assetstore["_id"]))
        self.assertEqual(file["name"], "hello.txt")
        self.assertEqual(file["size"], len(chunk1 + chunk2))

        # Test copying a file (we don't assert the content in this case
        # because the S3 download will fail)
        self._testCopyFile(file, assertContent=False)
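
Note: S3AssetstoreAdapter.CHUNK_LEN = 5 and moto.s3.models.UPLOAD_PART_MIN_SIZE = 5 mutate shared state for the rest of the process. A sketch of how the same overrides could be scoped so they are restored automatically, assuming the same imports these tests already use plus the standard mock library:

    import moto.s3.models
    from unittest import mock  # or the standalone mock package on Python 2

    # Both patches are reverted when the block exits, even on failure.
    with mock.patch.object(S3AssetstoreAdapter, 'CHUNK_LEN', 5), \
            mock.patch.object(moto.s3.models, 'UPLOAD_PART_MIN_SIZE', 5):
        pass  # perform the multi-chunk upload requests here
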
Code example #11
File: file_test.py Project: nagyist/girder
    def testS3Assetstore(self):
        botoParams = makeBotoConnectParams('access', 'secret')
        mock_s3.createBucket(botoParams, 'b')

        self.model('assetstore').remove(self.model('assetstore').getCurrent())
        assetstore = self.model('assetstore').createS3Assetstore(
            name='test',
            bucket='b',
            accessKeyId='access',
            secret='secret',
            prefix='test')
        self.assetstore = assetstore

        # Initialize the upload
        resp = self.request(path='/file',
                            method='POST',
                            user=self.user,
                            params={
                                'parentType': 'folder',
                                'parentId': self.privateFolder['_id'],
                                'name': 'hello.txt',
                                'size': len(chunk1) + len(chunk2),
                                'mimeType': 'text/plain'
                            })
        self.assertStatusOk(resp)

        self.assertFalse(resp.json['s3']['chunked'])
        uploadId = resp.json['_id']
        fields = [('offset', 0), ('uploadId', uploadId)]
        files = [('chunk', 'hello.txt', chunk1)]

        # Send the first chunk, we should get a 400
        resp = self.multipartRequest(path='/file/chunk',
                                     user=self.user,
                                     fields=fields,
                                     files=files)
        self.assertStatus(resp, 400)
        self.assertEqual(
            resp.json['message'],
            'Uploads of this length must be sent in a single chunk.')

        # Attempting to send second chunk with incorrect offset should fail
        fields = [('offset', 100), ('uploadId', uploadId)]
        files = [('chunk', 'hello.txt', chunk2)]
        resp = self.multipartRequest(path='/file/chunk',
                                     user=self.user,
                                     fields=fields,
                                     files=files)
        self.assertStatus(resp, 400)
        self.assertEqual(
            resp.json['message'],
            'Server has received 0 bytes, but client sent offset 100.')

        # Request offset from server (simulate a resume event)
        resp = self.request(path='/file/offset',
                            method='GET',
                            user=self.user,
                            params={'uploadId': uploadId})
        self.assertStatusOk(resp)

        # Trying to send too many bytes should fail
        currentOffset = resp.json['offset']
        fields = [('offset', resp.json['offset']), ('uploadId', uploadId)]
        files = [('chunk', 'hello.txt', "extra_" + chunk2 + "_bytes")]
        resp = self.multipartRequest(path='/file/chunk',
                                     user=self.user,
                                     fields=fields,
                                     files=files)
        self.assertStatus(resp, 400)
        self.assertEqual(resp.json, {
            'type': 'validation',
            'message': 'Received too many bytes.'
        })

        # The offset should not have changed
        resp = self.request(path='/file/offset',
                            method='GET',
                            user=self.user,
                            params={'uploadId': uploadId})
        self.assertStatusOk(resp)
        self.assertEqual(resp.json['offset'], currentOffset)

        # Send all in one chunk
        files = [('chunk', 'hello.txt', chunk1 + chunk2)]
        fields = [('offset', 0), ('uploadId', uploadId)]
        resp = self.multipartRequest(path='/file/chunk',
                                     user=self.user,
                                     fields=fields,
                                     files=files)
        self.assertStatusOk(resp)

        file = self.model('file').load(resp.json['_id'], force=True)

        self.assertHasKeys(file, ['itemId'])
        self.assertEqual(file['assetstoreId'], self.assetstore['_id'])
        self.assertEqual(file['name'], 'hello.txt')
        self.assertEqual(file['size'], len(chunk1 + chunk2))

        # Make sure metadata is updated in S3 when file info changes
        # (moto API doesn't cover this at all, so we manually mock.)
        with mock.patch('boto.s3.key.Key.set_remote_metadata') as m:
            resp = self.request('/file/%s' % str(file['_id']),
                                method='PUT',
                                params={
                                    'mimeType': 'application/csv',
                                    'name': 'new name'
                                },
                                user=self.user)
            self.assertEqual(len(m.mock_calls), 1)
            self.assertEqual(
                m.mock_calls[0][2], {
                    'metadata_plus': {
                        'Content-Type': 'application/csv',
                        'Content-Disposition':
                        b'attachment; filename="new name"'
                    },
                    'metadata_minus': [],
                    'preserve_acl': True
                })

        # Enable testing of multi-chunk proxied upload
        S3AssetstoreAdapter.CHUNK_LEN = 5

        resp = self.request(path='/file',
                            method='POST',
                            user=self.user,
                            params={
                                'parentType': 'folder',
                                'parentId': self.privateFolder['_id'],
                                'name': 'hello.txt',
                                'size': len(chunk1) + len(chunk2),
                                'mimeType': 'text/plain'
                            })
        self.assertStatusOk(resp)
        self.assertTrue(resp.json['s3']['chunked'])

        uploadId = resp.json['_id']
        fields = [('offset', 0), ('uploadId', uploadId)]
        files = [('chunk', 'hello.txt', chunk1)]

        # Send the first chunk, should now work
        resp = self.multipartRequest(path='/file/chunk',
                                     user=self.user,
                                     fields=fields,
                                     files=files)
        self.assertStatusOk(resp)

        resp = self.request(path='/file/offset',
                            user=self.user,
                            params={'uploadId': uploadId})
        self.assertStatusOk(resp)
        self.assertEqual(resp.json['offset'], len(chunk1))

        # Hack: make moto accept our too-small chunks
        moto.s3.models.UPLOAD_PART_MIN_SIZE = 5

        # Send the second chunk
        files = [('chunk', 'hello.txt', chunk2)]
        fields = [('offset', resp.json['offset']), ('uploadId', uploadId)]
        resp = self.multipartRequest(path='/file/chunk',
                                     user=self.user,
                                     fields=fields,
                                     files=files)
        self.assertStatusOk(resp)

        file = resp.json

        self.assertHasKeys(file, ['itemId'])
        self.assertEqual(file['assetstoreId'], str(self.assetstore['_id']))
        self.assertEqual(file['name'], 'hello.txt')
        self.assertEqual(file['size'], len(chunk1 + chunk2))

        # Test copying a file (we don't assert the content in this case
        # because the S3 download will fail)
        self._testCopyFile(file, assertContent=False)
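
Note: taken together, these tests exercise a simple resumable-upload protocol: initialize with POST /file, recover the server-side position with GET /file/offset, and send bytes with POST /file/chunk. A client-side sketch against a hypothetical server URL, using the raw-body chunk form that appears in code example #12 below; authentication and error handling are omitted, and the session is assumed to carry a valid Girder token:

    import requests

    API = 'http://localhost:8080/api/v1'  # hypothetical Girder server

    def resumableUpload(session, folderId, name, data, chunkLen=64 * 1024):
        # Initialize the upload; the response carries the upload id.
        upload = session.post(API + '/file', params={
            'parentType': 'folder',
            'parentId': folderId,
            'name': name,
            'size': len(data),
            'mimeType': 'text/plain',
        }).json()
        # Ask the server where to start; this is also how a client
        # resumes after an interruption.
        offset = session.get(API + '/file/offset', params={
            'uploadId': upload['_id']}).json()['offset']
        resp = None
        while offset < len(data):
            chunk = data[offset:offset + chunkLen]
            resp = session.post(API + '/file/chunk', params={
                'uploadId': upload['_id'], 'offset': offset}, data=chunk)
            resp.raise_for_status()
            offset += len(chunk)
        # The final chunk response is the created file document.
        return resp.json() if resp is not None else upload
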
Code example #12
File: file_test.py Project: data-exp-lab/girder
    def testS3Assetstore(self):
        botoParams = makeBotoConnectParams('access', 'secret')
        mock_s3.createBucket(botoParams, 'b')

        Assetstore().remove(Assetstore().getCurrent())
        assetstore = Assetstore().createS3Assetstore(
            name='test', bucket='b', accessKeyId='access', secret='secret',
            prefix='test')
        self.assetstore = assetstore

        # Initialize the upload
        resp = self.request(
            path='/file', method='POST', user=self.user, params={
                'parentType': 'folder',
                'parentId': self.privateFolder['_id'],
                'name': 'hello.txt',
                'size': len(chunk1) + len(chunk2),
                'mimeType': 'text/plain'
            })
        self.assertStatusOk(resp)

        self.assertFalse(resp.json['s3']['chunked'])
        uploadId = resp.json['_id']
        fields = [('offset', 0), ('uploadId', uploadId)]
        files = [('chunk', 'hello.txt', chunk1)]

        # Send the first chunk, we should get a 400
        resp = self.multipartRequest(
            path='/file/chunk', user=self.user, fields=fields, files=files)
        self.assertStatus(resp, 400)
        self.assertEqual(
            resp.json['message'], 'Uploads of this length must be sent in a single chunk.')

        # Attempting to send second chunk with incorrect offset should fail
        fields = [('offset', 100), ('uploadId', uploadId)]
        files = [('chunk', 'hello.txt', chunk2)]
        resp = self.multipartRequest(
            path='/file/chunk', user=self.user, fields=fields, files=files)
        self.assertStatus(resp, 400)
        self.assertEqual(
            resp.json['message'], 'Server has received 0 bytes, but client sent offset 100.')

        # Request offset from server (simulate a resume event)
        resp = self.request(
            path='/file/offset', method='GET', user=self.user, params={'uploadId': uploadId})
        self.assertStatusOk(resp)

        @httmock.all_requests
        def mockChunkUpload(url, request):
            """
            We used to be able to use moto to mock the sending of chunks to
            S3, however we now no longer use the boto API to do so internally,
            and must mock this out at the level of requests.
            """
            if url.netloc != 's3.amazonaws.com':
                raise Exception('Unexpected request to host ' + url.netloc)

            body = request.body.read(65536)  # sufficient for now, we have short bodies

            # Actually set the key in moto
            self.assertEqual(url.path[:3], '/b/')
            client = boto3.client('s3')
            client.put_object(Bucket='b', Key=url.path[3:], Body=body)

            return {
                'status_code': 200
            }

        # Trying to send too many bytes should fail
        currentOffset = resp.json['offset']
        fields = [('offset', resp.json['offset']), ('uploadId', uploadId)]
        files = [('chunk', 'hello.txt', 'extra_' + chunk2 + '_bytes')]
        with httmock.HTTMock(mockChunkUpload):
            resp = self.multipartRequest(
                path='/file/chunk', user=self.user, fields=fields, files=files)
        self.assertStatus(resp, 400)
        self.assertEqual(resp.json, {
            'type': 'validation',
            'message': 'Received too many bytes.'
        })

        # The offset should not have changed
        resp = self.request(
            path='/file/offset', method='GET', user=self.user, params={'uploadId': uploadId})
        self.assertStatusOk(resp)
        self.assertEqual(resp.json['offset'], currentOffset)

        # Send all in one chunk
        files = [('chunk', 'hello.txt', chunk1 + chunk2)]
        fields = [('offset', 0), ('uploadId', uploadId)]
        with httmock.HTTMock(mockChunkUpload):
            resp = self.multipartRequest(
                path='/file/chunk', user=self.user, fields=fields, files=files)
        self.assertStatusOk(resp)

        file = File().load(resp.json['_id'], force=True)

        self.assertHasKeys(file, ['itemId'])
        self.assertEqual(file['assetstoreId'], self.assetstore['_id'])
        self.assertEqual(file['name'], 'hello.txt')
        self.assertEqual(file['size'], len(chunk1 + chunk2))

        resp = self.request('/file/%s' % file['_id'], method='PUT', params={
            'mimeType': 'application/csv',
            'name': 'new name'
        }, user=self.user)
        self.assertStatusOk(resp)

        # Make sure our metadata got updated in S3
        obj = boto3.client('s3').get_object(Bucket='b', Key=file['s3Key'])
        self.assertEqual(obj['ContentDisposition'], 'attachment; filename="new name"')
        self.assertEqual(obj['ContentType'], 'application/csv')

        # Enable testing of multi-chunk proxied upload
        S3AssetstoreAdapter.CHUNK_LEN = 5

        resp = self.request(
            path='/file', method='POST', user=self.user, params={
                'parentType': 'folder',
                'parentId': self.privateFolder['_id'],
                'name': 'hello.txt',
                'size': len(chunk1) + len(chunk2),
                'mimeType': 'text/plain'
            })
        self.assertStatusOk(resp)
        self.assertTrue(resp.json['s3']['chunked'])

        uploadId = resp.json['_id']
        fields = [('offset', 0), ('uploadId', uploadId)]
        files = [('chunk', 'hello.txt', chunk1)]

        # Send the first chunk, should now work
        with httmock.HTTMock(mockChunkUpload):
            resp = self.multipartRequest(
                path='/file/chunk', user=self.user, fields=fields, files=files)
        self.assertStatusOk(resp)

        resp = self.request(path='/file/offset', user=self.user, params={
            'uploadId': uploadId
        })
        self.assertStatusOk(resp)
        self.assertEqual(resp.json['offset'], len(chunk1))

        # Hack: make moto accept our too-small chunks
        moto.s3.models.UPLOAD_PART_MIN_SIZE = 5

        # Send the second chunk
        with httmock.HTTMock(mockChunkUpload):
            resp = self.request(
                path='/file/chunk', method='POST', user=self.user, body=chunk2, params={
                    'offset': resp.json['offset'],
                    'uploadId': uploadId
                }, type='text/plain')
        self.assertStatusOk(resp)

        file = resp.json

        self.assertEqual(file['_modelType'], 'file')
        self.assertHasKeys(file, ['itemId'])
        self.assertEqual(file['assetstoreId'], str(self.assetstore['_id']))
        self.assertEqual(file['name'], 'hello.txt')
        self.assertEqual(file['size'], len(chunk1 + chunk2))

        # Test copying a file (we don't assert the content in this case
        # because the S3 download will fail)
        self._testCopyFile(file, assertContent=False)

        # The file we get back from the rest call doesn't have the s3Key value,
        # so reload the file from the database
        file = File().load(file['_id'], force=True)

        # Mock Serve range requests
        @httmock.urlmatch(netloc=r'^s3\.amazonaws\.com')
        def s3_range_mock(url, request):
            data = chunk1 + chunk2
            if request.headers.get('range', '').startswith('bytes='):
                start, end = request.headers['range'].split('bytes=')[1].split('-')
                data = data[int(start):int(end)+1]
            return data

        with httmock.HTTMock(s3_range_mock):
            self._testFileContext(file, chunk1 + chunk2)
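
Note: the mockChunkUpload handler above writes into moto's in-memory S3 through boto3. For reference, a standalone sketch of that moto/boto3 round trip, assuming a moto release that still exports the mock_s3 decorator (moto 5 renamed it mock_aws); this decorator is unrelated to the Girder test helper that is also called mock_s3 in these examples:

    import boto3
    from moto import mock_s3  # distinct from Girder's mock_s3 helper

    @mock_s3
    def bucketRoundTrip():
        # Every call below hits moto's in-memory S3, never real AWS.
        client = boto3.client('s3', region_name='us-east-1')
        client.create_bucket(Bucket='b')
        client.put_object(Bucket='b', Key='test/hello.txt',
                          Body=b'hello world')
        obj = client.get_object(Bucket='b', Key='test/hello.txt')
        assert obj['Body'].read() == b'hello world'
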
Code example #13
    def testS3Assetstore(self):
        botoParams = makeBotoConnectParams('access', 'secret')
        mock_s3.createBucket(botoParams, 'b')

        self.model('assetstore').remove(self.model('assetstore').getCurrent())
        assetstore = self.model('assetstore').createS3Assetstore(
            name='test',
            bucket='b',
            accessKeyId='access',
            secret='secret',
            prefix='test')
        self.assetstore = assetstore

        # Initialize the upload
        resp = self.request(path='/file',
                            method='POST',
                            user=self.user,
                            params={
                                'parentType': 'folder',
                                'parentId': self.privateFolder['_id'],
                                'name': 'hello.txt',
                                'size': len(chunk1) + len(chunk2),
                                'mimeType': 'text/plain'
                            })
        self.assertStatusOk(resp)

        self.assertFalse(resp.json['s3']['chunked'])
        uploadId = resp.json['_id']
        fields = [('offset', 0), ('uploadId', uploadId)]
        files = [('chunk', 'hello.txt', chunk1)]

        # Send the first chunk, we should get a 400
        resp = self.multipartRequest(path='/file/chunk',
                                     user=self.user,
                                     fields=fields,
                                     files=files)
        self.assertStatus(resp, 400)
        self.assertEqual(
            resp.json['message'],
            'Uploads of this length must be sent in a single chunk.')

        # Attempting to send second chunk with incorrect offset should fail
        fields = [('offset', 100), ('uploadId', uploadId)]
        files = [('chunk', 'hello.txt', chunk2)]
        resp = self.multipartRequest(path='/file/chunk',
                                     user=self.user,
                                     fields=fields,
                                     files=files)
        self.assertStatus(resp, 400)
        self.assertEqual(
            resp.json['message'],
            'Server has received 0 bytes, but client sent offset 100.')

        # Request offset from server (simulate a resume event)
        resp = self.request(path='/file/offset',
                            method='GET',
                            user=self.user,
                            params={'uploadId': uploadId})
        self.assertStatusOk(resp)

        # Trying to send too many bytes should fail
        currentOffset = resp.json['offset']
        fields = [('offset', resp.json['offset']), ('uploadId', uploadId)]
        files = [('chunk', 'hello.txt', "extra_" + chunk2 + "_bytes")]
        resp = self.multipartRequest(path='/file/chunk',
                                     user=self.user,
                                     fields=fields,
                                     files=files)
        self.assertStatus(resp, 400)
        self.assertEqual(resp.json, {
            'type': 'validation',
            'message': 'Received too many bytes.'
        })

        # The offset should not have changed
        resp = self.request(path='/file/offset',
                            method='GET',
                            user=self.user,
                            params={'uploadId': uploadId})
        self.assertStatusOk(resp)
        self.assertEqual(resp.json['offset'], currentOffset)

        # Send all in one chunk
        files = [('chunk', 'hello.txt', chunk1 + chunk2)]
        fields = [('offset', 0), ('uploadId', uploadId)]
        resp = self.multipartRequest(path='/file/chunk',
                                     user=self.user,
                                     fields=fields,
                                     files=files)
        self.assertStatusOk(resp)

        file = resp.json

        self.assertHasKeys(file, ['itemId'])
        self.assertEqual(file['assetstoreId'], str(self.assetstore['_id']))
        self.assertEqual(file['name'], 'hello.txt')
        self.assertEqual(file['size'], len(chunk1 + chunk2))

        # Enable testing of multi-chunk proxied upload
        S3AssetstoreAdapter.CHUNK_LEN = 5

        resp = self.request(path='/file',
                            method='POST',
                            user=self.user,
                            params={
                                'parentType': 'folder',
                                'parentId': self.privateFolder['_id'],
                                'name': 'hello.txt',
                                'size': len(chunk1) + len(chunk2),
                                'mimeType': 'text/plain'
                            })
        self.assertStatusOk(resp)
        self.assertTrue(resp.json['s3']['chunked'])

        uploadId = resp.json['_id']
        fields = [('offset', 0), ('uploadId', uploadId)]
        files = [('chunk', 'hello.txt', chunk1)]

        # Send the first chunk, should now work
        resp = self.multipartRequest(path='/file/chunk',
                                     user=self.user,
                                     fields=fields,
                                     files=files)
        self.assertStatusOk(resp)

        resp = self.request(path='/file/offset',
                            user=self.user,
                            params={'uploadId': uploadId})
        self.assertStatusOk(resp)
        self.assertEqual(resp.json['offset'], len(chunk1))

        # Hack: make moto accept our too-small chunks
        moto.s3.models.UPLOAD_PART_MIN_SIZE = 5

        # Send the second chunk
        files = [('chunk', 'hello.txt', chunk2)]
        fields = [('offset', resp.json['offset']), ('uploadId', uploadId)]
        resp = self.multipartRequest(path='/file/chunk',
                                     user=self.user,
                                     fields=fields,
                                     files=files)
        self.assertStatusOk(resp)

        file = resp.json

        self.assertHasKeys(file, ['itemId'])
        self.assertEqual(file['assetstoreId'], str(self.assetstore['_id']))
        self.assertEqual(file['name'], 'hello.txt')
        self.assertEqual(file['size'], len(chunk1 + chunk2))
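
Note: the single-chunk behavior asserted throughout these examples (a PUT to a signed S3 URL carrying an x-amz-acl: private header) corresponds to a presigned request. A standalone boto3 sketch of generating such a URL; the bucket, key, and expiry below are hypothetical, not values taken from Girder, and configured AWS credentials are assumed for signing:

    import boto3

    client = boto3.client('s3')
    # The uploader then PUTs the file body to this URL with the matching
    # x-amz-acl and Content-Type headers.
    url = client.generate_presigned_url(
        'put_object',
        Params={
            'Bucket': 'bucketname',
            'Key': 'foo/bar/My File.txt',
            'ACL': 'private',
            'ContentType': 'text/plain',
        },
        ExpiresIn=3600)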