def test_account_sas(self):
    """Query table entities through an account-level SAS token (live only)."""
    # SAS URL is calculated from storage key, so this test runs live only
    if TestMode.need_recording_file(self.test_mode):
        return

    # Arrange: two entities in a fresh table, same PartitionKey.
    table_name = self._create_table()
    entity = {"PartitionKey": "test", "RowKey": "test1", "text": "hello"}
    self.ts.insert_entity(table_name, entity)
    entity["RowKey"] = "test2"
    self.ts.insert_entity(table_name, entity)
    # Account SAS scoped to objects, read-only; start is one minute in the
    # past (presumably to tolerate clock skew — TODO confirm), expiry 1h out.
    token = self.ts.generate_account_shared_access_signature(
        ResourceTypes.OBJECT,
        AccountPermissions.READ,
        datetime.utcnow() + timedelta(hours=1),
        datetime.utcnow() - timedelta(minutes=1),
    )

    # Act: query with a service authenticated only by the SAS token.
    service = TableService(account_name=self.settings.STORAGE_ACCOUNT_NAME, sas_token=token)
    self._set_service_options(service, self.settings)
    entities = list(service.query_entities(table_name))

    # Assert
    self.assertEqual(len(entities), 2)
    self.assertEqual(entities[0].text, "hello")
    self.assertEqual(entities[1].text, "hello")
def test_shared_read_access_file(self):
    """Read a file anonymously over HTTP using a file-level read SAS."""
    # SAS URL is calculated from storage key, so this test runs live only
    if TestMode.need_recording_file(self.test_mode):
        return

    # Arrange: read-only SAS on a single file (no directory -> None).
    file_name = self._create_file()
    token = self.fs.generate_file_shared_access_signature(
        self.share_name,
        None,
        file_name,
        permission=FilePermissions.READ,
        expiry=datetime.utcnow() + timedelta(hours=1),
    )

    # Act: fetch the file with plain requests, no SDK auth involved.
    url = self.fs.make_file_url(
        self.share_name,
        None,
        file_name,
        sas_token=token,
    )
    response = requests.get(url)

    # Assert
    self.assertTrue(response.ok)
    self.assertEqual(self.short_byte_data, response.content)
def test_shared_delete_access_file(self):
    """Delete a file over HTTP using a file-level delete SAS."""
    # SAS URL is calculated from storage key, so this test runs live only
    if TestMode.need_recording_file(self.test_mode):
        return

    # Arrange: delete-only SAS on a single file.
    file_name = self._create_file()
    token = self.fs.generate_file_shared_access_signature(
        self.share_name,
        None,
        file_name,
        permission=FilePermissions.DELETE,
        expiry=datetime.utcnow() + timedelta(hours=1),
    )
    url = self.fs.make_file_url(
        self.share_name,
        None,
        file_name,
        sas_token=token,
    )

    # Act: raw HTTP DELETE against the SAS URL.
    response = requests.delete(url)

    # Assert: delete succeeded and the file is gone.
    self.assertTrue(response.ok)
    with self.assertRaises(AzureMissingResourceHttpError):
        file = self.fs.get_file_to_bytes(self.share_name, None, file_name)
def test_generate_account_sas(self):
    """Verify an account SAS (blob service, object scope, read) grants
    anonymous HTTP read access to a blob."""
    # SAS URL is calculated from storage key, so this test runs live only.
    # BUG FIX: was TestMode.need_recordingfile (missing underscore), which
    # would raise AttributeError instead of skipping in recorded mode; every
    # sibling test spells it need_recording_file.
    if TestMode.need_recording_file(self.test_mode):
        return

    # Arrange
    token = self.account.generate_shared_access_signature(
        Services.BLOB,
        ResourceTypes.OBJECT,
        AccountPermissions.READ,
        datetime.utcnow() + timedelta(hours=1),
    )
    service = self.account.create_block_blob_service()
    data = b'shared access signature with read permission on blob'
    container_name = 'container1'
    blob_name = 'blob1.txt'
    try:
        service.create_container(container_name)
        service.create_blob_from_bytes(container_name, blob_name, data)

        # Act: fetch the blob anonymously with the SAS appended to the URL.
        url = service.make_blob_url(
            container_name,
            blob_name,
            sas_token=token,
        )
        response = requests.get(url)

        # Assert
        self.assertTrue(response.ok)
        self.assertEqual(data, response.content)
    finally:
        # Always clean up the live container, even on assertion failure.
        service.delete_container(container_name)
def test_sas_access_file(self):
    """Download a file through a FileService authenticated only by a SAS."""
    # SAS URL is calculated from storage key, so this test runs live only
    if TestMode.need_recording_file(self.test_mode):
        return

    # Arrange: read-only SAS on the file.
    file_name = self._create_file()
    token = self.fs.generate_file_shared_access_signature(
        self.share_name,
        None,
        file_name,
        permission=FilePermissions.READ,
        expiry=datetime.utcnow() + timedelta(hours=1),
    )

    # Act: new service instance carries no account key, only the SAS.
    service = FileService(
        self.settings.STORAGE_ACCOUNT_NAME,
        sas_token=token,
        request_session=requests.Session(),
    )
    self._set_test_proxy(service, self.settings)
    result = service.get_file_to_bytes(self.share_name, None, file_name)

    # Assert
    self.assertEqual(self.short_byte_data, result.content)
def test_sas_signed_identifier(self):
    """Query entities through a table SAS that references a stored access
    policy (signed identifier) instead of inline permissions."""
    # SAS URL is calculated from storage key, so this test runs live only
    if TestMode.need_recording_file(self.test_mode):
        return

    # Arrange: store a QUERY policy on the table, then mint a SAS that
    # carries only the policy id.
    entity = self._insert_random_entity()

    access_policy = AccessPolicy()
    access_policy.start = '2011-10-11'
    access_policy.expiry = '2018-10-12'
    access_policy.permission = TablePermissions.QUERY
    identifiers = {'testid': access_policy}
    self.ts.set_table_acl(self.table_name, identifiers)

    token = self.ts.generate_table_shared_access_signature(
        self.table_name,
        id='testid',
    )

    # Act
    service = TableService(
        account_name=self.settings.STORAGE_ACCOUNT_NAME,
        sas_token=token,
    )
    self._set_service_options(service, self.settings)
    # BUG FIX: the query previously went through self.ts (key-authenticated),
    # so the SAS token was never exercised; query via the SAS-backed service,
    # matching the sibling SAS tests.
    entities = list(service.query_entities(
        self.table_name,
        filter="PartitionKey eq '{}'".format(entity.PartitionKey)))

    # Assert
    self.assertEqual(len(entities), 1)
    self._assert_default_entity(entities[0])
def test_create_large_blob_from_stream_chunked_upload_with_properties(self):
    """Chunked stream upload of a large blob with custom content settings."""
    # parallel tests introduce random order of requests, can only run live
    if TestMode.need_recording_file(self.test_mode):
        return

    # Arrange: write LARGE_BLOB_SIZE random bytes to a scratch file.
    blob_name = self._get_blob_reference()
    data = bytearray(os.urandom(LARGE_BLOB_SIZE))
    with open(FILE_PATH, 'wb') as stream:
        stream.write(data)

    # Act: upload from the stream, attaching content-type/language settings.
    content_settings = ContentSettings(content_type='image/png',
                                       content_language='spanish')
    with open(FILE_PATH, 'rb') as stream:
        self.bs.create_blob_from_stream(self.container_name, blob_name,
                                        stream,
                                        content_settings=content_settings)

    # Assert: bytes round-trip and the settings stick on the blob.
    self.assertBlobEqual(self.container_name, blob_name, data)
    properties = self.bs.get_blob_properties(self.container_name,
                                             blob_name).properties
    self.assertEqual(properties.content_settings.content_type,
                     content_settings.content_type)
    self.assertEqual(properties.content_settings.content_language,
                     content_settings.content_language)
def test_sas_process(self):
    """Dequeue a message using a queue SAS with PROCESS permission."""
    # SAS URL is calculated from storage key, so this test runs live only
    if TestMode.need_recording_file(self.test_mode):
        return

    # Arrange: one message in a fresh queue, PROCESS-only SAS.
    queue_name = self._create_queue()
    self.qs.put_message(queue_name, u'message1')
    token = self.qs.generate_queue_shared_access_signature(
        queue_name,
        QueuePermissions.PROCESS,
        datetime.utcnow() + timedelta(hours=1),
    )

    # Act: service authenticated only by the SAS token.
    service = QueueService(
        account_name=self.settings.STORAGE_ACCOUNT_NAME,
        sas_token=token,
    )
    self._set_test_proxy(service, self.settings)
    result = service.get_messages(queue_name)

    # Assert
    self.assertIsNotNone(result)
    self.assertEqual(1, len(result))
    message = result[0]
    self.assertIsNotNone(message)
    self.assertNotEqual('', message.id)
    self.assertEqual(u'message1', message.content)
def test_copy_source_sas_is_scrubbed_off(self):
    """Logs of a sync copy must not leak the source SAS signature or the
    SharedKey authorization value."""
    # SAS URL is calculated from storage key, so this test runs live only
    if TestMode.need_recording_file(self.test_mode):
        return

    # Arrange
    dest_blob_name = self.get_resource_name('destblob')

    # parse out the signed signature ('sig' query parameter value)
    token_components = parse_qs(self.source_blob_url)
    signed_signature = token_components[
        _QueryStringConstants.SIGNED_SIGNATURE][0]

    # Act: capture the SDK's own request logging during the copy.
    with LogCaptured(self) as log_captured:
        self.bs.copy_blob(self.container_name, dest_blob_name,
                          self.source_blob_url, requires_sync=True)
        log_as_str = log_captured.getvalue()

    # Assert
    # make sure the query parameter 'sig' is logged, but its value is not
    self.assertTrue(
        _QueryStringConstants.SIGNED_SIGNATURE in log_as_str)
    self.assertFalse(signed_signature in log_as_str)
    # make sure authorization header is logged, but its value is not
    # the keyword SharedKey is present in the authorization header's value
    self.assertTrue(_AUTHORIZATION_HEADER_NAME in log_as_str)
    self.assertFalse('SharedKey' in log_as_str)
def test_sas_delete(self):
    """Delete an entity using a table SAS with DELETE permission."""
    # SAS URL is calculated from storage key, so this test runs live only
    if TestMode.need_recording_file(self.test_mode):
        return

    # Arrange
    entity = self._insert_random_entity()
    token = self.ts.generate_table_shared_access_signature(
        self.table_name,
        TablePermissions.DELETE,
        datetime.utcnow() + timedelta(hours=1),
    )

    # Act: delete through a SAS-only service.
    service = TableService(
        account_name=self.settings.STORAGE_ACCOUNT_NAME,
        sas_token=token,
    )
    self._set_test_proxy(service, self.settings)
    service.delete_entity(self.table_name, entity.PartitionKey,
                          entity.RowKey)

    # Assert: key-authenticated read confirms the entity is gone.
    with self.assertRaises(AzureMissingResourceHttpError):
        self.ts.get_entity(self.table_name, entity.PartitionKey,
                           entity.RowKey)
def test_sas_update(self):
    """Update a dequeued message using a queue SAS with UPDATE permission."""
    # SAS URL is calculated from storage key, so this test runs live only
    if TestMode.need_recording_file(self.test_mode):
        return

    # Arrange: dequeue via the key-authed client to obtain a pop receipt.
    queue_name = self._create_queue()
    self.qs.put_message(queue_name, u'message1')
    token = self.qs.generate_queue_shared_access_signature(
        queue_name,
        QueuePermissions.UPDATE,
        datetime.utcnow() + timedelta(hours=1),
    )
    result = self.qs.get_messages(queue_name)

    # Act: update the message through a SAS-only service; timeout 0 makes
    # it immediately visible again for the verification read below.
    service = QueueService(
        account_name=self.settings.STORAGE_ACCOUNT_NAME,
        sas_token=token,
    )
    self._set_test_proxy(service, self.settings)
    service.update_message(
        queue_name,
        result[0].id,
        result[0].pop_receipt,
        visibility_timeout=0,
        content=u'updatedmessage1',
    )

    # Assert
    result = self.qs.get_messages(queue_name)
    self.assertEqual(u'updatedmessage1', result[0].content)
def test_get_file_exact_chunk_size(self):
    """Download a file sized exactly at single-get + one chunk boundary."""
    # parallel tests introduce random order of requests, can only run live
    if TestMode.need_recording_file(self.test_mode):
        return

    # Arrange: size chosen so the download is split into exact chunks.
    file_name = self._get_file_reference()
    byte_data = self.get_random_bytes(self.fs.MAX_SINGLE_GET_SIZE +
                                      self.fs.MAX_CHUNK_GET_SIZE)
    self.fs.create_file_from_bytes(self.share_name, self.directory_name,
                                   file_name, byte_data)

    progress = []

    def callback(current, total):
        # Record each (bytes downloaded so far, total bytes) tick.
        progress.append((current, total))

    # Act
    file = self.fs.get_file_to_bytes(self.share_name, self.directory_name,
                                     file_name,
                                     progress_callback=callback)

    # Assert
    self.assertEqual(byte_data, file.content)
    self.assert_download_progress(len(byte_data),
                                  self.fs.MAX_CHUNK_GET_SIZE,
                                  self.fs.MAX_SINGLE_GET_SIZE, progress)
def test_get_file_to_text_with_progress(self):
    """Download a text file larger than a single get, tracking progress."""
    # parallel tests introduce random order of requests, can only run live
    if TestMode.need_recording_file(self.test_mode):
        return

    # Arrange: +1 byte over the single-get limit forces chunked download.
    text_file = self.get_resource_name('textfile')
    text_data = self.get_random_text_data(self.fs.MAX_SINGLE_GET_SIZE + 1)
    self.fs.create_file_from_text(self.share_name, self.directory_name,
                                  text_file, text_data)

    progress = []

    def callback(current, total):
        progress.append((current, total))

    # Act
    file = self.fs.get_file_to_text(self.share_name, self.directory_name,
                                    text_file, progress_callback=callback)

    # Assert: progress is measured in encoded bytes, not characters.
    self.assertEqual(text_data, file.content)
    self.assert_download_progress(len(text_data.encode('utf-8')),
                                  self.fs.MAX_CHUNK_GET_SIZE,
                                  self.fs.MAX_SINGLE_GET_SIZE, progress)
def test_ranged_get_file_to_path_invalid_range_non_parallel(self):
    """A ranged download whose end exceeds the file size is clamped to the
    actual file end rather than failing."""
    # parallel tests introduce random order of requests, can only run live
    if TestMode.need_recording_file(self.test_mode):
        return

    # Arrange: small 1 KiB file.
    file_size = 1024
    file_data = self.get_random_bytes(file_size)
    file_name = self._get_file_reference()
    self.fs.create_file_from_bytes(self.share_name, self.directory_name,
                                   file_name, file_data)

    # Act: request far past the end of the file.
    end_range = 2 * self.fs.MAX_SINGLE_GET_SIZE
    file = self.fs.get_file_to_path(self.share_name, self.directory_name,
                                    file_name, FILE_PATH, start_range=1,
                                    end_range=end_range)

    # Assert: only the bytes that exist (from offset 1) were written.
    self.assertIsInstance(file, File)
    with open(FILE_PATH, 'rb') as stream:
        actual = stream.read()
        self.assertEqual(file_data[1:file_size], actual)
def test_generate_account_sas(self):
    """Verify an account SAS (blob service, object scope, read) grants
    anonymous HTTP read access to a blob."""
    # SAS URL is calculated from storage key, so this test runs live only
    if TestMode.need_recording_file(self.test_mode):
        return

    # Arrange
    token = self.account.generate_shared_access_signature(
        Services.BLOB,
        ResourceTypes.OBJECT,
        AccountPermissions.READ,
        datetime.utcnow() + timedelta(hours=1),
    )
    service = self.account.create_block_blob_service()
    data = b'shared access signature with read permission on blob'
    container_name = 'container1'
    blob_name = 'blob1.txt'
    try:
        service.create_container(container_name)
        service.create_blob_from_bytes(container_name, blob_name, data)

        # Act: fetch anonymously with the SAS appended to the URL.
        url = service.make_blob_url(
            container_name,
            blob_name,
            sas_token=token,
        )
        response = requests.get(url)

        # Assert
        self.assertTrue(response.ok)
        self.assertEqual(data, response.content)
    finally:
        # Clean up the live container even if an assertion failed.
        service.delete_container(container_name)
def test_sas_signature_is_scrubbed_off(self):
    """Request logging must show the 'sig' parameter name but never its
    signature value."""
    # SAS URL is calculated from storage key, so this test runs live only
    if TestMode.need_recording_file(self.test_mode):
        return

    # Arrange: container read SAS; extract its raw signature value.
    token = self.bs.generate_container_shared_access_signature(
        self.container_name,
        permission=ContainerPermissions.READ,
        expiry=datetime.utcnow() + timedelta(hours=1),
    )
    # parse out the signed signature
    token_components = parse_qs(token)
    signed_signature = token_components[
        _QueryStringConstants.SIGNED_SIGNATURE][0]

    bs_with_sas = BlockBlobService(
        account_name=self.settings.STORAGE_ACCOUNT_NAME,
        sas_token=token,
        protocol=self.settings.PROTOCOL)

    # Act: make any SAS-authenticated call and capture the logs.
    with LogCaptured(self) as log_captured:
        bs_with_sas.get_blob_account_information(self.container_name)
        log_as_str = log_captured.getvalue()

    # Assert
    # make sure the query parameter 'sig' is logged, but its value is not
    self.assertTrue(
        _QueryStringConstants.SIGNED_SIGNATURE in log_as_str)
    self.assertFalse(signed_signature in log_as_str)
def test_sas_update(self):
    """Update an entity using a table SAS with UPDATE permission."""
    # SAS URL is calculated from storage key, so this test runs live only
    if TestMode.need_recording_file(self.test_mode):
        return

    # Arrange
    entity = self._insert_random_entity()
    token = self.ts.generate_table_shared_access_signature(
        self.table_name,
        TablePermissions.UPDATE,
        datetime.utcnow() + timedelta(hours=1),
    )

    # Act: update through a SAS-only service.
    service = TableService(
        account_name=self.settings.STORAGE_ACCOUNT_NAME,
        sas_token=token,
    )
    self._set_service_options(service, self.settings)
    updated_entity = self._create_updated_entity_dict(entity.PartitionKey,
                                                      entity.RowKey)
    resp = service.update_entity(self.table_name, updated_entity)

    # Assert: re-read with the key-authenticated client to verify.
    received_entity = self.ts.get_entity(self.table_name,
                                         entity.PartitionKey,
                                         entity.RowKey)
    self._assert_updated_entity(received_entity)
def test_sas_add_inside_range(self):
    """Insert an entity whose keys fall inside the PK/RK range the ADD SAS
    is restricted to."""
    # SAS URL is calculated from storage key, so this test runs live only
    if TestMode.need_recording_file(self.test_mode):
        return

    # Arrange: SAS restricted to exactly PartitionKey 'test', RowKey 'test1'.
    token = self.ts.generate_table_shared_access_signature(
        self.table_name,
        TablePermissions.ADD,
        datetime.utcnow() + timedelta(hours=1),
        start_pk='test', start_rk='test1',
        end_pk='test', end_rk='test1',
    )

    # Act: the inserted entity matches the permitted range.
    service = TableService(
        account_name=self.settings.STORAGE_ACCOUNT_NAME,
        sas_token=token,
    )
    self._set_test_proxy(service, self.settings)
    entity = self._create_random_entity_dict('test', 'test1')
    service.insert_entity(self.table_name, entity)

    # Assert
    resp = self.ts.get_entity(self.table_name, 'test', 'test1')
    self._assert_default_entity(resp)
def test_sas_process(self):
    """Dequeue a message using a queue SAS with PROCESS permission."""
    # SAS URL is calculated from storage key, so this test runs live only
    if TestMode.need_recording_file(self.test_mode):
        return

    # Arrange: one message in a fresh queue, PROCESS-only SAS.
    queue_name = self._create_queue()
    self.qs.put_message(queue_name, u'message1')
    token = self.qs.generate_queue_shared_access_signature(
        queue_name,
        QueuePermissions.PROCESS,
        datetime.utcnow() + timedelta(hours=1),
    )

    # Act
    service = QueueService(
        account_name=self.settings.STORAGE_ACCOUNT_NAME,
        sas_token=token,
    )
    self._set_service_options(service, self.settings)
    result = service.get_messages(queue_name)

    # Assert
    self.assertIsNotNone(result)
    self.assertEqual(1, len(result))
    message = result[0]
    self.assertIsNotNone(message)
    self.assertNotEqual('', message.id)
    self.assertEqual(u'message1', message.content)
def test_sas_update(self):
    """Update an entity using a table SAS with UPDATE permission."""
    # SAS URL is calculated from storage key, so this test runs live only
    if TestMode.need_recording_file(self.test_mode):
        return

    # Arrange
    entity = self._insert_random_entity()
    token = self.ts.generate_table_shared_access_signature(
        self.table_name,
        TablePermissions.UPDATE,
        datetime.utcnow() + timedelta(hours=1),
    )

    # Act: update through a SAS-only service.
    service = TableService(
        account_name=self.settings.STORAGE_ACCOUNT_NAME,
        sas_token=token,
    )
    self._set_test_proxy(service, self.settings)
    updated_entity = self._create_updated_entity_dict(entity.PartitionKey,
                                                      entity.RowKey)
    resp = service.update_entity(self.table_name, updated_entity)

    # Assert: verify with the key-authenticated client.
    received_entity = self.ts.get_entity(self.table_name,
                                         entity.PartitionKey,
                                         entity.RowKey)
    self._assert_updated_entity(received_entity)
def test_shared_access_share(self):
    """Read a file anonymously using a share-level read SAS."""
    # SAS URL is calculated from storage key, so this test runs live only
    if TestMode.need_recording_file(self.test_mode):
        return

    # Arrange: a file inside a directory; SAS is scoped to the whole share.
    file_name = 'file1'
    dir_name = 'dir1'
    data = b'hello world'
    share_name = self._create_share()
    self.fs.create_directory(share_name, dir_name)
    self.fs.create_file_from_bytes(share_name, dir_name, file_name, data)
    token = self.fs.generate_share_shared_access_signature(
        share_name,
        expiry=datetime.utcnow() + timedelta(hours=1),
        permission=SharePermissions.READ,
    )
    url = self.fs.make_file_url(
        share_name,
        dir_name,
        file_name,
        sas_token=token,
    )

    # Act
    response = requests.get(url)

    # Assert
    self.assertTrue(response.ok)
    self.assertEqual(data, response.content)
def test_sas_upper_case_table_name(self):
    """A SAS generated with an upper-cased table name still authorizes
    queries (table names are case insensitive)."""
    # SAS URL is calculated from storage key, so this test runs live only
    if TestMode.need_recording_file(self.test_mode):
        return

    # Arrange
    entity = self._insert_random_entity()

    # Table names are case insensitive, so simply upper case our existing table name to test
    token = self.ts.generate_table_shared_access_signature(
        self.table_name.upper(),
        TablePermissions.QUERY,
        datetime.utcnow() + timedelta(hours=1),
        datetime.utcnow() - timedelta(minutes=1),
    )

    # Act: query the original (lower-case) name through the SAS service.
    service = TableService(
        account_name=self.settings.STORAGE_ACCOUNT_NAME,
        sas_token=token,
    )
    self._set_test_proxy(service, self.settings)
    entities = list(service.query_entities(
        self.table_name,
        filter="PartitionKey eq '{}'".format(entity['PartitionKey'])))

    # Assert
    self.assertEqual(len(entities), 1)
    self._assert_default_entity(entities[0])
def test_create_blob_from_stream_with_progress_truncated(self):
    """Upload only the first `blob_size` bytes of a larger stream, with a
    progress callback."""
    # parallel tests introduce random order of requests, can only run live
    if TestMode.need_recording_file(self.test_mode):
        return

    # Arrange
    blob_name = self._get_blob_reference()
    data = self.get_random_bytes(LARGE_BLOB_SIZE)
    with open(FILE_PATH, 'wb') as stream:
        stream.write(data)

    # Act: pass an explicit size 512 bytes short of the stream's length.
    progress = []

    def callback(current, total):
        progress.append((current, total))

    blob_size = len(data) - 512
    with open(FILE_PATH, 'rb') as stream:
        self.bs.create_blob_from_stream(self.container_name, blob_name,
                                        stream, blob_size,
                                        progress_callback=callback)

    # Assert: only the requested prefix was uploaded.
    self.assertBlobEqual(self.container_name, blob_name, data[:blob_size])
    self.assert_upload_progress(blob_size, self.bs.MAX_PAGE_SIZE, progress)
def test_sas_signed_identifier(self):
    """Query entities through a table SAS that references a stored access
    policy (signed identifier) instead of inline permissions."""
    # SAS URL is calculated from storage key, so this test runs live only
    if TestMode.need_recording_file(self.test_mode):
        return

    # Arrange: store a QUERY policy on the table, then mint a SAS that
    # carries only the policy id.
    entity = self._insert_random_entity()

    access_policy = AccessPolicy()
    access_policy.start = '2011-10-11'
    access_policy.expiry = '2018-10-12'
    access_policy.permission = TablePermissions.QUERY
    identifiers = {'testid': access_policy}
    self.ts.set_table_acl(self.table_name, identifiers)

    token = self.ts.generate_table_shared_access_signature(
        self.table_name,
        id='testid',
    )

    # Act
    service = TableService(
        account_name=self.settings.STORAGE_ACCOUNT_NAME,
        sas_token=token,
    )
    self._set_test_proxy(service, self.settings)
    # BUG FIX: the query previously went through self.ts (key-authenticated),
    # so the SAS token was never exercised; query via the SAS-backed service,
    # matching the sibling SAS tests.
    entities = list(service.query_entities(
        self.table_name,
        filter="PartitionKey eq '{}'".format(entity.PartitionKey)))

    # Assert
    self.assertEqual(len(entities), 1)
    self._assert_default_entity(entities[0])
def test_sas_signed_identifier(self):
    """Download a file using a file SAS that references a stored share-level
    access policy (signed identifier)."""
    # SAS URL is calculated from storage key, so this test runs live only
    if TestMode.need_recording_file(self.test_mode):
        return

    # Arrange: store a READ policy on the share, then mint a SAS that only
    # names the policy id.
    file_name = self._create_file()

    access_policy = AccessPolicy()
    access_policy.start = '2011-10-11'
    access_policy.expiry = '2018-10-12'
    access_policy.permission = FilePermissions.READ
    identifiers = {'testid': access_policy}

    resp = self.fs.set_share_acl(self.share_name, identifiers)

    token = self.fs.generate_file_shared_access_signature(self.share_name,
                                                          None,
                                                          file_name,
                                                          id='testid')

    # Act: download through a SAS-only service.
    service = FileService(
        self.settings.STORAGE_ACCOUNT_NAME,
        sas_token=token,
        request_session=requests.Session(),
    )
    self._set_test_proxy(service, self.settings)
    result = service.get_file_to_bytes(self.share_name, None, file_name)

    # Assert
    self.assertEqual(self.short_byte_data, result.content)
def test_create_blob_from_stream_with_empty_pages(self):
    """Page-blob upload skips all-zero regions, leaving only two ranges."""
    # parallel tests introduce random order of requests, can only run live
    if TestMode.need_recording_file(self.test_mode):
        return

    # Arrange
    # data is almost all empty (0s) except two ranges
    blob_name = self._get_blob_reference()
    data = bytearray(LARGE_BLOB_SIZE)
    data[512:1024] = self.get_random_bytes(512)
    data[8192:8196] = self.get_random_bytes(4)
    with open(FILE_PATH, 'wb') as stream:
        stream.write(data)

    # Act
    blob_size = len(data)
    with open(FILE_PATH, 'rb') as stream:
        create_resp = self.bs.create_blob_from_stream(
            self.container_name, blob_name, stream, blob_size)
    blob = self.bs.get_blob_properties(self.container_name, blob_name)

    # Assert
    # the uploader should have skipped the empty ranges
    self.assertBlobEqual(self.container_name, blob_name, data[:blob_size])
    page_ranges = list(
        self.bs.get_page_ranges(self.container_name, blob_name))
    self.assertEqual(len(page_ranges), 2)
    # Ranges are page-aligned, so they cover more than the written bytes.
    self.assertEqual(page_ranges[0].start, 0)
    self.assertEqual(page_ranges[0].end, 4095)
    self.assertEqual(page_ranges[1].start, 8192)
    self.assertEqual(page_ranges[1].end, 12287)
    self.assertEqual(blob.properties.etag, create_resp.etag)
    self.assertEqual(blob.properties.last_modified,
                     create_resp.last_modified)
def test_shared_write_access_file(self):
    """Write a byte range to a file over raw HTTP using a write SAS."""
    # SAS URL is calculated from storage key, so this test runs live only
    if TestMode.need_recording_file(self.test_mode):
        return

    # Arrange: write-only SAS on an existing file.
    updated_data = b'updated file data'
    file_name = self._create_file()
    token = self.fs.generate_file_shared_access_signature(
        self.share_name,
        None,
        file_name,
        permission=FilePermissions.WRITE,
        expiry=datetime.utcnow() + timedelta(hours=1),
    )
    url = self.fs.make_file_url(
        self.share_name,
        None,
        file_name,
        sas_token=token,
    )

    # Act: raw Put Range request (comp=range) for bytes 0-16.
    headers = {'x-ms-range': 'bytes=0-16', 'x-ms-write': 'update'}
    response = requests.put(url + '&comp=range', headers=headers,
                            data=updated_data)

    # Assert: the written prefix replaced the original bytes.
    self.assertTrue(response.ok)
    file = self.fs.get_file_to_bytes(self.share_name, None, file_name)
    self.assertEqual(updated_data, file.content[:len(updated_data)])
def test_account_sas(self):
    """Read a blob anonymously over HTTP using an account-level read SAS."""
    # SAS URL is calculated from storage key, so this test runs live only
    if TestMode.need_recording_file(self.test_mode):
        return

    # Arrange: account SAS scoped to objects, read-only, 1h expiry.
    blob_name = self._create_block_blob()
    token = self.bs.generate_account_shared_access_signature(
        ResourceTypes.OBJECT,
        AccountPermissions.READ,
        datetime.utcnow() + timedelta(hours=1),
    )

    # Act
    url = self.bs.make_blob_url(
        self.container_name,
        blob_name,
        sas_token=token,
    )
    response = requests.get(url)

    # Assert
    self.assertTrue(response.ok)
    self.assertEqual(self.byte_data, response.content)
def test_create_file_from_stream_non_seekable(self):
    """Upload a large file from a non-seekable stream (forces serial,
    max_connections=1 upload)."""
    # parallel tests introduce random order of requests, can only run live
    if TestMode.need_recording_file(self.test_mode):
        return

    # Arrange
    file_name = self._get_file_reference()
    data = self.get_random_bytes(LARGE_FILE_SIZE)
    with open(INPUT_FILE_PATH, 'wb') as stream:
        stream.write(data)

    # Act: wrap the file object so seek/tell are unavailable; parallel
    # upload requires seeking, hence max_connections=1.
    file_size = len(data)
    with open(INPUT_FILE_PATH, 'rb') as stream:
        non_seekable_file = StorageFileTest.NonSeekableFile(stream)
        self.fs.create_file_from_stream(self.share_name, None, file_name,
                                        non_seekable_file, file_size,
                                        max_connections=1)

    # Assert
    self.assertFileEqual(self.share_name, None, file_name,
                         data[:file_size])
def test_shared_read_access_blob_with_content_query_params(self):
    """Content-* overrides baked into a read SAS must be reflected in the
    response headers of an anonymous GET."""
    # SAS URL is calculated from storage key, so this test runs live only
    if TestMode.need_recording_file(self.test_mode):
        return

    # Arrange: read SAS carrying response-header override parameters.
    blob_name = self._create_block_blob()
    token = self.bs.generate_blob_shared_access_signature(
        self.container_name,
        blob_name,
        permission=BlobPermissions.READ,
        expiry=datetime.utcnow() + timedelta(hours=1),
        cache_control='no-cache',
        content_disposition='inline',
        content_encoding='utf-8',
        content_language='fr',
        content_type='text',
    )
    url = self.bs.make_blob_url(
        self.container_name,
        blob_name,
        sas_token=token,
    )

    # Act
    response = requests.get(url)

    # Assert: body intact and every override echoed back as a header.
    self.assertEqual(self.byte_data, response.content)
    self.assertEqual(response.headers['cache-control'], 'no-cache')
    self.assertEqual(response.headers['content-disposition'], 'inline')
    self.assertEqual(response.headers['content-encoding'], 'utf-8')
    self.assertEqual(response.headers['content-language'], 'fr')
    self.assertEqual(response.headers['content-type'], 'text')
def test_get_file_to_stream_with_progress_from_snapshot(self):
    """Download a deleted file from a share snapshot, tracking progress."""
    # parallel tests introduce random order of requests, can only run live
    if TestMode.need_recording_file(self.test_mode):
        return

    # Arrange
    # Create a snapshot of the share and delete the file, so the data only
    # survives inside the snapshot.
    share_snapshot = self.fs.snapshot_share(self.share_name)
    self.fs.delete_file(self.share_name, self.directory_name,
                        self.byte_file)

    progress = []

    def callback(current, total):
        progress.append((current, total))

    # Act: read from the snapshot, not the (now-empty) live share.
    with open(FILE_PATH, 'wb') as stream:
        file = self.fs.get_file_to_stream(
            self.share_name, self.directory_name, self.byte_file, stream,
            progress_callback=callback,
            snapshot=share_snapshot.snapshot)

    # Assert
    self.assertIsInstance(file, File)
    with open(FILE_PATH, 'rb') as stream:
        actual = stream.read()
        self.assertEqual(self.byte_data, actual)
    self.assert_download_progress(len(self.byte_data),
                                  self.fs.MAX_CHUNK_GET_SIZE,
                                  self.fs.MAX_SINGLE_GET_SIZE, progress)
def test_shared_write_access_blob(self):
    """Overwrite a blob over raw HTTP using a blob-level write SAS."""
    # SAS URL is calculated from storage key, so this test runs live only
    if TestMode.need_recording_file(self.test_mode):
        return

    # Arrange: write-only SAS on an existing block blob.
    updated_data = b'updated blob data'
    blob_name = self._create_block_blob()
    token = self.bs.generate_blob_shared_access_signature(
        self.container_name,
        blob_name,
        permission=BlobPermissions.WRITE,
        expiry=datetime.utcnow() + timedelta(hours=1),
    )
    url = self.bs.make_blob_url(
        self.container_name,
        blob_name,
        sas_token=token,
    )

    # Act: Put Blob requires the x-ms-blob-type header.
    headers = {'x-ms-blob-type': self.bs.blob_type}
    response = requests.put(url, headers=headers, data=updated_data)

    # Assert: the blob now holds the replacement content.
    self.assertTrue(response.ok)
    blob = self.bs.get_blob_to_bytes(self.container_name, blob_name)
    self.assertEqual(updated_data, blob.content)
def test_sas_add_inside_range(self):
    """Insert an entity whose keys fall inside the PK/RK range the ADD SAS
    is restricted to."""
    # SAS URL is calculated from storage key, so this test runs live only
    if TestMode.need_recording_file(self.test_mode):
        return

    # Arrange: SAS restricted to exactly PartitionKey 'test', RowKey 'test1'.
    token = self.ts.generate_table_shared_access_signature(
        self.table_name,
        TablePermissions.ADD,
        datetime.utcnow() + timedelta(hours=1),
        start_pk='test', start_rk='test1',
        end_pk='test', end_rk='test1',
    )

    # Act: the inserted entity matches the permitted range.
    service = TableService(
        account_name=self.settings.STORAGE_ACCOUNT_NAME,
        sas_token=token,
    )
    self._set_service_options(service, self.settings)
    entity = self._create_random_entity_dict('test', 'test1')
    service.insert_entity(self.table_name, entity)

    # Assert
    resp = self.ts.get_entity(self.table_name, 'test', 'test1')
    self._assert_default_entity(resp)
def test_sas_access_blob(self):
    """Download a blob through a BlockBlobService authenticated only by a
    blob-level read SAS."""
    # SAS URL is calculated from storage key, so this test runs live only
    if TestMode.need_recording_file(self.test_mode):
        return

    # Arrange
    blob_name = self._create_block_blob()
    token = self.bs.generate_blob_shared_access_signature(
        self.container_name,
        blob_name,
        permission=BlobPermissions.READ,
        expiry=datetime.utcnow() + timedelta(hours=1),
    )

    # Act: new service instance carries no account key, only the SAS.
    service = BlockBlobService(
        self.settings.STORAGE_ACCOUNT_NAME,
        sas_token=token,
        request_session=requests.Session(),
    )
    self._set_service_options(service, self.settings)
    result = service.get_blob_to_bytes(self.container_name, blob_name)

    # Assert
    self.assertEqual(self.byte_data, result.content)
def test_sas_upper_case_table_name(self):
    """A SAS generated with an upper-cased table name still authorizes
    queries (table names are case insensitive)."""
    # SAS URL is calculated from storage key, so this test runs live only
    if TestMode.need_recording_file(self.test_mode):
        return

    # Arrange
    entity = self._insert_random_entity()

    # Table names are case insensitive, so simply upper case our existing table name to test
    token = self.ts.generate_table_shared_access_signature(
        self.table_name.upper(),
        TablePermissions.QUERY,
        datetime.utcnow() + timedelta(hours=1),
        datetime.utcnow() - timedelta(minutes=1),
    )

    # Act: query the original (lower-case) name through the SAS service.
    service = TableService(
        account_name=self.settings.STORAGE_ACCOUNT_NAME,
        sas_token=token,
    )
    self._set_service_options(service, self.settings)
    entities = list(service.query_entities(
        self.table_name,
        filter="PartitionKey eq '{}'".format(entity['PartitionKey'])))

    # Assert
    self.assertEqual(len(entities), 1)
    self._assert_default_entity(entities[0])
def test_create_large_blob_from_stream_with_progress_chunked_upload(self):
    """Chunked upload of a large blob from a stream with no declared size,
    tracking progress (total is unknown to the uploader)."""
    # parallel tests introduce random order of requests, can only run live
    if TestMode.need_recording_file(self.test_mode):
        return

    # Arrange
    blob_name = self._get_blob_reference()
    data = bytearray(os.urandom(LARGE_BLOB_SIZE))
    with open(FILE_PATH, 'wb') as stream:
        stream.write(data)

    # Act: no count argument, so the uploader streams until EOF.
    progress = []

    def callback(current, total):
        progress.append((current, total))

    with open(FILE_PATH, 'rb') as stream:
        self.bs.create_blob_from_stream(self.container_name, blob_name,
                                        stream, progress_callback=callback)

    # Assert
    self.assertBlobEqual(self.container_name, blob_name, data)
    self.assert_upload_progress(len(data), self.bs.MAX_BLOCK_SIZE,
                                progress, unknown_size=True)
def test_sas_update(self):
    """Update a dequeued message using a queue SAS with UPDATE permission."""
    # SAS URL is calculated from storage key, so this test runs live only
    if TestMode.need_recording_file(self.test_mode):
        return

    # Arrange: dequeue via the key-authed client to obtain a pop receipt.
    queue_name = self._create_queue()
    self.qs.put_message(queue_name, u'message1')
    token = self.qs.generate_queue_shared_access_signature(
        queue_name,
        QueuePermissions.UPDATE,
        datetime.utcnow() + timedelta(hours=1),
    )
    result = self.qs.get_messages(queue_name)

    # Act: update through a SAS-only service; timeout 0 makes the message
    # immediately visible again for the verification read below.
    service = QueueService(
        account_name=self.settings.STORAGE_ACCOUNT_NAME,
        sas_token=token,
    )
    self._set_service_options(service, self.settings)
    service.update_message(
        queue_name,
        result[0].id,
        result[0].pop_receipt,
        visibility_timeout=0,
        content=u'updatedmessage1',
    )

    # Assert
    result = self.qs.get_messages(queue_name)
    self.assertEqual(u'updatedmessage1', result[0].content)
def test_create_file_from_stream_with_progress_truncated(self):
    """Upload only the first `file_size` bytes of a larger stream, with a
    progress callback."""
    # parallel tests introduce random order of requests, can only run live
    if TestMode.need_recording_file(self.test_mode):
        return

    # Arrange
    file_name = self._get_file_reference()
    data = self.get_random_bytes(LARGE_FILE_SIZE)
    with open(INPUT_FILE_PATH, 'wb') as stream:
        stream.write(data)

    # Act: declared size is 5 bytes short of the stream's actual length.
    progress = []

    def callback(current, total):
        progress.append((current, total))

    file_size = len(data) - 5
    with open(INPUT_FILE_PATH, 'rb') as stream:
        self.fs.create_file_from_stream(self.share_name, None, file_name,
                                        stream, file_size,
                                        progress_callback=callback)

    # Assert: only the requested prefix was uploaded.
    self.assertFileEqual(self.share_name, None, file_name,
                         data[:file_size])
    self.assert_upload_progress(file_size, self.fs.MAX_RANGE_SIZE,
                                progress, unknown_size=False)
def test_shared_access_share(self):
    """Read a file anonymously using a share-level read SAS."""
    # SAS URL is calculated from storage key, so this test runs live only.
    # BUG FIX: was TestMode.need_recordingfile (missing underscore), which
    # would raise AttributeError instead of skipping in recorded mode; every
    # sibling test spells it need_recording_file.
    if TestMode.need_recording_file(self.test_mode):
        return

    # Arrange: a file inside a directory; SAS is scoped to the whole share.
    file_name = 'file1'
    dir_name = 'dir1'
    data = b'hello world'
    share_name = self._create_share()
    self.fs.create_directory(share_name, dir_name)
    self.fs.create_file_from_bytes(share_name, dir_name, file_name, data)
    token = self.fs.generate_share_shared_access_signature(
        share_name,
        expiry=datetime.utcnow() + timedelta(hours=1),
        permission=SharePermissions.READ,
    )
    url = self.fs.make_file_url(
        share_name,
        dir_name,
        file_name,
        sas_token=token,
    )

    # Act
    response = requests.get(url)

    # Assert
    self.assertTrue(response.ok)
    self.assertEqual(data, response.content)
def test_ranged_get_file_to_path_with_progress(self):
    """Download a byte range spanning the single-get boundary to a local
    path, tracking progress."""
    # parallel tests introduce random order of requests, can only run live
    if TestMode.need_recording_file(self.test_mode):
        return

    # Arrange
    progress = []

    def callback(current, total):
        progress.append((current, total))

    # Act: range straddles MAX_SINGLE_GET_SIZE so the tail is chunked.
    start_range = 3
    end_range = self.fs.MAX_SINGLE_GET_SIZE + 1024
    file = self.fs.get_file_to_path(self.share_name, self.directory_name,
                                    self.byte_file, FILE_PATH,
                                    start_range=start_range,
                                    end_range=end_range,
                                    progress_callback=callback)

    # Assert: end_range is inclusive, hence the +1 in the slice.
    self.assertIsInstance(file, File)
    with open(FILE_PATH, 'rb') as stream:
        actual = stream.read()
        self.assertEqual(self.byte_data[start_range:end_range + 1], actual)
    self.assert_download_progress(end_range - start_range + 1,
                                  self.fs.MAX_CHUNK_GET_SIZE,
                                  self.fs.MAX_SINGLE_GET_SIZE, progress)
def test_sas_signed_identifier(self):
    # SAS URL is calculated from storage key, so this test runs live only
    if TestMode.need_recording_file(self.test_mode):
        return

    # Arrange: attach a stored access policy to the container, then issue a
    # SAS token that references it by id instead of inlining permissions.
    blob_name = self._create_block_blob()

    access_policy = AccessPolicy()
    access_policy.start = '2011-10-11'
    access_policy.expiry = '2018-10-12'
    access_policy.permission = BlobPermissions.READ
    self.bs.set_container_acl(self.container_name, {'testid': access_policy})

    token = self.bs.generate_blob_shared_access_signature(
        self.container_name,
        blob_name,
        id='testid')

    # Act: read the blob through a service authenticated only by the SAS.
    service = BlockBlobService(
        self.settings.STORAGE_ACCOUNT_NAME,
        sas_token=token,
        request_session=requests.Session())
    self._set_test_proxy(service, self.settings)
    result = service.get_blob_to_bytes(self.container_name, blob_name)

    # Assert
    self.assertEqual(self.byte_data, result.content)
def test_get_blob_to_stream_with_progress_parallel(self):
    # parallel tests introduce random order of requests, can only run live
    # (fixed: need_recordingfile -> need_recording_file; the misspelled
    # attribute does not exist on TestMode and raised AttributeError)
    if TestMode.need_recording_file(self.test_mode):
        return

    # Arrange: collect (current, total) pairs reported during the download.
    progress = []

    def callback(current, total):
        progress.append((current, total))

    # Act: stream the blob to a local file using 5 parallel connections.
    with open(FILE_PATH, 'wb') as stream:
        blob = self.bs.get_blob_to_stream(
            self.container_name, self.byte_blob, stream,
            progress_callback=callback,
            max_connections=5)

    # Assert: the downloaded bytes match and progress covered the full size.
    self.assertIsInstance(blob, Blob)
    with open(FILE_PATH, 'rb') as stream:
        actual = stream.read()
    self.assertEqual(self.byte_data, actual)
    self.assert_download_progress(len(self.byte_data),
                                  self.bs.MAX_CHUNK_GET_SIZE, progress,
                                  single_download=False)
def test_get_blob_range_with_range_md5(self):
    # parallel tests introduce random order of requests, can only run live
    if TestMode.need_recording_file(self.test_mode):
        return

    # Act: range download with validation while the blob still carries the
    # content MD5 recorded at upload time.
    blob = self.bs.get_blob_to_bytes(self.container_name, self.byte_blob,
                                     start_range=0, end_range=1024,
                                     validate_content=True)

    # Arrange: clear the stored content MD5 so the service must compute a
    # transactional MD5 for the requested range instead.
    props = self.bs.get_blob_properties(self.container_name, self.byte_blob)
    props.properties.content_settings.content_md5 = None
    self.bs.set_blob_properties(self.container_name, self.byte_blob,
                                props.properties.content_settings)

    # Act: repeat the validated range download without a stored MD5.
    blob = self.bs.get_blob_to_bytes(self.container_name, self.byte_blob,
                                     start_range=0, end_range=1024,
                                     validate_content=True)

    # Assert
    # (fixed: removed a duplicated Act/Assert block that repeated the same
    # request and assertions verbatim, and the C-style ';' terminators)
    self.assertTrue(hasattr(blob.properties.content_settings, "content_type"))
    self.assertFalse(hasattr(blob.properties.content_settings, "content_md5"))
def test_shared_delete_access_blob(self):
    # SAS URL is calculated from storage key, so this test runs live only
    if TestMode.need_recording_file(self.test_mode):
        return

    # Arrange: mint a delete-only SAS for a freshly created block blob.
    blob_name = self._create_block_blob()
    sas = self.bs.generate_blob_shared_access_signature(
        self.container_name,
        blob_name,
        permission=BlobPermissions.DELETE,
        expiry=datetime.utcnow() + timedelta(hours=1))
    url = self.bs.make_blob_url(self.container_name, blob_name, sas_token=sas)

    # Act: delete the blob over raw HTTP using only the SAS URL.
    response = requests.delete(url)

    # Assert: the delete succeeded and the blob is gone.
    self.assertTrue(response.ok)
    with self.assertRaises(AzureMissingResourceHttpError):
        self.bs.get_blob_to_bytes(self.container_name, blob_name)
def test_sas_access_blob(self):
    # SAS URL is calculated from storage key, so this test runs live only
    if TestMode.need_recording_file(self.test_mode):
        return

    # Arrange: read-only SAS for a new block blob, valid for one hour.
    blob_name = self._create_block_blob()
    sas = self.bs.generate_blob_shared_access_signature(
        self.container_name,
        blob_name,
        permission=BlobPermissions.READ,
        expiry=datetime.utcnow() + timedelta(hours=1))

    # Act: authenticate a fresh service object with the SAS alone.
    service = BlockBlobService(
        self.settings.STORAGE_ACCOUNT_NAME,
        sas_token=sas,
        request_session=requests.Session())
    self._set_test_proxy(service, self.settings)
    result = service.get_blob_to_bytes(self.container_name, blob_name)

    # Assert
    self.assertEqual(self.byte_data, result.content)
def test_shared_write_access_file(self):
    # SAS URL is calculated from storage key, so this test runs live only
    if TestMode.need_recording_file(self.test_mode):
        return

    # Arrange: write-only SAS for an existing file.
    updated_data = b'updated file data'
    file_name = self._create_file()
    sas = self.fs.generate_file_shared_access_signature(
        self.share_name,
        None,
        file_name,
        permission=FilePermissions.WRITE,
        expiry=datetime.utcnow() + timedelta(hours=1))
    url = self.fs.make_file_url(self.share_name, None, file_name,
                                sas_token=sas)

    # Act: PUT an update range directly against the REST endpoint.
    headers = {'x-ms-range': 'bytes=0-16', 'x-ms-write': 'update'}
    response = requests.put(url + '&comp=range', headers=headers,
                            data=updated_data)

    # Assert: the range was overwritten with the new bytes.
    self.assertTrue(response.ok)
    file = self.fs.get_file_to_bytes(self.share_name, None, file_name)
    self.assertEqual(updated_data, file.content[:len(updated_data)])
def test_shared_access_container(self):
    # SAS URL is calculated from storage key, so this test runs live only
    if TestMode.need_recording_file(self.test_mode):
        return

    # Arrange: a container-level read SAS should grant access to any blob
    # inside that container.
    container_name = self._create_container()
    blob_name = 'blob1'
    data = b'hello world'
    self.bs.create_blob_from_bytes(container_name, blob_name, data)
    sas = self.bs.generate_container_shared_access_signature(
        container_name,
        expiry=datetime.utcnow() + timedelta(hours=1),
        permission=ContainerPermissions.READ)
    url = self.bs.make_blob_url(container_name, blob_name, sas_token=sas)

    # Act
    response = requests.get(url)

    # Assert
    self.assertTrue(response.ok)
    self.assertEqual(data, response.content)
def test_create_blob_from_stream_chunked_upload_with_count_and_properties(self):
    # parallel tests introduce random order of requests, can only run live
    if TestMode.need_recording_file(self.test_mode):
        return

    # Arrange: write a large random payload to disk so it can be streamed.
    blob_name = self._get_blob_reference()
    data = self.get_random_bytes(LARGE_BLOB_SIZE)
    with open(FILE_PATH, 'wb') as stream:
        stream.write(data)

    # Act: upload a truncated byte count with explicit content settings.
    content_settings = ContentSettings(
        content_type='image/png',
        content_language='spanish')
    blob_size = len(data) - 301
    with open(FILE_PATH, 'rb') as stream:
        self.bs.create_blob_from_stream(
            self.container_name, blob_name, stream, blob_size,
            content_settings=content_settings)

    # Assert: only blob_size bytes landed, and the settings round-tripped.
    self.assertBlobEqual(self.container_name, blob_name, data[:blob_size])
    properties = self.bs.get_blob_properties(
        self.container_name, blob_name).properties
    self.assertEqual(properties.content_settings.content_type,
                     content_settings.content_type)
    self.assertEqual(properties.content_settings.content_language,
                     content_settings.content_language)
def test_shared_read_access_blob(self):
    # SAS URL is calculated from storage key, so this test runs live only
    if TestMode.need_recording_file(self.test_mode):
        return

    # Arrange: read-only SAS for a new block blob, valid for one hour.
    blob_name = self._create_block_blob()
    sas = self.bs.generate_blob_shared_access_signature(
        self.container_name,
        blob_name,
        permission=BlobPermissions.READ,
        expiry=datetime.utcnow() + timedelta(hours=1))

    # Act: GET the blob over raw HTTP using only the SAS URL.
    url = self.bs.make_blob_url(self.container_name, blob_name, sas_token=sas)
    response = requests.get(url)

    # Assert
    self.assertTrue(response.ok)
    self.assertEqual(self.byte_data, response.content)
def test_account_sas(self):
    # SAS URL is calculated from storage key, so this test runs live only
    if TestMode.need_recording_file(self.test_mode):
        return

    # Arrange: account-level SAS scoped to object reads, valid for one hour.
    file_name = self._create_file()
    sas = self.fs.generate_account_shared_access_signature(
        ResourceTypes.OBJECT,
        AccountPermissions.READ,
        datetime.utcnow() + timedelta(hours=1))

    # Act: GET the file over raw HTTP using only the SAS URL.
    url = self.fs.make_file_url(self.share_name, None, file_name,
                                sas_token=sas)
    response = requests.get(url)

    # Assert
    self.assertTrue(response.ok)
    self.assertEqual(self.short_byte_data, response.content)
def test_ranged_get_blob_to_path_md5_without_end_range_fail(self):
    # parallel tests introduce random order of requests, can only run live
    # (fixed: need_recordingfile -> need_recording_file; the misspelled
    # attribute does not exist on TestMode and raised AttributeError)
    if TestMode.need_recording_file(self.test_mode):
        return

    # Act / Assert: requesting a range MD5 without an end_range is invalid,
    # so the client must raise ValueError before issuing any request.
    with self.assertRaises(ValueError):
        self.bs.get_blob_to_path(
            self.container_name, self.byte_blob, FILE_PATH,
            start_range=1,
            range_get_content_md5=True,
            max_connections=5)
def test_create_blob_with_md5_chunked(self):
    # parallel tests introduce random order of requests, can only run live
    if TestMode.need_recording_file(self.test_mode):
        return

    # Arrange: payload large enough to trigger the chunked upload path.
    blob_name = self._get_blob_reference()
    payload = self.get_random_bytes(LARGE_BLOB_SIZE)

    # Act: validate_content=True makes the client send per-chunk MD5 hashes.
    self.bs.create_blob_from_bytes(self.container_name, blob_name, payload,
                                   validate_content=True)
def test_create_file_with_md5_large(self):
    # parallel tests introduce random order of requests, can only run live
    if TestMode.need_recording_file(self.test_mode):
        return

    # Arrange: payload large enough to trigger the chunked upload path.
    file_name = self._get_file_reference()
    payload = self.get_random_bytes(LARGE_FILE_SIZE)

    # Act: validate_content=True makes the client send per-range MD5 hashes.
    self.fs.create_file_from_bytes(self.share_name, None, file_name, payload,
                                   validate_content=True)
def test_get_file_with_md5(self):
    # parallel tests introduce random order of requests, can only run live
    if TestMode.need_recording_file(self.test_mode):
        return

    # Act: validate_content=True asks the client to verify MD5 on download.
    downloaded = self.fs.get_file_to_bytes(
        self.share_name, self.directory_name, self.byte_file,
        validate_content=True)

    # Assert
    self.assertEqual(self.byte_data, downloaded.content)
def test_get_blob_to_bytes_parallel(self):
    # parallel tests introduce random order of requests, can only run live
    # (fixed: need_recordingfile -> need_recording_file; the misspelled
    # attribute does not exist on TestMode and raised AttributeError)
    if TestMode.need_recording_file(self.test_mode):
        return

    # Act: max_connections=2 forces the parallel (chunked) download path.
    blob = self.bs.get_blob_to_bytes(self.container_name, self.byte_blob,
                                     max_connections=2)

    # Assert
    self.assertEqual(self.byte_data, blob.content)