async def test_create_container_with_default_cpk_n(self, resource_group, location, storage_account, storage_account_key):
    """Verify that a container created with a default encryption scope reports
    that scope, and that an explicit per-blob scope overrides the default.

    :param resource_group: Test fixture for the resource group.
    :param location: Test fixture for the account location.
    :param storage_account: Test fixture for the storage account.
    :param storage_account_key: Shared key credential for the account.
    """
    # Arrange -- small transfer sizes force the chunked code paths.
    bsc = BlobServiceClient(
        self.account_url(storage_account, "blob"),
        credential=storage_account_key,
        connection_data_block_size=1024,
        max_single_put_size=1024,
        min_large_block_upload_threshold=1024,
        max_block_size=1024,
        max_page_size=1024)
    container_client = await bsc.create_container(
        'asynccpkcontainer',
        container_encryption_scope=TEST_CONTAINER_ENCRYPTION_KEY_SCOPE)

    # The created container carries the default scope, with override allowed.
    container_props = await container_client.get_container_properties()
    self.assertEqual(
        container_props.encryption_scope.default_encryption_scope,
        TEST_CONTAINER_ENCRYPTION_KEY_SCOPE.default_encryption_scope)
    self.assertEqual(container_props.encryption_scope.prevent_encryption_scope_override, False)

    # BUGFIX: assert on the container yielded by the listing, not on the
    # previously fetched container_props -- the old assertions never
    # actually checked the listed item.
    async for container in bsc.list_containers(name_starts_with='asynccpkcontainer'):
        self.assertEqual(
            container.encryption_scope.default_encryption_scope,
            TEST_CONTAINER_ENCRYPTION_KEY_SCOPE.default_encryption_scope)
        self.assertEqual(container.encryption_scope.prevent_encryption_scope_override, False)

    blob_client = container_client.get_blob_client("appendblob")

    # providing encryption scope when upload the blob
    resp = await blob_client.upload_blob(
        b'aaaa', BlobType.AppendBlob, encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)

    # Use the provided encryption scope on the blob
    self.assertEqual(resp['encryption_scope'], TEST_ENCRYPTION_KEY_SCOPE)

    await container_client.delete_container()
async def test_create_container_with_default_cpk_n_deny_override(
        self, resource_group, location, storage_account, storage_account_key):
    """Verify that a container which denies encryption-scope override rejects
    per-blob scopes and applies its default scope to uploads.

    :param resource_group: Test fixture for the resource group.
    :param location: Test fixture for the account location.
    :param storage_account: Test fixture for the storage account.
    :param storage_account_key: Shared key credential for the account.
    """
    # Arrange -- small transfer sizes force the chunked code paths.
    bsc = BlobServiceClient(
        self.account_url(storage_account, "blob"),
        credential=storage_account_key,
        connection_data_block_size=1024,
        max_single_put_size=1024,
        min_large_block_upload_threshold=1024,
        max_block_size=1024,
        max_page_size=1024)
    container_client = await bsc.create_container(
        'asyncdenyoverridecpkcontainer',
        container_encryption_scope=TEST_CONTAINER_ENCRYPTION_KEY_SCOPE_DENY_OVERRIDE)

    # The created container carries the default scope, with override denied.
    container_props = await container_client.get_container_properties()
    self.assertEqual(
        container_props.encryption_scope.default_encryption_scope,
        TEST_CONTAINER_ENCRYPTION_KEY_SCOPE.default_encryption_scope)
    self.assertEqual(
        container_props.encryption_scope.prevent_encryption_scope_override, True)

    # BUGFIX: assert on the container yielded by the listing, not on the
    # previously fetched container_props -- the old assertions never
    # actually checked the listed item.
    async for container in bsc.list_containers(
            name_starts_with='asyncdenyoverridecpkcontainer'):
        self.assertEqual(
            container.encryption_scope.default_encryption_scope,
            TEST_CONTAINER_ENCRYPTION_KEY_SCOPE.default_encryption_scope)
        self.assertEqual(
            container.encryption_scope.prevent_encryption_scope_override, True)

    blob_client = container_client.get_blob_client("appendblob")

    # It's not allowed to set encryption scope on the blob when the container
    # denies encryption scope override.
    with self.assertRaises(HttpResponseError):
        await blob_client.upload_blob(
            b'aaaa', BlobType.AppendBlob, encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)

    # Without an explicit scope the container default is applied.
    resp = await blob_client.upload_blob(b'aaaa', BlobType.AppendBlob)
    self.assertEqual(
        resp['encryption_scope'],
        TEST_CONTAINER_ENCRYPTION_KEY_SCOPE.default_encryption_scope)

    await container_client.delete_container()
class DataLakeServiceClient(AsyncStorageAccountHostsMixin, DataLakeServiceClientBase):
    """A client to interact with the DataLake Service at the account level.

    This client provides operations to retrieve and configure the account properties
    as well as list, create and delete file systems within the account.
    For operations relating to a specific file system, directory or file, clients for those
    entities can also be retrieved using the `get_client` functions.

    :ivar str url:
        The full endpoint URL to the datalake service endpoint.
    :ivar str primary_endpoint:
        The full primary endpoint URL.
    :ivar str primary_hostname:
        The hostname of the primary endpoint.
    :param str account_url:
        The URL to the DataLake storage account. Any other entities included
        in the URL path (e.g. file system or file) will be discarded. This URL can be
        optionally authenticated with a SAS token.
    :param credential:
        The credentials with which to authenticate. This is optional if the
        account URL already has a SAS token. The value can be a SAS token string,
        an instance of a AzureSasCredential from azure.core.credentials, an account
        shared access key, or an instance of a TokenCredentials class from azure.identity.
        If the resource URI already contains a SAS token, this will be ignored in favor of
        an explicit credential - except in the case of AzureSasCredential, where the
        conflicting SAS tokens will raise a ValueError.

    .. admonition:: Example:

        .. literalinclude:: ../samples/datalake_samples_service_async.py
            :start-after: [START create_datalake_service_client]
            :end-before: [END create_datalake_service_client]
            :language: python
            :dedent: 4
            :caption: Creating the DataLakeServiceClient from connection string.

        .. literalinclude:: ../samples/datalake_samples_service_async.py
            :start-after: [START create_datalake_service_client_oauth]
            :end-before: [END create_datalake_service_client_oauth]
            :language: python
            :dedent: 4
            :caption: Creating the DataLakeServiceClient with Azure Identity credentials.
    """

    def __init__(
            self, account_url,  # type: str
            credential=None,  # type: Optional[Any]
            **kwargs  # type: Any
    ):
        # type: (...) -> None
        kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs)
        super(DataLakeServiceClient, self).__init__(account_url, credential=credential, **kwargs)
        # All account-level operations are delegated to a wrapped blob client.
        self._blob_service_client = BlobServiceClient(self._blob_account_url, credential, **kwargs)
        # Clear the secondary host on the wrapped blob client.
        self._blob_service_client._hosts[LocationMode.SECONDARY] = ""  # pylint: disable=protected-access
        self._client = AzureDataLakeStorageRESTAPI(self.url, pipeline=self._pipeline)
        self._loop = kwargs.get('loop', None)

    async def __aenter__(self):
        await self._blob_service_client.__aenter__()
        return self

    async def __aexit__(self, *args):
        await self._blob_service_client.close()

    async def close(self):
        # type: () -> None
        """This method is to close the sockets opened by the client.

        It need not be used when using with a context manager.
        """
        await self._blob_service_client.close()

    async def get_user_delegation_key(
            self, key_start_time,  # type: datetime
            key_expiry_time,  # type: datetime
            **kwargs  # type: Any
    ):
        # type: (...) -> UserDelegationKey
        """Obtain a user delegation key for the purpose of signing SAS tokens.

        A token credential must be present on the service object for this request to succeed.

        :param ~datetime.datetime key_start_time:
            A DateTime value. Indicates when the key becomes valid.
        :param ~datetime.datetime key_expiry_time:
            A DateTime value. Indicates when the key stops being valid.
        :keyword int timeout:
            The timeout parameter is expressed in seconds.
        :return: The user delegation key.
        :rtype: ~azure.storage.filedatalake.UserDelegationKey

        .. admonition:: Example:

            .. literalinclude:: ../samples/datalake_samples_service_async.py
                :start-after: [START get_user_delegation_key]
                :end-before: [END get_user_delegation_key]
                :language: python
                :dedent: 8
                :caption: Get user delegation key from datalake service client.
        """
        delegation_key = await self._blob_service_client.get_user_delegation_key(
            key_start_time=key_start_time,
            key_expiry_time=key_expiry_time,
            **kwargs)
        return UserDelegationKey._from_generated(delegation_key)  # pylint: disable=protected-access

    def list_file_systems(
            self, name_starts_with=None,  # type: Optional[str]
            include_metadata=None,  # type: Optional[bool]
            **kwargs
    ):
        # type: (...) -> ItemPaged[FileSystemProperties]
        """Returns a generator to list the file systems under the specified account.

        The generator will lazily follow the continuation tokens returned by
        the service and stop when all file systems have been returned.

        :param str name_starts_with:
            Filters the results to return only file systems whose names
            begin with the specified prefix.
        :param bool include_metadata:
            Specifies that file system metadata be returned in the response.
            The default value is `False`.
        :keyword int results_per_page:
            The maximum number of file system names to retrieve per API
            call. If the request does not specify the server will return up to 5,000 items per page.
        :keyword int timeout:
            The timeout parameter is expressed in seconds.
        :returns: An iterable (auto-paging) of FileSystemProperties.
        :rtype: ~azure.core.paging.ItemPaged[~azure.storage.filedatalake.FileSystemProperties]

        .. admonition:: Example:

            .. literalinclude:: ../samples/datalake_samples_service_async.py
                :start-after: [START list_file_systems]
                :end-before: [END list_file_systems]
                :language: python
                :dedent: 8
                :caption: Listing the file systems in the datalake service.
        """
        item_paged = self._blob_service_client.list_containers(
            name_starts_with=name_starts_with,
            include_metadata=include_metadata,
            **kwargs)
        # Re-page the blob-container results as file-system properties.
        item_paged._page_iterator_class = FileSystemPropertiesPaged  # pylint: disable=protected-access
        return item_paged

    async def create_file_system(
            self, file_system,  # type: Union[FileSystemProperties, str]
            metadata=None,  # type: Optional[Dict[str, str]]
            public_access=None,  # type: Optional[PublicAccess]
            **kwargs
    ):
        # type: (...) -> FileSystemClient
        """Creates a new file system under the specified account.

        If the file system with the same name already exists, a ResourceExistsError will
        be raised. This method returns a client with which to interact with the newly
        created file system.

        :param str file_system: The name of the file system to create.
        :param metadata:
            A dict with name-value pairs to associate with the
            file system as metadata. Example: `{'Category':'test'}`
        :type metadata: dict(str, str)
        :param public_access: Possible values include: file system, file.
        :type public_access: ~azure.storage.filedatalake.PublicAccess
        :keyword int timeout:
            The timeout parameter is expressed in seconds.
        :rtype: ~azure.storage.filedatalake.FileSystemClient

        .. admonition:: Example:

            .. literalinclude:: ../samples/datalake_samples_service_async.py
                :start-after: [START create_file_system_from_service_client]
                :end-before: [END create_file_system_from_service_client]
                :language: python
                :dedent: 8
                :caption: Creating a file system in the datalake service.
        """
        file_system_client = self.get_file_system_client(file_system)
        await file_system_client.create_file_system(
            metadata=metadata, public_access=public_access, **kwargs)
        return file_system_client

    async def delete_file_system(
            self, file_system,  # type: Union[FileSystemProperties, str]
            **kwargs
    ):
        # type: (...) -> FileSystemClient
        """Marks the specified file system for deletion.

        The file system and any files contained within it are later deleted during garbage collection.
        If the file system is not found, a ResourceNotFoundError will be raised.

        :param file_system:
            The file system to delete. This can either be the name of the file system,
            or an instance of FileSystemProperties.
        :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties
        :keyword lease:
            If specified, delete_file_system only succeeds if the
            file system's lease is active and matches this ID.
            Required if the file system has an active lease.
        :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
        :keyword ~datetime.datetime if_modified_since:
            A DateTime value. Azure expects the date value passed in to be UTC.
            If timezone is included, any non-UTC datetimes will be converted to UTC.
            If a date is passed in without timezone info, it is assumed to be UTC.
            Specify this header to perform the operation only
            if the resource has been modified since the specified time.
        :keyword ~datetime.datetime if_unmodified_since:
            A DateTime value. Azure expects the date value passed in to be UTC.
            If timezone is included, any non-UTC datetimes will be converted to UTC.
            If a date is passed in without timezone info, it is assumed to be UTC.
            Specify this header to perform the operation only if
            the resource has not been modified since the specified date/time.
        :keyword str etag:
            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
            and act according to the condition specified by the `match_condition` parameter.
        :keyword ~azure.core.MatchConditions match_condition:
            The match condition to use upon the etag.
        :keyword int timeout:
            The timeout parameter is expressed in seconds.
        :rtype: ~azure.storage.filedatalake.aio.FileSystemClient

        .. admonition:: Example:

            .. literalinclude:: ../samples/datalake_samples_service_async.py
                :start-after: [START delete_file_system_from_service_client]
                :end-before: [END delete_file_system_from_service_client]
                :language: python
                :dedent: 8
                :caption: Deleting a file system in the datalake service.
        """
        file_system_client = self.get_file_system_client(file_system)
        await file_system_client.delete_file_system(**kwargs)
        return file_system_client

    def get_file_system_client(
            self, file_system  # type: Union[FileSystemProperties, str]
    ):
        # type: (...) -> FileSystemClient
        """Get a client to interact with the specified file system.

        The file system need not already exist.

        :param file_system:
            The file system. This can either be the name of the file system,
            or an instance of FileSystemProperties.
        :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties
        :returns: A FileSystemClient.
        :rtype: ~azure.storage.filedatalake.aio.FileSystemClient

        .. admonition:: Example:

            .. literalinclude:: ../samples/datalake_samples_file_system_async.py
                :start-after: [START create_file_system_client_from_service]
                :end-before: [END create_file_system_client_from_service]
                :language: python
                :dedent: 8
                :caption: Getting the file system client to interact with a specific file system.
        """
        try:
            file_system_name = file_system.name
        except AttributeError:
            file_system_name = file_system
        # Wrap the shared transport for the child client (presumably so the
        # child's close() does not shut this client's transport down).
        _pipeline = AsyncPipeline(
            transport=AsyncTransportWrapper(self._pipeline._transport),  # pylint: disable=protected-access
            policies=self._pipeline._impl_policies  # pylint: disable=protected-access
        )
        return FileSystemClient(
            self.url, file_system_name, credential=self._raw_credential,
            _configuration=self._config,
            # BUGFIX: pass the wrapped pipeline built above; previously
            # self._pipeline was passed and the wrapper was dead code.
            _pipeline=_pipeline,
            _hosts=self._hosts,
            require_encryption=self.require_encryption,
            key_encryption_key=self.key_encryption_key,
            key_resolver_function=self.key_resolver_function)

    def get_directory_client(
            self, file_system,  # type: Union[FileSystemProperties, str]
            directory  # type: Union[DirectoryProperties, str]
    ):
        # type: (...) -> DataLakeDirectoryClient
        """Get a client to interact with the specified directory.

        The directory need not already exist.

        :param file_system:
            The file system that the directory is in. This can either be the name of the
            file system, or an instance of FileSystemProperties.
        :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties
        :param directory:
            The directory with which to interact. This can either be the name of the directory,
            or an instance of DirectoryProperties.
        :type directory: str or ~azure.storage.filedatalake.DirectoryProperties
        :returns: A DataLakeDirectoryClient.
        :rtype: ~azure.storage.filedatalake.aio.DataLakeDirectoryClient

        .. admonition:: Example:

            .. literalinclude:: ../samples/datalake_samples_service_async.py
                :start-after: [START get_directory_client_from_service_client]
                :end-before: [END get_directory_client_from_service_client]
                :language: python
                :dedent: 8
                :caption: Getting the directory client to interact with a specific directory.
        """
        try:
            file_system_name = file_system.name
        except AttributeError:
            file_system_name = file_system
        try:
            directory_name = directory.name
        except AttributeError:
            directory_name = directory
        # Wrap the shared transport for the child client (presumably so the
        # child's close() does not shut this client's transport down).
        _pipeline = AsyncPipeline(
            transport=AsyncTransportWrapper(self._pipeline._transport),  # pylint: disable=protected-access
            policies=self._pipeline._impl_policies  # pylint: disable=protected-access
        )
        return DataLakeDirectoryClient(
            self.url, file_system_name, directory_name=directory_name,
            credential=self._raw_credential,
            _configuration=self._config,
            # BUGFIX: pass the wrapped pipeline built above; previously
            # self._pipeline was passed and the wrapper was dead code.
            _pipeline=_pipeline,
            _hosts=self._hosts,
            require_encryption=self.require_encryption,
            key_encryption_key=self.key_encryption_key,
            key_resolver_function=self.key_resolver_function)

    def get_file_client(
            self, file_system,  # type: Union[FileSystemProperties, str]
            file_path  # type: Union[FileProperties, str]
    ):
        # type: (...) -> DataLakeFileClient
        """Get a client to interact with the specified file.

        The file need not already exist.

        :param file_system:
            The file system that the file is in. This can either be the name of the file
            system, or an instance of FileSystemProperties.
        :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties
        :param file_path:
            The file with which to interact. This can either be the full path of the file
            (from the root directory), or an instance of FileProperties. eg. directory/subdirectory/file
        :type file_path: str or ~azure.storage.filedatalake.FileProperties
        :returns: A DataLakeFileClient.
        :rtype: ~azure.storage.filedatalake.aio.DataLakeFileClient

        .. admonition:: Example:

            .. literalinclude:: ../samples/datalake_samples_service_async.py
                :start-after: [START get_file_client_from_service_client]
                :end-before: [END get_file_client_from_service_client]
                :language: python
                :dedent: 8
                :caption: Getting the file client to interact with a specific file.
        """
        try:
            file_system_name = file_system.name
        except AttributeError:
            file_system_name = file_system
        try:
            file_path = file_path.name
        except AttributeError:
            pass
        # Wrap the shared transport for the child client (presumably so the
        # child's close() does not shut this client's transport down).
        _pipeline = AsyncPipeline(
            transport=AsyncTransportWrapper(self._pipeline._transport),  # pylint: disable=protected-access
            policies=self._pipeline._impl_policies  # pylint: disable=protected-access
        )
        return DataLakeFileClient(
            self.url, file_system_name, file_path=file_path,
            credential=self._raw_credential,
            _hosts=self._hosts,
            _configuration=self._config,
            # BUGFIX: pass the wrapped pipeline built above; previously
            # self._pipeline was passed and the wrapper was dead code.
            _pipeline=_pipeline,
            require_encryption=self.require_encryption,
            key_encryption_key=self.key_encryption_key,
            key_resolver_function=self.key_resolver_function)
class AzBlobManagerAsync:
    """A utility class to help working with Azure Blob Storage.

    This class implements asynchronous methods based on the Microsoft Python SDK
    azure.storage.blob.aio

    See:
    https://docs.microsoft.com/en-us/python/api/azure-storage-blob/azure.storage.blob.aio?view=azure-python

    Available:
    - Basic methods to work with containers and blobs
    """

    @classmethod
    def create(cls, connection_string=None, account_url=None, credential=None):
        """Instantiate an asynchronous AzBlobManagerAsync object.

        Args:
            connection_string (str): A connection string to an Azure Storage account.
            account_url (str): The URL to the blob storage account. Any other entities
                included in the URL path (e.g. container or blob) will be discarded.
                This URL can be optionally authenticated with a SAS token.
            credential (str): The credentials with which to authenticate.
                This is optional if the account URL already has a SAS token, or the
                connection string already has shared access key values.
                The value can be a SAS token string, an account shared access key, or
                an instance of a TokenCredentials class from azure.identity.
                Credentials provided here will take precedence over those in the
                connection string.

        Returns:
            AzBlobManagerAsync object

        Examples:
            Creating the AzBlobManagerAsync with account url and a shared access key:
            azStorageManager = AzBlobManagerAsync.create(account_url=self.url, credential=self.shared_access_key)

            Creating the AzBlobManagerAsync with a connection string that has the shared access key:
            azStorageManager = AzBlobManagerAsync.create(connection_string='DefaultEndpointsProtocol=http;...')
        """
        # Use cls() so subclasses get an instance of their own type.
        self = cls()
        self.connection_string = connection_string
        self.account_url = account_url
        self.credential = credential
        from azure.storage.blob.aio import BlobServiceClient
        if self.connection_string is not None:
            # Create BlobServiceClient from a connection string.
            self.blob_service_client = BlobServiceClient.from_connection_string(
                conn_str=self.connection_string, credential=self.credential)
        else:
            # Create the BlobServiceClient with account url and credential.
            self.blob_service_client = BlobServiceClient(
                account_url=self.account_url, credential=self.credential)
        return self

    def _logAzureError(self, err=AzureError):
        """Log the first line of an AzureError's message at error level."""
        msg = err.message.split('\n')[0]
        logger.error(f'AzureError error: {msg}')

    async def create_container(self, container_name):
        """Creates a new container.

        Args:
            container_name (str): The name of the container.
                See https://docs.microsoft.com/en-us/rest/api/storageservices/naming-and-referencing-containers--blobs--and-metadata
                for naming convention

        Returns:
            bool: The return value. True for success, False otherwise.
        """
        success = False
        try:
            new_container = await self.blob_service_client.create_container(container_name)
            properties = await new_container.get_container_properties()
            success = properties is not None and properties.name == container_name
        except ResourceExistsError:
            logger.info(f'Container \"{container_name}\" already exists.')
        except AzureError as err:
            self._logAzureError(err=err)
        except Exception:
            logger.exception('')
        return success

    async def delete_container(self, container_name):
        """Deletes a container.

        Args:
            container_name (str): The name of the container.

        Returns:
            bool: The return value. True for success, False otherwise.
        """
        success = False
        try:
            await self.blob_service_client.delete_container(container_name)
            success = True
        except ResourceNotFoundError:
            # BUGFIX: corrected typo in log message ("doesn not" -> "does not").
            logger.info(f'Container \"{container_name}\" does not exist.')
        except AzureError as err:
            self._logAzureError(err=err)
        except Exception:
            logger.exception('')
        return success

    async def _list_containers(self, name_starts_with=None, include_metadata=False):
        """Lists containers.

        Args:
            name_starts_with (str): Filters the results to return only containers
                whose names begin with the specified prefix.
            include_metadata (bool): Specifies that container metadata to be
                returned in the response.

        Returns:
            ItemPaged[ContainerProperties]: An iterable (auto-paging) of ContainerProperties,
            or None on error.
        """
        try:
            containers = []
            async for container in self.blob_service_client.list_containers(
                    name_starts_with=name_starts_with,
                    include_metadata=include_metadata):
                containers.append(container)
            return containers
        except AzureError as err:
            self._logAzureError(err=err)
        except Exception:
            logger.exception('')
        return None

    async def list_containers_name(self, name_starts_with=None):
        """Lists containers' name.

        Args:
            name_starts_with (str): Filters the results to return only containers
                whose names begin with the specified prefix.

        Returns:
            list: A list of strings representing the container names.
        """
        containers_list = []
        containers = await self._list_containers(
            name_starts_with=name_starts_with, include_metadata=False)
        if containers is None:
            return containers_list
        for container in containers:
            containers_list.append(container['name'])
        return containers_list

    async def create_append_blob(self, container_name, blob_name, replace_blob=False):
        """Creates an append blob in an existing container.

        Args:
            container_name (str): The name of the container.
            blob_name (str): The name of the blob.
            replace_blob (bool): If True, deletes existing blob with same name

        Returns:
            bool: The return value. True for success, False otherwise.
        """
        success = False
        try:
            blob_client = self.blob_service_client.get_blob_client(container_name, blob_name)
            # Raises ResourceNotFoundError if the blob does not exist.
            await blob_client.get_blob_properties()
            # Blob exists already; only recreate it when asked to replace.
            if replace_blob is True:
                await blob_client.create_append_blob()
            success = True
        except ResourceNotFoundError:
            await blob_client.create_append_blob()
            success = True
        except AzureError as err:
            self._logAzureError(err=err)
        except Exception:
            logger.exception('')
        return success

    async def create_page_blob(self, container_name, blob_name, size=1024,
                               content_settings=None, metadata=None,
                               premium_page_blob_tier=None):
        """Creates a page blob in an existing container.

        Args:
            container_name (str): The name of the container.
            blob_name (str): The name of the blob.
            size (int): This specifies the maximum size for the page blob, up to 1 TB.
                The page blob size must be aligned to a 512-byte boundary
            content_settings (ContentSettings): ContentSettings object used to set blob properties.
                Used to set content type, encoding, language, disposition, md5, and cache control.
            metadata (dict(str, str)): Name-value pairs associated with the blob as metadata
            premium_page_blob_tier (PremiumPageBlobTier): A page blob tier value to set the blob to

        Returns:
            bool: The return value. True for success, False otherwise.
        """
        success = False
        try:
            blob_client = self.blob_service_client.get_blob_client(container_name, blob_name)
            await blob_client.create_page_blob(size, content_settings, metadata,
                                               premium_page_blob_tier)
            success = True
        except AzureError as err:
            self._logAzureError(err=err)
        except Exception:
            logger.exception('')
        return success

    async def delete_blob(self, container_name, blob_name):
        """Deletes a blob.

        Args:
            container_name (str): The name of the container.
            blob_name (str): The name of the blob.

        Returns:
            bool: The return value. True for success, False otherwise.
        """
        success = False
        try:
            blob_client = self.blob_service_client.get_blob_client(container_name, blob_name)
            await blob_client.delete_blob()
            success = True
        except AzureError as err:
            self._logAzureError(err=err)
        except Exception:
            logger.exception('')
        return success

    async def list_blobs(self, container_name):
        """Lists the blobs in the specified container.

        Args:
            container_name (str): The name of the container.

        Returns:
            list: A list of strings representing the blob names.
        """
        blobs_list = []
        try:
            container_client = self.blob_service_client.get_container_client(container_name)
            async for blob in container_client.list_blobs():
                blobs_list.append(blob)
        except AzureError as err:
            self._logAzureError(err=err)
        except Exception:
            logger.exception('Fatal error')
        return blobs_list

    async def upload_data(self, data, container_name, blob_name, blob_type='BlockBlob'):
        """Creates a new blob from a data source with automatic chunking.

        Args:
            data: The blob data to upload.
            container_name (str): The name of the container.
            blob_name (str): The name of the blob.
            blob_type (str): The type of the blob. This can be
                either BlockBlob, PageBlob or AppendBlob.

        Returns:
            bool: The return value. True for success, False otherwise.
        """
        success = False
        try:
            blob_client = self.blob_service_client.get_blob_client(container_name, blob_name)
            # BUGFIX: forward blob_type -- it was previously accepted but ignored,
            # so every upload silently became a BlockBlob.
            await blob_client.upload_blob(data, blob_type=blob_type)
            success = True
        except AzureError as err:
            self._logAzureError(err=err)
        except Exception:
            logger.exception('')
        return success

    async def append_block(self, data, container_name, blob_name):
        """Commits a new block of data to the end of the existing append blob.

        Args:
            data: Content of the block.
            container_name (str): The name of the container.
            blob_name (str): The name of the blob.

        Returns:
            bool: The return value. True for success, False otherwise.
        """
        success = False
        try:
            blob_client = self.blob_service_client.get_blob_client(container_name, blob_name)
            await blob_client.append_block(data)
            success = True
        except AzureError as err:
            self._logAzureError(err=err)
        except Exception:
            logger.exception('')
        return success

    async def download_data(self, container_name, blob_name):
        """Downloads a blob.

        Args:
            container_name (str): The name of the container.
            blob_name (str): The name of the blob.

        Returns:
            The blob content as bytes, or None on error.
        """
        try:
            blob_client = self.blob_service_client.get_blob_client(container_name, blob_name)
            stream = await blob_client.download_blob()
            return await stream.readall()
        except AzureError as err:
            self._logAzureError(err=err)
        except Exception:
            logger.exception('')
        return None
class StorageContainerTestAsync(StorageTestCase): def setUp(self): super(StorageContainerTestAsync, self).setUp() url = self._get_account_url() credential = self._get_shared_key_credential() self.bsc = BlobServiceClient(url, credential=credential, transport=AiohttpTestTransport()) loop = asyncio.get_event_loop() loop.run_until_complete(self.bsc.__aenter__()) self.test_containers = [] def tearDown(self): if not self.is_playback(): loop = asyncio.get_event_loop() for container_name in self.test_containers: try: container = self.bsc.get_container_client(container_name) loop.run_until_complete(container.delete_container()) except HttpResponseError: try: lease = LeaseClient(container) loop.run_until_complete(lease.break_lease(0)) loop.run_until_complete(container.delete_container()) except: pass except: pass loop.run_until_complete(self.bsc.__aexit__()) return super(StorageContainerTestAsync, self).tearDown() #--Helpers----------------------------------------------------------------- def _get_container_reference(self, prefix=TEST_CONTAINER_PREFIX): container_name = self.get_resource_name(prefix) self.test_containers.append(container_name) return container_name async def _create_container(self, prefix=TEST_CONTAINER_PREFIX): container_name = self._get_container_reference(prefix) container = self.bsc.get_container_client(container_name) try: await container.create_container() except ResourceExistsError: pass return container #--Test cases for containers ----------------------------------------- async def _test_create_container(self): # Arrange container_name = self._get_container_reference() # Act container = self.bsc.get_container_client(container_name) created = await container.create_container() # Assert self.assertTrue(created) @record def test_create_container(self): loop = asyncio.get_event_loop() loop.run_until_complete(self._test_create_container()) async def _test_create_container_with_already_existing_container_fail_on_exist( self): # Arrange container_name = 
self._get_container_reference() # Act container = self.bsc.get_container_client(container_name) created = await container.create_container() with self.assertRaises(HttpResponseError): await container.create_container() # Assert self.assertTrue(created) @record def test_create_container_with_already_existing_container_fail_on_exist( self): loop = asyncio.get_event_loop() loop.run_until_complete( self. _test_create_container_with_already_existing_container_fail_on_exist( )) async def _test_create_container_with_public_access_container(self): # Arrange container_name = self._get_container_reference() # Act container = self.bsc.get_container_client(container_name) created = await container.create_container(public_access='container') # Assert self.assertTrue(created) @record def test_create_container_with_public_access_container(self): loop = asyncio.get_event_loop() loop.run_until_complete( self._test_create_container_with_public_access_container()) async def _test_create_container_with_public_access_blob(self): # Arrange container_name = self._get_container_reference() # Act container = self.bsc.get_container_client(container_name) created = await container.create_container(public_access='blob') blob = container.get_blob_client("blob1") await blob.upload_blob(u'xyz') anonymous_service = BlobClient(self._get_account_url(), container=container_name, blob="blob1") # Assert self.assertTrue(created) await anonymous_service.download_blob() @record def test_create_container_with_public_access_blob(self): loop = asyncio.get_event_loop() loop.run_until_complete( self._test_create_container_with_public_access_blob()) async def _test_create_container_with_metadata(self): # Arrange container_name = self._get_container_reference() metadata = {'hello': 'world', 'number': '42'} # Act container = self.bsc.get_container_client(container_name) created = await container.create_container(metadata) # Assert self.assertTrue(created) md_cr = await container.get_container_properties() md = 
md_cr.metadata self.assertDictEqual(md, metadata) @record def test_create_container_with_metadata(self): loop = asyncio.get_event_loop() loop.run_until_complete(self._test_create_container_with_metadata()) async def _test_container_exists_with_lease(self): # Arrange container = await self._create_container() await container.acquire_lease() # Act exists = await container.get_container_properties() # Assert self.assertTrue(exists) @record def test_container_exists_with_lease(self): loop = asyncio.get_event_loop() loop.run_until_complete(self._test_container_exists_with_lease()) async def _test_unicode_create_container_unicode_name(self): # Arrange container_name = u'啊齄丂狛狜' container = self.bsc.get_container_client(container_name) # Act with self.assertRaises(HttpResponseError): # not supported - container name must be alphanumeric, lowercase await container.create_container() # Assert @record def test_unicode_create_container_unicode_name(self): loop = asyncio.get_event_loop() loop.run_until_complete( self._test_unicode_create_container_unicode_name()) async def _test_list_containers(self): # Arrange container = await self._create_container() # Act containers = [] async for c in self.bsc.list_containers(): containers.append(c) # Assert self.assertIsNotNone(containers) self.assertGreaterEqual(len(containers), 1) self.assertIsNotNone(containers[0]) self.assertNamedItemInContainer(containers, container.container_name) self.assertIsNotNone(containers[0].has_immutability_policy) self.assertIsNotNone(containers[0].has_legal_hold) @record def test_list_containers(self): loop = asyncio.get_event_loop() loop.run_until_complete(self._test_list_containers()) async def _test_list_containers_with_prefix(self): # Arrange container = await self._create_container() # Act containers = [] async for c in self.bsc.list_containers( name_starts_with=container.container_name): containers.append(c) # Assert self.assertIsNotNone(containers) self.assertEqual(len(containers), 1) 
self.assertIsNotNone(containers[0]) self.assertEqual(containers[0].name, container.container_name) self.assertIsNone(containers[0].metadata) @record def test_list_containers_with_prefix(self): loop = asyncio.get_event_loop() loop.run_until_complete(self._test_list_containers_with_prefix()) async def _test_list_containers_with_include_metadata(self): # Arrange container = await self._create_container() metadata = {'hello': 'world', 'number': '42'} resp = await container.set_container_metadata(metadata) # Act containers = [] async for c in self.bsc.list_containers( name_starts_with=container.container_name, include_metadata=True): containers.append(c) # Assert self.assertIsNotNone(containers) self.assertGreaterEqual(len(containers), 1) self.assertIsNotNone(containers[0]) self.assertNamedItemInContainer(containers, container.container_name) self.assertDictEqual(containers[0].metadata, metadata) @record def test_list_containers_with_include_metadata(self): loop = asyncio.get_event_loop() loop.run_until_complete( self._test_list_containers_with_include_metadata()) async def _test_list_containers_with_public_access(self): # Arrange container = await self._create_container() resp = await container.set_container_access_policy( public_access=PublicAccess.Blob) # Act containers = [] async for c in self.bsc.list_containers( name_starts_with=container.container_name): containers.append(c) # Assert self.assertIsNotNone(containers) self.assertGreaterEqual(len(containers), 1) self.assertIsNotNone(containers[0]) self.assertNamedItemInContainer(containers, container.container_name) self.assertEqual(containers[0].public_access, PublicAccess.Blob) @record def test_list_containers_with_public_access(self): loop = asyncio.get_event_loop() loop.run_until_complete( self._test_list_containers_with_public_access()) async def _test_list_containers_with_num_results_and_marker(self): # Arrange prefix = 'listcontainer' container_names = [] for i in range(0, 4): cr = await 
self._create_container(prefix + str(i)) container_names.append(cr.container_name) container_names.sort() # Act generator1 = self.bsc.list_containers(name_starts_with=prefix, results_per_page=2).by_page() containers1 = [] async for c in await generator1.__anext__(): containers1.append(c) generator2 = self.bsc.list_containers( name_starts_with=prefix, results_per_page=2).by_page(generator1.continuation_token) containers2 = [] async for c in await generator2.__anext__(): containers2.append(c) # Assert self.assertIsNotNone(containers1) self.assertEqual(len(containers1), 2) self.assertNamedItemInContainer(containers1, container_names[0]) self.assertNamedItemInContainer(containers1, container_names[1]) self.assertIsNotNone(containers2) self.assertEqual(len(containers2), 2) self.assertNamedItemInContainer(containers2, container_names[2]) self.assertNamedItemInContainer(containers2, container_names[3]) @record def test_list_containers_with_num_results_and_marker(self): loop = asyncio.get_event_loop() loop.run_until_complete( self._test_list_containers_with_num_results_and_marker()) async def _test_set_container_metadata(self): # Arrange metadata = {'hello': 'world', 'number': '43'} container = await self._create_container() # Act await container.set_container_metadata(metadata) md = await container.get_container_properties() metadata_from_response = md.metadata # Assert self.assertDictEqual(metadata_from_response, metadata) @record def test_set_container_metadata(self): loop = asyncio.get_event_loop() loop.run_until_complete(self._test_set_container_metadata()) async def _test_set_container_metadata_with_lease_id(self): # Arrange metadata = {'hello': 'world', 'number': '43'} container = await self._create_container() lease_id = await container.acquire_lease() # Act await container.set_container_metadata(metadata, lease_id) # Assert md = await container.get_container_properties() md = md.metadata self.assertDictEqual(md, metadata) @record def 
test_set_container_metadata_with_lease_id(self): loop = asyncio.get_event_loop() loop.run_until_complete( self._test_set_container_metadata_with_lease_id()) async def _test_set_container_metadata_with_non_existing_container(self): # Arrange container_name = self._get_container_reference() container = self.bsc.get_container_client(container_name) # Act with self.assertRaises(ResourceNotFoundError): await container.set_container_metadata({ 'hello': 'world', 'number': '43' }) # Assert @record def test_set_container_metadata_with_non_existing_container(self): loop = asyncio.get_event_loop() loop.run_until_complete( self._test_set_container_metadata_with_non_existing_container()) async def _test_get_container_metadata(self): # Arrange metadata = {'hello': 'world', 'number': '42'} container = await self._create_container() await container.set_container_metadata(metadata) # Act md_cr = await container.get_container_properties() md = md_cr.metadata # Assert self.assertDictEqual(md, metadata) @record def test_get_container_metadata(self): loop = asyncio.get_event_loop() loop.run_until_complete(self._test_get_container_metadata()) async def _test_get_container_metadata_with_lease_id(self): # Arrange metadata = {'hello': 'world', 'number': '42'} container = await self._create_container() await container.set_container_metadata(metadata) lease_id = await container.acquire_lease() # Act md = await container.get_container_properties(lease_id) md = md.metadata # Assert self.assertDictEqual(md, metadata) @record def test_get_container_metadata_with_lease_id(self): loop = asyncio.get_event_loop() loop.run_until_complete( self._test_get_container_metadata_with_lease_id()) async def _test_get_container_properties(self): # Arrange metadata = {'hello': 'world', 'number': '42'} container = await self._create_container() await container.set_container_metadata(metadata) # Act props = await container.get_container_properties() # Assert self.assertIsNotNone(props) 
self.assertDictEqual(props.metadata, metadata) # self.assertEqual(props.lease.duration, 'infinite') # self.assertEqual(props.lease.state, 'leased') # self.assertEqual(props.lease.status, 'locked') # self.assertEqual(props.public_access, 'container') self.assertIsNotNone(props.has_immutability_policy) self.assertIsNotNone(props.has_legal_hold) @record def test_get_container_properties(self): loop = asyncio.get_event_loop() loop.run_until_complete(self._test_get_container_properties()) async def _test_get_container_properties_with_lease_id(self): # Arrange metadata = {'hello': 'world', 'number': '42'} container = await self._create_container() await container.set_container_metadata(metadata) lease_id = await container.acquire_lease() # Act props = await container.get_container_properties(lease_id) await lease_id.break_lease() # Assert self.assertIsNotNone(props) self.assertDictEqual(props.metadata, metadata) self.assertEqual(props.lease.duration, 'infinite') self.assertEqual(props.lease.state, 'leased') self.assertEqual(props.lease.status, 'locked') @record def test_get_container_properties_with_lease_id(self): loop = asyncio.get_event_loop() loop.run_until_complete( self._test_get_container_properties_with_lease_id()) async def _test_get_container_acl(self): # Arrange container = await self._create_container() # Act acl = await container.get_container_access_policy() # Assert self.assertIsNotNone(acl) self.assertIsNone(acl.get('public_access')) self.assertEqual(len(acl.get('signed_identifiers')), 0) @record def test_get_container_acl(self): loop = asyncio.get_event_loop() loop.run_until_complete(self._test_get_container_acl()) async def _test_get_container_acl_with_lease_id(self): # Arrange container = await self._create_container() lease_id = await container.acquire_lease() # Act acl = await container.get_container_access_policy(lease_id) # Assert self.assertIsNotNone(acl) self.assertIsNone(acl.get('public_access')) @record def 
test_get_container_acl_with_lease_id(self): loop = asyncio.get_event_loop() loop.run_until_complete(self._test_get_container_acl_with_lease_id()) async def _test_set_container_acl(self): # Arrange container = await self._create_container() # Act response = await container.set_container_access_policy() self.assertIsNotNone(response.get('etag')) self.assertIsNotNone(response.get('last_modified')) # Assert acl = await container.get_container_access_policy() self.assertIsNotNone(acl) self.assertEqual(len(acl.get('signed_identifiers')), 0) self.assertIsNone(acl.get('public_access')) @record def test_set_container_acl(self): loop = asyncio.get_event_loop() loop.run_until_complete(self._test_set_container_acl()) async def _test_set_container_acl_with_one_signed_identifier(self): # Arrange from dateutil.tz import tzutc container = await self._create_container() # Act access_policy = AccessPolicy(permission=ContainerPermissions.READ, expiry=datetime.utcnow() + timedelta(hours=1), start=datetime.utcnow()) signed_identifier = {'testid': access_policy} response = await container.set_container_access_policy( signed_identifier) # Assert self.assertIsNotNone(response.get('etag')) self.assertIsNotNone(response.get('last_modified')) @record def test_set_container_acl_with_one_signed_identifier(self): loop = asyncio.get_event_loop() loop.run_until_complete( self._test_set_container_acl_with_one_signed_identifier()) async def _test_set_container_acl_with_lease_id(self): # Arrange container = await self._create_container() lease_id = await container.acquire_lease() # Act await container.set_container_access_policy(lease=lease_id) # Assert acl = await container.get_container_access_policy() self.assertIsNotNone(acl) self.assertIsNone(acl.get('public_access')) @record def test_set_container_acl_with_lease_id(self): loop = asyncio.get_event_loop() loop.run_until_complete(self._test_set_container_acl_with_lease_id()) async def _test_set_container_acl_with_public_access(self): # Arrange 
container = await self._create_container() # Act await container.set_container_access_policy(public_access='container') # Assert acl = await container.get_container_access_policy() self.assertIsNotNone(acl) self.assertEqual('container', acl.get('public_access')) @record def test_set_container_acl_with_public_access(self): loop = asyncio.get_event_loop() loop.run_until_complete( self._test_set_container_acl_with_public_access()) async def _test_set_container_acl_with_empty_signed_identifiers(self): # Arrange container = await self._create_container() # Act await container.set_container_access_policy(signed_identifiers=dict()) # Assert acl = await container.get_container_access_policy() self.assertIsNotNone(acl) self.assertEqual(len(acl.get('signed_identifiers')), 0) self.assertIsNone(acl.get('public_access')) @record def test_set_container_acl_with_empty_signed_identifiers(self): loop = asyncio.get_event_loop() loop.run_until_complete( self._test_set_container_acl_with_empty_signed_identifiers()) async def _test_set_container_acl_with_signed_identifiers(self): # Arrange container = await self._create_container() # Act access_policy = AccessPolicy( permission=ContainerPermissions.READ, expiry=datetime.utcnow() + timedelta(hours=1), start=datetime.utcnow() - timedelta(minutes=1)) identifiers = {'testid': access_policy} await container.set_container_access_policy(identifiers) # Assert acl = await container.get_container_access_policy() self.assertIsNotNone(acl) self.assertEqual('testid', acl.get('signed_identifiers')[0].id) self.assertIsNone(acl.get('public_access')) @record def test_set_container_acl_with_signed_identifiers(self): loop = asyncio.get_event_loop() loop.run_until_complete( self._test_set_container_acl_with_signed_identifiers()) async def _test_set_container_acl_with_empty_identifiers(self): # Arrange container = await self._create_container() identifiers = {i: None for i in range(0, 3)} # Act await container.set_container_access_policy(identifiers) # 
Assert acl = await container.get_container_access_policy() self.assertIsNotNone(acl) self.assertEqual(len(acl.get('signed_identifiers')), 3) self.assertEqual('0', acl.get('signed_identifiers')[0].id) self.assertIsNone(acl.get('signed_identifiers')[0].access_policy) self.assertIsNone(acl.get('public_access')) @record def test_set_container_acl_with_empty_identifiers(self): loop = asyncio.get_event_loop() loop.run_until_complete( self._test_set_container_acl_with_empty_identifiers()) async def _test_set_container_acl_with_three_identifiers(self): # Arrange container = await self._create_container() access_policy = AccessPolicy( permission=ContainerPermissions.READ, expiry=datetime.utcnow() + timedelta(hours=1), start=datetime.utcnow() - timedelta(minutes=1)) identifiers = {i: access_policy for i in range(2)} # Act await container.set_container_access_policy(identifiers) # Assert acl = await container.get_container_access_policy() self.assertIsNotNone(acl) self.assertEqual(len(acl.get('signed_identifiers')), 2) self.assertEqual('0', acl.get('signed_identifiers')[0].id) self.assertIsNotNone(acl.get('signed_identifiers')[0].access_policy) self.assertIsNone(acl.get('public_access')) @record def test_set_container_acl_with_three_identifiers(self): loop = asyncio.get_event_loop() loop.run_until_complete( self._test_set_container_acl_with_three_identifiers()) async def _test_set_container_acl_too_many_ids(self): # Arrange container_name = await self._create_container() # Act identifiers = dict() for i in range(0, 6): identifiers['id{}'.format(i)] = AccessPolicy() # Assert with self.assertRaises(ValueError) as e: await container_name.set_container_access_policy(identifiers) self.assertEqual( str(e.exception), 'Too many access policies provided. The server does not support setting more than 5 access policies on a single resource.' 
) @record def test_set_container_acl_too_many_ids(self): loop = asyncio.get_event_loop() loop.run_until_complete(self._test_set_container_acl_too_many_ids()) async def _test_lease_container_acquire_and_release(self): # Arrange container = await self._create_container() # Act lease = await container.acquire_lease() await lease.release() # Assert @record def test_lease_container_acquire_and_release(self): loop = asyncio.get_event_loop() loop.run_until_complete( self._test_lease_container_acquire_and_release()) async def _test_lease_container_renew(self): # Arrange container = await self._create_container() lease = await container.acquire_lease(lease_duration=15) self.sleep(10) lease_id_start = lease.id # Act await lease.renew() # Assert self.assertEqual(lease.id, lease_id_start) self.sleep(5) with self.assertRaises(HttpResponseError): await container.delete_container() self.sleep(10) await container.delete_container() @record def test_lease_container_renew(self): loop = asyncio.get_event_loop() loop.run_until_complete(self._test_lease_container_renew()) async def _test_lease_container_break_period(self): # Arrange container = await self._create_container() # Act lease = await container.acquire_lease(lease_duration=15) # Assert await lease.break_lease(lease_break_period=5) self.sleep(6) with self.assertRaises(HttpResponseError): await container.delete_container(lease=lease) @record def test_lease_container_break_period(self): loop = asyncio.get_event_loop() loop.run_until_complete(self._test_lease_container_break_period()) async def _test_lease_container_break_released_lease_fails(self): # Arrange container = await self._create_container() lease = await container.acquire_lease() await lease.release() # Act with self.assertRaises(HttpResponseError): await lease.break_lease() # Assert @record def test_lease_container_break_released_lease_fails(self): loop = asyncio.get_event_loop() loop.run_until_complete( self._test_lease_container_break_released_lease_fails()) async 
def _test_lease_container_with_duration(self): # Arrange container = await self._create_container() # Act lease = await container.acquire_lease(lease_duration=15) # Assert with self.assertRaises(HttpResponseError): await container.acquire_lease() self.sleep(15) await container.acquire_lease() @record def test_lease_container_with_duration(self): loop = asyncio.get_event_loop() loop.run_until_complete(self._test_lease_container_with_duration()) async def _test_lease_container_twice(self): # Arrange container = await self._create_container() # Act lease = await container.acquire_lease(lease_duration=15) # Assert lease2 = await container.acquire_lease(lease_id=lease.id) self.assertEqual(lease.id, lease2.id) @record def test_lease_container_twice(self): loop = asyncio.get_event_loop() loop.run_until_complete(self._test_lease_container_twice()) async def _test_lease_container_with_proposed_lease_id(self): # Arrange container = await self._create_container() # Act proposed_lease_id = '55e97f64-73e8-4390-838d-d9e84a374321' lease = await container.acquire_lease(lease_id=proposed_lease_id) # Assert self.assertEqual(proposed_lease_id, lease.id) @record def test_lease_container_with_proposed_lease_id(self): loop = asyncio.get_event_loop() loop.run_until_complete( self._test_lease_container_with_proposed_lease_id()) async def _test_lease_container_change_lease_id(self): # Arrange container = await self._create_container() # Act lease_id = '29e0b239-ecda-4f69-bfa3-95f6af91464c' lease = await container.acquire_lease() lease_id1 = lease.id await lease.change(proposed_lease_id=lease_id) await lease.renew() lease_id2 = lease.id # Assert self.assertIsNotNone(lease_id1) self.assertIsNotNone(lease_id2) self.assertNotEqual(lease_id1, lease_id) self.assertEqual(lease_id2, lease_id) @record def test_lease_container_change_lease_id(self): loop = asyncio.get_event_loop() loop.run_until_complete(self._test_lease_container_change_lease_id()) async def 
_test_delete_container_with_existing_container(self): # Arrange container = await self._create_container() # Act deleted = await container.delete_container() # Assert self.assertIsNone(deleted) @record def test_delete_container_with_existing_container(self): loop = asyncio.get_event_loop() loop.run_until_complete( self._test_delete_container_with_existing_container()) async def _test_delete_container_with_non_existing_container_fail_not_exist( self): # Arrange container_name = self._get_container_reference() container = self.bsc.get_container_client(container_name) # Act with LogCaptured(self) as log_captured: with self.assertRaises(ResourceNotFoundError): await container.delete_container() log_as_str = log_captured.getvalue() #self.assertTrue('ERROR' in log_as_str) @record def test_delete_container_with_non_existing_container_fail_not_exist(self): loop = asyncio.get_event_loop() loop.run_until_complete( self. _test_delete_container_with_non_existing_container_fail_not_exist( )) async def _test_delete_container_with_lease_id(self): # Arrange container = await self._create_container() lease = await container.acquire_lease(lease_duration=15) # Act deleted = await container.delete_container(lease=lease) # Assert self.assertIsNone(deleted) with self.assertRaises(ResourceNotFoundError): await container.get_container_properties() @record def test_delete_container_with_lease_id(self): loop = asyncio.get_event_loop() loop.run_until_complete(self._test_delete_container_with_lease_id()) async def _test_list_names(self): # Arrange container = await self._create_container() data = b'hello world' await (container.get_blob_client('blob1')).upload_blob(data) await (container.get_blob_client('blob2')).upload_blob(data) # Act blobs = [] async for b in container.list_blobs(): blobs.append(b.name) self.assertEqual(blobs, ['blob1', 'blob2']) @record def test_list_names(self): loop = asyncio.get_event_loop() loop.run_until_complete(self._test_list_names()) async def 
_test_list_blobs(self): # Arrange container = await self._create_container() data = b'hello world' cr0 = container.get_blob_client('blob1') await cr0.upload_blob(data) cr1 = container.get_blob_client('blob2') await cr1.upload_blob(data) # Act blobs = [] async for b in container.list_blobs(): blobs.append(b) # Assert self.assertIsNotNone(blobs) self.assertGreaterEqual(len(blobs), 2) self.assertIsNotNone(blobs[0]) self.assertNamedItemInContainer(blobs, 'blob1') self.assertNamedItemInContainer(blobs, 'blob2') self.assertEqual(blobs[0].size, 11) self.assertEqual(blobs[1].content_settings.content_type, 'application/octet-stream') self.assertIsNotNone(blobs[0].creation_time) @record def test_list_blobs(self): loop = asyncio.get_event_loop() loop.run_until_complete(self._test_list_blobs()) async def _test_list_blobs_leased_blob(self): # Arrange container = await self._create_container() data = b'hello world' blob1 = container.get_blob_client('blob1') await blob1.upload_blob(data) lease = await blob1.acquire_lease() # Act resp = [] async for b in container.list_blobs(): resp.append(b) # Assert self.assertIsNotNone(resp) self.assertGreaterEqual(len(resp), 1) self.assertIsNotNone(resp[0]) self.assertNamedItemInContainer(resp, 'blob1') self.assertEqual(resp[0].size, 11) self.assertEqual(resp[0].lease.duration, 'infinite') self.assertEqual(resp[0].lease.status, 'locked') self.assertEqual(resp[0].lease.state, 'leased') @record def test_list_blobs_leased_blob(self): loop = asyncio.get_event_loop() loop.run_until_complete(self._test_list_blobs_leased_blob()) async def _test_list_blobs_with_prefix(self): # Arrange container = await self._create_container() data = b'hello world' c0 = container.get_blob_client('blob_a1') await c0.upload_blob(data) c1 = container.get_blob_client('blob_a2') await c1.upload_blob(data) c2 = container.get_blob_client('blob_b1') await c2.upload_blob(data) # Act resp = [] async for b in container.list_blobs(name_starts_with='blob_a'): resp.append(b) # 
Assert self.assertIsNotNone(resp) self.assertEqual(len(resp), 2) self.assertNamedItemInContainer(resp, 'blob_a1') self.assertNamedItemInContainer(resp, 'blob_a2') @record def test_list_blobs_with_prefix(self): loop = asyncio.get_event_loop() loop.run_until_complete(self._test_list_blobs_with_prefix()) async def _test_list_blobs_with_num_results(self): # Arrange container = await self._create_container() data = b'hello world' c0 = container.get_blob_client('blob_a1') await c0.upload_blob(data) c1 = container.get_blob_client('blob_a2') await c1.upload_blob(data) c2 = container.get_blob_client('blob_a3') await c2.upload_blob(data) c3 = container.get_blob_client('blob_b1') await c3.upload_blob(data) # Act generator = container.list_blobs(results_per_page=2).by_page() blobs = [] async for b in await generator.__anext__(): blobs.append(b) # Assert self.assertIsNotNone(blobs) self.assertEqual(len(blobs), 2) self.assertNamedItemInContainer(generator.current_page, 'blob_a1') self.assertNamedItemInContainer(generator.current_page, 'blob_a2') @record def test_list_blobs_with_num_results(self): loop = asyncio.get_event_loop() loop.run_until_complete(self._test_list_blobs_with_num_results()) async def _test_list_blobs_with_include_snapshots(self): # Arrange container = await self._create_container() data = b'hello world' blob1 = container.get_blob_client('blob1') await blob1.upload_blob(data) await blob1.create_snapshot() await (container.get_blob_client('blob2')).upload_blob(data) # Act blobs = [] async for b in container.list_blobs(include="snapshots"): blobs.append(b) # Assert self.assertEqual(len(blobs), 3) self.assertEqual(blobs[0].name, 'blob1') self.assertIsNotNone(blobs[0].snapshot) self.assertEqual(blobs[1].name, 'blob1') self.assertIsNone(blobs[1].snapshot) self.assertEqual(blobs[2].name, 'blob2') self.assertIsNone(blobs[2].snapshot) @record def test_list_blobs_with_include_snapshots(self): loop = asyncio.get_event_loop() 
loop.run_until_complete(self._test_list_blobs_with_include_snapshots()) async def _test_list_blobs_with_include_metadata(self): # Arrange pytest.skip("Waiting on metadata XML fix in msrest") container = await self._create_container() data = b'hello world' blob1 = container.get_blob_client('blob1') await blob1.upload_blob(data, metadata={'number': '1', 'name': 'bob'}) await blob1.create_snapshot() cr = container.get_blob_client('blob2') await cr.upload_blob(data, metadata={'number': '2', 'name': 'car'}) # Act blobs = [] async for b in container.list_blobs(include="metadata"): blobs.append(b) # Assert self.assertEqual(len(blobs), 2) self.assertEqual(blobs[0].name, 'blob1') self.assertEqual(blobs[0].metadata['number'], '1') self.assertEqual(blobs[0].metadata['name'], 'bob') self.assertEqual(blobs[1].name, 'blob2') self.assertEqual(blobs[1].metadata['number'], '2') self.assertEqual(blobs[1].metadata['name'], 'car') @record def test_list_blobs_with_include_metadata(self): loop = asyncio.get_event_loop() loop.run_until_complete(self._test_list_blobs_with_include_metadata()) async def _test_list_blobs_with_include_uncommittedblobs(self): # Arrange container = await self._create_container() data = b'hello world' blob1 = container.get_blob_client('blob1') await blob1.stage_block('1', b'AAA') await blob1.stage_block('2', b'BBB') await blob1.stage_block('3', b'CCC') blob2 = container.get_blob_client('blob2') await blob2.upload_blob(data, metadata={'number': '2', 'name': 'car'}) # Act blobs = [] async for b in container.list_blobs(include="uncommittedblobs"): blobs.append(b) # Assert self.assertEqual(len(blobs), 2) self.assertEqual(blobs[0].name, 'blob1') self.assertEqual(blobs[1].name, 'blob2') @record def test_list_blobs_with_include_uncommittedblobs(self): loop = asyncio.get_event_loop() loop.run_until_complete( self._test_list_blobs_with_include_uncommittedblobs()) async def _test_list_blobs_with_include_copy(self): # Arrange container = await self._create_container() data 
= b'hello world' await (container.get_blob_client('blob1')).upload_blob( data, metadata={'status': 'original'}) sourceblob = 'https://{0}.blob.core.windows.net/{1}/blob1'.format( self.settings.STORAGE_ACCOUNT_NAME, container.container_name) blobcopy = container.get_blob_client('blob1copy') await blobcopy.start_copy_from_url(sourceblob, metadata={'status': 'copy'}) # Act blobs = [] async for b in container.list_blobs(include="copy"): blobs.append(b) # Assert self.assertEqual(len(blobs), 2) self.assertEqual(blobs[0].name, 'blob1') self.assertEqual(blobs[1].name, 'blob1copy') self.assertEqual(blobs[1].blob_type, blobs[0].blob_type) self.assertEqual(blobs[1].size, 11) self.assertEqual(blobs[1].content_settings.content_type, 'application/octet-stream') self.assertEqual(blobs[1].content_settings.cache_control, None) self.assertEqual(blobs[1].content_settings.content_encoding, None) self.assertEqual(blobs[1].content_settings.content_language, None) self.assertEqual(blobs[1].content_settings.content_disposition, None) self.assertNotEqual(blobs[1].content_settings.content_md5, None) self.assertEqual(blobs[1].lease.status, 'unlocked') self.assertEqual(blobs[1].lease.state, 'available') self.assertNotEqual(blobs[1].copy.id, None) self.assertEqual(blobs[1].copy.source, sourceblob) self.assertEqual(blobs[1].copy.status, 'success') self.assertEqual(blobs[1].copy.progress, '11/11') self.assertNotEqual(blobs[1].copy.completion_time, None) @record def test_list_blobs_with_include_copy(self): loop = asyncio.get_event_loop() loop.run_until_complete(self._test_list_blobs_with_include_copy()) async def _test_list_blobs_with_delimiter(self): # Arrange container = await self._create_container() data = b'hello world' cr0 = container.get_blob_client('a/blob1') await cr0.upload_blob(data) cr1 = container.get_blob_client('a/blob2') await cr1.upload_blob(data) cr2 = container.get_blob_client('b/blob3') await cr2.upload_blob(data) cr4 = container.get_blob_client('blob4') await 
cr4.upload_blob(data) # Act resp = [] async for w in container.walk_blobs(): resp.append(w) # Assert self.assertIsNotNone(resp) self.assertEqual(len(resp), 3) self.assertNamedItemInContainer(resp, 'a/') self.assertNamedItemInContainer(resp, 'b/') self.assertNamedItemInContainer(resp, 'blob4') @record def test_list_blobs_with_delimiter(self): loop = asyncio.get_event_loop() loop.run_until_complete(self._test_list_blobs_with_delimiter()) async def _test_walk_blobs_with_delimiter(self): # Arrange container = await self._create_container() data = b'hello world' cr0 = container.get_blob_client('a/blob1') await cr0.upload_blob(data) cr1 = container.get_blob_client('a/blob2') await cr1.upload_blob(data) cr2 = container.get_blob_client('b/c/blob3') await cr2.upload_blob(data) cr3 = container.get_blob_client('blob4') await cr3.upload_blob(data) blob_list = [] def recursive_walk(prefix): for b in prefix: if b.get('prefix'): recursive_walk(b) else: blob_list.append(b.name) # Act recursive_walk(container.walk_blobs()) # Assert self.assertEqual(len(blob_list), 4) self.assertEqual(blob_list, ['a/blob1', 'a/blob2', 'b/c/blob3', 'blob4']) @pytest.mark.skip def test_walk_blobs_with_delimiter(self): if TestMode.need_recording_file(self.test_mode): return loop = asyncio.get_event_loop() loop.run_until_complete(self._test_walk_blobs_with_delimiter()) async def _test_list_blobs_with_include_multiple(self): # Arrange pytest.skip("Waiting on metadata XML fix in msrest") container = await self._create_container() data = b'hello world' blob1 = container.get_blob_client('blob1') await blob1.upload_blob(data, metadata={'number': '1', 'name': 'bob'}) await blob1.create_snapshot() client = container.get_blob_client('blob2') await client.upload_blob(data, metadata={'number': '2', 'name': 'car'}) # Act blobs = [] async for b in container.list_blobs(include=["snapshots", "metadata"]): blobs.append(b) # Assert self.assertEqual(len(blobs), 3) self.assertEqual(blobs[0].name, 'blob1') 
self.assertIsNotNone(blobs[0].snapshot) self.assertEqual(blobs[0].metadata['number'], '1') self.assertEqual(blobs[0].metadata['name'], 'bob') self.assertEqual(blobs[1].name, 'blob1') self.assertIsNone(blobs[1].snapshot) self.assertEqual(blobs[1].metadata['number'], '1') self.assertEqual(blobs[1].metadata['name'], 'bob') self.assertEqual(blobs[2].name, 'blob2') self.assertIsNone(blobs[2].snapshot) self.assertEqual(blobs[2].metadata['number'], '2') self.assertEqual(blobs[2].metadata['name'], 'car') @record def test_list_blobs_with_include_multiple(self): loop = asyncio.get_event_loop() loop.run_until_complete(self._test_list_blobs_with_include_multiple()) async def _test_shared_access_container(self): # SAS URL is calculated from storage key, so this test runs live only if TestMode.need_recording_file(self.test_mode): return # Arrange container = await self._create_container() blob_name = 'blob1' data = b'hello world' blob = container.get_blob_client(blob_name) await blob.upload_blob(data) token = container.generate_shared_access_signature( expiry=datetime.utcnow() + timedelta(hours=1), permission=ContainerPermissions.READ, ) blob = BlobClient(blob.url, credential=token) # Act response = requests.get(blob.url) # Assert self.assertTrue(response.ok) self.assertEqual(data, response.content) @record def test_shared_access_container(self): loop = asyncio.get_event_loop() loop.run_until_complete(self._test_shared_access_container()) async def _test_web_container_normal_operations_working(self): web_container = "web" # create the web container in case it does not exist yet container = self.bsc.get_container_client(web_container) try: try: created = await container.create_container() self.assertIsNotNone(created) except ResourceExistsError: pass # test if web container exists exist = await container.get_container_properties() self.assertTrue(exist) # create a blob blob_name = self.get_resource_name("blob") blob_content = self.get_random_text_data(1024) blob = 
container.get_blob_client(blob_name) await blob.upload_blob(blob_content) # get a blob blob_data = await (await blob.download_blob()).content_as_bytes() self.assertIsNotNone(blob) self.assertEqual(blob_data.decode('utf-8'), blob_content) finally: # delete container await container.delete_container() @record def test_web_container_normal_operations_working(self): loop = asyncio.get_event_loop() loop.run_until_complete( self._test_web_container_normal_operations_working())