def write_to_blob(output_file_path, output_file):
    blob_client = azureblob.BlockBlobService(
        account_name=args.storageaccount,
        account_key=args.key)
    blob_client.create_blob_from_path(
        args.storagecontainer, output_file, output_file_path)
def _configure_plugin(self):
    """Set up the config file, authenticate the SDK clients and set up
    the log file.
    """
    if not os.path.exists(self._data_dir):
        os.makedirs(self._data_dir)
    config_file = os.path.join(self._data_dir, self._ini_file)
    try:
        self._cfg.read(config_file)
        self._storage = storage.BlockBlobService(
            self._cfg.get('AzureBatch', 'storage_account'),
            self._cfg.get('AzureBatch', 'storage_key'),
            endpoint_suffix=self._cfg.get('AzureBatch', 'storage_suffix'))
        self._storage.MAX_SINGLE_PUT_SIZE = 2 * 1024 * 1024
        credentials = SharedKeyCredentials(
            self._cfg.get('AzureBatch', 'batch_account'),
            self._cfg.get('AzureBatch', 'batch_key'))
        self._client = batch.BatchExtensionsClient(
            credentials,
            base_url=self._cfg.get('AzureBatch', 'batch_url'),
            storage_client=self._storage)
        self._client.threads = self._cfg.getint('AzureBatch', 'threads')
        self._client.config.add_user_agent(self._user_agent)
        self._log = self._configure_logging(
            self._cfg.get('AzureBatch', 'logging'))
    except Exception as exp:
        # We should only worry about this if it happens when authenticating
        # using the UI; otherwise it's expected.
        if self.ui:
            raise ValueError("Invalid Configuration: {}".format(exp))
        else:
            # We'll need a placeholder logger.
            self._log = self._configure_logging(LOG_LEVELS['debug'])
def create_blob_container_saskey(
        storage_settings, container, kind, create_container=False):
    # type: (StorageCredentialsSettings, str, str, bool) -> str
    """Create a SAS key for a blob container with a 7-day expiry time
    :param StorageCredentialsSettings storage_settings: storage settings
    :param str container: container
    :param str kind: ingress or egress
    :param bool create_container: create container
    :rtype: str
    :return: saskey
    """
    blob_client = azureblob.BlockBlobService(
        account_name=storage_settings.account,
        account_key=storage_settings.account_key,
        endpoint_suffix=storage_settings.endpoint)
    if create_container:
        blob_client.create_container(container, fail_on_exist=False)
    if kind == 'ingress':
        perm = azureblob.ContainerPermissions(read=True, list=True)
    elif kind == 'egress':
        perm = azureblob.ContainerPermissions(
            read=True, write=True, delete=True, list=True)
    else:
        raise ValueError('{} type of transfer not supported'.format(kind))
    return blob_client.generate_container_shared_access_signature(
        container, perm,
        expiry=datetime.datetime.utcnow() +
        datetime.timedelta(days=_DEFAULT_SAS_EXPIRY_DAYS)
    )
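# Hedged usage sketch (not from the source): a token returned by
# create_blob_container_saskey can be handed to a client that never sees the
# account key. The container name 'inputs' is a placeholder, and the same
# azureblob alias as the snippets above is assumed.
def example_consume_ingress_sas(storage_settings):
    sas = create_blob_container_saskey(storage_settings, 'inputs', 'ingress')
    sas_client = azureblob.BlockBlobService(
        account_name=storage_settings.account,
        sas_token=sas,
        endpoint_suffix=storage_settings.endpoint)
    # read+list permissions allow enumeration and download, but not upload
    for item in sas_client.list_blobs('inputs'):
        print(item.name)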
def create_storage_clients():
    # type: (None) -> tuple
    """Create storage clients
    :rtype: tuple
    :return: blob_client, table_client, queue_client
    """
    account_name = storage.get_storageaccount()
    account_key = storage.get_storageaccount_key()
    endpoint_suffix = storage.get_storageaccount_endpoint()
    blob_client = azureblob.BlockBlobService(
        account_name=account_name,
        account_key=account_key,
        endpoint_suffix=endpoint_suffix,
    )
    table_client = azuretable.TableService(
        account_name=account_name,
        account_key=account_key,
        endpoint_suffix=endpoint_suffix,
    )
    queue_client = azurequeue.QueueService(
        account_name=account_name,
        account_key=account_key,
        endpoint_suffix=endpoint_suffix,
    )
    return blob_client, table_client, queue_client
def init_from_config(self):
    parsedStorageAccountId = msrestazuretools.parse_resource_id(
        self.storage_account_resource_id)
    self.storage_account = parsedStorageAccountId['name']
    self.storage_mgmt_client = StorageManagementClient(
        self.mgmtCredentials, str(self.subscription_id),
        base_url=self.aad_environment_provider.getResourceManager(
            self.aad_environment_id))
    self.storage_key = self._call(
        self.storage_mgmt_client.storage_accounts.list_keys,
        parsedStorageAccountId['resource_group'],
        self.storage_account).keys[0].value
    self._storage = storage.BlockBlobService(
        self.storage_account, self.storage_key)
    self._storage.MAX_SINGLE_PUT_SIZE = 2 * 1024 * 1024
    # TODO: refactor, move the shared block below into configureClient(client)
    self._client = batch.BatchExtensionsClient(
        self.batchCredentials, base_url=self.batch_url,
        storage_client=self._storage)
    self._client.config.add_user_agent(self._user_agent)
    self._client.threads = self.threads
    self.save_changes()
    self._log = self._configure_logging(self.logging_level)
def create_storage_clients():
    # type: (None) -> tuple
    """Create storage clients
    :rtype: tuple
    :return: blob_client, table_client, queue_client
    """
    account_name = storage.get_storageaccount()
    account_key = storage.get_storageaccount_key()
    if account_key is None:
        raise RuntimeError(
            'No storage account key provided for storage account {}. If '
            'accessing via AAD, ensure that a subscription id is specified '
            'under management in the credentials configuration.'.format(
                account_name))
    endpoint_suffix = storage.get_storageaccount_endpoint()
    blob_client = azureblob.BlockBlobService(
        account_name=account_name,
        account_key=account_key,
        endpoint_suffix=endpoint_suffix,
    )
    table_client = azuretable.TableService(
        account_name=account_name,
        account_key=account_key,
        endpoint_suffix=endpoint_suffix,
    )
    queue_client = azurequeue.QueueService(
        account_name=account_name,
        account_key=account_key,
        endpoint_suffix=endpoint_suffix,
    )
    return blob_client, table_client, queue_client
def azure_service(azure_emulator_coords) -> blob.BlockBlobService:
    # Yield-style pytest fixture: create the container before the test and
    # tear it down afterwards.
    azure_service = blob.BlockBlobService(
        account_name=azure_emulator_coords.account,
        account_key=azure_emulator_coords.key,
        custom_domain=azure_emulator_coords.domain)
    azure_service.create_container(azure_emulator_coords.container)
    yield azure_service
    azure_service.delete_container(azure_emulator_coords.container)
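# Hedged pytest sketch (not from the source) consuming the fixture above;
# the blob name and payload are illustrative, and azure_emulator_coords is
# assumed to be the coordinates fixture implied by the signature.
def test_round_trip(azure_service, azure_emulator_coords):
    azure_service.create_blob_from_bytes(
        azure_emulator_coords.container, 'hello.txt', b'hello')
    names = [item.name for item in
             azure_service.list_blobs(azure_emulator_coords.container)]
    assert 'hello.txt' in names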
def createBlobClient(account_name, account_key):
    blob_client = azureblob.BlockBlobService(
        account_name=account_name, account_key=account_key)
    return blob_client
def _azure_service(self) -> blob.BlockBlobService:
    # Lazily construct and cache the blob service client.
    if self._azure is None:
        self._azure = blob.BlockBlobService(
            account_name=self._coords.account,
            account_key=self._coords.key,
            custom_domain=self._coords.domain)
    return self._azure
def make_blob_client(secrets):
    """Creates a blob client object

    :param secrets: secrets configuration holding either shared-key or
        service-principal credentials
    """
    if secrets.shared_key:
        # Set up SharedKeyCredentials
        blob_client = blob.BlockBlobService(
            account_name=secrets.shared_key.storage_account_name,
            account_key=secrets.shared_key.storage_account_key,
            endpoint_suffix=secrets.shared_key.storage_account_suffix)
    else:
        # Set up ServicePrincipalCredentials
        arm_credentials = ServicePrincipalCredentials(
            client_id=secrets.service_principal.client_id,
            secret=secrets.service_principal.credential,
            tenant=secrets.service_principal.tenant_id,
            resource='https://management.core.windows.net/')
        m = RESOURCE_ID_PATTERN.match(
            secrets.service_principal.storage_account_resource_id)
        accountname = m.group('account')
        subscription = m.group('subscription')
        resourcegroup = m.group('resourcegroup')
        mgmt_client = StorageManagementClient(arm_credentials, subscription)
        key = mgmt_client.storage_accounts.list_keys(
            resource_group_name=resourcegroup,
            account_name=accountname).keys[0].value
        storage_client = CloudStorageAccount(accountname, key)
        blob_client = storage_client.create_block_blob_service()
    return blob_client
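# Hedged sketch (not from the source) of the shared-key branch of
# make_blob_client; SimpleNamespace stands in for whatever config class the
# real project defines, and every value below is a placeholder.
from types import SimpleNamespace

example_secrets = SimpleNamespace(
    shared_key=SimpleNamespace(
        storage_account_name='myaccount',
        storage_account_key='c2VjcmV0a2V5',
        storage_account_suffix='core.windows.net'),
    service_principal=None)
example_client = make_blob_client(example_secrets)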
def upload(self, id, max_size=10):
    """
    Complete the file upload, or clear an existing upload.

    :param id: The resource_id.
    :param max_size: Ignored.
    """
    if self.filename:
        if self.can_use_advanced_azure:
            from azure.storage import blob as azure_blob
            from azure.storage.blob.models import ContentSettings

            blob_service = azure_blob.BlockBlobService(
                self.driver_options['key'],
                self.driver_options['secret']
            )
            content_settings = None
            if self.guess_mimetype:
                content_type, _ = mimetypes.guess_type(self.filename)
                if content_type:
                    content_settings = ContentSettings(
                        content_type=content_type
                    )

            return blob_service.create_blob_from_stream(
                container_name=self.container_name,
                blob_name=self.path_from_filename(
                    id,
                    self.filename
                ),
                stream=self.file_upload,
                content_settings=content_settings
            )
        else:
            self.container.upload_object_via_stream(
                self.file_upload,
                object_name=self.path_from_filename(
                    id,
                    self.filename
                )
            )
    elif self._clear and self.old_filename and not self.leave_files:
        # This is only set when a previously-uploaded file is replaced
        # by a link. We want to delete the previously-uploaded file.
        try:
            self.container.delete_object(
                self.container.get_object(
                    self.path_from_filename(
                        id,
                        self.old_filename
                    )
                )
            )
        except ObjectDoesNotExistError:
            # It's possible for the object to have already been deleted, or
            # for it to not yet exist in a committed state due to an
            # outstanding lease.
            return
def create_store():
    blob_client = asb.BlockBlobService(is_emulated=True)
    blob_client.delete_container('test')
    blob_client.create_container('test')
    store = ABSStore(container='test', prefix='zarrtesting/',
                     account_name='foo', account_key='bar',
                     blob_service_kwargs={'is_emulated': True})
    store.rmdir()
    return store, None
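# Hedged sketch (not from the source) of exercising the store returned above;
# assumes zarr is installed and a local storage emulator (e.g. Azurite) is
# running, since is_emulated=True means the dummy 'foo'/'bar' credentials are
# never validated.
import zarr

store, _ = create_store()
root = zarr.group(store=store)
root.zeros('x', shape=(100,), chunks=(10,))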
def execute_sample(global_config, sample_config):
    """Executes the sample with the specified configurations.

    :param global_config: The global configuration to use.
    :type global_config: `configparser.ConfigParser`
    :param sample_config: The sample specific configuration to use.
    :type sample_config: `configparser.ConfigParser`
    """
    credentials = batchauth.SharedKeyCredentials(batch_account_name,
                                                 batch_account_key)
    # credentials = ServicePrincipalCredentials(
    #     client_id=aad_client_id,
    #     secret=aad_client_secret,
    #     tenant=aad_tenant_id,
    #     resource="https://batch.core.windows.net/"
    # )
    batch_client = batch.BatchServiceClient(
        credentials, batch_url=batch_service_url)

    # Retry 5 times -- default is 3
    batch_client.config.retry_policy.retries = 5

    block_blob_client = azureblob.BlockBlobService(
        account_name=storage_account_name,
        account_key=storage_account_key,
        endpoint_suffix=storage_account_suffix)

    job_id = common.helpers.generate_unique_resource_name(
        "poolsandresourcefilesjob")

    try:
        create_pool(batch_client, block_blob_client, pool_id, pool_vm_size,
                    pool_vm_count)
        submit_job_and_add_task(batch_client, block_blob_client, job_id,
                                pool_id, storage_account_name)
        common.helpers.wait_for_tasks_to_complete(
            batch_client, job_id, datetime.timedelta(minutes=25))
        tasks = batch_client.task.list(job_id)
        task_ids = [task.id for task in tasks]
        common.helpers.print_task_output(batch_client, job_id, task_ids)
        print("Completed job_id:" + job_id)
    finally:
        # Clean up
        if should_delete_container:
            block_blob_client.delete_container(_RESOURCE_CONTAINER_NAME,
                                               fail_not_exist=False)
        if should_delete_job:
            print("Deleting job: ", job_id)
            batch_client.job.delete(job_id)
        if should_delete_pool:
            print("Deleting pool: ", pool_id)
            batch_client.pool.delete(pool_id)
def upload_bytes_to_azure(coords: StorageCoordinates, data: bytes,
                          filename: str) -> bool:
    azure_service = blob.BlockBlobService(account_name=coords.account,
                                          account_key=coords.key,
                                          custom_domain=coords.domain)
    azure_service.create_blob_from_bytes(coords.container, filename, data)
    # Use 'item' rather than 'blob' to avoid shadowing the imported module.
    return filename in [
        item.name for item in azure_service.list_blobs(coords.container)
    ]
def touch_one_file_in_azure(azure_emulator: transform.AzureCoordinates,
                            energuide_zip_fixture: str) -> None:
    file_z = zipfile.ZipFile(energuide_zip_fixture)
    service = blob.BlockBlobService(account_name=azure_emulator.account,
                                    account_key=azure_emulator.key,
                                    custom_domain=azure_emulator.domain)
    json_file = [file_z.open(zipinfo) for zipinfo in file_z.infolist()][1]
    service.create_blob_from_bytes(azure_emulator.container, json_file.name,
                                   json_file.read())
def init_blob_service(credentials):
    # Initialize the blob service from the Azure SDK
    blobService = blob.BlockBlobService(
        account_name=credentials["accountName"],
        account_key=None,
        sas_token=credentials["sasToken"])
    # Load the public key for the encryption. The key resolver object
    # implements a specific interface defined by the Azure SDK. This is
    # unnecessary overhead since we only have the one public key, but
    # there's no way around it.
    blobService.key_encryption_key, blobService.key_resolver_function = \
        init_key_resolver(credentials=credentials)
    # Lower the upload chunk sizes so the progress callback gets called more
    # frequently; this may also improve the robustness of the upload.
    blobService.MAX_SINGLE_PUT_SIZE = 4 * 1024 * 1024
    blobService.MAX_BLOCK_SIZE = 4 * 1024 * 1024
    return blobService
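# Hedged usage sketch (not from the source): with MAX_SINGLE_PUT_SIZE and
# MAX_BLOCK_SIZE lowered to 4 MiB above, the legacy SDK's
# progress_callback(current, total) hook fires roughly once per block. The
# container and file names below are placeholders.
def example_upload_with_progress(credentials):
    def report(current, total):
        print('uploaded {} of {} bytes'.format(current, total))

    service = init_blob_service(credentials)
    service.create_blob_from_path(
        'backups', 'archive.tar.gz', '/tmp/archive.tar.gz',
        progress_callback=report)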
def __init__(self, config_filename=None, **kwargs):
    self.batch_account_url = os.environ['AZURE_BATCH_URL']
    self.storage_account_name = os.environ['AZURE_STORAGE_ACCOUNT']
    self.client_id = os.environ['CLIENT_ID']
    self.tenant_id = os.environ['TENANT_ID']
    self.secret_key = os.environ['SECRET_KEY']
    self.storage_account_key = _get_blob_key(self.storage_account_name)

    with open(config_filename) as f:
        self.config = yaml.safe_load(f)

    self.logger = logging.getLogger('pypeliner.execqueue.azure_batch')
    self.run_id = _random_string(8)

    self.logger.info('creating blob client')
    self.blob_client = azureblob.BlockBlobService(
        account_name=self.storage_account_name,
        account_key=self.storage_account_key)

    self.credentials = ServicePrincipalCredentials(
        client_id=self.client_id,
        secret=self.secret_key,
        tenant=self.tenant_id,
        resource="https://batch.core.windows.net/")

    self.logger.info('creating batch client')
    self.batch_client = batch.BatchServiceClient(
        self.credentials, base_url=self.batch_account_url)

    self.logger.info('creating task container')
    self.container_name = self.config['storage_container_name']
    self.blob_client.create_container(self.container_name)

    self.compute_start_commands = self.config['compute_start_commands']
    self.compute_run_command = self.config['compute_run_command']
    self.compute_finish_commands = self.config['compute_finish_commands']

    self.no_delete_pool = self.config.get('no_delete_pool', False)
    self.no_delete_job = self.config.get('no_delete_job', False)

    self.job_names = {}
    self.job_task_ids = {}
    self.job_temps_dir = {}
    self.job_blobname_prefix = {}

    self.most_recent_transition_time = None
    self.completed_task_ids = set()
    self.running_task_ids = set()
def get_blob_client(storage_account_name):
    storage_account_key = get_storage_account_key(
        storage_account_name,
        client_id=os.environ["CLIENT_ID"],
        secret_key=os.environ["SECRET_KEY"],
        tenant_id=os.environ["TENANT_ID"],
        keyvault_account=os.environ['AZURE_KEYVAULT_ACCOUNT'])
    blob_client = azureblob.BlockBlobService(
        account_name=storage_account_name,
        account_key=storage_account_key)
    return blob_client
def execute_script(global_config, script_config):
    """Executes the script with the specified configurations.

    :param global_config: The global configuration to use.
    :type global_config: `configparser.ConfigParser`
    :param script_config: The script specific configuration to use.
    :type script_config: `configparser.ConfigParser`
    """
    # Set up the configuration
    batch_account_key = global_config.get('Batch', 'batchaccountkey')
    batch_account_name = global_config.get('Batch', 'batchaccountname')
    batch_service_url = global_config.get('Batch', 'batchserviceurl')
    storage_account_key = global_config.get('Storage', 'storageaccountkey')
    storage_account_name = global_config.get('Storage', 'storageaccountname')

    # Coming from the script-specific config
    pool_id = script_config.get('DEFAULT', 'pool_id')
    pool_node_count = script_config.getint('DEFAULT', 'pool_node_count')
    vm_size = script_config.get('DEFAULT', 'vm_size')
    distro = script_config.get('DEFAULT', 'distribution')
    version = script_config.get('DEFAULT', 'version')

    # Print the settings we are running with
    common.helpers.print_configuration(global_config)
    common.helpers.print_configuration(script_config)

    credentials = batchauth.SharedKeyCredentials(batch_account_name,
                                                 batch_account_key)
    batch_client = batch.BatchServiceClient(credentials,
                                            base_url=batch_service_url)
    blob_client = azureblob.BlockBlobService(
        account_name=storage_account_name,
        account_key=storage_account_key)

    # Upload the script to a blob container for the pool/node
    bcplatform_container_name = 'bcp'
    application_file_paths = [os.path.realpath('bcpscript.sh')]
    blob_client.create_container(bcplatform_container_name,
                                 fail_on_exist=False)
    resource_files = [
        common.helpers.upload_file_to_container(
            blob_client, bcplatform_container_name, file_path)
        for file_path in application_file_paths
    ]

    try:
        changePoolSettings(pool_id, vm_size, distro, version,
                           pool_node_count, resource_files, batch_client)
    except Exception as e:
        print('error/exception: ' + str(e))
def run(config):
    batch_account_key = config.get('Batch', 'batchaccountkey')
    batch_account_name = config.get('Batch', 'batchaccountname')
    batch_service_url = config.get('Batch', 'batchserviceurl')

    storage_account_key = config.get('Storage', 'storageaccountkey')
    storage_account_name = config.get('Storage', 'storageaccountname')
    storage_account_suffix = config.get('Storage', 'storageaccountsuffix')

    delete_container = config.getboolean('Slicer', 'deletecontainer')
    delete_job = config.getboolean('Slicer', 'deletejob')
    delete_pool = config.getboolean('Slicer', 'deletepool')
    pool_vm_size = config.get('Slicer', 'poolvmsize')
    pool_vm_count = config.getint('Slicer', 'poolvmcount')

    credentials = batchauth.SharedKeyCredentials(batch_account_name,
                                                 batch_account_key)
    batch_client = batch.BatchServiceClient(credentials,
                                            base_url=batch_service_url)
    block_blob_client = azureblob.BlockBlobService(
        account_name=storage_account_name,
        account_key=storage_account_key,
        endpoint_suffix=storage_account_suffix)

    pool_id = "SlicerPool"
    job_id = generate_unique_resource_name("SliceJob")

    try:
        create_pool(batch_client, block_blob_client, pool_id, pool_vm_size,
                    pool_vm_count)
        submit_job_and_add_task(batch_client, block_blob_client, job_id,
                                pool_id)
        wait_for_tasks_to_complete(batch_client, job_id,
                                   datetime.timedelta(minutes=25))
        tasks = batch_client.task.list(job_id)
        task_ids = [task.id for task in tasks]
        print_task_output(batch_client, job_id, task_ids)
    finally:
        if delete_container:
            block_blob_client.delete_container(CONTAINER_NAME,
                                               fail_not_exist=False)
        if delete_job:
            print("Deleting job: ", job_id)
            batch_client.job.delete(job_id)
        if delete_pool:
            print("Deleting pool: ", pool_id)
            batch_client.pool.delete(pool_id)
def __init__(self, azure_container: Text, azure_account_name: Text,
             azure_account_key: Text) -> None:
    from azure.storage import blob as azureblob

    super(AzurePersistor, self).__init__()
    self.blob_client = azureblob.BlockBlobService(
        account_name=azure_account_name,
        account_key=azure_account_key,
        endpoint_suffix="core.windows.net")
    self._ensure_container_exists(azure_container)
    self.container_name = azure_container
def __init__(self, azure_container, azure_account_name, azure_account_key):
    from azure.storage import blob as azureblob

    super(AzurePersistor, self).__init__()
    self.blob_client = azureblob.BlockBlobService(
        account_name=azure_account_name,
        account_key=azure_account_key,
        endpoint_suffix="core.windows.net")
    self._ensure_container_exists(azure_container)
    self.container_name = azure_container
def _create_credentials() -> tuple:
    """Create storage credentials
    :rtype: tuple
    :return: (blob_client, table_client)
    """
    sa, ep, sakey = os.environ['SHIPYARD_STORAGE_ENV'].split(':')
    blob_client = azureblob.BlockBlobService(
        account_name=sa, account_key=sakey, endpoint_suffix=ep)
    table_client = azuretable.TableService(
        account_name=sa, account_key=sakey, endpoint_suffix=ep)
    return blob_client, table_client
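# Hedged sketch (not from the source) of the expected environment format:
# account name, endpoint suffix, and key packed into one colon-separated
# value (account keys are base64-encoded, so they contain no colons and the
# three-way split is safe). All values below are placeholders.
os.environ['SHIPYARD_STORAGE_ENV'] = 'myaccount:core.windows.net:c2VjcmV0a2V5'
blob_client, table_client = _create_credentials()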
def generate_blob_container_uri(storage_settings, container):
    # type: (StorageCredentialsSettings, str) -> str
    """Create a uri to a blob container
    :param StorageCredentialsSettings storage_settings: storage settings
    :param str container: container
    :rtype: str
    :return: blob container uri
    """
    blob_client = azureblob.BlockBlobService(
        account_name=storage_settings.account,
        account_key=storage_settings.account_key,
        endpoint_suffix=storage_settings.endpoint)
    return '{}://{}/{}'.format(
        blob_client.protocol, blob_client.primary_endpoint, container)
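# Hedged sketch (not from the source) combining the two helpers above into a
# shareable egress URL; appending the SAS token as a query string is the
# standard Azure convention, and 'outputs' is a placeholder container name.
def example_shareable_egress_url(storage_settings):
    uri = generate_blob_container_uri(storage_settings, 'outputs')
    sas = create_blob_container_saskey(storage_settings, 'outputs', 'egress')
    return '{}?{}'.format(uri, sas)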
def __init__(self, storage_account_name, client_id, tenant_id, secret_key,
             keyvault_account, storage_account_key=None, mq_username=None,
             mq_password=None, mq_ip=None, mq_vhost=None):
    """Abstraction around all Azure blob related methods

    :param storage_account_name: storage account name
    :type storage_account_name: str
    :param client_id: Azure AD client id (application id)
    :type client_id: str
    :param tenant_id: Azure AD tenant id
    :type tenant_id: str
    :param secret_key: secret key for the app
    :type secret_key: str
    :param keyvault_account: keyvault account for pulling storage keys
    :type keyvault_account: str
    :param storage_account_key: storage account key
    :type storage_account_key: str
    :param mq_username: rabbitmq username
    :type mq_username: str
    :param mq_password: rabbitmq password
    :type mq_password: str
    :param mq_ip: rabbitmq IP address
    :type mq_ip: str
    :param mq_vhost: rabbitmq vhost
    :type mq_vhost: str
    """
    self.account_name = storage_account_name
    self.mq_username = mq_username
    self.mq_password = mq_password
    self.mq_ip = mq_ip
    self.mq_vhost = mq_vhost
    self.logger = self.__get_logger(add_azure_filter=True)
    if not storage_account_key:
        storage_account_key = self.__get_storage_account_key(
            storage_account_name, client_id, secret_key, tenant_id,
            keyvault_account)
    self.blob_client = azureblob.BlockBlobService(
        account_name=storage_account_name,
        account_key=storage_account_key)
def create_clients():
    # type: (None) -> tuple
    """Create storage clients
    :rtype: tuple
    :return: blob_client, queue_client, table_client
    """
    blob_client = azureblob.BlockBlobService(
        account_name=_STORAGEACCOUNT,
        account_key=_STORAGEACCOUNTKEY,
        endpoint_suffix=_STORAGEACCOUNTEP)
    queue_client = azurequeue.QueueService(
        account_name=_STORAGEACCOUNT,
        account_key=_STORAGEACCOUNTKEY,
        endpoint_suffix=_STORAGEACCOUNTEP)
    table_client = azuretable.TableService(
        account_name=_STORAGEACCOUNT,
        account_key=_STORAGEACCOUNTKEY,
        endpoint_suffix=_STORAGEACCOUNTEP)
    return blob_client, queue_client, table_client
def upload_blob(local_path, file_name):
    """Upload a file to blob storage.

    :param local_path: path of the file written out locally
    :param file_name: image name
    :return:
    """
    # Blob settings (placeholder credentials)
    account_name = 'xxxx'
    account_key = 'xxxxx'
    container_name = '$web'
    block_blob_service = azblob.BlockBlobService(account_name=account_name,
                                                 account_key=account_key)
    block_blob_service.create_blob_from_path(container_name, file_name,
                                             local_path)
def make_blob_client(blob_config: BlobConfig):
    """Creates a blob client object

    :param BlobConfig blob_config: blob configuration carrying the storage
        account name, key, and endpoint suffix
    """
    # Validate blob config
    _validate_blob_config(blob_config)

    # Set up BlockBlobService
    blob_client = blob.BlockBlobService(
        account_name=blob_config.account_name,
        account_key=blob_config.account_key,
        endpoint_suffix=blob_config.account_suffix)

    return blob_client
def _fix_cors(args):
    cs = CloudStorage()
    if cs.can_use_advanced_azure:
        from azure.storage import blob as azure_blob
        from azure.storage import CorsRule

        blob_service = azure_blob.BlockBlobService(
            cs.driver_options['key'],
            cs.driver_options['secret'])
        blob_service.set_blob_service_properties(cors=[
            CorsRule(allowed_origins=args['<domains>'],
                     allowed_methods=['GET'])
        ])
        print('Done!')
    else:
        print('The driver {driver_name} being used does not currently'
              ' support updating CORS rules through'
              ' cloudstorage.'.format(driver_name=cs.driver_name))
def _split(self, azure_path):
    if azure_path.count('/') == 2:
        root, account_name, container_name = azure_path.split('/')
        sub_path = None
    else:
        root, account_name, container_name, sub_path = azure_path.split(
            '/', 3)
    assert root == '', "Expect path to start with '/'"
    if account_name not in self._account_name_to_block_blob_service:
        assert account_name in self._account_name_to_key, \
            "Don't know key for account_name '{0}'".format(account_name)
        self._account_name_to_block_blob_service[account_name] = \
            azureblob.BlockBlobService(
                account_name=account_name,
                account_key=self._account_name_to_key[account_name])
    block_blob_service = self._account_name_to_block_blob_service[
        account_name]
    self._create_container(block_blob_service, container_name)
    return block_blob_service, container_name, sub_path
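# Hedged sketch (not from the source) of the path grammar _split expects:
# '/account/container' or '/account/container/sub/path'. 'store' and all
# names below are hypothetical.
def example_split(store):
    # '/myaccount/mycontainer'         -> (service, 'mycontainer', None)
    # '/myaccount/mycontainer/a/b.txt' -> (service, 'mycontainer', 'a/b.txt')
    return store._split('/myaccount/mycontainer/a/b.txt')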