def validator(namespace):
    # must run certain validators first for an update
    if update:
        validate_client_parameters(namespace)
    if update and _class_name(settings_class) == _class_name(FileContentSettings):
        get_file_path_validator()(namespace)
    ns = vars(namespace)

    # retrieve the existing object properties for an update
    if update:
        account = ns.get('account_name')
        key = ns.get('account_key')
        cs = ns.get('connection_string')
        sas = ns.get('sas_token')
        if _class_name(settings_class) == _class_name(BlobContentSettings):
            client = get_data_service_client(BaseBlobService, account, key, cs, sas)
            container = ns.get('container_name')
            blob = ns.get('blob_name')
            lease_id = ns.get('lease_id')
            props = client.get_blob_properties(container, blob, lease_id=lease_id).properties.content_settings  # pylint: disable=line-too-long
        elif _class_name(settings_class) == _class_name(FileContentSettings):
            client = get_data_service_client(FileService, account, key, cs, sas)  # pylint: disable=redefined-variable-type
            share = ns.get('share_name')
            directory = ns.get('directory_name')
            filename = ns.get('file_name')
            props = client.get_file_properties(share, directory, filename).properties.content_settings  # pylint: disable=line-too-long

    # create new properties
    new_props = settings_class(
        content_type=ns.pop('content_type', None),
        content_disposition=ns.pop('content_disposition', None),
        content_encoding=ns.pop('content_encoding', None),
        content_language=ns.pop('content_language', None),
        content_md5=ns.pop('content_md5', None),
        cache_control=ns.pop('content_cache_control', None)
    )

    # if update, fill in any None values with existing
    if update:
        new_props.content_type = new_props.content_type or props.content_type
        new_props.content_disposition = new_props.content_disposition \
            or props.content_disposition
        new_props.content_encoding = new_props.content_encoding or props.content_encoding
        new_props.content_language = new_props.content_language or props.content_language
        new_props.content_md5 = new_props.content_md5 or props.content_md5
        new_props.cache_control = new_props.cache_control or props.cache_control

    ns['content_settings'] = new_props
    namespace = argparse.Namespace(**ns)
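# The free variables `update` and `settings_class` above indicate this validator is
# produced by a factory closure. A minimal sketch of that pattern follows, assuming a
# hypothetical factory name `get_content_setting_validator` (the snippet itself does
# not show the enclosing function):
def get_content_setting_validator(settings_class, update):
    def validator(namespace):
        ...  # body as above, closing over `settings_class` and `update`
    return validator

# Hypothetical wiring: the returned closure plugs in wherever argparse-style
# validators are accepted, e.g.
# validator=get_content_setting_validator(BlobContentSettings, update=True)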
def get_boot_log(resource_group_name, vm_name):
    import sys
    from azure.cli.core._profile import CLOUD
    from azure.storage.blob import BlockBlobService

    client = _compute_client_factory()

    virtual_machine = client.virtual_machines.get(
        resource_group_name,
        vm_name,
        expand='instanceView')  # pylint: disable=no-member

    if (not virtual_machine.instance_view.boot_diagnostics or
            not virtual_machine.instance_view.boot_diagnostics.serial_console_log_blob_uri):
        raise CLIError('Please enable boot diagnostics.')

    blob_uri = virtual_machine.instance_view.boot_diagnostics.serial_console_log_blob_uri

    # Find storage account for diagnostics
    storage_mgmt_client = _get_storage_management_client()
    if not blob_uri:
        raise CLIError('No console log available')
    try:
        storage_accounts = storage_mgmt_client.storage_accounts.list()
        matching_storage_account = (a for a in list(storage_accounts)
                                    if blob_uri.startswith(a.primary_endpoints.blob))
        storage_account = next(matching_storage_account)
    except StopIteration:
        raise CLIError('Failed to find storage account for console log file')

    regex = r'/subscriptions/[^/]+/resourceGroups/(?P<rg>[^/]+)/.+'
    match = re.search(regex, storage_account.id, re.I)
    rg = match.group('rg')

    # Get account key
    keys = storage_mgmt_client.storage_accounts.list_keys(rg, storage_account.name)

    # Extract container and blob name from url...
    container, blob = urlparse(blob_uri).path.split('/')[-2:]

    storage_client = get_data_service_client(
        BlockBlobService,
        storage_account.name,
        keys.key1,
        endpoint_suffix=CLOUD.suffixes.storage_endpoint)  # pylint: disable=no-member

    class StreamWriter(object):  # pylint: disable=too-few-public-methods
        def __init__(self, out):
            self.out = out

        def write(self, str_or_bytes):
            if isinstance(str_or_bytes, bytes):
                self.out.write(str_or_bytes.decode())
            else:
                self.out.write(str_or_bytes)

    # Our stream writer is not seekable, so disable parallel downloading.
    storage_client.get_blob_to_stream(container, blob, StreamWriter(sys.stdout),
                                      max_connections=1)
def get_storage_data_service_client(service, name=None, key=None, connection_string=None,
                                    sas_token=None):
    return get_data_service_client(service, name, key, connection_string, sas_token,
                                   endpoint_suffix=CLOUD.suffixes.storage_endpoint)
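# Hedged usage sketch for the wrapper above: it forwards to get_data_service_client
# while pinning endpoint_suffix to the active cloud. The service class and the
# credential values below are assumptions for illustration only.
from azure.storage.blob import BlockBlobService

blob_client = get_storage_data_service_client(
    BlockBlobService,
    name='mystorageaccount',  # hypothetical account name
    key='<account-key>')      # hypothetical key; connection_string or sas_token also work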
def _get_storage_client(self, storage_account_name, key):
    BlockBlobService = get_sdk(self._cmd.cli_ctx, ResourceType.DATA_STORAGE,
                               'blob.blockblobservice#BlockBlobService')
    return get_data_service_client(
        self._cmd.cli_ctx,
        BlockBlobService,
        storage_account_name,
        key,
        endpoint_suffix=self._cmd.cli_ctx.cloud.suffixes.storage_endpoint)  # pylint: disable=no-member
def generic_data_service_factory(service, name=None, key=None, connection_string=None,
                                 sas_token=None):
    try:
        return get_data_service_client(service, name, key, connection_string, sas_token)
    except ValueError as val_exception:
        message = str(val_exception)
        if message == _ERROR_STORAGE_MISSING_INFO:
            message = NO_CREDENTIALS_ERROR_MESSAGE
        raise CLIError(message)
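# Sketch of the error mapping above, under the assumption that constructing a client
# with no credentials makes the SDK raise ValueError(_ERROR_STORAGE_MISSING_INFO):
try:
    client = generic_data_service_factory(BaseBlobService)  # no account, key, or SAS given
except CLIError as ex:
    print(ex)  # surfaces the friendlier NO_CREDENTIALS_ERROR_MESSAGE text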
def validate_public_access(namespace):
    if namespace.public_access:
        namespace.public_access = public_access_types[namespace.public_access.lower()]

        if hasattr(namespace, 'signed_identifiers'):
            # must retrieve the existing ACL to simulate a patch operation because these calls
            # are needlessly conflated
            ns = vars(namespace)
            validate_client_parameters(namespace)
            account = ns.get('account_name')
            key = ns.get('account_key')
            cs = ns.get('connection_string')
            sas = ns.get('sas_token')
            client = get_data_service_client(BaseBlobService, account, key, cs, sas)
            container = ns.get('container_name')
            lease_id = ns.get('lease_id')
            ns['signed_identifiers'] = client.get_container_acl(container, lease_id=lease_id)
def _check_table_and_content(self, storage_account_name, key, table_name, filter_string,
                             timeout_in_minutes):
    import time
    sleep_period = 15
    TableService = get_sdk(self._cmd.cli_ctx, ResourceType.DATA_COSMOS_TABLE,
                           'table#TableService')
    table_client = get_data_service_client(
        self._cmd.cli_ctx,
        TableService,
        storage_account_name,
        key,
        endpoint_suffix=self._cmd.cli_ctx.cloud.suffixes.storage_endpoint)

    seconds = 60 * timeout_in_minutes
    waited = 0
    while waited < seconds:
        entities = table_client.query_entities(table_name, filter_string)
        if entities.items:
            return True
        logger.warning("\t\t\tWaiting %s seconds for data to propagate to table '%s' ...",
                       sleep_period, table_name)
        time.sleep(sleep_period)
        waited += sleep_period

    return False
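# Hypothetical call site for the polling helper above: wait up to five minutes for a
# matching row to appear. The table name and OData filter below are invented for
# illustration; `self` would be the command/test object that owns the helper.
found = self._check_table_and_content(
    'mystorageaccount', '<account-key>',
    'WADPerformanceCountersTable',
    "PartitionKey eq '0000000000000001'",
    timeout_in_minutes=5)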
def get_storage_data_service_client(cli_ctx, service, name=None, key=None, connection_string=None,
                                    sas_token=None, socket_timeout=None, token_credential=None):
    return get_data_service_client(cli_ctx, service, name, key, connection_string, sas_token,
                                   socket_timeout=socket_timeout,
                                   token_credential=token_credential,
                                   endpoint_suffix=cli_ctx.cloud.suffixes.storage_endpoint)
def _get_client(service, parsed_args):
    account_name = parsed_args.account_name or az_config.get('storage', 'account', None)
    account_key = parsed_args.account_key or az_config.get('storage', 'key', None)
    connection_string = parsed_args.connection_string or \
        az_config.get('storage', 'connection_string', None)
    sas_token = parsed_args.sas_token or az_config.get('storage', 'sas_token', None)
    return get_data_service_client(service, account_name, account_key, connection_string,
                                   sas_token)
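# Hedged usage sketch: `parsed_args` is whatever argparse produced for the command,
# and any value left as None falls back to the [storage] section of the az config.
# The namespace values and service class are assumptions for illustration.
import argparse

parsed_args = argparse.Namespace(account_name=None, account_key=None,
                                 connection_string=None, sas_token='<sas-token>')
client = _get_client(BaseBlobService, parsed_args)  # resolves account from config defaults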