def _get_client(service, parsed_args):
    """Build a storage data service client from CLI args with config fallbacks.

    Each connection parameter is taken from ``parsed_args`` when present,
    otherwise from the ``storage`` section of the az configuration.
    """
    def _arg_or_config(arg_name, config_key):
        # Command-line value wins; az configuration supplies the fallback.
        return getattr(parsed_args, arg_name, None) or az_config.get('storage', config_key, None)

    account_name = _arg_or_config('account_name', 'account')
    account_key = _arg_or_config('account_key', 'key')
    connection_string = _arg_or_config('connection_string', 'connection_string')
    sas_token = _arg_or_config('sas_token', 'sas_token')
    return get_storage_data_service_client(
        service, account_name, account_key, connection_string, sas_token)
def add_azure_container_to_cluster_create_parameters(params, container_name, mount_path):
    """Add Azure Storage container to the cluster create parameters.

    :param model.ClusterCreateParameters params: cluster create parameters.
    :param str container_name: container name.
    :param str mount_path: relative mount path for the container.
    :raises CLIError: if the mount path is empty or storage settings are missing.
    """
    if not mount_path:
        raise CLIError('Azure Storage container relative mount path cannot be empty.')
    # Lazily create the nested structures on the way to the blob-fs list.
    if params.node_setup is None:
        params.node_setup = models.NodeSetup()
    volumes = params.node_setup.mount_volumes
    if volumes is None:
        volumes = models.MountVolumes()
        params.node_setup.mount_volumes = volumes
    if volumes.azure_blob_file_systems is None:
        volumes.azure_blob_file_systems = []
    account = az_config.get('batchai', 'storage_account', fallback=None)
    if not account:
        raise CLIError(MSG_CONFIGURE_STORAGE_ACCOUNT)
    key = az_config.get('batchai', 'storage_key', fallback=None)
    if not key:
        raise CLIError(MSG_CONFIGURE_STORAGE_KEY)
    reference = models.AzureBlobFileSystemReference(
        relative_mount_path=mount_path,
        account_name=account,
        container_name=container_name,
        credentials=models.AzureStorageCredentialsInfo(account_key=key))
    volumes.azure_blob_file_systems.append(reference)
def add_azure_file_share_to_cluster_create_parameters(params, azure_file_share, mount_path):
    """Add Azure File share to the cluster create parameters.

    :param model.ClusterCreateParameters params: cluster create parameters.
    :param str azure_file_share: name of the azure file share.
    :param str mount_path: relative mount path for Azure File share.
    :raises CLIError: if the mount path is empty or storage account
        name/key are not configured.
    """
    if not mount_path:
        raise CLIError('Azure File share relative mount path cannot be empty.')
    if params.node_setup is None:
        params.node_setup = models.NodeSetup()
    if params.node_setup.mount_volumes is None:
        params.node_setup.mount_volumes = models.MountVolumes()
    if params.node_setup.mount_volumes.azure_file_shares is None:
        params.node_setup.mount_volumes.azure_file_shares = []
    storage_account_name = az_config.get('batchai', 'storage_account', fallback=None)
    if not storage_account_name:
        raise CLIError(MSG_CONFIGURE_STORAGE_ACCOUNT)
    storage_account_key = az_config.get('batchai', 'storage_key', fallback=None)
    if not storage_account_key:
        raise CLIError(MSG_CONFIGURE_STORAGE_KEY)
    params.node_setup.mount_volumes.azure_file_shares.append(models.AzureFileShareReference(
        relative_mount_path=mount_path,
        account_name=storage_account_name,
        azure_file_url='https://{0}.file.core.windows.net/{1}'.format(
            storage_account_name, azure_file_share),
        # Pass the key by keyword for consistency with the container helper;
        # the previous positional call relied on the model's argument order.
        credentials=models.AzureStorageCredentialsInfo(account_key=storage_account_key)))
def validate_client_parameters(namespace):
    """Populate storage connection parameters on the namespace.

    Falls back to az configuration / environment variables, parses a
    connection string into account name and key, and finally queries ARM
    for the key when only the account name is known.
    """
    n = namespace
    n.connection_string = n.connection_string or az_config.get('storage', 'connection_string', None)
    if n.connection_string:
        # A connection string (argument or environment) supplies both values.
        conn_dict = validate_key_value_pairs(n.connection_string)
        n.account_name = conn_dict['AccountName']
        n.account_key = conn_dict['AccountKey']
    # Otherwise pick up whatever individual settings are configured.
    n.account_name = n.account_name or az_config.get('storage', 'account', None)
    n.account_key = n.account_key or az_config.get('storage', 'key', None)
    n.sas_token = n.sas_token or az_config.get('storage', 'sas_token', None)
    if n.account_name and not n.account_key:
        # Resolve the key by looking the account up through ARM.
        scf = get_mgmt_service_client(StorageManagementClient)
        acc = next((x for x in scf.storage_accounts.list() if x.name == n.account_name), None)
        if acc is None:
            raise ValueError("Storage account '{}' not found.".format(n.account_name))
        from azure.cli.core.commands.arm import parse_resource_id
        rg = parse_resource_id(acc.id)['resource_group']
        n.account_key = \
            scf.storage_accounts.list_keys(rg, n.account_name).keys[0].value  # pylint: disable=no-member
def validate_client_parameters(namespace):
    """Retrieves Batch connection parameters from environment variables.

    Fills in account name, key and endpoint from az configuration when not
    supplied on the command line; when only the key is missing, attempts to
    look it up through the Batch management API.

    :raises ValueError: if the account cannot be found, or required
        parameters are missing.
    """
    from azure.mgmt.batch import BatchManagementClient
    from azure.cli.core.commands.client_factory import get_mgmt_service_client
    from azure.cli.core._config import az_config

    # simply try to retrieve the remaining variables from environment variables
    if not namespace.account_name:
        namespace.account_name = az_config.get('batch', 'account', None)
    if not namespace.account_key:
        namespace.account_key = az_config.get('batch', 'access_key', None)
    if not namespace.account_endpoint:
        namespace.account_endpoint = az_config.get('batch', 'endpoint', None)

    # if account name is specified but no key, attempt to query
    if namespace.account_name and namespace.account_endpoint and not namespace.account_key:
        endpoint = urlsplit(namespace.account_endpoint)
        host = endpoint.netloc
        client = get_mgmt_service_client(BatchManagementClient)
        acc = next((x for x in client.batch_account.list()
                    if x.name == namespace.account_name and x.account_endpoint == host), None)
        if acc:
            from azure.cli.core.commands.arm import parse_resource_id
            rg = parse_resource_id(acc.id)['resource_group']
            namespace.account_key = \
                client.batch_account.get_keys(rg, namespace.account_name).primary  # pylint: disable=no-member
        else:
            raise ValueError("Batch account '{}' not found.".format(namespace.account_name))
    else:
        # Fixed typos ("specifiy"/"enviroment") in the user-facing messages,
        # matching the wording used by the newer variant of this validator.
        if not namespace.account_name:
            raise ValueError("Need to specify batch account in command line or environment variable.")
        if not namespace.account_endpoint:
            raise ValueError("Need to specify batch endpoint in command line or environment variable.")
def add_azure_file_share_to_cluster_create_parameters(params, azure_file_share, mount_path):
    """Add Azure File share to the cluster create parameters.

    :param model.ClusterCreateParameters params: cluster create parameters.
    :param str azure_file_share: name of the azure file share.
    :param str mount_path: relative mount path for Azure File share.
    :raises CLIError: if the mount path is empty or storage settings are missing.
    """
    if not mount_path:
        raise CLIError('Azure File share relative mount path cannot be empty.')
    if params.node_setup is None:
        params.node_setup = models.NodeSetup()
    setup = params.node_setup
    if setup.mount_volumes is None:
        setup.mount_volumes = models.MountVolumes()
    shares = setup.mount_volumes.azure_file_shares
    if shares is None:
        shares = []
        setup.mount_volumes.azure_file_shares = shares
    account_name = az_config.get('batchai', 'storage_account', fallback=None)
    if not account_name:
        raise CLIError(MSG_CONFIGURE_STORAGE_ACCOUNT)
    account_key = az_config.get('batchai', 'storage_key', fallback=None)
    if not account_key:
        raise CLIError(MSG_CONFIGURE_STORAGE_KEY)
    file_url = 'https://{0}.file.core.windows.net/{1}'.format(account_name, azure_file_share)
    shares.append(models.AzureFileShareReference(
        relative_mount_path=mount_path,
        account_name=account_name,
        azure_file_url=file_url,
        credentials=models.AzureStorageCredentialsInfo(account_key=account_key)))
def _install_or_update(package_list, link, private, pre):
    """Install or upgrade CLI component packages via pip.

    :param list package_list: packages to install/upgrade.
    :param str link: optional find-links location.
    :param bool private: resolve packages from the private index in config.
    :param bool pre: allow pre-release versions.
    :raises CLIError: if the private index URL is not configured.
    """
    import pip
    options = ['--isolated', '--disable-pip-version-check', '--upgrade']
    if pre:
        options.append('--pre')
    if _installed_in_user():
        options.append('--user')
    pkg_index_options = ['--find-links', link] if link else []
    if private:
        package_index_url = az_config.get('component', 'package_index_url', fallback=None)
        package_index_trusted_host = az_config.get(
            'component', 'package_index_trusted_host', fallback=None)
        if not package_index_url:
            raise CLIError(
                'AZURE_COMPONENT_PACKAGE_INDEX_URL environment variable not set and not '
                'specified in config. AZURE_COMPONENT_PACKAGE_INDEX_TRUSTED_HOST may '
                'also need to be set.\nIf executing az with sudo, you may want sudo\'s '
                '-E and -H flags.')
        pkg_index_options += ['--extra-index-url', package_index_url]
        if package_index_trusted_host:
            pkg_index_options += ['--trusted-host', package_index_trusted_host]
    _run_pip(pip, ['install'] + options + package_list + pkg_index_options)
    # Fix to make sure that we have empty __init__.py files for the azure site-packages folder.
    _run_pip(pip, ['install'] + options +
             ['--force-reinstall', 'azure-nspkg', 'azure-mgmt-nspkg'] + pkg_index_options)
def add_azure_container_to_cluster_create_parameters(params, container_name, mount_path):
    """Add Azure Storage container to the cluster create parameters.

    :param model.ClusterCreateParameters params: cluster create parameters.
    :param str container_name: container name.
    :param str mount_path: relative mount path for the container.
    :raises CLIError: if the mount path is empty or storage settings are missing.
    """
    if not mount_path:
        raise CLIError(
            'Azure Storage container relative mount path cannot be empty.')
    if params.node_setup is None:
        params.node_setup = models.NodeSetup()
    mount_volumes = params.node_setup.mount_volumes
    if mount_volumes is None:
        mount_volumes = models.MountVolumes()
        params.node_setup.mount_volumes = mount_volumes
    if mount_volumes.azure_blob_file_systems is None:
        mount_volumes.azure_blob_file_systems = []
    name = az_config.get('batchai', 'storage_account', fallback=None)
    if not name:
        raise CLIError(MSG_CONFIGURE_STORAGE_ACCOUNT)
    key = az_config.get('batchai', 'storage_key', fallback=None)
    if not key:
        raise CLIError(MSG_CONFIGURE_STORAGE_KEY)
    mount_volumes.azure_blob_file_systems.append(
        models.AzureBlobFileSystemReference(
            relative_mount_path=mount_path,
            account_name=name,
            container_name=container_name,
            credentials=models.AzureStorageCredentialsInfo(account_key=key)))
def validate_client_parameters(namespace):
    """Populate storage connection parameters on the namespace.

    Resolves the connection string, account name/key and SAS token from
    arguments or az configuration; queries for the account key only when
    neither a key nor a SAS token is available.
    """
    n = namespace
    if not n.connection_string:
        n.connection_string = az_config.get('storage', 'connection_string', None)
    if n.connection_string:
        # A connection string (argument or environment) supplies both values.
        parsed = validate_key_value_pairs(n.connection_string)
        n.account_name = parsed['AccountName']
        n.account_key = parsed['AccountKey']
    # Otherwise pick up whatever individual settings are configured.
    n.account_name = n.account_name or az_config.get('storage', 'account', None)
    n.account_key = n.account_key or az_config.get('storage', 'key', None)
    n.sas_token = n.sas_token or az_config.get('storage', 'sas_token', None)
    if n.sas_token:
        # The portal and the command line hand back SAS tokens in different
        # forms; normalize by stripping any leading '?'.
        n.sas_token = n.sas_token.lstrip('?')
    if n.account_name and not n.account_key and not n.sas_token:
        # Account name alone: look the key up.
        n.account_key = _query_account_key(n.account_name)
def _install_or_update(package_list, link, private, pre):
    """Install or upgrade CLI component packages via pip.

    :param list package_list: packages to install/upgrade.
    :param str link: optional find-links location.
    :param bool private: resolve packages from the private index in config.
    :param bool pre: allow pre-release versions.
    :raises CLIError: if the private index URL is not configured.
    """
    import pip
    options = ['--isolated', '--disable-pip-version-check', '--upgrade', '--ignore-installed']
    if pre:
        options.append('--pre')
    if _installed_in_user():
        options.append('--user')
    pkg_index_options = ['--find-links', link] if link else []
    if private:
        index_url = az_config.get('component', 'package_index_url', fallback=None)
        trusted_host = az_config.get('component', 'package_index_trusted_host', fallback=None)
        if not index_url:
            raise CLIError('AZURE_COMPONENT_PACKAGE_INDEX_URL environment variable not set and not specified in config. '  # pylint: disable=line-too-long
                           'AZURE_COMPONENT_PACKAGE_INDEX_TRUSTED_HOST may also need to be set.\n'
                           'If executing az with sudo, you may want sudo\'s -E and -H flags.')
        pkg_index_options += ['--extra-index-url', index_url]
        if trusted_host:
            pkg_index_options += ['--trusted-host', trusted_host]
    _run_pip(pip, ['install'] + options + package_list + pkg_index_options)
def _install_or_update(package_list, link, private, pre):
    """Install or upgrade CLI component packages via pip.

    :param list package_list: packages to install/upgrade.
    :param str link: optional find-links location.
    :param bool private: resolve packages from the private index in config.
    :param bool pre: allow pre-release versions.
    :raises CLIError: if the private index URL is not configured.
    """
    import pip
    pip_options = ['--isolated', '--disable-pip-version-check', '--upgrade']
    if pre:
        pip_options.append('--pre')
    if _installed_in_user():
        pip_options.append('--user')
    index_args = ['--find-links', link] if link else []
    if private:
        url = az_config.get('component', 'package_index_url', fallback=None)
        host = az_config.get('component', 'package_index_trusted_host', fallback=None)
        if not url:
            raise CLIError('AZURE_COMPONENT_PACKAGE_INDEX_URL environment variable not set and not '
                           'specified in config. AZURE_COMPONENT_PACKAGE_INDEX_TRUSTED_HOST may '
                           'also need to be set.\nIf executing az with sudo, you may want sudo\'s '
                           '-E and -H flags.')
        index_args += ['--extra-index-url', url]
        if host:
            index_args += ['--trusted-host', host]
    _run_pip(pip, ['install'] + pip_options + package_list + index_args)
    # Fix to make sure that we have empty __init__.py files for the azure site-packages folder.
    nspkg_packages = ['--force-reinstall', 'azure-nspkg', 'azure-mgmt-nspkg']
    _run_pip(pip, ['install'] + pip_options + nspkg_packages + index_args)
def update_cluster_create_parameters_with_env_variables(params):
    """Replaces placeholders with information from the environment variables.

    Currently we support replacing of storage account name and key in mount
    volumes.

    :param models.ClusterCreateParameters params: cluster creation parameters to patch.
    :raises CLIError: if a placeholder remains because the corresponding
        configuration value is missing.
    """
    storage_account_name = az_config.get(
        'batchai', 'storage_account', fallback=AZURE_BATCHAI_STORAGE_ACCOUNT_PLACEHOLDER)
    storage_account_key = az_config.get(
        'batchai', 'storage_key', fallback=AZURE_BATCHAI_STORAGE_KEY_PLACEHOLDER)
    # Patch parameters of azure file share.
    if params.node_setup and \
            params.node_setup.mount_volumes and \
            params.node_setup.mount_volumes.azure_file_shares:
        for ref in params.node_setup.mount_volumes.azure_file_shares:
            if ref.account_name == AZURE_BATCHAI_STORAGE_ACCOUNT_PLACEHOLDER:
                ref.account_name = storage_account_name
            if ref.azure_file_url and \
                    AZURE_BATCHAI_STORAGE_ACCOUNT_PLACEHOLDER in ref.azure_file_url:
                ref.azure_file_url = ref.azure_file_url.replace(
                    AZURE_BATCHAI_STORAGE_ACCOUNT_PLACEHOLDER, storage_account_name)
            if ref.credentials_info and \
                    ref.credentials_info.account_key == AZURE_BATCHAI_STORAGE_KEY_PLACEHOLDER:
                ref.credentials_info.account_key = storage_account_key
            if not ref.credentials_info:
                ref.credentials_info = models.AzureStorageCredentialsInfo(
                    account_key=storage_account_key)
            # Verify that all placeholders are replaced with real values.
            # Bug fix: guard the substring test against azure_file_url being
            # None (the patching step above already guards; the verification
            # step previously raised TypeError on a None URL).
            if (ref.account_name == AZURE_BATCHAI_STORAGE_ACCOUNT_PLACEHOLDER or
                    (ref.azure_file_url and
                     AZURE_BATCHAI_STORAGE_ACCOUNT_PLACEHOLDER in ref.azure_file_url)):
                raise CLIError(MSG_CONFIGURE_STORAGE_ACCOUNT)
            if ref.credentials_info.account_key == AZURE_BATCHAI_STORAGE_KEY_PLACEHOLDER:
                raise CLIError(MSG_CONFIGURE_STORAGE_KEY)
    # Patch parameters of blob file system.
    if params.node_setup and \
            params.node_setup.mount_volumes and \
            params.node_setup.mount_volumes.azure_blob_file_systems:
        for ref in params.node_setup.mount_volumes.azure_blob_file_systems:
            if ref.account_name == AZURE_BATCHAI_STORAGE_ACCOUNT_PLACEHOLDER:
                ref.account_name = storage_account_name
            if ref.credentials_info and ref.credentials_info.account_key == \
                    AZURE_BATCHAI_STORAGE_KEY_PLACEHOLDER:
                ref.credentials_info.account_key = storage_account_key
            if not ref.credentials_info:
                ref.credentials_info = models.AzureStorageCredentialsInfo(
                    account_key=storage_account_key)
            # Verify that all placeholders are replaced with real values.
            if ref.account_name == AZURE_BATCHAI_STORAGE_ACCOUNT_PLACEHOLDER:
                raise CLIError(MSG_CONFIGURE_STORAGE_ACCOUNT)
            if ref.credentials_info.account_key == AZURE_BATCHAI_STORAGE_KEY_PLACEHOLDER:
                raise CLIError(MSG_CONFIGURE_STORAGE_KEY)
def get_effective_storage_account_name_and_key(account_name, account_key):
    """Returns storage account name and key to be used.

    :param str or None account_name: storage account name provided as command line argument.
    :param str or None account_key: storage account key provided as command line argument.
    """
    if not account_name:
        # Nothing on the command line - fall back to az configuration.
        return (az_config.get('batchai', 'storage_account', ''),
                az_config.get('batchai', 'storage_key', ''))
    return account_name, get_storage_account_key(account_name, account_key) or ''
def __init__(self, client, account_name, resource_group_name, account_endpoint):
    """Initialize the resolver with Batch management client and account info.

    Account name and endpoint fall back to the 'batch' section of the az
    configuration when not supplied by the caller.
    """
    self.resource_file_cache = {}
    self.resolved_storage_client = None
    self.batch_mgmt_client = client
    self.batch_account_name = account_name or az_config.get('batch', 'account', None)
    self.batch_account_endpoint = account_endpoint or az_config.get('batch', 'endpoint', None)
    self.batch_resource_group = resource_group_name
def _get_client(service, parsed_args):
    """Build a data service client from CLI args with az-config fallbacks."""
    resolved = []
    # For each connection parameter, the parsed argument wins over config.
    for arg_value, config_key in (
            (parsed_args.account_name, 'account'),
            (parsed_args.account_key, 'key'),
            (parsed_args.connection_string, 'connection_string'),
            (parsed_args.sas_token, 'sas_token')):
        resolved.append(arg_value or az_config.get('storage', config_key, None))
    account_name, account_key, connection_string, sas_token = resolved
    return get_data_service_client(service, account_name, account_key,
                                   connection_string, sas_token)
def update_cluster_create_parameters_with_env_variables(params):
    """Replaces placeholders with information from the environment variables.

    Currently we support replacing of storage account name and key in mount
    volumes.

    :param models.ClusterCreateParameters params: cluster creation parameters to patch.
    :raises CLIError: if a placeholder remains because the corresponding
        configuration value is missing.
    """
    storage_account_name = az_config.get('batchai', 'storage_account',
                                         fallback=AZURE_BATCHAI_STORAGE_ACCOUNT_PLACEHOLDER)
    storage_account_key = az_config.get('batchai', 'storage_key',
                                        fallback=AZURE_BATCHAI_STORAGE_KEY_PLACEHOLDER)
    # Patch parameters of azure file share.
    if params.node_setup and \
            params.node_setup.mount_volumes and \
            params.node_setup.mount_volumes.azure_file_shares:
        for ref in params.node_setup.mount_volumes.azure_file_shares:
            if ref.account_name == AZURE_BATCHAI_STORAGE_ACCOUNT_PLACEHOLDER:
                ref.account_name = storage_account_name
            if ref.azure_file_url and \
                    AZURE_BATCHAI_STORAGE_ACCOUNT_PLACEHOLDER in ref.azure_file_url:
                ref.azure_file_url = ref.azure_file_url.replace(
                    AZURE_BATCHAI_STORAGE_ACCOUNT_PLACEHOLDER, storage_account_name)
            if ref.credentials and ref.credentials.account_key == AZURE_BATCHAI_STORAGE_KEY_PLACEHOLDER:
                ref.credentials.account_key = storage_account_key
            if not ref.credentials:
                ref.credentials = models.AzureStorageCredentialsInfo(account_key=storage_account_key)
            # Verify that all placeholders are replaced with real values.
            # Bug fix: guard the substring test against azure_file_url being
            # None (the patching step above already guards; the verification
            # step previously raised TypeError on a None URL).
            if (ref.account_name == AZURE_BATCHAI_STORAGE_ACCOUNT_PLACEHOLDER or
                    (ref.azure_file_url and
                     AZURE_BATCHAI_STORAGE_ACCOUNT_PLACEHOLDER in ref.azure_file_url)):
                raise CLIError(MSG_CONFIGURE_STORAGE_ACCOUNT)
            if ref.credentials.account_key == AZURE_BATCHAI_STORAGE_KEY_PLACEHOLDER:
                raise CLIError(MSG_CONFIGURE_STORAGE_KEY)
    # Patch parameters of blob file system.
    if params.node_setup and \
            params.node_setup.mount_volumes and \
            params.node_setup.mount_volumes.azure_blob_file_systems:
        for ref in params.node_setup.mount_volumes.azure_blob_file_systems:
            if ref.account_name == AZURE_BATCHAI_STORAGE_ACCOUNT_PLACEHOLDER:
                ref.account_name = storage_account_name
            if ref.credentials and ref.credentials.account_key == AZURE_BATCHAI_STORAGE_KEY_PLACEHOLDER:
                ref.credentials.account_key = storage_account_key
            if not ref.credentials:
                ref.credentials = models.AzureStorageCredentialsInfo(account_key=storage_account_key)
            # Verify that all placeholders are replaced with real values.
            if ref.account_name == AZURE_BATCHAI_STORAGE_ACCOUNT_PLACEHOLDER:
                raise CLIError(MSG_CONFIGURE_STORAGE_ACCOUNT)
            if ref.credentials.account_key == AZURE_BATCHAI_STORAGE_KEY_PLACEHOLDER:
                raise CLIError(MSG_CONFIGURE_STORAGE_KEY)
def get_acr_api_version():
    '''Returns the api version for container registry
    '''
    # The 'acr'/'apiversion' config entry lets users pin a specific version.
    version_override = az_config.get('acr', 'apiversion', None)
    if version_override:
        logger.warning('Customized api-version is used: %s', version_override)
    return version_override
def log_telemetry(name, log_type='event', **kwargs):
    """Record a telemetry event/pageview/trace locally for later upload.

    IMPORTANT: do not log events with quotes in the name, properties or
    measurements; those events may fail to upload. Also, telemetry events
    must be verified in the backend because successful upload does not
    guarantee success.

    :param str name: event name (command chars are stripped).
    :param str log_type: one of 'event', 'pageview', 'trace'.
    :param kwargs: extra properties merged into the record.
    """
    # Respect the user's telemetry opt-out before doing any work.
    if not user_agrees_to_telemetry():
        return
    try:
        name = _remove_cmd_chars(name)
        _sanitize_inputs(kwargs)
        # Classify the invocation source: CI run, shell completer, or plain az.
        source = 'az'
        if _in_ci():
            source = 'CI'
        elif ARGCOMPLETE_ENV_NAME in os.environ:
            source = 'completer'
        types = ['event', 'pageview', 'trace']
        if log_type not in types:
            raise ValueError('Type {} is not supported. Available types: {}'.format(log_type, types))
        props = {'telemetry-version': TELEMETRY_VERSION}
        # Each property is gathered via _safe_exec so a failing probe cannot
        # abort the whole telemetry record.
        _safe_exec(props, 'time', lambda: str(datetime.datetime.now()))
        _safe_exec(props, 'x-ms-client-request-id',
                   lambda: APPLICATION.session['headers']['x-ms-client-request-id'])
        _safe_exec(props, 'command', lambda: APPLICATION.session.get('command', None))
        _safe_exec(props, 'version', lambda: core_version)
        _safe_exec(props, 'source', lambda: source)
        _safe_exec(props, 'installation-id', _get_installation_id)
        _safe_exec(props, 'python-version', lambda: _remove_symbols(str(platform.python_version())))
        _safe_exec(props, 'shell-type', _get_shell_type)
        _safe_exec(props, 'locale',
                   lambda: '{},{}'.format(locale.getdefaultlocale()[0], locale.getdefaultlocale()[1]))
        _safe_exec(props, 'user-machine-id', _get_user_machine_id)
        _safe_exec(props, 'user-azure-id', _get_user_azure_id)
        _safe_exec(props, 'azure-subscription-id', _get_azure_subscription_id)
        _safe_exec(props, 'default-output-type', lambda: az_config.get('core', 'output', fallback='unknown'))
        _safe_exec(props, 'environment', _get_env_string)
        # Traces additionally carry a (sanitized) stack trace and error hash.
        if log_type == 'trace':
            _safe_exec(props, 'trace', _get_stack_trace)
            _safe_exec(props, 'error-hash', _get_error_hash)
        if kwargs:
            props.update(**kwargs)
        telemetry_records.append({
            'name': name,
            'type': log_type,
            'properties': props
        })
    except Exception as ex:  # pylint: disable=broad-except
        # Never fail the command because of telemetry, unless debugging
        if _debugging():
            raise ex
class AzRotatingFileHandler(logging.handlers.RotatingFileHandler):
    """Rotating file handler for az log files.

    NOTE(review): the imports and config reads below run at class-creation
    time; ENABLED and LOGFILE_DIR are therefore fixed at import.
    """
    from azure.cli.core._environment import get_config_dir
    from azure.cli.core._config import az_config
    # Whether file logging is turned on ([logging] enable_log_file).
    ENABLED = az_config.getboolean('logging', 'enable_log_file', fallback=False)
    # Log directory ([logging] log_dir), defaulting to <config dir>/logs.
    LOGFILE_DIR = os.path.expanduser(
        az_config.get('logging', 'log_dir', fallback=os.path.join(get_config_dir(), 'logs')))

    def __init__(self):
        logging_file_path = self.get_log_file_path()
        # Rotate at 10 MiB, keeping 5 backup files.
        super(AzRotatingFileHandler, self).__init__(logging_file_path,
                                                    maxBytes=10 * 1024 * 1024,
                                                    backupCount=5)
        self.setFormatter(
            logging.Formatter(
                '%(process)d : %(asctime)s : %(levelname)s : %(name)s : %(message)s'
            ))
        self.setLevel(logging.DEBUG)

    def get_log_file_path(self):
        # Create the log directory on first use.
        if not os.path.isdir(self.LOGFILE_DIR):
            os.makedirs(self.LOGFILE_DIR)
        return os.path.join(self.LOGFILE_DIR, 'az.log')
def get_acr_api_version():
    """Returns the api version for container registry
    """
    # A user-supplied override lives under 'acr'/'apiversion' in az config.
    override = az_config.get('acr', 'apiversion', None)
    if override:
        logger.warning('Customized api-version is used: %s', override)
    return override
def _update_default_info(self):
    """Refresh the cached one-line summary of configured defaults.

    Builds a "key: value " string from the [defaults] section of the az
    configuration; left empty when the section does not exist.
    """
    try:
        opts = az_config.config_parser.options(DEFAULTS_SECTION)
        self.config_default = ''.join(
            opt + ": " + az_config.get(DEFAULTS_SECTION, opt) + " " for opt in opts)
    except configparser.NoSectionError:
        self.config_default = ""
def _find_configured_default(argument):
    """Return the configured default value for *argument*, or None.

    Looks up the argument type's default_name_tooling key in the [defaults]
    section of the az configuration.
    """
    tooling_name = getattr(argument.type, 'default_name_tooling', None)
    if not tooling_name:
        return None
    try:
        return az_config.get(DEFAULTS_SECTION, tooling_name, None)
    except configparser.NoSectionError:
        return None
def _install_or_update(package_list, link, private, pre, show_logs=False):
    """Install or upgrade CLI component packages via pip.

    :param list package_list: packages to install/upgrade.
    :param str link: optional find-links location.
    :param bool private: resolve packages from the private index in config.
    :param bool pre: allow pre-release versions.
    :param bool show_logs: when False, pip runs with --quiet.
    :raises CLIError: if the private index URL is not configured.
    """
    options = ['--isolated', '--disable-pip-version-check', '--upgrade']
    if pre:
        options.append('--pre')
    if not show_logs:
        options.append('--quiet')
    pkg_index_options = ['--find-links', link] if link else []
    if private:
        index_url = az_config.get('component', 'package_index_url', fallback=None)
        trusted_host = az_config.get('component', 'package_index_trusted_host', fallback=None)
        if not index_url:
            raise CLIError('AZURE_COMPONENT_PACKAGE_INDEX_URL environment variable not set and not specified in config. '  # pylint: disable=line-too-long
                           'AZURE_COMPONENT_PACKAGE_INDEX_TRUSTED_HOST may also need to be set.')
        pkg_index_options += ['--extra-index-url', index_url]
        if trusted_host:
            pkg_index_options += ['--trusted-host', trusted_host]
    pip.main(['install'] + options + package_list + pkg_index_options)
def _install_or_update(package_list, link, private, pre):
    """Install or upgrade CLI component packages via pip.

    :param list package_list: packages to install/upgrade.
    :param str link: optional find-links location.
    :param bool private: resolve packages from the private index in config.
    :param bool pre: allow pre-release versions.
    :raises CLIError: if the private index URL is not configured.
    """
    import pip
    base_options = ['--isolated', '--disable-pip-version-check', '--upgrade']
    if pre:
        base_options.append('--pre')
    index_options = ['--find-links', link] if link else []
    if private:
        url = az_config.get('component', 'package_index_url', fallback=None)
        host = az_config.get('component', 'package_index_trusted_host', fallback=None)
        if not url:
            raise CLIError('AZURE_COMPONENT_PACKAGE_INDEX_URL environment variable not set and not specified in config. '  # pylint: disable=line-too-long
                           'AZURE_COMPONENT_PACKAGE_INDEX_TRUSTED_HOST may also need to be set.\n'
                           'If executing az with sudo, you may want sudo\'s -E and -H flags.')
        index_options += ['--extra-index-url', url]
        if host:
            index_options += ['--trusted-host', host]
    _run_pip(pip, ['install'] + base_options + package_list + index_options)
def get_configured_defaults():
    """Return the [defaults] section of the az config as a dict.

    Only options resolving to a truthy value are included; an absent
    section yields an empty dict.
    """
    _reload_config()
    try:
        section_options = az_config.config_parser.options(DEFAULTS_SECTION)
        return {opt: val
                for opt, val in ((o, az_config.get(DEFAULTS_SECTION, o))
                                 for o in section_options)
                if val}
    except configparser.NoSectionError:
        return {}
def _resolve_default_value_from_cfg_file(self, arg, overrides):
    """Apply a configured default value from the az config to *arg*."""
    if 'configured_default' not in overrides.settings:
        return
    def_config = overrides.settings.pop('configured_default', None)
    # same blunt mechanism like we handled id-parts, for create command,
    # no name default
    if (self.name.split()[-1] == 'create' and
            overrides.settings.get('metavar', None) == 'NAME'):
        return
    setattr(arg.type, 'configured_default_applied', True)
    config_value = az_config.get(DEFAULTS_SECTION, def_config, None)
    if config_value:
        # A configured default makes the argument optional.
        overrides.settings['default'] = config_value
        overrides.settings['required'] = False
def _register_builtin_arguments(**kwargs):
    """Register the global CLI arguments (--output, --verbose, --debug)."""
    global_group = kwargs['global_group']
    global_group.add_argument(
        '--output', '-o',
        dest='_output_format',
        choices=['json', 'tsv', 'table', 'jsonc'],
        default=az_config.get('core', 'output', fallback='json'),
        help='Output format',
        type=str.lower)
    # The arguments for verbosity don't get parsed by argparse but we add it here for help.
    global_group.add_argument(
        '--verbose',
        dest='_log_verbosity_verbose',
        action='store_true',
        help='Increase logging verbosity. Use --debug for full debug logs.')
    global_group.add_argument(
        '--debug',
        dest='_log_verbosity_debug',
        action='store_true',
        help='Increase logging verbosity to show all debug logs.')
def _register_builtin_arguments(**kwargs):
    """Register the global CLI arguments (--output, --verbose, --debug)."""
    global_group = kwargs['global_group']
    output_choices = ['json', 'tsv', 'table', 'jsonc', 'pandas']
    global_group.add_argument(
        '--output', '-o',
        dest='_output_format',
        choices=output_choices,
        default=az_config.get('core', 'output', fallback='json'),
        help='Output format',
        type=str.lower)
    # The arguments for verbosity don't get parsed by argparse but we add it here for help.
    global_group.add_argument(
        '--verbose',
        dest='_log_verbosity_verbose',
        action='store_true',
        help='Increase logging verbosity. Use --debug for full debug logs.')
    global_group.add_argument(
        '--debug',
        dest='_log_verbosity_debug',
        action='store_true',
        help='Increase logging verbosity to show all debug logs.')
def load_node_agent_skus(prefix, **kwargs):  # pylint: disable=unused-argument
    """Completer: list 'publisher:offer:sku:version' for verified images.

    Returns an empty list when the Batch service cannot be reached.
    """
    from msrest.exceptions import ClientRequestError
    from azure.batch.models import BatchErrorException
    from azure.cli.command_modules.batch._client_factory import account_client_factory
    from azure.cli.core._config import az_config
    client_creds = {
        'account_name': az_config.get('batch', 'account', None),
        'account_key': az_config.get('batch', 'access_key', None),
        'account_endpoint': az_config.get('batch', 'endpoint', None),
    }
    try:
        client = account_client_factory(client_creds)
        return ["{}:{}:{}:{}".format(image['publisher'], image['offer'],
                                     image['sku'], image['version'])
                for sku in client.list_node_agent_skus()
                for image in sku['verifiedImageReferences']]
    except (ClientRequestError, BatchErrorException):
        # Completion is best-effort; swallow connectivity/service errors.
        return []
def validate_client_parameters(namespace):
    """Retrieves Batch connection parameters from environment variables.

    Fills account name, key and endpoint from az configuration when not
    supplied; when using shared_key auth with a missing key, looks the key
    up via the Batch management API. Under aad auth the key is cleared.

    :raises ValueError: if the account cannot be found or required
        parameters are missing.
    """
    from azure.mgmt.batch import BatchManagementClient
    from azure.cli.core.commands.client_factory import get_mgmt_service_client
    from azure.cli.core._config import az_config

    # simply try to retrieve the remaining variables from environment variables
    if not namespace.account_name:
        namespace.account_name = az_config.get('batch', 'account', None)
    if not namespace.account_key:
        namespace.account_key = az_config.get('batch', 'access_key', None)
    if not namespace.account_endpoint:
        namespace.account_endpoint = az_config.get('batch', 'endpoint', None)

    # if account name is specified but no key, attempt to query if we use shared key auth
    if namespace.account_name and namespace.account_endpoint and not namespace.account_key:
        if az_config.get('batch', 'auth_mode', 'shared_key') == 'shared_key':
            # Match the management-plane account by name and endpoint host.
            endpoint = urlsplit(namespace.account_endpoint)
            host = endpoint.netloc
            client = get_mgmt_service_client(BatchManagementClient)
            acc = next((x for x in client.batch_account.list()
                        if x.name == namespace.account_name and x.account_endpoint == host), None)
            if acc:
                from azure.cli.core.commands.arm import parse_resource_id
                rg = parse_resource_id(acc.id)['resource_group']
                namespace.account_key = \
                    client.batch_account.get_keys(rg,  # pylint: disable=no-member
                                                  namespace.account_name).primary
            else:
                raise ValueError("Batch account '{}' not found.".format(namespace.account_name))
    else:
        if not namespace.account_name:
            raise ValueError("Specify batch account in command line or environment variable.")
        if not namespace.account_endpoint:
            raise ValueError("Specify batch endpoint in command line or environment variable.")
    # AAD auth never sends a shared key.
    if az_config.get('batch', 'auth_mode', 'shared_key') == 'aad':
        namespace.account_key = None
def load_node_agent_skus(prefix, **kwargs):  # pylint: disable=unused-argument
    """Completer: list 'publisher:offer:sku:version' for verified images.

    Returns an empty list when the Batch service cannot be reached.
    """
    from msrest.exceptions import ClientRequestError
    from azure.batch.models import BatchErrorException
    from azure.cli.command_modules.batch._client_factory import account_client_factory
    from azure.cli.core._config import az_config
    creds = {
        'account_name': az_config.get('batch', 'account', None),
        'account_key': az_config.get('batch', 'access_key', None),
        'account_endpoint': az_config.get('batch', 'endpoint', None),
    }
    try:
        batch_client = account_client_factory(creds)
        return ["{}:{}:{}:{}".format(image['publisher'], image['offer'],
                                     image['sku'], image['version'])
                for sku in batch_client.list_node_agent_skus()
                for image in sku['verifiedImageReferences']]
    except (ClientRequestError, BatchErrorException):
        # Completion is best-effort; swallow connectivity/service errors.
        return []
def validate_source_uri(namespace):
    """Build ``namespace.copy_source`` from either a full URI or the
    blob/file component arguments, enforcing mutually exclusive argument sets."""
    usage_string = ('invalid usage: supply only one of the following argument sets:'
                    '\n\t --source-uri'
                    '\n\tOR --source-container --source-blob [--source-snapshot] [--source-sas]'
                    '\n\tOR --source-share --source-path [--source-sas]')
    ns = vars(namespace)
    validate_client_parameters(namespace)  # must run first to resolve storage account
    storage_acc = ns.get('account_name', None) or az_config.get('storage', 'account', None)
    uri = ns.get('copy_source', None)
    container = ns.pop('source_container', None)
    blob = ns.pop('source_blob', None)
    sas = ns.pop('source_sas', None)
    snapshot = ns.pop('source_snapshot', None)
    share = ns.pop('source_share', None)
    path = ns.pop('source_path', None)

    if uri:
        if any([container, blob, sas, snapshot, share, path]):
            raise ValueError(usage_string)
        # A complete URI was supplied directly; nothing more to build.
        return

    is_blob = bool(container and blob and not share and not path)
    is_file = bool(share and path and not container and not blob and not snapshot)
    # Exactly one of the two source forms must be present.
    if is_blob == is_file:  # pylint: disable=line-too-long
        raise ValueError(usage_string)

    query_params = [q for q in (sas, snapshot) if q]
    namespace.copy_source = 'https://{0}.{1}.{6}/{2}/{3}{4}{5}'.format(
        storage_acc,
        'blob' if is_blob else 'file',
        container if is_blob else share,
        blob if is_blob else path,
        '?' if query_params else '',
        '&'.join(query_params),
        CLOUD.suffixes.storage_endpoint)
def _register_builtin_arguments(**kwargs):
    """Attach the built-in global CLI arguments to the parser's global group."""
    group = kwargs["global_group"]
    group.add_argument('--output', '-o', dest='_output_format',
                       choices=['json', 'tsv', 'list', 'table', 'jsonc'],
                       default=az_config.get('core', 'output', fallback='json'),
                       help='Output format', type=str.lower)
    # The verbosity flags are not actually consumed through argparse; they are
    # registered here only so that they show up in --help output.
    # pylint: disable=line-too-long
    group.add_argument('--verbose', dest='_log_verbosity_verbose', action='store_true',
                       help='Increase logging verbosity. Use --debug for full debug logs.')
    group.add_argument('--debug', dest='_log_verbosity_debug', action='store_true',
                       help='Increase logging verbosity to show all debug logs.')
import time
import uuid
from subprocess import CalledProcessError, check_output

import requests
import yaml

import azure.cli.core.azlogging as azlogging
from azure.cli.core._config import az_config
from azure.cli.core._profile import _SERVICE_PRINCIPAL, CredsCache, Profile
# pylint: disable=too-few-public-methods,too-many-arguments,no-self-use,too-many-locals,line-too-long
from azure.cli.core.util import CLIError

# Module-level logger for this command module.
logger = azlogging.get_az_logger(__name__)

# Base endpoint of the container (Mindaro) service; taken from the
# [container] service_url config setting, with a hosted default.
BASE_URL = az_config.get('container', 'service_url', fallback='https://api.mindaro.microsoft.io')

# ARM-style URL templates used to address a container-service resource.
SUBSCRIPTION_URL = "/subscriptions/{subscription_id}"
RESOURCE_BASE_URL = SUBSCRIPTION_URL + "/resourceGroups/{resource_group_name}"
CONTAINER_SERVICE_PROVIDER = "/providers/Microsoft.ContainerService"
CONTAINER_SERVICE_RESOURCE_URL = RESOURCE_BASE_URL + \
    CONTAINER_SERVICE_PROVIDER + \
    "/containerServices/{container_service_name}"
SERVICE_URL = BASE_URL + SUBSCRIPTION_URL
API_VERSION = "2016-11-01-preview"

# Resource id presented when acquiring credentials for the service.
SERVICE_RESOURCE_ID = "https://mindaro.microsoft.io/"

# Well-known file names looked for in the user's source tree.
DOCKERFILE_FILE = 'Dockerfile'
DOCKER_COMPOSE_FILE = 'docker-compose.yml'
DOCKER_COMPOSE_EXPECTED_VERSION = '2'
import logging from logging.handlers import RotatingFileHandler import colorama from azure.cli.core._environment import get_config_dir from azure.cli.core._config import az_config AZ_LOGFILE_NAME = 'az.log' DEFAULT_LOG_DIR = os.path.join(get_config_dir(), 'logs') ENABLE_LOG_FILE = az_config.getboolean('logging', 'enable_log_file', fallback=False) LOG_DIR = os.path.expanduser( az_config.get('logging', 'log_dir', fallback=DEFAULT_LOG_DIR)) CONSOLE_LOG_CONFIGS = [ # (default) { 'az': logging.WARNING, 'root': logging.CRITICAL, }, # --verbose { 'az': logging.INFO, 'root': logging.CRITICAL, }, # --debug { 'az': logging.DEBUG,
def _get_client(service, parsed_args):
    """Create a data-service client for *service* from CLI arguments.

    Each connection parameter is taken from ``parsed_args`` when present and
    non-empty, otherwise from the ``[storage]`` section of the CLI config.

    Using ``getattr(..., None)`` instead of direct attribute access prevents
    an ``AttributeError`` for namespaces that do not define every attribute
    (e.g. commands that never expose ``--sas-token``), matching the hardened
    sibling ``_get_client`` implementation elsewhere in the codebase.

    :param service: storage data-service class to instantiate.
    :param parsed_args: parsed argument namespace carrying optional
        ``account_name``/``account_key``/``connection_string``/``sas_token``.
    :return: the client produced by ``get_data_service_client``.
    """
    account_name = getattr(parsed_args, 'account_name', None) or az_config.get('storage', 'account', None)
    account_key = getattr(parsed_args, 'account_key', None) or az_config.get('storage', 'key', None)
    connection_string = getattr(parsed_args, 'connection_string', None) or \
        az_config.get('storage', 'connection_string', None)
    sas_token = getattr(parsed_args, 'sas_token', None) or az_config.get('storage', 'sas_token', None)
    return get_data_service_client(service, account_name, account_key, connection_string, sas_token)
import os import platform import logging from logging.handlers import RotatingFileHandler import colorama from azure.cli.core._environment import get_config_dir from azure.cli.core._config import az_config AZ_LOGFILE_NAME = 'az.log' DEFAULT_LOG_DIR = os.path.join(get_config_dir(), 'logs') ENABLE_LOG_FILE = az_config.getboolean('logging', 'enable_log_file', fallback=False) LOG_DIR = os.path.expanduser(az_config.get('logging', 'log_dir', fallback=DEFAULT_LOG_DIR)) CONSOLE_LOG_CONFIGS = [ # (default) { 'az': logging.WARNING, 'root': logging.CRITICAL, }, # --verbose { 'az': logging.INFO, 'root': logging.CRITICAL, }, # --debug { 'az': logging.DEBUG,
def on_input_timeout(self, cli):
    """
    When there is a pause in typing
    Brings up the metadata for the command if
    there is a valid command already typed
    """
    # Window dimensions come back as strings; normalize to ints for layout math.
    rows, cols = get_window_dim()
    rows = int(rows)
    cols = int(cols)
    document = cli.current_buffer.document
    text = document.text
    command = ""
    all_params = ""
    example = ""
    # Blank filler the width of the terminal, used to pad the bottom toolbar.
    empty_space = ""
    for i in range(cols):
        empty_space += " "
    any_documentation = False
    is_command = True
    for word in text.split():
        # The first word starting with '-' ends the command part; everything
        # before it (except the leading 'az') is accumulated as the command.
        if word.startswith("-"):
            is_command = False
        if is_command:
            if not word == 'az':
                command += str(word) + " "
        if self.completer.is_completable(command.rstrip()):
            cmdstp = command.rstrip()
            any_documentation = True
            # If the current word is a known parameter of the command and a
            # description exists for it, show that parameter's help text.
            if word in self.completer.command_parameters[cmdstp] and \
                    self.completer.has_description(cmdstp + " " + word):
                all_params = word + ":\n" + \
                    self.completer.get_param_description(cmdstp + " " + word)
            self.description_docs = u"%s" % \
                self.completer.get_description(cmdstp)
            if cmdstp in self.completer.command_examples:
                # NOTE(review): string_example is built but never used after
                # this loop; 'example' is then rebound to the spaced examples.
                string_example = ""
                for example in self.completer.command_examples[cmdstp]:
                    for part in example:
                        string_example += part
                example = self.space_examples(
                    self.completer.command_examples[cmdstp], rows)
    if not any_documentation:
        self.description_docs = u''
    self.param_docs = u"%s" % all_params
    self.example_docs = u'%s' % example
    # Render the user's configured defaults ("opt: value ...") for the
    # default-values buffer.
    options = az_config.config_parser.options(DEFAULTS_SECTION)
    self.config_default = ""
    for opt in options:
        self.config_default += opt + ": " + az_config.get(DEFAULTS_SECTION, opt) + " "
    settings, empty_space = self._toolbar_info(cols, empty_space)
    # Push the freshly computed text into each prompt-toolkit buffer and
    # ask the CLI to repaint.
    cli.buffers['description'].reset(
        initial_document=Document(self.description_docs, cursor_position=0))
    cli.buffers['parameter'].reset(
        initial_document=Document(self.param_docs))
    cli.buffers['examples'].reset(
        initial_document=Document(self.example_docs))
    cli.buffers['bottom_toolbar'].reset(
        initial_document=Document(u'%s%s%s' %
                                  (NOTIFICATIONS, settings, empty_space)))
    cli.buffers['default_values'].reset(
        initial_document=Document(u'%s'
                                  % self.config_default))
    cli.request_redraw()
import re import time import uuid from subprocess import CalledProcessError, check_output import requests import yaml import azure.cli.core.azlogging as azlogging from azure.cli.core._config import az_config from azure.cli.core._profile import _SERVICE_PRINCIPAL, CredsCache, Profile # pylint: disable=too-few-public-methods,too-many-arguments,no-self-use,too-many-locals,line-too-long from azure.cli.core.util import CLIError logger = azlogging.get_az_logger(__name__) BASE_URL = az_config.get('container', 'service_url', fallback='https://api.mindaro.microsoft.io') SUBSCRIPTION_URL = "/subscriptions/{subscription_id}" RESOURCE_BASE_URL = SUBSCRIPTION_URL + "/resourceGroups/{resource_group_name}" CONTAINER_SERVICE_PROVIDER = "/providers/Microsoft.ContainerService" CONTAINER_SERVICE_RESOURCE_URL = (RESOURCE_BASE_URL + CONTAINER_SERVICE_PROVIDER + "/containerServices/{container_service_name}") SERVICE_URL = BASE_URL + SUBSCRIPTION_URL API_VERSION = "2016-11-01-preview" SERVICE_RESOURCE_ID = "https://mindaro.microsoft.io/" DOCKERFILE_FILE = 'Dockerfile' DOCKER_COMPOSE_FILE = 'docker-compose.yml' DOCKER_COMPOSE_EXPECTED_VERSION = '2' def add_release( target_name,
def log_telemetry(name, log_type='event', **kwargs):
    """Queue a telemetry record for later upload.

    IMPORTANT: do not log events with quotes in the name, properties or
    measurements; those events may fail to upload. Also, telemetry events
    must be verified in the backend because successful upload does not
    guarantee success.
    """
    if not user_agrees_to_telemetry():
        return

    # Telemetry is enabled; resolve the user's profile, best effort only.
    try:
        from azure.cli.core._profile import Profile
        profile = Profile()
    except:  # pylint: disable=bare-except
        profile = None

    try:
        name = _remove_cmd_chars(name)
        _sanitize_inputs(kwargs)

        if _in_ci():
            source = 'CI'
        elif ARGCOMPLETE_ENV_NAME in os.environ:
            source = 'completer'
        else:
            source = 'az'

        types = ['event', 'pageview', 'trace']
        if log_type not in types:
            raise ValueError('Type {} is not supported. Available types: {}'.format(log_type,
                                                                                    types))

        props = {'telemetry-version': TELEMETRY_VERSION}

        # Each property value is produced lazily and recorded via _safe_exec
        # so that one failing producer cannot break the whole record.
        producers = [
            ('time', lambda: str(datetime.datetime.now())),
            ('x-ms-client-request-id',
             lambda: APPLICATION.session['headers']['x-ms-client-request-id']),
            ('command', lambda: APPLICATION.session.get('command', None)),
            ('version', lambda: core_version),
            ('source', lambda: source),
            ('installation-id', lambda: _get_installation_id(profile)),
            ('python-version',
             lambda: _remove_symbols(str(platform.python_version()))),
            ('shell-type', _get_shell_type),
            ('locale', lambda: '{},{}'.format(locale.getdefaultlocale()[0],
                                              locale.getdefaultlocale()[1])),
            ('user-machine-id', _get_user_machine_id),
            ('user-azure-id', lambda: _get_user_azure_id(profile)),
            ('azure-subscription-id', lambda: _get_azure_subscription_id(profile)),
            ('default-output-type',
             lambda: az_config.get('core', 'output', fallback='unknown')),
            ('environment', _get_env_string),
        ]
        if log_type == 'trace':
            producers.append(('trace', _get_stack_trace))
            producers.append(('error-hash', _get_error_hash))
        for key, producer in producers:
            _safe_exec(props, key, producer)

        if kwargs:
            props.update(**kwargs)

        telemetry_records.append({
            'name': name,
            'type': log_type,
            'properties': props,
        })
    except Exception as ex:  # pylint: disable=broad-except
        # Never fail the command because of telemetry, unless debugging
        if _debugging():
            raise ex