# NOTE(review): this block appears corrupted — the repeated "'******'" sequences
# look like credential-scanner masking artifacts that replaced the prompt
# strings and parts of the error messages, so the code below is not valid
# Python as shown. The apparent intent (to be confirmed against VCS history):
#   1. if a username is given, prompt for the missing password (or fail in
#      non-interactive mode) and query the registry;
#   2. otherwise try the registry's admin credentials via acr_credential_show;
#   3. finally fall back to interactively prompting for both username and
#      password, failing with a clear message in non-interactive mode.
# Also note the bare `except: pass` around the admin-credential attempt — a
# deliberate best-effort fallback, but it hides real errors; worth narrowing
# once the block is restored.
def _validate_user_credentials(registry_name, path, resultIndex, username=None, password=None): registry, _ = get_registry_by_name(registry_name) login_server = registry.login_server #pylint: disable=no-member if username: if not password: try: password = prompt_pass(msg='Password: '******'Please specify both username and password in non-interactive mode.' ) return _obtain_data_from_registry(login_server, path, resultIndex, username, password) try: cred = acr_credential_show(registry_name) username = cred.username password = cred.passwords[0].value return _obtain_data_from_registry(login_server, path, resultIndex, username, password) except: #pylint: disable=bare-except pass try: username = prompt('Username: '******'Password: '******'Unable to authenticate using admin login credentials or admin is not enabled. ' + 'Please specify both username and password in non-interactive mode.' ) return _obtain_data_from_registry(login_server, path, resultIndex, username, password)
def _get_remote_url():
    """
    Tries to find a remote for the repo in the current folder.
    If only one remote is present return that remote,
    if more than one remote is present it looks for origin.
    """
    try:
        # `git remote` output is bytes; one remote name per line.
        remotes = check_output(['git', 'remote']).strip().splitlines()
        remote_url = ''
        if len(remotes) == 1:
            remote_url = check_output(['git', 'remote', 'get-url', remotes[0].decode()]).strip()
        else:
            # Zero or multiple remotes: fall back to 'origin'; if it does not
            # exist, `git` exits non-zero and we land in CalledProcessError below.
            remote_url = check_output(['git', 'remote', 'get-url', 'origin']).strip()
    except ValueError as e:
        logger.debug(e)
        raise CLIError(
            "A default remote was not found for the current folder. \
Please run this command in a git repository folder with \
an 'origin' remote or specify a remote using '--remote-url'")
    except CalledProcessError as e:
        # git itself failed (not a repo, no such remote, ...): surface its error.
        raise CLIError(e)
    return remote_url.decode()
def _resolve_service_principal(client, identifier): # todo: confirm with graph team that a service principal name must be unique result = list( client.list(filter="servicePrincipalNames/any(c:c eq '{}')".format( identifier))) if result: return result[0].object_id try: uuid.UUID(identifier) return identifier # assume an object id except ValueError: raise CLIError( "service principal '{}' doesn't exist".format(identifier))
def _object_id_args_helper(object_id, spn, upn):
    # Resolve an AAD object id: use the one given directly, otherwise look it
    # up in the Graph API by service principal name (spn) or user principal
    # name (upn).
    if not object_id:
        from azure.cli.core._profile import Profile, CLOUD
        profile = Profile()
        # Authenticate against the AD Graph endpoint of the active cloud.
        cred, _, tenant_id = profile.get_login_credentials(
            resource=CLOUD.endpoints.active_directory_graph_resource_id)
        graph_client = GraphRbacManagementClient(cred, tenant_id, base_url=CLOUD.endpoints.active_directory_graph_resource_id) # pylint: disable=line-too-long
        object_id = _get_object_id(graph_client, spn=spn, upn=upn)
        if not object_id:
            raise CLIError('Unable to get object id from principal name.')
    return object_id
def list_activity_log(client, filters=None, correlation_id=None, resource_group=None,
                      resource_id=None, resource_provider=None, start_time=None,
                      end_time=None, caller=None, status=None, max_events=50, select=None):
    '''Provides the list of activity log.
    :param str filters: The OData filter for the list activity logs. If this argument is provided
        OData Filter Arguments will be ignored
    :param str correlation_id: The correlation id of the query
    :param str resource_group: The resource group
    :param str resource_id: The identifier of the resource
    :param str resource_provider: The resource provider
    :param str start_time: The start time of the query. In ISO format with explicit indication of
        timezone: 1970-01-01T00:00:00Z, 1970-01-01T00:00:00-0500. Defaults to 1 Hour prior to the
        current time.
    :param str end_time: The end time of the query. In ISO format with explicit indication of
        timezone: 1970-01-01T00:00:00Z, 1970-01-01T00:00:00-0500. Defaults to current time.
    :param str caller: The caller to look for when querying
    :param str status: The status value to query (ex: Failed)
    :param str max_events: The maximum number of records to be returned by the command
    :param str select: The list of event names
    '''
    if filters:
        # An explicit OData filter overrides every individual filter argument.
        odata_filters = filters
    else:
        collection = [
            correlation_id, resource_group, resource_id, resource_provider
        ]
        # At most one of the scoping arguments may be supplied.
        if not _single(collection):
            raise CLIError(
                "usage error: [--correlation-id ID | --resource-group NAME | "
                "--resource-id ID | --resource-provider PROVIDER]")
        odata_filters = _build_activity_log_odata_filter(
            correlation_id, resource_group, resource_id, resource_provider,
            start_time, end_time, caller, status)
    if max_events:
        max_events = int(max_events)
    select_filters = _activity_log_select_filter_builder(select)
    activity_log = client.list(filter=odata_filters, select=select_filters)
    # Service results are paged; truncate client-side at max_events.
    return _limit_results(activity_log, max_events)
def create_service_principal_for_rbac(name=None, password=None, years=1, #pylint:disable=too-many-arguments
                                      scopes=None, role=None, expanded_view=None):
    '''create a service principal that can access or modify resources
    :param str name: an unique uri. If missing, the command will generate one.
    :param str password: the password used to login. If missing, command will generate one.
    :param str years: Years the password will be valid.
    :param str scopes: space separated scopes the service principal's role assignment applies to.
    :param str role: role the service principal has on the resources. only use with 'resource-ids'.
    '''
    # Scopes and role only make sense together: one names the resources, the
    # other the permission level on them.
    if bool(scopes) != bool(role):
        raise CLIError("'--scopes' and '--role' must be used together.")
    client = _graph_client_factory()
    start_date = datetime.datetime.utcnow()
    # Timestamped display name keeps repeated runs distinguishable in the portal.
    app_display_name = 'azure-cli-' + start_date.strftime('%Y-%m-%d-%H-%M-%S')
    if name is None:
        name = 'http://' + app_display_name # just a valid uri, no need to exist
    end_date = start_date + relativedelta(years=years)
    password = password or str(uuid.uuid4())
    aad_application = create_application(client.applications,
                                         display_name=app_display_name, #pylint: disable=too-many-function-args
                                         homepage='http://'+app_display_name,
                                         identifier_uris=[name],
                                         available_to_other_tenants=False,
                                         password=password,
                                         start_date=start_date,
                                         end_date=end_date)
    #pylint: disable=no-member
    aad_sp = _create_service_principal(aad_application.app_id, bool(scopes))
    # When scopes are given the SP is created with a raw-response wrapper so we
    # can read the session-key header below; the object id lives one level deeper.
    oid = aad_sp.output.object_id if scopes else aad_sp.object_id
    if scopes:
        #It is possible the SP has not been propagated to all servers, so creating assignments
        #might fail. The reliable workaround is to call out the server where creation occurred.
        #pylint: disable=protected-access
        session_key = aad_sp.response.headers._store['ocp-aad-session-key'][1]
        for scope in scopes:
            _create_role_assignment(role, oid, None, scope,
                                    ocp_aad_session_key=session_key)
    if expanded_view:
        from azure.cli.core._profile import Profile
        profile = Profile()
        # scopes[0].split('/')[2] extracts the subscription id from the scope path.
        result = profile.get_expanded_subscription_info(scopes[0].split('/')[2] if scopes else None,
                                                        aad_application.app_id, password)
    else:
        result = {
            'appId': aad_application.app_id,
            'password': password,
            'name': name,
            'tenant': client.config.tenant_id
        }
    return result
def find_subscriptions_on_login(
        self,  # pylint: disable=too-many-arguments
        interactive,
        username,
        password,
        is_service_principal,
        tenant):
    # Authenticate (interactively, as a user, or as a service principal) and
    # load the resulting account's subscriptions into the profile.
    from azure.cli.core._debug import allow_debug_adal_connection
    allow_debug_adal_connection()
    subscriptions = []
    if interactive:
        subscriptions = self._subscription_finder.find_through_interactive_flow(
            tenant, self._management_resource_uri)
    else:
        if is_service_principal:
            # Service principals are tenant-scoped; there is no home tenant to infer.
            if not tenant:
                raise CLIError('Please supply tenant using "--tenant"')
            subscriptions = self._subscription_finder.find_from_service_principal_id(
                username, password, tenant, self._management_resource_uri)
        else:
            subscriptions = self._subscription_finder.find_from_user_account(
                username, password, tenant, self._management_resource_uri)
    if not subscriptions:
        raise CLIError('No subscriptions found for this account.')
    if is_service_principal:
        # Persist SP credentials so later commands can re-authenticate silently.
        self._creds_cache.save_service_principal_cred(
            username, password, tenant)
    if self._creds_cache.adal_token_cache.has_state_changed:
        self._creds_cache.persist_cached_creds()
    consolidated = Profile._normalize_properties(
        self._subscription_finder.user_id, subscriptions, is_service_principal)
    self._set_subscriptions(consolidated)
    # use deepcopy as we don't want to persist these changes to file.
    return deepcopy(consolidated)
def _dcos_browse_internal(acs_info, disable_browser, ssh_key_file):
    # Start the cluster-side octarine proxy over SSH, route local HTTP traffic
    # through it, and (optionally) open a browser pointed at the cluster.
    acs = acs_client.ACSClient()
    if not acs.connect(_get_host_name(acs_info), _get_username(acs_info),
                       key_filename=ssh_key_file):
        raise CLIError('Error connecting to ACS: {}'.format(
            _get_host_name(acs_info)))
    octarine_bin = '/opt/mesosphere/bin/octarine'
    if not acs.file_exists(octarine_bin):
        raise CLIError(
            'Proxy server ({}) does not exist on the cluster.'.format(
                octarine_bin))
    # Random id ties the background server and the client query together.
    proxy_id = _rand_str(16)
    proxy_cmd = '{} {}'.format(octarine_bin, proxy_id)
    acs.run(proxy_cmd, background=True)
    # Parse the output to get the remote PORT
    proxy_client_cmd = '{} --client --port {}'.format(octarine_bin, proxy_id)
    stdout, _ = acs.run(proxy_client_cmd)
    remote_port = int(stdout.read().decode().strip())
    local_port = acs.get_available_local_port()
    # Set the proxy
    proxy.set_http_proxy('127.0.0.1', local_port)
    logger.warning('Proxy running on 127.0.0.1:%s', local_port)
    logger.warning('Press CTRL+C to close the tunnel...')
    if not disable_browser:
        # NOTE(review): URL has no port — presumably requests are routed via the
        # system HTTP proxy configured just above rather than hitting port 80
        # directly; confirm this is intentional and not a missing local_port.
        wait_then_open_async('http://127.0.0.1')
    try:
        acs.create_tunnel(remote_host='127.0.0.1', remote_port=remote_port,
                          local_port=local_port)
    finally:
        # Always restore the proxy settings, even if the tunnel fails.
        proxy.disable_http_proxy()
    return
def _get_sku_name(tier): tier = tier.upper() if tier == 'F1': return 'FREE' elif tier == 'D1': return 'SHARED' elif tier in ['B1', 'B2', 'B3']: return 'BASIC' elif tier in ['S1', 'S2', 'S3']: return 'STANDARD' elif tier in ['P1', 'P2', 'P3']: return 'PREMIUM' else: raise CLIError("Invalid sku(pricing tier), please refer to command help for valid values")
def create_resource_group(resource_group_name, location, tags=None):
    """Create a new resource group.

    :param str resource_group_name: the desired resource group name
    :param str location: the resource group location
    :param str tags: tags in 'a=b;c' format
    """
    rcf = _resource_client_factory()
    # Fail early with a friendly message rather than letting the service reject it.
    if rcf.resource_groups.check_existence(resource_group_name):
        raise CLIError(
            'resource group {} already exists'.format(resource_group_name))
    group_params = ResourceGroup(location=location, tags=tags)
    return rcf.resource_groups.create_or_update(resource_group_name, group_params)
def dcos_install_cli(install_location=None, client_version='1.8'):
    """
    Downloads the dcos command line from Mesosphere

    :param str install_location: full path (including file name) to write the
        binary to; required.
    :param str client_version: DC/OS CLI release to download (e.g. '1.8').
    :raises CLIError: when no install location is given, the platform is
        unsupported, or the download fails.
    """
    system = platform.system()
    if not install_location:
        raise CLIError("No install location specified and it could not be determined from the current platform '{}'".format(system))
    # One URL template per supported platform.
    base_url = 'https://downloads.dcos.io/binaries/cli/{}/x86-64/dcos-{}/{}'
    if system == 'Windows':
        file_url = base_url.format('windows', client_version, 'dcos.exe')
    elif system == 'Linux':
        file_url = base_url.format('linux', client_version, 'dcos')
    elif system == 'Darwin':
        file_url = base_url.format('darwin', client_version, 'dcos')
    else:
        # BUG FIX: the previous message ("Proxy server ... does not exist on the
        # cluster") was copy-pasted from the DC/OS browse command and misleading.
        raise CLIError("Unsupported platform '{}'.".format(system))
    logger.info('Downloading client to %s', install_location)
    try:
        urlretrieve(file_url, install_location)
    except IOError as err:
        raise CLIError('Connection error while attempting to download client ({})'.format(err))
def delete_certificate_contact(client, vault_base_url, contact_email):
    """Remove a certificate contact from the specified vault."""
    from azure.cli.command_modules.keyvault.keyvaultclient.generated.models import \
        (Contacts, KeyVaultErrorException)
    current = client.get_certificate_contacts(vault_base_url).contact_list
    # Keep every contact except the one being removed.
    kept = [c for c in current if c.email_address != contact_email]
    remaining = Contacts(kept)
    if len(kept) == len(current):
        raise CLIError("contact '{}' not found in vault '{}'".format(
            contact_email, vault_base_url))
    if kept:
        return client.set_certificate_contacts(vault_base_url, remaining)
    # Removing the last contact deletes the whole contacts resource.
    return client.delete_certificate_contacts(vault_base_url)
def list_role_assignments(assignee=None, role=None, resource_group_name=None,#pylint: disable=too-many-arguments
                          scope=None, include_inherited=False,
                          show_all=False, include_groups=False):
    '''
    :param include_groups: include extra assignments to the groups of which the user is a
    member(transitively). Supported only for a user principal.
    '''
    graph_client = _graph_client_factory()
    factory = _auth_client_factory(scope)
    assignments_client = factory.role_assignments
    definitions_client = factory.role_definitions
    # BUG FIX: a stray 'scope = None' here discarded the caller-supplied scope,
    # which made the '--all' validation below dead code and silently widened
    # every query to the whole subscription.
    if show_all:
        if resource_group_name or scope:
            raise CLIError('group or scope are not required when --all is used')
        scope = None
    else:
        scope = _build_role_scope(resource_group_name, scope,
                                  definitions_client.config.subscription_id)
    assignments = _search_role_assignments(assignments_client, definitions_client,
                                           scope, assignee, role,
                                           include_inherited, include_groups)
    if not assignments:
        return []
    #fill in logic names to get things understandable.
    #it's possible that associated roles and principals were deleted, and we just do nothing.
    results = todict(assignments)
    #pylint: disable=line-too-long
    #fill in role names
    role_defs = list(definitions_client.list(
        scope=scope or ('/subscriptions/' + definitions_client.config.subscription_id)))
    role_dics = {i.id: i.properties.role_name for i in role_defs}
    for i in results:
        i['properties']['roleDefinitionName'] = role_dics.get(i['properties']['roleDefinitionId'], None)
    #fill in principal names
    principal_ids = set(i['properties']['principalId'] for i in results)
    if principal_ids:
        principals = _get_object_stubs(graph_client, principal_ids)
        principal_dics = {i.object_id:_get_displayable_name(i) for i in principals}
        for i in results:
            i['properties']['principalName'] = principal_dics.get(i['properties']['principalId'], None)
    return results
def retrieve_token_for_user(self, username, tenant, resource):
    """Return (token_type, access_token) for *username* from the local ADAL cache."""
    auth_context = self._auth_ctx_factory(get_authority_url(tenant),
                                          cache=self.adal_token_cache)
    entry = auth_context.acquire_token(resource, username, _CLIENT_ID)
    if not entry:
        raise CLIError(
            "Could not retrieve token from local cache, please run 'az login'."
        )
    # Write the cache back to disk if acquire_token refreshed anything.
    if self.adal_token_cache.has_state_changed:
        self.persist_cached_creds()
    return (entry[_TOKEN_ENTRY_TOKEN_TYPE], entry[_ACCESS_TOKEN])
def _resolve_api_version(provider_namespace, resource_type, parent_path):
    """Pick an api-version for a resource type, preferring non-preview versions."""
    from azure.mgmt.resource.resources import ResourceManagementClient
    from azure.cli.core.commands.client_factory import get_mgmt_service_client
    client = get_mgmt_service_client(ResourceManagementClient)
    provider = client.providers.get(provider_namespace)
    # If available, we will use parent resource's api-version
    lookup_type = parent_path.split('/')[0] if parent_path else resource_type
    matched = [t for t in provider.resource_types  # pylint: disable=no-member
               if t.resource_type.lower() == lookup_type.lower()]
    if not matched:
        raise CLIError('Resource type {} not found.'.format(lookup_type))
    if len(matched) == 1 and matched[0].api_versions:
        # Prefer a stable (non-preview) version when one exists.
        stable = [v for v in matched[0].api_versions if 'preview' not in v.lower()]
        return stable[0] if stable else matched[0].api_versions[0]
    raise CLIError(
        'API version is required and could not be resolved for resource {}'
        .format(resource_type))
def __init__(self, password_arg_value):
    """Accept either a client secret or a path to a PEM certificate file.

    :param str password_arg_value: the raw --password argument; if it names an
        existing file it is treated as a PEM certificate, otherwise as a secret.
    :raises CLIError: when the value is empty.
    """
    if not password_arg_value:
        # BUG FIX: error message previously misspelled 'authenticate'
        # as 'authnenticate'.
        raise CLIError('missing secret or certificate in order to '
                       'authenticate through a service principal')
    if os.path.isfile(password_arg_value):
        # The argument is a path: load the certificate and record its SHA-1
        # thumbprint, which AAD uses to match the credential.
        certificate_file = password_arg_value
        from OpenSSL.crypto import load_certificate, FILETYPE_PEM
        self.certificate_file = certificate_file
        with open(certificate_file, 'r') as file_reader:
            self.cert_file_string = file_reader.read()
        cert = load_certificate(FILETYPE_PEM, self.cert_file_string)
        self.thumbprint = cert.digest("sha1").decode()
    else:
        self.secret = password_arg_value
def set_acl_policy(client, container_name, policy_name, start=None, expiry=None,
                   permission=None, **kwargs):
    """Set a stored access policy on a containing object."""
    from azure.storage.models import AccessPolicy
    if not (start or expiry or permission):
        raise CLIError(
            'Must specify at least one property when updating an access policy.'
        )
    acl = _get_acl(client, container_name, **kwargs)
    try:
        existing = acl[policy_name]
    except KeyError:
        raise CLIError('ACL does not contain {}'.format(policy_name))
    # Only overwrite the fields the caller supplied; keep the rest unchanged.
    existing.start = start or existing.start
    existing.expiry = expiry or existing.expiry
    existing.permission = permission or existing.permission
    return _set_acl(client, container_name, acl, **kwargs)
def _validate_admin_password(password, os_type): is_linux = (os_type.lower() == 'linux') max_length = 72 if is_linux else 123 min_length = 12 if len(password) not in range(min_length, max_length + 1): raise CLIError('The pssword length must be between {} and {}'.format( min_length, max_length)) contains_lower = re.findall('[a-z]+', password) contains_upper = re.findall('[A-Z]+', password) contains_digit = re.findall('[0-9]+', password) contains_special_char = re.findall( r'[ `~!@#$%^&*()=+_\[\]{}\|;:.\/\'\",<>?]+', password) count = len([ x for x in [ contains_lower, contains_upper, contains_digit, contains_special_char ] if x ]) # pylint: disable=line-too-long if count < 3: raise CLIError( 'Password must have the 3 of the following: 1 lower case character, 1 upper case character, 1 number and 1 special character' )
def k8s_install_cli(client_version="1.4.5", install_location=None):
    """
    Downloads the kubectl command line from Kubernetes

    :param str client_version: Kubernetes release to download (e.g. '1.4.5').
    :param str install_location: full path (including file name) to write the
        binary to.
    :raises CLIError: when the platform is unsupported or the download fails.
    """
    file_url = ''
    system = platform.system()
    base_url = 'https://storage.googleapis.com/kubernetes-release/release/v{}/bin/{}/amd64/{}'
    if system == 'Windows':
        file_url = base_url.format(client_version, 'windows', 'kubectl.exe')
    elif system == 'Linux':
        # TODO: Support ARM CPU here
        file_url = base_url.format(client_version, 'linux', 'kubectl')
    elif system == 'Darwin':
        file_url = base_url.format(client_version, 'darwin', 'kubectl')
    else:
        # BUG FIX: the previous message ("Proxy server ... does not exist on the
        # cluster") was copy-pasted from the DC/OS browse command and misleading.
        raise CLIError("Unsupported platform '{}'.".format(system))
    logger.info('Downloading client to %s', install_location)
    try:
        urlretrieve(file_url, install_location)
        # Mark the downloaded binary executable for user, group and others.
        os.chmod(install_location,
                 os.stat(install_location).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
    except IOError as err:
        raise CLIError('Connection error while attempting to download client ({})'.format(err))
def _post_check_names(zone): # get the origin name that has the SOA record # ensure the origin is in each record set origin = None for name in zone: for record_type in zone[name]: if record_type == 'soa': origin = name break if origin: break bad_names = [x for x in zone if origin not in x] if bad_names: raise CLIError( "Record names '{}' are not part of the domain.".format(bad_names))
def _resolve_application(client, identifier): result = list( client.list( filter="identifierUris/any(s:s eq '{}')".format(identifier))) if not result: try: uuid.UUID(identifier) #it is either app id or object id, let us verify result = list( client.list(filter="appId eq '{}'".format(identifier))) except ValueError: raise CLIError("Application '{}' doesn't exist".format(identifier)) return result[0].object_id if result else identifier
def iot_hub_show_connection_string(client, hub_name=None, resource_group_name=None,
                                   policy_name='iothubowner', key_type=KeyType.primary.value):
    """Show the connection string for one hub, or for every hub when no name is given."""
    if hub_name is not None:
        # Single-hub case: resolve the resource group if the caller omitted it.
        resource_group_name = _ensure_resource_group_name(client, resource_group_name, hub_name)
        conn_str = _get_single_hub_connection_string(client, hub_name, resource_group_name,
                                                     policy_name, key_type)
        return {'connectionString': conn_str}
    hubs = iot_hub_list(client, resource_group_name)
    if hubs is None:
        raise CLIError('No IoT Hub found.')
    return [{'name': h.name,
             'connectionString': _get_single_hub_connection_string(
                 client, h.name, h.resourcegroup, policy_name, key_type)}
            for h in hubs]
def remove(component_name, show_logs=False):
    """Remove a component."""
    full_component_name = COMPONENT_PREFIX + component_name
    installed = any(dist.key == full_component_name
                    for dist in pip.get_installed_distributions(local_only=True))
    if not installed:
        raise CLIError("Component not installed.")
    options = ['--isolated', '--yes']
    # Suppress pip's output unless the caller asked for logs.
    if not show_logs:
        options.append('--quiet')
    pip.main(['uninstall'] + options +
             ['--disable-pip-version-check', full_component_name])
def _k8s_browse_internal(name, acs_info, disable_browser, ssh_key_file):
    # Fetch cluster credentials into a throwaway kubeconfig and serve the
    # Kubernetes dashboard locally via `kubectl proxy` (blocks until CTRL+C).
    if not which('kubectl'):
        raise CLIError('Can not find kubectl executable in PATH')
    browse_path = os.path.join(get_config_dir(), 'acsBrowseConfig.yaml')
    # Start from a fresh kubeconfig every time.
    if os.path.exists(browse_path):
        os.remove(browse_path)
    _k8s_get_credentials_internal(name, acs_info, browse_path, ssh_key_file)
    logger.warning('Proxy running on 127.0.0.1:8001/ui')
    logger.warning('Press CTRL+C to close the tunnel...')
    if not disable_browser:
        # Launch the browser asynchronously so the blocking proxy call below
        # can start serving before the page is requested.
        wait_then_open_async('http://127.0.0.1:8001/ui')
    subprocess.call(["kubectl", "--kubeconfig", browse_path, "proxy"])
def validate_address_prefixes(namespace):
    """Strip the user-specified sentinel from address prefixes and reject explicit
    prefixes when an existing subnet is being reused."""
    user_set_subnet = SPECIFIED_SENTINEL in namespace.subnet_address_prefix
    user_set_vnet = SPECIFIED_SENTINEL in namespace.vnet_address_prefix
    # Remove the sentinel so downstream code sees clean prefix values.
    namespace.subnet_address_prefix = \
        namespace.subnet_address_prefix.replace(SPECIFIED_SENTINEL, '')
    namespace.vnet_address_prefix = \
        namespace.vnet_address_prefix.replace(SPECIFIED_SENTINEL, '')
    reusing_existing_subnet = namespace.subnet_type != 'new'
    if reusing_existing_subnet and (user_set_subnet or user_set_vnet):
        raise CLIError(
            'Existing subnet ({}) found. Cannot specify address prefixes when '
            'reusing an existing subnet.'.format(namespace.subnet))
def signed_session(self):
    # Build a requests session whose Authorization header is populated from
    # the token retriever, translating auth failures into CLI-friendly errors.
    session = super(AdalAuthentication, self).signed_session()
    try:
        scheme, token = self._token_retriever()
    except adal.AdalError as err:
        # pylint: disable=no-member
        # AADSTS70008 signals an expired refresh token: ask the user to log in
        # again rather than surfacing the raw ADAL error.
        if (hasattr(err, 'error_response') and
                ('error_description' in err.error_response) and
                ('AADSTS70008:' in err.error_response['error_description'])):
            raise CLIError(
                "Credentials have expired due to inactivity. Please run 'az login'"
            )
        raise CLIError(err)
    except requests.exceptions.ConnectionError as err:
        raise CLIError(
            'Please ensure you have network connection. Error detail: ' +
            str(err))
    header = "{} {}".format(scheme, token)
    session.headers['Authorization'] = header
    return session
def insert_table_entity(client, table_name, entity, if_exists='fail', timeout=None):
    """Insert *entity* into *table_name* with the given conflict policy."""
    # Dispatch on the conflict policy; every client call takes the same arguments.
    handlers = {
        'fail': client.insert_entity,
        'merge': client.insert_or_merge_entity,
        'replace': client.insert_or_replace_entity,
    }
    try:
        handler = handlers[if_exists]
    except KeyError:
        raise CLIError(
            "Unrecognized value '{}' for --if-exists".format(if_exists))
    return handler(table_name, entity, timeout)
def show_options(instance, part, path):
    """Raise a CLIError describing which keys/indexes are valid at this path."""
    candidates = instance.__dict__ if hasattr(instance, '__dict__') else instance
    parent = '.'.join(path[:-1]).replace('.[', '[')
    error_message = "Couldn't find '{}' in '{}'.".format(part, parent)
    if isinstance(candidates, dict):
        # Offer the dict's keys (camelCased to match CLI output conventions).
        names = sorted([make_camel_case(x) for x in candidates.keys()])
        error_message = '{} Available options: {}'.format(error_message, names)
    elif isinstance(candidates, list):
        hint = "index into the collection '{}' with [<index>] or [<key=value>]".format(parent)
        error_message = '{} Available options: {}'.format(error_message, hint)
    else:
        error_message = "{} '{}' does not support further indexing.".format(
            error_message, parent)
    raise CLIError(error_message)
def _get_resource_group_from_account_name(client, account_name):
    """
    Fetch the resource group that contains a Data Lake Store account.

    (The previous docstring incorrectly described a key vault lookup.)

    :param client: Data Lake Store account operations client; its list() is
        iterated across the whole subscription
    :param str account_name: name of the Data Lake Store account
    :return: resource group name
    :rtype: str
    :raises CLIError: when no account with that name exists in the subscription
    """
    for acct in client.list():
        id_comps = parse_resource_id(acct.id)
        if id_comps['name'] == account_name:
            return id_comps['resource_group']
    raise CLIError("The Resource 'Microsoft.DataLakeStore/accounts/{}'".format(
        account_name) + " not found within subscription: {}".format(
            client.config.subscription_id))
def set_adls_item_expiry(account_name, path, expiration_time):
    """Set an absolute expiry time on a Data Lake Store file."""
    fs_client = cf_dls_filesystem(account_name)
    # Expiry only applies to files, not folders.
    if fs_client.info(path)['type'] != 'FILE':
        # pylint: disable=line-too-long
        raise CLIError(
            'The specified path does not exist or is not a file. Please ensure the path points to a file and it exists. Path supplied: {}'
            .format(path))
    # Normalize to an integral value; `long` only exists on Python 2, so fall
    # back to `int` on Python 3.
    as_float = float(expiration_time)
    try:
        expiry_value = long(as_float)
    except NameError:
        expiry_value = int(as_float)
    fs_client.set_expiry(path, ExpiryOptionType.absolute.value, expiry_value)