def list_wl_recovery_points(cmd, client, resource_group_name, vault_name, item, start_date=None, end_date=None,
                            extended_info=None, is_ready_for_move=None, target_tier=None, use_secondary_region=None,
                            tier=None, recommended_for_archive=None):
    if recommended_for_archive is not None:
        raise ArgumentUsageError("""--recommended-for-archive is supported by AzureIaasVM backup management type only.""")

    # Get container and item URIs
    container_uri = cust_help.get_protection_container_uri_from_id(item.id)
    item_uri = cust_help.get_protected_item_uri_from_id(item.id)

    query_end_date, query_start_date = cust_help.get_query_dates(end_date, start_date)

    if query_end_date and query_start_date:
        cust_help.is_range_valid(query_start_date, query_end_date)

    filter_string = cust_help.get_filter_string({
        'startDate': query_start_date,
        'endDate': query_end_date})

    if cmd.name.split()[2] == 'show-log-chain' or extended_info is not None:
        filter_string = cust_help.get_filter_string({
            'restorePointQueryType': 'Log',
            'startDate': query_start_date,
            'endDate': query_end_date,
            'extendedInfo': extended_info})

    if use_secondary_region:
        client = recovery_points_crr_cf(cmd.cli_ctx)

    # Get recovery points
    recovery_points = client.list(vault_name, resource_group_name, fabric_name, container_uri, item_uri,
                                  filter_string)
    paged_recovery_points = cust_help.get_list_from_paged_response(recovery_points)
    common.fetch_tier(paged_recovery_points)

    if use_secondary_region:
        paged_recovery_points = [item for item in paged_recovery_points
                                 if item.properties.recovery_point_tier_details is None or
                                 (item.properties.recovery_point_tier_details is not None and
                                  item.tier_type != 'VaultArchive')]

    recovery_point_list = common.check_rp_move_readiness(paged_recovery_points, target_tier, is_ready_for_move)
    recovery_point_list = common.filter_rp_based_on_tier(recovery_point_list, tier)
    return recovery_point_list
def connect_to_flexible_server_mysql(cmd, server_name, administrator_login, administrator_login_password=None,
                                     database_name=None, interactive_mode=None, querytext=None):
    if querytext:
        raise ArgumentUsageError("Use az mysql flexible-server execute command for query execution")
    mysql_server_endpoint = cmd.cli_ctx.cloud.suffixes.mysql_server_endpoint
    return connect_to_server_helper(server_type="mysql",
                                    endpoint=mysql_server_endpoint,
                                    default_db_name=DEFAULT_MYSQL_DB_NAME,
                                    server_name=server_name,
                                    administrator_login=administrator_login,
                                    administrator_login_password=administrator_login_password,
                                    database_name=database_name,
                                    interactive=interactive_mode)
def _populate_api_server_access_profile(api_server_authorized_ip_ranges, instance=None):
    if instance is None or instance.api_server_access_profile is None:
        profile = ManagedClusterAPIServerAccessProfile()
    else:
        profile = instance.api_server_access_profile

    if api_server_authorized_ip_ranges is None or api_server_authorized_ip_ranges == "":
        authorized_ip_ranges = []
    else:
        authorized_ip_ranges = [ip.strip() for ip in api_server_authorized_ip_ranges.split(",")]

    if profile.enable_private_cluster and authorized_ip_ranges:
        raise ArgumentUsageError('--api-server-authorized-ip-ranges is not supported for private cluster')

    profile.authorized_ip_ranges = authorized_ip_ranges
    return profile
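# Illustrative sketch (not part of the original module) of how the helper above handles its
# --api-server-authorized-ip-ranges input: the comma-separated string is split and each entry
# whitespace-stripped, while an empty string clears the list. Assumes a freshly constructed
# ManagedClusterAPIServerAccessProfile whose enable_private_cluster flag is unset.
def _example_populate_api_server_access_profile():
    profile = _populate_api_server_access_profile("10.0.0.0/24, 20.0.0.0/24")
    # profile.authorized_ip_ranges == ['10.0.0.0/24', '20.0.0.0/24']
    cleared = _populate_api_server_access_profile("")
    # cleared.authorized_ip_ranges == []
    return profile, cleared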
def validate_staticsite_link_function(cmd, namespace):
    from azure.mgmt.web import WebSiteManagementClient
    validate_staticsite_sku(cmd, namespace)

    if not is_valid_resource_id(namespace.function_resource_id):
        raise ArgumentUsageError("--function-resource-id must specify a function resource ID. "
                                 "To get resource ID, use the following command, inserting the function "
                                 "group/name as needed: \n"
                                 "az functionapp show --resource-group \"[FUNCTION_RESOURCE_GROUP]\" "
                                 "--name \"[FUNCTION_NAME]\" --query id ")

    client = get_mgmt_service_client(cmd.cli_ctx, WebSiteManagementClient, api_version="2020-12-01").static_sites
    functions = client.get_user_provided_function_apps_for_static_site(
        name=namespace.name, resource_group_name=namespace.resource_group_name)
    if list(functions):
        raise ValidationError("Cannot have more than one user provided function app associated with a Static Web App")
def validate_msi(cmd, namespace, from_identity_command=False):
    identities = None
    if from_identity_command:
        if namespace.mi_system_assigned is not None or namespace.mi_user_assigned is None:
            identities = [MSI_LOCAL_ID]
            if namespace.mi_user_assigned is not None:
                raise ArgumentUsageError('Only one type of managed identity is allowed. '
                                         'Please use either --mi-system-assigned or --mi-user-assigned')
    else:
        if namespace.mi_system_assigned is not None or namespace.assign_identity is not None:
            identities = [MSI_LOCAL_ID]
            if namespace.mi_user_assigned is not None:
                raise ArgumentUsageError('Only one type of managed identity is allowed. '
                                         'Please use either --mi-system-assigned or --mi-user-assigned')

    if namespace.mi_user_assigned is not None:
        identities = [namespace.mi_user_assigned]

    if identities is not None:
        user_assigned_identities = [x for x in identities if x != MSI_LOCAL_ID]
        if user_assigned_identities and not cmd.supported_api_version(min_api='2021-06-01'):
            raise ArgumentUsageError('User assigned identity is only available under profile '
                                     'with minimum Authorization API version of 2021-06-01')
        if not namespace.identity_scope and getattr(namespace.identity_role, 'is_default', None) is None:
            raise ArgumentUsageError("'--role {}' is not applicable as the '--identity-scope' is not provided"
                                     .format(namespace.identity_role))
        if namespace.identity_scope:
            if identities and MSI_LOCAL_ID not in identities:
                raise ArgumentUsageError("'--identity-scope'/'--role' is only applicable when assigning a "
                                         "system identity")
    elif namespace.identity_scope or getattr(namespace.identity_role, 'is_default', None) is None:
        raise ArgumentUsageError("'--identity-scope'/'--role' is only applicable when assigning a system identity")
def _set_outbound_type(outbound_type, vnet_subnet_id, load_balancer_sku, load_balancer_profile):
    if (outbound_type != CONST_OUTBOUND_TYPE_USER_DEFINED_ROUTING and
            outbound_type != CONST_OUTBOUND_TYPE_MANAGED_NAT_GATEWAY and
            outbound_type != CONST_OUTBOUND_TYPE_USER_ASSIGNED_NAT_GATEWAY):
        return CONST_OUTBOUND_TYPE_LOAD_BALANCER

    if outbound_type == CONST_OUTBOUND_TYPE_MANAGED_NAT_GATEWAY:
        if load_balancer_sku == "basic":
            raise ArgumentUsageError("managedNATGateway doesn't support basic load balancer sku")
        return CONST_OUTBOUND_TYPE_MANAGED_NAT_GATEWAY

    if outbound_type == CONST_OUTBOUND_TYPE_USER_ASSIGNED_NAT_GATEWAY:
        if load_balancer_sku == "basic":
            raise ArgumentUsageError("userAssignedNATGateway doesn't support basic load balancer sku")
        if vnet_subnet_id in ["", None]:
            raise ArgumentUsageError("--vnet-subnet-id must be specified for userAssignedNATGateway and it must "
                                     "be pre-associated with a NAT gateway with outbound public IPs or IP prefixes")
        return CONST_OUTBOUND_TYPE_USER_ASSIGNED_NAT_GATEWAY

    if vnet_subnet_id in ["", None]:
        raise ArgumentUsageError("--vnet-subnet-id must be specified for userDefinedRouting and it must "
                                 "be pre-configured with a route table with egress rules")
    if load_balancer_sku == "basic":
        raise ArgumentUsageError("userDefinedRouting doesn't support basic load balancer sku")
    if load_balancer_profile:
        if (load_balancer_profile.managed_outbound_i_ps or
                load_balancer_profile.outbound_i_ps or
                load_balancer_profile.outbound_ip_prefixes):
            raise ArgumentUsageError("userDefinedRouting doesn't support customizing a standard load balancer "
                                     "with IP addresses")
    return CONST_OUTBOUND_TYPE_USER_DEFINED_ROUTING
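# Summary sketch (derived from _set_outbound_type above, not part of the original source):
#   outbound_type not one of the UDR/NAT constants                   -> loadBalancer
#   managedNATGateway with a basic load balancer SKU                 -> ArgumentUsageError
#   userAssignedNATGateway with a basic SKU or no --vnet-subnet-id   -> ArgumentUsageError
#   userDefinedRouting with no --vnet-subnet-id, a basic SKU, or a
#     load balancer profile carrying outbound IPs/prefixes           -> ArgumentUsageError
#   otherwise the requested outbound type is returned unchanged.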
def _ensure_subnet_service_endpoint(cli_ctx, subnet_id):
    from azure.cli.core.profiles import AD_HOC_API_VERSIONS, ResourceType

    subnet_id_parts = parse_resource_id(subnet_id)
    subnet_subscription_id = subnet_id_parts['subscription']
    subnet_resource_group = subnet_id_parts['resource_group']
    subnet_vnet_name = subnet_id_parts['name']
    subnet_name = subnet_id_parts['resource_name']

    if get_subscription_id(cli_ctx).lower() != subnet_subscription_id.lower():
        raise ArgumentUsageError('Cannot validate subnet in different subscription for missing service endpoint.'
                                 ' Use --ignore-missing-endpoint or -i to'
                                 ' skip validation and manually verify service endpoint.')

    vnet_client = network_client_factory(
        cli_ctx,
        api_version=AD_HOC_API_VERSIONS[ResourceType.MGMT_NETWORK]['appservice_ensure_subnet'])
    subnet_obj = vnet_client.subnets.get(subnet_resource_group, subnet_vnet_name, subnet_name)
    subnet_obj.service_endpoints = subnet_obj.service_endpoints or []
    service_endpoint_exists = False
    for s in subnet_obj.service_endpoints:
        if s.service == "Microsoft.Web":
            service_endpoint_exists = True
            break
    if not service_endpoint_exists:
        web_service_endpoint = ServiceEndpointPropertiesFormat(service="Microsoft.Web")
        subnet_obj.service_endpoints.append(web_service_endpoint)
        poller = vnet_client.subnets.begin_create_or_update(subnet_resource_group, subnet_vnet_name,
                                                            subnet_name, subnet_parameters=subnet_obj)
        # Ensure subnet is updated to avoid update conflict
        LongRunningOperation(cli_ctx)(poller)
def _parse_http_headers(http_headers):
    logger.info(http_headers)
    header_dict = {}
    for header_str in http_headers:
        header = header_str.split('=')
        if len(header) != 2:
            err_msg = 'Http headers must have a format of `<name>=<value>`: "{}"'.format(header_str)
            raise InvalidArgumentValueError(err_msg)
        header_name = header[0].strip().lower()
        header_value = header[1].strip()
        if header_name not in ALLOWED_HTTP_HEADER_NAMES:
            raise InvalidArgumentValueError('Invalid http-header name: "{}"'.format(header_name))
        if header_value:
            if header_name in header_dict:
                if len(header_dict[header_name]) > 7:
                    err_msg = 'Only 8 values are allowed for each http-header: "{}"'.format(header_name)
                    raise ArgumentUsageError(err_msg)
                header_dict[header_name].append(header_value)
            else:
                header_dict[header_name] = [header_value]
    return header_dict
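# Illustrative usage sketch for _parse_http_headers (not part of the original module). Assumes
# the header name used here appears in ALLOWED_HTTP_HEADER_NAMES; values are grouped per
# lowercased header name, and a ninth value for the same header raises ArgumentUsageError.
def _example_parse_http_headers():
    headers = _parse_http_headers(['X-Frame-Options=SAMEORIGIN', 'X-Frame-Options=DENY'])
    # -> {'x-frame-options': ['SAMEORIGIN', 'DENY']}
    return headers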
def confluent_offer_detail_show(cmd, publisher_id=None, offer_id=None):
    from azure.cli.core.util import send_raw_request
    from azure.cli.core.azclierror import ArgumentUsageError

    url = f"https://management.azure.com/providers/Microsoft.Marketplace/offers/{publisher_id}.{offer_id}?" \
          "excludePublic=true&api-version=2018-08-01-beta"
    response = send_raw_request(cmd.cli_ctx, 'get', url)

    try:
        plans = response.json()['plans']
        plans = [{
            'planId': plan['planId'],
            'planName': plan['displayName'],
            'offerId': offer_id,
            'publisherId': publisher_id,
            'termUnits': [{
                'price': {
                    'currencyCode': item['price']['currencyCode'],
                    'listPrice': item['price']['listPrice']
                },
                'termDescription': item['termDescription'],
                'termUnits': item['termUnits']
            } for a in plan['availabilities'] for item in a['terms']]
        } for plan in plans]
    except KeyError as ex:
        raise ArgumentUsageError('Not able to get offer details for the provided publisher id and offer id.') from ex

    for plan in plans:
        for term in plan['termUnits']:
            if term['termUnits'] not in ['P1M', 'P1Y']:
                del term['termUnits']
    return plans
def stream_analytics_function_inspect(client,
                                      resource_group_name,
                                      job_name,
                                      function_name,
                                      azure_machine_learning_web_service_function_retrieve_default_definition_parameters=None):
    all_function_retrieve_default_definition_parameters = []
    if azure_machine_learning_web_service_function_retrieve_default_definition_parameters is not None:
        all_function_retrieve_default_definition_parameters.append(
            azure_machine_learning_web_service_function_retrieve_default_definition_parameters)
    if len(all_function_retrieve_default_definition_parameters) > 1:
        err_msg = "At most one {--ml-properties} is needed for function inspect."
        raise ArgumentUsageError(err_msg)
    function_retrieve_default_definition_parameters = all_function_retrieve_default_definition_parameters[0] \
        if len(all_function_retrieve_default_definition_parameters) == 1 else None
    return client.retrieve_default_definition(
        resource_group_name=resource_group_name,
        job_name=job_name,
        function_name=function_name,
        function_retrieve_default_definition_parameters=function_retrieve_default_definition_parameters)
def restore_AzureFileShare(cmd, client, resource_group_name, vault_name, rp_name, item, restore_mode,
                           resolve_conflict, restore_request_type, source_file_type=None, source_file_path=None,
                           target_storage_account_name=None, target_file_share_name=None, target_folder=None):
    container_uri = helper.get_protection_container_uri_from_id(item.id)
    item_uri = helper.get_protected_item_uri_from_id(item.id)

    sa_name = item.properties.container_name

    afs_restore_request = AzureFileShareRestoreRequest()
    target_details = None

    afs_restore_request.copy_options = resolve_conflict
    afs_restore_request.recovery_type = restore_mode
    afs_restore_request.source_resource_id = _get_storage_account_id(cmd.cli_ctx, sa_name.split(';')[-1],
                                                                     sa_name.split(';')[-2])
    afs_restore_request.restore_request_type = restore_request_type

    restore_file_specs = None

    if source_file_path is not None:
        if len(source_file_path) > 99:
            raise ArgumentUsageError("""
            You can only recover a maximum of 99 Files/Folder.
            Please ensure you have provided less than 100 source file paths.
            """)
        restore_file_specs = []
        for filepath in source_file_path:
            restore_file_specs.append(RestoreFileSpecs(path=filepath, file_spec_type=source_file_type,
                                                       target_folder_path=target_folder))

    if restore_mode == "AlternateLocation":
        target_sa_name, target_sa_rg = helper.get_resource_name_and_rg(resource_group_name,
                                                                       target_storage_account_name)
        target_details = TargetAFSRestoreInfo()
        target_details.name = target_file_share_name
        target_details.target_resource_id = _get_storage_account_id(cmd.cli_ctx, target_sa_name, target_sa_rg)
        afs_restore_request.target_details = target_details

    afs_restore_request.restore_file_specs = restore_file_specs

    trigger_restore_request = RestoreRequestResource(properties=afs_restore_request)

    result = client.begin_trigger(vault_name, resource_group_name, fabric_name, container_uri, item_uri,
                                  rp_name, trigger_restore_request, cls=helper.get_pipeline_response,
                                  polling=False).result()
    return helper.track_backup_job(cmd.cli_ctx, result, vault_name, resource_group_name)
def validate_source(self, **kwargs):
    invalid_input = {k: v for k, v in kwargs.items()
                     if k in ['jvm_options', 'main_entry', 'target_module'] and v is not None}
    if any(invalid_input):
        invalid_input_str = convert_argument_to_parameter_list(invalid_input.keys())
        raise ArgumentUsageError('{} cannot be set when --container-image is set.'.format(invalid_input_str))
def validate_message_of_the_day(namespace):
    """Validates message of the day can only be used on Linux."""
    if namespace.message_of_the_day is not None and namespace.message_of_the_day != "":
        if namespace.os_type is not None and namespace.os_type != "Linux":
            raise ArgumentUsageError('--message-of-the-day can only be set for linux nodepools')
def validate_defender_disable_and_enable_parameters(namespace):
    if namespace.disable_defender and namespace.enable_defender:
        raise ArgumentUsageError('Providing both --disable-defender and --enable-defender flags is invalid')
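# Minimal sketch (not part of the original module) showing how the mutually-exclusive-flag
# check above behaves, with a plain argparse.Namespace standing in for the parsed arguments.
def _example_defender_flags():
    from argparse import Namespace
    validate_defender_disable_and_enable_parameters(
        Namespace(disable_defender=False, enable_defender=True))   # passes silently
    try:
        validate_defender_disable_and_enable_parameters(
            Namespace(disable_defender=True, enable_defender=True))
    except ArgumentUsageError:
        pass  # both flags together are rejected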
def raise_unsupported_error_for_flex_vmss(vmss, error_message):
    if hasattr(vmss, 'orchestration_mode') and vmss.orchestration_mode \
            and vmss.orchestration_mode.lower() == 'flexible':
        from azure.cli.core.azclierror import ArgumentUsageError
        raise ArgumentUsageError(error_message)
def validate_runtime_version(cmd, namespace):
    if namespace.runtime_version is not None and namespace.resource_group and namespace.service \
            and is_enterprise_tier(cmd, namespace.resource_group, namespace.service):
        raise ArgumentUsageError("'--runtime-version' is not supported for Enterprise tier Spring instance.")
def validate_build_env(cmd, namespace):
    if namespace.build_env is not None and namespace.resource_group and namespace.service \
            and not is_enterprise_tier(cmd, namespace.resource_group, namespace.service):
        raise ArgumentUsageError("'--build-env' is only supported for Enterprise tier Spring instance.")
def validate_enable_custom_ca_trust(namespace):
    """Validates Custom CA Trust can only be used on Linux."""
    if namespace.enable_custom_ca_trust:
        if hasattr(namespace, 'os_type') and namespace.os_type != "Linux":
            raise ArgumentUsageError('--enable_custom_ca_trust can only be set for Linux nodepools')
def app_update(cmd, client, resource_group, service, name,
               deployment=None,  # set by validator
               # app
               assign_endpoint=None,
               enable_persistent_storage=None,
               enable_ingress_to_app_tls=None,
               https_only=None,
               persistent_storage=None,
               loaded_public_certificate_file=None,
               # deployment.source
               runtime_version=None,
               jvm_options=None,
               main_entry=None,
               # deployment.settings
               env=None,
               disable_probe=None,
               config_file_patterns=None,
               # general
               no_wait=False):
    '''app_update
    Update app and active deployment according to the input
    1. Update app
    2. Update deployment
    '''
    logger.warning(LOG_RUNNING_PROMPT)
    basic_kwargs = {
        'cmd': cmd,
        'client': client,
        'resource_group': resource_group,
        'service': service,
        'app': name,
        'sku': deployment.sku if deployment else get_spring_cloud_sku(client, resource_group, service),
        'deployment': deployment.name if deployment else None,
        'deployment_resource': deployment,
    }

    deployment_kwargs = {
        'disable_probe': disable_probe,
        'config_file_patterns': config_file_patterns,
        'env': env,
        'runtime_version': runtime_version,
        'jvm_options': jvm_options,
        'main_entry': main_entry,
        'source_type': deployment.properties.source.type if deployment else None
    }

    app_kwargs = {
        'public': assign_endpoint,
        'enable_persistent_storage': enable_persistent_storage,
        'persistent_storage': persistent_storage,
        'loaded_public_certificate_file': loaded_public_certificate_file,
        'enable_end_to_end_tls': enable_ingress_to_app_tls,
        'https_only': https_only,
    }

    if deployment is None:
        updated_deployment_kwargs = {k: v for k, v in deployment_kwargs.items() if v}
        if updated_deployment_kwargs:
            raise ArgumentUsageError('{} cannot be set when there is no active deployment.'
                                     .format(convert_argument_to_parameter_list(updated_deployment_kwargs.keys())))

    deployment_factory = deployment_selector(**deployment_kwargs, **basic_kwargs)
    app_factory = app_selector(**basic_kwargs)
    deployment_kwargs.update(deployment_factory.source_factory
                             .fulfilled_options_from_original_source_info(**deployment_kwargs, **basic_kwargs))

    app_resource = app_factory.format_resource(**app_kwargs, **basic_kwargs)
    deployment_factory.source_factory.validate_source(**deployment_kwargs, **basic_kwargs)
    deployment_resource = deployment_factory.format_resource(**deployment_kwargs, **basic_kwargs)

    pollers = [
        client.apps.begin_update(resource_group, service, name, app_resource)
    ]
    if deployment:
        pollers.append(client.deployments.begin_update(resource_group,
                                                       service,
                                                       name,
                                                       DEFAULT_DEPLOYMENT_NAME,
                                                       deployment_resource))
    if no_wait:
        return
    wait_till_end(cmd, *pollers)
    return app_get(cmd, client, resource_group, service, name)
def _mysql_georedundant_backup_validator(geo_redundant_backup, geo_paired_regions):
    if geo_redundant_backup and geo_redundant_backup.lower() == 'enabled' and len(geo_paired_regions) == 0:
        raise ArgumentUsageError("The region of the server does not support geo-restore feature.")
def validate_gateway_instance_count(namespace):
    if namespace.gateway_instance_count is not None:
        if namespace.enable_gateway is False:
            raise ArgumentUsageError("--gateway-instance-count can only be set when the gateway is enabled.")
        if namespace.gateway_instance_count < 1:
            raise ArgumentUsageError("--gateway-instance-count must be greater than 0")
def validate_target_module(cmd, namespace):
    if namespace.target_module is not None and namespace.resource_group and namespace.service \
            and is_enterprise_tier(cmd, namespace.resource_group, namespace.service):
        raise ArgumentUsageError("'--target-module' is not supported for Enterprise tier Spring instance.")
def validate_api_portal_instance_count(namespace):
    if namespace.api_portal_instance_count is not None:
        if namespace.enable_api_portal is False:
            raise ArgumentUsageError("--api-portal-instance-count can only be set when API portal is enabled.")
        if namespace.api_portal_instance_count < 1:
            raise ArgumentUsageError("--api-portal-instance-count must be greater than 0")
def flexible_server_create(cmd, client, resource_group_name=None, server_name=None, sku_name=None, tier=None,
                           location=None, storage_mb=None, administrator_login=None,
                           administrator_login_password=None, version=None,
                           backup_retention=None, tags=None, public_access=None, database_name=None,
                           subnet_arm_resource_id=None, high_availability=None, zone=None, assign_identity=False,
                           vnet_resource_id=None, vnet_address_prefix=None, subnet_address_prefix=None, iops=None):
    # Populate desired parameters
    location, resource_group_name, server_name = generate_missing_parameters(cmd, location, resource_group_name,
                                                                             server_name, 'mysql')
    # validator
    sku_info, iops_info, single_az = get_mysql_list_skus_info(cmd, location)
    mysql_arguments_validator(tier, sku_name, storage_mb, backup_retention, sku_info, version=version)

    db_context = DbContext(
        azure_sdk=mysql_flexibleservers, cf_firewall=cf_mysql_flexible_firewall_rules, cf_db=cf_mysql_flexible_db,
        logging_name='MySQL', command_group='mysql', server_client=client)

    if high_availability is not None and high_availability.lower() == 'enabled':
        if tier == 'Burstable':
            raise ArgumentUsageError("High availability is not supported for Burstable tier")
        if single_az:
            raise ArgumentUsageError("This region is single availability zone. "
                                     "High availability is not supported in a single availability zone region.")

    # Raise error when user passes values for both parameters
    if subnet_arm_resource_id is not None and public_access is not None:
        raise CLIError("Incorrect usage : A combination of the parameters --subnet "
                       "and --public_access is invalid. Use either one of them.")

    server_result = firewall_id = subnet_id = None

    server_name = server_name.lower()
    validate_server_name(cf_mysql_check_resource_availability(cmd.cli_ctx, '_'), server_name,
                         'Microsoft.DBforMySQL/flexibleServers')

    # Handle Vnet scenario
    if public_access is None:
        subnet_id = prepare_private_network(cmd, resource_group_name, server_name, vnet=vnet_resource_id,
                                            subnet=subnet_arm_resource_id, location=location,
                                            delegation_service_name=DELEGATION_SERVICE_NAME,
                                            vnet_address_pref=vnet_address_prefix,
                                            subnet_address_pref=subnet_address_prefix)
        delegated_subnet_arguments = mysql_flexibleservers.models.DelegatedSubnetArguments(
            subnet_arm_resource_id=subnet_id)
    else:
        delegated_subnet_arguments = None

    # determine IOPS
    iops = _determine_iops(storage_gb=storage_mb, iops_info=iops_info, iops_input=iops, tier=tier,
                           sku_name=sku_name)

    storage_mb *= 1024  # storage input comes in GiB value

    administrator_login_password = generate_password(administrator_login_password)

    # Create mysql server
    # Note : passing public_access has no effect as the accepted values are 'Enabled' and 'Disabled'.
    # So the value ends up being ignored.
    server_result = _create_server(db_context, cmd, resource_group_name, server_name, location, backup_retention,
                                   sku_name, tier, storage_mb, administrator_login, administrator_login_password,
                                   version, tags, delegated_subnet_arguments, assign_identity, public_access,
                                   high_availability, zone, iops)

    # Adding firewall rule
    if public_access is not None and str(public_access).lower() != 'none':
        if str(public_access).lower() == 'all':
            start_ip, end_ip = '0.0.0.0', '255.255.255.255'
        else:
            start_ip, end_ip = parse_public_access_input(public_access)
        firewall_id = create_firewall_rule(db_context, cmd, resource_group_name, server_name, start_ip, end_ip)

    # Create mysql database if it does not exist
    if database_name is None:
        database_name = DEFAULT_DB_NAME
    _create_database(db_context, cmd, resource_group_name, server_name, database_name)

    user = server_result.administrator_login
    server_id = server_result.id
    loc = server_result.location
    version = server_result.version
    sku = server_result.sku.name
    host = server_result.fully_qualified_domain_name

    logger.warning('Make a note of your password. If you forget, you would have to reset your password with '
                   '\'az mysql flexible-server update -n %s -g %s -p <new-password>\'.',
                   server_name, resource_group_name)

    _update_local_contexts(cmd, server_name, resource_group_name, location, user)

    return _form_response(user, sku, loc, server_id, host, version,
                          administrator_login_password if administrator_login_password is not None else '*****',
                          _create_mysql_connection_string(host, database_name, user, administrator_login_password),
                          database_name, firewall_id, subnet_id)
def create_service_principal_for_rbac(  # pylint:disable=too-many-statements,too-many-locals, too-many-branches, unused-argument, inconsistent-return-statements
        cmd, name=None, years=None, create_cert=False, cert=None, scopes=None, role=None,
        show_auth_for_sdk=None, skip_assignment=False, keyvault=None):
    from azure.cli.command_modules.role.custom import (_graph_client_factory, TZ_UTC,
                                                       _process_service_principal_creds,
                                                       _validate_app_dates, create_application,
                                                       _create_service_principal, _create_role_assignment,
                                                       _error_caused_by_role_assignment_exists)

    if role and not scopes or not role and scopes:
        raise ArgumentUsageError("Usage error: To create role assignments, specify both --role and --scopes.")

    graph_client = _graph_client_factory(cmd.cli_ctx)

    years = years or 1
    _RETRY_TIMES = 36
    existing_sps = None

    if not name:
        # No name is provided, create a new one
        app_display_name = 'azure-cli-' + datetime.utcnow().strftime('%Y-%m-%d-%H-%M-%S')
    else:
        app_display_name = name
        # patch existing app with the same displayName to make the command idempotent
        query_exp = "displayName eq '{}'".format(name)
        existing_sps = list(graph_client.service_principals.list(filter=query_exp))

    app_start_date = datetime.now(TZ_UTC)
    app_end_date = app_start_date + relativedelta(years=years or 1)

    password, public_cert_string, cert_file, cert_start_date, cert_end_date = \
        _process_service_principal_creds(cmd.cli_ctx, years, app_start_date, app_end_date, cert, create_cert,
                                         None, keyvault)

    app_start_date, app_end_date, cert_start_date, cert_end_date = \
        _validate_app_dates(app_start_date, app_end_date, cert_start_date, cert_end_date)

    aad_application = create_application(cmd,
                                         display_name=app_display_name,
                                         available_to_other_tenants=False,
                                         password=password,
                                         key_value=public_cert_string,
                                         start_date=app_start_date,
                                         end_date=app_end_date,
                                         credential_description='rbac')
    # pylint: disable=no-member
    app_id = aad_application.app_id

    # retry till server replication is done
    aad_sp = existing_sps[0] if existing_sps else None
    if not aad_sp:
        for retry_time in range(0, _RETRY_TIMES):
            try:
                aad_sp = _create_service_principal(cmd.cli_ctx, app_id, resolve_app=False)
                break
            except Exception as ex:  # pylint: disable=broad-except
                err_msg = str(ex)
                if retry_time < _RETRY_TIMES and (
                        ' does not reference ' in err_msg or
                        ' does not exist ' in err_msg or
                        'service principal being created must in the local tenant' in err_msg):
                    logger.warning("Creating service principal failed with error '%s'. Retrying: %s/%s",
                                   err_msg, retry_time + 1, _RETRY_TIMES)
                    time.sleep(5)
                else:
                    logger.warning("Creating service principal failed for '%s'. Trace followed:\n%s",
                                   app_id, ex.response.headers
                                   if hasattr(ex, 'response') else ex)  # pylint: disable=no-member
                    raise
    sp_oid = aad_sp.object_id

    if role:
        for scope in scopes:
            # logger.warning("Creating '%s' role assignment under scope '%s'", role, scope)
            # retry till server replication is done
            for retry_time in range(0, _RETRY_TIMES):
                try:
                    _create_role_assignment(cmd.cli_ctx, role, sp_oid, None, scope, resolve_assignee=False,
                                            assignee_principal_type='ServicePrincipal')
                    break
                except Exception as ex:
                    if retry_time < _RETRY_TIMES and ' does not exist in the directory ' in str(ex):
                        time.sleep(5)
                        logger.warning('  Retrying role assignment creation: %s/%s', retry_time + 1, _RETRY_TIMES)
                        continue
                    if _error_caused_by_role_assignment_exists(ex):
                        logger.warning('  Role assignment already exists.\n')
                        break

                    # dump out history for diagnoses
                    logger.warning('  Role assignment creation failed.\n')
                    if getattr(ex, 'response', None) is not None:
                        logger.warning('  role assignment response headers: %s\n',
                                       ex.response.headers)  # pylint: disable=no-member
                    raise

    if show_auth_for_sdk:
        from azure.cli.core._profile import Profile
        profile = Profile(cli_ctx=cmd.cli_ctx)
        result = profile.get_sp_auth_info(scopes[0].split('/')[2] if scopes else None,
                                          app_id, password, cert_file)
        # sdk-auth file should be in json format all the time, hence the print
        print(json.dumps(result, indent=2))
        return

    result = {
        'appId': app_id,
        'password': password,
        'displayName': app_display_name,
        'tenant': graph_client.config.tenant_id
    }
    if cert_file:
        logger.warning(
            "Please copy %s to a safe place. When you run 'az login', provide the file path in the --password argument",
            cert_file)
        result['fileWithCertAndPrivateKey'] = cert_file
    return result
def validate_and_convert_to_int(flag, val):
    try:
        return int(val)
    except ValueError:
        raise ArgumentUsageError("{} is expected to have an int value.".format(flag))
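# Illustrative usage sketch (not part of the original module); '--max-surge' is a hypothetical
# flag name used only for the example. Numeric strings are converted, anything else is
# reported back against the flag that carried it.
def _example_validate_and_convert_to_int():
    assert validate_and_convert_to_int('--max-surge', '3') == 3
    try:
        validate_and_convert_to_int('--max-surge', 'three')
    except ArgumentUsageError:
        pass  # "--max-surge is expected to have an int value."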
def enable_addons(cmd, client, resource_group_name, name, addons, check_enabled=True,
                  workspace_resource_id=None, subnet_name=None, appgw_name=None, appgw_subnet_prefix=None,
                  appgw_subnet_cidr=None, appgw_id=None, appgw_subnet_id=None, appgw_watch_namespace=None,
                  enable_sgxquotehelper=False, enable_secret_rotation=False, rotation_poll_interval=None,
                  no_wait=False, enable_msi_auth_for_monitoring=False):
    instance = client.get(resource_group_name, name)
    # this is overwritten by _update_addons(), so the value needs to be recorded here
    msi_auth = True if instance.service_principal_profile.client_id == "msi" else False

    subscription_id = get_subscription_id(cmd.cli_ctx)
    instance = update_addons(cmd, instance, subscription_id, resource_group_name, name, addons, enable=True,
                             check_enabled=check_enabled,
                             workspace_resource_id=workspace_resource_id,
                             enable_msi_auth_for_monitoring=enable_msi_auth_for_monitoring,
                             subnet_name=subnet_name,
                             appgw_name=appgw_name,
                             appgw_subnet_prefix=appgw_subnet_prefix,
                             appgw_subnet_cidr=appgw_subnet_cidr,
                             appgw_id=appgw_id,
                             appgw_subnet_id=appgw_subnet_id,
                             appgw_watch_namespace=appgw_watch_namespace,
                             enable_sgxquotehelper=enable_sgxquotehelper,
                             enable_secret_rotation=enable_secret_rotation,
                             rotation_poll_interval=rotation_poll_interval,
                             no_wait=no_wait)

    if CONST_MONITORING_ADDON_NAME in instance.addon_profiles and \
            instance.addon_profiles[CONST_MONITORING_ADDON_NAME].enabled:
        if CONST_MONITORING_USING_AAD_MSI_AUTH in instance.addon_profiles[CONST_MONITORING_ADDON_NAME].config and \
                str(instance.addon_profiles[CONST_MONITORING_ADDON_NAME].config[
                    CONST_MONITORING_USING_AAD_MSI_AUTH]).lower() == 'true':
            if not msi_auth:
                raise ArgumentUsageError(
                    "--enable-msi-auth-for-monitoring can not be used on clusters with service principal auth.")
            else:
                # create a Data Collection Rule (DCR) and associate it with the cluster
                ensure_container_insights_for_monitoring(cmd, instance.addon_profiles[CONST_MONITORING_ADDON_NAME],
                                                         subscription_id, resource_group_name, name,
                                                         instance.location, aad_route=True, create_dcr=True,
                                                         create_dcra=True)
        else:
            # monitoring addon will use legacy path
            ensure_container_insights_for_monitoring(cmd, instance.addon_profiles[CONST_MONITORING_ADDON_NAME],
                                                     subscription_id, resource_group_name, name, instance.location,
                                                     aad_route=False)

    monitoring_addon_enabled = CONST_MONITORING_ADDON_NAME in instance.addon_profiles and \
        instance.addon_profiles[CONST_MONITORING_ADDON_NAME].enabled
    ingress_appgw_addon_enabled = CONST_INGRESS_APPGW_ADDON_NAME in instance.addon_profiles and \
        instance.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].enabled

    os_type = 'Linux'
    enable_virtual_node = False
    if CONST_VIRTUAL_NODE_ADDON_NAME + os_type in instance.addon_profiles:
        enable_virtual_node = True

    need_post_creation_role_assignment = monitoring_addon_enabled or ingress_appgw_addon_enabled or enable_virtual_node
    if need_post_creation_role_assignment:
        # adding a wait here since we rely on the result for role assignment
        result = LongRunningOperation(cmd.cli_ctx)(
            client.begin_create_or_update(resource_group_name, name, instance))
        cloud_name = cmd.cli_ctx.cloud.name
        # mdm metrics supported only in Azure Public cloud so add the role assignment only in this cloud
        if monitoring_addon_enabled and cloud_name.lower() == 'azurecloud':
            from msrestazure.tools import resource_id
            cluster_resource_id = resource_id(
                subscription=subscription_id,
                resource_group=resource_group_name,
                namespace='Microsoft.ContainerService',
                type='managedClusters',
                name=name
            )
            add_monitoring_role_assignment(result, cluster_resource_id, cmd)
        if ingress_appgw_addon_enabled:
            add_ingress_appgw_addon_role_assignment(result, cmd)
        if enable_virtual_node:
            # All agent pool will reside in the same vnet, we will grant vnet level Contributor role
            # in later function, so using a random agent pool here is OK
            random_agent_pool = result.agent_pool_profiles[0]
            if random_agent_pool.vnet_subnet_id != "":
                add_virtual_node_role_assignment(cmd, result, random_agent_pool.vnet_subnet_id)
            # Else, the cluster is not using custom VNet, the permission is already granted in AKS RP,
            # we don't need to handle it in client side in this case.
    else:
        result = sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, name, instance)
    return result
def error(self, message):
    # Get a recommended command from the CommandRecommender
    command_arguments = self._get_failure_recovery_arguments()
    cli_ctx = self.cli_ctx or (self.cli_help.cli_ctx if self.cli_help else None)
    recommender = CommandRecommender(*command_arguments, message, cli_ctx)
    recommender.set_help_examples(self.get_examples(self.prog))
    recommendation = recommender.recommend_a_command()

    az_error = ArgumentUsageError(message)
    if 'unrecognized arguments' in message:
        az_error = UnrecognizedArgumentError(message)
    elif 'arguments are required' in message:
        az_error = RequiredArgumentMissingError(message)
    elif 'invalid' in message:
        az_error = InvalidArgumentValueError(message)

    if '--query' in message:
        from azure.cli.core.util import QUERY_REFERENCE
        az_error.set_recommendation(QUERY_REFERENCE)
    elif recommendation:
        az_error.set_recommendation("Try this: '{}'".format(recommendation))
        az_error.set_recommendation(OVERVIEW_REFERENCE.format(command=self.prog))

    az_error.print_error()
    az_error.send_telemetry()
    self.exit(2)
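# Rough mapping implied by error() above (sketch, not exhaustive): the argparse message text
# selects the azclierror subtype before any recommendation is attached.
#   "unrecognized arguments: --foo"                 -> UnrecognizedArgumentError
#   "the following arguments are required: --name"  -> RequiredArgumentMissingError
#   "argument --count: invalid int value: 'x'"      -> InvalidArgumentValueError
#   anything else                                   -> ArgumentUsageError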
def acr_connected_registry_update(cmd,  # pylint: disable=too-many-locals, too-many-statements
                                  client,
                                  registry_name,
                                  connected_registry_name,
                                  add_client_token_list=None,
                                  remove_client_token_list=None,
                                  resource_group_name=None,
                                  sync_schedule=None,
                                  sync_window=None,
                                  log_level=None,
                                  sync_message_ttl=None,
                                  sync_audit_logs_enabled=None,
                                  add_notifications=None,
                                  remove_notifications=None):
    _, resource_group_name = validate_managed_registry(cmd, registry_name, resource_group_name)
    subscription_id = get_subscription_id(cmd.cli_ctx)
    current_connected_registry = acr_connected_registry_show(
        cmd, client, connected_registry_name, registry_name, resource_group_name)

    # Add or remove from the current client token id list
    if add_client_token_list is not None:
        for i, client_token_name in enumerate(add_client_token_list):
            add_client_token_list[i] = build_token_id(subscription_id, resource_group_name,
                                                      registry_name, client_token_name)
        add_client_token_set = set(add_client_token_list)
    else:
        add_client_token_set = set()
    if remove_client_token_list is not None:
        for i, client_token_name in enumerate(remove_client_token_list):
            remove_client_token_list[i] = build_token_id(subscription_id, resource_group_name,
                                                         registry_name, client_token_name)
        remove_client_token_set = set(remove_client_token_list)
    else:
        remove_client_token_set = set()

    duplicate_client_token = set.intersection(add_client_token_set, remove_client_token_set)
    if duplicate_client_token:
        errors = sorted(map(lambda action: action[action.rfind('/') + 1:], duplicate_client_token))
        raise CLIError('Update ambiguity. Duplicate client token ids were provided with ' +
                       '--add-client-tokens and --remove-client-tokens arguments.\n{}'.format(errors))

    current_client_token_set = set(current_connected_registry.client_token_ids) \
        if current_connected_registry.client_token_ids else set()
    client_token_set = current_client_token_set.union(add_client_token_set).difference(remove_client_token_set)

    client_token_list = list(client_token_set) if client_token_set != current_client_token_set else None

    # Add or remove from the current notifications list
    add_notifications_set = set(list(add_notifications)) if add_notifications else set()
    remove_notifications_set = set(list(remove_notifications)) if remove_notifications else set()

    duplicate_notifications = set.intersection(add_notifications_set, remove_notifications_set)
    if duplicate_notifications:
        errors = sorted(duplicate_notifications)
        raise ArgumentUsageError('Update ambiguity. Duplicate notifications list were provided with ' +
                                 '--add-notifications and --remove-notifications arguments.\n{}'.format(errors))

    current_notifications_set = set(current_connected_registry.notifications_list) \
        if current_connected_registry.notifications_list else set()
    notifications_set = current_notifications_set.union(add_notifications_set).difference(remove_notifications_set)

    notifications_list = list(notifications_set) if notifications_set != current_notifications_set else None

    ConnectedRegistryUpdateParameters, SyncUpdateProperties, LoggingProperties = cmd.get_models(
        'ConnectedRegistryUpdateParameters', 'SyncUpdateProperties', 'LoggingProperties')
    connected_registry_update_parameters = ConnectedRegistryUpdateParameters(
        sync_properties=SyncUpdateProperties(schedule=sync_schedule,
                                             message_ttl=sync_message_ttl,
                                             sync_window=sync_window),
        logging=LoggingProperties(log_level=log_level, audit_log_status=sync_audit_logs_enabled),
        client_token_ids=client_token_list,
        notifications_list=notifications_list)

    try:
        return client.begin_update(resource_group_name=resource_group_name,
                                   registry_name=registry_name,
                                   connected_registry_name=connected_registry_name,
                                   connected_registry_update_parameters=connected_registry_update_parameters)
    except ValidationError as e:
        raise CLIError(e)