def validate_builder_resource(namespace):
    """Ensure exactly one of --builder-json / --builder-file was supplied.

    :param namespace: parsed CLI arguments carrying builder_json and builder_file.
    :raises ClientRequestError: when both or neither of the two options are set.
    """
    has_json = namespace.builder_json is not None
    has_file = namespace.builder_file is not None
    if has_json and has_file:
        raise ClientRequestError(
            "You can only specify either --builder-json or --builder-file.")
    if not (has_json or has_file):
        raise ClientRequestError(
            "--builder-json or --builder-file is required.")
def _validate_acs_settings(client, resource_group, service, acs_settings):
    """Validate Application Configuration Service settings server-side.

    Calls the service's begin_validate operation and raises ClientRequestError
    when any configured git repository fails validation. A missing/empty
    git_property is treated as nothing-to-validate and returns silently.
    """
    logger.warning(
        "[1/2] Validating Application Configuration Service settings")
    # Nothing to validate when no git repositories are configured.
    if acs_settings is None or acs_settings.git_property is None:
        return
    try:
        # sdk_no_wait(False, ...) starts the LRO and .result() blocks until done.
        result = sdk_no_wait(False, client.configuration_services.begin_validate,
                             resource_group, service, DEFAULT_NAME, acs_settings).result()
    except Exception as err:  # pylint: disable=broad-except
        # Any failure (network, service error) is surfaced as a user-facing error
        # with a pointer to the support/FAQ link.
        raise ClientRequestError(
            "{0}. You may raise a support ticket if needed by the following link: https://docs.microsoft.com/azure/spring-cloud/spring-cloud-faq?pivots=programming-language-java#how-can-i-provide-feedback-and-report-issues"
            .format(err))
    if result is not None and result.git_property_validation_result is not None:
        git_result = result.git_property_validation_result
        if not git_result.is_valid:
            validation_result = git_result.git_repos_validation_result
            # Keep only repos that actually reported messages, for a compact report.
            filter_result = [{
                'name': x.name,
                'messages': x.messages
            } for x in validation_result if len(x.messages) > 0]
            raise ClientRequestError(
                "Application Configuration Service settings contain errors.\n{}"
                .format(json.dumps(filter_result, indent=2)))
def api_portal_update(cmd, client, resource_group, service, instance_count=None,
                      assign_endpoint=None, https_only=None, scope=None,
                      client_id=None, client_secret=None, issuer_uri=None):
    """Update the default API Portal of a Spring service instance.

    Unspecified arguments keep their current server-side values. Returns the
    poller from begin_create_or_update.
    :raises ClientRequestError: SSO cannot be combined with more than one replica.
    """
    api_portal = client.api_portals.get(resource_group, service, DEFAULT_NAME)
    sso_properties = api_portal.properties.sso_properties
    # All four SSO arguments must be provided together to (re)configure SSO;
    # otherwise the existing SSO configuration is carried over unchanged.
    if scope and client_id and client_secret and issuer_uri:
        sso_properties = models.SsoProperties(
            scope=scope,
            client_id=client_id,
            client_secret=client_secret,
            issuer_uri=issuer_uri,
        )
    properties = models.ApiPortalProperties(
        public=assign_endpoint if assign_endpoint is not None else api_portal.properties.public,
        https_only=https_only if https_only is not None else api_portal.properties.https_only,
        gateway_ids=api_portal.properties.gateway_ids,
        sso_properties=sso_properties
    )
    sku = models.Sku(name=api_portal.sku.name, tier=api_portal.sku.tier,
                     capacity=instance_count or api_portal.sku.capacity)
    if sku.capacity > 1 and properties.sso_properties:
        raise ClientRequestError("API Portal doesn't support to configure SSO with multiple replicas for now.")
    api_portal_resource = models.ApiPortalResource(
        properties=properties, sku=sku)
    return client.api_portals.begin_create_or_update(resource_group, service, DEFAULT_NAME, api_portal_resource)
def validate_build_pool_size(namespace):
    """--build-pool-size is enterprise-tier only; default it to 'S1' there.

    :raises ClientRequestError: when the option is used on a non-enterprise SKU.
    """
    enterprise = _parse_sku_name(namespace.sku) == 'enterprise'
    if enterprise:
        if namespace.build_pool_size is None:
            namespace.build_pool_size = 'S1'
    elif namespace.build_pool_size is not None:
        raise ClientRequestError("You can only specify --build-pool-size with enterprise tier.")
def get_bicep_available_release_tags():
    """Return the list of Bicep release tag names published at aka.ms/BicepReleases.

    :raises ClientRequestError: when the request fails or returns an error status.
    """
    try:
        # requests honors CURL_CA_BUNDLE; point it at certifi's CA store if unset.
        os.environ.setdefault("CURL_CA_BUNDLE", certifi.where())
        response = requests.get("https://aka.ms/BicepReleases")
        # Fail fast on non-2xx responses (HTTPError subclasses IOError, so it is
        # caught below) instead of obscurely failing to parse an error page as
        # JSON. Mirrors get_bicep_latest_release_tag.
        response.raise_for_status()
        return [release["tag_name"] for release in response.json()]
    except IOError as err:
        raise ClientRequestError(f"Error while attempting to retrieve available Bicep versions: {err}.")
def get_bicep_latest_release_tag():
    """Return the tag name of the most recent Bicep release.

    :raises ClientRequestError: when the request fails or returns an error status.
    """
    try:
        # requests honors CURL_CA_BUNDLE; default it to certifi's CA store.
        os.environ.setdefault("CURL_CA_BUNDLE", certifi.where())
        resp = requests.get("https://aka.ms/BicepLatestRelease")
        resp.raise_for_status()
        payload = resp.json()
        return payload["tag_name"]
    except IOError as err:
        raise ClientRequestError(f"Error while attempting to retrieve the latest Bicep version: {err}.")
def _get_acs_properties(properties):
    """Return ACS properties with refreshed settings, creating a default object if absent.

    :raises ClientRequestError: when the service is mid-update (write would race).
    """
    if properties is None:
        properties = models.ConfigurationServiceProperties()
    # Refuse to touch settings while another update is in flight.
    if properties.provisioning_state == "Updating":
        raise ClientRequestError(
            "Application Configuration Service is updating, please try again later."
        )
    properties.settings = _get_acs_settings(properties.settings)
    return properties
def gitcli_check_and_login():
    """Verify GitHub CLI is installed and authenticated; start interactive login if not.

    :raises ClientRequestError: when the `gh` executable is not available.
    """
    probe = run_subprocess_get_output("gh")
    if probe.returncode:
        raise ClientRequestError(
            'Please install "Github CLI" to run this command.')
    auth_probe = run_subprocess_get_output("gh auth status")
    if auth_probe.returncode:
        # Not logged in yet — hand control to gh's interactive login flow.
        run_subprocess("gh auth login", stdout_show=True)
def validate_builder_update(cmd, namespace):
    """Fail early when the builder being updated does not exist."""
    builder_client = get_client(cmd).build_service_builder
    try:
        builder_client.get(namespace.resource_group, namespace.service,
                           DEFAULT_BUILD_SERVICE_NAME, namespace.name)
    except ResourceNotFoundError:
        raise ClientRequestError('Builder {} does not exist.'.format(namespace.name))
def get_bicep_latest_release_tag():
    """Return the tag name of the latest Bicep release from the GitHub API.

    :raises ClientRequestError: when the request fails or returns an error status.
    """
    try:
        response = requests.get(
            "https://api.github.com/repos/Azure/bicep/releases/latest")
        # Surface non-2xx responses as HTTPError (an IOError subclass, caught
        # below) instead of failing obscurely while parsing an error payload.
        response.raise_for_status()
        return response.json()["tag_name"]
    except IOError as err:
        raise ClientRequestError(
            f"Error while attempting to retrieve the latest Bicep version: {err}."
        )
def get_bicep_available_release_tags():
    """Return the list of Bicep release tag names, verifying TLS against certifi's CA store.

    :raises ClientRequestError: when the request fails or returns an error status.
    """
    try:
        ca_file = certifi.where()
        response = requests.get("https://aka.ms/BicepReleases", verify=ca_file)
        # Fail fast on non-2xx responses (HTTPError subclasses IOError, caught
        # below) instead of attempting to parse an error page as JSON.
        response.raise_for_status()
        return [release["tag_name"] for release in response.json()]
    except IOError as err:
        raise ClientRequestError(
            f"Error while attempting to retrieve available Bicep versions: {err}."
        )
def get_bicep_available_release_tags():
    """Return the list of Bicep release tag names from the GitHub API.

    :raises ClientRequestError: when the request fails or returns an error status.
    """
    try:
        response = requests.get(
            "https://api.github.com/repos/Azure/bicep/releases")
        # Surface non-2xx responses as HTTPError (an IOError subclass, caught
        # below) instead of failing obscurely while parsing an error payload.
        response.raise_for_status()
        return [release["tag_name"] for release in response.json()]
    except IOError as err:
        raise ClientRequestError(
            f"Error while attempting to retrieve available Bicep versions: {err}."
        )
def github_actions_setup(cmd, client, resource_group_name, server_name, database_name,
                         administrator_login, administrator_login_password, sql_file_path,
                         repository, action_name=None, branch=None, allow_push=None):
    """Generate and commit a GitHub Actions workflow that deploys a SQL file to a server.

    The workflow file is written into the local git repo, committed, and
    optionally pushed to the given remote branch when --allow-push is set.
    :raises ClientRequestError: when the server has public network access disabled.
    :raises RequiredArgumentMissingError: when --allow-push is set without a branch.
    """
    server = client.get(resource_group_name, server_name)
    # The generated action connects over the public endpoint, so it must be enabled.
    if server.network.public_network_access == 'Disabled':
        raise ClientRequestError(
            "This command only works with public access enabled server.")
    if allow_push and not branch:
        raise RequiredArgumentMissingError(
            "Provide remote branch name to allow pushing the action file to your remote branch."
        )
    if action_name is None:
        action_name = server.name + '_' + database_name + "_deploy"
    # Requires GitHub CLI; triggers interactive login when not authenticated.
    gitcli_check_and_login()
    # Pick the engine from the operations client type (MySQL vs PostgreSQL).
    if isinstance(client, MySqlServersOperations):
        database_engine = 'mysql'
    else:
        database_engine = 'postgresql'
    fill_action_template(
        cmd,
        database_engine=database_engine,
        server=server,
        database_name=database_name,
        administrator_login=administrator_login,
        administrator_login_password=administrator_login_password,
        file_name=sql_file_path,
        repository=repository,
        action_name=action_name)
    action_path = get_git_root_dir(
    ) + GITHUB_ACTION_PATH + action_name + '.yml'
    logger.warning("Making git commit for file %s", action_path)
    run_subprocess("git add {}".format(action_path))
    run_subprocess("git commit -m \"Add github action file\"")
    if allow_push:
        logger.warning("Pushing the created action file to origin %s branch",
                       branch)
        run_subprocess("git push origin {}".format(branch))
    else:
        logger.warning(
            'You did not set --allow-push parameter. Please push the prepared file %s to your remote repo and run "deploy run" command to activate the workflow.',
            action_path)
def validate_builder_create(cmd, namespace):
    """Fail early when a builder with the requested name already exists."""
    builder_client = get_client(cmd).build_service_builder
    try:
        existing = builder_client.get(namespace.resource_group, namespace.service,
                                      DEFAULT_BUILD_SERVICE_NAME, namespace.name)
        if existing is not None:
            raise ClientRequestError('Builder {} already exists.'.format(namespace.name))
    except ResourceNotFoundError:
        # Not found means the name is free to use.
        pass
def get_bicep_latest_release_tag():
    """Return the tag name of the latest published Bicep release.

    :raises ClientRequestError: when the request fails or returns an error status.
    """
    try:
        # Verify TLS against certifi's CA bundle explicitly.
        response = requests.get("https://aka.ms/BicepLatestRelease",
                                verify=certifi.where())
        response.raise_for_status()
        return response.json()["tag_name"]
    except IOError as err:
        raise ClientRequestError(
            f"Error while attempting to retrieve the latest Bicep version: {err}."
        )
def arm_exception_handler(ex, fault_type, summary, return_if_not_found=False):
    """Record *ex* in telemetry and re-raise it as a ClientRequestError.

    Dispatches on the concrete exception type to pick a message prefix.
    For HTTP-style errors, a 404 is swallowed (plain return) when
    return_if_not_found is True, and 4xx responses are marked as user faults.
    NOTE(review): the word "occured" in the messages is misspelled, but fixing
    it would change user-visible output — left as-is here.
    """
    if isinstance(ex, AuthenticationError):
        telemetry.set_exception(exception=ex, fault_type=fault_type, summary=summary)
        raise ClientRequestError("Authentication error occured while making ARM request: " + str(ex) + "\nSummary: {}".format(summary))
    if isinstance(ex, TokenExpiredError):
        telemetry.set_exception(exception=ex, fault_type=fault_type, summary=summary)
        raise ClientRequestError("Token expiration error occured while making ARM request: " + str(ex) + "\nSummary: {}".format(summary))
    if isinstance(ex, HttpOperationError):
        status_code = ex.response.status_code
        # Optionally treat "not found" as a non-error for callers probing existence.
        if status_code == 404 and return_if_not_found:
            return
        # 4xx responses are attributed to the user, not the service.
        if status_code // 100 == 4:
            telemetry.set_user_fault()
        telemetry.set_exception(exception=ex, fault_type=fault_type, summary=summary)
        raise ClientRequestError("Http operation error occured while making ARM request: " + str(ex) + "\nSummary: {}".format(summary))
    if isinstance(ex, ValidationError):
        telemetry.set_exception(exception=ex, fault_type=fault_type, summary=summary)
        raise ClientRequestError("Validation error occured while making ARM request: " + str(ex) + "\nSummary: {}".format(summary))
    if isinstance(ex, CloudError):
        status_code = ex.status_code
        if status_code == 404 and return_if_not_found:
            return
        if status_code // 100 == 4:
            telemetry.set_user_fault()
        telemetry.set_exception(exception=ex, fault_type=fault_type, summary=summary)
        raise ClientRequestError("Cloud error occured while making ARM request: " + str(ex) + "\nSummary: {}".format(summary))
    # Fallback for any exception type not matched above.
    telemetry.set_exception(exception=ex, fault_type=fault_type, summary=summary)
    raise ClientRequestError("Error occured while making ARM request: " + str(ex) + "\nSummary: {}".format(summary))
def ensure_bicep_installation(release_tag=None, target_platform=None, stdout=True):
    """Download and install the Bicep CLI if missing or at a different version.

    :param release_tag: specific release to install; latest when None.
    :param target_platform: overrides the platform used to pick the download URL.
    :param stdout: print progress to stdout instead of logging it.
    :raises ClientRequestError: when the download fails.
    """
    system = platform.system()
    installation_path = _get_bicep_installation_path(system)
    if os.path.isfile(installation_path):
        # Already installed and no specific version requested — nothing to do.
        if not release_tag:
            return
        installed_version = _get_bicep_installed_version(installation_path)
        target_version = _extract_semver(release_tag)
        # Skip the download when the requested version is already installed.
        if installed_version and target_version and semver.compare(
                installed_version, target_version) == 0:
            return
    installation_dir = os.path.dirname(installation_path)
    if not os.path.exists(installation_dir):
        os.makedirs(installation_dir)
    try:
        release_tag = release_tag if release_tag else get_bicep_latest_release_tag(
        )
        if stdout:
            if release_tag:
                print(f"Installing Bicep CLI {release_tag}...")
            else:
                print("Installing Bicep CLI...")
        ca_file = certifi.where()
        request = urlopen(_get_bicep_download_url(
            system, release_tag, target_platform=target_platform),
            cafile=ca_file)
        with open(installation_path, "wb") as f:
            f.write(request.read())
        # Mark the downloaded binary executable for user, group, and others.
        os.chmod(
            installation_path,
            os.stat(installation_path).st_mode | stat.S_IXUSR | stat.S_IXGRP
            | stat.S_IXOTH)
        if stdout:
            print(
                f'Successfully installed Bicep CLI to "{installation_path}".')
        else:
            _logger.info(
                "Successfully installed Bicep CLI to %s",
                installation_path,
            )
    except IOError as err:
        raise ClientRequestError(
            f"Error while attempting to download Bicep CLI: {err}")
def validate_buildpack_binding_not_exist(cmd, namespace):
    """Raise if the target buildpack binding already exists in the builder.

    :raises ClientRequestError: when a binding with this name is already present.
    """
    client = get_client(cmd)
    try:
        binding_resource = client.buildpack_binding.get(
            namespace.resource_group,
            namespace.service,
            DEFAULT_BUILD_SERVICE_NAME,
            namespace.builder_name,
            namespace.name)
        if binding_resource is not None:
            # BUGFIX: format arguments were previously passed as
            # (name, resource_group, service, builder_name), so the message
            # printed the resource group where the builder name belongs.
            # Reordered to match the placeholders.
            raise ClientRequestError(
                'buildpack Binding {} in builder {} already exists '
                'in resource group {}, service {}. You can edit it by set command.'
                .format(namespace.name, namespace.builder_name,
                        namespace.resource_group, namespace.service))
    except ResourceNotFoundError:
        # Expected case: the binding does not exist yet, creation may proceed.
        pass
def _ex_handler(ex):
    """Polish a backend error and re-raise it, appending plan-migration guidance.

    HTTP errors (detected via a 'response' attribute) get an extra hint about
    consumption-to-premium plan migration appended to the message.
    """
    http_error_response = False
    if hasattr(ex, 'response'):
        http_error_response = True
    ex = _polish_bad_errors(ex, False)
    # only include if an update was attempted and failed on the backend
    if http_error_response:
        # Best-effort: if the polished error has no usable args[0], keep it as-is.
        try:
            detail = ('If using \'--plan\', a consumption plan may be unable to migrate '
                      'to a given premium plan. Please confirm that the premium plan '
                      'exists in the same resource group and region. Note: Not all '
                      'functionapp plans support premium instances. If you have verified '
                      'your resource group and region and are still unable to migrate, '
                      'please redeploy on a premium functionapp plan.')
            ex = ClientRequestError(ex.args[0] + '\n\n' + detail)
        except Exception:  # pylint: disable=broad-except
            pass
    raise ex
def map_azure_error_to_cli_error(azure_error):
    """Translate an azure-core exception into the matching azure-cli error type.

    HTTP response errors map by status code (exact match first, then 4xx/5xx
    ranges); request/response transport errors map to client/response errors;
    anything else becomes a generic ServiceError.
    """
    message = getattr(azure_error, "message", str(azure_error))
    if isinstance(azure_error, HttpResponseError):
        code = getattr(azure_error, "status_code", None)
        if code:
            code = int(code)
            exact_mapping = {
                400: BadRequestError,
                401: UnauthorizedError,
                403: ForbiddenError,
                404: ResourceNotFoundError,
            }
            error_cls = exact_mapping.get(code)
            if error_cls is not None:
                return error_cls(message)
            if 400 <= code < 500:
                return UnclassifiedUserFault(message)
            if 500 <= code < 600:
                return AzureInternalError(message)
        return ServiceError(message)
    if isinstance(azure_error, ServiceRequestError):
        return ClientRequestError(message)
    if isinstance(azure_error, ServiceResponseError):
        return AzureResponseError(message)
    return ServiceError(message)
def test_service_request_error(self):
    """ServiceRequestError must map to a ClientRequestError with the same message."""
    source_error = ServiceRequestError("test_error_msg")
    mapped_error = helpers.map_azure_error_to_cli_error(source_error)
    expected_error = ClientRequestError("test_error_msg")
    self.check_error_equality(mapped_error, expected_error)
def ensure_container_insights_for_monitoring(cmd, addon, cluster_subscription, cluster_resource_group_name, cluster_name, cluster_region, remove_monitoring=False, aad_route=False, create_dcr=False, create_dcra=False):
    """
    Either adds the ContainerInsights solution to a LA Workspace OR sets up a DCR (Data Collection Rule) and DCRA
    (Data Collection Rule Association). Both let the monitoring addon send data to a Log Analytics Workspace.

    Set aad_route == True to set up the DCR data route. Otherwise the solution route will be used. Create_dcr and
    create_dcra have no effect if aad_route == False.

    Set remove_monitoring to True and create_dcra to True to remove the DCRA from a cluster. The association makes
    it very hard to delete either the DCR or cluster. (It is not obvious how to even navigate to the association
    from the portal, and it prevents the cluster and DCR from being deleted individually).
    """
    if not addon.enabled:
        return None

    # workaround for this addon key which has been seen lowercased in the wild
    for key in list(addon.config):
        if key.lower() == CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID.lower() and key != CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID:
            addon.config[CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID] = addon.config.pop(key)

    workspace_resource_id = addon.config[CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID].strip()
    # Normalize to a "/subscriptions/.../workspaces/<name>" shape.
    if not workspace_resource_id.startswith('/'):
        workspace_resource_id = '/' + workspace_resource_id
    if workspace_resource_id.endswith('/'):
        workspace_resource_id = workspace_resource_id.rstrip('/')

    # extract subscription ID and resource group from workspace_resource_id URL
    try:
        subscription_id = workspace_resource_id.split('/')[2]
        resource_group = workspace_resource_id.split('/')[4]
        workspace_name = workspace_resource_id.split('/')[8]
    except IndexError:
        raise CLIError(
            'Could not locate resource group in workspace-resource-id URL.')

    # region of workspace can be different from region of RG so find the location of the workspace_resource_id
    if not remove_monitoring:
        resources = cf_resources(cmd.cli_ctx, subscription_id)
        from azure.core.exceptions import HttpResponseError
        try:
            resource = resources.get_by_id(
                workspace_resource_id, '2015-11-01-preview')
            location = resource.location
        except HttpResponseError as ex:
            raise ex

    if aad_route:
        cluster_resource_id = f"/subscriptions/{cluster_subscription}/resourceGroups/{cluster_resource_group_name}/providers/Microsoft.ContainerService/managedClusters/{cluster_name}"
        dataCollectionRuleName = f"MSCI-{workspace_name}"
        dcr_resource_id = f"/subscriptions/{subscription_id}/resourceGroups/{resource_group}/providers/Microsoft.Insights/dataCollectionRules/{dataCollectionRuleName}"
        from azure.cli.core.util import send_raw_request
        from azure.cli.core.profiles import ResourceType
        if create_dcr:
            # first get the association between region display names and region IDs (because for some reason
            # the "which RPs are available in which regions" check returns region display names)
            region_names_to_id = {}
            # retry the request up to two times
            for _ in range(3):
                try:
                    location_list_url = f"https://management.azure.com/subscriptions/{subscription_id}/locations?api-version=2019-11-01"
                    r = send_raw_request(cmd.cli_ctx, "GET", location_list_url)
                    # this is required to fool the static analyzer. The else statement will only run if an exception
                    # is thrown, but flake8 will complain that e is undefined if we don't also define it here.
                    error = None
                    break
                except CLIError as e:
                    error = e
            else:
                # This will run if the above for loop was not broken out of. This means all three requests failed
                raise error
            json_response = json.loads(r.text)
            for region_data in json_response["value"]:
                region_names_to_id[region_data["displayName"]] = region_data["name"]

            # check if region supports DCRs and DCR-A
            for _ in range(3):
                try:
                    feature_check_url = f"https://management.azure.com/subscriptions/{subscription_id}/providers/Microsoft.Insights?api-version=2020-10-01"
                    r = send_raw_request(cmd.cli_ctx, "GET", feature_check_url)
                    error = None
                    break
                except CLIError as e:
                    error = e
            else:
                raise error
            json_response = json.loads(r.text)
            for resource in json_response["resourceTypes"]:
                region_ids = map(lambda x: region_names_to_id[x], resource["locations"])  # map is lazy, so doing this for every region isn't slow
                if resource["resourceType"].lower() == "datacollectionrules" and location not in region_ids:
                    raise ClientRequestError(
                        f'Data Collection Rules are not supported for LA workspace region {location}')
                elif resource["resourceType"].lower() == "datacollectionruleassociations" and cluster_region not in region_ids:
                    # BUGFIX: this message previously interpolated {location} (the
                    # workspace region) although the failed check is on the cluster
                    # region — the sibling implementations report cluster_region here.
                    raise ClientRequestError(
                        f'Data Collection Rule Associations are not supported for cluster region {cluster_region}')

            # create the DCR
            dcr_creation_body = json.dumps({"location": location,
                                            "properties": {
                                                "dataSources": {
                                                    "extensions": [
                                                        {
                                                            "name": "ContainerInsightsExtension",
                                                            "streams": [
                                                                "Microsoft-Perf",
                                                                "Microsoft-ContainerInventory",
                                                                "Microsoft-ContainerLog",
                                                                "Microsoft-ContainerLogV2",
                                                                "Microsoft-ContainerNodeInventory",
                                                                "Microsoft-KubeEvents",
                                                                "Microsoft-KubeHealth",
                                                                "Microsoft-KubeMonAgentEvents",
                                                                "Microsoft-KubeNodeInventory",
                                                                "Microsoft-KubePodInventory",
                                                                "Microsoft-KubePVInventory",
                                                                "Microsoft-KubeServices",
                                                                "Microsoft-InsightsMetrics"
                                                            ],
                                                            "extensionName": "ContainerInsights"
                                                        }
                                                    ]
                                                },
                                                "dataFlows": [
                                                    {
                                                        "streams": [
                                                            "Microsoft-Perf",
                                                            "Microsoft-ContainerInventory",
                                                            "Microsoft-ContainerLog",
                                                            "Microsoft-ContainerLogV2",
                                                            "Microsoft-ContainerNodeInventory",
                                                            "Microsoft-KubeEvents",
                                                            "Microsoft-KubeHealth",
                                                            "Microsoft-KubeMonAgentEvents",
                                                            "Microsoft-KubeNodeInventory",
                                                            "Microsoft-KubePodInventory",
                                                            "Microsoft-KubePVInventory",
                                                            "Microsoft-KubeServices",
                                                            "Microsoft-InsightsMetrics"
                                                        ],
                                                        "destinations": [
                                                            "la-workspace"
                                                        ]
                                                    }
                                                ],
                                                "destinations": {
                                                    "logAnalytics": [
                                                        {
                                                            "workspaceResourceId": workspace_resource_id,
                                                            "name": "la-workspace"
                                                        }
                                                    ]
                                                }
                                            }})
            dcr_url = f"https://management.azure.com/{dcr_resource_id}?api-version=2019-11-01-preview"
            for _ in range(3):
                try:
                    send_raw_request(cmd.cli_ctx, "PUT", dcr_url, body=dcr_creation_body)
                    error = None
                    break
                except CLIError as e:
                    error = e
            else:
                raise error

        if create_dcra:
            # only create or delete the association between the DCR and cluster
            association_body = json.dumps({"location": cluster_region,
                                           "properties": {
                                               "dataCollectionRuleId": dcr_resource_id,
                                               "description": "routes monitoring data to a Log Analytics workspace"
                                           }})
            association_url = f"https://management.azure.com/{cluster_resource_id}/providers/Microsoft.Insights/dataCollectionRuleAssociations/send-to-{workspace_name}?api-version=2019-11-01-preview"
            for _ in range(3):
                try:
                    send_raw_request(cmd.cli_ctx, "PUT" if not remove_monitoring else "DELETE", association_url, body=association_body)
                    error = None
                    break
                except CLIError as e:
                    error = e
            else:
                raise error
def _ensure_container_insights_dcr_for_monitoring(cmd, subscription_id, cluster_resource_group_name, cluster_name, workspace_resource_id):
    """Create the Container Insights DCR and its association for a connected cluster.

    Resolves the cluster and workspace regions, verifies both support
    DCR/DCR-A, then PUTs the Data Collection Rule (in the workspace's
    subscription/resource group) and the Data Collection Rule Association
    (named ContainerInsightsExtension) with up to three attempts each.
    """
    from azure.core.exceptions import HttpResponseError
    cluster_region = ''
    resources = cf_resources(cmd.cli_ctx, subscription_id)
    cluster_resource_id = '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Kubernetes' \
        '/connectedClusters/{2}'.format(subscription_id, cluster_resource_group_name, cluster_name)
    try:
        resource = resources.get_by_id(cluster_resource_id, '2020-01-01-preview')
        cluster_region = resource.location.lower()
    except HttpResponseError as ex:
        raise ex

    # extract subscription ID and resource group from workspace_resource_id URL
    parsed = parse_resource_id(workspace_resource_id)
    workspace_subscription_id, workspace_resource_group = parsed[
        "subscription"], parsed["resource_group"]

    workspace_region = ''
    # The workspace may live in a different subscription than the cluster.
    resources = cf_resources(cmd.cli_ctx, workspace_subscription_id)
    try:
        resource = resources.get_by_id(workspace_resource_id, '2015-11-01-preview')
        workspace_region = resource.location
    except HttpResponseError as ex:
        raise ex

    dataCollectionRuleName = f"MSCI-{cluster_name}-{cluster_region}"
    dcr_resource_id = f"/subscriptions/{workspace_subscription_id}/resourceGroups/{workspace_resource_group}/providers/Microsoft.Insights/dataCollectionRules/{dataCollectionRuleName}"

    # first get the association between region display names and region IDs (because for some reason
    # the "which RPs are available in which regions" check returns region display names)
    region_names_to_id = {}
    # retry the request up to two times
    for _ in range(3):
        try:
            location_list_url = cmd.cli_ctx.cloud.endpoints.resource_manager + f"/subscriptions/{subscription_id}/locations?api-version=2019-11-01"
            r = send_raw_request(cmd.cli_ctx, "GET", location_list_url)
            # this is required to fool the static analyzer. The else statement will only run if an exception
            # is thrown, but flake8 will complain that e is undefined if we don't also define it here.
            error = None
            break
        except AzCLIError as e:
            error = e
    else:
        # This will run if the above for loop was not broken out of. This means all three requests failed
        raise error
    json_response = json.loads(r.text)
    for region_data in json_response["value"]:
        region_names_to_id[region_data["displayName"]] = region_data["name"]

    # check if region supports DCR and DCR-A
    for _ in range(3):
        try:
            feature_check_url = cmd.cli_ctx.cloud.endpoints.resource_manager + f"/subscriptions/{subscription_id}/providers/Microsoft.Insights?api-version=2020-10-01"
            r = send_raw_request(cmd.cli_ctx, "GET", feature_check_url)
            error = None
            break
        except AzCLIError as e:
            error = e
    else:
        raise error
    json_response = json.loads(r.text)
    for resource in json_response["resourceTypes"]:
        if (resource["resourceType"].lower() == "datacollectionrules"):
            region_ids = map(lambda x: region_names_to_id[x],
                             resource["locations"])  # dcr supported regions
            if (workspace_region not in region_ids):
                raise ClientRequestError(
                    f"Data Collection Rules are not supported for LA workspace region {workspace_region}"
                )
        if (resource["resourceType"].lower() == "datacollectionruleassociations"):
            region_ids = map(lambda x: region_names_to_id[x],
                             resource["locations"])  # dcr-a supported regions
            if (cluster_region not in region_ids):
                raise ClientRequestError(
                    f"Data Collection Rule Associations are not supported for cluster region {cluster_region}"
                )

    dcr_url = cmd.cli_ctx.cloud.endpoints.resource_manager + f"{dcr_resource_id}?api-version=2019-11-01-preview"
    # get existing tags on the container insights extension DCR if the customer added any
    existing_tags = get_existing_container_insights_extension_dcr_tags(
        cmd, dcr_url)
    # create the DCR
    dcr_creation_body = json.dumps({
        "location": workspace_region,
        "tags": existing_tags,
        "properties": {
            "dataSources": {
                "extensions": [{
                    "name": "ContainerInsightsExtension",
                    "streams": [
                        "Microsoft-Perf",
                        "Microsoft-ContainerInventory",
                        "Microsoft-ContainerLog",
                        "Microsoft-ContainerLogV2",
                        "Microsoft-ContainerNodeInventory",
                        "Microsoft-KubeEvents",
                        "Microsoft-KubeMonAgentEvents",
                        "Microsoft-KubeNodeInventory",
                        "Microsoft-KubePodInventory",
                        "Microsoft-KubePVInventory",
                        "Microsoft-KubeServices",
                        "Microsoft-InsightsMetrics",
                    ],
                    "extensionName": "ContainerInsights",
                }]
            },
            "dataFlows": [{
                "streams": [
                    "Microsoft-Perf",
                    "Microsoft-ContainerInventory",
                    "Microsoft-ContainerLog",
                    "Microsoft-ContainerLogV2",
                    "Microsoft-ContainerNodeInventory",
                    "Microsoft-KubeEvents",
                    "Microsoft-KubeMonAgentEvents",
                    "Microsoft-KubeNodeInventory",
                    "Microsoft-KubePodInventory",
                    "Microsoft-KubePVInventory",
                    "Microsoft-KubeServices",
                    "Microsoft-InsightsMetrics",
                ],
                "destinations": ["la-workspace"],
            }],
            "destinations": {
                "logAnalytics": [{
                    "workspaceResourceId": workspace_resource_id,
                    "name": "la-workspace",
                }]
            },
        },
    })
    for _ in range(3):
        try:
            send_raw_request(cmd.cli_ctx, "PUT", dcr_url, body=dcr_creation_body)
            error = None
            break
        except AzCLIError as e:
            error = e
    else:
        raise error

    association_body = json.dumps({
        "location": cluster_region,
        "properties": {
            "dataCollectionRuleId": dcr_resource_id,
            "description": "routes monitoring data to a Log Analytics workspace",
        },
    })
    association_url = cmd.cli_ctx.cloud.endpoints.resource_manager + f"{cluster_resource_id}/providers/Microsoft.Insights/dataCollectionRuleAssociations/ContainerInsightsExtension?api-version=2019-11-01-preview"
    for _ in range(3):
        try:
            send_raw_request(
                cmd.cli_ctx,
                "PUT",
                association_url,
                body=association_body,
            )
            error = None
            break
        except AzCLIError as e:
            error = e
    else:
        raise error
def not_support_enterprise(cmd, namespace):
    """Block commands that are not available on Enterprise tier Spring instances."""
    target_known = namespace.resource_group and namespace.service
    if target_known and is_enterprise_tier(cmd, namespace.resource_group, namespace.service):
        raise ClientRequestError(
            "'{}' doesn't support for Enterprise tier Spring instance.".format(
                namespace.command))
def ensure_container_insights_for_monitoring(
    cmd,
    addon,
    cluster_subscription,
    cluster_resource_group_name,
    cluster_name,
    cluster_region,
    remove_monitoring=False,
    aad_route=False,
    create_dcr=False,
    create_dcra=False,
):
    """
    Either adds the ContainerInsights solution to a LA Workspace OR sets up a DCR (Data Collection Rule) and DCRA
    (Data Collection Rule Association). Both let the monitoring addon send data to a Log Analytics Workspace.

    Set aad_route == True to set up the DCR data route. Otherwise the solution route will be used. Create_dcr and
    create_dcra have no effect if aad_route == False.

    Set remove_monitoring to True and create_dcra to True to remove the DCRA from a cluster. The association makes
    it very hard to delete either the DCR or cluster. (It is not obvious how to even navigate to the association
    from the portal, and it prevents the cluster and DCR from being deleted individually).
    """
    if not addon.enabled:
        return None

    # workaround for this addon key which has been seen lowercased in the wild
    for key in list(addon.config):
        if (key.lower() == CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID.lower() and
                key != CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID):
            addon.config[
                CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID] = addon.config.pop(
                    key)

    workspace_resource_id = addon.config[
        CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID]
    workspace_resource_id = sanitize_loganalytics_ws_resource_id(
        workspace_resource_id)

    # extract subscription ID and resource group from workspace_resource_id URL
    try:
        subscription_id = workspace_resource_id.split("/")[2]
        resource_group = workspace_resource_id.split("/")[4]
    except IndexError:
        raise AzCLIError(
            "Could not locate resource group in workspace-resource-id URL.")

    # region of workspace can be different from region of RG so find the location of the workspace_resource_id
    # NOTE(review): when remove_monitoring is True, `location` is never assigned;
    # the solution branch at the end reads it — looks reachable only when
    # remove_monitoring is False. TODO confirm.
    if not remove_monitoring:
        resources = cf_resources(cmd.cli_ctx, subscription_id)
        try:
            resource = resources.get_by_id(workspace_resource_id, "2015-11-01-preview")
            location = resource.location
        except HttpResponseError as ex:
            raise ex

    if aad_route:
        cluster_resource_id = (
            f"/subscriptions/{cluster_subscription}/resourceGroups/{cluster_resource_group_name}/"
            f"providers/Microsoft.ContainerService/managedClusters/{cluster_name}"
        )
        dataCollectionRuleName = f"MSCI-{cluster_name}-{cluster_region}"
        dcr_resource_id = (
            f"/subscriptions/{subscription_id}/resourceGroups/{resource_group}/"
            f"providers/Microsoft.Insights/dataCollectionRules/{dataCollectionRuleName}"
        )
        if create_dcr:
            # first get the association between region display names and region IDs (because for some reason
            # the "which RPs are available in which regions" check returns region display names)
            region_names_to_id = {}
            # retry the request up to two times
            for _ in range(3):
                try:
                    location_list_url = cmd.cli_ctx.cloud.endpoints.resource_manager + \
                        f"/subscriptions/{subscription_id}/locations?api-version=2019-11-01"
                    r = send_raw_request(cmd.cli_ctx, "GET", location_list_url)
                    # this is required to fool the static analyzer. The else statement will only run if an exception
                    # is thrown, but flake8 will complain that e is undefined if we don't also define it here.
                    error = None
                    break
                except AzCLIError as e:
                    error = e
            else:
                # This will run if the above for loop was not broken out of. This means all three requests failed
                raise error
            json_response = json.loads(r.text)
            for region_data in json_response["value"]:
                region_names_to_id[
                    region_data["displayName"]] = region_data["name"]

            # check if region supports DCRs and DCR-A
            for _ in range(3):
                try:
                    feature_check_url = cmd.cli_ctx.cloud.endpoints.resource_manager + \
                        f"/subscriptions/{subscription_id}/providers/Microsoft.Insights?api-version=2020-10-01"
                    r = send_raw_request(cmd.cli_ctx, "GET", feature_check_url)
                    error = None
                    break
                except AzCLIError as e:
                    error = e
            else:
                raise error
            json_response = json.loads(r.text)
            for resource in json_response["resourceTypes"]:
                if resource["resourceType"].lower() == "datacollectionrules":
                    region_ids = map(lambda x: region_names_to_id[x],
                                     resource["locations"])
                    if location not in region_ids:
                        raise ClientRequestError(
                            f"Data Collection Rules are not supported for LA workspace region {location}"
                        )
                if resource["resourceType"].lower(
                ) == "datacollectionruleassociations":
                    region_ids = map(lambda x: region_names_to_id[x],
                                     resource["locations"])
                    if cluster_region not in region_ids:
                        raise ClientRequestError(
                            f"Data Collection Rule Associations are not supported for cluster region {cluster_region}"
                        )
            dcr_url = cmd.cli_ctx.cloud.endpoints.resource_manager + \
                f"{dcr_resource_id}?api-version=2019-11-01-preview"
            # get existing tags on the container insights extension DCR if the customer added any
            existing_tags = get_existing_container_insights_extension_dcr_tags(
                cmd, dcr_url)
            # create the DCR
            dcr_creation_body = json.dumps({
                "location": location,
                "tags": existing_tags,
                "properties": {
                    "dataSources": {
                        "extensions": [{
                            "name": "ContainerInsightsExtension",
                            "streams": [
                                "Microsoft-Perf",
                                "Microsoft-ContainerInventory",
                                "Microsoft-ContainerLog",
                                "Microsoft-ContainerLogV2",
                                "Microsoft-ContainerNodeInventory",
                                "Microsoft-KubeEvents",
                                "Microsoft-KubeMonAgentEvents",
                                "Microsoft-KubeNodeInventory",
                                "Microsoft-KubePodInventory",
                                "Microsoft-KubePVInventory",
                                "Microsoft-KubeServices",
                                "Microsoft-InsightsMetrics",
                            ],
                            "extensionName": "ContainerInsights",
                        }]
                    },
                    "dataFlows": [{
                        "streams": [
                            "Microsoft-Perf",
                            "Microsoft-ContainerInventory",
                            "Microsoft-ContainerLog",
                            "Microsoft-ContainerLogV2",
                            "Microsoft-ContainerNodeInventory",
                            "Microsoft-KubeEvents",
                            "Microsoft-KubeMonAgentEvents",
                            "Microsoft-KubeNodeInventory",
                            "Microsoft-KubePodInventory",
                            "Microsoft-KubePVInventory",
                            "Microsoft-KubeServices",
                            "Microsoft-InsightsMetrics",
                        ],
                        "destinations": ["la-workspace"],
                    }],
                    "destinations": {
                        "logAnalytics": [{
                            "workspaceResourceId": workspace_resource_id,
                            "name": "la-workspace",
                        }]
                    },
                },
            })
            for _ in range(3):
                try:
                    send_raw_request(cmd.cli_ctx, "PUT", dcr_url,
                                     body=dcr_creation_body)
                    error = None
                    break
                except AzCLIError as e:
                    error = e
            else:
                raise error

        if create_dcra:
            # only create or delete the association between the DCR and cluster
            association_body = json.dumps({
                "location": cluster_region,
                "properties": {
                    "dataCollectionRuleId": dcr_resource_id,
                    "description": "routes monitoring data to a Log Analytics workspace",
                },
            })
            association_url = cmd.cli_ctx.cloud.endpoints.resource_manager + \
                f"{cluster_resource_id}/providers/Microsoft.Insights/dataCollectionRuleAssociations/ContainerInsightsExtension?api-version=2019-11-01-preview"
            for _ in range(3):
                try:
                    send_raw_request(
                        cmd.cli_ctx,
                        "PUT" if not remove_monitoring else "DELETE",
                        association_url,
                        body=association_body,
                    )
                    error = None
                    break
                except AzCLIError as e:
                    error = e
            else:
                raise error

    # Solution route: deploy the ContainerInsights solution to the workspace
    # when it is not already present.
    if not _is_container_insights_solution_exists(cmd, workspace_resource_id):
        unix_time_in_millis = int(
            (datetime.datetime.utcnow() -
             datetime.datetime.utcfromtimestamp(0)).total_seconds() * 1000.0)

        solution_deployment_name = "ContainerInsights-{}".format(
            unix_time_in_millis)

        # pylint: disable=line-too-long
        template = {
            "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
            "contentVersion": "1.0.0.0",
            "parameters": {
                "workspaceResourceId": {
                    "type": "string",
                    "metadata": {
                        "description": "Azure Monitor Log Analytics Resource ID"
                    },
                },
                "workspaceRegion": {
                    "type": "string",
                    "metadata": {
                        "description": "Azure Monitor Log Analytics workspace region"
                    },
                },
                "solutionDeploymentName": {
                    "type": "string",
                    "metadata": {
                        "description": "Name of the solution deployment"
                    },
                },
            },
            "resources": [{
                "type": "Microsoft.Resources/deployments",
                "name": "[parameters('solutionDeploymentName')]",
                "apiVersion": "2017-05-10",
                "subscriptionId": "[split(parameters('workspaceResourceId'),'/')[2]]",
                "resourceGroup": "[split(parameters('workspaceResourceId'),'/')[4]]",
                "properties": {
                    "mode": "Incremental",
                    "template": {
                        "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
                        "contentVersion": "1.0.0.0",
                        "parameters": {},
                        "variables": {},
                        "resources": [{
                            "apiVersion": "2015-11-01-preview",
                            "type": "Microsoft.OperationsManagement/solutions",
                            "location": "[parameters('workspaceRegion')]",
                            "name": "[Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),'/')[8], ')')]",
                            "properties": {
                                "workspaceResourceId": "[parameters('workspaceResourceId')]"
                            },
                            "plan": {
                                "name": "[Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),'/')[8], ')')]",
                                "product": "[Concat('OMSGallery/', 'ContainerInsights')]",
                                "promotionCode": "",
                                "publisher": "Microsoft",
                            },
                        }],
                    },
                    "parameters": {},
                },
            }],
        }

        params = {
            "workspaceResourceId": {
                "value": workspace_resource_id
            },
            "workspaceRegion": {
                "value": location
            },
            "solutionDeploymentName": {
                "value": solution_deployment_name
            },
        }

        deployment_name = "aks-monitoring-{}".format(unix_time_in_millis)
        # publish the Container Insights solution to the Log Analytics workspace
        return _invoke_deployment(
            cmd,
            resource_group,
            deployment_name,
            template,
            params,
            validate=False,
            no_wait=False,
            subscription_id=subscription_id,
        )
def _get_existing_repo(repos, name): repo = next((r for r in repos if r.name == name), None) if not repo: raise ClientRequestError("Repo '{}' not found.".format(name)) return repo