def _create_role_assignment(cmd, quantum_workspace):
    from azure.cli.command_modules.role.custom import create_role_assignment
    retry_attempts = 0
    while retry_attempts < MAX_RETRIES_ROLE_ASSIGNMENT:
        try:
            create_role_assignment(cmd, role="Contributor", scope=quantum_workspace.storage_account,
                                   assignee=quantum_workspace.identity.principal_id)
            break
        except (CloudError, AzureInternalError) as e:
            error = str(e.args).lower()
            if ("does not exist" in error) or ("cannot find" in error):
                print('.', end='', flush=True)
                time.sleep(POLLING_TIME_DURATION)
                retry_attempts += 1
                continue
            raise e
        except Exception as x:
            raise AzureInternalError(f"Role assignment encountered exception ({type(x).__name__}): {x}") from x

    if retry_attempts > 0:
        print()  # To end the line of the waiting indicators.

    if retry_attempts == MAX_RETRIES_ROLE_ASSIGNMENT:
        max_time_in_seconds = MAX_RETRIES_ROLE_ASSIGNMENT * POLLING_TIME_DURATION
        raise AzureInternalError(f"Role assignment could not be added to storage account "
                                 f"{quantum_workspace.storage_account} within {max_time_in_seconds} seconds.")

    return quantum_workspace
def translate_arm(cmd, template_path, parameters_path, resource_group_name, target_subscription=None):
    template_content = _read_json(template_path)
    if not template_content:
        raise InvalidArgumentValueError('Please make sure --template is a valid template file or url')
    parameters_content = _read_json(parameters_path)
    if not parameters_content:
        raise InvalidArgumentValueError('Please make sure --parameters is a valid parameters file or url')
    if target_subscription is None:
        from azure.cli.core.commands.client_factory import get_subscription_id
        target_subscription = get_subscription_id(cmd.cli_ctx)
    try:
        response = requests.post(
            ARM_TRANSLATOR_URL,
            json={
                'resourceGroup': resource_group_name,
                'subscriptionId': target_subscription,
                'template': template_content,
                'parameters': parameters_content
            })
        if response.status_code != 200:
            raise AzureInternalError(response.text)
        scripts = response.json()
        for script in scripts:
            print('{}\n'.format(script))
    except Exception as e:
        raise AzureInternalError('Failed to translate. Please try again in a few minutes.\n' + str(e))
def _get_upload_info(self):
    try:
        response = self.client.build_service.get_resource_upload_url(self.resource_group, self.service, self.name)
        if not response.upload_url:
            raise AzureInternalError("Failed to get a SAS URL to upload context.")
        return response
    except CloudError as e:
        raise AzureInternalError("Failed to get a SAS URL to upload context. Error: {}".format(e.message))
    except AttributeError as e:
        raise AzureInternalError("Failed to get a SAS URL to upload context. Error: {}".format(e))
def build(cmd, target_id=None, project=None):
    """
    Compile a Q# program to run on Azure Quantum.
    """
    target = TargetInfo(cmd, target_id)

    # Validate that dotnet is available
    _check_dotnet_available()

    args = ["dotnet", "build"]
    if project:
        args.append(project)
    args.append(f"-property:ExecutionTarget={target.target_id}")

    logger.debug("Building project with arguments:")
    logger.debug(args)

    import subprocess
    result = subprocess.run(args, stdout=subprocess.PIPE, check=False)

    if result.returncode == 0:
        return {'result': 'ok'}

    # If we got here, we might have encountered an error during compilation, so propagate standard output to the user.
    logger.error("Compilation stage failed with error code %s", result.returncode)
    print(result.stdout.decode('ascii'))
    raise AzureInternalError("Failed to compile program.")
def submit(cmd, program_args, resource_group_name=None, workspace_name=None, location=None, target_id=None,
           project=None, job_name=None, shots=None, storage=None, no_build=False, job_params=None,
           target_capability=None):
    """
    Submit a Q# project to run on Azure Quantum.
    """

    # We first build and then call run.
    # Can't call run directly because it fails to understand the
    # `ExecutionTarget` property when passed in the command line.
    if not no_build:
        build(cmd, target_id=target_id, project=project, target_capability=target_capability)
        logger.info("Project built successfully.")
    else:
        _check_dotnet_available()

    ws = WorkspaceInfo(cmd, resource_group_name, workspace_name, location)
    target = TargetInfo(cmd, target_id)
    token = _get_data_credentials(cmd.cli_ctx, ws.subscription).get_token().token

    args = _generate_submit_args(program_args, ws, target, token, project, job_name, shots, storage, job_params)
    _set_cli_version()

    knack_logger.warning('Submitting job...')

    import subprocess
    result = subprocess.run(args, stdout=subprocess.PIPE, check=False)

    if result.returncode == 0:
        std_output = result.stdout.decode('ascii').strip()
        # Retrieve the job id as the last line of standard output.
        job_id = std_output.split()[-1]
        # Query for the job and return its status to the caller.
        return get(cmd, job_id, resource_group_name, workspace_name, location)

    # The program compiled successfully, but the stand-alone executable failed to run.
    logger.error("Submission of job failed with error code %s", result.returncode)
    print(result.stdout.decode('ascii'))
    raise AzureInternalError("Failed to submit job.")
def _create_role_assignment(cmd, client, resource_group, service, name, role, scope):
    app = client.apps.get(resource_group, service, name)
    if not app.identity or not app.identity.principal_id:
        raise AzureInternalError(
            "Failed to create role assignment without object ID (principal ID) of system-assigned managed identity.")

    identity_role_id = _arm.resolve_role_id(cmd.cli_ctx, role, scope)
    assignments_client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_AUTHORIZATION).role_assignments
    RoleAssignmentCreateParameters = get_sdk(cmd.cli_ctx, ResourceType.MGMT_AUTHORIZATION,
                                             'RoleAssignmentCreateParameters', mod='models',
                                             operation_group='role_assignments')
    parameters = RoleAssignmentCreateParameters(role_definition_id=identity_role_id,
                                                principal_id=app.identity.principal_id)
    logger.warning("Creating an assignment with a role '%s' on the scope of '%s'", identity_role_id, scope)

    retry_times = 36
    assignment_name = _arm._gen_guid()
    for i in range(0, retry_times):
        try:
            assignments_client.create(scope=scope, role_assignment_name=assignment_name, parameters=parameters)
            break
        except (HttpResponseError, CloudError) as ex:
            if 'role assignment already exists' in ex.message:
                logger.warning('Role assignment already exists')
                break
            elif i < retry_times and ' does not exist in the directory ' in ex.message:
                sleep(APP_CREATE_OR_UPDATE_SLEEP_INTERVAL)
                logger.warning('Retrying role assignment creation: %s/%s', i + 1, retry_times)
                continue
            else:
                raise
def _get_new_identity_type_for_remove(exist_identity_type, is_remove_system_identity, new_user_identities):
    new_identity_type = exist_identity_type
    exist_identity_type_str = exist_identity_type.lower()
    if exist_identity_type_str == models_20220301preview.ManagedIdentityType.NONE.lower():
        new_identity_type = models_20220301preview.ManagedIdentityType.NONE
    elif exist_identity_type_str == models_20220301preview.ManagedIdentityType.SYSTEM_ASSIGNED.lower():
        if is_remove_system_identity:
            new_identity_type = models_20220301preview.ManagedIdentityType.NONE
        else:
            new_identity_type = models_20220301preview.ManagedIdentityType.SYSTEM_ASSIGNED
    elif exist_identity_type_str == models_20220301preview.ManagedIdentityType.USER_ASSIGNED.lower():
        if not new_user_identities:
            new_identity_type = models_20220301preview.ManagedIdentityType.NONE
        else:
            new_identity_type = models_20220301preview.ManagedIdentityType.USER_ASSIGNED
    elif exist_identity_type_str == models_20220301preview.ManagedIdentityType.SYSTEM_ASSIGNED_USER_ASSIGNED.lower():
        if is_remove_system_identity and not new_user_identities:
            new_identity_type = models_20220301preview.ManagedIdentityType.NONE
        elif not is_remove_system_identity and not new_user_identities:
            new_identity_type = models_20220301preview.ManagedIdentityType.SYSTEM_ASSIGNED
        elif is_remove_system_identity and new_user_identities:
            new_identity_type = models_20220301preview.ManagedIdentityType.USER_ASSIGNED
        else:
            new_identity_type = models_20220301preview.ManagedIdentityType.SYSTEM_ASSIGNED_USER_ASSIGNED
    else:
        raise AzureInternalError("Invalid identity type: {}.".format(exist_identity_type_str))
    return new_identity_type
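# Illustrative usage sketch (not part of the source): removing only the system-assigned identity
# while user-assigned identities remain should downgrade the type from SYSTEM_ASSIGNED_USER_ASSIGNED
# to USER_ASSIGNED. The identity resource ID below is a made-up example value.
def _example_identity_type_after_remove():
    remaining = ["/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg/"
                 "providers/Microsoft.ManagedIdentity/userAssignedIdentities/example-identity"]
    new_type = _get_new_identity_type_for_remove(
        models_20220301preview.ManagedIdentityType.SYSTEM_ASSIGNED_USER_ASSIGNED,
        is_remove_system_identity=True,
        new_user_identities=remaining)
    assert new_type == models_20220301preview.ManagedIdentityType.USER_ASSIGNED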
def map_azure_error_to_cli_error(azure_error):
    error_message = getattr(azure_error, "message", str(azure_error))
    if isinstance(azure_error, HttpResponseError):
        status_code = getattr(azure_error, "status_code", None)
        if status_code:
            status_code = int(status_code)
            if status_code == 400:
                return BadRequestError(error_message)
            if status_code == 401:
                return UnauthorizedError(error_message)
            if status_code == 403:
                return ForbiddenError(error_message)
            if status_code == 404:
                return ResourceNotFoundError(error_message)
            if 400 <= status_code < 500:
                return UnclassifiedUserFault(error_message)
            if 500 <= status_code < 600:
                return AzureInternalError(error_message)
        return ServiceError(error_message)
    if isinstance(azure_error, ServiceRequestError):
        return ClientRequestError(error_message)
    if isinstance(azure_error, ServiceResponseError):
        return AzureResponseError(error_message)
    return ServiceError(error_message)
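# Illustrative usage sketch (not part of the source): a command handler can wrap an SDK call and
# rethrow the mapped CLI error so that 4xx responses surface as user faults and 5xx responses as
# AzureInternalError. The client object and its `resources.get` call are hypothetical placeholders.
def _example_get_resource(client, resource_group_name, resource_name):
    try:
        return client.resources.get(resource_group_name, resource_name)  # hypothetical SDK call
    except (HttpResponseError, ServiceRequestError, ServiceResponseError) as ex:
        raise map_azure_error_to_cli_error(ex) from ex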
def create(cmd, resource_group_name=None, workspace_name=None, location=None, storage_account=None,
           skip_role_assignment=False, provider_sku_list=None):
    """
    Create a new Azure Quantum workspace.
    """
    client = cf_workspaces(cmd.cli_ctx)
    if not workspace_name:
        raise RequiredArgumentMissingError("An explicit workspace name is required for this command.")
    if not storage_account:
        raise RequiredArgumentMissingError("A quantum workspace requires a valid storage account.")
    if not location:
        raise RequiredArgumentMissingError("A location for the new quantum workspace is required.")
    if provider_sku_list is None:
        raise RequiredArgumentMissingError("A list of Azure Quantum providers and SKUs is required.")
    info = WorkspaceInfo(cmd, resource_group_name, workspace_name, location)
    if not info.resource_group:
        raise ResourceNotFoundError("Please run 'az quantum workspace set' first to select a default resource group.")
    quantum_workspace = _get_basic_quantum_workspace(location, info, storage_account)

    # Until the "--skip-role-assignment" parameter is deprecated, use the old non-ARM code to create
    # a workspace without doing a role assignment
    if skip_role_assignment:
        _add_quantum_providers(cmd, quantum_workspace, provider_sku_list)
        poller = client.begin_create_or_update(info.resource_group, info.name, quantum_workspace, polling=False)
        while not poller.done():
            time.sleep(POLLING_TIME_DURATION)
        quantum_workspace = poller.result()
        return quantum_workspace

    # ARM-template-based code to create an Azure Quantum workspace and make it a "Contributor" to the storage account
    template_path = os.path.join(os.path.dirname(__file__), 'templates', 'create-workspace-and-assign-role.json')
    with open(template_path, 'r', encoding='utf8') as template_file_fd:
        template = json.load(template_file_fd)

    _add_quantum_providers(cmd, quantum_workspace, provider_sku_list)
    validated_providers = []
    for provider in quantum_workspace.providers:
        validated_providers.append({"providerId": provider.provider_id, "providerSku": provider.provider_sku})

    parameters = {
        'quantumWorkspaceName': workspace_name,
        'location': location,
        'tags': {},
        'providers': validated_providers,
        'storageAccountName': storage_account,
        'storageAccountId': _get_storage_account_path(info, storage_account),
        'storageAccountLocation': location,
        'storageAccountDeploymentName': "Microsoft.StorageAccount-" + time.strftime("%d-%b-%Y-%H-%M-%S", time.gmtime())
    }
    parameters = {k: {'value': v} for k, v in parameters.items()}

    deployment_properties = {
        'mode': DeploymentMode.incremental,
        'template': template,
        'parameters': parameters
    }

    credentials = _get_data_credentials(cmd.cli_ctx, info.subscription)
    arm_client = ResourceManagementClient(credentials, info.subscription)
    deployment_async_operation = arm_client.deployments.begin_create_or_update(
        info.resource_group,
        workspace_name,  # Note: This is actually specifying the deployment name, but workspace_name is used here in test_quantum_workspace.py
        {'properties': deployment_properties})

    # Show progress indicator dots
    polling_cycles = 0
    while not deployment_async_operation.done():
        polling_cycles += 1
        if polling_cycles > MAX_POLLS_CREATE_WORKSPACE:
            print()
            raise AzureInternalError("Create quantum workspace operation timed out.")
        print('.', end='', flush=True)
        time.sleep(POLLING_TIME_DURATION)
    print()

    quantum_workspace = deployment_async_operation.result()
    return quantum_workspace
def arm_exception_handler(ex, fault_type, summary, return_if_not_found=False):
    if isinstance(ex, AuthenticationError):
        telemetry.set_exception(exception=ex, fault_type=fault_type, summary=summary)
        raise AzureResponseError("Authentication error occurred while making ARM request: " +
                                 str(ex) + "\nSummary: {}".format(summary))

    if isinstance(ex, TokenExpiredError):
        telemetry.set_exception(exception=ex, fault_type=fault_type, summary=summary)
        raise AzureResponseError("Token expiration error occurred while making ARM request: " +
                                 str(ex) + "\nSummary: {}".format(summary))

    if isinstance(ex, HttpOperationError):
        status_code = ex.response.status_code
        if status_code == 404 and return_if_not_found:
            return
        if status_code // 100 == 4:
            telemetry.set_user_fault()
        telemetry.set_exception(exception=ex, fault_type=fault_type, summary=summary)
        if status_code // 100 == 5:
            raise AzureInternalError("Http operation error occurred while making ARM request: " +
                                     str(ex) + "\nSummary: {}".format(summary))
        raise AzureResponseError("Http operation error occurred while making ARM request: " +
                                 str(ex) + "\nSummary: {}".format(summary))

    if isinstance(ex, MSRestValidationError):
        telemetry.set_exception(exception=ex, fault_type=fault_type, summary=summary)
        raise AzureResponseError("Validation error occurred while making ARM request: " +
                                 str(ex) + "\nSummary: {}".format(summary))

    if isinstance(ex, HttpResponseError):
        status_code = ex.status_code
        if status_code == 404 and return_if_not_found:
            return
        if status_code // 100 == 4:
            telemetry.set_user_fault()
        telemetry.set_exception(exception=ex, fault_type=fault_type, summary=summary)
        if status_code // 100 == 5:
            raise AzureInternalError("Http response error occurred while making ARM request: " +
                                     str(ex) + "\nSummary: {}".format(summary))
        raise AzureResponseError("Http response error occurred while making ARM request: " +
                                 str(ex) + "\nSummary: {}".format(summary))

    if isinstance(ex, ResourceNotFoundError) and return_if_not_found:
        return

    telemetry.set_exception(exception=ex, fault_type=fault_type, summary=summary)
    raise ClientRequestError("Error occurred while making ARM request: " + str(ex) + "\nSummary: {}".format(summary))
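# Illustrative usage sketch (not part of the source): route any exception from an ARM call through
# arm_exception_handler so telemetry is recorded and the error is reclassified (or None is returned
# for a 404 when return_if_not_found is set). The `client.connected_cluster.get` call and the
# fault-type string are hypothetical placeholders.
def _example_get_connected_cluster(client, resource_group_name, cluster_name):
    try:
        return client.connected_cluster.get(resource_group_name, cluster_name)  # hypothetical SDK call
    except Exception as ex:  # delegated to the shared handler
        return arm_exception_handler(ex,
                                     fault_type="example-get-cluster-error",
                                     summary="Unable to fetch the connected cluster",
                                     return_if_not_found=True)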
def app_identity_remove(cmd, client, resource_group, service, name, system_assigned=None, user_assigned=None):
    """
    Note: Always use the sync method to operate on managed identity to avoid data inconsistency.
    :param system_assigned: 1) None or False: Don't change system-assigned managed identity.
                            2) True: remove system-assigned managed identity.
    :param user_assigned: 1) None: Don't change user-assigned managed identities.
                          2) An empty list: remove all user-assigned managed identities.
                          3) A non-empty list of user-assigned managed identity resource IDs to remove.
    """
    app = client.apps.get(resource_group, service, name)
    if _app_not_updatable(app):
        raise ConflictRequestError(
            "Failed to remove managed identities since app is in {} state.".format(app.properties.provisioning_state))

    if not app.identity:
        logger.warning("Skip removing managed identity since no identities are assigned to the app.")
        return
    if not app.identity.type:
        raise AzureInternalError("Invalid existing identity type {}.".format(app.identity.type))
    if app.identity.type == models_20220301preview.ManagedIdentityType.NONE:
        logger.warning("Skip removing managed identity since identity type is {}.".format(app.identity.type))
        return

    # TODO(jiec): For backward compatibility, convert to the remove-system-assigned-only case. Remove code after migration.
    if system_assigned is None and user_assigned is None:
        system_assigned = True

    new_user_identities = _get_new_user_identities_for_remove(app.identity.user_assigned_identities, user_assigned)
    new_identity_type = _get_new_identity_type_for_remove(app.identity.type, system_assigned, new_user_identities)
    user_identity_payload = _get_user_identity_payload_for_remove(new_identity_type, user_assigned)

    target_identity = models_20220301preview.ManagedIdentityProperties()
    target_identity.type = new_identity_type
    target_identity.user_assigned_identities = user_identity_payload

    app_resource = models_20220301preview.AppResource()
    app_resource.identity = target_identity

    poller = client.apps.begin_update(resource_group, service, name, app_resource)
    wait_till_end(cmd, poller)
    poller.result()
    if poller.status().lower() != "succeeded":
        return poller
    return client.apps.get(resource_group, service, name)
def create(cmd, resource_group_name=None, workspace_name=None, location=None, storage_account=None,
           skip_role_assignment=False, provider_sku_list=None, auto_accept=False):
    """
    Create a new Azure Quantum workspace.
    """
    client = cf_workspaces(cmd.cli_ctx)
    if not workspace_name:
        raise RequiredArgumentMissingError("An explicit workspace name is required for this command.")
    if not storage_account:
        raise RequiredArgumentMissingError("A quantum workspace requires a valid storage account.")
    if not location:
        raise RequiredArgumentMissingError("A location for the new quantum workspace is required.")
    info = WorkspaceInfo(cmd, resource_group_name, workspace_name, location)
    if not info.resource_group:
        raise ResourceNotFoundError("Please run 'az quantum workspace set' first to select a default resource group.")
    quantum_workspace = _get_basic_quantum_workspace(location, info, storage_account)

    # Until the "--skip-role-assignment" parameter is deprecated, use the old non-ARM code to create
    # a workspace without doing a role assignment
    if skip_role_assignment:
        _add_quantum_providers(cmd, quantum_workspace, provider_sku_list, auto_accept)
        poller = client.begin_create_or_update(info.resource_group, info.name, quantum_workspace, polling=False)
        while not poller.done():
            time.sleep(POLLING_TIME_DURATION)
        quantum_workspace = poller.result()
        return quantum_workspace

    # ARM-template-based code to create an Azure Quantum workspace and make it a "Contributor" to the storage account
    template_path = os.path.join(os.path.dirname(__file__), 'templates', 'create-workspace-and-assign-role.json')
    with open(template_path, 'r', encoding='utf8') as template_file_fd:
        template = json.load(template_file_fd)

    _add_quantum_providers(cmd, quantum_workspace, provider_sku_list, auto_accept)
    validated_providers = []
    for provider in quantum_workspace.providers:
        validated_providers.append({"providerId": provider.provider_id, "providerSku": provider.provider_sku})

    # Set default storage account parameters in case the storage account does not exist yet
    storage_account_sku = DEFAULT_STORAGE_SKU
    storage_account_sku_tier = DEFAULT_STORAGE_SKU_TIER
    storage_account_kind = DEFAULT_STORAGE_KIND
    storage_account_location = location

    # Look for info on existing storage account
    storage_account_list = list_storage_accounts(cmd, resource_group_name)
    if storage_account_list:
        for storage_account_info in storage_account_list:
            if storage_account_info.name == storage_account:
                storage_account_sku = storage_account_info.sku.name
                storage_account_sku_tier = storage_account_info.sku.tier
                storage_account_kind = storage_account_info.kind
                storage_account_location = storage_account_info.location
                break

    # Validate the storage account SKU tier and kind
    _validate_storage_account('tier', storage_account_sku_tier, SUPPORTED_STORAGE_SKU_TIERS)
    _validate_storage_account('kind', storage_account_kind, SUPPORTED_STORAGE_KINDS)

    parameters = {
        'quantumWorkspaceName': workspace_name,
        'location': location,
        'tags': {},
        'providers': validated_providers,
        'storageAccountName': storage_account,
        'storageAccountId': _get_storage_account_path(info, storage_account),
        'storageAccountLocation': storage_account_location,
        'storageAccountSku': storage_account_sku,
        'storageAccountKind': storage_account_kind,
        'storageAccountDeploymentName': "Microsoft.StorageAccount-" + time.strftime("%d-%b-%Y-%H-%M-%S", time.gmtime())
    }
    parameters = {k: {'value': v} for k, v in parameters.items()}

    deployment_properties = {
        'mode': DeploymentMode.incremental,
        'template': template,
        'parameters': parameters
    }

    credentials = _get_data_credentials(cmd.cli_ctx, info.subscription)
    arm_client = ResourceManagementClient(credentials, info.subscription)

    # Show the first progress indicator dot before starting ARM template deployment
    print('.', end='', flush=True)

    deployment_async_operation = arm_client.deployments.begin_create_or_update(
        info.resource_group,
        (DEPLOYMENT_NAME_PREFIX + workspace_name)[:64],
        {'properties': deployment_properties})

    # Show progress indicator dots
    polling_cycles = 0
    while not deployment_async_operation.done():
        polling_cycles += 1
        if polling_cycles > MAX_POLLS_CREATE_WORKSPACE:
            print()
            raise AzureInternalError("Create quantum workspace operation timed out.")
        print('.', end='', flush=True)
        time.sleep(POLLING_TIME_DURATION)
    print()

    quantum_workspace = deployment_async_operation.result()
    return quantum_workspace
def execute_query(client, graph_query, first, skip, subscriptions, management_groups, allow_partial_scopes, skip_token):
    # type: (ResourceGraphClient, str, int, int, list[str], list[str], bool, str) -> object

    mgs_list = management_groups
    if mgs_list is not None and len(mgs_list) > __MANAGEMENT_GROUP_LIMIT:
        mgs_list = mgs_list[:__MANAGEMENT_GROUP_LIMIT]
        warning_message = "The query included more management groups than allowed. "\
                          "Only the first {0} management groups were included for the results. "\
                          "To use more than {0} management groups, "\
                          "see the docs for examples: "\
                          "https://aka.ms/arg-error-toomanysubs".format(__MANAGEMENT_GROUP_LIMIT)
        __logger.warning(warning_message)

    subs_list = None
    if mgs_list is None:
        subs_list = subscriptions or _get_cached_subscriptions()
        if subs_list is not None and len(subs_list) > __SUBSCRIPTION_LIMIT:
            subs_list = subs_list[:__SUBSCRIPTION_LIMIT]
            warning_message = "The query included more subscriptions than allowed. "\
                              "Only the first {0} subscriptions were included for the results. "\
                              "To use more than {0} subscriptions, "\
                              "see the docs for examples: "\
                              "https://aka.ms/arg-error-toomanysubs".format(__SUBSCRIPTION_LIMIT)
            __logger.warning(warning_message)

    response = None
    try:
        result_truncated = False

        request_options = QueryRequestOptions(
            top=first,
            skip=skip,
            skip_token=skip_token,
            result_format=ResultFormat.object_array,
            allow_partial_scopes=allow_partial_scopes
        )

        request = QueryRequest(query=graph_query, subscriptions=subs_list,
                               management_groups=mgs_list, options=request_options)
        response = client.resources(request)  # type: QueryResponse
        if response.result_truncated == ResultTruncated.true:
            result_truncated = True

        if result_truncated and first is not None and len(response.data) < first:
            __logger.warning("Unable to paginate the results of the query. "
                             "Some resources may be missing from the results. "
                             "To rewrite the query and enable paging, "
                             "see the docs for an example: https://aka.ms/arg-results-truncated")
    except HttpResponseError as ex:
        if ex.model.error.code == 'BadRequest':
            raise BadRequestError(json.dumps(_to_dict(ex.model.error), indent=4)) from ex
        raise AzureInternalError(json.dumps(_to_dict(ex.model.error), indent=4)) from ex

    result_dict = {}
    result_dict['data'] = response.data
    result_dict['count'] = response.count
    result_dict['total_records'] = response.total_records
    result_dict['skip_token'] = response.skip_token

    return result_dict
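# Illustrative pagination sketch (not part of the source): execute_query returns the service's
# skip_token in its result dictionary, so a caller can loop until no token comes back to collect
# every page. The page size of 1000 is an arbitrary example value, and _example_query_all_pages
# is a hypothetical helper.
def _example_query_all_pages(client, graph_query, page_size=1000):
    rows = []
    skip_token = None
    while True:
        page = execute_query(client, graph_query, first=page_size, skip=0,
                             subscriptions=None, management_groups=None,
                             allow_partial_scopes=False, skip_token=skip_token)
        rows.extend(page['data'])
        skip_token = page['skip_token']
        if not skip_token:
            break
    return rows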