def _validate_get_payload(payload): """Validate provided payload. :param payload: Compute payload. :type dict :return: :rtype: None """ if 'properties' not in payload or 'computeType' not in payload[ 'properties']: raise ComputeTargetException('Invalid cluster payload:\n' '{}'.format(payload)) if payload['properties'][ 'computeType'] != KubernetesCompute._compute_type: raise ComputeTargetException('Invalid cluster payload, not "{}":\n' '{}'.format( KubernetesCompute._compute_type, payload)) for arm_key in ['location', 'id', 'tags']: if arm_key not in payload: raise ComputeTargetException( 'Invalid cluster payload, missing ["{}"]:\n' '{}'.format(arm_key, payload)) for key in [ 'properties', 'provisioningErrors', 'description', 'provisioningState', 'resourceId' ]: if key not in payload['properties']: raise ComputeTargetException( 'Invalid cluster payload, missing ["properties"]["{}"]:\n' '{}'.format(key, payload))
def wait_for_completion(self, show_output=False, is_delete_operation=False):
    """Wait for the current provisioning operation to finish on the cluster.

    This method raises a :class:`azureml.exceptions.ComputeTargetException` if there is a problem
    polling the compute object.

    :param show_output: Indicates whether to provide more verbose output.
    :type show_output: bool
    :param is_delete_operation: Indicates whether the operation is meant for deleting.
    :type is_delete_operation: bool
    :raises azureml.exceptions.ComputeTargetException:
    """
    try:
        operation_state, error = self._wait_for_completion(show_output)
        print('Provisioning operation finished, operation "{}"'.format(operation_state))
        if not is_delete_operation:
            self.refresh_state()
        if operation_state != 'Succeeded':
            if error and 'statusCode' in error and 'message' in error:
                error_response = ('StatusCode: {}\n'
                                  'Message: {}'.format(error['statusCode'], error['message']))
            else:
                error_response = error

            raise ComputeTargetException('Compute object provisioning polling reached non-successful terminal '
                                         'state, current provisioning state: {}\n'
                                         'Provisioning operation error:\n'
                                         '{}'.format(self.provisioning_state, error_response))
    except ComputeTargetException as e:
        if e.message == 'No operation endpoint':
            self.refresh_state()
            raise ComputeTargetException('Long running operation information not known, unable to poll. '
                                         'Current state is {}'.format(self.provisioning_state))
        else:
            raise e
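# --- Usage sketch (not part of the SDK source) ------------------------------
# Hypothetical example of attaching a Kubernetes cluster and waiting for the
# operation to finish. The workspace, compute name, and resource id are
# placeholders; KubernetesCompute.attach_configuration is assumed to accept a
# resource_id keyword, and the ComputeTarget.attach(...) pattern mirrors the
# deprecated attach helpers elsewhere in this collection.
from azureml.core import Workspace
from azureml.core.compute import ComputeTarget, KubernetesCompute

ws = Workspace.from_config()  # assumes a local config.json
attach_config = KubernetesCompute.attach_configuration(
    resource_id='/subscriptions/<sub>/resourceGroups/<rg>'
                '/providers/Microsoft.Kubernetes/connectedClusters/<cluster>')
compute = ComputeTarget.attach(ws, 'my-k8s-compute', attach_config)
# Blocks until the attach operation reaches a terminal state and raises
# ComputeTargetException if it ends in a non-successful state.
compute.wait_for_completion(show_output=True)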
def _build_attach_payload(config, workspace):
    """Build attach payload.

    :param config: The compute configuration.
    :type config: KubeComputeAttachConfiguration
    :param workspace: The workspace object to associate the compute resource with.
    :type workspace: azureml.core.Workspace
    :return:
    :rtype: dict
    """
    json_payload = copy.deepcopy(kubernetes_compute_template)
    del json_payload['properties']['computeLocation']
    if not config:
        raise ComputeTargetException('Error, missing config.')
    attach_resource_id = config.resource_id
    if not attach_resource_id:
        raise ComputeTargetException('Error, missing resource_id.')
    json_payload['properties'].update(config.to_dict())
    json_payload['properties']['resourceId'] = attach_resource_id
    json_payload['properties']['properties']['amlK8sConfig'].update(config.aml_k8s_config)
    return json_payload
def validate_configuration(self):
    """Check that the specified configuration values are valid.

    Raises a :class:`azureml.exceptions.ComputeTargetException` if validation fails.

    :raises: :class:`azureml.exceptions.ComputeTargetException`
    """
    if self.resource_id:
        # resource_id is provided, validate resource_id
        aks_arm_type = 'Microsoft.ContainerService/managedClusters'
        arc_arm_type = 'Microsoft.Kubernetes/connectedClusters'
        CLUSTER_TYPE_REGEX = '(?:{}|{})'.format(aks_arm_type, arc_arm_type)
        arm_template = ('/subscriptions/{part}/resourceGroups/{part}'
                        '/providers/{cluster_type}/{part}')
        resource_id_pattern = \
            '(?i)' + arm_template.format(part=r'[\w\-_\.]+', cluster_type=CLUSTER_TYPE_REGEX)
        if not re.match(resource_id_pattern, self.resource_id):
            raise ComputeTargetException(
                'Invalid resource_id provided: {} \n Does not match\n'
                'AKS template: {}\n or\n ARC template: {}'.format(
                    self.resource_id,
                    arm_template.format(part='<>', cluster_type=aks_arm_type),
                    arm_template.format(part='<>', cluster_type=arc_arm_type)))
    else:
        raise ComputeTargetException('Missing argument: resource_id.')
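# --- Illustration (not part of the SDK source) ------------------------------
# A minimal, self-contained check of the resource_id pattern built above,
# using placeholder subscription, resource group, and cluster names.
import re

aks_arm_type = 'Microsoft.ContainerService/managedClusters'
arc_arm_type = 'Microsoft.Kubernetes/connectedClusters'
cluster_type_regex = '(?:{}|{})'.format(aks_arm_type, arc_arm_type)
arm_template = ('/subscriptions/{part}/resourceGroups/{part}'
                '/providers/{cluster_type}/{part}')
pattern = '(?i)' + arm_template.format(part=r'[\w\-_\.]+', cluster_type=cluster_type_regex)

arc_id = ('/subscriptions/00000000-0000-0000-0000-000000000000'
          '/resourceGroups/my-rg/providers/Microsoft.Kubernetes'
          '/connectedClusters/my-arc-cluster')
bad_id = '/subscriptions/xyz/resourceGroups/my-rg/providers/Microsoft.Storage/accounts/foo'

assert re.match(pattern, arc_id) is not None   # matches the ARC template
assert re.match(pattern, bad_id) is None       # unrelated resource types are rejected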
def _valid_compute_name(compute_name):
    message = "compute name must be between 2 and 16 characters long. " \
              "Its first character has to be a letter, and " \
              "valid characters include letters, digits, and the - character."
    if not compute_name:
        raise ComputeTargetException('compute_name cannot be empty.')
    if not re.match("^[A-Za-z][A-Za-z0-9-]{0,14}[A-Za-z0-9]$", compute_name):
        raise ComputeTargetException(message)
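# --- Illustration (not part of the SDK source) ------------------------------
# The naming rule enforced above: 2-16 characters, starting with a letter,
# ending with a letter or digit, with letters, digits, and '-' in between.
import re

_NAME_PATTERN = "^[A-Za-z][A-Za-z0-9-]{0,14}[A-Za-z0-9]$"

assert re.match(_NAME_PATTERN, "gpu-cluster-01")                # valid
assert re.match(_NAME_PATTERN, "ab")                            # shortest valid name
assert not re.match(_NAME_PATTERN, "1cluster")                  # must start with a letter
assert not re.match(_NAME_PATTERN, "a-very-long-compute-name")  # longer than 16 characters
assert not re.match(_NAME_PATTERN, "cluster-")                  # cannot end with '-'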
def _validate_get_payload(payload):
    if 'properties' not in payload or 'computeType' not in payload['properties']:
        raise ComputeTargetException('Invalid cluster payload:\n'
                                     '{}'.format(payload))
    if payload['properties']['computeType'] != BatchCompute._compute_type:
        raise ComputeTargetException('Invalid cluster payload, not "{}":\n'
                                     '{}'.format(BatchCompute._compute_type, payload))
    for arm_key in ['location', 'id', 'tags']:
        if arm_key not in payload:
            raise ComputeTargetException('Invalid cluster payload, missing ["{}"]:\n'
                                         '{}'.format(arm_key, payload))
    for key in ['properties', 'provisioningErrors', 'description', 'provisioningState', 'resourceId']:
        if key not in payload['properties']:
            raise ComputeTargetException('Invalid cluster payload, missing ["properties"]["{}"]:\n'
                                         '{}'.format(key, payload))
def attach(workspace, name, resource_id):  # pragma: no cover
    """DEPRECATED. Use the ``attach_configuration`` method instead.

    Associate an existing DataFactory compute resource with the provided workspace.

    :param workspace: The workspace object to associate the compute resource with.
    :type workspace: azureml.core.Workspace
    :param name: The name to associate with the compute resource inside the provided workspace. Does not
        have to match the name of the compute resource to be attached.
    :type name: str
    :param resource_id: The Azure resource ID for the compute resource being attached.
    :type resource_id: str
    :return: A DataFactoryCompute object representation of the compute object.
    :rtype: azureml.core.compute.datafactory.DataFactoryCompute
    :raises azureml.exceptions.ComputeTargetException:
    """
    raise ComputeTargetException('This method is DEPRECATED. Please use the following code to attach a '
                                 'DataFactory compute resource.\n'
                                 '# Attach DataFactory\n'
                                 'attach_config = DataFactoryCompute.attach_configuration(resource_group='
                                 '"name_of_resource_group",\n'
                                 '                                                        factory_name='
                                 '"name_of_datafactory")\n'
                                 'compute = ComputeTarget.attach(workspace, name, attach_config)')
def _wait_for_completion(self, show_output):
    """Wait for completion implementation.

    :param show_output:
    :type show_output: bool
    :return:
    :rtype: (str, dict)
    """
    if not self._operation_endpoint:
        raise ComputeTargetException('No operation endpoint')
    operation_state, error = self._get_operation_state()
    current_state = operation_state
    if show_output:
        sys.stdout.write('{}'.format(current_state))
        sys.stdout.flush()
    while operation_state != 'Succeeded' and operation_state != 'Failed' and operation_state != 'Canceled':
        time.sleep(5)
        operation_state, error = self._get_operation_state()
        if show_output:
            sys.stdout.write('.')
            if operation_state != current_state:
                sys.stdout.write('\n{}'.format(operation_state))
                current_state = operation_state
            sys.stdout.flush()
    return operation_state, error
def delete(self):
    """Delete is not supported for a BatchCompute object. Use :meth:`detach` instead.

    :raises azureml.exceptions.ComputeTargetException:
    """
    raise ComputeTargetException('Delete is not supported for Batch object. Try to use detach instead.')
def _delete_or_detach(self, underlying_resource_action):
    """Remove the Compute object from its associated workspace.

    If underlying_resource_action is 'delete', the corresponding cloud-based objects will also be deleted.
    If underlying_resource_action is 'detach', no underlying cloud object will be deleted; the association
    will just be removed.

    :param underlying_resource_action: Whether to delete or detach the underlying cloud object.
    :type underlying_resource_action: str
    :raises azureml.exceptions.ComputeTargetException:
    """
    headers = self._auth.get_authentication_header()
    ComputeTarget._add_request_tracking_headers(headers)
    params = {'api-version': MLC_WORKSPACE_API_VERSION,
              'underlyingResourceAction': underlying_resource_action}
    resp = ClientBase._execute_func(get_requests_session().delete, self._mlc_endpoint, params=params,
                                    headers=headers)
    try:
        resp.raise_for_status()
    except requests.exceptions.HTTPError:
        raise ComputeTargetException('Received bad response from Resource Provider:\n'
                                     'Response Code: {}\n'
                                     'Headers: {}\n'
                                     'Content: {}'.format(resp.status_code, resp.headers, resp.content))
    self.provisioning_state = 'Deleting'
    self._operation_endpoint = resp.headers['Azure-AsyncOperation']
def _get(workspace, name):
    """Return web response content for the compute.

    :param workspace:
    :type workspace: azureml.core.Workspace
    :param name:
    :type name: str
    :return:
    :rtype: dict
    """
    endpoint = ComputeTarget._get_rp_compute_endpoint(workspace, name)
    headers = workspace._auth.get_authentication_header()
    ComputeTarget._add_request_tracking_headers(headers)
    params = {'api-version': MLC_WORKSPACE_API_VERSION}
    resp = ClientBase._execute_func(get_requests_session().get, endpoint, params=params, headers=headers)

    if resp.status_code == 200:
        content = resp.content
        if isinstance(content, bytes):
            content = content.decode('utf-8')
        get_content = json.loads(content)
        return get_content
    elif resp.status_code == 404:
        return None
    else:
        raise ComputeTargetException('Received bad response from Resource Provider:\n'
                                     'Response Code: {}\n'
                                     'Headers: {}\n'
                                     'Content: {}'.format(resp.status_code, resp.headers, resp.content))
def test_get_compute_target(self, mock_ComputeTarget, mock_AmlCompute):
    mock_ComputeTarget.side_effect = [
        'test_compute_target',
        ComputeTargetException("Compute Target Not Found")
    ]

    # First call to mock_ComputeTarget returns 'test_compute_target':
    # the compute target already exists, so create is not called.
    output_1 = self.aml_interface.get_compute_target(
        'test_compute_name', 'STANDARD_D2_V2')
    self.assertEqual(output_1, 'test_compute_target')
    mock_ComputeTarget.create.assert_not_called()

    mock_compute = Mock()
    mock_ComputeTarget.create.return_value = mock_compute

    # Second call to mock_ComputeTarget raises ComputeTargetException,
    # signalling that the compute target needs to be created.
    output_2 = self.aml_interface.get_compute_target(
        'test_compute_name', 'STANDARD_D2_V2')
    self.assertEqual(output_2, mock_compute)
    mock_AmlCompute.provisioning_configuration.assert_called_once_with(
        vm_size='STANDARD_D2_V2',
        min_nodes=1,
        max_nodes=2)
    mock_ComputeTarget.create.assert_called_once()
    mock_compute.wait_for_completion.assert_called_once()
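# --- Sketch (assumption, not from the source) --------------------------------
# A minimal get_compute_target implementation consistent with the behaviour
# exercised by the test above: return the existing target if the lookup
# succeeds, otherwise provision a small AmlCompute cluster. The class and
# attribute names (AMLInterface, self.workspace) are hypothetical.
from azureml.core.compute import AmlCompute, ComputeTarget
from azureml.exceptions import ComputeTargetException


class AMLInterface:
    def __init__(self, workspace):
        self.workspace = workspace

    def get_compute_target(self, compute_name, vm_size):
        try:
            # Reuse the compute target if it already exists in the workspace.
            return ComputeTarget(workspace=self.workspace, name=compute_name)
        except ComputeTargetException:
            # Not found: provision an autoscaling cluster and wait for it.
            config = AmlCompute.provisioning_configuration(
                vm_size=vm_size, min_nodes=1, max_nodes=2)
            compute = ComputeTarget.create(self.workspace, compute_name, config)
            compute.wait_for_completion(show_output=True)
            return compute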
def deserialize_from_dict(compute_target_name, compute_target_dict):
    """Deserialize compute_target_dict and return the corresponding compute target object.

    :param compute_target_name: The compute target name, basically the <compute_target_name>.compute file.
    :type compute_target_name: str
    :param compute_target_dict: The compute target dict, loaded from the on-disk .compute file.
    :type compute_target_dict: dict
    :return: The target-specific compute target object.
    :rtype: azureml.core.compute_target.AbstractComputeTarget
    """
    _type_to_class_dict = {_BatchAITarget._BATCH_AI_TYPE: _BatchAITarget}

    if AbstractComputeTarget._TARGET_TYPE_KEY in compute_target_dict:
        compute_type = compute_target_dict[AbstractComputeTarget._TARGET_TYPE_KEY]
        if compute_type in _type_to_class_dict:
            return _type_to_class_dict[compute_type]._deserialize_from_dict(
                compute_target_name, compute_target_dict)
        else:
            return None
    else:
        raise ComputeTargetException(
            "{} required field is not present in {} dict for creating the "
            "required compute target object.".format(
                AbstractComputeTarget._TARGET_TYPE_KEY, compute_target_dict))
def attach_legacy_compute_target(experiment, source_directory, compute_target):
    """Attach a compute target to this project.

    :param experiment:
    :type experiment: azureml.core.experiment.Experiment
    :param source_directory:
    :type source_directory: str
    :param compute_target: A compute target object to attach.
    :type compute_target: azureml.core.compute_target.AbstractComputeTarget
    :return: None if the attach is successful, otherwise throws an exception.
    """
    logging.warning("attach_legacy_compute_target method is going to be deprecated. "
                    "This will be removed in the next SDK release.")
    _check_paramiko()
    from azureml._project import _compute_target_commands
    if isinstance(compute_target, _SSHBasedComputeTarget):
        _compute_target_commands.attach_ssh_based_compute_targets(
            experiment, source_directory, compute_target)
    elif isinstance(compute_target, _BatchAITarget):
        _compute_target_commands.attach_batchai_compute_target(
            experiment, source_directory, compute_target)
    else:
        raise ComputeTargetException(
            "Unsupported compute target type. Type={}".format(type(compute_target)))
def _deserialize_from_dict(compute_target_name, compute_target_dict):
    """Create a compute target object from a dictionary.

    :param compute_target_name: The compute target name, basically the <compute_target_name>.compute file.
    :type compute_target_name: str
    :param compute_target_dict: The compute target dict, loaded from the on-disk .compute file.
    :type compute_target_dict: dict
    :return:
    :rtype: _BatchAITarget
    """
    if (_BatchAITarget._SUBSCRIPTION_ID_KEY in compute_target_dict
            and _BatchAITarget._RESOURCE_GROUP_NAME_KEY in compute_target_dict
            and _BatchAITarget._CLUSTER_NAME_KEY in compute_target_dict):
        batchai_object = _BatchAITarget(
            compute_target_name,
            compute_target_dict[_BatchAITarget._SUBSCRIPTION_ID_KEY],
            compute_target_dict[_BatchAITarget._RESOURCE_GROUP_NAME_KEY],
            compute_target_dict[_BatchAITarget._CLUSTER_NAME_KEY],
            compute_target_dict.get(_BatchAITarget._WORKSPACE_NAME_KEY))
        return batchai_object
    else:
        raise ComputeTargetException(
            "Failed to create a compute target object from a dictionary. "
            "Either {}, {} or {} is missing in "
            "{}".format(_BatchAITarget._SUBSCRIPTION_ID_KEY,
                        _BatchAITarget._RESOURCE_GROUP_NAME_KEY,
                        _BatchAITarget._CLUSTER_NAME_KEY,
                        compute_target_dict))
def get_credentials(self):
    """Retrieve the credentials for the RemoteCompute target.

    :return: The credentials for the RemoteCompute target.
    :rtype: dict
    :raises azureml.exceptions.ComputeTargetException:
    """
    endpoint = self._mlc_endpoint + '/listKeys'
    headers = self._auth.get_authentication_header()
    ComputeTarget._add_request_tracking_headers(headers)
    params = {'api-version': MLC_WORKSPACE_API_VERSION}
    resp = ClientBase._execute_func(get_requests_session().post, endpoint, params=params, headers=headers)
    try:
        resp.raise_for_status()
    except requests.exceptions.HTTPError:
        raise ComputeTargetException('Received bad response from MLC:\n'
                                     'Response Code: {}\n'
                                     'Headers: {}\n'
                                     'Content: {}'.format(resp.status_code, resp.headers, resp.content))
    content = resp.content
    if isinstance(content, bytes):
        content = content.decode('utf-8')
    creds_content = json.loads(content)
    return creds_content
def detach(self):
    """Detach is not supported for a DsvmCompute object. Use :meth:`delete` instead.

    :raises: :class:`azureml.exceptions.ComputeTargetException`
    """
    raise ComputeTargetException('Detach is not supported for DSVM object. Try to use delete instead.')
def delete(self):
    """Delete is not supported for a KubernetesCompute object. Use :meth:`detach` instead.

    :raises: :class:`azureml.exceptions.ComputeTargetException`
    """
    raise ComputeTargetException('Delete is not supported for KubernetesCompute object. '
                                 'Try to use detach instead.')
def delete(self):
    """Delete is not supported for a Synapse object. Use :meth:`detach` instead.

    :raises: azureml.exceptions.ComputeTargetException
    """
    raise ComputeTargetException('Delete is not supported for Synapse object. Try to use detach instead.')
def validate_configuration(self):
    """Check that the specified configuration values are valid.

    Raises a :class:`azureml.exceptions.ComputeTargetException` if validation fails.

    :raises azureml.exceptions.ComputeTargetException:
    """
    if self.resource_id:
        # resource_id is provided, validate resource_id
        resource_parts = self.resource_id.split('/')
        if len(resource_parts) != 9:
            raise ComputeTargetException('Invalid resource_id provided: {}'.format(self.resource_id))
        resource_type = resource_parts[6]
        if resource_type != 'Microsoft.Batch':
            raise ComputeTargetException('Invalid resource_id provided, resource type {} does not match for '
                                         'Batch'.format(resource_type))
        # make sure do not use other info
        if self.resource_group:
            raise ComputeTargetException('Since resource_id is provided, please do not provide resource_group.')
        if self.account_name:
            raise ComputeTargetException('Since resource_id is provided, please do not provide account_name.')
    elif self.resource_group or self.account_name:
        # resource_id is not provided, validate other info
        if not self.resource_group:
            raise ComputeTargetException('resource_group is not provided.')
        if not self.account_name:
            raise ComputeTargetException('account_name is not provided.')
    else:
        # neither resource_id nor other info is provided
        raise ComputeTargetException('Please provide resource_group and account_name for the Batch compute '
                                     'resource being attached. Or please provide resource_id for the resource '
                                     'being attached.')
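# --- Usage sketch (not part of the SDK source) --------------------------------
# Two ways of building a BatchCompute attach configuration that pass the
# validation above. The resource names are placeholders, and
# BatchCompute.attach_configuration is assumed to accept these keyword
# arguments (as in azureml-core).
from azureml.core.compute import BatchCompute

# Option 1: identify the Batch account by its full ARM resource id.
config_by_id = BatchCompute.attach_configuration(
    resource_id='/subscriptions/00000000-0000-0000-0000-000000000000'
                '/resourceGroups/my-rg/providers/Microsoft.Batch/batchAccounts/mybatchaccount')
config_by_id.validate_configuration()

# Option 2: identify it by resource group and account name instead.
config_by_name = BatchCompute.attach_configuration(
    resource_group='my-rg', account_name='mybatchaccount')
config_by_name.validate_configuration()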
def validate_configuration(self):
    """Check that the specified configuration values are valid.

    Raises a :class:`azureml.exceptions.ComputeTargetException` if validation fails.

    :raises: :class:`azureml.exceptions.ComputeTargetException`
    """
    if not self.linked_service or not isinstance(self.linked_service, LinkedService):
        raise ComputeTargetException('A valid linked_service object is required to attach compute.')

    if self.type != "SynapseSpark":
        raise ComputeTargetException('Only SynapseSpark type is supported now.')

    if not self.pool_name:
        raise ComputeTargetException('pool_name must be provided to attach synapse compute')
def validate_configuration(self):
    """Check that the specified configuration values are valid.

    Raises a :class:`azureml.exceptions.ComputeTargetException` if validation fails.

    :raises: :class:`azureml.exceptions.ComputeTargetException`
    """
    if self.resource_id:
        resource_parts = self.resource_id.split('/')
        if len(resource_parts) != 9:
            raise ComputeTargetException('Invalid resource_id provided: {}'.format(self.resource_id))
        resource_type = resource_parts[6]
        if resource_type != 'Microsoft.Kusto':
            raise ComputeTargetException('Invalid resource_id provided, resource type {} does not match for '
                                         'Kusto'.format(resource_type))
    if not self.resource_group or not self.workspace_name or not self.resource_id:
        raise ComputeTargetException('Please provide resource group, workspace name, and resource id')
    if not self.tenant_id or not self.application_key or not self.application_id:
        raise ComputeTargetException('Please provide tenant id, application id, and application key')
    if not self.kusto_connection_string:
        raise ComputeTargetException('Please provide kusto connection string for the target cluster')
def get_paginated_compute_results(payload, headers):
    if 'value' not in payload:
        raise ComputeTargetException('Error, invalid paginated response payload, missing "value":\n'
                                     '{}'.format(payload))
    items = payload['value']
    while 'nextLink' in payload:
        next_link = payload['nextLink']

        try:
            resp = ClientBase._execute_func(get_requests_session().get, next_link, headers=headers)
        except requests.Timeout:
            print('Error, request to Machine Learning Compute timed out. Returning with items found so far')
            return items
        if resp.status_code == 200:
            content = resp.content
            if isinstance(content, bytes):
                content = content.decode('utf-8')
            payload = json.loads(content)
        else:
            raise ComputeTargetException('Received bad response from Machine Learning Compute while retrieving '
                                         'paginated results:\n'
                                         'Response Code: {}\n'
                                         'Headers: {}\n'
                                         'Content: {}'.format(resp.status_code, resp.headers, resp.content))
        if 'value' not in payload:
            raise ComputeTargetException('Error, invalid paginated response payload, missing "value":\n'
                                         '{}'.format(payload))
        items += payload['value']
    return items
def _attach(workspace, name, attach_payload, target_class):
    """Attach implementation method.

    :param workspace:
    :type workspace: azureml.core.Workspace
    :param name:
    :type name: str
    :param attach_payload:
    :type attach_payload: dict
    :param target_class:
    :type target_class:
    :return:
    :rtype:
    """
    attach_payload['location'] = workspace.location
    endpoint = ComputeTarget._get_compute_endpoint(workspace, name)
    headers = {'Content-Type': 'application/json'}
    headers.update(workspace._auth.get_authentication_header())
    ComputeTarget._add_request_tracking_headers(headers)
    params = {'api-version': MLC_WORKSPACE_API_VERSION}
    resp = ClientBase._execute_func(get_requests_session().put, endpoint, params=params, headers=headers,
                                    json=attach_payload)
    try:
        resp.raise_for_status()
    except requests.exceptions.HTTPError:
        raise ComputeTargetException('Received bad response from Resource Provider:\n'
                                     'Response Code: {}\n'
                                     'Headers: {}\n'
                                     'Content: {}'.format(resp.status_code, resp.headers, resp.content))
    if 'Azure-AsyncOperation' not in resp.headers:
        raise ComputeTargetException('Error, missing operation location from resp headers:\n'
                                     'Response Code: {}\n'
                                     'Headers: {}\n'
                                     'Content: {}'.format(resp.status_code, resp.headers, resp.content))
    compute_target = target_class(workspace, name)
    compute_target._operation_endpoint = resp.headers['Azure-AsyncOperation']
    return compute_target
def _build_attach_payload(resource_id):
    """Build attach payload.

    :param resource_id: Resource id of the synapse spark pool.
    :type resource_id: str
    :return: Payload for the attach compute API call.
    :rtype: dict
    """
    if not resource_id:
        raise ComputeTargetException('Error, missing resource_id.')

    json_payload = copy.deepcopy(synapse_compute_template)
    json_payload['properties']['resourceId'] = resource_id
    return json_payload
def attach(workspace, name, username, address, ssh_port=22, password='', private_key_file='',
           private_key_passphrase=''):  # pragma: no cover
    """DEPRECATED. Use the ``attach_configuration`` method instead.

    Associate an existing remote compute resource with the provided workspace.

    :param workspace: The workspace object to associate the compute resource with.
    :type workspace: azureml.core.Workspace
    :param name: The name to associate with the compute resource inside the provided workspace. Does not
        have to match the name of the compute resource to be attached.
    :type name: str
    :param username: The username needed to access the resource.
    :type username: str
    :param address: The address of the resource to be attached.
    :type address: str
    :param ssh_port: The exposed port for the resource. Defaults to 22.
    :type ssh_port: int
    :param password: The password needed to access the resource.
    :type password: str
    :param private_key_file: Path to a file containing the private key for the resource.
    :type private_key_file: str
    :param private_key_passphrase: Private key phrase needed to access the resource.
    :type private_key_passphrase: str
    :return: A RemoteCompute object representation of the compute object.
    :rtype: azureml.core.compute.remote.RemoteCompute
    :raises azureml.exceptions.ComputeTargetException:
    """
    raise ComputeTargetException('This method is DEPRECATED. Please use the following code to attach a Remote '
                                 'compute resource.\n'
                                 '# Attach Remote compute\n'
                                 'attach_config = RemoteCompute.attach_configuration(address="ip_address",\n'
                                 '                                                   ssh_port=22,\n'
                                 '                                                   username="******",\n'
                                 '                                                   password=None, # If using '
                                 'ssh key\n'
                                 '                                                   private_key_file='
                                 '"path_to_a_file",\n'
                                 '                                                   private_key_passphrase='
                                 '"some_key_phrase")\n'
                                 'compute = ComputeTarget.attach(workspace, name, attach_config)')
def _get_workspace_key(experiment):
    cloud_execution_service_address = experiment.workspace.service_context._get_run_history_url()
    execution_service_details = ExecutionServiceAddress(cloud_execution_service_address)
    experiment_uri_path = experiment.workspace.service_context._get_experiment_scope(experiment.name)
    uri = execution_service_details.address
    uri += "/execution/v1.0" + experiment_uri_path + "/getorcreateworkspacesshkey"
    auth_header = experiment.workspace._auth_object.get_authentication_header()
    headers = {}
    headers.update(auth_header)
    response = requests.post(uri, headers=headers)
    if response.status_code >= 400:
        from azureml._base_sdk_common.common import get_http_exception_response_string
        raise ComputeTargetException(get_http_exception_response_string(response))
    return response.json()
def _get_operation_state(self):
    """Return operation state.

    :return:
    :rtype: (str, dict)
    """
    headers = self._auth.get_authentication_header()
    ComputeTarget._add_request_tracking_headers(headers)
    params = {}

    # API version should not be appended for operation status URLs.
    # This is a bug fix for older SDK and ARM breaking changes and
    # will append version only if the request URL doesn't have one.
    if 'api-version' not in self._operation_endpoint:
        params = {'api-version': MLC_WORKSPACE_API_VERSION}

    resp = ClientBase._execute_func(get_requests_session().get, self._operation_endpoint, params=params,
                                    headers=headers)
    try:
        resp.raise_for_status()
    except requests.exceptions.HTTPError:
        raise ComputeTargetException('Received bad response from Resource Provider:\n'
                                     'Response Code: {}\n'
                                     'Headers: {}\n'
                                     'Content: {}'.format(resp.status_code, resp.headers, resp.content))
    content = resp.content
    if isinstance(content, bytes):
        content = content.decode('utf-8')
    content = json.loads(content)
    status = content['status']
    error = content.get('error')

    # Prior to API version 2019-06-01 the 'error' element was double nested.
    # This change retains backwards compat for the 2018-11-19 version.
    if error is not None:
        innererror = error.get('error')
        if innererror is not None:
            error = innererror
    # ---------------------------------------------------------------------
    return status, error
def __new__(cls, workspace, name):
    """Return an instance of a compute target.

    ComputeTarget constructor is used to retrieve a cloud representation of a Compute object associated
    with the provided workspace. Will return an instance of a child class corresponding to the specific
    type of the retrieved Compute object.

    :param workspace: The workspace object containing the Compute object to retrieve.
    :type workspace: azureml.core.Workspace
    :param name: The name of the Compute object to retrieve.
    :type name: str
    :return: An instance of a child of :class:`azureml.core.ComputeTarget` corresponding to the
        specific type of the retrieved Compute object.
    :rtype: azureml.core.ComputeTarget
    :raises azureml.exceptions.ComputeTargetException:
    """
    if workspace and name:
        compute_payload = cls._get(workspace, name)
        if compute_payload:
            compute_type = compute_payload['properties']['computeType']
            is_attached = compute_payload['properties']['isAttachedCompute']
            for child in ComputeTarget.__subclasses__():
                if is_attached and compute_type == 'VirtualMachine' and child.__name__ == 'DsvmCompute':
                    # Cannot attach DsvmCompute
                    continue
                elif not is_attached and compute_type == 'VirtualMachine' and child.__name__ == 'RemoteCompute':
                    # Cannot create RemoteCompute
                    continue
                elif not is_attached and compute_type == 'Kubernetes' and child.__name__ == 'KubernetesCompute':
                    # Cannot create KubernetesCompute
                    continue
                elif compute_type == child._compute_type:
                    compute_target = super(ComputeTarget, cls).__new__(child)
                    compute_target._initialize(workspace, compute_payload)
                    return compute_target
        else:
            raise ComputeTargetException('ComputeTargetNotFound: Compute Target with name {} not found in '
                                         'provided workspace'.format(name))
    else:
        return super(ComputeTarget, cls).__new__(cls)
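# --- Usage sketch (not part of the SDK source) --------------------------------
# Retrieving an existing compute target by name. The workspace config and
# target name are placeholders; when the target does not exist,
# ComputeTargetException is raised as implemented above.
from azureml.core import Workspace
from azureml.core.compute import ComputeTarget
from azureml.exceptions import ComputeTargetException

ws = Workspace.from_config()  # assumes a local config.json
try:
    target = ComputeTarget(workspace=ws, name='gpu-cluster')
    print(type(target).__name__)  # e.g. AmlCompute, KubernetesCompute, ...
except ComputeTargetException:
    print('No compute target named gpu-cluster in this workspace.')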
async def _wait_for_target_state(self, target_state, progress_between=(30, 70), progress_in_seconds=240):
    """
    Wait for the compute instance to be in the target state.

    Emit events reporting progress, starting at `progress_between[0]` and ending at
    `progress_between[1]` over `progress_in_seconds` seconds. This is to give the user watching the
    progress bar the illusion of progress, even if we don't really know how far we have progressed.
    """
    started_at = datetime.datetime.now()
    while True:
        state, _ = self._poll_compute_setup()
        time_taken = datetime.datetime.now() - started_at
        min_progress, max_progress = progress_between
        progress = (min_progress + (max_progress - min_progress)
                    * (time_taken.total_seconds() / progress_in_seconds)) // 1
        progress = max_progress if progress > max_progress else progress
        if state.lower() == target_state:
            self.log.info(f"Compute in target state {target_state}.")
            self._add_event(f"Compute in target state '{target_state}'.", max_progress)
            break
        elif state.lower() in self._vm_bad_states:
            self._add_event(f"Compute instance in failed state: {state!r}.", min_progress)
            raise ComputeTargetException(f"Compute instance in failed state: {state!r}.")
        else:
            self._add_event(
                f"Compute in state '{state.lower()}' after {time_taken.total_seconds():.0f} seconds. "
                + f"Aiming for target state '{target_state}', this may take a short while",
                progress)
        await asyncio.sleep(5)
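# --- Illustration (not part of the source) ------------------------------------
# The progress value above interpolates linearly from min_progress to
# max_progress over progress_in_seconds and is then clamped. With the default
# progress_between=(30, 70) and progress_in_seconds=240:
def _progress(seconds_elapsed, progress_between=(30, 70), progress_in_seconds=240):
    min_progress, max_progress = progress_between
    progress = (min_progress + (max_progress - min_progress)
                * (seconds_elapsed / progress_in_seconds)) // 1
    return max_progress if progress > max_progress else progress

assert _progress(0) == 30     # start of the wait
assert _progress(120) == 50   # halfway through the nominal window
assert _progress(600) == 70   # clamped once the window is exceeded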