def result(self, timeout=None, wait=5):
    """Return the result of the job.

    Note:
        Some IBMQ job results can be read only once. A second attempt to
        query the API for the job will fail, as the job is "consumed".

        The first call to this method in an ``IBMQJob`` instance will query
        the API and consume the job if it finished successfully (otherwise
        it will raise a ``JobError`` exception without consuming the job).
        Subsequent calls to that instance's method will also return the
        results, since they are cached. However, attempting to retrieve the
        results again in another instance or session might fail due to the
        job having been consumed.

    Args:
        timeout (float): number of seconds to wait for job
        wait (int): time between queries to IBM Q server

    Returns:
        qiskit.Result: Result object

    Raises:
        JobError: if attempted to recover a result on a failed job.
    """
    self._wait_for_completion(timeout=timeout, wait=wait)

    current_status = self.status()
    if current_status is not JobStatus.DONE:
        raise JobError('Invalid job state. The job should be DONE but '
                       'it is {}'.format(str(current_status)))

    # Results are cached after the first successful retrieval.
    if self._result:
        return self._result

    if self._use_object_storage:
        # Retrieve the results via object storage.
        response = self._api.job_result_object_storage(self._job_id)
        self._result = Result.from_dict(response)
    else:
        # Results are embedded in the job payload itself.
        job_payload = self._get_job()
        self._result = Result.from_dict(job_payload['qObjectResult'])
    return self._result
def submit(self, job_name=None):
    """Submit job to IBM-Q.

    Args:
        job_name (str): custom name to be assigned to the job.

    Events:
        ibmq.job.start: The job has started.

    Raises:
        JobError: If we have already submitted the job.
    """
    # TODO: Validation against the schema should be done here and not
    # during initialization. Once done, we should document that the method
    # can raise QobjValidationError.
    already_submitted = self._future is not None or self._job_id is not None
    if already_submitted:
        raise JobError("We have already submitted the job!")

    self._future = self._executor.submit(self._submit_callback, job_name)
    # Announce the submission to any registered event subscribers.
    Publisher().publish("ibmq.job.start", self)
def submit(self):
    """Split the stored qobj into per-experiment payloads and submit each
    one asynchronously to the Rigetti backend.

    Raises:
        JobError: If the job has already been submitted.
    """
    # Idiomatic truthiness check instead of ``len(...) > 0``.
    if self._futures:
        raise JobError("We have already submitted the job!")
    self._t_submit = time.time()
    logger.debug("submitting...")
    all_exps = self._qobj_dict
    shots = all_exps['config']['shots']
    # Fan out one future per experiment; each future gets a deep copy of
    # the full qobj dict restricted to that single experiment, so the
    # workers never share mutable state.
    for exp in all_exps["experiments"]:
        single_exp = copy.deepcopy(all_exps)
        single_exp["experiments"] = [exp]
        self._futures.append(
            self._executor.submit(_run_with_rigetti_static,
                                  single_exp,
                                  shots,
                                  self._lattice_name,
                                  self._as_qvm,
                                  self._job_id))
def _wait_for_result(self, timeout=None, wait=5):
    """Poll the backend API until the job finishes; return the raw payload.

    Args:
        timeout (float): max seconds to wait before giving up.
        wait (int): seconds to sleep between polls.

    Returns:
        dict: the final API response for the job.

    Raises:
        JobTimeoutError: if *timeout* elapses before completion.
        JobError: if the API reports an error status.
    """
    started_at = time.time()
    while True:
        if timeout and time.time() - started_at >= timeout:
            raise JobTimeoutError('Timed out waiting for result')
        payload = {
            'id': self._job_id,
            'access_token': self._backend._provider.access_token,
        }
        result = requests.put(self._backend.url, data=payload).json()
        status = result['status']
        if status == 'error':
            raise JobError('API returned error:\n' + str(result))
        if status == 'finished':
            return result
        time.sleep(wait)
def cancel(self):
    """Attempt to cancel a job.

    Returns:
        bool: True if job can be cancelled, else False. Currently this is
            only possible on commercial systems.

    Raises:
        JobError: if there was some unexpected failure in the server
    """
    hub = self._api.config.get('hub', None)
    group = self._api.config.get('group', None)
    project = self._api.config.get('project', None)

    try:
        response = self._api.cancel_job(self._job_id, hub, group, project)
        # The server signals failure by including an 'error' key.
        self._cancelled = 'error' not in response
        return self._cancelled
    except ApiError as error:
        self._cancelled = False
        # Chain the original ApiError so the root cause is preserved.
        raise JobError('Error cancelling job: %s' % error.usr_msg) from error
def submit(self) -> None:
    """Submit job to IBM-Q.

    Note:
        This function is deprecated, please use ``IBMQBackend.run()`` to
        submit a job.

    Events:
        The job has started.

    Raises:
        JobError: If an error occurred during job submit.
    """
    if self.job_id() is not None:
        raise JobError("We have already submitted the job!")

    deprecation_msg = ("job.submit() is deprecated. Please use "
                       "IBMQBackend.run() to submit a job.")
    # stacklevel=2 points the warning at the caller, not at this frame.
    warnings.warn(deprecation_msg, DeprecationWarning, stacklevel=2)
def _accumulate_experiment_results(self, results: List[Result]):
    """Merge same-id experiments of each ``Result`` into single entries.

    Walks the experiments of every ``Result`` in *results* and, whenever
    consecutive experiments carry the same ``id`` in their header
    ``metadata``, folds them together via ``self._merge_exp`` (combining
    counts/shots for a noise simulation split across experiments).

    Args:
        results: Result list whose experiments will be combined.

    Returns:
        list: Result list

    Raises:
        JobError: If results do not have count or memory data
    """
    results_list = []
    for each_result in results:
        _merge_results = []
        # Tracks the metadata id of the current merge run and the
        # experiment accumulated for it so far.
        master_id = None
        master_result = None
        for _result in each_result.results:
            # Merging needs counts or memory data to combine.
            if not hasattr(_result.data, "counts") and not hasattr(_result.data, "memory"):
                raise JobError(
                    "Results do not include counts or memory data")
            meta_data = getattr(_result.header, "metadata", None)
            if meta_data and "id" in meta_data:
                _id = meta_data["id"]
                if master_id == _id:
                    # Same id as the previous experiment: fold into the
                    # running merge. NOTE(review): the merged result is
                    # appended on every iteration, so earlier (pre-merge)
                    # entries for this id also remain in the list —
                    # confirm this is intended.
                    master_result = self._merge_exp(master_result, _result)
                else:
                    # New id: start a fresh merge run.
                    master_id = _id
                    master_result = _result
                _merge_results.append(master_result)
            else:
                # No metadata id: keep the experiment untouched.
                _merge_results.append(_result)
        each_result.results = _merge_results
        results_list.append(each_result)
    return results_list
def refresh(self) -> None:
    """Obtain the latest job information from the API.

    Re-fetches the job from the server, validates the response against
    ``JobResponseSchema``, and rebinds the resulting attributes onto this
    instance via ``BaseModel.__init__``.

    Raises:
        JobError: if the server response does not validate against the
            schema.
    """
    with api_to_job_error():
        api_response = self._api.job_get(self.job_id())

    # Temporarily make the schema deserialize into a plain dict instead of
    # its usual model class; restored unconditionally in ``finally``.
    saved_model_cls = JobResponseSchema.model_cls
    try:
        # Load response into a dictionary
        JobResponseSchema.model_cls = dict
        data, _ = self.schema.load(api_response)
        BaseModel.__init__(self, **data)

        # Model attributes.
        self._use_object_storage = (
            self.kind == ApiJobKind.QOBJECT_STORAGE)
        self._update_status_position(data.pop('_status'),
                                     data.pop('infoQueue', None))
    except ValidationError as ex:
        raise JobError(
            "Unexpected return value received from the server.") from ex
    finally:
        JobResponseSchema.model_cls = saved_model_cls
def status(self):
    """Gets the status of the job by querying the Python's future

    Returns:
        JobStatus: The current JobStatus

    Raises:
        JobError: If the future is in unexpected state
        concurrent.futures.TimeoutError: if timeout occurred.
    """
    # The order is important here
    future = self._future
    if future.running():
        return JobStatus.RUNNING
    if future.cancelled():
        return JobStatus.CANCELLED
    if future.done():
        # An exception recorded on the future means the job errored out.
        return JobStatus.DONE if future.exception() is None else JobStatus.ERROR
    # NOTE(review): peeks at the future's private _state — presumably the
    # only way to detect a not-yet-started future; confirm across versions.
    if future._state == 'PENDING':
        return JobStatus.QUEUED
    raise JobError('Unexpected behavior of {0}'.format(
        self.__class__.__name__))
def _wait_for_job(self, timeout=None, wait=5): """Blocks until the job is complete and returns the job content from the API, consuming it. Args: timeout (float): number of seconds to wait for job. wait (int): time between queries to IBM Q server. Return: dict: a dictionary with the contents of the job. Raises: JobError: if there is an error while requesting the results. """ self._wait_for_completion(timeout, wait) try: job_response = self._get_job() if not self._qobj_payload: self._qobj_payload = job_response.get('qObject', {}) except ApiError as api_err: raise JobError(str(api_err)) return job_response
def cancel(self):
    """Attempt to cancel a job.

    Note:
        This function waits for a job ID to become available if the job
        has been submitted but not yet queued.

    Returns:
        bool: True if job can be cancelled, else False. Note this operation
            might not be possible depending on the environment.

    Raises:
        JobError: if there was some unexpected failure in the server.
    """
    # Wait for the job ID to become available.
    self._wait_for_submission()
    try:
        response = self._api.cancel_job(self._job_id)
        # The API reports failure by including an 'error' key.
        self._cancelled = 'error' not in response
        return self._cancelled
    except ApiError as error:
        self._cancelled = False
        # Chain the ApiError so the original server failure is preserved.
        raise JobError('Error cancelling job: %s' % error.usr_msg) from error
def _get_job(self): if self._cancelled: raise JobError( 'Job result impossible to retrieve. The job was cancelled.') return self._api.circuit_job_get(self._job_id)
def _check_submitted(self): if self._future is None: raise JobError( "Job not submitted yet!. You have to .submit() first!")
def api_to_job_error() -> Generator[None, None, None]:
    """Convert an ApiError to a JobError.

    Generator body for a context manager (presumably wrapped with
    ``contextlib.contextmanager`` at the decoration site, which is not
    visible in this chunk): any ``ApiError`` raised inside the managed
    block is re-raised as a ``JobError``.

    Yields:
        None

    Raises:
        JobError: if an ``ApiError`` was raised inside the managed block.
    """
    try:
        yield
    except ApiError as api_err:
        # Chain the ApiError so the original cause is not lost.
        raise JobError(str(api_err)) from api_err
def _reorder_bits(job_data):
    """Temporary fix for ibmq backends.

    For every ran circuit, get reordering information from qobj and apply
    reordering on result: the measured bit string is permuted back into
    the classical-bit order the compiled circuit specified, truncated to
    the circuit's clbits, and split into space-separated creg groups.
    Counts that collapse onto the same key are summed (marginalized).
    Mutates ``job_data`` in place.

    Args:
        job_data (dict): dict with the bare contents of the API.get_job
            request.

    Raises:
        JobError: raised if the creg sizes don't add up in result header.
    """
    for circuit_result in job_data['qasms']:
        if 'metadata' in circuit_result:
            circ = circuit_result['metadata'].get('compiled_circuit')
        else:
            # Without metadata there is no reordering info; bail out for
            # the whole job rather than emit wrongly-ordered counts.
            logger.warning('result object missing metadata for reordering'
                           ' bits: bits may be out of order')
            return
        # device_qubit -> device_clbit (how it should have been)
        measure_dict = {op['qubits'][0]: op.get('clbits',
                                                op.get('memory'))[0]
                        for op in circ['operations']
                        if op['name'] == 'measure'}
        counts_dict_new = {}
        for item in circuit_result['data']['counts'].items():
            # fix clbit ordering to what it should have been
            bits = list(item[0])
            bits.reverse()  # lsb in 0th position
            count = item[1]
            # Placeholder 'x' marks clbits with no measurement mapping.
            reordered_bits = list('x' * len(bits))
            for device_clbit, bit in enumerate(bits):
                if device_clbit in measure_dict:
                    correct_device_clbit = measure_dict[device_clbit]
                    reordered_bits[correct_device_clbit] = bit
            reordered_bits.reverse()
            # only keep the clbits specified by circuit, not everything
            # on device
            num_clbits = circ['header'].get(
                'number_of_clbits',
                circ['header'].get('memory_slots'))
            compact_key = reordered_bits[-num_clbits:]
            # Unmeasured positions default to '0'.
            compact_key = "".join([b if b != 'x' else '0'
                                   for b in compact_key])
            # insert spaces to signify different classical registers
            cregs = circ['header'].get(
                'creg_sizes', circ['header'].get('clbit_labels'))
            if sum([creg[1] for creg in cregs]) != num_clbits:
                raise JobError("creg sizes don't add up in result header.")
            creg_begin_pos = []
            creg_end_pos = []
            acc = 0
            # Registers are laid out in reverse order in the key string.
            for creg in reversed(cregs):
                creg_size = creg[1]
                creg_begin_pos.append(acc)
                creg_end_pos.append(acc + creg_size)
                acc += creg_size
            compact_key = " ".join([compact_key[creg_begin_pos[i]:
                                                creg_end_pos[i]]
                                    for i in range(len(cregs))])
            # marginalize over unwanted measured qubits
            if compact_key not in counts_dict_new:
                counts_dict_new[compact_key] = count
            else:
                counts_dict_new[compact_key] += count
        circuit_result['data']['counts'] = counts_dict_new
def _wrapper(self, *args, **kwargs):
    """Forward the call to the wrapped ``func`` once the job is submitted."""
    # Guard: the wrapped call is only valid after .submit().
    if self._future is not None:
        return func(self, *args, **kwargs)
    raise JobError(
        "Job not submitted yet!. You have to .submit() first!")
def submit(self):
    """Submit the job to the backend executor for asynchronous execution.

    Raises:
        JobError: If the job has already been submitted.
    """
    if self._future is not None:
        raise JobError("The job has already been submitted!")
    # TODO(review): qobj validation is currently disabled; re-enable once
    # the schema check is wired in:
    # validate_qobj_against_schema(self._qobj)
    self._future = self._executor.submit(self._fn, self._job_id, self._qobj)