def load_result(filename):
    """Deserialize a Result object from a JSON file."""
    with open(filename, 'r') as file:
        payload = json.load(file)
    # The file may contain either one serialized Result or a list of them.
    if not isinstance(payload, list):
        return Result.from_dict(payload)
    return [Result.from_dict(entry) for entry in payload]
def _run(self, qobj, job_id=''):
    """Run a job.

    Args:
        qobj: the job description to simulate.
        job_id (str): unique id attached to the output result.

    Returns:
        Result: the formatted simulation result.

    Raises:
        AerError: if the simulator did not return a dict output.
    """
    # Start timer
    start = time.time()

    # Run simulation
    output = self._execute(qobj)

    # Validate output
    if not isinstance(output, dict):
        logger.error("%s: simulation failed.", self.name())
        if output:
            logger.error('Output: %s', output)
        raise AerError(
            "simulation terminated without returning valid output.")

    # Format results
    output["job_id"] = job_id
    output["date"] = datetime.datetime.now().isoformat()
    output["backend_name"] = self.name()
    output["backend_version"] = self.configuration().backend_version

    # Add execution time
    output["time_taken"] = time.time() - start

    # Display warning if simulation failed — keeps this backend consistent
    # with the sibling _run implementation that surfaces the status message.
    if not output.get("success", False):
        msg = "Simulation failed"
        if "status" in output:
            msg += f" and returned the following error message:\n{output['status']}"
        logger.warning(msg)

    return Result.from_dict(output)
def object_hook(self, obj: Any) -> Any:
    """Called to decode object.

    JSON-decoder hook: rebuilds Python/Qiskit objects from the
    ``{'__type__': ..., '__value__': ...}`` envelopes written by the
    matching encoder; any other dict is returned unchanged.
    """
    if '__type__' in obj:
        obj_type = obj['__type__']
        obj_val = obj['__value__']
        if obj_type == 'complex':
            # Stored as a [real, imag] pair.
            return obj_val[0] + 1j * obj_val[1]
        if obj_type == 'ndarray':
            return _decode_and_deserialize(obj_val, np.load)
        if obj_type == 'set':
            return set(obj_val)
        if obj_type == 'QuantumCircuit':
            # QPY load yields a list of circuits; a single circuit was stored.
            return _decode_and_deserialize(obj_val, qpy_serialization.load)[0]
        if obj_type == 'ParameterExpression':
            return _decode_and_deserialize(
                obj_val, qpy_serialization._read_parameter_expression, False)
        if obj_type == 'Instruction':
            return _decode_and_deserialize(
                obj_val, qpy_serialization._read_instruction, False)
        if obj_type == 'settings':
            # Re-instantiate a class from its module path and settings dict.
            return deserialize_from_settings(mod_name=obj['__module__'],
                                             class_name=obj['__class__'],
                                             settings=obj_val)
        if obj_type == 'Result':
            return Result.from_dict(obj_val)
        if obj_type == 'spmatrix':
            return _decode_and_deserialize(obj_val, scipy.sparse.load_npz, False)
        if obj_type == 'to_json':
            # Value was already JSON-serialized by the object itself.
            return obj_val
    return obj
def _run_job(self, job_id, qobj):
    """Run experiments in qobj.

    Args:
        job_id (str): unique id for the job.
        qobj (Qobj): job description

    Returns:
        Result: Result object
    """
    self._validate(qobj)
    started = time.time()
    experiment_results = [self.run_experiment(exp) for exp in qobj.experiments]
    elapsed = time.time() - started
    payload = {
        'backend_name': self.name(),
        'backend_version': self._configuration.backend_version,
        'qobj_id': qobj.qobj_id,
        'job_id': job_id,
        'results': experiment_results,
        'status': 'COMPLETED',
        'success': True,
        'time_taken': elapsed,
        'header': qobj.header.to_dict(),
    }
    return Result.from_dict(payload)
def test_zz_fitter(self):
    """
    Test ZZ fitter in Ignis characterization

    This test relies on static data stored in the file zz_data.json.
    To generate zz_data.json, run 'python generate_data.py zz'.
    """
    # Fixture lives next to this test module.
    with open(os.path.join(os.path.dirname(__file__),
                           'zz_data.json'), 'r') as handle:
        data = json.load(handle)

    fit = ZZFitter(Result.from_dict(data['backend_result']),
                   data['xdata'],
                   data['qubits'],
                   data['spectators'],
                   fit_p0=[0.5, data['omega'], 0, 0.5],
                   fit_bounds=([-0.5, 0, -np.pi, -0.5],
                               [1.5, 2 * data['omega'], np.pi, 1.5]))

    # Both series ('0' and '1') must be present and fitted.
    self.assertEqual(fit.series, ['0', '1'])
    self.assertEqual(sorted(list(fit.params.keys())), ['0', '1'])
    # One parameter vector (and error vector) per measured qubit.
    num_of_qubits = len(data['qubits'])
    self.assertEqual(len(fit.params['0']), num_of_qubits)
    self.assertEqual(len(fit.params_err['0']), num_of_qubits)
    # Loose tolerances: the fit only needs to land near the stored value.
    self.assertTrue(
        np.isclose(fit.ZZ_rate(), data['zz'], rtol=0.3, atol=0.1))
def _retrieve_result(self) -> Result: """Retrieve the job result response. Returns: The job result. Raises: IBMQJobApiError: If there was some unexpected failure in the server. IBMQJobFailureError: If the job failed and partial result could not be retrieved. """ # pylint: disable=access-member-before-definition,attribute-defined-outside-init result_response = None if not self._result: # type: ignore[has-type] try: result_response = self._api.job_result( self.job_id(), self._use_object_storage) self._result = Result.from_dict(result_response) except (ModelValidationError, ApiError) as err: if self._status is JobStatus.ERROR: raise IBMQJobFailureError( 'Unable to retrieve job result. Job has failed. ' 'Use job.error_message() to get more details.') raise IBMQJobApiError(str(err)) finally: # In case partial results are returned or job failure, an error message is cached. if result_response: self._check_for_error_message(result_response) if self._status is JobStatus.ERROR and not self._result.results: raise IBMQJobFailureError( 'Unable to retrieve job result. Job has failed. ' 'Use job.error_message() to get more details.') return self._result
def test_t2star_fitter(self):
    """
    Test T2* fitter in Ignis characterization

    This test relies on static data stored in the file t2star_data.json.
    To generate t2star_data.json, run 'python generate_data.py t2star'.
    """
    # Fixture lives next to this test module.
    with open(os.path.join(os.path.dirname(__file__),
                           't2star_data.json'), 'r') as handle:
        data = json.load(handle)

    fit = T2StarFitter(
        Result.from_dict(data['backend_result']),
        data['xdata'],
        data['qubits'],
        fit_p0=[0.5, data['t2'], data['omega'], 0, 0.5],
        # Bounds pin omega within +/-0.02 of the generating frequency.
        fit_bounds=([-0.5, 0, data['omega'] - 0.02, -np.pi, -0.5],
                    [1.5, data['t2'] * 1.2, data['omega'] + 0.02, np.pi, 1.5]))

    # Single series expected for the T2* experiment.
    self.assertEqual(fit.series, ['0'])
    self.assertEqual(list(fit.params.keys()), ['0'])
    # One parameter vector (and error vector) per measured qubit.
    num_of_qubits = len(data['qubits'])
    self.assertEqual(len(fit.params['0']), num_of_qubits)
    self.assertEqual(len(fit.params_err['0']), num_of_qubits)
    # Loose tolerances: fitted params only need to be near generation values.
    for qubit in range(num_of_qubits):
        self.assertTrue(
            np.allclose(fit.params['0'][qubit],
                        [0.5, data['t2'], data['omega'], 0, 0.5],
                        rtol=0.3, atol=0.1))
def result(self):
    """Build a Result wrapping the generated DAX code for this job."""
    # Placeholder sample data; this job type carries code, not counts.
    outcome = {'samples': [0, 0]}
    experiment_results = [{
        'success': True,
        'shots': len(outcome['samples']),
        'dax_code': self.aqt_qobj,
        'data': {},
        'header': {
            'name': self.qobj.experiments[0].header.name
        }
    }]
    config = self._backend._configuration
    return Result.from_dict({
        'results': experiment_results,
        'backend_name': config.backend_name,
        'backend_version': config.backend_version,
        'qobj_id': self.qobj.qobj_id,
        'success': True,
        'job_id': self._job_id,
    })
def _run_job(self, job_id, qobj):
    """Run experiments in qobj.

    Args:
        job_id (str): unique id for the job.
        qobj (Qobj): job description

    Returns:
        Result: Result object
    """
    self._validate(qobj)
    tick = time.time()
    outcomes = [self.run_experiment(exp) for exp in qobj.experiments]
    tock = time.time()
    return Result.from_dict({
        "backend_name": self.name(),
        "backend_version": self._configuration.backend_version,
        "qobj_id": qobj.qobj_id,
        "job_id": job_id,
        "results": outcomes,
        "status": "COMPLETED",
        "success": True,
        "time_taken": (tock - tick),
        "header": qobj.header.to_dict(),
    })
def deserialize(file_path):
    """Deserialize the file behind the given path to a list of results.

    Each line of the file is expected to hold one result dict written with
    ``repr`` (parsed back with ``ast.literal_eval``).

    Args:
        file_path (str): path of the file to read.

    Returns:
        list: the deserialized ``Result`` objects, one per line.
    """
    results = []
    with codecs.open(file_path, 'r', 'utf-8') as file:
        for line in file:
            result = ast.literal_eval(line)
            # Iterate the experiment dicts directly instead of range(len(...)).
            for exp_result in result['results']:
                # Convert int back to enum type.
                exp_result['meas_level'] = MeasLevel(exp_result['meas_level'])
                # Convert a serialized statevector back to np.array type.
                data = exp_result['data']
                if 'statevector' in data:
                    data['statevector'] = np.array(
                        ast.literal_eval(data['statevector']))
            results.append(Result.from_dict(result))
    return results
def _run(self, qobj, job_id=''):
    """Run a job and return its formatted Result.

    Raises:
        AerError: when the simulator output is not a dict.
    """
    begin = time.time()
    output = self._execute(qobj)

    # The simulator must hand back a dict; anything else means it crashed
    # before producing a result payload.
    if not isinstance(output, dict):
        logger.error("%s: simulation failed.", self.name())
        if output:
            logger.error('Output: %s', output)
        raise AerError(
            "simulation terminated without returning valid output.")

    # Attach the job/backend metadata expected by Result.from_dict.
    output["job_id"] = job_id
    output["date"] = datetime.datetime.now().isoformat()
    output["backend_name"] = self.name()
    output["backend_version"] = self.configuration().backend_version
    output["time_taken"] = time.time() - begin

    # Surface a failed simulation's status message as a warning.
    if not output.get("success", False):
        msg = "Simulation failed"
        if "status" in output:
            msg += f" and returned the following error message:\n{output['status']}"
        logger.warning(msg)

    return Result.from_dict(output)
def run_job(self, job_id, qobj):
    """Main dummy run loop.

    Sleeps for ``self.time_alive`` seconds, then returns an empty but
    well-formed COMPLETED result.

    Args:
        job_id (str): unique id attached to the returned result.
        qobj (Qobj): job description (used only for its ``qobj_id``).

    Returns:
        Result: an empty successful result.
    """
    time.sleep(self.time_alive)
    # The previous payload used the key 'result' (not 'results') and omitted
    # the success/backend fields Result.from_dict requires, so deserializing
    # it fails. Mirror the schema used by the other dummy backend here.
    return Result.from_dict({
        'job_id': job_id,
        'backend_name': self.name(),
        'backend_version': self.configuration().backend_version,
        'qobj_id': qobj.qobj_id,
        'results': [],
        'status': 'COMPLETED',
        'success': True
    })
def result(self):
    """Wait for the remote job and convert its samples into a Result."""
    remote = self._wait_for_result()
    experiment = {
        'success': True,
        'shots': len(remote['samples']),
        'data': {
            'counts': self._format_counts(remote['samples'])
        },
        'header': {
            'memory_slots': self.qobj.config.memory_slots,
            'name': self.qobj.experiments[0].header.name
        }
    }
    config = self._backend._configuration
    return Result.from_dict({
        'results': [experiment],
        'backend_name': config.backend_name,
        'backend_version': config.backend_version,
        'qobj_id': self.qobj.qobj_id,
        'success': True,
        'job_id': self._job_id,
    })
def __calc_result(index: int, agg_info: Dict[Any, Any], result: Result) -> Result:
    """Extract one original circuit's result from an aggregated Result.

    Args:
        index (int): the index of the result
        agg_info (Dict[Any, Any]): aggregation information as Dict
        result (Result): Result of the aggregated QuantumCircuit

    Raises:
        Exception: Multiple results are stored in one Result object

    Returns:
        Result: the initial result for the given index
    """
    result_dict = result.to_dict()
    result_dict_copy = copy.deepcopy(result_dict)
    # Qubit window this sub-circuit occupies inside the aggregated register.
    qubits_start = agg_info["circuits"][index]["qubits"]["start"]
    qubits_stop = agg_info["circuits"][index]["qubits"]["stop"]
    n_qubits = qubits_stop - qubits_start
    circ_size = agg_info["total_qubits"]  # NOTE(review): unused below
    reg_mapping = agg_info["circuits"][index]["reg_mapping"]
    data = result.data()["counts"]
    counts = {}
    # Mask selecting the n_qubits bits belonging to this sub-circuit.
    bit_mask = sum([2**i for i in range(n_qubits)])
    for state in data:
        # Hex key -> int, shift this circuit's bits down, mask out the rest.
        state_int = int(state, 16)
        state_int = state_int >> qubits_start
        state_int = state_int & bit_mask
        state_hex = hex(state_int)
        count = data[state]
        # Several aggregated states can reduce to the same local state.
        if state_hex in counts:
            counts[state_hex] += count
        else:
            counts[state_hex] = count
    if len(result_dict["results"]) == 1:
        result_dict_copy["results"][0]["data"]["counts"] = counts
        header = result_dict["results"][0]["header"]
    else:
        raise Exception("Result length not 1")
    # Restore the original (pre-aggregation) register labels/sizes.
    clbit_labels = __relabel(reg_mapping, header, "clbit_labels")
    qubit_labels = __relabel(reg_mapping, header, "qubit_labels")
    creg_sizes = __relabel(reg_mapping, header, "creg_sizes")
    qreg_sizes = __relabel(reg_mapping, header, "qreg_sizes")
    result_dict_copy["results"][0]["header"]["clbit_labels"] = clbit_labels
    result_dict_copy["results"][0]["header"]["creg_sizes"] = creg_sizes
    result_dict_copy["results"][0]["header"]["memory_slots"] = n_qubits
    result_dict_copy["results"][0]["header"]["name"] = agg_info["circuits"][index]["name"]
    # Qubit labels/sizes are only rewritten when the mapping produced any.
    if len(qubit_labels) > 0:
        result_dict_copy["results"][0]["header"]["qubit_labels"] = qubit_labels
    if len(qreg_sizes) > 0:
        result_dict_copy["results"][0]["header"]["qreg_sizes"] = qreg_sizes
    result_dict_copy["results"][0]["header"]["n_qubits"] = n_qubits
    return Result.from_dict(result_dict_copy)
def _get_job_result(self, circ_count, has_metadata=False):
    """Return a job result with random counts."""
    experiment_template = {"shots": 1024, "success": True, "data": {}}
    experiments = []
    for _ in range(circ_count):
        ones = randrange(1024)
        experiment = copy.copy(experiment_template)
        # Split 1024 shots randomly between the two observed states.
        experiment["data"] = {
            "counts": {
                "0x0": ones,
                "0x3": 1024 - ones
            }
        }
        if has_metadata:
            experiment["header"] = {"metadata": {"meas_basis": "pauli"}}
        experiments.append(experiment)
    return Result.from_dict({
        "backend_name": self.backend.name(),
        "backend_version": "1.1.1",
        "qobj_id": "1234",
        "job_id": "some_job_id",
        "success": True,
        "results": experiments,
    })
def _set_result(self, raw_data: Optional[Dict]) -> None:
    """Set the job result.

    Args:
        raw_data: Raw result data.

    Raises:
        IBMQJobInvalidStateError: If result is in an unsupported format.
        IBMQJobApiError: If an unexpected error occurred when communicating
            with the server.
    """
    if raw_data is None:
        self._result = None
        return
    # Record which client produced the payload before decoding it.
    raw_data['client_version'] = self.client_version
    # decode_result presumably mutates raw_data in place (its return value
    # is unused here) — verify against its definition.
    decode_result(raw_data)
    try:
        self._result = Result.from_dict(raw_data)
    except (KeyError, TypeError) as err:
        # No job kind means the payload shape is unknown to this client
        # version, rather than a transient server problem.
        if not self._kind:
            raise IBMQJobInvalidStateError(
                'Unable to retrieve result for job {}. Job result '
                'is in an unsupported format.'.format(
                    self.job_id())) from err
        raise IBMQJobApiError('Unable to retrieve result for '
                              'job {}: {}'.format(self.job_id(), str(err))) from err
def _set_result(self, raw_data: Optional[Dict]) -> None:
    """Set the job result.

    Args:
        raw_data: Raw result data.

    Raises:
        IBMQJobInvalidStateError: If result is in an unsupported format.
        IBMQJobApiError: If an unexpected error occurred when communicating
            with the server.
    """
    if raw_data is None:
        self._result = None
        return
    raw_data['client_version'] = self.client_version
    # TODO Stop checking Terra version when it's released.
    from qiskit.version import __version__ as terra_version
    # Compare numeric version components: a plain string comparison is wrong
    # for multi-digit parts (e.g. '0.100.0' < '0.15.0' lexicographically).
    terra_parts = tuple(int(part) for part in terra_version.split('.')[:2])
    if terra_parts >= (0, 15):
        decode_result(raw_data)
    try:
        self._result = Result.from_dict(raw_data)
        # Present result timestamps in the caller's local timezone.
        if hasattr(self._result, 'date'):
            self._result.date = utc_to_local(self._result.date)
    except (KeyError, TypeError) as err:
        # No job kind means the payload shape is unknown to this client
        # version, rather than a transient server problem.
        if not self._kind:
            raise IBMQJobInvalidStateError(
                'Unable to retrieve result for job {}. Job result '
                'is in an unsupported format.'.format(
                    self.job_id())) from err
        raise IBMQJobApiError('Unable to retrieve result for '
                              'job {}: {}'.format(self.job_id(), str(err))) from err
def __init__(
        self,
        backend: 'ibmqbackend.IBMQBackend',
        api: AccountClient,
        job_id: str,
        creation_date: str,
        status: str,
        kind: Optional[str] = None,
        name: Optional[str] = None,
        time_per_step: Optional[dict] = None,
        result: Optional[dict] = None,
        qobj: Optional[Union[dict, QasmQobj, PulseQobj]] = None,
        error: Optional[dict] = None,
        tags: Optional[List[str]] = None,
        run_mode: Optional[str] = None,
        **kwargs: Any
) -> None:
    """IBMQJob constructor.

    Args:
        backend: The backend instance used to run this job.
        api: Object for connecting to the server.
        job_id: Job ID.
        creation_date: Job creation date.
        status: Job status returned by the server.
        kind: Job type.
        name: Job name.
        time_per_step: Time spent for each processing step.
        result: Job result.
        qobj: Qobj for this job.
        error: Job error.
        tags: Job tags.
        run_mode: Scheduling mode the job runs in.
        kwargs: Additional job attributes.
    """
    self._backend = backend
    self._api = api
    self._job_id = job_id
    # The server sends ISO-8601 creation timestamps.
    self._creation_date = dateutil.parser.isoparse(creation_date)
    self._api_status = status
    self._kind = ApiJobKind(kind) if kind else None
    self._name = name
    self._time_per_step = time_per_step
    # Eagerly deserialize the result payload when the server included one.
    self._result = Result.from_dict(result) if result else None
    if isinstance(qobj, dict):
        qobj = dict_to_qobj(qobj)
    self._qobj = qobj
    self._error = error
    self._tags = tags or []
    self._run_mode = run_mode
    self._status, self._queue_info = \
        self._get_status_position(status, kwargs.pop('info_queue', None))
    # Only QOBJECT_STORAGE jobs exchange payloads via object storage.
    self._use_object_storage = (self._kind == ApiJobKind.QOBJECT_STORAGE)

    # Unknown server attributes become plain namespace attributes.
    SimpleNamespace.__init__(self, **kwargs)
    BaseJob.__init__(self, self.backend(), self.job_id())

    # Properties used for caching.
    self._cancelled = False
    self._job_error_msg = None  # type: Optional[str]
def _process_results(self):
    """Convert Honeywell job result to qiskit.Result"""
    results = []
    self._status = JobStatus.DONE
    for i, res_resp in enumerate(self._experiment_results):
        status = res_resp.get('status', 'failed')
        if status == 'failed':
            # One failed experiment marks the whole job as errored.
            self._status = JobStatus.ERROR
        res = res_resp['results']
        # res presumably maps register names to per-shot bit sequences:
        # zip(*values) regroups bits per shot, join builds one bitstring,
        # and int(..., 2)/hex yields Qiskit-style hex count keys.
        # TODO(review): confirm the response schema against the API.
        counts = dict(Counter(hex(int("".join(r), 2)) for r in [*zip(*list(res.values()))]))
        experiment_result = {
            # Shots default to 1 when no qobj payload was captured.
            'shots': self._qobj_payload.get('config', {}).get('shots', 1),
            'success': ApiJobStatus(status) is ApiJobStatus.COMPLETED,
            'data': {'counts': counts},
            # Per-experiment headers come from the submitted qobj payload.
            'header': self._qobj_payload[
                'experiments'][i]['header'] if self._qobj_payload else {},
            'job_id': self._job_ids[i]
        }
        results.append(experiment_result)
    result = {
        'success': self._status is JobStatus.DONE,
        'job_id': self._job_id,
        'results': results,
        'backend_name': self._backend.name(),
        'backend_version': self._backend.status().backend_version,
        # The job id doubles as the qobj id in this provider.
        'qobj_id': self._job_id
    }
    return Result.from_dict(result)
def _format_results(self, job_id, output, time_taken):
    """Construct Result object from simulator output."""
    # Merge the job/backend metadata Result.from_dict expects.
    output.update({
        "job_id": job_id,
        "date": datetime.datetime.now().isoformat(),
        "backend_name": self.name(),
        "backend_version": self.configuration().backend_version,
        "time_taken": time_taken,
    })
    return Result.from_dict(output)
def _load(file_path: str) -> List[Result]:
    """Load a list of serialized Result dicts from a JSON file."""
    with open(file_path, 'r') as fp:
        raw_entries = json.load(fp)
    return [Result.from_dict(entry) for entry in raw_entries]
def _run_job(self, job_id, qobj, backend_options, noise_model, validate):
    """Run a qobj job"""
    started = time.time()
    # Optional schema/backend validation before launching the simulation.
    if validate:
        validate_qobj_against_schema(qobj)
        self._validate(qobj, backend_options, noise_model)
    run_input = self._format_qobj(qobj, backend_options, noise_model)
    output = self._controller(run_input)
    elapsed = time.time() - started
    return Result.from_dict(self._format_results(job_id, output, elapsed))
def run(self, run_input, **options):
    """Return a FakeJob carrying an empty successful Result."""
    payload = {
        "backend_name": "Dummmy backend",
        "backend_version": "0",
        "qobj_id": uuid.uuid4().hex,
        "job_id": uuid.uuid4().hex,
        "success": True,
        "results": [],
    }
    return FakeJob(backend=self, result=Result.from_dict(payload))
def result(self, timeout=None, wait=5):
    """Return the result of the job.

    Note:
        Some IBMQ job results can be read only once. A second attempt to
        query the API for the job will fail, as the job is "consumed".

        The first call to this method in an ``IBMQJob`` instance will query
        the API and consume the job if it finished successfully (otherwise
        it will raise a ``JobError`` exception without consuming the job).
        Subsequent calls to that instance's method will also return the
        results, since they are cached. However, attempting to retrieve the
        results again in another instance or session might fail due to the
        job having been consumed.

    Args:
        timeout (float): number of seconds to wait for job
        wait (int): time between queries to IBM Q server

    Returns:
        qiskit.Result: Result object

    Raises:
        JobError: if attempted to recover a result on a failed job.
    """
    self._wait_for_completion(timeout=timeout, wait=wait)
    status = self.status()
    if status is not JobStatus.DONE:
        raise JobError('Invalid job state. The job should be DONE but '
                       'it is {}'.format(str(status)))

    # Only hit the API when no result has been cached on this instance.
    if not self._result:
        if self._use_object_storage:
            # Retrieve the results via object storage.
            result_response = self._api.job_result_object_storage(
                self._job_id)
            self._result = Result.from_dict(result_response)
        else:
            # Fall back to the job endpoint's embedded result payload.
            job_response = self._get_job()
            self._result = Result.from_dict(job_response['qObjectResult'])

    return self._result
def run(self, run_input, **options):
    """Run the restless backend.

    Simulates restless (no qubit reset between shots) execution: shots
    are interleaved across the input circuits and each outcome depends
    on the outcome of the immediately preceding shot.
    """
    self.options.update_options(**options)
    shots = self.options.get("shots")
    meas_level = self.options.get("meas_level")

    result = {
        "backend_name": f"{self.__class__.__name__}",
        "backend_version": "0",
        "qobj_id": 0,
        "job_id": 0,
        "success": True,
        "results": [],
    }

    # Pre-compute outcome probabilities conditioned on the previous shot.
    self._compute_outcome_probabilities(run_input)

    if run_input[0].num_qubits != 2:
        raise DataProcessorError(f"{self.__class__.__name__} is a two qubit mock device.")

    prev_outcome, state_strings = "00", self._get_state_strings(2)

    # Setup the list of dicts where each dict corresponds to a circuit.
    sorted_memory = [{"memory": [], "metadata": circ.metadata} for circ in run_input]

    for _ in range(shots):
        for circ_idx, _ in enumerate(run_input):
            probs = self._precomputed_probabilities[(circ_idx, prev_outcome)]

            # Generate the next shot dependent on the pre-computed probabilities.
            outcome = self._rng.choice(state_strings, p=probs)

            # Append the single shot to the memory of the corresponding circuit.
            sorted_memory[circ_idx]["memory"].append(hex(int(outcome, 2)))

            prev_outcome = outcome

    for idx, circ in enumerate(run_input):
        counts = {}
        # Tally the per-shot hex memory entries into bitstring counts.
        for key1, key2 in zip(["00", "01", "10", "11"], ["0x0", "0x1", "0x2", "0x3"]):
            counts[key1] = sorted_memory[idx]["memory"].count(key2)
        run_result = {
            "shots": shots,
            "success": True,
            "header": {"metadata": circ.metadata},
            "meas_level": meas_level,
            "data": {
                "counts": counts,
                "memory": sorted_memory[idx]["memory"],
            },
        }

        result["results"].append(run_result)

    return FakeJob(self, Result.from_dict(result))
def _format_results(output):
    """Format C++ simulator output for constructing Result"""
    for experiment in output["results"]:
        data = experiment.get("data", {})
        metadata = experiment.get("metadata", {})
        # Metadata records the declared type/subtype of each saved value.
        save_types = metadata.get("result_types", {})
        save_subtypes = metadata.get("result_subtypes", {})
        for key in data:
            if key in save_types:
                data[key] = format_save_type(
                    data[key], save_types[key], save_subtypes[key])
    return Result.from_dict(output)
def result(self, timeout=None, wait=5):
    """Get the result data of a circuit.

    Parameters:
        timeout (float): A timeout for trying to get the counts.
        wait (float): A specified wait time between counts retrival
                      attempts.

    Returns:
        Result: Result object.
    """
    sampled = self._wait_for_result(timeout, wait)
    # The header/qobj_id shape depends on whether a QasmQobj or a bare
    # circuit object was submitted.
    if isinstance(self.qobj, QasmQobj):
        header = {
            'memory_slots': self.qobj.config.memory_slots,
            'name': self.qobj.experiments[0].header.name
        }
        qobj_id = self.qobj.qobj_id
    else:
        header = {
            'memory_slots': self.qobj.num_clbits,
            'name': self.qobj.name
        }
        qobj_id = id(self.qobj)
    experiment = {
        'success': True,
        'shots': len(sampled['samples']),
        'data': {
            'counts': self._format_counts(sampled['samples'])
        },
        'header': header
    }
    return Result.from_dict({
        'results': [experiment],
        'backend_name': self._backend._configuration.backend_name,
        'backend_version': self._backend._configuration.backend_version,
        'qobj_id': qobj_id,
        'success': True,
        'job_id': self._job_id,
    })
def run_job(self, job_id, qobj):
    """Main dummy run loop"""
    time.sleep(self.time_alive)
    # Empty but schema-complete COMPLETED result.
    dummy_payload = {
        'job_id': job_id,
        'backend_name': self.name(),
        'backend_version': self.configuration().backend_version,
        'qobj_id': qobj.qobj_id,
        'results': [],
        'status': 'COMPLETED',
        'success': True
    }
    return Result.from_dict(dummy_payload)
def _format_results(self, job_id, results, time_taken, qobj_id):
    """Construct Result object from simulator output."""
    # Assemble result metadata in one literal rather than key-by-key.
    output = {
        'qobj_id': qobj_id,
        'results': results,
        'success': True,
        'job_id': job_id,
        'date': datetime.datetime.now().isoformat(),
        'backend_name': self.name(),
        'backend_version': self.configuration().backend_version,
        'time_taken': time_taken,
    }
    return Result.from_dict(output)
def _result_from_job_response(self, job_response):
    # type: (AcQuantumResultResponse) -> Result
    """Build a qiskit Result from the provider's job response."""
    backend = self.backend()  # type: BaseBackend
    config = backend.configuration()  # type: BackendConfiguration
    # Fetch the submitted experiment to recover the original qobj code.
    experiment = self._api.get_experiment(int(self.job_id()))  # type: AcQuantumExperiment
    result_details = {}
    job_results = job_response.get_results()
    if len(job_results) == 1:
        experiment_result = job_results[0]  # type: AcQuantumResult
        # Provider data presumably maps bitstrings to probabilities; convert
        # to Qiskit hex keys with approximate integer shot counts — confirm.
        counts = dict((hex(int(k, 2)), int(v * experiment_result.shots))
                      for k, v in experiment_result.data.items())
        self._qobj = Qobj.from_dict(json.loads(experiment.code))
        self._job_name = self._qobj.experiments[0].header.name
        success = experiment_result.exception is None
        result_details = {
            "status": self._status.name,
            "success": success,
            "name": self._job_name,
            "seed": experiment_result.seed,
            "shots": experiment_result.shots,
            "data": {
                "counts": counts
            },
            "start_time": experiment_result.start_time,
            "finish_time": experiment_result.finish_time,
            "header": self._qobj.experiments[0].header.as_dict()
        }
    from dateutil.parser import parser
    # NOTE(review): when there was not exactly one job result,
    # result_details stays empty and this lookup raises KeyError —
    # confirm whether that is the intended failure mode.
    date = parser().parse(result_details['finish_time'])
    result_dict = {
        'results': [result_details],
        'backend_name': config.backend_name,
        'backend_version': config.backend_version,
        'qobj_id': self._qobj.qobj_id,
        'job_id': str(self.job_id()),
        'success': len(job_results) == 1,
        'header': {
            "backend_name": config.backend_name
        },
        "date": date.isoformat()
    }
    result = Result.from_dict(result_dict)
    return result