def _async_submit(self,
                  qobj: Qobj,
                  job_name: str,
                  backend: IBMQBackend,
                  submit_lock: Lock,
                  job_share_level: ApiJobShareLevel,
                  job_tags: Optional[List[str]] = None) -> None:
    """Run a Qobj asynchronously and populate instance attributes.

    Args:
        qobj: Qobj to run.
        job_name: Name of the job.
        backend: Backend to execute the experiments on.
        submit_lock: Lock used to synchronize job submission.
        job_share_level: Job share level.
        job_tags: Tags to be assigned to the job.
    """
    # pylint: disable=missing-raises-doc
    logger.debug("Job %s waiting for submit lock.", job_name)
    submit_lock.acquire()
    logger.debug("Job %s got the submit lock.", job_name)
    try:
        while self.job is None:
            try:
                self.job = backend.run(
                    qobj=qobj,
                    job_name=job_name,
                    job_share_level=job_share_level.value,
                    job_tags=job_tags)
            except IBMQBackendApiError as api_err:
                if 'Error code: 3458' in str(api_err):
                    final_states = [
                        state.value for state in API_JOB_FINAL_STATES]
                    oldest_running = backend.jobs(
                        limit=1, descending=False,
                        db_filter={"status": {"nin": final_states}})
                    if oldest_running:
                        oldest_running = oldest_running[0]
                        logger.warning(
                            "Job limit reached, waiting for job %s to finish "
                            "before submitting the next one.",
                            oldest_running.job_id())
                        oldest_running.wait_for_final_state(timeout=300)
                else:
                    raise
    except Exception as err:  # pylint: disable=broad-except
        warnings.warn(
            "Unable to submit job for experiments {}-{}: {}".format(
                self.start_index, self.end_index, err))
        self.submit_error = err
    finally:
        submit_lock.release()
        logger.debug("Job %s released the submit lock.", job_name)
def _async_submit(self,
                  qobj: Qobj,
                  job_name: str,
                  backend: IBMQBackend,
                  job_share_level: ApiJobShareLevel,
                  job_tags: Optional[List[str]] = None) -> None:
    """Run a Qobj asynchronously and populate instance attributes.

    Args:
        qobj: Qobj to run.
        job_name: Name of the job.
        backend: Backend to execute the experiments on.
        job_share_level: Job share level.
        job_tags: Tags to be assigned to the job.
    """
    try:
        self.job = backend.run(qobj=qobj,
                               job_name=job_name,
                               job_share_level=job_share_level.value,
                               job_tags=job_tags)
    except Exception as err:  # pylint: disable=broad-except
        warnings.warn(
            "Unable to submit job for experiments {}-{}: {}".format(
                self.start_index, self.end_index, err))
        self.submit_error = err
def __init__(self, backend: IBMQBackend, tags: Optional[List[str]] = None):
    self._circuits = []
    self._backend = backend
    backend_configuration = backend.configuration()
    self._maximum_batch_job = backend_configuration.max_experiments
    self._maximum_shots = backend_configuration.max_shots
    self._circuits_results = []
    self._job_ids = []
    if tags is None:
        tags = []
    self._tags = tags
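# A minimal usage sketch for the constructor above. The enclosing class is not
# shown in this snippet, so "CircuitBatcher" is a hypothetical name; the
# provider setup uses the qiskit-ibmq-provider era API.
from qiskit import IBMQ

provider = IBMQ.load_account()
backend = provider.get_backend("ibmq_qasm_simulator")
batcher = CircuitBatcher(backend, tags=["session-1"])
# The constructor caches the backend limits later used for batching:
print(batcher._maximum_batch_job, batcher._maximum_shots)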
def prep_experiments(qc_list: List[QuantumCircuit],
                     backend: IBMQBackend,
                     physical_dist_list: List[int],
                     save_path,
                     output=False):
    """Prepare experiments for multiple quantum circuits, varying hardware usage."""
    # prepare a pandas DataFrame
    columns = [
        "Backend",
        "Physical distance",
        "Hardware Usage (%)",
        "Total Circuit Duration Time",
        "Quantum Circuits",
        "Scheduled Pulse",
    ]
    df = pd.DataFrame(columns=columns)

    # backend info
    num_hw_qubit = backend.configuration().num_qubits

    for physical_dist in physical_dist_list:
        transpiled, num_usage = dynamic_multiqc_compose(
            queued_qc=qc_list,
            backend=backend,
            routing_method="sabre",
            scheduling_method="alap",
            num_hw_dist=physical_dist,
            return_num_usage=True,
        )
        scheduled = [schedule(_tqc, backend=backend) for _tqc in transpiled]
        # average hardware usage over all but the last composition step
        usage = "{:.2%}".format(
            average([_usage / num_hw_qubit for _usage in num_usage[0:-1]]))
        tdt = sum([_sched.duration for _sched in scheduled])
        df = df.append(
            {
                "Backend": backend.name(),
                "Physical distance": physical_dist,
                "Hardware Usage (%)": usage,
                "Total Circuit Duration Time": tdt,
                "Quantum Circuits": transpiled,
                "Scheduled Pulse": scheduled,
            },
            ignore_index=True)

    # save the DataFrame as a pickle file
    pickle_dump(obj=df, path=save_path)

    if output:
        return df
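# A hedged usage sketch for prep_experiments. The device name is a placeholder,
# and dynamic_multiqc_compose / pickle_dump are project-local helpers assumed
# to be importable alongside this function.
from qiskit import IBMQ
from qiskit.circuit.random import random_circuit

provider = IBMQ.load_account()
backend = provider.get_backend("ibmq_toronto")  # placeholder device name
qcs = [random_circuit(num_qubits=3, depth=5, measure=True) for _ in range(4)]
df = prep_experiments(qc_list=qcs,
                      backend=backend,
                      physical_dist_list=[0, 1, 2],
                      save_path="./experiments.pkl",
                      output=True)
print(df[["Physical distance", "Hardware Usage (%)"]])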
def _async_submit(self,
                  qobj: Qobj,
                  job_name: str,
                  backend: IBMQBackend) -> None:
    """Run a Qobj asynchronously and populate instance attributes.

    Args:
        qobj: Qobj to run.
        job_name: Name of the job.
        backend: Backend to execute the experiments on.
    """
    try:
        self.job = backend.run(qobj=qobj, job_name=job_name)
    except Exception as err:  # pylint: disable=broad-except
        warnings.warn(
            "Unable to submit job for experiments {}-{}: {}".format(
                self.start_index, self.end_index, err))
        self.submit_error = err
def backend_to_dto(backend: IBMQBackend) -> Qpu:
    """Map an IBMQBackend onto a Qpu DTO, averaging its calibration data."""
    properties = backend.properties()
    status = backend.status()
    backend_name = status.backend_name
    backend_version = status.backend_version
    queue_size = status.pending_jobs
    max_shots = backend.configuration().max_shots
    number_of_qubits = backend.configuration().n_qubits
    qpu_id = generate_deterministic_uuid("ibmq.qpu", backend_name)
    last_updated_utc = datetime.utcnow().isoformat()

    if isinstance(backend, IBMQSimulator):
        # Simulators carry no calibration data; report zeroed averages.
        return Qpu(id=str(qpu_id),
                   name=backend_name,
                   version=backend_version,
                   last_updated=last_updated_utc,
                   last_calibrated="",
                   max_shots=max_shots,
                   queue_size=queue_size,
                   number_of_qubits=number_of_qubits,
                   avg_t1_time=0,
                   avg_t2_time=0,
                   avg_readout_error=0,
                   avg_single_qubit_gate_error=0,
                   avg_multi_qubit_gate_error=0,
                   avg_single_qubit_gate_time=0,
                   avg_multi_qubit_gate_time=0,
                   max_gate_time=0,
                   simulator=True)

    # Average the per-qubit T1, T2 and readout-error properties.
    number_of_qubits = len(properties.qubits)
    sum_t1 = 0
    sum_t2 = 0
    sum_readout_error = 0
    for qubit in properties.qubits:
        for prop in qubit:
            if prop.name == "T1":
                sum_t1 += prop.value
            if prop.name == "T2":
                sum_t2 += prop.value
            if prop.name == "readout_error":
                sum_readout_error += prop.value
    avg_t1 = sum_t1 / number_of_qubits
    avg_t2 = sum_t2 / number_of_qubits
    avg_readout_error = sum_readout_error / number_of_qubits

    # Accumulate gate errors and lengths, split by single- vs. two-qubit gates.
    # max_gate_time tracks the longest gate_length seen across both arities.
    sum_single_qubit_gate_error = 0
    sum_single_qubit_gate_time = 0
    single_qubit_gate_cnt = 0
    sum_multi_qubit_gate_error = 0
    sum_multi_qubit_gate_time = 0
    multi_qubit_gate_cnt = 0
    max_gate_time = 0
    for gate in properties.gates:
        if len(gate.qubits) == 1:
            for param in gate.parameters:
                if param.name == "gate_error":
                    sum_single_qubit_gate_error += param.value
                if param.name == "gate_length":
                    sum_single_qubit_gate_time += param.value
                    max_gate_time = max(max_gate_time, param.value)
            single_qubit_gate_cnt += 1
        if len(gate.qubits) == 2:
            for param in gate.parameters:
                if param.name == "gate_error":
                    sum_multi_qubit_gate_error += param.value
                if param.name == "gate_length":
                    sum_multi_qubit_gate_time += param.value
                    max_gate_time = max(max_gate_time, param.value)
            multi_qubit_gate_cnt += 1
    avg_single_qubit_gate_error = sum_single_qubit_gate_error / single_qubit_gate_cnt
    avg_single_qubit_gate_time = sum_single_qubit_gate_time / single_qubit_gate_cnt
    avg_multi_qubit_gate_error = 0
    if multi_qubit_gate_cnt != 0:
        avg_multi_qubit_gate_error = sum_multi_qubit_gate_error / multi_qubit_gate_cnt
    avg_multi_qubit_gate_time = 0
    if multi_qubit_gate_cnt != 0:
        avg_multi_qubit_gate_time = sum_multi_qubit_gate_time / multi_qubit_gate_cnt

    # Normalize the timezone-aware calibration timestamp to naive UTC.
    last_calibrated_with_timezone: datetime = properties.last_update_date
    last_calibrated_utc = datetime.utcfromtimestamp(
        last_calibrated_with_timezone.timestamp()).isoformat()

    return Qpu(id=str(qpu_id),
               name=backend_name,
               version=backend_version,
               last_updated=last_updated_utc,
               last_calibrated=last_calibrated_utc,
               max_shots=max_shots,
               queue_size=queue_size,
               number_of_qubits=number_of_qubits,
               avg_t1_time=avg_t1,
               avg_t2_time=avg_t2,
               avg_readout_error=avg_readout_error,
               avg_single_qubit_gate_error=avg_single_qubit_gate_error,
               avg_multi_qubit_gate_error=avg_multi_qubit_gate_error,
               avg_single_qubit_gate_time=avg_single_qubit_gate_time,
               avg_multi_qubit_gate_time=avg_multi_qubit_gate_time,
               max_gate_time=max_gate_time,
               simulator=False)
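# Illustrative call for backend_to_dto. Qpu and generate_deterministic_uuid are
# project-specific helpers used by the function above, and the device name is a
# placeholder.
provider = IBMQ.load_account()
qpu = backend_to_dto(provider.get_backend("ibmq_lima"))
print(qpu.name, qpu.number_of_qubits, qpu.avg_t1_time, qpu.simulator)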
def _async_submit(self,
                  circuits: Union[QuantumCircuit, Schedule,
                                  List[Union[QuantumCircuit, Schedule]]],
                  job_name: str,
                  backend: IBMQBackend,
                  submit_lock: Lock,
                  job_tags: Optional[List[str]] = None,
                  **run_config: Dict) -> None:
    """Run circuits asynchronously and populate instance attributes.

    Args:
        circuits: Circuits to run.
        job_name: Name of the job.
        backend: Backend to execute the experiments on.
        submit_lock: Lock used to synchronize job submission.
        job_tags: Tags to be assigned to the job.
        **run_config: Extra arguments used to configure the run.
    """
    # pylint: disable=missing-raises-doc
    logger.debug("Job %s waiting for submit lock.", job_name)
    submit_lock.acquire()
    logger.debug("Job %s got the submit lock.", job_name)
    try:
        while self.job is None:
            try:
                self.job = backend.run(circuits,
                                       job_name=job_name,
                                       job_tags=job_tags,
                                       **run_config)
            except IBMQBackendJobLimitError:
                final_states = [
                    state.value for state in API_JOB_FINAL_STATES]
                oldest_running = backend.jobs(
                    limit=1, descending=False,
                    db_filter={"status": {"nin": final_states}})
                if oldest_running:
                    oldest_running = oldest_running[0]
                    logger.warning(
                        "Job limit reached, waiting for job %s to finish "
                        "before submitting the next one.",
                        oldest_running.job_id())
                    try:
                        oldest_running.wait_for_final_state(timeout=300)
                    except Exception as err:  # pylint: disable=broad-except
                        # Don't kill the submit if unable to wait for old job.
                        logger.debug("An error occurred while waiting for "
                                     "job %s to finish: %s",
                                     oldest_running.job_id(), err)
    except Exception as err:  # pylint: disable=broad-except
        warnings.warn(
            "Unable to submit job for experiments {}-{}: {}".format(
                self.start_index, self.end_index, err))
        self.submit_error = err
    finally:
        submit_lock.release()
        logger.debug("Job %s released the submit lock.", job_name)
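# Sketch of the fan-out this method is written for: every managed job shares a
# single Lock, so only one thread talks to the submit endpoint at a time while
# submissions still run asynchronously. "jobs", "batches", and "backend" are
# placeholders, and ManagedJob stands in for the class owning _async_submit.
from concurrent import futures
from threading import Lock

submit_lock = Lock()
with futures.ThreadPoolExecutor() as executor:
    for job, circs in zip(jobs, batches):
        executor.submit(job._async_submit,
                        circuits=circs,
                        job_name="batch_{}".format(job.start_index),
                        backend=backend,
                        submit_lock=submit_lock,
                        job_tags=["managed"])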