def _positivity_result(evals, qpt=False):
    """Check if eigenvalues are positive"""
    cond = np.sum(np.abs(evals[evals < 0]))
    is_pos = bool(np.isclose(cond, 0))
    name = "completely_positive" if qpt else "positive"
    result = AnalysisResultData(name, is_pos)
    if not is_pos:
        result.extra = {"delta": cond}
    return result
def _tp_result(evals, evecs):
    """Check if QPT channel is trace preserving"""
    size = len(evals)
    dim = int(np.sqrt(size))
    mats = np.reshape(evecs.T, (size, dim, dim), order="F")
    kraus_cond = np.einsum("i,ija,ijb->ab", evals, mats.conj(), mats)
    cond = np.sum(np.abs(la.eigvalsh(kraus_cond - np.eye(dim))))
    is_tp = bool(np.isclose(cond, 0))
    result = AnalysisResultData("trace_preserving", is_tp)
    if not is_tp:
        result.extra = {"delta": cond}
    return result
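For context, a standalone sketch (not using the helpers above, only numpy/scipy) that mirrors the same trace-preserving check on the Choi matrix of the single-qubit identity channel, whose eigendecomposition should satisfy sum_i lambda_i K_i^dag K_i = I:

# Standalone sketch of the trace-preserving condition checked above.
import numpy as np
import scipy.linalg as la

dim = 2
# Choi matrix of the identity channel: outer product of the vectorized identity
vec_id = np.eye(dim).ravel(order="F")
choi = np.outer(vec_id, vec_id.conj())

evals, evecs = la.eigh(choi)
mats = np.reshape(evecs.T, (dim * dim, dim, dim), order="F")
kraus_cond = np.einsum("i,ija,ijb->ab", evals, mats.conj(), mats)

delta = np.sum(np.abs(la.eigvalsh(kraus_cond - np.eye(dim))))
print(np.isclose(delta, 0))  # True: the identity channel is trace preserving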
def _run_analysis(
    self, experiment_data: ExperimentData
) -> Tuple[List[AnalysisResultData], List["matplotlib.figure.Figure"]]:
    r"""Run analysis for :math:`T_\phi` experiment.

    It invokes CompositeAnalysis._run_analysis that will invoke
    _run_analysis for the two sub-experiments.
    Based on the results, it computes the result for :math:`T_\phi`.
    """
    super()._run_analysis(experiment_data)
    t1_result = experiment_data.child_data(0).analysis_results("T1")
    t2star_result = experiment_data.child_data(1).analysis_results("T2star")
    tphi = 1 / (1 / t2star_result.value - 1 / (2 * t1_result.value))

    quality_tphi = (
        "good" if (t1_result.quality == "good" and t2star_result.quality == "good") else "bad"
    )

    analysis_results = [
        AnalysisResultData(
            name="T_phi",
            value=tphi,
            chisq=None,
            quality=quality_tphi,
            extra={"unit": "s"},
        )
    ]
    return analysis_results, []
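A minimal numeric sketch of the relation 1/T_phi = 1/T2* - 1/(2*T1) used above; the T1 and T2* values are illustrative, not from any experiment:

# Standalone sketch of the T_phi relation with made-up values.
t1 = 80e-6       # seconds (illustrative)
t2star = 50e-6   # seconds (illustrative)

tphi = 1 / (1 / t2star - 1 / (2 * t1))
print(f"T_phi = {tphi * 1e6:.1f} us")  # ~72.7 us for these inputs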
def _run_analysis(self, experiment_data):
    angles = []
    radii = []
    centers = []

    for i in range(2):
        center = complex(*experiment_data.data(i)["memory"][0])
        angles.append(np.angle(center))
        radii.append(np.absolute(center))
        centers.append(center)

    angle = (angles[0] + angles[1]) / 2
    if (np.abs(angles[0] - angles[1])) % (2 * np.pi) > np.pi:
        angle += np.pi

    extra_results = {}
    extra_results["readout_angle_0"] = angles[0]
    extra_results["readout_angle_1"] = angles[1]
    extra_results["readout_radius_0"] = radii[0]
    extra_results["readout_radius_1"] = radii[1]

    analysis_results = [
        AnalysisResultData(name="readout_angle", value=angle, extra=extra_results)
    ]

    if self.options.plot:
        ax = self._format_plot(centers, ax=self.options.ax)
        figures = [ax.get_figure()]
    else:
        figures = None
    return analysis_results, figures
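A standalone sketch (illustrative IQ values) of why the branch-cut correction above is needed when the two readout angles straddle +/- pi:

import numpy as np

# Two IQ centroids that straddle the +/- pi branch cut (illustrative values).
center_0 = complex(-1.0, 0.1)   # angle close to +pi
center_1 = complex(-1.0, -0.1)  # angle close to -pi

angles = [np.angle(center_0), np.angle(center_1)]
angle = (angles[0] + angles[1]) / 2      # naive mean is ~0, pointing the wrong way
if np.abs(angles[0] - angles[1]) % (2 * np.pi) > np.pi:
    angle += np.pi                       # corrected mean points back toward pi

print(angle)  # ~pi rather than ~0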
def _run_analysis(
    self, experiment_data: ExperimentData
) -> Tuple[List[AnalysisResultData], List["matplotlib.figure.Figure"]]:
    r"""Run analysis for :math:`T_\phi` experiment.

    It invokes CompositeAnalysis._run_analysis that will invoke
    _run_analysis for the two sub-experiments.
    Based on the results, it computes the result for :math:`T_\phi`.
    """
    # Run composite analysis and extract T1 and T2star results
    analysis_results, figures = super()._run_analysis(experiment_data)
    t1_result = next(filter(lambda res: res.name == "T1", analysis_results))
    t2star_result = next(filter(lambda res: res.name == "T2star", analysis_results))

    # Calculate Tphi from T1 and T2star
    tphi = 1 / (1 / t2star_result.value - 1 / (2 * t1_result.value))

    quality_tphi = (
        "good" if (t1_result.quality == "good" and t2star_result.quality == "good") else "bad"
    )

    tphi_result = AnalysisResultData(
        name="T_phi",
        value=tphi,
        chisq=None,
        quality=quality_tphi,
        extra={"unit": "s"},
    )

    # Return combined results
    analysis_results = [tphi_result] + analysis_results
    return analysis_results, figures
def _extra_database_entry(self, fit_data: curve.FitData) -> List[AnalysisResultData]:
    """Calculate EPC."""
    nrb = 2**self._num_qubits
    scale = (nrb - 1) / nrb

    alpha = fit_data.fitval("alpha")
    alpha_c = fit_data.fitval("alpha_c")

    # Calculate epc_est (=r_c^est) - Eq. (4):
    epc = scale * (1 - alpha_c)

    # Calculate the systematic error bounds - Eq. (5):
    systematic_err_1 = scale * (abs(alpha.n - alpha_c.n) + (1 - alpha.n))
    systematic_err_2 = (
        2 * (nrb * nrb - 1) * (1 - alpha.n) / (alpha.n * nrb * nrb)
        + 4 * (np.sqrt(1 - alpha.n)) * (np.sqrt(nrb * nrb - 1)) / alpha.n
    )
    systematic_err = min(systematic_err_1, systematic_err_2)
    systematic_err_l = epc.n - systematic_err
    systematic_err_r = epc.n + systematic_err

    extra_data = AnalysisResultData(
        name="EPC",
        value=epc,
        chisq=fit_data.reduced_chisq,
        quality=self._evaluate_quality(fit_data),
        extra={
            "EPC_systematic_err": systematic_err,
            "EPC_systematic_bounds": [max(systematic_err_l, 0), systematic_err_r],
        },
    )

    return [extra_data]
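A standalone numeric sketch of Eqs. (4) and (5) above; the decay parameters alpha and alpha_c are made up for illustration:

# Standalone sketch of the interleaved-RB EPC estimate and its systematic bounds.
import numpy as np

num_qubits = 2
nrb = 2**num_qubits
scale = (nrb - 1) / nrb

alpha = 0.95     # standard RB decay (illustrative)
alpha_c = 0.90   # interleaved RB decay (illustrative)

epc = scale * (1 - alpha_c)                             # Eq. (4)
err_1 = scale * (abs(alpha - alpha_c) + (1 - alpha))    # Eq. (5), first bound
err_2 = (
    2 * (nrb * nrb - 1) * (1 - alpha) / (alpha * nrb * nrb)
    + 4 * np.sqrt(1 - alpha) * np.sqrt(nrb * nrb - 1) / alpha
)
systematic_err = min(err_1, err_2)
print(epc, (max(epc - systematic_err, 0), epc + systematic_err))  # 0.075, (0, 0.15)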
def _fidelity_result(
    state_result: AnalysisResultData,
    target: Union[Choi, DensityMatrix],
    input_dim: int = 1,
):
    """Faster computation of fidelity from eigen decomposition"""
    evals = state_result.extra["eigvals"]
    evecs = state_result.extra["eigvecs"]

    # Format target to statevector or densitymatrix array
    name = "process_fidelity" if input_dim > 1 else "state_fidelity"

    if target is None:
        raise AnalysisError("No target state provided")
    if isinstance(target, QuantumChannel):
        target_state = Choi(target).data / input_dim
    elif isinstance(target, BaseOperator):
        target_state = np.ravel(Operator(target), order="F") / np.sqrt(input_dim)
    else:
        # Statevector or density matrix
        target_state = np.array(target)

    if target_state.ndim == 1:
        rho = evecs @ (evals / input_dim * evecs).T.conj()
        fidelity = np.real(target_state.conj() @ rho @ target_state)
    else:
        sqrt_rho = evecs @ (np.sqrt(evals / input_dim) * evecs).T.conj()
        eig = la.eigvalsh(sqrt_rho @ target_state @ sqrt_rho)
        fidelity = np.sum(np.sqrt(np.maximum(eig, 0))) ** 2
    return AnalysisResultData(name, fidelity)
def _run_analysis(self, experiment_data, **options):
    seed = options.get("seed", None)
    rng = np.random.default_rng(seed=seed)
    analysis_results = [
        AnalysisResultData(f"result_{i}", value) for i, value in enumerate(rng.random(3))
    ]
    return analysis_results, None
def _extra_database_entry(self, fit_data: curve.FitData) -> List[AnalysisResultData]:
    """Calculate Hamiltonian coefficients from fit values."""
    extra_entries = []

    for control in ("z", "i"):
        for target in ("x", "y", "z"):
            p0_val = fit_data.fitval(f"p{target}0")
            p1_val = fit_data.fitval(f"p{target}1")

            if control == "z":
                coef_val = 0.5 * (p0_val - p1_val) / (2 * np.pi)
            else:
                coef_val = 0.5 * (p0_val + p1_val) / (2 * np.pi)

            extra_entries.append(
                AnalysisResultData(
                    name=f"omega_{control}{target}",
                    value=coef_val,
                    chisq=fit_data.reduced_chisq,
                    device_components=[Qubit(q) for q in self._physical_qubits],
                    extra={"unit": "Hz"},
                )
            )

    return extra_entries
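For orientation, a standalone sketch of the coefficient relations above with illustrative oscillation rates; px0 and px1 are made-up stand-ins for the fitted pX0/pX1 values (rad/s) with the control qubit in |0> and |1>:

# Standalone sketch: omega_zx and omega_ix from half the difference/sum of the rates.
import numpy as np

px0 = 2 * np.pi * 1.0e6   # rad/s, control in |0> (illustrative)
px1 = 2 * np.pi * 0.2e6   # rad/s, control in |1> (illustrative)

omega_zx = 0.5 * (px0 - px1) / (2 * np.pi)   # Hz
omega_ix = 0.5 * (px0 + px1) / (2 * np.pi)   # Hz
print(omega_zx, omega_ix)  # 400000.0, 600000.0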
def _run_analysis(self, experiment_data):
    analysis_results = [
        AnalysisResultData(
            name="non-zero counts", value=len(experiment_data.data(0)["counts"])
        ),
    ]
    return analysis_results, []
def _tp_result(
    state_result: AnalysisResultData,
    input_dim: int = 1,
) -> AnalysisResultData:
    """Check if QPT channel is trace preserving"""
    evals = state_result.extra["eigvals"]
    evecs = state_result.extra["eigvecs"]
    size = len(evals)
    output_dim = size // input_dim

    mats = np.reshape(evecs.T, (size, output_dim, input_dim), order="F")
    kraus_cond = np.einsum("i,ija,ijb->ab", evals, mats.conj(), mats)
    cond = np.sum(np.abs(la.eigvalsh(kraus_cond - np.eye(input_dim))))
    is_tp = bool(np.isclose(cond, 0))
    result = AnalysisResultData("trace_preserving", is_tp)
    if not is_tp:
        result.extra = {"delta": cond}
    return result
def _run_analysis(self, experiment_data: CompositeExperimentData, **options):
    """Run analysis on circuit data.

    Args:
        experiment_data: the experiment data to analyze.
        options: kwarg options for analysis function.

    Returns:
        tuple: A pair ``(analysis_results, figures)`` where ``analysis_results``
            is a list of :class:`AnalysisResultData` objects, and ``figures``
            is a list of any figures for the experiment.

    Raises:
        QiskitError: if analysis is attempted on non-composite experiment data.
    """
    if not isinstance(experiment_data, CompositeExperimentData):
        raise QiskitError("CompositeAnalysis must be run on CompositeExperimentData.")

    # Run analysis for sub-experiments and collect sub-experiment metadata
    # as the result of the batch experiment.
    # Note: if analysis results had IDs, these should be included here
    # rather than just the sub-experiment IDs.
    sub_types = []
    sub_ids = []
    sub_qubits = []

    comp_exp = experiment_data.experiment
    for i in range(comp_exp.num_experiments):
        exp = comp_exp.component_experiment(i)
        expdata = experiment_data.component_experiment_data(i)
        exp.run_analysis(expdata, **options)

        sub_types.append(expdata.experiment_type)
        sub_ids.append(expdata.experiment_id)
        sub_qubits.append(expdata.experiment.physical_qubits)

    result = AnalysisResultData(
        name="parallel_experiment",
        value=len(sub_types),
        extra={
            "experiment_types": sub_types,
            "experiment_ids": sub_ids,
        },
    )
    return [result], None
def _run_analysis(
    self, experiment_data: ExperimentData, **options
) -> Tuple[List[AnalysisResultData], List["matplotlib.figure.Figure"]]:
    data = experiment_data.data()
    qubits = experiment_data.metadata["physical_qubits"]
    labels = [datum["metadata"]["label"] for datum in data]
    matrix = self._generate_matrix(data, labels)
    result_mitigator = CorrelatedReadoutMitigator(matrix, qubits=qubits)
    analysis_results = [
        AnalysisResultData("Correlated Readout Mitigator", result_mitigator)
    ]
    ax = options.get("ax", None)
    figures = [self._plot_calibration(matrix, labels, ax)]
    return analysis_results, figures
def _run_analysis(
    self, experiment_data: ExperimentData
) -> Tuple[List[AnalysisResultData], List["matplotlib.figure.Figure"]]:
    data = experiment_data.data()
    qubits = experiment_data.metadata["physical_qubits"]
    matrices = self._generate_matrices(data)
    result_mitigator = LocalReadoutMitigator(matrices, qubits=qubits)
    analysis_results = [AnalysisResultData("Local Readout Mitigator", result_mitigator)]
    if self.options.plot:
        figure = assignment_matrix_visualization(
            result_mitigator.assignment_matrix(), ax=self.options.ax
        )
        figures = [figure]
    else:
        figures = None
    return analysis_results, figures
def _run_analysis(self, experiment_data, parameter_guess=None, plot=True, ax=None):
    state_labels = []
    for datum in experiment_data.data():
        state_label = datum["metadata"]["state_label"]
        if state_label in state_labels:
            break
        state_labels.append(state_label)
    meas_fitter = CompleteMeasFitter(None, state_labels, circlabel="mcal")
    nstates = len(state_labels)
    for job_id in experiment_data.job_ids:
        full_result = experiment_data.backend.retrieve_job(job_id).result()
        # full_result might contain repeated experiments
        for iset in range(len(full_result.results) // nstates):
            try:
                date = full_result.date
            except AttributeError:
                date = None
            try:
                status = full_result.status
            except AttributeError:
                status = None
            try:
                header = full_result.header
            except AttributeError:
                header = None
            result = Result(
                full_result.backend_name,
                full_result.backend_version,
                full_result.qobj_id,
                full_result.job_id,
                full_result.success,
                full_result.results[iset * nstates : (iset + 1) * nstates],
                date=date,
                status=status,
                header=header,
                **full_result._metadata,
            )
            meas_fitter.add_data(result)

    results = [
        AnalysisResultData("error_matrix", meas_fitter.cal_matrix, extra=state_labels)
    ]

    plots = []
    if plot:
        figure, ax = plt.subplots(1, 1)
        meas_fitter.plot_calibration(ax=ax)
        plots.append(figure)
    return results, plots
def test_calculate_2q_epg(self):
    """Testing the calculation of 2-qubit error per gate.

    The EPG is computed based on the error per Clifford determined in the RB
    experiment, the gate counts, and an estimate of the relations between the
    errors of different gate types.
    """
    epc_2_qubit = ufloat(0.034184849962675984, 0)
    qubits = [1, 4]
    gate_error_ratio = {
        ((1,), "id"): 1,
        ((4,), "id"): 1,
        ((1,), "rz"): 0,
        ((4,), "rz"): 0,
        ((1,), "sx"): 1,
        ((4,), "sx"): 1,
        ((1,), "x"): 1,
        ((4,), "x"): 1,
        ((4, 1), "cx"): 1,
        ((1, 4), "cx"): 1,
    }
    gates_per_clifford = {
        ((1, 4), "barrier"): 1.032967032967033,
        ((1,), "rz"): 15.932967032967033,
        ((1,), "sx"): 12.382417582417583,
        ((4,), "rz"): 18.681946624803768,
        ((4,), "sx"): 14.522605965463109,
        ((1, 4), "cx"): 1.0246506515936569,
        ((4, 1), "cx"): 0.5212064090480678,
        ((4,), "x"): 0.24237661112857592,
        ((1,), "measure"): 0.01098901098901099,
        ((4,), "measure"): 0.01098901098901099,
        ((1,), "x"): 0.2525918944392083,
    }
    epg_1_qubit = [
        AnalysisResultData("EPG_rz", 0.0, device_components=[1]),
        AnalysisResultData("EPG_rz", 0.0, device_components=[4]),
        AnalysisResultData("EPG_sx", 0.00036207066403884814, device_components=[1]),
        AnalysisResultData("EPG_sx", 0.0005429962529239195, device_components=[4]),
        AnalysisResultData("EPG_x", 0.00036207066403884814, device_components=[1]),
        AnalysisResultData("EPG_x", 0.0005429962529239195, device_components=[4]),
    ]
    with self.assertWarns(DeprecationWarning):
        epg = rb.RBUtils.calculate_2q_epg(
            epc_2_qubit,
            qubits,
            gate_error_ratio,
            gates_per_clifford,
            epg_1_qubit,
        )
    error_dict = {
        ((1, 4), "cx"): ufloat(0.012438847900902494, 0),
    }

    expected_epg = error_dict[((1, 4), "cx")]
    actual_epg = epg[(1, 4)]["cx"]
    self.assertAlmostEqual(expected_epg.nominal_value, actual_epg.nominal_value, delta=0.001)
    self.assertAlmostEqual(expected_epg.std_dev, actual_epg.std_dev, delta=0.001)
def _state_result(
    cls,
    fit: np.ndarray,
    make_positive: bool = False,
    trace: Optional[float] = None,
    input_dims: Optional[Tuple[int, ...]] = None,
    output_dims: Optional[Tuple[int, ...]] = None,
) -> AnalysisResultData:
    """Convert fit data to state result data"""
    # Get eigensystem of state fit
    raw_eigvals, eigvecs = cls._state_eigensystem(fit)

    # Optionally rescale eigenvalues to be non-negative
    if make_positive and np.any(raw_eigvals < 0):
        eigvals = cls._make_positive(raw_eigvals)
        fit = eigvecs @ (eigvals * eigvecs).T.conj()
        rescaled_psd = True
    else:
        eigvals = raw_eigvals
        rescaled_psd = False

    # Optionally rescale fit trace
    fit_trace = np.sum(eigvals)
    if trace is not None and not np.isclose(fit_trace - trace, 0, atol=1e-12):
        scale = trace / fit_trace
        fit = fit * scale
        eigvals = eigvals * scale
    else:
        trace = fit_trace

    # Convert class of value
    if input_dims and np.prod(input_dims) > 1:
        value = Choi(fit, input_dims=input_dims, output_dims=output_dims)
    else:
        value = DensityMatrix(fit, dims=output_dims)

    # Construct state result extra metadata
    extra = {
        "trace": trace,
        "eigvals": eigvals,
        "raw_eigvals": raw_eigvals,
        "rescaled_psd": rescaled_psd,
        "eigvecs": eigvecs,
    }
    return AnalysisResultData("state", value, extra=extra)
def _create_analysis_results(
    self,
    fit_data: curve.FitData,
    quality: str,
    **metadata,
) -> List[AnalysisResultData]:
    """Create analysis results for important fit parameters.

    Args:
        fit_data: Fit outcome.
        quality: Quality of fit outcome.

    Returns:
        List of analysis result data.
    """
    outcomes = super()._create_analysis_results(fit_data, quality, **metadata)

    for control in ("z", "i"):
        for target in ("x", "y", "z"):
            p0_val = fit_data.fitval(f"p{target}0")
            p1_val = fit_data.fitval(f"p{target}1")

            if control == "z":
                coef_val = 0.5 * (p0_val - p1_val) / (2 * np.pi)
            else:
                coef_val = 0.5 * (p0_val + p1_val) / (2 * np.pi)

            outcomes.append(
                AnalysisResultData(
                    name=f"omega_{control}{target}",
                    value=coef_val,
                    chisq=fit_data.reduced_chisq,
                    quality=quality,
                    extra={
                        "unit": "Hz",
                        **metadata,
                    },
                )
            )

    return outcomes
def _fidelity_result(evals, evecs, target, qpt=False):
    """Faster computation of fidelity from eigen decomposition"""
    # Format target to statevector or densitymatrix array
    trace = np.sqrt(len(evals)) if qpt else 1
    name = "process_fidelity" if qpt else "state_fidelity"

    if target is None:
        raise AnalysisError("No target state provided")
    if isinstance(target, QuantumChannel):
        target_state = Choi(target).data / trace
    elif isinstance(target, BaseOperator):
        target_state = np.ravel(Operator(target), order="F") / np.sqrt(trace)
    else:
        target_state = np.array(target)

    if target_state.ndim == 1:
        rho = evecs @ (evals / trace * evecs).T.conj()
        fidelity = np.real(target_state.conj() @ rho @ target_state)
    else:
        sqrt_rho = evecs @ (np.sqrt(evals / trace) * evecs).T.conj()
        eig = la.eigvalsh(sqrt_rho @ target_state @ sqrt_rho)
        fidelity = np.sum(np.sqrt(np.maximum(eig, 0))) ** 2
    return AnalysisResultData(name, fidelity)
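As a sanity check, a standalone sketch (assuming qiskit is installed) comparing the eigendecomposition shortcut used above with qiskit.quantum_info.state_fidelity for a pure target, which is the state-tomography case:

# Standalone sketch: fidelity via eigendecomposition vs. state_fidelity.
import numpy as np
import scipy.linalg as la
from qiskit.quantum_info import Statevector, random_density_matrix, state_fidelity

rho = random_density_matrix(2, seed=42)
target = Statevector([1, 0])

evals, evecs = la.eigh(rho.data)
rho_rebuilt = evecs @ (evals * evecs).T.conj()          # V diag(evals) V^dag
fid_fast = np.real(target.data.conj() @ rho_rebuilt @ target.data)

print(np.isclose(fid_fast, state_fidelity(rho, target)))  # True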
def _create_curve_data(
    self,
    curve_data: CurveData,
    series: List[SeriesDef],
    **metadata,
) -> List[AnalysisResultData]:
    """Create analysis results for raw curve data.

    Args:
        curve_data: Formatted data that is used for the fitting.
        series: List of series definition associated with the curve data.

    Returns:
        List of analysis result data.
    """
    samples = []

    if not self.options.return_data_points:
        return samples

    for sdef in series:
        s_data = curve_data.get_subset_of(sdef.name)
        raw_datum = AnalysisResultData(
            name=DATA_ENTRY_PREFIX + self.__class__.__name__,
            value={
                "xdata": s_data.x,
                "ydata": s_data.y,
                "sigma": s_data.y_err,
            },
            extra={
                "name": sdef.name,
                **metadata,
            },
        )
        samples.append(raw_datum)

    return samples
def _run_analysis(
    self, experiment_data: ExperimentData
) -> Tuple[List[AnalysisResultData], List["pyplot.Figure"]]:
    #
    # 1. Parse arguments
    #

    # Update all fit functions in the series definitions if fixed parameter is defined.
    # Fixed parameters should be provided by the analysis options.
    if self.__fixed_parameters__:
        assigned_params = {k: self.options.get(k, None) for k in self.__fixed_parameters__}

        # Check if all parameters are assigned.
        if any(v is None for v in assigned_params.values()):
            raise AnalysisError(
                f"Unassigned fixed-value parameters for the fit "
                f"function {self.__class__.__name__}. "
                f"All values of fixed-parameters, i.e. {self.__fixed_parameters__}, "
                "must be provided by the analysis options to run this analysis."
            )

        # Override series definition with assigned fit functions.
        assigned_series = []
        for series_def in self.__series__:
            dict_def = dataclasses.asdict(series_def)
            dict_def["fit_func"] = functools.partial(series_def.fit_func, **assigned_params)
            assigned_series.append(SeriesDef(**dict_def))
        self.__series__ = assigned_series

    # get experiment metadata
    try:
        self.__experiment_metadata = experiment_data.metadata
    except AttributeError:
        pass

    # get backend
    try:
        self.__backend = experiment_data.backend
    except AttributeError:
        pass

    #
    # 2. Setup data processor
    #

    # If no data processor was provided at run-time we infer one from the job
    # metadata and default to the data processor for averaged classified data.
    data_processor = self.options.data_processor
    if not data_processor:
        data_processor = get_processor(experiment_data, self.options)

    if isinstance(data_processor, DataProcessor) and not data_processor.is_trained:
        # Qiskit DataProcessor instance. May need calibration.
        data_processor.train(data=experiment_data.data())

    #
    # 3. Extract curve entries from experiment data
    #
    self._extract_curves(experiment_data=experiment_data, data_processor=data_processor)

    #
    # 4. Run fitting
    #
    formatted_data = self._data(label="fit_ready")

    # Generate algorithmic initial guesses and boundaries
    default_fit_opt = FitOptions(
        parameters=self._fit_params(),
        default_p0=self.options.p0,
        default_bounds=self.options.bounds,
        **self.options.curve_fitter_options,
    )

    fit_options = self._generate_fit_guesses(default_fit_opt)
    if isinstance(fit_options, FitOptions):
        fit_options = [fit_options]

    # Run fit for each configuration
    fit_results = []
    for fit_opt in set(fit_options):
        try:
            fit_result = self.options.curve_fitter(
                funcs=[series_def.fit_func for series_def in self.__series__],
                series=formatted_data.data_index,
                xdata=formatted_data.x,
                ydata=formatted_data.y,
                sigma=formatted_data.y_err,
                **fit_opt.options,
            )
            fit_results.append(fit_result)
        except AnalysisError:
            # Some guesses might be too far from the true parameters and may thus fail.
            # We ignore initial guesses that fail and continue with the next fit candidate.
            pass

    # Find best value with chi-squared value
    if len(fit_results) == 0:
        warnings.warn(
            "All initial guesses and parameter boundaries failed to fit the data. "
            "Please provide better initial guesses or fit parameter boundaries.",
            UserWarning,
        )
        # at least return raw data points rather than terminating
        fit_result = None
    else:
        fit_result = sorted(fit_results, key=lambda r: r.reduced_chisq)[0]

    #
    # 5. Create database entry
    #
    analysis_results = []

    if fit_result:
        quality = self._evaluate_quality(fit_data=fit_result)  # pylint: disable=assignment-from-none

        fit_models = {
            series_def.name: series_def.model_description or "no description"
            for series_def in self.__series__
        }

        # overview entry
        analysis_results.append(
            AnalysisResultData(
                name=PARAMS_ENTRY_PREFIX + self.__class__.__name__,
                value=[p.nominal_value for p in fit_result.popt],
                chisq=fit_result.reduced_chisq,
                quality=quality,
                extra={
                    "popt_keys": fit_result.popt_keys,
                    "dof": fit_result.dof,
                    "covariance_mat": fit_result.pcov,
                    "fit_models": fit_models,
                    **self.options.extra,
                },
            )
        )

        # output special parameters
        result_parameters = self.options.result_parameters
        if result_parameters:
            for param_repr in result_parameters:
                if isinstance(param_repr, ParameterRepr):
                    p_name = param_repr.name
                    p_repr = param_repr.repr or param_repr.name
                    unit = param_repr.unit
                else:
                    p_name = param_repr
                    p_repr = param_repr
                    unit = None
                fit_val = fit_result.fitval(p_name)
                if unit:
                    metadata = copy.copy(self.options.extra)
                    metadata["unit"] = unit
                else:
                    metadata = self.options.extra

                result_entry = AnalysisResultData(
                    name=p_repr,
                    value=fit_val,
                    chisq=fit_result.reduced_chisq,
                    quality=quality,
                    extra=metadata,
                )
                analysis_results.append(result_entry)

        # add extra database entries
        analysis_results.extend(self._extra_database_entry(fit_result))

    if self.options.return_data_points:
        # save raw data points in the data base if option is set (default to false)
        raw_data_dict = dict()
        for series_def in self.__series__:
            series_data = self._data(series_name=series_def.name, label="raw_data")
            raw_data_dict[series_def.name] = {
                "xdata": series_data.x,
                "ydata": series_data.y,
                "sigma": series_data.y_err,
            }
        raw_data_entry = AnalysisResultData(
            name=DATA_ENTRY_PREFIX + self.__class__.__name__,
            value=raw_data_dict,
            extra={
                "x-unit": self.options.xval_unit,
                "y-unit": self.options.yval_unit,
            },
        )
        analysis_results.append(raw_data_entry)

    #
    # 6. Create figures
    #
    if self.options.plot:
        fit_figure = FitResultPlotters[self.options.curve_plotter].value.draw(
            series_defs=self.__series__,
            raw_samples=[self._data(ser.name, "raw_data") for ser in self.__series__],
            fit_samples=[self._data(ser.name, "fit_ready") for ser in self.__series__],
            tick_labels={
                "xval_unit": self.options.xval_unit,
                "yval_unit": self.options.yval_unit,
                "xlabel": self.options.xlabel,
                "ylabel": self.options.ylabel,
                "xlim": self.options.xlim,
                "ylim": self.options.ylim,
            },
            fit_data=fit_result,
            result_entries=analysis_results,
            style=self.options.style,
            axis=self.options.axis,
        )
        figures = [fit_figure]
    else:
        figures = []

    return analysis_results, figures
def _calc_quantum_volume(self, heavy_output_prob_exp, depth, trials):
    """
    Calc the quantum volume of the analysed system.

    The quantum volume is determined by the largest successful depth.
    A depth is successful if it has 'mean heavy-output probability' > 2/3 with
    confidence level > 0.977 (corresponding to z_value = 2), and at least 100
    trials have been run.
    We assume the error (standard deviation) of the heavy output probability is
    due to a binomial distribution. The standard deviation for a binomial
    distribution is sqrt(np(1-p)), where n is the number of trials and p is the
    success probability.

    Returns:
        tuple: quantum volume calculations -
        the quantum volume,
        whether the results passed the threshold,
        the confidence of the result,
        the heavy output probability for each trial,
        the mean heavy output probability,
        the error of the heavy output probability,
        the depth of the circuit,
        the number of trials run
    """
    quantum_volume = 1
    success = False

    mean_hop = np.mean(heavy_output_prob_exp)
    sigma_hop = (mean_hop * ((1.0 - mean_hop) / trials)) ** 0.5
    z = 2
    threshold = 2 / 3 + z * sigma_hop
    z_value = self._calc_z_value(mean_hop, sigma_hop)
    confidence_level = self._calc_confidence_level(z_value)
    if confidence_level > 0.977:
        quality = "good"
    else:
        quality = "bad"

    # Must have at least 100 trials
    if trials < 100:
        warnings.warn("Must use at least 100 trials to consider Quantum Volume as successful.")

    if mean_hop > threshold and trials >= 100:
        quantum_volume = 2**depth
        success = True

    hop_result = AnalysisResultData(
        "mean_HOP",
        value=FitVal(mean_hop, sigma_hop),
        quality=quality,
        extra={
            "HOPs": heavy_output_prob_exp,
            "two_sigma": 2 * sigma_hop,
            "depth": depth,
            "trials": trials,
        },
    )

    qv_result = AnalysisResultData(
        "quantum_volume",
        value=quantum_volume,
        quality=quality,
        extra={
            "success": success,
            "confidence": confidence_level,
            "depth": depth,
            "trials": trials,
        },
    )
    return hop_result, qv_result
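A standalone numeric sketch of the acceptance criterion used above (mean heavy-output probability above 2/3 plus two standard deviations, with at least 100 trials); the per-trial probabilities are synthetic and only illustrative:

# Standalone sketch of the quantum volume pass/fail check.
import numpy as np

trials = 200
rng = np.random.default_rng(0)
heavy_output_probs = rng.normal(0.75, 0.03, trials)        # illustrative per-trial HOPs

mean_hop = np.mean(heavy_output_probs)
sigma_hop = (mean_hop * (1.0 - mean_hop) / trials) ** 0.5   # binomial std of the mean
threshold = 2 / 3 + 2 * sigma_hop                           # 2-sigma above 2/3

depth = 4
success = mean_hop > threshold and trials >= 100
quantum_volume = 2**depth if success else 1
print(mean_hop, threshold, quantum_volume)  # expect success -> 16 for these values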
def _run_analysis(
    self,
    experiment_data: ExperimentData,
    user_p0: Optional[Dict[str, float]] = None,
    user_bounds: Optional[Tuple[List[float], List[float]]] = None,
    plot: bool = False,
    ax: Optional["AxesSubplot"] = None,
    **kwargs,
) -> Tuple[List[AnalysisResultData], List["matplotlib.figure.Figure"]]:
    r"""Calculate T2Ramsey experiment.

    Args:
        experiment_data (ExperimentData): the experiment data to analyze
        user_p0: contains initial values given by the user, for the
            fit parameters :math:`(a, t2ramsey, f, \phi, b)`
        user_bounds: lower and upper bounds on the parameters in p0,
            given by the user.
            The first tuple is the lower bounds,
            The second tuple is the upper bounds.
            For both params, the order is :math:`a, t2ramsey, f, \phi, b`.
        plot: if True, create the plot, otherwise, do not create the plot.
        ax: the plot object
        **kwargs: additional parameters for curve fit.

    Returns:
        The analysis result with the estimated :math:`t2ramsey` and 'f' (frequency)
        The graph of the function.
    """

    def osc_fit_fun(x, a, t2ramsey, f, phi, c):
        """Decay cosine fit function"""
        return a * np.exp(-x / t2ramsey) * np.cos(2 * np.pi * f * x + phi) + c

    def _format_plot(ax, unit, fit_result, conversion_factor):
        """Format curve fit plot"""
        # Formatting
        ax.tick_params(labelsize=14)
        ax.set_xlabel("Delay (s)", fontsize=12)
        ax.ticklabel_format(axis="x", style="sci", scilimits=(0, 0))
        ax.set_ylabel("Probability of measuring 0", fontsize=12)
        t2ramsey = fit_result["popt"][1] / conversion_factor
        t2_err = fit_result["popt_err"][1] / conversion_factor
        box_text = "$T_2Ramsey$ = {:.2f} \u00B1 {:.2f} {}".format(t2ramsey, t2_err, unit)
        bbox_props = dict(boxstyle="square,pad=0.3", fc="white", ec="black", lw=1)
        ax.text(
            0.6,
            0.9,
            box_text,
            ha="center",
            va="center",
            size=12,
            bbox=bbox_props,
            transform=ax.transAxes,
        )
        return ax

    # implementation of _run_analysis
    data = experiment_data.data()
    circ_metadata = data[0]["metadata"]
    unit = circ_metadata["unit"]
    conversion_factor = circ_metadata.get("dt_factor", None)
    osc_freq = circ_metadata.get("osc_freq", None)
    if conversion_factor is None:
        conversion_factor = 1 if unit in ("s", "dt") else apply_prefix(1, unit)

    xdata, ydata, sigma = process_curve_data(data, lambda datum: level2_probability(datum, "0"))

    t2ramsey_estimate = np.mean(xdata)
    p0, bounds = self._t2ramsey_default_params(
        conversion_factor, user_p0, user_bounds, t2ramsey_estimate, osc_freq
    )
    xdata *= conversion_factor
    fit_result = curve_fit(
        osc_fit_fun, xdata, ydata, p0=list(p0.values()), sigma=sigma, bounds=bounds
    )
    fit_result = dataclasses.asdict(fit_result)
    fit_result["circuit_unit"] = unit
    if osc_freq is not None:
        fit_result["osc_freq"] = osc_freq
    if unit == "dt":
        fit_result["dt"] = conversion_factor
    quality = self._fit_quality(
        fit_result["popt"], fit_result["popt_err"], fit_result["reduced_chisq"]
    )
    chisq = fit_result["reduced_chisq"]

    if plot:
        ax = plot_curve_fit(osc_fit_fun, fit_result, ax=ax)
        ax = plot_scatter(xdata, ydata, ax=ax)
        ax = plot_errorbar(xdata, ydata, sigma, ax=ax)
        _format_plot(ax, unit, fit_result, conversion_factor)
        figures = [ax.get_figure()]
    else:
        figures = None

    # Output unit is 'sec', regardless of the unit used in the input
    result_t2star = AnalysisResultData(
        "T2star",
        value=FitVal(fit_result["popt"][1], fit_result["popt_err"][1], "s"),
        quality=quality,
        chisq=chisq,
        extra=fit_result,
    )
    result_freq = AnalysisResultData(
        "Frequency",
        value=FitVal(fit_result["popt"][2], fit_result["popt_err"][2], "Hz"),
        quality=quality,
        chisq=chisq,
        extra=fit_result,
    )

    return [result_t2star, result_freq], figures
def _create_analysis_results(
    self,
    fit_data: curve.FitData,
    quality: str,
    **metadata,
) -> List[AnalysisResultData]:
    """Create analysis results for important fit parameters.

    Args:
        fit_data: Fit outcome.
        quality: Quality of fit outcome.

    Returns:
        List of analysis result data.
    """
    outcomes = super()._create_analysis_results(fit_data, quality, **metadata)
    num_qubits = len(self._physical_qubits)

    # Calculate EPC
    alpha = fit_data.fitval("alpha")
    scale = (2**num_qubits - 1) / (2**num_qubits)
    epc = scale * (1 - alpha)

    outcomes.append(
        AnalysisResultData(
            name="EPC",
            value=epc,
            chisq=fit_data.reduced_chisq,
            quality=quality,
            extra=metadata,
        )
    )

    # Correction for 1Q depolarizing channel if EPGs are provided
    if self.options.epg_1_qubit and num_qubits == 2:
        epc = _exclude_1q_error(
            epc=epc,
            qubits=self._physical_qubits,
            gate_counts_per_clifford=self._gate_counts_per_clifford,
            extra_analyses=self.options.epg_1_qubit,
        )
        outcomes.append(
            AnalysisResultData(
                name="EPC_corrected",
                value=epc,
                chisq=fit_data.reduced_chisq,
                quality=quality,
                extra=metadata,
            )
        )

    # Calculate EPG
    if self._gate_counts_per_clifford is not None and self.options.gate_error_ratio:
        epg_dict = _calculate_epg(
            epc=epc,
            qubits=self._physical_qubits,
            gate_error_ratio=self.options.gate_error_ratio,
            gate_counts_per_clifford=self._gate_counts_per_clifford,
        )
        if epg_dict:
            for gate, epg_val in epg_dict.items():
                outcomes.append(
                    AnalysisResultData(
                        name=f"EPG_{gate}",
                        value=epg_val,
                        chisq=fit_data.reduced_chisq,
                        quality=quality,
                        extra=metadata,
                    )
                )

    return outcomes
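A standalone numeric sketch of the depolarizing EPC relation used above, EPC = (2^n - 1)/2^n * (1 - alpha), with an illustrative fitted decay parameter:

# Standalone sketch of the EPC formula with a made-up alpha.
num_qubits = 1
alpha = 0.998                        # fitted RB decay parameter (illustrative)
scale = (2**num_qubits - 1) / (2**num_qubits)
epc = scale * (1 - alpha)
print(epc)  # 0.001 for a single qubit with alpha = 0.998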
def _extra_database_entry(self, fit_data: curve.FitData) -> List[AnalysisResultData]:
    """Calculate EPC."""
    extra_entries = []

    # Calculate EPC
    alpha = fit_data.fitval("alpha")
    scale = (2**self._num_qubits - 1) / (2**self._num_qubits)
    epc = scale * (1 - alpha)
    extra_entries.append(
        AnalysisResultData(
            name="EPC",
            value=epc,
            chisq=fit_data.reduced_chisq,
            quality=self._evaluate_quality(fit_data),
        )
    )

    # Calculate EPG
    if not self.options.gate_error_ratio:
        # we attempt to get the ratio from the backend properties
        if not self.options.error_dict:
            gate_error_ratio = RBUtils.get_error_dict_from_backend(
                backend=self._backend, qubits=self._physical_qubits
            )
        else:
            gate_error_ratio = self.options.error_dict
    else:
        gate_error_ratio = self.options.gate_error_ratio

    count_ops = []
    for meta in self._data(label="raw_data").metadata:
        count_ops += meta.get("count_ops", [])

    if len(count_ops) > 0 and gate_error_ratio is not None:
        gates_per_clifford = RBUtils.gates_per_clifford(count_ops)
        num_qubits = len(self._physical_qubits)

        if num_qubits == 1:
            epg_dict = RBUtils.calculate_1q_epg(
                epc,
                self._physical_qubits,
                gate_error_ratio,
                gates_per_clifford,
            )
        elif num_qubits == 2:
            epg_1_qubit = self.options.epg_1_qubit
            epg_dict = RBUtils.calculate_2q_epg(
                epc,
                self._physical_qubits,
                gate_error_ratio,
                gates_per_clifford,
                epg_1_qubit=epg_1_qubit,
            )
        else:
            # EPG calculation is not supported for RB on more than two qubits
            epg_dict = None

        if epg_dict:
            for qubits, gate_dict in epg_dict.items():
                for gate, value in gate_dict.items():
                    extra_entries.append(
                        AnalysisResultData(
                            f"EPG_{gate}",
                            value,
                            chisq=fit_data.reduced_chisq,
                            quality=self._evaluate_quality(fit_data),
                            device_components=[Qubit(i) for i in qubits],
                        )
                    )

    return extra_entries
def _create_analysis_results(
    self,
    fit_data: FitData,
    quality: str,
    **metadata,
) -> List[AnalysisResultData]:
    """Create analysis results for important fit parameters.

    Args:
        fit_data: Fit outcome.
        quality: Quality of fit outcome.

    Returns:
        List of analysis result data.
    """
    outcomes = []

    # Create entry for all fit parameters
    if self.options.return_fit_parameters:
        fit_parameters = AnalysisResultData(
            name=PARAMS_ENTRY_PREFIX + self.__class__.__name__,
            value=[p.nominal_value for p in fit_data.popt],
            chisq=fit_data.reduced_chisq,
            quality=quality,
            extra={
                "popt_keys": fit_data.popt_keys,
                "dof": fit_data.dof,
                "covariance_mat": fit_data.pcov,
                **metadata,
            },
        )
        outcomes.append(fit_parameters)

    # Create entries for important parameters
    for param_repr in self.options.result_parameters:
        if isinstance(param_repr, ParameterRepr):
            p_name = param_repr.name
            p_repr = param_repr.repr or param_repr.name
            unit = param_repr.unit
        else:
            p_name = param_repr
            p_repr = param_repr
            unit = None

        fit_val = fit_data.fitval(p_name)
        if unit:
            par_metadata = metadata.copy()
            par_metadata["unit"] = unit
        else:
            par_metadata = metadata

        outcome = AnalysisResultData(
            name=p_repr,
            value=fit_val,
            chisq=fit_data.reduced_chisq,
            quality=quality,
            extra=par_metadata,
        )
        outcomes.append(outcome)

    return outcomes
def _run_analysis(
    self,
    experiment_data,
    t1_guess=None,
    amplitude_guess=None,
    offset_guess=None,
    plot=True,
    ax=None,
) -> Tuple[List[AnalysisResultData], List["matplotlib.figure.Figure"]]:
    """Calculate T1.

    Args:
        experiment_data (ExperimentData): the experiment data to analyze
        t1_guess (float): Optional, an initial guess of T1
        amplitude_guess (float): Optional, an initial guess of the coefficient of the exponent
        offset_guess (float): Optional, an initial guess of the offset
        plot (bool): Generate plot of exponential fit.
        ax (AxesSubplot): Optional, axes to add figure to.

    Returns:
        The analysis result with the estimated T1

    Raises:
        AnalysisError: if the analysis fails.
    """
    data = experiment_data.data()
    unit = data[0]["metadata"]["unit"]
    conversion_factor = data[0]["metadata"].get("dt_factor", None)
    qubit = data[0]["metadata"]["qubit"]

    if conversion_factor is None:
        conversion_factor = 1 if unit == "s" else apply_prefix(1, unit)

    xdata, ydata, sigma = process_curve_data(data, lambda datum: level2_probability(datum, "1"))
    xdata *= conversion_factor

    if t1_guess is None:
        t1_guess = np.mean(xdata)
    else:
        t1_guess = t1_guess * conversion_factor
    if offset_guess is None:
        offset_guess = ydata[-1]
    if amplitude_guess is None:
        amplitude_guess = ydata[0] - offset_guess

    # Perform fit
    def fit_fun(x, a, tau, c):
        return a * np.exp(-x / tau) + c

    init = {"a": amplitude_guess, "tau": t1_guess, "c": offset_guess}
    fit_result = curve_fit(fit_fun, xdata, ydata, init, sigma=sigma)
    fit_result = dataclasses.asdict(fit_result)
    fit_result["circuit_unit"] = unit
    if unit == "dt":
        fit_result["dt"] = conversion_factor

    # Construct analysis result
    name = "T1"
    unit = "s"
    value = FitVal(fit_result["popt"][1], fit_result["popt_err"][1], unit="s")
    chisq = fit_result["reduced_chisq"]
    quality = self._fit_quality(
        fit_result["popt"], fit_result["popt_err"], fit_result["reduced_chisq"]
    )

    analysis_results = [
        AnalysisResultData(
            name,
            value,
            chisq=chisq,
            quality=quality,
            extra=fit_result,
        )
    ]

    # Generate fit plot
    figures = []
    if plot:
        ax = plot_curve_fit(fit_fun, fit_result, ax=ax, fit_uncertainty=True)
        ax = plot_errorbar(xdata, ydata, sigma, ax=ax)
        self._format_plot(ax, fit_result, qubit=qubit)
        figures.append(ax.get_figure())

    return analysis_results, figures
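A standalone sketch of the same exponential T1 model fitted to synthetic data; it uses scipy.optimize.curve_fit rather than the curve_fit helper imported by the analysis above, and all delay/decay values are made up:

# Standalone sketch: fit a * exp(-x / tau) + c to synthetic T1-like data.
import numpy as np
from scipy.optimize import curve_fit

def fit_fun(x, a, tau, c):
    return a * np.exp(-x / tau) + c

rng = np.random.default_rng(1)
delays = np.linspace(1e-6, 300e-6, 30)                       # seconds (illustrative)
probs = fit_fun(delays, 1.0, 70e-6, 0.0) + rng.normal(0, 0.02, delays.size)

popt, _ = curve_fit(
    fit_fun, delays, probs, p0=[probs[0] - probs[-1], np.mean(delays), probs[-1]]
)
print(f"fitted T1 = {popt[1] * 1e6:.1f} us")                 # close to the 70 us used above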
def _postprocess_fit(
    cls,
    state,
    metadata=None,
    target_state=None,
    rescale_positive=False,
    rescale_trace=False,
    qpt=False,
):
    """Post-process fitter data"""
    # Get eigensystem of state
    state_cls = type(state)
    evals, evecs = cls._state_eigensystem(state)

    # Rescale eigenvalues to be PSD
    rescaled_psd = False
    if rescale_positive and np.any(evals < 0):
        scaled_evals = cls._make_positive(evals)
        rescaled_psd = True
    else:
        scaled_evals = evals

    # Rescale trace
    trace = np.sqrt(len(scaled_evals)) if qpt else 1
    sum_evals = np.sum(scaled_evals)
    rescaled_trace = False
    if rescale_trace and not np.isclose(sum_evals - trace, 0, atol=1e-12):
        scaled_evals = trace * scaled_evals / sum_evals
        rescaled_trace = True

    # Compute state with rescaled eigenvalues
    state_result = AnalysisResultData("state", state, extra=metadata)
    state_result.extra["eigvals"] = scaled_evals
    if rescaled_psd or rescaled_trace:
        state = state_cls(evecs @ (scaled_evals * evecs).T.conj())
        state_result.value = state
        state_result.extra["raw_eigvals"] = evals
    if rescaled_trace:
        state_result.extra["trace"] = np.sum(scaled_evals)
        state_result.extra["raw_trace"] = sum_evals
    else:
        state_result.extra["trace"] = sum_evals

    # Results list
    analysis_results = [state_result]

    # Compute fidelity with target
    if target_state is not None:
        analysis_results.append(
            cls._fidelity_result(scaled_evals, evecs, target_state, qpt=qpt)
        )

    # Check positive
    analysis_results.append(cls._positivity_result(scaled_evals, qpt=qpt))

    # Check trace preserving
    if qpt:
        analysis_results.append(cls._tp_result(scaled_evals, evecs))

    return analysis_results