def aggregregate_measurements(
    self,
    jobs: List[IBMQJob],
    batches: List[List[QuantumCircuit]],
    multiplicities: List[int],
    **kwargs,
) -> List[Measurements]:
    """Combine samples from a circuit set that has been expanded and batched
    to obtain a set of measurements for each of the original circuits. Also
    applies readout correction after combining.

    Args:
        jobs: The submitted IBMQ jobs.
        batches: The batches of experiments submitted.
        multiplicities: The number of copies of each of the original circuits.
        kwargs: Passed to self.apply_readout_correction.

    Returns:
        A list of measurements, where each element corresponds to one of the
        circuits of the original (unexpanded) circuit set.
    """
    ibmq_circuit_counts_set = []
    for job, batch in zip(jobs, batches):
        # Fetch the job result once per job instead of once per experiment;
        # job.result() can involve network retrieval/deserialization.
        result = job.result()
        for experiment in batch:
            ibmq_circuit_counts_set.append(result.get_counts(experiment))

    measurements_set = []
    ibmq_circuit_index = 0
    for multiplicity in multiplicities:
        # Merge the counts of all copies of the same original circuit.
        combined_counts = Counts({})
        for _ in range(multiplicity):
            for bitstring, counts in ibmq_circuit_counts_set[
                ibmq_circuit_index
            ].items():
                combined_counts[bitstring] = (
                    combined_counts.get(bitstring, 0) + counts
                )
            ibmq_circuit_index += 1

        if self.readout_correction:
            # NOTE(review): kwargs is passed as a single positional dict here;
            # confirm apply_readout_correction expects a dict argument rather
            # than **kwargs forwarding.
            combined_counts = self.apply_readout_correction(combined_counts, kwargs)

        # qiskit counts object maps bitstrings in reversed order to ints, so we
        # must flip the bitstrings
        reversed_counts = {
            bitstring[::-1]: int(count)
            for bitstring, count in combined_counts.items()
        }

        measurements_set.append(Measurements.from_counts(reversed_counts))

    return measurements_set
def test_qubits_parameter(self, circuits_data):
    """Test whether the qubits parameter is handled correctly"""
    counts_noise = Counts({
        "000": 4844, "001": 4962, "100": 56, "101": 65,
        "011": 37, "010": 35, "110": 1,
    })
    # Expected noiseless outcomes and failure message for each qubit ordering.
    cases = [
        ([0, 1, 2], Counts({"000": 5000, "001": 5000}),
         "Mitigator {} did not correctly handle qubit order 0, 1, 2"),
        ([2, 1, 0], Counts({"000": 5000, "100": 5000}),
         "Mitigator {} did not correctly handle qubit order 2, 1, 0"),
        ([1, 0, 2], Counts({"000": 5000, "010": 5000}),
         "Mitigator {} did not correctly handle qubit order 1, 0, 2"),
    ]
    crm = CorrelatedReadoutMitigator(circuits_data["correlated_method_matrix"])
    lrm = LocalReadoutMitigator(circuits_data["local_method_matrices"])
    for mitigator in [crm, lrm]:
        for qubits, counts_ideal, message in cases:
            mitigated_probs = (
                mitigator.quasi_probabilities(counts_noise, qubits=qubits)
                .nearest_probability_distribution()
                .binary_probabilities()
            )
            mitigated_error = self.compare_results(counts_ideal, mitigated_probs)
            self.assertTrue(mitigated_error < 0.001, message.format(mitigator))
def test_qubits_subset_parameter(self, circuits_data):
    """Tests mitigation on a subset of the initial set of qubits."""
    counts_noise = Counts({
        "000": 4844, "001": 4962, "100": 56, "101": 65,
        "011": 37, "010": 35, "110": 1,
    })
    crm = CorrelatedReadoutMitigator(
        circuits_data["correlated_method_matrix"], qubits=[2, 4, 6])
    lrm = LocalReadoutMitigator(
        circuits_data["local_method_matrices"], qubits=[2, 4, 6])
    # Expected noiseless outcomes for each single-qubit subset.
    subset_cases = [
        ([2], Counts({"0": 5000, "1": 5000})),
        ([6], Counts({"0": 10000})),
    ]
    for mitigator in [crm, lrm]:
        for qubits, counts_ideal in subset_cases:
            mitigated_probs = (
                mitigator.quasi_probabilities(counts_noise, qubits=qubits)
                .nearest_probability_distribution()
                .binary_probabilities()
            )
            mitigated_error = self.compare_results(counts_ideal, mitigated_probs)
            self.assertTrue(
                mitigated_error < 0.001,
                "Mitigator {} did not correctly handle qubit subset".format(
                    mitigator),
            )
        # Expectation value of ZZ on qubits 2, 6 should be close to 0.
        diagonal = str2diag("ZZ")
        ideal_expectation = 0
        mitigated_expectation, _ = mitigator.expectation_value(
            counts_noise, diagonal, qubits=[2, 6])
        mitigated_error = np.abs(ideal_expectation - mitigated_expectation)
        self.assertTrue(
            mitigated_error < 0.1,
            "Mitigator {} did not improve circuit expectation".format(mitigator),
        )
def test_save_probabilities_dict(self, qubits):
    """Test save probabilities dict instruction"""
    # Simulation methods that implement save_probabilities_dict.
    supported_methods = (
        'automatic', 'statevector', 'statevector_gpu', 'statevector_thrust',
        'density_matrix', 'density_matrix_gpu', 'density_matrix_thrust',
        'matrix_product_state', 'stabilizer',
    )
    circ = QuantumCircuit(3)
    circ.x(0)
    circ.h(1)
    circ.cx(1, 2)
    # Target probabilities from the ideal statevector
    target = qi.Statevector(circ).probabilities_dict(qubits)
    # Snapshot circuit
    label = 'probs'
    opts = self.BACKEND_OPTS.copy()
    circ = transpile(circ, self.SIMULATOR)
    circ.save_probabilities_dict(qubits, label)
    qobj = assemble(circ)
    result = self.SIMULATOR.run(qobj, **opts).result()
    method = opts.get('method', 'automatic')
    if method in supported_methods:
        self.assertTrue(result.success)
        value = Counts(result.data(0)[label], memory_slots=len(qubits))
        self.assertDictAlmostEqual(value, target)
    else:
        self.assertFalse(result.success)
def simulate_circuit(circuit, assignment_matrix, num_qubits, shots=1024):
    """Simulates the given circuit under the given readout noise"""
    # Ideal distribution, then mix in readout noise via the assignment matrix.
    ideal_probs = Statevector.from_instruction(circuit).probabilities()
    noisy_probs = assignment_matrix @ ideal_probs
    # One bitstring label per basis state, zero-padded to num_qubits bits.
    labels = [format(i, "0{}b".format(num_qubits)) for i in range(2**num_qubits)]
    samples = TestReadoutMitigation.rng.choice(labels, size=shots, p=noisy_probs)
    return Counts(dict(Counter(samples)))
def test_expectation_value_endian(self):
    """Test that endian for expval is little."""
    counts = Counts({"10": 3, "11": 24, "00": 74, "01": 923})
    # With little-endian convention, "IZ" on qubits [0, 1] measures qubit 0.
    for mitigator in self.mitigators(self.assignment_matrices()):
        expval, _ = mitigator.expectation_value(counts, diagonal="IZ", qubits=[0, 1])
        self.assertAlmostEqual(expval, -1.0, places=0)
def test_qubits_parameter(self):
    """Test whether the qubits parameter is handled correctly"""
    shots = 10000
    assignment_matrices = self.assignment_matrices()
    mitigators = self.mitigators(assignment_matrices)
    circuit, _, _ = self.first_qubit_h_3_circuit()
    counts_ideal, counts_noise, _ = self.counts_data(circuit, assignment_matrices, shots)
    # Expected ideal outcomes and failure message for each qubit ordering.
    cases = [
        ([0, 1, 2], counts_ideal,
         "Mitigator {} did not correctly handle qubit order 0, 1, 2"),
        ([2, 1, 0], Counts({"000": counts_ideal["000"], "100": counts_ideal["001"]}),
         "Mitigator {} did not correctly handle qubit order 2, 1, 0"),
        ([1, 0, 2], Counts({"000": counts_ideal["000"], "010": counts_ideal["001"]}),
         "Mitigator {} did not correctly handle qubit order 1, 0, 2"),
    ]
    for mitigator in mitigators:
        for qubits, expected, message in cases:
            mitigated_probs = (
                mitigator.quasi_probabilities(counts_noise, qubits=qubits)
                .nearest_probability_distribution()
                .binary_probabilities(num_bits=3)
            )
            mitigated_error = self.compare_results(expected, mitigated_probs)
            self.assertLess(mitigated_error, 0.001, message.format(mitigator))
def test_expectation_improvement(self, circuits_data):
    """Test whether readout mitigation led to more accurate results and that its standard deviation is increased"""
    crm = CorrelatedReadoutMitigator(circuits_data["correlated_method_matrix"])
    lrm = LocalReadoutMitigator(circuits_data["local_method_matrices"])
    mitigators = [crm, lrm]
    num_qubits = circuits_data["num_qubits"]
    # Mix of array and string diagonals to exercise both input forms.
    diagonals = [
        z_diagonal(2**num_qubits),
        "IZ0",
        "ZZZ",
        "101",
        "IZI",
    ]
    qubit_index = {q: q for q in range(num_qubits)}
    for circuit_name, circuit_data in circuits_data["circuits"].items():
        counts_ideal = Counts(circuit_data["counts_ideal"])
        counts_noise = Counts(circuit_data["counts_noise"])
        probs_ideal, _ = counts_probability_vector(counts_ideal, qubit_index=qubit_index)
        probs_noise, _ = counts_probability_vector(counts_noise, qubit_index=qubit_index)
        for diagonal in diagonals:
            if isinstance(diagonal, str):
                diagonal = str2diag(diagonal)
            unmitigated_expectation, unmitigated_stddev = expval_with_stddev(
                diagonal, probs_noise, shots=counts_noise.shots()
            )
            ideal_expectation = np.dot(probs_ideal, diagonal)
            unmitigated_error = np.abs(ideal_expectation - unmitigated_expectation)
            for mitigator in mitigators:
                mitigated_expectation, mitigated_stddev = mitigator.expectation_value(
                    counts_noise, diagonal
                )
                mitigated_error = np.abs(ideal_expectation - mitigated_expectation)
                self.assertTrue(
                    mitigated_error < unmitigated_error,
                    "Mitigator {} did not improve circuit {} measurements".format(
                        mitigator, circuit_name
                    ),
                )
                self.assertTrue(
                    mitigated_stddev >= unmitigated_stddev,
                    "Mitigator {} did not increase circuit {} the standard deviation".format(
                        mitigator, circuit_name
                    ),
                )
def _fitter_data(
        data: List[Dict[str, any]]
) -> Tuple[List[np.ndarray], np.ndarray, np.ndarray, np.ndarray]:
    """Convert tomography experiment data into fitter input arrays.

    Args:
        data: list of result dicts, each with a ``counts`` dict and a
            ``metadata`` dict containing ``m_idx``, ``clbits`` and
            optionally ``p_idx``.

    Returns:
        Tuple ``(outcome_data, shot_data, measurement_data, preparation_data)``
        where ``outcome_data`` is a list of integer arrays of
        ``(outcome, frequency)`` rows, ``shot_data`` holds the total shots
        accumulated per basis element, and ``measurement_data`` /
        ``preparation_data`` hold the basis element index tuples.
    """
    outcome_dict = {}
    meas_size = None
    prep_size = None
    for datum in data:
        # Get basis data
        metadata = datum["metadata"]
        meas_element = tuple(metadata["m_idx"])
        # Preparation index is optional (absent for state tomography data).
        prep_element = tuple(
            metadata["p_idx"]) if "p_idx" in metadata else tuple()
        # Basis sizes are taken from the first datum; assumes all data
        # share the same element sizes — TODO confirm upstream guarantees this.
        if meas_size is None:
            meas_size = len(meas_element)
        if prep_size is None:
            prep_size = len(prep_element)
        # Add outcomes, marginalized to the measured clbits and keyed by
        # integer outcome value.
        counts = Counts(
            marginal_counts(datum["counts"], metadata["clbits"])).int_outcomes()
        basis_key = (meas_element, prep_element)
        if basis_key in outcome_dict:
            # Merge counts from repeated occurrences of the same basis element.
            TomographyAnalysis._append_counts(outcome_dict[basis_key], counts)
        else:
            outcome_dict[basis_key] = counts
    num_basis = len(outcome_dict)
    measurement_data = np.zeros((num_basis, meas_size), dtype=int)
    preparation_data = np.zeros((num_basis, prep_size), dtype=int)
    shot_data = np.zeros(num_basis, dtype=int)
    outcome_data = []
    for i, (basis_key, counts) in enumerate(outcome_dict.items()):
        measurement_data[i] = basis_key[0]
        preparation_data[i] = basis_key[1]
        # Rows of (integer outcome, frequency); shot totals accumulate per basis.
        outcome_arr = np.zeros((len(counts), 2), dtype=int)
        for j, (outcome, freq) in enumerate(counts.items()):
            outcome_arr[j] = [outcome, freq]
            shot_data[i] += freq
        outcome_data.append(outcome_arr)
    return outcome_data, shot_data, measurement_data, preparation_data
def test_repeated_qubits_parameter(self, circuits_data):
    """Tests the order of mitigated qubits."""
    counts_ideal_012 = Counts({"000": 5000, "001": 5000})
    counts_ideal_210 = Counts({"000": 5000, "100": 5000})
    counts_noise = Counts({
        "000": 4844, "001": 4962, "100": 56, "101": 65,
        "011": 37, "010": 35, "110": 1,
    })
    crm = CorrelatedReadoutMitigator(
        circuits_data["correlated_method_matrix"], qubits=[0, 1, 2])
    lrm = LocalReadoutMitigator(
        circuits_data["local_method_matrices"], qubits=[0, 1, 2])
    for mitigator in [crm, lrm]:
        probs_210 = (
            mitigator.quasi_probabilities(counts_noise, qubits=[2, 1, 0])
            .nearest_probability_distribution()
            .binary_probabilities()
        )
        error_210 = self.compare_results(counts_ideal_210, probs_210)
        self.assertTrue(
            error_210 < 0.001,
            "Mitigator {} did not correctly handle qubit order 2,1,0".format(
                mitigator),
        )
        # checking qubit order 2,1,0 should not "overwrite" the default 0,1,2
        probs_012 = (
            mitigator.quasi_probabilities(counts_noise)
            .nearest_probability_distribution()
            .binary_probabilities()
        )
        error_012 = self.compare_results(counts_ideal_012, probs_012)
        self.assertTrue(
            error_012 < 0.001,
            "Mitigator {} did not correctly handle qubit order 0,1,2 (the expected default)"
            .format(mitigator),
        )
def test_mitigation_improvement(self, circuits_data):
    """Test whether readout mitigation led to more accurate results"""
    crm = CorrelatedReadoutMitigator(circuits_data["correlated_method_matrix"])
    lrm = LocalReadoutMitigator(circuits_data["local_method_matrices"])
    shots = circuits_data["shots"]
    for circuit_name, circuit_data in circuits_data["circuits"].items():
        counts_ideal = Counts(circuit_data["counts_ideal"])
        counts_noise = Counts(circuit_data["counts_noise"])
        probs_noise = {
            key: value / shots for key, value in counts_noise.items()
        }
        unmitigated_error = self.compare_results(counts_ideal, counts_noise)
        # TODO: verify mitigated stddev is larger
        unmitigated_stddev = stddev(probs_noise, shots)
        for mitigator in [crm, lrm]:
            quasi_probs = mitigator.quasi_probabilities(counts_noise)
            mitigated_probs = (
                quasi_probs.nearest_probability_distribution()
                .binary_probabilities()
            )
            mitigated_error = self.compare_results(counts_ideal, mitigated_probs)
            self.assertTrue(
                mitigated_error < unmitigated_error * 0.8,
                "Mitigator {} did not improve circuit {} measurements".format(
                    mitigator, circuit_name
                ),
            )
            stddev_upper_bound = quasi_probs._stddev_upper_bound
            max_unmitigated_stddev = max(unmitigated_stddev.values())
            self.assertTrue(
                stddev_upper_bound >= max_unmitigated_stddev,
                "Mitigator {} on circuit {} gave stddev upper bound {} "
                "while unmitigated stddev maximum is {}".format(
                    mitigator,
                    circuit_name,
                    stddev_upper_bound,
                    max_unmitigated_stddev,
                ),
            )
def counts_probability_vector(
        counts: Counts,
        qubits: Optional[List[int]] = None,
        clbits: Optional[List[int]] = None,
        num_qubits: Optional[int] = None,
        return_shots: bool = False) -> np.ndarray:
    """Compute a probability vector from a counts dictionary.

    Args:
        counts: counts object.
        qubits: qubits the count bitstrings correspond to.
        clbits: Optional, marginalize counts to just these bits.
        num_qubits: the total number of qubits. If ``None`` it is inferred
            from the length of the first count bitstring.
        return_shots: if True, also return the total number of shots.

    Raises:
        QiskitError: if ``qubits`` is given and its length does not match
            ``num_qubits``.

    Returns:
        np.ndarray: a probability vector for all count outcomes, or the
        tuple ``(vector, shots)`` when ``return_shots`` is True.
    """
    # Marginalize counts over the requested classical bits first.
    if clbits is not None:
        counts = marginal_counts(counts, meas_qubits=clbits)

    # Infer the total number of qubits from the first bitstring key.
    if num_qubits is None:
        num_qubits = len(next(iter(counts)))

    # Accumulate shot totals and the normalized frequency of each outcome.
    vec = np.zeros(2**num_qubits, dtype=float)
    shots = 0
    for key, val in counts.items():
        shots += val
        vec[int(key, 2)] = val
    vec /= shots

    # Remap qubits: permute the tensor axes of the length-2^n vector so the
    # entry ordering matches the requested physical-qubit ordering.
    if qubits is not None:
        if len(qubits) != num_qubits:
            raise QiskitError("Num qubits does not match vector length.")
        axes = [num_qubits - 1 - i for i in reversed(np.argsort(qubits))]
        vec = np.reshape(vec, num_qubits * [2]).transpose(axes).reshape(vec.shape)

    if return_shots:
        return vec, shots
    return vec
def _test_save_probabilities_dict(self, qubits, **options):
    """Test save probabilities dict instruction"""
    backend = self.backend(**options)
    circ = QuantumCircuit(3)
    circ.x(0)
    circ.h(1)
    circ.cx(1, 2)
    # Target probabilities from the ideal statevector
    target = qi.Statevector(circ).probabilities_dict(qubits)
    # Attach the save instruction and run a single shot.
    label = 'probs'
    circ.save_probabilities_dict(qubits, label=label)
    tcirc = transpile(circ, backend, optimization_level=0)
    result = backend.run(tcirc, shots=1).result()
    self.assertTrue(result.success)
    simdata = result.data(0)
    self.assertIn(label, simdata)
    value = Counts(result.data(0)[label], memory_slots=len(qubits))
    self.assertDictAlmostEqual(value, target)
def test_repeated_qubits_parameter(self):
    """Tests the order of mitigated qubits."""
    shots = 10000
    assignment_matrices = self.assignment_matrices()
    mitigators = self.mitigators(assignment_matrices, qubits=[0, 1, 2])
    circuit, _, _ = self.first_qubit_h_3_circuit()
    counts_ideal, counts_noise, _ = self.counts_data(circuit, assignment_matrices, shots)
    ideal_210 = Counts({"000": counts_ideal["000"], "100": counts_ideal["001"]})
    for mitigator in mitigators:
        probs_210 = (
            mitigator.quasi_probabilities(counts_noise, qubits=[2, 1, 0])
            .nearest_probability_distribution()
            .binary_probabilities(num_bits=3)
        )
        self.assertLess(
            self.compare_results(ideal_210, probs_210),
            0.001,
            "Mitigator {} did not correctly handle qubit order 2,1,0".format(mitigator),
        )
        # checking qubit order 2,1,0 should not "overwrite" the default 0,1,2
        probs_012 = (
            mitigator.quasi_probabilities(counts_noise)
            .nearest_probability_distribution()
            .binary_probabilities(num_bits=3)
        )
        self.assertLess(
            self.compare_results(counts_ideal, probs_012),
            0.001,
            "Mitigator {} did not correctly handle qubit order 0,1,2 (the expected default)".format(
                mitigator
            ),
        )
def expectation_value(
    counts: Counts,
    diagonal: Optional[np.ndarray] = None,
    qubits: Optional[List[int]] = None,
    clbits: Optional[List[int]] = None,
    meas_mitigator: Optional = None,
) -> Tuple[float, float]:
    r"""Compute the expectation value of a diagonal operator from counts.

    This computes the estimator of
    :math:`\langle O \rangle = \mbox{Tr}[\rho. O]`,
    optionally with measurement error mitigation, of a diagonal observable
    :math:`O = \sum_{x\in\{0, 1\}^n} O(x)|x\rangle\!\langle x|`.

    Args:
        counts: counts object.
        diagonal: Optional, the vector of diagonal values for summing the
            expectation value. If ``None`` the default value is
            :math:`[1, -1]^\otimes n`.
        qubits: Optional, the measured physical qubits the count bitstrings
            correspond to. If None qubits are assumed to be
            :math:`[0, ..., n-1]`.
        clbits: Optional, if not None marginalize counts to the specified
            bits.
        meas_mitigator: Optional, a measurement mitigator to apply mitigation.

    Returns:
        (float, float): the expectation value and standard deviation.

    Additional Information:
        The diagonal observable :math:`O` is input using the ``diagonal``
        kwarg as a list or Numpy array :math:`[O(0), ..., O(2^n - 1)]`. If no
        diagonal is specified the diagonal of the Pauli operator
        :math:`O = \mbox{diag}(Z^{\otimes n}) = [1, -1]^{\otimes n}` is used.

        The ``clbits`` kwarg is used to marginalize the input counts
        dictionary over the specified bit-values, and the ``qubits`` kwarg is
        used to specify which physical qubits these bit-values correspond to
        as ``circuit.measure(qubits, clbits)``.

        For calibrating a expval measurement error mitigator for the
        ``meas_mitigator`` kwarg see
        :func:`qiskit.ignis.mitigation.expval_meas_mitigator_circuits` and
        :class:`qiskit.ignis.mitigation.ExpvalMeasMitigatorFitter`.
    """
    if meas_mitigator is not None:
        # Delegate fully to the mitigator's own expectation-value routine.
        return meas_mitigator.expectation_value(
            counts, diagonal=diagonal, clbits=clbits, qubits=qubits)

    # Marginalize counts over the requested classical bits.
    if clbits is not None:
        counts = marginal_counts(counts, meas_qubits=clbits)

    # Convert raw counts into outcome probabilities.
    frequencies = np.array(list(counts.values()))
    shots = frequencies.sum()
    probs = frequencies / shots

    # Build the diagonal-operator coefficient for each observed outcome.
    if diagonal is None:
        # Default Z^n observable: coefficient is +/-1 by bitstring parity.
        coeffs = np.array(
            [(-1)**(key.count('1') % 2) for key in counts.keys()],
            dtype=probs.dtype)
    else:
        diagonal = np.asarray(diagonal)
        outcome_indices = [int(key, 2) for key in counts.keys()]
        coeffs = np.asarray(diagonal[outcome_indices], dtype=probs.dtype)

    return _expval_with_stddev(coeffs, probs, shots)
def _fitter_data(
    data: List[Dict[str, any]],
    measurement_basis: Optional[MeasurementBasis] = None,
    measurement_qubits: Optional[Tuple[int, ...]] = None,
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
    """Return a tuple of outcome, shot, measurement and preparation data.

    Args:
        data: list of result dicts, each with a ``counts`` dict and a
            ``metadata`` dict containing ``clbits`` and optionally the
            ``m_idx`` / ``p_idx`` basis element indices.
        measurement_basis: Optional, measurement basis used to determine the
            number of outcomes per qubit. If ``None`` a 2-outcome-per-qubit
            measurement is assumed.
        measurement_qubits: Optional, the qubits the measurement basis acts
            on. Defaults to ``tuple(range(meas_size))``.

    Raises:
        AnalysisError: if the number of specified measurement qubits does
            not match the basis element size in the data.

    Returns:
        Tuple ``(outcome_data, shot_data, measurement_data, preparation_data)``.
    """
    meas_size = None
    prep_size = None

    # Construct marginalized tomography count dicts, merging repeated basis
    # elements and accumulating their shot totals.
    outcome_dict = {}
    shots_dict = {}
    for datum in data:
        # Get basis data; both basis indices are optional in the metadata.
        metadata = datum["metadata"]
        meas_element = tuple(
            metadata["m_idx"]) if "m_idx" in metadata else tuple()
        prep_element = tuple(
            metadata["p_idx"]) if "p_idx" in metadata else tuple()
        if meas_size is None:
            meas_size = len(meas_element)
        if prep_size is None:
            prep_size = len(prep_element)

        # Add outcomes, marginalized to the measured clbits.
        counts = Counts(
            marginal_counts(datum["counts"], metadata["clbits"]))
        shots = datum.get("shots", sum(counts.values()))
        basis_key = (meas_element, prep_element)
        if basis_key in outcome_dict:
            TomographyAnalysis._append_counts(outcome_dict[basis_key], counts)
            shots_dict[basis_key] += shots
        else:
            outcome_dict[basis_key] = counts
            shots_dict[basis_key] = shots

    # Construct a function for converting count outcome dit-strings into
    # integers based on the specified number of outcomes of the measurement
    # bases on each qubit.
    if meas_size == 0:
        # Trivial case with no measurement: a single outcome column.
        # BUGFIX: the only valid column index of the (num_basis, 1) outcome
        # array is 0 — the previous value of 1 raised IndexError.
        num_outcomes = 1

        def outcome_func(_):
            return 0

    elif measurement_basis is None:
        # If no basis is provided assume an N-qubit 2-outcome measurement.
        num_outcomes = 2**meas_size

        def outcome_func(outcome):
            return int(outcome, 2)

    else:
        # General measurement basis case for arbitrary outcome measurements.
        if measurement_qubits is None:
            measurement_qubits = tuple(range(meas_size))
        elif len(measurement_qubits) != meas_size:
            raise AnalysisError(
                "Specified number of measurement qubits does not match data."
            )
        outcome_shape = measurement_basis.outcome_shape(measurement_qubits)
        num_outcomes = np.prod(outcome_shape)
        outcome_func = _int_outcome_function(outcome_shape)

    num_basis = len(outcome_dict)
    measurement_data = np.zeros((num_basis, meas_size), dtype=int)
    preparation_data = np.zeros((num_basis, prep_size), dtype=int)
    shot_data = np.zeros(num_basis, dtype=int)
    outcome_data = np.zeros((num_basis, num_outcomes), dtype=int)
    for i, (basis_key, counts) in enumerate(outcome_dict.items()):
        measurement_data[i] = basis_key[0]
        preparation_data[i] = basis_key[1]
        shot_data[i] = shots_dict[basis_key]
        for outcome, freq in counts.items():
            outcome_data[i][outcome_func(outcome)] = freq
    return outcome_data, shot_data, measurement_data, preparation_data