Example #1
    def test_error_handling(self):
        """Test that the assignment matrices are valid."""
        bad_matrix_A = np.array([[-0.3, 1], [1.3, 0]])  # negative indices
        bad_matrix_B = np.array([[0.2, 1], [0.7, 0]])  # columns not summing to 1
        good_matrix_A = np.array([[0.2, 1], [0.8, 0]])
        for bad_matrix in [bad_matrix_A, bad_matrix_B]:
            with self.assertRaises(QiskitError) as cm:
                CorrelatedReadoutMitigator(bad_matrix)
            self.assertEqual(
                cm.exception.message,
                "Assignment matrix columns must be valid probability distributions",
            )

        with self.assertRaises(QiskitError) as cm:
            amats = [good_matrix_A, bad_matrix_A]
            LocalReadoutMitigator(amats)
        self.assertEqual(
            cm.exception.message,
            "Assignment matrix columns must be valid probability distributions",
        )

        with self.assertRaises(QiskitError) as cm:
            amats = [bad_matrix_B, good_matrix_A]
            LocalReadoutMitigator(amats)
        self.assertEqual(
            cm.exception.message,
            "Assignment matrix columns must be valid probability distributions",
        )
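
As context for the check exercised here: each column of an assignment matrix holds the probabilities of every measured outcome for one prepared basis state, so every column must be non-negative and sum to 1. Below is a minimal sketch of a matrix that passes this validation, assuming CorrelatedReadoutMitigator is imported from qiskit.result and using purely illustrative error rates:

import numpy as np
from qiskit.result import CorrelatedReadoutMitigator

# Illustrative single-qubit assignment matrix: entry [i, j] is the probability
# of reading outcome i when basis state j was prepared. Both columns are
# non-negative and sum to 1, so construction succeeds without a QiskitError.
valid_amat = np.array([[0.95, 0.10],
                       [0.05, 0.90]])
mitigator = CorrelatedReadoutMitigator(valid_amat)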
Example #2
def mitigators(assignment_matrices, qubits=None):
    """Generates the mitigators to test for given assignment matrices"""
    full_assignment_matrix = assignment_matrices[0]
    for m in assignment_matrices[1:]:
        full_assignment_matrix = np.kron(full_assignment_matrix, m)
    CRM = CorrelatedReadoutMitigator(full_assignment_matrix, qubits)
    LRM = LocalReadoutMitigator(assignment_matrices, qubits)
    mitigators = [CRM, LRM]
    return mitigators
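
A possible way to call this helper, with made-up per-qubit 2x2 assignment matrices; the left-to-right Kronecker-product ordering used above is assumed to match the qubit convention expected by both mitigator classes:

import numpy as np

# Hypothetical per-qubit assignment matrices for a two-qubit device.
amat_q0 = np.array([[0.95, 0.10], [0.05, 0.90]])
amat_q1 = np.array([[0.98, 0.20], [0.02, 0.80]])

# Returns [CorrelatedReadoutMitigator, LocalReadoutMitigator] for these matrices.
correlated_mitigator, local_mitigator = mitigators([amat_q0, amat_q1], qubits=[0, 1])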
Example #3
    def test_qubits_parameter(self, circuits_data):
        """Test whether the qubits parameter is handled correctly"""
        counts_ideal_012 = Counts({"000": 5000, "001": 5000})
        counts_ideal_210 = Counts({"000": 5000, "100": 5000})
        counts_ideal_102 = Counts({"000": 5000, "010": 5000})
        counts_noise = Counts({
            "000": 4844,
            "001": 4962,
            "100": 56,
            "101": 65,
            "011": 37,
            "010": 35,
            "110": 1
        })
        CRM = CorrelatedReadoutMitigator(
            circuits_data["correlated_method_matrix"])
        LRM = LocalReadoutMitigator(circuits_data["local_method_matrices"])
        mitigators = [CRM, LRM]
        for mitigator in mitigators:
            mitigated_probs_012 = (mitigator.quasi_probabilities(
                counts_noise, qubits=[
                    0, 1, 2
                ]).nearest_probability_distribution().binary_probabilities())
            mitigated_error = self.compare_results(counts_ideal_012,
                                                   mitigated_probs_012)
            self.assertTrue(
                mitigated_error < 0.001,
                "Mitigator {} did not correctly handle qubit order 0, 1, 2".
                format(mitigator),
            )

            mitigated_probs_210 = (mitigator.quasi_probabilities(
                counts_noise, qubits=[
                    2, 1, 0
                ]).nearest_probability_distribution().binary_probabilities())
            mitigated_error = self.compare_results(counts_ideal_210,
                                                   mitigated_probs_210)
            self.assertTrue(
                mitigated_error < 0.001,
                "Mitigator {} did not correctly handle qubit order 2, 1, 0".
                format(mitigator),
            )

            mitigated_probs_102 = (mitigator.quasi_probabilities(
                counts_noise, qubits=[
                    1, 0, 2
                ]).nearest_probability_distribution().binary_probabilities())
            mitigated_error = self.compare_results(counts_ideal_102,
                                                   mitigated_probs_102)
            self.assertTrue(
                mitigated_error < 0.001,
                "Mitigator {} did not correctly handle qubit order 1, 0, 2".
                format(mitigator),
            )
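
Outside of the test class, the same reordering pattern might look like the sketch below; the matrices and counts are illustrative, and Counts and LocalReadoutMitigator are assumed to come from qiskit.result:

import numpy as np
from qiskit.result import Counts, LocalReadoutMitigator

# Illustrative per-qubit assignment matrices for a three-qubit device.
amats = [np.array([[0.95, 0.10], [0.05, 0.90]]) for _ in range(3)]
mitigator = LocalReadoutMitigator(amats)

counts = Counts({"000": 4900, "001": 5100})
# Passing qubits=[2, 1, 0] changes which qubit each bit of the count keys is
# associated with (reversed here) before the correction is applied.
quasi_dist = mitigator.quasi_probabilities(counts, qubits=[2, 1, 0])
probs = quasi_dist.nearest_probability_distribution().binary_probabilities()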
Example #4
    def test_qubits_subset_parameter(self, circuits_data):
        """Tests mitigation on a subset of the initial set of qubits."""
        counts_ideal_2 = Counts({"0": 5000, "1": 5000})
        counts_ideal_6 = Counts({"0": 10000})

        counts_noise = Counts({
            "000": 4844,
            "001": 4962,
            "100": 56,
            "101": 65,
            "011": 37,
            "010": 35,
            "110": 1
        })
        CRM = CorrelatedReadoutMitigator(
            circuits_data["correlated_method_matrix"], qubits=[2, 4, 6])
        LRM = LocalReadoutMitigator(circuits_data["local_method_matrices"],
                                    qubits=[2, 4, 6])
        mitigators = [CRM, LRM]
        for mitigator in mitigators:
            mitigated_probs_2 = (mitigator.quasi_probabilities(
                counts_noise, qubits=[
                    2
                ]).nearest_probability_distribution().binary_probabilities())
            mitigated_error = self.compare_results(counts_ideal_2,
                                                   mitigated_probs_2)
            self.assertTrue(
                mitigated_error < 0.001,
                "Mitigator {} did not correctly handle qubit subset".format(
                    mitigator),
            )

            mitigated_probs_6 = (mitigator.quasi_probabilities(
                counts_noise, qubits=[
                    6
                ]).nearest_probability_distribution().binary_probabilities())
            mitigated_error = self.compare_results(counts_ideal_6,
                                                   mitigated_probs_6)
            self.assertTrue(
                mitigated_error < 0.001,
                "Mitigator {} did not correctly handle qubit subset".format(
                    mitigator),
            )
            diagonal = str2diag("ZZ")
            ideal_expectation = 0
            mitigated_expectation, _ = mitigator.expectation_value(
                counts_noise, diagonal, qubits=[2, 6])
            mitigated_error = np.abs(ideal_expectation - mitigated_expectation)
            self.assertTrue(
                mitigated_error < 0.1,
                "Mitigator {} did not improve circuit expectation".format(
                    mitigator),
            )
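
The subset-plus-diagonal pattern used above can also be written without the internal str2diag helper by spelling out the ZZ eigenvalues directly; a sketch with illustrative matrices, assuming the expectation_value signature shown in this test:

import numpy as np
from qiskit.result import Counts, LocalReadoutMitigator

# Illustrative per-qubit assignment matrices for qubits 2, 4 and 6.
amats = [np.array([[0.97, 0.08], [0.03, 0.92]]) for _ in range(3)]
mitigator = LocalReadoutMitigator(amats, qubits=[2, 4, 6])

counts = Counts({"000": 4900, "001": 5100})
# Diagonal of Z tensor Z written out explicitly: +1 for even parity, -1 for odd.
zz_diagonal = np.array([1, -1, -1, 1])
expval, stddev = mitigator.expectation_value(counts, zz_diagonal, qubits=[2, 6])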
Example #5
    def _run_analysis(
        self, experiment_data: ExperimentData, **options
    ) -> Tuple[List[AnalysisResultData], List["matplotlib.figure.Figure"]]:
        data = experiment_data.data()
        qubits = experiment_data.metadata["physical_qubits"]
        labels = [datum["metadata"]["label"] for datum in data]
        matrix = self._generate_matrix(data, labels)
        result_mitigator = CorrelatedReadoutMitigator(matrix, qubits=qubits)
        analysis_results = [
            AnalysisResultData("Correlated Readout Mitigator", result_mitigator)
        ]
        ax = options.get("ax", None)
        figures = [self._plot_calibration(matrix, labels, ax)]
        return analysis_results, figures
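
This _run_analysis is the analysis step of the correlated readout-error experiment in qiskit-experiments. A hedged sketch of how the stored mitigator is typically retrieved afterwards; the experiment class name and the `backend` placeholder are assumptions to check against your installed version, while the result name comes from the code above:

from qiskit_experiments.library import CorrelatedReadoutError

# `backend` is a placeholder for a real Backend instance; it is not defined here.
exp = CorrelatedReadoutError(physical_qubits=(0, 1), backend=backend)
expdata = exp.run().block_for_results()

# The analysis stores the fitted mitigator under the name used above.
mitigator = expdata.analysis_results("Correlated Readout Mitigator").value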
Example #6
    def test_expectation_improvement(self, circuits_data):
        """Test that readout mitigation yields more accurate expectation values
        and that the reported standard deviation increases"""
        CRM = CorrelatedReadoutMitigator(
            circuits_data["correlated_method_matrix"])
        LRM = LocalReadoutMitigator(circuits_data["local_method_matrices"])
        num_qubits = circuits_data["num_qubits"]
        diagonals = []
        diagonals.append(z_diagonal(2**num_qubits))
        diagonals.append("IZ0")
        diagonals.append("ZZZ")
        diagonals.append("101")
        diagonals.append("IZI")
        mitigators = [CRM, LRM]
        qubit_index = {i: i for i in range(num_qubits)}
        for circuit_name, circuit_data in circuits_data["circuits"].items():
            counts_ideal = Counts(circuit_data["counts_ideal"])
            counts_noise = Counts(circuit_data["counts_noise"])
            probs_ideal, _ = counts_probability_vector(counts_ideal, qubit_index=qubit_index)
            probs_noise, _ = counts_probability_vector(counts_noise, qubit_index=qubit_index)
            for diagonal in diagonals:
                if isinstance(diagonal, str):
                    diagonal = str2diag(diagonal)
                unmitigated_expectation, unmitigated_stddev = expval_with_stddev(
                    diagonal, probs_noise, shots=counts_noise.shots())
                ideal_expectation = np.dot(probs_ideal, diagonal)
                unmitigated_error = np.abs(ideal_expectation - unmitigated_expectation)
                for mitigator in mitigators:
                    mitigated_expectation, mitigated_stddev = mitigator.expectation_value(
                        counts_noise, diagonal)
                    mitigated_error = np.abs(ideal_expectation - mitigated_expectation)
                    self.assertTrue(
                        mitigated_error < unmitigated_error,
                        "Mitigator {} did not improve circuit {} measurements"
                        .format(mitigator, circuit_name),
                    )
                    self.assertTrue(
                        mitigated_stddev >= unmitigated_stddev,
                        "Mitigator {} did not increase the standard deviation of circuit {}"
                        .format(mitigator, circuit_name),
                    )
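
The diagonals above are eigenvalue vectors of tensor products of I/Z operators (plus 0/1 projectors). Below is a self-contained stand-in for the internal z_diagonal and str2diag helpers, with the caveat that the left-to-right Kronecker ordering chosen here is an assumption and may need reversing to match a given bit-ordering convention:

from functools import reduce
import numpy as np

# Single-qubit diagonals: I and Z eigenvalues, plus the |0>/|1> projector diagonals.
SINGLE_QUBIT_DIAGS = {
    "I": np.array([1, 1]),
    "Z": np.array([1, -1]),
    "0": np.array([1, 0]),
    "1": np.array([0, 1]),
}

def label_to_diagonal(label):
    """Diagonal of a tensor product described by a label such as "IZ0" or "ZZZ"."""
    return reduce(np.kron, (SINGLE_QUBIT_DIAGS[c] for c in label))

zzz = label_to_diagonal("ZZZ")  # length-8 parity vector, same role as z_diagonal(2**3)
iz0 = label_to_diagonal("IZ0")  # mixes a Z term with a |0> projector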
Example #7
    def test_repeated_qubits_parameter(self, circuits_data):
        """Tests the order of mitigated qubits."""
        counts_ideal_012 = Counts({"000": 5000, "001": 5000})
        counts_ideal_210 = Counts({"000": 5000, "100": 5000})
        counts_noise = Counts({
            "000": 4844,
            "001": 4962,
            "100": 56,
            "101": 65,
            "011": 37,
            "010": 35,
            "110": 1
        })
        CRM = CorrelatedReadoutMitigator(
            circuits_data["correlated_method_matrix"], qubits=[0, 1, 2])
        LRM = LocalReadoutMitigator(circuits_data["local_method_matrices"],
                                    qubits=[0, 1, 2])
        mitigators = [CRM, LRM]
        for mitigator in mitigators:
            mitigated_probs_210 = (mitigator.quasi_probabilities(
                counts_noise, qubits=[
                    2, 1, 0
                ]).nearest_probability_distribution().binary_probabilities())
            mitigated_error = self.compare_results(counts_ideal_210,
                                                   mitigated_probs_210)
            self.assertTrue(
                mitigated_error < 0.001,
                "Mitigator {} did not correctly handle qubit order 2,1,0".
                format(mitigator),
            )

            # checking qubit order 2,1,0 should not "overwrite" the default 0,1,2
            mitigated_probs_012 = (
                mitigator.quasi_probabilities(counts_noise).
                nearest_probability_distribution().binary_probabilities())
            mitigated_error = self.compare_results(counts_ideal_012,
                                                   mitigated_probs_012)
            self.assertTrue(
                mitigated_error < 0.001,
                "Mitigator {} did not correctly handle qubit order 0,1,2 (the expected default)"
                .format(mitigator),
            )
Example #8
    def test_mitigation_improvement(self, circuits_data):
        """Test whether readout mitigation led to more accurate results"""
        CRM = CorrelatedReadoutMitigator(
            circuits_data["correlated_method_matrix"])
        LRM = LocalReadoutMitigator(circuits_data["local_method_matrices"])
        mitigators = [CRM, LRM]
        for circuit_name, circuit_data in circuits_data["circuits"].items():
            counts_ideal = Counts(circuit_data["counts_ideal"])
            counts_noise = Counts(circuit_data["counts_noise"])
            probs_noise = {
                key: value / circuits_data["shots"]
                for key, value in counts_noise.items()
            }
            unmitigated_error = self.compare_results(counts_ideal, counts_noise)
            # TODO: verify mitigated stddev is larger
            unmitigated_stddev = stddev(probs_noise, circuits_data["shots"])
            for mitigator in mitigators:
                mitigated_quasi_probs = mitigator.quasi_probabilities(counts_noise)
                mitigated_probs = (
                    mitigated_quasi_probs.nearest_probability_distribution()
                    .binary_probabilities())
                mitigated_error = self.compare_results(counts_ideal, mitigated_probs)
                self.assertTrue(
                    mitigated_error < unmitigated_error * 0.8,
                    "Mitigator {} did not improve circuit {} measurements"
                    .format(mitigator, circuit_name),
                )
                mitigated_stddev_upper_bound = mitigated_quasi_probs._stddev_upper_bound
                max_unmitigated_stddev = max(unmitigated_stddev.values())
                self.assertTrue(
                    mitigated_stddev_upper_bound >= max_unmitigated_stddev,
                    "Mitigator {} on circuit {} gave stddev upper bound {} "
                    "while unmitigated stddev maximum is {}".format(
                        mitigator,
                        circuit_name,
                        mitigated_stddev_upper_bound,
                        max_unmitigated_stddev,
                    ),
                )
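
For completeness, a standalone version of the kind of accuracy comparison this test relies on. The distance function below is a plain total-variation distance and is only an assumption about what a compare_results-style helper might compute; the matrices and counts are made up:

import numpy as np
from qiskit.result import Counts, LocalReadoutMitigator

def distribution_distance(counts_ideal, probs_mitigated):
    """Total-variation distance between ideal counts and mitigated probabilities."""
    shots = sum(counts_ideal.values())
    keys = set(counts_ideal) | set(probs_mitigated)
    return 0.5 * sum(
        abs(counts_ideal.get(key, 0) / shots - probs_mitigated.get(key, 0.0))
        for key in keys
    )

# Illustrative two-qubit example with made-up assignment matrices and noisy counts.
amats = [np.array([[0.95, 0.10], [0.05, 0.90]]) for _ in range(2)]
mitigator = LocalReadoutMitigator(amats)
counts_noise = Counts({"00": 4700, "01": 4800, "10": 300, "11": 200})
mitigated_probs = (
    mitigator.quasi_probabilities(counts_noise)
    .nearest_probability_distribution()
    .binary_probabilities())
error = distribution_distance(Counts({"00": 5000, "01": 5000}), mitigated_probs)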