def _load_json_data(self, rb_exp_data_file_name: str):
    """Load the experiment data and configuration setup from a JSON file.

    The JSON file is expected to be a two-element list: index 0 holds the
    experiment setup configuration (a dict), and index 1 holds the list of
    result dictionaries.

    Args:
        rb_exp_data_file_name (str): Path of the file that contains the
            experiment data.

    Returns:
        tuple: A 3-tuple ``(data, exp_attributes, expdata1)`` where ``data``
            is the raw decoded JSON list, ``exp_attributes`` is the dict of
            the experiment setup configuration (``data[0]``), and
            ``expdata1`` is the ExperimentData object populated with the
            metadata and results.
    """
    expdata1 = ExperimentData()
    # Fail early with a clear message when the generated fixture is missing.
    self.assertTrue(
        os.path.isfile(rb_exp_data_file_name),
        "The file containing the experiment data doesn't exist."
        " Please run the data generator.",
    )
    with open(rb_exp_data_file_name, "r", encoding="utf-8") as json_file:
        data = json.load(json_file)
        # The experiment attributes are stored at index 0.
        exp_attributes = data[0]
        # pylint: disable=protected-access, invalid-name
        expdata1._metadata = data[0]
        # The experiment results are at index 1, as a list of dicts.
        expdata1.add_data(data[1])
    return data, exp_attributes, expdata1
def test_t1_low_quality(self):
    """A test where the fit's quality will be low"""
    exp_data = ExperimentData()
    # pylint: disable=protected-access
    exp_data._metadata = {
        "job_metadata": [
            {"run_options": {"meas_level": 2}},
        ]
    }
    # A flat 50/50 distribution at every delay gives the fitter nothing to
    # work with, so the resulting quality should be reported as bad.
    for idx in range(10):
        datum = {
            "counts": {"0": 10, "1": 10},
            "metadata": {
                "xval": idx * 1e-9,
                "experiment_type": "T1",
                "qubit": 0,
                "unit": "s",
            },
        }
        exp_data.add_data(datum)
    analysis_results, _ = T1Analysis()._run_analysis(exp_data)
    self.assertEqual(analysis_results[1].quality, "bad")
def test_local_analysis(self):
    """Tests local mitigator generation from experimental data"""
    physical_qubits = [0, 1, 2]
    calibration_data = [
        {
            "counts": {"000": 986, "010": 10, "100": 16, "001": 12},
            "metadata": {"label": "000"},
            "shots": 1024,
        },
        {
            "counts": {"111": 930, "110": 39, "011": 24, "101": 29, "010": 1, "100": 1},
            "metadata": {"label": "111"},
            "shots": 1024,
        },
    ]
    # One 2x2 assignment matrix per qubit for the local mitigator.
    expected_matrices = [
        np.array([[0.98828125, 0.04003906], [0.01171875, 0.95996094]]),
        np.array([[0.99023438, 0.02929688], [0.00976562, 0.97070312]]),
        np.array([[0.984375, 0.02441406], [0.015625, 0.97558594]]),
    ]
    exp_data = ExperimentData()
    exp_data.add_data(calibration_data)
    # pylint: disable=protected-access
    exp_data._metadata = {"physical_qubits": physical_qubits}
    experiment = ReadoutMitigationExperiment(physical_qubits)
    analysis_result = experiment.analysis.run(exp_data)
    mitigator = analysis_result.analysis_results(0).value
    self.assertEqual(len(physical_qubits), mitigator._num_qubits)
    self.assertEqual(physical_qubits, mitigator._qubits)
    self.assertTrue(matrix_equal(expected_matrices, mitigator._assignment_mats))
def test_t1_analysis(self):
    """Test that T1Analysis fits synthetic decay data with good quality.

    The counts follow an exponential decay over delays of (3*i + 1) ns,
    which corresponds to a T1 of roughly 25 ns; the analysis should report
    a good fit close to that value.
    """
    data = ExperimentData()
    # pylint: disable=protected-access
    data._metadata = {"meas_level": 2}
    numbers = [750, 1800, 2750, 3550, 4250, 4850, 5450, 5900, 6400, 6800, 7000, 7350, 7700]
    for i, count0 in enumerate(numbers):
        data.add_data(
            {
                "counts": {"0": count0, "1": 10000 - count0},
                "metadata": {
                    "xval": (3 * i + 1) * 1e-9,
                    "experiment_type": "T1",
                    "qubit": 0,
                    "unit": "s",
                },
            }
        )
    res, _ = T1Analysis()._run_analysis(data)
    result = res[1]
    self.assertEqual(result.quality, "good")
    # The tolerance must be on the same scale as the fitted value (seconds).
    # A delta of 3 would make this assertion vacuous for a ~25 ns estimate,
    # since any plausible float passes; 3e-9 actually constrains the fit.
    self.assertAlmostEqual(result.value.nominal_value, 25e-9, delta=3e-9)
def test_correlated_analysis(self):
    """Tests correlated mitigator generation from experimental data"""
    physical_qubits = [0, 2, 3]
    # One calibration circuit per 3-qubit basis state, 1024 shots each.
    calibration_data = [
        {
            "counts": {"000": 989, "010": 12, "100": 7, "001": 15, "101": 1},
            "metadata": {"state_label": "000"},
            "shots": 1024,
        },
        {
            "counts": {"001": 971, "101": 15, "000": 36, "011": 2},
            "metadata": {"state_label": "001"},
            "shots": 1024,
        },
        {
            "counts": {"000": 30, "010": 965, "110": 15, "011": 11, "001": 2, "100": 1},
            "metadata": {"state_label": "010"},
            "shots": 1024,
        },
        {
            "counts": {"011": 955, "111": 15, "010": 26, "001": 27, "110": 1},
            "metadata": {"state_label": "011"},
            "shots": 1024,
        },
        {
            "counts": {"100": 983, "101": 8, "110": 13, "000": 20},
            "metadata": {"state_label": "100"},
            "shots": 1024,
        },
        {
            "counts": {"101": 947, "001": 34, "100": 32, "111": 11},
            "metadata": {"state_label": "101"},
            "shots": 1024,
        },
        {
            "counts": {"100": 26, "110": 965, "010": 21, "111": 11, "000": 1},
            "metadata": {"state_label": "110"},
            "shots": 1024,
        },
        {
            "counts": {"111": 938, "011": 23, "110": 35, "101": 27, "100": 1},
            "metadata": {"state_label": "111"},
            "shots": 1024,
        },
    ]
    # Full 8x8 assignment matrix: column j is the measured distribution
    # when basis state j was prepared.
    expected_assignment_matrix = np.array(
        [
            [0.96582031, 0.03515625, 0.02929688, 0.0, 0.01953125, 0.0, 0.00097656, 0.0],
            [0.01464844, 0.94824219, 0.00195312, 0.02636719, 0.0, 0.03320312, 0.0, 0.0],
            [0.01171875, 0.0, 0.94238281, 0.02539062, 0.0, 0.0, 0.02050781, 0.0],
            [0.0, 0.00195312, 0.01074219, 0.93261719, 0.0, 0.0, 0.0, 0.02246094],
            [0.00683594, 0.0, 0.00097656, 0.0, 0.95996094, 0.03125, 0.02539062, 0.00097656],
            [0.00097656, 0.01464844, 0.0, 0.0, 0.0078125, 0.92480469, 0.0, 0.02636719],
            [0.0, 0.0, 0.01464844, 0.00097656, 0.01269531, 0.0, 0.94238281, 0.03417969],
            [0.0, 0.0, 0.0, 0.01464844, 0.0, 0.01074219, 0.01074219, 0.91601562],
        ]
    )
    exp_data = ExperimentData()
    exp_data.add_data(calibration_data)
    # pylint: disable=protected-access
    exp_data._metadata = {"physical_qubits": physical_qubits}
    experiment = CorrelatedReadoutError(physical_qubits)
    analysis_result = experiment.analysis.run(exp_data)
    mitigator = analysis_result.analysis_results(0).value
    self.assertEqual(len(physical_qubits), mitigator._num_qubits)
    self.assertEqual(physical_qubits, mitigator._qubits)
    self.assertTrue(matrix_equal(expected_assignment_matrix, mitigator.assignment_matrix()))