Example #1
    def test_to_real(self):
        """Test scaling and conversion to real part."""
        processor = DataProcessor("memory", [ToReal(scale=1e-3)])

        exp_data = ExperimentData(FakeExperiment())
        exp_data.add_data(self.result_lvl1)

        # Test to real on a single datum
        new_data = processor(exp_data.data(0))

        expected_old = {
            "memory": [
                [[1103260.0, -11378508.0], [2959012.0, -16488753.0]],
                [[442170.0, -19283206.0], [-5279410.0, -15339630.0]],
                [[3016514.0, -14548009.0], [-3404756.0, -16743348.0]],
            ],
            "metadata": {"experiment_type": "fake_test_experiment"},
            "job_id": "job-123",
            "meas_level": 1,
            "shots": 3,
        }

        expected_new = np.array([[1103.26, 2959.012], [442.17, -5279.41], [3016.514, -3404.7560]])

        self.assertEqual(exp_data.data(0), expected_old)
        np.testing.assert_array_almost_equal(
            unp.nominal_values(new_data),
            expected_new,
        )
        self.assertTrue(np.isnan(unp.std_devs(new_data)).all())

        # Test that we can call with history.
        new_data, history = processor.call_with_history(exp_data.data(0))

        self.assertEqual(exp_data.data(0), expected_old)
        np.testing.assert_array_almost_equal(
            unp.nominal_values(new_data),
            expected_new,
        )

        self.assertEqual(history[0][0], "ToReal")
        np.testing.assert_array_almost_equal(
            unp.nominal_values(history[0][1]),
            expected_new,
        )

        # Test to real on more than one datum
        new_data = processor(exp_data.data())

        expected_new = np.array(
            [
                [[1103.26, 2959.012], [442.17, -5279.41], [3016.514, -3404.7560]],
                [[5131.962, 4438.87], [3415.985, 2942.458], [5199.964, 4030.843]],
            ]
        )
        np.testing.assert_array_almost_equal(
            unp.nominal_values(new_data),
            expected_new,
        )

    def test_to_imag(self):
        """Test that we can average the data."""
        processor = DataProcessor("memory")
        processor.append(ToImag(scale=1e-3))

        exp_data = ExperimentData(FakeExperiment())
        exp_data.add_data(self.result_lvl1)

        new_data = processor(exp_data.data(0))

        expected_old = {
            "memory": [
                [[1103260.0, -11378508.0], [2959012.0, -16488753.0]],
                [[442170.0, -19283206.0], [-5279410.0, -15339630.0]],
                [[3016514.0, -14548009.0], [-3404756.0, -16743348.0]],
            ],
            "metadata": {
                "experiment_type": "fake_test_experiment"
            },
            "job_id":
            "job-123",
            "meas_level":
            1,
            "shots":
            3,
        }

        expected_new = np.array([
            [-11378.508, -16488.753],
            [-19283.206000000002, -15339.630000000001],
            [-14548.009, -16743.348],
        ])

        self.assertEqual(exp_data.data(0), expected_old)
        np.testing.assert_array_almost_equal(
            unp.nominal_values(new_data),
            expected_new,
        )
        self.assertTrue(np.isnan(unp.std_devs(new_data)).all())

        # Test that we can call with history.
        new_data, history = processor.call_with_history(exp_data.data(0))
        self.assertEqual(exp_data.data(0), expected_old)
        np.testing.assert_array_almost_equal(
            unp.nominal_values(new_data),
            expected_new,
        )

        self.assertEqual(history[0][0], "ToImag")
        np.testing.assert_array_almost_equal(
            unp.nominal_values(history[0][1]),
            expected_new,
        )

        # Test to imaginary on more than one datum
        new_data = processor(exp_data.data())

        expected_new = np.array([
            [[-11378.508, -16488.753], [-19283.206, -15339.630],
             [-14548.009, -16743.348]],
            [[-16630.257, -13752.518], [-16031.913, -15840.465],
             [-14955.998, -14538.923]],
        ])

        np.testing.assert_array_almost_equal(
            unp.nominal_values(new_data),
            expected_new,
        )
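
For reference, ToReal keeps the in-phase (first) component of each IQ pair and applies the scale; a minimal standalone sketch of that arithmetic in plain numpy (illustrative only, not the qiskit-experiments implementation):

import numpy as np

# First shot of the test data above: two slots of [I, Q] points.
memory = np.array([[1103260.0, -11378508.0], [2959012.0, -16488753.0]])
scaled_real = memory[..., 0] * 1e-3  # -> array([1103.26, 2959.012])
scaled_imag = memory[..., 1] * 1e-3  # what ToImag(scale=1e-3) would keep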
Example #3
    def _initialize(
        self,
        experiment_data: ExperimentData,
    ):
        """Initialize curve analysis with experiment data.

        This method is called ahead of other processing.

        Args:
            experiment_data: Experiment data to analyze.

        Raises:
            AnalysisError: When circuit metadata for ops count is missing.
        """
        super()._initialize(experiment_data)

        if self.options.gate_error_ratio is not None:
            # If gate error ratio is not False, EPG analysis is enabled.
            # Here analysis prepares gate error ratio and gate counts for EPC to EPG conversion.

            # If the gate count dictionary is not set, counts are computed from circuit metadata.
            avg_gpc = defaultdict(float)
            n_circs = len(experiment_data.data())
            for circ_result in experiment_data.data():
                try:
                    count_ops = circ_result["metadata"]["count_ops"]
                except KeyError as ex:
                    raise AnalysisError(
                        "'count_ops' key is not found in the circuit metadata. "
                        "This analysis cannot compute error per gates. "
                        "Please disable this with 'gate_error_ratio=False'."
                    ) from ex
                nclif = circ_result["metadata"]["xval"]
                for (qinds, gate), count in count_ops:
                    formatted_key = tuple(sorted(qinds)), gate
                    avg_gpc[formatted_key] += count / nclif / n_circs
            self._gate_counts_per_clifford = dict(avg_gpc)

            if self.options.gate_error_ratio == "default":
                # Gate error dict is computed for gates appearing in counts dictionary
                # Error ratio among gates is determined based on the predefined lookup table.
                # This is not always accurate for every quantum backend.
                gate_error_ratio = {}
                for qinds, gate in self._gate_counts_per_clifford.keys():
                    if set(qinds) != set(experiment_data.metadata["physical_qubits"]):
                        continue
                    gate_error_ratio[gate] = _lookup_epg_ratio(gate, len(qinds))
                self.set_options(gate_error_ratio=gate_error_ratio)

        # Get the physical qubits
        self._physical_qubits = experiment_data.metadata["physical_qubits"]
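
The accumulation in _initialize averages ops counts over the Clifford length (xval) and the number of circuits; a minimal standalone sketch with made-up metadata (the circuits, gate names, and counts below are hypothetical):

from collections import defaultdict

# Two hypothetical RB circuits of Clifford lengths 2 and 4.
results = [
    {"metadata": {"xval": 2, "count_ops": [(((0,), "sx"), 6), (((0,), "rz"), 8)]}},
    {"metadata": {"xval": 4, "count_ops": [(((0,), "sx"), 12), (((0,), "rz"), 16)]}},
]

avg_gpc = defaultdict(float)
n_circs = len(results)
for circ_result in results:
    nclif = circ_result["metadata"]["xval"]
    for (qinds, gate), count in circ_result["metadata"]["count_ops"]:
        avg_gpc[(tuple(sorted(qinds)), gate)] += count / nclif / n_circs

# Both circuits average to 3 'sx' and 4 'rz' per Clifford:
# dict(avg_gpc) == {((0,), 'sx'): 3.0, ((0,), 'rz'): 4.0}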
Example #4
    def update_calibrations(self, experiment_data: ExperimentData):
        r"""Update the amplitude of the pulse in the calibrations.

        The update rule of this experiment is

        .. math::

            A \to A \frac{\theta_\text{target}}{\theta_\text{target} + {\rm d}\theta}

        where :math:`A` is the amplitude of the pulse before the update.

        Args:
            experiment_data: The experiment data from which to extract the measured over/under
                rotation used to adjust the amplitude.
        """
        data = experiment_data.data()

        # No data -> no update
        if len(data) > 0:
            result_index = self.experiment_options.result_index
            group = data[0]["metadata"]["cal_group"]
            target_angle = data[0]["metadata"]["target_angle"]
            prev_amp = data[0]["metadata"]["cal_param_value"]

            d_theta = BaseUpdater.get_value(experiment_data, "d_theta", result_index)

            BaseUpdater.add_parameter_value(
                self._cals,
                experiment_data,
                prev_amp * target_angle / (target_angle + d_theta),
                self._param_name,
                self._sched_name,
                group,
            )
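
As a worked instance of the update rule above (the numbers are illustrative): with a target angle of pi and a measured over-rotation d_theta = 0.05, an amplitude of 0.5 shrinks slightly to compensate:

import numpy as np

target_angle = np.pi
d_theta = 0.05  # measured over-rotation (illustrative value)
prev_amp = 0.5
new_amp = prev_amp * target_angle / (target_angle + d_theta)
# new_amp ~= 0.4922, so the pulse amplitude is reduced to undo the over-rotation.
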
    def test_composite_single_kerneled_memory_marginalization(self):
        """Test the marginalization of level 1 data."""
        test_data = ExperimentData()

        datum = {
            "memory": [
                # qubit 0,   qubit 1,    qubit 2
                [[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]],  # shot 1
                [[0.1, 0.1], [1.1, 1.1], [2.1, 2.1]],  # shot 2
                [[0.2, 0.2], [1.2, 1.2], [2.2, 2.2]],  # shot 3
                [[0.3, 0.3], [1.3, 1.3], [2.3, 2.3]],  # shot 4
                [[0.4, 0.4], [1.4, 1.4], [2.4, 2.4]],  # shot 5
            ],
            "metadata": {
                "experiment_type":
                "ParallelExperiment",
                "composite_index": [0, 1, 2],
                "composite_metadata": [
                    {
                        "experiment_type": "FineXAmplitude",
                        "qubits": [0]
                    },
                    {
                        "experiment_type": "FineXAmplitude",
                        "qubits": [1]
                    },
                    {
                        "experiment_type": "FineXAmplitude",
                        "qubits": [2]
                    },
                ],
                "composite_qubits": [[0], [1], [2]],
                "composite_clbits": [[0], [1], [2]],
            },
            "shots":
            5,
            "meas_level":
            1,
        }

        test_data.add_data(datum)

        all_sub_data = CompositeAnalysis([])._marginalized_component_data(
            test_data.data())
        for idx, sub_data in enumerate(all_sub_data):
            expected = {
                "metadata": {
                    "experiment_type": "FineXAmplitude",
                    "qubits": [idx]
                },
                "memory": [
                    [[idx + 0.0, idx + 0.0]],
                    [[idx + 0.1, idx + 0.1]],
                    [[idx + 0.2, idx + 0.2]],
                    [[idx + 0.3, idx + 0.3]],
                    [[idx + 0.4, idx + 0.4]],
                ],
            }

            self.assertEqual(expected, sub_data[0])

    def test_composite_avg_kerneled_memory_marginalization(self):
        """The the marginalization of level 1 averaged data."""
        test_data = ExperimentData()

        datum = {
            "memory": [
                [0.0, 0.1],  # qubit 0
                [1.0, 1.1],  # qubit 1
                [2.0, 2.1],  # qubit 2
            ],
            "metadata": {
                "experiment_type":
                "ParallelExperiment",
                "composite_index": [0, 1, 2],
                "composite_metadata": [
                    {
                        "experiment_type": "FineXAmplitude",
                        "qubits": [0]
                    },
                    {
                        "experiment_type": "FineXAmplitude",
                        "qubits": [1]
                    },
                    {
                        "experiment_type": "FineXAmplitude",
                        "qubits": [2]
                    },
                ],
                "composite_qubits": [[0], [1], [2]],
                "composite_clbits": [[0], [1], [2]],
            },
            "shots": 5,
            "meas_level": 1,
        }

        test_data.add_data(datum)

        all_sub_data = CompositeAnalysis([])._marginalized_component_data(
            test_data.data())
        for idx, sub_data in enumerate(all_sub_data):
            expected = {
                "metadata": {
                    "experiment_type": "FineXAmplitude",
                    "qubits": [idx]
                },
                "memory": [[idx + 0.0, idx + 0.1]],
            }

            self.assertEqual(expected, sub_data[0])
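
The marginalization in these tests keeps, for each component, only the memory slots listed in its composite_clbits entry; a minimal sketch of that slicing for the averaged datum above (plain Python, outside CompositeAnalysis):

# Averaged level-1 memory: one [I, Q] pair per qubit.
memory = [[0.0, 0.1], [1.0, 1.1], [2.0, 2.1]]
composite_clbits = [[0], [1], [2]]

# Each component experiment keeps only its own slots.
marginalized = [[memory[clbit] for clbit in clbits] for clbits in composite_clbits]
# -> [[[0.0, 0.1]], [[1.0, 1.1]], [[2.0, 2.1]]]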
Example #7
    def _run_analysis(
        self, experiment_data: ExperimentData, **options
    ) -> Tuple[List[AnalysisResultData], List["matplotlib.figure.Figure"]]:
        data = experiment_data.data()
        qubits = experiment_data.metadata["physical_qubits"]
        labels = [datum["metadata"]["label"] for datum in data]
        matrix = self._generate_matrix(data, labels)
        result_mitigator = CorrelatedReadoutMitigator(matrix, qubits=qubits)
        analysis_results = [
            AnalysisResultData("Correlated Readout Mitigator", result_mitigator)
        ]
        ax = options.get("ax", None)
        figures = [self._plot_calibration(matrix, labels, ax)]
        return analysis_results, figures
    def _component_experiment_data(
            self, experiment_data: ExperimentData) -> List[ExperimentData]:
        """Return a list of marginalized experiment data for component experiments.

        Args:
            experiment_data: the experiment data container of a composite experiment.

        Returns:
            The list of analysis-ready marginalized experiment data for each
            component experiment.

        Raises:
            AnalysisError: if the component experiment data cannot be extracted.
        """
        if not self._flatten_results:
            # Retrieve child data for component experiments for updating
            component_index = experiment_data.metadata.get(
                "component_child_index", [])
            if not component_index:
                raise AnalysisError(
                    "Unable to extract component child experiment data")
            component_expdata = [
                experiment_data.child_data(i) for i in component_index
            ]
        else:
            # Initialize temporary ExperimentData containers for each
            # component experiment to run analysis on. These will not be
            # saved, but results and figures will be collected from them.
            component_expdata = self._initialize_component_experiment_data(
                experiment_data)

        # Compute marginalized data for each component experiment
        marginalized_data = self._marginalized_component_data(
            experiment_data.data())

        # Add the marginalized component data and component job metadata
        # to each component child experiment. Note that this will clear
        # any currently stored data in the experiment. Since copying of
        # child data is handled by the `replace_results` kwarg of the
        # parent container it is safe to always clear and replace the
        # results of child containers in this step
        for sub_expdata, sub_data in zip(component_expdata, marginalized_data):
            # Clear any previously stored data and add marginalized data
            sub_expdata._data.clear()
            sub_expdata.add_data(sub_data)

        return component_expdata
Example #9
    def _run_analysis(
        self, experiment_data: ExperimentData
    ) -> Tuple[List[AnalysisResultData], List["matplotlib.figure.Figure"]]:
        data = experiment_data.data()
        qubits = experiment_data.metadata["physical_qubits"]
        matrices = self._generate_matrices(data)
        result_mitigator = LocalReadoutMitigator(matrices, qubits=qubits)
        analysis_results = [AnalysisResultData("Local Readout Mitigator", result_mitigator)]
        if self.options.plot:
            figure = assignment_matrix_visualization(
                result_mitigator.assignment_matrix(), ax=self.options.ax
            )
            figures = [figure]
        else:
            figures = None
        return analysis_results, figures
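
A hedged usage sketch for the mitigator produced above, assuming qiskit.result's LocalReadoutMitigator and Counts classes; the assignment-matrix numbers are illustrative, not fitted from data:

import numpy as np
from qiskit.result import Counts, LocalReadoutMitigator

# Illustrative single-qubit assignment matrix: entry [i, j] is an assumed
# probability of measuring outcome i given that state j was prepared.
amat = np.array([[0.95, 0.08],
                 [0.05, 0.92]])
mitigator = LocalReadoutMitigator([amat], qubits=[0])

# Convert raw counts into a readout-error-corrected quasi-probability distribution.
quasi_probs = mitigator.quasi_probabilities(Counts({"0": 480, "1": 520}))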
Example #10
    def _run_analysis(self, experiment_data: ExperimentData):
        # Extract job metadata for the component experiments so it can be added
        # to the child experiment data in case it is required by the child
        # experiments' analysis classes
        composite_exp = experiment_data.experiment
        component_exps = composite_exp.component_experiment()
        component_metadata = experiment_data.metadata.get(
            "component_metadata", [{}] * composite_exp.num_experiments)

        # Initialize component data for updating and get the experiment IDs for
        # the component child experiments in case there are other child experiments
        # in the experiment data
        component_ids = self._initialize_components(composite_exp, experiment_data)

        # Compute marginalized data for each component experiment
        marginalized_data = self._marginalize_data(experiment_data.data())

        # Add the marginalized component data and component job metadata
        # to each component child experiment. Note that this will clear
        # any currently stored data in the experiment. Since copying of
        # child data is handled by the `replace_results` kwarg of the
        # parent container it is safe to always clear and replace the
        # results of child containers in this step
        for i, (sub_data, sub_exp) in enumerate(zip(marginalized_data, component_exps)):
            sub_exp_data = experiment_data.child_data(component_ids[i])

            # Clear any previously stored data and add marginalized data
            sub_exp_data._data.clear()
            sub_exp_data.add_data(sub_data)

            # Add component job metadata
            sub_exp_data.metadata.update(component_metadata[i])

            # Run analysis
            # Since copy for replace result is handled at the parent level
            # we always run with replace result on component analysis
            sub_exp.analysis.run(sub_exp_data, replace_results=True)

        # Wait for all component analysis to finish before returning
        # the parent experiment analysis results
        for comp_id in component_ids:
            experiment_data.child_data(comp_id).block_for_results()

        return [], []
    def _component_experiment_data(
            self, experiment_data: ExperimentData) -> List[ExperimentData]:
        """Return a list of component child experiment data"""
        # Initialize component data for updating and get the experiment IDs for
        # the component child experiments in case there are other child experiments
        # in the experiment data
        component_ids = self._initialize_components(experiment_data)
        if len(component_ids) != len(self._analyses):
            raise AnalysisError(
                "Number of experiment components does not match number of"
                " component analysis classes")

        # Extract job metadata for the component experiments so it can be added
        # to the child experiment data in case it is required by the child experiments
        # analysis classes
        component_metadata = experiment_data.metadata.get(
            "component_metadata", [{}] * len(component_ids))

        # Compute marginalized data for each component experiment
        marginalized_data = self._component_data(experiment_data.data())

        # Add the marginalized component data and component job metadata
        # to each component child experiment. Note that this will clear
        # any currently stored data in the experiment. Since copying of
        # child data is handled by the `replace_results` kwarg of the
        # parent container it is safe to always clear and replace the
        # results of child containers in this step
        component_data = []
        for i, sub_data in enumerate(marginalized_data):
            sub_exp_data = experiment_data.child_data(component_ids[i])

            # Clear any previously stored data and add marginalized data
            sub_exp_data._data.clear()
            sub_exp_data.add_data(sub_data)

            # Add component job metadata
            sub_exp_data.metadata.update(component_metadata[i])
            component_data.append(sub_exp_data)

        return component_data
    def _initialize(
        self,
        experiment_data: ExperimentData,
    ):
        """Initialize curve analysis with experiment data.

        This method is called ahead of other processing.

        Args:
            experiment_data: Experiment data to analyze.
        """
        # Initialize canvas
        if self.options.plot:
            self.drawer.initialize_canvas()

        # Initialize data processor
        # TODO move this to base analysis in follow-up
        data_processor = self.options.data_processor or get_processor(experiment_data, self.options)

        if not data_processor.is_trained:
            data_processor.train(data=experiment_data.data())
        self.set_options(data_processor=data_processor)
    def _run_analysis(
        self, experiment_data: ExperimentData
    ) -> Tuple[List[AnalysisResultData], List["pyplot.Figure"]]:
        """Wrap the analysis to optionally plot the IQ data."""
        analysis_results, figures = super()._run_analysis(experiment_data)

        if self.options.plot_iq_data:
            axis = get_non_gui_ax()
            figure = axis.get_figure()
            figure.set_size_inches(*self.options.style.figsize)

            iqs = []

            for datum in experiment_data.data():
                if "memory" in datum:
                    mem = np.array(datum["memory"])

                    # Average single-shot data.
                    if len(mem.shape) == 3:
                        for idx in range(mem.shape[1]):
                            iqs.append(np.average(mem[:, idx, :], axis=0))
                    else:
                        iqs.append(mem)

            if len(iqs) > 0:
                iqs = np.vstack(iqs)
                axis.scatter(iqs[:, 0], iqs[:, 1], color="b")
                axis.set_xlabel("In phase [arb. units]",
                                fontsize=self.options.style.axis_label_size)
                axis.set_ylabel("Quadrature [arb. units]",
                                fontsize=self.options.style.axis_label_size)
                axis.tick_params(labelsize=self.options.style.tick_label_size)
                axis.grid(True)

                figures.append(figure)

        return analysis_results, figures
Example #14
class TestIQSingleAvg(BaseDataProcessorTest):
    """Test the IQ data processing nodes single and average."""
    def setUp(self):
        """Setup some IQ data."""
        super().setUp()

        mem_avg = ExperimentResultData(
            memory=[[-539698.0, -153030784.0], [5541283.0, -160369600.0]])
        mem_single = ExperimentResultData(memory=[
            [[-56470872.0, -136691568.0], [-53407256.0, -176278624.0]],
            [[-34623272.0, -151247824.0], [-36650644.0, -170559312.0]],
            [[42658720.0, -153054640.0], [29689970.0, -174671824.0]],
            [[-47387248.0, -177826640.0], [-62149124.0, -165909728.0]],
            [[-51465408.0, -148338000.0], [23157112.0, -165826736.0]],
            [[51426688.0, -142703104.0], [34330920.0, -185572592.0]],
        ])

        res_single = ExperimentResult(
            shots=3,
            success=True,
            meas_level=1,
            meas_return="single",
            data=mem_single,
            header=self.header,
        )
        res_avg = ExperimentResult(shots=6,
                                   success=True,
                                   meas_level=1,
                                   meas_return="avg",
                                   data=mem_avg,
                                   header=self.header)

        self.exp_data_single = ExperimentData(FakeExperiment())
        self.exp_data_single.add_data(
            Result(results=[res_single], **self.base_result_args))

        self.exp_data_avg = ExperimentData(FakeExperiment())
        self.exp_data_avg.add_data(
            Result(results=[res_avg], **self.base_result_args))

    def test_avg_and_single(self):
        """Test that the different nodes process the data correctly."""

        to_real = DataProcessor("memory", [ToReal(scale=1)])
        to_imag = DataProcessor("memory", [ToImag(scale=1)])

        # Test the real single shot node
        new_data = to_real(self.exp_data_single.data(0))
        expected = np.array([
            [-56470872.0, -53407256.0],
            [-34623272.0, -36650644.0],
            [42658720.0, 29689970.0],
            [-47387248.0, -62149124.0],
            [-51465408.0, 23157112.0],
            [51426688.0, 34330920.0],
        ])
        np.testing.assert_array_almost_equal(
            unp.nominal_values(new_data),
            expected,
        )
        self.assertTrue(np.isnan(unp.std_devs(new_data)).all())

        # Test the imaginary single shot node
        new_data = to_imag(self.exp_data_single.data(0))
        expected = np.array([
            [-136691568.0, -176278624.0],
            [-151247824.0, -170559312.0],
            [-153054640.0, -174671824.0],
            [-177826640.0, -165909728.0],
            [-148338000.0, -165826736.0],
            [-142703104.0, -185572592.0],
        ])
        np.testing.assert_array_almost_equal(
            unp.nominal_values(new_data),
            expected,
        )

        # Test the real average node
        new_data = to_real(self.exp_data_avg.data(0))
        np.testing.assert_array_almost_equal(
            unp.nominal_values(new_data),
            np.array([-539698.0, 5541283.0]),
        )

        # Test the imaginary average node
        new_data = to_imag(self.exp_data_avg.data(0))
        np.testing.assert_array_almost_equal(
            unp.nominal_values(new_data),
            np.array([-153030784.0, -160369600.0]),
        )
Example #15
class DataProcessorTest(BaseDataProcessorTest):
    """Class to test DataProcessor."""
    def setUp(self):
        """Setup variables used for testing."""
        super().setUp()

        mem1 = ExperimentResultData(memory=[
            [[1103260.0, -11378508.0], [2959012.0, -16488753.0]],
            [[442170.0, -19283206.0], [-5279410.0, -15339630.0]],
            [[3016514.0, -14548009.0], [-3404756.0, -16743348.0]],
        ])

        mem2 = ExperimentResultData(memory=[
            [[5131962.0, -16630257.0], [4438870.0, -13752518.0]],
            [[3415985.0, -16031913.0], [2942458.0, -15840465.0]],
            [[5199964.0, -14955998.0], [4030843.0, -14538923.0]],
        ])

        res1 = ExperimentResult(shots=3,
                                success=True,
                                meas_level=1,
                                data=mem1,
                                header=self.header)
        res2 = ExperimentResult(shots=3,
                                success=True,
                                meas_level=1,
                                data=mem2,
                                header=self.header)

        self.result_lvl1 = Result(results=[res1, res2],
                                  **self.base_result_args)

        raw_counts1 = {"0x0": 4, "0x2": 6}
        raw_counts2 = {"0x0": 2, "0x2": 8}
        data1 = ExperimentResultData(counts=dict(**raw_counts1))
        data2 = ExperimentResultData(counts=dict(**raw_counts2))
        res1 = ExperimentResult(shots=10,
                                success=True,
                                meas_level=2,
                                data=data1,
                                header=self.header)
        res2 = ExperimentResult(shots=10,
                                success=True,
                                meas_level=2,
                                data=data2,
                                header=self.header)
        self.exp_data_lvl2 = ExperimentData(FakeExperiment())
        self.exp_data_lvl2.add_data(
            Result(results=[res1, res2], **self.base_result_args))

    def test_data_prep_level1_memory_single(self):
        """Format meas_level=1 meas_return=single."""
        # slots = 3, shots = 2, circuits = 2
        data_raw = [
            {
                "memory": [
                    [[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]],
                    [[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]],
                ],
            },
            {
                "memory": [
                    [[0.7, 0.8], [0.9, 1.0], [1.1, 1.2]],
                    [[0.7, 0.8], [0.9, 1.0], [1.1, 1.2]],
                ],
            },
        ]
        formatted_data = DataProcessor("memory", [])._data_extraction(data_raw)

        ref_data = np.array([
            [
                [ufloat(0.1, np.nan), ufloat(0.2, np.nan)],
                [ufloat(0.3, np.nan), ufloat(0.4, np.nan)],
                [ufloat(0.5, np.nan), ufloat(0.6, np.nan)],
            ],
            [
                [ufloat(0.1, np.nan), ufloat(0.2, np.nan)],
                [ufloat(0.3, np.nan), ufloat(0.4, np.nan)],
                [ufloat(0.5, np.nan), ufloat(0.6, np.nan)],
            ],
        ],
        [
            [
                [ufloat(0.7, np.nan), ufloat(0.8, np.nan)],
                [ufloat(0.9, np.nan), ufloat(1.0, np.nan)],
                [ufloat(1.1, np.nan), ufloat(1.2, np.nan)],
            ],
            [
                [ufloat(0.7, np.nan), ufloat(0.8, np.nan)],
                [ufloat(0.9, np.nan), ufloat(1.0, np.nan)],
                [ufloat(1.1, np.nan), ufloat(1.2, np.nan)],
            ],
        ],
        ])

        self.assertTupleEqual(formatted_data.shape, ref_data.shape)
        np.testing.assert_array_equal(unp.nominal_values(formatted_data),
                                      unp.nominal_values(ref_data))
        # note that np.nan cannot be evaluated by "=="
        self.assertTrue(np.isnan(unp.std_devs(formatted_data)).all())

    def test_data_prep_level1_memory_average(self):
        """Format meas_level=1 meas_return=avg."""
        # slots = 3, circuits = 2
        data_raw = [
            {
                "memory": [[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]],
            },
            {
                "memory": [[0.7, 0.8], [0.9, 1.0], [1.1, 1.2]],
            },
        ]
        formatted_data = DataProcessor("memory", [])._data_extraction(data_raw)

        ref_data = np.array([
            [
                [ufloat(0.1, np.nan), ufloat(0.2, np.nan)],
                [ufloat(0.3, np.nan), ufloat(0.4, np.nan)],
                [ufloat(0.5, np.nan), ufloat(0.6, np.nan)],
            ],
            [
                [ufloat(0.7, np.nan), ufloat(0.8, np.nan)],
                [ufloat(0.9, np.nan), ufloat(1.0, np.nan)],
                [ufloat(1.1, np.nan), ufloat(1.2, np.nan)],
            ],
        ])

        self.assertTupleEqual(formatted_data.shape, ref_data.shape)
        np.testing.assert_array_equal(unp.nominal_values(formatted_data),
                                      unp.nominal_values(ref_data))
        # note that np.nan cannot be evaluated by "=="
        self.assertTrue(np.isnan(unp.std_devs(formatted_data)).all())

    def test_data_prep_level2_counts(self):
        """Format meas_level=2."""
        # slots = 2, shots=10, circuits = 2
        data_raw = [
            {"counts": {"00": 2, "01": 3, "10": 1, "11": 4}},
            {"counts": {"00": 3, "01": 3, "10": 2, "11": 2}},
        ]
        formatted_data = DataProcessor("counts", [])._data_extraction(data_raw)

        ref_data = np.array(
            [
                {"00": 2, "01": 3, "10": 1, "11": 4},
                {"00": 3, "01": 3, "10": 2, "11": 2},
            ],
            dtype=object,
        )

        np.testing.assert_array_equal(formatted_data, ref_data)

    def test_data_prep_level2_counts_memory(self):
        """Format meas_level=2 with having memory set."""
        # slots = 2, shots=10, circuits = 2
        data_raw = [
            {
                "counts": {"00": 2, "01": 3, "10": 1, "11": 4},
                "memory": ["00", "01", "01", "10", "11", "11", "00", "01", "11", "11"],
            },
            {
                "counts": {"00": 3, "01": 3, "10": 2, "11": 2},
                "memory": ["00", "00", "01", "00", "10", "01", "01", "11", "10", "11"],
            },
        ]
        formatted_data = DataProcessor("memory", [])._data_extraction(data_raw)

        ref_data = np.array(
            [
                ["00", "01", "01", "10", "11", "11", "00", "01", "11", "11"],
                ["00", "00", "01", "00", "10", "01", "01", "11", "10", "11"],
            ],
            dtype=object,
        )

        np.testing.assert_array_equal(formatted_data, ref_data)

    def test_empty_processor(self):
        """Check that a DataProcessor without steps does nothing."""
        data_processor = DataProcessor("counts")

        datum = data_processor(self.exp_data_lvl2.data(0))
        self.assertEqual(datum, {"00": 4, "10": 6})

        datum, history = data_processor.call_with_history(
            self.exp_data_lvl2.data(0))
        self.assertEqual(datum, {"00": 4, "10": 6})
        self.assertEqual(history, [])

    def test_to_real(self):
        """Test scaling and conversion to real part."""
        processor = DataProcessor("memory", [ToReal(scale=1e-3)])

        exp_data = ExperimentData(FakeExperiment())
        exp_data.add_data(self.result_lvl1)

        # Test to real on a single datum
        new_data = processor(exp_data.data(0))

        expected_old = {
            "memory": [
                [[1103260.0, -11378508.0], [2959012.0, -16488753.0]],
                [[442170.0, -19283206.0], [-5279410.0, -15339630.0]],
                [[3016514.0, -14548009.0], [-3404756.0, -16743348.0]],
            ],
            "metadata": {
                "experiment_type": "fake_test_experiment"
            },
            "job_id":
            "job-123",
            "meas_level":
            1,
            "shots":
            3,
        }

        expected_new = np.array([[1103.26, 2959.012], [442.17, -5279.41],
                                 [3016.514, -3404.7560]])

        self.assertEqual(exp_data.data(0), expected_old)
        np.testing.assert_array_almost_equal(
            unp.nominal_values(new_data),
            expected_new,
        )
        self.assertTrue(np.isnan(unp.std_devs(new_data)).all())

        # Test that we can call with history.
        new_data, history = processor.call_with_history(exp_data.data(0))

        self.assertEqual(exp_data.data(0), expected_old)
        np.testing.assert_array_almost_equal(
            unp.nominal_values(new_data),
            expected_new,
        )

        self.assertEqual(history[0][0], "ToReal")
        np.testing.assert_array_almost_equal(
            unp.nominal_values(history[0][1]),
            expected_new,
        )

        # Test to real on more than one datum
        new_data = processor(exp_data.data())

        expected_new = np.array([
            [[1103.26, 2959.012], [442.17, -5279.41], [3016.514, -3404.7560]],
            [[5131.962, 4438.87], [3415.985, 2942.458], [5199.964, 4030.843]],
        ])
        np.testing.assert_array_almost_equal(
            unp.nominal_values(new_data),
            expected_new,
        )

    def test_to_imag(self):
        """Test that we can average the data."""
        processor = DataProcessor("memory")
        processor.append(ToImag(scale=1e-3))

        exp_data = ExperimentData(FakeExperiment())
        exp_data.add_data(self.result_lvl1)

        new_data = processor(exp_data.data(0))

        expected_old = {
            "memory": [
                [[1103260.0, -11378508.0], [2959012.0, -16488753.0]],
                [[442170.0, -19283206.0], [-5279410.0, -15339630.0]],
                [[3016514.0, -14548009.0], [-3404756.0, -16743348.0]],
            ],
            "metadata": {
                "experiment_type": "fake_test_experiment"
            },
            "job_id":
            "job-123",
            "meas_level":
            1,
            "shots":
            3,
        }

        expected_new = np.array([
            [-11378.508, -16488.753],
            [-19283.206000000002, -15339.630000000001],
            [-14548.009, -16743.348],
        ])

        self.assertEqual(exp_data.data(0), expected_old)
        np.testing.assert_array_almost_equal(
            unp.nominal_values(new_data),
            expected_new,
        )
        self.assertTrue(np.isnan(unp.std_devs(new_data)).all())

        # Test that we can call with history.
        new_data, history = processor.call_with_history(exp_data.data(0))
        self.assertEqual(exp_data.data(0), expected_old)
        np.testing.assert_array_almost_equal(
            unp.nominal_values(new_data),
            expected_new,
        )

        self.assertEqual(history[0][0], "ToImag")
        np.testing.assert_array_almost_equal(
            unp.nominal_values(history[0][1]),
            expected_new,
        )

        # Test to imaginary on more than one datum
        new_data = processor(exp_data.data())

        expected_new = np.array([
            [[-11378.508, -16488.753], [-19283.206, -15339.630],
             [-14548.009, -16743.348]],
            [[-16630.257, -13752.518], [-16031.913, -15840.465],
             [-14955.998, -14538.923]],
        ])

        np.testing.assert_array_almost_equal(
            unp.nominal_values(new_data),
            expected_new,
        )

    def test_populations(self):
        """Test that counts are properly converted to a population."""

        processor = DataProcessor("counts")
        processor.append(Probability("00", alpha_prior=1.0))

        # Test on a single datum.
        new_data = processor(self.exp_data_lvl2.data(0))

        self.assertAlmostEqual(float(unp.nominal_values(new_data)), 0.41666667)
        self.assertAlmostEqual(float(unp.std_devs(new_data)),
                               0.13673544235706114)

        # Test on all the data
        new_data = processor(self.exp_data_lvl2.data())
        np.testing.assert_array_almost_equal(
            unp.nominal_values(new_data),
            np.array([0.41666667, 0.25]),
        )

    def test_validation(self):
        """Test the validation mechanism."""

        for validate, error in [(False, AttributeError),
                                (True, DataProcessorError)]:
            processor = DataProcessor("counts")
            processor.append(Probability("00", validate=validate))

            with self.assertRaises(error):
                processor({"counts": [0, 1, 2]})
class DataProcessorTest(BaseDataProcessorTest):
    """Class to test DataProcessor."""
    def setUp(self):
        """Setup variables used for testing."""
        super().setUp()

        mem1 = ExperimentResultData(memory=[
            [[1103260.0, -11378508.0], [2959012.0, -16488753.0]],
            [[442170.0, -19283206.0], [-5279410.0, -15339630.0]],
            [[3016514.0, -14548009.0], [-3404756.0, -16743348.0]],
        ])

        mem2 = ExperimentResultData(memory=[
            [[5131962.0, -16630257.0], [4438870.0, -13752518.0]],
            [[3415985.0, -16031913.0], [2942458.0, -15840465.0]],
            [[5199964.0, -14955998.0], [4030843.0, -14538923.0]],
        ])

        res1 = ExperimentResult(shots=3,
                                success=True,
                                meas_level=1,
                                data=mem1,
                                header=self.header)
        res2 = ExperimentResult(shots=3,
                                success=True,
                                meas_level=1,
                                data=mem2,
                                header=self.header)

        self.result_lvl1 = Result(results=[res1, res2],
                                  **self.base_result_args)

        raw_counts1 = {"0x0": 4, "0x2": 6}
        raw_counts2 = {"0x0": 2, "0x2": 8}
        data1 = ExperimentResultData(counts=dict(**raw_counts1))
        data2 = ExperimentResultData(counts=dict(**raw_counts2))
        res1 = ExperimentResult(shots=10,
                                success=True,
                                meas_level=2,
                                data=data1,
                                header=self.header)
        res2 = ExperimentResult(shots=10,
                                success=True,
                                meas_level=2,
                                data=data2,
                                header=self.header)
        self.exp_data_lvl2 = ExperimentData(FakeExperiment())
        self.exp_data_lvl2.add_data(
            Result(results=[res1, res2], **self.base_result_args))

    def test_empty_processor(self):
        """Check that a DataProcessor without steps does nothing."""
        data_processor = DataProcessor("counts")

        datum, error = data_processor(self.exp_data_lvl2.data(0))
        self.assertEqual(datum, [{"00": 4, "10": 6}])
        self.assertIsNone(error)

        datum, error, history = data_processor.call_with_history(
            self.exp_data_lvl2.data(0))
        self.assertEqual(datum, [{"00": 4, "10": 6}])
        self.assertEqual(history, [])

    def test_to_real(self):
        """Test scaling and conversion to real part."""
        processor = DataProcessor("memory", [ToReal(scale=1e-3)])

        exp_data = ExperimentData(FakeExperiment())
        exp_data.add_data(self.result_lvl1)

        # Test to real on a single datum
        new_data, error = processor(exp_data.data(0))

        expected_old = {
            "memory": [
                [[1103260.0, -11378508.0], [2959012.0, -16488753.0]],
                [[442170.0, -19283206.0], [-5279410.0, -15339630.0]],
                [[3016514.0, -14548009.0], [-3404756.0, -16743348.0]],
            ],
            "metadata": {
                "experiment_type": "fake_test_experiment"
            },
            "job_id":
            "job-123",
            "meas_level":
            1,
            "shots":
            3,
        }

        expected_new = np.array([[1103.26, 2959.012], [442.17, -5279.41],
                                 [3016.514, -3404.7560]])

        self.assertEqual(exp_data.data(0), expected_old)
        self.assertTrue(np.allclose(new_data, expected_new))
        self.assertIsNone(error)

        # Test that we can call with history.
        new_data, error, history = processor.call_with_history(
            exp_data.data(0))

        self.assertEqual(exp_data.data(0), expected_old)
        self.assertTrue(np.allclose(new_data, expected_new))

        self.assertEqual(history[0][0], "ToReal")
        self.assertTrue(np.allclose(history[0][1], expected_new))

        # Test to real on more than one datum
        new_data, error = processor(exp_data.data())

        expected_new = np.array([
            [[1103.26, 2959.012], [442.17, -5279.41], [3016.514, -3404.7560]],
            [[5131.962, 4438.87], [3415.985, 2942.458], [5199.964, 4030.843]],
        ])

        self.assertTrue(np.allclose(new_data, expected_new))

    def test_to_imag(self):
        """Test that we can average the data."""
        processor = DataProcessor("memory")
        processor.append(ToImag(scale=1e-3))

        exp_data = ExperimentData(FakeExperiment())
        exp_data.add_data(self.result_lvl1)

        new_data, error = processor(exp_data.data(0))

        expected_old = {
            "memory": [
                [[1103260.0, -11378508.0], [2959012.0, -16488753.0]],
                [[442170.0, -19283206.0], [-5279410.0, -15339630.0]],
                [[3016514.0, -14548009.0], [-3404756.0, -16743348.0]],
            ],
            "metadata": {
                "experiment_type": "fake_test_experiment"
            },
            "job_id":
            "job-123",
            "meas_level":
            1,
            "shots":
            3,
        }

        expected_new = np.array([
            [-11378.508, -16488.753],
            [-19283.206000000002, -15339.630000000001],
            [-14548.009, -16743.348],
        ])

        self.assertEqual(exp_data.data(0), expected_old)
        self.assertTrue(np.allclose(new_data, expected_new))
        self.assertIsNone(error)

        # Test that we can call with history.
        new_data, error, history = processor.call_with_history(
            exp_data.data(0))
        self.assertEqual(exp_data.data(0), expected_old)
        self.assertTrue(np.allclose(new_data, expected_new))

        self.assertEqual(history[0][0], "ToImag")
        self.assertTrue(np.allclose(history[0][1], expected_new))

        # Test to imaginary on more than one datum
        new_data, error = processor(exp_data.data())

        expected_new = np.array([
            [[-11378.508, -16488.753], [-19283.206, -15339.630],
             [-14548.009, -16743.348]],
            [[-16630.257, -13752.518], [-16031.913, -15840.465],
             [-14955.998, -14538.923]],
        ])

        self.assertTrue(np.allclose(new_data, expected_new))

    def test_populations(self):
        """Test that counts are properly converted to a population."""

        processor = DataProcessor("counts")
        processor.append(Probability("00"))

        # Test on a single datum.
        new_data, error = processor(self.exp_data_lvl2.data(0))

        self.assertEqual(new_data, 0.4)
        self.assertEqual(error, np.sqrt(0.4 * (1 - 0.4) / 10))

        # Test on all the data
        new_data, error = processor(self.exp_data_lvl2.data())
        self.assertTrue(np.allclose(new_data, np.array([0.4, 0.2])))

    def test_validation(self):
        """Test the validation mechanism."""

        for validate, error in [(False, AttributeError),
                                (True, DataProcessorError)]:
            processor = DataProcessor("counts")
            processor.append(Probability("00", validate=validate))

            with self.assertRaises(error):
                processor({"counts": [0, 1, 2]})

class TestAveragingAndSVD(BaseDataProcessorTest):
    """Test the averaging of single-shot IQ data followed by a SVD."""
    def setUp(self):
        """Here, single-shots average to points at plus/minus 1.

        The setting corresponds to four single-shots done on two qubits.
        """
        super().setUp()

        circ_es = ExperimentResultData(memory=[
            [[1.1, 0.9], [-0.8, 1.0]],
            [[1.2, 1.1], [-0.9, 1.0]],
            [[0.8, 1.1], [-1.2, 1.0]],
            [[0.9, 0.9], [-1.1, 1.0]],
        ])
        self._sig_gs = np.array([[1.0], [-1.0]]) / np.sqrt(2.0)

        circ_gs = ExperimentResultData(memory=[
            [[-1.1, -0.9], [0.8, -1.0]],
            [[-1.2, -1.1], [0.9, -1.0]],
            [[-0.8, -1.1], [1.2, -1.0]],
            [[-0.9, -0.9], [1.1, -1.0]],
        ])
        self._sig_es = np.array([[-1.0], [1.0]]) / np.sqrt(2.0)

        circ_x90p = ExperimentResultData(memory=[
            [[-1.0, -1.0], [1.0, -1.0]],
            [[-1.0, -1.0], [1.0, -1.0]],
            [[1.0, 1.0], [-1.0, 1.0]],
            [[1.0, 1.0], [-1.0, 1.0]],
        ])
        self._sig_x90 = np.array([[0], [0]])

        circ_x45p = ExperimentResultData(memory=[
            [[-1.0, -1.0], [1.0, -1.0]],
            [[-1.0, -1.0], [1.0, -1.0]],
            [[-1.0, -1.0], [1.0, -1.0]],
            [[1.0, 1.0], [-1.0, 1.0]],
        ])
        self._sig_x45 = np.array([[0.5], [-0.5]]) / np.sqrt(2.0)

        res_es = ExperimentResult(
            shots=4,
            success=True,
            meas_level=1,
            meas_return="single",
            data=circ_es,
            header=self.header,
        )

        res_gs = ExperimentResult(
            shots=4,
            success=True,
            meas_level=1,
            meas_return="single",
            data=circ_gs,
            header=self.header,
        )

        res_x90p = ExperimentResult(
            shots=4,
            success=True,
            meas_level=1,
            meas_return="single",
            data=circ_x90p,
            header=self.header,
        )

        res_x45p = ExperimentResult(
            shots=4,
            success=True,
            meas_level=1,
            meas_return="single",
            data=circ_x45p,
            header=self.header,
        )

        self.data = ExperimentData(FakeExperiment())
        self.data.add_data(
            Result(results=[res_es, res_gs, res_x90p, res_x45p],
                   **self.base_result_args))

    def test_averaging(self):
        """Test that averaging of the datums produces the expected IQ points."""

        processor = DataProcessor("memory", [AverageData(axis=1)])

        # Test that we get the expected outcome for the excited state
        processed, error = processor(self.data.data(0))
        expected_avg = np.array([[1.0, 1.0], [-1.0, 1.0]])
        expected_std = np.array([[0.15811388300841894, 0.1],
                                 [0.15811388300841894, 0.0]]) / 2.0
        self.assertTrue(np.allclose(processed, expected_avg))
        self.assertTrue(np.allclose(error, expected_std))

        # Test that we get the expected outcome for the ground state
        processed, error = processor(self.data.data(1))
        expected_avg = np.array([[-1.0, -1.0], [1.0, -1.0]])
        expected_std = np.array([[0.15811388300841894, 0.1],
                                 [0.15811388300841894, 0.0]]) / 2.0
        self.assertTrue(np.allclose(processed, expected_avg))
        self.assertTrue(np.allclose(error, expected_std))

    def test_averaging_and_svd(self):
        """Test averaging followed by a SVD."""

        processor = DataProcessor("memory", [AverageData(axis=1), SVD()])

        # Test training using the calibration points
        self.assertFalse(processor.is_trained)
        processor.train([self.data.data(idx) for idx in [0, 1]])
        self.assertTrue(processor.is_trained)

        # Test the excited state
        processed, error = processor(self.data.data(0))
        self.assertTrue(np.allclose(processed, self._sig_es))

        # Test the ground state
        processed, error = processor(self.data.data(1))
        self.assertTrue(np.allclose(processed, self._sig_gs))

        # Test the x90p rotation
        processed, error = processor(self.data.data(2))
        self.assertTrue(np.allclose(processed, self._sig_x90))
        self.assertTrue(np.allclose(error, np.array([0.25, 0.25])))

        # Test the x45p rotation
        processed, error = processor(self.data.data(3))
        expected_std = np.array([np.std([1, 1, 1, -1]) / np.sqrt(4.0) / 2] * 2)
        self.assertTrue(np.allclose(processed, self._sig_x45))
        self.assertTrue(np.allclose(error, expected_std))

    def test_process_all_data(self):
        """Test that we can process all data at once."""

        processor = DataProcessor("memory", [AverageData(axis=1), SVD()])

        # Test training using the calibration points
        self.assertFalse(processor.is_trained)
        processor.train([self.data.data(idx) for idx in [0, 1]])
        self.assertTrue(processor.is_trained)

        all_expected = np.vstack((
            self._sig_es.reshape(1, 2),
            self._sig_gs.reshape(1, 2),
            self._sig_x90.reshape(1, 2),
            self._sig_x45.reshape(1, 2),
        )).T

        # Test processing of all data
        processed = processor(self.data.data())[0]
        self.assertTrue(np.allclose(processed, all_expected))

        # Test processing of each datum individually
        for idx, expected in enumerate(
            [self._sig_es, self._sig_gs, self._sig_x90, self._sig_x45]):
            processed = processor(self.data.data(idx))[0]
            self.assertTrue(np.allclose(processed, expected))

    def test_normalize(self):
        """Test that by adding a normalization node we get a signal between 1 and 1."""

        processor = DataProcessor("memory", [AverageData(axis=1), SVD(), MinMaxNormalize()])

        self.assertFalse(processor.is_trained)
        processor.train([self.data.data(idx) for idx in [0, 1]])
        self.assertTrue(processor.is_trained)

        all_expected = np.array([[0.0, 1.0, 0.5, 0.75], [1.0, 0.0, 0.5, 0.25]])

        # Test processing of all data
        processed = processor(self.data.data())[0]
        self.assertTrue(np.allclose(processed, all_expected))
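
The expected values above follow from min-max scaling of the four SVD signals defined in setUp; a quick standalone check of that arithmetic (the common 1/sqrt(2) scale cancels in the normalization):

import numpy as np

signals = np.array([-1.0, 1.0, 0.0, 0.5])  # one qubit's SVD signal per circuit
normalized = (signals - signals.min()) / (signals.max() - signals.min())
# -> array([0.  , 1.  , 0.5 , 0.75]), the first row of all_expected
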
    def _run_analysis(
        self,
        experiment_data: ExperimentData,
        user_p0: Optional[Dict[str, float]] = None,
        user_bounds: Optional[Tuple[List[float], List[float]]] = None,
        plot: bool = False,
        ax: Optional["AxesSubplot"] = None,
        **kwargs,
    ) -> Tuple[List[AnalysisResultData], List["matplotlib.figure.Figure"]]:
        r"""Calculate T2Ramsey experiment.

        Args:
            experiment_data (ExperimentData): the experiment data to analyze
            user_p0: contains initial values given by the user for the
                fit parameters :math:`(a, t2ramsey, f, \phi, b)`
            user_bounds: lower and upper bounds on the parameters in p0,
                         given by the user.
                         The first tuple is the lower bounds;
                         the second tuple is the upper bounds.
                         For both params, the order is :math:`a, t2ramsey, f, \phi, b`.
            plot: if True, create the plot, otherwise, do not create the plot.
            ax: the plot object
            **kwargs: additional parameters for curve fit.

        Returns:
            The analysis result with the estimated :math:`t2ramsey` and 'f' (frequency),
            and the graph of the function.
        """
        def osc_fit_fun(x, a, t2ramsey, f, phi, c):
            """Decay cosine fit function"""
            return a * np.exp(-x / t2ramsey) * np.cos(2 * np.pi * f * x + phi) + c

        def _format_plot(ax, unit, fit_result, conversion_factor):
            """Format curve fit plot"""
            # Formatting
            ax.tick_params(labelsize=14)
            ax.set_xlabel("Delay (s)", fontsize=12)
            ax.ticklabel_format(axis="x", style="sci", scilimits=(0, 0))
            ax.set_ylabel("Probability of measuring 0", fontsize=12)
            t2ramsey = fit_result["popt"][1] / conversion_factor
            t2_err = fit_result["popt_err"][1] / conversion_factor
            box_text = "$T_2Ramsey$ = {:.2f} \u00B1 {:.2f} {}".format(
                t2ramsey, t2_err, unit)
            bbox_props = dict(boxstyle="square,pad=0.3",
                              fc="white",
                              ec="black",
                              lw=1)
            ax.text(
                0.6,
                0.9,
                box_text,
                ha="center",
                va="center",
                size=12,
                bbox=bbox_props,
                transform=ax.transAxes,
            )
            return ax

        # Implementation of _run_analysis

        data = experiment_data.data()
        circ_metadata = data[0]["metadata"]
        unit = circ_metadata["unit"]
        conversion_factor = circ_metadata.get("dt_factor", None)
        osc_freq = circ_metadata.get("osc_freq", None)
        if conversion_factor is None:
            conversion_factor = 1 if unit in ("s", "dt") else apply_prefix(1, unit)

        xdata, ydata, sigma = process_curve_data(
            data, lambda datum: level2_probability(datum, "0"))

        t2ramsey_estimate = np.mean(xdata)
        p0, bounds = self._t2ramsey_default_params(
            conversion_factor, user_p0, user_bounds, t2ramsey_estimate, osc_freq
        )
        xdata *= conversion_factor
        fit_result = curve_fit(osc_fit_fun,
                               xdata,
                               ydata,
                               p0=list(p0.values()),
                               sigma=sigma,
                               bounds=bounds)
        fit_result = dataclasses.asdict(fit_result)
        fit_result["circuit_unit"] = unit
        if osc_freq is not None:
            fit_result["osc_freq"] = osc_freq
        if unit == "dt":
            fit_result["dt"] = conversion_factor
        quality = self._fit_quality(fit_result["popt"], fit_result["popt_err"],
                                    fit_result["reduced_chisq"])
        chisq = fit_result["reduced_chisq"]

        if plot:
            ax = plot_curve_fit(osc_fit_fun, fit_result, ax=ax)
            ax = plot_scatter(xdata, ydata, ax=ax)
            ax = plot_errorbar(xdata, ydata, sigma, ax=ax)
            _format_plot(ax, unit, fit_result, conversion_factor)
            figures = [ax.get_figure()]
        else:
            figures = None

        # Output unit is 'sec', regardless of the unit used in the input
        result_t2star = AnalysisResultData(
            "T2star",
            value=FitVal(fit_result["popt"][1], fit_result["popt_err"][1],
                         "s"),
            quality=quality,
            chisq=chisq,
            extra=fit_result,
        )
        result_freq = AnalysisResultData(
            "Frequency",
            value=FitVal(fit_result["popt"][2], fit_result["popt_err"][2],
                         "Hz"),
            quality=quality,
            chisq=chisq,
            extra=fit_result,
        )

        return [result_t2star, result_freq], figures
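
For reference, here is a minimal, self-contained sketch of the decaying-cosine model fitted above, evaluated on a synthetic delay sweep. All parameter values are illustrative assumptions, not taken from the example.

import numpy as np

# Hypothetical values; order matches the docstring: a, t2ramsey, f, phi, b.
a, t2ramsey, f, phi, b = 0.5, 20e-6, 1e5, 0.0, 0.5
delays = np.linspace(0, 50e-6, 51)  # delay sweep in seconds
signal = a * np.exp(-delays / t2ramsey) * np.cos(2 * np.pi * f * delays + phi) + b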
Example #19
    def _extract_curves(self, experiment_data: ExperimentData,
                        data_processor: Union[Callable, DataProcessor]):
        """Extract curve data from experiment data.

        This method internally populates two types of curve data.

        - raw_data:

            This is the data directly obtained from the experiment data.
            You can access this data with ``self._data(label="raw_data")``.

        - fit_ready:

            This is the formatted data created by the pre-processing defined by the
            ``self._format_data()`` method, which is implemented by subclasses.
            You can access this data with ``self._data(label="fit_ready")``.

        If multiple series exist, you can optionally specify ``series_name`` in
        ``self._data`` method to filter data in the target series.

        .. note::
            The metadata properties that define each curve entry are described by
            the class attribute ``__series__`` (see ``filter_kwargs``).

        Args:
            experiment_data: ExperimentData object containing the data to fit.
            data_processor: A callable or DataProcessor instance that formats the data
                into a numpy array. A callable is applied to each result dictionary and
                should return a tuple of two float values, the y value and its error.

        Raises:
            DataProcessorError: When the ``x_key`` specified in the analysis options is
                not defined in the circuit metadata.
            AnalysisError: When the formatted data has a label other than ``fit_ready``.
        """
        self.__processed_data_set = list()

        def _is_target_series(datum, **filters):
            try:
                return all(datum["metadata"][key] == val
                           for key, val in filters.items())
            except KeyError:
                return False

        # Extract X, Y, Y_sigma data
        data = experiment_data.data()

        x_key = self.options.x_key
        try:
            xdata = np.asarray([datum["metadata"][x_key] for datum in data],
                               dtype=float)
        except KeyError as ex:
            raise DataProcessorError(
                f"X value key {x_key} is not defined in circuit metadata."
            ) from ex

        if isinstance(data_processor, DataProcessor):
            ydata = data_processor(data)
        else:
            y_nominals, y_stderrs = zip(*map(data_processor, data))
            ydata = unp.uarray(y_nominals, y_stderrs)

        # Store metadata
        metadata = np.asarray([datum["metadata"] for datum in data],
                              dtype=object)

        # Store shots
        shots = np.asarray([datum.get("shots", np.nan) for datum in data])

        # Find series (invalid data is labeled as -1)
        data_index = np.full(xdata.size, -1, dtype=int)
        for idx, series_def in enumerate(self.__series__):
            data_matched = np.asarray(
                [_is_target_series(datum, **series_def.filter_kwargs) for datum in data],
                dtype=bool,
            )
            data_index[data_matched] = idx

        # Store raw data
        raw_data = CurveData(
            label="raw_data",
            x=xdata,
            y=unp.nominal_values(ydata),
            y_err=unp.std_devs(ydata),
            shots=shots,
            data_index=data_index,
            metadata=metadata,
        )
        self.__processed_data_set.append(raw_data)

        # Format raw data
        formatted_data = self._format_data(raw_data)
        if formatted_data.label != "fit_ready":
            raise AnalysisError(
                f"Not expected data label {formatted_data.label} != fit_ready."
            )
        self.__processed_data_set.append(formatted_data)
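
The ``data_processor`` contract documented above can be met by a plain function. A minimal sketch, assuming level-2 count dictionaries and a binomial error model; the helper name ``prob_one`` is hypothetical.

import numpy as np

def prob_one(datum):
    """Return (y, y_err): probability of measuring '1' and its standard error."""
    counts = datum["counts"]
    shots = sum(counts.values())
    p = counts.get("1", 0) / shots
    return p, np.sqrt(p * (1 - p) / shots)

print(prob_one({"counts": {"0": 6, "1": 4}}))  # (0.4, 0.1549...)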
Example #20
    def _run_analysis(
        self, experiment_data: ExperimentData
    ) -> Tuple[List[AnalysisResultData], List["pyplot.Figure"]]:
        #
        # 1. Parse arguments
        #

        # Update all fit functions in the series definitions if fixed parameter is defined.
        # Fixed parameters should be provided by the analysis options.
        if self.__fixed_parameters__:
            assigned_params = {k: self.options.get(k, None) for k in self.__fixed_parameters__}

            # Check if all parameters are assigned.
            if any(v is None for v in assigned_params.values()):
                raise AnalysisError(
                    f"Unassigned fixed-value parameters for the fit "
                    f"function {self.__class__.__name__}. "
                    f"All values of fixed-parameters, i.e. {self.__fixed_parameters__}, "
                    "must be provided by the analysis options to run this analysis."
                )

            # Override series definition with assigned fit functions.
            assigned_series = []
            for series_def in self.__series__:
                dict_def = dataclasses.asdict(series_def)
                dict_def["fit_func"] = functools.partial(
                    series_def.fit_func, **assigned_params)
                assigned_series.append(SeriesDef(**dict_def))
            self.__series__ = assigned_series

        # get experiment metadata
        try:
            self.__experiment_metadata = experiment_data.metadata

        except AttributeError:
            pass

        # get backend
        try:
            self.__backend = experiment_data.backend
        except AttributeError:
            pass

        #
        # 2. Setup data processor
        #

        # If no data processor was provided at run-time we infer one from the job
        # metadata and default to the data processor for averaged classified data.
        data_processor = self.options.data_processor

        if not data_processor:
            data_processor = get_processor(experiment_data, self.options)

        if isinstance(data_processor, DataProcessor) and not data_processor.is_trained:
            # Qiskit DataProcessor instance. May need calibration.
            data_processor.train(data=experiment_data.data())

        #
        # 3. Extract curve entries from experiment data
        #
        self._extract_curves(experiment_data=experiment_data, data_processor=data_processor)

        #
        # 4. Run fitting
        #
        formatted_data = self._data(label="fit_ready")

        # Generate algorithmic initial guesses and boundaries
        default_fit_opt = FitOptions(
            parameters=self._fit_params(),
            default_p0=self.options.p0,
            default_bounds=self.options.bounds,
            **self.options.curve_fitter_options,
        )

        fit_options = self._generate_fit_guesses(default_fit_opt)
        if isinstance(fit_options, FitOptions):
            fit_options = [fit_options]

        # Run fit for each configuration
        fit_results = []
        for fit_opt in set(fit_options):
            try:
                fit_result = self.options.curve_fitter(
                    funcs=[series_def.fit_func for series_def in self.__series__],
                    series=formatted_data.data_index,
                    xdata=formatted_data.x,
                    ydata=formatted_data.y,
                    sigma=formatted_data.y_err,
                    **fit_opt.options,
                )
                fit_results.append(fit_result)
            except AnalysisError:
                # Some guesses might be too far from the true parameters and may thus fail.
                # We ignore initial guesses that fail and continue with the next fit candidate.
                pass

        # Find best value with chi-squared value
        if len(fit_results) == 0:
            warnings.warn(
                "All initial guesses and parameter boundaries failed to fit the data. "
                "Please provide better initial guesses or fit parameter boundaries.",
                UserWarning,
            )
            # at least return raw data points rather than terminating
            fit_result = None
        else:
            fit_result = min(fit_results, key=lambda r: r.reduced_chisq)

        #
        # 5. Create database entry
        #
        analysis_results = []
        if fit_result:
            # pylint: disable=assignment-from-none
            quality = self._evaluate_quality(fit_data=fit_result)

            fit_models = {
                series_def.name: series_def.model_description or "no description"
                for series_def in self.__series__
            }

            # overview entry
            analysis_results.append(
                AnalysisResultData(
                    name=PARAMS_ENTRY_PREFIX + self.__class__.__name__,
                    value=[p.nominal_value for p in fit_result.popt],
                    chisq=fit_result.reduced_chisq,
                    quality=quality,
                    extra={
                        "popt_keys": fit_result.popt_keys,
                        "dof": fit_result.dof,
                        "covariance_mat": fit_result.pcov,
                        "fit_models": fit_models,
                        **self.options.extra,
                    },
                ))

            # output special parameters
            result_parameters = self.options.result_parameters
            if result_parameters:
                for param_repr in result_parameters:
                    if isinstance(param_repr, ParameterRepr):
                        p_name = param_repr.name
                        p_repr = param_repr.repr or param_repr.name
                        unit = param_repr.unit
                    else:
                        p_name = param_repr
                        p_repr = param_repr
                        unit = None

                    fit_val = fit_result.fitval(p_name)
                    if unit:
                        metadata = copy.copy(self.options.extra)
                        metadata["unit"] = unit
                    else:
                        metadata = self.options.extra

                    result_entry = AnalysisResultData(
                        name=p_repr,
                        value=fit_val,
                        chisq=fit_result.reduced_chisq,
                        quality=quality,
                        extra=metadata,
                    )
                    analysis_results.append(result_entry)

            # add extra database entries
            analysis_results.extend(self._extra_database_entry(fit_result))

        if self.options.return_data_points:
            # Save raw data points in the database if the option is set (defaults to False).
            raw_data_dict = dict()
            for series_def in self.__series__:
                series_data = self._data(series_name=series_def.name, label="raw_data")
                raw_data_dict[series_def.name] = {
                    "xdata": series_data.x,
                    "ydata": series_data.y,
                    "sigma": series_data.y_err,
                }
            raw_data_entry = AnalysisResultData(
                name=DATA_ENTRY_PREFIX + self.__class__.__name__,
                value=raw_data_dict,
                extra={
                    "x-unit": self.options.xval_unit,
                    "y-unit": self.options.yval_unit,
                },
            )
            analysis_results.append(raw_data_entry)

        #
        # 6. Create figures
        #
        if self.options.plot:
            fit_figure = FitResultPlotters[self.options.curve_plotter].value.draw(
                series_defs=self.__series__,
                raw_samples=[self._data(ser.name, "raw_data") for ser in self.__series__],
                fit_samples=[self._data(ser.name, "fit_ready") for ser in self.__series__],
                tick_labels={
                    "xval_unit": self.options.xval_unit,
                    "yval_unit": self.options.yval_unit,
                    "xlabel": self.options.xlabel,
                    "ylabel": self.options.ylabel,
                    "xlim": self.options.xlim,
                    "ylim": self.options.ylim,
                },
                fit_data=fit_result,
                result_entries=analysis_results,
                style=self.options.style,
                axis=self.options.axis,
            )
            figures = [fit_figure]
        else:
            figures = []

        return analysis_results, figures
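
Step 4 above tries every initial-guess configuration and keeps the fit with the lowest reduced chi-squared. Below is a minimal, self-contained sketch of that pattern, with scipy.optimize.curve_fit standing in for self.options.curve_fitter (an assumption made purely for illustration).

import numpy as np
from scipy.optimize import curve_fit

def model(x, a, tau):
    return a * np.exp(-x / tau)

x = np.linspace(0, 10, 50)
y = model(x, 1.0, 3.0) + 0.01 * np.random.default_rng(0).normal(size=x.size)

results = []
for p0 in ([1.0, 1.0], [0.5, 5.0], [2.0, 10.0]):
    try:
        popt, _ = curve_fit(model, x, y, p0=p0)
        resid = y - model(x, *popt)
        red_chisq = np.sum((resid / 0.01) ** 2) / (x.size - len(popt))
        results.append((red_chisq, popt))
    except RuntimeError:  # curve_fit raises RuntimeError when a guess fails
        continue

# Keep the best candidate, or None if every guess failed.
best = min(results, key=lambda r: r[0])[1] if results else None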
Example #21
    def test_composite_count_memory_marginalization(self, memory):
        """Test the marginalization of level two memory."""
        test_data = ExperimentData()

        # Simplified experimental data
        datum = {
            "counts": {"0 0": 4, "0 1": 1, "1 0": 2, "1 1": 3},
            "memory": memory,
            "metadata": {
                "experiment_type": "ParallelExperiment",
                "composite_index": [0, 1],
                "composite_metadata": [
                    {"experiment_type": "FineXAmplitude", "qubits": [0]},
                    {"experiment_type": "FineXAmplitude", "qubits": [1]},
                ],
                "composite_qubits": [[0], [1]],
                "composite_clbits": [[0], [1]],
            },
            "shots": 10,
            "meas_level": 2,
        }

        test_data.add_data(datum)

        sub_data = CompositeAnalysis([])._marginalized_component_data(test_data.data())
        expected = [
            [
                {
                    "metadata": {"experiment_type": "FineXAmplitude", "qubits": [0]},
                    "counts": {"0": 6, "1": 4},
                    "memory": ["0", "0", "1", "0", "0", "1", "1", "0", "0", "1"],
                }
            ],
            [
                {
                    "metadata": {"experiment_type": "FineXAmplitude", "qubits": [1]},
                    "counts": {"0": 5, "1": 5},
                    "memory": ["0", "1", "1", "0", "0", "0", "1", "0", "1", "1"],
                }
            ],
        ]

        self.assertListEqual(sub_data, expected)
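
The expected marginal counts can be cross-checked with qiskit.result.marginal_counts (assuming qiskit is installed; the keys below omit the register separator used in the test).

from qiskit.result import marginal_counts

counts = {"00": 4, "01": 1, "10": 2, "11": 3}
print(marginal_counts(counts, indices=[0]))  # {'0': 6, '1': 4}, clbit 0
print(marginal_counts(counts, indices=[1]))  # {'0': 5, '1': 5}, clbit 1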
Example #22
class TestAveragingAndSVD(BaseDataProcessorTest):
    """Test the averaging of single-shot IQ data followed by a SVD."""
    def setUp(self):
        """Here, single-shots average to points at plus/minus 1.

        The setting corresponds to four single-shots done on two qubits.
        """
        super().setUp()

        circ_es = ExperimentResultData(memory=[
            [[1.1, 0.9], [-0.8, 1.0]],
            [[1.2, 1.1], [-0.9, 1.0]],
            [[0.8, 1.1], [-1.2, 1.0]],
            [[0.9, 0.9], [-1.1, 1.0]],
        ])
        self._sig_gs = np.array([1.0, -1.0]) / np.sqrt(2.0)

        circ_gs = ExperimentResultData(memory=[
            [[-1.1, -0.9], [0.8, -1.0]],
            [[-1.2, -1.1], [0.9, -1.0]],
            [[-0.8, -1.1], [1.2, -1.0]],
            [[-0.9, -0.9], [1.1, -1.0]],
        ])
        self._sig_es = np.array([-1.0, 1.0]) / np.sqrt(2.0)

        circ_x90p = ExperimentResultData(memory=[
            [[-1.0, -1.0], [1.0, -1.0]],
            [[-1.0, -1.0], [1.0, -1.0]],
            [[1.0, 1.0], [-1.0, 1.0]],
            [[1.0, 1.0], [-1.0, 1.0]],
        ])
        self._sig_x90 = np.array([0, 0])

        circ_x45p = ExperimentResultData(memory=[
            [[-1.0, -1.0], [1.0, -1.0]],
            [[-1.0, -1.0], [1.0, -1.0]],
            [[-1.0, -1.0], [1.0, -1.0]],
            [[1.0, 1.0], [-1.0, 1.0]],
        ])
        self._sig_x45 = np.array([0.5, -0.5]) / np.sqrt(2.0)

        res_es = ExperimentResult(
            shots=4,
            success=True,
            meas_level=1,
            meas_return="single",
            data=circ_es,
            header=self.header,
        )

        res_gs = ExperimentResult(
            shots=4,
            success=True,
            meas_level=1,
            meas_return="single",
            data=circ_gs,
            header=self.header,
        )

        res_x90p = ExperimentResult(
            shots=4,
            success=True,
            meas_level=1,
            meas_return="single",
            data=circ_x90p,
            header=self.header,
        )

        res_x45p = ExperimentResult(
            shots=4,
            success=True,
            meas_level=1,
            meas_return="single",
            data=circ_x45p,
            header=self.header,
        )

        self.data = ExperimentData(FakeExperiment())
        self.data.add_data(
            Result(results=[res_es, res_gs, res_x90p, res_x45p],
                   **self.base_result_args))

    def test_averaging(self):
        """Test that averaging of the datums produces the expected IQ points."""

        processor = DataProcessor("memory", [AverageData(axis=1)])

        # Test that we get the expected outcome for the excited state
        processed = processor(self.data.data(0))

        np.testing.assert_array_almost_equal(
            unp.nominal_values(processed),
            np.array([[1.0, 1.0], [-1.0, 1.0]]),
        )
        np.testing.assert_array_almost_equal(
            unp.std_devs(processed),
            np.array([[0.15811388300841894, 0.1], [0.15811388300841894, 0.0]])
            / 2.0,
        )

        # Test that we get the expected outcome for the ground state
        processed = processor(self.data.data(1))

        np.testing.assert_array_almost_equal(
            unp.nominal_values(processed),
            np.array([[-1.0, -1.0], [1.0, -1.0]]),
        )
        np.testing.assert_array_almost_equal(
            unp.std_devs(processed),
            np.array([[0.15811388300841894, 0.1], [0.15811388300841894, 0.0]])
            / 2.0,
        )

    def test_averaging_and_svd(self):
        """Test averaging followed by a SVD."""

        processor = DataProcessor("memory", [AverageData(axis=1), SVD()])

        # Test training using the calibration points
        self.assertFalse(processor.is_trained)
        processor.train([self.data.data(idx) for idx in [0, 1]])
        self.assertTrue(processor.is_trained)

        # Test the excited state
        processed = processor(self.data.data(0))
        np.testing.assert_array_almost_equal(
            unp.nominal_values(processed),
            self._sig_es,
        )

        # Test the ground state
        processed = processor(self.data.data(1))
        np.testing.assert_array_almost_equal(
            unp.nominal_values(processed),
            self._sig_gs,
        )

        # Test the x90p rotation
        processed = processor(self.data.data(2))
        np.testing.assert_array_almost_equal(
            unp.nominal_values(processed),
            self._sig_x90,
        )
        np.testing.assert_array_almost_equal(
            unp.std_devs(processed),
            np.array([0.25, 0.25]),
        )

        # Test the x45p rotation
        processed = processor(self.data.data(3))
        np.testing.assert_array_almost_equal(
            unp.nominal_values(processed),
            self._sig_x45,
        )
        np.testing.assert_array_almost_equal(
            unp.std_devs(processed),
            np.array([np.std([1, 1, 1, -1]) / np.sqrt(4.0) / 2] * 2),
        )

    def test_process_all_data(self):
        """Test that we can process all data at once."""

        processor = DataProcessor("memory", [AverageData(axis=1), SVD()])

        # Test training using the calibration points
        self.assertFalse(processor.is_trained)
        processor.train([self.data.data(idx) for idx in [0, 1]])
        self.assertTrue(processor.is_trained)

        all_expected = np.vstack((
            self._sig_es.reshape(1, 2),
            self._sig_gs.reshape(1, 2),
            self._sig_x90.reshape(1, 2),
            self._sig_x45.reshape(1, 2),
        ))

        # Test processing of all data
        processed = processor(self.data.data())
        np.testing.assert_array_almost_equal(
            unp.nominal_values(processed),
            all_expected,
        )

        # Test processing of each datum individually
        for idx, expected in enumerate(
            [self._sig_es, self._sig_gs, self._sig_x90, self._sig_x45]
        ):
            processed = processor(self.data.data(idx))
            np.testing.assert_array_almost_equal(
                unp.nominal_values(processed),
                expected,
            )

    def test_normalize(self):
        """Test that adding a normalization node yields a signal between 0 and 1."""

        processor = DataProcessor("memory", [AverageData(axis=1), SVD(), MinMaxNormalize()])

        self.assertFalse(processor.is_trained)
        processor.train([self.data.data(idx) for idx in [0, 1]])
        self.assertTrue(processor.is_trained)

        # Test processing of all data
        processed = processor(self.data.data())
        np.testing.assert_array_almost_equal(
            unp.nominal_values(processed),
            np.array([[0.0, 1.0], [1.0, 0.0], [0.5, 0.5], [0.75, 0.25]]),
        )

    def test_distorted_iq_data(self):
        """Test that the uncertainty accounts for correlations in the data.

        The SVD node projects the IQ data onto the I axis. We input data sets that
        have the same mean and the same variance but are squeezed along different axes.
        """
        svd_node = SVD()
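        # Train the node by hand: unit scale, main axis along I, zero mean, so the
        # projection below reduces to taking the in-phase (I) component of each point.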
        svd_node._scales = [1.0]
        svd_node._main_axes = [np.array([1, 0])]
        svd_node._means = [(0.0, 0.0)]

        processor = DataProcessor("memory", [AverageData(axis=1), svd_node])

        dist_i_axis = {"memory": [[[-1, 0]], [[-0.5, 0]], [[0.0, 0]], [[0.5, 0]], [[1, 0]]]}
        dist_q_axis = {"memory": [[[0, -1]], [[0, -0.5]], [[0, 0.0]], [[0, 0.5]], [[0, 1]]]}

        out_i = processor(dist_i_axis)
        self.assertAlmostEqual(out_i[0].nominal_value, 0.0)
        self.assertAlmostEqual(out_i[0].std_dev, 0.31622776601683794)

        out_q = processor(dist_q_axis)
        self.assertAlmostEqual(out_q[0].nominal_value, 0.0)
        self.assertAlmostEqual(out_q[0].std_dev, 0.0)
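
The standard errors asserted in test_averaging above follow from the standard error of the mean over four shots; a quick check of the arithmetic:

import numpy as np

i_shots = np.array([1.1, 1.2, 0.8, 0.9])
print(np.std(i_shots))                          # 0.15811388300841894
print(np.std(i_shots) / np.sqrt(len(i_shots)))  # 0.0790..., i.e. 0.158... / 2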
Example #23
class TestAvgDataAndSVD(BaseDataProcessorTest):
    """Test the SVD and normalization on averaged IQ data."""
    def setUp(self):
        """Here, the averaged IQ points sit at plus/minus 1.

        The setting corresponds to averaged measurements of two qubits.
        """
        super().setUp()

        circ_es = ExperimentResultData(memory=[[1.0, 1.0], [-1.0, 1.0]])
        self._sig_gs = np.array([1.0, -1.0]) / np.sqrt(2.0)

        circ_gs = ExperimentResultData(memory=[[-1.0, -1.0], [1.0, -1.0]])
        self._sig_es = np.array([-1.0, 1.0]) / np.sqrt(2.0)

        circ_x90p = ExperimentResultData(memory=[[0.0, 0.0], [0.0, 0.0]])
        self._sig_x90 = np.array([0, 0])

        circ_x45p = ExperimentResultData(memory=[[-0.5, -0.5], [0.5, -0.5]])
        self._sig_x45 = np.array([0.5, -0.5]) / np.sqrt(2.0)

        res_es = ExperimentResult(
            shots=4,
            success=True,
            meas_level=1,
            meas_return="avg",
            data=circ_es,
            header=self.header,
        )

        res_gs = ExperimentResult(
            shots=4,
            success=True,
            meas_level=1,
            meas_return="avg",
            data=circ_gs,
            header=self.header,
        )

        res_x90p = ExperimentResult(
            shots=4,
            success=True,
            meas_level=1,
            meas_return="avg",
            data=circ_x90p,
            header=self.header,
        )

        res_x45p = ExperimentResult(
            shots=4,
            success=True,
            meas_level=1,
            meas_return="avg",
            data=circ_x45p,
            header=self.header,
        )

        self.data = ExperimentData(FakeExperiment())
        self.data.add_data(
            Result(results=[res_es, res_gs, res_x90p, res_x45p],
                   **self.base_result_args))

    def test_normalize(self):
        """Test that adding a normalization node yields a signal between 0 and 1."""

        processor = DataProcessor("memory", [SVD(), MinMaxNormalize()])

        self.assertFalse(processor.is_trained)
        processor.train([self.data.data(idx) for idx in [0, 1]])
        self.assertTrue(processor.is_trained)

        # Test processing of all data
        processed = processor(self.data.data())
        np.testing.assert_array_almost_equal(
            unp.nominal_values(processed),
            np.array([[0.0, 1.0], [1.0, 0.0], [0.5, 0.5], [0.75, 0.25]]),
        )
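
A minimal sketch of the min-max normalization that the expected output above relies on (illustrative only, not the MinMaxNormalize node's actual code):

import numpy as np

def minmax(values):
    lo, hi = np.min(values), np.max(values)
    return (values - lo) / (hi - lo)

print(minmax(np.array([-1.0, 1.0, 0.0, 0.5]) / np.sqrt(2)))  # [0.  1.  0.5  0.75]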
Example #24
    def _run_analysis(
        self, experiment_data: ExperimentData
    ) -> Tuple[List[AnalysisResultData], List["pyplot.Figure"]]:

        # Update all fit functions in the series definitions if fixed parameter is defined.
        # These lines will be removed once proper fit model class is implemented.
        assigned_params = self.options.fixed_parameters
        if assigned_params:
            # Check if all parameters are assigned.
            if any(v is None for v in assigned_params.values()):
                raise AnalysisError(
                    f"Unassigned fixed-value parameters for the fit "
                    f"function {self.__class__.__name__}. "
                    f"All values of fixed-parameters, i.e. {assigned_params}, "
                    "must be provided by the analysis options to run this analysis."
                )
            # Override series definition with assigned fit functions.
            assigned_series = []
            for series_def in self.__series__:
                dict_def = dataclasses.asdict(series_def)
                dict_def["fit_func"] = functools.partial(
                    series_def.fit_func, **assigned_params)
                del dict_def["signature"]
                assigned_series.append(SeriesDef(**dict_def))
            self.__series__ = assigned_series

        # Prepare for fitting
        self._initialize(experiment_data)
        analysis_results = []

        # Run data processing
        processed_data = self._run_data_processing(experiment_data.data(), self.__series__)

        if self.options.plot and self.options.plot_raw_data:
            for s in self.__series__:
                sub_data = processed_data.get_subset_of(s.name)
                self.drawer.draw_raw_data(
                    x_data=sub_data.x,
                    y_data=sub_data.y,
                    ax_index=s.canvas,
                )
        # for backward compatibility, will be removed in 0.4.
        self.__processed_data_set["raw_data"] = processed_data

        # Format data
        formatted_data = self._format_data(processed_data)
        if self.options.plot:
            for s in self.__series__:
                sub_data = formatted_data.get_subset_of(s.name)
                self.drawer.draw_formatted_data(
                    x_data=sub_data.x,
                    y_data=sub_data.y,
                    y_err_data=sub_data.y_err,
                    name=s.name,
                    ax_index=s.canvas,
                    color=s.plot_color,
                    marker=s.plot_symbol,
                )
        # for backward compatibility, will be removed in 0.4.
        self.__processed_data_set["fit_ready"] = formatted_data

        # Run fitting
        fit_data = self._run_curve_fit(formatted_data, self.__series__)

        # Create figure and result data
        if fit_data:
            metadata = self.options.extra.copy()
            metadata["fit_models"] = {
                s.name: s.model_description or "no description"
                for s in self.__series__
            }
            quality = self._evaluate_quality(fit_data)

            # Create analysis results
            analysis_results.extend(
                self._create_analysis_results(fit_data, quality, **metadata))
            # calling old extra entry method for backward compatibility
            if hasattr(self, "_extra_database_entry"):
                warnings.warn(
                    "Method '_extra_database_entry' has been deprecated and will be "
                    "removed after 0.4. Please override new method "
                    "'_create_analysis_results' with updated method signature.",
                    DeprecationWarning,
                )
                deprecated_method = getattr(self, "_extra_database_entry")
                analysis_results.extend(deprecated_method(fit_data))

            # Draw fit curves and report
            if self.options.plot:
                for s in self.__series__:
                    interp_x = np.linspace(*fit_data.x_range, 100)

                    params = {}
                    for fitpar in s.signature:
                        if fitpar in self.options.fixed_parameters:
                            params[fitpar] = self.options.fixed_parameters[fitpar]
                        else:
                            params[fitpar] = fit_data.fitval(fitpar)

                    y_data_with_uncertainty = s.fit_func(interp_x, **params)
                    y_mean = unp.nominal_values(y_data_with_uncertainty)
                    y_std = unp.std_devs(y_data_with_uncertainty)
                    # Draw fit line
                    self.drawer.draw_fit_line(
                        x_data=interp_x,
                        y_data=y_mean,
                        ax_index=s.canvas,
                        color=s.plot_color,
                    )
                    # Draw confidence intervals with different n_sigma
                    if np.isfinite(y_std).all():
                        for n_sigma, alpha in self.drawer.options.plot_sigma:
                            self.drawer.draw_confidence_interval(
                                x_data=interp_x,
                                y_ub=y_mean + n_sigma * y_std,
                                y_lb=y_mean - n_sigma * y_std,
                                ax_index=s.canvas,
                                alpha=alpha,
                                color=s.plot_color,
                            )

                # Write fitting report
                report_description = ""
                for res in analysis_results:
                    if isinstance(res.value, (float, UFloat)):
                        report_description += f"{analysis_result_to_repr(res)}\n"
                report_description += r"Fit $\chi^2$ = " + f"{fit_data.reduced_chisq: .4g}"
                self.drawer.draw_fit_report(description=report_description)

        # Add raw data points
        analysis_results.extend(
            self._create_curve_data(formatted_data, self.__series__))

        # Finalize plot
        if self.options.plot:
            self.drawer.format_canvas()
            return analysis_results, [self.drawer.figure]

        return analysis_results, []
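
For orientation, here is a purely illustrative series definition for a simple exponential decay, using only fields that appear in the snippets above (name, fit_func, model_description). The import path is an assumption, not confirmed by this page.

import numpy as np
from qiskit_experiments.curve_analysis import SeriesDef

decay_series = SeriesDef(
    name="decay",
    fit_func=lambda x, amp, tau, base: amp * np.exp(-x / tau) + base,
    model_description="amp * exp(-x / tau) + base",
)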