Example 1
    def test_stdgst_matrix(self):
        # Using matrix-based calculations
        target_model = std.target_model().copy()
        target_model.set_all_parameterizations("CPTP")
        target_model.set_simtype(
            'matrix')  # the default for 1Q, so we could remove this line
        results = pygsti.run_long_sequence_gst(self.ds,
                                               target_model,
                                               std.prepStrs,
                                               std.effectStrs,
                                               self.germs,
                                               self.maxLengths,
                                               advanced_options=self.advOpts,
                                               verbosity=4)
        #RUN BELOW LINES TO SAVE GATESET (UNCOMMENT to regenerate)
        #pygsti.io.write_model(results.estimates['default'].models['go0'],
        #                        compare_files + "/test2Qcalc_std_exact.model","Saved Standard-Calc 2Q test model")

        # Note: the expected nSigma of 143 is so high because we use a very loose tolerance of 1e-2, so the result isn't very good
        print("MISFIT nSigma = ", results.estimates['default'].misfit_sigma())
        self.assertAlmostEqual(results.estimates['default'].misfit_sigma(),
                               143,
                               delta=2.0)
        mdl_compare = pygsti.io.load_model(compare_files +
                                           "/test2Qcalc_std_exact.model")
        self.assertAlmostEqual(
            results.estimates['default'].models['go0'].frobeniusdist(
                mdl_compare),
            0,
            places=3)
Example 2
    def test_reducedmod_map2(self):
        # Using sparse embedded matrices and map-based calcs
        target_model = pc.build_nqnoise_model(self.nQubits,
                                              geometry="line",
                                              max_idle_weight=1,
                                              maxhops=1,
                                              extra_weight_1_hops=0,
                                              extra_gate_weight=1,
                                              sparse=True,
                                              sim_type="map",
                                              verbosity=1)
        target_model.from_vector(self.rand_start206)
        results = pygsti.run_long_sequence_gst(
            self.redmod_ds,
            target_model,
            self.redmod_fiducials,
            self.redmod_fiducials,
            self.redmod_germs,
            self.redmod_maxLs,
            verbosity=4,
            advanced_options={'tolerance': 1e-3})

        print("MISFIT nSigma = ", results.estimates['default'].misfit_sigma())
        self.assertAlmostEqual(results.estimates['default'].misfit_sigma(),
                               1.0,
                               delta=1.0)
        mdl_compare = pygsti.serialization.json.load(
            open(compare_files + "/test2Qcalc_redmod_exact.model"))
        self.assertAlmostEqual(np.linalg.norm(
            results.estimates['default'].models['go0'].to_vector() -
            mdl_compare.to_vector()),
                               0,
                               places=1)
Example 3
    def test_reducedmod_cterm(self):
        # Using term-based calcs with map-based stabilizer-state propagation
        target_model = pc.build_nqnoise_model(
            self.nQubits,
            geometry="line",
            max_idle_weight=1,
            maxhops=1,
            extra_weight_1_hops=0,
            extra_gate_weight=1,
            sparse=False,
            verbosity=1,
            sim_type="termorder:1",
            parameterization="H+S clifford terms")
        target_model.from_vector(self.rand_start228)
        results = pygsti.run_long_sequence_gst(
            self.redmod_ds,
            target_model,
            self.redmod_fiducials,
            self.redmod_fiducials,
            self.redmod_germs,
            self.redmod_maxLs,
            verbosity=4,
            advanced_options={'tolerance': 1e-3})

        print("MISFIT nSigma = ", results.estimates['default'].misfit_sigma())
        self.assertAlmostEqual(results.estimates['default'].misfit_sigma(),
                               3.0,
                               delta=1.0)
        mdl_compare = pygsti.serialization.json.load(
            open(compare_files + "/test2Qcalc_redmod_terms.model"))
        self.assertAlmostEqual(np.linalg.norm(
            results.estimates['default'].models['go0'].to_vector() -
            mdl_compare.to_vector()),
                               0,
                               places=3)
Example 4
    def test_reducedmod_cterm_errorgens(self):
        # Using term-based calcs with map-based stabilizer-state propagation (same as above)
        # but with errcomp_type='errorgens' model
        termsim = pygsti.forwardsims.TermForwardSimulator(mode='taylor-order',
                                                          max_order=1)
        target_model = build_XYCNOT_cloudnoise_model(self.nQubits,
                                                     geometry="line",
                                                     maxIdleWeight=1,
                                                     maxhops=1,
                                                     extraWeight1Hops=0,
                                                     extraGateWeight=1,
                                                     verbosity=1,
                                                     evotype="stabilizer",
                                                     simulator=termsim,
                                                     parameterization="H+S",
                                                     errcomp_type='errorgens')
        print("Num params = ", target_model.num_params)
        target_model.from_vector(self.rand_start36)
        results = pygsti.run_long_sequence_gst(
            self.redmod_ds,
            target_model,
            self.redmod_fiducials,
            self.redmod_fiducials,
            self.redmod_germs,
            self.redmod_maxLs,
            verbosity=4,
            advanced_options={'tolerance': 1e-3})

        print("MISFIT nSigma = ",
              results.estimates[results.name].misfit_sigma())
        self.assertAlmostEqual(results.estimates[results.name].misfit_sigma(),
                               0.0,
                               delta=1.0)
Example 5
    def test_reducedmod_map2_errorgens(self):
        # Using sparse embedded matrices and map-based calcs (same as above)
        # but with *errcomp_type='errorgens'* model (maybe not really necessary to include?)
        target_model = build_XYCNOT_cloudnoise_model(self.nQubits,
                                                     geometry="line",
                                                     maxIdleWeight=1,
                                                     maxhops=1,
                                                     extraWeight1Hops=0,
                                                     extraGateWeight=1,
                                                     simulator="map",
                                                     errcomp_type='errorgens',
                                                     verbosity=1)
        print("Num params = ", target_model.num_params)
        target_model.from_vector(self.rand_start25)
        results = pygsti.run_long_sequence_gst(
            self.redmod_ds,
            target_model,
            self.redmod_fiducials,
            self.redmod_fiducials,
            self.redmod_germs,
            self.redmod_maxLs,
            verbosity=4,
            advanced_options={'tolerance': 1e-3})

        print("MISFIT nSigma = ",
              results.estimates[results.name].misfit_sigma())
        self.assertAlmostEqual(results.estimates[results.name].misfit_sigma(),
                               0.0,
                               delta=1.0)
Example 6
    def test_time_dependent_gst_staticdata(self):

        #run GST in a time-dependent mode:
        prep_fiducials, meas_fiducials = std1Q_XYI.prepStrs, std1Q_XYI.effectStrs
        germs = std1Q_XYI.germs
        maxLengths = [1, 2]

        target_model = std1Q_XYI.target_model("full TP", sim_type="map")
        mdl_datagen = target_model.depolarize(op_noise=0.01, spam_noise=0.001)
        edesign = pygsti.protocols.StandardGSTDesign(target_model.create_processor_spec(), prep_fiducials,
                                                     meas_fiducials, germs, maxLengths)

        # *sparse*, time-independent data
        ds = pygsti.data.simulate_data(mdl_datagen, edesign.all_circuits_needing_data, num_samples=10,
                                               sample_error="binomial", seed=1234, times=[0],
                                               record_zero_counts=False)
        data = pygsti.protocols.ProtocolData(edesign, ds)

        target_model.sim = pygsti.forwardsims.MapForwardSimulator(max_cache_size=0)  # No caching allowed for time-dependent calcs
        self.assertEqual(ds.degrees_of_freedom(aggregate_times=False), 126)

        builders = pygsti.protocols.GSTObjFnBuilders([pygsti.objectivefns.TimeDependentPoissonPicLogLFunction.builder()], [])
        gst = pygsti.protocols.GateSetTomography(target_model, gaugeopt_suite=None,
                                                 objfn_builders=builders)
        results = gst.run(data)

        # Normal GST used as a check - should get same answer since data is time-independent
        results2 = pygsti.run_long_sequence_gst(ds, target_model, prep_fiducials, meas_fiducials,
                                                germs, maxLengths, verbosity=3,
                                                advanced_options={'starting_point': 'target',
                                                                  'always_perform_mle': True,
                                                                  'only_perform_mle': True}, gauge_opt_params=False)
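
A minimal sketch (not in the original test) of tagging simulated counts with several time stamps by passing more values to the `times` argument used above; `mdl_datagen` and `edesign` are reused from the test and `ds_multi_t` is an illustrative name. With the static `mdl_datagen` the outcome probabilities are identical at every time stamp, so genuinely time-dependent behavior would require a time-dependent model.

# Hedged sketch: multi-timestamp data simulated from the same (static) datagen model
ds_multi_t = pygsti.data.simulate_data(mdl_datagen, edesign.all_circuits_needing_data, num_samples=10,
                                       sample_error="binomial", seed=1234, times=[0.0, 0.1, 0.2],
                                       record_zero_counts=False)
print(ds_multi_t.degrees_of_freedom(aggregate_times=False))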
Example 7
    def test_reducedmod_matrix(self):
        # Using dense matrices and matrix-based calcs
        target_model = build_XYCNOT_cloudnoise_model(self.nQubits,
                                                     geometry="line",
                                                     maxIdleWeight=1,
                                                     maxhops=1,
                                                     extraWeight1Hops=0,
                                                     extraGateWeight=1,
                                                     simulator="matrix",
                                                     verbosity=1)
        print("Num params = ", target_model.num_params)
        target_model.from_vector(self.rand_start25)
        results = pygsti.run_long_sequence_gst(
            self.redmod_ds,
            target_model,
            self.redmod_fiducials,
            self.redmod_fiducials,
            self.redmod_germs,
            self.redmod_maxLs,
            verbosity=4,
            advanced_options={'tolerance': 1e-3})

        #RUN BELOW LINES TO SAVE GATESET (SAVE)
        if regenerate_references():
            pygsti.serialization.json.dump(
                results.estimates[results.name].models['go0'],
                open(compare_files + "/test1Qcalc_redmod_exact.model", 'w'))

        print("MISFIT nSigma = ",
              results.estimates[results.name].misfit_sigma())
        self.assertAlmostEqual(results.estimates[results.name].misfit_sigma(),
                               0.0,
                               delta=1.0)
Example 8
    def test_reducedmod_map2(self):
        # Using sparse embedded matrices and map-based calcs
        target_model = build_XYCNOT_cloudnoise_model(self.nQubits,
                                                     geometry="line",
                                                     maxIdleWeight=1,
                                                     maxhops=1,
                                                     extraWeight1Hops=0,
                                                     extraGateWeight=1,
                                                     simulator="map",
                                                     errcomp_type='gates',
                                                     verbosity=1)
        print("Num params = ", target_model.num_params)
        target_model.from_vector(self.rand_start25)
        results = pygsti.run_long_sequence_gst(
            self.redmod_ds,
            target_model,
            self.redmod_fiducials,
            self.redmod_fiducials,
            self.redmod_germs,
            self.redmod_maxLs,
            verbosity=4,
            advanced_options={'tolerance': 1e-3})

        print("MISFIT nSigma = ",
              results.estimates[results.name].misfit_sigma())
        self.assertAlmostEqual(results.estimates[results.name].misfit_sigma(),
                               0.0,
                               delta=1.0)
        mdl_compare = pygsti.serialization.json.load(
            open(compare_files + "/test1Qcalc_redmod_exact.model"))
        self.assertAlmostEqual(np.linalg.norm(
            results.estimates[results.name].models['go0'].to_vector() -
            mdl_compare.to_vector()),
                               0,
                               places=1)
Example 9
    def test_stdgst_prunedpath(self):
        # Using term-based (path integral) calculation with path pruning
        # This performs a map-based unitary evolution along each path.
        target_model = std.target_model("static unitary", evotype='statevec')
        target_model.set_all_parameterizations("H+S")
        target_model.sim = pygsti.forwardsims.TermForwardSimulator(
            mode='pruned',
            max_order=3,
            desired_perr=0.01,
            allowed_perr=0.1,
            max_paths_per_outcome=1000,
            perr_heuristic='meanscaled',
            max_term_stages=5)
        target_model.from_vector(
            1e-10 * np.ones(target_model.num_params)
        )  # to seed term calc (starting with perfect zeros causes trouble)
        results = pygsti.run_long_sequence_gst(self.ds,
                                               target_model,
                                               std.prep_fiducials(),
                                               std.meas_fiducials(),
                                               std.germs(),
                                               self.maxLengths,
                                               verbosity=3)

        #RUN BELOW LINES TO SAVE GATESET (SAVE)
        if regenerate_references():
            pygsti.serialization.json.dump(
                results.estimates[results.name].models['go0'],
                open(compare_files + "/test1Qcalc_std_prunedpath.model", 'w'))

        print("MISFIT nSigma = ",
              results.estimates[results.name].misfit_sigma())
        self.assertAlmostEqual(results.estimates[results.name].misfit_sigma(),
                               1,
                               delta=1.0)
Example 10
    def test_stdgst_map(self):
        # Using map-based calculation
        target_model = std.target_model()
        target_model.set_all_parameterizations("CPTP")
        target_model.sim = 'map'
        results = pygsti.run_long_sequence_gst(self.ds,
                                               target_model,
                                               std.prep_fiducials(),
                                               std.meas_fiducials(),
                                               std.germs(),
                                               self.maxLengths,
                                               verbosity=4)

        print("MISFIT nSigma = ",
              results.estimates[results.name].misfit_sigma())
        self.assertAlmostEqual(results.estimates[results.name].misfit_sigma(),
                               1.0,
                               delta=2.0)
        mdl_compare = pygsti.serialization.json.load(
            open(compare_files + "/test1Qcalc_std_exact.model"))

        gsEstimate = results.estimates[results.name].models['go0'].copy()
        gsEstimate.set_all_parameterizations("full")
        gsEstimate = pygsti.algorithms.gaugeopt_to_target(
            gsEstimate, mdl_compare)
        self.assertAlmostEqual(gsEstimate.frobeniusdist(mdl_compare),
                               0,
                               places=0)
Example 11
    def test_stdgst_terms(self):
        # Using term-based (path integral) calculation
        # This performs a map-based unitary evolution along each path.
        target_model = std.target_model().copy()
        target_model.set_all_parameterizations("H+S terms")
        target_model.set_simtype(
            'termorder:1'
        )  # this is the default set by set_all_parameterizations above
        results = pygsti.run_long_sequence_gst(self.ds,
                                               target_model,
                                               std.prepStrs,
                                               std.effectStrs,
                                               self.germs,
                                               self.maxLengths,
                                               verbosity=4)

        #RUN BELOW LINES TO SAVE GATESET (UNCOMMENT to regenerate)
        #pygsti.io.json.dump(results.estimates['default'].models['go0'],
        #                    open(compare_files + "/test2Qcalc_std_terms.model",'w'))

        print("MISFIT nSigma = ", results.estimates['default'].misfit_sigma())
        self.assertAlmostEqual(results.estimates['default'].misfit_sigma(),
                               5,
                               delta=1.0)
        mdl_compare = pygsti.serialization.json.load(
            open(compare_files + "/test2Qcalc_std_terms.model"))
        self.assertAlmostEqual(np.linalg.norm(
            results.estimates['default'].models['go0'].to_vector() -
            mdl_compare.to_vector()),
                               0,
                               places=3)
Example 12
    def test_stdgst_map(self):
        # Using map-based calculation
        target_model = std.target_model().copy()
        target_model.set_all_parameterizations("CPTP")
        target_model.set_simtype('map')
        results = pygsti.run_long_sequence_gst(self.ds,
                                               target_model,
                                               std.prepStrs,
                                               std.effectStrs,
                                               self.germs,
                                               self.maxLengths,
                                               advanced_options=self.advOpts,
                                               verbosity=4)

        #Note: the expected nSigma of 143 is so high because we use a very loose tolerance of 1e-2, so the result isn't very good
        print("MISFIT nSigma = ", results.estimates['default'].misfit_sigma())
        self.assertAlmostEqual(results.estimates['default'].misfit_sigma(),
                               143,
                               delta=2.0)
        mdl_compare = pygsti.io.load_model(compare_files +
                                           "/test2Qcalc_std_exact.model")
        self.assertAlmostEqual(
            results.estimates['default'].models['go0'].frobeniusdist(
                mdl_compare),
            0,
            places=3)
Example 13
    def test_stdgst_terms(self):
        # Using term-based (path integral) calculation
        # This performs a map-based unitary evolution along each path.
        target_model = std.target_model("static unitary", evotype='statevec')
        target_model.set_all_parameterizations("H+S")
        target_model.sim = pygsti.forwardsims.TermForwardSimulator(
            mode='taylor-order', max_order=1)
        target_model._print_gpindices()
        target_model.from_vector(
            1e-10 * np.ones(target_model.num_params)
        )  # to seed term calc (starting with perfect zeros causes trouble)
        results = pygsti.run_long_sequence_gst(self.ds,
                                               target_model,
                                               std.prep_fiducials(),
                                               std.meas_fiducials(),
                                               std.germs(),
                                               self.maxLengths,
                                               verbosity=4)

        #RUN BELOW LINES TO SAVE GATESET (SAVE)
        if regenerate_references():
            pygsti.serialization.json.dump(
                results.estimates[results.name].models['go0'],
                open(compare_files + "/test1Qcalc_std_terms.model", 'w'))

        print("MISFIT nSigma = ",
              results.estimates[results.name].misfit_sigma())
        self.assertAlmostEqual(results.estimates[results.name].misfit_sigma(),
                               1,
                               delta=1.0)
        mdl_compare = pygsti.serialization.json.load(
            open(compare_files + "/test1Qcalc_std_terms.model"))

        # can't easily gauge opt b/c term-based models can't be converted to "full"
        #mdl_compare.set_all_parameterizations("full")
        #
        #gsEstimate = results.estimates[results.name].models['go0'].copy()
        #gsEstimate.set_all_parameterizations("full")
        #gsEstimate = pygsti.algorithms.gaugeopt_to_target(gsEstimate, mdl_compare)
        #self.assertAlmostEqual( gsEstimate.frobeniusdist(mdl_compare), 0, places=0)

        #A direct vector comparison works if python (&numpy?) versions are identical, but
        # gauge freedoms make this incorrectly fail in other cases - so just check sigmas
        print("VEC DIFF = ",
              (results.estimates[results.name].models['go0'].to_vector() -
               mdl_compare.to_vector()))
        self.assertAlmostEqual(np.linalg.norm(
            results.estimates[results.name].models['go0'].to_vector() -
            mdl_compare.to_vector()),
                               0,
                               places=1)
Example 14
    def test_idletomog_gstdata_1Qofstd2Q(self):
        # perform idle tomography on first qubit of 2Q
        from pygsti.modelpacks.legacy import std2Q_XYICNOT as std2Q
        from pygsti.modelpacks.legacy import std1Q_XYI as std
        std2Q = pygsti.modelpacks.stdtarget.stdmodule_to_smqmodule(std2Q)
        std = pygsti.modelpacks.stdtarget.stdmodule_to_smqmodule(std)

        maxLens = [1, 2, 4]
        expList = pygsti.circuits.create_lsgst_circuits(
            std2Q.target_model(), std2Q.prepStrs, std2Q.effectStrs,
            std2Q.germs_lite, maxLens)
        mdl_datagen = std2Q.target_model().depolarize(0.01, 0.01)
        ds2Q = pygsti.data.simulate_data(mdl_datagen,
                                         expList,
                                         1000,
                                         'multinomial',
                                         seed=1234)

        #Just analyze first qubit (qubit 0)
        ds = pygsti.data.filter_dataset(ds2Q, (0, ))

        start = std.target_model()
        start.set_all_parameterizations("full TP")
        result = pygsti.run_long_sequence_gst(
            ds,
            start,
            std.prepStrs[0:4],
            std.effectStrs[0:4],
            std.germs_lite,
            maxLens,
            verbosity=3,
            advanced_options={'objective': 'chi2'})

        report = pygsti.report.construct_standard_report(
            result,
            "Test GST Report w/Idle Tomog.: StdXYI from StdXYICNOT",
            advanced_options={'idt_idle_oplabel': ()},
            verbosity=3)
        idt_sections = list(
            filter(
                lambda x: isinstance(
                    x, pygsti.report.section.IdleTomographySection),
                report._sections))
        self.assertEqual(len(idt_sections), 1)
        self.assertTrue(
            isinstance(
                report._global_qtys['top_switchboard'].idtresults.base[0],
                pygsti.extras.idletomography.idtresults.IdleTomographyResults))
Example 15
    def test_reducedmod_svterm(self):
        # Using term-based calcs with map-based state-vector propagation
        termsim = pygsti.forwardsims.TermForwardSimulator(mode='taylor-order',
                                                          max_order=1)
        target_model = build_XYCNOT_cloudnoise_model(self.nQubits,
                                                     geometry="line",
                                                     maxIdleWeight=1,
                                                     maxhops=1,
                                                     extraWeight1Hops=0,
                                                     extraGateWeight=1,
                                                     evotype="statevec",
                                                     verbosity=1,
                                                     simulator=termsim,
                                                     parameterization="H+S",
                                                     errcomp_type='gates')
        print("Num params = ", target_model.num_params)
        target_model.from_vector(self.rand_start36)
        results = pygsti.run_long_sequence_gst(
            self.redmod_ds,
            target_model,
            self.redmod_fiducials,
            self.redmod_fiducials,
            self.redmod_germs,
            self.redmod_maxLs,
            verbosity=4,
            advanced_options={'tolerance': 1e-3})

        #RUN BELOW LINES TO SAVE GATESET (SAVE)
        if regenerate_references():
            pygsti.serialization.json.dump(
                results.estimates[results.name].models['go0'],
                open(compare_files + "/test1Qcalc_redmod_terms.model", 'w'))

        print("MISFIT nSigma = ",
              results.estimates[results.name].misfit_sigma())
        self.assertAlmostEqual(results.estimates[results.name].misfit_sigma(),
                               0.0,
                               delta=1.0)
        mdl_compare = pygsti.serialization.json.load(
            open(compare_files + "/test1Qcalc_redmod_terms.model"))
        self.assertAlmostEqual(np.linalg.norm(
            results.estimates[results.name].models['go0'].to_vector() -
            mdl_compare.to_vector()),
                               0,
                               places=3)
Example 16
    def test_stdgst_matrix(self):
        # Using matrix-based calculations
        target_model = std.target_model()
        target_model.set_all_parameterizations("CPTP")
        target_model.sim = 'matrix'  # the default for 1Q, so we could remove this line
        results = pygsti.run_long_sequence_gst(self.ds,
                                               target_model,
                                               std.prep_fiducials(),
                                               std.meas_fiducials(),
                                               std.germs(),
                                               self.maxLengths,
                                               verbosity=4)

        #CHECK that copy gives identical models - this is checked by other
        # unit tests but here we're using a true "GST model" - so do it again:
        print("CHECK COPY")
        mdl = results.estimates[results.name].models['go0']
        mdl_copy = mdl.copy()
        print(mdl.strdiff(mdl_copy))
        self.assertAlmostEqual(mdl.frobeniusdist(mdl_copy), 0, places=2)

        #RUN BELOW LINES TO SAVE GATESET (SAVE)
        if regenerate_references():
            pygsti.serialization.json.dump(
                results.estimates[results.name].models['go0'],
                open(compare_files + "/test1Qcalc_std_exact.model", 'w'))

        print("MISFIT nSigma = ",
              results.estimates[results.name].misfit_sigma())
        self.assertAlmostEqual(results.estimates[results.name].misfit_sigma(),
                               1.0,
                               delta=2.0)
        mdl_compare = pygsti.serialization.json.load(
            open(compare_files + "/test1Qcalc_std_exact.model"))

        #gauge opt before compare
        gsEstimate = results.estimates[results.name].models['go0'].copy()
        gsEstimate.set_all_parameterizations("full")
        gsEstimate = pygsti.algorithms.gaugeopt_to_target(
            gsEstimate, mdl_compare)
        print(gsEstimate.strdiff(mdl_compare))
        self.assertAlmostEqual(gsEstimate.frobeniusdist(mdl_compare),
                               0,
                               places=1)
Example 17
    def test_reducedmod_prunedpath_svterm_errorgens(self):
        termsim = pygsti.forwardsims.TermForwardSimulator(mode='pruned')
        target_model = build_XYCNOT_cloudnoise_model(self.nQubits,
                                                     geometry="line",
                                                     maxIdleWeight=1,
                                                     maxhops=1,
                                                     extraWeight1Hops=0,
                                                     extraGateWeight=1,
                                                     verbosity=1,
                                                     evotype="statevec",
                                                     simulator=termsim,
                                                     parameterization="H+S",
                                                     errcomp_type='errorgens')

        # separately assign the simulator to set the remaining term-simulator parameters
        target_model.sim = pygsti.forwardsims.TermForwardSimulator(
            mode='pruned',
            max_order=3,
            desired_perr=0.01,
            allowed_perr=0.05,
            max_paths_per_outcome=1000,
            perr_heuristic='none',
            max_term_stages=5)

        print("Num params = ", target_model.num_params)
        target_model.from_vector(self.rand_start36)
        results = pygsti.run_long_sequence_gst(
            self.redmod_ds,
            target_model,
            self.redmod_fiducials,
            self.redmod_fiducials,
            self.redmod_germs,
            self.redmod_maxLs,
            verbosity=4,
            advanced_options={'tolerance': 1e-3})

        print("MISFIT nSigma = ",
              results.estimates[results.name].misfit_sigma())
        self.assertAlmostEqual(results.estimates[results.name].misfit_sigma(),
                               0.0,
                               delta=1.0)
Example 18
    def test_idletomog_gstdata_std1Q(self):
        from pygsti.modelpacks.legacy import std1Q_XYI as std
        std = pygsti.modelpacks.stdtarget.stdmodule_to_smqmodule(std)

        maxLens = [1, 2, 4]
        expList = pygsti.circuits.create_lsgst_circuits(
            std.target_model(), std.prepStrs, std.effectStrs, std.germs_lite,
            maxLens)
        ds = pygsti.data.simulate_data(std.target_model().depolarize(
            0.01, 0.01),
                                       expList,
                                       1000,
                                       'multinomial',
                                       seed=1234)

        result = pygsti.run_long_sequence_gst(ds,
                                              std.target_model(),
                                              std.prepStrs,
                                              std.effectStrs,
                                              std.germs_lite,
                                              maxLens,
                                              verbosity=3)

        #standard report will run idle tomography
        report = pygsti.report.construct_standard_report(
            result,
            "Test GST Report w/Idle Tomography Tab: StdXYI",
            advanced_options={'idt_idle_oplabel': ()},
            verbosity=3)
        idt_sections = list(
            filter(
                lambda x: isinstance(
                    x, pygsti.report.section.IdleTomographySection),
                report._sections))
        self.assertEqual(len(idt_sections), 1)
        self.assertTrue(
            isinstance(
                report._global_qtys['top_switchboard'].idtresults.base[0],
                pygsti.extras.idletomography.idtresults.IdleTomographyResults))
Example 19
    def test_reducedmod_matrix(self):
        # Using dense matrices and matrix-based calcs
        target_model = pc.build_nqnoise_model(self.nQubits,
                                              geometry="line",
                                              max_idle_weight=1,
                                              maxhops=1,
                                              extra_weight_1_hops=0,
                                              extra_gate_weight=1,
                                              sparse=False,
                                              sim_type="matrix",
                                              verbosity=1)
        target_model.from_vector(self.rand_start206)
        results = pygsti.run_long_sequence_gst(
            self.redmod_ds,
            target_model,
            self.redmod_fiducials,
            self.redmod_fiducials,
            self.redmod_germs,
            self.redmod_maxLs,
            verbosity=4,
            advanced_options={'tolerance': 1e-3})

        #RUN BELOW LINES TO SAVE GATESET (UNCOMMENT to regenerate)
        #pygsti.io.json.dump(results.estimates['default'].models['go0'],
        #                    open(compare_files + "/test2Qcalc_redmod_exact.model",'w'))

        print("MISFIT nSigma = ", results.estimates['default'].misfit_sigma())
        self.assertAlmostEqual(results.estimates['default'].misfit_sigma(),
                               1.0,
                               delta=1.0)
        mdl_compare = pygsti.serialization.json.load(
            open(compare_files + "/test2Qcalc_redmod_exact.model"))
        self.assertAlmostEqual(
            results.estimates['default'].models['go0'].frobeniusdist(
                mdl_compare),
            0,
            places=3)
Example 20
# ds1 generation reconstructed to mirror ds2 below; gs_datagen1 is assumed to be defined above this excerpt
ds1 = pygsti.construction.simulate_data(gs_datagen1,
                                        listOfExperiments,
                                        n_samples=1000,
                                        sample_error="binomial",
                                        seed=1234)
ds2 = pygsti.construction.simulate_data(gs_datagen2,
                                        listOfExperiments,
                                        n_samples=1000,
                                        sample_error="binomial",
                                        seed=1234)
ds3 = ds1.copy_nonstatic()
ds3.add_counts_from_dataset(ds2)
ds3.done_adding_data()

#Run GST on all three datasets
gs_target.set_all_parameterizations("TP")
results1 = pygsti.run_long_sequence_gst(ds1,
                                        gs_target,
                                        fiducials,
                                        fiducials,
                                        germs,
                                        maxLengths,
                                        verbosity=0)
results2 = pygsti.run_long_sequence_gst(ds2,
                                        gs_target,
                                        fiducials,
                                        fiducials,
                                        germs,
                                        maxLengths,
                                        verbosity=0)
results3 = pygsti.run_long_sequence_gst(ds3,
                                        gs_target,
                                        fiducials,
                                        fiducials,
                                        germs,
                                        maxLengths,
                                        verbosity=0)  # trailing arguments assumed to mirror the two calls above
Example 21
    def setUpClass(cls):
        """
        Handle all once-per-class (slow) computation and loading,
         to avoid calling it for each test (like setUp).  Store
         results in class variable for use within setUp.
        """
        super(ReportBaseCase, cls).setUpClass()

        orig_cwd = os.getcwd()
        os.chdir(os.path.abspath(os.path.dirname(__file__)))
        os.chdir('..') # The test_packages directory

        target_model = std.target_model()
        datagen_gateset = target_model.depolarize(op_noise=0.05, spam_noise=0.1)
        datagen_gateset2 = target_model.depolarize(op_noise=0.1, spam_noise=0.05).rotate((0.15,-0.03,0.03))

        #cls.specs = pygsti.construction.build_spam_specs(std.fiducials, effect_labels=['E0'])
        #  #only use the first EVec

        op_labels = std.gates
        cls.lgstStrings = pygsti.circuits.create_lgst_circuits(std.fiducials, std.fiducials, op_labels)
        cls.maxLengthList = [1,2,4,8]

        cls.lsgstStrings = pygsti.circuits.create_lsgst_circuit_lists(
            op_labels, std.fiducials, std.fiducials, std.germs, cls.maxLengthList)
        cls.lsgstStructs = pygsti.circuits.make_lsgst_structs(
            op_labels, std.fiducials, std.fiducials, std.germs, cls.maxLengthList)


        # RUN BELOW LINES TO GENERATE ANALYSIS DATASET (SAVE)
        if regenerate_references():
            ds = pygsti.data.simulate_data(datagen_gateset, cls.lsgstStrings[-1], num_samples=1000,
                                                   sample_error='binomial', seed=100)
            ds.save(compare_files + "/reportgen.dataset")
            ds2 = pygsti.data.simulate_data(datagen_gateset2, cls.lsgstStrings[-1], num_samples=1000,
                                                    sample_error='binomial', seed=100)
            ds2.save(compare_files + "/reportgen2.dataset")


        cls.ds = pygsti.data.DataSet(file_to_load_from=compare_files + "/reportgen.dataset")
        cls.ds2 = pygsti.data.DataSet(file_to_load_from=compare_files + "/reportgen2.dataset")

        mdl_lgst = pygsti.run_lgst(cls.ds, std.fiducials, std.fiducials, target_model, svd_truncate_to=4, verbosity=0)
        mdl_lgst_go = pygsti.gaugeopt_to_target(mdl_lgst, target_model, {'gates': 1.0, 'spam': 0.0})
        cls.mdl_clgst = pygsti.contract(mdl_lgst_go, "CPTP")
        cls.mdl_clgst_tp = pygsti.contract(cls.mdl_clgst, "vSPAM")
        cls.mdl_clgst_tp.set_all_parameterizations("full TP")

        #Compute results for MC2GST
        lsgst_gatesets_prego, *_ = pygsti.run_iterative_gst(
            cls.ds, cls.mdl_clgst, cls.lsgstStrings,
            optimizer={'tol': 1e-5},
            iteration_objfn_builders=['chi2'],
            final_objfn_builders=[],
            resource_alloc=None,
            verbosity=0
        )

        experiment_design = pygsti.protocols.StandardGSTDesign(
            target_model.create_processor_spec(), std.fiducials, std.fiducials, std.germs, cls.maxLengthList
        )
        data = pygsti.protocols.ProtocolData(experiment_design, cls.ds)
        protocol = pygsti.protocols.StandardGST()
        cls.results = pygsti.protocols.gst.ModelEstimateResults(data, protocol)
        cls.results.add_estimate(pygsti.protocols.estimate.Estimate.create_gst_estimate(
            cls.results, target_model, cls.mdl_clgst,lsgst_gatesets_prego,
            {'objective': "chi2",
             'min_prob_clip_for_weighting': 1e-4,
             'prob_clip_interval': (-1e6,1e6), 'radius': 1e-4,
             'weights': None, 'defaultDirectory': temp_files + "",
             'defaultBasename': "MyDefaultReportName"}
        ))

        gaugeOptParams = collections.OrderedDict([
                ('model', lsgst_gatesets_prego[-1]),  #so can gauge-propagate CIs
                ('target_model', target_model),       #so can gauge-propagate CIs
                ('cptp_penalty_factor', 0),
                ('gates_metric',"frobenius"),
                ('spam_metric',"frobenius"),
                ('item_weights', {'gates': 1.0, 'spam': 0.001}),
                ('return_all', True) ])

        _, gaugeEl, go_final_gateset = pygsti.gaugeopt_to_target(**gaugeOptParams)
        gaugeOptParams['_gaugeGroupEl'] = gaugeEl  #so can gauge-propagate CIs
        cls.results.estimates['default'].add_gaugeoptimized(gaugeOptParams, go_final_gateset)
        cls.results.estimates['default'].add_gaugeoptimized(gaugeOptParams, go_final_gateset, "go_dup")

        #Compute results for MLGST with TP constraint
        # Use run_long_sequence_gst with a non-Markovian dataset to trigger data scaling
        tp_target = target_model.copy(); tp_target.set_all_parameterizations("full TP")


        cls.ds3 = cls.ds.copy_nonstatic()
        cls.ds3.add_counts_from_dataset(cls.ds2)
        cls.ds3.done_adding_data()

        cls.results_logL = pygsti.run_long_sequence_gst(cls.ds3, tp_target, std.fiducials, std.fiducials,
                                                        std.germs, cls.maxLengthList, verbosity=0,
                                                        advanced_options={'tolerance': 1e-6, 'starting_point': 'LGST',
                                                                        'on_bad_fit': ["robust","Robust","robust+","Robust+"],
                                                                        'bad_fit_threshold': -1.0,
                                                                        'germ_length_limits': {('Gx','Gi','Gi'): 2} })

        #OLD
        #lsgst_gatesets_TP = pygsti.do_iterative_mlgst(cls.ds, cls.mdl_clgst_tp, cls.lsgstStrings, verbosity=0,
        #                                           min_prob_clip=1e-4, prob_clip_interval=(-1e6,1e6),
        #                                           returnAll=True) #TP initial model => TP output models
        #cls.results_logL = pygsti.objects.Results()
        #cls.results_logL.init_dataset(cls.ds)
        #cls.results_logL.init_circuits(cls.lsgstStructs)
        #cls.results_logL.add_estimate(target_model, cls.mdl_clgst_tp,
        #                         lsgst_gatesets_TP,
        #                         {'objective': "logl",
        #                          'min_prob_clip': 1e-4,
        #                          'prob_clip_interval': (-1e6,1e6), 'radius': 1e-4,
        #                          'weights': None, 'defaultDirectory': temp_files + "",
        #                          'defaultBasename': "MyDefaultReportName"})
        #
        #tp_target = target_model.copy(); tp_target.set_all_parameterizations("full TP")
        #gaugeOptParams = gaugeOptParams.copy() #just to be safe
        #gaugeOptParams['model'] = lsgst_gatesets_TP[-1]  #so can gauge-propagate CIs
        #gaugeOptParams['target_model'] = tp_target  #so can gauge-propagate CIs
        #_, gaugeEl, go_final_gateset = pygsti.gaugeopt_to_target(**gaugeOptParams)
        #gaugeOptParams['_gaugeGroupEl'] = gaugeEl #so can gauge-propagate CIs
        #cls.results_logL.estimates['default'].add_gaugeoptimized(gaugeOptParams, go_final_gateset)
        #
        ##self.results_logL.options.precision = 3
        ##self.results_logL.options.polar_precision = 2

        os.chdir(orig_cwd)
Example 22
    def test_3Q(self):

        ##only test when reps are fast (b/c otherwise this test is slow!)
        #try: from pygsti.objects.replib import fastreplib
        #except ImportError:
        #    warnings.warn("Skipping test_3Q b/c no fastreps!")
        #    return

        nQubits = 3
        print("Constructing Target LinearOperator Set")
        target_model = build_XYCNOT_cloudnoise_model(nQubits,
                                                     geometry="line",
                                                     maxIdleWeight=1,
                                                     maxhops=1,
                                                     extraWeight1Hops=0,
                                                     extraGateWeight=1,
                                                     simulator="map",
                                                     verbosity=1)
        #print("nElements test = ",target_model.num_elements)
        #print("nParams test = ",target_model.num_params)
        #print("nNonGaugeParams test = ",target_model.num_nongauge_params)

        print("Constructing Datagen LinearOperator Set")
        mdl_datagen = build_XYCNOT_cloudnoise_model(nQubits,
                                                    geometry="line",
                                                    maxIdleWeight=1,
                                                    maxhops=1,
                                                    extraWeight1Hops=0,
                                                    extraGateWeight=1,
                                                    verbosity=1,
                                                    roughNoise=(1234, 0.1),
                                                    simulator="map")

        mdl_test = mdl_datagen
        print(
            "Constructed model with %d op-blks, dim=%d, and nParams=%d.  Norm(paramvec) = %g"
            % (len(mdl_test.operation_blks), mdl_test.dim, mdl_test.num_params,
               np.linalg.norm(mdl_test.to_vector())))

        op_labels = target_model.primitive_op_labels
        line_labels = tuple(range(nQubits))
        fids1Q = std1Q_XY.fiducials
        fiducials = []
        for i in range(nQubits):
            fiducials.extend(
                pygsti.circuits.manipulate_circuits(fids1Q, [((L('Gx'), ),
                                                              (L('Gx', i), )),
                                                             ((L('Gy'), ),
                                                              (L('Gy', i), ))],
                                                    line_labels=line_labels))
        print(len(fiducials), "Fiducials")
        prep_fiducials = meas_fiducials = fiducials
        #TODO: add fiducials for 2Q pairs (edges on graph)

        germs = pygsti.circuits.to_circuits([(gl, ) for gl in op_labels],
                                            line_labels=line_labels)
        maxLs = [1]
        expList = pygsti.circuits.create_lsgst_circuits(
            mdl_datagen, prep_fiducials, meas_fiducials, germs, maxLs)
        self.assertTrue(Circuit((), line_labels) in expList)

        ds = pygsti.data.simulate_data(mdl_datagen,
                                       expList,
                                       1000,
                                       "multinomial",
                                       seed=1234)
        print("Created Dataset with %d strings" % len(ds))

        logL = pygsti.tools.logl(mdl_datagen, ds, expList)
        max_logL = pygsti.tools.logl_max(mdl_datagen, ds, expList)
        twoDeltaLogL = 2 * (max_logL - logL)
        chi2 = pygsti.tools.chi2(mdl_datagen, ds, expList)

        dof = ds.degrees_of_freedom()
        nParams = mdl_datagen.num_params
        print("Datagen 2DeltaLogL = 2(%g-%g) = %g" %
              (logL, max_logL, twoDeltaLogL))
        print("Datagen chi2 = ", chi2)
        print("Datagen expected DOF = ", dof)
        print("nParams = ", nParams)
        print("Expected 2DeltaLogL or chi2 ~= %g-%g =%g" %
              (dof, nParams, dof - nParams))
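        # Illustrative addition (an assumption, mirroring the expectation printed above and,
        # up to gauge-parameter counting details, pyGSTi's misfit_sigma()): convert
        # 2*DeltaLogL into an "nSigma" value using k = dof - nParams as the expected mean.
        k = dof - nParams
        print("Datagen nSigma ~= ", (twoDeltaLogL - k) / np.sqrt(2 * k))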
        #print("EXIT"); exit()
        return

        results = pygsti.run_long_sequence_gst(
            ds,
            target_model,
            prep_fiducials,
            meas_fiducials,
            germs,
            maxLs,
            verbosity=5,
            advanced_options={
                'max_iterations': 2
            })  #keep this short; don't care if it doesn't converge.
        print("DONE!")
Example 23
def main():
    gs_target = std1Q_XYI.gs_target
    fiducials = std1Q_XYI.fiducials
    germs = std1Q_XYI.germs
    maxLengths = [1, 2, 4]
    #maxLengths = [1, 2, 4, 8, 16, 32, 64]

    #Generate some data
    gs_datagen = gs_target.depolarize(gate_noise=0.1, spam_noise=0.001)
    gs_datagen = gs_datagen.rotate(rotate=0.04)
    listOfExperiments = pygsti.construction.create_lsgst_circuits(
        gs_target, fiducials, fiducials, germs, maxLengths)
    ds = pygsti.construction.simulate_data(gs_datagen,
                                           listOfExperiments,
                                           n_samples=1000,
                                           sample_error="binomial",
                                           seed=1234)
    #Run GST
    gs_target.set_all_parameterizations("TP")  #TP-constrained
    results = pygsti.run_long_sequence_gst(ds,
                                           gs_target,
                                           fiducials,
                                           fiducials,
                                           germs,
                                           maxLengths,
                                           verbosity=0)
    with open('data/example_report_results.pkl', 'wb') as outfile:
        pickle.dump(results, outfile, protocol=2)

    # Case1: TP-constrained GST
    tpTarget = gs_target.copy()
    tpTarget.set_all_parameterizations("TP")
    results_tp = pygsti.run_long_sequence_gst(ds,
                                              tpTarget,
                                              fiducials,
                                              fiducials,
                                              germs,
                                              maxLengths,
                                              gauge_opt_params=False,
                                              verbosity=0)
    # Gauge optimize
    est = results_tp.estimates['default']
    gsFinal = est.gatesets['final iteration estimate']
    gsTarget = est.gatesets['target']
    for spamWt in [1e-4, 1e-3, 1e-2, 1e-1, 1.0]:
        gs = pygsti.gaugeopt_to_target(gsFinal, gsTarget, {
            'gates': 1,
            'spam': spamWt
        })
        est.add_gaugeoptimized({'item_weights': {
            'gates': 1,
            'spam': spamWt
        }}, gs, "Spam %g" % spamWt)

    #Case2: "Full" GST
    fullTarget = gs_target.copy()
    fullTarget.set_all_parameterizations("full")
    results_full = pygsti.run_long_sequence_gst(ds,
                                                fullTarget,
                                                fiducials,
                                                fiducials,
                                                germs,
                                                maxLengths,
                                                gauge_opt_params=False,
                                                verbosity=0)
    #Gauge optimize
    est = results_full.estimates['default']
    gsFinal = est.gatesets['final iteration estimate']
    gsTarget = est.gatesets['target']
    for spamWt in [1e-4, 1e-3, 1e-2, 1e-1, 1.0]:
        gs = pygsti.gaugeopt_to_target(gsFinal, gsTarget, {
            'gates': 1,
            'spam': spamWt
        })
        est.add_gaugeoptimized({'item_weights': {
            'gates': 1,
            'spam': spamWt
        }}, gs, "Spam %g" % spamWt)

    with open('data/full_report_results.pkl', 'wb') as outfile:
        pickle.dump((results_tp, results_full), outfile, protocol=2)
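
A possible follow-up, sketched under the assumption that pygsti.report.construct_standard_report (used in the idle-tomography examples above) also accepts a dictionary of results and that the returned report object provides a write_html method; the output directory name is illustrative:

# Hedged sketch: build a single comparison report from the two estimates pickled above
with open('data/full_report_results.pkl', 'rb') as infile:
    results_tp, results_full = pickle.load(infile)
report = pygsti.report.construct_standard_report(
    {'TP': results_tp, 'Full': results_full},
    "TP vs. full parameterization comparison", verbosity=0)
report.write_html('data/tp_vs_full_report')  # illustrative output directory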
Example 24
    def testIntermediateMeas(self):
        # Mess with the target model to add some error to the povm and instrument
        self.assertEqual(self.target_model.num_params,92) # 4*3 + 16*5 = 92
        mdl = self.target_model.depolarize(op_noise=0.01, spam_noise=0.01)
        gs2 = self.target_model.depolarize(max_op_noise=0.01, max_spam_noise=0.01, seed=1234) #another way to depolarize
        mdl.povms['Mdefault'].depolarize(0.01)

        # Introducing a rotation error to the measurement
        Uerr = pygsti.rotation_gate_mx([0, 0.02, 0]) # input angles are halved by the method
        E = np.dot(mdl.povms['Mdefault']['0'].T,Uerr).T # effect is stored as column vector
        Erem = self.povm_ident - E
        mdl.povms['Mdefault'] = pygsti.modelmembers.povms.UnconstrainedPOVM({'0': E, '1': Erem}, evotype='default')

        # Now add the post-measurement gates from the vector E0 and remainder = id-E0
        Gmz_plus = np.dot(E,E.T) #since E0 is stored internally as column spamvec
        Gmz_minus = np.dot(Erem,Erem.T)
        mdl.instruments['Iz'] = pygsti.modelmembers.instruments.Instrument({'plus': Gmz_plus, 'minus': Gmz_minus})
        self.assertEqual(mdl.num_params,92) # 4*3 + 16*5 = 92
        #print(mdl)

        germs = std.germs
        fiducials = std.fiducials
        max_lengths = [1] #,2,4,8]
        glbls = list(mdl.operations.keys()) + list(mdl.instruments.keys())
        lsgst_struct = pygsti.circuits.create_lsgst_circuits(
            glbls,fiducials,fiducials,germs,max_lengths)
        lsgst_struct2 = pygsti.circuits.create_lsgst_circuits(
            mdl,fiducials,fiducials,germs,max_lengths) #use mdl as source
        self.assertEqual(list(lsgst_struct), list(lsgst_struct2))



        mdl_datagen = mdl
        ds = pygsti.data.simulate_data(mdl, lsgst_struct, 1000, 'none') #'multinomial')
        pygsti.io.write_dataset(temp_files + "/intermediate_meas_dataset.txt", ds)
        ds2 = pygsti.io.read_dataset(temp_files + "/intermediate_meas_dataset.txt")
        for opstr,dsRow in ds.items():
            for lbl,cnt in dsRow.counts.items():
                self.assertAlmostEqual(cnt, ds2[opstr].counts[lbl],places=2)
        #print(ds)

        #LGST
        mdl_lgst = pygsti.run_lgst(ds, fiducials, fiducials, self.target_model) #, guessModelForGauge=mdl_datagen)
        self.assertTrue("Iz" in mdl_lgst.instruments)
        mdl_opt = pygsti.gaugeopt_to_target(mdl_lgst, mdl_datagen) #, method="BFGS")
        print(mdl_datagen.strdiff(mdl_opt))
        print("Frobdiff = ",mdl_datagen.frobeniusdist( mdl_lgst))
        print("Frobdiff after GOpt = ",mdl_datagen.frobeniusdist(mdl_opt))
        self.assertAlmostEqual(mdl_datagen.frobeniusdist(mdl_opt), 0.0, places=4)
        #print(mdl_lgst)
        #print(mdl_datagen)

        #DEBUG compiling w/dataset
        #dbList = pygsti.circuits.create_lsgst_circuits(self.target_model,fiducials,fiducials,germs,max_lengths)
        ##self.target_model.simplify_circuits(dbList, ds)
        #self.target_model.simplify_circuits([ pygsti.circuits.Circuit(None,stringrep="Iz") ], ds )
        #assert(False),"STOP"

        #LSGST
        results = pygsti.run_long_sequence_gst(ds, self.target_model, fiducials, fiducials, germs, max_lengths)
        #print(results.estimates[results.name].models['go0'])
        mdl_est = results.estimates[results.name].models['go0']
        mdl_est_opt = pygsti.gaugeopt_to_target(mdl_est, mdl_datagen)
        print("Frobdiff = ", mdl_datagen.frobeniusdist(mdl_est))
        print("Frobdiff after GOpt = ", mdl_datagen.frobeniusdist(mdl_est_opt))
        self.assertAlmostEqual(mdl_datagen.frobeniusdist(mdl_est_opt), 0.0, places=4)

        #LGST w/TP gates
        mdl_targetTP = self.target_model.copy()
        mdl_targetTP.set_all_parameterizations("full TP")
        self.assertEqual(mdl_targetTP.num_params,71) # 3 + 4*2 + 12*5 = 71
        #print(mdl_targetTP)
        resultsTP = pygsti.run_long_sequence_gst(ds, mdl_targetTP, fiducials, fiducials, germs, max_lengths, verbosity=4)
        mdl_est = resultsTP.estimates[resultsTP.name].models['go0']
        mdl_est_opt = pygsti.gaugeopt_to_target(mdl_est, mdl_datagen)
        print("TP Frobdiff = ", mdl_datagen.frobeniusdist(mdl_est))
        print("TP Frobdiff after GOpt = ", mdl_datagen.frobeniusdist(mdl_est_opt))
        self.assertAlmostEqual(mdl_datagen.frobeniusdist(mdl_est_opt), 0.0, places=4)
Example 25
gs_datagen = gs_target.depolarize(gate_noise=0.1, spam_noise=0.001)
ds = pygsti.construction.simulate_data(gs_datagen,
                                       listOfExperiments,
                                       n_samples=1000,
                                       sample_error="multinomial",
                                       seed=2016)

start = time.time()
'''
results = pygsti.run_long_sequence_gst(ds, gs_target, prep_fiducials, effect_fiducials, germs4,
                                    maxLengths, gaugeOptParams={'item_weights': {'spam':0.1,'gates': 1.0}},
                                    advancedOptions={ 'depolarizeStart' : 0.1 }, mem_limit=3*(1024)**3,
                                    verbosity=3 )
'''
results = pygsti.run_long_sequence_gst(
    ds,
    gs_target,
    prep_fiducials,
    effect_fiducials,
    germs4,
    maxLengths,
    gauge_opt_params=None,
    advanced_options={'depolarizeStart': 0.1},
    mem_limit=3 * (1024)**3,
    verbosity=3)
end = time.time()
print("Total time=%f hours" % ((end - start) / 3600.0))

#If you wanted to, you could pickle the results for later analysis:
pickle.dump(results, open("gaugeopt/2qbit_results.pkl", "wb"))
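
And a small sketch of reloading the pickled results in a later session and checking fit quality with the same misfit_sigma() accessor used in the tests above (iterating over the estimates avoids assuming a particular estimate key):

# Hedged sketch: reload pickled results and print the fit quality of each estimate
import pickle
with open("gaugeopt/2qbit_results.pkl", "rb") as f:
    results = pickle.load(f)
for est_name, est in results.estimates.items():
    print(est_name, "MISFIT nSigma = ", est.misfit_sigma())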