Example #1
    def test_reducedmod_map2(self):
        # Using sparse embedded matrices and map-based calcs
        gs_target = pc.build_nqnoise_gateset(self.nQubits,
                                             geometry="line",
                                             maxIdleWeight=1,
                                             maxhops=1,
                                             extraWeight1Hops=0,
                                             extraGateWeight=1,
                                             sparse=True,
                                             sim_type="map",
                                             verbosity=1)
        gs_target.from_vector(self.rand_start206)
        results = pygsti.do_long_sequence_gst(
            self.redmod_ds,
            gs_target,
            self.redmod_fiducials,
            self.redmod_fiducials,
            self.redmod_germs,
            self.redmod_maxLs,
            verbosity=4,
            advancedOptions={'tolerance': 1e-3})

        print("MISFIT nSigma = ", results.estimates['default'].misfit_sigma())
        self.assertAlmostEqual(results.estimates['default'].misfit_sigma(),
                               1.0,
                               delta=1.0)
        gs_compare = pygsti.io.json.load(
            open(compare_files + "/test2Qcalc_redmod_exact.gateset"))
        self.assertAlmostEqual(np.linalg.norm(
            results.estimates['default'].gatesets['go0'].to_vector() -
            gs_compare.to_vector()),
                               0,
                               places=1)
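The assertions above mix unittest's two tolerance styles: delta bounds the absolute difference directly, while places rounds the difference before comparing it to zero. A minimal, self-contained sketch of that distinction (plain standard-library unittest, not pyGSTi-specific):

import unittest

class ToleranceSemantics(unittest.TestCase):
    # delta=1.0 accepts any value within 1.0 of the target,
    # as used for the misfit_sigma checks above.
    def test_delta(self):
        self.assertAlmostEqual(1.4, 1.0, delta=1.0)      # |1.4 - 1.0| <= 1.0 -> passes

    # places=1 rounds the difference to one decimal place,
    # as used for the gateset-vector comparison above.
    def test_places(self):
        self.assertAlmostEqual(0.04, 0.0, places=1)      # round(0.04, 1) == 0.0 -> passes
        with self.assertRaises(AssertionError):
            self.assertAlmostEqual(0.06, 0.0, places=1)  # round(0.06, 1) == 0.1 -> fails

if __name__ == "__main__":
    unittest.main()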
Example #2
    def test_reducedmod_cterm(self):
        # Using term-based calcs using map-based stabilizer-state propagation
        gs_target = pc.build_nqnoise_gateset(
            self.nQubits,
            geometry="line",
            maxIdleWeight=1,
            maxhops=1,
            extraWeight1Hops=0,
            extraGateWeight=1,
            sparse=False,
            verbosity=1,
            sim_type="termorder:1",
            parameterization="H+S clifford terms")
        gs_target.from_vector(self.rand_start228)
        results = pygsti.do_long_sequence_gst(
            self.redmod_ds,
            gs_target,
            self.redmod_fiducials,
            self.redmod_fiducials,
            self.redmod_germs,
            self.redmod_maxLs,
            verbosity=4,
            advancedOptions={'tolerance': 1e-3})

        print("MISFIT nSigma = ", results.estimates['default'].misfit_sigma())
        self.assertAlmostEqual(results.estimates['default'].misfit_sigma(),
                               3.0,
                               delta=1.0)
        gs_compare = pygsti.io.json.load(
            open(compare_files + "/test2Qcalc_redmod_terms.gateset"))
        self.assertAlmostEqual(np.linalg.norm(
            results.estimates['default'].gatesets['go0'].to_vector() -
            gs_compare.to_vector()),
                               0,
                               places=3)
Example #3
    def test_reducedmod_map1(self):
        # Using dense embedded matrices and map-based calcs (maybe not really necessary to include?)
        gs_target = pc.build_nqnoise_gateset(self.nQubits,
                                             geometry="line",
                                             maxIdleWeight=1,
                                             maxhops=1,
                                             extraWeight1Hops=0,
                                             extraGateWeight=1,
                                             sparse=False,
                                             sim_type="map",
                                             verbosity=1)
        print("Num params = ", gs_target.num_params())
        gs_target.from_vector(self.rand_start25)
        results = pygsti.do_long_sequence_gst(
            self.redmod_ds,
            gs_target,
            self.redmod_fiducials,
            self.redmod_fiducials,
            self.redmod_germs,
            self.redmod_maxLs,
            verbosity=4,
            advancedOptions={'tolerance': 1e-3})

        print("MISFIT nSigma = ", results.estimates['default'].misfit_sigma())
        self.assertAlmostEqual(results.estimates['default'].misfit_sigma(),
                               0.0,
                               delta=1.0)
        gs_compare = pygsti.io.json.load(
            open(compare_files + "/test1Qcalc_redmod_exact.gateset"))
        self.assertAlmostEqual(
            results.estimates['default'].gatesets['go0'].frobeniusdist(
                gs_compare),
            0,
            places=1)
Example #4
    def test_greedy_sequenceselection(self):
        nQubits = 1
        maxLengths = [1, 2]
        cnot_edges = []

        gs_datagen = pc.build_nqnoise_gateset(nQubits,
                                              "line",
                                              cnot_edges,
                                              maxIdleWeight=1,
                                              maxhops=0,
                                              extraWeight1Hops=0,
                                              extraGateWeight=0,
                                              sparse=True,
                                              verbosity=1,
                                              sim_type="map",
                                              parameterization="H+S",
                                              gateNoise=(1234, 0.01),
                                              prepNoise=(456, 0.01),
                                              povmNoise=(789, 0.01))

        cache = {}
        gss = pygsti.construction.create_nqubit_sequences(nQubits,
                                                          maxLengths,
                                                          'line',
                                                          cnot_edges,
                                                          maxIdleWeight=1,
                                                          maxhops=0,
                                                          extraWeight1Hops=0,
                                                          extraGateWeight=0,
                                                          verbosity=4,
                                                          cache=cache,
                                                          algorithm="greedy")
        #expList = gss.allstrs #[ tup[0] for tup in expList_tups]

        #RUN to save list
        #pygsti.io.json.dump(gss, open(compare_files + "/nqubit_1Q_seqs.json",'w'))

        compare_gss = pygsti.io.json.load(
            open(compare_files + "/nqubit_1Q_seqs.json"))

        #expList_tups_mod = [tuple( etup[0:3] + ('XX','XX')) for etup in expList_tups ]
        #for etup in expList_tups:
        #    etup_mod = tuple( etup[0:3] + ('XX','XX'))
        #    if etup_mod not in compare_tups:
        #        print("Not found: ", etup)
        #
        #    #if (etup[0] != ctup[0]) or (etup[1] != ctup[1]) or (etup[2] != ctup[2]):
        #    #    print("Mismatch:",(etup[0] != ctup[0]), (etup[1] != ctup[1]), (etup[2] != ctup[2]))
        #    #    print(etup); print(ctup)
        #    #    print(tuple(etup[0]))
        #    #    print(tuple(ctup[0]))

        self.assertEqual(set(gss.allstrs), set(compare_gss.allstrs))
Example #5
    def test_2Q_terms(self):

        gss = pygsti.io.json.load(open(compare_files + "/nqubit_2Q_seqs.json"))
        expList = gss.allstrs

        ds = pygsti.io.json.load(
            open(compare_files + "/nqubit_2Q_dataset.json"))
        print(len(expList), " sequences")

        nQubits = 2
        maxLengths = [1, 2]
        cnot_edges = [(i, i + 1)
                      for i in range(nQubits - 1)]  #only single direction

        #OLD
        #lsgstLists = []; lst = []
        #for L in maxLengths:
        #    for tup in expList_tups:
        #        if tup[1] == L: lst.append( tup[0] )
        #    lsgstLists.append(lst[:]) # append *running* list
        lsgstLists = gss  # can just use gss as input to pygsti.do_long_sequence_gst_base

        gs_to_optimize = pc.build_nqnoise_gateset(nQubits,
                                                  "line",
                                                  cnot_edges,
                                                  maxIdleWeight=2,
                                                  maxhops=1,
                                                  extraWeight1Hops=0,
                                                  extraGateWeight=1,
                                                  verbosity=1,
                                                  sim_type="termorder:1",
                                                  parameterization="H+S terms",
                                                  sparse=False)

        #RUN to create cache
        #calc_cache = {}
        #gs_to_optimize.set_simtype("termorder:1",calc_cache)
        #gs_to_optimize.bulk_probs(lsgstLists[-1])
        #pygsti.io.json.dump(calc_cache, open(compare_files + '/nqubit_2Qterms.cache','w'))

        #Just load precomputed cache (we test do_long_sequence_gst_base here, not cache computation)
        calc_cache = pygsti.io.json.load(
            open(compare_files + '/nqubit_2Qterms.cache'))
        gs_to_optimize.set_simtype("termorder:1", calc_cache)

        results = pygsti.do_long_sequence_gst_base(
            ds,
            gs_to_optimize,
            lsgstLists,
            gaugeOptParams=False,
            advancedOptions={'tolerance': 1e-3},
            verbosity=4)
Example #6
    def test_sequential_sequenceselection(self):
        nQubits = 2
        maxLengths = [1, 2]
        cnot_edges = [(i, i + 1)
                      for i in range(nQubits - 1)]  #only single direction

        gs_datagen = pc.build_nqnoise_gateset(nQubits,
                                              "line",
                                              cnot_edges,
                                              maxIdleWeight=2,
                                              maxhops=1,
                                              extraWeight1Hops=0,
                                              extraGateWeight=0,
                                              sparse=True,
                                              verbosity=1,
                                              sim_type="map",
                                              parameterization="H+S",
                                              gateNoise=(1234, 0.01),
                                              prepNoise=(456, 0.01),
                                              povmNoise=(789, 0.01))

        cache = {}
        gss = pygsti.construction.create_nqubit_sequences(
            nQubits,
            maxLengths,
            'line',
            cnot_edges,
            maxIdleWeight=2,
            maxhops=1,
            extraWeight1Hops=0,
            extraGateWeight=0,
            verbosity=4,
            cache=cache,
            algorithm="sequential")
        expList = gss.allstrs  #[ tup[0] for tup in expList_tups]

        #RUN to save list & dataset
        #pygsti.io.json.dump(gss, open(compare_files + "/nqubit_2Q_seqs.json",'w'))
        #ds = pygsti.construction.generate_fake_data(gs_datagen, expList, 1000, "multinomial", seed=1234)
        #pygsti.io.json.dump(ds,open(compare_files + "/nqubit_2Q_dataset.json",'w'))

        compare_gss = pygsti.io.json.load(
            open(compare_files + "/nqubit_2Q_seqs.json"))
        self.assertEqual(set(gss.allstrs), set(compare_gss.allstrs))
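For reference, the commented-out lines in the test above show how the saved sequence list and dataset were originally produced. Below is a hedged sketch of that regeneration step (to be run once inside the test, then re-commented), reusing only the calls and arguments that already appear in those comments rather than a verified current pyGSTi API:

# Re-create the reference files that this test and test_2Q/test_2Q_terms load.
with open(compare_files + "/nqubit_2Q_seqs.json", 'w') as f:
    pygsti.io.json.dump(gss, f)

ds = pygsti.construction.generate_fake_data(gs_datagen, expList, 1000,
                                            "multinomial", seed=1234)
with open(compare_files + "/nqubit_2Q_dataset.json", 'w') as f:
    pygsti.io.json.dump(ds, f)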
Example #7
    def test_autogator(self):
        #Test this here b/c auto-gators are associated with parallel gate labels
        gs = pc.build_nqnoise_gateset(2,
                                      "line", [(0, 1)],
                                      maxIdleWeight=2,
                                      maxhops=1,
                                      extraWeight1Hops=0,
                                      extraGateWeight=1,
                                      verbosity=1,
                                      sim_type="map",
                                      parameterization="H+S",
                                      sparse=True)

        # gs[('Gx',0)].factorgates  # Composed([fullTargetOp,fullIdleErr,fullLocalErr])
        self.assertEqual(
            set(gs.gates.keys()),
            set([
                L('Gi'),
                L('Gx', 0),
                L('Gy', 0),
                L('Gx', 1),
                L('Gy', 1),
                L('Gcnot', (0, 1))
            ]))

        #But we can *compute* with gatestrings containing parallel labels...
        parallelLbl = L([('Gx', 0), ('Gy', 1)])

        with self.assertRaises(KeyError):
            gs.gates[parallelLbl]

        gstr = pygsti.obj.GateString((parallelLbl, ))
        probs = gs.probs(gstr)
        print(probs)

        expected = {
            ('00', ): 0.25,
            ('01', ): 0.25,
            ('10', ): 0.25,
            ('11', ): 0.25
        }
        for k, v in probs.items():
            self.assertAlmostEqual(v, expected[k])
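The uniform 0.25 outcome distribution asserted above follows from the ideal, noise-free gateset: Gx and Gy are pi/2 rotations, so applying Gx to qubit 0 and Gy to qubit 1 of |00> puts each qubit into an equal superposition. A quick numpy sanity check of that expectation (an illustrative sketch, independent of pyGSTi):

import numpy as np

def halfpi_rotation(axis):
    # exp(-i * (pi/2)/2 * P) for P in {X, Y}
    X = np.array([[0, 1], [1, 0]], dtype=complex)
    Y = np.array([[0, -1j], [1j, 0]], dtype=complex)
    P = {'x': X, 'y': Y}[axis]
    return np.cos(np.pi / 4) * np.eye(2) - 1j * np.sin(np.pi / 4) * P

zero = np.array([1, 0], dtype=complex)
psi = np.kron(halfpi_rotation('x') @ zero, halfpi_rotation('y') @ zero)
print(np.abs(psi) ** 2)  # ~[0.25, 0.25, 0.25, 0.25], matching the expected dict above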
Example #8
    def test_2Q(self):

        gss = pygsti.io.json.load(open(compare_files + "/nqubit_2Q_seqs.json"))
        expList = gss.allstrs

        ds = pygsti.io.json.load(
            open(compare_files + "/nqubit_2Q_dataset.json"))
        print(len(expList), " sequences")

        nQubits = 2
        maxLengths = [1, 2]
        cnot_edges = [(i, i + 1)
                      for i in range(nQubits - 1)]  #only single direction

        #OLD
        #lsgstLists = []; lst = []
        #for L in maxLengths:
        #    for tup in expList_tups:
        #        if tup[1] == L: lst.append( tup[0] )
        #    lsgstLists.append(lst[:]) # append *running* list
        lsgstLists = gss  # can just use gss as input to pygsti.do_long_sequence_gst_base

        gs_to_optimize = pc.build_nqnoise_gateset(nQubits,
                                                  "line",
                                                  cnot_edges,
                                                  maxIdleWeight=2,
                                                  maxhops=1,
                                                  extraWeight1Hops=0,
                                                  extraGateWeight=1,
                                                  verbosity=1,
                                                  sim_type="map",
                                                  parameterization="H+S",
                                                  sparse=True)

        results = pygsti.do_long_sequence_gst_base(
            ds,
            gs_to_optimize,
            lsgstLists,
            gaugeOptParams=False,
            advancedOptions={'tolerance': 1e-2},
            verbosity=4)
Example #9
    def test_reducedmod_svterm(self):
        # Using term-based calcs using map-based state-vector propagation
        gs_target = pc.build_nqnoise_gateset(self.nQubits,
                                             geometry="line",
                                             maxIdleWeight=1,
                                             maxhops=1,
                                             extraWeight1Hops=0,
                                             extraGateWeight=1,
                                             sparse=False,
                                             verbosity=1,
                                             sim_type="termorder:1",
                                             parameterization="H+S terms")
        print("Num params = ", gs_target.num_params())
        gs_target.from_vector(self.rand_start36)
        results = pygsti.do_long_sequence_gst(
            self.redmod_ds,
            gs_target,
            self.redmod_fiducials,
            self.redmod_fiducials,
            self.redmod_germs,
            self.redmod_maxLs,
            verbosity=4,
            advancedOptions={'tolerance': 1e-3})

        #RUN BELOW LINES TO SAVE GATESET (UNCOMMENT to regenerate)
        #pygsti.io.json.dump(results.estimates['default'].gatesets['go0'],
        #                    open(compare_files + "/test1Qcalc_redmod_terms.gateset",'w'))

        print("MISFIT nSigma = ", results.estimates['default'].misfit_sigma())
        self.assertAlmostEqual(results.estimates['default'].misfit_sigma(),
                               0.0,
                               delta=1.0)
        gs_compare = pygsti.io.json.load(
            open(compare_files + "/test1Qcalc_redmod_terms.gateset"))
        self.assertAlmostEqual(np.linalg.norm(
            results.estimates['default'].gatesets['go0'].to_vector() -
            gs_compare.to_vector()),
                               0,
                               places=3)
Example #10
    def test_reducedmod_matrix(self):
        # Using dense matrices and matrix-based calcs
        gs_target = pc.build_nqnoise_gateset(self.nQubits,
                                             geometry="line",
                                             maxIdleWeight=1,
                                             maxhops=1,
                                             extraWeight1Hops=0,
                                             extraGateWeight=1,
                                             sparse=False,
                                             sim_type="matrix",
                                             verbosity=1)
        print("Num params = ", gs_target.num_params())
        gs_target.from_vector(self.rand_start25)
        results = pygsti.do_long_sequence_gst(
            self.redmod_ds,
            gs_target,
            self.redmod_fiducials,
            self.redmod_fiducials,
            self.redmod_germs,
            self.redmod_maxLs,
            verbosity=4,
            advancedOptions={'tolerance': 1e-3})

        #RUN BELOW LINES TO SAVE GATESET (UNCOMMENT to regenerate)
        #pygsti.io.json.dump(results.estimates['default'].gatesets['go0'],
        #                    open(compare_files + "/test1Qcalc_redmod_exact.gateset",'w'))

        print("MISFIT nSigma = ", results.estimates['default'].misfit_sigma())
        self.assertAlmostEqual(results.estimates['default'].misfit_sigma(),
                               0.0,
                               delta=1.0)
        gs_compare = pygsti.io.json.load(
            open(compare_files + "/test1Qcalc_redmod_exact.gateset"))
        self.assertAlmostEqual(
            results.estimates['default'].gatesets['go0'].frobeniusdist(
                gs_compare),
            0,
            places=3)
Example #11
    @classmethod
    def setUpClass(cls):
        """
        Handle all once-per-class (slow) computation and loading,
        to avoid calling it for each test (as setUp would).  Store
        results in class variables for use within setUp.
        """
        super(CalcMethods2QTestCase, cls).setUpClass()

        #Change to test_packages directory (since setUp hasn't been called yet...)
        origDir = os.getcwd()
        os.chdir(os.path.abspath(os.path.dirname(__file__)))
        os.chdir('..')  # The test_packages directory

        #Note: std is a 2Q gateset
        cls.maxLengths = [1]
        #cls.germs = std.germs_lite
        cls.germs = pygsti.construction.gatestring_list([
            (gl, ) for gl in std.gs_target.gates
        ])
        cls.gs_datagen = std.gs_target.depolarize(gate_noise=0.1,
                                                  spam_noise=0.001)
        cls.listOfExperiments = pygsti.construction.make_lsgst_experiment_list(
            std.gs_target, std.prepStrs, std.effectStrs, cls.germs,
            cls.maxLengths)

        #RUN BELOW FOR DATAGEN (UNCOMMENT to regenerate)
        #ds = pygsti.construction.generate_fake_data(cls.gs_datagen, cls.listOfExperiments,
        #                                            nSamples=1000, sampleError="multinomial", seed=1234)
        #ds.save(compare_files + "/calcMethods2Q.dataset%s" % cls.versionsuffix)

        cls.ds = pygsti.objects.DataSet(
            fileToLoadFrom=compare_files +
            "/calcMethods2Q.dataset%s" % cls.versionsuffix)
        cls.advOpts = {'tolerance': 1e-2}

        #Reduced model GST dataset
        cls.nQubits = 2
        cls.gs_redmod_datagen = pc.build_nqnoise_gateset(cls.nQubits,
                                                         geometry="line",
                                                         maxIdleWeight=1,
                                                         maxhops=1,
                                                         extraWeight1Hops=0,
                                                         extraGateWeight=1,
                                                         sparse=False,
                                                         sim_type="matrix",
                                                         verbosity=1,
                                                         gateNoise=(1234, 0.01),
                                                         prepNoise=(456, 0.01),
                                                         povmNoise=(789, 0.01))

        #Create a reduced set of fiducials and germs
        gateLabels = list(cls.gs_redmod_datagen.gates.keys())
        fids1Q = std1Q_XY.fiducials[0:2]  # for speed
        cls.redmod_fiducials = []
        for i in range(cls.nQubits):
            cls.redmod_fiducials.extend(
                pygsti.construction.manipulate_gatestring_list(
                    fids1Q, [((L('Gx'), ), (L('Gx', i), )),
                             ((L('Gy'), ), (L('Gy', i), ))]))
        #print(redmod_fiducials, "Fiducials")

        cls.redmod_germs = pygsti.construction.gatestring_list([
            (gl, ) for gl in gateLabels
        ])
        cls.redmod_maxLs = [1]
        expList = pygsti.construction.make_lsgst_experiment_list(
            cls.gs_redmod_datagen, cls.redmod_fiducials, cls.redmod_fiducials,
            cls.redmod_germs, cls.redmod_maxLs)

        #RUN BELOW FOR DATAGEN (UNCOMMENT to regenerate)
        #redmod_ds = pygsti.construction.generate_fake_data(cls.gs_redmod_datagen, expList, 1000, "round", seed=1234)
        #redmod_ds.save(compare_files + "/calcMethods2Q_redmod.dataset%s" % cls.versionsuffix)

        cls.redmod_ds = pygsti.objects.DataSet(
            fileToLoadFrom=compare_files +
            "/calcMethods2Q_redmod.dataset%s" % cls.versionsuffix)

        #print(len(expList)," reduced model sequences")

        #Random starting points - little kick so we don't get hung up at start
        np.random.seed(1234)
        cls.rand_start18 = np.random.random(18) * 1e-6
        cls.rand_start206 = np.random.random(206) * 1e-6
        cls.rand_start228 = np.random.random(228) * 1e-6

        os.chdir(origDir)  # return to original directory
Example #12
    @classmethod
    def setUpClass(cls):
        """
        Handle all once-per-class (slow) computation and loading,
        to avoid calling it for each test (as setUp would).  Store
        results in class variables for use within setUp.
        """
        super(CalcMethods1QTestCase, cls).setUpClass()

        #Change to test_packages directory (since setUp hasn't been called yet...)
        origDir = os.getcwd()
        os.chdir(os.path.abspath(os.path.dirname(__file__)))
        os.chdir('..')  # The test_packages directory

        #Standard GST dataset
        cls.maxLengths = [1, 2, 4]
        cls.gs_datagen = std.gs_target.depolarize(gate_noise=0.1,
                                                  spam_noise=0.001)
        cls.listOfExperiments = pygsti.construction.make_lsgst_experiment_list(
            std.gs_target, std.prepStrs, std.effectStrs, std.germs,
            cls.maxLengths)

        #RUN BELOW FOR DATAGEN (UNCOMMENT to regenerate)
        #ds = pygsti.construction.generate_fake_data(cls.gs_datagen, cls.listOfExperiments,
        #                                                 nSamples=1000, sampleError="multinomial", seed=1234)
        #ds.save(compare_files + "/calcMethods1Q.dataset%s" % cls.versionsuffix)

        #DEBUG TEST- was to make sure data files have same info -- seemed ultimately unnecessary
        #ds_swp = pygsti.objects.DataSet(fileToLoadFrom=compare_files + "/calcMethods1Q.datasetv3") # run in Python3
        #pygsti.io.write_dataset(temp_files + "/dataset.3to2.txt", ds_swp) # run in Python3
        #ds_swp = pygsti.io.load_dataset(temp_files + "/dataset.3to2.txt") # run in Python2
        #ds_swp.save(compare_files + "/calcMethods1Q.dataset") # run in Python2
        #assert(False),"STOP"

        cls.ds = pygsti.objects.DataSet(
            fileToLoadFrom=compare_files +
            "/calcMethods1Q.dataset%s" % cls.versionsuffix)

        #Reduced model GST dataset
        cls.nQubits = 1
        cls.gs_redmod_datagen = pc.build_nqnoise_gateset(cls.nQubits,
                                                         geometry="line",
                                                         maxIdleWeight=1,
                                                         maxhops=1,
                                                         extraWeight1Hops=0,
                                                         extraGateWeight=1,
                                                         sparse=False,
                                                         sim_type="matrix",
                                                         verbosity=1,
                                                         gateNoise=(1234, 0.01),
                                                         prepNoise=(456, 0.01),
                                                         povmNoise=(789, 0.01))

        #Create a reduced set of fiducials and germs
        gateLabels = list(cls.gs_redmod_datagen.gates.keys())
        fids1Q = std1Q_XY.fiducials[0:2]  # for speed
        cls.redmod_fiducials = []
        for i in range(cls.nQubits):
            cls.redmod_fiducials.extend(
                pygsti.construction.manipulate_gatestring_list(
                    fids1Q, [((L('Gx'), ), (L('Gx', i), )),
                             ((L('Gy'), ), (L('Gy', i), ))]))
        #print(redmod_fiducials, "Fiducials")

        cls.redmod_germs = pygsti.construction.gatestring_list([
            (gl, ) for gl in gateLabels
        ])
        cls.redmod_maxLs = [1]
        expList = pygsti.construction.make_lsgst_experiment_list(
            cls.gs_redmod_datagen, cls.redmod_fiducials, cls.redmod_fiducials,
            cls.redmod_germs, cls.redmod_maxLs)

        #RUN BELOW FOR DATAGEN (UNCOMMENT to regenerate)
        #redmod_ds = pygsti.construction.generate_fake_data(cls.gs_redmod_datagen, expList, 1000, "round", seed=1234)
        #redmod_ds.save(compare_files + "/calcMethods1Q_redmod.dataset%s" % cls.versionsuffix)

        cls.redmod_ds = pygsti.objects.DataSet(
            fileToLoadFrom=compare_files +
            "/calcMethods1Q_redmod.dataset%s" % cls.versionsuffix)

        #print(len(expList)," reduced model sequences")

        #Random starting points - little kick so we don't get hung up at start
        np.random.seed(1234)
        cls.rand_start18 = np.random.random(18) * 1e-6
        cls.rand_start25 = np.random.random(30) * 1e-6  # TODO: rename?
        cls.rand_start36 = np.random.random(30) * 1e-6  # TODO: rename?

        #Circuit Simulation circuits
        cls.csim_nQubits = 3
        cls.circuit1 = pygsti.obj.GateString(('Gx', 'Gy'))
        # now Circuit adds qubit labels... pygsti.obj.Circuit(gatestring=('Gx','Gy'), num_lines=1) # 1-qubit circuit
        cls.circuit3 = pygsti.obj.Circuit(gatestring=[('Gxpi', 0), ('Gypi', 1),
                                                      ('Gcnot', 1, 2)],
                                          num_lines=3)  # 3-qubit circuit

        os.chdir(origDir)  # return to original directory
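Both setUpClass methods above change into the test_packages directory and restore the original directory by hand, so if any setup line raises, the working directory is left changed. A small context-manager variant of that pattern (a sketch, not part of the original test suite):

import contextlib
import os

@contextlib.contextmanager
def pushd(path):
    # Temporarily switch the working directory, restoring it even on error.
    orig = os.getcwd()
    os.chdir(path)
    try:
        yield
    finally:
        os.chdir(orig)

# Usage sketch inside setUpClass:
# with pushd(os.path.join(os.path.dirname(__file__), '..')):
#     ...load datasets and build gatesets...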