def test_MPI_gaugeopt(comm):
    #Gauge Opt to Target
    mdl_other = std.target_model().depolarize(op_noise=0.01, spam_noise=0.01)
    mdl_other['Gx'].rotate((0, 0, 0.01))
    mdl_other['Gy'].rotate((0, 0, 0.01))
    mdl_gopt = pygsti.gaugeopt_to_target(mdl_other, std.target_model(),
                                         verbosity=10, comm=comm)

    #use a method that isn't parallelized with non-None comm (warning is given)
    mdl_gopt_slow = pygsti.gaugeopt_to_target(mdl_other, std.target_model(),
                                              verbosity=10, method="BFGS", comm=comm)
def test_MPI_gatestrings_logl(comm):
    #Create dataset for serial and parallel runs
    ds, lsgstStrings = create_fake_dataset(comm)

    #Individual processors
    my1ProcResults = runOneQubit("logl", ds, lsgstStrings)

    #Using all processors
    myManyProcResults = runOneQubit("logl", ds, lsgstStrings, comm, "gatestrings")

    for i, (gs1, gs2) in enumerate(zip(my1ProcResults, myManyProcResults)):
        assertGatesetsInSync(gs1, comm)
        assertGatesetsInSync(gs2, comm)

        gs2_go = pygsti.gaugeopt_to_target(gs2, gs1, {'gates': 1.0, 'spam': 1.0})
        print("Frobenius distance %d (rank %d) = " % (i, comm.Get_rank()),
              gs1.frobeniusdist(gs2_go))
        if gs1.frobeniusdist(gs2_go) >= 1e-5:
            print("DIFF (%d) = " % comm.Get_rank(), gs1.strdiff(gs2_go))
        assert (gs1.frobeniusdist(gs2_go) < 1e-5)
    return
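# The two MPI tests above take an mpi4py communicator.  A minimal driver sketch
# (an illustrative assumption -- the real harness, plus helpers such as `std`,
# create_fake_dataset, runOneQubit and assertGatesetsInSync, lives in the
# surrounding test module) would be launched with e.g. `mpiexec -n 4 python <script>`:
if __name__ == '__main__':
    from mpi4py import MPI  # requires mpi4py
    _comm = MPI.COMM_WORLD
    test_MPI_gaugeopt(_comm)
    test_MPI_gatestrings_logl(_comm)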
def test_LGST_1overSqrtN_dependence(self):
    my_datagen_gateset = self.model.depolarize(op_noise=0.05, spam_noise=0)
    # !!don't depolarize spam or 1/sqrt(N) dependence saturates!!

    nSamplesList = np.array([16, 128, 1024, 8192])
    diffs = []
    for nSamples in nSamplesList:
        ds = pygsti.construction.generate_fake_data(my_datagen_gateset, self.lgstStrings,
                                                    nSamples, sampleError='binomial', seed=100)
        mdl_lgst = pygsti.do_lgst(ds, self.fiducials, self.fiducials, self.model,
                                  svdTruncateTo=4, verbosity=0)
        mdl_lgst_go = pygsti.gaugeopt_to_target(mdl_lgst, my_datagen_gateset,
                                                {'spam': 1.0, 'gates': 1.0}, checkJac=True)
        diffs.append(my_datagen_gateset.frobeniusdist(mdl_lgst_go))

    diffs = np.array(diffs, 'd')
    a, b = polyfit(np.log10(nSamplesList), np.log10(diffs), deg=1)
    #print("\n", nSamplesList); print(diffs); print(a)  #DEBUG
    self.assertLess(a + 0.5, 0.05)
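# Why the test above checks the fitted log-log slope against -1/2: for a
# shot-noise-limited estimate, diff ~ C / sqrt(N), so
# log10(diff) = -0.5 * log10(N) + log10(C), and the polyfit slope `a` should sit
# near -0.5.  The following is a tiny self-contained illustration of that fit on
# idealized synthetic data (not part of the original test suite; names are
# hypothetical):
def _sketch_one_over_sqrtN_slope():
    import numpy as np
    N = np.array([16, 128, 1024, 8192], 'd')
    diff = 0.3 / np.sqrt(N)  # idealized 1/sqrt(N) scaling
    slope, _intercept = np.polyfit(np.log10(N), np.log10(diff), deg=1)
    assert abs(slope + 0.5) < 1e-10  # slope ~= -1/2 for exact 1/sqrt(N) data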
def mdl_lgst_go(self):
    return pygsti.gaugeopt_to_target(self.mdl_lgst, self.model,
                                     {'spam': 1.0, 'gates': 1.0}, checkJac=True)
def test_LGST_no_sample_error(self):
    #change rep-count type so dataset can hold fractional counts for sampleError = 'none'
    oldType = pygsti.objects.dataset.Repcount_type
    pygsti.objects.dataset.Repcount_type = np.float64
    ds = pygsti.construction.generate_fake_data(self.datagen_gateset, self.lgstStrings,
                                                nSamples=10000, sampleError='none')
    pygsti.objects.dataset.Repcount_type = oldType

    mdl_lgst = pygsti.do_lgst(ds, self.fiducials, self.fiducials, self.model,
                              svdTruncateTo=4, verbosity=0)
    print("DATAGEN:")
    print(self.datagen_gateset)
    print("\nLGST RAW:")
    print(mdl_lgst)

    mdl_lgst = pygsti.gaugeopt_to_target(mdl_lgst, self.datagen_gateset,
                                         {'spam': 1.0, 'gates': 1.0}, checkJac=False)
    print("\nAfter gauge opt:")
    print(mdl_lgst)
    print(mdl_lgst.strdiff(self.datagen_gateset))
    self.assertAlmostEqual(mdl_lgst.frobeniusdist(self.datagen_gateset), 0, places=4)
def main():
    gs, gs_target = load()
    #envSettings = dict(MKL_NUM_THREADS=1, NUMEXPR_NUM_THREADS=1, OMP_NUM_THREADS=1)
    with timed_block('TP penalty gauge opt'):
        gs_gaugeopt = pygsti.gaugeopt_to_target(gs, gs_target,
                                                item_weights={'spam': 0.0001, 'gates': 1.0},
                                                TPpenalty=1.0)
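# `timed_block` above is a small timing helper used by these benchmark scripts,
# presumably provided by the surrounding test utilities.  As a sketch of what
# such a helper does (a stand-in defined only if no `timed_block` is already in
# scope; not necessarily the project's implementation):
try:
    timed_block
except NameError:
    from contextlib import contextmanager
    import time as _time

    @contextmanager
    def timed_block(label):
        """Print how long the enclosed block took, tagged with `label`."""
        _start = _time.time()
        try:
            yield
        finally:
            print("%s took %.3f seconds" % (label, _time.time() - _start))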
def runAnalysis(obj, ds, myspecs, gsTarget, lsgstStringsToUse,
                useFreqWeightedChiSq=False, minProbClipForWeighting=1e-4,
                fidPairList=None, comm=None, distributeMethod="gatestrings"):
    #Run LGST to get starting gate set
    assertGatesetsInSync(gsTarget, comm)
    gs_lgst = pygsti.do_lgst(ds, myspecs, gsTarget, svdTruncateTo=gsTarget.dim, verbosity=3)
    assertGatesetsInSync(gs_lgst, comm)

    gs_lgst_go = pygsti.gaugeopt_to_target(gs_lgst, gsTarget)
    assertGatesetsInSync(gs_lgst_go, comm)

    #Run full iterative LSGST
    tStart = time.time()
    if obj == "chi2":
        all_gs_lsgst = pygsti.do_iterative_mc2gst(
            ds, gs_lgst_go, lsgstStringsToUse,
            minProbClipForWeighting=minProbClipForWeighting,
            probClipInterval=(-1e5, 1e5), verbosity=1, memLimit=3 * (1024)**3,
            returnAll=True, useFreqWeightedChiSq=useFreqWeightedChiSq,
            comm=comm, distributeMethod=distributeMethod)
    elif obj == "logl":
        all_gs_lsgst = pygsti.do_iterative_mlgst(
            ds, gs_lgst_go, lsgstStringsToUse,
            minProbClip=minProbClipForWeighting,
            probClipInterval=(-1e5, 1e5), verbosity=1, memLimit=3 * (1024)**3,
            returnAll=True, useFreqWeightedChiSq=useFreqWeightedChiSq,
            comm=comm, distributeMethod=distributeMethod)
    tEnd = time.time()
    print("Time = ", (tEnd - tStart) / 3600.0, "hours")

    return all_gs_lsgst
def test_LGST(self): ds = self.ds print("GG0 = ", self.model.default_gauge_group) mdl_lgst = pygsti.do_lgst(ds, self.fiducials, self.fiducials, self.model, svdTruncateTo=4, verbosity=0) mdl_lgst_verb = self.runSilent(pygsti.do_lgst, ds, self.fiducials, self.fiducials, self.model, svdTruncateTo=4, verbosity=10) self.assertAlmostEqual(mdl_lgst.frobeniusdist(mdl_lgst_verb), 0) print("GG = ", mdl_lgst.default_gauge_group) mdl_lgst_go = pygsti.gaugeopt_to_target(mdl_lgst, self.model, { 'spam': 1.0, 'gates': 1.0 }, checkJac=True) mdl_clgst = pygsti.contract(mdl_lgst_go, "CPTP") # RUN BELOW LINES TO SEED SAVED GATESET FILES if regenerate_references(): pygsti.io.write_model( mdl_lgst, compare_files + "/lgst.model", "Saved LGST Model before gauge optimization") pygsti.io.write_model(mdl_lgst_go, compare_files + "/lgst_go.model", "Saved LGST Model after gauge optimization") pygsti.io.write_model( mdl_clgst, compare_files + "/clgst.model", "Saved LGST Model after G.O. and CPTP contraction") mdl_lgst_compare = pygsti.io.load_model(compare_files + "/lgst.model") mdl_lgst_go_compare = pygsti.io.load_model(compare_files + "/lgst_go.model") mdl_clgst_compare = pygsti.io.load_model(compare_files + "/clgst.model") self.assertAlmostEqual(mdl_lgst.frobeniusdist(mdl_lgst_compare), 0, places=5) self.assertAlmostEqual(mdl_lgst_go.frobeniusdist(mdl_lgst_go_compare), 0, places=5) self.assertAlmostEqual(mdl_clgst.frobeniusdist(mdl_clgst_compare), 0, places=5)
def main():
    gs, gs_target = load()
    with timed_block('Gauge opt with CP Penalty:'):
        gs_gaugeopt = pygsti.gaugeopt_to_target(gs, gs_target,
                                                itemWeights={'spam': 0.0001, 'gates': 1.0},
                                                CPpenalty=1.0, validSpamPenalty=1.0)
def test_model_selection(self):
    ds = self.ds
    #pygsti.construction.generate_fake_data(self.datagen_gateset, self.lsgstStrings[-1],
    #                                       nSamples=1000, sampleError='binomial', seed=100)

    mdl_lgst4 = pygsti.do_lgst(ds, self.fiducials, self.fiducials, self.model,
                               svdTruncateTo=4, verbosity=0)
    mdl_lgst6 = pygsti.do_lgst(ds, self.fiducials, self.fiducials, self.model,
                               svdTruncateTo=6, verbosity=0)
    sys.stdout.flush()

    self.runSilent(pygsti.do_lgst, ds, self.fiducials, self.fiducials, self.model,
                   svdTruncateTo=6, verbosity=4)  # test verbose prints

    chiSq4 = pygsti.chi2(mdl_lgst4, ds, self.lgstStrings, minProbClipForWeighting=1e-4)
    chiSq6 = pygsti.chi2(mdl_lgst6, ds, self.lgstStrings, minProbClipForWeighting=1e-4)
    print("LGST dim=4 chiSq = ", chiSq4)
    print("LGST dim=6 chiSq = ", chiSq6)
    #self.assertAlmostEqual(chiSq4, 174.061524953) #429.271983052)
    #self.assertAlmostEqual(chiSq6, 267012993.861, places=1) #1337.74222467)
    #Why is this so large??? -- DEBUG later

    # Least squares GST with model selection
    mdl_lsgst = self.runSilent(pygsti.do_iterative_mc2gst_with_model_selection, ds,
                               mdl_lgst4, 1, self.lsgstStrings[0:3], verbosity=10,
                               minProbClipForWeighting=1e-3, probClipInterval=(-1e5, 1e5))

    # Run again with other parameters
    tuple_strings = [list(map(tuple, gsList)) for gsList in self.lsgstStrings[0:3]]  #to test tuple argument
    errorVecs, mdl_lsgst_wts = self.runSilent(pygsti.do_iterative_mc2gst_with_model_selection,
                                              ds, mdl_lgst4, 1, tuple_strings, verbosity=10,
                                              minProbClipForWeighting=1e-3,
                                              probClipInterval=(-1e5, 1e5),
                                              circuitWeightsDict={('Gx',): 2.0},
                                              returnAll=True, returnErrorVec=True)

    # Do non-iterative to cover Circuit->tuple conversion
    mdl_non_iterative = self.runSilent(pygsti.do_mc2gst_with_model_selection, ds,
                                       mdl_lgst4, 1, self.lsgstStrings[0],
                                       verbosity=10, probClipInterval=(-1e5, 1e5))

    # RUN BELOW LINES TO SEED SAVED GATESET FILES
    if os.environ.get('PYGSTI_REGEN_REF_FILES', 'no').lower() in ("yes", "1", "true"):
        pygsti.io.write_model(mdl_lsgst, compare_files + "/lsgstMS.model",
                              "Saved LSGST Model with model selection")

    mdl_lsgst_compare = pygsti.io.load_model(compare_files + "/lsgstMS.model")
    mdl_lsgst_go = pygsti.gaugeopt_to_target(mdl_lsgst, mdl_lsgst_compare,
                                             {'spam': 1.0}, checkJac=True)
    self.assertAlmostEqual(mdl_lsgst_go.frobeniusdist(mdl_lsgst_compare), 0, places=4)
def test_MPI_mlgst_forcefn(comm):
    fiducials = std.fiducials
    target_model = std.target_model()
    lgstStrings = pygsti.construction.list_lgst_circuits(
        fiducials, fiducials, list(target_model.operations.keys()))

    #Create dataset on root proc
    if comm is None or comm.Get_rank() == 0:
        datagen_gateset = target_model.depolarize(op_noise=0.01, spam_noise=0.01)
        ds = pygsti.construction.generate_fake_data(datagen_gateset, lgstStrings,
                                                    nSamples=10000, sampleError='binomial',
                                                    seed=100)
        if comm is not None:  # only broadcast when a communicator was actually given
            ds = comm.bcast(ds, root=0)
    else:
        ds = comm.bcast(None, root=0)

    mdl_lgst = pygsti.do_lgst(ds, fiducials, fiducials, target_model,
                              svdTruncateTo=4, verbosity=0)
    mdl_lgst_go = pygsti.gaugeopt_to_target(mdl_lgst, target_model,
                                            {'spam': 1.0, 'gates': 1.0})

    forcingfn_grad = np.ones((1, mdl_lgst_go.num_params()), 'd')
    mdl_lsgst_chk_opts3 = pygsti.algorithms.core._do_mlgst_base(
        ds, mdl_lgst_go, lgstStrings, verbosity=3,
        minProbClip=1e-4, probClipInterval=(-1e2, 1e2),
        forcefn_grad=forcingfn_grad, comm=comm)
def test_MLGST(self): ds = self.ds mdl_lgst = pygsti.do_lgst(ds, self.fiducials, self.fiducials, self.model, svdTruncateTo=4, verbosity=0) mdl_lgst_go = pygsti.gaugeopt_to_target(mdl_lgst, self.model, { 'spam': 1.0, 'gates': 1.0 }, checkJac=True) mdl_clgst = pygsti.contract(mdl_lgst_go, "CPTP") mdl_clgst = mdl_clgst.depolarize( op_noise=0.02, spam_noise=0.02 ) # just to avoid infinity objective funct & jacs below CM = profiler._get_mem_usage() mdl_single_mlgst = pygsti.do_mlgst(ds, mdl_clgst, self.lsgstStrings[0], minProbClip=1e-4, probClipInterval=(-1e2, 1e2), verbosity=0) #this test often gives an assetion error "finite Jacobian has inf norm!" on Travis CI Python 3 case try: mdl_single_mlgst_cpsp = pygsti.do_mlgst( ds, mdl_clgst, self.lsgstStrings[0], minProbClip=1e-4, probClipInterval=(-1e2, 1e2), cptp_penalty_factor=1.0, spam_penalty_factor=1.0, verbosity=10) #uses both penalty factors w/verbosity > 0 except ValueError: pass # ignore when assertions in customlm.py are disabled except AssertionError: pass # just ignore for now. FUTURE: see what we can do in custom LM about scaling large jacobians... try: mdl_single_mlgst_cp = pygsti.do_mlgst(ds, mdl_clgst, self.lsgstStrings[0], minProbClip=1e-4, probClipInterval=(-1e2, 1e2), cptp_penalty_factor=1.0, verbosity=10) except ValueError: pass # ignore when assertions in customlm.py are disabled except AssertionError: pass # just ignore for now. FUTURE: see what we can do in custom LM about scaling large jacobians... try: mdl_single_mlgst_sp = pygsti.do_mlgst(ds, mdl_clgst, self.lsgstStrings[0], minProbClip=1e-4, probClipInterval=(-1e2, 1e2), spam_penalty_factor=1.0, verbosity=10) except ValueError: pass # ignore when assertions in customlm.py are disabled except AssertionError: pass # just ignore for now. FUTURE: see what we can do in custom LM about scaling large jacobians... 
mdl_mlegst = pygsti.do_iterative_mlgst(ds, mdl_clgst, self.lsgstStrings, verbosity=0, minProbClip=1e-4, probClipInterval=(-1e2, 1e2), memLimit=CM + 1024**3) maxLogL, all_gs_mlegst_tups = pygsti.do_iterative_mlgst( ds, mdl_clgst, [[mdl.tup for mdl in gsList] for gsList in self.lsgstStrings], minProbClip=1e-4, probClipInterval=(-1e2, 1e2), returnAll=True, returnMaxLogL=True) mdl_mlegst_verb = self.runSilent(pygsti.do_iterative_mlgst, ds, mdl_clgst, self.lsgstStrings, verbosity=10, minProbClip=1e-4, probClipInterval=(-1e2, 1e2), memLimit=CM + 1024**3) self.assertAlmostEqual(mdl_mlegst.frobeniusdist(mdl_mlegst_verb), 0, places=5) self.assertAlmostEqual(mdl_mlegst.frobeniusdist( all_gs_mlegst_tups[-1]), 0, places=5) #Run internal checks on less max-L values (so it doesn't take forever) mdl_mlegst_chk = pygsti.do_iterative_mlgst(ds, mdl_clgst, self.lsgstStrings[0:2], verbosity=0, minProbClip=1e-4, probClipInterval=(-1e2, 1e2), check=True) #Forcing function used by linear response error bars forcingfn_grad = np.ones((1, mdl_clgst.num_params()), 'd') mdl_lsgst_chk_opts3 = pygsti.algorithms.core._do_mlgst_base( ds, mdl_clgst, self.lsgstStrings[0], verbosity=0, minProbClip=1e-4, probClipInterval=(-1e2, 1e2), forcefn_grad=forcingfn_grad) with self.assertRaises(NotImplementedError): # Non-poisson picture needs support for a non-leastsq solver (not impl yet) mdl_lsgst_chk_opts4 = pygsti.algorithms.core._do_mlgst_base( ds, mdl_clgst, self.lsgstStrings[0], verbosity=0, poissonPicture=False, minProbClip=1e-4, probClipInterval=(-1e2, 1e2), forcefn_grad=forcingfn_grad) # non-poisson picture #Check with small but ok memlimit -- not anymore since new mem estimation uses current memory, making this non-robust #self.runSilent(pygsti.do_mlgst, ds, mdl_clgst, self.lsgstStrings[0], minProbClip=1e-6, # probClipInterval=(-1e2,1e2), verbosity=4, memLimit=curMem+8500000) #invoke memory control #non-Poisson picture - should use (-1,-1) model for consistency? with self.assertRaises(NotImplementedError): # Non-poisson picture needs support for a non-leastsq solver (not impl yet) pygsti.do_mlgst(ds, mdl_clgst, self.lsgstStrings[0], minProbClip=1e-4, probClipInterval=(-1e2, 1e2), verbosity=0, poissonPicture=False) try: pygsti.do_mlgst( ds, mdl_clgst, self.lsgstStrings[0], minProbClip=1e-1, # 1e-1 b/c get inf Jacobians... probClipInterval=(-1e2, 1e2), verbosity=0, poissonPicture=False, spam_penalty_factor=1.0, cptp_penalty_factor=1.0) except ValueError: pass # ignore when assertions in customlm.py are disabled except AssertionError: pass # just ignore for now. FUTURE: see what we can do in custom LM about scaling large jacobians... #Check errors: with self.assertRaises(MemoryError): pygsti.do_mlgst(ds, mdl_clgst, self.lsgstStrings[0], minProbClip=1e-4, probClipInterval=(-1e2, 1e2), verbosity=0, memLimit=1) # RUN BELOW LINES TO SEED SAVED GATESET FILES if regenerate_references(): pygsti.io.write_model(mdl_mlegst, compare_files + "/mle_gst.model", "Saved MLE-GST Model") mdl_mle_compare = pygsti.io.load_model(compare_files + "/mle_gst.model") mdl_mlegst_go = pygsti.gaugeopt_to_target(mdl_mlegst, mdl_mle_compare, {'spam': 1.0}, checkJac=True) self.assertAlmostEqual(mdl_mlegst_go.frobeniusdist(mdl_mle_compare), 0, places=4)
def test_MC2GST(self):
    ds = self.ds
    mdl_lgst = pygsti.do_lgst(ds, self.fiducials, self.fiducials, self.model,
                              svdTruncateTo=4, verbosity=0)
    mdl_lgst_go = pygsti.gaugeopt_to_target(mdl_lgst, self.model,
                                            {'spam': 1.0, 'gates': 1.0}, checkJac=True)
    mdl_clgst = pygsti.contract(mdl_lgst_go, "CPTP")
    CM = profiler._get_mem_usage()

    mdl_lsgst = pygsti.do_iterative_mc2gst(ds, mdl_clgst, self.lsgstStrings, verbosity=0,
                                           minProbClipForWeighting=1e-6,
                                           probClipInterval=(-1e6, 1e6),
                                           memLimit=CM + 1024**3)
    all_minErrs, all_gs_lsgst_tups = pygsti.do_iterative_mc2gst(
        ds, mdl_clgst, [[mdl.tup for mdl in gsList] for gsList in self.lsgstStrings],
        minProbClipForWeighting=1e-6, probClipInterval=(-1e6, 1e6),
        returnAll=True, returnErrorVec=True)

    mdl_lsgst_verb = self.runSilent(pygsti.do_iterative_mc2gst, ds, mdl_clgst,
                                    self.lsgstStrings, verbosity=10,
                                    minProbClipForWeighting=1e-6,
                                    probClipInterval=(-1e6, 1e6),
                                    memLimit=CM + 1024**3)
    mdl_lsgst_reg = self.runSilent(pygsti.do_iterative_mc2gst, ds, mdl_clgst,
                                   self.lsgstStrings, verbosity=10,
                                   minProbClipForWeighting=1e-6,
                                   probClipInterval=(-1e6, 1e6),
                                   regularizeFactor=10, memLimit=CM + 1024**3)

    self.assertAlmostEqual(mdl_lsgst.frobeniusdist(mdl_lsgst_verb), 0)
    self.assertAlmostEqual(mdl_lsgst.frobeniusdist(all_gs_lsgst_tups[-1]), 0)

    # RUN BELOW LINES TO SEED SAVED GATESET FILES
    if regenerate_references():
        pygsti.io.write_model(mdl_lsgst, compare_files + "/lsgst.model",
                              "Saved LSGST Model")
        pygsti.io.write_model(mdl_lsgst_reg, compare_files + "/lsgst_reg.model",
                              "Saved LSGST Model w/Regularization")

    mdl_lsgst_compare = pygsti.io.load_model(compare_files + "/lsgst.model")
    mdl_lsgst_reg_compare = pygsti.io.load_model(compare_files + "/lsgst_reg.model")

    mdl_lsgst_go = pygsti.gaugeopt_to_target(mdl_lsgst, mdl_lsgst_compare,
                                             {'spam': 1.0}, checkJac=True)
    mdl_lsgst_reg_go = pygsti.gaugeopt_to_target(mdl_lsgst_reg, mdl_lsgst_reg_compare,
                                                 {'spam': 1.0}, checkJac=True)

    self.assertAlmostEqual(mdl_lsgst_go.frobeniusdist(mdl_lsgst_compare), 0, places=4)
    self.assertAlmostEqual(mdl_lsgst_reg_go.frobeniusdist(mdl_lsgst_reg_compare), 0, places=4)

    # RUN BELOW LINES TO SEED SAVED GATESET FILES
    if regenerate_references():
        mdl_lsgst_go = pygsti.gaugeopt_to_target(mdl_lsgst, self.model, {'spam': 1.0})
        pygsti.io.write_model(mdl_lsgst_go, compare_files + "/analysis.model",
                              "Saved LSGST Analysis Model")
        print("DEBUG: analysis.model = ")
        print(mdl_lsgst_go)
def test_eLGST(self): ds = self.ds assert (pygsti.obj.Model._pcheck) mdl_lgst = pygsti.do_lgst(ds, self.fiducials, self.fiducials, self.model, svdTruncateTo=4, verbosity=0) #mdl_lgst._check_paramvec() #will fail, but OK, since paramvec is computed only when *needed* now mdl_lgst_go = pygsti.gaugeopt_to_target(mdl_lgst, self.model, { 'spam': 1.0, 'gates': 1.0 }, checkJac=True) mdl_lgst_go._check_paramvec() mdl_clgst = pygsti.contract(mdl_lgst_go, "CPTP") mdl_clgst.to_vector() # to make sure we're in sync mdl_clgst._check_paramvec() self.model._check_paramvec() _, mdl_single_exlgst = pygsti.do_exlgst(ds, mdl_clgst, self.elgstStrings[0], self.fiducials, self.fiducials, self.model, regularizeFactor=1e-3, svdTruncateTo=4, verbosity=0) mdl_single_exlgst._check_paramvec() _, mdl_single_exlgst_verb = self.runSilent(pygsti.do_exlgst, ds, mdl_clgst, self.elgstStrings[0], self.fiducials, self.fiducials, self.model, regularizeFactor=1e-3, svdTruncateTo=4, verbosity=10) mdl_single_exlgst_verb._check_paramvec() self.assertAlmostEqual( mdl_single_exlgst.frobeniusdist(mdl_single_exlgst_verb), 0) mdl_exlgst = pygsti.do_iterative_exlgst(ds, mdl_clgst, self.fiducials, self.fiducials, self.elgstStrings, targetModel=self.model, svdTruncateTo=4, verbosity=0) all_minErrs, all_gs_exlgst_tups = pygsti.do_iterative_exlgst( ds, mdl_clgst, self.fiducials, self.fiducials, [[mdl.tup for mdl in gsList] for gsList in self.elgstStrings], targetModel=self.model, svdTruncateTo=4, verbosity=0, returnAll=True, returnErrorVec=True) mdl_exlgst_verb = self.runSilent(pygsti.do_iterative_exlgst, ds, mdl_clgst, self.fiducials, self.fiducials, self.elgstStrings, targetModel=self.model, svdTruncateTo=4, verbosity=10) mdl_exlgst_reg = pygsti.do_iterative_exlgst(ds, mdl_clgst, self.fiducials, self.fiducials, self.elgstStrings, targetModel=self.model, svdTruncateTo=4, verbosity=0, regularizeFactor=10) self.assertAlmostEqual(mdl_exlgst.frobeniusdist(mdl_exlgst_verb), 0) self.assertAlmostEqual( mdl_exlgst.frobeniusdist(all_gs_exlgst_tups[-1]), 0) #Run internal checks on less max-L values (so it doesn't take forever) mdl_exlgst_chk = pygsti.do_iterative_exlgst(ds, mdl_clgst, self.fiducials, self.fiducials, self.elgstStrings[0:2], targetModel=self.model, svdTruncateTo=4, verbosity=0, check_jacobian=True) mdl_exlgst_chk_verb = self.runSilent(pygsti.do_iterative_exlgst, ds, mdl_clgst, self.fiducials, self.fiducials, self.elgstStrings[0:2], targetModel=self.model, svdTruncateTo=4, verbosity=10, check_jacobian=True) # RUN BELOW LINES TO SEED SAVED GATESET FILES if regenerate_references(): pygsti.io.write_model(mdl_exlgst, compare_files + "/exlgst.model", "Saved Extended-LGST (eLGST) Model") pygsti.io.write_model( mdl_exlgst_reg, compare_files + "/exlgst_reg.model", "Saved Extended-LGST (eLGST) Model w/regularization") mdl_exlgst_compare = pygsti.io.load_model(compare_files + "/exlgst.model") mdl_exlgst_reg_compare = pygsti.io.load_model(compare_files + "/exlgst_reg.model") mdl_exlgst.set_all_parameterizations( "full" ) # b/c ex-LGST sets spam to StaticSPAMVec objects (b/c they're not optimized) mdl_exlgst_reg.set_all_parameterizations( "full" ) # b/c ex-LGST sets spam to StaticSPAMVec objects (b/c they're not optimized) mdl_exlgst_go = pygsti.gaugeopt_to_target(mdl_exlgst, mdl_exlgst_compare, {'spam': 1.0}, checkJac=True) mdl_exlgst_reg_go = pygsti.gaugeopt_to_target(mdl_exlgst_reg, mdl_exlgst_reg_compare, {'spam': 1.0}, checkJac=True)
def testIntermediateMeas(self):
    # Mess with the target model to add some error to the povm and instrument
    self.assertEqual(self.target_model.num_params(), 92)  # 4*3 + 16*5 = 92
    mdl = self.target_model.depolarize(op_noise=0.01, spam_noise=0.01)
    gs2 = self.target_model.depolarize(max_op_noise=0.01, max_spam_noise=0.01,
                                       seed=1234)  #another way to depolarize
    mdl.povms['Mdefault'].depolarize(0.01)

    # Introducing a rotation error to the measurement
    Uerr = pygsti.rotation_gate_mx([0, 0.02, 0])  # input angles are halved by the method
    E = np.dot(mdl.povms['Mdefault']['0'].T, Uerr).T  # effect is stored as column vector
    Erem = self.povm_ident - E
    mdl.povms['Mdefault'] = pygsti.obj.UnconstrainedPOVM({'0': E, '1': Erem})

    # Now add the post-measurement gates from the vector E0 and remainder = id-E0
    Gmz_plus = np.dot(E, E.T)  #since E0 is stored internally as column spamvec
    Gmz_minus = np.dot(Erem, Erem.T)
    mdl.instruments['Iz'] = pygsti.obj.Instrument({'plus': Gmz_plus, 'minus': Gmz_minus})
    self.assertEqual(mdl.num_params(), 92)  # 4*3 + 16*5 = 92
    #print(mdl)

    germs = std.germs
    fiducials = std.fiducials
    max_lengths = [1]  #,2,4,8]
    glbls = list(mdl.operations.keys()) + list(mdl.instruments.keys())
    lsgst_list = pygsti.construction.make_lsgst_experiment_list(
        glbls, fiducials, fiducials, germs, max_lengths)
    lsgst_list2 = pygsti.construction.make_lsgst_experiment_list(
        mdl, fiducials, fiducials, germs, max_lengths)  #use mdl as source
    self.assertEqual(lsgst_list, lsgst_list2)

    mdl_datagen = mdl
    ds = pygsti.construction.generate_fake_data(mdl, lsgst_list, 1000, 'none')  #'multinomial')
    pygsti.io.write_dataset(temp_files + "/intermediate_meas_dataset.txt", ds)
    ds2 = pygsti.io.load_dataset(temp_files + "/intermediate_meas_dataset.txt")
    for opstr, dsRow in ds.items():
        for lbl, cnt in dsRow.counts.items():
            self.assertAlmostEqual(cnt, ds2[opstr].counts[lbl], places=2)
    #print(ds)

    #LGST
    mdl_lgst = pygsti.do_lgst(ds, fiducials, fiducials,
                              self.target_model)  #, guessModelForGauge=mdl_datagen)
    self.assertTrue("Iz" in mdl_lgst.instruments)
    mdl_opt = pygsti.gaugeopt_to_target(mdl_lgst, mdl_datagen)  #, method="BFGS")
    print(mdl_datagen.strdiff(mdl_opt))
    print("Frobdiff = ", mdl_datagen.frobeniusdist(mdl_lgst))
    print("Frobdiff after GOpt = ", mdl_datagen.frobeniusdist(mdl_opt))
    self.assertAlmostEqual(mdl_datagen.frobeniusdist(mdl_opt), 0.0, places=4)
    #print(mdl_lgst)
    #print(mdl_datagen)

    #DEBUG compiling w/dataset
    #dbList = pygsti.construction.make_lsgst_experiment_list(self.target_model,fiducials,fiducials,germs,max_lengths)
    ##self.target_model.simplify_circuits(dbList, ds)
    #self.target_model.simplify_circuits([ pygsti.obj.Circuit(None,stringrep="Iz") ], ds )
    #assert(False),"STOP"

    #LSGST
    results = pygsti.do_long_sequence_gst(ds, self.target_model, fiducials, fiducials,
                                          germs, max_lengths)
    #print(results.estimates['default'].models['go0'])
    mdl_est = results.estimates['default'].models['go0']
    mdl_est_opt = pygsti.gaugeopt_to_target(mdl_est, mdl_datagen)
    print("Frobdiff = ", mdl_datagen.frobeniusdist(mdl_est))
    print("Frobdiff after GOpt = ", mdl_datagen.frobeniusdist(mdl_est_opt))
    self.assertAlmostEqual(mdl_datagen.frobeniusdist(mdl_est_opt), 0.0, places=4)

    #LGST w/TP gates
    mdl_targetTP = self.target_model.copy()
    mdl_targetTP.set_all_parameterizations("TP")
    self.assertEqual(mdl_targetTP.num_params(), 71)  # 3 + 4*2 + 12*5 = 71
    #print(mdl_targetTP)

    resultsTP = pygsti.do_long_sequence_gst(ds, mdl_targetTP, fiducials, fiducials,
                                            germs, max_lengths)
    mdl_est = resultsTP.estimates['default'].models['go0']
    mdl_est_opt = pygsti.gaugeopt_to_target(mdl_est, mdl_datagen)
    print("TP Frobdiff = ", mdl_datagen.frobeniusdist(mdl_est))
    print("TP Frobdiff after GOpt = ", mdl_datagen.frobeniusdist(mdl_est_opt))
    self.assertAlmostEqual(mdl_datagen.frobeniusdist(mdl_est_opt), 0.0, places=4)
def test_LGST(self): ds = self.ds #pygsti.construction.generate_fake_data(self.datagen_gateset, self.lgstStrings, nSamples=1000, # sampleError='binomial', seed=None) print("GG0 = ",self.model.default_gauge_group) mdl_lgst = pygsti.do_lgst(ds, self.fiducials, self.fiducials, self.model, svdTruncateTo=4, verbosity=0) mdl_lgst_verb = self.runSilent(pygsti.do_lgst, ds, self.fiducials, self.fiducials, self.model, svdTruncateTo=4, verbosity=10) self.assertAlmostEqual(mdl_lgst.frobeniusdist(mdl_lgst_verb),0) print("GG = ",mdl_lgst.default_gauge_group) mdl_lgst_go = pygsti.gaugeopt_to_target(mdl_lgst,self.model, {'spam':1.0, 'gates': 1.0}, checkJac=True) mdl_clgst = pygsti.contract(mdl_lgst_go, "CPTP") # RUN BELOW LINES TO SEED SAVED GATESET FILES if os.environ.get('PYGSTI_REGEN_REF_FILES','no').lower() in ("yes","1","true"): pygsti.io.write_model(mdl_lgst,compare_files + "/lgst.model", "Saved LGST Model before gauge optimization") pygsti.io.write_model(mdl_lgst_go,compare_files + "/lgst_go.model", "Saved LGST Model after gauge optimization") pygsti.io.write_model(mdl_clgst,compare_files + "/clgst.model", "Saved LGST Model after G.O. and CPTP contraction") mdl_lgst_compare = pygsti.io.load_model(compare_files + "/lgst.model") mdl_lgst_go_compare = pygsti.io.load_model(compare_files + "/lgst_go.model") mdl_clgst_compare = pygsti.io.load_model(compare_files + "/clgst.model") self.assertAlmostEqual( mdl_lgst.frobeniusdist(mdl_lgst_compare), 0, places=5) self.assertAlmostEqual( mdl_lgst_go.frobeniusdist(mdl_lgst_go_compare), 0, places=5) self.assertAlmostEqual( mdl_clgst.frobeniusdist(mdl_clgst_compare), 0, places=5) #Check for error conditions with self.assertRaises(ValueError): mdl_lgst = pygsti.do_lgst(ds, self.fiducials, self.fiducials, None, svdTruncateTo=4, verbosity=0) #no target model with self.assertRaises(ValueError): mdl_lgst = pygsti.do_lgst(ds, self.fiducials, self.fiducials, None, opLabels=list(self.model.operations.keys()), svdTruncateTo=4, verbosity=0) #no spam dict #No need for identity vector anymore #with self.assertRaises(ValueError): # mdl_lgst = pygsti.do_lgst(ds, self.fiducials, self.fiducials, None, opLabels=list(self.model.operations.keys()), # spamDict=self.model.get_reverse_spam_defs(), # svdTruncateTo=4, verbosity=0) #no identity vector with self.assertRaises(ValueError): bad_fids =pygsti.construction.circuit_list([('Gx',),('Gx',),('Gx',),('Gx',)]) mdl_lgst = pygsti.do_lgst(ds, bad_fids, bad_fids, self.model, svdTruncateTo=4, verbosity=0) # bad fiducials (rank deficient) with self.assertRaises(KeyError): # AB-matrix construction error incomplete_strings = self.lgstStrings[5:] #drop first 5 strings... bad_ds = pygsti.construction.generate_fake_data( self.datagen_gateset, incomplete_strings, nSamples=10, sampleError='none') mdl_lgst = pygsti.do_lgst(bad_ds, self.fiducials, self.fiducials, self.model, svdTruncateTo=4, verbosity=0) # incomplete dataset with self.assertRaises(KeyError): # X-matrix construction error incomplete_strings = self.lgstStrings[:-5] #drop last 5 strings... bad_ds = pygsti.construction.generate_fake_data( self.datagen_gateset, incomplete_strings, nSamples=10, sampleError='none') mdl_lgst = pygsti.do_lgst(bad_ds, self.fiducials, self.fiducials, self.model, svdTruncateTo=4, verbosity=0)
def setUpClass(cls): """ Handle all once-per-class (slow) computation and loading, to avoid calling it for each test (like setUp). Store results in class variable for use within setUp. """ super(ReportBaseCase, cls).setUpClass() orig_cwd = os.getcwd() os.chdir(os.path.abspath(os.path.dirname(__file__))) os.chdir('..') # The test_packages directory target_model = std.target_model() datagen_gateset = target_model.depolarize(op_noise=0.05, spam_noise=0.1) datagen_gateset2 = target_model.depolarize(op_noise=0.1, spam_noise=0.05).rotate((0.15,-0.03,0.03)) #cls.specs = pygsti.construction.build_spam_specs(std.fiducials, effect_labels=['E0']) # #only use the first EVec op_labels = std.gates cls.lgstStrings = pygsti.circuits.create_lgst_circuits(std.fiducials, std.fiducials, op_labels) cls.maxLengthList = [1,2,4,8] cls.lsgstStrings = pygsti.circuits.create_lsgst_circuit_lists( op_labels, std.fiducials, std.fiducials, std.germs, cls.maxLengthList) cls.lsgstStructs = pygsti.circuits.make_lsgst_structs( op_labels, std.fiducials, std.fiducials, std.germs, cls.maxLengthList) # RUN BELOW LINES TO GENERATE ANALYSIS DATASET (SAVE) if regenerate_references(): ds = pygsti.data.simulate_data(datagen_gateset, cls.lsgstStrings[-1], num_samples=1000, sample_error='binomial', seed=100) ds.save(compare_files + "/reportgen.dataset") ds2 = pygsti.data.simulate_data(datagen_gateset2, cls.lsgstStrings[-1], num_samples=1000, sample_error='binomial', seed=100) ds2.save(compare_files + "/reportgen2.dataset") cls.ds = pygsti.data.DataSet(file_to_load_from=compare_files + "/reportgen.dataset") cls.ds2 = pygsti.data.DataSet(file_to_load_from=compare_files + "/reportgen2.dataset") mdl_lgst = pygsti.run_lgst(cls.ds, std.fiducials, std.fiducials, target_model, svd_truncate_to=4, verbosity=0) mdl_lgst_go = pygsti.gaugeopt_to_target(mdl_lgst, target_model, {'gates': 1.0, 'spam': 0.0}) cls.mdl_clgst = pygsti.contract(mdl_lgst_go, "CPTP") cls.mdl_clgst_tp = pygsti.contract(cls.mdl_clgst, "vSPAM") cls.mdl_clgst_tp.set_all_parameterizations("full TP") #Compute results for MC2GST lsgst_gatesets_prego, *_ = pygsti.run_iterative_gst( cls.ds, cls.mdl_clgst, cls.lsgstStrings, optimizer={'tol': 1e-5}, iteration_objfn_builders=['chi2'], final_objfn_builders=[], resource_alloc=None, verbosity=0 ) experiment_design = pygsti.protocols.StandardGSTDesign( target_model.create_processor_spec(), std.fiducials, std.fiducials, std.germs, cls.maxLengthList ) data = pygsti.protocols.ProtocolData(experiment_design, cls.ds) protocol = pygsti.protocols.StandardGST() cls.results = pygsti.protocols.gst.ModelEstimateResults(data, protocol) cls.results.add_estimate(pygsti.protocols.estimate.Estimate.create_gst_estimate( cls.results, target_model, cls.mdl_clgst,lsgst_gatesets_prego, {'objective': "chi2", 'min_prob_clip_for_weighting': 1e-4, 'prob_clip_interval': (-1e6,1e6), 'radius': 1e-4, 'weights': None, 'defaultDirectory': temp_files + "", 'defaultBasename': "MyDefaultReportName"} )) gaugeOptParams = collections.OrderedDict([ ('model', lsgst_gatesets_prego[-1]), #so can gauge-propagate CIs ('target_model', target_model), #so can gauge-propagate CIs ('cptp_penalty_factor', 0), ('gates_metric',"frobenius"), ('spam_metric',"frobenius"), ('item_weights', {'gates': 1.0, 'spam': 0.001}), ('return_all', True) ]) _, gaugeEl, go_final_gateset = pygsti.gaugeopt_to_target(**gaugeOptParams) gaugeOptParams['_gaugeGroupEl'] = gaugeEl #so can gauge-propagate CIs cls.results.estimates['default'].add_gaugeoptimized(gaugeOptParams, go_final_gateset) 
cls.results.estimates['default'].add_gaugeoptimized(gaugeOptParams, go_final_gateset, "go_dup") #Compute results for MLGST with TP constraint # Use run_long_sequence_gst with a non-mark dataset to trigger data scaling tp_target = target_model.copy(); tp_target.set_all_parameterizations("full TP") cls.ds3 = cls.ds.copy_nonstatic() cls.ds3.add_counts_from_dataset(cls.ds2) cls.ds3.done_adding_data() cls.results_logL = pygsti.run_long_sequence_gst(cls.ds3, tp_target, std.fiducials, std.fiducials, std.germs, cls.maxLengthList, verbosity=0, advanced_options={'tolerance': 1e-6, 'starting_point': 'LGST', 'on_bad_fit': ["robust","Robust","robust+","Robust+"], 'bad_fit_threshold': -1.0, 'germ_length_limits': {('Gx','Gi','Gi'): 2} }) #OLD #lsgst_gatesets_TP = pygsti.do_iterative_mlgst(cls.ds, cls.mdl_clgst_tp, cls.lsgstStrings, verbosity=0, # min_prob_clip=1e-4, prob_clip_interval=(-1e6,1e6), # returnAll=True) #TP initial model => TP output models #cls.results_logL = pygsti.objects.Results() #cls.results_logL.init_dataset(cls.ds) #cls.results_logL.init_circuits(cls.lsgstStructs) #cls.results_logL.add_estimate(target_model, cls.mdl_clgst_tp, # lsgst_gatesets_TP, # {'objective': "logl", # 'min_prob_clip': 1e-4, # 'prob_clip_interval': (-1e6,1e6), 'radius': 1e-4, # 'weights': None, 'defaultDirectory': temp_files + "", # 'defaultBasename': "MyDefaultReportName"}) # #tp_target = target_model.copy(); tp_target.set_all_parameterizations("full TP") #gaugeOptParams = gaugeOptParams.copy() #just to be safe #gaugeOptParams['model'] = lsgst_gatesets_TP[-1] #so can gauge-propagate CIs #gaugeOptParams['target_model'] = tp_target #so can gauge-propagate CIs #_, gaugeEl, go_final_gateset = pygsti.gaugeopt_to_target(**gaugeOptParams) #gaugeOptParams['_gaugeGroupEl'] = gaugeEl #so can gauge-propagate CIs #cls.results_logL.estimates['default'].add_gaugeoptimized(gaugeOptParams, go_final_gateset) # ##self.results_logL.options.precision = 3 ##self.results_logL.options.polar_precision = 2 os.chdir(orig_cwd)
def test_MC2GST(self): ds = self.ds #pygsti.construction.generate_fake_data(self.datagen_gateset, self.lsgstStrings[-1], # nSamples=1000, sampleError='binomial', seed=100) gs_lgst = pygsti.do_lgst(ds, self.fiducials, self.fiducials, self.gateset, svdTruncateTo=4, verbosity=0) gs_lgst_go = pygsti.gaugeopt_to_target(gs_lgst,self.gateset, {'spam':1.0, 'gates': 1.0}, checkJac=True) gs_clgst = pygsti.contract(gs_lgst_go, "CPTP") CM = pygsti.baseobjs.profiler._get_mem_usage() gs_single_lsgst = pygsti.do_mc2gst(ds, gs_clgst, self.lsgstStrings[0], minProbClipForWeighting=1e-4, probClipInterval=(-1e6,1e6), regularizeFactor=1e-3, verbosity=0) #uses regularizeFactor gs_single_lsgst_cp = pygsti.do_mc2gst(ds, gs_clgst, self.lsgstStrings[0], minProbClipForWeighting=1e-4, probClipInterval=(-1e6,1e6), cptp_penalty_factor=1.0, verbosity=0) #uses cptp_penalty_factor gs_single_lsgst_sp = pygsti.do_mc2gst(ds, gs_clgst, self.lsgstStrings[0], minProbClipForWeighting=1e-4, probClipInterval=(-1e6,1e6), spam_penalty_factor=1.0, verbosity=0) #uses spam_penalty_factor gs_single_lsgst_cpsp = pygsti.do_mc2gst(ds, gs_clgst, self.lsgstStrings[0], minProbClipForWeighting=1e-4, probClipInterval=(-1e6,1e6), cptp_penalty_factor=1.0, spam_penalty_factor=1.0, verbosity=0) #uses both penalty factors gs_single_lsgst_cpsp = self.runSilent(pygsti.do_mc2gst, ds, gs_clgst, self.lsgstStrings[0], minProbClipForWeighting=1e-4, probClipInterval=(-1e6,1e6), cptp_penalty_factor=1.0, spam_penalty_factor=1.0, verbosity=10) #uses both penalty factors w/verbosity high gs_single_lsgst_cp = self.runSilent(pygsti.do_mc2gst, ds, gs_clgst, self.lsgstStrings[0], minProbClipForWeighting=1e-4, probClipInterval=(-1e6,1e6), cptp_penalty_factor=1.0, verbosity=10) #uses cptp_penalty_factor w/verbosity high gs_single_lsgst_sp = self.runSilent(pygsti.do_mc2gst, ds, gs_clgst, self.lsgstStrings[0], minProbClipForWeighting=1e-4, probClipInterval=(-1e6,1e6), spam_penalty_factor=1.0, verbosity=10) #uses spam_penalty_factor w/verbosity high gs_lsgst = pygsti.do_iterative_mc2gst(ds, gs_clgst, self.lsgstStrings, verbosity=0, minProbClipForWeighting=1e-6, probClipInterval=(-1e6,1e6), memLimit=CM + 1024**3) all_minErrs, all_gs_lsgst_tups = pygsti.do_iterative_mc2gst( ds, gs_clgst, [ [gs.tup for gs in gsList] for gsList in self.lsgstStrings], minProbClipForWeighting=1e-6, probClipInterval=(-1e6,1e6), returnAll=True, returnErrorVec=True) gs_lsgst_verb = self.runSilent(pygsti.do_iterative_mc2gst, ds, gs_clgst, self.lsgstStrings, verbosity=10, minProbClipForWeighting=1e-6, probClipInterval=(-1e6,1e6), memLimit=CM + 1024**3) gs_lsgst_reg = self.runSilent(pygsti.do_iterative_mc2gst,ds, gs_clgst, self.lsgstStrings, verbosity=10, minProbClipForWeighting=1e-6, probClipInterval=(-1e6,1e6), regularizeFactor=10, memLimit=CM + 1024**3) self.assertAlmostEqual(gs_lsgst.frobeniusdist(gs_lsgst_verb),0) self.assertAlmostEqual(gs_lsgst.frobeniusdist(all_gs_lsgst_tups[-1]),0) #Run internal checks on less max-L values (so it doesn't take forever) gs_lsgst_chk = pygsti.do_iterative_mc2gst(ds, gs_clgst, self.lsgstStrings[0:2], verbosity=0, minProbClipForWeighting=1e-6, probClipInterval=(-1e6,1e6), check=True, check_jacobian=True) gs_lsgst_chk_verb = self.runSilent(pygsti.do_iterative_mc2gst, ds, gs_clgst, self.lsgstStrings[0:2], verbosity=10, minProbClipForWeighting=1e-6, probClipInterval=(-1e6,1e6), check=True, check_jacobian=True, memLimit=CM + 1024**3) #Other option variations - just make sure they run at this point gs_lsgst_chk_opts = pygsti.do_iterative_mc2gst(ds, gs_clgst, 
self.lsgstStrings[0:2], verbosity=0, minProbClipForWeighting=1e-6, probClipInterval=(-1e6,1e6), useFreqWeightedChiSq=True, gateStringSetLabels=["Set1","Set2"], gatestringWeightsDict={ ('Gx',): 2.0 } ) aliased_list = [ pygsti.obj.GateString( [ (x if x != "Gx" else "GA1") for x in gs]) for gs in self.lsgstStrings[0] ] gs_withA1 = gs_clgst.copy(); gs_withA1.gates["GA1"] = gs_clgst.gates["Gx"] del gs_withA1.gates["Gx"] # otherwise gs_withA1 will have Gx params that we have no knowledge of! gs_lsgst_chk_opts2 = pygsti.do_mc2gst(ds, gs_withA1, aliased_list, minProbClipForWeighting=1e-6, probClipInterval=(-1e2,1e2), verbosity=10, gateLabelAliases={ 'GA1': ('Gx',) }) #Check with small but ok memlimit -- not anymore since new mem estimation uses current memory, making this non-robust #self.runSilent(pygsti.do_mc2gst,ds, gs_clgst, self.lsgstStrings[0], minProbClipForWeighting=1e-6, # probClipInterval=(-1e6,1e6), regularizeFactor=1e-3, # verbosity=10, memLimit=CM + 1024**3) #Check errors: with self.assertRaises(MemoryError): pygsti.do_mc2gst(ds, gs_clgst, self.lsgstStrings[0], minProbClipForWeighting=1e-6, probClipInterval=(-1e6,1e6), regularizeFactor=1e-3, verbosity=0, memLimit=1) with self.assertRaises(AssertionError): pygsti.do_mc2gst(ds, gs_clgst, self.lsgstStrings[0], minProbClipForWeighting=1e-6, probClipInterval=(-1e6,1e6), regularizeFactor=1e-3, verbosity=0, cptp_penalty_factor=1.0) #can't specify both cptp_penalty_factor and regularizeFactor # RUN BELOW LINES TO SEED SAVED GATESET FILES #pygsti.io.write_gateset(gs_lsgst,compare_files + "/lsgst.gateset", "Saved LSGST Gateset") #pygsti.io.write_gateset(gs_lsgst_reg,compare_files + "/lsgst_reg.gateset", "Saved LSGST Gateset w/Regularization") gs_lsgst_compare = pygsti.io.load_gateset(compare_files + "/lsgst.gateset") gs_lsgst_reg_compare = pygsti.io.load_gateset(compare_files + "/lsgst_reg.gateset") gs_lsgst_go = pygsti.gaugeopt_to_target(gs_lsgst, gs_lsgst_compare, {'spam':1.0}, checkJac=True) gs_lsgst_reg_go = pygsti.gaugeopt_to_target(gs_lsgst_reg, gs_lsgst_reg_compare, {'spam':1.0}, checkJac=True) self.assertAlmostEqual( gs_lsgst_go.frobeniusdist(gs_lsgst_compare), 0, places=4) self.assertAlmostEqual( gs_lsgst_reg_go.frobeniusdist(gs_lsgst_reg_compare), 0, places=4)
def test_LGST(self): ds = self.ds #pygsti.construction.generate_fake_data(self.datagen_gateset, self.lgstStrings, nSamples=1000, # sampleError='binomial', seed=None) print("GG0 = ",self.gateset.default_gauge_group) gs_lgst = pygsti.do_lgst(ds, self.fiducials, self.fiducials, self.gateset, svdTruncateTo=4, verbosity=0) gs_lgst_verb = self.runSilent(pygsti.do_lgst, ds, self.fiducials, self.fiducials, self.gateset, svdTruncateTo=4, verbosity=10) self.assertAlmostEqual(gs_lgst.frobeniusdist(gs_lgst_verb),0) print("GG = ",gs_lgst.default_gauge_group) gs_lgst_go = pygsti.gaugeopt_to_target(gs_lgst,self.gateset, {'spam':1.0, 'gates': 1.0}, checkJac=True) gs_clgst = pygsti.contract(gs_lgst_go, "CPTP") # RUN BELOW LINES TO SEED SAVED GATESET FILES #pygsti.io.write_gateset(gs_lgst,compare_files + "/lgst.gateset", "Saved LGST Gateset before gauge optimization") #pygsti.io.write_gateset(gs_lgst_go,compare_files + "/lgst_go.gateset", "Saved LGST Gateset after gauge optimization") #pygsti.io.write_gateset(gs_clgst,compare_files + "/clgst.gateset", "Saved LGST Gateset after G.O. and CPTP contraction") gs_lgst_compare = pygsti.io.load_gateset(compare_files + "/lgst.gateset") gs_lgst_go_compare = pygsti.io.load_gateset(compare_files + "/lgst_go.gateset") gs_clgst_compare = pygsti.io.load_gateset(compare_files + "/clgst.gateset") self.assertAlmostEqual( gs_lgst.frobeniusdist(gs_lgst_compare), 0, places=5) self.assertAlmostEqual( gs_lgst_go.frobeniusdist(gs_lgst_go_compare), 0, places=5) self.assertAlmostEqual( gs_clgst.frobeniusdist(gs_clgst_compare), 0, places=5) #Check for error conditions with self.assertRaises(ValueError): gs_lgst = pygsti.do_lgst(ds, self.fiducials, self.fiducials, None, svdTruncateTo=4, verbosity=0) #no target gateset with self.assertRaises(ValueError): gs_lgst = pygsti.do_lgst(ds, self.fiducials, self.fiducials, None, gateLabels=list(self.gateset.gates.keys()), svdTruncateTo=4, verbosity=0) #no spam dict #No need for identity vector anymore #with self.assertRaises(ValueError): # gs_lgst = pygsti.do_lgst(ds, self.fiducials, self.fiducials, None, gateLabels=list(self.gateset.gates.keys()), # spamDict=self.gateset.get_reverse_spam_defs(), # svdTruncateTo=4, verbosity=0) #no identity vector with self.assertRaises(ValueError): bad_fids =pygsti.construction.gatestring_list([('Gx',),('Gx',),('Gx',),('Gx',)]) gs_lgst = pygsti.do_lgst(ds, bad_fids, bad_fids, self.gateset, svdTruncateTo=4, verbosity=0) # bad fiducials (rank deficient) with self.assertRaises(KeyError): # AB-matrix construction error incomplete_strings = self.lgstStrings[5:] #drop first 5 strings... bad_ds = pygsti.construction.generate_fake_data( self.datagen_gateset, incomplete_strings, nSamples=10, sampleError='none') gs_lgst = pygsti.do_lgst(bad_ds, self.fiducials, self.fiducials, self.gateset, svdTruncateTo=4, verbosity=0) # incomplete dataset with self.assertRaises(KeyError): # X-matrix construction error incomplete_strings = self.lgstStrings[:-5] #drop last 5 strings... bad_ds = pygsti.construction.generate_fake_data( self.datagen_gateset, incomplete_strings, nSamples=10, sampleError='none') gs_lgst = pygsti.do_lgst(bad_ds, self.fiducials, self.fiducials, self.gateset, svdTruncateTo=4, verbosity=0)
def test_MLGST(self): ds = self.ds #pygsti.construction.generate_fake_data(self.datagen_gateset, self.lsgstStrings[-1], # nSamples=1000, sampleError='binomial', seed=100) mdl_lgst = pygsti.do_lgst(ds, self.fiducials, self.fiducials, self.model, svdTruncateTo=4, verbosity=0) mdl_lgst_go = pygsti.gaugeopt_to_target(mdl_lgst,self.model, {'spam':1.0, 'gates': 1.0}, checkJac=True) mdl_clgst = pygsti.contract(mdl_lgst_go, "CPTP") mdl_clgst = mdl_clgst.depolarize(op_noise=0.02, spam_noise=0.02) # just to avoid infinity objective funct & jacs below CM = pygsti.baseobjs.profiler._get_mem_usage() mdl_single_mlgst = pygsti.do_mlgst(ds, mdl_clgst, self.lsgstStrings[0], minProbClip=1e-4, probClipInterval=(-1e2,1e2), verbosity=0) #this test often gives an assetion error "finite Jacobian has inf norm!" on Travis CI Python 3 case try: mdl_single_mlgst_cpsp = pygsti.do_mlgst(ds, mdl_clgst, self.lsgstStrings[0], minProbClip=1e-4, probClipInterval=(-1e2,1e2), cptp_penalty_factor=1.0, spam_penalty_factor=1.0, verbosity=10) #uses both penalty factors w/verbosity > 0 except ValueError: pass # ignore when assertions in customlm.py are disabled except AssertionError: pass # just ignore for now. FUTURE: see what we can do in custom LM about scaling large jacobians... try: mdl_single_mlgst_cp = pygsti.do_mlgst(ds, mdl_clgst, self.lsgstStrings[0], minProbClip=1e-4, probClipInterval=(-1e2,1e2), cptp_penalty_factor=1.0, verbosity=10) except ValueError: pass # ignore when assertions in customlm.py are disabled except AssertionError: pass # just ignore for now. FUTURE: see what we can do in custom LM about scaling large jacobians... try: mdl_single_mlgst_sp = pygsti.do_mlgst(ds, mdl_clgst, self.lsgstStrings[0], minProbClip=1e-4, probClipInterval=(-1e2,1e2), spam_penalty_factor=1.0, verbosity=10) except ValueError: pass # ignore when assertions in customlm.py are disabled except AssertionError: pass # just ignore for now. FUTURE: see what we can do in custom LM about scaling large jacobians... 
mdl_mlegst = pygsti.do_iterative_mlgst(ds, mdl_clgst, self.lsgstStrings, verbosity=0, minProbClip=1e-4, probClipInterval=(-1e2,1e2), memLimit=CM + 1024**3) maxLogL, all_gs_mlegst_tups = pygsti.do_iterative_mlgst( ds, mdl_clgst, [ [mdl.tup for mdl in gsList] for gsList in self.lsgstStrings], minProbClip=1e-4, probClipInterval=(-1e2,1e2), returnAll=True, returnMaxLogL=True) mdl_mlegst_verb = self.runSilent(pygsti.do_iterative_mlgst, ds, mdl_clgst, self.lsgstStrings, verbosity=10, minProbClip=1e-4, probClipInterval=(-1e2,1e2), memLimit=CM + 1024**3) self.assertAlmostEqual(mdl_mlegst.frobeniusdist(mdl_mlegst_verb),0, places=5) self.assertAlmostEqual(mdl_mlegst.frobeniusdist(all_gs_mlegst_tups[-1]),0,places=5) #Run internal checks on less max-L values (so it doesn't take forever) mdl_mlegst_chk = pygsti.do_iterative_mlgst(ds, mdl_clgst, self.lsgstStrings[0:2], verbosity=0, minProbClip=1e-4, probClipInterval=(-1e2,1e2), check=True) #Other option variations - just make sure they run at this point mdl_mlegst_chk_opts = pygsti.do_iterative_mlgst(ds, mdl_clgst, self.lsgstStrings[0:2], verbosity=0, minProbClip=1e-4, probClipInterval=(-1e2,1e2), circuitSetLabels=["Set1","Set2"], useFreqWeightedChiSq=True, circuitWeightsDict={ (L('Gx'),): 2.0 } ) aliased_list = [ pygsti.obj.Circuit( [ (x if x != L("Gx") else L("GA1")) for x in mdl]) for mdl in self.lsgstStrings[0] ] mdl_withA1 = mdl_clgst.copy(); mdl_withA1.operations["GA1"] = mdl_clgst.operations["Gx"] del mdl_withA1.operations["Gx"] # otherwise mdl_withA1 will have Gx params that we have no knowledge of! mdl_mlegst_chk_opts2 = pygsti.do_mlgst(ds, mdl_withA1, aliased_list, minProbClip=1e-4, probClipInterval=(-1e2,1e2), verbosity=10, opLabelAliases={ L('GA1'): (L('Gx'),) }) #Other option variations - just make sure they run at this point mdl_mlegst_chk_opts3 = pygsti.do_iterative_mlgst(ds, mdl_clgst, self.lsgstStrings[0:2], verbosity=0, minProbClip=1e-4, probClipInterval=(-1e2,1e2), circuitSetLabels=["Set1","Set2"], useFreqWeightedChiSq=True, circuitWeightsDict={ (L('Gx'),): 2.0 }, alwaysPerformMLE=True ) #Forcing function used by linear response error bars forcingfn_grad = np.ones((1,mdl_clgst.num_params()), 'd') mdl_lsgst_chk_opts3 = pygsti.algorithms.core._do_mlgst_base( ds, mdl_clgst, self.lsgstStrings[0], verbosity=0, minProbClip=1e-4, probClipInterval=(-1e2,1e2), forcefn_grad=forcingfn_grad) with self.assertRaises(NotImplementedError): # Non-poisson picture needs support for a non-leastsq solver (not impl yet) mdl_lsgst_chk_opts4 = pygsti.algorithms.core._do_mlgst_base( ds, mdl_clgst, self.lsgstStrings[0], verbosity=0, poissonPicture=False, minProbClip=1e-4, probClipInterval=(-1e2,1e2), forcefn_grad=forcingfn_grad) # non-poisson picture #Check with small but ok memlimit -- not anymore since new mem estimation uses current memory, making this non-robust #self.runSilent(pygsti.do_mlgst, ds, mdl_clgst, self.lsgstStrings[0], minProbClip=1e-6, # probClipInterval=(-1e2,1e2), verbosity=4, memLimit=curMem+8500000) #invoke memory control #non-Poisson picture - should use (-1,-1) model for consistency? with self.assertRaises(NotImplementedError): # Non-poisson picture needs support for a non-leastsq solver (not impl yet) pygsti.do_mlgst(ds, mdl_clgst, self.lsgstStrings[0], minProbClip=1e-4, probClipInterval=(-1e2,1e2), verbosity=0, poissonPicture=False) try: pygsti.do_mlgst(ds, mdl_clgst, self.lsgstStrings[0], minProbClip=1e-1, # 1e-1 b/c get inf Jacobians... 
probClipInterval=(-1e2,1e2), verbosity=0, poissonPicture=False, spam_penalty_factor=1.0, cptp_penalty_factor=1.0) except ValueError: pass # ignore when assertions in customlm.py are disabled except AssertionError: pass # just ignore for now. FUTURE: see what we can do in custom LM about scaling large jacobians... #Check errors: with self.assertRaises(MemoryError): pygsti.do_mlgst(ds, mdl_clgst, self.lsgstStrings[0], minProbClip=1e-4, probClipInterval=(-1e2,1e2),verbosity=0, memLimit=1) # RUN BELOW LINES TO SEED SAVED GATESET FILES if os.environ.get('PYGSTI_REGEN_REF_FILES','no').lower() in ("yes","1","true"): pygsti.io.write_model(mdl_mlegst,compare_files + "/mle_gst.model", "Saved MLE-GST Model") mdl_mle_compare = pygsti.io.load_model(compare_files + "/mle_gst.model") mdl_mlegst_go = pygsti.gaugeopt_to_target(mdl_mlegst, mdl_mle_compare, {'spam':1.0}, checkJac=True) self.assertAlmostEqual( mdl_mlegst_go.frobeniusdist(mdl_mle_compare), 0, places=4)
def test_eLGST(self): ds = self.ds #pygsti.construction.generate_fake_data(self.datagen_gateset, self.lsgstStrings[-1], # nSamples=1000,sampleError='binomial', seed=100) assert(pygsti.obj.GateSet._pcheck) gs_lgst = pygsti.do_lgst(ds, self.fiducials, self.fiducials, self.gateset, svdTruncateTo=4, verbosity=0) gs_lgst._check_paramvec() gs_lgst_go = pygsti.gaugeopt_to_target(gs_lgst,self.gateset, {'spam':1.0, 'gates': 1.0}, checkJac=True) gs_lgst_go._check_paramvec() gs_clgst = pygsti.contract(gs_lgst_go, "CPTP") gs_clgst._check_paramvec() self.gateset._check_paramvec() _,gs_single_exlgst = pygsti.do_exlgst(ds, gs_clgst, self.elgstStrings[0], self.fiducials, self.fiducials, self.gateset, regularizeFactor=1e-3, svdTruncateTo=4, verbosity=0) gs_single_exlgst._check_paramvec() _,gs_single_exlgst_verb = self.runSilent(pygsti.do_exlgst, ds, gs_clgst, self.elgstStrings[0], self.fiducials, self.fiducials, self.gateset, regularizeFactor=1e-3, svdTruncateTo=4, verbosity=10) gs_single_exlgst_verb._check_paramvec() self.assertAlmostEqual(gs_single_exlgst.frobeniusdist(gs_single_exlgst_verb),0) gs_exlgst = pygsti.do_iterative_exlgst(ds, gs_clgst, self.fiducials, self.fiducials, self.elgstStrings, targetGateset=self.gateset, svdTruncateTo=4, verbosity=0) all_minErrs, all_gs_exlgst_tups = pygsti.do_iterative_exlgst( ds, gs_clgst, self.fiducials, self.fiducials, [ [gs.tup for gs in gsList] for gsList in self.elgstStrings], targetGateset=self.gateset, svdTruncateTo=4, verbosity=0, returnAll=True, returnErrorVec=True) gs_exlgst_verb = self.runSilent(pygsti.do_iterative_exlgst, ds, gs_clgst, self.fiducials, self.fiducials, self.elgstStrings, targetGateset=self.gateset, svdTruncateTo=4, verbosity=10) gs_exlgst_reg = pygsti.do_iterative_exlgst(ds, gs_clgst, self.fiducials, self.fiducials, self.elgstStrings, targetGateset=self.gateset, svdTruncateTo=4, verbosity=0, regularizeFactor=10) self.assertAlmostEqual(gs_exlgst.frobeniusdist(gs_exlgst_verb),0) self.assertAlmostEqual(gs_exlgst.frobeniusdist(all_gs_exlgst_tups[-1]),0) #Run internal checks on less max-L values (so it doesn't take forever) gs_exlgst_chk = pygsti.do_iterative_exlgst(ds, gs_clgst, self.fiducials, self.fiducials, self.elgstStrings[0:2], targetGateset=self.gateset, svdTruncateTo=4, verbosity=0, check_jacobian=True) gs_exlgst_chk_verb = self.runSilent(pygsti.do_iterative_exlgst,ds, gs_clgst, self.fiducials, self.fiducials, self.elgstStrings[0:2], targetGateset=self.gateset, svdTruncateTo=4, verbosity=10, check_jacobian=True) # RUN BELOW LINES TO SEED SAVED GATESET FILES #pygsti.io.write_gateset(gs_exlgst,compare_files + "/exlgst.gateset", "Saved Extended-LGST (eLGST) Gateset") #pygsti.io.write_gateset(gs_exlgst_reg,compare_files + "/exlgst_reg.gateset", "Saved Extended-LGST (eLGST) Gateset w/regularization") gs_exlgst_compare = pygsti.io.load_gateset(compare_files + "/exlgst.gateset") gs_exlgst_reg_compare = pygsti.io.load_gateset(compare_files + "/exlgst_reg.gateset") gs_exlgst.set_all_parameterizations("full") # b/c ex-LGST sets spam to StaticSPAMVec objects (b/c they're not optimized) gs_exlgst_reg.set_all_parameterizations("full") # b/c ex-LGST sets spam to StaticSPAMVec objects (b/c they're not optimized) gs_exlgst_go = pygsti.gaugeopt_to_target(gs_exlgst,gs_exlgst_compare, {'spam':1.0 }, checkJac=True) gs_exlgst_reg_go = pygsti.gaugeopt_to_target(gs_exlgst_reg,gs_exlgst_reg_compare, {'spam':1.0 }, checkJac=True)
                 'updnup': ('rho0', 'E2'), 'updndn': ('rho0', 'E3'),
                 'dnupup': ('rho0', 'E4'), 'dnupdn': ('rho0', 'E5'),
                 'dndnup': ('rho0', 'E6'), 'dndndn': ('rho0', 'remainder')},
    basis="pp")
#print gs_target.num_params()


# In[3]:

#Test Gauge optimization
gs_depol = gs_target.copy().depolarize(max_gate_noise=0.05, spam_noise=0.1, seed=1200)
gs_kicked = gs_depol.kick(absmag=0.25, seed=1200)


# In[ ]:

t0 = time.time()
gs_go = pygsti.gaugeopt_to_target(gs_kicked, gs_target, tol=1e-10, verbosity=3)
print("%g sec" % (time.time() - t0))
print(gs_go.frobeniusdist(gs_target))


# In[ ]:

pickle.dump(results, open("3qbit_results.pkl", "wb"))


# In[ ]:
def setUpClass(cls): """ Handle all once-per-class (slow) computation and loading, to avoid calling it for each test (like setUp). Store results in class variable for use within setUp. """ super(ReportBaseCase, cls).setUpClass() orig_cwd = os.getcwd() os.chdir(os.path.abspath(os.path.dirname(__file__))) os.chdir('..') # The test_packages directory targetGateset = std.gs_target datagen_gateset = targetGateset.depolarize(gate_noise=0.05, spam_noise=0.1) datagen_gateset2 = targetGateset.depolarize(gate_noise=0.1, spam_noise=0.05).rotate( (0.15, -0.03, 0.03)) #cls.specs = pygsti.construction.build_spam_specs(std.fiducials, effect_labels=['E0']) # #only use the first EVec gateLabels = std.gates cls.lgstStrings = pygsti.construction.list_lgst_gatestrings( std.fiducials, std.fiducials, gateLabels) cls.maxLengthList = [1, 2, 4, 8] cls.lsgstStrings = pygsti.construction.make_lsgst_lists( gateLabels, std.fiducials, std.fiducials, std.germs, cls.maxLengthList) cls.lsgstStructs = pygsti.construction.make_lsgst_structs( gateLabels, std.fiducials, std.fiducials, std.germs, cls.maxLengthList) try: basestring #Only defined in Python 2 cls.versionsuffix = "" #Python 2 except NameError: cls.versionsuffix = "v3" #Python 3 # RUN BELOW LINES TO GENERATE ANALYSIS DATASET #ds = pygsti.construction.generate_fake_data(datagen_gateset, cls.lsgstStrings[-1], nSamples=1000, # sampleError='binomial', seed=100) #ds.save(compare_files + "/reportgen.dataset%s" % cls.versionsuffix) #ds2 = pygsti.construction.generate_fake_data(datagen_gateset2, cls.lsgstStrings[-1], nSamples=1000, # sampleError='binomial', seed=100) #ds2.save(compare_files + "/reportgen2.dataset%s" % cls.versionsuffix) cls.ds = pygsti.objects.DataSet( fileToLoadFrom=compare_files + "/reportgen.dataset%s" % cls.versionsuffix) cls.ds2 = pygsti.objects.DataSet( fileToLoadFrom=compare_files + "/reportgen2.dataset%s" % cls.versionsuffix) gs_lgst = pygsti.do_lgst(cls.ds, std.fiducials, std.fiducials, targetGateset, svdTruncateTo=4, verbosity=0) gs_lgst_go = pygsti.gaugeopt_to_target(gs_lgst, targetGateset, { 'gates': 1.0, 'spam': 0.0 }) cls.gs_clgst = pygsti.contract(gs_lgst_go, "CPTP") cls.gs_clgst_tp = pygsti.contract(cls.gs_clgst, "vSPAM") cls.gs_clgst_tp.set_all_parameterizations("TP") #Compute results for MC2GST lsgst_gatesets_prego = pygsti.do_iterative_mc2gst( cls.ds, cls.gs_clgst, cls.lsgstStrings, verbosity=0, minProbClipForWeighting=1e-6, probClipInterval=(-1e6, 1e6), returnAll=True) cls.results = pygsti.objects.Results() cls.results.init_dataset(cls.ds) cls.results.init_gatestrings(cls.lsgstStructs) cls.results.add_estimate( targetGateset, cls.gs_clgst, lsgst_gatesets_prego, { 'objective': "chi2", 'minProbClipForWeighting': 1e-4, 'probClipInterval': (-1e6, 1e6), 'radius': 1e-4, 'weights': None, 'defaultDirectory': temp_files + "", 'defaultBasename': "MyDefaultReportName" }) gaugeOptParams = collections.OrderedDict([ ('gateset', lsgst_gatesets_prego[-1]), #so can gauge-propagate CIs ('targetGateset', targetGateset), #so can gauge-propagate CIs ('cptp_penalty_factor', 0), ('gatesMetric', "frobenius"), ('spamMetric', "frobenius"), ('itemWeights', { 'gates': 1.0, 'spam': 0.001 }), ('returnAll', True) ]) _, gaugeEl, go_final_gateset = pygsti.gaugeopt_to_target( **gaugeOptParams) gaugeOptParams['_gaugeGroupEl'] = gaugeEl #so can gauge-propagate CIs cls.results.estimates['default'].add_gaugeoptimized( gaugeOptParams, go_final_gateset) cls.results.estimates['default'].add_gaugeoptimized( gaugeOptParams, go_final_gateset, "go_dup") #Compute results for MLGST 
with TP constraint # Use do_long_sequence_gst with a non-mark dataset to trigger data scaling tp_target = targetGateset.copy() tp_target.set_all_parameterizations("TP") cls.ds3 = cls.ds.copy_nonstatic() cls.ds3.add_counts_from_dataset(cls.ds2) cls.ds3.done_adding_data() cls.results_logL = pygsti.do_long_sequence_gst( cls.ds3, tp_target, std.fiducials, std.fiducials, std.germs, cls.maxLengthList, verbosity=0, advancedOptions={ 'tolerance': 1e-6, 'starting point': 'LGST', 'onBadFit': ["robust", "Robust", "robust+", "Robust+"], 'badFitThreshold': -1.0, 'germLengthLimits': { ('Gx', 'Gi', 'Gi'): 2 } }) #OLD #lsgst_gatesets_TP = pygsti.do_iterative_mlgst(cls.ds, cls.gs_clgst_tp, cls.lsgstStrings, verbosity=0, # minProbClip=1e-4, probClipInterval=(-1e6,1e6), # returnAll=True) #TP initial gateset => TP output gatesets #cls.results_logL = pygsti.objects.Results() #cls.results_logL.init_dataset(cls.ds) #cls.results_logL.init_gatestrings(cls.lsgstStructs) #cls.results_logL.add_estimate(targetGateset, cls.gs_clgst_tp, # lsgst_gatesets_TP, # {'objective': "logl", # 'minProbClip': 1e-4, # 'probClipInterval': (-1e6,1e6), 'radius': 1e-4, # 'weights': None, 'defaultDirectory': temp_files + "", # 'defaultBasename': "MyDefaultReportName"}) # #tp_target = targetGateset.copy(); tp_target.set_all_parameterizations("TP") #gaugeOptParams = gaugeOptParams.copy() #just to be safe #gaugeOptParams['gateset'] = lsgst_gatesets_TP[-1] #so can gauge-propagate CIs #gaugeOptParams['targetGateset'] = tp_target #so can gauge-propagate CIs #_, gaugeEl, go_final_gateset = pygsti.gaugeopt_to_target(**gaugeOptParams) #gaugeOptParams['_gaugeGroupEl'] = gaugeEl #so can gauge-propagate CIs #cls.results_logL.estimates['default'].add_gaugeoptimized(gaugeOptParams, go_final_gateset) # ##self.results_logL.options.precision = 3 ##self.results_logL.options.polar_precision = 2 os.chdir(orig_cwd)
def testIntermediateMeas(self):
    # Mess with the target gateset to add some error to the povm and instrument
    self.assertEqual(self.gs_target.num_params(), 92)  # 4*3 + 16*5 = 92
    gs = self.gs_target.depolarize(gate_noise=0.01, spam_noise=0.01)
    gs2 = self.gs_target.depolarize(max_gate_noise=0.01, max_spam_noise=0.01, seed=1234)  #another way to depolarize
    gs.povms['Mdefault'].depolarize(0.01)

    # Introduce a rotation error to the measurement
    Uerr = pygsti.rotation_gate_mx([0, 0.02, 0])  # input angles are halved by the method
    E = np.dot(gs.povms['Mdefault']['0'].T, Uerr).T  # effect is stored as column vector
    Erem = self.povm_ident - E
    gs.povms['Mdefault'] = pygsti.obj.UnconstrainedPOVM({'0': E, '1': Erem})

    # Now add the post-measurement gates from the vector E0 and remainder = id-E0
    Gmz_plus = np.dot(E, E.T)  #since E0 is stored internally as column spamvec
    Gmz_minus = np.dot(Erem, Erem.T)
    gs.instruments['Iz'] = pygsti.obj.Instrument({'plus': Gmz_plus, 'minus': Gmz_minus})
    self.assertEqual(gs.num_params(), 92)  # 4*3 + 16*5 = 92
    #print(gs)

    germs = std.germs
    fiducials = std.fiducials
    max_lengths = [1]  #,2,4,8]
    glbls = list(gs.gates.keys()) + list(gs.instruments.keys())

    lsgst_list = pygsti.construction.make_lsgst_experiment_list(
        glbls, fiducials, fiducials, germs, max_lengths)
    lsgst_list2 = pygsti.construction.make_lsgst_experiment_list(
        gs, fiducials, fiducials, germs, max_lengths)  #use gs as source
    self.assertEqual(lsgst_list, lsgst_list2)

    gs_datagen = gs
    ds = pygsti.construction.generate_fake_data(gs, lsgst_list, 1000, 'none')  #'multinomial')
    pygsti.io.write_dataset(temp_files + "/intermediate_meas_dataset.txt", ds)
    ds2 = pygsti.io.load_dataset(temp_files + "/intermediate_meas_dataset.txt")
    for gstr, dsRow in ds.items():
        for lbl, cnt in dsRow.counts.items():
            self.assertAlmostEqual(cnt, ds2[gstr].counts[lbl], places=2)
    #print(ds)

    #LGST
    gs_lgst = pygsti.do_lgst(ds, fiducials, fiducials, self.gs_target)  #, guessGatesetForGauge=gs_datagen)
    self.assertTrue("Iz" in gs_lgst.instruments)
    gs_opt = pygsti.gaugeopt_to_target(gs_lgst, gs_datagen)  #, method="BFGS")
    print(gs_datagen.strdiff(gs_opt))
    print("Frobdiff = ", gs_datagen.frobeniusdist(gs_lgst))
    print("Frobdiff after GOpt = ", gs_datagen.frobeniusdist(gs_opt))
    self.assertAlmostEqual(gs_datagen.frobeniusdist(gs_opt), 0.0, places=4)
    #print(gs_lgst)
    #print(gs_datagen)

    #LSGST
    results = pygsti.do_long_sequence_gst(ds, self.gs_target, fiducials, fiducials, germs, max_lengths)
    #print(results.estimates['default'].gatesets['go0'])
    gs_est = results.estimates['default'].gatesets['go0']
    gs_est_opt = pygsti.gaugeopt_to_target(gs_est, gs_datagen)
    print("Frobdiff = ", gs_datagen.frobeniusdist(gs_est))
    print("Frobdiff after GOpt = ", gs_datagen.frobeniusdist(gs_est_opt))
    self.assertAlmostEqual(gs_datagen.frobeniusdist(gs_est_opt), 0.0, places=4)

    #LSGST w/TP gates
    gs_targetTP = self.gs_target.copy()
    gs_targetTP.set_all_parameterizations("TP")
    self.assertEqual(gs_targetTP.num_params(), 71)  # 3 + 4*2 + 12*5 = 71
    #print(gs_targetTP)
    resultsTP = pygsti.do_long_sequence_gst(ds, gs_targetTP, fiducials, fiducials, germs, max_lengths)
    gs_est = resultsTP.estimates['default'].gatesets['go0']
    gs_est_opt = pygsti.gaugeopt_to_target(gs_est, gs_datagen)
    print("TP Frobdiff = ", gs_datagen.frobeniusdist(gs_est))
    print("TP Frobdiff after GOpt = ", gs_datagen.frobeniusdist(gs_est_opt))
    self.assertAlmostEqual(gs_datagen.frobeniusdist(gs_est_opt), 0.0, places=4)
def setUp(self):
    super(ReportBaseCase, self).setUp()

    self.targetGateset = std.gs_target
    datagen_gateset = self.targetGateset.depolarize(gate_noise=0.05, spam_noise=0.1)

    self.fiducials = std.fiducials
    self.germs = std.germs
    self.specs = pygsti.construction.build_spam_specs(self.fiducials, effect_labels=['E0'])  #only use the first EVec

    self.gateLabels = list(self.targetGateset.gates.keys())  # also == std.gates
    self.lgstStrings = pygsti.construction.list_lgst_gatestrings(self.specs, self.gateLabels)
    self.maxLengthList = [0, 1, 2, 4, 8]
    self.lsgstStrings = pygsti.construction.make_lsgst_lists(
        self.gateLabels, self.fiducials, self.fiducials, self.germs, self.maxLengthList)

    self.ds = pygsti.objects.DataSet(fileToLoadFrom=compare_files + "/reportgen.dataset")

    # RUN BELOW LINES TO GENERATE ANALYSIS DATASET
    #ds = pygsti.construction.generate_fake_data(datagen_gateset, lsgstStrings[-1], nSamples=1000,
    #                                            sampleError='binomial', seed=100)
    #ds.save(compare_files + "/reportgen.dataset")

    gs_lgst = pygsti.do_lgst(self.ds, self.specs, self.targetGateset, svdTruncateTo=4, verbosity=0)
    #gs_lgst_go = pygsti.optimize_gauge(gs_lgst, "target", targetGateset=self.targetGateset,
    #                                   gateWeight=1.0, spamWeight=0.0)  #DEPRECATED
    gs_lgst_go = pygsti.gaugeopt_to_target(gs_lgst, self.targetGateset, {'gates': 1.0, 'spam': 0.0})
    self.gs_clgst = pygsti.contract(gs_lgst_go, "CPTP")
    self.gs_clgst_tp = pygsti.contract(self.gs_clgst, "vSPAM")
    self.gs_clgst_tp.set_all_parameterizations("TP")

    try:
        import pptx
        self.have_python_pptx = True
    except ImportError:
        warnings.warn("**** IMPORT: Cannot import pptx (python-pptx), and so" +
                      " Powerpoint slide generation tests have been disabled.")
        self.have_python_pptx = False

    #Compute results for MC2GST
    lsgst_gatesets_prego = pygsti.do_iterative_mc2gst(self.ds, self.gs_clgst, self.lsgstStrings, verbosity=0,
                                                      minProbClipForWeighting=1e-6,
                                                      probClipInterval=(-1e6, 1e6), returnAll=True)
    gaugeOptParams = collections.OrderedDict([
        ('TPpenalty', 0),
        ('CPpenalty', 0),
        ('gatesMetric', "frobenius"),
        ('spamMetric', "frobenius"),
        ('itemWeights', {'gates': 1.0, 'spam': 0.001})])
    lsgst_gatesets = []
    for gs in lsgst_gatesets_prego:
        lsgst_gatesets.append(pygsti.gaugeopt_to_target(gs, self.targetGateset, **gaugeOptParams))

    self.results = pygsti.report.Results()
    self.results.init_Ls_and_germs("chi2", self.targetGateset, self.ds, self.gs_clgst,
                                   self.maxLengthList, self.germs,
                                   lsgst_gatesets, self.lsgstStrings, self.fiducials, self.fiducials,
                                   pygsti.construction.repeat_with_max_length, None, lsgst_gatesets_prego)
    self.results.parameters.update({'minProbClip': 1e-6, 'minProbClipForWeighting': 1e-4,
                                    'probClipInterval': (-1e6, 1e6), 'radius': 1e-4,
                                    'weights': None, 'defaultDirectory': temp_files + "",
                                    'defaultBasename': "MyDefaultReportName",
                                    'gaugeOptParams': gaugeOptParams})
    self.results.options.precision = 3
    self.results.options.polar_precision = 2

    #Compute results for MLGST with TP constraint
    lsgst_gatesets_TP = pygsti.do_iterative_mlgst(self.ds, self.gs_clgst_tp, self.lsgstStrings, verbosity=0,
                                                  minProbClip=1e-4, probClipInterval=(-1e6, 1e6),
                                                  returnAll=True)  #TP initial gateset => TP output gatesets
    tp_target = self.targetGateset.copy()
    tp_target.set_all_parameterizations("TP")
    lsgst_gatesets_TP = [pygsti.gaugeopt_to_target(gs, tp_target, {'gates': 1.0, 'spam': 0.001})
                         for gs in lsgst_gatesets_TP]

    self.results_logL = pygsti.report.Results()
    self.results_logL.init_Ls_and_germs("logl", self.targetGateset, self.ds, self.gs_clgst_tp,
                                        self.maxLengthList, self.germs,
                                        lsgst_gatesets_TP, self.lsgstStrings, self.fiducials, self.fiducials,
                                        pygsti.construction.repeat_with_max_length)
    self.results_logL.options.precision = 3
    self.results_logL.options.polar_precision = 2

    try:
        basestring  #Only defined in Python 2
        self.versionsuffix = ""  #Python 2
    except NameError:
        self.versionsuffix = "v3"  #Python 3
def test_MC2GST(self):
    ds = self.ds
    #pygsti.construction.generate_fake_data(self.datagen_gateset, self.lsgstStrings[-1],
    #                                       nSamples=1000, sampleError='binomial', seed=100)

    mdl_lgst = pygsti.do_lgst(ds, self.fiducials, self.fiducials, self.model,
                              svdTruncateTo=4, verbosity=0)
    mdl_lgst_go = pygsti.gaugeopt_to_target(mdl_lgst, self.model, {'spam': 1.0, 'gates': 1.0}, checkJac=True)
    mdl_clgst = pygsti.contract(mdl_lgst_go, "CPTP")
    CM = pygsti.baseobjs.profiler._get_mem_usage()

    mdl_single_lsgst = pygsti.do_mc2gst(ds, mdl_clgst, self.lsgstStrings[0], minProbClipForWeighting=1e-4,
                                        probClipInterval=(-1e6, 1e6), regularizeFactor=1e-3,
                                        verbosity=0)  #uses regularizeFactor

    mdl_single_lsgst_cp = pygsti.do_mc2gst(ds, mdl_clgst, self.lsgstStrings[0], minProbClipForWeighting=1e-4,
                                           probClipInterval=(-1e6, 1e6), cptp_penalty_factor=1.0,
                                           verbosity=0)  #uses cptp_penalty_factor

    mdl_single_lsgst_sp = pygsti.do_mc2gst(ds, mdl_clgst, self.lsgstStrings[0], minProbClipForWeighting=1e-4,
                                           probClipInterval=(-1e6, 1e6), spam_penalty_factor=1.0,
                                           verbosity=0)  #uses spam_penalty_factor

    mdl_single_lsgst_cpsp = pygsti.do_mc2gst(ds, mdl_clgst, self.lsgstStrings[0], minProbClipForWeighting=1e-4,
                                             probClipInterval=(-1e6, 1e6), cptp_penalty_factor=1.0,
                                             spam_penalty_factor=1.0, verbosity=0)  #uses both penalty factors

    mdl_single_lsgst_cpsp = self.runSilent(pygsti.do_mc2gst, ds, mdl_clgst, self.lsgstStrings[0],
                                           minProbClipForWeighting=1e-4, probClipInterval=(-1e6, 1e6),
                                           cptp_penalty_factor=1.0, spam_penalty_factor=1.0,
                                           verbosity=10)  #uses both penalty factors w/verbosity high

    mdl_single_lsgst_cp = self.runSilent(pygsti.do_mc2gst, ds, mdl_clgst, self.lsgstStrings[0],
                                         minProbClipForWeighting=1e-4, probClipInterval=(-1e6, 1e6),
                                         cptp_penalty_factor=1.0, verbosity=10)  #uses cptp_penalty_factor w/verbosity high

    mdl_single_lsgst_sp = self.runSilent(pygsti.do_mc2gst, ds, mdl_clgst, self.lsgstStrings[0],
                                         minProbClipForWeighting=1e-4, probClipInterval=(-1e6, 1e6),
                                         spam_penalty_factor=1.0, verbosity=10)  #uses spam_penalty_factor w/verbosity high

    mdl_lsgst = pygsti.do_iterative_mc2gst(ds, mdl_clgst, self.lsgstStrings, verbosity=0,
                                           minProbClipForWeighting=1e-6, probClipInterval=(-1e6, 1e6),
                                           memLimit=CM + 1024**3)

    all_minErrs, all_gs_lsgst_tups = pygsti.do_iterative_mc2gst(
        ds, mdl_clgst, [[mdl.tup for mdl in gsList] for gsList in self.lsgstStrings],
        minProbClipForWeighting=1e-6, probClipInterval=(-1e6, 1e6),
        returnAll=True, returnErrorVec=True)

    mdl_lsgst_verb = self.runSilent(pygsti.do_iterative_mc2gst, ds, mdl_clgst, self.lsgstStrings, verbosity=10,
                                    minProbClipForWeighting=1e-6, probClipInterval=(-1e6, 1e6),
                                    memLimit=CM + 1024**3)

    mdl_lsgst_reg = self.runSilent(pygsti.do_iterative_mc2gst, ds, mdl_clgst, self.lsgstStrings, verbosity=10,
                                   minProbClipForWeighting=1e-6, probClipInterval=(-1e6, 1e6),
                                   regularizeFactor=10, memLimit=CM + 1024**3)

    self.assertAlmostEqual(mdl_lsgst.frobeniusdist(mdl_lsgst_verb), 0)
    self.assertAlmostEqual(mdl_lsgst.frobeniusdist(all_gs_lsgst_tups[-1]), 0)

    #Run internal checks on less max-L values (so it doesn't take forever)
    mdl_lsgst_chk = pygsti.do_iterative_mc2gst(ds, mdl_clgst, self.lsgstStrings[0:2], verbosity=0,
                                               minProbClipForWeighting=1e-6, probClipInterval=(-1e6, 1e6),
                                               check=True, check_jacobian=True)
    mdl_lsgst_chk_verb = self.runSilent(pygsti.do_iterative_mc2gst, ds, mdl_clgst, self.lsgstStrings[0:2],
                                        verbosity=10, minProbClipForWeighting=1e-6,
                                        probClipInterval=(-1e6, 1e6), check=True, check_jacobian=True,
                                        memLimit=CM + 1024**3)

    #Other option variations - just make sure they run at this point
    mdl_lsgst_chk_opts = pygsti.do_iterative_mc2gst(ds, mdl_clgst, self.lsgstStrings[0:2], verbosity=0,
                                                    minProbClipForWeighting=1e-6, probClipInterval=(-1e6, 1e6),
                                                    useFreqWeightedChiSq=True,
                                                    circuitSetLabels=["Set1", "Set2"],
                                                    circuitWeightsDict={('Gx',): 2.0})

    aliased_list = [pygsti.obj.Circuit([(x if x != L("Gx") else L("GA1")) for x in mdl])
                    for mdl in self.lsgstStrings[0]]
    mdl_withA1 = mdl_clgst.copy()
    mdl_withA1.operations["GA1"] = mdl_clgst.operations["Gx"]
    del mdl_withA1.operations["Gx"]  # otherwise mdl_withA1 will have Gx params that we have no knowledge of!

    mdl_lsgst_chk_opts2 = pygsti.do_mc2gst(ds, mdl_withA1, aliased_list, minProbClipForWeighting=1e-6,
                                           probClipInterval=(-1e2, 1e2), verbosity=10,
                                           opLabelAliases={L('GA1'): (L('Gx'),)})

    #Check with small but ok memlimit -- not anymore since new mem estimation uses current memory, making this non-robust
    #self.runSilent(pygsti.do_mc2gst, ds, mdl_clgst, self.lsgstStrings[0], minProbClipForWeighting=1e-6,
    #               probClipInterval=(-1e6,1e6), regularizeFactor=1e-3,
    #               verbosity=10, memLimit=CM + 1024**3)

    #Check errors:
    with self.assertRaises(MemoryError):
        pygsti.do_mc2gst(ds, mdl_clgst, self.lsgstStrings[0],
                         minProbClipForWeighting=1e-6, probClipInterval=(-1e6, 1e6),
                         regularizeFactor=1e-3, verbosity=0, memLimit=1)

    with self.assertRaises(AssertionError):
        pygsti.do_mc2gst(ds, mdl_clgst, self.lsgstStrings[0],
                         minProbClipForWeighting=1e-6, probClipInterval=(-1e6, 1e6),
                         regularizeFactor=1e-3, verbosity=0,
                         cptp_penalty_factor=1.0)  #can't specify both cptp_penalty_factor and regularizeFactor

    # RUN BELOW LINES TO SEED SAVED GATESET FILES
    if os.environ.get('PYGSTI_REGEN_REF_FILES', 'no').lower() in ("yes", "1", "true"):
        pygsti.io.write_model(mdl_lsgst, compare_files + "/lsgst.model", "Saved LSGST Model")
        pygsti.io.write_model(mdl_lsgst_reg, compare_files + "/lsgst_reg.model", "Saved LSGST Model w/Regularization")

    mdl_lsgst_compare = pygsti.io.load_model(compare_files + "/lsgst.model")
    mdl_lsgst_reg_compare = pygsti.io.load_model(compare_files + "/lsgst_reg.model")

    mdl_lsgst_go = pygsti.gaugeopt_to_target(mdl_lsgst, mdl_lsgst_compare, {'spam': 1.0}, checkJac=True)
    mdl_lsgst_reg_go = pygsti.gaugeopt_to_target(mdl_lsgst_reg, mdl_lsgst_reg_compare, {'spam': 1.0}, checkJac=True)

    self.assertAlmostEqual(mdl_lsgst_go.frobeniusdist(mdl_lsgst_compare), 0, places=4)
    self.assertAlmostEqual(mdl_lsgst_reg_go.frobeniusdist(mdl_lsgst_reg_compare), 0, places=4)

    # RUN BELOW LINES TO SEED SAVED GATESET FILES
    if os.environ.get('PYGSTI_REGEN_REF_FILES', 'no').lower() in ("yes", "1", "true"):
        mdl_lsgst_go = pygsti.gaugeopt_to_target(mdl_lsgst, self.model, {'spam': 1.0})
        pygsti.io.write_model(mdl_lsgst_go, compare_files + "/analysis.model", "Saved LSGST Analysis Model")
        print("DEBUG: analysis.model = ")
        print(mdl_lsgst_go)  # fixed: print the model just written to analysis.model (was mdl_lgst_go)
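# Hedged usage note (not from the original file): the two "SEED SAVED GATESET FILES" branches
# above are skipped unless PYGSTI_REGEN_REF_FILES is set to a truthy value.  A driver script
# could opt in before invoking the test runner, for example:
import os
os.environ['PYGSTI_REGEN_REF_FILES'] = '1'  # any of "yes"/"1"/"true" enables the regeneration branches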
def mdl_lsgst_go(self):
    # Was previously written to disk as 'analysis.model'
    return pygsti.gaugeopt_to_target(self.mdl_lsgst, self.model, {'spam': 1.0})
def main():
    gs_target = std1Q_XYI.gs_target
    fiducials = std1Q_XYI.fiducials
    germs = std1Q_XYI.germs
    maxLengths = [1, 2, 4]
    #maxLengths = [1, 2, 4, 8, 16, 32, 64]

    #Generate some data
    gs_datagen = gs_target.depolarize(gate_noise=0.1, spam_noise=0.001)
    gs_datagen = gs_datagen.rotate(rotate=0.04)
    listOfExperiments = pygsti.construction.create_lsgst_circuits(
        gs_target, fiducials, fiducials, germs, maxLengths)
    ds = pygsti.construction.simulate_data(gs_datagen, listOfExperiments, n_samples=1000,
                                           sample_error="binomial", seed=1234)

    #Run GST
    gs_target.set_all_parameterizations("TP")  #TP-constrained
    results = pygsti.run_long_sequence_gst(ds, gs_target, fiducials, fiducials, germs,
                                           maxLengths, verbosity=0)
    with open('data/example_report_results.pkl', 'wb') as outfile:
        pickle.dump(results, outfile, protocol=2)

    # Case1: TP-constrained GST
    tpTarget = gs_target.copy()
    tpTarget.set_all_parameterizations("TP")
    results_tp = pygsti.run_long_sequence_gst(ds, tpTarget, fiducials, fiducials, germs,
                                              maxLengths, gauge_opt_params=False, verbosity=0)

    # Gauge optimize
    est = results_tp.estimates['default']
    gsFinal = est.gatesets['final iteration estimate']
    gsTarget = est.gatesets['target']
    for spamWt in [1e-4, 1e-3, 1e-2, 1e-1, 1.0]:
        gs = pygsti.gaugeopt_to_target(gsFinal, gsTarget, {'gates': 1, 'spam': spamWt})
        est.add_gaugeoptimized({'item_weights': {'gates': 1, 'spam': spamWt}}, gs, "Spam %g" % spamWt)

    #Case2: "Full" GST
    fullTarget = gs_target.copy()
    fullTarget.set_all_parameterizations("full")
    results_full = pygsti.run_long_sequence_gst(ds, fullTarget, fiducials, fiducials, germs,
                                                maxLengths, gauge_opt_params=False, verbosity=0)

    #Gauge optimize
    est = results_full.estimates['default']
    gsFinal = est.gatesets['final iteration estimate']
    gsTarget = est.gatesets['target']
    for spamWt in [1e-4, 1e-3, 1e-2, 1e-1, 1.0]:
        gs = pygsti.gaugeopt_to_target(gsFinal, gsTarget, {'gates': 1, 'spam': spamWt})
        est.add_gaugeoptimized({'item_weights': {'gates': 1, 'spam': spamWt}}, gs, "Spam %g" % spamWt)

    with open('data/full_report_results.pkl', 'wb') as outfile:
        pickle.dump((results_tp, results_full), outfile, protocol=2)
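# Hedged sketch (assumption, not in the original script): a conventional entry-point guard plus an
# illustrative read-back of the pickle written by main().  The file path and the (results_tp,
# results_full) tuple mirror the dump calls above; the attribute access on the reloaded results is
# an assumption about this API version and is for illustration only.
if __name__ == '__main__':
    main()

    import pickle
    with open('data/full_report_results.pkl', 'rb') as infile:
        results_tp, results_full = pickle.load(infile)  # tuple order matches pickle.dump above
    print(list(results_tp.estimates['default'].gatesets.keys()))  # expected to include the "Spam %g" labels added in main()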