def test_generate_fake_data_raises_on_bad_sample_error(self):
    """An unrecognized sampleError string must raise ValueError."""
    with self.assertRaises(ValueError):
        pc.generate_fake_data(self.dataset, self.circuit_list,
                              nSamples=None, sampleError='foobar',
                              seed=100)
def test_generate_fake_data(self):
    """Smoke-test fake-data generation with 'multinomial' then 'round' sampling."""
    # First pass: multinomial sampling with counts taken from the input dataset.
    ds = pc.generate_fake_data(self.dataset, self.gatestring_list,
                               nSamples=None, sampleError='multinomial',
                               seed=100)
    # Second pass: deterministic rounding of 1000-sample probabilities.
    ds = pc.generate_fake_data(ds, self.gatestring_list,
                               nSamples=1000, sampleError='round',
                               seed=100)
def setUpClass(cls):
    """Create two independently-randomized models and a dataset from each."""
    # Add a little random unitary noise to the target model — two different
    # seeds give two distinct underlying models.
    cls.mdl_exp_0 = std1Q_XYI.target_model().randomize_with_unitary(.01, seed=0)
    cls.mdl_exp_1 = std1Q_XYI.target_model().randomize_with_unitary(.01, seed=1234)

    germs = std1Q_XYI.germs
    fiducials = std1Q_XYI.fiducials
    max_lengths = [1, 2, 4, 8]
    gate_sequences = pc.make_lsgst_experiment_list(
        std1Q_XYI.gates, fiducials, fiducials, germs, max_lengths)

    # Simulate both datasets over the same experiment list with 100
    # repetitions of each sequence.
    N = 100
    cls.DS_0 = pc.generate_fake_data(cls.mdl_exp_0, gate_sequences, N,
                                     'binomial', seed=10)
    cls.DS_1 = pc.generate_fake_data(cls.mdl_exp_1, gate_sequences, N,
                                     'binomial', seed=20)
def test_generate_fake_data(self):
    """Exercise the 'multinomial', 'round', and 'binomial' sampling paths."""
    ds = pc.generate_fake_data(self.dataset, self.circuit_list,
                               nSamples=None, sampleError='multinomial',
                               seed=100)
    ds = pc.generate_fake_data(ds, self.circuit_list,
                               nSamples=1000, sampleError='round',
                               seed=100)
    # Also verify that an explicit RandomState can be supplied in place
    # of a seed.
    rng = np.random.RandomState(1234)
    ds = pc.generate_fake_data(ds, self.circuit_list,
                               nSamples=1000, sampleError='binomial',
                               randState=rng)
def ds(self):
    """Return a binomial fake dataset over the full LSGST experiment list."""
    circuits = pc.make_lsgst_experiment_list(
        self.opLabels, self.fiducials, self.fiducials,
        self.germs, self.maxLengthList)
    return pc.generate_fake_data(self.datagen_gateset, circuits,
                                 nSamples=1000, sampleError='binomial',
                                 seed=_SEED)
def test_do_lgst_raises_on_incomplete_ab_matrix(self):
    """LGST should raise KeyError when required sequence data is absent."""
    # Omit the first 5 LGST strings so the AB matrix cannot be assembled.
    incomplete_strings = self.lgstStrings[5:]
    bad_ds = pc.generate_fake_data(self.datagen_gateset, incomplete_strings,
                                   nSamples=10, sampleError='none')
    with self.assertRaises(KeyError):
        core.do_lgst(bad_ds, self.fiducials, self.fiducials, self.model,
                     svdTruncateTo=4)
def setUpClass(cls):
    """Merge counts from a noisier model into the shared class dataset."""
    super(RobustDataScalingTester, cls).setUpClass()
    noisy_model = cls.model.depolarize(op_noise=0.1, spam_noise=0.03).rotate(
        (0.05, 0.13, 0.02))
    # Simulate data from the noisy model, then fold in the existing counts.
    merged = pc.generate_fake_data(
        noisy_model, cls.lsgstStrings[-1], nSamples=1000,
        sampleError='binomial', seed=100).copy_nonstatic()
    merged.add_counts_from_dataset(cls.ds)
    merged.done_adding_data()
    cls.ds = merged
def setUpClass(cls):
    """Build a dataset using the 'length as exponent' truncation scheme."""
    super(LengthAsExponentTester, cls).setUpClass()
    string_lists = pc.make_lsgst_lists(
        cls.opLabels, cls.fiducials, cls.fiducials, cls.germs, cls.maxLens,
        truncScheme="length as exponent")
    cls.ds = pc.generate_fake_data(pkg.datagen_gateset, string_lists[-1],
                                   nSamples=1000, sampleError='binomial',
                                   seed=100)
def setUpClass(cls):
    """Build a dataset using the 'truncated germ powers' truncation scheme."""
    super(TruncatedGermPowersTester, cls).setUpClass()
    string_lists = pc.make_lsgst_lists(
        cls.opLabels, cls.fiducials, cls.fiducials, cls.germs, cls.maxLens,
        truncScheme="truncated germ powers")
    cls.ds = pc.generate_fake_data(pkg.datagen_gateset, string_lists[-1],
                                   nSamples=1000, sampleError='binomial',
                                   seed=100)
def setUp(self):
    """Prepare a depolarized model and simulated dataset for RPE tests."""
    super(RPEToolsFuncBase, self).setUp()
    self.target = stdXY.target_model()
    # The XY model has no idle gate, so borrow Gi from the standard set.
    self.target.operations['Gi'] = std.target_model().operations['Gi']
    self.stringListD = rpc.make_rpe_angle_string_list_dict(2, self.config)
    self.mdl_depolXZ = self.target.depolarize(op_noise=0.1, spam_noise=0.1,
                                              seed=_SEED)
    self.ds = pc.generate_fake_data(self.mdl_depolXZ,
                                    self.stringListD['totalStrList'],
                                    nSamples=1000, sampleError='binomial',
                                    seed=_SEED)
def setUp(self):
    """Build a depolarized 1-qubit gateset, LSGST string lists, and a
    simulated dataset for the data-set construction tests.

    Sets: self.gateset, self.depolGateset, self.lsgst_lists,
    self.gatestring_list, and self.dataset.
    """
    super(DataSetConstructionTestCase, self).setUp()
    self.gateset = pc.build_gateset(
        [2], [('Q0',)], ['Gi', 'Gx', 'Gy'],
        ["I(Q0)", "X(pi/2,Q0)", "Y(pi/2,Q0)"],
        prepLabels=['rho0'], prepExpressions=["0"],
        effectLabels=['E0'], effectExpressions=["1"],
        spamdefs={'plus': ('rho0', 'E0'), 'minus': ('rho0', 'remainder')})
    self.depolGateset = self.gateset.depolarize(gate_noise=0.1)

    def make_lsgst_lists(gateLabels, fiducialList, germList, maxLengthList):
        # Build the nested LSGST string lists: one list per max-length,
        # each containing the LGST strings plus all shorter-length strings.
        singleGates = pc.gatestring_list([(g,) for g in gateLabels])  # NOTE(review): unused — candidate for removal
        lgstStrings = pc.list_lgst_gatestrings(
            pc.build_spam_specs(fiducialList), gateLabels)
        lsgst_list = pc.gatestring_list([()])  # running list of all strings so far

        # A leading max-length of 0 means "start from the LGST strings only".
        if maxLengthList[0] == 0:
            lsgst_listOfLists = [lgstStrings]
            maxLengthList = maxLengthList[1:]
        else:
            lsgst_listOfLists = []

        for maxLen in maxLengthList:
            lsgst_list += pc.create_gatestring_list(
                "f0+R(germ,N)+f1", f0=fiducialList, f1=fiducialList,
                germ=germList, N=maxLen, R=pc.repeat_with_max_length,
                order=('germ', 'f0', 'f1'))
            lsgst_listOfLists.append(
                pygsti.remove_duplicates(lgstStrings + lsgst_list))

        # list() is needed so the lengths print usefully under Python 3
        # (a bare map object would print as "<map object ...>").
        print("%d LSGST sets w/lengths" % len(lsgst_listOfLists),
              list(map(len, lsgst_listOfLists)))
        return lsgst_listOfLists

    gates = ['Gi', 'Gx', 'Gy']
    fiducials = pc.gatestring_list(
        [(), ('Gx',), ('Gy',), ('Gx', 'Gx'),
         ('Gx', 'Gx', 'Gx'), ('Gy', 'Gy', 'Gy')])  # fiducials for 1Q MUB
    germs = pc.gatestring_list(
        [('Gx',), ('Gy',), ('Gi',), ('Gx', 'Gy',),
         ('Gx', 'Gy', 'Gi',), ('Gx', 'Gi', 'Gy',), ('Gx', 'Gi', 'Gi',),
         ('Gy', 'Gi', 'Gi',), ('Gx', 'Gx', 'Gi', 'Gy',),
         ('Gx', 'Gy', 'Gy', 'Gi',),
         ('Gx', 'Gx', 'Gy', 'Gx', 'Gy', 'Gy',)])
    maxLengths = [0, 1, 2, 4, 8, 16, 32, 64, 128, 256]
    self.lsgst_lists = make_lsgst_lists(gates, fiducials, germs, maxLengths)
    self.gatestring_list = self.lsgst_lists[-1]
    self.dataset = pc.generate_fake_data(self.depolGateset,
                                         self.gatestring_list,
                                         nSamples=1000,
                                         sampleError='binomial', seed=100)
def setUpClass(cls):
    """Build the fiducial-sandwiched experiment list and simulated data."""
    super(DirectXTester, cls).setUpClass()
    cls._tgt = fixtures.model.copy()
    cls.prepStrs = fixtures.fiducials
    cls.effectStrs = fixtures.fiducials
    # Base circuits: the empty circuit is always required; the single-gate
    # circuits are needed for includeTargetOps=True; the rest add variety.
    cls.strs = pc.circuit_list([
        (),
        ('Gx',), ('Gy',), ('Gi',),
        ('Gx', 'Gx'), ('Gx', 'Gy', 'Gx'),
    ])
    expstrs = pc.create_circuit_list(
        "f0+base+f1", order=['f0', 'f1', 'base'],
        f0=fixtures.fiducials, f1=fixtures.fiducials, base=cls.strs)
    cls._ds = pc.generate_fake_data(fixtures.datagen_gateset.copy(),
                                    expstrs, 1000, 'multinomial',
                                    seed=_SEED)
def dataset(self):
    """Return a binomial fake dataset over the longest LSGST string list."""
    return pc.generate_fake_data(self.datagen_gateset,
                                 self.lsgstStrings[-1],
                                 nSamples=1000, sampleError='binomial',
                                 seed=100)
def ds_lgst(self):
    """Return a 10000-sample binomial dataset over the LGST strings."""
    return pc.generate_fake_data(self.datagen_gateset,
                                 self.lgstStrings,
                                 nSamples=10000, sampleError='binomial',
                                 seed=_SEED)
def setUp(self):
    """Build a depolarized 1-qubit model, LSGST circuit lists, and a
    simulated dataset for the data-set construction tests.

    Sets: self.model, self.depolGateset, self.lsgst_lists,
    self.circuit_list, and self.dataset.
    """
    super(DataSetConstructionTestCase, self).setUp()
    self.model = pc.build_explicit_model(
        [('Q0',)], ['Gi', 'Gx', 'Gy'],
        ["I(Q0)", "X(pi/2,Q0)", "Y(pi/2,Q0)"])
    self.depolGateset = self.model.depolarize(op_noise=0.1)

    def make_lsgst_lists(opLabels, fiducialList, germList, maxLengthList):
        # Build the nested LSGST circuit lists: one list per max-length,
        # each containing the LGST circuits plus all shorter-length circuits.
        singleOps = pc.circuit_list([(g,) for g in opLabels])  # NOTE(review): unused — candidate for removal
        lgstStrings = pc.list_lgst_circuits(fiducialList, fiducialList,
                                            opLabels)
        lsgst_list = pc.circuit_list([()])  # running list of all strings so far

        # A leading max-length of 0 means "start from the LGST circuits only".
        if maxLengthList[0] == 0:
            lsgst_listOfLists = [lgstStrings]
            maxLengthList = maxLengthList[1:]
        else:
            lsgst_listOfLists = []

        for maxLen in maxLengthList:
            lsgst_list += pc.create_circuit_list(
                "f0+R(germ,N)+f1", f0=fiducialList, f1=fiducialList,
                germ=germList, N=maxLen, R=pc.repeat_with_max_length,
                order=('germ', 'f0', 'f1'))
            lsgst_listOfLists.append(
                pygsti.remove_duplicates(lgstStrings + lsgst_list))

        # list() is needed so the lengths print usefully under Python 3
        # (a bare map object would print as "<map object ...>").
        print("%d LSGST sets w/lengths" % len(lsgst_listOfLists),
              list(map(len, lsgst_listOfLists)))
        return lsgst_listOfLists

    gates = ['Gi', 'Gx', 'Gy']
    fiducials = pc.circuit_list(
        [(), ('Gx',), ('Gy',), ('Gx', 'Gx'),
         ('Gx', 'Gx', 'Gx'), ('Gy', 'Gy', 'Gy')])  # fiducials for 1Q MUB
    germs = pc.circuit_list(
        [('Gx',), ('Gy',), ('Gi',), ('Gx', 'Gy',),
         ('Gx', 'Gy', 'Gi',), ('Gx', 'Gi', 'Gy',), ('Gx', 'Gi', 'Gi',),
         ('Gy', 'Gi', 'Gi',), ('Gx', 'Gx', 'Gi', 'Gy',),
         ('Gx', 'Gy', 'Gy', 'Gi',),
         ('Gx', 'Gx', 'Gy', 'Gx', 'Gy', 'Gy',)])
    maxLengths = [0, 1, 2, 4, 8, 16, 32, 64, 128, 256]
    self.lsgst_lists = make_lsgst_lists(gates, fiducials, germs, maxLengths)
    self.circuit_list = self.lsgst_lists[-1]
    self.dataset = pc.generate_fake_data(self.depolGateset,
                                         self.circuit_list,
                                         nSamples=1000,
                                         sampleError='binomial', seed=100)
def simulate_convergence(germs, prepFiducials, effectFiducials, targetGS,
                         randStr=1e-2, numPertGS=5, maxLengthsPower=8,
                         clickNums=32, numRuns=7, seed=None, randState=None,
                         gaugeOptRatio=1e-3, constrainToTP=True):
    """Simulate GST convergence for a germ set.

    Generates `numPertGS` unitarily-perturbed copies of `targetGS`, then for
    each perturbed model, each click count in `clickNums`, and each of
    `numRuns` repetitions: simulates binomial data over the LSGST experiment
    list and runs long-sequence GST, recording per-iteration gauge-optimized
    Frobenius distances to the true (perturbed) model.

    Returns an obj.GermSetEval collecting the germ set, the perturbed models,
    all GST results, and the error lists.

    Raises: re-raises the last exception after 10 consecutive failures of a
    single (model, clicks, run) combination.
    """
    if not isinstance(clickNums, list):
        clickNums = [clickNums]
    if randState is None:
        randState = _np.random.RandomState(seed)
    # Perturbed "true" models: random unitary noise of scale randStr.
    perturbedGatesets = [
        targetGS.randomize_with_unitary(scale=randStr, randState=randState)
        for n in range(numPertGS)
    ]
    # Max-lengths 0, 1, 2, 4, ..., 2**maxLengthsPower.
    maxLengths = [0] + [2**n for n in range(maxLengthsPower + 1)]
    expList = constr.make_lsgst_experiment_list(
        targetGS.gates.keys(), prepFiducials, effectFiducials, germs,
        maxLengths)
    errorDict = {}
    resultDict = {}
    for trueGatesetNum, trueGateset in enumerate(perturbedGatesets):
        for numClicks in clickNums:
            for run in range(numRuns):
                success = False
                failCount = 0
                # Retry each run up to 10 times; GST can occasionally fail.
                while not success and failCount < 10:
                    try:
                        ds = constr.generate_fake_data(
                            trueGateset, expList, nSamples=numClicks,
                            sampleError="binomial", randState=randState)
                        result = pygsti.do_long_sequence_gst(
                            ds, targetGS, prepFiducials, effectFiducials,
                            germs, maxLengths, gaugeOptRatio=gaugeOptRatio,
                            constrainToTP=constrainToTP)
                        # Per-iteration error: gauge-optimize each estimate
                        # toward the true model, then take the Frobenius
                        # distance (SPAM excluded via spamWeight=0.0),
                        # paired with its max length L. The [1:] skips the
                        # initial (LGST) entry.
                        errors = [(trueGateset.frobeniusdist(
                            alg.optimize_gauge(
                                estimate, 'target',
                                targetGateset=trueGateset,
                                constrainToTP=constrainToTP,
                                spamWeight=0.0),
                            spamWeight=0.0), L)
                            for estimate, L in zip(
                                result.gatesets['iteration estimates'][1:],
                                result.parameters['max length list'][1:])]
                        resultDict[trueGatesetNum, numClicks, run] = result
                        errorDict[trueGatesetNum, numClicks, run] = errors
                        success = True
                    except Exception as e:
                        # Deliberately broad: any GST failure counts as a
                        # retry; the 10th failure is re-raised.
                        failCount += 1
                        if failCount == 10:
                            raise e
                        print(e)
    return obj.GermSetEval(germset=germs, gatesets=perturbedGatesets,
                           resultDict=resultDict, errorDict=errorDict)
def simulate_convergence(germs, prepFiducials, effectFiducials, targetGS,
                         randStr=1e-2, numPertGS=5, maxLengthsPower=8,
                         clickNums=32, numRuns=7, seed=None, randState=None,
                         gaugeOptRatio=1e-3, constrainToTP=True):
    """Simulate GST convergence for a germ set.

    NOTE(review): this appears to be a duplicate of an identical
    simulate_convergence defined earlier in this source — confirm and
    consolidate if so.

    Generates `numPertGS` unitarily-perturbed copies of `targetGS`, then for
    each perturbed model, each click count in `clickNums`, and each of
    `numRuns` repetitions: simulates binomial data over the LSGST experiment
    list and runs long-sequence GST, recording per-iteration gauge-optimized
    Frobenius distances to the true (perturbed) model.

    Returns an obj.GermSetEval collecting the germ set, the perturbed models,
    all GST results, and the error lists.

    Raises: re-raises the last exception after 10 consecutive failures of a
    single (model, clicks, run) combination.
    """
    if not isinstance(clickNums, list):
        clickNums = [clickNums]
    if randState is None:
        randState = _np.random.RandomState(seed)
    # Perturbed "true" models: random unitary noise of scale randStr.
    perturbedGatesets = [targetGS.randomize_with_unitary(scale=randStr,
                                                         randState=randState)
                         for n in range(numPertGS)]
    # Max-lengths 0, 1, 2, 4, ..., 2**maxLengthsPower.
    maxLengths = [0] + [2**n for n in range(maxLengthsPower + 1)]
    expList = constr.make_lsgst_experiment_list(
        targetGS.gates.keys(), prepFiducials, effectFiducials, germs,
        maxLengths)
    errorDict = {}
    resultDict = {}
    for trueGatesetNum, trueGateset in enumerate(perturbedGatesets):
        for numClicks in clickNums:
            for run in range(numRuns):
                success = False
                failCount = 0
                # Retry each run up to 10 times; GST can occasionally fail.
                while not success and failCount < 10:
                    try:
                        ds = constr.generate_fake_data(
                            trueGateset, expList, nSamples=numClicks,
                            sampleError="binomial", randState=randState)
                        result = pygsti.do_long_sequence_gst(
                            ds, targetGS, prepFiducials, effectFiducials,
                            germs, maxLengths, gaugeOptRatio=gaugeOptRatio,
                            constrainToTP=constrainToTP)
                        # Per-iteration error: gauge-optimize each estimate
                        # toward the true model, then take the Frobenius
                        # distance (SPAM excluded via spamWeight=0.0),
                        # paired with its max length L. The [1:] skips the
                        # initial (LGST) entry.
                        errors = [(trueGateset
                                   .frobeniusdist(
                                       alg.optimize_gauge(
                                           estimate, 'target',
                                           targetGateset=trueGateset,
                                           constrainToTP=constrainToTP,
                                           spamWeight=0.0),
                                       spamWeight=0.0), L)
                                  for estimate, L in
                                  zip(result
                                      .gatesets['iteration estimates'][1:],
                                      result
                                      .parameters['max length list'][1:])]
                        resultDict[trueGatesetNum, numClicks, run] = result
                        errorDict[trueGatesetNum, numClicks, run] = errors
                        success = True
                    except Exception as e:
                        # Deliberately broad: any GST failure counts as a
                        # retry; the 10th failure is re-raised.
                        failCount += 1
                        if failCount == 10:
                            raise e
                        print(e)
    return obj.GermSetEval(germset=germs, gatesets=perturbedGatesets,
                           resultDict=resultDict, errorDict=errorDict)