def ds(self):
    """Simulate a dataset for this fixture's LSGST experiment list.

    Builds the standard LSGST sequence list from the fixture's op labels,
    fiducials (used for both prep and measure), germs, and max-length
    schedule, then draws 1000 binomially-sampled counts per sequence
    using the shared test seed so results are reproducible.
    """
    sequences = pc.make_lsgst_experiment_list(
        self.opLabels, self.fiducials, self.fiducials,
        self.germs, self.maxLengthList)
    return pc.generate_fake_data(
        self.datagen_gateset, sequences,
        nSamples=1000, sampleError='binomial', seed=_SEED)
def setUpClass(cls):
    """Build two slightly-noised models and a simulated dataset from each.

    Each model gets a small (scale 0.01) random unitary perturbation of the
    std1Q_XYI target, with different seeds so the two experiments differ.
    """
    cls.mdl_exp_0 = std1Q_XYI.target_model().randomize_with_unitary(.01, seed=0)
    cls.mdl_exp_1 = std1Q_XYI.target_model().randomize_with_unitary(.01, seed=1234)

    germ_list = std1Q_XYI.germs
    fiducial_list = std1Q_XYI.fiducials
    length_schedule = [1, 2, 4, 8]
    sequences = pc.make_lsgst_experiment_list(
        std1Q_XYI.gates, fiducial_list, fiducial_list,
        germ_list, length_schedule)

    # Simulate both datasets with 100 repetitions of each sequence.
    # NOTE(review): each dataset is drawn from its own perturbed model
    # (seed 0 vs seed 1234), not from a single shared model.
    repetitions = 100
    cls.DS_0 = pc.generate_fake_data(cls.mdl_exp_0, sequences, repetitions,
                                     'binomial', seed=10)
    cls.DS_1 = pc.generate_fake_data(cls.mdl_exp_1, sequences, repetitions,
                                     'binomial', seed=20)
def setUpClass(cls):
    """Prepare simplified circuits and a prototype evaluation tree.

    Builds the (LGST-free) LSGST sequence list for the class's target
    model, deduplicates it, simplifies it, and initializes one prototype
    tree that individual tests copy. The original author flagged this
    shared-state shortcut as bad testing practice, kept for speed.
    """
    op_labels = list(cls.target_model.operations.keys())
    circuits = pc.make_lsgst_experiment_list(
        op_labels, cls.prepStrs, cls.measStrs, cls.germs, cls.maxLens,
        includeLGST=False)
    tools.remove_duplicates_in_place(circuits)
    # simplify_circuits returns a 4-tuple; only the first two parts are kept.
    cls.compiled_gatestrings, cls.lookup, _, _ = \
        cls.target_model.simplify_circuits(circuits)
    # Tree instances can be copied, so instantiate once and copy per test.
    cls._tree = cls.constructor()
    cls._tree.initialize(cls.compiled_gatestrings)
def test_auto_experiment_desgin(self):
    # NOTE(review): "desgin" typo kept in the name — renaming would change
    # the test id seen by the runner.
    """Exercise hands-off and constrained germ/fiducial selection.

    Constructs a 1-qubit Gi / X(pi/2) / Y(pi/2) target gateset, lets the
    library choose germs and fiducials automatically, assembles the full
    LSGST experiment list, then re-runs selection under several alternative
    algorithms and length constraints.
    """
    target = constr.build_gateset(
        [2], [('Q0',)], ['Gi', 'Gx', 'Gy'],
        ["I(Q0)", "X(pi/2,Q0)", "Y(pi/2,Q0)"],
        prepLabels=['rho0'], prepExpressions=["0"],
        effectLabels=['E0'], effectExpressions=["1"],
        spamdefs={'plus': ('rho0', 'E0'),
                  'minus': ('rho0', 'remainder')})

    # Most hands-off approach: only the target gateset is supplied.
    auto_germs = germsel.generate_germs(target)
    prep_fids, meas_fids = fidsel.generate_fiducials(target)

    # Experiment sizes: 0 then 1, 2, 4, ..., 256 gates between fiducials.
    length_schedule = [0] + [2**n for n in range(8 + 1)]
    experiments = constr.make_lsgst_experiment_list(
        target.gates.keys(), prep_fids, meas_fids,
        auto_germs, length_schedule)

    # Alternative selection algorithms and constraints.
    grasp_germs = germsel.generate_germs(
        target, algorithm='grasp', algorithm_kwargs={'iterations': 1})
    slack_prep, slack_meas = fidsel.generate_fiducials(
        target, algorithm='slack', algorithm_kwargs={'slackFrac': 0.25})
    max(len(g) for g in auto_germs)  # notebook artifact: value unused
    germs_len5 = germsel.generate_germs(target, maxGermLength=5)
    max(len(g) for g in germs_len5)  # notebook artifact: value unused
    germs_len3 = germsel.generate_germs(target, maxGermLength=3)
    uniform_prep, uniform_meas = fidsel.generate_fiducials(
        target, maxFidLength=3, algorithm='grasp',
        algorithm_kwargs={'iterations': 100})
    incomplete_prep, incomplete_meas = fidsel.generate_fiducials(
        target, maxFidLength=1)
    non_singleton_germs = germsel.generate_germs(
        target, forceSingletons=False, maxGermLength=4,
        algorithm='grasp', algorithm_kwargs={'iterations': 5})
    omit_id_prep, omit_id_meas = fidsel.generate_fiducials(
        target, omitIdentity=False, gatesToOmit=['Gi'])
def simulate_convergence(germs, prepFiducials, effectFiducials, targetGS,
                         randStr=1e-2, numPertGS=5, maxLengthsPower=8,
                         clickNums=32, numRuns=7, seed=None, randState=None,
                         gaugeOptRatio=1e-3, constrainToTP=True):
    """Evaluate a germ set by running GST against randomly perturbed models.

    Generates `numPertGS` unitarily-perturbed copies of `targetGS`
    (perturbation scale `randStr`), simulates data for each at every sample
    count in `clickNums`, runs long-sequence GST `numRuns` times per
    combination, and records the gauge-optimized Frobenius distance of each
    iteration's estimate from the true (perturbed) gateset.

    Parameters: `germs`, `prepFiducials`, `effectFiducials` are the GST
    sequences; `maxLengthsPower` gives max lengths [0, 1, 2, ..., 2**power];
    `clickNums` may be a single int or a list of sample counts; `seed` /
    `randState` control randomness; `gaugeOptRatio` and `constrainToTP` are
    forwarded to `pygsti.do_long_sequence_gst`.

    Returns an `obj.GermSetEval` bundling the germ set, perturbed gatesets,
    per-(gateset, clicks, run) GST results, and per-iteration errors.

    NOTE(review): an identically-named function appears elsewhere in this
    file; if both live in the same module the later definition shadows this
    one — verify which is intended.
    """
    # Accept a bare int for convenience; normalize to a list.
    if not isinstance(clickNums, list):
        clickNums = [clickNums]
    if randState is None:
        randState = _np.random.RandomState(seed)
    perturbedGatesets = [
        targetGS.randomize_with_unitary(scale=randStr, randState=randState)
        for n in range(numPertGS)
    ]
    maxLengths = [0] + [2**n for n in range(maxLengthsPower + 1)]
    expList = constr.make_lsgst_experiment_list(
        targetGS.gates.keys(), prepFiducials, effectFiducials, germs,
        maxLengths)
    errorDict = {}
    resultDict = {}
    for trueGatesetNum, trueGateset in enumerate(perturbedGatesets):
        for numClicks in clickNums:
            for run in range(numRuns):
                # Retry each run up to 10 times; GST can fail stochastically.
                success = False
                failCount = 0
                while not success and failCount < 10:
                    try:
                        ds = constr.generate_fake_data(
                            trueGateset, expList, nSamples=numClicks,
                            sampleError="binomial", randState=randState)
                        result = pygsti.do_long_sequence_gst(
                            ds, targetGS, prepFiducials, effectFiducials,
                            germs, maxLengths, gaugeOptRatio=gaugeOptRatio,
                            constrainToTP=constrainToTP)
                        # For each GST iteration (skipping the first entry),
                        # pair the SPAM-ignoring Frobenius distance between
                        # the gauge-optimized estimate and the true gateset
                        # with that iteration's max length L.
                        errors = [(trueGateset.frobeniusdist(
                            alg.optimize_gauge(
                                estimate, 'target',
                                targetGateset=trueGateset,
                                constrainToTP=constrainToTP,
                                spamWeight=0.0),
                            spamWeight=0.0), L)
                            for estimate, L in zip(
                                result.gatesets['iteration estimates'][1:],
                                result.parameters['max length list'][1:])]
                        resultDict[trueGatesetNum, numClicks, run] = result
                        errorDict[trueGatesetNum, numClicks, run] = errors
                        success = True
                    except Exception as e:
                        failCount += 1
                        # Give up after 10 consecutive failures; otherwise
                        # log the error and retry.
                        if failCount == 10:
                            raise e
                        print(e)
    return obj.GermSetEval(germset=germs, gatesets=perturbedGatesets,
                           resultDict=resultDict, errorDict=errorDict)
def simulate_convergence(germs, prepFiducials, effectFiducials, targetGS,
                         randStr=1e-2, numPertGS=5, maxLengthsPower=8,
                         clickNums=32, numRuns=7, seed=None, randState=None,
                         gaugeOptRatio=1e-3, constrainToTP=True):
    """Run repeated GST on perturbed copies of `targetGS` to score `germs`.

    Creates `numPertGS` gatesets perturbed by random unitaries of scale
    `randStr`, simulates binomial data for every sample count in
    `clickNums` (an int is accepted and wrapped in a list), performs
    long-sequence GST `numRuns` times per combination, and collects
    (gauge-optimized Frobenius distance to the true gateset, max length L)
    pairs for each GST iteration.

    `maxLengthsPower` sets the length schedule [0, 1, 2, ..., 2**power];
    `seed` / `randState` control the RNG; `gaugeOptRatio` and
    `constrainToTP` are passed through to `pygsti.do_long_sequence_gst`.
    Returns an `obj.GermSetEval` with the germ set, perturbed gatesets,
    and the result/error dictionaries keyed by (gateset#, clicks, run).

    NOTE(review): this function also appears earlier in this file with
    identical logic; if both are in the same module this later definition
    wins — confirm the duplication is intentional.
    """
    if not isinstance(clickNums, list):
        clickNums = [clickNums]
    if randState is None:
        randState = _np.random.RandomState(seed)
    perturbedGatesets = [targetGS.randomize_with_unitary(scale=randStr,
                                                         randState=randState)
                         for n in range(numPertGS)]
    maxLengths = [0] + [2**n for n in range(maxLengthsPower + 1)]
    expList = constr.make_lsgst_experiment_list(
        targetGS.gates.keys(), prepFiducials, effectFiducials, germs,
        maxLengths)
    errorDict = {}
    resultDict = {}
    for trueGatesetNum, trueGateset in enumerate(perturbedGatesets):
        for numClicks in clickNums:
            for run in range(numRuns):
                # GST may fail stochastically; retry up to 10 times.
                success = False
                failCount = 0
                while not success and failCount < 10:
                    try:
                        ds = constr.generate_fake_data(
                            trueGateset, expList, nSamples=numClicks,
                            sampleError="binomial", randState=randState)
                        result = pygsti.do_long_sequence_gst(
                            ds, targetGS, prepFiducials, effectFiducials,
                            germs, maxLengths, gaugeOptRatio=gaugeOptRatio,
                            constrainToTP=constrainToTP)
                        # Per iteration (first entry skipped): SPAM-ignoring
                        # distance of the gauge-optimized estimate to the
                        # true gateset, paired with that iteration's L.
                        errors = [(trueGateset
                                   .frobeniusdist(
                                       alg.optimize_gauge(
                                           estimate, 'target',
                                           targetGateset=trueGateset,
                                           constrainToTP=constrainToTP,
                                           spamWeight=0.0),
                                       spamWeight=0.0), L)
                                  for estimate, L in zip(
                                      result
                                      .gatesets['iteration estimates'][1:],
                                      result
                                      .parameters['max length list'][1:])]
                        resultDict[trueGatesetNum, numClicks, run] = result
                        errorDict[trueGatesetNum, numClicks, run] = errors
                        success = True
                    except Exception as e:
                        failCount += 1
                        # Abort after 10 failures; otherwise report and retry.
                        if failCount == 10:
                            raise e
                        print(e)
    return obj.GermSetEval(germset=germs, gatesets=perturbedGatesets,
                           resultDict=resultDict, errorDict=errorDict)
def test_auto_experiment_design(self):
    """Exercise automatic germ/fiducial selection end to end.

    Builds a 1-qubit Gi / X(pi/2) / Y(pi/2) target gateset, runs hands-off
    germ and fiducial generation (seeded for determinism), assembles the
    LSGST experiment list, then covers alternative algorithms, candidate
    restrictions, and the invalid-algorithm error path.
    """
    target = constr.build_gateset(
        [2], [('Q0',)], ['Gi', 'Gx', 'Gy'],
        ["I(Q0)", "X(pi/2,Q0)", "Y(pi/2,Q0)"])

    # Hands-off selection; seed makes the germ search deterministic.
    auto_germs = germsel.generate_germs(target, seed=2017)
    prep_fids, meas_fids = fidsel.generate_fiducials(target)

    # returnAll just yields extra info from the fiducial search.
    all_prep, all_meas = fidsel.generate_fiducials(
        target, algorithm_kwargs={'returnAll': True})

    # An unknown algorithm name must raise.
    with self.assertRaises(ValueError):
        fidsel.generate_fiducials(target, algorithm='foobar')

    # Experiment sizes: 0 then 1, 2, 4, ..., 256 gates between fiducials.
    length_schedule = [0] + [2**n for n in range(8 + 1)]
    experiments = constr.make_lsgst_experiment_list(
        target.gates.keys(), prep_fids, meas_fids,
        auto_germs, length_schedule)

    # Alternative algorithms and candidate restrictions.
    grasp_germs = germsel.generate_germs(
        target, algorithm='grasp', seed=2017, numGSCopies=2,
        candidateGermCounts={3: 'all upto', 4: 10, 5: 10, 6: 10},
        candidateSeed=2017, algorithm_kwargs={'iterations': 1})
    slack_prep, slack_meas = fidsel.generate_fiducials(
        target, algorithm='slack', algorithm_kwargs={'slackFrac': 0.25})
    # slackFrac defaults to 1.0 when neither slackFrac nor fixedSlack
    # is supplied — presumably; verify against fidsel's implementation.
    fidsel.generate_fiducials(target, algorithm='slack')
    germs_len3 = germsel.generate_germs(
        target, candidateGermCounts={3: 'all upto'}, seed=2017)
    uniform_prep, uniform_meas = fidsel.generate_fiducials(
        target, maxFidLength=3, algorithm='grasp',
        algorithm_kwargs={'iterations': 100})
    incomplete_prep, incomplete_meas = fidsel.generate_fiducials(
        target, maxFidLength=1)
    non_singleton_germs = germsel.generate_germs(
        target, numGSCopies=2, force=None,
        candidateGermCounts={4: 'all upto'}, algorithm='grasp',
        algorithm_kwargs={'iterations': 5}, seed=2017)
    omit_id_prep, omit_id_meas = fidsel.generate_fiducials(
        target, omitIdentity=False, gatesToOmit=['Gi'])
def test_auto_experiment_desgin(self):
    # NOTE(review): "desgin" typo kept — renaming would change the test id.
    # NOTE(review): near-duplicate of an identically-named test elsewhere in
    # this file (that one passes forceSingletons=False; this one force=None).
    """Exercise hands-off germ/fiducial selection and several variants.

    Builds a 1-qubit Gi / X(pi/2) / Y(pi/2) target gateset, generates
    germs and fiducials automatically, assembles the LSGST experiment
    list, then repeats selection with alternative algorithms and
    length/identity constraints.
    """
    # Target gateset with explicit SPAM definitions.
    gs_target = constr.build_gateset([2], [('Q0', )], ['Gi', 'Gx', 'Gy'],
                                     ["I(Q0)", "X(pi/2,Q0)",
                                      "Y(pi/2,Q0)"],
                                     prepLabels=['rho0'],
                                     prepExpressions=["0"],
                                     effectLabels=['E0'],
                                     effectExpressions=["1"],
                                     spamdefs={
                                         'plus': ('rho0', 'E0'),
                                         'minus': ('rho0', 'remainder')
                                     })
    # Most hands-off approach: only the target gateset is provided.
    germs = germsel.generate_germs(gs_target)
    prepFiducials, measFiducials = fidsel.generate_fiducials(gs_target)
    # Experiment sizes: 0 then 1, 2, 4, ..., 256 gates between fiducials.
    maxLengths = [0] + [2**n for n in range(8 + 1)]
    listOfExperiments = constr.make_lsgst_experiment_list(
        gs_target.gates.keys(), prepFiducials, measFiducials, germs,
        maxLengths)
    # Alternative selection algorithms and constraints.
    graspGerms = germsel.generate_germs(gs_target, algorithm='grasp',
                                        algorithm_kwargs={'iterations': 1})
    slackPrepFids, slackMeasFids = fidsel.generate_fiducials(
        gs_target, algorithm='slack', algorithm_kwargs={'slackFrac': 0.25})
    # Notebook artifact: result of max(...) is discarded.
    max([len(germ) for germ in germs])
    germsMaxLength5 = germsel.generate_germs(gs_target, maxGermLength=5)
    max([len(germ) for germ in germsMaxLength5])
    germsMaxLength3 = germsel.generate_germs(gs_target, maxGermLength=3)
    uniformPrepFids, uniformMeasFids = fidsel.generate_fiducials(
        gs_target,
        maxFidLength=3,
        algorithm='grasp',
        algorithm_kwargs={'iterations': 100})
    # A length-1 limit yields an (expected) incomplete fiducial set.
    incompletePrepFids, incompleteMeasFids = fidsel.generate_fiducials(
        gs_target, maxFidLength=1)
    nonSingletonGerms = germsel.generate_germs(
        gs_target,
        force=None,
        maxGermLength=4,
        algorithm='grasp',
        algorithm_kwargs={'iterations': 5})
    # Identity gate deliberately excluded from the fiducial alphabet.
    omitIdentityPrepFids, omitIdentityMeasFids = fidsel.generate_fiducials(
        gs_target, omitIdentity=False, gatesToOmit=['Gi'])
def main():
    """Micro-benchmark: hashing an LSGST sequence tuple vs. digesting a UUID.

    Builds a full 1-qubit LSGST experiment list, times `hash()` over the
    whole tuple of sequences, times the smartcache digest of a UUID for
    comparison, and prints the per-call averages.
    """
    gate_labels = ['Gi', 'Gx', 'Gy']
    # Fiducials for the 1-qubit MUB.
    fiducial_list = pc.gatestring_list([
        (), ('Gx',), ('Gy',), ('Gx', 'Gx'),
        ('Gx', 'Gx', 'Gx'), ('Gy', 'Gy', 'Gy')])
    germ_list = pc.gatestring_list([
        ('Gx',), ('Gy',), ('Gi',),
        ('Gx', 'Gy'),
        ('Gx', 'Gy', 'Gi'), ('Gx', 'Gi', 'Gy'), ('Gx', 'Gi', 'Gi'),
        ('Gy', 'Gi', 'Gi'),
        ('Gx', 'Gx', 'Gi', 'Gy'), ('Gx', 'Gy', 'Gy', 'Gi'),
        ('Gx', 'Gx', 'Gy', 'Gx', 'Gy', 'Gy')])
    length_schedule = [1, 2, 4, 8, 16, 32, 64, 128, 256]
    sequences = pc.make_lsgst_experiment_list(
        gate_labels, fiducial_list, fiducial_list, germ_list,
        length_schedule)
    lsgst_tuple = tuple(sequences)

    iterations = 1000
    timings = {}
    with timed_block('hash_gatestring_list', timings):
        for _ in range(iterations):
            hash(lsgst_tuple)

    sample_uuid = uuid.uuid4()
    digest = pygsti.tools.smartcache.digest
    with timed_block('digest_uuid', timings):
        for _ in range(iterations):
            digest(sample_uuid)

    print('Hashing gslist of length {} takes {} seconds on average'.format(
        len(lsgst_tuple), timings['hash_gatestring_list'] / iterations))
    print('UUID digest takes {} seconds on average'.format(
        timings['digest_uuid'] / iterations))