Example #1
    def test_load_dataset_from_cache(self, tmp_path):
        tmp_path = str(tmp_path)
        ds = io_gen.ds
        io.write_dataset(tmp_path, ds)
        ds3 = readers.read_dataset(tmp_path, cache=True)  # creates cache file
        ds4 = readers.read_dataset(tmp_path, cache=True)  # loads from cache file
        # TODO assert cache behavior

        ds.comment = "# Hello"  # comment character doesn't get doubled...
        io.write_dataset(tmp_path, ds)
        ds5 = readers.read_dataset(tmp_path, cache=True)  # rewrites cache file
        for s in ds:
            self.assertEqual(ds[s]['0'], ds5[s][('0',)])
            self.assertEqual(ds[s]['1'], ds5[s][('1',)])
Example #2
    def test_stdpractice_gst_file_args(self, ds_path, model_path,
                                       fiducial_path, germ_path):
        io.write_dataset(ds_path, self.ds, self.lsgstStrings[-1])
        io.write_model(self.model, model_path)
        io.write_circuit_list(fiducial_path, self.fiducials)
        io.write_circuit_list(germ_path, self.germs)

        result = ls.do_stdpractice_gst(ds_path,
                                       model_path,
                                       fiducial_path,   # prep fiducials
                                       fiducial_path,   # measurement fiducials (same file)
                                       germ_path,
                                       self.maxLens,
                                       modes="TP",
                                       comm=None,
                                       memLimit=None,
                                       verbosity=5)
Example #3
    def test_long_sequence_gst_with_file_args(self, ds_path, model_path,
                                              fiducial_path, germ_path):
        io.write_dataset(ds_path, self.ds, self.lsgstStrings[-1])
        io.write_model(self.model, model_path)
        io.write_circuit_list(fiducial_path, self.fiducials)
        io.write_circuit_list(germ_path, self.germs)

        self.options.update(randomizeStart=1e-6,
                            profile=2,
                            verbosity=10,
                            memoryLimitInBytes=2 * 1000**3)
        result = ls.do_long_sequence_gst(ds_path,
                                         model_path,
                                         fiducial_path,
                                         fiducial_path,
                                         germ_path,
                                         self.maxLens,
                                         advancedOptions=self.options)
Example #4
    def test_long_sequence_gst_with_file_args(self, ds_path, model_path,
                                              fiducial_path, germ_path):
        io.write_dataset(ds_path, self.ds, self.lsgstStrings[-1])
        io.write_model(self.model, model_path)
        io.write_circuit_list(fiducial_path, self.fiducials)
        io.write_circuit_list(germ_path, self.germs)

        self.options.update(
            randomize_start=1e-6,
            profile=2,
        )
        result = ls.run_long_sequence_gst(ds_path,
                                          model_path,
                                          fiducial_path,
                                          fiducial_path,
                                          germ_path,
                                          self.maxLens,
                                          advanced_options=self.options,
                                          verbosity=10)
Example #5
    def test_stdpractice_gst_file_args(self, ds_path, model_path,
                                       fiducial_path, germ_path):
        io.write_dataset(ds_path, self.ds, self.lsgstStrings[-1])
        io.write_circuit_list(fiducial_path, self.fiducials)
        io.write_circuit_list(germ_path, self.germs)
        target_model = create_explicit_model(self.pspec,
                                             ideal_gate_type='static')
        io.write_model(target_model, model_path)

        result = ls.run_stdpractice_gst(ds_path,
                                        model_path,
                                        fiducial_path,
                                        fiducial_path,
                                        germ_path,
                                        self.maxLens,
                                        modes="full TP",
                                        comm=None,
                                        mem_limit=None,
                                        verbosity=5)
Example #6
    def test_load_ignore_zero_count_lines4(self, pth):
        c1 = Circuit('Gc1')
        c2 = Circuit('Gc2')
        c3 = Circuit('Gc3')

        ds = DataSet()

        ds.add_count_dict(c1, {}, aux={'test': 1})
        ds.add_count_dict(c2, {'0': 1}, aux={'test': 1})
        ds.add_count_dict(c3, {}, aux={'test': 1})

        io.write_dataset(pth, ds, fixed_column_mode=False)
        ds = io.read_dataset(pth, ignore_zero_count_lines=False)

        self.assertEqual(ds[c1]['0'], 0)
        self.assertEqual(ds[c2]['0'], 1)
        self.assertEqual(ds[c3]['0'], 0)  # especially make sure the last line is read in properly!

        self.assertEqual(ds[c1].aux['test'], 1)
        self.assertEqual(ds[c2].aux['test'], 1)
        self.assertEqual(ds[c3].aux['test'], 1)
Example #7
def write_benchmarker(benchmarker, outdir, overwrite=False, verbosity=0):
    """Write a benchmarker to `outdir` as a directory of text files (JSON for
    metadata and summary data, pyGSTi dataset text format for the datasets)."""

    try:
        _os.makedirs(outdir)
        if verbosity > 0:
            print(" - Created `" + outdir +
                  "` folder to store benchmarker in txt format.")
    except FileExistsError:
        if overwrite:
            if verbosity > 0:
                print(
                    " - `" + outdir +
                    "` folder already exists. Will write data into that folder."
                )
        else:
            raise ValueError(
                "Directory already exists! Set overwrite to True or change the directory name!"
            )

    globaldict = {}
    globaldict['speckeys'] = benchmarker._speckeys
    globaldict['numpasses'] = benchmarker.numpasses
    globaldict['success_outcome'] = benchmarker.success_outcome
    globaldict['success_key'] = benchmarker.success_key

    if benchmarker.dscomparator is not None:
        dsc = benchmarker.dscomparator
        globaldict['dscomparator'] = {
            'pVal_pseudothreshold': dsc.pVal_pseudothreshold,
            'llr_pseudothreshold': dsc.llr_pseudothreshold,
            'jsd_pseudothreshold': dsc.jsd_pseudothreshold,
            'aggregate_llr': dsc.aggregate_llr,
            'aggregate_llr_threshold': dsc.aggregate_llr_threshold,
            'aggregate_nsigma': dsc.aggregate_nsigma,
            'aggregate_nsigma_threshold': dsc.aggregate_nsigma_threshold,
            'aggregate_pVal': dsc.aggregate_pVal,
            'aggregate_pVal_threshold': dsc.aggregate_pVal_threshold,
            'inconsistent_datasets_detected': dsc.inconsistent_datasets_detected,
            'number_of_significant_sequences': int(dsc.number_of_significant_sequences),
            'significance': dsc.significance,
        }
    else:
        globaldict['dscomparator'] = None

    # Write global details to file
    with open(outdir + '/global.txt', 'w') as f:
        _json.dump(globaldict, f, indent=4)

    _os.makedirs(outdir + '/specs')
    _os.makedirs(outdir + '/summarydata')
    _os.makedirs(outdir + '/aux')

    for pkey in benchmarker.predicted_summary_data.keys():
        _os.makedirs(outdir + '/predictions/{}/summarydata'.format(pkey))

    for i, spec in enumerate(benchmarker._specs):
        structure = spec.get_structure()
        write_benchmarkspec(spec,
                            outdir + '/specs/{}.txt'.format(i),
                            warning=0)

        for j, qubits in enumerate(structure):
            summarydict = {
                'pass': benchmarker.pass_summary_data[i][qubits],
                'global': benchmarker.global_summary_data[i][qubits]
            }
            fname = outdir + '/summarydata/' + '{}-{}.txt'.format(i, j)
            with open(fname, 'w') as f:
                _json.dump(summarydict, f, indent=4)

            aux = benchmarker.aux[i][qubits]
            fname = outdir + '/aux/' + '{}-{}.txt'.format(i, j)
            with open(fname, 'w') as f:
                _json.dump(aux, f, indent=4)

            for pkey in benchmarker.predicted_summary_data.keys():
                summarydict = benchmarker.predicted_summary_data[pkey][i][
                    qubits]
                fname = outdir + '/predictions/{}/summarydata/'.format(
                    pkey) + '{}-{}.txt'.format(i, j)
                with open(fname, 'w') as f:
                    _json.dump(summarydict, f, indent=4)

    for dskey in benchmarker.multids.keys():
        fdir = outdir + '/data/{}'.format(dskey)
        _os.makedirs(fdir)
        for dsind in benchmarker.multids[dskey].keys():
            fname = fdir + '/ds{}.txt'.format(dsind)
            _io.write_dataset(fname,
                              benchmarker.multids[dskey][dsind],
                              fixed_column_mode=False)
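
All of the calls above come from pyGSTi's tests and tooling. As a minimal standalone sketch of the same write/read round trip (import paths are an assumption based on a recent pyGSTi layout and may differ in older releases, where these classes live under pygsti.objects):

# Minimal sketch of the io.write_dataset / io.read_dataset round trip.
# Assumption: recent pyGSTi module layout (pygsti.circuits / pygsti.data).
from pygsti import io
from pygsti.circuits import Circuit
from pygsti.data import DataSet

# Build a small dataset of outcome counts, as in Example #6.
ds = DataSet()
ds.add_count_dict(Circuit('Gc1'), {'0': 40, '1': 60})
ds.add_count_dict(Circuit('Gc2'), {'0': 55, '1': 45})

# Write it to the pyGSTi text format, then read it back.
io.write_dataset('example_dataset.txt', ds, fixed_column_mode=False)
ds2 = io.read_dataset('example_dataset.txt')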