Example #1
def load_pygsti_dataset(filename):
    """
    Loads a pygsti dataset from file.

    This is a wrapper that just checks the first line, and replaces it with the newer
    outcome specification format if it's the old type.
    """
    try:
        with open(filename, "r") as file:
            lines = file.readlines()
    except IOError:
        print("File not found, or other file IO error.")
        raise  # re-raise: we cannot continue without the file contents

    # Rewrite an old-style fixed-column header in the newer outcome
    # specification format before handing the file to pygsti.
    if lines[0] == "## Columns = 00 count, 01 count, 10 count, 11 count\n":
        lines[0] = "## Columns = 0:0 count, 0:1 count, 1:0 count, 1:1 count\n"
        with open(filename, "w") as file:
            file.writelines(lines)

    data = _pygio.read_dataset(filename)

    return data
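A minimal usage sketch (the filename is hypothetical; `_pygio` is assumed to be pyGSTi's io module, imported as in the original source):

ds = load_pygsti_dataset("my_dataset.txt")
print(ds)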
Example #2
    def test_load_ignore_zero_count_lines3(self, pth):
        contents = ("## Outcomes = 0, 1\n"
                    "Gc1 0:1 1:1 # {'test': 1}\n"
                    "Gc2  # {'test': 1}\n")
        with open(pth, 'w') as f:
            f.write(contents)

        ds = io.read_dataset(pth, ignore_zero_count_lines=False)
        self.assertEqual(ds[Circuit('Gc1')]['0'], 1)
        self.assertEqual(ds[Circuit('Gc2')]['0'], 0)

        self.assertEqual(ds[Circuit('Gc1')].aux['test'], 1)
        self.assertEqual(ds[Circuit('Gc2')].aux['test'], 1)
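For contrast, a hedged sketch of reading the same file with `ignore_zero_count_lines` left at its (assumed) default of True, in which case the zero-count 'Gc2' line should be skipped entirely:

        # Hedged contrast: with ignore_zero_count_lines=True (assumed default),
        # the zero-count 'Gc2' line is dropped, so the circuit is absent.
        ds_default = io.read_dataset(pth)
        assert Circuit('Gc2') not in ds_default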
Example #3
def _load_dataset(data_filename_or_set, comm, verbosity):
    """Loads a DataSet from the data_filename_or_set argument of functions in this module."""
    printer = _baseobjs.VerbosityPrinter.create_printer(verbosity, comm)
    if isinstance(data_filename_or_set, str):
        if comm is None or comm.Get_rank() == 0:
            if _os.path.splitext(data_filename_or_set)[1] == ".pkl":
                with open(data_filename_or_set, 'rb') as pklfile:
                    ds = _pickle.load(pklfile)
            else:
                ds = _io.read_dataset(data_filename_or_set, True, "aggregate",
                                      printer)
            if comm is not None: comm.bcast(ds, root=0)
        else:
            ds = comm.bcast(None, root=0)
    else:
        ds = data_filename_or_set  # assume a Dataset object

    return ds
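A hedged usage sketch under MPI, assuming mpi4py is available and a hypothetical data file; rank 0 reads the file and the resulting DataSet is broadcast to the other ranks:

# Hypothetical MPI usage (mpi4py assumed available):
from mpi4py import MPI

ds = _load_dataset("my_experiment_data.txt", MPI.COMM_WORLD, verbosity=1)
# All ranks now hold the same DataSet; pass comm=None for serial use.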
Example #4
    def test_load_ignore_zero_count_lines4(self, pth):
        c1 = Circuit('Gc1')
        c2 = Circuit('Gc2')
        c3 = Circuit('Gc3')

        ds = DataSet()

        ds.add_count_dict(c1, {}, aux={'test': 1})
        ds.add_count_dict(c2, {'0': 1}, aux={'test': 1})
        ds.add_count_dict(c3, {}, aux={'test': 1})
        #print(ds)

        io.write_dataset(pth, ds, fixed_column_mode=False)
        ds = io.read_dataset(pth, ignore_zero_count_lines=False)

        self.assertEqual(ds[c1]['0'], 0)
        self.assertEqual(ds[c2]['0'], 1)
        # especially make sure the last line is read in properly!
        self.assertEqual(ds[c3]['0'], 0)

        self.assertEqual(ds[c1].aux['test'], 1)
        self.assertEqual(ds[c2].aux['test'], 1)
        self.assertEqual(ds[c3].aux['test'], 1)
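Presumably this round trip writes something like the per-line outcome:count format shown in Example #2 above (a hedged reconstruction, not verified output); the zero-count lines carrying only an aux comment are why `ignore_zero_count_lines=False` is needed on read-back:

## Outcomes = 0
Gc1  # {'test': 1}
Gc2 0:1  # {'test': 1}
Gc3  # {'test': 1}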
Example #5
def load_data_into_benchmarker(dsfilenames=None,
                               summarydatasets_filenames=None,
                               summarydatasets_folder=None,
                               predicted_summarydatasets_folders=None,
                               verbosity=1):
    """
    Loads RB data into a Benchmarker, either from full DataSet files
    (`dsfilenames`) or from RB summary datasets, specified either by
    filename (`summarydatasets_filenames`) or by folder
    (`summarydatasets_folder`).
    """
    # Avoid a mutable default argument.
    if predicted_summarydatasets_folders is None:
        predicted_summarydatasets_folders = {}

    if len(predicted_summarydatasets_folders) > 0:
        assert (summarydatasets_folder is not None)
        #if len(predicted_summarydatasets_folders) > 1:
        #    raise NotImplementedError("This is not yet supported!")

    if dsfilenames is not None:

        # If it is a filename, then we import the dataset from file.
        if isinstance(dsfilenames, str):
            dsfilenames = [dsfilenames]
        elif not isinstance(dsfilenames, list):
            raise ValueError("dsfilenames must be a str or a list of strings!")

        mds = _mds.MultiDataSet()
        for dsfn_ind, dsfn in enumerate(dsfilenames):

            if dsfn.endswith('.txt'):
                print(dsfn)
                mds.add_dataset(
                    dsfn_ind,
                    _io.read_dataset(dsfn,
                                     collision_action='keepseparate',
                                     record_zero_counts=False,
                                     ignore_zero_count_lines=False,
                                     verbosity=verbosity))

            elif dsfn.endswith('.pkl'):

                if verbosity > 0:
                    print(" - Loading DataSet from pickle file...", end='')
                with open(dsfn, 'rb') as f:
                    mds.add_dataset(dsfn_ind, _pickle.load(f))
                if verbosity > 0:
                    print("complete.")

            else:
                raise ValueError("File must end in .pkl or .txt!")

        # # If it isn't a string, we assume that `dsfilenames` is a DataSet.
        # else:

        #     ds = dsfilenames

        if verbosity > 0:
            print(" - Extracting metadata from the DataSet...", end='')

        # To store the aux information about the RB experiments.
        all_spec_filenames = []
        # circuits_for_specfile = {}
        # outdslist = []

        # We go through the dataset and extract all the necessary auxiliary information.
        for circ in mds[mds.keys()[0]].keys():

            # The spec filename or names for this circuit
            specfns_forcirc = mds.auxInfo[circ]['spec']
            # The RB length for this circuit
            # try:
            # l = mds.auxInfo[circ]['depth']
            # except:
            # l = mds.auxInfo[circ]['length']
            # The target bitstring for this circuit.
            # target = mds.auxInfo[circ]['target']

            # This can be a string (a single spec filename) or a list, so always make it a list.
            if isinstance(specfns_forcirc, str):
                specfns_forcirc = [specfns_forcirc]

            for sfn_forcirc in specfns_forcirc:
                # If this is the first instance of seeing this filename then...
                if sfn_forcirc not in all_spec_filenames:
                    # ... we store it in the list of all spec filenames to import later.
                    all_spec_filenames.append(sfn_forcirc)
                    # And it won't yet be a key in the circuits_for_specfile dict, so we add it.
            #         circuits_for_specfile[sfn_forcirc] = {}

            #     # If we've not yet had this length for that spec filename, we add that as a key.
            #     if l not in circuits_for_specfile[sfn_forcirc].keys():
            #         circuits_for_specfile[sfn_forcirc][l] = []

            #     # We add the circuit and target output to the dict for the corresponding spec files.
            #     circuits_for_specfile[sfn_forcirc][l].append((circ, target))

            # circ_specindices = []
            # for sfn_forcirc in specfns_forcirc:
            #     circ_specindices.append(all_spec_filenames.index(sfn_forcirc))

        if verbosity > 0:
            print("complete.")
            print(" - Reading in the metadata from the extracted filenames...",
                  end='')

        # We put RB specs that we create via file import (and the circuits above) into this dict
        rbspecdict = {}

        # We look for spec files in the same directory as the datafiles, so we find what that is.
        # THIS REQUIRES ALL THE FILES TO BE IN THE SAME DIRECTORY
        directory = dsfilenames[0].split('/')
        directory = '/'.join(directory[:-1])
        if len(directory) > 0:
            directory += '/'

        for specfilename in all_spec_filenames:

            # Import the RB spec file.
            rbspec = load_benchmarkspec(directory + specfilename)
            # Add in the circuits that correspond to each spec, extracted from the dataset.
            # rbspec.add_circuits(circuits_for_specfile[specfilename])
            # Record the spec in a list, to be given to an RBAnalyzer object.
            rbspecdict[specfilename] = rbspec

        if verbosity > 0:
            print("complete.")
            print(" - Recording all of the data in a Benchmarker...", end='')

        # Put everything into an RBAnalyzer object, which is a container for RB data, and return this.
        benchmarker = _benchmarker.Benchmarker(rbspecdict,
                                               ds=mds,
                                               summary_data=None)

        if verbosity > 0: print("complete.")

        return benchmarker

    elif (summarydatasets_filenames is not None) or (summarydatasets_folder
                                                     is not None):

        rbspecdict = {}

        # If a dict, it's just the keys of the dict that are the rbspec file names.
        if summarydatasets_filenames is not None:

            specfiles = list(summarydatasets_filenames.keys())

        # If a folder, we look for files in that folder with the standard name format.
        elif summarydatasets_folder is not None:
            specfiles = []
            specfilefound = True
            i = 0
            while specfilefound:
                try:
                    filename = summarydatasets_folder + "/spec{}.txt".format(i)
                    with open(filename, 'r') as f:
                        if verbosity > 0:
                            print(filename + " found")
                    specfiles.append(filename)
                    i += 1
                except IOError:
                    specfilefound = False
                    if verbosity > 0:
                        print(filename +
                              " not found so terminating spec file search.")

        for sfn_ind, specfilename in enumerate(specfiles):

            rbspec = load_benchmarkspec(specfilename)
            rbspecdict[sfn_ind] = rbspec

        summary_data = {}
        predicted_summary_data = {
            pkey: {}
            for pkey in predicted_summarydatasets_folders.keys()
        }

        for i, (specfilename,
                rbspec) in enumerate(zip(specfiles, rbspecdict.values())):

            structure = rbspec.get_structure()
            summary_data[i] = {}
            for pkey in predicted_summarydatasets_folders.keys():
                predicted_summary_data[pkey][i] = {}

            # Predicted summary data is only supported alongside a summary-data
            # folder, so default to an empty dict for the filenames branch
            # (otherwise the loop over predsds_filenames_dict below would
            # raise a NameError).
            predsds_filenames_dict = {}
            if summarydatasets_filenames is not None:
                sds_filenames = summarydatasets_filenames[specfilename]
            elif summarydatasets_folder is not None:
                sds_filenames = [
                    summarydatasets_folder + '/{}-{}.txt'.format(i, j)
                    for j in range(len(structure))
                ]
                for pkey, pfolder in predicted_summarydatasets_folders.items():
                    predsds_filenames_dict[pkey] = [
                        pfolder + '/{}-{}.txt'.format(i, j)
                        for j in range(len(structure))
                    ]

            for sdsfn, qubits in zip(sds_filenames, structure):
                summary_data[i][qubits] = import_rb_summary_data(
                    sdsfn, len(qubits), verbosity=verbosity)

            for pkey, predsds_filenames in predsds_filenames_dict.items():
                for sdsfn, qubits in zip(predsds_filenames, structure):
                    predicted_summary_data[pkey][i][
                        qubits] = import_rb_summary_data(sdsfn,
                                                         len(qubits),
                                                         verbosity=verbosity)

        benchmarker = _benchmarker.Benchmarker(
            rbspecdict,
            ds=None,
            summary_data=summary_data,
            predicted_summary_data=predicted_summary_data)

        return benchmarker

    else:
        raise ValueError(
            "Either a filename for a DataSet or filenames for a set of RBSpecs "
            + "and RBSummaryDatasets must be provided!")
Example #6
def load_benchmarker(directory, load_datasets=True, verbosity=1):
    """
    Loads a Benchmarker from `directory`, which is expected to contain a
    'global.txt' file plus 'data/', 'specs/', 'summarydata/', 'aux/' and
    'predictions/' subpaths, as read below.
    """
    with open(directory + '/global.txt', 'r') as f:
        globaldict = _json.load(f)

    numpasses = globaldict['numpasses']
    speckeys = globaldict['speckeys']
    success_key = globaldict['success_key']
    success_outcome = globaldict['success_outcome']
    dscomparator = globaldict['dscomparator']

    if load_datasets:
        dskeys = [
            dskey.name for dskey in _os.scandir(directory + '/data')
            if dskey.is_dir()
        ]
        multidsdict = {dskey: _mds.MultiDataSet() for dskey in dskeys}

        for dskey in dskeys:
            for passnum in range(numpasses):
                dsfn = directory + '/data/{}/ds{}.txt'.format(dskey, passnum)
                ds = _io.read_dataset(dsfn,
                                      collision_action='keepseparate',
                                      record_zero_counts=False,
                                      ignore_zero_count_lines=False,
                                      verbosity=verbosity)
                multidsdict[dskey].add_dataset(passnum, ds)
    else:
        multidsdict = None

    specs = {}
    for i, speckey in enumerate(speckeys):
        specs[speckey] = load_benchmarkspec(directory +
                                            '/specs/{}.txt'.format(i))

    summary_data = {'global': {}, 'pass': {}, 'aux': {}}
    predictionkeys = [
        pkey.name for pkey in _os.scandir(directory + '/predictions')
        if pkey.is_dir()
    ]
    predicted_summary_data = {pkey: {} for pkey in predictionkeys}

    for i, spec in enumerate(specs.values()):

        summary_data['pass'][i] = {}
        summary_data['global'][i] = {}
        summary_data['aux'][i] = {}
        for pkey in predictionkeys:
            predicted_summary_data[pkey][i] = {}

        structure = spec.get_structure()

        for j, qubits in enumerate(structure):

            # Import the summary data for that spec and qubit subset
            with open(directory + '/summarydata/{}-{}.txt'.format(i, j),
                      'r') as f:
                sd = _json.load(f)
                summary_data['pass'][i][qubits] = {}
                for dtype, data in sd['pass'].items():
                    summary_data['pass'][i][qubits][dtype] = {
                        int(key): value
                        for (key, value) in data.items()
                    }
                summary_data['global'][i][qubits] = {}
                for dtype, data in sd['global'].items():
                    summary_data['global'][i][qubits][dtype] = {
                        int(key): value
                        for (key, value) in data.items()
                    }

            # Import the auxiliary data
            with open(directory + '/aux/{}-{}.txt'.format(i, j), 'r') as f:
                aux = _json.load(f)
                summary_data['aux'][i][qubits] = {}
                for dtype, data in aux.items():
                    summary_data['aux'][i][qubits][dtype] = {
                        int(key): value
                        for (key, value) in data.items()
                    }

            # Import the predicted summary data for that spec and qubit subset
            for pkey in predictionkeys:
                with open(
                        directory +
                        '/predictions/{}/summarydata/{}-{}.txt'.format(
                            pkey, i, j), 'r') as f:
                    psd = _json.load(f)
                    predicted_summary_data[pkey][i][qubits] = {}
                    for dtype, data in psd.items():
                        predicted_summary_data[pkey][i][qubits][dtype] = {
                            int(key): value
                            for (key, value) in data.items()
                        }

    benchmarker = _benchmarker.Benchmarker(
        specs,
        ds=multidsdict,
        summary_data=summary_data,
        predicted_summary_data=predicted_summary_data,
        dstype='dict',
        success_outcome=success_outcome,
        success_key=success_key,
        dscomparator=dscomparator)

    return benchmarker
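Hypothetical usage, assuming `directory` holds the layout read above ('global.txt' plus 'data/', 'specs/', 'summarydata/', 'aux/' and 'predictions/'):

benchmarker = load_benchmarker('my_benchmark_dir', load_datasets=True, verbosity=1)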