Code example #1
File: client.py  Project: ioverka/wos
    def search(self, query, count=5, offset=1):
        """Perform a query. Check the WOS documentation for v3 syntax."""
        if not self._SID:
            raise RuntimeError('Session not open. Invoke .connect() before.')

        qparams = _OrderedDict([('databaseId', 'WOS'),
                                ('userQuery', query),
                                ('queryLanguage', 'en')])

        rparams = _OrderedDict([('firstRecord', offset),
                                ('count', count),
                                ('sortField', _OrderedDict([('name', 'RS'),
                                                            ('sort', 'D')]))])

        return self._search.service.search(qparams, rparams)
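A minimal usage sketch (not part of the original snippet; the client class name, constructor arguments, and query string are assumptions, while .connect() and .search() come from the code above):

# Hypothetical usage of the method above.
client = WosClient('someuser', 'somepassword')   # assumed class and constructor
client.connect()                                  # opens the session and sets the SID
result = client.search('TS=(graphene)', count=10, offset=1)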
Code example #2
    def _from_program_image(
        cls, program_image, base_offset, dominions_version
    ):
        """ Extracts the table from a program image.
            (Internal version - override as needed.) """

        RECORD_SIZE \
        = cls._ROW_CLASS.PROGRAM_IMAGE_RECORD_SIZE( dominions_version )

        table       = _OrderedDict( )
        offset      = base_offset
        number      = 0

        try:
            while True:

                table[ number ] = cls._ROW_CLASS.from_program_image(
                    program_image, offset, number, dominions_version
                )

                number += 1
                offset += RECORD_SIZE

        except StopIteration:
            pass

        return cls( table )
Code example #3
 def _submit(self, operationUrl, data):
     # type: (str, dict) -> dict
     
     orderedData = _OrderedDict()
     isBatch = "batch" in operationUrl
     
     if not self.submitRequests and "format" in data.keys():
         data.pop("format")
     
     for key in sorted(data.keys()):
         orderedData[key] = data[key]
     data = orderedData
     
     requestUrls = data.pop("requests") if isBatch else []
     requestAsParams = "&".join(["requests[]=" + url for url in requestUrls]) if isBatch else ""
         
     urlParams = _urlencode(data)
     urlParams += "&" + requestAsParams if isBatch else ""
     urlToSignature = operationUrl + urlParams + self.privateKey
     signature = _md5(urlToSignature.encode()).hexdigest()
     finalUrl = operationUrl + urlParams + "&signature=" + signature
     
     if self.submitRequests:
         if _DEBUG: print("Requesting URL:", finalUrl)
         response = _urlopen(finalUrl).read().decode()
         
         if self.responseFormat == "json":
             return _literal_eval(response)["response"]
         else:
             return response
     else:
         if _DEBUG: print("Generated URL:", finalUrl)
         return finalUrl
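The signing scheme above (sort the parameters, URL-encode them, append the private key, and MD5 the whole string) can be reproduced on its own with the standard library. This is an illustrative sketch, not the project's API; the endpoint, key, and parameter names are made up:

import hashlib
from collections import OrderedDict
from urllib.parse import urlencode

operation_url = "https://tracking.example.com/affiliates/api/report?"  # made-up endpoint
private_key = "my-private-key"                                          # made-up key

data = {"format": "json", "affiliate_id": "42"}
ordered_data = OrderedDict((k, data[k]) for k in sorted(data))  # same key ordering as above

url_params = urlencode(ordered_data)
signature = hashlib.md5((operation_url + url_params + private_key).encode()).hexdigest()
final_url = operation_url + url_params + "&signature=" + signature
print(final_url)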
Code example #4
    def from_csv( cls, istream, dominions_version ):
        """ Creates an instance from a stream of CSV rows. """

        sniffer = _csv.Sniffer( )
        if not sniffer.has_header( istream.read( 1024 ) ):
            raise IOError( "Invalid CSV input stream." )
        istream.seek( 0 )
        dialect = sniffer.sniff( istream.read( 1024 ) )
        istream.seek( 0 )

        table = _OrderedDict( )
        platform = dominions_version.platform

        csv = _csv.DictReader( istream, dialect = dialect )
        for row in csv:

            # Exclude rows which are tagged for a version of Dominions
            # greater than the targeted version.
            if "dominions_version" in row:
                row_dominions_version \
                = _DominionsVersion( platform, row[ "dominions_version" ] )
                if row_dominions_version > dominions_version: continue

            # Add new table entry or update table entry from CSV row.
            # Note: Newer versions of a row are expected to be encountered
            #       later than earlier versions of it.
            table[ cls._ROW_CLASS.key_from_dict( row ) ] \
            = cls._ROW_CLASS.from_dict( row )

        return cls( table )
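The CSV handling in this method (sniff the dialect from a sample, rewind, then iterate with DictReader) can be exercised in isolation with the standard library; the sample data below is invented:

import csv
import io
from collections import OrderedDict

istream = io.StringIO("number,name,dominions_version\n1,fire,4.03\n2,air,4.05\n")

sniffer = csv.Sniffer()
if not sniffer.has_header(istream.read(1024)):
    raise IOError("Invalid CSV input stream.")
istream.seek(0)
dialect = sniffer.sniff(istream.read(1024))
istream.seek(0)

table = OrderedDict()
for row in csv.DictReader(istream, dialect=dialect):
    table[row["number"]] = row    # stand-in for cls._ROW_CLASS.key_from_dict(row)
print(list(table))                # ['1', '2']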
Code example #5
File: table.py  Project: stjordanis/pyGSTi
 def __getitem__(self, key):
     """Indexes the first column rowdata"""
     for row in self._rows:
         row_data = row.cells
         if len(row_data) > 0 and row_data[0].data.get_value() == key:
             return _OrderedDict(zip(self._columnNames, row_data))
     raise KeyError("%s not found as a first-column value" % key)
Code example #6
File: spectroscopy.py  Project: APILASTRI/Magni
class Spectroscopy(_File):
    """
    Data class of the .mi spectroscopy files.

    Parameters
    ----------
    attrs : dict
        The attributes of the image.
    buffers : list or tuple
        The buffers of the image.

    See Also
    --------
    magni.utils.types.File : Superclass of the present class.

    Examples
    --------
    No example .mi spectroscopy file is distributed with magni.

    """

    _params = _OrderedDict((('plotType', str), ('BgImageFile', str)))

    def __init__(self, attrs, buffers):
        @_decorate_validation
        def validate_input():
            _levels('buffers', (_generic(
                None, 'explicit collection'), _generic(None, Buffer)))

        _File.__init__(self, attrs, buffers)
        validate_input()
Code example #7
File: rbutils.py  Project: pyIonControl/pyGSTi
def summary_dict_to_f_empirical_squared_dict(summary_dict,
                                             use_frequencies=False):
    """
    Maps summary dict (defined in rbutils.dataset_to_summary_dict) to 
    f_empirical_squared_dict.
    """
    f_empirical_squared_dict = _OrderedDict({})
    if not use_frequencies:
        for m in summary_dict.keys():
            if isinstance(m, int):
                K = summary_dict[m, 'K']  #len(summary_dict[m])
                bias_correction = K / (K + 1.)
                N = summary_dict[m, 'N']
                f_empirical_squared_dict[m, 'N'] = N
                f_empirical_squared_dict[m] = bias_correction * 1. / (
                    2 * K**2 * N**2) * _np.sum([(nl - nm)**2
                                                for nl in summary_dict[m]
                                                for nm in summary_dict[m]])
        return f_empirical_squared_dict
    else:
        for m in summary_dict.keys():
            if isinstance(m, int):
                K = summary_dict[m, 'K']  #len(summary_dict[m])
                bias_correction = K / (K + 1.)
                f_empirical_squared_dict[m] = bias_correction * 1. / (
                    2 * K**2) * _np.sum([(fl - fm)**2 for fl in summary_dict[m]
                                         for fm in summary_dict[m]])
        return f_empirical_squared_dict
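A small self-contained check of the count-based branch above. The summary-dict layout follows the docstring of dataset_to_summary_dict (code example #9 below): an integer key m maps to the per-sequence success counts, and (m, 'K') / (m, 'N') hold the number of sequences and repetitions. The numbers are invented:

import numpy as np
from collections import OrderedDict

summary_dict = OrderedDict()
summary_dict[4] = [90, 85, 88]     # success counts for K = 3 sequences of length m = 4
summary_dict[4, 'K'] = 3
summary_dict[4, 'N'] = 100

K, N = summary_dict[4, 'K'], summary_dict[4, 'N']
bias_correction = K / (K + 1.)
f_emp_sq = bias_correction / (2 * K**2 * N**2) * np.sum(
    [(nl - nm)**2 for nl in summary_dict[4] for nm in summary_dict[4]])
print(f_emp_sq)   # proportional to the spread of the three counts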
Code example #8
File: rbutils.py  Project: pyIonControl/pyGSTi
def summary_dict_to_delta_f1_squared_dict(summary_dict, infinite_data=True):
    """
    Maps summary dict (defined in rbutils.dataset_to_summary_dict) to 
    delta_f1_squared_dict.
    """
    #    if infinite_data and not use_frequencies:
    #        raise ValueError('If infinite_data is True, then use_frequencies must be True too!')
    delta_f1_squared_dict = _OrderedDict({})
    if infinite_data:
        use_frequencies = True
    else:
        use_frequencies = False
    delta_f_empirical_squared_dict = summary_dict_to_f_empirical_squared_dict(
        summary_dict, use_frequencies)
    f1_hat_dict = summary_dict_to_f1_hat_dict(summary_dict, use_frequencies)
    for m in summary_dict.keys():
        if isinstance(m, int):
            K = summary_dict[m, 'K']
            if infinite_data:
                term_1 = 1. / (2 * K)
            else:
                N = summary_dict[m, 'N']
                term_1 = f1_hat_dict[m] * (1 - f1_hat_dict[m]) / float(N)
            term_2 = delta_f_empirical_squared_dict[m]
            delta_f1_squared_dict[m] = 1. / K * _np.max([term_1, term_2])
    return delta_f1_squared_dict
Code example #9
File: rbutils.py  Project: pyIonControl/pyGSTi
def dataset_to_summary_dict(dataset,
                            seqs,
                            success_spam_label,
                            use_frequencies=False):
    """
    Maps an RB dataset to an ordered dictionary; keys are 
    sequence lengths, values are number of success counts or frequencies, where
    value of the i^th element is the total number of successes seen for the 
    i^th sequence of length m.  (Auxiliary keys map (m,'N') and (m,'K') to  
    number of repetitions per sequence and number of sequences, respectively,
    for sequences of length m.)"""
    output = _OrderedDict({})
    if not use_frequencies:
        #        N = None
        for seq in seqs:
            m = len(seq)
            N_temp = int(_np.round(dataset[seq].total()))
            try:
                output[m, 'N']
            except:
                output[m, 'N'] = N_temp
            if output[m, 'N'] != N_temp:
                raise ValueError("Different N values used at same m!")
            try:
                output[m, 'K'] += 1
            except:
                output[m, 'K'] = 1


#            if N is None:
#                N = N_temp
#            elif N_temp != N:
#                raise ValueError("Different N values discovered!")
            n = dataset[seq][success_spam_label]
            try:
                output[m].append(n)
            except:
                output[m] = [n]
        return output
    else:
        #        N = None
        for seq in seqs:
            m = len(seq)
            #            N_temp = int(_np.round(dataset[seq].total()))
            #            if N is None:
            #                N = N_temp
            #            elif N_temp != N:
            #                raise ValueError("Different N values discovered!")
            try:
                output[m, 'K'] += 1
            except:
                output[m, 'K'] = 1
            N = dataset[seq].total()
            n = dataset[seq][success_spam_label]
            frac = float(n) / float(N)
            try:
                output[m].append(frac)
            except:
                output[m] = [frac]
        return output
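For a concrete picture of what this builds, the count-based branch produces entries of the following shape (numbers invented; compare the docstring above):

# For sequences of length m, insertion order per length is (m, 'N'), (m, 'K'), m:
#   output[m, 'N'] -> repetitions per sequence          e.g. output[4, 'N'] == 100
#   output[m, 'K'] -> number of sequences of length m   e.g. output[4, 'K'] == 3
#   output[m]      -> list of success counts            e.g. output[4] == [90, 85, 88]
# In the frequency branch, output[m] holds success fractions instead of raw counts.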
Code example #10
def _combine_mps_tasks(**tasks):
    # Concatenate output from all the completed tasks.
    models = []
    evaluations = []
    parameters = []
    metadatas = []
    status = {'Failed': 0, 'Completed': 0}
    for t in tasks.values():
        if t is not None:  # If an exception occurred, t is None
            models.append(t['model'])
            evaluations.append(t['evaluation'])
            parameters.append(t['parameters'])
            metadatas.append(t['metadata'])
            status['Completed'] += 1
        else:
            status['Failed'] += 1

    if all(m is None for m in models):
        models = None
    if all(x is None or len(x) == 0 for x in parameters):
        parameters = _SArray([None] * len(parameters), dtype=dict)
    evaluations = _SArray(evaluations, dtype=dict)
    parameters = _SArray(parameters, dtype=dict)
    metadatas = _SArray(metadatas, dtype=dict)

    summary = _SFrame({'metric': evaluations,
                       'metadata': metadatas,
                       'parameters': parameters})

    return _OrderedDict([('models', models),
                         ('summary', summary),
                         ('status', status)])
Code example #11
def get_hierarchy(file, pth=None, fulldsetpath=False, grp_w_dset=False):
    """
    Return an ordered dictionary, where the keys are groups and the items are
    the datasets

    Parameters
    ----------

    file : str or h5py.File
        Filename or File-object for open HDF5 file

    fulldsetpath : bool
        If True, a dataset name will be prepended with the group down to the
        base level, '/'. If False, it will just be the dset name.

    grp_w_dset : bool
        If True, only return groups that contain datasets. If False, include
        empty groups

    Returns
    -------
    OrderedDict : (group, [dataset list])
        Group and dataset names

    """
    fp = _fullpath(file, pth)

    # Get fid for a file (str or open fid)
    fof = _FidOrFile(fp)
    fid = fof.fid

    grp_list = get_groups(fid)
    dset_list = get_datasets(fid, fulldsetpath=True)

    grp_dict = _OrderedDict([[grp, []] for grp in grp_list])

    for dset in dset_list:
        split_out = dset.rsplit('/', maxsplit=1)
        if len(split_out) == 1:
            grp_dict['/'].append(dset)
        else:
            if fulldsetpath:
                grp_dict[split_out[0]].append(dset)
            else:
                grp_dict[split_out[0]].append(split_out[1])

    # Only keep groups with datasets
    if grp_w_dset:
        to_pop = []
        for k in grp_dict:
            if not grp_dict[k]:  # is empty
                to_pop.append(k)

        for empty_grp in to_pop:
            grp_dict.pop(empty_grp)

    fof.close_if_file_not_fid()

    return grp_dict
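A hedged usage sketch: create a tiny HDF5 file with h5py and ask for its hierarchy. The import location of get_hierarchy is an assumption (it lives in the same module as the get_groups/get_datasets helpers above), and the exact key and path forms in the result depend on those helpers:

import h5py

with h5py.File('example.h5', 'w') as f:
    f.create_dataset('raw', data=[1, 2, 3])         # dataset at the root group
    f.create_dataset('grp1/spectra', data=[4, 5])   # dataset inside group 'grp1'
    f.create_group('empty_grp')                     # group without datasets

hierarchy = get_hierarchy('example.h5', grp_w_dset=True)   # assumed to be importable
# -> roughly OrderedDict([('/', ['raw']), ('grp1', ['spectra'])]);
#    with grp_w_dset=False, 'empty_grp' would also appear, mapped to an empty list.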
Code example #12
def _combiner(**tasks):
    """
    Take the return values from each task, and return
    the combined result.

    The combined result is a tuple, where the first
    element is a list of models, and the second
    sframe is a summary sframe containing
    the searched parameters and the evaluation result.
    """
    # Concatenate output from all the tasks.
    models = []
    evaluations = []
    parameters = []
    metadatas = []
    for t in tasks.values():
        if t is not None:  # If an exception occurred, t is None
            models.append(t['model'])
            evaluations.append(t['evaluation'])
            parameters.append(t['parameters'])
            metadatas.append(t['metadata'])

    if all(m is None for m in models):
        models = None

    # SFrame contains all the evaluation results, one row per model
    if all(type(x) in (int, float, str, list, type(None))
           for x in evaluations):
        evaluation_sframe = _SFrame({'metric': evaluations})
    else:
        evaluation_sframe = _SArray(evaluations).unpack(
            column_name_prefix=None)

    # SFrame contains all metadata, one row per model
    if all(type(x) in (int, float, str, list, type(None))
           for x in metadatas):
        metadata_sframe = _SFrame({'metadata': metadatas})
    else:
        metadata_sframe = _SArray(metadatas).unpack(
            column_name_prefix=None)

    # SFrame contains all the tuning parameters, one row per model
    if all(x is None or len(x) == 0 for x in parameters):
        parameter_sframe = _SFrame(
            {'parameters': [None] * len(parameters)})
    else:
        parameter_sframe = _SArray(parameters).unpack(
            column_name_prefix=None)

    # Make a summary sframe by concatenating horizontally the evaluation_sframe
    # and parameter_sframe
    summary_sframe = _SFrame()
    param_columns = sorted(parameter_sframe.column_names())
    metric_columns = sorted(evaluation_sframe.column_names())
    metadata_columns = sorted(metadata_sframe.column_names())
    summary_sframe[param_columns] = parameter_sframe[param_columns]
    summary_sframe[metric_columns] = evaluation_sframe[metric_columns]
    summary_sframe[metadata_columns] = metadata_sframe[metadata_columns]
    return _OrderedDict([('models', models), ('summary', summary_sframe)])
Code example #13
 def __init__(self,
              log_level: int = WARNING,
              log_data: bool = False,
              log_level_data: int = DEBUG):
     self.log_level = log_level
     self.log_data = log_data
     self.log_level_data = log_level_data
     self._data_tree_root = _OrderedDict()
Code example #14
File: recdict.py  Project: darnoceloc/MPContribs
 def insert_default_plot_options(self, pd_obj, k, update_plot_options=None):
     # make default plot (add entry in 'plots') for each
     # table, first column as x-column
     table_name = ''.join([replacements.get(c, c) for c in k])
     key = 'default_{}'.format(table_name)
     plots_dict = _OrderedDict([
         (mp_level01_titles[2],
          _OrderedDict([(key,
                         _OrderedDict([('x', pd_obj.columns[0]),
                                       ('table', table_name)]))]))
     ])
     if update_plot_options is not None:
         plots_dict[mp_level01_titles[2]][key].update(update_plot_options)
     if mp_level01_titles[2] in self:
         self.rec_update(plots_dict)
     else:
         self[mp_level01_titles[2]] = plots_dict[mp_level01_titles[2]]
Code example #15
File: model.py  Project: ssorj/pumpjack
    def _virtual_properties(self):
        props = _OrderedDict()

        for cls in self.classes:
            for prop in cls.properties:
                props[prop.name] = prop

        return props.values()
Code example #16
File: model.py  Project: ssorj/pumpjack
    def _virtual_properties(self):
        props = _OrderedDict()
        
        for cls in self.classes:
            for prop in cls.properties:
                props[prop.name] = prop

        return props.values()
Code example #17
 def make_retrieveParameters(offset=1, count=100, name='RS', sort='D'):
     """Create retrieve parameters dictionary to be used with APIs.
     :count: Number of records to display in the result. Cannot be less than
             0 and cannot be greater than 100. If count is 0 then only the
             summary information will be returned.
     :offset: First record in results to return. Must be greater than zero
     :name: Name of the field to order by. Use a two-character abbreviation
            to specify the field ('AU': Author, 'CF': Conference Title,
            'CG': Page, 'CW': Source, 'CV': Volume, 'LC': Local Times Cited,
            'LD': Load Date, 'PG': Page, 'PY': Publication Year, 'RS':
            Relevance, 'SO': Source, 'TC': Times Cited, 'VL': Volume)
     :sort: Must be A (ascending) or D (descending). The sort parameter can
            only be D for Relevance and TimesCited.
     """
     return _OrderedDict([('firstRecord', offset), ('count', count),
                          ('sortField',
                           _OrderedDict([('name', name), ('sort', sort)]))])
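This helper only assembles a nested OrderedDict; the resulting structure can be reproduced directly with the standard library (this mirrors the defaults above):

from collections import OrderedDict

retrieve_params = OrderedDict([
    ('firstRecord', 1),
    ('count', 100),
    ('sortField', OrderedDict([('name', 'RS'), ('sort', 'D')])),  # sort by Relevance, descending
])
# Equivalent to make_retrieveParameters(offset=1, count=100, name='RS', sort='D');
# the search method (code example #21) passes this as its retrieveParameters argument.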
Code example #18
File: model.py  Project: ssorj/pumpjack
    def _virtual_methods(self):
        meths = _OrderedDict()

        for cls in self.classes:
            for meth in cls.methods:
                meths[meth.name] = meth

        return meths.values()
Code example #19
 def __init__(self, name, root=None):
     self.name = name
     self.files = []
     self.startupfile = None
     if root:
         self.root = qualify_path(root)
     self.pattern_to_function = _OrderedDict()
     self.catchall_handler = None
Code example #20
File: model.py  Project: ssorj/pumpjack
    def _virtual_methods(self):
        meths = _OrderedDict()
        
        for cls in self.classes:
            for meth in cls.methods:
                meths[meth.name] = meth

        return meths.values()
Code example #21
 def search(self,
            query,
            count=5,
            offset=1,
            editions=None,
            symbolicTimeSpan=None,
            timeSpan=None,
            retrieveParameters=None):
     """The search operation submits a search query to the specified
     database edition and retrieves data. This operation returns a query ID
     that can be used in subsequent operations to retrieve more records.
     :query: User query for requesting data. The query parser will return
             errors for invalid queries
     :count: Number of records to display in the result. Cannot be less than
             0 and cannot be greater than 100. If count is 0 then only the
             summary information will be returned.
     :offset: First record in results to return. Must be greater than zero
     :editions: List of editions to be searched. If None, user permissions
                will be substituted.
                Fields:
                collection - Name of the collection
                edition - Name of the edition
     :symbolicTimeSpan: This element defines a range of load dates. The load
                        date is the date when a record was added to a
                        database. If symbolicTimeSpan is specified, the
                        timeSpan parameter must be omitted.  If timeSpan and
                        symbolicTimeSpan are both omitted, then the maximum
                        publication date time span will be inferred from the
                        editions data.
                        Valid values:
                        '1week' - Specifies to use the end date as today and
                                  the begin date as 1 week prior to today.
                        '2week' - Specifies to use the end date as today and
                                  the begin date as 2 week prior to today.
                        '4week' - Specifies to use the end date as today and
                                  the begin date as 4 week prior to today.
     :timeSpan: This element specifies a range of publication dates.
                If timeSpan is used, the symbolicTimeSpan parameter must be
                omitted. If timeSpan and symbolicTimeSpan are both omitted,
                then the maximum time span will be inferred from the
                editions data.
                Fields:
                begin - Beginning date for this search. Format is: YYYY-MM-DD
                end - Ending date for this search. Format is: YYYY-MM-DD
     :retrieveParameters: Retrieve parameters. If omitted the result of
                          make_retrieveParameters(offset, count, 'RS', 'D')
                          is used.
     """
     return self._search.service.search(
         queryParameters=_OrderedDict([('databaseId', 'WOS'),
                                       ('userQuery', query),
                                       ('editions', editions),
                                       ('symbolicTimeSpan',
                                        symbolicTimeSpan),
                                       ('timeSpan', timeSpan),
                                       ('queryLanguage', 'en')]),
         retrieveParameters=(retrieveParameters or
                             self.make_retrieveParameters(offset, count)))
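A hedged usage sketch of a call to this method; `client` is assumed to be a connected instance of the class above, and the query and dates are invented. Field names follow the docstring:

# Hypothetical call; only the parameter names are taken from the method above.
results = client.search(
    'TS=(quantum computing)',
    count=25,
    offset=1,
    timeSpan={'begin': '2015-01-01', 'end': '2015-12-31'},   # publication-date range
)
# Per the docstring, the response carries a query ID that later retrieve
# operations can reuse; editions and symbolicTimeSpan are left at their defaults.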
Code example #22
    def network_news(self, row_limit='0', **kwargs):

        api_url = ('{}://{}/affiliates/api/2/reports.asmx/NetworkNews'.format(
            self.protocol, self.admin_domain))

        parameters = _OrderedDict()
        parameters['api_key'] = self.api_key
        parameters['affiliate_id'] = self.affiliate_id
        parameters['row_limit'] = row_limit
Code example #23
 def __setstate__(self, state_dict):
     gsIndexKeys = [cgs.expand() for cgs in state_dict['gsIndexKeys']]
     self.gsIndex = _OrderedDict(
         list(zip(gsIndexKeys, state_dict['gsIndexVals'])))
     self.slIndex = state_dict['slIndex']
     self.counts = state_dict['counts']
     self.bStatic = state_dict['bStatic']
     self.collisionAction = state_dict.get(
         'collisionAction', "aggregate")  #backwards compatibility
Code example #24
 def read_dict():
   d = _OrderedDict()
   assert(tokens[c.i] == ('chr', '{'))
   c.i += 1
   while 1:
     read_stmt(d)
     if tokens[c.i] == ('chr', '}'):
       c.i += 1
       return d
Code example #25
 def read_dict():
     d = _OrderedDict()
     assert tokens[c.i] == ("chr", "{")
     c.i += 1
     while 1:
         read_stmt(d)
         if tokens[c.i] == ("chr", "}"):
             c.i += 1
             return d
Code example #26
class Grid(_BaseClass):
    """
    Data class of the .mi spectroscopy grids.

    Parameters
    ----------
    attrs : dict
        The attributes of the grid.
    points : list or tuple
        The points of the grid.

    Attributes
    ----------
    points : tuple
        The points of the grid.

    See Also
    --------
    magni.utils.types.BaseClass : Superclass of the present class.

    Notes
    -----
    The points are input and output as a 2D tuple of Point instances.

    Examples
    --------
    No example .mi spectroscopy file is distributed with magni.

    """

    _params = _OrderedDict((('index', int),
                            ('xCenter', float),
                            ('yCenter', float),
                            ('xPoints', int),
                            ('yPoints', int),
                            ('pointSpacing', float),
                            ('angle', int),
                            ('yDirection', str),
                            ('xDirection', str)))

    def __init__(self, attrs, points):
        @_decorate_validation
        def validate_input():
            _generic('attrs', 'mapping', has_keys=(
                'xCenter', 'yCenter', 'xPoints', 'yPoints', 'pointSpacing'))
            _levels('points', (_generic(None, 'explicit collection',
                                        len_=self.attrs['yPoints']),
                               _generic(None, 'explicit collection',
                                        len_=self.attrs['xPoints']),
                               _generic(None, Point, ignore_none=True)))

        _BaseClass.__init__(self, attrs)
        validate_input()

        self._points = tuple(tuple(val) for val in points)

    points = property(lambda self: self._points)
Code example #27
File: multidataset.py  Project: pyIonControl/pyGSTi
    def load(self, fileOrFilename):
        """
        Load MultiDataSet from a file, clearing any data it contained previously.

        Parameters
        ----------
        fileOrFilename : file or string
            Either a filename or a file object.  In the former case, if the
            filename ends in ".gz", the file will be gzip uncompressed as it is read.
        """
        # Compatibility for unicode-literal filenames
        bOpen = not (hasattr(fileOrFilename, 'write'))
        if bOpen:
            if fileOrFilename.endswith(".gz"):
                import gzip as _gzip
                f = _gzip.open(fileOrFilename, "rb")
            else:
                f = open(fileOrFilename, "rb")
        else:
            f = fileOrFilename

        state_dict = _pickle.load(f)

        def expand(x):  #to be backward compatible
            assert isinstance(x, _gs.CompressedGateString)
            return x.expand()
            #else:
            #  _warnings.warn("Deprecated dataset format.  Please re-save " +
            #                 "this dataset soon to avoid future incompatibility.")
            #  return _gs.GateString(_gs.CompressedGateString.expand_gate_label_tuple(x))

        gsIndexKeys = [expand(cgs) for cgs in state_dict['gsIndexKeys']]

        #gsIndexKeys = [ cgs.expand() for cgs in state_dict['gsIndexKeys'] ]
        self.gsIndex = _OrderedDict(
            list(zip(gsIndexKeys, state_dict['gsIndexVals'])))
        self.slIndex = state_dict['slIndex']
        self.collisionActions = state_dict['collisionActions']
        self.comment = state_dict["comment"]
        self.countsDict = _OrderedDict()
        for key in state_dict['countsKeys']:
            self.countsDict[key] = _np.lib.format.read_array(
                f)  #np.load(f) doesn't play nice with gzip
        if bOpen: f.close()
Code example #28
    def load(self, fileOrFilename):
        """
        Load DataSet from a file, clearing any data it contained previously.

        Parameters
        ----------
        fileOrFilename : string or file object
            If a string, interpreted as a filename.  If this filename ends
            in ".gz", the file will be gzip uncompressed as it is read.

        Returns
        -------
        None
        """
        # Compatibility for unicode-literal filenames
        bOpen = not (hasattr(fileOrFilename, 'write'))
        if bOpen:
            if fileOrFilename.endswith(".gz"):
                import gzip as _gzip
                f = _gzip.open(fileOrFilename, "rb")
            else:
                f = open(fileOrFilename, "rb")
        else:
            f = fileOrFilename

        state_dict = _pickle.load(f)

        def expand(x):  #to be backward compatible
            if isinstance(x, _gs.CompressedGateString): return x.expand()
            else:
                _warnings.warn(
                    "Deprecated dataset format.  Please re-save " +
                    "this dataset soon to avoid future incompatibility.")
                return _gs.GateString(
                    _gs.CompressedGateString.expand_gate_label_tuple(x))

        gsIndexKeys = [expand(cgs) for cgs in state_dict['gsIndexKeys']]

        #gsIndexKeys = [ cgs.expand() for cgs in state_dict['gsIndexKeys'] ]
        self.gsIndex = _OrderedDict(
            list(zip(gsIndexKeys, state_dict['gsIndexVals'])))
        self.slIndex = state_dict['slIndex']
        self.bStatic = state_dict['bStatic']
        self.collisionAction = state_dict.get(
            "collisionAction", "aggregate")  #backward compatibility
        self.comment = state_dict.get("comment", None)  #backward compatibility

        if self.bStatic:
            self.counts = _np.lib.format.read_array(
                f)  #_np.load(f) doesn't play nice with gzip
        else:
            self.counts = []
            for i in range(state_dict['nRows']):  #pylint: disable=unused-variable
                self.counts.append(_np.lib.format.read_array(
                    f))  #_np.load(f) doesn't play nice with gzip
        if bOpen: f.close()
Code example #29
def _combiner(**tasks):
    """
    Take the return values from each task, and return
    the combined result.

    The combined result is a tuple, where the first
    element is a list of models, and the second
    sframe is a summary sframe containing
    the searched parameters and the evaluation result.
    """
    # Concatenate output from all the tasks.
    models = []
    evaluations = []
    parameters = []
    metadatas = []
    for t in tasks.values():
        if t is not None:  # If an exception occurred, t is None
            models.append(t['model'])
            evaluations.append(t['evaluation'])
            parameters.append(t['parameters'])
            metadatas.append(t['metadata'])

    if all(m is None for m in models):
        models = None

    # SFrame contains all the evaluation results, one row per model
    if all(
            type(x) in (int, float, str, list, type(None))
            for x in evaluations):
        evaluation_sframe = _SFrame({'metric': evaluations})
    else:
        evaluation_sframe = _SArray(evaluations).unpack(
            column_name_prefix=None)

    # SFrame contains all metadata, one row per model
    if all(type(x) in (int, float, str, list, type(None)) for x in metadatas):
        metadata_sframe = _SFrame({'metadata': metadatas})
    else:
        metadata_sframe = _SArray(metadatas).unpack(column_name_prefix=None)

    # SFrame contains all the tuning parameters, one row per model
    if all(x is None or len(x) == 0 for x in parameters):
        parameter_sframe = _SFrame({'parameters': [None] * len(parameters)})
    else:
        parameter_sframe = _SArray(parameters).unpack(column_name_prefix=None)

    # Make a summary sframe by concatenating horizontally the evaluation_sframe
    # and parameter_sframe
    summary_sframe = _SFrame()
    param_columns = sorted(parameter_sframe.column_names())
    metric_columns = sorted(evaluation_sframe.column_names())
    metadata_columns = sorted(metadata_sframe.column_names())
    summary_sframe[param_columns] = parameter_sframe[param_columns]
    summary_sframe[metric_columns] = evaluation_sframe[metric_columns]
    summary_sframe[metadata_columns] = metadata_sframe[metadata_columns]
    return _OrderedDict([('models', models), ('summary', summary_sframe)])
Code example #30
    def get_verticals(self, **kwargs):

        api_url = ('{}://{}/affiliates/api/2/offers.asmx/GetVerticals'.format(
            self.protocol, self.admin_domain))

        parameters = _OrderedDict()
        parameters['api_key'] = self.api_key
        parameters['affiliate_id'] = self.affiliate_id

        return self._make_api_call(url=api_url, params=parameters)
Code example #31
 def __repr__(self):
     try:
         res = json.dumps(self, indent=2)
     except Exception as err:
         logger.warning("Header is not JSON-serializable: %s", err)
         tmp = _OrderedDict()
         for key, value in self.items():
             tmp[str(key)] = str(value)
         res = json.dumps(tmp, indent=2)
     return res
Code example #32
    def return_lead(self, lead_id, return_reason_id, buyer_contract_id='0'):
        api_url = '{}://{}/buyers/api/1/leads.asmx/Return'.format(
            self.protocol, self.admin_domain)

        parameters = _OrderedDict()
        parameters['lead_id'] = lead_id
        parameters['return_reason_id'] = return_reason_id
        parameters['buyer_contract_id'] = buyer_contract_id

        return self._make_api_call(url=api_url, params=parameters)
Code example #33
 def __setstate__(self, state_dict):
     gsIndexKeys = [ cgs.expand() for cgs in state_dict['gsIndexKeys'] ]
     self.gsIndex = _OrderedDict( list(zip(gsIndexKeys, state_dict['gsIndexVals'])) )
     self.olIndex = state_dict['olIndex']
     self.oliDict = state_dict['oliDict']
     self.timeDict = state_dict['timeDict']
     self.repDict = state_dict['repDict']
     self.collisionActions = state_dict['collisionActions']
     self.comments = state_dict['comments']
     self.comment = state_dict['comment']
Code example #34
    def bills(self, start_at_row='0', row_limit='0', **kwargs):
        api_url = '{}://{}/affiliates/api/3/reports.asmx/Bills'.format(
            self.protocol, self.admin_domain)

        parameters = _OrderedDict()
        parameters['api_key'] = self.api_key
        parameters['affiliate_id'] = self.affiliate_id
        parameters['start_at_row'] = start_at_row
        parameters['row_limit'] = row_limit

        return self._make_api_call(url=api_url, params=parameters)
Code example #35
    def get_campaign(self, campaign_id, **kwargs):

        api_url = '{}://{}/affiliates/api/2/offers.asmx/GetCampaign'.format(
            self.protocol, self.admin_domain)

        parameters = _OrderedDict()
        parameters['api_key'] = self.api_key
        parameters['affiliate_id'] = self.affiliate_id
        parameters['campaign_id'] = campaign_id

        return self._make_api_call(url=api_url, params=parameters)
Code example #36
    def change_account_info(self,
                            contact_id,
                            contact_type_id='0',
                            first_name='',
                            last_name='',
                            email_address='',
                            title='',
                            phone_work='',
                            phone_cell='',
                            phone_fax='',
                            im_service='',
                            im_name='',
                            tax_class='',
                            ssn_tax_id='',
                            payment_to='',
                            website='',
                            address_street_1='',
                            address_street_2='',
                            address_city='',
                            address_state='',
                            address_country='',
                            address_zip_code='',
                            **kwargs):

        api_url = (
            '{}://{}/affiliates/api/2/account.asmx/ChangeAccountInfo'.format(
                self.protocol, self.admin_domain))

        parameters = _OrderedDict()
        parameters['api_key'] = self.api_key
        parameters['affiliate_id'] = self.affiliate_id
        parameters['contact_id'] = contact_id
        parameters['contact_type_id'] = contact_type_id
        parameters['first_name'] = first_name
        parameters['last_name'] = last_name
        parameters['email_address'] = email_address
        parameters['title'] = title
        parameters['phone_work'] = phone_work
        parameters['phone_cell'] = phone_cell
        parameters['phone_fax'] = phone_fax
        parameters['im_service'] = im_service
        parameters['im_name'] = im_name
        parameters['tax_class'] = tax_class
        parameters['ssn_tax_id'] = ssn_tax_id
        parameters['payment_to'] = payment_to
        parameters['website'] = website
        parameters['address_street_1'] = address_street_1
        parameters['address_street_2'] = address_street_2
        parameters['address_city'] = address_city
        parameters['address_state'] = address_state
        parameters['address_country'] = address_country
        parameters['address_zip_code'] = address_zip_code

        return self._make_api_call(url=api_url, params=parameters)
Code example #37
    def get_tax_classes(self, **kwargs):

        api_url = (
            '{}://{}/affiliates/api/2/account.asmx/GetTaxClasses'.format(
                self.protocol, self.admin_domain))

        parameters = _OrderedDict()
        parameters['api_key'] = self.api_key
        parameters['affiliate_id'] = self.affiliate_id

        return self._make_api_call(url=api_url, params=parameters)
Code example #38
    def performance_summary(self, date, **kwargs):

        api_url = (
            '{}://{}/affiliates/api/2/reports.asmx/PerformanceSummary'.format(
                self.protocol, self.admin_domain))

        parameters = _OrderedDict()
        parameters['api_key'] = self.api_key
        parameters['affiliate_id'] = self.affiliate_id
        parameters['date'] = str(date)

        return self._make_api_call(url=api_url, params=parameters)
Code example #39
File: dataset.py  Project: pyGSTio/pyGSTi
    def truncate(self, listOfGateStringsToKeep, bThrowErrorIfStringIsMissing=True):
        """
        Create a truncated dataset comprised of a subset of the counts in this dataset.

        Parameters
        ----------
        listOfGateStringsToKeep : list of (tuples or GateStrings)
            A list of the gate strings for the new returned dataset.  If a
            gate string is given in this list that isn't in the original
            data set, bThrowErrorIfStringIsMissing determines the behavior.

        bThrowErrorIfStringIsMissing : bool, optional
            When true, a ValueError exception is raised when a string
            in listOfGateStringsToKeep is not in the data set.

        Returns
        -------
        DataSet
            The truncated data set.
        """
        if self.bStatic:
            gateStringIndices = []
            gateStrings = []
            for gs in listOfGateStringsToKeep:
                gateString = gs if isinstance(gs, _gs.GateString) else _gs.GateString(gs)

                if gateString not in self.gsIndex:
                    if bThrowErrorIfStringIsMissing:
                        raise ValueError("Gate string %s was not found in dataset begin truncated and bThrowErrorIfStringIsMissing == True" % str(gateString))
                    else: continue

                #only keep track of gate strings if they could be different from listOfGateStringsToKeep
                if not bThrowErrorIfStringIsMissing: gateStrings.append( gateString )
                gateStringIndices.append( self.gsIndex[gateString] )

            if bThrowErrorIfStringIsMissing: gateStrings = listOfGateStringsToKeep
            trunc_gsIndex = _OrderedDict( list(zip(gateStrings, gateStringIndices)) )
            trunc_dataset = DataSet(self.counts, gateStringIndices=trunc_gsIndex, spamLabelIndices=self.slIndex, bStatic=True) #don't copy counts, just reference
            #trunc_dataset = StaticDataSet(self.counts.take(gateStringIndices,axis=0), gateStrings=gateStrings, spamLabelIndices=self.slIndex)

        else:
            trunc_dataset = DataSet(spamLabels=self.get_spam_labels())
            for gateString in _lt.remove_duplicates(listOfGateStringsToKeep):
                if gateString in self.gsIndex:
                    gateStringIndx = self.gsIndex[gateString]
                    trunc_dataset.add_count_list( gateString, self.counts[ gateStringIndx ].copy() ) #Copy operation so truncated dataset can be modified
                elif bThrowErrorIfStringIsMissing:
                    raise ValueError("Gate string %s was not found in dataset being truncated and bThrowErrorIfStringIsMissing == True" % str(gateString))

        return trunc_dataset
Code example #40
File: multidataset.py  Project: pyGSTio/pyGSTi
    def load(self, fileOrFilename):
        """
        Load MultiDataSet from a file, clearing any data it contained previously.

        Parameters
        ----------
        fileOrFilename : file or string
            Either a filename or a file object.  In the former case, if the
            filename ends in ".gz", the file will be gzip uncompressed as it is read.
        """
        # Compatibility for unicode-literal filenames
        bOpen = not (hasattr(fileOrFilename, 'write'))
        if bOpen:
            if fileOrFilename.endswith(".gz"):
                import gzip as _gzip
                f = _gzip.open(fileOrFilename,"rb")
            else:
                f = open(fileOrFilename,"rb")
        else:
            f = fileOrFilename

        state_dict = _pickle.load(f)
        def expand(x): #to be backward compatible
            assert isinstance(x,_gs.CompressedGateString)
            return x.expand()
            #else:
            #  _warnings.warn("Deprecated dataset format.  Please re-save " +
            #                 "this dataset soon to avoid future incompatibility.")
            #  return _gs.GateString(_gs.CompressedGateString.expand_gate_label_tuple(x))
        gsIndexKeys = [ expand(cgs) for cgs in state_dict['gsIndexKeys'] ]

        #gsIndexKeys = [ cgs.expand() for cgs in state_dict['gsIndexKeys'] ]
        self.gsIndex = _OrderedDict( list(zip(gsIndexKeys, state_dict['gsIndexVals'])) )
        self.slIndex = state_dict['slIndex']
        self.countsDict = _OrderedDict()
        for key in state_dict['countsKeys']:
            self.countsDict[key] = _np.lib.format.read_array(f) #np.load(f) doesn't play nice with gzip
        if bOpen: f.close()
Code example #41
File: util.py  Project: gesellkammer/pedlbrd
def sort_natural_dict(d, recursive=True, aslist=False):
    """
    sort dict d naturally and recursively
    """
    rows = []
    if recursive:
        for key, value in d.iteritems():
            if isinstance(value, dict):
                value = sort_natural_dict(value, recursive=recursive, aslist=aslist)
            rows.append((key, value))
        sorted_rows = sort_natural(rows, key=0)
    else:
        sorted_rows = [(key, d[key]) for key in sort_natural(d)]
    if aslist:
        return sorted_rows
    return _OrderedDict(sorted_rows)
Code example #42
File: dataset.py  Project: pyGSTio/pyGSTi
    def load(self, fileOrFilename):
        """
        Load DataSet from a file, clearing any data it contained previously.

        Parameters
        ----------
        fileOrFilename : string or file object
            If a string, interpreted as a filename.  If this filename ends
            in ".gz", the file will be gzip uncompressed as it is read.

        Returns
        -------
        None
        """
        # Compatibility for unicode-literal filenames
        bOpen = not (hasattr(fileOrFilename, 'write'))
        if bOpen:
            if fileOrFilename.endswith(".gz"):
                import gzip as _gzip
                f = _gzip.open(fileOrFilename,"rb")
            else:
                f = open(fileOrFilename,"rb")
        else:
            f = fileOrFilename

        state_dict = _pickle.load(f)
        def expand(x): #to be backward compatible
            if isinstance(x,_gs.CompressedGateString): return x.expand()
            else:
                _warnings.warn("Deprecated dataset format.  Please re-save " +
                               "this dataset soon to avoid future incompatibility.")
                return _gs.GateString(_gs.CompressedGateString.expand_gate_label_tuple(x))
        gsIndexKeys = [ expand(cgs) for cgs in state_dict['gsIndexKeys'] ]

        #gsIndexKeys = [ cgs.expand() for cgs in state_dict['gsIndexKeys'] ]
        self.gsIndex = _OrderedDict( list(zip( gsIndexKeys, state_dict['gsIndexVals'])) )
        self.slIndex = state_dict['slIndex']
        self.bStatic = state_dict['bStatic']

        if self.bStatic:
            self.counts = _np.lib.format.read_array(f) #_np.load(f) doesn't play nice with gzip
        else:
            self.counts = []
            for i in range(state_dict['nRows']): #pylint: disable=unused-variable
                self.counts.append( _np.lib.format.read_array(f) ) #_np.load(f) doesn't play nice with gzip
        if bOpen: f.close()
Code example #43
def _parse_tokens(tokens):
    c = _Counter(0)

    def read_dict():
        d = _OrderedDict()
        assert tokens[c.i] == ("chr", "{")
        c.i += 1
        while 1:
            read_stmt(d)
            if tokens[c.i] == ("chr", "}"):
                c.i += 1
                return d

    def read_stmt(top_dict):
        if tokens[c.i][0] != "key":
            return
        key = tokens[c.i][1]
        c.i += 1
        if tokens[c.i][1] == "=":
            c.i += 1
            assert tokens[c.i][0] in "key str number".split()
            value = tokens[c.i][1]
            c.i += 1
        elif tokens[c.i][1] == ":":
            c.i += 1
            value = read_dict()
        else:
            assert False

        top_dict[key] = value

        assert tokens[c.i] == ("chr", ";")
        c.i += 1

    out = _OrderedDict()

    while 1:
        read_stmt(out)
        if tokens[c.i] == _END:
            break

    return out
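A sketch of the token stream this parser expects. The exact token spellings and the _END sentinel are assumptions inferred from the code, not taken from the original module:

_END = ("end", None)   # assumed shape of the module's end-of-input sentinel

# ("key", ...) starts a statement; "=" binds a scalar value, ":" opens a nested
# "{" ... "}" block, and every statement is terminated by ";".
tokens = [
    ("key", "name"), ("chr", "="), ("str", "pedal"), ("chr", ";"),
    ("key", "options"), ("chr", ":"), ("chr", "{"),
    ("key", "level"), ("chr", "="), ("number", "2"), ("chr", ";"),
    ("chr", "}"), ("chr", ";"),
    _END,
]
# _parse_tokens(tokens) would then return, roughly:
# OrderedDict([('name', 'pedal'), ('options', OrderedDict([('level', '2')]))])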
Code example #44
    def from_program_and_data_files(
        cls, program_path, constants_path_base
    ):
        """ Instantiates from a Dominions executable
            and supporting data files. """

        tables = _OrderedDict( )

        with open( program_path, "rb" ) as program_file:
            with _mmap.mmap(
                program_file.fileno( ), 0, prot = _mmap.PROT_READ
#                Use this line on Windows
#                program_file.fileno( ), 0, access = _mmap.ACCESS_READ
            ) as program_image:

                dominions_version \
                = _DominionsVersion.from_program_image( program_image )

                # Load tables of constants from CSV files.
                for table_type in cls._LOADABLE_TABLE_TYPES:
                    table = table_type.from_csv_file(
                        _path_join(
                            constants_path_base,
                              table_type.FILE_NAME_BASE( )
                            + _path_extsep + "csv"
                        ),
                        dominions_version
                    )
                    tables[ table_type.LABEL( ) ] = table

                # Extract other tables from the Dominions executable.
                for table_type in cls._EXTRACTABLE_TABLE_TYPES:
                    table = table_type.from_program_image(
                        program_image, dominions_version
                    )
                    tables[ table_type.LABEL( ) ] = table

                # TODO: Implement other extractions.

                self = cls( dominions_version, tables )

        return self
Code example #45
File: stdinput.py  Project: pyGSTio/pyGSTi
    def _extractLabelsFromMultiDataColLabels(self, colLabels):
        dsSpamLabels = _OrderedDict()
        countCols = []; freqCols = []; impliedCounts1Q = []
        for i,colLabel in enumerate(colLabels):
            wordsInColLabel = colLabel.split() #split on whitespace into words
            if len(wordsInColLabel) < 3: continue #allow other columns we don't recognize

            if wordsInColLabel[-1] == 'count':
                spamLabel = wordsInColLabel[-2]
                dsLabel = wordsInColLabel[-3]
                if dsLabel not in dsSpamLabels:
                    dsSpamLabels[dsLabel] = [ spamLabel ]
                else: dsSpamLabels[dsLabel].append( spamLabel )
                countCols.append( (dsLabel,spamLabel,i) )

            elif wordsInColLabel[-1] == 'frequency':
                spamLabel = wordsInColLabel[-2]
                dsLabel = wordsInColLabel[-3]
                if '%s count total' % dsLabel not in colLabels:
                    raise ValueError("Frequency columns specified without" +
                                     "count total for dataset '%s'" % dsLabel)
                else: iTotal = colLabels.index( '%s count total' % dsLabel )

                if dsLabel not in dsSpamLabels:
                    dsSpamLabels[dsLabel] = [ spamLabel ]
                else: dsSpamLabels[dsLabel].append( spamLabel )
                freqCols.append( (dsLabel,spamLabel,i,iTotal) )

        for dsLabel,spamLabels in dsSpamLabels.items():
            if '%s count total' % dsLabel in colLabels:
                if 'plus' in spamLabels and 'minus' not in spamLabels:
                    dsSpamLabels[dsLabel].append('minus')
                    iTotal = colLabels.index( '%s count total' % dsLabel )
                    impliedCounts1Q.append( (dsLabel, iTotal) )
            #TODO - add standard count completion for 2Qubit case?

        fillInfo = (countCols, freqCols, impliedCounts1Q)
        return dsSpamLabels, fillInfo
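A worked illustration of the column-label parsing above (labels are invented; indices are positions within colLabels):

colLabels = ["gate string", "DS0 plus count", "DS0 count total",
             "DS1 plus frequency", "DS1 count total"]
# Running the method on these labels would give, roughly:
#   dsSpamLabels    == {'DS0': ['plus', 'minus'], 'DS1': ['plus', 'minus']}
#                      ('minus' is implied because only 'plus' and a count total are present)
#   countCols       == [('DS0', 'plus', 1)]        # (dataset label, spam label, column index)
#   freqCols        == [('DS1', 'plus', 3, 4)]     # (..., frequency column, count-total column)
#   impliedCounts1Q == [('DS0', 2), ('DS1', 4)]    # datasets whose 'minus' counts must be filled in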
Code example #46
File: reportables.py  Project: jarthurgross/pyGSTi
def compute_dataset_qtys(qtynames, dataset, gatestrings=None):
    """
    Compute the named "Dataset" quantities.

    Parameters
    ----------
    qtynames : list of strings
        Names of the quantities to compute.
        
    dataset : DataSet
        Data used to compute the quantity.

    gatestrings : list of tuples or GateString objects, optional
        A list of gatestrings used in the computation of certain quantities.
        If None, all the gatestrings in the dataset are used.
        
    Returns
    -------
    dict
        Dictionary whose keys are the requested quantity names and values are
        ReportableQty objects.
    """

    ret = _OrderedDict()
    possible_qtys = [ ]

    #Quantities computed per gatestring
    per_gatestring_qtys = _OrderedDict( [('gate string', []), ('gate string index', []), ('gate string length', []), ('count total', [])] )
    spamLabels = dataset.get_spam_labels()
    for spl in spamLabels:
        per_gatestring_qtys['Exp prob(%s)' % spl] = []
        per_gatestring_qtys['Exp count(%s)' % spl] = []

    if any( [qtyname in per_gatestring_qtys for qtyname in qtynames ] ):
        if gatestrings is None: gatestrings = dataset.keys()
        for (i,gs) in enumerate(gatestrings):
            if gs in dataset: # skip gate strings given that are not in dataset
                dsRow = dataset[gs]
            else:
                #print "Warning: skipping gate string %s" % str(gs)
                continue

            N = dsRow.total()
            per_gatestring_qtys['gate string'].append(  ''.join(gs)  )
            per_gatestring_qtys['gate string index'].append( i )
            per_gatestring_qtys['gate string length'].append(  len(gs)  )
            per_gatestring_qtys['count total'].append(  N  )
        
            for spamLabel in spamLabels:
                pExp = _projectToValidProb( dsRow[spamLabel] / N, tol=1e-10 )
                per_gatestring_qtys['Exp prob(%s)' % spamLabel].append( pExp )
                per_gatestring_qtys['Exp count(%s)' % spamLabel].append( dsRow[spamLabel] )

        for qtyname in qtynames:
            if qtyname in per_gatestring_qtys:
                ret[qtyname] = ReportableQty(per_gatestring_qtys[qtyname])

    
    #Quantities computed per dataset
    qty = "max logl"; possible_qtys.append(qty)
    if qty in qtynames:
        ret[qty] = ReportableQty( _tools.logl_max(dataset))

    qty = "number of gate strings"; possible_qtys.append(qty)
    if qty in qtynames:
        ret[qty] = ReportableQty( len(dataset) )

    if qtynames[0] is None:
        return possible_qtys + per_gatestring_qtys.keys()
    return ret
Code example #47
File: reportables.py  Project: jarthurgross/pyGSTi
def compute_gateset_dataset_qtys(qtynames, gateset, dataset, gatestrings=None):
    """
    Compute the named "GateSet & Dataset" quantities.

    Parameters
    ----------
    qtynames : list of strings
        Names of the quantities to compute.

    gateset : GateSet
        Gate set used to compute the quantities.
        
    dataset : DataSet
        Data used to compute the quantities.

    gatestrings : list of tuples or GateString objects, optional
        A list of gatestrings used in the computation of certain quantities.
        If None, all the gatestrings in the dataset are used.
        
    Returns
    -------
    dict
        Dictionary whose keys are the requested quantity names and values are
        ReportableQty objects.
    """

    #Note: no error bars computed for these quantities yet...

    ret = _OrderedDict()
    possible_qtys = [ ]

    #Quantities computed per gatestring
    per_gatestring_qtys = _OrderedDict() # OLD qtys: [('logl term diff', []), ('score', [])]
    for spl in gateset.get_spam_labels():
        per_gatestring_qtys['prob(%s) diff' % spl] = []
        per_gatestring_qtys['count(%s) diff' % spl] = []
        per_gatestring_qtys['Est prob(%s)' % spl] = []
        per_gatestring_qtys['Est count(%s)' % spl] = []
        per_gatestring_qtys['gatestring chi2(%s)' % spl] = []

    if any( [qtyname in per_gatestring_qtys for qtyname in qtynames ] ):
        if gatestrings is None: gatestrings = dataset.keys()
        for (i,gs) in enumerate(gatestrings):
            if gs in dataset: # skip gate strings given that are not in dataset
                dsRow = dataset[gs]
            else: continue

            p = gateset.probs(gs)  
            pExp = { }; N = dsRow.total()
            for spamLabel in p:
                p[spamLabel] = _projectToValidProb( p[spamLabel], tol=1e-10 )
                pExp[spamLabel] = _projectToValidProb( dsRow[spamLabel] / N, tol=1e-10 )
            
            #OLD
            #per_gatestring_qtys['logl term diff'].append(  _tools.logL_term(dsRow, pExp) - _tools.logL_term(dsRow, p)  )
            #per_gatestring_qtys['score'].append(  (_tools.logL_term(dsRow, pExp) - _tools.logL_term(dsRow, p)) / N  )

            for spamLabel in p:
                per_gatestring_qtys['prob(%s) diff' % spamLabel].append( abs(p[spamLabel] - pExp[spamLabel]) )
                per_gatestring_qtys['count(%s) diff' % spamLabel].append( int( round(p[spamLabel] * N) - dsRow[spamLabel]) )
                per_gatestring_qtys['Est prob(%s)' % spamLabel].append( p[spamLabel] )
                per_gatestring_qtys['Est count(%s)' % spamLabel].append( int(round(p[spamLabel] * N)) )
                per_gatestring_qtys['gatestring chi2(%s)' % spamLabel].append( _tools.chi2fn( N, p[spamLabel], pExp[spamLabel], 1e-4 ) )
                        
        for qtyname in qtynames:
            if qtyname in per_gatestring_qtys:
                ret[qtyname] = ReportableQty( per_gatestring_qtys[qtyname] )

    #Quantities which take a single value for a given gateset and dataset
    qty = "logl"; possible_qtys.append(qty)
    if qty in qtynames:
        ret[qty] = ReportableQty( _tools.logl(gateset, dataset) )

    qty = "logl diff"; possible_qtys.append(qty)
    if qty in qtynames:
        ret[qty] = ReportableQty( _tools.logl_max(dataset) - _tools.logl(gateset, dataset) )

    qty = "chi2"; possible_qtys.append(qty)        
    if qty in qtynames:
        ret[qty] = ReportableQty( _tools.chi2( dataset, gateset, minProbClipForWeighting=1e-4) )

    #Quantities which take a single value per spamlabel for a given gateset and dataset
    #for spl in gateset.get_spam_labels(): 
    #    qty = "chi2(%s)" % spl; possible_qtys.append(qty)        
    #    if qty in qtynames:
    #        ret[qty] = _tools.chi2( dataset, gateset, minProbClipForWeighting=1e-4)

    if qtynames[0] is None:
        return possible_qtys + per_gatestring_qtys.keys()
    return ret
Code example #48
File: typesys.py  Project: gwk/ploy-py
def def_struct(type_name, fields_str, verbose=False):
  '''
  Returns a new subclass of tuple with named fields.
  generate immutable tuple subclass types.
  like namedtuple, but:
    allows for type annotations and parameter defaults in the constructor.
    fields specifier must be a space-separated string;
      (commas are preserved for type annotations, which cannot contain spaces).
    field names are less limited.

  note: recursive and mutually recursive struct types can be declared using the Fwd class.
  a special syntactic prefix '^' is recognized and converted to Fwd;
  however this does not currently work when nested inside of Collection type declarations.
  '''

  _struct_fmt = '''\
from builtins import len, property, tuple, TypeError, ValueError
from builtins import getattr as _getattr, tuple as _tuple, type as _type
from operator import itemgetter
from collections import OrderedDict

_is_a = is_a

class {type_name}(tuple):
  "generated by def_struct."

  __slots__ = ()

  name = '{type_name}'
  _fields = None # OrderedDict, filled in by def_struct after exec; replaces __new__.__annotations__.
  # NOTE: for some reason, when this class property was named 'fields',
  # it got returned by _getattr(_res, n) below; renamed it with leading underscore to avoid conflict (a hack).

  def __new__(_cls, {fields_str}):
    'Create new instance of {type_name}.'
    _res = _tuple.__new__(_cls, ({fields_tuple_str}))
    for n, F in _cls._fields.items():
      v = _getattr(_res, n)
      if not _is_a(v, F):
        raise TypeError('{type_name}.{{}} expects {{}}; received {{}}'.format(n, F, _type(v)))
    return _res

  @classmethod
  def from_seq(cls, iterable):
    'Make a new {type_name} object from an iterable.'
    res = tuple.__new__(cls, iterable)
    if len(res) != {num_fields}:
      raise TypeError('{type_name} expects {num_fields} argument{plural}, received {{}}'.format(len(res)))
    return res

  @classmethod
  def _fulfill(cls, final):
    for k, T in list(cls._fields.items()):
      if isinstance(T, Fwd) and T.name == final.name:
        cls._fields[k] = final

  def __repr__(self):
    'Return a formatted representation string.'
    return '{{}}({repr_fmt})'.format(self.__class__.__name__, *self)

  def __getnewargs__(self):
    'Return self as a plain tuple. Used by copy and pickle.'
    return tuple(self)

  def __getstate__(self):
    'Exclude the OrderedDict from pickling.'
    return None

  @property
  def __dict__(self):
    'A new OrderedDict mapping field names to their values'
    return OrderedDict(zip(self._fields, self))

  @property
  def _as_dict(self):
    'Return a new OrderedDict which maps field names to their values.'
    return self.__dict__

  def _update(self, **kw):
    'Return a new {type_name} object, replacing specified fields with new values.'
    res = self.from_seq(map(kw.pop, self._fields, self))
    if kw:
      raise ValueError('{type_name}._update() received invalid field names: {{}}'.format(kw))
    return res

  {field_defs}
'''

  _field_fmt = \
'''{name} = property(itemgetter({index}), doc="field {index}: {name}:{type}{eq_dflt}.")'''

  _reserved_names = { '_as_dict', '_cls', '_getattr', '_is_a', '_res', '_tuple',
    '_type', '_update' }

  def _ValErrF(fmt, *items):
    return ValueError(fmt.format(*items))

  def _field_triple_from_str(str):
    name, sep, rest = str.partition(':')
    if not rest:
      raise _ValErrF("field definition is missing type: {!r}", str)
    type_, sep, dflt = rest.partition('=')
    if sep and not dflt:
      raise _ValErrF("field definition is missing default: {!r}", str)
    if type_.startswith('^'): # forward declaration.
      type_ = "Fwd({!r})".format(type_[1:])
    return (name, type_, dflt or None) # default is optional.

  def _str_from_field_triple(triple):
    n, t, d = triple
    ds = '=' + d if d else ''
    return '{}:{}{}'.format(n, t, ds)

  def _validate_name(n):
    if type(n) != str:
      raise TypeError("struct/field name is not a str: {!r}".format(n))
    if not n.isidentifier():
      raise _ValErrF("struct/field name is not a valid identifier: {!r}", n)
    if _iskeyword(n):
      raise _ValErrF("struct/field name cannot be a keyword: {!r}", n)
    if n.startswith('__'):
      raise _ValErrF("struct/field name cannot begin with '__': {!r}", n)
    if n in _reserved_names:
      raise _ValErrF("struct/field name is reserved: {!r}", n)


  _validate_name(type_name)
  field_strs = fields_str.split()
  num_fields = len(field_strs)
  field_triples = tuple(map(_field_triple_from_str, field_strs))
  fields_str = ', '.join(map(_str_from_field_triple, field_triples))
  field_names = tuple(n for n, t, d in field_triples)
  fields_tuple_str = ', '.join(field_names) + (',' if num_fields == 1 else '')
  field_name_set = set()
  for n in field_names:
    _validate_name(n)
    if n in field_name_set:
      raise _ValErrF("definition of {!r} contains duplicate field name: {!r}", type_name, n)
    field_name_set.add(n)

  repr_fmt = ', '.join('{}={{!r}}'.format(n) for n in field_names)

  field_defs = '\n  '.join(
    _field_fmt.format(index=i, name=n, type=t, eq_dflt=('=' + d if d else ''))
      for i, (n, t, d) in enumerate(field_triples))

  src = _struct_fmt.format(
    type_name=type_name,
    num_fields=num_fields,
    plural=('' if num_fields == 1 else 's'),
    fields_str=fields_str,
    fields_tuple_str=fields_tuple_str,
    repr_fmt=repr_fmt,
    field_defs=field_defs)

  src_numbered = '\n'.join('{:3}: {}'.format(i + 1, l) for i, l in enumerate(src.split('\n')))

  def _log_src():
    print('{}:'.format(type_name), '\n', src_numbered, sep='', file=_sys.stderr)

  if verbose:
    _log_src()

  # get the caller frame context.
  globals = _sys._getframe(1).f_globals
  locals = _sys._getframe(1).f_locals # identical to globals at module level.
  assert globals is locals # for now disallow inner def_struct; unknown ramifications.

  # execute the template string in a temporary namespace, but use caller's global environment.
  # this allows for the use of custom types in the type annotations,
  # as well as default values in the constructor.
  # it also means that the result's metadata is correct without further monkeying.
  try:
    exec(src, globals, locals)
  except:
    _log_src()
    raise

  result = locals[type_name]
  result._source = src

  # build the fields OrderedDict from the evaluated annotations.
  # use this to replace __annotations__, so that when fwd types get fulfilled,
  # the changes will show up correctly via both properties.
  annotations = result.__new__.__annotations__
  fields = _OrderedDict((n, annotations[n]) for n in field_names)
  result._fields = fields
  result.__new__.__annotations__ = fields
  _update_dependencies(result, fields.values())

  if verbose: print(type_name, '._fields: ', result._fields, sep='', file=_sys.stderr)

  return result
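A minimal usage sketch for def_struct, assuming it is called at module level and that the helpers its generated source references (is_a, Fwd, _update_dependencies) are present in the calling module's globals; the Point type and its fields are purely illustrative:

# Hypothetical example; 'Point', 'x', and 'y' are illustrative names.
Point = def_struct('Point', 'x:int y:int=0')   # space-separated "name:type[=default]" fields

p = Point(3)                 # y falls back to its default of 0
print(p)                     # Point(x=3, y=0)
print(p._as_dict)            # OrderedDict([('x', 3), ('y', 0)])
q = p._update(y=5)           # a new Point with y replaced and x carried over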
Code example #49
0
File: stdinput.py Project: pyGSTio/pyGSTi
    def parse_multidatafile(self, filename, showProgress=True):
        """
        Parse a multiple data set file into a MultiDataSet object.

        Parameters
        ----------
        filename : string
            The file to parse.

        showProgress : bool, optional
            Whether or not progress should be displayed

        Returns
        -------
        MultiDataSet
            A MultiDataSet object.
        """

        #Parse preamble -- lines beginning with # or ## until first non-# line
        preamble_directives = { }
        with open(filename, 'r') as multidatafile:
            for line in multidatafile:
                line = line.strip()
                if len(line) == 0 or line[0] != '#': break
                if line.startswith("## "):
                    parts = line[len("## "):].split("=")
                    if len(parts) == 2: # key = value
                        preamble_directives[ parts[0].strip() ] = parts[1].strip()

        #Process preamble
        orig_cwd = _os.getcwd()
        if len(_os.path.dirname(filename)) > 0:
            _os.chdir( _os.path.dirname(filename) ) #allow paths relative to datafile path
        try:
            if 'Lookup' in preamble_directives:
                lookupDict = self.parse_dictfile( preamble_directives['Lookup'] )
            else: lookupDict = { }
            if 'Columns' in preamble_directives:
                colLabels = [ l.strip() for l in preamble_directives['Columns'].split(",") ]
            else: colLabels = [ 'dataset1 plus count', 'dataset1 count total' ]
            dsSpamLabels, fillInfo = self._extractLabelsFromMultiDataColLabels(colLabels)
            nDataCols = len(colLabels)
        finally:
            _os.chdir(orig_cwd)

        #Read data lines of data file
        datasets = _OrderedDict()
        for dsLabel,spamLabels in dsSpamLabels.items():
            datasets[dsLabel] = _objs.DataSet(spamLabels=spamLabels)

        dsCountDicts = _OrderedDict()
        for dsLabel in dsSpamLabels: dsCountDicts[dsLabel] = {}

        nLines = 0
        with open(filename, 'r') as datafile:
            nLines = sum(1 for line in datafile)
        nSkip = max(int(nLines / 100.0),1)

        def is_interactive():
            import __main__ as main
            return not hasattr(main, '__file__')

        if is_interactive() and showProgress:
            try:
                import time
                from IPython.display import clear_output
                def display_progress(i,N):
                    time.sleep(0.001); clear_output()
                    print("Loading %s: %.0f%%" % (filename, 100.0*float(i)/float(N)))
                    _sys.stdout.flush()
            except:
                def display_progress(i,N): pass
        else:
            def display_progress(i,N): pass

        with open(filename, 'r') as inputfile:
            for (iLine,line) in enumerate(inputfile):
                if iLine % nSkip == 0 or iLine+1 == nLines: display_progress(iLine+1, nLines)

                line = line.strip()
                if len(line) == 0 or line[0] == '#': continue
                try:
                    gateStringTuple, _, valueList = self.parse_dataline(line, lookupDict, nDataCols)
                except ValueError as e:
                    raise ValueError("%s Line %d: %s" % (filename, iLine, str(e)))

                self._fillMultiDataCountDicts(dsCountDicts, fillInfo, valueList)
                for dsLabel, countDict in dsCountDicts.items():
                    datasets[dsLabel].add_count_dict(gateStringTuple, countDict)

        mds = _objs.MultiDataSet()
        for dsLabel,ds in datasets.items():
            ds.done_adding_data()
            mds.add_dataset(dsLabel, ds)
        return mds
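A hedged sketch of the kind of text file parse_multidatafile consumes; the column labels, gate strings, and counts below are illustrative, and the exact grammar for the per-dataset column names is defined by _extractLabelsFromMultiDataColLabels, which is not shown here:

# ## Columns = DS0 plus count, DS0 count total, DS1 plus count, DS1 count total
# {}      42 100   39 100
# GxGy    57 100   60 100
#
# mds = parser.parse_multidatafile("my_counts.txt")  # 'parser' is an instance of the enclosing class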
Code example #50
0
File: __init__.py Project: giflw/afn-tools
 def __init__(self, info):
     self.locations = _OrderedDict()
     self.info = info
Code example #51
0
    def from_program_image(
        cls, program_image, base_offset, number, dominions_version
    ):
        """ Creates an instance from a program image. """

        # TODO: Version this constant.
        NAME_LENGTH     = 36

        offset = base_offset
        
        unknowns = _OrderedDict( )
        
        name, __ = _from_string( program_image, offset, NAME_LENGTH )
        if "end" == name: raise StopIteration( )
        offset += NAME_LENGTH

        protections = [ ]
        for i in range( 6 ):
            protection_zone, offset \
            = _from_native_uint16( program_image, offset )
            protection_amount, offset = _from_native_uint16(
                program_image, offset
            )
            if protection_zone:
                protections.append( ArmorProtection(
                    armor_number = number,
                    zone_number = protection_zone,
                    protection = protection_amount
                ) )

        unknowns[ offset ], offset \
        = _from_native_uint16( program_image, offset )
        
        defense, offset = _from_native_int16( program_image, offset )

        encumbrance, offset = _from_native_uint16( program_image, offset )

        armor_type, offset = _from_native_uint16( program_image, offset )

        resource_cost, offset = _from_native_uint16( program_image, offset )

        unknowns[ offset ], offset \
        = _from_native_uint16( program_image, offset )

        attribute_keys = [ ]
        for i in range( 3 ):
            attribute_key, offset \
            = _from_native_uint32( program_image, offset )
            attribute_keys.append( attribute_key )

        attribute_values = [ ]
        for i in range( 3 ):
            attribute_value, offset \
            = _from_native_uint32( program_image, offset )
            attribute_values.append( attribute_value )

        attributes = [ ]
        for key, value in zip( attribute_keys, attribute_values ):
            if not key: continue
            attributes.append( _ArmorAttribute.from_raw_data(
                armor_number = number,
                attribute_number = key,
                raw_value = value
            ) )

        unknown_fields = [
            ArmorUnknownField(
                armor_number = number,
                offset = offset, value = value
            )
            for offset, value in unknowns.items( ) if value
        ]

        return cls(
            number = number, name = name,
            armor_type = armor_type,
            protections = protections, defense = defense,
            encumbrance = encumbrance,
            resource_cost = resource_cost,
            attributes = attributes,
            unknown_fields = unknown_fields
        )
Code example #52
0
def _sensible_defaults(model_factory, data=None):
    _defaults = {
        _gl.linear_regression.create: _OrderedDict(
            [
                ("l2_penalty", [0.0, 0.0001, 0.001, 0.01, 0.1, 1.0, 10.0, 100.0]),
                ("l1_penalty", [0.0, 0.0001, 0.001, 0.01, 0.1, 1.0, 10.0, 100.0]),
            ]
        ),
        _gl.ranking_factorization_recommender.create: _OrderedDict(
            [
                ("num_factors", [8, 16, 32, 64]),
                ("max_iterations", [25, 50]),
                ("regularization", [1e-9, 1e-8, 1e-7, 1e-6, 1e-4]),
                ("num_sampled_negative_examples", [4, 8]),
                ("ranking_regularization", [0.1, 0.25, 0.5]),
            ]
        ),
        _gl.factorization_recommender.create: _OrderedDict(
            [
                ("num_factors", [8, 16, 32, 64]),
                ("max_iterations", [25, 50]),
                ("regularization", [1e-9, 1e-8, 1e-7, 1e-6, 1e-4]),
                ("linear_regularization", [1e-9, 1e-7, 1e-5]),
            ]
        ),
        _gl.boosted_trees_classifier.create: _OrderedDict(
            [
                ("max_depth", [4, 6, 8, 10]),
                ("max_iterations", [10, 50, 100]),
                ("min_loss_reduction", [0, 1, 10]),
                ("step_size", [0, 0.00001, 0.0001, 0.001, 0.01, 0.1, 0.25, 0.5]),
                ("column_subsample", [1, 0.9, 0.8]),
                ("row_subsample", [1, 0.9]),
                ("min_child_weight", [1, 2, 4, 8, 16]),
            ]
        ),
        _gl.boosted_trees_regression.create: _OrderedDict(
            [
                ("max_depth", [4, 6, 8, 10]),
                ("max_iterations", [10, 50, 100]),
                ("min_loss_reduction", [0, 1, 10]),
                ("step_size", [0, 0.00001, 0.0001, 0.001, 0.01, 0.1, 0.25, 0.5]),
                ("column_subsample", [1, 0.9, 0.8]),
                ("row_subsample", [1, 0.9]),
                ("min_child_weight", [1, 2, 4, 8, 16]),
            ]
        ),
        _gl.random_forest_classifier.create: _OrderedDict(
            [
                ("max_depth", [4, 6, 8, 10]),
                ("num_trees", [10, 50, 100]),
                ("min_loss_reduction", [0, 1, 10]),
                ("step_size", [0, 0.00001, 0.0001, 0.001, 0.01, 0.1, 0.25, 0.5]),
                ("column_subsample", [1, 0.9, 0.8]),
                ("row_subsample", [1, 0.9]),
                ("min_child_weight", [1, 2, 4, 8, 16]),
            ]
        ),
        _gl.random_forest_regression.create: _OrderedDict(
            [
                ("max_depth", [4, 6, 8, 10]),
                ("num_trees", [10, 50, 100]),
                ("min_loss_reduction", [0, 1, 10]),
                ("step_size", [0, 0.00001, 0.0001, 0.001, 0.01, 0.1, 0.25, 0.5]),
                ("column_subsample", [1, 0.9, 0.8]),
                ("row_subsample", [1, 0.9]),
                ("min_child_weight", [1, 2, 4, 8, 16]),
            ]
        ),
        _gl.logistic_classifier.create: _OrderedDict(
            [
                ("l2_penalty", [0.0, 0.0001, 0.001, 0.01, 0.1, 1.0, 10.0, 100.0]),
                ("l1_penalty", [0.0, 0.0001, 0.001, 0.01, 0.1, 1.0, 10.0, 100.0]),
            ]
        ),
        _gl.neuralnet_classifier.create: _OrderedDict([("learning_rate", [0.0001, 0.001, 0.01])]),
        _gl.svm_classifier.create: _OrderedDict([("penalty", [0.001, 0.01, 0.1, 1.0, 10.0])]),
        _gl.topic_model.create: _OrderedDict([("num_topics", [10, 20, 50])]),
        _gl.kmeans.create: _OrderedDict([("num_clusters", range(2, 21))]),
    }

    if HAS_SKLEARN:
        from sklearn.svm import SVC, LinearSVC
        from sklearn.linear_model import ElasticNet, LogisticRegression, LinearRegression
        from sklearn.ensemble import (
            GradientBoostingClassifier,
            GradientBoostingRegressor,
            RandomForestRegressor,
            RandomForestClassifier,
        )

        _defaults[LogisticRegression] = _OrderedDict(
            [("penalty", ["l1", "l2"]), ("C", [0.01, 0.1, 1.0, 2.0, 3.0, 10.0])]
        )
        _defaults[SVC] = _OrderedDict(
            [
                ("C", [0.01, 0.1, 1.0, 2.0, 3.0, 10.0]),
                ("kernel", ["rbf", "linear", "poly", "rbf", "sigmoid"]),
                ("degree", [2, 3]),
                ("probability", [True, False]),
            ]
        )
        _defaults[LinearSVC] = _OrderedDict(
            [
                ("C", [0.01, 0.1, 1.0, 2.0, 3.0, 10.0]),
                ("loss", ["squared_hinge", "hing"]),
                ("penalty", ["l2", "l1"]),
                ("dual", [True, False]),
            ]
        )
        _defaults[ElasticNet] = _OrderedDict(
            [("alpha", [0.01, 0.5, 1.0]), ("l1_ratio", [0.2, 0.4, 0.6, 0.8, 1.0]), ("normalize", [True, False])]
        )
        _defaults[LinearRegression] = _OrderedDict([("normalize", [True, False])])
        _defaults[GradientBoostingClassifier] = _OrderedDict(
            [
                ("loss", ["deviance"]),
                ("learning_rate", [0.01, 0.05, 0.1, 0.15, 0.2, 0.5]),
                ("n_estimators", [10, 25, 50, 100, 250]),
                ("max_depth", [3, 4, 5, 6, 8, 10, 12]),
                ("min_samples_split", [2, 3, 5]),
                # ('min_weight_fraction_leaf', [0.0]),
                ("subsample", [0.8, 0.9, 1.0]),
                ("max_features", ["auto", "sqrt", None]),
            ]
        )
        _defaults[GradientBoostingRegressor] = _OrderedDict(
            [
                ("loss", ["ls", "lad", "huber"]),
                ("learning_rate", [0.01, 0.05, 0.1, 0.15, 0.2, 0.5]),
                ("n_estimators", [10, 25, 50, 100, 250]),
                ("max_depth", [3, 4, 5, 6, 8, 10, 12]),
                ("min_samples_split", [2, 3, 5]),
                ("min_samples_leaf", [2, 3, 4, 5]),
                ("subsample", [0.8, 0.9, 1.0]),
                ("max_features", ["auto", "sqrt", None]),
            ]
        )
        _defaults[RandomForestRegressor] = _OrderedDict(
            [
                ("n_estimators", [10, 25, 50, 100, 250]),
                # ('criterion', ['gini']),
                ("max_features", ["auto", "sqrt", None]),
                ("max_depth", [3, 4, 5, 6, 8, 10, 12]),
                ("min_samples_split", [2, 3, 5]),
                ("min_samples_leaf", [2, 3, 4, 5]),
                ("bootstrap", [True, False]),
            ]
        )
        _defaults[RandomForestClassifier] = _OrderedDict(
            [
                ("n_estimators", [10, 25, 50, 100, 250]),
                ("criterion", ["gini"]),
                ("max_features", ["auto", "sqrt", None]),
                ("max_depth", [3, 4, 5, 6, 8, 10, 12]),
                ("min_samples_split", [2, 3, 5]),
                ("min_samples_leaf", [2, 3, 4, 5]),
                ("bootstrap", [True, False]),
            ]
        )

    if model_factory not in _defaults:
        raise ValueError(
            "Provided model_factory %s not currently supported for"
            " automatic model parameter search. For a list of supported models "
            " check graphlab.model_parameter_search.create documentation."
            " You may also create custom model factories for use with graphlab.random_search"
            " or graphlab.grid_search." % model_factory
        )

    return _defaults[model_factory]
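A short usage sketch, assuming graphlab is importable as _gl exactly as above:

# Look up the default hyperparameter grid for a supported model factory.
search_space = _sensible_defaults(_gl.boosted_trees_classifier.create)
for param, values in search_space.items():
    print(param, values)   # e.g. max_depth [4, 6, 8, 10]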
Code example #53
0
__author__ = 'kun xi'
"""
Taken from http://www.kunxi.org/blog/2014/05/lru-cache-in-python/
"""

from collections import OrderedDict as _OrderedDict
from support.numpy_hash import hashable as _hashable

CAPACITY = 65536
cache = _OrderedDict()


def get(key):
    """
    Finds the value for the numpy array key.
    Throws KeyError when encountering an unknown key.
    :param key: input into the function whose results we are caching.
    :return: the result of the function, if it has been calculated before.
    """
    key = _hashable(key)
    value = cache.pop(key)
    cache[key] = value
    return value


def set(key, value):
    """
    Caches the value for the numpy array key, evicting the least recently
    used entry when the cache grows beyond CAPACITY.
    """
    key = _hashable(key)
    try:
        cache.pop(key)
    except KeyError:
        if len(cache) >= CAPACITY:
            cache.popitem(last=False)  # evict the least recently used entry
    cache[key] = value
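A hedged usage sketch built on the get/set functions above; _cached_norm and the norm computation are placeholders for whatever numpy-keyed work is actually being memoized:

import numpy as _np

def _cached_norm(x):
    # Serve repeated calls from the LRU cache; compute only on a miss.
    try:
        return get(x)                    # hit: also marks x as most recently used
    except KeyError:
        result = _np.linalg.norm(x)      # stand-in for an expensive computation
        set(x, result)
        return result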
Code example #54
0
File: reportables.py Project: jarthurgross/pyGSTi
def compute_gateset_qtys(qtynames, gateset, confidenceRegionInfo=None):
    """
    Compute the named "GateSet" quantities.

    Parameters
    ----------
    qtynames : list of strings
        Names of the quantities to compute.
        
    gateset : GateSet
        Gate set used to compute the quantities.

    confidenceRegionInfo : ConfidenceRegion, optional
        If not None, specifies a confidence-region used to compute the error bars
        contained in the returned quantities.  If None, then no error bars are
        computed.

    Returns
    -------
    dict
        Dictionary whose keys are the requested quantity names and values are
        ReportableQty objects.
    """
    ret = _OrderedDict()
    possible_qtys = [ ]
    eps = FINITE_DIFF_EPS
    mxBasis = gateset.get_basis_name()

    def choi_matrix(gate):
        return _tools.jamiolkowski_iso(gate, mxBasis, mxBasis)

    def choi_evals(gate):
        choi = _tools.jamiolkowski_iso(gate, mxBasis, mxBasis)
        choi_eigvals = _np.linalg.eigvals(choi)
        return _np.array(sorted(choi_eigvals))

    def choi_trace(gate):
        choi = _tools.jamiolkowski_iso(gate, mxBasis, mxBasis)
        return _np.trace(choi)

    def decomp_angle(gate):
        decomp = _tools.decompose_gate_matrix(gate)
        return decomp.get('pi rotations',0)

    def decomp_decay_diag(gate):
        decomp = _tools.decompose_gate_matrix(gate)
        return decomp.get('decay of diagonal rotation terms',0)

    def decomp_decay_offdiag(gate):
        decomp = _tools.decompose_gate_matrix(gate)
        return decomp.get('decay of off diagonal rotation terms',0)

    def decomp_cu_angle(gate):
        closestUGateMx = _alg.find_closest_unitary_gatemx(gate)
        decomp = _tools.decompose_gate_matrix(closestUGateMx)
        return decomp.get('pi rotations',0)

    def decomp_cu_decay_diag(gate):
        closestUGateMx = _alg.find_closest_unitary_gatemx(gate)
        decomp = _tools.decompose_gate_matrix(closestUGateMx)
        return decomp.get('decay of diagonal rotation terms',0)

    def decomp_cu_decay_offdiag(gate):
        closestUGateMx = _alg.find_closest_unitary_gatemx(gate)
        decomp = _tools.decompose_gate_matrix(closestUGateMx)
        return decomp.get('decay of off diagonal rotation terms',0)

    def upper_bound_fidelity(gate):
        ubF, ubGateMx = _tools.fidelity_upper_bound(gate)
        return ubF

    def closest_ujmx(gate):
        closestUGateMx = _alg.find_closest_unitary_gatemx(gate)
        return _tools.jamiolkowski_iso(closestUGateMx, mxBasis, mxBasis)
        
    def maximum_fidelity(gate):
        closestUGateMx = _alg.find_closest_unitary_gatemx(gate)
        closestUJMx = _tools.jamiolkowski_iso(closestUGateMx, mxBasis, mxBasis)
        choi = _tools.jamiolkowski_iso(gate, mxBasis, mxBasis)
        return _tools.fidelity(closestUJMx, choi)

    def maximum_trace_dist(gate):
        closestUGateMx = _alg.find_closest_unitary_gatemx(gate)
        closestUJMx = _tools.jamiolkowski_iso(closestUGateMx, mxBasis, mxBasis)
        return _tools.jtracedist(gate, closestUGateMx)

    def spam_dotprods(rhoVecs, EVecs):
        ret = _np.empty( (len(rhoVecs), len(EVecs)), 'd')
        for i,rhoVec in enumerate(rhoVecs):
            for j,EVec in enumerate(EVecs):
                ret[i,j] = _np.dot(_np.transpose(EVec), rhoVec)
        return ret

    def angles_btwn_rotn_axes(gateset):
        gateLabels = gateset.gates.keys()
        angles_btwn_rotn_axes = _np.zeros( (len(gateLabels), len(gateLabels)), 'd' )

        for i,gl in enumerate(gateLabels):
            decomp = _tools.decompose_gate_matrix(gateset.gates[gl])
            rotnAngle = decomp.get('pi rotations','X')
            axisOfRotn = decomp.get('axis of rotation',None)
    
            for j,gl_other in enumerate(gateLabels[i+1:],start=i+1):
                decomp_other = _tools.decompose_gate_matrix(gateset.gates[gl_other])
                rotnAngle_other = decomp_other.get('pi rotations','X')                

                if rotnAngle == 'X' or abs(rotnAngle) < 1e-4 or \
                   rotnAngle_other == 'X' or abs(rotnAngle_other) < 1e-4:
                    angles_btwn_rotn_axes[i,j] =  _np.nan
                else:
                    axisOfRotn_other = decomp_other.get('axis of rotation',None)
                    if axisOfRotn is not None and axisOfRotn_other is not None:
                        real_dot =  _np.clip( _np.real(_np.dot(axisOfRotn,axisOfRotn_other)), -1.0, 1.0)
                        angles_btwn_rotn_axes[i,j] = _np.arccos( real_dot ) / _np.pi
                    else: 
                        angles_btwn_rotn_axes[i,j] = _np.nan

                angles_btwn_rotn_axes[j,i] = angles_btwn_rotn_axes[i,j]
        return angles_btwn_rotn_axes



    # Spam quantities (computed for all spam vectors at once):
    key = "Spam DotProds"; possible_qtys.append(key)
    if key in qtynames:
        ret[key] = _getSpamQuantity(spam_dotprods, gateset, eps, confidenceRegionInfo)

    key = "Gateset Axis Angles"; possible_qtys.append(key)
    if key in qtynames:
        ret[key] = _getGateSetQuantity(angles_btwn_rotn_axes, gateset, eps, confidenceRegionInfo)

    # Quantities computed per gate
    for (label,gate) in gateset.gates.iteritems():

        #Gate quantities
        suffixes = ('eigenvalues', 'eigenvectors', 'choi eigenvalues', 'choi trace',
                    'choi matrix', 'decomposition')
        gate_qtys = _OrderedDict( [ ("%s %s" % (label,s), None) for s in suffixes ] )
        possible_qtys += gate_qtys.keys()

        if any( [qtyname in gate_qtys for qtyname in qtynames] ):
            #gate_evals,gate_evecs = _np.linalg.eig(gate)
            evalsQty = _getGateQuantity(_np.linalg.eigvals, gateset, label, eps, confidenceRegionInfo)
            choiQty = _getGateQuantity(choi_matrix, gateset, label, eps, confidenceRegionInfo) 
            choiEvQty = _getGateQuantity(choi_evals, gateset, label, eps, confidenceRegionInfo) 
            choiTrQty = _getGateQuantity(choi_trace, gateset, label, eps, confidenceRegionInfo) 

            decompDict = _tools.decompose_gate_matrix(gate)
            if decompDict['isValid']:
                angleQty = _getGateQuantity(decomp_angle, gateset, label, eps, confidenceRegionInfo) 
                diagQty = _getGateQuantity(decomp_decay_diag, gateset, label, eps, confidenceRegionInfo) 
                offdiagQty = _getGateQuantity(decomp_decay_offdiag, gateset, label, eps, confidenceRegionInfo) 
                errBarDict = { 'pi rotations': angleQty.get_err_bar(), 
                               'decay of diagonal rotation terms': diagQty.get_err_bar(),
                               'decay of off diagonal rotation terms': offdiagQty.get_err_bar() }
                decompQty = ReportableQty(decompDict, errBarDict)
            else:
                decompQty = ReportableQty({})

            gate_qtys[ '%s eigenvalues' % label ]      = evalsQty
            #gate_qtys[ '%s eigenvectors' % label ]     = gate_evecs
            gate_qtys[ '%s choi matrix' % label ]      = choiQty
            gate_qtys[ '%s choi eigenvalues' % label ] = choiEvQty
            gate_qtys[ '%s choi trace' % label ]       = choiTrQty
            gate_qtys[ '%s decomposition' % label]     = decompQty
            
            for qtyname in qtynames:
                if qtyname in gate_qtys: 
                    ret[qtyname] = gate_qtys[qtyname]


        #Closest unitary quantities
        suffixes = ('max fidelity with unitary', 
                    'max trace dist with unitary',
                    'upper bound on fidelity with unitary',
                    'closest unitary choi matrix',
                    'closest unitary decomposition')
        closestU_qtys = _OrderedDict( [ ("%s %s" % (label,s), None) for s in suffixes ] )
        possible_qtys += closestU_qtys.keys()
        if any( [qtyname in closestU_qtys for qtyname in qtynames] ):
            ubFQty = _getGateQuantity(upper_bound_fidelity, gateset, label, eps, confidenceRegionInfo) 
            closeUJMxQty = _getGateQuantity(closest_ujmx, gateset, label, eps, confidenceRegionInfo) 
            maxFQty = _getGateQuantity(maximum_fidelity, gateset, label, eps, confidenceRegionInfo) 
            maxJTDQty = _getGateQuantity(maximum_trace_dist, gateset, label, eps, confidenceRegionInfo) 

            closestUGateMx = _alg.find_closest_unitary_gatemx(gate)
            decompDict = _tools.decompose_gate_matrix(closestUGateMx)
            if decompDict['isValid']:
                angleQty = _getGateQuantity(decomp_cu_angle, gateset, label, eps, confidenceRegionInfo) 
                diagQty = _getGateQuantity(decomp_cu_decay_diag, gateset, label, eps, confidenceRegionInfo) 
                offdiagQty = _getGateQuantity(decomp_cu_decay_offdiag, gateset, label, eps, confidenceRegionInfo) 
                errBarDict = { 'pi rotations': angleQty.get_err_bar(), 
                               'decay of diagonal rotation terms': diagQty.get_err_bar(),
                               'decay of off diagonal rotation terms': offdiagQty.get_err_bar() }
                decompQty = ReportableQty(decompDict, errBarDict)
            else:
                decompQty = ReportableQty({})

            closestU_qtys[ '%s max fidelity with unitary' % label ]                  = maxFQty
            closestU_qtys[ '%s max trace dist with unitary' % label ]                = maxJTDQty
            closestU_qtys[ '%s upper bound on fidelity with unitary' % label ]       = ubFQty
            closestU_qtys[ '%s closest unitary choi matrix' % label ]                = closeUJMxQty
            closestU_qtys[ '%s closest unitary decomposition' % label ]              = decompQty

            for qtyname in qtynames:
                if qtyname in closestU_qtys: 
                    ret[qtyname] = closestU_qtys[qtyname]

    if qtynames[0] is None:
        return possible_qtys
    return ret        
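A hedged usage sketch; my_gateset is a placeholder GateSet assumed to contain a gate labeled 'Gx', and the quantity names follow the '<gate label> <suffix>' pattern assembled above:

# qtys = compute_gateset_qtys(['Gx choi eigenvalues', 'Gx decomposition'], my_gateset)
# print(qtys['Gx choi eigenvalues'].get_value())
# Passing [None] as qtynames returns the list of every computable quantity name instead.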
Code example #55
0
    DataTableRow                as _DataTableRow,
)
from dominions.constants_tables import (
    AttributeKey,
    AttributeKeys_DataTable,
    MapTerrainType,
    MapTerrainTypes_DataTable,
)


_AttributesBuilderData = _namedtuple(
    "AttributesBuilderData",
    "value_table_name value_title value_mixin_class_name"
)

ATTRIBUTES_BUILDER_DATA             = _OrderedDict( )
ATTRIBUTES_BUILDER_DATA[ 35 ]      = _AttributesBuilderData(
    "unknown_values", "<Unknown Attribute>",
    "AttributeValue_GenericValue"
)
ATTRIBUTES_BUILDER_DATA[ 36 ]      = _AttributesBuilderData(
    "unknown_values", "<Unknown Attribute>",
    "AttributeValue_GenericValue"
)
ATTRIBUTES_BUILDER_DATA[ 41 ]      = _AttributesBuilderData(
    "unknown_values", "<Unknown Attribute>",
    "AttributeValue_GenericValue"
)
ATTRIBUTES_BUILDER_DATA[ 43 ]      = _AttributesBuilderData(
    "unknown_values", "<Unknown Attribute>",
    "AttributeValue_GenericValue"
Code example #56
0
    def from_database( cls, db_engine ):
        """ Instantiates from a databse. """

        tables = _OrderedDict( )
Code example #57
0
File: multidataset.py Project: jarthurgross/pyGSTi
 def __setstate__(self, state_dict):
   gsIndexKeys = [ cgs.expand() for cgs in state_dict['gsIndexKeys'] ]
   self.gsIndex = _OrderedDict( zip(gsIndexKeys, state_dict['gsIndexVals']) )
   self.slIndex = state_dict['slIndex']
   self.countsDict = state_dict['countsDict']
Code example #58
0
File: multidataset.py Project: jarthurgross/pyGSTi
  def __init__(self, countsDict=None, 
               gateStrings=None, gateStringIndices=None, 
               spamLabels=None, spamLabelIndices=None,
               fileToLoadFrom=None):
    """ 
    Initialize a MultiDataSet.
      
    Parameters
    ----------
    countsDict : ordered dictionary, optional
      Keys specify dataset names.  Values are 2D numpy arrays which specify counts. Rows of the arrays
      correspond to gate strings and columns to spam labels.

    gateStrings : list of (tuples or GateStrings), optional
      Each element is a tuple of gate labels or a GateString object.  Indices for these strings
      are assumed to ascend from 0.  These indices must correspond to rows/elements of counts (above).
      Only specify this argument OR gateStringIndices, not both.

    gateStringIndices : ordered dictionary, optional
      An OrderedDict with keys equal to gate strings (tuples of gate labels) and values equal to
      integer indices associating a row/element of counts with the gate string.  Only
      specify this argument OR gateStrings, not both.

    spamLabels : list of strings, optional
      Specifies the set of spam labels for the DataSet.  Indices for the spam labels
      are assumed to ascend from 0, starting with the first element of this list.  These
      indices will index columns of the counts array/list.  Only specify this argument
      OR spamLabelIndices, not both.

    spamLabelIndices : ordered dictionary, optional
      An OrderedDict with keys equal to spam labels (strings) and values equal to 
      integer indices associating a spam label with a column of counts.  Only 
      specify this argument OR spamLabels, not both.

    fileToLoadFrom : string or file object, optional
      Specify this argument and no others to create a MultiDataSet by loading
      from a file (just like using the load(...) function).
    """

    #Optionally load from a file
    if fileToLoadFrom is not None:
      assert(countsDict is None and gateStrings is None and gateStringIndices is None and spamLabels is None and spamLabelIndices is None)
      self.load(fileToLoadFrom)
      return

    # self.gsIndex  :  Ordered dictionary where keys = gate strings (tuples), values = integer indices into counts
    if gateStringIndices is not None:
      self.gsIndex = gateStringIndices
    elif gateStrings is not None:
      dictData = [ (gs if isinstance(gs,_gs.GateString) else _gs.GateString(gs),i) \
                     for (i,gs) in enumerate(gateStrings) ] #convert to GateStrings if necessary
      self.gsIndex = _OrderedDict( dictData )
    else:
      self.gsIndex = None

    # self.slIndex  :  Ordered dictionary where keys = spam labels (strings), values = integer indices into counts
    if spamLabelIndices is not None:
      self.slIndex = spamLabelIndices
    elif spamLabels is not None:
      self.slIndex = _OrderedDict( [(sl,i) for (i,sl) in enumerate(spamLabels) ] )
    else: 
      self.slIndex = None

    if self.gsIndex:  #Note: tests if not none and nonempty
      assert( min(self.gsIndex.values()) >= 0)
    if self.slIndex:  #Note: tests if not none and nonempty
      assert( min(self.slIndex.values()) >= 0)

    # self.countsDict : a dictionary of 2D numpy arrays, each corresponding to a DataSet.  Rows = gate strings, Cols = spam labels      
    #                   ( keys = dataset names, values = 2D counts array of corresponding dataset )
    if countsDict is not None:
      self.countsDict = _OrderedDict( [ (name,counts) for name,counts in countsDict.iteritems() ] ) #copy OrderedDict but share counts arrays
      if self.gsIndex:  #Note: tests if not none and nonempty
        minIndex = min(self.gsIndex.values())
        maxIndex = max(self.gsIndex.values())
        for dsName,counts in self.countsDict.iteritems():
          assert( counts.shape[0] > maxIndex and counts.shape[1] == len(self.slIndex) )
    else:
      self.countsDict = _OrderedDict()
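A hedged construction sketch following the docstring above (rows index gate strings, columns index spam labels); the dataset names, gate strings, and counts are illustrative:

# import numpy as _np
# counts = _OrderedDict([('DS0', _np.array([[90, 10], [55, 45]])),
#                        ('DS1', _np.array([[88, 12], [50, 50]]))])
# mds = MultiDataSet(countsDict=counts,
#                    gateStrings=[('Gx',), ('Gx', 'Gy')],
#                    spamLabels=['plus', 'minus'])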
Code example #59
0
File: stdinput.py Project: pyGSTio/pyGSTi
def read_gateset(filename):
    """
    Parse a gateset file into a GateSet object.

    Parameters
    ----------
    filename : string
        The file to parse.

    Returns
    -------
    GateSet
    """

    def add_current_label():
        if cur_format == "StateVec":
            ar = _evalRowList( cur_rows, bComplex=True )
            if ar.shape == (1,2):
                spam_vecs[cur_label] = _tools.state_to_pauli_density_vec(ar[0,:])
            else: raise ValueError("Invalid state vector shape for %s: %s" % (cur_label,ar.shape))

        elif cur_format == "DensityMx":
            ar = _evalRowList( cur_rows, bComplex=True )
            if ar.shape == (2,2) or ar.shape == (4,4):
                spam_vecs[cur_label] = _tools.stdmx_to_ppvec(ar)
            else: raise ValueError("Invalid density matrix shape for %s: %s" % (cur_label,ar.shape))

        elif cur_format == "PauliVec":
            spam_vecs[cur_label] = _np.transpose( _evalRowList( cur_rows, bComplex=False ) )

        elif cur_format == "UnitaryMx":
            ar = _evalRowList( cur_rows, bComplex=True )
            if ar.shape == (2,2):
                gs.gates[cur_label] = _objs.FullyParameterizedGate(
                        _tools.unitary_to_pauligate_1q(ar))
            elif ar.shape == (4,4):
                gs.gates[cur_label] = _objs.FullyParameterizedGate(
                        _tools.unitary_to_pauligate_2q(ar))
            else: raise ValueError("Invalid unitary matrix shape for %s: %s" % (cur_label,ar.shape))

        elif cur_format == "UnitaryMxExp":
            ar = _evalRowList( cur_rows, bComplex=True )
            if ar.shape == (2,2):
                gs.gates[cur_label] = _objs.FullyParameterizedGate(
                        _tools.unitary_to_pauligate_1q( _expm(-1j * ar) ))
            elif ar.shape == (4,4):
                gs.gates[cur_label] = _objs.FullyParameterizedGate(
                        _tools.unitary_to_pauligate_2q( _expm(-1j * ar) ))
            else: raise ValueError("Invalid unitary matrix exponent shape for %s: %s" % (cur_label,ar.shape))

        elif cur_format == "PauliMx":
            gs.gates[cur_label] = _objs.FullyParameterizedGate( _evalRowList( cur_rows, bComplex=False ) )


    gs = _objs.GateSet()
    spam_vecs = _OrderedDict(); spam_labels = _OrderedDict(); remainder_spam_label = ""
    identity_vec = _np.transpose( _np.array( [ _np.sqrt(2.0), 0,0,0] ) )  #default = 1-QUBIT identity vector

    basis_abbrev = "pp" #default assumed basis
    basis_dims = None

    state = "look for label"
    cur_label = ""; cur_format = ""; cur_rows = []
    with open(filename) as inputfile:
        for line in inputfile:
            line = line.strip()

            if len(line) == 0:
                state = "look for label"
                if len(cur_label) > 0:
                    add_current_label()
                    cur_label = ""; cur_rows = []
                continue

            if line[0] == "#":
                continue

            if state == "look for label":
                if line.startswith("SPAMLABEL "):
                    eqParts = line[len("SPAMLABEL "):].split('=')
                    if len(eqParts) != 2: raise ValueError("Invalid spam label line: " + line)
                    if eqParts[1].strip() == "remainder":
                        remainder_spam_label = eqParts[0].strip()
                    else:
                        spam_labels[ eqParts[0].strip() ] = [ s.strip() for s in eqParts[1].split() ]

                elif line.startswith("IDENTITYVEC "):  #Vectorized form of identity density matrix in whatever basis is used
                    if line != "IDENTITYVEC None":  #special case for designating no identity vector, so default is not used
                        identity_vec  = _np.transpose( _evalRowList( [ line[len("IDENTITYVEC "):].split() ], bComplex=False ) )

                elif line.startswith("BASIS "): # Line of form "BASIS <abbrev> [<dims>]", where optional <dims> is comma-separated integers
                    parts = line[len("BASIS "):].split()
                    basis_abbrev = parts[0]
                    if len(parts) > 1:
                        basis_dims = list(map(int, "".join(parts[1:]).split(",")))
                        if len(basis_dims) == 1: basis_dims = basis_dims[0]
                    elif gs.get_dimension() is not None:
                        basis_dims = int(round(_np.sqrt(gs.get_dimension())))
                    elif len(spam_vecs) > 0:
                        basis_dims = int(round(_np.sqrt(list(spam_vecs.values())[0].size)))
                    else:
                        raise ValueError("BASIS directive without dimension, and cannot infer dimension!")
                else:
                    cur_label = line
                    state = "expect format"

            elif state == "expect format":
                cur_format = line
                if cur_format not in ["StateVec", "DensityMx", "UnitaryMx", "UnitaryMxExp", "PauliVec", "PauliMx"]:
                    raise ValueError("Expected object format for label %s and got line: %s -- must specify a valid object format" % (cur_label,line))
                state = "read object"

            elif state == "read object":
                cur_rows.append( line.split() )

    if len(cur_label) > 0:
        add_current_label()

    #Try to infer basis dimension if none is given
    if basis_dims is None:
        if gs.get_dimension() is not None:
            basis_dims = int(round(_np.sqrt(gs.get_dimension())))
        elif len(spam_vecs) > 0:
            basis_dims = int(round(_np.sqrt(list(spam_vecs.values())[0].size)))
        else:
            raise ValueError("Cannot infer basis dimension!")

    #Set basis
    gs.set_basis(basis_abbrev, basis_dims)

    #Default SPAMLABEL directives if none are given and rho and E vectors are present:
    if len(spam_labels) == 0 and "rho" in spam_vecs and "E" in spam_vecs:
        spam_labels['plus'] = [ 'rho', 'E' ]
        spam_labels['minus'] = [ 'rho', 'remainder' ] #NEW default behavior
        # OLD default behavior: remainder_spam_label = 'minus'
    if len(spam_labels) == 0: raise ValueError("Must specify rho and E or spam labels directly.")

    #Make SPAMs
     #get unique rho and E names
    rho_names = list(_OrderedDict.fromkeys( [ rho for (rho,E) in list(spam_labels.values()) ] ) ) #if this fails, may be due to malformatted
    E_names   = list(_OrderedDict.fromkeys( [ E   for (rho,E) in list(spam_labels.values()) ] ) ) #  SPAMLABEL line (not 2 items to right of = sign)
    if "remainder" in rho_names:
        del rho_names[ rho_names.index("remainder") ]
    if "remainder" in E_names:
        del E_names[ E_names.index("remainder") ]

    #Order E_names and rho_names using spam_vecs ordering
    #rho_names = sorted(rho_names, key=spam_vecs.keys().index)
    #E_names = sorted(E_names, key=spam_vecs.keys().index)

     #add vectors to gateset
    for rho_nm in rho_names: gs.preps[rho_nm] = spam_vecs[rho_nm]
    for E_nm   in E_names:   gs.effects[E_nm] = spam_vecs[E_nm]

    gs.povm_identity = identity_vec

     #add spam labels to gateset
    for spam_label in spam_labels:
        (rho_nm,E_nm) = spam_labels[spam_label]
        gs.spamdefs[spam_label] = (rho_nm , E_nm)

    if len(remainder_spam_label) > 0:
        gs.spamdefs[remainder_spam_label] = ('remainder', 'remainder')

    return gs
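A hedged sketch of the text layout read_gateset accepts, reconstructed from the state machine above; the numbers are illustrative and blank lines separate the label/format/rows blocks:

# BASIS pp 2
#
# rho
# PauliVec
# 0.7071 0 0 0.7071
#
# E
# PauliVec
# 0.7071 0 0 -0.7071
#
# Gx
# UnitaryMx
# 0 1
# 1 0
#
# With only rho and E defined, the default 'plus'/'minus' SPAM labels described above are used:
# gs = read_gateset("my_gateset.txt")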
Code example #60
0
File: reportables.py Project: jarthurgross/pyGSTi
def compute_gateset_gateset_qtys(qtynames, gateset1, gateset2,
                                 confidenceRegionInfo=None):
    """
    Compute the named "GateSet vs. GateSet" quantities.

    Parameters
    ----------
    qtynames : list of strings
        Names of the quantities to compute.

    gateset1 : GateSet
        First gate set used to compute the quantities.

    gateset2 : GateSet
        Second gate set used to compute the quantities.

    confidenceRegionInfo : ConfidenceRegion, optional
        If not None, specifies a confidence-region used to compute the error bars
        contained in the returned quantities.  If None, then no error bars are
        computed.

    Returns
    -------
    dict
        Dictionary whose keys are the requested quantity names and values are
        ReportableQty objects.
    """
    ret = _OrderedDict()
    possible_qtys = [ ]
    eps = FINITE_DIFF_EPS

    for gateLabel in gateset1.gates:
        if gateLabel not in gateset2.gates:
            raise ValueError("%s gate is missing from second gateset - cannot compare gatesets", gateLabel)
    for gateLabel in gateset2.gates:
        if gateLabel not in gateset1.gates:
            raise ValueError("%s gate is missing from first gateset - cannot compare gatesets", gateLabel)

    mxBasis = gateset1.get_basis_name()
    if mxBasis != gateset2.get_basis_name():
        raise ValueError("Basis mismatch: %s != %s" % 
                         (mxBasis, gateset2.get_basis_name()))

    ### per gate quantities           
    #############################################
    for gateLabel in gateset1.gates:

        key = '%s fidelity' % gateLabel; possible_qtys.append(key)
        key2 = '%s infidelity' % gateLabel; possible_qtys.append(key2)
        if key in qtynames or key2 in qtynames:

            def process_fidelity(gate):
                return _tools.process_fidelity(gate, gateset2.gates[gateLabel], mxBasis) 
                  #vary elements of gateset1 (assume gateset2 is fixed)

            #print "DEBUG: fidelity(%s)" % gateLabel
            FQty = _getGateQuantity(process_fidelity, gateset1, gateLabel,
                                    eps, confidenceRegionInfo) 

            InFQty = ReportableQty( 1.0-FQty.get_value(), FQty.get_err_bar() )
            if key in qtynames: ret[key] = FQty
            if key2 in qtynames: ret[key2] = InFQty

        key = '%s closest unitary fidelity' % gateLabel; possible_qtys.append(key)
        if key in qtynames:
            
            #Note: default 'gm' basis
            def closest_unitary_fidelity(gate): # assume vary gateset1, gateset2 fixed
                decomp1 = _tools.decompose_gate_matrix(gate)
                decomp2 = _tools.decompose_gate_matrix(gateset2.gates[gateLabel])

                if decomp1['isUnitary']:
                    closestUGateMx1 = gate
                else: closestUGateMx1 = _alg.find_closest_unitary_gatemx(gate)
    
                if decomp2['isUnitary']:
                    closestUGateMx2 = gateset2.gates[gateLabel] 
                else: closestUGateMx2 = _alg.find_closest_unitary_gatemx(gateset2.gates[gateLabel])
            
                closeChoi1 = _tools.jamiolkowski_iso(closestUGateMx1)
                closeChoi2 = _tools.jamiolkowski_iso(closestUGateMx2)
                return _tools.fidelity(closeChoi1,closeChoi2)

            ret[key] = _getGateQuantity(closest_unitary_fidelity, gateset1, gateLabel, eps, confidenceRegionInfo) 

        key = "%s Frobenius diff" % gateLabel; possible_qtys.append(key)
        if key in qtynames: 
            def fro_diff(gate): # assume vary gateset1, gateset2 fixed
                return _tools.frobeniusdist(gate,gateset2.gates[gateLabel])
            #print "DEBUG: frodist(%s)" % gateLabel
            ret[key] = _getGateQuantity(fro_diff, gateset1, gateLabel, eps, confidenceRegionInfo) 

        key = "%s Jamiolkowski trace dist" % gateLabel; possible_qtys.append(key)
        if key in qtynames: 
            def jt_diff(gate): # assume vary gateset1, gateset2 fixed
                return _tools.jtracedist(gate,gateset2.gates[gateLabel]) #Note: default 'gm' basis
            #print "DEBUG: jtdist(%s)" % gateLabel
            ret[key] = _getGateQuantity(jt_diff, gateset1, gateLabel, eps, confidenceRegionInfo) 

        key = '%s diamond norm' % gateLabel; possible_qtys.append(key)
        if key in qtynames:

            def half_diamond_norm(gate):
                return 0.5 * _tools.diamonddist(gate, gateset2.gates[gateLabel]) #Note: default 'gm' basis
                  #vary elements of gateset1 (assume gateset2 is fixed)

            try:
                ret[key] = _getGateQuantity(half_diamond_norm, gateset1, gateLabel,
                                            eps, confidenceRegionInfo) 
            except ImportError: #if failed to import cvxpy (probably b/c it's not installed)
                ret[key] = ReportableQty(_np.nan) # report NAN for diamond norms

        key = '%s angle btwn rotn axes' % gateLabel; possible_qtys.append(key)
        if key in qtynames:

            def angle_btwn_axes(gate): #Note: default 'gm' basis
                decomp = _tools.decompose_gate_matrix(gate)
                decomp2 = _tools.decompose_gate_matrix(gateset2.gates[gateLabel])
                axisOfRotn = decomp.get('axis of rotation',None)
                rotnAngle = decomp.get('pi rotations','X')
                axisOfRotn2 = decomp2.get('axis of rotation',None)
                rotnAngle2 = decomp2.get('pi rotations','X')

                if rotnAngle == 'X' or abs(rotnAngle) < 1e-4 or \
                   rotnAngle2 == 'X' or abs(rotnAngle2) < 1e-4:
                    return _np.nan

                if axisOfRotn is None or axisOfRotn2 is None:
                    return _np.nan

                real_dot =  _np.clip( _np.real(_np.dot(axisOfRotn,axisOfRotn2)), -1.0, 1.0)
                return _np.arccos( abs(real_dot) ) / _np.pi  
                  #Note: abs() allows axis to be off by 180 degrees -- if showing *angle* as
                  #      well, must flip sign of angle of rotation if you allow axis to
                  #      "reverse" by 180 degrees.

            ret[key] = _getGateQuantity(angle_btwn_axes, gateset1, gateLabel,
                                    eps, confidenceRegionInfo) 

        key = '%s relative eigenvalues' % gateLabel; possible_qtys.append(key)
        if key in qtynames:
            def rel_eigvals(gate):
                rel_gate = _np.dot(_np.linalg.inv(gateset2.gates[gateLabel]), gate)
                return _np.linalg.eigvals(rel_gate)
                  #vary elements of gateset1 (assume gateset2 is fixed)

            ret[key] = _getGateQuantity(rel_eigvals, gateset1, gateLabel,
                                        eps, confidenceRegionInfo) 

    ### per prep vector quantities           
    #############################################
    for prepLabel in gateset1.get_prep_labels():

        key = '%s prep state fidelity' % prepLabel; possible_qtys.append(key)
        key2 = '%s prep state infidelity' % prepLabel; possible_qtys.append(key2)
        if key in qtynames or key2 in qtynames:

            def fidelity(vec):
                rhoMx1 = _tools.vec_to_stdmx(vec, mxBasis)
                rhoMx2 = _tools.vec_to_stdmx(gateset2.preps[prepLabel], mxBasis)
                return _tools.fidelity(rhoMx1, rhoMx2) 
                  #vary elements of gateset1 (assume gateset2 is fixed)

            FQty = _getPrepQuantity(fidelity, gateset1, prepLabel,
                                    eps, confidenceRegionInfo) 

            InFQty = ReportableQty( 1.0-FQty.get_value(), FQty.get_err_bar() )
            if key in qtynames: ret[key] = FQty
            if key2 in qtynames: ret[key2] = InFQty
        
        key = "%s prep trace dist" % prepLabel; possible_qtys.append(key)
        if key in qtynames: 
            def tr_diff(vec): # assume vary gateset1, gateset2 fixed
                rhoMx1 = _tools.vec_to_stdmx(vec, mxBasis)
                rhoMx2 = _tools.vec_to_stdmx(gateset2.preps[prepLabel], mxBasis)
                return _tools.tracedist(rhoMx1, rhoMx2)
            ret[key] = _getPrepQuantity(tr_diff, gateset1, prepLabel,
                                        eps, confidenceRegionInfo) 


    ### per effect vector quantities           
    #############################################
    for effectLabel in gateset1.get_effect_labels():

        key = '%s effect state fidelity' % effectLabel; possible_qtys.append(key)
        key2 = '%s effect state infidelity' % effectLabel; possible_qtys.append(key2)
        if key in qtynames or key2 in qtynames:

            def fidelity(vec):
                EMx1 = _tools.vec_to_stdmx(vec, mxBasis)
                EMx2 = _tools.vec_to_stdmx(gateset2.effects[effectLabel], mxBasis)
                return _tools.fidelity(EMx1,EMx2) 
                  #vary elements of gateset1 (assume gateset2 is fixed)

            FQty = _getEffectQuantity(fidelity, gateset1, effectLabel,
                                      eps, confidenceRegionInfo) 

            InFQty = ReportableQty( 1.0-FQty.get_value(), FQty.get_err_bar() )
            if key in qtynames: ret[key] = FQty
            if key2 in qtynames: ret[key2] = InFQty
        
        key = "%s effect trace dist" % effectLabel; possible_qtys.append(key)
        if key in qtynames: 
            def tr_diff(vec): # assume vary gateset1, gateset2 fixed
                EMx1 = _tools.vec_to_stdmx(vec, mxBasis)
                EMx2 = _tools.vec_to_stdmx(gateset2.effects[effectLabel], mxBasis)
                return _tools.tracedist(EMx1, EMx2)
            ret[key] = _getEffectQuantity(tr_diff, gateset1, effectLabel,
                                          eps, confidenceRegionInfo) 


    ###  per gateset quantities
    #############################################
    key = "Gateset Frobenius diff"; possible_qtys.append(key)
    if key in qtynames: ret[key] = ReportableQty( gateset1.frobeniusdist(gateset2) )

    key = "Max Jamiolkowski trace dist"; possible_qtys.append(key)
    if key in qtynames: ret[key] = ReportableQty( 
        max( [ _tools.jtracedist(gateset1.gates[l],gateset2.gates[l])
               for l in gateset1.gates ] ) )

 
    #Special case: when qtyname is None then return a list of all possible names that can be computed
    if qtynames[0] is None: 
        return possible_qtys
    return ret
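A hedged usage sketch comparing an estimated gate set against a target; estimated_gs and target_gs are placeholder GateSets assumed to share a gate labeled 'Gx':

# names = ['Gx fidelity', 'Gx infidelity', 'Gateset Frobenius diff']
# qtys = compute_gateset_gateset_qtys(names, estimated_gs, target_gs)
# print(qtys['Gx fidelity'].get_value(), qtys['Gx fidelity'].get_err_bar())
# As above, passing [None] as qtynames returns every computable quantity name.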