Example #1
def render_as_latex(qtys, render_options, verbosity):
    """
    Render the workspace quantities (outputs; not switchboards) in the `qtys` dictionary as LaTeX.

    Parameters
    ----------
    qtys : dict
        A dictionary of workspace quantities to render.

    render_options : dict
        A dictionary of render options to set via the
        `WorkspaceOutput.set_render_options` method of workspace output objects.

    verbosity : int
        How much detail to print to stdout.

    Returns
    -------
    dict
        With the same keys as `qtys` and values which contain the objects
        rendered as strings.
    """
    printer = _VerbosityPrinter.create_printer(verbosity)
    from .workspace import Switchboard as _Switchboard

    #render quantities as LaTeX
    qtys_latex = _collections.defaultdict(lambda: "OMITTED")
    for key, val in qtys.items():
        if isinstance(val, _Switchboard):
            continue  # silently don't render switchboards in latex
        if isinstance(val, str):
            qtys_latex[key] = val
        else:
            printer.log("Rendering %s" % key, 3)
            if hasattr(val, 'set_render_options'):
                val.set_render_options(**render_options)
            render_out = val.render("latex")

            # Note: render_out is a dictionary of rendered portions
            qtys_latex[key] = render_out['latex']

    return qtys_latex
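
# Usage sketch (hypothetical; assumes this module's imports): plain strings are
# passed through unchanged, anything else must expose render("latex"), and
# missing keys fall back to the defaultdict's "OMITTED" value.
if __name__ == '__main__':
    demo_qtys = {'title': r"\textbf{Demo}"}
    latex_strs = render_as_latex(demo_qtys, {}, verbosity=0)
    assert latex_strs['title'] == demo_qtys['title']  # string passed through
    assert latex_strs['missing'] == "OMITTED"         # defaultdict fallback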
Example #2
def render_as_html(qtys, render_options, link_to, verbosity):
    """
    Render the workspace quantities (outputs and switchboards) in the `qtys` dictionary as HTML.

    Parameters
    ----------
    qtys : dict
        A dictionary of workspace quantities to render.

    render_options : dict
        A dictionary of render options to set via the
        `WorkspaceOutput.set_render_options` method of workspace output objects.

    link_to : tuple
        If not None, a list of one or more items from the set
        {"tex", "pdf", "pkl"} indicating whether or not to
        create and include links to Latex, PDF, and Python pickle
        files, respectively.

    verbosity : int
        How much detail to print to stdout.

    Returns
    -------
    dict
        With the same keys as `qtys` and values which contain the objects
        rendered as strings.
    """
    printer = _VerbosityPrinter.create_printer(verbosity)

    #render quantities as HTML
    qtys_html = _collections.defaultdict(lambda: "OMITTED")
    for key, val in qtys.items():
        with _timed_block(key,
                          format_str='Rendering {:35}',
                          printer=printer,
                          verbosity=2):
            qtys_html[key] = _render_as_html(val, render_options, link_to)

    return qtys_html
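
# Usage sketch (hypothetical, module context): unlike render_as_latex above,
# switchboards are rendered too, and link_to=None creates no tex/pdf/pkl links:
#
#     html_strs = render_as_html(qtys, {}, link_to=None, verbosity=1)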
Example #3
    def __init__(self,
                 processor_spec,
                 gatedict,
                 prep_layers=None,
                 povm_layers=None,
                 build_cloudnoise_fn=None,
                 build_cloudkey_fn=None,
                 simulator="map",
                 evotype="default",
                 errcomp_type="gates",
                 implicit_idle_mode="none",
                 verbosity=0):

        qudit_labels = processor_spec.qudit_labels
        state_space = _statespace.QubitSpace(qudit_labels) if isinstance(processor_spec, _QubitProcessorSpec) \
            else _statespace.QuditSpace(qudit_labels, processor_spec.qudit_udims)

        simulator = _FSim.cast(
            simulator, state_space.num_qubits if isinstance(
                state_space, _statespace.QubitSpace) else None)
        prefer_dense_reps = isinstance(simulator, _MatrixFSim)
        evotype = _Evotype.cast(evotype,
                                default_prefer_dense_reps=prefer_dense_reps)

        # Build gate dictionaries. A value of `gatedict` can be an array, a LinearOperator, or an OpFactory.
        # For later processing, we'll create mm_gatedict to contain each item as a ModelMember.  For cloud-
        # noise models, these gate operations should be *static* (no parameters) as they represent the target
        # operations and all noise (and parameters) are assumed to enter through the cloudnoise members.
        mm_gatedict = _collections.OrderedDict(
        )  # static *target* ops as ModelMembers
        for key, gate in gatedict.items():
            if isinstance(gate, _op.LinearOperator):
                assert (
                    gate.num_params == 0
                ), "Only *static* ideal operators are allowed in `gatedict`!"
                mm_gatedict[key] = gate
            elif isinstance(gate, _opfactory.OpFactory):
                assert (
                    gate.num_params == 0
                ), "Only *static* ideal factories are allowed in `gatedict`!"
                mm_gatedict[key] = gate
            else:  # presumably a numpy array or something like it:
                mm_gatedict[key] = _op.StaticArbitraryOp(
                    gate, evotype, state_space=None)  # use default state space
            assert(mm_gatedict[key]._evotype == evotype), \
                ("Custom gate object supplied in `gatedict` for key %s has evotype %s (!= expected %s)"
                 % (str(key), str(mm_gatedict[key]._evotype), str(evotype)))

        #Set other members
        self.processor_spec = processor_spec
        self.errcomp_type = errcomp_type

        idle_names = self.processor_spec.idle_gate_names
        global_idle_name = self.processor_spec.global_idle_gate_name

        # Set noisy_global_idle_name == global_idle_name if the global idle gate isn't the perfect identity
        #  and if we're generating cloudnoise members (if we're not then layer rules could encounter a key error
        #  if we let noisy_global_idle_name be non-None).
        global_idle_gate = mm_gatedict.get(global_idle_name, None)
        if (global_idle_gate is not None) and (build_cloudnoise_fn is not None) \
           and (build_cloudnoise_fn(self.processor_spec.global_idle_layer_label) is not None):
            noisy_global_idle_name = global_idle_name
        else:
            noisy_global_idle_name = None

        singleq_idle_layer_labels = {}
        for idle_name in idle_names:
            if self.processor_spec.gate_num_qubits(idle_name) == 1:
                for idlelayer_sslbls in self.processor_spec.resolved_availability(
                        idle_name, 'tuple'):
                    if idlelayer_sslbls is None:
                        continue  # case of 1Q model with "global" idle
                    assert (len(idlelayer_sslbls) == 1
                            )  # should be a 1-qubit gate!
                    if idlelayer_sslbls not in singleq_idle_layer_labels:
                        singleq_idle_layer_labels[idlelayer_sslbls] = _Lbl(
                            idle_name, idlelayer_sslbls)
        #assert(set(idle_names).issubset([global_idle_name])), \
        #    "Only global idle operations are allowed in a CloudNoiseModel!"

        layer_rules = CloudNoiseLayerRules(errcomp_type, qudit_labels,
                                           implicit_idle_mode,
                                           singleq_idle_layer_labels,
                                           noisy_global_idle_name)
        super(CloudNoiseModel, self).__init__(state_space,
                                              layer_rules,
                                              "pp",
                                              simulator=simulator,
                                              evotype=evotype)

        flags = {
            'auto_embed': False,
            'match_parent_statespace': False,
            'match_parent_evotype': True,
            'cast_to_type': None
        }
        self.prep_blks['layers'] = _OrderedMemberDict(self, None, None, flags)
        self.povm_blks['layers'] = _OrderedMemberDict(self, None, None, flags)
        self.operation_blks['gates'] = _OrderedMemberDict(
            self, None, None, flags)
        self.operation_blks['cloudnoise'] = _OrderedMemberDict(
            self, None, None, flags)
        self.operation_blks['layers'] = _OrderedMemberDict(
            self, None, None, flags)
        self.instrument_blks['layers'] = _OrderedMemberDict(
            self, None, None, flags)
        self.factories['gates'] = _OrderedMemberDict(self, None, None, flags)
        self.factories['cloudnoise'] = _OrderedMemberDict(
            self, None, None, flags)
        self.factories['layers'] = _OrderedMemberDict(self, None, None, flags)

        printer = _VerbosityPrinter.create_printer(verbosity)
        printer.log("Creating a %d-qudit cloud-noise model" %
                    self.processor_spec.num_qudits)

        # a dictionary of "cloud" objects
        # keys = cloud identifiers, e.g. (target_qudit_indices, cloud_qudit_indices) tuples
        # values = list of gate-labels giving the gates (primitive layers?) associated with that cloud (necessary?)
        self._clouds = _collections.OrderedDict()

        for gn in self.processor_spec.gate_names:
            # process gate names (no sslbls, e.g. "Gx", not "Gx:0") - we'll check for the
            # latter when we process the corresponding gate name's availability

            gate_unitary = self.processor_spec.gate_unitaries[gn]
            resolved_avail = self.processor_spec.resolved_availability(gn)
            gate = mm_gatedict.get(
                gn, None
            )  # a static op or factory, no need to consider if "independent" (no params)
            gate_is_factory = callable(gate_unitary) or isinstance(
                gate, _opfactory.OpFactory)
            #gate_is_noiseless_identity = (gate is None) or \
            #    (isinstance(gate, _op.ComposedOp) and len(gate.factorops) == 0)

            if gate is not None:  # (a gate name may not be in gatedict if it's an identity without any noise)
                if gate_is_factory:
                    self.factories['gates'][_Lbl(gn)] = gate
                else:
                    self.operation_blks['gates'][_Lbl(gn)] = gate

            if callable(resolved_avail) or resolved_avail == '*':

                # Target operation
                if gate is not None:
                    allowed_sslbls_fn = resolved_avail if callable(
                        resolved_avail) else None
                    gate_nQudits = self.processor_spec.gate_num_qudits(gn)
                    printer.log("Creating %dQ %s gate on arbitrary qudits!!" %
                                (gate_nQudits, gn))
                    self.factories['layers'][_Lbl(
                        gn)] = _opfactory.EmbeddingOpFactory(
                            state_space,
                            gate,
                            num_target_labels=gate_nQudits,
                            allowed_sslbls_fn=allowed_sslbls_fn)
                    # add any primitive ops for this embedding factory?

                # Cloudnoise operation
                if build_cloudnoise_fn is not None:
                    cloudnoise = build_cloudnoise_fn(_Lbl(gn))
                    if cloudnoise is not None:  # build function can return None to signify no noise
                        assert (isinstance(cloudnoise, _opfactory.EmbeddingOpFactory)), \
                            ("`build_cloudnoise_fn` must return an EmbeddingOpFactory for gate %s"
                             " with arbitrary availability") % gn
                        self.factories['cloudnoise'][_Lbl(gn)] = cloudnoise

            else:  # resolved_avail is a list/tuple of available sslbls for the current gate/factory
                for inds in resolved_avail:  # inds are target qudit labels

                    #Target operation
                    if gate is not None:
                        printer.log("Creating %dQ %s gate on qudits %s!!" %
                                    ((len(qudit_labels) if inds is None else
                                      len(inds)), gn, inds))
                        assert(inds is None or _Lbl(gn, inds) not in gatedict), \
                            ("Cloudnoise models do not accept primitive-op labels, e.g. %s, in `gatedict` as this dict "
                             "specfies the ideal target gates. Perhaps make the cloudnoise depend on the target qudits "
                             "of the %s gate?") % (str(_Lbl(gn, inds)), gn)

                        if gate_is_factory:
                            self.factories['layers'][_Lbl(gn, inds)] = gate if (inds is None) else \
                                _opfactory.EmbeddedOpFactory(state_space, inds, gate)
                            # add any primitive ops for this factory?
                        else:
                            self.operation_blks['layers'][_Lbl(gn, inds)] = gate if (inds is None) else \
                                _op.EmbeddedOp(state_space, inds, gate)

                    #Cloudnoise operation
                    if build_cloudnoise_fn is not None:
                        cloudnoise = build_cloudnoise_fn(_Lbl(gn, inds))
                        if cloudnoise is not None:  # build function can return None to signify no noise
                            if isinstance(cloudnoise, _opfactory.OpFactory):
                                self.factories['cloudnoise'][_Lbl(
                                    gn, inds)] = cloudnoise
                            else:
                                self.operation_blks['cloudnoise'][_Lbl(
                                    gn, inds)] = cloudnoise

                    if build_cloudkey_fn is not None:
                        # TODO: is there any way to get a default "key", e.g. the
                        # qudits touched by the corresponding cloudnoise op?
                        # need a way to identify a cloud (e.g. Gx and Gy gates on some qudit will have *same* cloud)
                        cloud_key = build_cloudkey_fn(_Lbl(gn, inds))
                        if cloud_key not in self.clouds:
                            self.clouds[cloud_key] = []
                        self.clouds[cloud_key].append(_Lbl(gn, inds))
                    #keep track of the primitive-layer labels in each cloud,
                    # used to specify which gate parameters should be amplifiable by germs for a given cloud (?)
                    # TODO CHECK THIS

        _init_spam_layers(self, prep_layers, povm_layers)  # SPAM

        printer.log("DONE! - created Model with nqudits=%d and op-blks=" %
                    self.state_space.num_qudits)
        for op_blk_lbl, op_blk in self.operation_blks.items():
            printer.log("  %s: %s" %
                        (op_blk_lbl, ', '.join(map(str, op_blk.keys()))))
        self._clean_paramvec()
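
# Standalone sketch (hypothetical helper, not part of the class above) of one
# way to build the *static* target entries `gatedict` expects: in the
# column-stacking convention, the superoperator of rho -> U rho U^dag is
# kron(conj(U), U).  Note the model above uses the "pp" basis, so a basis
# change may be needed before such arrays are usable in practice.
import numpy as np

def unitary_to_superop(u):
    """Column-stacking superoperator of the unitary channel rho -> u rho u^dag."""
    return np.kron(u.conj(), u)

x_pi2 = np.array([[1, -1j], [-1j, 1]]) / np.sqrt(2)   # ideal X(pi/2) unitary
demo_gatedict = {'Gxpi2': unitary_to_superop(x_pi2)}  # static, parameterless target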
Example #4
    def find_splitting(self, num_elements, max_sub_tree_size, num_sub_trees,
                       verbosity):
        """
        Find a partition of the indices of `circuit_tree` to define a set of sub-trees with the desired properties.

        This is done in order to reduce the maximum size of any tree (useful for
        limiting memory consumption or for using multiple cores).  At most one
        of `max_sub_tree_size` and `num_sub_trees` may be specified; if neither
        is, no splitting occurs.

        Parameters
        ----------
        num_elements : int
            The number of elements `self` is meant to compute (this means that any
            tree indices `>= num_elements` are considered "scratch" space).

        max_sub_tree_size : int, optional
            The maximum size (i.e. list length) of each sub-tree.  If the
            original tree is smaller than this size, no splitting will occur.
            If None, then there is no limit.

        num_sub_trees : int, optional
            The number of sub-trees to create.  Cannot be specified together
            with `max_sub_tree_size`.

        verbosity : int, optional
            How much detail to send to stdout.

        Returns
        -------
        tuple
            A 2-tuple `(disjoint_lists, helpful_scratch_lists)`: a list of
            disjoint sets of element indices (one per sub-tree), and, for each
            sub-tree, the set of duplicated "final" indices removed from it
            (kept because they make useful scratch space).
        """
        tm = _time.time()
        printer = _VerbosityPrinter.create_printer(verbosity)

        if max_sub_tree_size is None and num_sub_trees is None:
            return [set(range(num_elements))], [set()]  # no splitting needed

        if max_sub_tree_size is not None and num_sub_trees is not None:
            raise ValueError(
                "Cannot specify both max_sub_tree_size and num_sub_trees")
        if num_sub_trees is not None and num_sub_trees <= 0:
            raise ValueError("num_sub_trees must be > 0!")

        #Don't split at all if it's unnecessary
        if max_sub_tree_size is None or len(self) < max_sub_tree_size:
            if num_sub_trees is None or num_sub_trees == 1:
                return [set(range(num_elements))], [set()]  # no splitting needed

        #First pass - identify which indices go in which subtree
        #   Part 1: create disjoint set of subtrees generated by single items
        singleItemTreeSetList = self._create_single_item_trees(num_elements)

        #each element represents a subtree, and
        # is a set of the indices owned by that subtree
        nSingleItemTrees = len(singleItemTreeSetList)

        printer.log("EvalTree.split created singles in %.0fs" %
                    (_time.time() - tm))
        tm = _time.time()

        #   Part 2: determine whether we need to split/merge "single" trees
        if num_sub_trees is not None:

            #Merges: find the best merges to perform if any are required
            if nSingleItemTrees > num_sub_trees:

                #Find trees that have least intersection to begin:
                # The goal is to find a set of single-item trees such that
                # none of them intersect much with any other of them.
                #
                # Algorithm:
                #   - start with a set of the one tree that has least
                #       intersection with any other tree.
                #   - iteratively add the tree that has the least intersection
                #       with the trees in the existing set
                iStartingTrees = []

                def _get_start_indices(max_intersect):
                    """ Builds an initial set of indices by merging single-
                        item trees that don't intersect too much (intersection
                        is no larger than `max_intersect`).  Returns a list of the
                        single-item tree indices and the final set of indices."""
                    starting = [0]  # always start with 0th tree
                    startingSet = singleItemTreeSetList[0].copy()
                    for i, s in enumerate(singleItemTreeSetList[1:], start=1):
                        if len(startingSet.intersection(s)) <= max_intersect:
                            starting.append(i)
                            startingSet.update(s)
                    return starting, startingSet

                left, right = 0, max(map(len, singleItemTreeSetList))
                while left < right:
                    mid = (left + right) // 2
                    iStartingTrees, startingTreeEls = _get_start_indices(mid)
                    nStartingTrees = len(iStartingTrees)
                    if nStartingTrees < num_sub_trees:
                        left = mid + 1
                    elif nStartingTrees > num_sub_trees:
                        right = mid
                    else:
                        break  # nStartingTrees == num_sub_trees!

                if len(iStartingTrees) < num_sub_trees:
                    iStartingTrees, startingTreeEls = _get_start_indices(mid +
                                                                         1)
                if len(iStartingTrees) > num_sub_trees:
                    iStartingTrees = iStartingTrees[0:num_sub_trees]
                    startingTreeEls = set()
                    for i in iStartingTrees:
                        startingTreeEls.update(singleItemTreeSetList[i])

                printer.log(
                    "EvalTree.split fast-found starting trees in %.0fs" %
                    (_time.time() - tm))
                tm = _time.time()

                #else:
                #    raise ValueError("Invalid start select method: %s" % start_select_method)

                #Merge all the non-starting trees into the starting trees
                # so that we're left with the desired number of trees
                subTreeSetList = [
                    singleItemTreeSetList[i] for i in iStartingTrees
                ]
                assert (len(subTreeSetList) == num_sub_trees)

                indicesLeft = list(range(nSingleItemTrees))
                for i in iStartingTrees:
                    del indicesLeft[indicesLeft.index(i)]

                printer.log("EvalTree.split deleted initial indices in %.0fs" %
                            (_time.time() - tm))
                tm = _time.time()

                #merge_method = "fast"
                #Another possible algorithm (but slower)
                #if merge_method == "best":
                #    while len(indicesLeft) > 0:
                #        iToMergeInto,_ = min(enumerate(map(len,subTreeSetList)),
                #                             key=lambda x: x[1]) #argmin
                #        setToMergeInto = subTreeSetList[iToMergeInto]
                #        #intersectionSizes = [ len(setToMergeInto.intersection(
                #        #            singleItemTreeSetList[i])) for i in indicesLeft ]
                #        #iMaxIntsct = _np.argmax(intersectionSizes)
                #        iMaxIntsct,_ = max( enumerate( ( len(setToMergeInto.intersection(
                #                            singleItemTreeSetList[i])) for i in indicesLeft )),
                #                          key=lambda x: x[1]) #argmax
                #        setToMerge = singleItemTreeSetList[indicesLeft[iMaxIntsct]]
                #        subTreeSetList[iToMergeInto] = \
                #              subTreeSetList[iToMergeInto].union(setToMerge)
                #        del indicesLeft[iMaxIntsct]
                #
                #elif merge_method == "fast":
                most_at_once = 10
                while len(indicesLeft) > 0:
                    iToMergeInto, _ = min(enumerate(map(len, subTreeSetList)),
                                          key=lambda x: x[1])  # argmin
                    setToMergeInto = subTreeSetList[iToMergeInto]
                    intersectionSizes = sorted(
                        [(ii,
                          len(
                              setToMergeInto.intersection(
                                  singleItemTreeSetList[i])))
                         for ii, i in enumerate(indicesLeft)],
                        key=lambda x: x[1],
                        reverse=True)
                    toDelete = []
                    for i in range(min(most_at_once, len(indicesLeft))):
                        #if len(subTreeSetList[iToMergeInto]) >= desiredLength: break
                        iMaxIntsct, _ = intersectionSizes[i]
                        setToMerge = singleItemTreeSetList[
                            indicesLeft[iMaxIntsct]]
                        subTreeSetList[iToMergeInto].update(setToMerge)
                        toDelete.append(iMaxIntsct)
                    for i in sorted(toDelete, reverse=True):
                        del indicesLeft[i]

                #else:
                #    raise ValueError("Invalid merge method: %s" % merge_method)

                assert (len(subTreeSetList) == num_sub_trees)
                printer.log("EvalTree.split merged trees in %.0fs" %
                            (_time.time() - tm))
                tm = _time.time()

            #Splits (more subtrees desired than there are single item trees!)
            else:
                #Splits: find the best splits to perform
                #TODO: how to split a tree intelligently -- for now, just do
                # trivial splits by making empty trees.
                subTreeSetList = singleItemTreeSetList[:]
                nSplitsNeeded = num_sub_trees - nSingleItemTrees
                while nSplitsNeeded > 0:
                    # LATER...
                    # for iSubTree,subTreeSet in enumerate(subTreeSetList):
                    subTreeSetList.append([])  # create empty subtree
                    nSplitsNeeded -= 1

        else:
            assert (max_sub_tree_size is not None)
            subTreeSetList = []

            #Merges: find the best merges to perform if any are allowed given
            # the maximum tree size
            min_sub_tree_size = max(list(map(len, singleItemTreeSetList)))
            if min_sub_tree_size > max_sub_tree_size:
                raise ValueError("Max. sub tree size (%d) is too low (<%d)!" %
                                 (max_sub_tree_size, min_sub_tree_size))

            for singleItemTreeSet in singleItemTreeSetList:
                #See if we should merge this single-item-generated tree with
                # another one or make it a new subtree.
                newTreeSize = len(singleItemTreeSet)
                maxIntersectSize = None
                iMaxIntersectSize = None
                for k, existingSubTreeSet in enumerate(subTreeSetList):
                    mergedSize = len(existingSubTreeSet) + newTreeSize
                    if mergedSize <= max_sub_tree_size:
                        intersectionSize = \
                            len(singleItemTreeSet.intersection(existingSubTreeSet))
                        if maxIntersectSize is None or \
                                maxIntersectSize < intersectionSize:
                            maxIntersectSize = intersectionSize
                            iMaxIntersectSize = k

                if iMaxIntersectSize is not None:
                    # then we merge the new tree with this existing set
                    subTreeSetList[iMaxIntersectSize] = \
                        subTreeSetList[iMaxIntersectSize].union(singleItemTreeSet)
                else:  # we create a new subtree
                    subTreeSetList.append(singleItemTreeSet)

        #Remove all "scratch" indices, as we want a partition just of the "final" items:
        subTreeSetList = [
            set(filter(lambda x: x < num_elements, s)) for s in subTreeSetList
        ]

        #Remove duplicated "final" items, as only a single tree (the first one to claim it)
        # should be assigned each final item, even if other trees need to compute that item as scratch.
        # BUT: keep these removed final items as helpful scratch items, as these items, though
        #      not needed, can help in creating a balanced evaluation tree.
        claimed_final_indices = set()
        disjointLists = []
        helpfulScratchLists = []
        for subTreeSet in subTreeSetList:
            disjointLists.append(subTreeSet - claimed_final_indices)
            helpfulScratchLists.append(
                subTreeSet -
                disjointLists[-1])  # the final items that were duplicated
            claimed_final_indices.update(subTreeSet)

        assert (sum(map(
            len,
            disjointLists)) == num_elements), "sub-tree sets are not disjoint!"
        return disjointLists, helpfulScratchLists
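
# Standalone toy illustration of the bisection above: find an intersection
# threshold whose greedy start set has the desired number of trees (the sets
# and target below are hypothetical).
toy_sets = [{0, 1}, {1, 2}, {4, 5}, {5, 6}, {8, 9}]
target = 3

def toy_start_indices(max_intersect):
    chosen, union = [0], set(toy_sets[0])  # always start with 0th set
    for i, s in enumerate(toy_sets[1:], start=1):
        if len(union & s) <= max_intersect:
            chosen.append(i)
            union |= s
    return chosen

left, right = 0, max(map(len, toy_sets))
while left < right:
    mid = (left + right) // 2
    chosen = toy_start_indices(mid)
    if len(chosen) < target:
        left = mid + 1   # allow more overlap -> more starting trees
    elif len(chosen) > target:
        right = mid
    else:
        break
print(chosen)  # -> [0, 2, 4]: three mutually low-overlap trees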
Example #5
    def create_layout(self,
                      circuits,
                      dataset=None,
                      resource_alloc=None,
                      array_types=('E', ),
                      derivative_dimension=None,
                      verbosity=0):
        """
        Constructs a circuit-outcome-probability-array (COPA) layout for a list of circuits.

        Parameters
        ----------
        circuits : list
            The circuits whose outcome probabilities should be included in the layout.

        dataset : DataSet
            The source of data counts that will be compared to the circuit outcome
            probabilities.  The computed outcome probabilities are limited to those
            with counts present in `dataset`.

        resource_alloc : ResourceAllocation
            Available resources and allocation information.  These factors influence how
            the layout (evaluation strategy) is constructed.

        array_types : tuple, optional
            A tuple of string-valued array types.  See :method:`ForwardSimulator.create_layout`.

        derivative_dimension : int, optional
            Optionally, the parameter-space dimension used when taking first
            and second derivatives with respect to the circuit outcome probabilities.  This must be
            non-None when `array_types` contains `'ep'` or `'epp'` types.

        verbosity : int or VerbosityPrinter
            Determines how much output to send to stdout.  0 means no output, higher
            integers mean more output.

        Returns
        -------
        MapCOPALayout
        """
        resource_alloc = _ResourceAllocation.cast(resource_alloc)
        printer = _VerbosityPrinter.create_printer(verbosity, resource_alloc)
        mem_limit = resource_alloc.mem_limit - resource_alloc.allocated_memory \
            if (resource_alloc.mem_limit is not None) else None  # *per-processor* memory limit
        nprocs = resource_alloc.comm_size
        comm = resource_alloc.comm
        num_params = derivative_dimension if (
            derivative_dimension is not None) else self.model.num_params
        C = 1.0 / (1024.0**3)

        if mem_limit is not None:
            if mem_limit <= 0:
                raise MemoryError(
                    "Attempted layout creation w/memory limit = %g <= 0!" %
                    mem_limit)
            printer.log("Layout creation w/mem limit = %.2fGB" %
                        (mem_limit * C))

        #Start with how we'd like to split processors up (without regard to memory limit):

        # when there are lots of processors, the from_vector calls dominate over the actual fwdsim,
        # but we can reduce from_vector calls by having np1, np2 > 0 (each param requires a from_vector
        # call when using finite diffs) - so we want to choose nc = Ng < nprocs and np1 > 1 (so nc * np1 = nprocs).
        #work_per_proc = self.model.dim**2

        natoms, na, npp, param_dimensions, param_blk_sizes = self._compute_processor_distribution(
            array_types,
            nprocs,
            num_params,
            len(circuits),
            default_natoms=2 * self.model.dim)  # heuristic?

        printer.log(
            "MapLayout: %d processors divided into %s (= %d) grid along circuit and parameter directions."
            %
            (nprocs, ' x '.join(map(str,
                                    (na, ) + npp)), _np.product((na, ) + npp)))
        printer.log("   %d atoms, parameter block size limits %s" %
                    (natoms, str(param_blk_sizes)))
        assert (_np.product((na, ) + npp) <=
                nprocs), "Processor grid size exceeds available processors!"

        layout = _MapCOPALayout(circuits, self.model, dataset,
                                self._max_cache_size, natoms, na, npp,
                                param_dimensions, param_blk_sizes,
                                resource_alloc, verbosity)

        if mem_limit is not None:
            loc_nparams1 = num_params / npp[0] if len(npp) > 0 else 0
            loc_nparams2 = num_params / npp[1] if len(npp) > 1 else 0
            blk1 = param_blk_sizes[0] if len(param_blk_sizes) > 0 else 0
            blk2 = param_blk_sizes[1] if len(param_blk_sizes) > 1 else 0
            if blk1 is None: blk1 = loc_nparams1
            if blk2 is None: blk2 = loc_nparams2
            global_layout = layout.global_layout
            if comm is not None:
                from mpi4py import MPI
                max_local_els = comm.allreduce(
                    layout.num_elements,
                    op=MPI.MAX)  # layout.max_atom_elements
                max_atom_els = comm.allreduce(layout.max_atom_elements,
                                              op=MPI.MAX)
                max_local_circuits = comm.allreduce(layout.num_circuits,
                                                    op=MPI.MAX)
                max_atom_cachesize = comm.allreduce(layout.max_atom_cachesize,
                                                    op=MPI.MAX)
            else:
                max_local_els = layout.num_elements
                max_atom_els = layout.max_atom_elements
                max_local_circuits = layout.num_circuits
                max_atom_cachesize = layout.max_atom_cachesize
            mem_estimate = _bytes_for_array_types(
                array_types, global_layout.num_elements, max_local_els,
                max_atom_els, global_layout.num_circuits, max_local_circuits,
                layout._param_dimensions, (loc_nparams1, loc_nparams2),
                (blk1, blk2), max_atom_cachesize, self.model.dim)

            #def approx_mem_estimate(nc, np1, np2):
            #    approx_cachesize = (num_circuits / nc) * 1.3  # inflate expected # of circuits per atom => cache_size
            #    return _bytes_for_array_types(array_types, num_elements, num_elements / nc,
            #                                  num_circuits, num_circuits / nc,
            #                                  (num_params, num_params), (num_params / np1, num_params / np2),
            #                                  approx_cachesize, self.model.dim)

            GB = 1.0 / 1024.0**3
            if mem_estimate > mem_limit:
                raise MemoryError(
                    "Not enough memory for desired layout! (limit=%.1fGB, required=%.1fGB)"
                    % (mem_limit * GB, mem_estimate * GB))
            else:
                printer.log("   Esimated memory required = %.1fGB" %
                            (mem_estimate * GB))

        return layout
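
# Worked sketch of the memory bookkeeping above: the per-processor limit is
# whatever remains after already-allocated memory, and C converts bytes to
# gibibytes for the log messages (the byte counts below are hypothetical).
mem_limit = 4 * 1024**3  # 4 GiB per processor
allocated = 1 * 1024**3  # already claimed elsewhere
remaining = mem_limit - allocated
C = 1.0 / (1024.0**3)
print("Layout creation w/mem limit = %.2fGB" % (remaining * C))  # -> 3.00GB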
Example #6
def merge_latex_template(qtys,
                         template_filename,
                         output_filename,
                         toggles=None,
                         precision=None,
                         verbosity=0):
    """
    Renders `qtys` and merges them into the LaTeX file `template_filename`, saving the output under `output_filename`.

    Parameters
    ----------
    qtys : dict
        A dictionary of workspace quantities (outputs).

    template_filename : str
        The template filename, relative to pyGSTi's `templates` directory.

    output_filename : str
        The merged-output filename.

    toggles : dict, optional
        A dictionary of toggle_name:bool pairs specifying
        how to preprocess the template.

    precision : int or dict, optional
        The amount of precision to display.  A dictionary with keys
        "polar", "sci", and "normal" can separately specify the
        precision for complex angles, numbers in scientific notation, and
        everything else, respectively.  If an integer is given, that same
        value is used for all precision types.  If None, then
        a default is used.

    verbosity : int, optional
        Amount of detail to print to stdout.

    Returns
    -------
    None
    """

    printer = _VerbosityPrinter.create_printer(verbosity)
    template_filename = _os.path.join(
        _os.path.dirname(_os.path.abspath(__file__)), "templates",
        template_filename)
    output_dir = _os.path.dirname(output_filename)
    output_base = _os.path.splitext(_os.path.basename(output_filename))[0]

    #render quantities as LaTeX within dir where report will be compiled
    cwd = _os.getcwd()
    if len(output_dir) > 0: _os.chdir(output_dir)
    try:
        fig_dir = output_base + "_files"  # figure directory relative to output_dir
        if not _os.path.isdir(fig_dir):
            _os.mkdir(fig_dir)

        qtys_latex = render_as_latex(
            qtys,
            dict(switched_item_mode="inline",
                 output_dir=fig_dir,
                 precision=precision), printer)
    finally:
        _os.chdir(cwd)

    if toggles:
        qtys_latex['settoggles'] = ""
        for toggleNm, val in toggles.items():
            qtys_latex['settoggles'] += "\\toggle%s{%s}\n" % \
                (("true" if val else "false"), toggleNm)

    template = ''
    with open(template_filename, 'r') as templatefile:
        template = templatefile.read()
    template = template.replace("{", "{{").replace(
        "}", "}}")  # double curly braces (for format processing)
    # Replace template field markers with `str.format` fields.
    template = _re.sub(r"\\putfield\{\{([^}]+)\}\}\{\{[^}]*\}\}", "{\\1}",
                       template)

    # Replace str.format fields with values and write to output file
    filled_template = template.format_map(qtys_latex)

    with open(output_filename, 'w') as outputfile:
        outputfile.write(filled_template)
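
# Standalone sketch of the template machinery above: literal braces are doubled
# so str.format leaves them alone, then \putfield{name}{default} markers become
# {name} fields that format_map fills from the rendered quantities.
import re
demo = r"\section{Results} \putfield{gate_table}{}"
demo = demo.replace("{", "{{").replace("}", "}}")
demo = re.sub(r"\\putfield\{\{([^}]+)\}\}\{\{[^}]*\}\}", "{\\1}", demo)
print(demo.format_map({'gate_table': "TABLE LATEX"}))
# -> \section{Results} TABLE LATEX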
Example #7
def minimize(fn, x0, method='cg', callback=None,
             tol=1e-10, maxiter=1000000, maxfev=None,
             stopval=None, jac=None, verbosity=0, **addl_kwargs):
    """
    Minimizes the function fn starting at x0.

    This is a gateway function to all other minimization routines within this
    module, providing a common interface to many different minimization methods
    (including and extending beyond those available from scipy.optimize).

    Parameters
    ----------
    fn : function
        The function to minimize.

    x0 : numpy array
        The starting point (argument to fn).

    method : string, optional
        Which minimization method to use.  Allowed values are:
        "simplex" : uses _fmin_simplex
        "supersimplex" : uses _fmin_supersimplex
        "customcg" : uses fmax_cg (custom CG method)
        "brute" : uses scipy.optimize.brute
        "basinhopping" : uses scipy.optimize.basinhopping with L-BFGS-B
        "swarm" : uses _fmin_particle_swarm
        "evolve" : uses _fmin_evolutionary (which uses DEAP)
        < methods available from scipy.optimize.minimize >

    callback : function, optional
        A callback function to be called in order to track optimizer progress.
        Should have signature: myCallback(x, f=None, accepted=None).  Note that
        the create_objfn_printer(...) function can be used to create a callback.

    tol : float, optional
        Tolerance value used for all types of tolerances available in a given method.

    maxiter : int, optional
        Maximum iterations.

    maxfev : int, optional
        Maximum function evaluations; used only when available, and defaults to maxiter.

    stopval : float, optional
        For basinhopping method only.  When f <= stopval then basinhopping outer loop
        will terminate.  Useful when a bound on the minimum is known.

    jac : function
        Jacobian function.

    verbosity : int
        Level of detail to print to stdout.

    addl_kwargs : dict
        Additional arguments for the specific optimizer being used.

    Returns
    -------
    scipy.optimize.OptimizeResult object
        Includes members 'x', 'fun', 'success', and 'message'.
    """

    if maxfev is None: maxfev = maxiter
    printer = _VerbosityPrinter.create_printer(verbosity)

    #Run Minimization Algorithm
    if method == 'simplex':
        solution = _fmin_simplex(fn, x0, slide=1.0, tol=tol, maxiter=maxiter, **addl_kwargs)

    elif method == 'supersimplex':
        if 'abs_outer_tol' not in addl_kwargs: addl_kwargs['abs_outer_tol'] = 1e-6
        if 'inner_tol' not in addl_kwargs: addl_kwargs['inner_tol'] = 1e-6
        if 'min_inner_maxiter' not in addl_kwargs: addl_kwargs['min_inner_maxiter'] = 100
        if 'max_inner_maxiter' not in addl_kwargs: addl_kwargs['max_inner_maxiter'] = 100
        solution = _fmin_supersimplex(fn, x0, rel_outer_tol=tol, max_outer_iter=maxiter, callback=callback,
                                      printer=printer, **addl_kwargs)

    elif method == 'customcg':
        def fn_to_max(x):
            """ Function to maximize """
            f = fn(x); return -f if f is not None else None

        if jac is not None:
            def dfdx_and_bdflag(x):
                """ Returns derivative and boundary flag """
                j = -jac(x)
                bd = _np.zeros(len(j))  # never say fn is on boundary, since this is an analytic derivative
                return j, bd
        else:
            dfdx_and_bdflag = None

        # Note: even though we maximize, return value is negated to conform to min routines
        solution = fmax_cg(fn_to_max, x0, maxiter, tol, dfdx_and_bdflag, None)

    elif method == 'brute':
        ranges = [(0.0, 1.0)] * len(x0); Ns = 4  # params for 'brute' algorithm
        xmin = _spo.brute(fn, ranges, (), Ns)  # jac=jac
        #print "DEBUG: Brute fmin = ",fmin
        solution = _spo.minimize(fn, xmin, method="Nelder-Mead", options={}, tol=tol, callback=callback, jac=jac)

    elif method == 'basinhopping':
        def _basin_callback(x, f, accept):
            if callback is not None: callback(x, f=f, accepted=accept)
            if stopval is not None and f <= stopval:
                return True  # signals basinhopping to stop
            return False
        solution = _spo.basinhopping(fn, x0, niter=maxiter, T=2.0, stepsize=1.0,
                                     callback=_basin_callback, minimizer_kwargs={'method': "L-BFGS-B", 'jac': jac})

        #DEBUG -- follow with Nelder Mead to make sure basinhopping found a minimum. (It seems to)
        #print "DEBUG: running Nelder-Mead:"
        #opts = { 'maxfev': maxiter, 'maxiter': maxiter }
        #solution = _spo.minimize(fn, solution.x, options=opts, method="Nelder-Mead", tol=1e-8, callback=callback)
        #print "DEBUG: done: best f = ",solution.fun

        solution.success = True  # basinhopping doesn't seem to set this...

    elif method == 'swarm':
        solution = _fmin_particle_swarm(fn, x0, tol, maxiter, printer, popsize=1000)  # , callback = callback)

    elif method == 'evolve':
        solution = _fmin_evolutionary(fn, x0, num_generations=maxiter, num_individuals=500, printer=printer)

#    elif method == 'homebrew':
#      solution = fmin_homebrew(fn, x0, maxiter)

    else:
        #Set options for different algorithms
        opts = {'maxiter': maxiter, 'disp': False}
        if method == "BFGS": opts['gtol'] = tol  # gradient norm tolerance
        elif method == "L-BFGS-B": opts['gtol'] = opts['ftol'] = tol  # gradient norm and fractional y-tolerance
        elif method == "Nelder-Mead": opts['maxfev'] = maxfev  # max fn evals (note: ftol and xtol can also be set)

        if method in ("BFGS", "CG", "Newton-CG", "L-BFGS-B", "TNC", "SLSQP", "dogleg", "trust-ncg"):  # use jacobian
            solution = _spo.minimize(fn, x0, options=opts, method=method, tol=tol, callback=callback, jac=jac)
        else:
            solution = _spo.minimize(fn, x0, options=opts, method=method, tol=tol, callback=callback)

    return solution
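
# Usage sketch (module context assumed): scipy method names fall through to the
# final branch, so a smooth quadratic converges to its minimum at x = 1.
if __name__ == '__main__':
    import numpy as np
    sol = minimize(lambda x: float(np.sum((x - 1.0)**2)), np.zeros(3),
                   method='L-BFGS-B', tol=1e-10)
    assert sol.success and np.allclose(sol.x, 1.0, atol=1e-4)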
Example #8
    def gauge_propagate_confidence_region_factory(
            self,
            to_model_label,
            from_model_label='final iteration estimate',
            circuits_label='final',
            eps=1e-3,
            verbosity=0):
        """
        Propagates a confidence region among gauge-equivalent models.

        More specifically, this function propagates an existing "reference"
        confidence region for a Model "G0" to a new confidence region for a
        gauge-equivalent model "G1".

        When successful, a new confidence region factory is created for the
        `.models[to_model_label]` `Model` and `circuits_label` circuit
        list from the existing factory for `.models[from_model_label]`.

        Parameters
        ----------
        to_model_label : str
            The key into this `Estimate` object's `models` and `goparameters`
            dictionaries that identifies the final gauge-optimized result to
            create a factory for.  This gauge optimization must have begun at
            the "from" reference model, i.e., `models[from_model_label]` must
            equal (by Frobenius distance) `goparameters[to_model_label]['model']`.

        from_model_label : str, optional
            The key into this `Estimate` object's `models` dictionary
            that identifies the reference model.

        circuits_label : str, optional
            The key of the circuit list (within the parent `Results`'s
            `.circuit_lists` dictionary) that identifies the circuit
            list used by the old (&new) confidence region factories.

        eps : float, optional
            A small offset used for constructing finite-difference derivatives.
            Usually the default value is fine.

        verbosity : int, optional
            A non-negative integer indicating the amount of detail to print
            to stdout.

        Returns
        -------
        ConfidenceRegionFactory
            Note: this region is also stored internally and as such the return
            value of this function can often be ignored.
        """
        printer = _VerbosityPrinter.create_printer(verbosity)

        ref_model = self.models[from_model_label]
        goparams = self._gaugeopt_suite.gaugeopt_argument_dicts[to_model_label]
        goparams_list = [goparams] if hasattr(goparams, 'keys') else goparams
        start_model = goparams_list[0]['model'].copy() if (
            'model' in goparams_list[0]) else ref_model.copy()
        final_model = self.models[to_model_label].copy()

        gauge_group_els = []
        for gop in goparams_list:
            assert('_gaugeGroupEl' in gop), "To propagate a confidence " + \
                "region, goparameters must contain the gauge-group-element as `_gaugeGroupEl`"
            gauge_group_els.append(gop['_gaugeGroupEl'])

        assert(start_model.frobeniusdist(ref_model) < 1e-6), \
            "Gauge-opt starting point must be the 'from' (reference) Model"

        crf = self.confidence_region_factories.get(
            CRFkey(from_model_label, circuits_label), None)

        assert (
            crf
            is not None), "Initial confidence region factory doesn't exist!"
        assert (crf.has_hessian
                ), "Initial factory must contain a computed Hessian!"

        #Update hessian by TMx = d(diffs in current go'd model)/d(diffs in ref model)
        tmx = _np.empty((final_model.num_params, ref_model.num_params), 'd')
        v0, w0 = ref_model.to_vector(), final_model.to_vector()
        mdl = ref_model.copy()

        printer.log(" *** Propagating Hessian from '%s' to '%s' ***" %
                    (from_model_label, to_model_label))

        with printer.progress_logging(1):
            for icol in range(ref_model.num_params):
                v = v0.copy()
                v[icol] += eps  # dv is along iCol-th direction
                mdl.from_vector(v)
                for gauge_group_el in gauge_group_els:
                    mdl.transform_inplace(gauge_group_el)
                w = mdl.to_vector()
                dw = (w - w0) / eps
                tmx[:, icol] = dw
                printer.show_progress(icol,
                                      ref_model.num_params,
                                      prefix='Column: ')
                #,suffix = "; finite_diff = %g" % _np.linalg.norm(dw)

        #rank = _np.linalg.matrix_rank(TMx)
        #print("DEBUG: constructed TMx: rank = ", rank)

        # Hessian is gauge-transported via H -> TMx_inv^T * H * TMx_inv
        tmx_inv = _np.linalg.inv(tmx)
        new_hessian = _np.dot(tmx_inv.T, _np.dot(crf.hessian, tmx_inv))

        #Create a new confidence region based on the new hessian
        new_crf = _ConfidenceRegionFactory(self, to_model_label,
                                           circuits_label, new_hessian,
                                           crf.nonMarkRadiusSq)
        self.confidence_region_factories[CRFkey(to_model_label,
                                                circuits_label)] = new_crf
        printer.log(
            "   Successfully transported Hessian and ConfidenceRegionFactory.")

        return new_crf
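
# Standalone sketch of the transport above: build T = dw/dv column-by-column
# with finite differences of a (hypothetical) gauge map, then move the Hessian
# via H -> T_inv^T H T_inv, just as the method does.
import numpy as np

def demo_gauge_map(v):  # stand-in for the transform_inplace chain
    return np.array([2.0 * v[0] + v[1], v[1]])

eps, v0 = 1e-6, np.zeros(2)
w0 = demo_gauge_map(v0)
tmx = np.empty((2, 2))
for icol in range(2):
    v = v0.copy()
    v[icol] += eps  # dv along icol-th direction
    tmx[:, icol] = (demo_gauge_map(v) - w0) / eps

tmx_inv = np.linalg.inv(tmx)
hessian = np.eye(2)  # hypothetical reference-model Hessian
new_hessian = tmx_inv.T @ hessian @ tmx_inv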
Example #9
    def add_gaugeoptimized(self,
                           goparams,
                           model=None,
                           label=None,
                           comm=None,
                           verbosity=None):
        """
        Adds a gauge-optimized Model (computing it if needed) to this object.

        Parameters
        ----------
        goparams : dict or list
            A dictionary of gauge-optimization parameters, typically arguments
            to :func:`gaugeopt_to_target`, specifying how the gauge optimization
            was (or should be) performed.  When `model` is `None` (and this
            function computes the model internally) the keys and values of
            this dictionary must correspond to allowed arguments of
            :func:`gaugeopt_to_target`. By default, :func:`gaugeopt_to_target`'s
            first two arguments, the `Model` to optimize and the target,
            are taken to be `self.models['final iteration estimate']` and
            `self.models['target']`.  This argument can also be a *list* of
            such parameter dictionaries, which specifies a multi-stage gauge-
            optimization whereby the output of one stage is the input of the
            next.

        model : Model, optional
            The gauge-optimized model to store.  If None, then this model
            is computed by calling :func:`gaugeopt_to_target` with the contents
            of `goparams` as arguments as described above.

        label : str, optional
            A label for this gauge-optimized model, used as the key in
            this object's `models` and `goparameters` member dictionaries.
            If None, then the next available "go<X>", where <X> is a
            non-negative integer, is used as the label.

        comm : mpi4py.MPI.Comm, optional
            A default MPI communicator to use when one is not specified
            as the 'comm' element of/within `goparams`.

        verbosity : int, optional
             An integer specifying the level of detail printed to stdout
             during the calculations performed in this function.  If not
             None, this value will override any verbosity values set
             within `goparams`.

        Returns
        -------
        None
        """

        if label is None:
            i = 0
            while True:
                label = "go%d" % i
                i += 1
                if (label not in self._gaugeopt_suite.gaugeopt_argument_dicts) and \
                   (label not in self.models):
                    break

        goparams_list = [goparams] if hasattr(goparams, 'keys') else goparams
        ordered_goparams = []
        last_gs = None

        #Create a printer based on specified or maximum goparams
        # verbosity and default or existing comm.
        printer_comm = comm
        for gop in goparams_list:
            if gop.get('comm', None) is not None:
                printer_comm = gop['comm']
                break
        if verbosity is not None:
            max_vb = verbosity
        else:
            verbosities = [gop.get('verbosity', 0) for gop in goparams_list]
            max_vb = max([
                v.verbosity if isinstance(v, _VerbosityPrinter) else v
                for v in verbosities
            ])
        printer = _VerbosityPrinter.create_printer(max_vb, printer_comm)
        printer.log("-- Adding Gauge Optimized (%s) --" % label)

        for i, gop in enumerate(goparams_list):

            if model is not None:
                last_gs = model  # just use user-supplied result
            else:
                from ..algorithms import gaugeopt_to_target as _gaugeopt_to_target
                default_model = default_target_model = False
                gop = gop.copy()  # so we don't change the caller's dict
                if '_gaugeGroupEl' in gop: del gop['_gaugeGroupEl']

                printer.log("Stage %d:" % i, 2)
                if verbosity is not None:
                    gop['verbosity'] = printer - 1  # use common printer

                if comm is not None and 'comm' not in gop:
                    gop['comm'] = comm

                if last_gs:
                    gop["model"] = last_gs
                elif "model" not in gop:
                    if 'final iteration estimate' in self.models:
                        gop["model"] = self.models['final iteration estimate']
                        default_model = True
                    else:
                        raise ValueError(
                            "Must supply 'model' in 'goparams' argument")

                if "target_model" not in gop:
                    if 'target' in self.models:
                        gop["target_model"] = self.models['target']
                        default_target_model = True
                    else:
                        raise ValueError(
                            "Must supply 'target_model' in 'goparams' argument"
                        )

                if "maxiter" not in gop:
                    gop["maxiter"] = 100

                gop['return_all'] = True
                if isinstance(gop['model'], _ExplicitOpModel):
                    #only explicit models can be gauge optimized
                    _, gauge_group_el, last_gs = _gaugeopt_to_target(**gop)
                else:
                    #but still fill in results for other models (?)
                    gauge_group_el, last_gs = None, gop['model'].copy()

                gop['_gaugeGroupEl'] = gauge_group_el  # an output stored here for convenience

                #Don't store (and potentially serialize) model that we don't need to
                if default_model: del gop['model']
                if default_target_model: del gop['target_model']

            #sort the parameters by name for consistency
            ordered_goparams.append(
                _collections.OrderedDict([(k, gop[k])
                                          for k in sorted(list(gop.keys()))]))

        assert (last_gs is not None)
        self.models[label] = last_gs
        self._gaugeopt_suite.gaugeopt_argument_dicts[label] = ordered_goparams \
            if len(goparams_list) > 1 else ordered_goparams[0]
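
# Standalone sketch of the auto-labeling above: pick the first "go<X>" that is
# not already a gauge-opt key or a model key (the existing keys are hypothetical).
existing = {'go0', 'go1', 'final iteration estimate'}
i = 0
while True:
    label = "go%d" % i
    i += 1
    if label not in existing:
        break
print(label)  # -> 'go2'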
Example #10
def create_lsgst_circuit_lists(op_label_src,
                               prep_fiducials,
                               meas_fiducials,
                               germs,
                               max_lengths,
                               fid_pairs=None,
                               trunc_scheme="whole germ powers",
                               nest=True,
                               keep_fraction=1,
                               keep_seed=None,
                               include_lgst=True,
                               op_label_aliases=None,
                               circuit_rules=None,
                               dscheck=None,
                               action_if_missing="raise",
                               germ_length_limits=None,
                               verbosity=0):
    """
    Create a set of long-sequence GST circuit lists (including structure).

    Constructs a series (a list) of circuit structures used by long-sequence
    GST (LSGST) algorithms.  If `include_lgst == True` then the starting
    structure contains the LGST strings, otherwise the starting structure is
    empty.  For each nonzero element of `max_lengths`, call it L, a set of
    circuits is created with the form:

    Case: trunc_scheme == 'whole germ powers':
      prep_fiducial + pygsti.circuits.repeat_with_max_length(germ,L) + meas_fiducial

    Case: trunc_scheme == 'truncated germ powers':
      prep_fiducial + pygsti.circuits.repeat_and_truncate(germ,L) + meas_fiducial

    Case: trunc_scheme == 'length as exponent':
      prep_fiducial + germ^L + meas_fiducial

    If nest == True, the above set is iteratively *added* (w/duplicates
    removed) to the current circuit structure to form a final structure for
    the given L.  This results in successively larger structures, each of which
    contains all the elements of previous-L structures.  If nest == False then
    the above set *is* the final structure for the given L.

    Parameters
    ----------
    op_label_src : list or Model
        List of operation labels to determine needed LGST circuits.  If a Model,
        then the model's gate and instrument labels are used. Only
        relevant when `include_lgst == True`.

    prep_fiducials : list of Circuits
        List of the preparation fiducial circuits, which follow state
        preparation.

    meas_fiducials : list of Circuits
        List of the measurement fiducial circuits, which precede
        measurement.

    germs : list of Circuits
        List of the germ circuits.

    max_lengths : list of ints
        List of maximum lengths. A zero value in this list has special
        meaning, and corresponds to the LGST circuits.

    fid_pairs : list of 2-tuples or dict, optional
        Specifies a subset of all fiducial-circuit pairs (prepCirc, measCirc)
        to be used in the circuit lists.  If a list, each element of
        fid_pairs is a (iPrep, iMeas) 2-tuple of integers, each indexing a
        circuit within `prep_fiducials` and `meas_fiducials`, respectively,
        so that prepCirc = prep_fiducials[iPrep] and measCirc =
        meas_fiducials[iMeas].  If a dictionary, keys are germs (elements
        of `germs`) and values are lists of 2-tuples specifying the pairs
        to use for that germ.

    trunc_scheme : str, optional
        Truncation scheme used to interpret what the list of maximum lengths
        means. If unsure, leave as default. Allowed values are:

        - 'whole germ powers' -- germs are repeated an integer number of
          times such that the length is less than or equal to the max.
        - 'truncated germ powers' -- repeated germ string is truncated
          to be exactly equal to the max (partial germ at end is ok).
        - 'length as exponent' -- max. length is instead interpreted
          as the germ exponent (the number of germ repetitions).

    nest : boolean, optional
        If True, the returned circuit lists are "nested", meaning
        that each successive list of circuits contains all the gate
        strings found in previous lists (and usually some additional
        new ones).  If False, then the returned string list for maximum
        length == L contains *only* those circuits specified in the
        description above, and *not* those for previous values of L.

    keep_fraction : float, optional
        The fraction of fiducial pairs selected for each germ-power base
        string.  The default includes all fiducial pairs.  Note that
        for each germ-power the selected pairs are *different* random
        sets of all possible pairs (unlike fid_pairs, which specifies the
        *same* fiducial pairs for *all* same-germ base strings).  If
        fid_pairs is used in conjunction with keep_fraction, the pairs
        specified by fid_pairs are always selected, and any additional
        pairs are randomly selected.

    keep_seed : int, optional
        The seed used for random fiducial pair selection (only relevant
        when keep_fraction < 1).

    include_lgst : boolean, optional
        If true, then the starting list (only applicable when
        `nest == True`) is the list of LGST strings rather than the
        empty list.  This means that when `nest == True`, the LGST
        circuits will be included in all the lists.

    op_label_aliases : dictionary, optional
        Dictionary whose keys are operation label "aliases" and whose values are tuples
        corresponding to what that operation label should be expanded into before querying
        the dataset.  This information is stored within the returned circuit
        structures.  Defaults to the empty dictionary (no aliases defined),
        e.g. `op_label_aliases['Gx^3'] = ('Gx','Gx','Gx')`.

    circuit_rules : list, optional
        A list of `(find,replace)` 2-tuples which specify string replacement
        rules.  Both `find` and `replace` are tuples of operation labels
        (or `Circuit` objects).

    dscheck : DataSet, optional
        A data set which is checked for each of the generated circuits. When
        a generated circuit is missing from this `DataSet`, action is taken
        according to `action_if_missing`.

    action_if_missing : {"raise","drop"}, optional
        The action to take when a generated circuit is missing from
        `dscheck` (only relevant when `dscheck` is not None).  "raise" causes
        a ValueError to be raised; "drop" causes the missing circuits to be
        dropped from the returned set.

    germ_length_limits : dict, optional
        A dictionary limiting the max-length values used for specific germs.
        Keys are germ circuits and values are integers.  For example, if
        this argument is `{('Gx',): 4}` and `max_lengths = [1,2,4,8,16]`,
        then the germ `('Gx',)` is only repeated using max-lengths of 1, 2,
        and 4 (whereas other germs use all the values in `max_lengths`).

    verbosity : int, optional
        The level of output to print to stdout.

    Returns
    -------
    list of PlaquetteGridCircuitStructure objects
        The i-th object corresponds to a circuit list containing repeated
        germs limited to length max_length_list[i].  If nest == True, then
        repeated germs limited to previous max-lengths are also included.
        Note that a "0" maximum-length corresponds to the LGST strings.
    """

    # Ensure circuits have computed their string reps so that circuit
    # addition below produces "nice" strings for printing (accessing .str
    # computes and caches the string representation)
    for c in list(germs) + list(prep_fiducials) + list(meas_fiducials):
        c.str

    def filter_ds(circuits, ds, missing_lgst):
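        """Return the subset of `circuits` present in `ds`, appending the missing ones to `missing_lgst`."""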
        if ds is None: return circuits[:]
        filtered_circuits = []
        for opstr in circuits:
            trans_opstr = _gsc.translate_circuit(opstr, op_label_aliases)
            if trans_opstr not in ds:
                missing_lgst.append(opstr)
            else:
                filtered_circuits.append(opstr)
        return filtered_circuits

    def add_to_plaquettes(pkey_dict, plaquette_dict, base_circuit, maxlen,
                          germ, power, fidpair_indices, ds, missing_list):
        """ Only create a new plaquette for a new base circuit; otherwise add to existing """
        if ds is not None:
            inds_to_remove = []
            for k, (i, j) in enumerate(fidpair_indices):
                el = prep_fiducials[i] + base_circuit + meas_fiducials[j]
                trans_el = _gsc.translate_circuit(el, op_label_aliases)
                if trans_el not in ds:
                    missing_list.append((prep_fiducials[i], germ, maxlen,
                                         meas_fiducials[j], el))
                    inds_to_remove.append(k)

            if len(inds_to_remove) > 0:
                fidpair_indices = fidpair_indices[:]  # copy
                for i in reversed(inds_to_remove):
                    del fidpair_indices[i]

        fidpairs = _collections.OrderedDict([((j, i), (prep_fiducials[i],
                                                       meas_fiducials[j]))
                                             for i, j in fidpair_indices])

        if base_circuit not in plaquette_dict:
            pkey_dict[base_circuit] = (maxlen, germ)
            if power is None:  # no well-defined power, so just make a fiducial-pair plaquette
                plaq = _FiducialPairPlaquette(base_circuit, fidpairs,
                                              len(meas_fiducials),
                                              len(prep_fiducials),
                                              op_label_aliases, circuit_rules)
            else:
                plaq = _GermFiducialPairPlaquette(germ, power, fidpairs,
                                                  len(meas_fiducials),
                                                  len(prep_fiducials),
                                                  op_label_aliases,
                                                  circuit_rules)
            plaquette_dict[base_circuit] = plaq
        else:
            #Add to existing plaquette (assume we don't need to change number of rows/cols of plaquette)
            existing_plaq = plaquette_dict[base_circuit]
            existing_circuits = set(existing_plaq.circuits)
            new_fidpairs = existing_plaq.fidpairs.copy()
            for (j, i), (prep, meas) in fidpairs.items():
                if prep + base_circuit + meas not in existing_circuits:
                    new_fidpairs[(j, i)] = (prep, meas)
            if power is None:  # no well-defined power, so just make a fiducial-pair plaquette
                plaquette_dict[base_circuit] = _FiducialPairPlaquette(
                    base_circuit, new_fidpairs, len(meas_fiducials),
                    len(prep_fiducials), op_label_aliases, circuit_rules)
            else:
                plaquette_dict[base_circuit] = _GermFiducialPairPlaquette(
                    germ, power, new_fidpairs, len(meas_fiducials),
                    len(prep_fiducials), op_label_aliases, circuit_rules)

    printer = _VerbosityPrinter.create_printer(verbosity)
    if germ_length_limits is None: germ_length_limits = {}

    if nest and include_lgst and len(max_lengths) > 0 and max_lengths[0] == 0:
        _warnings.warn(
            "Setting the first element of a max-length list to zero"
            " to ensure the inclusion of LGST circuits has been"
            " replaced by the `include_lgst` parameter, which"
            " defaults to `True`.  In most cases you can simply"
            " remove the leading 0 and start your max-length"
            " list at 1.")

    from pygsti.processors.processorspec import QuditProcessorSpec as _QuditProcessorSpec
    from pygsti.models.model import OpModel as _OpModel
    if isinstance(op_label_src, _QuditProcessorSpec):
        opLabels = op_label_src.primitive_op_labels
    elif isinstance(op_label_src, _OpModel):
        opLabels = op_label_src.primitive_op_labels + op_label_src.primitive_instrument_labels
    else:
        opLabels = op_label_src

    lgst_list = _gsc.create_lgst_circuits(prep_fiducials, meas_fiducials,
                                          opLabels)
    if circuit_rules is not None:
        lgst_list = _manipulate_circuits(lgst_list, circuit_rules)

    allPossiblePairs = list(
        _itertools.product(range(len(prep_fiducials)),
                           range(len(meas_fiducials))))

    if keep_fraction < 1.0:
        rndm = _rndm.RandomState(keep_seed)  # ok if seed is None
        nPairs = len(prep_fiducials) * len(meas_fiducials)
        nPairsToKeep = int(round(float(keep_fraction) * nPairs))
    else:
        rndm = None

    fidpair_germ_power_keys = False
    if isinstance(fid_pairs, dict) or hasattr(fid_pairs, "keys"):
        fidPairDict = fid_pairs  # assume a dict of per-germ pairs
        if isinstance(list(fidPairDict.keys())[0], tuple):
            fidpair_germ_power_keys = True
    elif fid_pairs is not None:  # assume fid_pairs is a list of (i, j) pairs
        fidPairDict = {germ: fid_pairs for germ in germs}
    else:
        fidPairDict = None

    truncFn = _get_trunc_function(trunc_scheme)
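    # truncFn(germ, L) returns the repeated-germ "base" circuit for the chosen
    # truncation scheme, e.g. under 'whole germ powers' the germ repeated
    # floor(L / len(germ)) times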

    line_labels = germs[0].line_labels if len(germs) > 0 \
        else (prep_fiducials + meas_fiducials)[0].line_labels  # if the germ list is empty, base line_labels off of the fiducials

    empty_germ = _Circuit((), line_labels)
    if include_lgst and empty_germ not in germs: germs = [empty_germ] + germs

    if nest:
        #keep track of running quantities used to build circuit structures
        running_plaquette_keys = {}  # base-circuit => (maxlength, germ) key for final plaquette dict
        running_plaquettes = _collections.OrderedDict()  # keeps a consistent ordering in the produced circuit lists
        running_unindexed = []
        running_maxLens = []

    lsgst_structs = []  # list of circuit structures to return
    missing_list = []  # keep track of missing data if dscheck is given
    missing_lgst = []  # keep track of missing LGST circuits separately (for better error msgs)
    tot_circuits = 0

    if include_lgst and len(max_lengths) == 0:
        # Then we won't add LGST circuits during first iteration of loop below, so add them now
        unindexed = filter_ds(lgst_list, dscheck, missing_lgst)
        lsgst_structs.append(
            _PlaquetteGridCircuitStructure(
                {}, [],
                germs,
                "L",
                "germ",
                unindexed,
                op_label_aliases,
                circuit_weights_dict=None,
                additional_circuits_location='start',
                name=None))

    for i, maxLen in enumerate(max_lengths):

        if nest:  # add to running_* variables and pinch off a copy later on
            running_maxLens.append(maxLen)
            pkey = running_plaquette_keys
            plaquettes = running_plaquettes
            maxLens = running_maxLens
            unindexed = running_unindexed
        else:  # create a new circuit structure for just this maxLen
            pkey = {}  # base-circuit => (maxlength, germ) key for final plaquette dict
            plaquettes = _collections.OrderedDict()
            maxLens = [maxLen]
            unindexed = []

        if maxLen == 0:
            # Special LGST case
            unindexed.extend(filter_ds(lgst_list, dscheck, missing_lgst))  # overlap w/ plaquettes ok (removed later)
        else:
            if include_lgst and i == 0:  # first maxlen, so add LGST seqs as empty germ
                #Add LGST circuits as an empty-germ plaquette (and as unindexed circuits to include everything)
                #Note: no fiducial-pair reduction (FPR) is applied to the LGST strings
                add_to_plaquettes(pkey, plaquettes, empty_germ, maxLen,
                                  empty_germ, 1, allPossiblePairs, dscheck,
                                  missing_list)
                unindexed.extend(filter_ds(lgst_list, dscheck, missing_lgst))  # overlap w/ plaquettes ok (removed later)
            #Typical case: germs repeated up to maxLen using truncFn
            for ii, germ in enumerate(germs):
                if germ == empty_germ: continue  # handled specially above
                if maxLen > germ_length_limits.get(germ, 1e100): continue

                germ_power = truncFn(germ, maxLen)
                power = len(germ_power) // len(germ)  # this *could* be the germ power
                if germ_power != germ * power:
                    power = None  # signals that there is no well-defined power

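                # power == 0 (with a nonempty germ) means the germ doesn't fit
                # within maxLen at all, so it contributes no circuits at this L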
                if power == 0 and len(germ) != 0:
                    continue

                # Switch on fidpair dicts with germ or (germ, L) keys
                key = germ
                if fidpair_germ_power_keys:
                    key = (germ, maxLen)

                if rndm is None:
                    fiducialPairsThisIter = fidPairDict.get(key, allPossiblePairs) \
                        if fidPairDict is not None else allPossiblePairs
                elif fidPairDict is not None:
                    pair_indx_tups = fidPairDict.get(key, allPossiblePairs)
                    remainingPairs = [(i, j)
                                      for i in range(len(prep_fiducials))
                                      for j in range(len(meas_fiducials))
                                      if (i, j) not in pair_indx_tups]
                    nPairsRemaining = len(remainingPairs)
                    nPairsToChoose = nPairsToKeep - len(pair_indx_tups)
                    nPairsToChoose = max(0, min(nPairsToChoose,
                                                nPairsRemaining))
                    assert (0 <= nPairsToChoose <= nPairsRemaining)
                    # FUTURE: issue warnings when clipping nPairsToChoose?

                    fiducialPairsThisIter = fidPairDict[key] + \
                        [remainingPairs[k] for k in
                         sorted(rndm.choice(nPairsRemaining, nPairsToChoose,
                                            replace=False))]

                else:  # rndm is not None and fidPairDict is None
                    assert (nPairsToKeep <= nPairs)  # keep_fraction must be <= 1.0
                    fiducialPairsThisIter = \
                        [allPossiblePairs[k] for k in
                         sorted(rndm.choice(nPairs, nPairsToKeep, replace=False))]

                add_to_plaquettes(pkey, plaquettes, germ_power, maxLen, germ,
                                  power, fiducialPairsThisIter, dscheck,
                                  missing_list)

        if nest:
            # pinch off a copy of variables that were left as the running variables above
            maxLens = maxLens[:]
            plaquettes = plaquettes.copy()
            unindexed = unindexed[:]

        lsgst_structs.append(
            _PlaquetteGridCircuitStructure(
                _collections.OrderedDict([
                    (pkey[base], plaq) for base, plaq in plaquettes.items()
                ]),
                maxLens,
                germs,
                "L",
                "germ",
                unindexed,
                op_label_aliases,
                circuit_weights_dict=None,
                additional_circuits_location='start',
                name=None))
        tot_circuits += len(lsgst_structs[-1])  # only relevant for the non-nested case

    if nest:  # the tot_circuits computation above overcounts -- just take the circuit count of the final stage
        tot_circuits = len(lsgst_structs[-1]) if len(lsgst_structs) > 0 else 0

    printer.log("--- Circuit Creation ---", 1)
    printer.log(" %d circuits created" % tot_circuits, 2)
    #print("Total Number of Circuits: ", tot_circuits)
    if dscheck:
        printer.log(
            " Dataset has %d entries: %d utilized, %d requested circuits were missing"
            % (len(dscheck), tot_circuits, len(missing_list)), 2)
    if len(missing_list) > 0 or len(missing_lgst) > 0:
        MAX = 10  # maximum number of missing-circuit messages to display
        missing_msgs = [("Prep: %s, Germ: %s, L: %d, Meas: %s, Circuit: %s" % tup) for tup in missing_list[0:MAX]] \
            + ["LGST Seq: %s" % opstr for opstr in missing_lgst[0:MAX]]
        if len(missing_list) > MAX or len(missing_lgst) > MAX:
            missing_msgs.append(" ... (additional missing circuits not shown) ... ")
        printer.log("The following circuits were missing from the dataset:", 4)
        printer.log("\n".join(missing_msgs), 4)
        if action_if_missing == "raise":
            raise ValueError("Missing data! %d missing circuits" %
                             (len(missing_list) + len(missing_lgst)))
        elif action_if_missing == "drop":
            pass
        else:
            raise ValueError("Invalid `action_if_missing` argument: %s" %
                             action_if_missing)

    for i, struct in enumerate(lsgst_structs):
        if nest:
            assert (struct.xs == max_lengths[0:i + 1])  # make sure the lengths are correct!
        else:
            assert (struct.xs == max_lengths[i:i + 1])  # make sure the lengths are correct!

    return lsgst_structs
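
# Usage sketch (illustrative; the `smq1Q_XYI` modelpack used below is an
# assumption for demonstration and is not part of this module):
#
#     from pygsti.modelpacks import smq1Q_XYI
#     structs = create_lsgst_circuit_lists(
#         smq1Q_XYI.target_model(),
#         smq1Q_XYI.prep_fiducials(), smq1Q_XYI.meas_fiducials(),
#         smq1Q_XYI.germs(), max_lengths=[1, 2, 4, 8], verbosity=1)
#     # structs[i] is a PlaquetteGridCircuitStructure for max-length
#     # max_lengths[i]; with nest=True it also contains all shorter-L circuits.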
Example #11
def create_idletomography_report(results,
                                 filename,
                                 title="auto",
                                 ws=None,
                                 auto_open=False,
                                 link_to=None,
                                 brevity=0,
                                 advanced_options=None,
                                 verbosity=1):
    """
    Creates an Idle Tomography report, summarizing the results of running
    idle tomography on a data set.

    Parameters
    ----------
    results : IdleTomographyResults
        An object which represents the set of results from an idle tomography
        run, typically obtained from running :func:`do_idle_tomography` OR a
        dictionary of such objects, representing multiple idle tomography runs
        to be compared (typically all with *different* data sets). The keys of
        this dictionary are used to label different data sets that are
        selectable in the report.

    filename : string, optional
       The output filename where the report file(s) will be saved.  If
       None, then no output file is produced (but returned Workspace
       still caches all intermediate results).

    title : string, optional
       The title of the report.  "auto" causes a random title to be
       generated (which you may or may not like).

    ws : Workspace, optional
        The workspace used as a scratch space for performing the calculations
        and visualizations required for this report.  If you're creating
        multiple reports with similar tables, plots, etc., it may boost
        performance to use a single Workspace for all the report generation.

    auto_open : bool, optional
        If True, automatically open the report in a web browser after it
        has been generated.

    link_to : list, optional
        If not None, a list of one or more items from the set
        {"tex", "pdf", "pkl"} indicating whether or not to
        create and include links to Latex, PDF, and Python pickle
        files, respectively.  "tex" creates latex source files for
        tables; "pdf" renders PDFs of tables and plots; "pkl" creates
        Python versions of plots (pickled python data) and tables (pickled
        pandas DataFrames).

    advanced_options : dict, optional
        A dictionary of advanced options for which the default values are
        usually fine.  The possible keys of `advanced_options` are:

        - connected : bool, optional
            Whether output HTML should assume an active internet connection.  If
            True, then the resulting HTML file size will be reduced because it
            will link to web resources (e.g. CDN libraries) instead of embedding
            them.

        - cachefile : str, optional
            filename with cached workspace results

        - precision : int or dict, optional
            The amount of precision to display.  A dictionary with keys
            "polar", "sci", and "normal" can separately specify the
            precision for complex angles, numbers in scientific notation, and
            everything else, respectively.  If an integer is given, this
            same value is used for all precision types.  If None, then
            `{'normal': 6, 'polar': 3, 'sci': 0}` is used.

        - resizable : bool, optional
            Whether plots and tables are made with resize handles and can be
            resized within the report.

        - autosize : {'none', 'initial', 'continual'}
            Whether tables and plots should be resized, either initially --
            i.e. just upon first rendering (`"initial"`) -- or whenever
            the browser window is resized (`"continual"`); `"none"` disables
            automatic resizing.

    verbosity : int, optional
       How much detail to send to stdout.

    Returns
    -------
    Workspace
        The workspace object used to create the report
    """
    tStart = _time.time()
    printer = _VerbosityPrinter.create_printer(verbosity)

    if advanced_options is None: advanced_options = {}
    precision = advanced_options.get('precision', None)
    cachefile = advanced_options.get('cachefile', None)
    connected = advanced_options.get('connected', False)
    resizable = advanced_options.get('resizable', True)
    autosize = advanced_options.get('autosize', 'initial')
    mdl_sim = advanced_options.get('simulator', None)  # a model

    if filename and filename.endswith(".pdf"):
        fmt = "latex"
    else:
        fmt = "html"

    printer.log('*** Creating workspace ***')
    if ws is None: ws = _ws.Workspace(cachefile)

    if title is None or title == "auto":
        if filename is not None:
            autoname = _autotitle.generate_name()
            title = "Idle Tomography Report for " + autoname
            _warnings.warn(
                ("You should really specify `title=` when generating reports,"
                 " as this makes it much easier to identify them later on.  "
                 "Since you didn't, pyGSTi has generated a random one"
                 " for you: '{}'.").format(autoname))
        else:
            title = "N/A"  # No title - but it doesn't matter since filename is None

    results_dict = results if isinstance(results, dict) else {"unique": results}

    render_math = True

    qtys = {}  # stores strings to be inserted into report template

    def addqty(b, name, fn, *args, **kwargs):
        """Adds an item to the qtys dict within a timed block"""
        if b is None or brevity < b:
            with _timed_block(name,
                              format_str='{:45}',
                              printer=printer,
                              verbosity=2):
                qtys[name] = fn(*args, **kwargs)
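    # Note: addqty(None, ...) always adds its quantity, while addqty(b, ...)
    # adds it only when brevity < b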

    qtys['title'] = title
    qtys['date'] = _time.strftime("%B %d, %Y")

    pdfInfo = [('Author', 'pyGSTi'), ('Title', title), ('Keywords', 'GST'),
               ('pyGSTi Version', _version.version)]
    qtys['pdfinfo'] = _merge.to_pdfinfo(pdfInfo)

    # Generate Switchboard
    printer.log("*** Generating switchboard ***")

    #Create master switchboard
    switchBd, dataset_labels = \
        _create_switchboard(ws, results_dict)
    if fmt == "latex" and (len(dataset_labels) > 1):
        raise ValueError("PDF reports can only show a *single* dataset,"
                         " estimate, and gauge optimization.")

    # Generate Tables
    printer.log("*** Generating tables ***")

    multidataset = bool(len(dataset_labels) > 1)

    if fmt == "html":
        qtys['topSwitchboard'] = switchBd

    results = switchBd.results
    A = None  # no brevity restriction: always display; for "Summary"- & "Help"-tab figs

    #Brevity key:
    # TODO - everything is always displayed for now

    addqty(A, 'intrinsicErrorsTable', ws.IdleTomographyIntrinsicErrorsTable,
           results)
    addqty(A, 'observedRatesTable', ws.IdleTomographyObservedRatesTable,
           results, 20, mdl_sim)  # HARDCODED - show only top 20 rates

    # Generate plots
    printer.log("*** Generating plots ***")

    toggles = {}
    toggles['CompareDatasets'] = False  # not comparable by default
    if multidataset:
        #check if data sets are comparable (if they have the same sequences)
        comparable = True
        gstrCmpList = list(results_dict[dataset_labels[0]].dataset.keys())  # maybe use circuit_lists['final']?
        for dslbl in dataset_labels:
            if list(results_dict[dslbl].dataset.keys()) != gstrCmpList:
                _warnings.warn(
                    "Not all data sets are comparable - no comparisons will be made.")
                comparable = False
                break

        if comparable:
            #initialize a new "dataset comparison switchboard"
            dscmp_switchBd = ws.Switchboard(["Dataset1", "Dataset2"],
                                            [dataset_labels, dataset_labels],
                                            ["buttons", "buttons"], [0, 1])
            dscmp_switchBd.add("dscmp", (0, 1))
            dscmp_switchBd.add("dscmp_gss", (0, ))
            dscmp_switchBd.add("refds", (0, ))

            for d1, dslbl1 in enumerate(dataset_labels):
                dscmp_switchBd.dscmp_gss[d1] = results_dict[dslbl1].circuit_structs['final']
                dscmp_switchBd.refds[d1] = results_dict[dslbl1].dataset  # only used for the number of spam labels below

            all_dsComps = dict()
            indices = [(i, j) for i in range(len(dataset_labels))
                       for j in range(len(dataset_labels))]

            for d1, d2 in indices:
                dslbl1 = dataset_labels[d1]
                dslbl2 = dataset_labels[d2]
                ds1 = results_dict[dslbl1].dataset
                ds2 = results_dict[dslbl2].dataset
                all_dsComps[(d1, d2)] = _DataComparator([ds1, ds2], ds_names=[dslbl1, dslbl2])
                dscmp_switchBd.dscmp[d1, d2] = all_dsComps[(d1, d2)]

            qtys['dscmpSwitchboard'] = dscmp_switchBd
            addqty(4, 'ds_comparison_summary', ws.DatasetComparisonSummaryPlot,
                   dataset_labels, all_dsComps)
            addqty(4,
                   'ds_comparison_histogram',
                   ws.ColorBoxPlot,
                   'dscmp',
                   dscmp_switchBd.dscmp_gss,
                   dscmp_switchBd.refds,
                   None,
                   dscomparator=dscmp_switchBd.dscmp,
                   typ="histogram")
            addqty(1,
                   'ds_comparison_box_plot',
                   ws.ColorBoxPlot,
                   'dscmp',
                   dscmp_switchBd.dscmp_gss,
                   dscmp_switchBd.refds,
                   None,
                   dscomparator=dscmp_switchBd.dscmp)
            toggles['CompareDatasets'] = True
        else:
            toggles['CompareDatasets'] = False  # not comparable!
    else:
        toggles['CompareDatasets'] = False

    if filename is not None:
        if True:  # comm is None or comm.Get_rank() == 0:
            # 3) populate template file => report file
            printer.log("*** Merging into template file ***")

            if fmt == "html":
                if filename.endswith(".html"):
                    _merge.merge_jinja_template(
                        qtys,
                        filename,
                        template_dir='~idletomography_html_report',
                        auto_open=auto_open,
                        precision=precision,
                        link_to=link_to,
                        connected=connected,
                        toggles=toggles,
                        render_math=render_math,
                        resizable=resizable,
                        autosize=autosize,
                        verbosity=printer)
                else:
                    _merge.merge_jinja_template_dir(
                        qtys,
                        filename,
                        template_dir='~idletomography_html_report',
                        auto_open=auto_open,
                        precision=precision,
                        link_to=link_to,
                        connected=connected,
                        toggles=toggles,
                        render_math=render_math,
                        resizable=resizable,
                        autosize=autosize,
                        verbosity=printer)

            elif fmt == "latex":
                raise NotImplementedError(
                    "No PDF version of this report is available yet.")
                templateFile = "idletomography_pdf_report.tex"
                base = _os.path.splitext(filename)[0]  # no extension
                _merge.merge_latex_template(qtys, templateFile, base + ".tex",
                                            toggles, precision, printer)

                # compile report latex file into PDF
                cmd = _ws.WorkspaceOutput.default_render_options.get(
                    'latex_cmd', None)
                flags = _ws.WorkspaceOutput.default_render_options.get(
                    'latex_flags', [])
                assert cmd, "Cannot render PDF documents: no `latex_cmd` render option."
                printer.log(
                    "Latex file(s) successfully generated.  Attempting to compile with %s..."
                    % cmd)
                _merge.compile_latex_report(base, [cmd] + flags, printer,
                                            auto_open)
            else:
                raise ValueError("Unrecognized format: %s" % fmt)
    else:
        printer.log(
            "*** NOT Merging into template file (filename is None) ***")
    printer.log("*** Report Generation Complete!  Total time %gs ***" %
                (_time.time() - tStart))

    return ws
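
# Usage sketch (illustrative; `idt_results` stands in for an
# IdleTomographyResults object produced by a prior idle-tomography run):
#
#     ws = create_idletomography_report(idt_results, "idle_report.html",
#                                       title="Idle tomography example",
#                                       link_to=("pkl",), verbosity=2)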