Example #1
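# These excerpts are from pyGSTi, whose modules use relative imports.  A
# representative set of the aliases used throughout (an assumption about the
# exact paths; other aliases like _mpit, _mt and _fas are pyGSTi-internal
# tools imported the same way):
import numpy as _np
import collections as _collections
from pygsti.tools import slicetools as _slct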
def _decompose_gpindices(parent_gpindices, sibling_gpindices):
    """
    Maps `sibling_gpindices`, which index the same space as `parent_gpindices`,
    into a new slice or array of indices that gives the indices into
    `parent_gpindices` which result in `sibling_gpindices` (this requires that
    `sibling_gpindices` lie within `parent_gpindices`).

    Essentially:
    `sibling_gpindices = parent_gpindices[returned_gpindices]`
    """
    if parent_gpindices is None or sibling_gpindices is None: return None
    if isinstance(parent_gpindices, slice):
        start, stop = parent_gpindices.start, parent_gpindices.stop
        assert(parent_gpindices.step is None), "No support for nontrivial step size yet"

        if isinstance(sibling_gpindices, slice):
            if sibling_gpindices.start == sibling_gpindices.stop == 0:  # "null slice"
                return slice(0, 0, None)  # ==> just return null slice
            assert(start <= sibling_gpindices.start and sibling_gpindices.stop <= stop), \
                "Sibling indices (%s) must be a sub-slice of parent indices (%s)!" % (
                    str(sibling_gpindices), str(parent_gpindices))
            return _slct.shift(sibling_gpindices, -start)
        else:  # sibling_gpindices is an index array
            return sibling_gpindices - start  # numpy "shift"

    else:  # parent_gpindices is an index array
        sibInds = _slct.indices(sibling_gpindices) \
            if isinstance(sibling_gpindices, slice) else sibling_gpindices
        parent_lookup = {j: i for i, j in enumerate(parent_gpindices)}
        return _np.array([parent_lookup[j] for j in sibInds], _np.int64)
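# A minimal usage sketch with hypothetical index values, exercising both the
# slice and index-array branches of _decompose_gpindices and checking the
# documented identity sibling_gpindices == parent_gpindices[returned]:
parent = slice(10, 20)                             # parameters 10..19
rel = _decompose_gpindices(parent, slice(12, 15))  # -> slice(2, 5)
assert _np.array_equal(_np.arange(10, 20)[rel], _np.arange(12, 15))

parent_arr = _np.array([3, 7, 11, 42])
rel_arr = _decompose_gpindices(parent_arr, _np.array([7, 42]))  # -> [1, 3]
assert _np.array_equal(parent_arr[rel_arr], [7, 42])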
Example #2
    def create_elementary_errorgen_space(self, op_elem_errgen_labels):
        """
        Construct a matrix whose column space spans the given list of elementary error generators.

        Parameters
        ----------
        op_elem_errgen_labels : iterable
            A list of `(operation_label, elementary_error_generator_label)` tuples, where
            `operation_label` is one of the primitive operation labels in `self.primitive_op_labels`
            and `elementary_error_generator_label` is a :class:`GlobalElementaryErrorgenLabel`
            object.

        Returns
        -------
        numpy.ndarray
            A two-dimensional array of shape `(self.errorgen_space_dim, len(op_elem_errgen_labels))`.
            Columns correspond to elements of `op_elem_errgen_labels` and the rowspace is the
            full elementary error generator space of this FOGI analysis.
        """
        lbl_to_index = {}
        for op_label in self.primitive_op_labels:
            elem_errgen_lbls = self.elem_errorgen_labels_by_op[op_label]
            elem_errgen_indices = _slct.indices(self.op_errorgen_indices[op_label])
            assert(len(elem_errgen_indices) == len(elem_errgen_lbls))
            lbl_to_index.update({(op_label, lbl): index for lbl, index in zip(elem_errgen_lbls, elem_errgen_indices)})

        ret = _np.zeros((self.fogi_directions.shape[0], len(op_elem_errgen_labels)))
        for i, lbl in enumerate(op_elem_errgen_labels):
            ret[lbl_to_index[lbl], i] = 1.0
        return ret
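# The core pattern above, reduced to a standalone miniature with hypothetical
# labels and dimension (plain tuples, not pyGSTi label objects): build a
# matrix whose columns are unit vectors picking out selected rows.
lbl_to_index = {('Gx', 'H_X'): 0, ('Gx', 'S_X'): 1, ('Gy', 'H_Y'): 2}
requested = [('Gy', 'H_Y'), ('Gx', 'H_X')]
mat = _np.zeros((3, len(requested)))
for i, lbl in enumerate(requested):
    mat[lbl_to_index[lbl], i] = 1.0
# mat.T @ v extracts the requested components of a length-3 vector v, and
# the column space of mat spans the selected elementary error generators.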
Example #3
def mapfill_timedep_dterms(fwdsim, array_to_fill, dest_indices,
                           dest_param_indices, num_outcomes, layout_atom,
                           dataset_rows, fillfn, wrt_slice, comm):

    eps = 1e-7  # finite-difference step size (currently hardcoded)

    #Compute finite difference derivatives, one parameter at a time.
    param_indices = range(fwdsim.model.num_params) if (
        wrt_slice is None) else _slct.indices(wrt_slice)

    nEls = layout_atom.num_elements
    vals = _np.empty(nEls, 'd')
    vals2 = _np.empty(nEls, 'd')
    assert (
        layout_atom.cache_size == 0
    )  # so all elements have None as start and remainder[0] is a prep label

    orig_vec = fwdsim.model.to_vector().copy()
    fwdsim.model.from_vector(
        orig_vec, close=False)  # ensure we call with close=False first

    fillfn(vals, slice(0, nEls), num_outcomes, layout_atom, dataset_rows, comm)

    all_slices, my_slice, owners, subComm = \
        _mpit.distribute_slice(slice(0, len(param_indices)), comm)

    my_param_indices = param_indices[my_slice]
    st = my_slice.start  # offset where my_param_indices' results get placed into dpr_cache

    #Get a map from global parameter indices to the desired
    # final index within dpr_cache
    iParamToFinal = {i: st + ii for ii, i in enumerate(my_param_indices)}

    for i in range(fwdsim.model.num_params):
        # print("dprobs cache %d of %d" % (i,fwdsim.model.num_params))
        if i in iParamToFinal:
            iFinal = iParamToFinal[i]
            vec = orig_vec.copy()
            vec[i] += eps
            fwdsim.model.from_vector(vec, close=True)
            fillfn(vals2, slice(0, nEls), num_outcomes, layout_atom,
                   dataset_rows, subComm)
            _fas(array_to_fill, [dest_indices, iFinal], (vals2 - vals) / eps)

    fwdsim.model.from_vector(orig_vec, close=True)

    #Now each processor has filled the relevant parts of dpr_cache,
    # so gather together:
    _mpit.gather_slices(all_slices,
                        owners,
                        array_to_fill, [],
                        axes=1,
                        comm=comm)
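# The function above distributes columns over MPI, but its numerical core is
# an ordinary forward-difference Jacobian.  Here is that core in miniature,
# as a sketch (no MPI) for an arbitrary vector-valued f: R^n -> R^m:
def fd_jacobian(f, x, eps=1e-7):
    """Forward-difference Jacobian of shape (len(f(x)), len(x))."""
    f0 = _np.asarray(f(x))
    jac = _np.empty((f0.size, x.size))
    for i in range(x.size):
        x_plus = x.copy()
        x_plus[i] += eps  # perturb one parameter at a time
        jac[:, i] = (_np.asarray(f(x_plus)) - f0) / eps
    return jac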
Example #4
    def gpindices_as_array(self):
        """
        Returns gpindices as a `numpy.ndarray` of integers.

        The underlying `.gpindices` attribute itself can be None, a slice,
        or an integer array.  If gpindices is None, an empty array is returned.

        Returns
        -------
        numpy.ndarray
        """
        if self._gpindices is None:
            return _np.empty(0, _np.int64)
        elif isinstance(self._gpindices, slice):
            return _np.array(_slct.indices(self._gpindices), _np.int64)
        else:
            return self._gpindices  # it's already an array
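# Behavior sketch (hypothetical _gpindices values):
#   None             -> _np.empty(0, _np.int64)
#   slice(2, 5)      -> array([2, 3, 4])
#   array([0, 7])    -> returned unchanged
assert _np.array_equal(_np.array(_slct.indices(slice(2, 5)), _np.int64),
                       [2, 3, 4])  # the slice branch, checked directly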
Example #5
    def _success_dprob(self, circuit, param_slice, cache):
        """ Derived classes can override this.  Default implemntation is to use finite difference. """
        eps = 1e-7
        orig_pvec = self.to_vector()
        wrtIndices = _slct.indices(param_slice) if (
            param_slice is not None) else list(range(self.num_params))
        sp0 = self._success_prob(circuit, cache)

        deriv = _np.empty(len(wrtIndices), 'd')
        for ii, i in enumerate(wrtIndices):  # ii = local slot, i = global param index
            p_plus_dp = orig_pvec.copy()
            p_plus_dp[i] += eps
            self.from_vector(p_plus_dp)
            sp1 = self._success_prob(circuit, cache)
            deriv[ii] = (sp1 - sp0) / eps  # index locally so sub-slices don't overrun deriv
        self.from_vector(orig_pvec)
        return deriv
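# For comparison, a central-difference variant of this default, written as a
# sketch under the same assumed interface (to_vector / from_vector /
# _success_prob); it costs one extra evaluation per parameter but has
# O(eps**2) truncation error instead of O(eps):
def success_dprob_central(model, circuit, param_slice, cache, eps=1e-7):
    orig_pvec = model.to_vector()
    wrtIndices = _slct.indices(param_slice) if (
        param_slice is not None) else list(range(model.num_params))
    deriv = _np.empty(len(wrtIndices), 'd')
    for ii, i in enumerate(wrtIndices):
        v_plus, v_minus = orig_pvec.copy(), orig_pvec.copy()
        v_plus[i] += eps
        v_minus[i] -= eps
        model.from_vector(v_plus)
        sp_plus = model._success_prob(circuit, cache)
        model.from_vector(v_minus)
        sp_minus = model._success_prob(circuit, cache)
        deriv[ii] = (sp_plus - sp_minus) / (2 * eps)
    model.from_vector(orig_pvec)
    return deriv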
Example #6
    def __init__(self, unique_complete_circuits, unique_nospam_circuits, circuits_by_unique_nospam_circuits,
                 ds_circuits, group, helpful_scratch, model, dataset):

        #Note: group gives unique_nospam_circuits indices, which circuits_by_unique_nospam_circuits
        # turns into "unique complete circuit" indices, which the layout (via its to_unique) can map
        # to original circuit indices.
        def add_expanded_circuits(indices, add_to_this_dict):
            _expanded_nospam_circuit_outcomes = add_to_this_dict
            for i in indices:
                nospam_c = unique_nospam_circuits[i]
                for unique_i in circuits_by_unique_nospam_circuits[nospam_c]:  # "unique" circuits: add SPAM to nospam_c
                    observed_outcomes = None if (dataset is None) else dataset[ds_circuits[unique_i]].unique_outcomes
                    expc_outcomes = unique_complete_circuits[unique_i].expand_instruments_and_separate_povm(
                        model, observed_outcomes)
                    #Note: unique_complete_circuits may have duplicates (they're only unique *pre*-completion)

                    for sep_povm_c, outcomes in expc_outcomes.items():  # for each expanded cir from unique_i-th circuit
                        prep_lbl = sep_povm_c.circuit_without_povm[0]
                        exp_nospam_c = sep_povm_c.circuit_without_povm[1:]  # sep_povm_c *always* has prep lbl
                        spam_tuples = [(prep_lbl, elabel) for elabel in sep_povm_c.full_effect_labels]
                        outcome_by_spamtuple = _collections.OrderedDict([(st, outcome)
                                                                         for st, outcome in zip(spam_tuples, outcomes)])

                        #Now add these outcomes to `expanded_nospam_circuit_outcomes` - note that multiple "unique_i"'s
                        # may exist for the same expanded & without-spam circuit (exp_nospam_c) and so we need to
                        # keep track of a list of unique_i indices for each circuit and spam tuple below.
                        if exp_nospam_c not in _expanded_nospam_circuit_outcomes:
                            _expanded_nospam_circuit_outcomes[exp_nospam_c] = _collections.OrderedDict(
                                [(st, (outcome, [unique_i])) for st, outcome in zip(spam_tuples, outcomes)])
                        else:
                            for st, outcome in outcome_by_spamtuple.items():
                                if st in _expanded_nospam_circuit_outcomes[exp_nospam_c]:
                                    existing_outcome, existing_unique_is = \
                                        _expanded_nospam_circuit_outcomes[exp_nospam_c][st]
                                    assert(existing_outcome == outcome), "Outcome should be same when spam tuples are!"
                                    assert(unique_i not in existing_unique_is)  # SLOW - remove?
                                    existing_unique_is.append(unique_i)
                                else:
                                    _expanded_nospam_circuit_outcomes[exp_nospam_c][st] = (outcome, [unique_i])

        # keys = expanded circuits w/out SPAM layers; values = spamtuple => (outcome, unique_is) dictionary that
        # keeps track of which "unique" circuit indices have each spamtuple / outcome.
        expanded_nospam_circuit_outcomes = _collections.OrderedDict()
        add_expanded_circuits(group, expanded_nospam_circuit_outcomes)
        expanded_nospam_circuits = _collections.OrderedDict(
            [(i, cir) for i, cir in enumerate(expanded_nospam_circuit_outcomes.keys())])

        # add suggested scratch to the "final" elements as far as the tree creation is concerned
        # - this allows these scratch elements to help balance the tree.
        expanded_nospam_circuit_outcomes_plus_scratch = expanded_nospam_circuit_outcomes.copy()
        add_expanded_circuits(helpful_scratch, expanded_nospam_circuit_outcomes_plus_scratch)
        expanded_nospam_circuits_plus_scratch = _collections.OrderedDict(
            [(i, cir) for i, cir in enumerate(expanded_nospam_circuit_outcomes_plus_scratch.keys())])

        double_expanded_nospam_circuits_plus_scratch = _collections.OrderedDict()
        for i, cir in expanded_nospam_circuits_plus_scratch.items():
            cir = cir.copy(editable=True)
            cir.expand_subcircuits()  # expand sub-circuits for a more efficient tree
            cir.done_editing()
            double_expanded_nospam_circuits_plus_scratch[i] = cir

        self.tree = _EvalTree.create(double_expanded_nospam_circuits_plus_scratch)
        #print("Atom tree: %d circuits => tree of size %d" % (len(expanded_nospam_circuits), len(self.tree)))

        self._num_nonscratch_tree_items = len(expanded_nospam_circuits)  # put this in EvalTree?

        # self.tree's elements give instructions for evaluating ("caching") no-spam quantities (e.g. products).
        # Now we assign final element indices to the circuit outcomes corresponding to a given no-spam ("tree")
        # quantity plus a spam-tuple. We order the final indices so that all the outcomes corresponding to a
        # given spam-tuple are contiguous.

        tree_indices_by_spamtuple = _collections.OrderedDict()  # "tree" indices index expanded_nospam_circuits
        for i, c in expanded_nospam_circuits.items():
            for spam_tuple in expanded_nospam_circuit_outcomes[c].keys():
                if spam_tuple not in tree_indices_by_spamtuple: tree_indices_by_spamtuple[spam_tuple] = []
                tree_indices_by_spamtuple[spam_tuple].append(i)

        #Assign element indices, starting at `offset`
        # now that we know how many of each spamtuple there are, assign final element indices.
        local_offset = 0
        self.indices_by_spamtuple = _collections.OrderedDict()  # values are (element_indices, tree_indices) tuples.
        for spam_tuple, tree_indices in tree_indices_by_spamtuple.items():
            self.indices_by_spamtuple[spam_tuple] = (slice(local_offset, local_offset + len(tree_indices)),
                                                     _slct.list_to_slice(tree_indices, array_ok=True))
            local_offset += len(tree_indices)
            #TODO: allow tree_indices to be None or a slice?

        element_slice = None  # slice(offset, offset + local_offset)  # *global* (of parent layout) element-index slice
        num_elements = local_offset

        elindex_outcome_tuples = _collections.OrderedDict([
            (unique_i, list()) for unique_i in range(len(unique_complete_circuits))])

        for spam_tuple, (element_indices, tree_indices) in self.indices_by_spamtuple.items():
            for elindex, tree_index in zip(_slct.indices(element_indices), _slct.to_array(tree_indices)):
                outcome_by_spamtuple = expanded_nospam_circuit_outcomes[expanded_nospam_circuits[tree_index]]
                outcome, unique_is = outcome_by_spamtuple[spam_tuple]
                for unique_i in unique_is:
                    elindex_outcome_tuples[unique_i].append((elindex, outcome))  # *local* element indices
        self.elindex_outcome_tuples = elindex_outcome_tuples

        super().__init__(element_slice, num_elements)
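# The contiguous element-index assignment described above, reduced to a
# standalone miniature (hypothetical spam tuples and tree indices): each
# spam tuple gets a contiguous slice of final element indices.
tree_indices_by_spamtuple = _collections.OrderedDict(
    [(('rho0', 'E0'), [0, 2]), (('rho0', 'E1'), [1, 2, 3])])
offset = 0
indices_by_spamtuple = _collections.OrderedDict()
for st, tree_idxs in tree_indices_by_spamtuple.items():
    indices_by_spamtuple[st] = (slice(offset, offset + len(tree_idxs)), tree_idxs)
    offset += len(tree_idxs)
# -> ('rho0', 'E0'): (slice(0, 2), [0, 2])
#    ('rho0', 'E1'): (slice(2, 5), [1, 2, 3])   total elements = 5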
Example #7
    def create_fogi_aggregate_single_op_space(self, op_label, errorgen_type='H',
                                              intrinsic_or_relational='intrinsic', target='all'):
        """
        Construct a matrix with columns spanning a particular FOGI subspace for a single operation.

        This is a subspace of the full error-generator space of this FOGI analysis,
        and projecting a model's error-generator vector onto this space can be used
        to obtain the contribution of a desired subset of the `op_label`'s errors.

        Parameters
        ----------
        op_label : Label or str
            The operation to construct a subspace for.  This should be an element of
            `self.primitive_op_labels`.

        errorgen_type : {"H", "S", "all"}
            Potentially restrict to the subspace containing just Hamiltonian (H) or Pauli
            stochastic (S) errors.  `"all"` imposes no restriction.

        intrinsic_or_relational : {"intrinsic", "relational", "all"}
            Restrict to intrinsic or relational errors (or not, using `"all"`).

        target : tuple or "all"
            A tuple of state space (qubit) labels to restrict to, e.g., `('Q0','Q1')`.
            Note that including multiple labels selects only those quantities that
            target *all* the labels. The special `"all"` value includes quantities
            on all targets (no restriction).

        Returns
        -------
        numpy.ndarray
            A two-dimensional array with `self.errorgen_space_dim` rows and a number of
            columns dependent on the dimension of the selected subspace.
        """
        binned_infos = self.create_binned_fogi_infos()

        elem_errgen_lbls = self.elem_errorgen_labels_by_op[op_label]
        elem_errgen_indices = _slct.indices(self.op_errorgen_indices[op_label])
        assert(len(elem_errgen_indices) == len(elem_errgen_lbls))

        op_elem_space = _np.zeros((self.fogi_directions.shape[0], len(elem_errgen_indices)))
        for i, index in enumerate(elem_errgen_indices):
            op_elem_space[index, i] = 1.0

        if target == 'all' and errorgen_type == 'all':
            on_target_elem_errgen_indices = elem_errgen_indices
        else:
            on_target_elem_errgen_indices = []
            for index, lbl in zip(elem_errgen_indices, elem_errgen_lbls):
                if errorgen_type == 'all' or errorgen_type == lbl.errorgen_type:
                    support = lbl.support
                    if (target == 'all') or (target == support):
                        on_target_elem_errgen_indices.append(index)

        support_elem_space = _np.zeros((self.fogi_directions.shape[0], len(on_target_elem_errgen_indices)))
        for i, index in enumerate(on_target_elem_errgen_indices):
            support_elem_space[index, i] = 1.0
        #P_support_elem_space = support_elem_space @ np.linalg.pinv(support_elem_space)

        if intrinsic_or_relational in ('intrinsic', 'relational'):
            # easy case - can just use FOGIs to identify intrinsic errors
            selected_infos = []
            for ops, infos_by_type in binned_infos.items():
                if ops == (op_label,):
                    for types, infos_by_target in infos_by_type.items():  # use all types
                        #if types == (egtype,):
                        for _, info_lst in infos_by_target.items():  # use all targets here
                            selected_infos.extend(info_lst)
            fogi_indices = [info['fogi_index'] for info in selected_infos]
            full_int_space = _np.take(self.fogi_directions.toarray(), fogi_indices, axis=1)

            #space = P_op_elem_space @ full_int_space

            if intrinsic_or_relational == 'intrinsic':
                # full intrinsic space is a subspace of op_elem_space but perhaps not of support_elem_space
                #target_int_space = P_op_elem_space @ full_int_space
                support_int_space = _mt.intersection_space(support_elem_space, full_int_space, use_nice_nullspace=True)
                space = support_int_space
            elif intrinsic_or_relational == 'relational':
                local_support_space = op_elem_space.T @ support_elem_space
                local_int_space = op_elem_space.T @ full_int_space
                local_rel_space = _mt.nice_nullspace(local_int_space.T)
                support_rel_space = _mt.intersection_space(local_support_space, local_rel_space,
                                                           use_nice_nullspace=True)
                space = op_elem_space @ support_rel_space

        elif intrinsic_or_relational == 'all':
            space = support_elem_space
        else:
            raise ValueError("Invalid intrinsic_or_relational value: `%s`" % str(intrinsic_or_relational))

        space = _mt.remove_dependent_cols(space)
        return space
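# pyGSTi's _mt.intersection_space has its own implementation and options
# (e.g. use_nice_nullspace); for intuition, here is the standard SVD
# construction of an intersection of column spaces, as a sketch:
def intersection_space_sketch(A, B, tol=1e-7):
    """Columns spanning range(A) ∩ range(B)."""
    # x in both spaces  <=>  x = A @ a = B @ b  <=>  [A, -B] @ [a; b] = 0,
    # so the 'a' blocks of nullspace vectors of [A, -B] parameterize it.
    M = _np.hstack([A, -B])
    _, s, vh = _np.linalg.svd(M)
    rank = int(_np.sum(s > tol))
    a_part = vh[rank:].T[:A.shape[1], :]
    return A @ a_part

A = _np.array([[1., 0.], [0., 1.], [0., 0.]])  # span{e1, e2}
B = _np.array([[1.], [0.], [0.]])              # span{e1}
print(intersection_space_sketch(A, B))         # one column proportional to e1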
Example #8
        def _jacobian_fn(gauge_group_el):

            #Penalty terms below always act on the transformed non-target model.
            original_gauge_group_el = gauge_group_el

            if frobenius_transform_target:
                gauge_group_el = gauge_group_el.inverse()
                mdl_pre = full_target_model.copy()
                mdl_post = mdl_pre.copy()
            else:
                mdl_pre = model.copy()
                mdl_post = mdl_pre.copy()
            mdl_post.transform_inplace(gauge_group_el)

            # Indices: Jacobian output matrix has shape (L, N)
            start = 0
            d = mdl_pre.dim
            N = gauge_group_el.num_params
            L = mdl_pre.num_elements

            #Compute "extra" (i.e. beyond the model-element) rows of jacobian
            if cptp_penalty_factor != 0: L += _cptp_penalty_size(mdl_pre)
            if spam_penalty_factor != 0: L += _spam_penalty_size(mdl_pre)

            #Set basis for penalty term calculation
            if cptp_penalty_factor != 0 or spam_penalty_factor != 0:
                mdl_pre.basis = mxBasis
                mdl_post.basis = mxBasis

            jacMx = _np.zeros((L, N))

            #Overview of terms:
            # objective: op_term = (S_inv * gate * S - target_op)
            # jac:       d(op_term) = (d (S_inv) * gate * S + S_inv * gate * dS )
            #            d(op_term) = (-(S_inv * dS * S_inv) * gate * S + S_inv * gate * dS )

            # objective: rho_term = (S_inv * rho - target_rho)
            # jac:       d(rho_term) = d (S_inv) * rho
            #            d(rho_term) = -(S_inv * dS * S_inv) * rho

            # objective: ET_term = (E.T * S - target_E.T)
            # jac:       d(ET_term) = E.T * dS

            #Overview of terms when frobenius_transform_target == True.  Note that the objective
            #expressions are identical to the above except for an additional overall minus sign and S <=> S_inv.

            # objective: op_term = (gate - S * target_op * S_inv)
            # jac:       d(op_term) = -(dS * target_op * S_inv + S * target_op * -(S_inv * dS * S_inv) )
            #            d(op_term) = (-dS * target_op * S_inv + S * target_op * (S_inv * dS * S_inv) )

            # objective: rho_term = (rho - S * target_rho)
            # jac:       d(rho_term) = - dS * target_rho

            # objective: ET_term = (E.T - target_E.T * S_inv)
            # jac:       d(ET_term) = - target_E.T * -(S_inv * dS * S_inv)
            #            d(ET_term) = target_E.T * (S_inv * dS * S_inv)

            #Distribute computation across processors
            allDerivColSlice = slice(0, N)
            derivSlices, myDerivColSlice, derivOwners, mySubComm = \
                _mpit.distribute_slice(allDerivColSlice, comm)
            if mySubComm is not None:
                _warnings.warn("Note: more CPUs(%d)" % comm.Get_size()
                               + " than gauge-opt derivative columns(%d)!" % N)  # pragma: no cover

            n = _slct.length(myDerivColSlice)
            wrtIndices = _slct.indices(myDerivColSlice) if (n < N) else None
            my_jacMx = jacMx[:, myDerivColSlice]  # just the columns I'm responsible for

            # S, and S_inv are shape (d,d)
            #S       = gauge_group_el.transform_matrix
            S_inv = gauge_group_el.transform_matrix_inverse
            dS = gauge_group_el.deriv_wrt_params(wrtIndices)  # shape (d*d, n)
            dS.shape = (d, d, n)  # call it (d1,d2,n)
            dS = _np.rollaxis(dS, 2)  # shape (n, d1, d2)
            assert(dS.shape == (n, d, d))

            # --- NOTE: ordering here, with running `start` index MUST
            #           correspond to those in Model.residuals, which in turn
            #           must correspond to those in ForwardSimulator.residuals - which
            #           currently orders as: gates, simplified_ops, preps, effects.

            # -- LinearOperator terms
            # -------------------------
            for lbl, G in mdl_pre.operations.items():
                # d(op_term) = S_inv * (-dS * S_inv * G * S + G * dS) = S_inv * (-dS * G' + G * dS)
                #   Note: (S_inv * G * S) is G' (transformed G)
                wt = item_weights.get(lbl, opWeight)
                left = -1 * _np.dot(dS, mdl_post.operations[lbl].to_dense(on_space='minimal'))  # shape (n,d1,d2)
                right = _np.swapaxes(_np.dot(G.to_dense(on_space='minimal'), dS), 0, 1)  # shape (d1,n,d2) -> (n,d1,d2)
                result = _np.swapaxes(_np.dot(S_inv, left + right), 1, 2)  # shape (d1, d2, n)
                result = result.reshape((d**2, n))  # must copy b/c non-contiguous
                my_jacMx[start:start + d**2] = wt * result
                start += d**2

            # -- Instrument terms
            # -------------------------
            for ilbl, Inst in mdl_pre.instruments.items():
                wt = item_weights.get(ilbl, opWeight)
                for lbl, G in Inst.items():
                    # same calculation as for operation terms
                    left = -1 * _np.dot(dS, mdl_post.instruments[ilbl][lbl].to_dense(on_space='minimal'))  # (n,d1,d2)
                    right = _np.swapaxes(_np.dot(G.to_dense(on_space='minimal'), dS), 0, 1)  # (d1,n,d2) -> (n,d1,d2)
                    result = _np.swapaxes(_np.dot(S_inv, left + right), 1, 2)  # shape (d1, d2, n)
                    result = result.reshape((d**2, n))  # must copy b/c non-contiguous
                    my_jacMx[start:start + d**2] = wt * result
                    start += d**2

            # -- prep terms
            # -------------------------
            for lbl, rho in mdl_post.preps.items():
                # d(rho_term) = -(S_inv * dS * S_inv) * rho
                #   Note: (S_inv * rho) is transformed rho
                wt = item_weights.get(lbl, spamWeight)
                Sinv_dS = _np.dot(S_inv, dS)  # shape (d1,n,d2)
                result = -1 * _np.dot(Sinv_dS, rho.to_dense(on_space='minimal'))  # shape (d,n)
                my_jacMx[start:start + d] = wt * result
                start += d

            # -- effect terms
            # -------------------------
            for povmlbl, povm in mdl_pre.povms.items():
                for lbl, E in povm.items():
                    # d(ET_term) = E.T * dS
                    wt = item_weights.get(povmlbl + "_" + lbl, spamWeight)
                    result = _np.dot(E.to_dense(on_space='minimal')[None, :], dS).T  # shape (1,n,d2).T => (d2,n,1)
                    my_jacMx[start:start + d] = wt * result.squeeze(2)  # (d2,n)
                    start += d

            # -- penalty terms  -- Note: still use original gauge transform applied to `model`
            # -------------------------
            if cptp_penalty_factor > 0 or spam_penalty_factor > 0:
                if frobenius_transform_target:  # reset back to non-target-transform "mode"
                    gauge_group_el = original_gauge_group_el
                    mdl_pre = model.copy()
                    mdl_post = mdl_pre.copy()
                    mdl_post.transform_inplace(gauge_group_el)

                if cptp_penalty_factor > 0:
                    start += _cptp_penalty_jac_fill(my_jacMx[start:], mdl_pre, mdl_post,
                                                    gauge_group_el, cptp_penalty_factor,
                                                    mdl_pre.basis, wrtIndices)

                if spam_penalty_factor > 0:
                    start += _spam_penalty_jac_fill(my_jacMx[start:], mdl_pre, mdl_post,
                                                    gauge_group_el, spam_penalty_factor,
                                                    mdl_pre.basis, wrtIndices)

            #At this point, each proc has filled the portions (columns) of jacMx that
            # it's responsible for, and so now we gather them together.
            _mpit.gather_slices(derivSlices, derivOwners, jacMx, [], 1, comm)
            #Note jacMx is completely filled (on all procs)

            if check_jac and (comm is None or comm.Get_rank() == 0):
                def _mock_objective_fn(v):
                    return _objective_fn(gauge_group_el, False)
                vec = gauge_group_el.to_vector()
                _opt.check_jac(_mock_objective_fn, vec, jacMx, tol=1e-5, eps=1e-9, err_type='abs',
                               verbosity=1)

            return jacMx
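# A quick numerical sanity check (standalone, random 3x3 matrices) of the
# matrix-calculus identity d(S^{-1}) = -S^{-1} dS S^{-1} used throughout
# the Jacobian comments above:
rng = _np.random.default_rng(0)
S = _np.eye(3) + 0.1 * rng.standard_normal((3, 3))
dS = rng.standard_normal((3, 3))
eps = 1e-6
lhs = (_np.linalg.inv(S + eps * dS) - _np.linalg.inv(S)) / eps
rhs = -_np.linalg.inv(S) @ dS @ _np.linalg.inv(S)
assert _np.allclose(lhs, rhs, atol=1e-4)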