Example no. 1
 def __init__(self, a_data, a_indices, a_indptr, state_space):
     dim = len(a_indptr) - 1
     state_space = _StateSpace.cast(state_space)
     assert (state_space.dim == dim)
     self.A = _sps.csr_matrix((a_data, a_indices, a_indptr),
                              shape=(dim, dim))
     super(OpRepSparse, self).__init__(state_space)
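For reference, a minimal standalone sketch (hypothetical 3x3 data, assuming only numpy and scipy) of how the (data, indices, indptr) triple passed in above defines a CSR matrix:

import numpy as np
import scipy.sparse as sps

# Hypothetical 3x3 matrix with nonzeros (0,0)=1, (1,2)=2, (2,0)=3, (2,1)=4
a_data = np.array([1., 2., 3., 4.])
a_indices = np.array([0, 2, 0, 1])   # column index of each stored value
a_indptr = np.array([0, 1, 2, 4])    # row i owns data[indptr[i]:indptr[i+1]]

dim = len(a_indptr) - 1              # number of rows, as computed in the example above
A = sps.csr_matrix((a_data, a_indices, a_indptr), shape=(dim, dim))
print(A.toarray())
# [[1. 0. 0.]
#  [0. 0. 2.]
#  [3. 4. 0.]]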
Example no. 2
 def __init__(self, data, state_space):
     #vec = _np.asarray(vec, dtype='d')
     assert (data.dtype == _np.dtype('d'))
     self.data = _np.require(data.copy(),
                             requirements=['OWNDATA', 'C_CONTIGUOUS'])
     self.state_space = _StateSpace.cast(state_space)
     assert (len(self.data) == self.state_space.dim)
Example no. 3
    def __init__(self, state_space, target_labels, operation_to_embed, allocated_to_parent=None):
        self.target_labels = tuple(target_labels) if (target_labels is not None) else None
        self.embedded_op = operation_to_embed
        self._iter_elements_cache = {"Hilbert": None, "HilbertSchmidt": None}  # speeds up _iter_matrix_elements

        assert(_StateSpace.cast(state_space).contains_labels(target_labels)), \
            "`target_labels` (%s) not found in `state_space` (%s)" % (str(target_labels), str(state_space))

        evotype = operation_to_embed._evotype

        # Create the representation object, trying the preferred rep type first
        rep_type_order = ('dense', 'embedded') if evotype.prefer_dense_reps else ('embedded', 'dense')
        rep = None
        for rep_type in rep_type_order:
            try:
                if rep_type == 'embedded':
                    rep = evotype.create_embedded_rep(state_space, self.target_labels, self.embedded_op._rep)
                elif rep_type == 'dense':
                    rep = evotype.create_dense_superop_rep(None, state_space)
                else:
                    assert(False), "Logic error!"

                self._rep_type = rep_type
                break

            except AttributeError:
                pass  # just go to the next rep_type

        if rep is None:
            raise ValueError("Unable to construct representation with evotype: %s" % str(evotype))

        _LinearOperator.__init__(self, rep, evotype)
        self.init_gpindices(allocated_to_parent)  # initialize our gpindices based on sub-members
        if self._rep_type == 'dense': self._update_denserep()
Example no. 4
    def __init__(self, state_space, target_labels, embedded_rep):
        # assert that all state space labels == qubits, since we only know
        # how to embed cliffords on qubits...
        state_space = _StateSpace.cast(state_space)
        assert(state_space.num_tensor_product_blocks == 1
               and all([state_space.label_udimension(l) == 2 for l in state_space.tensor_product_block_labels(0)])), \
            "All state space labels must correspond to *qubits*"

        #Cache info to speedup representation's acton(...) methods:
        # Note: ...labels[0] is the *only* tensor-prod-block, asserted above
        qubitLabels = state_space.tensor_product_block_labels(0)
        qubit_indices = _np.array(
            [qubitLabels.index(targetLbl) for targetLbl in target_labels],
            _np.int64)

        self.embedded_labels = target_labels
        self.embedded_rep = embedded_rep
        # Map 0-based qubit index for embedded op -> full local qubit index
        self.embedded_to_local_qubit_indices = {
            str(i): str(j)
            for i, j in enumerate(qubit_indices)
        }

        # TODO: This doesn't work as nicely for the stochastic op, where chp_ops can be reset between chp_str calls
        chp_ops = [
            _update_chp_op(op, self.embedded_to_local_qubit_indices)
            for op in self.embedded_rep.chp_ops
        ]
        super(OpRepEmbedded, self).__init__(chp_ops, state_space)
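A minimal plain-Python illustration (hypothetical labels) of the embedded-to-local qubit index map built above, which is the mapping used to rewrite the CHP op strings:

# Hypothetical full state space labels and embedding targets
qubit_labels = ('Q0', 'Q1', 'Q2')
target_labels = ('Q2', 'Q0')

# Position of each target label within the full label tuple
qubit_indices = [qubit_labels.index(lbl) for lbl in target_labels]   # [2, 0]

# Embedded op's 0-based qubit index -> index within the full qubit ordering
embedded_to_local = {str(i): str(j) for i, j in enumerate(qubit_indices)}
print(embedded_to_local)   # {'0': '2', '1': '0'}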
Example no. 5
 def __init__(self, rep_to_repeat, num_repetitions, state_space):
     state_space = _StateSpace.cast(state_space)
     self.repeated_rep = rep_to_repeat
     self.num_repetitions = num_repetitions
     super(OpRepRepeated,
           self).__init__(self.repeated_rep.chp_ops * self.num_repetitions,
                          state_space)
Example no. 6
 def __init__(self, mx, basis, state_space):
     state_space = _StateSpace.cast(state_space)
     if mx is None:
         mx = _np.identity(state_space.udim, complex)
     assert (mx.ndim == 2 and mx.shape[0] == state_space.udim)
     self.basis = basis
     self.base = _np.require(mx, requirements=['OWNDATA', 'C_CONTIGUOUS'])
     super(OpRepDenseUnitary, self).__init__(state_space)
Example no. 7
    def __init__(self, chp_ops, state_space):
        self.chp_ops = chp_ops
        self.state_space = _StateSpace.cast(state_space)

        assert(self.state_space.num_qubits >= 0), 'State space for "chp" evotype must consist entirely of qubits!'
        assert(self.state_space.num_tensor_product_blocks == 1)  # should be redundant with above assertion
        self.qubit_labels = self.state_space.tensor_product_block_labels(0)
        self.qubit_label_to_index = {lbl: i for i, lbl in enumerate(self.qubit_labels)}
Example no. 8
    def __init__(self, op_rep, effect_rep, op_id, state_space):
        self.op_rep = op_rep
        self.effect_rep = effect_rep
        self.op_id = op_id

        self.state_space = _StateSpace.cast(state_space)
        assert (self.state_space.is_compatible_with(effect_rep.state_space))

        super(EffectRepComposed, self).__init__(effect_rep.state_space)
Example no. 9
    def __init__(self, unitarymx, symplecticrep, basis, state_space):

        raise NotImplementedError((
            "This could be implemented in the future - we just need"
            " to decompose an arbitrary Clifford unitary/stabilizer into CHP ops"
        ))
        chp_ops = []  # compile_clifford_unitary_to_chp(unitarymx) TODO!!!
        state_space = _StateSpace.cast(state_space)
        self.basis = basis
        super(OpRepClifford, self).__init__(chp_ops, state_space)
Example no. 10
    def __init__(self, name, basis, state_space):
        std_unitaries = _itgs.standard_gatename_unitaries()
        self.name = name
        if self.name not in std_unitaries:
            raise ValueError("Name '%s' not in standard unitaries" % self.name)

        U = std_unitaries[self.name]
        state_space = _StateSpace.cast(state_space)
        assert (U.shape[0] == state_space.udim)

        super(OpRepStandard, self).__init__(U, basis, state_space)
Example no. 11
    def __init__(self, basis, rate_poly_dicts, initial_rates, seed_or_state,
                 state_space):
        self.basis = basis
        self.stochastic_superops = []
        for b in self.basis.elements[1:]:
            std_superop = _lbt.create_elementary_errorgen('S', b, sparse=False)
            self.stochastic_superops.append(
                _bt.change_basis(std_superop, 'std', self.basis))

        state_space = _StateSpace.cast(state_space)
        assert (self.basis.dim == state_space.dim)

        super(OpRepStochastic, self).__init__(None, state_space)
        self.update_rates(initial_rates)
Example no. 12
    def __init__(self, state_space, target_labels, embedded_rep):
        # assert that all state space labels == qubits, since we only know
        # how to embed cliffords on qubits...
        state_space = _StateSpace.cast(state_space)
        assert(state_space.num_tensor_product_blocks == 1
               and all([state_space.label_udimension(l) == 2 for l in state_space.tensor_product_block_labels(0)])), \
            "All state space labels must correspond to *qubits*"

        #Cache info to speedup representation's acton(...) methods:
        # Note: ...labels[0] is the *only* tensor-prod-block, asserted above
        qubitLabels = state_space.tensor_product_block_labels(0)
        qubit_indices = _np.array([qubitLabels.index(targetLbl)
                                   for targetLbl in target_labels], _np.int64)

        self.embedded_rep = embedded_rep
        self.qubits = qubit_indices  # qubit *indices*
        super(OpRepEmbedded, self).__init__(state_space)
Example no. 13
    def __init__(self, name, basis, state_space):
        std_chp_ops = _itgs.standard_gatenames_chp_conversions()
        self.name = name
        if self.name not in std_chp_ops:
            raise ValueError("Name '%s' not in standard CHP operations" %
                             self.name)

        chp_ops = std_chp_ops[self.name]
        nqubits = 2 if any(['c' in n for n in chp_ops]) else 1

        state_space = _StateSpace.cast(state_space)
        assert(nqubits == state_space.num_qubits), \
            "State space of {0} qubits doesn't match {1} expected qubits for the standard {2} gate".format(
                state_space.num_qubits, nqubits, name)

        self.basis = basis
        super(OpRepStandard, self).__init__(chp_ops, state_space)
Example no. 14
    def __init__(self, povm_factors, effect_labels, state_space):
        #Arrays for speeding up kron product in effect reps
        max_factor_dim = max(fct.state_space.dim for fct in povm_factors)
        kron_array = _np.ascontiguousarray(
            _np.empty((len(povm_factors), max_factor_dim), 'd'))
        factordims = _np.ascontiguousarray(
            _np.array([fct.state_space.dim for fct in povm_factors],
                      _np.int64))

        self.povm_factors = povm_factors
        self.effect_labels = effect_labels
        self.kron_array = kron_array
        self.factor_dims = factordims
        self.max_factor_dim = max_factor_dim  # Unused
        state_space = _StateSpace.cast(state_space)
        assert (_np.prod(factordims) == state_space.dim)
        super(EffectRepTensorProduct, self).__init__(state_space)
        self.factor_effects_have_changed()
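A small numpy sketch (hypothetical factor vectors) of the dimension bookkeeping above: the kron product of the factor effect vectors has dimension equal to the product of the factor dimensions, which is what the assert checks:

import numpy as np

# Hypothetical dense effect vectors for two POVM factors
factors = [np.array([0.5, 0.5]), np.array([1., 0., 0., 0.])]
factordims = np.array([len(f) for f in factors], np.int64)

full = factors[0]
for f in factors[1:]:
    full = np.kron(full, f)          # build the composite effect vector

assert len(full) == np.prod(factordims)   # matches the assert in the example above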
Example no. 15
    def __init__(self, unitarymx, symplecticrep, basis, state_space):

        if symplecticrep is not None:
            self.smatrix, self.svector = symplecticrep
        else:
            # compute symplectic rep from unitary
            self.smatrix, self.svector = _symp.unitary_to_symplectic(unitarymx, flagnonclifford=True)

        self.inv_smatrix, self.inv_svector = _symp.inverse_clifford(
            self.smatrix, self.svector)  # cache inverse since it's expensive

        #nQubits = len(self.svector) // 2
        #dim = 2**nQubits  # "stabilizer" is a "unitary evolution"-type mode
        self.unitary = unitarymx
        self.basis = basis

        state_space = _StateSpace.cast(state_space)
        assert(state_space.num_qubits == self.smatrix.shape[0] // 2)
        super(OpRepClifford, self).__init__(state_space)
Example no. 16
    def __init__(self, zvals, basis, state_space):
        state_space = _StateSpace.cast(state_space)
        assert (basis.name == 'pp'), \
            "Only Pauli-product computational effect vectors are currently supported"
        assert (state_space.num_qudits == len(zvals))
        assert (len(zvals) <= 64), "Cannot create a Computational basis rep with >64 qubits!"
        # Current storage of computational basis states converts zvals -> 64-bit integer

        base = 1
        self.zvals_int = 0
        for v in zvals:
            assert (v in (0, 1)), "zvals must contain only 0s and 1s"
            self.zvals_int += base * v
            base *= 2  # or left shift?

        self.zvals = zvals
        self.nfactors = len(zvals)  # (or nQubits)
        self.abs_elval = 1 / (_np.sqrt(2)**self.nfactors)
        self.basis = basis

        super(EffectRepComputational, self).__init__(state_space)
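A standalone sketch (hypothetical zvals) of the integer packing above; written this way, zvals[0] ends up in the least-significant bit, and the multiply-by-2 is indeed just a left shift:

zvals = (1, 1, 0)

base, zvals_int = 1, 0
for v in zvals:
    zvals_int += base * v
    base *= 2                # equivalently: base <<= 1

# Same packing written with explicit bit shifts
assert zvals_int == sum(v << k for k, v in enumerate(zvals))
print(zvals_int)             # 3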
Example no. 17
    def __init__(self, zvals, basis, state_space):
        state_space = _StateSpace.cast(state_space)
        assert (state_space.num_qudits == len(zvals))
        assert (len(zvals) <= 64), "Cannot create a Computational basis rep with >64 qubits!"
        # Current storage of computational basis states converts zvals -> 64-bit integer

        # Different from the DM counterpart: each factor has only *one* nonzero
        # element, so the final state has only a *single* nonzero element!  We
        # just have to figure out where that single element lies (compute its
        # index) based on the given zvals.

        # Assume, like tensorprod, that factor ordering == kron ordering,
        # so nonzero_index = kron( factor[0], factor[1], ... factor[N-1] ).

        base = 2**(len(zvals) - 1)
        self.nonzero_index = 0
        self.zvals = zvals
        self.basis = basis
        for k, v in enumerate(zvals):
            assert (v in (0, 1)), "zvals must contain only 0s and 1s"
            self.nonzero_index += base * v
            base //= 2  # or right shift?
        super(EffectRepComputational, self).__init__(state_space)
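A standalone sketch (same hypothetical zvals) of the index computation above; here zvals[0] is the most-significant bit (kron, i.e. left-to-right, ordering), the opposite convention from the packing in the previous example:

zvals = (1, 1, 0)

base, nonzero_index = 2 ** (len(zvals) - 1), 0
for v in zvals:
    nonzero_index += base * v
    base //= 2               # equivalently: base >>= 1

# Same index read off as a binary string, most-significant bit first
assert nonzero_index == int(''.join(str(v) for v in zvals), 2)
print(nonzero_index)         # 6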
Example no. 18
 def __init__(self, smatrix, pvectors, amps, state_space):
     self.state_space = _StateSpace.cast(state_space)
     self.sframe = _stabilizer.StabilizerFrame(smatrix, pvectors, amps)
     # just rely on StabilizerFrame class to do all the heavy lifting...
     assert(self.sframe.n == self.state_space.num_qubits)
Example no. 19
 def __init__(self, factor_reps, state_space):
     state_space = _StateSpace.cast(state_space)
     self.factor_reps = factor_reps
     super(OpRepSum, self).__init__(state_space)
Example no. 20
    def __init__(self, state_space, target_labels, embedded_rep):

        state_space = _StateSpace.cast(state_space)
        iTensorProdBlks = [
            state_space.label_tensor_product_block_index(label)
            for label in target_labels
        ]
        # index of tensor product block (of state space) a bit label is part of
        if len(set(iTensorProdBlks)) != 1:
            raise ValueError(
                "All qubit labels of a multi-qubit operation must correspond to the"
                " same tensor-product-block of the state space -- checked previously"
            )  # pragma: no cover # noqa

        # they're all the same (tested above) - this is the "active" block
        iTensorProdBlk = iTensorProdBlks[0]
        tensorProdBlkLabels = state_space.tensor_product_block_labels(iTensorProdBlk)
        # count possible *state-vector-space* indices of each component of the tensor product block
        numBasisEls = _np.array(
            [state_space.label_udimension(l) for l in tensorProdBlkLabels],
            _np.int64)

        # Separate the components of the tensor product that are not operated on,
        # i.e. those on which our final map acts as the identity.
        labelIndices = [
            tensorProdBlkLabels.index(label) for label in target_labels
        ]
        actionInds = _np.array(labelIndices, _np.int64)
        assert(_np.prod([numBasisEls[i] for i in actionInds]) == embedded_rep.dim), \
            "Embedded operation has dimension (%d) inconsistent with the given target labels (%s)" % (
                embedded_rep.dim, str(target_labels))

        #dim = state_space.udim
        nBlocks = state_space.num_tensor_product_blocks
        iActiveBlock = iTensorProdBlk
        nComponents = len(
            state_space.tensor_product_block_labels(iActiveBlock))
        embeddedDim = embedded_rep.dim  # a *unitary* dim - see .dim property above
        blocksizes = _np.array([
            _np.prod(state_space.tensor_product_block_udimensions(k))
            for k in range(nBlocks)
        ], _np.int64)

        self.target_labels = target_labels
        self.embedded_rep = embedded_rep
        self.num_basis_els = numBasisEls
        self.action_inds = actionInds
        self.blocksizes = blocksizes

        num_basis_els_noop_blankaction = self.num_basis_els.copy()
        for i in self.action_inds:
            num_basis_els_noop_blankaction[i] = 1
        self.basisInds_noop_blankaction = [
            list(range(n)) for n in num_basis_els_noop_blankaction
        ]

        # multipliers to go from per-label indices to tensor-product-block index
        # e.g. if map(len,basisInds) == [1,4,4] then multipliers == [ 16 4 1 ]
        self.multipliers = _np.array(
            _np.flipud(
                _np.cumprod([1] +
                            list(reversed(list(self.num_basis_els[1:]))))),
            _np.int64)
        self.basisInds_action = [
            list(range(self.num_basis_els[i])) for i in self.action_inds
        ]

        self.embeddedDim = embeddedDim
        self.ncomponents = nComponents  # number of components in "active" block
        self.active_block_index = iActiveBlock
        self.nblocks = nBlocks
        self.offset = sum(blocksizes[0:self.active_block_index])
        super(OpRepEmbedded, self).__init__(state_space)
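A small numpy check (using the dimensions from the comment above) that the multipliers constructed above are the row-major strides of the tensor-product block, i.e. they convert per-label indices to a flat index exactly like np.ravel_multi_index:

import numpy as np

num_basis_els = np.array([1, 4, 4], np.int64)    # per-label dimensions from the comment above

# Construction used in the example: flipped cumulative products of the reversed tail
multipliers = np.array(
    np.flipud(np.cumprod([1] + list(reversed(list(num_basis_els[1:]))))), np.int64)
print(multipliers)                               # [16  4  1]

# Per-label indices -> flat tensor-product-block index
idx = (0, 2, 3)
assert np.dot(multipliers, idx) == np.ravel_multi_index(idx, num_basis_els)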
Example no. 21
 def __init__(self, factor_op_reps, state_space):
     state_space = _StateSpace.cast(state_space)
     self.factor_reps = factor_op_reps
     super(OpRepComposed, self).__init__([], state_space)
Example no. 22
 def __init__(self, state_space):
     self.state_space = _StateSpace.cast(state_space)
Example no. 23
 def __init__(self, data, state_space, basis):
     assert(data.dtype == _np.dtype(complex))
     self.data = _np.require(data.copy(), requirements=['OWNDATA', 'C_CONTIGUOUS'])
     self.state_space = _StateSpace.cast(state_space)
     self.basis = basis
     assert(len(self.data) == self.state_space.udim)
Example no. 24
 def setUp(self):
     state_space = StateSpace.cast(('Q0', ))
     self.member = mm.ModelMember(state_space, evotype="densitymx")
Example no. 25
 def __init__(self, zvals, state_space):
     self.zvals = zvals
     self.state_space = _StateSpace.cast(state_space)
     assert (self.state_space.num_qubits == len(self.zvals))