Example #1
    def make_real_sand_matrix(self, x, y):
        r"""Make the superoperator matrix representation of

        N[X,Y](rho) = (1/2) ( X rho Y† + Y rho X† )

        In the basis {Λ_j}, N[X,Y](rho) = N(x,y)_jk rho_k Λ_j where

        N(x,y)_jk = Re[x_m (D_mlj + iF_mlj) (y*)_n (D_knl + iF_knl) ]

        Λ_j Λ_k = (D_jkl + iF_jkl) Λ_l

        x and y are vectorized representations of the operators X and Y stored
        in sparse format.

        `sparse.tensordot` might decide to return something dense, so the user
        should be aware of that.

        """
        result_A = sparse.tensordot(x, self.struct, ([0], [0]))
        result_B = sparse.tensordot(np.conj(y), self.struct, ([0], [1]))
        # sparse.tensordot fails if both arguments are numpy ndarrays, so we
        # force the intermediate arrays to be sparse
        if type(result_B) == np.ndarray:
            result_B = COO.from_numpy(result_B)
        if type(result_A) == np.ndarray:
            result_A = COO.from_numpy(result_A)
        result = sparse_real(sparse.tensordot(result_A, result_B, ([0], [1])))
        # We want our result to be dense, to make things predictable from the
        # outside.
        if type(result) == sparse.coo.COO:
            result = result.todense()
        return result.real
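A minimal, self-contained sketch (not part of the example above) of the coercion pattern the docstring warns about: sparse.tensordot may hand back a dense ndarray, so the result is normalized to COO before any further sparse-only steps.

import numpy as np
import sparse
from sparse import COO

def as_coo(result):
    # Normalize a tensordot result to COO, whether it came back dense or sparse.
    return COO.from_numpy(result) if isinstance(result, np.ndarray) else result

a = COO.from_numpy(np.eye(3))
b = COO.from_numpy(np.arange(9.0).reshape(3, 3))
out = as_coo(sparse.tensordot(a, b, axes=([1], [0])))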
Example #2
def hamiltonian_sparse(v, J):
    """
    Calculate the spin Hamiltonian as a sparse array.

    Parameters
    ----------
    v : array-like
        list of frequencies in Hz (in the absence of splitting) for each
        nucleus.
    J : 2D array-like
        matrix of coupling constants. J[m, n] is the coupling constant between
        v[m] and v[n].

    Returns
    -------
    H : sparse.COO
        a sparse spin Hamiltonian.
    """
    nspins = len(v)
    Lz, Lproduct = _so_sparse(nspins)  # noqa
    # On large spin systems, converting v and J to sparse improved the speed of
    # the sparse.tensordot calls below.
    # First make sure v and J are numpy arrays (required by sparse.COO).
    if not isinstance(v, np.ndarray):
        v = np.array(v)
    if not isinstance(J, np.ndarray):
        J = np.array(J)
    H = sparse.tensordot(sparse.COO(v), Lz, axes=1)
    scalars = 0.5 * sparse.COO(J)
    H += sparse.tensordot(scalars, Lproduct, axes=2)
    return H
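A hypothetical usage sketch for hamiltonian_sparse above (it assumes the defining module, including _so_sparse, is importable); a two-spin system with a 10 Hz coupling.

v = [100.0, 200.0]            # frequencies in Hz for the two nuclei
J = [[0.0, 10.0],
     [10.0, 0.0]]             # symmetric coupling-constant matrix
H = hamiltonian_sparse(v, J)  # sparse.COO of shape (4, 4)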
Example #3
 def jac(x):
     if always_positive:
         x = scipy.sparse.diags(np.exp(x))
         jacobian = sparse.tensordot(Dij, self.slice_l, axes=(0, 0))
         return jacobian.to_scipy_sparse().dot(x)
     else:
         return sparse.tensordot(Dij, self.slice_l, axes=(0, 0))
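A minimal sketch of the mixing pattern in the fragment above: the 2-D pydata/sparse tensordot result is converted with to_scipy_sparse() before the scipy.sparse diagonal scaling; all names and shapes here are illustrative, not from the original code.

import numpy as np
import scipy.sparse
import sparse

jac_coo = sparse.random((5, 5), density=0.3)       # stand-in for the tensordot result
diag = scipy.sparse.diags(np.exp(np.zeros(5)))     # diag(exp(x)), as in the always_positive branch
jacobian = jac_coo.to_scipy_sparse().dot(diag)     # plain scipy.sparse matrix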
Example #4
 def __init__(self, dim, basis=None):
     if basis is None:
         self.dim = dim
         self.basis = sparse.stack(gm.get_basis(dim, sparse=True))
     else:
         self.dim = basis[0].shape[0]
         self.basis = COO.from_numpy(np.array(basis))
     # Diagonal metric (since we're working with what are assumed to be
     # orthogonal but not necessarily normalized basis vectors)
     self.sq_norms = COO.from_numpy(
             sparse.tensordot(
                 self.basis, self.basis,
                 ([1, 2], [2, 1])).to_scipy_sparse().diagonal())
     # Diagonal inverse metric
     sq_norms_inv = COO.from_numpy(1 / self.sq_norms.todense())
     # Dual basis obtained from the original basis by the inverse metric
     self.dual = self.basis * sq_norms_inv[:,None,None]
     # Structure coefficients for the Lie algebra showing how to represent a
     # product of two basis elements as a complex-linear combination of basis
     # elements
     self.struct = sparse.tensordot(sparse.tensordot(self.basis, self.basis,
                                                     ([2], [1])),
                                    self.dual, ([1, 3], [2, 1]))
     if isinstance(self.struct, np.ndarray):
         # Sometimes sparse.tensordot returns numpy arrays. We want to force
         # it to be sparse, since sparse.tensordot fails when passed two
         # numpy arrays.
         self.struct = COO.from_numpy(self.struct)
Example #5
    def make_real_comm_matrix(self, x, y):
        r"""Make the superoperator matrix representation of

        M[X,Y](rho) = (1/2) ( [X rho, Y†] + [Y, rho X†] )

        In the basis {Λ_j}, M[X,Y](rho) = M(x,y)_jk rho_k Λ_j where

        M(x,y)_jk = -2 Im[ (y*)_n F_lnj x_m (D_mkl + iF_mkl) ]

        Λ_j Λ_k = (D_jkl + iF_jkl) Λ_l

        x and y are vectorized representations of the operators X and Y stored
        in sparse format.

        `sparse.tensordot` might decide to return something dense, so the user
        should be aware of that.

        """
        struct_imag = sparse_imag(self.struct)
        # sparse.tensordot fails if both arguments are numpy ndarrays, so we
        # force the intermediate arrays to be sparse
        result_A = sparse.tensordot(np.conj(y), struct_imag, ([0], [1]))
        result_B = sparse.tensordot(x, self.struct, ([0], [0]))
        if type(result_B) == np.ndarray:
            result_B = COO.from_numpy(result_B)
        if type(result_A) == np.ndarray:
            result_A = COO.from_numpy(result_A)
        result = -2 * sparse_imag(sparse.tensordot(result_A, result_B,
                                                   ([0], [1])))
        # We want our result to be dense, to make things predictable from the
        # outside.
        if type(result) == sparse.coo.COO:
            result = result.todense()
        return result.real
Example #6
    def initialize(self, rate_ion, rate_cx, t_ion, t_edge, n_edge):
        for v in [rate_ion, rate_cx, t_ion]:
            if v.shape != self.r.shape:
                raise ValueError('Shape mismatch, {} and {}'.format(
                    v.shape, self.r.shape))
        self.rate.initialize(rate_ion + rate_cx, rate_cx, t_ion)
        self.t_ion = t_ion
        self.n_edge = n_edge
        self.nt_edge = n_edge * t_edge
        self.ntt_edge = n_edge * t_edge * t_edge

        n = self.size - 1
        # prepare matrices
        # matrix for the particle balance
        A_part = sparse.concatenate([
            -sparse.tensordot(
                self.rate.Rij + self.rate.Sij, self.rate.slice_l, axes=(0, 0)),
            sparse.tensordot(self.rate.Dij, self.rate.slice_l, axes=(0, 0)),
            sparse.COO([], [], shape=(n, n))
        ],
                                    axis=0).T
        # matrix for the energy balance
        A_engy = sparse.concatenate([
            -1.5 *
            sparse.tensordot(self.rate.Eij, self.rate.slice_l, axes=(0, 0)),
            -1.5 *
            sparse.tensordot(self.rate.Rij, self.rate.slice_l, axes=(0, 0)),
            2.5 *
            sparse.tensordot(self.rate.Dij, self.rate.slice_l, axes=(0, 0))
        ],
                                    axis=0).T
        # balance matrix.
        self.A = sparse.concatenate([A_part, A_engy], axis=0)

        # boundary conditions
        b_part = (-self.n_edge * sparse.tensordot(
            self.rate.Rij + self.rate.Sij, self.rate.slice_last, axes=(0, 0)) +
                  self.nt_edge * sparse.tensordot(
                      self.rate.Dij, self.rate.slice_last, axes=(0, 0))
                  ).todense()

        b_engy = (
            -1.5 * self.n_edge *
            sparse.tensordot(self.rate.Eij, self.rate.slice_last, axes=(0, 0))
            - 1.5 * self.nt_edge * sparse.tensordot(
                self.rate.Rij, self.rate.slice_last, axes=(0, 0)) +
            2.5 * self.ntt_edge * sparse.tensordot(
                self.rate.Dij, self.rate.slice_last, axes=(0, 0))).todense()
        self.b = -np.concatenate([b_part, b_engy])

        # matrix for the constraint
        self.L = scipy.sparse.hstack([
            scipy.sparse.identity(n),
            scipy.sparse.identity(n) * (-2.0),
            scipy.sparse.identity(n)
        ])
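A tiny sketch of the block-assembly pattern used for A_part and A_engy above: blocks are stacked with sparse.concatenate along axis 0 and then transposed, so each block supplies a group of columns; the shapes are made up for illustration.

import numpy as np
import sparse

n = 4
blocks = [sparse.COO.from_numpy(np.eye(n)),
          sparse.COO.from_numpy(2.0 * np.eye(n)),
          sparse.COO([], [], shape=(n, n))]   # an all-zero block, as in A_part
A = sparse.concatenate(blocks, axis=0).T      # shape (n, 3 * n)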
Example #7
def maximization_Theta(alpha, I, T, thetaPrev, p):
    '''
    theta2 = np.zeros((I, T))
    for m in range(I):
        nonZG, nonZD = alpha[m, :, :].nonzero(), alpha[:, m, :].nonzero()
        for n in range(T):
            tmp=0.
            for s in range(T):
                for (i, inf) in zip(nonZG[0], nonZG[1]):
                    tmp+=alpha[m, i, inf] * omega[m, i, s, n, inf]
                for (i, inf) in zip(nonZD[0], nonZD[1]):
                    tmp+=alpha[i, m, inf] * omega[i, m, n, s, inf]

            theta2[m,n] = tmp / Cm[m]
    '''
    '''  Memory consuming
    divix = (thetaPrev.dot(thetaPrev.dot(p))) + 1e-10  # mrx
    divix = np.swapaxes(divix, 0, 1)  # rmx  # because alpha is stored in rmx order

    terme1 = np.swapaxes(alpha/divix, 0, 1)  # mrx
    terme2 = np.swapaxes(thetaPrev.dot(p), 1, 2)  # rxk
    theta = np.tensordot(terme1, terme2, axes=2)  # mk


    terme1 = np.swapaxes(terme1, 0, 1)  # rmx
    terme2 = np.swapaxes(thetaPrev.dot(np.swapaxes(p, 0, 1)), 1, 2)  # mxl
    theta += np.tensordot(terme1, terme2, axes=2)  # rl

    theta = theta / Cm[:, None]
    theta *= thetaPrev
    '''

    # Combinations: rl, mk, klx ; alpha(rmx) != alpha(mrx) because we consider alpha_Tr here

    coords = alpha.nonzero()
    vals = []
    for (r, m, k) in zip(coords[0], coords[1], coords[2]):
        vals.append(thetaPrev[r].dot(thetaPrev[m].dot(p[:, :, k])))  # rmx
    divix = sparse.COO(coords, np.array(vals), shape=(I, I, I)) + 1e-10

    Cm = (alpha.sum(axis=0).sum(axis=1) +
          alpha.sum(axis=1).sum(axis=1)).todense() + 1e-10

    terme1 = alpha / divix  # rmx
    terme2 = np.swapaxes(thetaPrev.dot(np.swapaxes(p, 0, 1)), 1, 2)  # mxl
    theta = sparse.tensordot(terme1, terme2, axes=2)  # rl

    terme1 = terme1.transpose(axes=(1, 0, 2))  # mrx
    terme2 = np.swapaxes(thetaPrev.dot(p), 1, 2)  # rxk
    theta += sparse.tensordot(terme1, terme2, axes=2)  # mk

    theta = theta / Cm[:, None]
    theta *= thetaPrev

    return theta
Example #8
    def initialize(self, rate_dep, rate_cx, t_ion):
        self.rate_dep = rate_dep
        self.rate_cx = rate_cx

        rmu = self.r / (self.m * self.rate_dep)
        rmuk = vec2coo(rmu * EV)

        # Components for particle balance equation
        # gradient term
        Dij = sparse.tensordot(rmuk, self.phi_di_dj_k, axes=(0, -1))
        # depletion term by ionization and charge exchange
        Rij = -sparse.tensordot(
            vec2coo(self.r * self.rate_dep), self.phi_ijk, axes=(0, -1))
        # source term by charge exchange
        Sij = sparse.tensordot(vec2coo(self.r * self.rate_cx),
                               self.phi_ijk,
                               axes=(0, -1))
        # energy source term by charge exchange
        Eij = sparse.tensordot(vec2coo(self.r * self.rate_cx * t_ion),
                               self.phi_ijk,
                               axes=(0, -1))
        # Remove last item of last dimension
        self.Dij = sparse.tensordot(Dij, self.slice_l, axes=(-1, 0))
        self.Rij = sparse.tensordot(Rij, self.slice_l, axes=(-1, 0))
        self.Sij = sparse.tensordot(Sij, self.slice_l, axes=(-1, 0))
        self.Eij = sparse.tensordot(Eij, self.slice_l, axes=(-1, 0))
Example #9
 def __init__(self, dim):
     self.dim = dim
     self.basis = COO.from_numpy(np.array(gm.get_basis(dim)))
     self.sq_norms = COO.from_numpy(np.einsum('jmn,jnm->j',
                                              self.basis.todense(),
                                              self.basis.todense()))
     sq_norms_inv = COO.from_numpy(1 / self.sq_norms.todense())
     self.dual = self.basis * sq_norms_inv[:,None,None]
     self.struct = sparse.tensordot(sparse.tensordot(self.basis, self.basis,
                                                     ([2], [1])),
                                    self.dual, ([1, 3], [2, 1]))
     if type(self.struct) == np.ndarray:
         # Sometimes sparse.tensordot returns numpy arrays. We want to force
         # it to be sparse, since sparse.tensordot fails when passed two
         # numpy arrays.
         self.struct = COO.from_numpy(self.struct)
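A self-contained sketch of the same structure-constant construction, using the Pauli matrices as an explicit stand-in for gm.get_basis(dim); struct[j, k, l] is the coefficient of Λ_l in the expansion of Λ_j Λ_k.

import numpy as np
import sparse
from sparse import COO

basis = COO.from_numpy(np.array([[[0, 1], [1, 0]],
                                 [[0, -1j], [1j, 0]],
                                 [[1, 0], [0, -1]]]))
sq_norms = np.einsum('jmn,jnm->j', basis.todense(), basis.todense())  # Tr(Λ_j Λ_j) = 2
dual = basis * COO.from_numpy(1 / sq_norms)[:, None, None]
struct = sparse.tensordot(sparse.tensordot(basis, basis, ([2], [1])),
                          dual, ([1, 3], [2, 1]))
# e.g. Λ_x Λ_y = i Λ_z, so struct[0, 1, 2] is approximately 1j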
Example #10
    def trace2(self, other):
        sums1 = []
        sums2 = []
        sites1 = self.sites.copy()
        sites2 = other.sites.copy()
        unsummed1 = []
        unsummed2 = []

        index = {}

        for i, site in enumerate(self.sites):
            if site in other.sites:
                sums1.extend([2 * i + 1, 2 * i])
                sums2.extend([2 * other.sites.index(site),
                              2 * other.sites.index(site) + 1])

                index[2 * i + 1] = 2 * other.sites.index(site) + 1

                sites2.remove(site)
            else:
                unsummed1.append(site)

        unsummed2 = [site for site in other.sites if site not in sums2]

        td = sparse.tensordot(self.representation,
                              other.representation,
                              axes=(sums1, sums2))
        dense_td = td.todense()
        return np.einsum(
            dense_td,
            [n for n in range(len(dense_td.shape) // 2) for i in range(2)])
Example #11
def propagate_distribution(pomdp, D_ux, u_dim=None, x_dim=None):
    r'''Evolve the input/state distribution D_ux into the output distribution D_xz:
    D_xz(x, z) = \sum_{x', u} P(X+ = x, Z = z | U = u, X = x') D_ux(u, x')
    '''

    if u_dim is None:
        u_dim = tuple(range(len(pomdp.M)))

    if x_dim is None:
        x_dim = (len(pomdp.M), )

    if len(u_dim) != len(pomdp.M) or len(x_dim) != 1:
        raise Exception('dimension problem')

    if len(D_ux.shape) <= max(u_dim + x_dim) or len(
            set(u_dim + x_dim)) < len(u_dim + x_dim) or sum(D_ux.data) != 1:
        raise Exception('D_ux not a valid distribution')

    T_uxXz = sparse.stack([sparse.stack([sparse.COO(pomdp.Tuz(m_tuple, z))
                                         for z in range(pomdp.O)],
                                        axis=-1)
                           for m_tuple in pomdp.m_tuple_iter()]) \
             .reshape(pomdp.M + (pomdp.N, pomdp.N, pomdp.O))

    T_zx = sparse.tensordot(D_ux,
                            T_uxXz,
                            axes=(u_dim + x_dim, range(len(pomdp.M) + 1)))

    return sparse.COO(T_zx)
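A minimal, self-contained sketch of the contraction in the docstring above: the joint distribution over (u, x') is summed against the transition tensor over its first two axes; the shapes and tensors here are invented for illustration.

import numpy as np
import sparse

rng = np.random.default_rng(0)
T = rng.random((2, 3, 3, 4))
T /= T.sum(axis=(2, 3), keepdims=True)                # P(X+ = x, Z = z | U = u, X = x')
T_uxXz = sparse.COO.from_numpy(T)
D_ux = sparse.COO.from_numpy(np.full((2, 3), 1 / 6))  # uniform distribution over (u, x')
D_xz = sparse.tensordot(D_ux, T_uxXz, axes=((0, 1), (0, 1)))  # joint over (x, z), sums to 1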
Example #12
    def sparse_dLdW(self, dLdOut):
        '''
        This function compiles the dLdW sparse matrix
        '''
        inp = self.lastInput
        (batchSize, nrInputs) = inp.shape

        nrCol = self.nrOutputs

        row = np.arange(nrInputs * nrCol)

        column = np.repeat(np.arange(nrCol), nrInputs)

        rowCol = np.array([row, column])

        data = np.tile(inp, nrCol).flatten()

        thirdDim = np.repeat(np.arange(batchSize), len(row))

        rowCol = np.tile(rowCol, batchSize)
        coords = np.array([thirdDim, *rowCol])
        x = sparse.COO(coords, data)

        dLdW = sparse.tensordot(x, dLdOut, axes=([0, 2], [0, 1]))
        return dLdW.reshape(self.W.shape)
Example #13
 def vectorize(self, op):
     sparse_op = COO.from_numpy(op)
     result = sparse.tensordot(self.dual, sparse_op, ([1,2], [1,0]))
     if type(result) == np.ndarray:
         # I want the result stored in a sparse format even if it isn't
         # sparse.
         result = COO.from_numpy(result)
     return result
Example #14
 def vectorize(self, op, dense=False):
     sparse_op = COO.from_numpy(op)
     result = sparse.tensordot(self.dual, sparse_op, ([1,2], [1,0]))
     if not dense and isinstance(result, np.ndarray):
         # I want the result stored in a sparse format even if it isn't
         # sparse.
         result = COO.from_numpy(result)
     elif dense and isinstance(result, sparse.COO):
         result = result.todense()
     return result
Example #15
def propagate_network_distribution(network, D):
    r'''Evolve the input/state distribution D into the output distribution D_xz:
    D_xz(x, z) = \sum_{x', u} P(X+ = x, Z = z | U = u, X = x') D(u, x')
    '''
    if not network.deterministic:
        raise Exception('not possible in nondeterministic case')

    if not D.shape == network.M + network.N:
        raise Exception('wrong dimension of D')

    slice_names = list(network.input_names) + list(network.state_names)
    treated_inputs = []

    for name, pomdp in network.forward_iter():
        # index of current state
        u_dim = tuple(
            slice_names.index(u_name) for u_name in pomdp.input_names)
        x_dim = (slice_names.index(name), )

        # D_ux -> D_xz
        D = propagate_distribution(pomdp, D, u_dim=u_dim,
                                   x_dim=x_dim)  # get old  z_out
        slice_names = [n for n in slice_names if n not in (name,) + pomdp.input_names] + \
                      [name] + [pomdp.output_name]
        treated_inputs += pomdp.input_names

        # D_xz -> D_ux
        for outputs, inp, D_uz, _ in network.connections:

            # ALTERNATIVE WAY: first diagonalize D_xz to D_xzz, then use tensordot(D_xzz, D_uz) on z
            # should be faster: diagonalization is very cheap, avoids own diagonal() fcn
            # diagonalization here: https://github.com/nils-werner/sparse/compare/master...nils-werner:einsum#diff-774b84c1fc5cd6b86a14c41931aca83bR356

            if all([output in slice_names
                    for output in outputs]) and inp not in treated_inputs:
                # outer tensor product -- D_xzuz
                D = sparse.tensordot(D, sparse.COO(D_uz), axes=0)
                slice_names = slice_names + [inp] + [
                    output + '_z' for output in outputs
                ]

                # diagonal over each z -- Dxuz
                for output in outputs:
                    D = diagonal(D,
                                 axis1=slice_names.index(output),
                                 axis2=slice_names.index(output + '_z'))
                    slice_names = [
                        name for name in slice_names
                        if name != output and name != output + '_z'
                    ] + [output]

    new_order = [slice_names.index(x) for x in network.state_names
                 ] + [slice_names.index(z) for z in network.output_names]
    return D.transpose(new_order)
Example #16
def hamiltonian_sparse(v, J):
    """

    Parameters
    ----------
    v
    J

    Returns
    -------
    H: a numpy.ndarray, and NOT a sparse.COO?!?!?!
    """
    nspins = len(v)
    Lz, Lproduct = so_sparse(nspins)
    H = sparse.tensordot(v, Lz, axes=1)
    # L_T = L.transpose(1, 0, 2, 3)
    # Lproduct = np.tensordot(L_T, L, axes=((1, 3), (0, 2))).swapaxes(1, 2)
    scalars = 0.5 * J
    H += sparse.tensordot(scalars, Lproduct, axes=2)
    return H
Example #17
    def get_multiscale_diffusive(self, index, n, SDIFF, TDIFF, TDIFFGrad):

        angle = self.mat['control_angle'][index]
        aa = sparse.COO([0, 1, 2], angle, shape=(3,))
        HW_PLUS = sparse.tensordot(self.mesh.CP, aa, axes=1).clip(min=0)
        s = SDIFF[n] * self.mat['mfp'][n]
        temp = TDIFF[n] - self.mat['mfp'][n] * np.dot(
            self.mat['control_angle'][index], TDIFFGrad[n].T)
        t = temp * self.mat['domega'][index]
        j = np.multiply(temp, HW_PLUS) * self.mat['domega'][index]

        return t, s, j
Example #18
    def test_einsum_basic(self):
        # transpose
        arr = sp.random((20, 30, 40), 0.1)
        self.assertEqual(
            (einsum_sparse('abc->cba', arr) != arr.transpose()).nnz, 0)

        # tensordot
        arr1 = sp.random((20, 30, 40), 0.1)
        arr2 = sp.random((40, 30), 0.1)
        arr3 = sp.random((40, 20, 10), 0.1)
        self.assertTrue(
            np.allclose(
                todense(einsum_sparse('abc,cb->a', arr1, arr2)),
                todense(sp.tensordot(arr1, arr2, axes=([1, 2], [1, 0])))))
        self.assertTrue(
            np.allclose(todense(einsum_sparse('ab,acd->bcd', arr2, arr3)),
                        todense(sp.tensordot(arr2, arr3, axes=(0, 0)))))

        # trace
        arr = sp.random((100, 100), 0.1)
        self.assertAlmostEqual(
            einsum_sparse('aa->', arr)[()], np.trace(todense(arr)))
Example #19
def hamiltonian_sparse(v, J):
    """

        Parameters
        ----------
        v: array-like
            list of frequencies in Hz
        J: 2D array-like
            matrix of coupling constants

        Returns
        -------
        H: sparse.COO
            a sparse spin Hamiltonian
        """
    nspins = len(v)
    Lz, Lproduct = so_sparse(nspins)  # noqa
    # On large spin systems, converting v and J to sparse improved speed, so:
    H = sparse.tensordot(sparse.COO(v), Lz, axes=1)
    scalars = 0.5 * sparse.COO(J)
    H += sparse.tensordot(scalars, Lproduct, axes=2)
    return H
Example #20
def so_sparse(nspins):
    """Attempted to sparsify the spin operator generation, but there are hurdles. In-place assignments can't be done on
    sparse matrices. Apparently no .swapaxes method for COO matrix either.

    """
    sigma_x = sparse.COO(np.array([[0, 1 / 2], [1 / 2, 0]]))
    sigma_y = sparse.COO(np.array([[0, -1j / 2], [1j / 2, 0]]))
    sigma_z = sparse.COO(np.array([[1 / 2, 0], [0, -1 / 2]]))
    unit = sparse.COO(np.array([[1, 0], [0, 1]]))

    L = np.empty((3, nspins, 2**nspins, 2**nspins),
                 dtype=np.complex128)  # consider other dtype?
    # Lxs = []
    # Lys = []
    # Lzs = []

    for n in range(nspins):
        Lx_current = 1
        Ly_current = 1
        Lz_current = 1

        for k in range(nspins):
            if k == n:
                Lx_current = sparse.kron(Lx_current, sigma_x)
                Ly_current = sparse.kron(Ly_current, sigma_y)
                Lz_current = sparse.kron(Lz_current, sigma_z)
            else:
                Lx_current = sparse.kron(Lx_current, unit)
                Ly_current = sparse.kron(Ly_current, unit)
                Lz_current = sparse.kron(Lz_current, unit)

        # Lxs[n] = Lx_current
        # Lys[n] = Ly_current
        # Lzs[n] = Lz_current
        # print(Lx_current.todense())
        L[0][n] = Lx_current.todense()
        L[1][n] = Ly_current.todense()
        L[2][n] = Lz_current.todense()
    Lz_sparse = sparse.COO(L[2])
    L_T = L.transpose(1, 0, 2, 3)
    L_sparse = sparse.COO(L)
    L_T_sparse = sparse.COO(L_T)
    Lproduct = sparse.tensordot(L_T_sparse, L_sparse,
                                axes=((1, 3), (0, 2))).swapaxes(1, 2)
    # Lz_sparse = sparse.COO(L[2])
    Lproduct_sparse = sparse.COO(Lproduct)

    return Lz_sparse, Lproduct_sparse
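A hypothetical usage sketch for so_sparse above (assuming it is importable); the shapes follow from the construction in the body.

Lz, Lproduct = so_sparse(2)
print(Lz.shape)        # (2, 4, 4): one I_z operator per spin
print(Lproduct.shape)  # (2, 2, 4, 4): pairwise products summed over the x, y, z components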
Example #21
    def make_hamil_comm_matrix(self, h):
        """Make the superoperator matrix representation of

        -i[H,rho]

        h is the vectorized representation of the Hamiltonian H stored in sparse
        format.

        Returns a dense matrix.

        """
        struct_imag = sparse_imag(self.struct)
        result = 2 * sparse.tensordot(struct_imag, h, ([0], [0])).T
        if type(result) == sparse.coo.COO:
            result = result.todense()
        return result.real
Example #22
    def decode(
        self, symbol_stream: np.ndarray,
        channel_state: ChannelStateInformation, stream_noises: np.ndarray
    ) -> Tuple[np.ndarray, ChannelStateInformation, np.ndarray]:

        equalized_symbols = np.empty(
            (channel_state.num_receive_streams, channel_state.num_samples),
            dtype=complex)
        equalized_noises = np.empty(
            (channel_state.num_receive_streams, channel_state.num_samples),
            dtype=float)
        equalized_channel_state = ChannelStateInformation(
            channel_state.state_format)

        # Equalize in space in a first step
        for idx, (symbols, stream_state, noise) in enumerate(
                zip(symbol_stream, channel_state.received_streams(),
                    stream_noises)):

            noise_variance = np.mean(noise)

            # Combine the responses of all superimposed transmit antennas for equalization
            linear_state = stream_state.linear
            transform = np.sum(linear_state[0, ::], axis=0, keepdims=False)

            # Compute the pseudo-inverse from the singular-value-decomposition of the linear channel transform
            # noinspection PyTupleAssignmentBalance
            u, s, vh = svd(transform.todense(),
                           full_matrices=False,
                           check_finite=False)
            u *= s / (s**2 + noise_variance)

            equalizer = (u @ vh).T.conj()

            equalized_symbols[idx, :] = equalizer @ symbols
            equalized_csi_slice = tensordot(equalizer,
                                            linear_state,
                                            axes=(1, 2)).transpose(
                                                (1, 2, 0, 3))
            equalized_channel_state.append_linear(equalized_csi_slice, 0)
            equalized_noises[idx, :] = noise[:stream_state.num_samples] * (
                s**2 + noise_variance)

        return equalized_symbols, channel_state, equalized_noises
Example #23
    def compose_with(self, other):
        '''Uses sparse.tensordot to multiply with another tensor.

        This multiplication process involves summing over the correct indices to match how matrix
        multiplication should work. Furthermore, the process requires reordering the tensor
        components afterwards so the tensor continues to act on the proper sites.'''

        sums1 = []
        sums2 = []
        sites1 = self.sites.copy()
        sites2 = other.sites.copy()

        index = {}

        for i, site in enumerate(self.sites):
            if site in other.sites:
                sums1.append(2 * i + 1)
                sums2.append(2 * other.sites.index(site))

                index[2 * i + 1] = 2 * other.sites.index(site) + 1

                sites2.remove(site)
        td = sparse.tensordot(self.representation,
                              other.representation,
                              axes=(sums1, sums2))
        new_sites = sites1 + sites2

        base = list(range(len(td.shape)))
        base.reverse()
        transposition = []
        for i in sums1:
            base.remove(index[i] + 2 * self.dim - len(sums1) - len(sums2))
        for i in range(len(td.shape)):
            if i in sums1:
                coord = index[i] + 2 * self.dim - len(sums1) - len(sums2)
                transposition.append(coord)
            else:
                transposition.append(base.pop())
        td = td.transpose(transposition)
        return TensorGate(td, new_sites, self.N)
Example #24
 def td(X1, X2):
     return sp.tensordot(X1, X2, axes) if iscoo(X1) else np.tensordot(
         X1, X2, axes)
Example #25
    def get_solving_data(self, index, n, TB, TL):

        global_index = index * self.mat['n_mfp'] + n
        nc = self.n_elems

        if index == self.last_index:
            A = self.A
            HW_MINUS = self.HW_MINUS
            HW_PLUS = self.HW_PLUS
            K = self.K
            P = self.P
        elif os.path.exists(self.cache + '/P_' + str(index) + '.p'):
            A = scipy.sparse.load_npz(self.cache + '/A_' + str(index) + '.npz')
            K = scipy.sparse.load_npz(self.cache + '/K_' + str(index) + '.npz')
            HW_MINUS = np.load(
                open(self.cache + '/HW_MINUS_' + str(index) + '.p', 'rb'))
            HW_PLUS = np.load(
                open(self.cache + '/HW_PLUS_' + str(index) + '.p', 'rb'))
            P = np.load(open(self.cache + '/P_' + str(index) + '.p', 'rb'))
        else:
            angle = self.mat['control_angle'][index]
            aa = sparse.COO([0, 1, 2], angle, shape=(3,))

            tmp = sparse.tensordot(self.mesh.CP, aa, axes=1)
            HW_PLUS = tmp.clip(min=0)
            #HW_MINUS = HW_PLUS - tmp

            HW_MINUS = -sparse.tensordot(self.mesh.CM, aa, axes=1).clip(max=0)
            test2 = sparse.tensordot(self.mesh.N, aa, axes=1)
            K = test2 * self.TT  #broadcasting (B_ij * V_i)
            AM = test2.clip(max=0)
            #AP = test2.clip(min=0)
            AP = test2 - AM

            AP = spdiags(AP.sum(axis=1).todense(), 0, nc, nc, format='csc')

            P = (AM * self.mesh.B).sum(axis=1).todense()

            CPB = spdiags(sparse.tensordot(self.mesh.CPB, aa,
                                           axes=1).clip(min=0),
                          0,
                          nc,
                          nc,
                          format='csc')
            A = AP + AM + CPB
            if self.argv.setdefault('save_data', True):
                scipy.sparse.save_npz(self.cache + '/A_' + str(index) + '.npz',
                                      A.tocsc())
                scipy.sparse.save_npz(self.cache + '/K_' + str(index) + '.npz',
                                      K.tocsc())
                P.dump(open(self.cache + '/P_' + str(index) + '.p', 'wb'))
                HW_MINUS.dump(
                    open(self.cache + '/HW_MINUS_' + str(index) + '.p', 'wb'))
                HW_PLUS.dump(
                    open(self.cache + '/HW_PLUS_' + str(index) + '.p', 'wb'))

            self.A = A
            self.K = K
            self.HW_MINUS = HW_MINUS
            self.HW_PLUS = HW_PLUS
            self.P = P
            self.last_index = index

    #----------------------------------------------

        if global_index in self.lu.keys() and self.argv.setdefault(
                'keep_lu', False):
            lu = self.lu[global_index]
        else:
            F = scipy.sparse.eye(self.n_elems,
                                 format='csc') + A.tocsc() * self.mat['mfp'][n]
            lu = splu(F.tocsc())
            if self.argv.setdefault('keep_lu', False):
                self.lu.update({global_index: lu})

        RHS = self.mat['mfp'][n] * (P + np.multiply(TB[n], HW_MINUS)) + TL[n]
        temp = lu.solve(RHS)

        t = temp * self.mat['domega'][index]
        s = K.dot(temp - TL[n]).sum(
        ) * self.mat['domega'][index] * 3 * self.kappa_factor
        j = np.multiply(temp, HW_PLUS) * self.mat['domega'][index] * 2 * np.pi

        return t, s, j
Example #26
    def _matmat(self, X):
        r"""Compute matrix-matrix product.

        Parameters
        ----------
        X: array_like
            The matrix with which the product is desired.

        Returns
        -------
        array_like
            The product of self with X.

        Notes
        -----
        Implementation depends on the structure of the Kronecker
        product:

        .. math::
            A \otimes B = \begin{pmatrix}
                A_{11} B & A_{12} B & \cdots & A_{1n} B \\
                A_{21} B & A_{22} B & \cdots & A_{2n} B \\
                \vdots   & \vdots   & \ddots & \vdots   \\
                A_{m1} B & A_{m2} B & \cdots & A_{mn} B
            \end{pmatrix}

        Matrix-scalar products are commutative, and :math:`B` is the
        same for each block.  When right-multiplying by a matrix
        :math:`C`, we can take advantage of this by splitting
        :math:`C` into chunks, multiplying each by the corresponding
        element of :math:`A`, adding them up, and multiplying by
        :math:`B`.

        This function uses :mod:`dask` for the splitting, multiplication, and
        addition, which defaults to using all available cores.
        """
        block_size = self._block_size
        operator1 = self._operator1
        operator2 = self._operator2
        in_chunk = (operator1.shape[1], block_size, X.shape[1])

        if isinstance(X, ARRAY_TYPES) and isinstance(operator1, ARRAY_TYPES):
            partial_answer = einsum(
                "ij,jkl->kil",
                operator1,
                X.reshape(in_chunk),
                # Column-major output should speed the
                # operator2 @ tmp bit
                order="F").reshape(block_size, -1)
        else:
            partial_answer = tensordot(operator1, X.reshape(in_chunk),
                                       (1, 0)).transpose((1, 0, 2)).reshape(
                                           (block_size, -1))
        chunks = (
            operator2.dot(partial_answer)
            # Reshape to separate out the block dimension from the
            # original second dim of X
            .reshape((operator2.shape[0], self._n_chunks, X.shape[1]))
            # Transpose back to have block dimension first
            .transpose((1, 0, 2)))
        # Reshape back to expected result size
        return chunks.reshape((self.shape[0], X.shape[1]))
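A self-contained check of the Kronecker-product identity described in the Notes above, using plain numpy in place of the operator attributes; all names and shapes are illustrative.

import numpy as np

A = np.random.rand(3, 4)   # plays the role of operator1
B = np.random.rand(5, 6)   # plays the role of operator2
X = np.random.rand(4 * 6, 2)

# (A kron B) @ X via the reshape trick: contract A into the block index of X, then apply B.
tmp = np.tensordot(A, X.reshape(4, 6, 2), (1, 0))             # shape (3, 6, 2)
result = np.einsum('rs,isl->irl', B, tmp).reshape(3 * 5, 2)

assert np.allclose(result, np.kron(A, B) @ X)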
Example #27
    def quadratic_form(self, mat):
        r"""Calculate the quadratic form mat.T @ self @ mat.

        Parameters
        ----------
        mat: array_like[N, M]

        Returns
        -------
        array_like[M, M]
            The product mat.T @ self @ mat

        Raises
        ------
        TypeError
            if mat is not an array or if self is not square
        ValueError
            if the shapes of self and mat are not compatible

        Notes
        -----
        Implementation depends on Kronecker structure, using the
        :meth:`._matmat` algorithm for self @ mat.  If mat is a lazy
        dask array, this implementation will load it multiple times to
        avoid dask keeping it all in memory.

        This function uses :mod:`dask` for the splitting, multiplication, and
        addition, which defaults to using all available cores.
        """
        if self.shape[0] != self.shape[1]:
            raise TypeError("quadratic_form only defined for square matrices.")
        elif isinstance(mat, LinearOperator):
            raise TypeError("quadratic_form only supports explicit arrays.")
        elif not isinstance(mat, ARRAY_TYPES):
            warnings.warn("mat not a recognised array type.  "
                          "Proceed with caution.")
        elif mat.ndim == 1:
            mat = mat[:, np.newaxis]
        if mat.shape[0] != self.shape[1]:
            raise ValueError("Dim mismatch: {mat:d} != {self:d}".format(
                mat=mat.shape[0], self=self.shape[1]))
        outer_size = mat.shape[-1]
        result_shape = (outer_size, outer_size)
        result_dtype = np.result_type(self.dtype, mat.dtype)
        # I load this into memory, so may as well keep as one chunk
        result = zeros(result_shape, dtype=result_dtype)

        block_size = self._block_size
        operator1 = self._operator1
        operator2 = self._operator2
        in_chunk = (operator1.shape[1], block_size, mat.shape[1])
        # row_chunk_size = mat.chunks[0][0]

        for row1, row_start in enumerate(range(0, mat.shape[0], block_size)):
            # Two function calls and a C loop, instead of python loop
            # with lots of indexing.
            # Having the chunk be fortran-contiguous should speed the
            # next steps (operator2 @ chunk)
            if isinstance(mat, np.ndarray):
                chunk = einsum("j,jkl->kl",
                               operator1[row1, :],
                               mat.reshape(in_chunk),
                               order="F")
            else:
                chunk = tensordot(operator1[row1, :], mat.reshape(in_chunk), 1)
            result += mat[row_start:(row_start + block_size)].T.dot(
                operator2.dot(chunk))
        return result
Example #28
def SINGTANGENTS(resfn, X, lam, mE, opt, ei=0):
    """SINGTANGENTS   : Returns the tangents from the two branches at singular point. Assumes simple bifurcation
    USAGE   :
    du1, du2, others = SINGTANGENTS(resfn, X, lam, mE, ei=0)
    INPUTS  :
    resfn, X, lam, mE, ei=0
    OUTPUTS :
    du1, du2, others
    """

    # pdb.set_trace()
    def mineigval(lam, u0, k=0):
        us = SPNEWTONSOLVER(lambda u: resfn(u, lam)[0:2], u0, opt)
        return np.sort(np.linalg.eigvals(us.fjac.todense()))[k]

    # 1. Find Critical Point
    # pdb.set_trace()
    cpi = np.where(np.array(mE)[:-1, ei] * np.array(mE)[1:, ei] < 0)[0][0] + 1
    mc = np.argmin([mE[cpi][0], mE[cpi - 1][0]])
    biflam = so.fsolve(lambda lmu: mineigval(lmu, X[cpi - mc], k=ei),
                       lam[cpi - mc])
    # biflam = so.bisect(lambda lmu: mineigval(lmu, X[cpi-1], k=ei), lam[cpi-1], lam[cpi])
    us = SPNEWTONSOLVER(lambda u: resfn(u, biflam)[0:2], X[cpi - mc], opt)

    Rb, dRdXb, dRdlb, d2RdXlb, d2RdX2b = resfn(us.x, biflam, d3=1)
    evals, evecs = np.linalg.eig(dRdXb.todense())
    evecs = np.asarray(evecs[:, np.argsort(evals)])
    evals = evals[np.argsort(evals)]

    # pdb.set_trace()
    # 2. Branch-Switching
    zv = evecs[:, ei]
    Lm = sn.null_space(zv[np.newaxis, :])
    LdL = Lm.T.dot(dRdXb.todense()).dot(Lm)
    yv = -Lm.dot(np.linalg.solve(LdL, Lm.T.dot(dRdlb)))
    # yv = -ss.linalg.spsolve(dRdXb, dRdlb)

    aval = zv.dot(sp.tensordot(d2RdX2b, zv, axes=1).dot(zv))
    bval = zv.dot(
        sp.tensordot(d2RdX2b, zv, axes=1).dot(yv) +
        sp.tensordot(d2RdX2b, yv, axes=1).dot(zv) + 2.0 * d2RdXlb.dot(zv))
    cval = zv.dot(d2RdX2b.dot(yv).dot(yv) + 2.0 * d2RdXlb.dot(yv) + 0.0)
    if np.abs(aval) > 1e-10:
        sig1 = (-bval - np.sqrt(bval**2 - 4 * aval * cval)) / (2.0 * aval)
        sig2 = (-bval + np.sqrt(bval**2 - 4 * aval * cval)) / (2.0 * aval)
    else:
        sig1 = 0.0
        sig2 = 1e10  # Some large number, representative of infty
    sig1, sig2 = (sig1, sig2)[np.argmin(
        (np.abs(sig1), np.abs(sig2)))], (sig1, sig2)[np.argmax(
            (np.abs(sig1), np.abs(sig2)))]
    du1 = (sig1 * zv + yv)  # Trivial branch
    if min(np.abs(sig1), np.abs(sig2)) == 0.0:
        du1 = du1 / np.linalg.norm(du1)
    al1 = 1.0 / np.sqrt(1.0 + du1.dot(du1))
    du2 = (sig2 * zv + yv)  # Bifurcated Branch
    if min(np.abs(sig1), np.abs(sig2)) == 0.0:
        du2 = du2 / np.linalg.norm(du2)
    al2 = 1.0 / np.sqrt(1.0 + du2.dot(du2))

    others = type(
        '', (), {
            'zv': zv,
            'yv': yv,
            'sig1': sig1,
            'sig2': sig2,
            'biflam': biflam,
            'cpi': cpi
        })()
    return du1, al1, du2, al2, others
Example #29
pauli_X = sparse.COO.from_numpy(np.array([[0, 1], [1, 0]]))

pauli_Y = sparse.COO.from_numpy(np.array([[0, -1j], [1j, 0]]))

phase_gate = lambda phi: sparse.COO.from_numpy(
    np.array([[1, 0], [0, np.exp(1j * phi)]]))

gate_matrices = {
    "H":
    1.0 / 2**0.5 * sparse.COO.from_numpy(np.array([[1, 1], [1, -1]])),
    "T":
    phase_gate(np.pi / 4),
    "S":
    phase_gate(np.pi / 2),
    "CNOT":
    sparse.tensordot(P0, sparse.COO.from_numpy(np.eye(2)), axes=0) +
    sparse.tensordot(P1, pauli_X, axes=0),
    "P0":
    P0,
    "P1":
    P1,
    "X":
    pauli_X,
    "Y":
    pauli_Y,
    "NOT":
    pauli_X
}

clifford_set = {
    k: v
Example #30
    def matrize(self, vec):
        """Take a (sparse) vectorized operator and return it in matrix form.

        """
        return sparse.tensordot(self.basis, vec, ([0], [0]))
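A hedged round-trip sketch combining vectorize (Example #13) with matrize above, using an explicit Pauli basis and its dual in place of the class attributes.

import numpy as np
import sparse
from sparse import COO

basis = COO.from_numpy(np.array([[[0, 1], [1, 0]],
                                 [[0, -1j], [1j, 0]],
                                 [[1, 0], [0, -1]]]))
dual = basis / 2.0                                  # Tr(Λ_j Λ_k) = 2 δ_jk for the Pauli basis
op = COO.from_numpy(np.array([[0.0, 1.0], [1.0, 0.0]]))

vec = sparse.tensordot(dual, op, ([1, 2], [1, 0]))  # vectorize: components of op in the basis
mat = sparse.tensordot(basis, vec, ([0], [0]))      # matrize: back to a 2 x 2 operator
mat = mat.todense() if isinstance(mat, COO) else mat
assert np.allclose(mat, op.todense())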