Example #1
    def make_real_comm_matrix(self, x, y):
        r"""Make the superoperator matrix representation of

        M[X,Y](rho) = (1/2) ( [X rho, Y†] + [Y, rho X†] )

        In the basis {Λ_j}, M[X,Y](rho) = M(x,y)_jk rho_k Λ_j where

        M(x,y)_jk = -2 Im[ (y*)_n F_lnj x_m (D_mkl + iF_mkl) ]

        Λ_j Λ_k = (D_jkl + iF_jkl) Λ_l

        x and y are vectorized representations of the operators X and Y stored
        in sparse format.

        `sparse.tensordot` might decide to return something dense, so the user
        should be aware of that.

        """
        struct_imag = sparse_imag(self.struct)
        # sparse.tensordot fails if both arguments are numpy ndarrays, so we
        # force the intermediate arrays to be sparse
        result_A = sparse.tensordot(np.conj(y), struct_imag, ([0], [1]))
        result_B = sparse.tensordot(x, self.struct, ([0], [0]))
        if isinstance(result_B, np.ndarray):
            result_B = COO.from_numpy(result_B)
        if isinstance(result_A, np.ndarray):
            result_A = COO.from_numpy(result_A)
        result = -2 * sparse_imag(sparse.tensordot(result_A, result_B,
                                                   ([0], [1])))
        # We want our result to be dense, to make things predictable from the
        # outside.
        if isinstance(result, COO):
            result = result.todense()
        return result.real
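For reference, the contraction above can be written densely with np.einsum. This is an illustrative sketch, not part of the original class: `struct` stands for a dense (j, k, l) structure array with Λ_j Λ_k = struct[j, k, l] Λ_l, and `x`, `y` are dense coefficient vectors.

import numpy as np

def make_real_comm_matrix_dense(struct, x, y):
    # M_jk = -2 Im[(y*)_n F_lnj x_m (D_mkl + i F_mkl)], with struct = D + iF
    # and F = struct.imag.
    return -2 * np.einsum('n,lnj,m,mkl->jk',
                          np.conj(y), struct.imag, x, struct).imag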
Example #2
    def batchify_y(self, trees):
        raw = self.targets_factory.transform(trees)
        output = []

        words_batch = 0
        n_cols = len(raw)

        batch = [[] for _ in range(n_cols)]
        for row_idx, tree in enumerate(trees):
            words_batch += len(tree.tokens)

            for col_idx in range(n_cols):
                batch[col_idx].append(raw[col_idx][row_idx])

            if words_batch > self.params.batch_size:
                padded_batch = [
                    pad_sequences(x, padding='post') for x in batch
                ]
                output_batch = []

                for target, padded_target in zip(self.params.targets,
                                                 padded_batch):
                    if target == 'head':
                        output_batch.append(
                            to_categorical(padded_target,
                                           num_classes=padded_target.shape[1]))
                    elif target in ['feats', 'sent']:
                        output_batch.append(padded_target)
                    else:
                        output_batch.append(
                            to_categorical(padded_target,
                                           num_classes=self.targets_factory.
                                           encoders[target].vocab_size))

                batch = [[] for _ in range(n_cols)]
                output.append([COO.from_numpy(a) for a in output_batch])
                words_batch = 0

        if words_batch > 0:
            padded_batch = [pad_sequences(x, padding='post') for x in batch]
            output_batch = []

            for target, padded_target in zip(self.params.targets,
                                             padded_batch):
                if target == 'head':
                    output_batch.append(
                        to_categorical(padded_target,
                                       num_classes=padded_target.shape[1]))
                elif target in ['feats', 'sent']:
                    output_batch.append(padded_target)
                else:
                    output_batch.append(
                        to_categorical(padded_target,
                                       num_classes=self.targets_factory.
                                       encoders[target].vocab_size))

            batch = [[] for _ in range(n_cols)]
            output.append([COO.from_numpy(a) for a in output_batch])

        return output
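A small, self-contained sketch of the core step above, using hypothetical label data; pad_sequences and to_categorical are replaced with plain NumPy so the snippet runs without Keras. Variable-length label sequences are post-padded, one-hot encoded, and stored as a sparse COO array, as in the batches appended to `output`.

import numpy as np
from sparse import COO

labels = [[1, 3, 2], [2, 1]]                     # per-sentence label ids
max_len = max(len(seq) for seq in labels)
padded = np.zeros((len(labels), max_len), dtype=int)
for i, seq in enumerate(labels):
    padded[i, :len(seq)] = seq                   # post-padding with zeros

vocab_size = 4
one_hot = np.eye(vocab_size, dtype=int)[padded]  # shape (2, 3, 4), mostly zeros
sparse_batch = COO.from_numpy(one_hot)           # compact sparse storage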
Example #3
    def make_real_sand_matrix(self, x, y):
        r"""Make the superoperator matrix representation of

        N[X,Y](rho) = (1/2) ( X rho Y† + Y rho X† )

        In the basis {Λ_j}, N[X,Y](rho) = N(x,y)_jk rho_k Λ_j where

        N(x,y)_jk = Re[x_m (D_mlj + iF_mlj) (y*)_n (D_knl + iF_knl) ]

        Λ_j Λ_k = (D_jkl + iF_jkl) Λ_l

        x and y are vectorized representations of the operators X and Y stored
        in sparse format.

        `sparse.tensordot` might decide to return something dense, so the user
        should be aware of that.

        """
        result_A = sparse.tensordot(x, self.struct, ([0], [0]))
        result_B = sparse.tensordot(np.conj(y), self.struct, ([0], [1]))
        # sparse.tensordot fails if both arguments are numpy ndarrays, so we
        # force the intermediate arrays to be sparse
        if isinstance(result_B, np.ndarray):
            result_B = COO.from_numpy(result_B)
        if isinstance(result_A, np.ndarray):
            result_A = COO.from_numpy(result_A)
        result = sparse_real(sparse.tensordot(result_A, result_B, ([0], [1])))
        # We want our result to be dense, to make things predictable from the
        # outside.
        if isinstance(result, COO):
            result = result.todense()
        return result.real
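The sandwich superoperator has an analogous dense counterpart; again an illustrative sketch with a hypothetical dense `struct` array (Λ_j Λ_k = struct[j, k, l] Λ_l) and dense coefficient vectors.

import numpy as np

def make_real_sand_matrix_dense(struct, x, y):
    # N_jk = Re[x_m (D_mlj + i F_mlj) (y*)_n (D_knl + i F_knl)], struct = D + iF.
    return np.einsum('m,mlj,n,knl->jk', x, struct, np.conj(y), struct).real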
Example #4
 def __init__(self, dim, basis=None):
     if basis is None:
         self.dim = dim
         self.basis = sparse.stack(gm.get_basis(dim, sparse=True))
     else:
         self.dim = basis[0].shape[0]
         self.basis = COO.from_numpy(np.array(basis))
     # Diagonal metric (since we're working with what are assumed to be
     # orthogonal but not necessarily normalized basis vectors)
     self.sq_norms = COO.from_numpy(
             sparse.tensordot(
                 self.basis, self.basis,
                 ([1, 2], [2, 1])).to_scipy_sparse().diagonal())
     # Diagonal inverse metric
     sq_norms_inv = COO.from_numpy(1 / self.sq_norms.todense())
     # Dual basis obtained from the original basis by the inverse metric
     self.dual = self.basis * sq_norms_inv[:, None, None]
     # Structure coefficients for the Lie algebra showing how to represent a
     # product of two basis elements as a complex-linear combination of basis
     # elements
     self.struct = sparse.tensordot(sparse.tensordot(self.basis, self.basis,
                                                     ([2], [1])),
                                    self.dual, ([1, 3], [2, 1]))
     if isinstance(self.struct, np.ndarray):
         # Sometimes sparse.tensordot returns numpy arrays. We want to force
         # it to be sparse, since sparse.tensordot fails when passed two
         # numpy arrays.
         self.struct = COO.from_numpy(self.struct)
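A quick numeric check of the structure-constant construction in this constructor, written with plain NumPy for the (hypothetical) unnormalized Pauli basis; `basis`, `dual`, and `struct` mirror the attributes built above.

import numpy as np

basis = np.array([[[1, 0], [0, 1]],      # I
                  [[0, 1], [1, 0]],      # X
                  [[0, -1j], [1j, 0]],   # Y
                  [[1, 0], [0, -1]]])    # Z
sq_norms = np.einsum('jmn,jnm->j', basis, basis).real   # diagonal metric
dual = basis / sq_norms[:, None, None]                  # dual basis
struct = np.einsum('jab,kbc,lca->jkl', basis, basis, dual)

# Defining property: Lambda_j Lambda_k == struct[j, k, l] * Lambda_l
lhs = np.einsum('jab,kbc->jkac', basis, basis)
rhs = np.einsum('jkl,lac->jkac', struct, basis)
assert np.allclose(lhs, rhs)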
Example #5
 def test_dataset_pickle(self):
     ds1 = xr.Dataset(
         data_vars={"a": ("x", COO.from_numpy(np.ones(4)))},
         coords={"y": ("x", COO.from_numpy(np.arange(4)))},
     )
     ds2 = pickle.loads(pickle.dumps(ds1))
     assert_identical(ds1, ds2)
Example #6
 def test_dataarray_pickle(self):
     a1 = xr.DataArray(
         COO.from_numpy(np.ones(4)),
         dims=["x"],
         coords={"y": ("x", COO.from_numpy(np.arange(4)))},
     )
     a2 = pickle.loads(pickle.dumps(a1))
     assert_identical(a1, a2)
Example #7
 def vectorize(self, op):
     sparse_op = COO.from_numpy(op)
     result = sparse.tensordot(self.dual, sparse_op, ([1, 2], [1, 0]))
     if isinstance(result, np.ndarray):
         # Store the result in a sparse container even when tensordot
         # returned a dense ndarray.
         result = COO.from_numpy(result)
     return result
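What `vectorize` computes, in dense form: the coefficients of the operator in the dual basis, v_j = Tr(dual_j · op), so that op = Σ_j v_j Λ_j. The helper names below are illustrative only (the class's own `matrize` is used in later examples).

import numpy as np

def vectorize_dense(dual, op):
    # v_j = sum_{m,n} dual[j, m, n] * op[n, m] = Tr(dual_j @ op)
    return np.einsum('jmn,nm->j', dual, op)

def matrize_dense(basis, vec):
    # Reassemble the operator from its coefficients: op = sum_j v_j * Lambda_j
    return np.einsum('j,jmn->mn', vec, basis)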
Example #8
def test_addition_broadcasting():
    x = random_x((2, 3, 4))
    a = COO.from_numpy(x)

    z = random_x((3, 4))
    c = COO.from_numpy(z)

    assert_eq(x + z, a + c)
Example #9
def test_coord_dtype():
    x = random_x((2, 3, 4))
    s = COO.from_numpy(x)
    assert s.coords.dtype == np.uint8

    x = np.zeros(1000)
    s = COO.from_numpy(x)
    assert s.coords.dtype == np.uint16
Example #10
def test_dot():
    a = random_x((3, 4, 5))
    b = random_x((5, 6))

    sa = COO.from_numpy(a)
    sb = COO.from_numpy(b)

    assert_eq(a.dot(b), sa.dot(sb))
    assert_eq(np.dot(a, b), sparse.dot(sa, sb))
Example #11
def test_addition():
    x = random_x((2, 3, 4))
    a = COO.from_numpy(x)

    y = random_x((2, 3, 4))
    b = COO.from_numpy(y)

    assert_eq(x + y, a + b)
    assert_eq(x - y, a - b)
    assert_eq(-x, -a)
Example #12
def test_stack(shape, axis):
    x = random_x(shape)
    xx = COO.from_numpy(x)
    y = random_x(shape)
    yy = COO.from_numpy(y)
    z = random_x(shape)
    zz = COO.from_numpy(z)

    assert_eq(np.stack([x, y, z], axis=axis),
              sparse.stack([xx, yy, zz], axis=axis))
Example #13
 def vectorize(self, op, dense=False):
     sparse_op = COO.from_numpy(op)
     result = sparse.tensordot(self.dual, sparse_op, ([1, 2], [1, 0]))
     if not dense and isinstance(result, np.ndarray):
         # Store the result in a sparse container even when tensordot
         # returned a dense ndarray.
         result = COO.from_numpy(result)
     elif dense and isinstance(result, sparse.COO):
         result = result.todense()
     return result
Example #14
 def test_dataarray_repr(self):
     a = xr.DataArray(COO.from_numpy(np.ones((4))),
                      dims=['x'],
                      coords={'y': ('x', COO.from_numpy(np.arange(4)))})
     expected = dedent("""\
     <xarray.DataArray (x: 4)>
     <COO: shape=(4,), dtype=float64, nnz=4, fill_value=0.0>
     Coordinates:
         y        (x) int64 ...
     Dimensions without coordinates: x""")
     assert expected == repr(a)
Example #15
 def test_align_outer(self):
     a1 = xr.DataArray(COO.from_numpy(np.arange(4)),
                       dims=['x'],
                       coords={'x': ['a', 'b', 'c', 'd']})
     b1 = xr.DataArray(COO.from_numpy(np.arange(4)),
                       dims=['x'],
                       coords={'x': ['a', 'b', 'd', 'e']})
     a2, b2 = xr.align(a1, b1, join='outer')
     assert isinstance(a2.data, sparse.SparseArray)
     assert isinstance(b2.data, sparse.SparseArray)
     assert np.all(a2.coords['x'].data == ['a', 'b', 'c', 'd'])
     assert np.all(b2.coords['x'].data == ['a', 'b', 'c', 'd'])
Example #16
 def test_align_outer(self):
     a1 = xr.DataArray(COO.from_numpy(np.arange(4)),
                       dims=["x"],
                       coords={"x": ["a", "b", "c", "d"]})
     b1 = xr.DataArray(COO.from_numpy(np.arange(4)),
                       dims=["x"],
                       coords={"x": ["a", "b", "d", "e"]})
     a2, b2 = xr.align(a1, b1, join="outer")
     assert isinstance(a2.data, sparse.SparseArray)
     assert isinstance(b2.data, sparse.SparseArray)
     assert np.all(a2.coords["x"].data == ["a", "b", "c", "d"])
     assert np.all(b2.coords["x"].data == ["a", "b", "c", "d"])
Example #17
 def test_dataset_repr(self):
     ds = xr.Dataset(
         data_vars={"a": ("x", COO.from_numpy(np.ones(4)))},
         coords={"y": ("x", COO.from_numpy(np.arange(4)))},
     )
     expected = dedent("""\
         <xarray.Dataset>
         Dimensions:  (x: 4)
         Coordinates:
             y        (x) int64 <COO: nnz=3, fill_value=0>
         Dimensions without coordinates: x
         Data variables:
             a        (x) float64 <COO: nnz=4, fill_value=0.0>""")
     assert expected == repr(ds)
Example #18
def test_elemwise(func):
    x = random_x((2, 3, 4))
    s = COO.from_numpy(x)

    assert isinstance(func(s), COO)

    assert_eq(func(x), func(s))
Example #19
    def four_element_traces(self) -> COO:
        r"""
        Return all traces of the form
        :math:`\mathrm{tr}(C_i C_j C_k C_l)` as a sparse COO array for
        :math:`i,j,k,l > 0` (i.e. excluding the identity).
        """
        if self._four_element_traces is None:
            # Most of the traces are zero, therefore store the result in a
            # sparse array. For GGM bases, which are inherently sparse, it
            # makes sense for any dimension to also calculate with sparse
            # arrays. For Pauli bases, which are very dense, this is not so
            # efficient but unavoidable for d > 12.
            path = [(0, 1), (0, 1), (0, 1)]
            if self.btype == 'Pauli' and self.d <= 12:
                # For d == 12, the result is ~270 MB.
                self._four_element_traces = COO.from_numpy(
                    oe.contract('iab,jbc,kcd,lda->ijkl',
                                *(self, ) * 4,
                                optimize=path))
            else:
                self._four_element_traces = oe.contract(
                    'iab,jbc,kcd,lda->ijkl',
                    *(self.sparse, ) * 4,
                    backend='sparse',
                    optimize=path)

        return self._four_element_traces
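The same four-element trace contraction, spelled out with plain NumPy for a small hypothetical basis (here the three Pauli matrices), to show what ends up stored in the sparse array:

import numpy as np
from sparse import COO

C = np.array([[[0, 1], [1, 0]],       # sigma_x
              [[0, -1j], [1j, 0]],    # sigma_y
              [[1, 0], [0, -1]]])     # sigma_z
# traces[i, j, k, l] = tr(C_i C_j C_k C_l); most entries vanish.
traces = np.einsum('iab,jbc,kcd,lda->ijkl', C, C, C, C)
sparse_traces = COO.from_numpy(traces)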
Example #20
 def __init__(self, dim):
     self.dim = dim
     self.basis = COO.from_numpy(np.array(gm.get_basis(dim)))
     self.sq_norms = COO.from_numpy(np.einsum('jmn,jnm->j',
                                              self.basis.todense(),
                                              self.basis.todense()))
     sq_norms_inv = COO.from_numpy(1 / self.sq_norms.todense())
     self.dual = self.basis * sq_norms_inv[:, None, None]
     self.struct = sparse.tensordot(sparse.tensordot(self.basis, self.basis,
                                                     ([2], [1])),
                                    self.dual, ([1, 3], [2, 1]))
     if isinstance(self.struct, np.ndarray):
         # Sometimes sparse.tensordot returns numpy arrays. We want to force
         # it to be sparse, since sparse.tensordot fails when passed two
         # numpy arrays.
         self.struct = COO.from_numpy(self.struct)
Example #21
def test_tensordot(a_shape, b_shape, axes):
    a = random_x(a_shape)
    b = random_x(b_shape)

    sa = COO.from_numpy(a)
    sb = COO.from_numpy(b)

    assert_eq(np.tensordot(a, b, axes), sparse.tensordot(sa, sb, axes))

    assert_eq(np.tensordot(a, b, axes), sparse.tensordot(sa, b, axes))

    assert isinstance(sparse.tensordot(sa, b, axes), COO)

    assert_eq(np.tensordot(a, b, axes), sparse.tensordot(a, sb, axes))

    assert isinstance(sparse.tensordot(a, sb, axes), COO)
Example #22
def test_ufunc(ufunc):
    x = random_x((2, 3, 4))
    s = COO.from_numpy(x)

    assert isinstance(ufunc(s), COO)

    assert_eq(ufunc(x), ufunc(s))
Example #23
 def tempcode_ICLR2018(self, arr, levels, issparse=False):
     """
     Perform thermometer encoding of a one-hot encoded 3D data matrix into the
     number of levels specified by the user, as in the ICLR 2018 paper.

     Parameters
     ----------
     arr: np.ndarray (n_samples, n_features, levels)
         one-hot encoded data matrix
     levels: int
         number of thermometer encoding levels;
         must equal the number of levels (last axis) of arr
     issparse: bool
         whether to output a sparse COO matrix; requires the 'sparse' package

     Returns
     -------
     tempcode: np.ndarray or sparse.COO matrix
         thermometer encoded matrix
     """
     if levels != arr.shape[2]:
         raise ValueError('Levels specified by the user do not match the one-hot encoded input')
     tempcode = np.zeros(arr.shape, dtype=np.int8)
     for i in range(levels):
         # Cumulative sum along the level axis: entry i becomes 1 once the
         # one-hot level has been reached.
         tempcode[:, :, i] = np.sum(arr[:, :, :i + 1], axis=2)
     if issparse:
         tempcode = COO.from_numpy(tempcode)
     return tempcode
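A tiny worked example of the thermometer encoding performed above, on hypothetical data: the cumulative sum along the level axis turns a one-hot level into a "filled up to this level" pattern.

import numpy as np

one_hot = np.zeros((1, 2, 4), dtype=np.int8)   # 1 sample, 2 features, 4 levels
one_hot[0, 0, 2] = 1                           # feature 0 sits at level 2
one_hot[0, 1, 0] = 1                           # feature 1 sits at level 0
thermometer = np.cumsum(one_hot, axis=2)
# feature 0 -> [0, 0, 1, 1]; feature 1 -> [1, 1, 1, 1]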
Example #24
    def test_basis_constructor(self):
        """Test the constructor for several failure modes"""

        # Constructing from given elements should check for __getitem__
        with self.assertRaises(TypeError):
            _ = ff.Basis(1)

        # All elements should be either sparse, Qobj, or ndarray
        elems = [
            ff.util.paulis[1],
            COO.from_numpy(ff.util.paulis[3]), [[0, 1], [1, 0]]
        ]
        with self.assertRaises(TypeError):
            _ = ff.Basis(elems)

        # Too many elements
        with self.assertRaises(ValueError):
            _ = ff.Basis(rng.standard_normal((5, 2, 2)))

        # Properly normalized
        self.assertEqual(ff.Basis.pauli(1), ff.Basis(ff.util.paulis))

        # Non traceless elems but traceless basis requested
        with self.assertRaises(ValueError):
            _ = ff.Basis(np.ones((2, 2)), traceless=True)

        # Calling with only the identity should work with traceless true or
        # false
        self.assertEqual(ff.Basis(np.eye(2), traceless=False),
                         ff.Basis(np.eye(2), traceless=True))

        # Constructing a basis from a basis should work
        _ = ff.Basis(ff.Basis.ggm(2)[1:])
Example #25
def test_to_scipy_sparse():
    x = random_x((3, 5))
    s = COO.from_numpy(x)
    a = s.to_scipy_sparse()
    b = scipy.sparse.coo_matrix(x)

    assert_eq(a.data, b.data)
    assert_eq(a.todense(), b.todense())
Example #26
def test_scalar_exponentiation():
    x = random_x((2, 3, 4))
    a = COO.from_numpy(x)

    assert_eq(x**2, a**2)
    assert_eq(x**0.5, a**0.5)

    with pytest.raises((ValueError, ZeroDivisionError)):
        assert_eq(x**-1, a**-1)
Example #27
def check_sparse_hamil_comm(sparse_basis, H, rho):
    h_vec = sparse_basis.vectorize(H)
    rho_vec = sparse_basis.vectorize(rho)
    dense_hamil_comm = -1j * mf.comm(H, rho)
    hamil_comm_matrix = sparse_basis.make_hamil_comm_matrix(h_vec)
    hamil_comm_vec = COO.from_numpy(hamil_comm_matrix @ rho_vec.todense())
    assert_almost_equal(
        np.abs(dense_hamil_comm - sparse_basis.matrize(hamil_comm_vec)).max(),
        0.0, 7)
Example #28
def check_sparse_hamil_comm(sparse_basis, H, rho):
    h_vec = sparse_basis.vectorize(H)
    rho_vec = sparse_basis.vectorize(rho)
    dense_hamil_comm = -1j * mf.comm(H, rho)
    hamil_comm_matrix = sparse_basis.make_hamil_comm_matrix(h_vec)
    hamil_comm_vec = COO.from_numpy(hamil_comm_matrix @ rho_vec.todense())
    assert_almost_equal(np.abs(dense_hamil_comm -
                               sparse_basis.matrize(hamil_comm_vec)).max(),
                        0.0, 7)
Example #29
def test_to_coo():
    a = np.random.random((6, 5, 4, 1))
    a[a > 0.3] = 0.0
    #a = np.zeros((6,5,4,1))
    x = BCOO.from_numpy(a, block_shape=(2, 5, 2, 1))
    from sparse import COO
    y = COO.from_numpy(a)
    z = x.to_coo()
    assert_eq(y, z)
Example #30
def check_sparse_real_sand(sparse_basis, X, rho, Y):
    x_vec = sparse_basis.vectorize(X)
    y_vec = sparse_basis.vectorize(Y)
    rho_vec = sparse_basis.vectorize(rho)
    dense_real_sand = (X @ rho @ Y.conj().T + Y @ rho @ X.conj().T) / 2
    real_sand_matrix = sparse_basis.make_real_sand_matrix(x_vec, y_vec)
    real_sand_vec = COO.from_numpy(real_sand_matrix @ rho_vec.todense())
    assert_almost_equal(np.abs(dense_real_sand -
                               sparse_basis.matrize(real_sand_vec)).max(),
                        0.0, 7)
Example #31
def check_sparse_real_comm(sparse_basis, X, rho, Y):
    x_vec = sparse_basis.vectorize(X)
    y_vec = sparse_basis.vectorize(Y)
    rho_vec = sparse_basis.vectorize(rho)
    dense_real_comm = (mf.comm(X @ rho, Y.conj().T) +
                       mf.comm(Y, rho @ X.conj().T)) / 2
    real_comm_matrix = sparse_basis.make_real_comm_matrix(x_vec, y_vec)
    real_comm_vec = COO.from_numpy(real_comm_matrix @ rho_vec.todense())
    assert_almost_equal(np.abs(dense_real_comm -
                               sparse_basis.matrize(real_comm_vec)).max(),
                        0.0, 7)