Example #1
def hamiltonian_sparse(v, J):
    """
    Calculate the spin Hamiltonian as a sparse array.

    Parameters
    ----------
    v : array-like
        list of frequencies in Hz (in the absence of splitting) for each
        nucleus.
    J : 2D array-like
        matrix of coupling constants. J[m, n] is the coupling constant between
        v[m] and v[n].

    Returns
    -------
    H : sparse.COO
        a sparse spin Hamiltonian.
    """
    nspins = len(v)
    Lz, Lproduct = _so_sparse(nspins)  # noqa
    # On large spin systems, converting v and J to sparse improved speed of
    # sparse.tensordot calls with them.
    # First make sure v and J are a numpy array (required by sparse.COO)
    if not isinstance(v, np.ndarray):
        v = np.array(v)
    if not isinstance(J, np.ndarray):
        J = np.array(J)
    H = sparse.tensordot(sparse.COO(v), Lz, axes=1)
    scalars = 0.5 * sparse.COO(J)
    H += sparse.tensordot(scalars, Lproduct, axes=2)
    return H
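A minimal usage sketch (hypothetical two-spin values; assumes numpy/sparse are imported and the _so_sparse helper from Example #14 is available):

v = [10.0, 20.0]              # frequencies in Hz for two spins
J = [[0.0, 5.0],
     [5.0, 0.0]]              # 5 Hz coupling between spins 0 and 1
H = hamiltonian_sparse(v, J)  # sparse.COO of shape (4, 4)
print(H.todense())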
Example #2
    def fit(self, x, y_multiclass, kernel=poly(1), C=0.001):
        y_multiclass = y_multiclass.reshape(-1).astype(np.float64)
        self.x = sparse.COO(x.astype(np.float64))
        self.m = self.x.shape[0]
        self.y_multiclass = y_multiclass
        self.kernel = kernel
        self.C = C
        ys = [sparse.COO(self.cast(y_multiclass, k)) for k in range(self.n_svm)]
        self.y_matrix = sparse.stack(ys, 0)
        del ys
        for k in range(self.n_svm):
            print(f"training SVM {k} of {self.n_svm}")
            y = self.y_matrix[k, :].reshape((-1, 1))
            yx = y * self.x
            G = kernel(yx, yx)  # Gram matrix

            # small ridge term keeps the Gram matrix numerically positive semidefinite
            compensate = (sparse.eye(self.m) * 1e-7).astype(np.float64)
            G = G + compensate
            objective = cp.Maximize(cp.sum(self.a[k])
                                    - (1 / 2) * cp.quad_form(self.a[k], G.tocsr()))

            if not objective.is_dcp():
                print("Not solvable!")
                assert objective.is_dcp()
            # box constraint plus the dual equality constraint
            constraints = [self.a[k] <= C,
                           cp.sum(cp.multiply(self.a[k], y.todense())) == 0]
            prob = cp.Problem(objective, constraints)
            prob.solve()
            x_pos = x[y.todense()[:, 0] == 1, :]
            x_neg = x[y.todense()[:, 0] == -1, :]
            b_min = -np.min(self.wTx(k, x_pos)) if x_pos.shape[0] != 0 else 0
            b_max = -np.max(self.wTx(k, x_neg)) if x_neg.shape[0] != 0 else 0
            self.b[k, 0] = (1 / 2) * (b_min + b_max)
        self.a_matrix = np.stack([i.value.reshape(-1) for i in self.a], 0)
        self.a_matrix = sparse.COO(self.a_matrix)
Example #3
def test_deterministic_token():
    a = sparse.COO(data=[1, 2, 3], coords=[10, 20, 30], shape=(40, ))
    b = sparse.COO(data=[1, 2, 3], coords=[10, 20, 30], shape=(40, ))
    assert tokenize(a) == tokenize(b)
    # One of these things is not like the other....
    c = sparse.COO(data=[1, 2, 4], coords=[10, 20, 30], shape=(40, ))
    assert tokenize(a) != tokenize(c)
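The determinism under test comes from dask.base.tokenize; a quick check you can run yourself, assuming dask and sparse are installed:

import sparse
from dask.base import tokenize

a = sparse.COO(data=[1, 2, 3], coords=[10, 20, 30], shape=(40,))
print(tokenize(a))  # identical array contents always yield the same token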
Example #4
def test_network_distribution():

    T1 = np.array([[0, 0.5, 0.5], [0, 1, 0], [0, 0, 1]])
    Z2 = np.array([[1, 0], [1, 0], [0, 1]])
    pomdp1 = POMDP([T1], [Z2],
                   input_names=['u1'],
                   state_name='x1',
                   output_name='z1')

    T21 = np.array([[0, 1, 0], [0, 1, 0], [0, 0, 1]])
    T22 = np.array([[0, 0, 1], [0, 1, 0], [0, 0, 1]])
    pomdp2 = POMDP([T21, T22], [np.eye(3)],
                   input_names=['u2'],
                   state_name='x2',
                   output_name='z2')

    network = POMDPNetwork([pomdp1, pomdp2])
    network.add_connection(['z1'], 'u2', lambda z1: {z1})

    # distribution over u1 x1 x2
    D_ux = sparse.COO([[0], [0], [0]], [1], shape=(1, 3, 3))

    D_xz = propagate_network_distribution(network, D_ux)

    D_xz_r = sparse.COO([[1, 2], [1, 2], [0, 1], [1, 2]], [0.5, 0.5],
                        shape=(3, 3, 2, 3))

    np.testing.assert_equal(D_xz.todense(), D_xz_r.todense())
Example #5
def so_numba(nspins):
    sigma_x = np.array([[0, 1 / 2], [1 / 2, 0]])
    sigma_y = np.array([[0, -1j / 2], [1j / 2, 0]])
    sigma_z = np.array([[1 / 2, 0], [0, -1 / 2]])
    unit = np.array([[1, 0], [0, 1]])

    L = np.empty((3, nspins, 2**nspins, 2**nspins),
                 dtype=np.complex128)  # consider other dtype?
    for n in range(nspins):
        Lx_current = 1
        Ly_current = 1
        Lz_current = 1

        for k in range(nspins):
            if k == n:
                Lx_current = np.kron(Lx_current, sigma_x)
                Ly_current = np.kron(Ly_current, sigma_y)
                Lz_current = np.kron(Lz_current, sigma_z)
            else:
                Lx_current = np.kron(Lx_current, unit)
                Ly_current = np.kron(Ly_current, unit)
                Lz_current = np.kron(Lz_current, unit)

        L[0][n] = Lx_current
        L[1][n] = Ly_current
        L[2][n] = Lz_current
    L_T = L.transpose(1, 0, 2, 3)
    Lproduct = np.tensordot(L_T, L, axes=((1, 3), (0, 2))).swapaxes(1, 2)
    Lz_sparse = sparse.COO(L[2])
    Lproduct_sparse = sparse.COO(Lproduct)

    return Lz_sparse, Lproduct_sparse
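A quick shape check for so_numba: for n spins each operator acts on a 2^n-dimensional product space.

Lz, Lproduct = so_numba(2)
print(Lz.shape)        # (2, 4, 4): one Lz operator per spin
print(Lproduct.shape)  # (2, 2, 4, 4): pairwise Lxa*Lxb + Lya*Lyb + Lza*Lzb products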
Example #6
def propagate_distribution(pomdp, D_ux, u_dim=None, x_dim=None):
    r'''evolve input/state distribution D_ux into output distribution D_xz
    D_xz(x, z) = \sum_{u, x'} P(X+ = x, Z = z | U = u, X = x') D_ux(u, x')
    '''

    if u_dim is None:
        u_dim = tuple(range(len(pomdp.M)))

    if x_dim is None:
        x_dim = (len(pomdp.M), )

    if len(u_dim) != len(pomdp.M) or len(x_dim) != 1:
        raise Exception('dimension problem')

    if len(D_ux.shape) <= max(u_dim + x_dim) or len(
            set(u_dim + x_dim)) < len(u_dim + x_dim) or sum(D_ux.data) != 1:
        raise Exception('D_ux not a valid distribution')

    T_uxXz = sparse.stack([sparse.stack([sparse.COO(pomdp.Tuz(m_tuple, z))
                                         for z in range(pomdp.O)],
                                        axis=-1)
                           for m_tuple in pomdp.m_tuple_iter()]) \
             .reshape(pomdp.M + (pomdp.N, pomdp.N, pomdp.O))

    T_zx = sparse.tensordot(D_ux,
                            T_uxXz,
                            axes=(u_dim + x_dim, range(len(pomdp.M) + 1)))

    return sparse.COO(T_zx)
Example #7
            def get_groups(model: "sbmtm",
                           l: int = 0) -> Tuple[da.array, da.array]:
                # rewrite from _sbmtm to use dask
                V = model.get_V()
                D = model.get_D()

                g = model.g
                state = model.state
                state_l = state.project_level(l).copy(overlap=True)
                state_l_edges = state_l.get_edge_blocks()  # labeled half-edges

                # count labeled half-edges, group-memberships
                B = state_l.get_B()

                id_dbw = np.zeros(g.edge_index_range, dtype=np.dtype(int))
                id_wb = np.zeros(g.edge_index_range, dtype=np.dtype(int))
                id_b = np.zeros(g.edge_index_range, dtype=np.dtype(int))
                weig = np.zeros(g.edge_index_range, dtype=np.dtype(int))

                for i, e in enumerate(g.edges()):
                    _, id_b[i] = state_l_edges[e]
                    id_dbw[i] = int(e.source())
                    id_wb[i] = int(e.target()) - D
                    weig[i] = g.ep["count"][e]

                n_bw = sparse.COO(
                    [id_b, id_wb], weig, shape=(B, V), fill_value=0
                )  # number of half-edges incident on word-node w and labeled as word-group tw

                del id_wb

                n_dbw = sparse.COO(
                    [id_dbw, id_b], weig, shape=(D, B), fill_value=0
                )  # number of half-edges incident on document-node d and labeled as word-group td

                del weig
                del id_b
                del id_dbw

                ind_w = np.where(np.sum(n_bw, axis=1) > 0)[0]
                n_bw = n_bw[ind_w, :]
                del ind_w

                ind_w2 = np.where(np.sum(n_dbw, axis=0) > 0)[0]
                n_dbw = n_dbw[:, ind_w2]
                del ind_w2

                # topic-distribution for words P(t_w | w)
                p_w_tw = n_bw / np.sum(n_bw, axis=1).todense()[:, np.newaxis]

                # Mixture of word-groups into documents P(d | t_w)
                p_tw_d = n_dbw / np.sum(n_dbw, axis=0).todense()[np.newaxis, :]

                return (
                    da.array(p_w_tw).map_blocks(lambda b: b.todense(),
                                                dtype=np.dtype(float)),
                    da.array(p_tw_d).map_blocks(lambda b: b.todense(),
                                                dtype=np.dtype(float)),
                )
Example #8
def compute_matrices(resdf, k, matsize, transitionsNT=12, transitionsAA=380):

    AA_mutation = None
    nucleotide_mutation = None
    count = 0
    for idx, row in resdf.iterrows():

        for replicate in range(int(k) + 1):

            replicate = str(replicate)
            #get next job completed

            eventtypes, eventindex, AAeventindex, AAeventypes = row[[
                replicate + 'type', replicate + 'index',
                replicate + 'AAeventindex', replicate + 'AAeventypes'
            ]]
            eventtypes, eventindex, AAeventindex, AAeventypes = [
                list(a)
                for a in [eventtypes, eventindex, AAeventindex, AAeventypes]
            ]
            #save each position to event mats
            col = int(idx[1])
            if len(eventindex) > 0:

                # truth-testing a sparse array is ambiguous, so compare with None
                event = sparseND.COO(
                    coords=(eventindex,
                            [col] * len(eventindex),
                            eventtypes),
                    data=np.ones(len(eventindex)),
                    shape=(matsize[0], matsize[1], transitionsNT))
                if nucleotide_mutation is not None:
                    nucleotide_mutation += event
                else:
                    nucleotide_mutation = event

            if len(AAeventindex) > 0:
                event = sparseND.COO(
                    coords=(AAeventindex,
                            [col] * len(AAeventindex),
                            AAeventypes),
                    data=np.ones(len(AAeventindex)),
                    shape=(matsize[0], matsize[1], transitionsAA))
                if AA_mutation is not None:
                    AA_mutation += event
                else:
                    AA_mutation = event

    return (nucleotide_mutation, AA_mutation)
Example #9
    def test_unpack_attrs(self):
        @numba.njit
        def unpack(c):
            return c.coords, c.data, c.shape, c.fill_value

        c1 = sparse.COO(np.eye(3), fill_value=1)
        coords, data, shape, fill_value = unpack(c1)
        c2 = sparse.COO(coords, data, shape, fill_value=fill_value)
        assert_coo_same_memory(c1, c2)
Example #10
def radial_bins(centerX, centerY, imageSizeX, imageSizeY,
        radius=None, radius_inner=0, n_bins=None, normalize=False, use_sparse=None, dtype=None):
    '''
    Generate antialiased rings
    '''
    if radius is None:
        radius = bounding_radius(centerX, centerY, imageSizeX, imageSizeY)

    if n_bins is None:
        n_bins = int(np.round(radius - radius_inner))

    r, phi = polar_map(centerX, centerY, imageSizeX, imageSizeY)
    r = r.flatten()

    width = (radius - radius_inner) / n_bins
    bin_area = np.pi * (radius**2 - (radius - width)**2)

    if use_sparse is None:
        use_sparse = bin_area / (imageSizeX * imageSizeY) < 0.1

    if use_sparse:
        jjs = np.arange(len(r), dtype=np.int64)

    slices = []
    for r0 in np.linspace(radius_inner, radius - width, n_bins) + width/2:
        diff = np.abs(r - r0)
        # The "0.5" ensures that the bins overlap and sum up to exactly 1
        vals = np.maximum(0, np.minimum(1, width/2 + 0.5 - diff))
        if use_sparse:
            select = vals != 0
            vals = vals[select]
            if normalize:  # Make sure each bin has a sum of 1
                s = vals.sum()
                if not np.isclose(s, 0):
                    vals /= s
            slices.append(sparse.COO(shape=len(r), data=vals.astype(dtype), coords=(jjs[select],)))
        else:
            if normalize:  # Make sure each bin has a sum of 1
                s = vals.sum()
                if not np.isclose(s, 0):
                    vals /= s
            slices.append(vals.reshape((imageSizeY, imageSizeX)).astype(dtype))
    # Patch a singularity at the center
    if radius_inner < 0.5:
        yy = int(np.round(centerY))
        xx = int(np.round(centerX))
        if use_sparse:
            index = yy * imageSizeX + xx
            diff = 1 - slices[0][index] - radius_inner
            patch = sparse.COO(shape=len(r), data=[diff], coords=[index])
            slices[0] += patch
        else:
            slices[0][yy, xx] = 1 - radius_inner
    if use_sparse:
        return sparse.stack(slices).reshape((-1, imageSizeY, imageSizeX))
    else:
        return np.stack(slices)
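A hypothetical call (radial_bins also needs polar_map and bounding_radius from its own module; the geometry below is made up for illustration):

rings = radial_bins(centerX=32, centerY=32, imageSizeX=64, imageSizeY=64,
                    n_bins=8, dtype=np.float32)
print(rings.shape)  # (8, 64, 64): one antialiased ring per bin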
Example #11
def test_add_intercept_sparse():
    X = sparse.COO(np.zeros((4, 4)))
    result = utils.add_intercept(X)
    expected = sparse.COO(np.array([
        [0, 0, 0, 0, 1],
        [0, 0, 0, 0, 1],
        [0, 0, 0, 0, 1],
        [0, 0, 0, 0, 1],
    ], dtype=X.dtype))
    assert (result == expected).all()
Example #12
def test_add_intercept_sparse_dask():
    X = da.from_array(sparse.COO(np.zeros((4, 4))), chunks=(2, 4))
    result = utils.add_intercept(X)
    expected = da.from_array(sparse.COO(np.array([
        [0, 0, 0, 0, 1],
        [0, 0, 0, 0, 1],
        [0, 0, 0, 0, 1],
        [0, 0, 0, 0, 1],
    ], dtype=X.dtype)), chunks=2)
    assert_eq(result, expected)
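Both tests expect add_intercept to append a column of ones; a minimal sketch of an equivalent operation for the plain sparse case (not dask_glm's actual implementation):

import numpy as np
import sparse

def add_intercept_coo(X):
    # append a column of ones so a linear model can fit a bias term
    ones = sparse.COO(np.ones((X.shape[0], 1)))
    return sparse.concatenate([X, ones], axis=1)

print(add_intercept_coo(sparse.COO(np.zeros((4, 4)))).todense())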
Example #13
def convert_ndarray(value):
    if isinstance(value, sparse.SparseArray):
        return value

    if isinstance(value, np.ndarray):
        return sparse.COO(value)

    try:
        return sparse.COO(np.asarray(value))
    except RuntimeError:
        return sparse.stack([convert_ndarray(v) for v in value])
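Usage sketch: dense input is wrapped in sparse.COO, existing sparse arrays pass through unchanged, and the recursive fallback handles values that sparse.COO cannot convert directly (assumes numpy as np and sparse imported):

s = convert_ndarray(np.eye(3))
print(isinstance(s, sparse.SparseArray))  # True
print(convert_ndarray(s) is s)            # True: already sparse, returned as-is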
Example #14
def _so_sparse(nspins):
    """
    Either load a presaved set of spin operators as numpy arrays, or
    calculate them and save them if a presaved set wasn't found.

    Parameters
    ----------
    nspins : int
        the number of spins in the spin system

    Returns
    -------
    (Lz, Lproduct) : a tuple of:
        Lz : 3d sparse.COO array of shape (n, 2^n, 2^n) representing
            [Lz1, Lz2, ...Lzn]
        Lproduct : 4d sparse.COO array of shape (n, n, 2^n, 2^n), representing
            an n x n array (cartesian product) for all combinations of
            Lxa*Lxb + Lya*Lyb + Lza*Lzb, where 1 <= a, b <= n.

    Side Effect
    -----------
    Saves the results as .npz files to the bin directory if they were not
    found there.
    """
    # TODO: once nmrsim demonstrates installing via the PyPI *test* server,
    # need to determine how the saved solutions will be handled. For example,
    # part of the final build may be generating these files then testing.
    # Also, need to consider different users with different system capabilities
    # (e.g. at extreme, Raspberry Pi). Some way to let user select, or select
    # for user?
    filename_Lz = f'Lz{nspins}.npz'
    filename_Lproduct = f'Lproduct{nspins}.npz'
    bin_path = _bin_path()
    path_Lz = bin_path.joinpath(filename_Lz)
    path_Lproduct = bin_path.joinpath(filename_Lproduct)
    try:
        Lz = sparse.load_npz(path_Lz)
        Lproduct = sparse.load_npz(path_Lproduct)
        return Lz, Lproduct
    except FileNotFoundError:
        print(f'no SO file {path_Lz} found.')
        print(f'creating {filename_Lz} and {filename_Lproduct}')
    Lz, Lproduct = _so_dense(nspins)
    Lz_sparse = sparse.COO(Lz)
    Lproduct_sparse = sparse.COO(Lproduct)
    sparse.save_npz(path_Lz, Lz_sparse)
    sparse.save_npz(path_Lproduct, Lproduct_sparse)

    return Lz_sparse, Lproduct_sparse
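Hypothetical first use (note the side effect: the first call computes the operators and saves .npz files to the bin directory; later calls just load them):

Lz, Lproduct = _so_sparse(3)
print(Lz.shape)        # (3, 8, 8)
print(Lproduct.shape)  # (3, 3, 8, 8)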
Example #15
    def __init__(self, r, m):
        if not (np.diff(r) > 0).all():
            raise ValueError('Coordinate should be monotonically increasing.')
        self.r = r.astype(float)
        self.m = m
        n = len(self.r)
        # basis tensors
        self.phi_ijk = basis1d.phi_ijk(r)
        self.phi_di_dj_k = basis1d.phi_di_dj_k(r)
        self.slice_l = sparse.COO(
            [np.arange(n - 1), np.arange(n - 1)],
            np.ones(n - 1),
            shape=(n, n - 1))
        self.slice_last = sparse.COO([(n - 1, )], [1.0], shape=(n, ))
Example #16
    def collect_futures(queue, stopiter, brake, runName,
                        check_interval=10, save_interval=60,
                        nucleotides_only=False):
        AA_mutation = None
        nucleotide_mutation = None
        t0 = time.time()
        runtime = time.time()
        count = 0

        # queue = Queue('outq')
        while not stopiter:
            # get the next completed job
            result = queue.get()
            column, eventdict, AAeventindex, AAeventypes = result
            # save each position to the event matrices
            for pos in [0, 1, 2]:
                col = column + pos
                eventindex = eventdict[pos]['index']
                eventtypes = eventdict[pos]['type']
                if len(eventindex) > 0:
                    # one column coordinate per event; int32 counts as data
                    event = sparseND.COO(
                        coords=(eventindex, np.full(len(eventindex), col), eventtypes),
                        data=np.ones(len(eventindex), dtype=np.int32),
                        shape=(matsize[0], matsize[1], len(transition_dict)))
                    # truth-testing a sparse array is ambiguous, so compare with None
                    if nucleotide_mutation is not None:
                        nucleotide_mutation += event
                    else:
                        nucleotide_mutation = event
            if not nucleotides_only:
                event = sparseND.COO(
                    coords=(AAeventindex, np.full(len(AAeventindex), column), AAeventypes),
                    data=np.ones(len(AAeventindex), dtype=np.int32),
                    shape=(matsize[0], matsize[1], len(transitiondict_AA)))
                if AA_mutation is not None:
                    AA_mutation += event
                else:
                    AA_mutation = event
            count += 1
            if time.time() - runtime > save_interval:
                print('saving', time.time() - t0)
                runtime = time.time()
                save_mats(count, runName, AA_mutation, nucleotide_mutation)
        # finish up: drain the remaining results in one batch
        for result in queue.get(timeout=None, batch=True):
            column, eventdict, AAeventindex, AAeventypes = result
            for pos in [0, 1, 2]:
                col = column + pos
                eventindex = eventdict[pos]['index']
                eventtypes = eventdict[pos]['type']
                if len(eventindex) > 0:
                    event = sparseND.COO(
                        coords=(eventindex, np.full(len(eventindex), col), eventtypes),
                        data=np.ones(len(eventindex), dtype=np.int32),
                        shape=(matsize[0], matsize[1], len(transition_dict)))
                    if nucleotide_mutation is not None:
                        nucleotide_mutation += event
                    else:
                        nucleotide_mutation = event
            if not nucleotides_only:
                event = sparseND.COO(
                    coords=(AAeventindex, np.full(len(AAeventindex), column), AAeventypes),
                    data=np.ones(len(AAeventindex), dtype=np.int32),
                    shape=(matsize[0], matsize[1], len(transitiondict_AA)))
                if AA_mutation is not None:
                    AA_mutation += event
                else:
                    AA_mutation = event
            count += 1
        print('FINAL SAVE!')
        save_mats(count, runName, AA_mutation, nucleotide_mutation)
        print('DONE!')
        brake.set(False)
        return None
Example #17
def test_diag():

    dA = np.random.rand(5, 5)
    sA = sparse.COO(dA)
    diag = diagonal(sA, axis1=0, axis2=1).todense()

    np.testing.assert_almost_equal(diag, np.diagonal(dA))

    dB = np.random.rand(5, 6, 5)
    sB = sparse.COO(dB)
    diag = diagonal(sB, axis1=0, axis2=2)

    np.testing.assert_equal(diag.shape, (6, 5))
    np.testing.assert_almost_equal(diag.todense(),
                                   np.diagonal(dB, axis1=0, axis2=2))
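pydata/sparse ships a comparable sparse.diagonal; a quick interactive check, assuming a recent version of the package:

import numpy as np
import sparse

sA = sparse.COO(np.eye(4))
print(sparse.diagonal(sA).todense())  # [1. 1. 1. 1.]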
Example #18
def generate_mask(cy,
                  cx,
                  sy,
                  sx,
                  filter_center,
                  semiconv_pix,
                  cutoff,
                  mask_shape,
                  dtype,
                  method='subpix'):
    # 1st diffraction order and primary beam don't overlap
    if sx**2 + sy**2 > 4 * np.sum(semiconv_pix**2):
        return empty_mask(mask_shape, dtype=dtype)

    if np.allclose((sy, sx), (0, 0)):
        # The zero order component (0, 0) is special, comes out zero with above code
        m_0 = filter_center / filter_center.sum()
        return sparse.COO(m_0.astype(dtype))

    params = dict(
        cy=cy,
        cx=cx,
        sy=sy,
        sx=sx,
        filter_center=filter_center,
        semiconv_pix=semiconv_pix,
        cutoff=cutoff,
        mask_shape=mask_shape,
    )

    if method == 'subpix':
        mask_positive, mask_negative = mask_pair_subpix(**params)
    elif method == 'shift':
        mask_positive, mask_negative = mask_pair_shift(**params)
    else:
        raise ValueError(
            f"Unsupported method {method}. Allowed are 'subpix' and 'shift'")

    non_zero_positive = mask_positive.sum()
    non_zero_negative = mask_negative.sum()

    if non_zero_positive >= cutoff and non_zero_negative >= cutoff:
        m = (mask_positive / non_zero_positive -
             mask_negative / non_zero_negative) / 2
        return sparse.COO(m.astype(dtype))
    else:
        # Exclude small, missing or unbalanced trotters
        return empty_mask(mask_shape, dtype=dtype)
Example #19
    def __init__(self, dark=None, gain=None, excluded_pixels=None):
        """
        Parameters
        ----------
        dark : np.ndarray
            An N-D array containing a dark frame to subtract from all frames,
            its shape needs to match the signal shape of the dataset.

        gain : np.ndarray
            An N-D array containing a gain map to multiply with each frame,
            its shape needs to match the signal shape of the dataset.

        excluded_pixels : sparse.COO
            A "sparse pydata" COO array containing only entries for pixels
            that should be excluded. The shape needs to match the signal
            shape of the dataset. Can also be anything that is directly
            compatible with the :code:`sparse.COO` constructor, for example a
            "roi-like" numpy array. A :code:`sparse.COO` array can be
            directly constructed from a coordinate array, using
            :code:`sparse.COO(coords=coords, data=1, shape=ds.shape.sig)`
        """
        self._dark = dark
        self._gain = gain
        if excluded_pixels is not None:
            excluded_pixels = sparse.COO(excluded_pixels, prune=True)
        self._excluded_pixels = excluded_pixels
Example #20
def load_sprase_array(file: Path, **kwargs):
    _, files = walk_one_level(file)

    coords = [file for file in files if file.startswith('coords')][0]
    shape = [file for file in files if file.startswith('shape')][0]
    data = [file for file in files if file.startswith('data')][0]

    shape = load_array_from_disk(Path(file, shape), **kwargs)

    if shape.shape == ():
        shape = (int(shape), )
    else:
        shape = tuple(int(i) for i in shape)

    if coords.endswith('.txt'):
        coords = load_dense_array(Path(file, coords), ndmin=len(shape))
    else:
        coords = load_dense_array(Path(file, coords), **kwargs)

    array = sparse.COO(coords=coords,
                       data=load_dense_array(Path(file, data), **kwargs),
                       shape=shape,
                       has_duplicates=False,
                       cache=True)

    return da.from_array(array)
Example #21
    def sparse_dLdW(self, dLdOut):
        '''
        This function compiles the dLdW sparse matrix
        '''
        inp = self.lastInput
        (batchSize, nrInputs) = inp.shape

        nrCol = self.nrOutputs

        row = np.arange(nrInputs * nrCol)

        column = np.repeat(np.arange(nrCol), nrInputs)

        rowCol = np.array([row, column])

        data = np.tile(inp, nrCol).flatten()

        thirdDim = np.repeat(np.arange(batchSize), len(row))

        rowCol = np.tile(rowCol, batchSize)
        coords = np.array([thirdDim, *rowCol])
        x = sparse.COO(coords, data)

        dLdW = sparse.tensordot(x, dLdOut, axes=([0, 2], [0, 1]))
        return dLdW.reshape(self.W.shape)
Example #22
def correct_dot_masks(masks,
                      gain_map,
                      excluded_pixels=None,
                      allow_empty=False):
    mask_shape = masks.shape
    sig_shape = gain_map.shape
    masks = masks.reshape((-1, np.prod(sig_shape)))

    if excluded_pixels is not None:
        if is_sparse(masks):
            result = sparse.DOK(masks)
        else:
            result = masks.copy()
        desc = RepairDescriptor(sig_shape,
                                excluded_pixels=excluded_pixels,
                                allow_empty=allow_empty)
        for e, r, c in zip(desc.exclude_flat, desc.repair_flat,
                           desc.repair_counts):
            result[:, e] = 0
            rep = masks[:, e] / c
            # We have to loop because of sparse.pydata limitations
            for m in range(result.shape[0]):
                for rr in r[:c]:
                    result[m, rr] = result[m, rr] + rep[m]
        if is_sparse(result):
            result = sparse.COO(result)
    else:
        result = masks
    result = result * gain_map.flatten()
    return result.reshape(mask_shape)
Example #23
    def get_coo(self, symm=True):
        """
        Create a COO format sparse representation of the accumulated values. NOTE: as
        scipy.sparse does not support multidimensional sparse arrays, this object comes
        from the pydata "sparse" module.

        :param symm: ensure matrix is symmetric on return
        :return: a sparse.COO matrix
        """
        _coords = [[], [], [], []]
        _data = []
        _m = self.mat
        _inner_indices = [[0, 0], [0, 1], [1, 0], [1, 1]]
        for i, j in _m.keys():
            for k, l in _inner_indices:
                v = _m[i, j][k, l]
                if v != 0:
                    _coords[0].append(i)
                    _coords[1].append(j)
                    _coords[2].append(k)
                    _coords[3].append(l)
                    _data.append(v)

        _m = sparse.COO(_coords, _data, self.shape, has_duplicates=False)

        if symm:
            _m = Sparse4DAccumulator.symm(_m)

        return _m
Example #24
def sparsesort(numbers, coords):
    X, Y = coords

    matrix = np.zeros((X, Y))

    for i in range(numbers):
        rx, ry = random.randint(0, X - 1), random.randint(0, Y - 1)
        matrix[rx, ry] = random.random()

    print()
    print(f'Sorting {numbers} random numbers in a sparse ("zero-filled") {X} x {Y} matrix:')
    print(matrix)
    print()

    sparse_matrix = sparse.COO(matrix)
    where = sparse.where(sparse_matrix)
    smd = sparse_matrix.data

    matrix_dict = dict(zip(smd, zip(*where)))

    matrix_sorted = collections.OrderedDict(sorted(matrix_dict.items()))

    mitems = matrix_sorted.items()

    print('sorting results:')

    k = 1
    for i, j in mitems:
        print('item #{}: {} @ coordinate {}'.format(k, i, j))
        k += 1
    print()
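A sample invocation with its required imports:

import collections
import random

import numpy as np
import sparse

sparsesort(5, (6, 6))  # scatter 5 random values in a 6 x 6 matrix, print them sorted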
Example #25
def test_patch_pixels_only_excluded_pixels(lt_ctx, default_raw,
                                           default_raw_data):
    udf = SumUDF()
    excluded_pixels = sparse.COO(np.zeros((128, 128)))
    corr = CorrectionSet(excluded_pixels=excluded_pixels)
    res = lt_ctx.run_udf(dataset=default_raw, udf=udf, corrections=corr)
    assert np.allclose(res['intensity'], np.sum(default_raw_data, axis=(0, 1)))
Example #26
def cache_tm(nspins):
    """

    Parameters
    ----------
    nspins

    Returns
    -------

    """
    """spin11 test indicates this leads to faster overall simsignals().

    11 spin x 6: 29.6 vs. 35.1 s
    8 spin x 60: 2.2 vs 3.0 s"""
    filename = f'T{nspins}.npz'
    bin_dir = os.path.join(os.path.dirname(__file__), 'bin')
    path = os.path.join(bin_dir, filename)
    try:
        T = sparse.load_npz(path)
        return T
    except FileNotFoundError:
        print(f'creating {filename}')
        T = transition_matrix_dense(nspins)
        T_sparse = sparse.COO(T)
        sparse.save_npz(path, T_sparse)
        return T_sparse
Example #27
def get_mult_function(mt: sparse.COO,
                      gradeList,
                      grades_a=None,
                      grades_b=None,
                      filter_mask=None):
    '''
    Returns a function that implements the mult_table on two input multivectors
    '''
    if (filter_mask is None) and (grades_a is not None) and (grades_b
                                                             is not None):
        # If not specified explicitly, we can specify sparseness by grade
        filter_mask = np.zeros(mt.nnz, dtype=bool)
        k_list, _, m_list = mt.coords
        for i in range(len(filter_mask)):
            if gradeList[k_list[i]] in grades_a:
                if gradeList[m_list[i]] in grades_b:
                    filter_mask[i] = 1
        filter_mask = sparse.COO(coords=mt.coords,
                                 data=filter_mask,
                                 shape=mt.shape)

    if filter_mask is not None:
        # We can pass the sparse filter mask directly
        mt = sparse.where(filter_mask, mt, mt.dtype.type(0))

        return _get_mult_function(mt)

    else:
        return _get_mult_function_runtime_sparse(mt)
Example #28
def phi_ijk(x):
    """
    Get a 3d-tensor \int phi_i(r) phi_j(r) phi_k(r) dr

    Parameters
    ----------
    x: 1d np.array

    Returns
    -------
    phi_ijk: sparse.COO
    """
    size = len(x)
    dx = np.diff(x)

    ind = []
    val = []
    for i in range(size):
        if i < size - 1:
            ind.append((i, i, i))
            val.append(dx[i] * 2.0 / 12.0)

            for index in itertools.product([i], [i, i + 1], [i, i + 1]):
                ind.append(index)
                val.append(dx[i] / 12.0)

        if i > 0:
            ind.append((i, i, i))
            val.append(dx[i - 1] * 2.0 / 12.0)

            for index in itertools.product([i], [i, i - 1], [i, i - 1]):
                ind.append(index)
                val.append(dx[i - 1] / 12.0)

    return sparse.COO(np.array(ind).T, val, shape=(size, ) * 3)
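A quick check on a uniform grid (assumes numpy as np, itertools and sparse are imported, as the function itself requires):

x = np.linspace(0.0, 1.0, 5)
t = phi_ijk(x)
print(t.shape)  # (5, 5, 5)
print(t.nnz)    # only entries near the diagonal are stored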
Example #29
def sparse_template_multi_stack(mask_index, offsetX, offsetY, template,
                                imageSizeX, imageSizeY):
    '''
    Stamp the template in a multi-mask 3D stack at the positions indicated by
    mask_index, offsetY, offsetX. The function clips the bounding box as necessary.
    '''
    num_templates = len(mask_index)
    fy, fx = template.shape
    area = fy * fx
    total_index_size = num_templates * area
    y, x = np.mgrid[0:fy, 0:fx]

    data = np.zeros(total_index_size, dtype=template.dtype)
    coord_mask = np.zeros(total_index_size, dtype=int)
    coord_y = np.zeros(total_index_size, dtype=int)
    coord_x = np.zeros(total_index_size, dtype=int)

    for i in range(len(mask_index)):
        start = i * area
        stop = (i + 1) * area
        data[start:stop] = template.flatten()
        coord_mask[start:stop] = mask_index[i]
        coord_y[start:stop] = y.flatten() + offsetY[i]
        coord_x[start:stop] = x.flatten() + offsetX[i]

    selector = (coord_y >= 0) * (coord_y < imageSizeY) * (coord_x >= 0) * (
        coord_x < imageSizeX)

    return sparse.COO(data=data[selector],
                      coords=(coord_mask[selector], coord_y[selector],
                              coord_x[selector]),
                      shape=(int(max(mask_index) + 1), imageSizeY, imageSizeX))
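A small hypothetical stamp: a 3 x 3 template placed at two offsets in a two-mask stack (assumes numpy as np and sparse imported):

template = np.ones((3, 3))
stack = sparse_template_multi_stack(
    mask_index=[0, 1], offsetX=np.array([2, 10]), offsetY=np.array([4, 8]),
    template=template, imageSizeX=16, imageSizeY=16)
print(stack.shape)  # (2, 16, 16)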
Example #30
def phi_ij(x):
    """
    Get a 2d-tensor \int phi_i(r) phi_j(r) dr

    Parameters
    ----------
    x: 1d np.array

    Returns
    -------
    phi_ij: sparse.COO
    """
    size = len(x)
    dx = np.diff(x)

    ind = []
    val = []
    for i in range(size):
        if i < size - 1:
            ind.append((i, i))
            val.append(dx[i] / 3.0)

            ind.append((i, i + 1))
            val.append(dx[i] / 6.0)

        if i > 0:
            ind.append((i, i))
            val.append(dx[i - 1] / 3.0)

            ind.append((i, i - 1))
            val.append(dx[i - 1] / 6.0)

    return sparse.COO(np.array(ind).T, val, shape=(size, ) * 2)
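On a uniform grid phi_ij reduces to the familiar tridiagonal mass matrix of the linear ("hat") finite-element basis:

x = np.linspace(0.0, 1.0, 5)
m = phi_ij(x)
print(m.todense())  # tridiagonal: interior diagonal 2*dx/3, ends dx/3, off-diagonals dx/6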