Example 1
def cmp_mat(file1, file2):
    """Compare two MATLAB .mat files, ignoring newline differences (LF vs CR-LF)."""

    contents1 = sio.loadmat(file1)
    contents2 = sio.loadmat(file2)

    # Warning: Cannot guarantee that the data will be the 0th element in this array!!

    keylist = list(contents1.keys())

    for key in keylist:
        if key.startswith("test_"):

            data1 = contents1[key]
            data2 = contents2[key]

            if scipy.rank(data1) != scipy.rank(data2):
                print("Data sets in the two files are not of the same rank")
                return False

            if data1.shape != data2.shape:
                print("Data sets in the two files do not have the same shape")
                return False

            #print "About to run compare_data()"
            return compare_data(data1, data2)

    print("No data to compare!")
    return True
Example 2
def indx_3dto1d(idx, sz):
    from scipy import prod, rank
    if rank(idx) == 1:
        idx1 = idx[0]*prod(sz[1:3])+idx[1]*sz[2]+idx[2]
    else:
        idx1 = idx[:, 0]*prod(sz[1:3])+idx[:, 1]*sz[2]+idx[:, 2]
    return idx1
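A quick sanity check of the formula above: for C-ordered data the flat index x*sz[1]*sz[2] + y*sz[2] + z matches numpy.ravel_multi_index. A minimal sketch, with an illustrative volume shape and coordinate, and assuming the scipy in use still exposes prod and rank (they were removed in later releases):

import numpy as np

sz = (4, 5, 6)                    # illustrative volume shape
idx = np.array([2, 3, 1])         # one (x, y, z) coordinate

flat = indx_3dto1d(idx, sz)       # 2*5*6 + 3*6 + 1 = 79
assert flat == np.ravel_multi_index(tuple(idx), sz)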
Example 3
def ndarray_header(A):
    header = ArrayHeader()
    header['type'] = 'ndarray'
    header['rank'] = rank(A)
    header['dims'] = ','.join(map(str,A.shape))
    header['dtype'] = A.dtype.name
    return header
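A usage sketch for the helper above, assuming ArrayHeader behaves like a mapping (it is only ever assigned string keys) and that rank is the scipy alias for the number of dimensions:

import numpy

A = numpy.arange(6).reshape(2, 3)
h = ndarray_header(A)
# expected fields: h['type'] == 'ndarray', h['rank'] == 2,
# h['dims'] == '2,3', h['dtype'] == 'int32' or 'int64' (platform dependent)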
Example 4
def write_array(fid,A,format='binary'):
    """
    Write an ndarray or sparse matrix to a file
    
        format may be one of ['basic','ascii','binary']
        
        basic
            - Most human readable
            - Only works for arrays of rank 1 and 2
            - Does not work for sparse matrices
        ascii
            - Somewhat human readable
            - Works for ndarrays and sparse matrices
        binary
            - Fastest format
            - Works for ndarrays and sparse matrices
            - Data stored in LittleEndian
    """   
        
    if format not in ['basic','ascii','binary']: raise ArrayIOException('Unknown format: ['+format+']')
   
    if not hasattr(fid,'write'): fid = open(fid,'wb')  #accept a filename or an open file object
    
    if type(A) is numpy.ndarray:
        A = numpy.ascontiguousarray(A)  #strided arrays break in write
        if format == 'basic':
            if rank(A) > 2: raise ArrayIOException('basic format only works for rank 1 or 2 arrays')
            write_basic(fid,A)
        else:            
            write_ndarray(fid,A,format)
    elif scipy.sparse.isspmatrix(A):
        if format not in ['ascii','binary']: raise ArrayIOException('sparse matrices require ascii or binary format')
        write_sparse(fid,A,format)
    else:
        try:
            A = numpy.asarray(A)
            if format == 'basic':
                if rank(A) > 2: raise ArrayIOException('basic format only works for rank 1 or 2 arrays')
                write_basic(fid,A)
            else:            
                write_ndarray(fid,A,format)
        except Exception:
            raise ArrayIOException('Unknown data type and unable to convert to numpy.ndarray')
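A short round-trip sketch of the docstring's format rules; it assumes the companion helpers referenced above (write_basic, write_ndarray, write_sparse) are available from the same module, and the file names are illustrative:

import numpy
import scipy.sparse

A = numpy.arange(12).reshape(3, 4)
write_array('dense.out', A, format='basic')      # rank 2, so 'basic' is allowed

S = scipy.sparse.identity(5, format='csr')
write_array('sparse.out', S, format='binary')    # sparse input requires 'ascii' or 'binary'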
Example 5
def triangulate_ncube(vertices, indices):
    n_dims = rank(indices) - 1
    n_cubes = indices.shape[0]
    n_verts = vertices.shape[0]

    if n_dims <= 1:
        #cube mesh only contains edges
        return vertices, indices

    if n_dims > 2:
        raise NotImplementedError('nCube meshes with n > 2 not supported')

    cell_centers = vertices[indices.reshape(n_cubes, -1)].mean(axis=1)

    n_faces = 2 * n_dims * n_cubes

    faces = zeros((n_faces, ) + (2, ) * (n_dims - 1), dtype=indices.dtype)

    for i in range(n_dims):
        s0 = tuple([slice(None)] * (i + 1) + [0] + [slice(None)] * (n_dims - i - 1))
        s1 = tuple([slice(None)] * (i + 1) + [1] + [slice(None)] * (n_dims - i - 1))

        faces[(2 * i + 0) * n_cubes:(2 * i + 1) * n_cubes] = indices[s0]
        faces[(2 * i + 1) * n_cubes:(2 * i + 2) * n_cubes] = indices[s1]

        #this seems to be the correct pattern
        if (n_dims - 1 - i) % 2 == n_dims % 2:
            #flip 1
            temp = faces[(2 * i + 1) * n_cubes:(2 * i + 2) * n_cubes, 0].copy()
            faces[(2 * i + 1) * n_cubes:(2 * i + 2) * n_cubes,
                  0] = faces[(2 * i + 1) * n_cubes:(2 * i + 2) * n_cubes, 1]
            faces[(2 * i + 1) * n_cubes:(2 * i + 2) * n_cubes, 1] = temp
        else:
            #flip 0
            temp = faces[(2 * i + 0) * n_cubes:(2 * i + 1) * n_cubes, 0].copy()
            faces[(2 * i + 0) * n_cubes:(2 * i + 1) * n_cubes,
                  0] = faces[(2 * i + 0) * n_cubes:(2 * i + 1) * n_cubes, 1]
            faces[(2 * i + 0) * n_cubes:(2 * i + 1) * n_cubes, 1] = temp

    face_vertices, face_indices = triangulate_ncube(vertices, faces)

    center_indices = (arange(n_cubes) + face_vertices.shape[0]).reshape((n_cubes, 1))
    center_indices = tile(center_indices, (face_indices.shape[0] // n_cubes, 1))

    new_vertices = vstack((face_vertices, cell_centers))
    new_indices = hstack((center_indices, face_indices))

    return new_vertices, new_indices
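A usage sketch for a single unit square stored in the n-cube layout, where indices[c, i, j] holds the vertex id at corner (i, j) of cube c; it assumes the module-level names used above (rank, zeros, arange, tile, vstack, hstack) resolve to their numpy/scipy equivalents:

from numpy import array

vertices = array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0], [1.0, 1.0]])
indices = array([[[0, 1], [2, 3]]])              # one square, shape (1, 2, 2)

tri_vertices, tri_indices = triangulate_ncube(vertices, indices)
# tri_vertices gains one cell-center vertex (0.5, 0.5);
# tri_indices has shape (4, 3): four triangles fanned around the center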
Example 6
def calcObsRank(H, Fx):

    nstates = np.size(Fx, 0)
    F = np.eye(np.size(Fx, 0))
    Rank = np.zeros(nstates)
    for i in range(nstates):
        Rank = np.vstack((Rank, H.dot(F)))
        print(Rank)
        F = F.dot(Fx)
    Rank = np.vstack((Rank, H.dot(F)))
    print(Rank)
    r = np.rank(Rank)
    return r
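For comparison, the observability matrix [H; H·Fx; H·Fx²; …] can also be stacked directly and its numerical rank taken with numpy.linalg.matrix_rank; this is an alternative sketch, not the code above, and it assumes H and Fx are 2-D numpy arrays:

import numpy as np

def observability_rank(H, Fx):
    # stack H, H @ Fx, ..., H @ Fx^(n-1) and return the numerical matrix rank
    n = Fx.shape[0]
    O = np.vstack([H @ np.linalg.matrix_power(Fx, k) for k in range(n)])
    return np.linalg.matrix_rank(O)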
Example 7
    def test_dense(self):
        sizes = [(2, 2), (3, 3), (5, 1), (1, 5)]
        sizes += [(2, 2, 2), (4, 3, 2), (1, 1, 5), (1, 5, 1), (5, 1, 1)]
        for dims in sizes:
            mats = [arange(prod(dims)).reshape(dims), rand(*dims)]
            for A in mats:
                formats = ['binary', 'ascii']
                if rank(A) <= 2:
                    formats.append('basic')  #use basic when possible
                for format in formats:
                    write_array(filename, A, format=format)

                    B = read_array(filename)
                    assert_almost_equal(A, B, decimal=12)
Example 8
def simplex_array_searchsorted(s, v):
    """Find the row indices (of s) corresponding to the simplices stored 
    in the rows of simplex array v.  The rows of s must be stored in 
    lexicographical order.

    Example
    -------

    >>> from numpy import array
    >>> s = array([[0,1],[0,2],[1,2],[1,3]])
    >>> v = array([[1,2],[0,2]])
    >>> simplex_array_searchsorted(s,v)
    array([2, 1])

    """

    s = asarray(s)
    v = asarray(v)

    if rank(s) != 2 or rank(v) != 2:
        raise ValueError('expected rank 2 arrays')

    if s.shape[1] != v.shape[1]:
        raise ValueError('number of columns must agree')
   
    # compute row indices by sorting both arrays together
    Ns = s.shape[0]
    Nv = v.shape[0]
    
    perm = lexsort(vstack((s,v))[:,::-1].T)
    
    flags = concatenate( (ones(Ns,dtype=int),zeros(Nv,dtype=int)) )
    indices = empty(Ns+Nv, dtype=int)
    indices[perm] = cumsum(flags[perm])
    indices = indices[Ns:].copy()
    indices -= 1

    return indices
Example 9
def make_local_connectivity_tcorr(func_file, clust_mask, outfile, thresh):
    from scipy.sparse import csc_matrix
    from scipy import prod, rank
    from itertools import product

    # index array used to calculate 3D neighbors
    neighbors = np.array(sorted(sorted(sorted([list(x) for x in list(set(product({-1, 0, 1}, repeat=3)))],
                                              key=lambda k: (k[0])), key=lambda k: (k[1])), key=lambda k: (k[2])))

    # read in the mask
    msk = nib.load(clust_mask)
    msz = np.shape(msk.get_data())
    msk_data = msk.get_data()
    # convert the 3D mask array into a 1D vector
    mskdat = np.reshape(msk_data, prod(msz))

    # determine the 1D coordinates of the non-zero elements of the mask
    iv = np.nonzero(mskdat)[0]
    m = len(iv)
    print("%s%s%s" % ('\nTotal non-zero voxels in the mask: ', m, '\n'))
    # read in the fMRI data
    # NOTE the format of x, y, z axes and time dimension after reading
    nim = nib.load(func_file)
    sz = nim.shape

    # reshape the fMRI data to a num_voxels x num_timepoints array
    data = nim.get_data()
    imdat = np.reshape(data, (prod(sz[:3]), sz[3]))

    # construct a sparse matrix from the mask
    msk = csc_matrix((list(range(1, m+1)), (iv, np.zeros(m))), shape=(prod(sz[:-1]), 1))
    sparse_i = []
    sparse_j = []
    sparse_w = []

    negcount = 0

    # loop over all of the voxels in the mask
    print('Voxels:')
    for i in range(0, m):
        if i % 1000 == 0:
            print(str(i))
        # calculate the voxels that are in the 3D neighborhood of the center voxel
        ndx3d = indx_1dto3d(iv[i], sz[:-1])+neighbors
        ndx1d = indx_3dto1d(ndx3d, sz[:-1])

        # restrict the neighborhood using the mask
        ondx1d = msk[ndx1d].todense()
        ndx1d = ndx1d[np.nonzero(ondx1d)[0]]
        ndx1d = ndx1d.flatten()
        ondx1d = np.array(ondx1d[np.nonzero(ondx1d)[0]])
        ondx1d = ondx1d.flatten()

        # determine the index of the seed voxel in the neighborhood
        nndx = np.nonzero(ndx1d == iv[i])[0]
        # extract the timecourses for all of the voxels in the neighborhood
        tc = np.matrix(imdat[ndx1d.astype('int'), :])

        # make sure that the "seed" has variance, if not just skip it
        if np.var(tc[nndx, :]) == 0:
            continue

        # calculate the correlation between all of the voxel TCs
        R = np.corrcoef(tc)
        if rank(R) == 0:
            R = np.reshape(R, (1, 1))

        # extract just the correlations with the seed TC
        R = R[nndx, :].flatten()

        # set NaN values to 0
        R[np.isnan(R)] = 0
        negcount = negcount+sum(R < 0)

        # set values below thresh to 0
        R[R < thresh] = 0

        # determine the non-zero correlations (matrix weights) and add their indices and values to the list
        nzndx = np.nonzero(R)[0]
        if len(nzndx) > 0:
            sparse_i = np.append(sparse_i, ondx1d[nzndx]-1, 0)
            sparse_j = np.append(sparse_j, (ondx1d[nndx]-1)*np.ones(len(nzndx)))
            sparse_w = np.append(sparse_w, R[nzndx], 0)

    # concatenate the i, j and w_ij into a single vector
    outlist = sparse_i
    outlist = np.append(outlist, sparse_j)
    outlist = np.append(outlist, sparse_w)

    # save the output file to a .NPY file
    np.save(outfile, outlist)

    print("%s%s" % ('Finished ', outfile))